Remove non-global Threadpool

This commit is contained in:
Magnus Ulimoen 2021-03-22 19:28:01 +01:00
parent 7aadda3de9
commit ff9a477b67
4 changed files with 31 additions and 113 deletions

View File

@ -9,7 +9,7 @@ edition = "2018"
sbp = { path = "../sbp", features = ["serde1", "fast-float"] }
euler = { path = "../euler", features = ["serde1"] }
hdf5 = "0.7.0"
integrate = { path = "../utils/integrate", features = ["rayon"] }
integrate = { path = "../utils/integrate" }
rayon = "1.3.0"
indicatif = "0.15.0"
structopt = "0.3.14"

View File

@ -1,3 +1,4 @@
use rayon::prelude::*;
use structopt::StructOpt;
use sbp::operators::SbpOperator2d;
@ -20,6 +21,21 @@ struct System {
operators: Vec<Box<dyn SbpOperator2d>>,
}
impl integrate::Integrable for System {
    type State = Vec<euler::Field>;
    type Diff = Vec<euler::Field>;

    /// Copy each source field into the matching destination field,
    /// one rayon task per grid.
    fn assign(dst: &mut Self::State, src: &Self::State) {
        dst.par_iter_mut()
            .zip(src.par_iter())
            .for_each(|(d, s)| euler::Field::assign(d, s));
    }

    /// `dst += scale * diff`, applied field-by-field in parallel.
    fn scaled_add(dst: &mut Self::State, diff: &Self::Diff, scale: Float) {
        dst.par_iter_mut()
            .zip(diff.par_iter())
            .for_each(|(d, k)| euler::Field::scaled_add(d, k, scale));
    }
}
impl System {
fn new(
grids: Vec<grid::Grid>,
@ -68,7 +84,7 @@ impl System {
}
}
fn advance(&mut self, dt: Float, pool: &rayon::ThreadPool) {
fn advance(&mut self, dt: Float) {
let metrics = &self.metrics;
let grids = &self.grids;
let bt = &self.bt;
@ -76,9 +92,9 @@ impl System {
let eb = &mut self.eb;
let operators = &self.operators;
let rhs = move |fut: &mut [euler::Field], prev: &[euler::Field], time: Float| {
let rhs = move |fut: &mut Vec<euler::Field>, prev: &Vec<euler::Field>, time: Float| {
let prev_all = &prev;
pool.scope(|s| {
rayon::scope(|s| {
for (((((((fut, prev), wb), grid), metrics), op), bt), eb) in fut
.iter_mut()
.zip(prev.iter())
@ -101,19 +117,13 @@ impl System {
});
};
let mut k = self
.k
.iter_mut()
.map(|k| k.as_mut_slice())
.collect::<Vec<_>>();
integrate::integrate_multigrid::<integrate::Rk4, euler::Field, _>(
integrate::integrate::<integrate::Rk4, System, _>(
rhs,
&self.fnow,
&mut self.fnext,
&mut self.time,
dt,
&mut k,
pool,
&mut self.k,
);
std::mem::swap(&mut self.fnow, &mut self.fnext);
@ -178,7 +188,7 @@ struct Options {
no_progressbar: bool,
/// Number of simultaneous threads
#[structopt(short, long)]
jobs: Option<Option<usize>>,
jobs: Option<usize>,
/// Name of output file
#[structopt(default_value = "output.hdf", long, short)]
output: std::path::PathBuf,
@ -241,20 +251,13 @@ fn main() {
let ntime = (integration_time / dt).round() as u64;
let pool = {
let builder = rayon::ThreadPoolBuilder::new();
if let Some(j) = opt.jobs {
if let Some(j) = j {
builder.num_threads(j)
} else {
builder
}
} else {
builder.num_threads(1)
}
.build()
.unwrap()
};
{
let nthreads = opt.jobs.unwrap_or(1);
rayon::ThreadPoolBuilder::new()
.num_threads(nthreads)
.build_global()
.unwrap();
}
let should_output = |itime| {
opt.number_of_outputs.map_or(false, |num_out| {
@ -282,7 +285,7 @@ fn main() {
output.add_timestep(itime, &sys.fnow);
}
progressbar.inc(1);
sys.advance(dt, &pool);
sys.advance(dt);
}
progressbar.finish_and_clear();

View File

@ -6,4 +6,3 @@ edition = "2018"
[dependencies]
float = { path = "../float/" }
rayon = { version = "1.5.0", optional = true }

View File

@ -243,90 +243,6 @@ pub fn integrate_embedded_rk<BTableau: EmbeddedButcherTableau, F: Integrable, RH
}
}
#[cfg(feature = "rayon")]
#[allow(clippy::too_many_arguments)]
/// Integrates a multigrid problem, much the same as [`integrate`],
/// using a `rayon` threadpool for parallelisation.
///
/// Note that `rhs` accepts the full system state, and is responsible
/// for computing the full state difference.
/// `rhs` can be a mutable closure, so buffers can be used
/// and mutated inside the closure.
///
/// This function requires the `rayon` feature, and is not callable in
/// a `wasm` context.
pub fn integrate_multigrid<BTableau: ButcherTableau, F: Integrable, RHS>(
    mut rhs: RHS,
    // Per-grid input state at the current `time`; read-only here.
    prev: &[F::State],
    // Per-grid work/output state; overwritten each stage and holds the
    // advanced solution when this function returns.
    fut: &mut [F::State],
    time: &mut Float,
    dt: Float,
    // `k[stage][grid]`: storage for Runge-Kutta stage derivatives.
    // Must provide at least `BTableau::S` stages (asserted below).
    k: &mut [&mut [F::Diff]],
    pool: &rayon::ThreadPool,
) where
    RHS: FnMut(&mut [F::Diff], &[F::State], Float),
    F::State: Send + Sync,
    F::Diff: Send + Sync,
{
    // One iteration per RK stage evaluation, plus a final iteration
    // (`i == BTableau::S`) that combines the stages and returns.
    for i in 0.. {
        let simtime;
        match i {
            0 => {
                // Stage 0: the trial state is a plain copy of `prev`,
                // copied grid-by-grid as spawned tasks on the pool.
                pool.scope(|s| {
                    assert!(k.len() >= BTableau::S);
                    for (prev, fut) in prev.iter().zip(fut.iter_mut()) {
                        s.spawn(move |_| {
                            F::assign(fut, prev);
                        });
                    }
                });
                simtime = *time;
            }
            i if i < BTableau::S => {
                // Intermediate stages:
                //   fut = prev + dt * sum_j A[i-1][j] * k[j]
                // skipping zero tableau entries; one task per grid `ig`.
                pool.scope(|s| {
                    for (ig, (prev, fut)) in prev.iter().zip(fut.iter_mut()).enumerate() {
                        // Reborrow so the spawned closure captures `&k`
                        // rather than moving the whole slice.
                        let k = &k;
                        s.spawn(move |_| {
                            F::assign(fut, prev);
                            for (ik, &a) in BTableau::A[i - 1].iter().enumerate() {
                                if a == 0.0 {
                                    continue;
                                }
                                F::scaled_add(fut, &k[ik][ig], a * dt);
                            }
                        });
                    }
                });
                simtime = *time + dt * BTableau::C[i - 1];
            }
            _ if i == BTableau::S => {
                // Final combination:
                //   fut = prev + dt * sum_j B[j] * k[j]
                pool.scope(|s| {
                    for (ig, (prev, fut)) in prev.iter().zip(fut.iter_mut()).enumerate() {
                        let k = &k;
                        s.spawn(move |_| {
                            F::assign(fut, prev);
                            for (ik, &b) in BTableau::B.iter().enumerate() {
                                if b == 0.0 {
                                    continue;
                                }
                                F::scaled_add(fut, &k[ik][ig], b * dt);
                            }
                        });
                    }
                });
                // Step complete: advance the simulation clock and exit.
                *time += dt;
                return;
            }
            _ => {
                unreachable!();
            }
        };
        // Evaluate the RHS of the ODE into this stage's derivative
        // storage, using the trial state just built in `fut`.
        rhs(&mut k[i], &fut, simtime);
    }
}
#[test]
/// Solving a second order PDE
fn ballistic() {