Merge branch 'feature/distribute'

Replaces the `rayon` implementation with a new approach using a
thread-per-grid architecture, which allows concurrent execution with less
communication with the main thread.
This commit is contained in:
Magnus Ulimoen 2021-10-10 14:45:16 +00:00
commit 86275d2c2e
13 changed files with 1501 additions and 569 deletions

View File

@ -5,6 +5,7 @@ use super::GAMMA;
use ndarray::{azip, ArrayView, ArrayViewMut, Dimension};
pub trait Evaluator<D: Dimension>: Send + Sync {
#[allow(clippy::too_many_arguments)]
fn evaluate(
&self,
t: Float,
@ -55,6 +56,7 @@ pub trait EvaluatorPressure<D: Dimension>: Send + Sync {
rho: ArrayView<Float, D>,
out: ArrayViewMut<Float, D>,
);
#[allow(clippy::too_many_arguments)]
fn p(
&self,
t: Float,
@ -70,6 +72,7 @@ pub trait EvaluatorPressure<D: Dimension>: Send + Sync {
impl<'a, D: Dimension, BP: EvaluatorPressure<D>> Evaluator<D>
for EvaluatorPressureWrapper<'a, D, BP>
{
#[allow(clippy::many_single_char_names)]
fn evaluate(
&self,
t: Float,

View File

@ -301,22 +301,18 @@ impl Field {
pub fn west(&self) -> ArrayView2<Float> {
self.slice(s![.., .., 0])
}
#[allow(unused)]
fn north_mut(&mut self) -> ArrayViewMut2<Float> {
pub fn north_mut(&mut self) -> ArrayViewMut2<Float> {
let ny = self.ny();
self.slice_mut(s![.., ny - 1, ..])
}
#[allow(unused)]
fn south_mut(&mut self) -> ArrayViewMut2<Float> {
pub fn south_mut(&mut self) -> ArrayViewMut2<Float> {
self.slice_mut(s![.., 0, ..])
}
#[allow(unused)]
fn east_mut(&mut self) -> ArrayViewMut2<Float> {
pub fn east_mut(&mut self) -> ArrayViewMut2<Float> {
let nx = self.nx();
self.slice_mut(s![.., .., nx - 1])
}
#[allow(unused)]
fn west_mut(&mut self) -> ArrayViewMut2<Float> {
pub fn west_mut(&mut self) -> ArrayViewMut2<Float> {
self.slice_mut(s![.., .., 0])
}
@ -330,6 +326,7 @@ impl Field {
let (rho, rhou, rhov, e) = self.components_mut();
vortex_param.evaluate(time, x, y, rho, rhou, rhov, e)
}
#[allow(clippy::erasing_op, clippy::identity_op)]
fn iter(&self) -> impl ExactSizeIterator<Item = FieldValue> + '_ {
let n = self.nx() * self.ny();
let slice = self.0.as_slice().unwrap();
@ -462,18 +459,18 @@ impl Diff {
pub fn zeros((ny, nx): (usize, usize)) -> Self {
Self(Array3::zeros((4, ny, nx)))
}
fn north_mut(&mut self) -> ArrayViewMut2<Float> {
pub fn north_mut(&mut self) -> ArrayViewMut2<Float> {
let ny = self.shape()[1];
self.0.slice_mut(s![.., ny - 1, ..])
}
fn south_mut(&mut self) -> ArrayViewMut2<Float> {
pub fn south_mut(&mut self) -> ArrayViewMut2<Float> {
self.0.slice_mut(s![.., 0, ..])
}
fn east_mut(&mut self) -> ArrayViewMut2<Float> {
pub fn east_mut(&mut self) -> ArrayViewMut2<Float> {
let nx = self.shape()[2];
self.0.slice_mut(s![.., .., nx - 1])
}
fn west_mut(&mut self) -> ArrayViewMut2<Float> {
pub fn west_mut(&mut self) -> ArrayViewMut2<Float> {
self.0.slice_mut(s![.., .., 0])
}
}
@ -517,6 +514,59 @@ pub fn RHS_trad(
SAT_characteristics(op, k, y, metrics, boundaries);
}
#[allow(non_snake_case)]
/// Computes the right-hand side of the transformed Euler equations on a
/// curvilinear grid WITHOUT applying any SAT boundary penalties; the caller
/// is responsible for adding boundary terms afterwards.
///
/// `tmp` supplies six scratch fields: the first two hold the transformed
/// fluxes (ehat/fhat), the next two their xi/eta derivatives, and the last
/// two the upwind dissipation terms (only used when `op.upwind()` is Some).
pub fn RHS_no_SAT(
    op: &dyn SbpOperator2d,
    k: &mut Diff,
    y: &Field,
    metrics: &Metrics,
    tmp: &mut (Field, Field, Field, Field, Field, Field),
) {
    // Transformed fluxes in the xi and eta directions.
    let ehat = &mut tmp.0;
    let fhat = &mut tmp.1;
    fluxes((ehat, fhat), y, metrics, &mut tmp.2);
    // Differentiate every conserved component of each flux.
    let dE = &mut tmp.2;
    let dF = &mut tmp.3;
    op.diffxi(ehat.rho(), dE.rho_mut());
    op.diffxi(ehat.rhou(), dE.rhou_mut());
    op.diffxi(ehat.rhov(), dE.rhov_mut());
    op.diffxi(ehat.e(), dE.e_mut());
    op.diffeta(fhat.rho(), dF.rho_mut());
    op.diffeta(fhat.rhou(), dF.rhou_mut());
    op.diffeta(fhat.rhov(), dF.rhov_mut());
    op.diffeta(fhat.e(), dF.e_mut());
    if let Some(diss_op) = op.upwind() {
        let ad_xi = &mut tmp.4;
        let ad_eta = &mut tmp.5;
        // NOTE: tmp.0/tmp.1 are reused here as scratch for the dissipation
        // computation; their flux contents are no longer needed.
        upwind_dissipation(
            &*diss_op,
            (ad_xi, ad_eta),
            y,
            metrics,
            (&mut tmp.0, &mut tmp.1),
        );
        // k = (-dE - dF + dissipation) / detj, elementwise over all
        // four components (detj is broadcast across the component axis).
        azip!((out in &mut k.0,
            eflux in &dE.0,
            fflux in &dF.0,
            ad_xi in &ad_xi.0,
            ad_eta in &ad_eta.0,
            detj in &metrics.detj().broadcast((4, y.ny(), y.nx())).unwrap()) {
            *out = (-eflux - fflux + ad_xi + ad_eta)/detj
        });
    } else {
        // Same as above, without the upwind dissipation terms.
        azip!((out in &mut k.0,
            eflux in &dE.0,
            fflux in &dF.0,
            detj in &metrics.detj().broadcast((4, y.ny(), y.nx())).unwrap()) {
            *out = (-eflux - fflux )/detj
        });
    }
}
#[allow(non_snake_case)]
pub fn RHS_upwind(
op: &dyn SbpOperator2d,
@ -949,104 +999,148 @@ fn vortexify(
#[allow(non_snake_case)]
/// Boundary conditions (SAT)
fn SAT_characteristics(
pub fn SAT_characteristics(
    op: &dyn SbpOperator2d,
    k: &mut Diff,
    y: &Field,
    metrics: &Metrics,
    boundaries: &BoundaryTerms,
) {
    // Apply the characteristic penalty on each of the four edges; each
    // call is handed only the corresponding edge slice of `k`.
    SAT_north(op, k.north_mut(), y, metrics, boundaries.north);
    SAT_south(op, k.south_mut(), y, metrics, boundaries.south);
    SAT_east(op, k.east_mut(), y, metrics, boundaries.east);
    SAT_west(op, k.west_mut(), y, metrics, boundaries.west);
}
/// The four boundary SAT functions collected per direction, allowing a
/// caller to pick the penalty function for an edge by direction.
pub const SAT_FUNCTIONS: Direction<
    fn(&dyn SbpOperator2d, ArrayViewMut2<Float>, &Field, &Metrics, ArrayView2<Float>),
> = Direction {
    north: SAT_north,
    south: SAT_south,
    west: SAT_west,
    east: SAT_east,
};
#[allow(non_snake_case)]
/// Applies the characteristic SAT penalty along the north (j = ny - 1) edge.
pub fn SAT_north(
    op: &dyn SbpOperator2d,
    k: ArrayViewMut2<Float>,
    y: &Field,
    metrics: &Metrics,
    boundary: ArrayView2<Float>,
) {
    let ny = y.ny();
    // Inverse norm-scaled grid spacing; h2 operators place the boundary
    // node half a cell in, hence the (ny - 2) factor.
    let npoints = if op.is_h2eta() { ny - 2 } else { ny - 1 };
    let hinv = npoints as Float / op.heta()[0];
    let edge = s![ny - 1, ..];
    SAT_characteristic(
        k,
        y.north(),
        boundary,
        hinv,
        -1.0, // sign
        1.0,  // tau
        metrics.detj().slice(edge),
        metrics.detj_deta_dx().slice(edge),
        metrics.detj_deta_dy().slice(edge),
    );
}
#[allow(non_snake_case)]
/// Applies the characteristic SAT penalty along the south (j = 0) edge.
pub fn SAT_south(
    op: &dyn SbpOperator2d,
    k: ArrayViewMut2<Float>,
    y: &Field,
    metrics: &Metrics,
    boundary: ArrayView2<Float>,
) {
    let ny = y.ny();
    // Inverse norm-scaled grid spacing; h2 operators place the boundary
    // node half a cell in, hence the (ny - 2) factor.
    let npoints = if op.is_h2eta() { ny - 2 } else { ny - 1 };
    let hinv = npoints as Float / op.heta()[0];
    let edge = s![0, ..];
    SAT_characteristic(
        k,
        y.south(),
        boundary,
        hinv,
        1.0,  // sign
        -1.0, // tau
        metrics.detj().slice(edge),
        metrics.detj_deta_dx().slice(edge),
        metrics.detj_deta_dy().slice(edge),
    );
}
#[allow(non_snake_case)]
pub fn SAT_west(
op: &dyn SbpOperator2d,
k: ArrayViewMut2<Float>,
y: &Field,
metrics: &Metrics,
boundary: ArrayView2<Float>,
) {
let nx = y.nx();
// North boundary
{
let hi = if op.is_h2eta() {
(ny - 2) as Float / op.heta()[0]
} else {
(ny - 1) as Float / op.heta()[0]
};
let sign = -1.0;
let tau = 1.0;
let slice = s![y.ny() - 1, ..];
SAT_characteristic(
k.north_mut(),
y.north(),
boundaries.north,
hi,
sign,
tau,
metrics.detj().slice(slice),
metrics.detj_deta_dx().slice(slice),
metrics.detj_deta_dy().slice(slice),
);
}
// South boundary
{
let hi = if op.is_h2eta() {
(ny - 2) as Float / op.heta()[0]
} else {
(ny - 1) as Float / op.heta()[0]
};
let sign = 1.0;
let tau = -1.0;
let slice = s![0, ..];
SAT_characteristic(
k.south_mut(),
y.south(),
boundaries.south,
hi,
sign,
tau,
metrics.detj().slice(slice),
metrics.detj_deta_dx().slice(slice),
metrics.detj_deta_dy().slice(slice),
);
}
// West Boundary
{
let hi = if op.is_h2xi() {
(nx - 2) as Float / op.hxi()[0]
} else {
(nx - 1) as Float / op.hxi()[0]
};
let sign = 1.0;
let tau = -1.0;
let slice = s![.., 0];
SAT_characteristic(
k.west_mut(),
y.west(),
boundaries.west,
hi,
sign,
tau,
metrics.detj().slice(slice),
metrics.detj_dxi_dx().slice(slice),
metrics.detj_dxi_dy().slice(slice),
);
}
// East Boundary
{
let hi = if op.is_h2xi() {
(nx - 2) as Float / op.hxi()[0]
} else {
(nx - 1) as Float / op.hxi()[0]
};
let sign = -1.0;
let tau = 1.0;
let slice = s![.., y.nx() - 1];
SAT_characteristic(
k.east_mut(),
y.east(),
boundaries.east,
hi,
sign,
tau,
metrics.detj().slice(slice),
metrics.detj_dxi_dx().slice(slice),
metrics.detj_dxi_dy().slice(slice),
);
}
let hi = if op.is_h2xi() {
(nx - 2) as Float / op.hxi()[0]
} else {
(nx - 1) as Float / op.hxi()[0]
};
let sign = 1.0;
let tau = -1.0;
let slice = s![.., 0];
SAT_characteristic(
k,
y.west(),
boundary,
hi,
sign,
tau,
metrics.detj().slice(slice),
metrics.detj_dxi_dx().slice(slice),
metrics.detj_dxi_dy().slice(slice),
);
}
#[allow(non_snake_case)]
/// Applies the characteristic SAT penalty along the east (i = nx - 1) edge.
pub fn SAT_east(
    op: &dyn SbpOperator2d,
    k: ArrayViewMut2<Float>,
    y: &Field,
    metrics: &Metrics,
    boundary: ArrayView2<Float>,
) {
    let nx = y.nx();
    // Inverse norm-scaled grid spacing; h2 operators place the boundary
    // node half a cell in, hence the (nx - 2) factor.
    let npoints = if op.is_h2xi() { nx - 2 } else { nx - 1 };
    let hinv = npoints as Float / op.hxi()[0];
    let edge = s![.., nx - 1];
    SAT_characteristic(
        k,
        y.east(),
        boundary,
        hinv,
        -1.0, // sign
        1.0,  // tau
        metrics.detj().slice(edge),
        metrics.detj_dxi_dx().slice(edge),
        metrics.detj_dxi_dy().slice(edge),
    );
}
#[allow(non_snake_case)]

View File

@ -11,10 +11,15 @@ euler = { path = "../euler", features = ["serde1"] }
hdf5 = "0.7.0"
integrate = { path = "../utils/integrate" }
rayon = "1.3.0"
indicatif = "0.15.0"
indicatif = "0.17.0-beta.1"
ndarray = { version = "0.14.0", features = ["serde"] }
serde = { version = "1.0.115", features = ["derive"] }
json5 = "0.3.0"
indexmap = { version = "1.5.2", features = ["serde-1"] }
argh = "0.1.4"
evalexpr = "6.3.0"
crossbeam-channel = "0.5.0"
crossbeam-utils = "0.8.5"
parking_lot = "0.11.1"
lock_api = "0.4.4"
arrayvec = "0.7.1"

View File

@ -10,6 +10,7 @@ pub enum Evaluator {
}
impl<D: Dimension> euler::eval::Evaluator<D> for Evaluator {
#[allow(clippy::many_single_char_names)]
fn evaluate(
&self,
t: Float,

View File

@ -79,6 +79,7 @@ pub struct EvaluatorConservation {
}
impl<D: Dimension> euler::eval::Evaluator<D> for Evaluator {
#[allow(clippy::many_single_char_names)]
fn evaluate(
&self,
t: Float,
@ -267,6 +268,7 @@ impl<D: Dimension> euler::eval::EvaluatorPressure<D> for EvaluatorPressure {
})
}
#[allow(clippy::many_single_char_names)]
fn p(
&self,
t: Float,

View File

@ -1,155 +0,0 @@
use super::*;
/// Off-loads HDF5 output to a dedicated writer thread.
///
/// Field buffers are ping-ponged between the main thread and the writer:
/// the bounded send channel applies backpressure, and written buffers are
/// returned for reuse to avoid reallocating every timestep.
pub struct OutputThread {
    // Receives reusable field buffers back from the writer thread.
    rx: Option<std::sync::mpsc::Receiver<Vec<euler::Field>>>,
    // Sends (timestep, fields) pairs to the writer thread.
    tx: Option<std::sync::mpsc::SyncSender<(u64, Vec<euler::Field>)>>,
    // Writer thread handle, joined in Drop.
    thread: Option<std::thread::JoinHandle<()>>,
}
impl OutputThread {
    /// Spawns the writer thread operating on `file`.
    pub fn new(file: File) -> Self {
        // Pingpong back and forth a number of Vec<Field> to be used for the
        // output. The sync_channel applies some backpressure
        let (tx_thread, rx) = std::sync::mpsc::channel::<Vec<euler::Field>>();
        let (tx, rx_thread) = std::sync::mpsc::sync_channel::<(u64, Vec<euler::Field>)>(3);
        let thread = std::thread::Builder::new()
            .name("multigrid_output".to_owned())
            .spawn(move || {
                let mut times = Vec::<u64>::new();
                for (ntime, fields) in rx_thread.iter() {
                    // Only write each timestep once, even if queued twice.
                    if !times.contains(&ntime) {
                        file.add_timestep(ntime, fields.as_slice()).unwrap();
                        times.push(ntime);
                    }
                    // Hand the buffers back so the sender can reuse them.
                    tx_thread.send(fields).unwrap();
                }
            })
            .unwrap();
        Self {
            tx: Some(tx),
            rx: Some(rx),
            thread: Some(thread),
        }
    }
    /// Queues `fields` at timestep `ntime` for writing, reusing a returned
    /// buffer when one is available and cloning otherwise.
    pub fn add_timestep(&mut self, ntime: u64, fields: &[euler::Field]) {
        match self.rx.as_ref().unwrap().try_recv() {
            Ok(mut copy_fields) => {
                // A buffer came back from the writer: copy into it in place.
                for (from, to) in fields.iter().zip(copy_fields.iter_mut()) {
                    to.clone_from(&from);
                }
                self.tx
                    .as_ref()
                    .unwrap()
                    .send((ntime, copy_fields))
                    .unwrap();
            }
            Err(std::sync::mpsc::TryRecvError::Empty) => {
                // No buffer available yet: allocate a fresh copy.
                let fields = fields.to_vec();
                self.tx.as_ref().unwrap().send((ntime, fields)).unwrap();
            }
            // Disconnected means the writer thread has died: unrecoverable.
            Err(e) => panic!("{:?}", e),
        };
    }
}
impl Drop for OutputThread {
    fn drop(&mut self) {
        // Dropping the sender closes the channel, ending the writer's loop;
        // then join to make sure all queued output has been flushed.
        let tx = self.tx.take();
        std::mem::drop(tx);
        let thread = self.thread.take().unwrap();
        thread.join().unwrap();
    }
}
/// An HDF5 output file paired with the group name used for each grid.
#[derive(Debug, Clone)]
pub struct File(hdf5::File, Vec<String>);
impl File {
    /// Creates the output file: one group per (name, grid) pair, containing
    /// the static coordinate datasets and time-resizable datasets for the
    /// four conserved variables.
    pub fn create<P: AsRef<std::path::Path>>(
        path: P,
        grids: &[sbp::grid::Grid],
        names: Vec<String>,
    ) -> Result<Self, Box<dyn std::error::Error>> {
        assert_eq!(grids.len(), names.len());
        let file = hdf5::File::create(path.as_ref())?;
        // Shared, growable time axis; every grid group soft-links to it.
        let _tds = file
            .new_dataset::<u64>()
            .resizable(true)
            .chunk((1,))
            .create("t", (0,))?;
        for (name, grid) in names.iter().zip(grids.iter()) {
            let g = file.create_group(name)?;
            g.link_soft("/t", "t").unwrap();
            // Static per-grid coordinate datasets (heavily compressed).
            let add_dim = |name| {
                g.new_dataset::<Float>()
                    .chunk((grid.ny(), grid.nx()))
                    .gzip(9)
                    .create(name, (grid.ny(), grid.nx()))
            };
            let xds = add_dim("x")?;
            xds.write(grid.x())?;
            let yds = add_dim("y")?;
            yds.write(grid.y())?;
            // Conserved variables: resizable along the time axis, chunked
            // one timestep at a time.
            let add_var = |name| {
                g.new_dataset::<Float>()
                    .gzip(3)
                    .shuffle(true)
                    .chunk((1, grid.ny(), grid.nx()))
                    .resizable(true)
                    .create(name, (0, grid.ny(), grid.nx()))
            };
            add_var("rho")?;
            add_var("rhou")?;
            add_var("rhov")?;
            add_var("e")?;
        }
        Ok(Self(file, names))
    }
    /// Appends the state of all grids at timestep `t`, growing the time
    /// axis and every variable dataset by one slot.
    pub fn add_timestep(
        &self,
        t: u64,
        fields: &[euler::Field],
    ) -> Result<(), Box<dyn std::error::Error>> {
        let file = &self.0;
        let tds = file.dataset("t")?;
        let tpos = tds.size();
        tds.resize((tpos + 1,))?;
        tds.write_slice(&[t], ndarray::s![tpos..tpos + 1])?;
        for (groupname, fnow) in self.1.iter().zip(fields.iter()) {
            let g = file.group(groupname)?;
            // Current extent, read off the "rho" dataset (all four
            // variable datasets are resized in lockstep below).
            let (tpos, ny, nx) = {
                let ds = g.dataset("rho")?;
                let shape = ds.shape();
                (shape[0], shape[1], shape[2])
            };
            let rhods = g.dataset("rho")?;
            let rhouds = g.dataset("rhou")?;
            let rhovds = g.dataset("rhov")?;
            let eds = g.dataset("e")?;
            let (rho, rhou, rhov, e) = fnow.components();
            rhods.resize((tpos + 1, ny, nx))?;
            rhods.write_slice(rho, ndarray::s![tpos, .., ..])?;
            rhouds.resize((tpos + 1, ny, nx))?;
            rhouds.write_slice(rhou, ndarray::s![tpos, .., ..])?;
            rhovds.resize((tpos + 1, ny, nx))?;
            rhovds.write_slice(rhov, ndarray::s![tpos, .., ..])?;
            eds.resize((tpos + 1, ny, nx))?;
            eds.write_slice(e, ndarray::s![tpos, .., ..])?;
        }
        Ok(())
    }
}

View File

@ -1,225 +1,17 @@
use argh::FromArgs;
use rayon::prelude::*;
use euler::eval::Evaluator;
use sbp::operators::SbpOperator2d;
use sbp::*;
mod file;
mod eval;
mod input;
mod parsing;
use file::*;
mod eval;
/// All state required to integrate the multi-grid Euler system.
/// The vectors are indexed per grid and kept in lockstep.
struct System {
    // Current solution, one field per grid.
    fnow: Vec<euler::Field>,
    // Next solution; swapped with `fnow` after each step.
    fnext: Vec<euler::Field>,
    // Scratch work buffers, one per grid.
    wb: Vec<euler::WorkBuffers>,
    // Runge-Kutta stage storage (four stages, one Diff per grid each).
    k: [Vec<euler::Diff>; 4],
    grids: Vec<grid::Grid>,
    // Grid metrics derived from `grids` and `operators`.
    metrics: Vec<grid::Metrics>,
    // Boundary characteristics per grid.
    bt: Vec<euler::BoundaryCharacteristics>,
    // Boundary data storage per grid.
    eb: Vec<euler::BoundaryStorage>,
    // Current simulation time.
    time: Float,
    // SBP operator per grid.
    operators: Vec<Box<dyn SbpOperator2d>>,
}
use std::sync::atomic::{AtomicBool, Ordering};
pub(crate) static MULTITHREAD: AtomicBool = AtomicBool::new(false);
impl integrate::Integrable for System {
    type State = Vec<euler::Field>;
    type Diff = Vec<euler::Diff>;
    /// Scaled addition applied per grid (presumably `s += scale * o`,
    /// matching the `scaled_add` naming — defined in `euler::Field`);
    /// parallelized with rayon when the MULTITHREAD flag is set.
    fn scaled_add(s: &mut Self::State, o: &Self::Diff, scale: Float) {
        if MULTITHREAD.load(Ordering::Acquire) {
            s.par_iter_mut()
                .zip(o.par_iter())
                .for_each(|(s, o)| euler::Field::scaled_add(s, o, scale))
        } else {
            s.iter_mut()
                .zip(o.iter())
                .for_each(|(s, o)| euler::Field::scaled_add(s, o, scale))
        }
    }
}
impl System {
    /// Builds a new multi-grid system with zero-initialized fields and
    /// per-grid work buffers, RK stage storage, and metrics derived from
    /// `grids` and `operators`.
    fn new(
        grids: Vec<grid::Grid>,
        bt: Vec<euler::BoundaryCharacteristics>,
        operators: Vec<Box<dyn SbpOperator2d>>,
    ) -> Self {
        let fnow = grids
            .iter()
            .map(|g| euler::Field::new(g.ny(), g.nx()))
            .collect::<Vec<_>>();
        let fnext = fnow.clone();
        let wb = grids
            .iter()
            .map(|g| euler::WorkBuffers::new(g.ny(), g.nx()))
            .collect();
        // Four stage buffers (RK4), each holding one Diff per grid.
        let k = grids
            .iter()
            .map(|g| euler::Diff::zeros((g.ny(), g.nx())))
            .collect::<Vec<_>>();
        let k = [k.clone(), k.clone(), k.clone(), k];
        let metrics = grids
            .iter()
            .zip(&operators)
            .map(|(g, op)| g.metrics(&**op).unwrap())
            .collect::<Vec<_>>();
        let eb = bt
            .iter()
            .zip(&grids)
            .map(|(bt, grid)| euler::BoundaryStorage::new(bt, grid))
            .collect();
        Self {
            fnow,
            fnext,
            k,
            wb,
            grids,
            metrics,
            bt,
            eb,
            time: 0.0,
            operators,
        }
    }
    /// Initializes all fields from the analytic vortex solution at time `t`.
    fn vortex(&mut self, t: Float, vortex_params: &euler::VortexParameters) {
        for (f, g) in self.fnow.iter_mut().zip(&self.grids) {
            f.vortex(g.x(), g.y(), t, &vortex_params);
        }
    }
    /// Advances the solution one RK4 step of size `dt`. The per-grid
    /// right-hand sides are evaluated either sequentially or, when the
    /// MULTITHREAD flag is set, one rayon task per grid.
    fn advance(&mut self, dt: Float) {
        // Re-borrow the fields separately so the closure below can capture
        // immutable and mutable parts without conflicting with `self`.
        let metrics = &self.metrics;
        let grids = &self.grids;
        let bt = &self.bt;
        let wb = &mut self.wb;
        let eb = &mut self.eb;
        let operators = &self.operators;
        let rhs = move |fut: &mut Vec<euler::Diff>, prev: &Vec<euler::Field>, time: Float| {
            // All grids' previous states, needed for inter-grid boundaries.
            let prev_all = &prev;
            if MULTITHREAD.load(Ordering::Acquire) {
                rayon::scope(|s| {
                    for (((((((fut, prev), wb), grid), metrics), op), bt), eb) in fut
                        .iter_mut()
                        .zip(prev.iter())
                        .zip(wb.iter_mut())
                        .zip(grids)
                        .zip(metrics.iter())
                        .zip(operators.iter())
                        .zip(bt.iter())
                        .zip(eb.iter_mut())
                    {
                        s.spawn(move |_| {
                            let bc = euler::boundary_extracts(prev_all, bt, prev, grid, eb, time);
                            // Choose the upwind RHS when the operator
                            // provides a dissipation operator.
                            if op.upwind().is_some() {
                                euler::RHS_upwind(&**op, fut, prev, metrics, &bc, &mut wb.0);
                            } else {
                                euler::RHS_trad(&**op, fut, prev, metrics, &bc, &mut wb.0);
                            }
                        })
                    }
                });
            } else {
                // Sequential variant of the loop above.
                for (((((((fut, prev), wb), grid), metrics), op), bt), eb) in fut
                    .iter_mut()
                    .zip(prev.iter())
                    .zip(wb.iter_mut())
                    .zip(grids)
                    .zip(metrics.iter())
                    .zip(operators.iter())
                    .zip(bt.iter())
                    .zip(eb.iter_mut())
                {
                    let bc = euler::boundary_extracts(prev_all, bt, prev, grid, eb, time);
                    if op.upwind().is_some() {
                        euler::RHS_upwind(&**op, fut, prev, metrics, &bc, &mut wb.0);
                    } else {
                        euler::RHS_trad(&**op, fut, prev, metrics, &bc, &mut wb.0);
                    }
                }
            }
        };
        integrate::integrate::<integrate::Rk4, System, _>(
            rhs,
            &self.fnow,
            &mut self.fnext,
            &mut self.time,
            dt,
            &mut self.k,
        );
        std::mem::swap(&mut self.fnow, &mut self.fnext);
    }
    /// Suggested maximum dt for this problem
    fn max_dt(&self) -> Float {
        // h2 operators have a half-width boundary cell, requiring a
        // smaller CFL constant.
        let is_h2 = self
            .operators
            .iter()
            .any(|op| op.is_h2xi() || op.is_h2eta());
        let c_max = if is_h2 { 0.5 } else { 1.0 };
        let mut max_dt: Float = Float::INFINITY;
        for (field, metrics) in self.fnow.iter().zip(self.metrics.iter()) {
            let nx = field.nx();
            let ny = field.ny();
            let rho = field.rho();
            let rhou = field.rhou();
            let rhov = field.rhov();
            // Maximum contravariant velocities over the grid.
            let mut max_u: Float = 0.0;
            let mut max_v: Float = 0.0;
            for ((((((rho, rhou), rhov), detj_dxi_dx), detj_dxi_dy), detj_deta_dx), detj_deta_dy) in
                rho.iter()
                    .zip(rhou.iter())
                    .zip(rhov.iter())
                    .zip(metrics.detj_dxi_dx())
                    .zip(metrics.detj_dxi_dy())
                    .zip(metrics.detj_deta_dx())
                    .zip(metrics.detj_deta_dy())
            {
                let u = rhou / rho;
                let v = rhov / rho;
                let uhat: Float = detj_dxi_dx * u + detj_dxi_dy * v;
                let vhat: Float = detj_deta_dx * u + detj_deta_dy * v;
                max_u = max_u.max(uhat.abs());
                max_v = max_v.max(vhat.abs());
            }
            // Computational-space grid spacing (unit square reference).
            let dx = 1.0 / nx as Float;
            let dy = 1.0 / ny as Float;
            let c_dt = Float::max(max_u / dx, max_v / dy);
            // The global dt is limited by the most restrictive grid.
            max_dt = Float::min(max_dt, c_max / c_dt);
        }
        max_dt
    }
}
mod system;
#[derive(Debug, FromArgs)]
/// Options for configuring and running the solver
struct CliOptions {
#[argh(positional)]
json: std::path::PathBuf,
/// number of simultaneous threads
#[argh(option, short = 'j')]
jobs: Option<usize>,
/// name of output file
#[argh(
option,
@ -243,6 +35,9 @@ struct CliOptions {
/// in json format
#[argh(switch)]
output_json: bool,
/// distribute the computation on multiple threads
#[argh(switch)]
distribute: bool,
}
#[derive(Default, serde::Serialize)]
@ -272,116 +67,87 @@ fn main() {
return;
}
};
let parsing::RuntimeConfiguration {
names,
grids,
grid_connections,
boundary_conditions,
op: operators,
integration_time,
initial_conditions,
boundary_conditions: _,
} = config.into_runtime();
let mut sys = System::new(grids, grid_connections, operators);
match &initial_conditions {
/*
parsing::InitialConditions::File(f) => {
for grid in &sys.grids {
// Copy initial conditions from file, requires name of field
todo!()
}
}
*/
parsing::InitialConditions::Vortex(vortexparams) => sys.vortex(0.0, &vortexparams),
parsing::InitialConditions::Expressions(expr) => {
let t = 0.0;
for (grid, field) in sys.grids.iter().zip(sys.fnow.iter_mut()) {
// Evaluate the expressions on all variables
let x = grid.x();
let y = grid.y();
let (rho, rhou, rhov, e) = field.components_mut();
(*expr).evaluate(t, x, y, rho, rhou, rhov, e);
}
}
}
let basesystem = system::BaseSystem::new(
names,
grids,
0.0,
operators,
boundary_conditions,
initial_conditions,
opt.output.clone(),
);
let dt = sys.max_dt();
let ntime = (integration_time / dt).round() as u64;
{
let nthreads = opt.jobs.unwrap_or(1);
if nthreads > 1 {
MULTITHREAD.store(true, Ordering::Release);
rayon::ThreadPoolBuilder::new()
.num_threads(nthreads)
.build_global()
.unwrap();
}
}
let should_output = |itime| {
opt.number_of_outputs.map_or(false, |num_out| {
if num_out == 0 {
false
} else {
itime % (std::cmp::max(ntime / (num_out - 1), 1)) == 0
}
})
let mut sys = if opt.distribute {
basesystem.create_distributed()
} else {
basesystem.create()
};
let output = File::create(&opt.output, sys.grids.as_slice(), names).unwrap();
let mut output = OutputThread::new(output);
output.add_timestep(0, &sys.fnow);
let dt = sys.max_dt();
sys.set_dt(dt);
let ntime = (integration_time / dt).round() as u64;
let steps_between_outputs = if let Some(n) = opt.number_of_outputs {
std::cmp::max(n / ntime, 1)
} else {
ntime
};
let progressbar = progressbar(opt.no_progressbar, ntime);
sys.output(0);
if !opt.no_progressbar {
sys.add_progressbar(ntime)
}
let timer = if opt.timings {
if let system::System::MultiThreaded(sys) = &sys {
sys.synchronise()
}
Some(std::time::Instant::now())
} else {
None
};
for itime in 0..ntime {
if should_output(itime) {
output.add_timestep(itime, &sys.fnow);
let mut itime = 0;
while itime < ntime {
let nexttime = (itime + steps_between_outputs).max(ntime);
sys.advance(nexttime - itime);
itime = nexttime;
if itime != ntime {
sys.output(itime);
}
progressbar.inc(1);
sys.advance(dt);
}
progressbar.finish_and_clear();
let timer = timer.map(|timer| {
if let system::System::MultiThreaded(sys) = &sys {
sys.synchronise();
}
timer.elapsed()
});
sys.output(ntime);
let mut outinfo = OutputInformation {
filename: opt.output,
time_elapsed: timer,
..Default::default()
};
if let Some(timer) = timer {
let duration = timer.elapsed();
outinfo.time_elapsed = Some(duration);
if !opt.no_progressbar {
sys.finish_progressbar();
}
output.add_timestep(ntime, &sys.fnow);
if opt.error {
let time = ntime as Float * dt;
let mut e = 0.0;
for ((fmod, grid), op) in sys.fnow.iter().zip(&sys.grids).zip(&sys.operators) {
let mut fvort = fmod.clone();
match &initial_conditions {
parsing::InitialConditions::Vortex(vortexparams) => {
fvort.vortex(grid.x(), grid.y(), time, &vortexparams);
}
parsing::InitialConditions::Expressions(expr) => {
let (rho, rhou, rhov, e) = fvort.components_mut();
expr.as_ref()
.evaluate(time, grid.x(), grid.y(), rho, rhou, rhov, e)
}
}
e += fmod.h2_err(&fvort, &**op);
}
outinfo.error = Some(e);
outinfo.error = Some(sys.error())
}
if opt.output_json {
@ -396,14 +162,10 @@ fn main() {
}
}
fn progressbar(dummy: bool, ntime: u64) -> indicatif::ProgressBar {
if dummy {
indicatif::ProgressBar::hidden()
} else {
let progressbar = indicatif::ProgressBar::new(ntime);
progressbar.with_style(
indicatif::ProgressStyle::default_bar()
.template("{wide_bar:.cyan/blue} {pos}/{len} ({eta})"),
)
}
/// Builds a styled progress bar spanning `ntime` steps.
fn progressbar(ntime: u64) -> indicatif::ProgressBar {
    indicatif::ProgressBar::new(ntime).with_style(
        indicatif::ProgressStyle::default_bar()
            .template("{wide_bar:.cyan/blue} {pos}/{len} ({eta})"),
    )
}

View File

@ -27,7 +27,7 @@ pub enum InitialConditions {
}
#[derive(Clone, Debug)]
pub enum BoundaryConditions {
enum BoundaryConditions {
Vortex(euler::VortexParameters),
Expressions(std::sync::Arc<eval::Evaluator>),
NotNeeded,
@ -36,11 +36,10 @@ pub enum BoundaryConditions {
pub struct RuntimeConfiguration {
pub names: Vec<String>,
pub grids: Vec<sbp::grid::Grid>,
pub grid_connections: Vec<euler::BoundaryCharacteristics>,
pub boundary_conditions: Vec<euler::BoundaryCharacteristics>,
pub op: Vec<Box<dyn SbpOperator2d>>,
pub integration_time: Float,
pub initial_conditions: InitialConditions,
pub boundary_conditions: BoundaryConditions,
}
impl input::Configuration {
@ -165,11 +164,10 @@ impl input::Configuration {
Box::new((matcher(eta), matcher(xi))) as Box<dyn SbpOperator2d>
})
.collect();
let grid_connections = self
let boundary_conditions = self
.grids
.iter()
.enumerate()
.map(|(i, (name, g))| {
.map(|(name, g)| {
let default_bc = default.boundary_conditions.clone().unwrap_or_default();
use input::BoundaryType;
g.boundary_conditions
@ -190,7 +188,7 @@ impl input::Configuration {
name
),
},
Some(BoundaryType::This) => euler::BoundaryCharacteristic::Grid(i),
Some(BoundaryType::This) => euler::BoundaryCharacteristic::This,
Some(BoundaryType::Vortex) => euler::BoundaryCharacteristic::Vortex(
if let BoundaryConditions::Vortex(vortex) = &boundary_conditions {
vortex.clone()
@ -227,11 +225,10 @@ impl input::Configuration {
RuntimeConfiguration {
names,
grids,
grid_connections,
boundary_conditions,
op,
integration_time: self.integration_time,
initial_conditions,
boundary_conditions,
}
}
}

1192
multigrid/src/system.rs Normal file

File diff suppressed because it is too large Load Diff

View File

@ -12,7 +12,7 @@ serde = { version = "1.0.115", optional = true, default-features = false, featur
num-traits = "0.2.14"
float = { path = "../utils/float" }
constmatrix = { path = "../utils/constmatrix" }
core_simd = { git = "https://github.com/rust-lang/stdsimd" }
core_simd = { git = "https://github.com/rust-lang/portable-simd" }
[features]
# Use f32 as precision, default is f64

View File

@ -281,6 +281,7 @@ pub(crate) fn diff_op_2d_fallback<const M: usize, const N: usize, const D: usize
#[inline(always)]
/// 2D diff when first axis is contiguous
#[allow(unused)]
#[allow(clippy::assign_op_pattern)]
pub(crate) fn diff_op_2d_sliceable_y<const M: usize, const N: usize, const D: usize>(
matrix: &BlockMatrix<Float, M, N, D>,
optype: OperatorType,
@ -392,7 +393,6 @@ pub(crate) fn diff_op_2d_sliceable_y_simd<const M: usize, const N: usize, const
};
let idx = 1.0 / dx;
use core_simd::Vector;
#[cfg(not(feature = "f32"))]
type SimdT = core_simd::f64x8;
#[cfg(feature = "f32")]

View File

@ -53,6 +53,28 @@ impl<T> Direction<T> {
east: (self.east, other.east),
}
}
/// Flips all directions through the origin: north and south swap,
/// and east and west swap.
pub fn opposite(self) -> Self {
    Self {
        north: self.south,
        south: self.north,
        east: self.west,
        west: self.east,
    }
}
/// Creates a `Direction` holding the same value `t` for all four
/// directions (requires `T: Copy`).
pub fn splat(t: T) -> Self
where
    T: Copy,
{
    Self {
        north: t,
        south: t,
        east: t,
        west: t,
    }
}
}
impl<T> Direction<Option<T>> {
@ -96,6 +118,15 @@ impl<T> Direction<T> {
}
}
impl Direction<bool> {
    /// Returns `true` if all four directions are `true`.
    pub fn all(&self) -> bool {
        self.north && self.south && self.east && self.west
    }
    /// Returns `true` if at least one direction is `true`.
    pub fn any(&self) -> bool {
        self.north || self.south || self.east || self.west
    }
}
/// Linearly spaced parameters, apart from the boundaries which
/// only have a distance of `h/2` from the boundary
pub fn h2linspace(start: Float, end: Float, n: usize) -> ndarray::Array1<Float> {

View File

@ -215,7 +215,7 @@ pub fn integrate<BTableau: ButcherTableau, F: Integrable, RHS>(
}
};
rhs(&mut k[i], &fut, simtime);
rhs(&mut k[i], fut, simtime);
}
}