IPPL (Independent Parallel Particle Layer)
IPPL
Loading...
Searching...
No Matches
BumponTailInstabilityManager.h
Go to the documentation of this file.
1#ifndef IPPL_BUMPON_TAIL_INSTABILITY_MANAGER_H
2#define IPPL_BUMPON_TAIL_INSTABILITY_MANAGER_H
3
4#include <memory>
5
6#include "AlpineManager.h"
7#include "FieldContainer.hpp"
8#include "FieldSolver.hpp"
9#include "LoadBalancer.hpp"
10#include "Manager/BaseManager.h"
11#include "ParticleContainer.hpp"
12#include "Random/Distribution.h"
15#include "Random/Randn.h"
17
19
20constexpr bool EnablePhaseDump = false;
21
22// define functions used in sampling particles
24 struct CDF {
25 KOKKOS_INLINE_FUNCTION double operator()(double x, unsigned int d,
26 const double* params_p) const {
27 if (d == Dim - 1)
28 return x
29 + (params_p[d * 2 + 0] / params_p[d * 2 + 1])
30 * Kokkos::sin(params_p[d * 2 + 1] * x);
31 else
33 }
34 };
35
36 struct PDF {
37 KOKKOS_INLINE_FUNCTION double operator()(double x, unsigned int d,
38 double const* params_p) const {
39 if (d == Dim - 1)
40 return (1.0 + params_p[d * 2 + 0] * Kokkos::cos(params_p[d * 2 + 1] * x));
41 else
43 }
44 };
45
46 struct Estimate {
47 KOKKOS_INLINE_FUNCTION double operator()(double u, unsigned int d,
48 double const* params_p) const {
49 return u + params_p[d] * 0.;
50 }
51 };
52};
53
54template <typename T, unsigned Dim>
56public:
61
62 BumponTailInstabilityManager(size_type totalP_, int nt_, Vector_t<int, Dim>& nr_, double lbt_,
63 std::string& solver_, std::string& stepMethod_)
64 : AlpineManager<T, Dim>(totalP_, nt_, nr_, lbt_, solver_, stepMethod_) {
65 phase_m = std::make_shared<PhaseDump>();
66 }
67
69
    // Phase-space diagnostic helper (defined further below); allocated in the
    // constructor, used only when EnablePhaseDump is true.
    struct PhaseDump;

private:
    std::shared_ptr<PhaseDump> phase_m;

private:
    double sigma_m;    // std. deviation of the Gaussian velocity sampling
    double muBulk_m;   // mean of the last velocity component for the bulk population
    double muBeam_m;   // mean of the last velocity component for the beam population
    double epsilon_m;  // beam fraction: 1 - epsilon of the particles form the bulk
    double delta_m;    // amplitude of the spatial density perturbation (fed to CDF/PDF)
82public:
    /*!
     * Set up the simulation before the time loop: pick the physics parameters
     * for the selected test case, build mesh/field/particle containers, the
     * field solver and the load balancer, then run a warm-up solve and the
     * initial scatter -> solve -> gather plus the t = 0 dump.
     */
    void pre_run() override {
        Inform m("Pre Run");

        const double pi = Kokkos::numbers::pi_v<T>;

        // This setup requires periodic boundaries; refuse the open solver.
        if (this->solver_m == "OPEN") {
            throw IpplException("BumpOnTailInstability",
                                "Open boundaries solver incompatible with this simulation!");
        }

        for (unsigned i = 0; i < Dim; i++) {
            this->domain_m[i] = ippl::Index(this->nr_m[i]);
        }

        // Allow domain decomposition along every dimension.
        this->decomp_m.fill(true);

        if (std::strcmp(TestName, "TwoStreamInstability") == 0) {
            // Parameters for two stream instability as in
            // https://www.frontiersin.org/articles/10.3389/fphy.2018.00105/full
            this->kw_m = 0.5;
            sigma_m    = 0.1;
            epsilon_m  = 0.5;
            muBulk_m   = -pi / 2.0;
            muBeam_m   = pi / 2.0;
            delta_m    = 0.01;
        } else if (std::strcmp(TestName, "BumponTailInstability") == 0) {
            this->kw_m = 0.21;
            sigma_m    = 1.0 / std::sqrt(2.0);
            epsilon_m  = 0.1;
            muBulk_m   = 0.0;
            muBeam_m   = 4.0;
            delta_m    = 0.01;
        } else {
            // Default value is two stream instability
            this->kw_m = 0.5;
            sigma_m    = 0.1;
            epsilon_m  = 0.5;
            muBulk_m   = -pi / 2.0;
            muBeam_m   = pi / 2.0;
            delta_m    = 0.01;
        }

        // Box [0, 2*pi/kw]^Dim: one perturbation wavelength per dimension.
        // NOTE(review): rmin_m(0.0) presumably fills every component with 0 —
        // confirm the Vector call-operator semantics.
        this->rmin_m(0.0);
        this->rmax_m = 2 * pi / this->kw_m;
        this->hr_m   = this->rmax_m / this->nr_m;
        // Q = -\int\int f dx dv  (negative product of the box extents)
        this->Q_m =
            std::reduce(this->rmax_m.begin(), this->rmax_m.end(), -1., std::multiplies<double>());
        this->origin_m = this->rmin_m;
        // Time step: at most 0.05, and no larger than half the smallest cell.
        this->dt_m = std::min(.05, 0.5 * *std::min_element(this->hr_m.begin(), this->hr_m.end()));
        this->it_m = 0;
        this->time_m = 0.0;

        m << "Discretization:" << endl
          << "nt " << this->nt_m << " Np= " << this->totalP_m << " grid = " << this->nr_m << endl;

        this->isAllPeriodic_m = true;

        this->setFieldContainer(std::make_shared<FieldContainer_t>(
            this->hr_m, this->rmin_m, this->rmax_m, this->decomp_m, this->domain_m, this->origin_m,
            this->isAllPeriodic_m));

        this->setParticleContainer(std::make_shared<ParticleContainer_t>(
            this->fcontainer_m->getMesh(), this->fcontainer_m->getFL()));

        this->fcontainer_m->initializeFields(this->solver_m);

        this->setFieldSolver(std::make_shared<FieldSolver_t>(
            this->solver_m, &this->fcontainer_m->getRho(), &this->fcontainer_m->getE(),
            &this->fcontainer_m->getPhi()));

        this->fsolver_m->initSolver();

        this->setLoadBalancer(std::make_shared<LoadBalancer_t>(
            this->lbt_m, this->fcontainer_m, this->pcontainer_m, this->fsolver_m));

        // NOTE(review): particle creation/sampling is expected to be invoked
        // around here (a line appears to be elided in this listing) — confirm
        // against the full source.

        // The phase-space dump only works serially; abort on multiple ranks.
        if constexpr (EnablePhaseDump) {
            if (ippl::Comm->size() != 1) {
                m << "Phase dump only supported on one rank" << endl;
                ippl::Comm->abort();
            }
            // Square 2D (x, v) grid sized by the largest axis of the setup.
            phase_m->initialize(*std::max_element(this->nr_m.begin(), this->nr_m.end()),
                                *std::max_element(this->rmax_m.begin(), this->rmax_m.end()));
        }

        // Warm-up solve on a zeroed rho field (kept out of the "solve" timer).
        static IpplTimings::TimerRef DummySolveTimer = IpplTimings::getTimer("solveWarmup");
        IpplTimings::startTimer(DummySolveTimer);

        this->fcontainer_m->getRho() = 0.0;

        this->fsolver_m->runSolver();

        IpplTimings::stopTimer(DummySolveTimer);

        // Initial scatter -> solve -> gather so particles start with a
        // consistent E field, then write the t = 0 diagnostics.
        this->par2grid();

        static IpplTimings::TimerRef SolveTimer = IpplTimings::getTimer("solve");
        IpplTimings::startTimer(SolveTimer);

        this->fsolver_m->runSolver();

        IpplTimings::stopTimer(SolveTimer);

        this->grid2par();

        this->dump();

        m << "Done";
    }
194
        // Body of the particle-initialization routine (its signature is elided
        // in this listing): samples positions from the perturbed density via
        // inverse-transform sampling and velocities from two Gaussians (bulk
        // and beam populations), optionally pre-balancing the domain first.
        Inform m("Initialize Particles");

        auto* mesh = &this->fcontainer_m->getMesh();
        auto* FL = &this->fcontainer_m->getFL();
        // NOTE(review): the distribution alias is truncated in this listing;
        // presumably an ippl::random::Distribution over the CDF/PDF/Estimate
        // functors above — confirm against the full source.
        using DistR_t =
        double parR[2 * Dim];
        // Per-dimension parameter pairs {delta, kw} consumed by CDF/PDF.
        for (unsigned int i = 0; i < Dim; i++) {
            parR[i * 2] = this->delta_m;
            parR[i * 2 + 1] = this->kw_m[i];
        }
        DistR_t distR(parR);

        Vector_t<double, Dim> hr = this->hr_m;
        Vector_t<double, Dim> origin = this->origin_m;
        static IpplTimings::TimerRef domainDecomposition = IpplTimings::getTimer("loadBalance");
        // Pre-repartition against the analytic density before any particles
        // exist; only meaningful with load balancing enabled and > 1 rank.
        if ((this->lbt_m != 1.0) && (ippl::Comm->size() > 1)) {
            m << "Starting first repartition" << endl;
            IpplTimings::startTimer(domainDecomposition);
            this->isFirstRepartition_m = true;
            const ippl::NDIndex<Dim>& lDom = FL->getLocalNDIndex();
            const int nghost = this->fcontainer_m->getRho().getNghost();
            auto rhoview = this->fcontainer_m->getRho().getView();

            using index_array_type = typename ippl::RangePolicy<Dim>::index_array_type;
            // NOTE(review): the enclosing ippl::parallel_for( call line is
            // elided in this listing.
                "Assign initial rho based on PDF",
                this->fcontainer_m->getRho().getFieldRangePolicy(),
                KOKKOS_LAMBDA(const index_array_type& args) {
                    // local to global index conversion
                    Vector_t<double, Dim> xvec = (args + lDom.first() - nghost + 0.5) * hr + origin;

                    // ippl::apply accesses the view at the given indices and obtains a
                    // reference; see src/Expression/IpplOperations.h
                    ippl::apply(rhoview, args) = distR.getFullPdf(xvec);
                });

            Kokkos::fence();

            this->loadbalancer_m->initializeORB(FL, mesh);
            this->loadbalancer_m->repartition(FL, mesh, this->isFirstRepartition_m);
            IpplTimings::stopTimer(domainDecomposition);
        }

        static IpplTimings::TimerRef particleCreation = IpplTimings::getTimer("particlesCreation");
        IpplTimings::startTimer(particleCreation);

        // NOTE(review): the declaration of rlayout (region layout consumed by
        // the sampler below) is elided in this listing.

        size_type totalP = this->totalP_m;
        // Fixed seed offset per rank for reproducible, rank-independent streams.
        int seed = 42;
        Kokkos::Random_XorShift64_Pool<> rand_pool64((size_type)(seed + 100 * ippl::Comm->rank()));

        using samplingR_t =
            ippl::random::InverseTransformSampling<double, Dim, Kokkos::DefaultExecutionSpace,
                                                   DistR_t>;

        Vector_t<double, Dim> rmin = this->rmin_m;
        Vector_t<double, Dim> rmax = this->rmax_m;
        samplingR_t samplingR(distR, rmax, rmin, rlayout, totalP);
        size_type nlocal = samplingR.getLocalSamplesNum();

        // Split the local particle budget into bulk and beam populations.
        double factorVelBulk = 1.0 - epsilon_m;
        double factorVelBeam = 1.0 - factorVelBulk;
        size_type nlocBulk = (size_type)(factorVelBulk * nlocal);
        size_type nlocBeam = (size_type)(factorVelBeam * nlocal);
        nlocal = nlocBulk + nlocBeam;

        // Truncation above may lose particles globally; give one extra
        // particle to the first `rest` ranks so the global count hits totalP.
        int rank = ippl::Comm->rank();
        size_type nglobal = 0;
        MPI_Allreduce(&nlocal, &nglobal, 1, MPI_UNSIGNED_LONG, MPI_SUM,
                      ippl::Comm->getCommunicator());
        int rest = (int)(totalP - nglobal);
        if (rank < rest) {
            ++nlocal;
        }

        this->pcontainer_m->create(nlocal);

        // Positions: inverse-transform sampling of the perturbed density.
        view_type* R = &(this->pcontainer_m->R.getView());
        samplingR.generate(*R, rand_pool64);

        view_type* P = &(this->pcontainer_m->P.getView());

        // Velocities: Gaussian with sigma_m in every component; only the mean
        // of the last component distinguishes bulk from beam.
        double mu[Dim];
        double sd[Dim];
        for (unsigned int i = 0; i < Dim; i++) {
            mu[i] = 0.0;
            sd[i] = sigma_m;
        }
        // sample first nlocBulk with muBulk as mean velocity
        mu[Dim - 1] = muBulk_m;
        Kokkos::parallel_for(Kokkos::RangePolicy<int>(0, nlocBulk),
                             ippl::random::randn<double, Dim>(*P, rand_pool64, mu, sd));

        // sample remaining with muBeam as mean velocity
        mu[Dim - 1] = muBeam_m;
        Kokkos::parallel_for(Kokkos::RangePolicy<int>(nlocBulk, nlocal),
                             ippl::random::randn<double, Dim>(*P, rand_pool64, mu, sd));

        Kokkos::fence();
        ippl::Comm->barrier();

        IpplTimings::stopTimer(particleCreation);

        // Equal charge per particle; Q_m is the (negative) total charge.
        this->pcontainer_m->q = this->Q_m / totalP;
        m << "particles created and initial conditions assigned " << endl;
    }
305
306 void advance() override {
307 if (this->stepMethod_m == "LeapFrog") {
308 LeapFrogStep();
309 } else {
310 throw IpplException(TestName, "Step method is not set/recognized!");
311 }
312 }
313
        // Body of LeapFrogStep (its signature is elided in this listing):
        // kick-drift-kick update of all particles plus load balance, charge
        // scatter, field solve and field gather.
        //
        // LeapFrog time stepping https://en.wikipedia.org/wiki/Leapfrog_integration
        // Here, we assume a constant charge-to-mass ratio of -1 for
        // all the particles hence eliminating the need to store mass as
        // an attribute
        static IpplTimings::TimerRef PTimer = IpplTimings::getTimer("pushVelocity");
        static IpplTimings::TimerRef RTimer = IpplTimings::getTimer("pushPosition");
        static IpplTimings::TimerRef updateTimer = IpplTimings::getTimer("update");
        static IpplTimings::TimerRef domainDecomposition = IpplTimings::getTimer("loadBalance");
        static IpplTimings::TimerRef SolveTimer = IpplTimings::getTimer("solve");

        double dt = this->dt_m;
        std::shared_ptr<ParticleContainer_t> pc = this->pcontainer_m;
        std::shared_ptr<FieldContainer_t> fc = this->fcontainer_m;

        // First half kick: v <- v - dt/2 * E (charge-to-mass ratio of -1).
        pc->P = pc->P - 0.5 * dt * pc->E;

        // drift
        // x <- x + dt * v with the half-updated velocity.
        pc->R = pc->R + dt * pc->P;

        // Since the particles have moved spatially update them to correct processors
        IpplTimings::startTimer(updateTimer);
        pc->update();
        IpplTimings::stopTimer(updateTimer);

        // Repartition the domain if the load-balance criterion triggers.
        size_type totalP = this->totalP_m;
        int it = this->it_m;
        bool isFirstRepartition = false;
        if (this->loadbalancer_m->balance(totalP, it + 1)) {
            IpplTimings::startTimer(domainDecomposition);
            auto* mesh = &fc->getRho().get_mesh();
            auto* FL = &fc->getFL();
            this->loadbalancer_m->repartition(FL, mesh, isFirstRepartition);
            IpplTimings::stopTimer(domainDecomposition);
        }

        // scatter the charge onto the underlying grid
        this->par2grid();

        // Field solve
        IpplTimings::startTimer(SolveTimer);
        this->fsolver_m->runSolver();
        IpplTimings::stopTimer(SolveTimer);

        // gather E field
        this->grid2par();

        // kick
        // Second half kick with the freshly gathered field.
        pc->P = pc->P - 0.5 * dt * pc->E;
    }
370
    /*!
     * Serial (position, velocity) phase-space diagnostic: histograms the
     * particle charge onto a 2D grid and writes one CSV per dumped dimension
     * and time step. Only used when EnablePhaseDump is true; pre_run() aborts
     * if more than one rank is active.
     */
    struct PhaseDump {
        // Build the 2D layout/mesh: position axis spans [0, domain_], the
        // velocity axis is fixed to [-8, 8]; both use nr_ cells.
        void initialize(size_t nr_, double domain_) {
            ippl::Index I(nr_);
            ippl::NDIndex<2> owned(I, I);
            layout = FieldLayout_t<2>(MPI_COMM_WORLD, owned, isParallel);

            Vector_t<double, 2> hx = {domain_ / nr_, 16. / nr_};
            Vector_t<double, 2> orgn{0, -8};

            mesh = Mesh_t<2>(owned, hx, orgn);
            phaseSpace.initialize(mesh, layout);
            // Only rank 0 needs the reduction buffer it writes from.
            if (ippl::Comm->rank() == 0) {
                phaseSpaceBuf.initialize(mesh, layout);
            }
            std::cout << ippl::Comm->rank() << ": " << phaseSpace.getOwned() << std::endl;
        }

        // Scatter (R[d], P[d]) weighted by charge onto the 2D grid, reduce to
        // rank 0 and write "PhaseSpace_t=<it_>_d=<d>.csv". By default only the
        // last dimension is dumped; allDims = true dumps every dimension.
        void dump(int it_, std::shared_ptr<ParticleContainer_t> pc, bool allDims = false) {
            const auto pcount = pc->getLocalNum();
            phase.realloc(pcount);
            auto& Ri = pc->R;
            auto& Pi = pc->P;
            for (unsigned d = allDims ? 0 : Dim - 1; d < Dim; d++) {
                // Copy the d-th (position, velocity) pair of every particle.
                Kokkos::parallel_for(
                    "Copy phase space", pcount, KOKKOS_CLASS_LAMBDA(const size_t i) {
                        phase(i) = {Ri(i)[d], Pi(i)[d]};
                    });
                phaseSpace = 0;
                Kokkos::fence();
                scatter(pc->q, phaseSpace, phase);
                auto& view = phaseSpace.getView();
                MPI_Reduce(view.data(), phaseSpaceBuf.getView().data(), view.size(), MPI_DOUBLE,
                           MPI_SUM, 0, ippl::Comm->getCommunicator());
                if (ippl::Comm->rank() == 0) {
                    std::stringstream fname;
                    fname << "PhaseSpace_t=" << it_ << "_d=" << d << ".csv";

                    Inform out("Phase Dump", fname.str().c_str(), Inform::OVERWRITE, 0);
                    phaseSpaceBuf.write(out);

                    // Track the global extrema seen across the whole run.
                    auto max = phaseSpaceBuf.max();
                    auto min = phaseSpaceBuf.min();
                    if (max > maxValue) {
                        maxValue = max;
                    }
                    if (min < minValue) {
                        minValue = min;
                    }
                }
                ippl::Comm->barrier();
            }
            // Make the recorded extrema available on every rank.
            MPI_Bcast(&maxValue, 1, MPI_DOUBLE, 0, ippl::Comm->getCommunicator());
            MPI_Bcast(&minValue, 1, MPI_DOUBLE, 0, ippl::Comm->getCommunicator());
        }

        // Largest / smallest cell value observed over all dumps so far.
        double maxRecorded() const { return maxValue; }
        double minRecorded() const { return minValue; }

    private:
        // Serial layout: no parallel decomposition in either 2D direction.
        std::array<bool, 2> isParallel = {false, false};
        // NOTE(review): the declarations of layout, mesh, phaseSpace,
        // phaseSpaceBuf and phase are elided in this listing — confirm
        // against the full source.

        double maxValue = 0, minValue = 0;
    };
438
439 void dump() override {
440 static IpplTimings::TimerRef dumpDataTimer = IpplTimings::getTimer("dumpData");
441 IpplTimings::startTimer(dumpDataTimer);
442
443 dumpBumponTailInstability(this->fcontainer_m->getE().getView());
444 if constexpr (EnablePhaseDump) {
445 phase_m->dump(this->it_m, this->pcontainer_m);
446 }
447
448 IpplTimings::stopTimer(dumpDataTimer);
449 }
450
    /*!
     * Compute field diagnostics for the last E-field component and append
     * them to data/FieldBumponTail_<ranks>_manager.csv: the field energy
     * (global sum of Ez^2 scaled by the cell volume) and the max norm of Ez.
     * All ranks participate in the reductions; only rank 0 writes.
     * @param Eview device view of the E field (ghost cells excluded below).
     */
    template <typename View>
    void dumpBumponTailInstability(const View& Eview) {
        const int nghostE = this->fcontainer_m->getE().getNghost();
        double fieldEnergy, EzAmp;

        using index_array_type = typename ippl::RangePolicy<Dim>::index_array_type;
        double temp = 0.0;

        // Local sum of Ez^2 over the owned cells.
        // NOTE(review): the ippl::parallel_reduce( opening line is elided in
        // this listing.
            "Ex inner product", ippl::getRangePolicy(Eview, nghostE),
            KOKKOS_LAMBDA(const index_array_type& args, double& valL) {
                // ippl::apply accesses the view at the given indices and obtains a
                // reference; see src/Expression/IpplOperations.h
                double myVal = std::pow(ippl::apply(Eview, args)[Dim - 1], 2);
                valL += myVal;
            },
            Kokkos::Sum<double>(temp));

        double globaltemp = 0.0;
        ippl::Comm->reduce(temp, globaltemp, 1, std::plus<double>());

        // Scale the summed squares by the cell volume (product of hr).
        fieldEnergy =
            std::reduce(this->fcontainer_m->getHr().begin(), this->fcontainer_m->getHr().end(),
                        globaltemp, std::multiplies<double>());

        // Local maximum of |Ez| over the owned cells.
        double tempMax = 0.0;
        // NOTE(review): the ippl::parallel_reduce( opening line is elided in
        // this listing.
            "Ex max norm", ippl::getRangePolicy(Eview, nghostE),
            KOKKOS_LAMBDA(const index_array_type& args, double& valL) {
                // ippl::apply accesses the view at the given indices and obtains a
                // reference; see src/Expression/IpplOperations.h
                double myVal = std::fabs(ippl::apply(Eview, args)[Dim - 1]);
                if (myVal > valL) {
                    valL = myVal;
                }
            },
            Kokkos::Max<double>(tempMax));

        EzAmp = 0.0;
        ippl::Comm->reduce(tempMax, EzAmp, 1, std::greater<double>());

        if (ippl::Comm->rank() == 0) {
            std::stringstream fname;
            fname << "data/FieldBumponTail_";
            fname << ippl::Comm->size();
            fname << "_manager";
            fname << ".csv";

            Inform csvout(NULL, fname.str().c_str(), Inform::APPEND);
            csvout.precision(16);
            csvout.setf(std::ios::scientific, std::ios::floatfield);

            // Write the CSV header only on the very first dump (t == 0).
            if (std::fabs(this->time_m) < 1e-14) {
                csvout << "time, Ez_field_energy, Ez_max_norm" << endl;
            }

            csvout << this->time_m << " " << fieldEnergy << " " << EzAmp << endl;
        }
        ippl::Comm->barrier();
    }
511};
512#endif
const double pi
typename ippl::detail::ViewType< ippl::Vector< double, Dim >, 1 >::view_type view_type
constexpr unsigned Dim
constexpr bool EnablePhaseDump
typename ippl::detail::ViewType< ippl::Vector< double, Dim >, 1 >::view_type view_type
ippl::detail::size_type size_type
Definition datatypes.h:23
const char * TestName
Field< double, Dim, ViewArgs... > Field_t
Definition datatypes.h:41
ippl::FieldLayout< Dim > FieldLayout_t
Definition datatypes.h:21
ippl::Vector< T, Dim > Vector_t
Definition datatypes.h:38
ippl::UniformCartesian< double, Dim > Mesh_t
Definition datatypes.h:12
Inform & endl(Inform &inf)
Definition Inform.cpp:42
KOKKOS_INLINE_FUNCTION constexpr decltype(auto) apply(const View &view, const Coords &coords)
RangePolicy< View::rank, typenameView::execution_space, PolicyArgs... >::policy_type getRangePolicy(const View &view, int shift=0)
void parallel_for(const std::string &name, const ExecPolicy &policy, const FunctorType &functor)
void parallel_reduce(const std::string &name, const ExecPolicy &policy, const FunctorType &functor, ReducerArgument &&... reducer)
std::unique_ptr< mpi::Communicator > Comm
Definition Ippl.h:22
KOKKOS_FUNCTION T uniform_cdf_func(T x)
KOKKOS_FUNCTION T uniform_pdf_func()
KOKKOS_INLINE_FUNCTION Vector< int, Dim > first() const
Definition NDIndex.hpp:170
void setParticleContainer(std::shared_ptr< ParticleContainer< T, Dim > > pcontainer)
Definition PicManager.h:55
The class that represents a distribution.
A class for inverse transform sampling.
Functor to generate random numbers from a normal distribution.
Definition Randn.h:26
@ APPEND
Definition Inform.h:45
@ OVERWRITE
Definition Inform.h:44
FmtFlags_t setf(FmtFlags_t setbits, FmtFlags_t field)
Definition Inform.h:101
int precision() const
Definition Inform.h:111
Timing::TimerRef TimerRef
static TimerRef getTimer(const char *nm)
static void stopTimer(TimerRef t)
static void startTimer(TimerRef t)
::ippl::Vector< index_type, Dim > index_array_type
Vector_t< double, Dim > kw_m
void par2grid() override
Particle-to-grid operation.
Vector_t< double, Dim > origin_m
ippl::NDIndex< Dim > domain_m
Vector_t< int, Dim > nr_m
Vector_t< double, Dim > hr_m
Vector_t< double, Dim > rmin_m
bool isFirstRepartition_m
std::string stepMethod_m
std::array< bool, Dim > decomp_m
std::string solver_m
size_type totalP_m
Vector_t< double, Dim > rmax_m
AlpineManager(size_type totalP_, int nt_, Vector_t< int, Dim > &nr_, double lbt_, std::string &solver_, std::string &stepMethod_, std::vector< std::string > preconditioner_params={})
void grid2par() override
Grid-to-particle operation.
KOKKOS_INLINE_FUNCTION double operator()(double x, unsigned int d, const double *params_p) const
KOKKOS_INLINE_FUNCTION double operator()(double x, unsigned int d, double const *params_p) const
KOKKOS_INLINE_FUNCTION double operator()(double u, unsigned int d, double const *params_p) const
void dumpBumponTailInstability(const View &Eview)
BumponTailInstabilityManager(size_type totalP_, int nt_, Vector_t< int, Dim > &nr_, double lbt_, std::string &solver_, std::string &stepMethod_)
void pre_run() override
A method that should be used for setting up the simulation.
void advance() override
A method that should be used to execute/advance a step of simulation.
std::shared_ptr< PhaseDump > phase_m
ParticleContainer< T, Dim > ParticleContainer_t
void dump(int it_, std::shared_ptr< ParticleContainer_t > pc, bool allDims=false)
ippl::ParticleAttrib< Vector_t< double, 2 > > phase