OPALX (Object Oriented Parallel Accelerator Library for Exascale) MINIorX
OPALX
PartBunch.cpp
Go to the documentation of this file.
2#include <boost/numeric/ublas/io.hpp>
3#include "Utilities/Util.h"
4
5#undef doDEBUG
6
// PartBunch constructor.
//
// Stores the run parameters (per-macro-particle charge/mass, particle count,
// number of time steps, load-balance threshold, integrator name, and the OPAL
// distribution and field-solver command objects), then:
//   1. queries the field-solver command for grid size and domain
//      decomposition and builds the index domain,
//   2. sets up a provisional warm-up domain ([-3, 3] per axis) with matching
//      mesh spacing and time step (reset later when the real domain is known),
//   3. creates the field and particle containers,
//   4. sets up adaptive binning along z and a scratch copy of the grid E-field,
//   5. selects and initializes the field solver and load balancer (setSolver),
//   6. performs the warm-up solve (pre_run) and allocates the per-rank
//      particle-count array.
//
// NOTE(review): this listing is extracted from a documentation page; the
// leading digits are the original file's own line numbers, and original
// lines 20, 26-27, 38, 59 and 76 are missing from this view.
7template <typename T, unsigned Dim>
8PartBunch<T, Dim>::PartBunch(double qi, double mi, size_t totalP, int nt, double lbt,
9 std::string integration_method, std::shared_ptr<Distribution> &OPALdistribution,
10 std::shared_ptr<FieldSolverCmd> &OPALFieldSolver)
11 : ippl::PicManager<T, Dim, ParticleContainer<T, Dim>, FieldContainer<T, Dim>, LoadBalancer<T, Dim>>(),
12 time_m(0.0),
13 totalP_m(totalP),
14 nt_m(nt),
15 lbt_m(lbt),
16 dt_m(0),
17 it_m(0),
18 integration_method_m(integration_method),
19 solver_m(""),
// NOTE(review): original line 20 (an initializer-list entry) is missing here.
21 qi_m(qi),
22 mi_m(mi),
23 rmsDensity_m(0.0),
24 RefPartR_m(0.0),
25 RefPartP_m(0.0),
// NOTE(review): original lines 26-27 (initializer-list entries) are missing here.
28 OPALdist_m(OPALdistribution),
29 OPALFieldSolver_m(OPALFieldSolver) {
30
31 static IpplTimings::TimerRef gatherInfoPartBunch = IpplTimings::getTimer("gatherInfoPartBunch");
32 IpplTimings::startTimer(gatherInfoPartBunch);
33
34 *gmsg << "PartBunch Constructor" << endl;
35
36 // get the needed information from OPAL FieldSolver command
37
// NOTE(review): original line 38 is missing; the next line is its
// continuation (presumably an assignment of nr_m from getNX/getNY/getNZ).
39 OPALFieldSolver_m->getNX(), OPALFieldSolver_m->getNY(), OPALFieldSolver_m->getNZ());
40
41 const Vector_t<bool, 3> domainDecomposition = OPALFieldSolver_m->getDomDec();
42
// Build the per-dimension index domain and record which axes are decomposed.
43 for (unsigned i = 0; i < Dim; i++) {
44 this->domain_m[i] = ippl::Index(nr_m[i]);
45 this->decomp_m[i] = domainDecomposition[i];
46 }
47
48 bool isAllPeriodic = true; // \fixme need to get BCs from OPAL Fieldsolver
49
50 // set stuff for pre_run i.e. warmup
51 // this will be reset when the correct computational
52 // domain is set
53
// Provisional cube [-3, 3]^Dim; dt tied to the longitudinal resolution nr[2].
54 Vector_t<double, Dim> length (6.0);
55 this->hr_m = length / this->nr_m;
56 this->origin_m = -3.0;
57 this->dt_m = 0.5 / this->nr_m[2];
58
// NOTE(review): original line 59 is missing (presumably "rmin_m = origin_m;").
60 rmax_m = origin_m + length;
61
62 this->setFieldContainer( std::make_shared<FieldContainer_t>(hr_m, rmin_m, rmax_m, decomp_m, domain_m, origin_m, isAllPeriodic) );
63
64 this->setParticleContainer(std::make_shared<ParticleContainer_t>(
65 this->fcontainer_m->getMesh(), this->fcontainer_m->getFL()));
66
67 IpplTimings::stopTimer(gatherInfoPartBunch);
68
69 // ---------------- binning setup ----------------
70 using bin_index_type = typename ParticleContainer_t::bin_index_type;
71 bin_index_type maxBins = Options::maxBins;
72 this->setBins(std::make_shared<AdaptBins_t>(
73 this->getParticleContainer(),
74 BinningSelector_t(2), // TODO: hardcode z axis with coordinate selector at axis index 2
75 static_cast<bin_index_type>(maxBins),
// NOTE(review): original line 76 (a further constructor argument) is missing.
77 ));
78 this->getBins()->debug();
79
// Keep a scratch copy of the grid E-field (used for per-bin field work).
80 this->setTempEField(std::make_shared<VField_t<T, Dim>>(this->fcontainer_m->getE())); // user copy constructor
81 this->getTempEField()->initialize(this->fcontainer_m->getMesh(), this->fcontainer_m->getFL());
82 // -----------------------------------------------
83
84 static IpplTimings::TimerRef setSolverT = IpplTimings::getTimer("setSolver");
85 IpplTimings::startTimer(setSolverT);
86 setSolver(OPALFieldSolver_m->getType());
87 IpplTimings::stopTimer(setSolverT);
88
89 static IpplTimings::TimerRef prerun = IpplTimings::getTimer("prerun");
90 IpplTimings::startTimer(prerun);
91 pre_run();
92 IpplTimings::stopTimer(prerun);
93
// One slot per MPI rank; filled by gatherLoadBalanceStatistics().
94 globalPartPerNode_m = std::make_unique<size_t[]>(ippl::Comm->size());
95}
96
// Gathers the grid electric field onto the particle positions via
// interpolation: pcontainer->E <- gather(fcontainer->E at pcontainer->R).
// NOTE(review): the function signature (original line 98; the member index
// lists "void gatherCIC()") is missing from this extraction.
97template <typename T, unsigned Dim>
99 using Base = ippl::ParticleBase<ippl::ParticleSpatialLayout<T, Dim>>;
100 typename Base::particle_position_type* Ep = &this->pcontainer_m->E;
101 typename Base::particle_position_type* R = &this->pcontainer_m->R;
102 VField_t<T, Dim>* Ef = &this->fcontainer_m->getE();
103 gather(*Ep, *Ef, *R);
104}
105
// Asks the load balancer whether the particle distribution at step it+1
// warrants repartitioning and, if so, performs an ORB repartition using the
// rho field's mesh and the field layout.
// NOTE(review): the function signature (original lines 107-108; the member
// index lists "void do_binaryRepart()") is missing from this extraction.
106template <typename T, unsigned Dim>
109 std::shared_ptr<FieldContainer_t> fc = this->fcontainer_m;
110
111 size_type totalP = this->getTotalNum();
112 int it = this->it_m;
113
114 if (this->loadbalancer_m->balance(totalP, it + 1)) {
115 auto* mesh = &fc->getRho().get_mesh();
116 auto* FL = &fc->getFL();
117 this->loadbalancer_m->repartition(FL, mesh, isFirstRepartition_m);
118 }
119}
120
// Collects the per-rank particle counts into globalPartPerNode_m: each rank
// zeroes the array, writes its local count at its own index, and a summing
// allreduce then makes the complete array identical on every rank.
// NOTE(review): the function signature (original line 122) is missing from
// this extraction.
121template <typename T, unsigned Dim>
123 std::fill_n(globalPartPerNode_m.get(), ippl::Comm->size(), 0); // Fill the array with zeros
124 globalPartPerNode_m[ippl::Comm->rank()] = getLocalNum();
125 ippl::Comm->allreduce(globalPartPerNode_m.get(), ippl::Comm->size(), std::plus<size_t>());
126}
127
// Selects and initializes the field solver, then wires up the load balancer.
// Warns (but proceeds and overwrites) if a solver was already configured.
//
// @param solver  solver type string, forwarded from the OPAL FieldSolver
//                command (see the constructor's setSolver(...getType()) call).
128template <typename T, unsigned Dim>
129void PartBunch<T, Dim>::setSolver(std::string solver) {
130 if (this->solver_m != "")
131 *gmsg << "* Warning solver already initiated but overwrite ..." << endl;
132
133 this->solver_m = solver;
134
// Allocate/initialize rho, phi and E for the chosen solver type.
135 this->fcontainer_m->initializeFields(this->solver_m);
136
137 this->setFieldSolver(std::make_shared<FieldSolver_t>(
138 this->solver_m, &this->fcontainer_m->getRho(), &this->fcontainer_m->getE(),
139 &this->fcontainer_m->getPhi()));
140
141 this->fsolver_m->initSolver();
142
// NOTE(review): original line 143 is missing from this extraction.
144 this->setLoadBalancer(std::make_shared<LoadBalancer_t>(this->lbt_m, this->fcontainer_m, this->pcontainer_m, this->fsolver_m));
145
146 *gmsg << "* Solver and Load Balancer set" << endl;
147}
148
// Diagnostic: reports statistics of the per-particle electric field (scaled
// by the coupling constant) and the average grid potential. All statistics
// are reduced to rank 0 and printed via Inform.
// NOTE(review): the function signature (original line 150; the member index
// lists "void spaceChargeEFieldCheck(Vector_t<double, 3> efScale)") is
// missing from this extraction.
149template <typename T, unsigned Dim>
151 Inform msg("EParticleStats");
152
153 auto pE_view = this->pcontainer_m->E.getView();
154 auto fphi_view = this->fcontainer_m->getPhi().getView();
155
156
157
158 double avgphi = 0.0;
159 double avgE = 0.0;
160 double minEComponent = std::numeric_limits<T>::max();
// NOTE(review): for floating-point T, numeric_limits<T>::min() is the
// smallest positive value, not the most negative one — lowest() was
// probably intended. It appears harmless here only because the Kokkos
// reducers below overwrite the result variables and the empty-rank case
// is zeroed afterwards, but it should be fixed — verify.
161 double maxEComponent = std::numeric_limits<T>::min();
162 double minE = std::numeric_limits<T>::max();
163 double maxE = std::numeric_limits<T>::min();
164 double cc = getCouplingConstant();
165
166 int myRank = ippl::Comm->rank();
167
// Per-particle reduction: accumulate |E| and track component/norm extrema.
168 Kokkos::parallel_reduce(
169 "check e-field", this->getLocalNum(),
170 KOKKOS_LAMBDA(const int i, double& loc_avgE, double& loc_minEComponent,
171 double& loc_maxEComponent, double& loc_minE, double& loc_maxE) {
172 double EX = pE_view[i][0]*cc;
173 double EY = pE_view[i][1]*cc;
174 double EZ = pE_view[i][2]*cc;
175
176 double ENorm = Kokkos::sqrt(EX*EX + EY*EY + EZ*EZ);
177
178 loc_avgE += ENorm;
179
180 loc_minEComponent = EX < loc_minEComponent ? EX : loc_minEComponent;
181 loc_minEComponent = EY < loc_minEComponent ? EY : loc_minEComponent;
182 loc_minEComponent = EZ < loc_minEComponent ? EZ : loc_minEComponent;
183
184 loc_maxEComponent = EX > loc_maxEComponent ? EX : loc_maxEComponent;
185 loc_maxEComponent = EY > loc_maxEComponent ? EY : loc_maxEComponent;
186 loc_maxEComponent = EZ > loc_maxEComponent ? EZ : loc_maxEComponent;
187
188 loc_minE = ENorm < loc_minE ? ENorm : loc_minE;
189 loc_maxE = ENorm > loc_maxE ? ENorm : loc_maxE;
190 },
191 Kokkos::Sum<T>(avgE), Kokkos::Min<T>(minEComponent),
192 Kokkos::Max<T>(maxEComponent), Kokkos::Min<T>(minE),
193 Kokkos::Max<T>(maxE));
194
// Ranks with no particles would otherwise report the reducer identities.
195 if (this->getLocalNum() == 0) {
196 minEComponent = maxEComponent = minE = maxE = avgE = 0.0;
197 }
198
// Reduce to rank 0; MPI_IN_PLACE avoids aliasing send/recv on the root.
199 MPI_Reduce(myRank == 0 ? MPI_IN_PLACE : &avgE, &avgE, 1, MPI_DOUBLE, MPI_SUM, 0, ippl::Comm->getCommunicator());
200 MPI_Reduce(myRank == 0 ? MPI_IN_PLACE : &minEComponent, &minEComponent, 1, MPI_DOUBLE, MPI_MIN, 0, ippl::Comm->getCommunicator());
201 MPI_Reduce(myRank == 0 ? MPI_IN_PLACE : &maxEComponent, &maxEComponent, 1, MPI_DOUBLE, MPI_MAX, 0, ippl::Comm->getCommunicator());
202 MPI_Reduce(myRank == 0 ? MPI_IN_PLACE : &minE, &minE, 1, MPI_DOUBLE, MPI_MIN, 0, ippl::Comm->getCommunicator());
203 MPI_Reduce(myRank == 0 ? MPI_IN_PLACE : &maxE, &maxE, 1, MPI_DOUBLE, MPI_MAX, 0, ippl::Comm->getCommunicator());
204
205 size_t Np = this->getTotalNum();
206 avgE /= (Np == 0) ? 1 : Np; // avoid division by zero for empty simulations (see also DistributionMoments::computeMeans implementation)
207
208 msg << "avgENorm = " << avgE << endl;
209
210 using mdrange_type = Kokkos::MDRangePolicy<Kokkos::Rank<3>>;
211
// Sum the potential over the local grid view.
212 Kokkos::parallel_reduce(
213 "check phi", mdrange_type({0,0,0}, {fphi_view.extent(0),fphi_view.extent(1),fphi_view.extent(2)}),
214 KOKKOS_LAMBDA(const int i, const int j, const int k, double& loc_avgphi) {
215 double phi = fphi_view(i,j,k);
216 loc_avgphi += phi;
217 },
218 Kokkos::Sum<T>(avgphi));
219
220 MPI_Reduce(myRank == 0 ? MPI_IN_PLACE : &avgphi, &avgphi, 1, MPI_DOUBLE, MPI_SUM, 0, ippl::Comm->getCommunicator());
// NOTE(review): unlike avgE above, this division has no zero guard, and a
// grid-cell sum is averaged over the *particle* count rather than the cell
// count — confirm this is intended.
221 avgphi /= this->getTotalNum();
222 msg << "avgphi = " << avgphi << endl;
223
224}
225
// Computes raw (non-centered) 6D phase-space sums of the particle
// distribution — a 2*Dim centroid vector and a (2*Dim)x(2*Dim) second-moment
// matrix interleaved as (x, px, y, py, z, pz) — plus the global spatial
// extents, which are stored in rmax_m / rmin_m.
// NOTE(review): the function signature (original line 227; the member index
// lists "void updateMoments()") and original lines 233-235 are missing from
// this extraction.
// NOTE(review): the interleaving below hard-codes Dim == 3 (indices 0..5).
226template <typename T, unsigned Dim>
228 std::shared_ptr<ParticleContainer_t> pc = this->pcontainer_m;
229
230 auto Rview = pc->R.getView();
231 auto Pview = pc->P.getView();
232
236
237
238 double loc_centroid[2 * Dim] = {};
239 double loc_moment[2 * Dim][2 * Dim] = {};
240
241 double centroid[2 * Dim] = {};
242 double moment[2 * Dim][2 * Dim] = {};
243
244 for (unsigned i = 0; i < 2 * Dim; i++) {
245 loc_centroid[i] = 0.0;
246 for (unsigned j = 0; j <= i; j++) {
247 loc_moment[i][j] = 0.0;
248 loc_moment[j][i] = 0.0;
249 }
250 }
251
// One reduction per phase-space coordinate i: sum of part[i] and of the
// products part[i]*part[j] for all six j in a single pass.
252 for (unsigned i = 0; i < 2 * Dim; ++i) {
253 Kokkos::parallel_reduce(
254 "calc moments of particle distr.", ippl::getRangePolicy(Rview),
255 KOKKOS_LAMBDA(
256 const int k, double& cent, double& mom0, double& mom1, double& mom2,
257 double& mom3, double& mom4, double& mom5) {
258 double part[2 * Dim];
259 part[0] = Rview(k)[0];
260 part[1] = Pview(k)[0];
261 part[2] = Rview(k)[1];
262 part[3] = Pview(k)[1];
263 part[4] = Rview(k)[2];
264 part[5] = Pview(k)[2];
265
266 cent += part[i];
267 mom0 += part[i] * part[0];
268 mom1 += part[i] * part[1];
269 mom2 += part[i] * part[2];
270 mom3 += part[i] * part[3];
271 mom4 += part[i] * part[4];
272 mom5 += part[i] * part[5];
273 },
274 Kokkos::Sum<T>(loc_centroid[i]), Kokkos::Sum<T>(loc_moment[i][0]),
275 Kokkos::Sum<T>(loc_moment[i][1]), Kokkos::Sum<T>(loc_moment[i][2]),
276 Kokkos::Sum<T>(loc_moment[i][3]), Kokkos::Sum<T>(loc_moment[i][4]),
277 Kokkos::Sum<T>(loc_moment[i][5]));
278 Kokkos::fence();
279 }
280
// NOTE(review): the globally reduced `moment` and `centroid` arrays are not
// stored anywhere in the code visible here — possibly consumed in one of the
// missing lines or dead work; verify against the full source.
281 MPI_Allreduce(loc_moment, moment, 2 * Dim * 2 * Dim, MPI_DOUBLE, MPI_SUM, ippl::Comm->getCommunicator());
282 MPI_Allreduce(loc_centroid, centroid, 2 * Dim, MPI_DOUBLE, MPI_SUM, ippl::Comm->getCommunicator());
283 ippl::Comm->barrier();
284
// Per-axis min/max of particle positions (Kokkos reducers initialize the
// accumulators with their identities, so the arrays need no pre-fill).
285 double rmax_loc[Dim];
286 double rmin_loc[Dim];
287 double rmax[Dim];
288 double rmin[Dim];
289
290 for (unsigned d = 0; d < Dim; ++d) {
291 Kokkos::parallel_reduce(
292 "rel max", this->getLocalNum(),
293 KOKKOS_LAMBDA(const int i, double& mm) {
294 double tmp_vel = Rview(i)[d];
295 mm = tmp_vel > mm ? tmp_vel : mm;
296 },
297 Kokkos::Max<T>(rmax_loc[d]));
298
299 Kokkos::parallel_reduce(
300 "rel min", this->getLocalNum(),
301 KOKKOS_LAMBDA(const int i, double& mm) {
302 double tmp_vel = Rview(i)[d];
303 mm = tmp_vel < mm ? tmp_vel : mm;
304 },
305 Kokkos::Min<T>(rmin_loc[d]));
306 }
307 Kokkos::fence();
308 MPI_Allreduce(rmax_loc, rmax, Dim, MPI_DOUBLE, MPI_MAX, ippl::Comm->getCommunicator());
309 MPI_Allreduce(rmin_loc, rmin, Dim, MPI_DOUBLE, MPI_MIN, ippl::Comm->getCommunicator());
310 ippl::Comm->barrier();
311
312 // \todo can we do this nicer?
313 for (unsigned int i=0; i<Dim; i++) {
314 rmax_m(i) = rmax[i];
315 rmin_m(i) = rmin[i];
316 }
317}
318
// Zeroes the charge density and runs the field solver once.
// NOTE(review): the function signature (original line 320) is missing from
// this extraction; per the member index this is likely
// "void pre_run() override" (the constructor's warm-up solve) — confirm
// against the header.
319template <typename T, unsigned Dim>
321 this->fcontainer_m->getRho() = 0.0;
322 this->fsolver_m->runSolver();
323}
324
325template <typename T, unsigned Dim>
326Inform& PartBunch<T, Dim>::print(Inform& os) {
327 // if (this->getLocalNum() != 0) { // to suppress Nans
328 Inform::FmtFlags_t ff = os.flags();
329
330 os << std::scientific;
331 os << level1 << "\n";
332 os << "* ************** B U N C H "
333 "********************************************************* \n";
334 os << "* PARTICLES = " << this->getTotalNum() << "\n";
335 os << "* CHARGE = " << this->qi_m*this->getTotalNum() << " (Cb) \n";
336 os << "* INTEGRATOR = " << integration_method_m << "\n";
337 os << "* MIN R (origin) = " << Util::getLengthString( this->pcontainer_m->getMinR(), 5) << "\n";
338 os << "* MAX R (max ext) = " << Util::getLengthString( this->pcontainer_m->getMaxR(), 5) << "\n";
339 os << "* RMS R = " << Util::getLengthString( this->pcontainer_m->getRmsR(), 5) << "\n";
340 os << "* RMS P = " << this->pcontainer_m->getRmsP() << " [beta gamma]\n";
341 os << "* Mean R: " << this->pcontainer_m->getMeanR() << " [m]\n";
342 os << "* Mean P: " << this->pcontainer_m->getMeanP() << " [beta gamma]\n";
343 os << "* MESH SPACING = " << Util::getLengthString( this->fcontainer_m->getMesh().getMeshSpacing(), 5) << "\n";
344 os << "* COMPDOM INCR = " << this->OPALFieldSolver_m->getBoxIncr() << " (%) \n";
345 os << "* FIELD LAYOUT = " << this->fcontainer_m->getFL() << "\n";
346 os << "* Centroid : \n* ";
347 for (unsigned int i=0; i<2*Dim; i++) {
348 os << this->pcontainer_m->getCentroid()[i] << " ";
349 }
350 os << endl << "* Cov Matrix : \n* ";
351 for (unsigned int i=0; i<2*Dim; i++) {
352 for (unsigned int j=0; j<2*Dim; j++) {
353 os << this->pcontainer_m->getCovMatrix()(i,j) << " ";
354 }
355 os << "\n* ";
356 }
357 os << "* "
358 "********************************************************************************"
359 "** "
360 << endl;
361 os.flags(ff);
362 return os;
363}
364
// Recomputes the computational box from the current particle extents,
// applies the given mesh spacing, updates layouts, and repartitions.
//
// @param hr  mesh spacing applied to the mesh and field container.
365template <typename T, unsigned Dim>
366void PartBunch<T, Dim>::bunchUpdate(ippl::Vector<double, 3> hr) {
367 /* \brief
368 1. calculates and set hr
369 2. do repartitioning
370 */
371 Inform m ("bunchUpdate ");
372
373 auto *mesh = &this->fcontainer_m->getMesh();
374 auto *FL = &this->fcontainer_m->getFL();
375
376 std::shared_ptr<ParticleContainer_t> pc = this->getParticleContainer();
377
378 pc->computeMinMaxR();
379
// NOTE(review): original lines 379-380 are missing from this extraction.
381
// NOTE(review): numeric_limits<T>::lowest() is the most negative finite
// value (~ -1.8e308 for double); subtracting/adding it here shifts the box
// bounds astronomically. A tiny padding (e.g. epsilon()) was presumably
// intended — compare the no-argument overload, which uses the raw extents.
382 ippl::Vector<double, 3> o = pc->getMinR() - std::numeric_limits<T>::lowest();
383 ippl::Vector<double, 3> e = pc->getMaxR() + std::numeric_limits<T>::lowest();
384 ippl::Vector<double, 3> l = e - o;
385
// NOTE(review): hr_m is recomputed from the extents, yet setMeshSpacing and
// setHr below use the *argument* hr while setOrigin uses hr_m; the
// no-argument overload uses hr_m throughout — confirm which is intended.
386 hr_m = (1.0+this->OPALFieldSolver_m->getBoxIncr()/100.)*(l / this->nr_m);
387 mesh->setMeshSpacing(hr);
388 mesh->setOrigin(o-0.5*hr_m*this->OPALFieldSolver_m->getBoxIncr()/100.);
389
390 pc->getLayout().updateLayout(*FL, *mesh);
391 pc->update();
392
393 this->getFieldContainer()->setRMin(o);
394 this->getFieldContainer()->setRMax(e);
395 this->getFieldContainer()->setHr(hr);
396
397 this->isFirstRepartition_m = true;
398 this->loadbalancer_m->initializeORB(FL, mesh);
399 this->loadbalancer_m->repartition(FL, mesh, this->isFirstRepartition_m);
400 this->updateMoments();
401}
402
// Recomputes the computational box from the current particle extents,
// derives the mesh spacing hr_m (grown by BoxIncr percent), updates mesh,
// layouts and field-container bounds, and refreshes the moments.
// NOTE(review): the function signature (original line 404; the member index
// lists "void bunchUpdate()") is missing from this extraction.
403template <typename T, unsigned Dim>
405
406 /* \brief
407 1. calculates and set hr
408 2. do repartitioning
409 */
410
411 auto *mesh = &this->fcontainer_m->getMesh();
412 auto *FL = &this->fcontainer_m->getFL();
413
414 std::shared_ptr<ParticleContainer_t> pc = this->getParticleContainer();
415
416 pc->computeMinMaxR();
417
418 ippl::Vector<double, 3> o = pc->getMinR();
419 ippl::Vector<double, 3> e = pc->getMaxR();
420 ippl::Vector<double, 3> l = e - o;
421
// Grow the box by BoxIncr percent; the origin is shifted down by half the
// enlargement so the growth is centered on the bunch.
422 hr_m = (1.0+this->OPALFieldSolver_m->getBoxIncr()/100.)*(l / this->nr_m);
423 mesh->setMeshSpacing(hr_m);
424 mesh->setOrigin(o-0.5*hr_m*this->OPALFieldSolver_m->getBoxIncr()/100.);
425
426 this->getFieldContainer()->setRMin(o);
427 this->getFieldContainer()->setRMax(e);
428 this->getFieldContainer()->setHr(hr_m);
429
430 pc->getLayout().updateLayout(*FL, *mesh);
431 pc->update();
432
433 this->isFirstRepartition_m = true;
434 this->loadbalancer_m->initializeORB(FL, mesh);
// NOTE(review): the repartition call is commented out here, unlike in the
// hr-argument overload — confirm this is intentional.
435 //this->loadbalancer_m->repartition(FL, mesh, this->isFirstRepartition_m);
436
437 this->updateMoments();
438}
439
// Computes the space-charge self-fields: (re)bins and sorts the particles,
// adjusts the grid to the moved bunch (bunchUpdate), scatters the particle
// charge onto rho (CIC), runs the field solver, and gathers the resulting
// E-field back onto the particles.
// NOTE(review): the function signature (original line 441; the member index
// lists "void computeSelfFields()") and original line 491 are missing from
// this extraction.
440template <typename T, unsigned Dim>
442 static IpplTimings::TimerRef completeBinningT = IpplTimings::getTimer("bTotalBinningT");
443
444 // Start binning and sorting by bins
445 std::shared_ptr<AdaptBins_t> bins = this->getBins();
// NOTE(review): Etmp is bound here but not used in the code visible below —
// possibly used in the missing line 491 or dead; verify.
446 VField_t<double, 3>& Etmp = *(this->getTempEField());
447
448 IpplTimings::startTimer(completeBinningT);
449 bins->doFullRebin(bins->getMaxBinCount()); // rebin with 128 bins // bins->getMaxBinCount()
450 bins->print(); // For debugging...
451 bins->sortContainerByBin(); // Sort BEFORE, since it generates less atomics overhead with more bins!
452
453
454 bins->genAdaptiveHistogram(); // merge bins with width/N_part ratio of 1.0
455 IpplTimings::stopTimer(completeBinningT);
456
457 bins->print(); // For debugging...
458
459
// NOTE(review): SolveTimer is started here but the matching stopTimer at the
// end of this function is commented out — the timer is left running.
460 static IpplTimings::TimerRef SolveTimer = IpplTimings::getTimer("SolveTimer");
461 IpplTimings::startTimer(SolveTimer);
462
463 /*
464 \todo check if Lorentz transform is needed
465
466 double gammaz = this->pcontainer_m->getMeanGammaZ();
467 gammaz *= gammaz;
468 gammaz = std::sqrt(gammaz + 1.0);
469
470 Vector_t<double, 3> hr_scaled = hr_m;
471 // hr_scaled[2] *= gammaz;
472
473 hr_m = hr_scaled;
474
475 */
476
477 /*
478 particles have moved need to adjust grid
479 \todo might not work -- can use container update for testing!
480 */
481 //std::shared_ptr<ParticleContainer_t> pc = this->getParticleContainer();
482 //pc->update();
483 this->bunchUpdate();
484
485 /*
486
487 scatterCIC start
488
489 */
490
492
493
494 ippl::ParticleAttrib<T>* Q = &this->pcontainer_m->Q;
495 typename Base::particle_position_type* R = &this->pcontainer_m->R;
496
497 this->fcontainer_m->getRho() = 0.0;
498 Field_t<Dim>* rho = &this->fcontainer_m->getRho();
499
500 scatter(*Q, *rho, *R);
501
502#ifdef doDEBUG
// Charge-conservation check: compare the scattered rho sum with q_i * N.
503 const double qtot = this->qi_m * this->getTotalNum();
504 size_type TotalParticles = 0;
505 size_type localParticles = this->pcontainer_m->getLocalNum();
506
507 double relError = std::fabs((qtot - (*rho).sum()) / qtot);
508
509 ippl::Comm->reduce(localParticles, TotalParticles, 1, std::plus<size_type>());
510
511 if ((ippl::Comm->rank() == 0) && (relError > 1.0E-13)) {
512 Inform m("computeSelfFields w CICScatter ", INFORM_ALL_NODES);
513 m << "Time step: " << it_m
514 << " total particles in the sim. " << totalP_m
515 << " missing : " << totalP_m-TotalParticles
516 << " rel. error in charge conservation: " << relError << endl;
517 }
518#endif
519
520 /*
521
522 scatterCIC end
523
524 */
525
526 this->fsolver_m->runSolver();
527
528 gather(this->pcontainer_m->E, this->fcontainer_m->getE(), this->pcontainer_m->R);
529
530#ifdef doDEBUG
531 Inform m("computeSelfFields w CICScatter ", INFORM_ALL_NODES);
532 double cellVolume = std::reduce(hr_m.begin(), hr_m.end(), 1., std::multiplies<double>());
533 m << "cellVolume= " << cellVolume << endl;
534 m << "Sum over E-field after gather = " << this->fcontainer_m->getE().sum() << endl;
535#endif
536
537 /*
538 Vector_t<double, 3> efScale = Vector_t<double,3>(gammaz*cc/hr_scaled[0], gammaz*cc/hr_scaled[1], cc / gammaz / hr_scaled[2]);
539 m << "efScale = " << efScale << endl;
540 spaceChargeEFieldCheck(efScale);
541 */
542
543 //IpplTimings::stopTimer(SolveTimer);
544}
545
// Scatters particle charge onto the rho grid via CIC, either for all
// particles (binIndex == -1) or restricted to one adaptive bin, then
// normalizes rho by the cell volume and, for non-OPEN solvers, subtracts a
// uniform neutralizing background charge density.
// NOTE(review): the function signature (original lines 547-550; the member
// index lists "void scatterCICPerBin(binIndex_t binIndex)") is missing from
// this extraction.
546template <typename T, unsigned Dim>
551 Inform m("scatterCICPerBin");
552 m << "Scattering binIndex = " << binIndex << " to grid." << endl;
553
554 this->fcontainer_m->getRho() = 0.0;
555
556 ippl::ParticleAttrib<T>* q = &this->pcontainer_m->Q;
557 typename Base::particle_position_type* R = &this->pcontainer_m->R;
558 Field_t<Dim>* rho = &this->fcontainer_m->getRho();
559
560 double Q;
// NOTE(review): original lines 561-563 are missing; they likely declare or
// initialize the `hr`, `rmax` and `rmin` values used near the end of this
// function (unless those are members) — verify against the full source.
564
565 if (binIndex == -1) {
566 // Use original scatterCIC logic for all particles
567 Q = this->qi_m * this->getTotalNum();
568 scatter(*q, *rho, *R);
569 } else {
570 // Use per-bin scattering logic
571 Q = this->qi_m * this->bins_m->getNPartInBin(binIndex, true);
572 scatter(*q, *rho, *R, this->bins_m->getBinIterationPolicy(binIndex), this->bins_m->getHashArray());
573 }
574
575 m << "gammz= " << this->pcontainer_m->getMeanP()[2] << endl;
576
577#ifdef doDEBUG
// Charge-conservation check against the expected charge Q of this scatter.
578 double relError = std::fabs((Q - (*rho).sum()) / Q);
579 size_type TotalParticles = 0;
580 size_type localParticles = this->pcontainer_m->getLocalNum();
581 size_type totalP_check = (binIndex == -1) ? totalP_m : this->pcontainer_m->getTotalNum();
582
583 m << "computeSelfFields sum rho = " << (*rho).sum() << ", relError = " << relError << endl;
584
585 ippl::Comm->reduce(localParticles, TotalParticles, 1, std::plus<size_type>());
586
587 if (ippl::Comm->rank() == 0) {
588 if (TotalParticles != totalP_check || relError > 1e-10) {
589 m << "Time step: " << it_m << endl;
590 m << "Total particles in the sim. " << totalP_check << " "
591 << "after update: " << TotalParticles << endl;
592 m << "Rel. error in charge conservation: " << relError << endl;
593 ippl::Comm->abort();
594 }
595 }
596#endif
597
// Convert the scattered charge to a charge *density*.
598 double cellVolume = std::reduce(hr.begin(), hr.end(), 1., std::multiplies<double>());
599
600 m << "cellVolume= " << cellVolume << endl;
601
602 (*rho) = (*rho) / cellVolume;
603
604 // rho = rho_e - rho_i (only if periodic BCs)
605 if (this->fsolver_m->getStype() != "OPEN") {
606 double size = 1;
607 for (unsigned d = 0; d < 3; d++) {
608 size *= rmax[d] - rmin[d];
609 }
610 *rho = *rho - (Q / size);
611 }
612}
613
614
615// Explicit instantiations
616template class PartBunch<double, 3>;
Inform * gmsg
Definition changes.cpp:7
ippl::Field< double, Dim, ViewArgs... > Field_t
Definition PBunchDefs.h:30
ippl::Field< ippl::Vector< T, Dim >, Dim, ViewArgs... > VField_t
Definition PBunchDefs.h:33
ippl::Vector< T, Dim > Vector_t
ippl::ParticleBase< ippl::ParticleSpatialLayout< T, Dim > > Base
FieldContainer< double, 3 > FieldContainer_t
Inform * gmsg
Definition changes.cpp:7
ippl::detail::size_type size_type
double T
Definition datatypes.h:7
constexpr unsigned Dim
Definition datatypes.h:6
double binningBeta
Definition Options.cpp:109
double binningAlpha
Definition Options.cpp:108
int maxBins
Definition Options.cpp:107
double desiredWidth
Definition Options.cpp:110
std::string getLengthString(double spos, unsigned int precision=3)
Definition Util.h:93
long long localTrackStep_m
step in a TRACK command
void gatherLoadBalanceStatistics()
Vector_t< double, Dim > rmin_m
Solver_t< T, Dim > solver_m
std::array< bool, Dim > decomp_m
Definition PartBunch.h:136
void computeSelfFields()
double rmsDensity_m
Definition PartBunch.h:117
void pre_run() override
void updateMoments()
Definition PartBunch.h:264
void calcBeamParameters()
std::shared_ptr< VField_t< T, Dim > > getTempEField()
Definition PartBunch.h:257
void scatterCICPerBin(binIndex_t binIndex)
void do_binaryRepart()
Vector_t< double, Dim > rmax_m
std::shared_ptr< FieldSolverCmd > OPALFieldSolver_m
Definition PartBunch.h:214
size_type totalP_m
Definition PartBunch.h:96
bool isFirstRepartition_m
Definition PartBunch.h:110
Vector_t< T, Dim > RefPartR_m
Vector_t< T, Dim > nr_m
double getRho(int x, int y, int z)
typename ParticleContainer_t::bin_index_type binIndex_t
Definition PartBunch.h:92
std::shared_ptr< ParticleContainer_t > getParticleContainer()
Definition PartBunch.h:248
PartBunch()=delete
double dt_m
6x6 matrix of the moments of the beam
double time_m
Definition PartBunch.h:94
long long globalTrackStep_m
if multiple TRACK commands
size_t getLocalNum() const
double lbt_m
Definition PartBunch.h:100
void setSolver(std::string solver)
Vector_t< double, Dim > R(size_t i)
Definition PartBunch.h:276
std::string integration_method_m
Definition PartBunch.h:106
ippl::NDIndex< Dim > domain_m
Definition PartBunch.h:135
void bunchUpdate()
Vector_t< double, Dim > hr_m
mesh size [m]
void spaceChargeEFieldCheck(Vector_t< double, 3 > efScale)
void setBins(std::shared_ptr< AdaptBins_t > bins)
Definition PartBunch.h:262
typename ParticleBinning::CoordinateSelector< ParticleContainer_t > BinningSelector_t
Definition PartBunch.h:90
Inform & print(Inform &os)
Vector_t< double, Dim > origin_m
Definition PartBunch.h:123
void gatherCIC()
double mi_m
Definition PartBunch.h:115
double getCouplingConstant() const
size_t getTotalNum() const
Vector_t< T, Dim > RefPartP_m
std::shared_ptr< Distribution > OPALdist_m
Definition PartBunch.h:212
std::shared_ptr< AdaptBins_t > getBins()
Definition PartBunch.h:260
ParticleAttrib< double > Q
std::unique_ptr< size_t[]> globalPartPerNode_m
std::shared_ptr< AdaptBins_t > bins_m
Definition PartBunch.h:182
void setTempEField(std::shared_ptr< VField_t< T, Dim > > Etmp)
Definition PartBunch.h:258