// OPAL (Object Oriented Parallel Accelerator Library) 2024.2
// AmrYtWriter.cpp
1//
2// Class AmrYtWriter
3// This concrete class writes output files that are readable by yt
4// (cf. http://yt-project.org/). We have a fork of yt in
5// the repository at https://gitlab.psi.ch/frey_m/yt.
6// The functions of this class are copied from AMReX and modified to fit
7// our needs.
8//
9// Copyright (c) 2016 - 2020, Matthias Frey, Paul Scherrer Institut, Villigen PSI, Switzerland
10// All rights reserved
11//
12// Implemented as part of the PhD thesis
13// "Precise Simulations of Multibunches in High Intensity Cyclotrons"
14//
15// This file is part of OPAL.
16//
17// OPAL is free software: you can redistribute it and/or modify
18// it under the terms of the GNU General Public License as published by
19// the Free Software Foundation, either version 3 of the License, or
20// (at your option) any later version.
21//
22// You should have received a copy of the GNU General Public License
23// along with OPAL. If not, see <https://www.gnu.org/licenses/>.
24//
25#include "Amr/AmrYtWriter.h"
26
27#include <AMReX_Utility.H>
28#include <AMReX_IntVect.H>
29#include <AMReX_ParmParse.H>
30#include <AMReX_ParallelDescriptor.H>
31#include <AMReX_VisMF.H>
32#include <AMReX_VectorIO.H>
33#include <AMReX_NFiles.H>
34#include <filesystem>
35
38
39
40AmrYtWriter::AmrYtWriter(int step, int bin) {
41 intData_m.resize(1);
42// intData_m[0] = "id";
43// intData_m[1] = "cpu";
44 intData_m[0] = "energy_bin";
45
46 realData_m.resize(//3 + // coordinates
47 3 + // momenta
48 3 + // charge + mass + timestep
49// 1 + // potential at particle location
50 3 + // electric field at particle location
51 3); // magnetic field at particle location
52
53 int idx = 0;
54// realData_m[idx++] ="position_x";
55// realData_m[idx++] ="position_y";
56// realData_m[idx++] ="position_z";
57 realData_m[idx++] ="momentum_x";
58 realData_m[idx++] ="momentum_y";
59 realData_m[idx++] ="momentum_z";
60 realData_m[idx++] ="charge";
61 realData_m[idx++] ="mass";
62 realData_m[idx++] ="timestep";
63// realData_m[idx++] ="potential";
64 realData_m[idx++] ="electric_field_x";
65 realData_m[idx++] ="electric_field_y";
66 realData_m[idx++] ="electric_field_z";
67 realData_m[idx++] ="magnetic_field_x";
68 realData_m[idx++] ="magnetic_field_y";
69 realData_m[idx++] ="magnetic_field_z";
70
71 namespace fs = std::filesystem;
72
73 fs::path dir = OpalData::getInstance()->getInputBasename();
74 std::string dataDir = OpalData::getInstance()->getAuxiliaryOutputDirectory();
75 std::filesystem::path path = dir.parent_path() / dataDir / "amr" / "yt";
76 dir_m = amrex::Concatenate((path / "plt").string(), step, 10);
77 dir_m += "-";
78 dir_m = amrex::Concatenate(dir_m, bin, 3);
79
80 if ( Ippl::myNode() == 0 && !fs::exists(path) ) {
81 try {
82 fs::create_directories( path );
83 } catch(const fs::filesystem_error& ex) {
84 throw OpalException("AmrYtWriter::AmrYtWriter()",
85 ex.what());
86 }
87 }
88
89 Ippl::Comm->barrier();
90}
91
92
/** Write the scalar (rho, phi) and vector (Ex/Ey/Ez) mesh data of all AMR
 *  levels into an AMReX "HyperCLaw-V1.1" plotfile rooted at dir_m, readable
 *  by yt.
 *
 *  NOTE(review): the first lines of this signature were lost in extraction;
 *  they are reconstructed here from the class declaration
 *  (writeFields(rho, phi, efield, refRatio, geom, nLevel, time, scale)) —
 *  confirm against AmrYtWriter.h.
 *
 *  @param rho      charge density, one field per level
 *  @param phi      electrostatic potential, one field per level
 *  @param efield   electric field components, one vector field per level
 *  @param refRatio refinement ratios between consecutive levels
 *  @param geom     geometry (domain, cell sizes) per level
 *  @param nLevel   number of levels to write
 *  @param time     simulation time stamped into the plotfile header
 *  @param scale    particle-mapping scale; every length written to the
 *                  header is divided by it
 */
void AmrYtWriter::writeFields(const amr::AmrScalarFieldContainer_t& rho,
                              const amr::AmrScalarFieldContainer_t& phi,
                              const amr::AmrVectorFieldContainer_t& efield,
                              const amr::AmrIntArray_t& refRatio,
                              const amr::AmrGeomContainer_t& geom,
                              const int& nLevel,
                              const double& time,
                              const double& scale) {
    /* We need to scale the geometry and cell sizes according to the
     * particle mapping
     */

    //
    // Only let 64 CPUs be writing at any one time.
    //
    amrex::VisMF::SetNOutFiles(64);
    //
    // Only the I/O processor makes the directory if it doesn't already exist.
    //
    if ( Ippl::myNode() == 0 )
        if (!amrex::UtilCreateDirectory(dir_m, 0755))
            amrex::CreateDirectoryFailed(dir_m);
    //
    // Force other processors to wait till directory is built.
    //
    Ippl::Comm->barrier();

    std::string HeaderFileName = dir_m + "/Header";

    amrex::VisMF::IO_Buffer io_buffer(amrex::VisMF::IO_Buffer_Size);

    std::ofstream HeaderFile;

    // Use the VisMF buffer for the header stream.
    HeaderFile.rdbuf()->pubsetbuf(io_buffer.dataPtr(), io_buffer.size());

    // Total number of field components per cell: rho + phi + Ex + Ey + Ez.
    int nData = rho[0]->nComp()
              + phi[0]->nComp()
              + efield[0][0]->nComp()
              + efield[0][1]->nComp()
              + efield[0][2]->nComp();

    if ( Ippl::myNode() == 0 ) {
        //
        // Only the IOProcessor() writes to the header file.
        //
        HeaderFile.open(HeaderFileName.c_str(), std::ios::out|std::ios::trunc|std::ios::binary);
        if (!HeaderFile.good())
            amrex::FileOpenFailed(HeaderFileName);
        HeaderFile << "HyperCLaw-V1.1\n";

        HeaderFile << nData << '\n';

        // variable names (one line per component)
        for (int ivar = 1; ivar <= rho[0]->nComp(); ivar++)
            HeaderFile << "rho\n";

        for (int ivar = 1; ivar <= phi[0]->nComp(); ivar++)
            HeaderFile << "phi\n";

        HeaderFile << "Ex\nEy\nEz\n";

        // dimensionality
        HeaderFile << AMREX_SPACEDIM << '\n';

        // time
        HeaderFile << time << '\n';
        HeaderFile << nLevel - 1 << '\n'; // maximum level number (0=single level)

        // physical domain, scaled back to particle units
        for (int i = 0; i < AMREX_SPACEDIM; i++)
            HeaderFile << geom[0].ProbLo(i) / scale << ' ';
        HeaderFile << '\n';
        for (int i = 0; i < AMREX_SPACEDIM; i++)
            HeaderFile << geom[0].ProbHi(i) / scale << ' ';
        HeaderFile << '\n';

        // reference ratio
        for (int i = 0; i < refRatio.size(); ++i)
            HeaderFile << refRatio[i] << ' ';
        HeaderFile << '\n';

        // geometry domain for all levels
        for (int i = 0; i < nLevel; ++i)
            HeaderFile << geom[i].Domain() << ' ';
        HeaderFile << '\n';

        // number of time steps (always 0 here)
        for (int i = 0; i < nLevel; ++i)
            HeaderFile << 0 << ' ';
        HeaderFile << '\n';

        // cell sizes for all level, scaled like the domain
        for (int i = 0; i < nLevel; ++i) {
            for (int k = 0; k < AMREX_SPACEDIM; k++)
                HeaderFile << geom[i].CellSize()[k] / scale << ' ';
            HeaderFile << '\n';
        }

        // coordinate system
        HeaderFile << geom[0].Coord() << '\n';
        HeaderFile << "0\n"; // write boundary data
    }

    for (int lev = 0; lev < nLevel; ++lev) {
        // Build the directory to hold the MultiFab at this level.
        // The name is relative to the directory containing the Header file.
        //
        static const std::string BaseName = "/Cell";
        char buf[64];
        snprintf(buf, sizeof(buf), "Level_%d", lev);
        std::string sLevel = buf;

        //
        // Now for the full pathname of that directory.
        //
        std::string FullPath = dir_m;
        if (!FullPath.empty() && FullPath[FullPath.length()-1] != '/') {
            FullPath += '/';
        }
        FullPath += sLevel;
        //
        // Only the I/O processor makes the directory if it doesn't already exist.
        //
        if ( Ippl::myNode() == 0 ) {
            if (!amrex::UtilCreateDirectory(FullPath, 0755)) {
                amrex::CreateDirectoryFailed(FullPath);
            }
        }
        //
        // Force other processors to wait till directory is built.
        //
        Ippl::Comm->barrier();

        if ( Ippl::myNode() == 0 ) {
            HeaderFile << lev << ' ' << rho[lev]->boxArray().size() << ' ' << 0 /*time*/ << '\n';
            HeaderFile << 0 /* level steps */ << '\n';

            // Per-grid physical extents, scaled like the global domain above.
            for (int i = 0; i < rho[lev]->boxArray().size(); ++i) {
                amrex::Real dx[3] = {
                    geom[lev].CellSize(0),
                    geom[lev].CellSize(1),
                    geom[lev].CellSize(2)
                };
                dx[0] /= scale;
                dx[1] /= scale;
                dx[2] /= scale;
                amrex::Real lo[3] = {
                    geom[lev].ProbLo(0),
                    geom[lev].ProbLo(1),
                    geom[lev].ProbLo(2)
                };
                lo[0] /= scale;
                lo[1] /= scale;
                lo[2] /= scale;
                amr::AmrDomain_t loc = amr::AmrDomain_t(rho[lev]->boxArray()[i],
                                                        &dx[0], &lo[0]);
//                                                      geom[lev].CellSize(),
//                                                      geom[lev].ProbLo());
                for (int n = 0; n < AMREX_SPACEDIM; n++)
                    HeaderFile << loc.lo(n) << ' ' << loc.hi(n) << '\n';
            }

            std::string PathNameInHeader = sLevel;
            PathNameInHeader += BaseName;
            HeaderFile << PathNameInHeader << '\n';
        }

        //
        // We combine all of the multifabs
        //
        amr::AmrField_t data(rho[lev]->boxArray(),
                             rho[lev]->DistributionMap(),
                             nData, 0,
                             amrex::MFInfo());

        //
        // Cull data -- use no ghost cells.
        //
        // dst, src, srccomp, dstcomp, numcomp, nghost
        /*
         * srccomp: the component to copy
         * dstcmop: the component where to copy
         * numcomp: how many components to copy
         */
        amr::AmrField_t::Copy(data, *rho[lev],       0, 0, 1, 0);
        amr::AmrField_t::Copy(data, *phi[lev],       0, 1, 1, 0);
        amr::AmrField_t::Copy(data, *efield[lev][0], 0, 2, 1, 0); // Ex
        amr::AmrField_t::Copy(data, *efield[lev][1], 0, 3, 1, 0); // Ey
        amr::AmrField_t::Copy(data, *efield[lev][2], 0, 4, 1, 0); // Ez

        //
        // Use the Full pathname when naming the MultiFab.
        //
        std::string TheFullPath = FullPath;
        TheFullPath += BaseName;

        amrex::VisMF::Write(data, TheFullPath, amrex::VisMF::NFiles, true);
    }

    if ( Ippl::myNode() == 0 )
        HeaderFile.close();
}
295
296
/** Write the particle bunch in AMReX checkpoint format into the "opal"
 *  subdirectory of dir_m (Header + one Level_<l>/DATA_* set per level).
 *
 *  NOTE(review): the first lines of this signature were lost in extraction;
 *  they are reconstructed here from the class declaration
 *  (writeBunch(bunch_p, time, gamma)) — confirm against AmrYtWriter.h.
 *
 *  @param bunch_p particle bunch to write
 *  @param time    unused (kept for interface compatibility)
 *  @param gamma   Lorentz factor passed through to writeParticles_m for the
 *                 longitudinal-coordinate transform
 */
void AmrYtWriter::writeBunch(const AmrPartBunch* bunch_p,
                             const double& /*time*/,
                             const double& gamma) {
    /* According to
     *
     * template <int NStructReal, int NStructInt, int NArrayReal, int NArrayInt>
     * void
     * ParticleContainer<NStructReal, NStructInt, NArrayReal, NArrayInt>
     * ::Checkpoint (const std::string& dir,
     *               const std::string& name,
     *               bool is_checkpoint,
     *               const Vector<std::string>& real_comp_names,
     *               const Vector<std::string>& int_comp_names) const
     *
     * in AMReX_ParticleContainerI.H with AMReX_Particles.H and AMReX_ParticleI.H.
     *
     * ATTENTION: We need to Lorentz transform the particles.
     */

    const AmrLayout_t* layout_p = static_cast<const AmrLayout_t*>(&bunch_p->getLayout());
    const AmrPartBunch::pbase_t* amrpbase_p = bunch_p->getAmrParticleBase();

    const int NProcs = amrex::ParallelDescriptor::NProcs();
    const int IOProcNumber = amrex::ParallelDescriptor::IOProcessorNumber();

    // Remove zero-length data files after writing (see end of level loop).
    bool doUnlink = true;

    //
    // We store the particles in a subdirectory of "dir".
    //
    std::string pdir = dir_m;

    if ( ! pdir.empty() && pdir[pdir.size()-1] != '/') {
        pdir += '/';
    }

    pdir += "opal";
    //
    // Make the particle directories if they don't already exist.
    //
    if ( Ippl::myNode() == 0 ) {
        if ( ! amrex::UtilCreateDirectory(pdir, 0755)) {
            amrex::CreateDirectoryFailed(pdir);
        }
    }

    // Force other processors to wait until directory is built.
    Ippl::Comm->barrier();

    //
    // The header contains the info we need to read back in the particles.
    //
    // Only the I/O processor writes to the header file.
    //
    std::ofstream HdrFile;

    long nParticles = bunch_p->getTotalNum();

    // do not modify LocalNumPerLevel in here!!!
    auto LocalNumPerLevel = amrpbase_p->getLocalNumPerLevel();

    int nLevel = (&amrpbase_p->getAmrLayout())->maxLevel() + 1;

    // Sum the per-level local particle counts over all ranks so each rank
    // knows which levels contain any particles at all.
    std::unique_ptr<size_t[]> partPerLevel( new size_t[nLevel] );
    std::unique_ptr<size_t[]> globalPartPerLevel( new size_t[nLevel] );

    for (size_t i = 0; i < LocalNumPerLevel.size(); ++i)
        partPerLevel[i] = LocalNumPerLevel[i];

    allreduce(*partPerLevel.get(),
              *globalPartPerLevel.get(),
              nLevel, std::plus<size_t>());

    int finest_level = layout_p->finestLevel();

    if ( Ippl::myNode() == 0 ) {

        std::string HdrFileName = pdir;
        if ( ! HdrFileName.empty() && HdrFileName[HdrFileName.size()-1] != '/') {
            HdrFileName += '/';
        }

        HdrFileName += "Header";
        HdrFile.open(HdrFileName.c_str(), std::ios::out|std::ios::trunc);

        if ( ! HdrFile.good()) {
            amrex::FileOpenFailed(HdrFileName);
        }
        //
        // First thing written is our Checkpoint/Restart version string.
        //
        // We append "_single" or "_double" to the version string indicating
        // whether we're using "float" or "double" floating point data in the
        // particles so that we can Restart from the checkpoint files.
        //
        HdrFile << "Version_Two_Dot_Zero_double" << '\n';
        //
        // AMREX_SPACEDIM and N for sanity checking.
        //
        HdrFile << AMREX_SPACEDIM << '\n';

        // The number of extra real parameters
        HdrFile << realData_m.size() << '\n';
        for (std::size_t j = 0; j < realData_m.size(); ++j)
            HdrFile << realData_m[j] << '\n';

        // The number of extra int parameters
        HdrFile << intData_m.size() << '\n';
        for (std::size_t j = 0; j < intData_m.size(); ++j)
            HdrFile << intData_m[j] << '\n';

        // is_checkpoint
        HdrFile << true << '\n';

        //
        // The total number of particles.
        //
        HdrFile << nParticles << '\n';
        //
        // The value of nextid that we need to restore on restart.
        // --> we don's use this feature
        //
        HdrFile << 0 << '\n';
        //
        // Then the finest level of the AMR hierarchy.
        //
        HdrFile << finest_level << '\n';
        //
        // Then the number of grids at each level.
        //
        for (int lev = 0; lev <= finest_level; ++lev) {
            HdrFile << layout_p->ParticleBoxArray(lev).size() << '\n';
        }
    }
    //
    // We want to write the data out in parallel.
    //
    // We'll allow up to nOutFiles active writers at a time.
    //
    int nOutFiles(256);

    nOutFiles = std::max(1, std::min(nOutFiles, NProcs));

    for (int lev = 0; lev <= finest_level; ++lev) {
        // Skip directory creation / data writing for levels with no
        // particles anywhere (globally).
        bool gotsome = (globalPartPerLevel[lev] > 0);
        //
        // We store the particles at each level in their own subdirectory.
        //
        std::string LevelDir = pdir;

        if (gotsome) {
            if ( ! LevelDir.empty() && LevelDir[LevelDir.size()-1] != '/') {
                LevelDir += '/';
            }

            LevelDir = amrex::Concatenate(LevelDir + "Level_", lev, 1);

            if ( Ippl::myNode() == 0 ) {
                if ( ! amrex::UtilCreateDirectory(LevelDir, 0755)) {
                    amrex::CreateDirectoryFailed(LevelDir);
                }
            }
            //
            // Force other processors to wait until directory is built.
            //
            Ippl::Comm->barrier();
        }

        // Write out the header for each particle
        if ( gotsome && Ippl::myNode() == 0 ) {
            std::string HeaderFileName = LevelDir;
            HeaderFileName += "/Particle_H";
            std::ofstream ParticleHeader(HeaderFileName);

            layout_p->ParticleBoxArray(lev).writeOn(ParticleHeader);
            ParticleHeader << '\n';

            ParticleHeader.flush();
            ParticleHeader.close();
        }

        // Shell MultiFab (no data allocated) used only to know the grid
        // count and distribution at this level.
        amrex::MFInfo info;
        info.SetAlloc(false);
        amr::AmrField_t state(layout_p->ParticleBoxArray(lev),
                              layout_p->ParticleDistributionMap(lev),
                              1, 0, info);
        //
        // We eventually want to write out the file name and the offset
        // into that file into which each grid of particles is written.
        //
        amrex::Vector<int> which(state.size(),0);
        amrex::Vector<int > count(state.size(),0);
        amrex::Vector<long> where(state.size(),0);

        std::string filePrefix(LevelDir);
        filePrefix += '/';
        filePrefix += "DATA_";

        if (gotsome) {
            bool groupSets(false), setBuf(true);
            for(amrex::NFilesIter nfi(nOutFiles, filePrefix, groupSets, setBuf); nfi.ReadyToWrite(); ++nfi) {
                std::ofstream& myStream = (std::ofstream&) nfi.Stream();
                //
                // Write out all the valid particles we own at the specified level.
                // Do it grid block by grid block remembering the seek offset
                // for the start of writing of each block of data.
                //
                writeParticles_m(lev, myStream, nfi.FileNumber(), which, count, where, bunch_p, gamma);
            }

            // Gather per-grid file number / count / offset on the I/O rank.
            amrex::ParallelDescriptor::ReduceIntSum (which.dataPtr(), which.size(), IOProcNumber);
            amrex::ParallelDescriptor::ReduceIntSum (count.dataPtr(), count.size(), IOProcNumber);
            amrex::ParallelDescriptor::ReduceLongSum(where.dataPtr(), where.size(), IOProcNumber);
        }

        if ( Ippl::myNode() == 0 ) {
            for (int j = 0; j < state.size(); j++) {
                //
                // We now write the which file, the particle count, and the
                // file offset into which the data for each grid was written,
                // to the header file.
                //
                HdrFile << which[j] << ' ' << count[j] << ' ' << where[j] << '\n';
            }

            if (gotsome && doUnlink) {
                //
                // Unlink any zero-length data files.
                //
                amrex::Vector<long> cnt(nOutFiles,0);

                for (int i = 0, N=count.size(); i < N; i++) {
                    cnt[which[i]] += count[i];
                }

                for (int i = 0, N=cnt.size(); i < N; i++) {
                    if (cnt[i] == 0) {
                        std::string FullFileName = amrex::NFilesIter::FileName(i, filePrefix);
                        amrex::UnlinkFile(FullFileName.c_str());
                    }
                }
            }
        }
    }            // ---- end for(lev...)

    if ( Ippl::myNode() == 0 ) {
        HdrFile.flush();
        HdrFile.close();
        if ( ! HdrFile.good()) {
            throw OpalException("AmrYtWriter:writeBunch()",
                                "Problem writing HdrFile");
        }
    }
}
551
552
/** Write the binary particle payload of one AMR level, grid block by grid
 *  block, into the per-rank data stream, and record for each grid which
 *  file it went to, how many particles it holds, and at which file offset
 *  its data starts.
 *
 *  NOTE(review): the first line of this signature was lost in extraction;
 *  it is reconstructed here from the class declaration — confirm against
 *  AmrYtWriter.h.
 *
 *  @param level   AMR level to write
 *  @param ofs     output stream of the file this rank currently writes to
 *  @param fnum    number of that file (recorded per grid in @p which)
 *  @param which   out: file number per grid
 *  @param count   out: number of particles per grid
 *  @param where   out: byte offset per grid within its file
 *  @param bunch_p particle bunch supplying the attributes
 *  @param gamma   Lorentz factor applied to the longitudinal coordinate
 */
void AmrYtWriter::writeParticles_m(int level,
                                   std::ofstream& ofs,
                                   int fnum,
                                   amrex::Vector<int>& which,
                                   amrex::Vector<int>& count,
                                   amrex::Vector<long>& where,
                                   const AmrPartBunch* bunch_p,
                                   const double gamma) const
{
    /* Copied and modified of
     *
     * template <int NStructReal, int NStructInt, int NArrayReal, int NArrayInt>
     * void
     * ParticleContainer<NStructReal,
     *                   NStructInt,
     *                   NArrayReal,
     *                   NArrayInt>
     * ::WriteParticles(int lev,
     *                  std::ofstream& ofs,
     *                  int fnum,
     *                  Vector<int>& which,
     *                  Vector<int>& count,
     *                  Vector<long>& where,
     *                  bool is_checkpoint) const
     *
     * in AMReX_ParticleContainerI.H
     */

    const AmrLayout_t* layout_p = static_cast<const AmrLayout_t*>(&bunch_p->getLayout());
    const AmrPartBunch::pbase_t* amrpbase_p = bunch_p->getAmrParticleBase();

    // [lBegin, lEnd) is this rank's index range of particles on this level.
    const auto& LocalNumPerLevel = amrpbase_p->getLocalNumPerLevel();
    size_t lBegin = LocalNumPerLevel.begin(level);
    size_t lEnd   = LocalNumPerLevel.end(level);

    // First pass: count particles per grid.
    for (size_t ip = lBegin; ip < lEnd; ++ip) {
        const int grid = amrpbase_p->Grid[ip];

        count[grid] += 1;
    }

    // Shell MultiFab (no data allocated) used to iterate over the grids
    // owned by this rank at this level.
    amrex::MFInfo info;
    info.SetAlloc(false);
    amr::AmrField_t state(layout_p->ParticleBoxArray(level),
                          layout_p->ParticleDistributionMap(level),
                          1, 0, info);

    for (amrex::MFIter mfi(state); mfi.isValid(); ++mfi) {
        const int grid = mfi.index();

        which[grid] = fnum;
        where[grid] = amrex::VisMF::FileOffset(ofs);

        if (count[grid] == 0) {
            continue;
        }

        // First write out the integer data in binary.
        // Per particle: id, cpu, plus the intData_m attributes (energy_bin).
        const int iChunkSize = 2 + intData_m.size();
        amrex::Vector<int> istuff(count[grid]*iChunkSize);
        int* iptr = istuff.dataPtr();

        // inefficient
        for (size_t ip = lBegin; ip < lEnd; ++ip) {
            const int pGrid = amrpbase_p->Grid[ip];

            if ( grid == pGrid ) {

                iptr[0] = bunch_p->ID[ip];
                iptr[1] = Ippl::myNode();
                iptr[2] = bunch_p->Bin[ip];
                iptr += iChunkSize;
            }
        }

        amrex::writeIntData(istuff.dataPtr(), istuff.size(), ofs);

        // Write the Real data in binary.
        // Per particle: AMREX_SPACEDIM coordinates plus the realData_m
        // attributes (momenta, charge, mass, timestep, E- and B-field).
        const int rChunkSize = AMREX_SPACEDIM + realData_m.size();
        amrex::Vector<double> rstuff(count[grid]*rChunkSize);
        double* rptr = rstuff.dataPtr();


        // inefficient
        for (size_t ip = lBegin; ip < lEnd; ++ip) {
            const int pGrid = amrpbase_p->Grid[ip];

            if ( grid == pGrid ) {

                int idx = 0;

                // coordinates
                for (int j = 0; j < AMREX_SPACEDIM; ++j)
                    rptr[idx++] = bunch_p->R[ip](j);

                // Lorentz transform of the longitudinal coordinate only
                if ( OpalData::getInstance()->isInOPALCyclMode() )
                    rptr[1] *= gamma; // y is longitudinal direction
                else
                    rptr[2] *= gamma; // z is longitudinal direction

                // momenta
                for (int j = 0; j < AMREX_SPACEDIM; ++j)
                    rptr[idx++] = bunch_p->P[ip](j);

                // charge
                rptr[idx++] = bunch_p->Q[ip];

                // mass
                rptr[idx++] = bunch_p->M[ip];

                // timestep
                rptr[idx++] = bunch_p->dt[ip];

//                 // potential
//                 rptr[idx++] = bunch_p->Phi[ip];

                // electric field
                for (int j = 0; j < AMREX_SPACEDIM; ++j)
                    rptr[idx++] = bunch_p->Ef[ip](j);

                // magnetic field
                for (int j = 0; j < AMREX_SPACEDIM; ++j)
                    rptr[idx++] = bunch_p->Bf[ip](j);

                rptr += idx; //realData_m.size();
            }
        }
        amrex::writeDoubleData(rstuff.dataPtr(), rstuff.size(), ofs);
    }
}
// ---------------------------------------------------------------------------
// Doxygen cross-reference index (residue of the generated HTML page),
// preserved as a comment so it cannot interfere with compilation:
//
// BoxLibLayout< double, 3 > AmrLayout_t
//     Definition PBunchDefs.h:37
// void allreduce(const T *input, T *output, int count, Op op)
// PETE_TTTree< OpWhere, typename Cond_t::PETE_Expr_t, typename True_t::PETE_Expr_t, PETE_Scalar< Vektor< T, Dim > > > where(const PETE_Expr< Cond_t > &c, const PETE_Expr< True_t > &t, const Vektor< T, Dim > &f)
// amrex::Vector< AmrVectorField_t > AmrVectorFieldContainer_t
//     Definition AmrDefs.h:42
// amrex::RealBox AmrDomain_t
//     Definition AmrDefs.h:46
// amrex::Vector< AmrGeometry_t > AmrGeomContainer_t
//     Definition AmrDefs.h:43
// amrex::MultiFab AmrField_t
//     Definition AmrDefs.h:34
// amrex::Vector< std::unique_ptr< AmrField_t > > AmrScalarFieldContainer_t
//     Definition AmrDefs.h:41
// amrex::Vector< int > AmrIntArray_t
//     Definition AmrDefs.h:47
// ParticleLayout< T, Dim > & getLayout()
// ParticleAttrib< Vector_t > Ef
// ParticlePos_t & R
// ParticleAttrib< int > Bin
// ParticleAttrib< double > M
// size_t getTotalNum() const
// ParticleAttrib< Vector_t > P
// ParticleAttrib< double > Q
// ParticleAttrib< double > dt
// ParticleAttrib< Vector_t > Bf
// ParticleIndex_t & ID
// std::string getInputBasename()
//     get input file name without extension
//     Definition OpalData.cpp:674
// static OpalData * getInstance()
//     Definition OpalData.cpp:196
// std::string getAuxiliaryOutputDirectory() const
//     get the name of the the additional data directory
//     Definition OpalData.cpp:666
// std::vector< std::string > intData_m
//     integer bunch data
//     Definition AmrYtWriter.h:85
// std::vector< std::string > realData_m
//     real bunch data
//     Definition AmrYtWriter.h:86
// AmrYtWriter(int step, int bin=0)
// void writeFields(const amr::AmrScalarFieldContainer_t &rho, const amr::AmrScalarFieldContainer_t &phi, const amr::AmrVectorFieldContainer_t &efield, const amr::AmrIntArray_t &refRatio, const amr::AmrGeomContainer_t &geom, const int &nLevel, const double &time, const double &scale)
// std::string dir_m
//     directory where to write files
//     Definition AmrYtWriter.h:84
// void writeParticles_m(int level, std::ofstream &ofs, int fnum, amrex::Vector< int > &which, amrex::Vector< int > &count, amrex::Vector< long > &where, const AmrPartBunch *bunch_p, const double gamma) const
// void writeBunch(const AmrPartBunch *bunch_p, const double &time, const double &gamma)
// int finestLevel() const
// AmrParticle_t pbase_t
// pbase_t * getAmrParticleBase()
// The base class for all OPAL exceptions.
// ParticleIndex_t Grid
// const ParticleLevelCounter_t & getLocalNumPerLevel() const
// PLayout & getAmrLayout()
// static int myNode()
//     Definition IpplInfo.cpp:691
// static Communicate * Comm
//     Definition IpplInfo.h:84
// ---------------------------------------------------------------------------