IPPL (Independent Parallel Particle Layer)
IPPL
Loading...
Searching...
No Matches
UniformPlasmaTest.cpp
Go to the documentation of this file.
1// Uniform Plasma Test
2// Usage:
3// srun ./UniformPlasmaTest
4// <nx> [<ny>...] <Np> <Nt> <stype>
5// <lbfreq> --overallocate <ovfactor> --info 10
6// nx = No. cell-centered points in the x-direction
7// ny... = No. cell-centered points in the y-, z-, ...direction
8// Np = Total no. of macro-particles in the simulation
9// Nt = Number of time steps
10// stype = Field solver type (FFT, CG, TG, and OPEN supported)
11// lbfreq = Load balancing frequency i.e., Number of time steps after which particle
12// load balancing should happen
13// ovfactor = Over-allocation factor for the buffers used in the communication. Typical
14// values are 1.0, 2.0. Value 1.0 means no over-allocation.
15// Example:
16// srun ./UniformPlasmaTest 128 128 128 10000 10 FFT 10 --overallocate 1.0 --info 10
17//
18#include <Kokkos_Random.hpp>
19#include <chrono>
20#include <iostream>
21#include <random>
22#include <set>
23#include <string>
24#include <vector>
25
26#include "Utility/IpplTimings.h"
27
28#include "ChargedParticles.hpp"
29
// Dimensionality of the simulation domain (3D plasma test).
constexpr unsigned Dim = 3;

// Test name used for diagnostics/Inform output.
const char* TestName = "UniformPlasmaTest";
33
34template <typename T, class GeneratorPool, unsigned Dim>
35struct generate_random {
37 // Output View for the random numbers
39
40 // The GeneratorPool
41 GeneratorPool rand_pool;
42
44
45 // Initialize all members
46 generate_random(view_type vals_, GeneratorPool rand_pool_, T start_, T end_)
47 : vals(vals_)
48 , rand_pool(rand_pool_)
49 , start(start_)
50 , end(end_) {}
51
52 KOKKOS_INLINE_FUNCTION void operator()(const size_t i) const {
53 // Get a random number state from the pool for the active thread
54 typename GeneratorPool::generator_type rand_gen = rand_pool.get_state();
55
56 // Draw samples numbers from the pool as double in the range [start, end)
57 for (unsigned d = 0; d < Dim; ++d) {
58 vals(i)[d] = rand_gen.drand(start[d], end[d]);
59 }
60
61 // Give the state back, which will allow another thread to acquire it
62 rand_pool.free_state(rand_gen);
63 }
64};
65
66int main(int argc, char* argv[]) {
67 ippl::initialize(argc, argv);
68 {
70
71 Inform msg("UniformPlasmaTest");
72 Inform msg2all(argv[0], INFORM_ALL_NODES);
73
74 auto start = std::chrono::high_resolution_clock::now();
75 int arg = 1;
76
78 for (unsigned d = 0; d < Dim; d++)
79 nr[d] = std::atoi(argv[arg++]);
80
81 static IpplTimings::TimerRef mainTimer = IpplTimings::getTimer("total");
82 static IpplTimings::TimerRef particleCreation = IpplTimings::getTimer("particlesCreation");
83 static IpplTimings::TimerRef dumpDataTimer = IpplTimings::getTimer("dumpData");
84 static IpplTimings::TimerRef PTimer = IpplTimings::getTimer("pushVelocity");
85 static IpplTimings::TimerRef temp = IpplTimings::getTimer("randomMove");
86 static IpplTimings::TimerRef RTimer = IpplTimings::getTimer("pushPosition");
87 static IpplTimings::TimerRef updateTimer = IpplTimings::getTimer("update");
88 static IpplTimings::TimerRef DummySolveTimer = IpplTimings::getTimer("solveWarmup");
89 static IpplTimings::TimerRef SolveTimer = IpplTimings::getTimer("solve");
90 static IpplTimings::TimerRef domainDecomposition = IpplTimings::getTimer("loadBalance");
91
92 IpplTimings::startTimer(mainTimer);
93
94 const size_type totalP = std::atoll(argv[arg++]);
95 const unsigned int nt = std::atoi(argv[arg++]);
96
97 msg << "Uniform Plasma Test" << endl
98 << "nt " << nt << " Np= " << totalP << " grid = " << nr << endl;
99
100 using bunch_type = ChargedParticles<PLayout_t<double, Dim>, double, Dim>;
101
102 std::unique_ptr<bunch_type> P;
103
104 ippl::NDIndex<Dim> domain;
105 for (unsigned i = 0; i < Dim; i++) {
106 domain[i] = ippl::Index(nr[i]);
107 }
108
109 // create mesh and layout objects for this problem domain
110 Vector_t<double, Dim> rmin(0.0);
111 Vector_t<double, Dim> rmax(20.0);
112
113 Vector_t<double, Dim> hr = rmax / nr;
114 Vector_t<double, Dim> origin = rmin;
115 const double dt = 1.0;
116
117 std::array<bool, Dim> isParallel;
118 isParallel.fill(true);
119
120 const bool isAllPeriodic = true;
121 Mesh_t<Dim> mesh(domain, hr, origin);
122 FieldLayout_t<Dim> FL(MPI_COMM_WORLD, domain, isParallel, isAllPeriodic);
123 PLayout_t<double, Dim> PL(FL, mesh);
124
125 double Q = -1562.5;
126 std::string solver = argv[arg++];
127 P = std::make_unique<bunch_type>(PL, hr, rmin, rmax, isParallel, Q, solver);
128
129 P->nr_m = nr;
130 size_type nloc = totalP / ippl::Comm->size();
131
132 int rest = (int)(totalP - nloc * ippl::Comm->size());
133
134 if (ippl::Comm->rank() < rest)
135 ++nloc;
136
137 IpplTimings::startTimer(particleCreation);
138 P->create(nloc);
139
140 const ippl::NDIndex<Dim>& lDom = FL.getLocalNDIndex();
141 Vector_t<double, Dim> Rmin, Rmax;
142 for (unsigned d = 0; d < Dim; ++d) {
143 Rmin[d] = origin[d] + lDom[d].first() * hr[d];
144 Rmax[d] = origin[d] + (lDom[d].last() + 1) * hr[d];
145 }
146
147 Kokkos::Random_XorShift64_Pool<> rand_pool64((size_type)(42 + 100 * ippl::Comm->rank()));
148 Kokkos::parallel_for(
149 nloc, generate_random<Vector_t<double, Dim>, Kokkos::Random_XorShift64_Pool<>, Dim>(
150 P->R.getView(), rand_pool64, Rmin, Rmax));
151 Kokkos::fence();
152 P->q = P->Q_m / totalP;
153 P->P = 0.0;
154 IpplTimings::stopTimer(particleCreation);
155
156 P->initializeFields(mesh, FL);
157
158 IpplTimings::startTimer(updateTimer);
159 P->update();
160 IpplTimings::stopTimer(updateTimer);
161
162 msg << "particles created and initial conditions assigned " << endl;
163
164 P->initSolver();
165 P->time_m = 0.0;
166 P->loadbalancefreq_m = std::atoi(argv[arg++]);
167
168 IpplTimings::startTimer(DummySolveTimer);
169 P->rho_m = 0.0;
170 P->runSolver();
171 IpplTimings::stopTimer(DummySolveTimer);
172
173 P->scatterCIC(totalP, 0, hr);
174 P->initializeORB(FL, mesh);
175 bool fromAnalyticDensity = false;
176
177 IpplTimings::startTimer(SolveTimer);
178 P->runSolver();
179 IpplTimings::stopTimer(SolveTimer);
180
181 P->gatherCIC();
182
183 IpplTimings::startTimer(dumpDataTimer);
184 P->dumpData();
185 P->gatherStatistics(totalP);
186 IpplTimings::stopTimer(dumpDataTimer);
187
188 // begin main timestep loop
189 msg << "Starting iterations ..." << endl;
190 // P->gatherStatistics(totalP);
191 for (unsigned int it = 0; it < nt; it++) {
192 // LeapFrog time stepping https://en.wikipedia.org/wiki/Leapfrog_integration
193 // Here, we assume a constant charge-to-mass ratio of -1 for
194 // all the particles hence eliminating the need to store mass as
195 // an attribute
196 // kick
198 P->P = P->P - 0.5 * dt * P->E;
200
202 Kokkos::parallel_for(
203 P->getLocalNum(),
204 generate_random<Vector_t<double, Dim>, Kokkos::Random_XorShift64_Pool<>, Dim>(
205 P->P.getView(), rand_pool64, -hr, hr));
206 Kokkos::fence();
208
209 // drift
211 P->R = P->R + dt * P->P;
213
214 // Since the particles have moved spatially update them to correct processors
215 IpplTimings::startTimer(updateTimer);
216 P->update();
217 IpplTimings::stopTimer(updateTimer);
218
219 // Domain Decomposition
220 if (P->balance(totalP, it + 1)) {
221 msg << "Starting repartition" << endl;
222 IpplTimings::startTimer(domainDecomposition);
223 P->repartition(FL, mesh, fromAnalyticDensity);
224 IpplTimings::stopTimer(domainDecomposition);
225 }
226
227 // scatter the charge onto the underlying grid
228 P->scatterCIC(totalP, it + 1, hr);
229
230 // Field solve
231 IpplTimings::startTimer(SolveTimer);
232 P->runSolver();
233 IpplTimings::stopTimer(SolveTimer);
234
235 // gather E field
236 P->gatherCIC();
237
238 // kick
240 P->P = P->P - 0.5 * dt * P->E;
242
243 P->time_m += dt;
244 IpplTimings::startTimer(dumpDataTimer);
245 P->dumpData();
246 P->gatherStatistics(totalP);
247 IpplTimings::stopTimer(dumpDataTimer);
248 msg << "Finished time step: " << it + 1 << " time: " << P->time_m << endl;
249
250 if (checkSignalHandler()) {
251 msg << "Aborting timestepping loop due to signal " << interruptSignalReceived
252 << endl;
253 break;
254 }
255 }
256
257 msg << "Uniform Plasma Test: End." << endl;
258 IpplTimings::stopTimer(mainTimer);
260 IpplTimings::print(std::string("timing.dat"));
261 auto end = std::chrono::high_resolution_clock::now();
262
263 std::chrono::duration<double> time_chrono =
264 std::chrono::duration_cast<std::chrono::duration<double>>(end - start);
265 std::cout << "Elapsed time: " << time_chrono.count() << std::endl;
266 }
268
269 return 0;
270}
int main(int argc, char *argv[])
constexpr unsigned Dim
bool checkSignalHandler()
int interruptSignalReceived
void setSignalHandler()
constexpr unsigned Dim
ippl::detail::size_type size_type
Definition datatypes.h:23
const char * TestName
ippl::FieldLayout< Dim > FieldLayout_t
Definition datatypes.h:21
ippl::Vector< T, Dim > Vector_t
Definition datatypes.h:38
typename ippl::ParticleSpatialLayout< T, Dim, Mesh_t< Dim > > PLayout_t
Definition datatypes.h:15
ippl::UniformCartesian< double, Dim > Mesh_t
Definition datatypes.h:12
Inform & endl(Inform &inf)
Definition Inform.cpp:42
#define INFORM_ALL_NODES
Definition Inform.h:38
void initialize(int &argc, char *argv[], MPI_Comm comm)
Definition Ippl.cpp:16
void finalize()
Definition Ippl.cpp:94
std::unique_ptr< mpi::Communicator > Comm
Definition Ippl.h:22
const NDIndex_t & getLocalNDIndex() const
KOKKOS_INLINE_FUNCTION Vector< int, Dim > last() const
Definition NDIndex.hpp:178
KOKKOS_INLINE_FUNCTION Vector< int, Dim > first() const
Definition NDIndex.hpp:170
Kokkos::View< typename NPtr< T, Dim >::type, Properties... > view_type
Definition ViewTypes.h:45
Timing::TimerRef TimerRef
static TimerRef getTimer(const char *nm)
static void stopTimer(TimerRef t)
static void print()
static void startTimer(TimerRef t)
KOKKOS_INLINE_FUNCTION void operator()(const size_t i) const
generate_random(view_type vals_, GeneratorPool rand_pool_, T start_, T end_)
typename ippl::detail::ViewType< T, 1 >::view_type view_type