// NOTE(review): This is a truncated/garbled excerpt of a LandauDamping PIC
// driver (IPPL + Kokkos + MPI). Each line carries a fused original line
// number (139, 144, ...) and many interior lines are missing (numbering
// jumps e.g. 153->168, 284->286); the closing of main() is not visible here.
// Comments below describe only what the visible fragments show.
139int main(
int argc,
char* argv[]) {
// Message stream tagged with the simulation name (IPPL Inform object).
144 Inform msg(
"LandauDamping");
// Wall-clock start for the elapsed-time report at the end of main().
147 auto start = std::chrono::high_resolution_clock::now();
// Parse per-dimension grid sizes from the command line into nr[].
// NOTE(review): `nr`, `arg`, and `Dim` are declared on elided lines.
152 for (
unsigned d = 0; d <
Dim; d++) {
153 nr[d] = std::atoi(argv[arg++]);
// Total particle count and number of time steps from the command line.
168 const size_type totalP = std::atoll(argv[arg++]);
169 const unsigned int nt = std::atoi(argv[arg++]);
// Echo the run configuration.
171 msg <<
"Landau damping" <<
endl
172 <<
"nt " << nt <<
" Np= " << totalP <<
" grid = " << nr <<
endl;
// Particle bunch, created later once the layout/solver are known.
176 std::unique_ptr<bunch_type> P;
// Loop body elided in this excerpt (original lines 179-182).
179 for (
unsigned i = 0; i <
Dim; i++) {
// All dimensions are decomposed in parallel.
183 std::array<bool, Dim> isParallel;
184 isParallel.fill(
true);
// NOTE(review): starts the fold at -1 and multiplies by each rmax
// component, i.e. Q = -(product of rmax[]) — presumably the (negative)
// total charge over the domain volume; confirm against the full source.
194 double Q = std::reduce(rmax.
begin(), rmax.
end(), -1., std::multiplies<double>());
197 const bool isAllPeriodic =
true;
// Field solver selection from the command line.
202 std::string solver = argv[arg++];
// Reject the open-boundary solver; the statement raising/printing the
// message below is cut off in this excerpt (original lines 205-208).
204 if (solver ==
"OPEN") {
206 "Open boundaries solver incompatible with this simulation!");
// Construct the bunch; PL/hr/rmin/rmax/mesh/FL come from elided lines.
209 P = std::make_unique<bunch_type>(PL, hr, rmin, rmax, isParallel, Q, solver);
213 P->initializeFields(mesh, FL);
// Load-balance threshold from the command line; a value of 1.0 below
// disables the initial repartition.
217 P->loadbalancethreshold_m = std::atof(argv[arg++]);
223 bool isFirstRepartition;
// Initial ORB repartition: only when load balancing is enabled and more
// than one MPI rank exists.
225 if ((P->loadbalancethreshold_m != 1.0) && (
ippl::Comm->size() > 1)) {
226 msg <<
"Starting first repartition" <<
endl;
228 isFirstRepartition =
true;
230 const int nghost = P->rho_m.getNghost();
231 auto rhoview = P->rho_m.getView();
// Seed rho from the analytic PDF so the ORB can partition on it.
// NOTE(review): the enclosing parallel-for call and the lambda body are
// elided here (original lines 232-246).
235 "Assign initial rho based on PDF", P->rho_m.getFieldRangePolicy(),
236 KOKKOS_LAMBDA(
const index_array_type& args) {
247 P->initializeORB(FL, mesh);
248 P->repartition(FL, mesh, isFirstRepartition);
252 msg <<
"First domain decomposition done" <<
endl;
// Per-rank region bounds (host mirror) used for inverse-CDF sampling.
256 const RegionLayout_t& RLayout = PL.getRegionLayout();
257 const typename RegionLayout_t::host_mirror_type Regions = RLayout.
gethLocalRegions();
// Per-dimension CDF mass of the local region (Nr) vs the whole domain
// (Dr), plus the local CDF bounds [minU, maxU] for sampling; `factor`
// accumulates the local fraction of particles.
261 for (
unsigned d = 0; d <
Dim; ++d) {
262 Nr[d] =
CDF(Regions(myRank)[d].max(), alpha, kw[d])
263 -
CDF(Regions(myRank)[d].min(), alpha, kw[d]);
264 Dr[d] =
CDF(rmax[d], alpha, kw[d]) -
CDF(rmin[d], alpha, kw[d]);
265 minU[d] =
CDF(Regions(myRank)[d].min(), alpha, kw[d]);
266 maxU[d] =
CDF(Regions(myRank)[d].max(), alpha, kw[d]);
267 factor *= Nr[d] / Dr[d];
// Global particle count across ranks; the communicator argument is on an
// elided continuation line (original lines 273-275).
273 MPI_Allreduce(&nloc, &Total_particles, 1, MPI_UNSIGNED_LONG, MPI_SUM,
// Remainder particles left over after integer-splitting totalP.
276 int rest = (int)(totalP - Total_particles);
// Sample particle positions/velocities; the functor argument list is
// partially elided (original line 285).
284 Kokkos::parallel_for(
286 P->R.getView(), P->P.getView(), rand_pool64, alpha, kw, minU, maxU));
// Uniform charge per particle.
292 P->q = P->Q_m / totalP;
293 msg <<
"particles created and initial conditions assigned " <<
endl;
// Subsequent repartitions are no longer the "first" one.
294 isFirstRepartition =
false;
// Initial charge deposition (cloud-in-cell) at step 0.
303 P->scatterCIC(totalP, 0, hr);
// Host mirror of the E field used by the asynchronous dump below.
309 auto Eview = P->getEMirror();
315 P->dumpLandau(Eview);
316 P->gatherStatistics(totalP);
// Worker thread for overlapping diagnostics output with compute;
// assigned inside the time-step loop.
321 std::thread dumpThread;
322 msg <<
"Starting iterations ..." <<
endl;
// --- Leapfrog time-step loop (loop header elided in this excerpt) ---
// Half-step kick:
331 P->P = P->P - 0.5 * dt * P->E;
// Full-step drift:
336 P->R = P->R + dt * P->P;
// Optional dynamic load balancing this step.
346 if (P->balance(totalP, it + 1)) {
347 msg <<
"Starting repartition" <<
endl;
349 P->repartition(FL, mesh, isFirstRepartition);
// Deposit charge at the new positions.
357 P->scatterCIC(totalP, it + 1, hr);
// Refresh the host E mirror, then (body elided) presumably join the
// previous dump thread before reusing Eview — confirm in full source.
365 P->updateEMirror(Eview);
366 if (dumpThread.joinable()) {
// Diagnostics closure ("dump") body; launched asynchronously below.
371 msg <<
"Processing time step " << it + 1 <<
" on host" <<
endl;
372 P->dumpLandau(Eview);
373 P->gatherStatistics(totalP);
376 dumpThread = std::thread(dump);
// Second half-step kick completing the leapfrog update.
384 P->P = P->P - 0.5 * dt * P->E;
388 msg <<
"Host reached end of time step: " << it + 1 <<
" time: " << P->time_m <<
endl;
// Final join of the dump thread (join call elided; original line 397).
396 if (dumpThread.joinable()) {
400 msg <<
"LandauDamping: End." <<
endl;
// Report total wall-clock time for the run.
404 auto end = std::chrono::high_resolution_clock::now();
406 std::chrono::duration<double> time_chrono =
407 std::chrono::duration_cast<std::chrono::duration<double>>(end - start);
408 std::cout <<
"Elapsed time: " << time_chrono.count() << std::endl;