OPAL (Object Oriented Parallel Accelerator Library) 2024.2
OPAL
HashPairBuilderPeriodicParallel.h
Go to the documentation of this file.
1// -*- C++ -*-
2/***************************************************************************
3 *
4 * The IPPL Framework
5 *
6 * HashPairBuilderPeriodicParallel follows the Hockney and Eastwood approach to efficiently
 7 * find particle pairs. In this version of the code a local chaining mesh per processor is used to avoid looping over
8 * empty buckets.
9 *
10 * Visit http://people.web.psi.ch/adelmann/ for more details
11 *
12 ***************************************************************************/
13
14
15
16#ifndef HASH_PAIR_BUILDER_PERIODIC_PARALLEL_H
17#define HASH_PAIR_BUILDER_PERIODIC_PARALLEL_H
18
#include <algorithm>
#include <cmath>
#include <limits>
#include <set>
#include <vector>
23
24template<class PBase>
26{
27public:
28 enum { Dim = PBase::Dim };
29 typedef typename PBase::Position_t Position_t;
30
31 HashPairBuilderPeriodicParallel(PBase &p) : particles(p) { hr_m = p.get_hr();}
32
33 template<class Pred, class OP>
34 void for_each(const Pred& pred, const OP &op,Vektor<double,3> extend_l, Vektor<double,3> extend_r )
35 {
36 const std::size_t END = std::numeric_limits<std::size_t>::max();
37 std::size_t size = particles.getLocalNum()+particles.getGhostNum();
38
39 // Inform dmsg("debug_msg:");
40 // dmsg << "We use parallel hash pair builder small chaining mesh ****************************" << endl;
41
42 //compute which dimensions are really serial process neighbors itself in this direction
43 Vektor<bool,3> parallel_dims(0,0,0);
44
45 NDIndex<3> globDomain = particles.getFieldLayout().getDomain();
46 NDIndex<3> locDomain = particles.getFieldLayout().getLocalNDIndex();
47
48 parallel_dims[0] = !(globDomain[0]==locDomain[0]);
49 parallel_dims[1] = !(globDomain[1]==locDomain[1]);
50 parallel_dims[2] = !(globDomain[2]==locDomain[2]);
51
52 Vektor<double,3> period;
53 period=extend_r-extend_l;
54
56 Vektor<double,3> extend_l_local, extend_r_local, domain_width_local;
57 for (unsigned i=0; i<3; ++i) {
58 extend_l_local[i] = locDomain[i].first()*hr_m[i]+extend_l[i];
59 extend_r_local[i] = extend_l[i]+(locDomain[i].last()+1)*hr_m[i];
60 domain_width_local[i] = extend_r_local[i]-extend_l_local[i];
61 }
62
63 //make sure that the chaining mesh covers the whole domain and has a gridwidth > r_cut
64 buckets_per_dim[0]=floor(domain_width_local[0]/pred.getRange(0));
65 buckets_per_dim[1]=floor(domain_width_local[1]/pred.getRange(1));
66 buckets_per_dim[2]=floor(domain_width_local[2]/pred.getRange(2));
67
68 for (unsigned dim = 0; dim<3; ++dim)
69 h_chaining[dim] = domain_width_local[dim]/buckets_per_dim[dim];
70
71 //extend the chaining mesh by one layer of chaining cells in each dimension
72 rmin_m = extend_l_local-h_chaining;
73 rmax_m = extend_r_local+h_chaining;
75
76 //dmsg << "local domain iwdth = " << domain_width_local << endl;
77 //dmsg << "local extends : " << extend_l_local << "\t" << extend_r_local << endl;
78 //dmsg << "local extends with chaining: " << rmin_m << "\t" << rmax_m << endl;
79 //dmsg << "buckets per dim = " << buckets_per_dim << endl;
80 //dmsg << "h_chaining = " << h_chaining << endl;
81
82 std::size_t Nbucket = buckets_per_dim[0]*buckets_per_dim[1]*buckets_per_dim[2];
83
84 std::size_t *buckets = new size_t[Nbucket]; //index of first particle in this bucket
85 std::size_t *next = new size_t[size]; //index of next particle in this bucket. END indicates last particle of bucket
86 std::fill(buckets, buckets+Nbucket, END);
87 std::fill(next, next+size, END);
88
89 //in 3D we interact with 14 neighboring cells (including self cell interaction)
90 unsigned neigh = 14;
91
92 int offset[14][3] = {{ 1, 1, 1}, { 0, 1, 1}, {-1, 1, 1},
93 { 1, 0, 1}, { 0, 0, 1}, {-1, 0, 1},
94 { 1,-1, 1}, { 0,-1, 1}, {-1,-1, 1},
95 { 1, 1, 0}, { 0, 1, 0}, {-1, 1, 0},
96 { 1, 0, 0}, { 0, 0, 0}};
97
98 //assign all particles to a bucket
99 for(std::size_t i = 0;i<size;++i)
100 {
101 unsigned bucket_id = get_bucket_id(i,pred);
102 //dmsg << "we got bucket id = " << bucket_id << endl;
103 next[i] = buckets[bucket_id];
104 buckets[bucket_id] = i;
105 }
106
107 //loop over all buckets
108 for (int bx=0+int(!parallel_dims[0]); bx<buckets_per_dim[0]-int(!parallel_dims[0]); ++bx) {
109 for (int by=0+int(!parallel_dims[1]); by<buckets_per_dim[1]-int(!parallel_dims[1]); ++by) {
110 for (int bz=0+int(!parallel_dims[2]); bz<buckets_per_dim[2]-int(!parallel_dims[2]); ++bz) {
111 unsigned bucket_id_self = bz*buckets_per_dim[1]*buckets_per_dim[0]+by*buckets_per_dim[0]+bx;
112 //compute index of neighboring bucket to interact with
113 for (unsigned n=0; n<neigh;++n){
114 int bx_neigh, by_neigh, bz_neigh;
115 Vektor<double,3> shift(0,0,0);
116
117 bx_neigh = bx+offset[n][0];
118 //if we are serial in x-dimension we have no cached ghost particles. The local positions get periodically shifted
119 if (!parallel_dims[0]) {
120 if (bx_neigh == 0) {
121 //bucket in -x direction exceed domain boundary
122 bx_neigh+=(buckets_per_dim[0]-2);//consider last bucket in +x instead
123 shift[0] = -period[0];//shift particles in negative x direction by domain size
124 }
125 else if (bx_neigh == (buckets_per_dim[0]-1)) {
126 //bucket in +x direction exceeds domain boundary
127 bx_neigh -= (buckets_per_dim[0]-2);//consider first bucket in +x instead
128 shift[0] = period[0];//shift particles in positive x direction by domain size
129 }
130 }
131 //do the same for y and z direction:
132 by_neigh = by+offset[n][1];
133 if (!parallel_dims[1]) {
134 if (by_neigh == 0) {
135 by_neigh+=(buckets_per_dim[1]-2);
136 shift[1] = -period[0];
137 }
138 else if (by_neigh == (buckets_per_dim[1]-1)) {
139 by_neigh -=(buckets_per_dim[1]-2);
140 shift[1] = period[1];
141 }
142 }
143
144 bz_neigh = bz+offset[n][2];
145 if (!parallel_dims[2]) {
146 if (bz_neigh == 0) {
147 bz_neigh+=(buckets_per_dim[2]-2);
148 shift[2] = -period[2];
149 }
150 else if (bz_neigh == (buckets_per_dim[2]-1)) {
151 bz_neigh -=(buckets_per_dim[2]-2);
152 shift[2] = period[2];
153 }
154 }
155
156 if (bx_neigh >= 0 && bx_neigh<buckets_per_dim[0] &&
157 by_neigh >= 0 && by_neigh<buckets_per_dim[1] &&
158 bz_neigh >= 0 && bz_neigh<buckets_per_dim[2]) {
159
160 unsigned bucket_id_neigh =
161 bz_neigh*buckets_per_dim[1]*buckets_per_dim[0]+by_neigh*buckets_per_dim[0]+bx_neigh;
162
163 //i is index of particle considered in active cahining cell, j is index of neighbor particle considered
164 std::size_t i = buckets[bucket_id_self];
165 std::size_t j;
166
167 //loop over all particles in self cell
168 //self offset avoids double counting in self cell
169 int self_offset = 0;
170 while (i != END) {
171 j = buckets[bucket_id_neigh];
172 //increase offset by number of processed particles in self cell
173 for (int o=0;o<self_offset;o++){
174 j = next[j];
175 }
176 //loop over all particles in neighbor cell
177 while(j != END) {
178 if(pred(particles.R[i], particles.R[j]+shift)) {
179 if (i!=j)
180 op(i, j, particles, shift);
181 }
182 j = next[j];
183 }
184 i = next[i];
185 //adjust self_offset
186 if (bucket_id_self==bucket_id_neigh)
187 self_offset++;
188 else
189 self_offset=0;
190 }
191 }
192 }
193
194 }
195 }
196 }
197
198 delete[] buckets;
199 delete[] next;
200 }
201private:
202
203 //returns the bucket id of particle i
204 template<class Pred>
205 int get_bucket_id(int i, const Pred& /*pred*/)
206 {
207 // Inform dmsg("debug_msg:");
208
209 Vektor<int,3> loc;
210 for (unsigned d=0; d<3; ++d)
211 loc[d] = (particles.R[i][d]-rmin_m[d])/h_chaining[d];
212 int bucket_id = loc[2]*buckets_per_dim[1]*buckets_per_dim[0]+loc[1]*buckets_per_dim[0]+loc[0];
213 // dmsg << "bucket id of particle " << i << "with coords " << particles.R[i] << " = [" << loc[0] << "," << loc[1] << "," << loc[2] << "] => bucket id = " << bucket_id << endl;
214 // dmsg << particles.R[i][0] << "," << particles.R[i][1] << "," << particles.R[i][2] << "," << bucket_id << endl;
215 return bucket_id;
216 }
217
218 PBase &particles;
224};
225
226
227#endif
PETE_TUTree< FnFloor, typename T::PETE_Expr_t > floor(const PETE_Expr< T > &l)
Definition PETE.h:733
void for_each(const Pred &pred, const OP &op, Vektor< double, 3 > extend_l, Vektor< double, 3 > extend_r)