GADGET-4
fof.cc
1/*******************************************************************************
2 * \copyright This file is part of the GADGET4 N-body/SPH code developed
3 * \copyright by Volker Springel. Copyright (C) 2014-2020 by Volker Springel
4 * \copyright (vspringel@mpa-garching.mpg.de) and all contributing authors.
5 *******************************************************************************/
6
12#include "gadgetconfig.h"
13
14#ifdef FOF
15
16#include <mpi.h>
17#include <algorithm>
18#include <climits>
19#include <cmath>
20#include <cstdio>
21#include <cstdlib>
22#include <cstring>
23
24#include "../data/allvars.h"
25#include "../data/dtypes.h"
26#include "../data/intposconvert.h"
27#include "../data/mymalloc.h"
28#include "../domain/domain.h"
29#include "../fof/fof.h"
30#include "../fof/fof_io.h"
31#include "../gravtree/gravtree.h"
32#include "../logs/timer.h"
33#include "../main/simulation.h"
34#include "../mpi_utils/mpi_utils.h"
35#include "../ngbtree/ngbtree.h"
36#include "../sort/cxxsort.h"
37#include "../sort/parallel_sort.h"
38#include "../sort/peano.h"
39#include "../subfind/subfind.h"
40#include "../system/system.h"
41#include "../time_integration/timestep.h"
42
43using namespace std;
44
53template <typename partset>
54void fof<partset>::fof_fof(int num, const char *grpcat_basename, const char *grpcat_dirbasename, double inner_distance)
55{
56 TIMER_START(CPU_FOF);
57
58 mpi_printf("FOF: Begin to compute FoF group catalogue... (presently allocated=%g MB)\n", Mem.getAllocatedBytesInMB());
59
60 double ta = Logs.second();
61
62 /* determine linking length */
63 Tp->LinkL = fof_get_comoving_linking_length();
64 mpi_printf("FOF: Comoving linking length: %g\n", Tp->LinkL);
65
66 /* allocate link lists for book-keeping local particle sets */
67#if defined(LIGHTCONE_PARTICLES_GROUPS)
68 Tp->DistanceOrigin = (double *)Mem.mymalloc("DistanceOrigin", Tp->NumPart * sizeof(double));
69#endif
70
71  Tp->MinID = (MyIDStorage *)Mem.mymalloc("MinID", Tp->NumPart * sizeof(MyIDStorage)); // smallest particle ID within FOF group
72 Tp->MinIDTask = (int *)Mem.mymalloc("MinIDTask", Tp->NumPart * sizeof(int)); // processor on which this ID is stored
73  Tp->Head = (int *)Mem.mymalloc("Head", Tp->NumPart * sizeof(int)); // first particle in chaining list of local FOF group segment
74 Tp->Next = (int *)Mem.mymalloc("Next", Tp->NumPart * sizeof(int)); // next particle in chaining list
75 Tp->Tail = (int *)Mem.mymalloc("Tail", Tp->NumPart * sizeof(int)); // points to last particle in chaining list
76 Tp->Len = (int *)Mem.mymalloc("Len", Tp->NumPart * sizeof(int)); // length of local FOF group segment (note: 32 bit enough even for
77 // huge groups because they are split across processors)
78
79 /* initialize link-lists, each particle is in a group of its own initially */
80 for(int i = 0; i < Tp->NumPart; i++)
81 {
82 Tp->Head[i] = Tp->Tail[i] = i;
83 Tp->Len[i] = 1;
84 Tp->Next[i] = -1;
85 Tp->MinID[i] = Tp->P[i].ID;
86 Tp->MinIDTask[i] = ThisTask;
87
88#if defined(LIGHTCONE_PARTICLES_GROUPS)
89 Tp->DistanceOrigin[i] = fof_distance_to_origin(i);
90 Tp->P[i].setFlagSaveDistance();
91#endif
92 }
93
94 /* make a list with the particles that are of the primary link type(s) */
95#ifdef LEAN
96 int npart = Tp->NumPart;
97 int *targetlist = NULL;
98#else
99 int npart = 0;
100 int *targetlist = (int *)Mem.mymalloc("targetlist", Tp->NumPart * sizeof(int));
101 for(int i = 0; i < Tp->NumPart; i++)
102 if(is_type_primary_link_type(Tp->P[i].getType()))
103 targetlist[npart++] = i;
104#endif
105
106 /* build neighbour tree with primary link particles only */
107 FoFNgbTree.treeallocate(Tp->NumPart, Tp, FoFDomain);
108 FoFNgbTree.treebuild(npart, targetlist);
109
110 /* call routine to find groups made up of the primary particle types */
111 double cputime = fof_find_groups();
112 mpi_printf("FOF: primary group finding took = %g sec\n", cputime);
113
114 /* call routine to attach secondary particles/cells to primary groups */
115 cputime = fof_find_nearest_dmparticle();
116 mpi_printf("FOF: attaching gas and star particles to nearest dm particles took = %g sec\n", cputime);
117
118 /* free some arrays that are not needed any more */
119 FoFNgbTree.treefree();
120#ifndef LEAN
121 Mem.myfree(targetlist);
122#endif
123 Mem.myfree(Tp->Len);
124 Mem.myfree(Tp->Tail);
125 Mem.myfree(Tp->Next);
126
127#if defined(LIGHTCONE_PARTICLES_GROUPS)
128 double save_distance = 0;
129 if(inner_distance > 0)
130 save_distance = inner_distance + Tp->LinkL;
131#endif
132
133 double t0 = Logs.second();
134
135  /* transfer the still required link list information to a somewhat smaller structure, "FOF_PList".
136  * Particles that are in the same group have the same MinID. The MinIDTask variable records the processor on which
137  * the particle with ID=MinID is stored (this particle is also a member of the group).
138  */
139 FOF_PList = (fof_particle_list *)Mem.mymalloc_movable(&FOF_PList, "FOF_PList", Tp->NumPart * sizeof(fof_particle_list));
140 for(int i = 0; i < Tp->NumPart; i++)
141 {
142 FOF_PList[i].MinID = Tp->MinID[Tp->Head[i]];
143 FOF_PList[i].MinIDTask = Tp->MinIDTask[Tp->Head[i]];
144 FOF_PList[i].Pindex = i;
145
146#if defined(LIGHTCONE_PARTICLES_GROUPS)
147 FOF_PList[i].DistanceOrigin = Tp->DistanceOrigin[Tp->Head[i]];
148
149 if(Tp->DistanceOrigin[Tp->Head[i]] < save_distance)
150 Tp->P[i].clearFlagSaveDistance();
151#endif
152 }
153
154 /* free the rest of the original link lists */
155 Mem.myfree_movable(Tp->Head);
156 Mem.myfree_movable(Tp->MinIDTask);
157 Mem.myfree_movable(Tp->MinID);
158#if defined(LIGHTCONE_PARTICLES_GROUPS)
159 Mem.myfree_movable(Tp->DistanceOrigin);
160#endif
161
162  /* Allocate a list of group pieces in FOF_GList, one entry for each local group segment.
163 * If a group is split across processor boundaries, it will appear on each of the processors with
164 * a segment.
165 */
166 FOF_GList = (fof_group_list *)Mem.mymalloc_movable(&FOF_GList, "FOF_GList", sizeof(fof_group_list) * Tp->NumPart);
167
168 fof_compile_catalogue(inner_distance);
169
170 /* now determine total group number, largest group size, and output some log messages */
171 double t1 = Logs.second();
172 mpi_printf("FOF: compiling local group data and catalogue took = %g sec\n", Logs.timediff(t0, t1));
173
174 sumup_large_ints(1, &Ngroups, &TotNgroups, Communicator);
175 sumup_longs(1, &Nids, &TotNids, Communicator);
176
177 /* determine the largest group size */
178 long long largestgroup = 0;
179 if(TotNgroups > 0)
180 {
181 long long largestloc = 0;
182
183 for(int i = 0; i < NgroupsExt; i++)
184 if(FOF_GList[i].Count > largestloc)
185 largestloc = FOF_GList[i].Count;
186 MPI_Allreduce(&largestloc, &largestgroup, 1, MPI_LONG_LONG, MPI_MAX, Communicator);
187 }
188
189 mpi_printf("FOF: Total number of FOF groups with at least %d particles: %lld\n", FOF_GROUP_MIN_LEN, TotNgroups);
190 mpi_printf("FOF: Largest FOF group has %lld particles.\n", largestgroup);
191 mpi_printf("FOF: Total number of particles in FOF groups: %lld\n", TotNids);
192
193 t0 = Logs.second();
194
195 /* now allocate some storage for the group catalogue, and begin to fill it in with partial
196 * group properties
197 */
198 MaxNgroups = NgroupsExt;
199 Group = (group_properties *)Mem.mymalloc_movable(&Group, "Group", sizeof(group_properties) * MaxNgroups);
200
201 mpi_printf("FOF: group properties are now allocated.. (presently allocated=%g MB)\n", Mem.getAllocatedBytesInMB());
202
203 /* Sort FOF_GList according to MinID. We are going to match this with FOF_PList (still ordered according to MinID) to get
204 * access to the particles making up each group piece.
205 */
206 mycxxsort(FOF_GList, FOF_GList + NgroupsExt, fof_compare_FOF_GList_MinID);
207
208 /* compute partial group properties for each local group segment */
209 long long count_nids = 0;
210 for(int i = 0, start = 0; i < NgroupsExt; i++)
211 {
212 while(FOF_PList[start].MinID.get() < FOF_GList[i].MinID.get())
213 {
214 start++;
215 if(start > Tp->NumPart)
216 Terminate("start > Tp->NumPart");
217 }
218
219 if(FOF_PList[start].MinID.get() != FOF_GList[i].MinID.get())
220 Terminate("ID mismatch");
221
222 int lenloc = 0;
223 for(lenloc = 0; start + lenloc < Tp->NumPart;)
224 if(FOF_PList[start + lenloc].MinID.get() == FOF_GList[i].MinID.get())
225 lenloc++;
226 else
227 break;
228
229 Group[i].MinID = FOF_GList[i].MinID.get();
230 Group[i].MinIDTask = FOF_GList[i].MinIDTask;
231 Group[i].Len = FOF_GList[i].Count;
232
233  /* calculate mass, center of mass, velocity etc. for a local group segment */
234 fof_compute_group_properties(i, start, lenloc);
235
236 start += lenloc;
237 count_nids += lenloc;
238 }
239
240 Mem.myfree_movable(FOF_GList);
241 FOF_GList = NULL;
242
243 /* do a sanity check */
244 long long totNids;
245 sumup_longs(1, &count_nids, &totNids, Communicator);
246 if(totNids != TotNids)
247 Terminate("Task=%d Nids=%lld count_nids=%lld totNids=%lld TotNids=%lld\n", ThisTask, Nids, count_nids, totNids, TotNids);
248
249 /* add in group properties for each external group segment */
250 fof_add_in_properties_of_group_segments();
251
252 t1 = Logs.second();
253 mpi_printf("FOF: computation of group properties took = %g sec\n", Logs.timediff(t0, t1));
254
255 /* now we assign group numbers */
256 fof_assign_group_numbers();
257
258 Mem.myfree_movable(FOF_PList);
259 FOF_PList = NULL;
260
261 /* finalize the computation of the properties of the groups, and prune the list to just contain the main (local) segment */
262 fof_finish_group_properties();
263
264 /* Sort the groups in parallel according to group-number, which will be our output order. */
265 mycxxsort_parallel(Group, Group + Ngroups, fof_compare_Group_GroupNr, Communicator);
266
267 fof_assign_group_offset();
268
269 double tb = Logs.second();
270
271  mpi_printf("FOF: Finished computing FoF groups. Complete work took %g sec (presently allocated=%g MB)\n", Logs.timediff(ta, tb),
272             Mem.getAllocatedBytesInMB());
273
274#ifdef SUBFIND
275 if(num >= 0)
276 {
277 TIMER_STOP(CPU_FOF);
278
279 char catname[1000];
280 sprintf(catname, "%s_subhalo_tab", grpcat_basename);
281
282 subfind_find_subhalos(num, catname, grpcat_dirbasename);
283
284 TIMER_START(CPU_FOF);
285 }
286#else
287 Nsubhalos = 0;
288 TotNsubhalos = 0;
289 if(num >= 0)
290 {
291 TIMER_STOP(CPU_FOF);
292 TIMER_START(CPU_SNAPSHOT);
293
294 fof_io<partset> FoF_IO{this, this->Communicator, All.SnapFormat};
295
296 char catname[1000];
297 sprintf(catname, "%s_tab", grpcat_basename);
298
299 FoF_IO.fof_subfind_save_groups(num, catname, grpcat_dirbasename);
300
301 TIMER_STOP(CPU_SNAPSHOT);
302 TIMER_START(CPU_FOF);
303 }
304#endif
305
306 Mem.myfree_movable(Group);
307
308 if(num >= 0)
309 {
310 TIMER_STOP(CPU_FOF);
311 TIMER_START(CPU_SNAPSHOT);
312
313 /* now distribute the particles into output order */
314 t0 = Logs.second();
315  /* distribute particles such that FOF groups (and SUBFIND halos) will appear consecutively in snapshot files */
316 fof_prepare_output_order();
317 t1 = Logs.second();
318 mpi_printf("FOF: preparing output order of particles took %g sec\n", Logs.timediff(t0, t1));
319
320 TIMER_STOP(CPU_SNAPSHOT);
321 TIMER_START(CPU_FOF);
322 }
323
324 TIMER_STOP(CPU_FOF);
325}
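
The Head/Next/Tail/Len/MinID arrays allocated at the start of fof_fof() form simple per-particle union chains: every group segment on a processor is a singly linked list starting at its Head particle, and the MinID/MinIDTask stored for the head identify the group globally. The actual linking of particle pairs happens elsewhere (in fof_find_groups()); the following is a minimal, self-contained sketch, not part of fof.cc, of how two such chains can be merged once a particle pair is found within the linking length (in the same step the MinID/MinIDTask of the surviving head would be updated to the smaller of the two values):

void link_two_chains_sketch(int p, int q, int *Head, int *Next, int *Tail, int *Len)
{
  int hp = Head[p], hq = Head[q];
  if(hp == hq)
    return; /* p and q are already in the same group */

  if(Len[hp] < Len[hq]) /* append the shorter chain to the longer one */
    {
      int tmp = hp;
      hp      = hq;
      hq      = tmp;
    }

  Next[Tail[hp]] = hq; /* concatenate the two particle lists */
  Tail[hp]       = Tail[hq];
  Len[hp] += Len[hq];

  for(int s = hq; s >= 0; s = Next[s]) /* point all members of the absorbed chain to the new head */
    Head[s] = hp;
}
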
326
327template <typename partset>
328void fof<partset>::fof_prepare_output_order(void)
329{
330 int ntype[NTYPES];
331 for(int i = 0; i < NTYPES; i++)
332 ntype[i] = 0;
333
334 for(int i = 0; i < Tp->NumPart; i++)
335 {
336#ifndef LEAN
337 Tp->PS[i].Type = Tp->P[i].getType();
338#endif
339 ntype[Tp->P[i].getType()]++;
340 }
341
342#if defined(MERGERTREE) && defined(SUBFIND)
343
344  /* we determine a continuously increasing subhalo number (i.e. not restarting at zero in every Group), and also assign each
345  * particle's rank within its subhalo
346  */
347 mycxxsort_parallel(Tp->PS, Tp->PS + Tp->NumPart, fof_compare_subfind_data_GroupNr_SubRankInNr_BndEgy, Communicator);
348
349 int *num_list = (int *)Mem.mymalloc("num_list", NTask * sizeof(int));
350 MPI_Allgather(&Tp->NumPart, 1, MPI_INT, num_list, 1, MPI_INT, Communicator);
351
352 int prev_non_empty_task = ThisTask - 1;
353
354 while(prev_non_empty_task >= 0)
355 if(num_list[prev_non_empty_task] == 0)
356 prev_non_empty_task--;
357 else
358 break;
359
360 /* obtain last element of each processor */
361 subfind_data *aux_last_element = (subfind_data *)Mem.mymalloc("aux_last_element", NTask * sizeof(subfind_data));
362 MPI_Allgather(&Tp->PS[Tp->NumPart > 0 ? Tp->NumPart - 1 : 0], sizeof(subfind_data), MPI_BYTE, aux_last_element, sizeof(subfind_data),
363 MPI_BYTE, Communicator);
364
365 int first_index = INT_MAX;
366 int first_task = NTask;
367 /* find out the very first particle in a subhalo (which is not necessarily in the first FOF group) */
368 for(int i = 0; i < Tp->NumPart; i++)
369 if(Tp->PS[i].GroupNr.get() < HALONR_MAX && Tp->PS[i].SubRankInGr < INT_MAX) /* particle is in a subhalo */
370 {
371 first_index = i;
372 break;
373 }
374
375 int *first_list = (int *)Mem.mymalloc("first_list", NTask * sizeof(int));
376 MPI_Allgather(&first_index, 1, MPI_INT, first_list, 1, MPI_INT, Communicator);
377 for(int n = 0; n < NTask; n++)
378 {
379 if(first_list[n] != INT_MAX)
380 {
381 first_index = first_list[n];
382 first_task = n;
383 break;
384 }
385 }
386 Mem.myfree(first_list);
387
388 long long subnr = 0;
389 long long rank = 0;
390
391 for(int i = 0; i < Tp->NumPart; i++)
392 {
393 if(Tp->PS[i].GroupNr.get() < HALONR_MAX && Tp->PS[i].SubRankInGr < INT_MAX) /* particle is in a subhalo */
394 {
395 if(i == 0)
396 {
397 if(prev_non_empty_task >= 0)
398 {
399 if(Tp->PS[i].GroupNr.get() != aux_last_element[prev_non_empty_task].GroupNr.get() ||
400 Tp->PS[i].SubRankInGr !=
401 aux_last_element[prev_non_empty_task].SubRankInGr) /* we are the first particle of a new subhalo */
402 {
403 if(ThisTask != first_task || i != first_index) // to prevent that we start a new subhalo for the very first one
404 {
405 subnr++;
406 rank = 0;
407 }
408 }
409 }
410 }
411 else if(Tp->PS[i].GroupNr.get() != Tp->PS[i - 1].GroupNr.get() ||
412 Tp->PS[i].SubRankInGr != Tp->PS[i - 1].SubRankInGr) /* we are the first particle of a new subhalo */
413 {
414 if(ThisTask != first_task || i != first_index) // to prevent that we start a new subhalo for the very first one
415 {
416 subnr++;
417 rank = 0;
418 }
419 }
420
421 Tp->PS[i].SubhaloNr.set(subnr);
422 Tp->PS[i].RankInSubhalo.set(rank++);
423
424 if(subnr < 0 || subnr >= TotNsubhalos)
425 Terminate("i=%d NumPart=%d subnr=%lld PS[i].SubhaloNr.get()=%lld >= TotNsubhalos=%lld", i, Tp->NumPart, subnr,
426 (long long)Tp->PS[i].SubhaloNr.get(), TotNsubhalos);
427 }
428 else
429 {
430 Tp->PS[i].SubhaloNr.set(HALONR_MAX);
431 Tp->PS[i].RankInSubhalo.set(INT_MAX);
432 }
433 }
434
435 long long *subnr_list = (long long *)Mem.mymalloc("subnr_list", NTask * sizeof(long long));
436 MPI_Allgather(&subnr, 1, MPI_LONG_LONG, subnr_list, 1, MPI_LONG_LONG, Communicator);
437
438 long long subnr_prev = 0;
439 for(int i = 0; i < ThisTask; i++)
440 subnr_prev += subnr_list[i];
441
442 for(int i = 0; i < Tp->NumPart; i++)
443 if(Tp->PS[i].GroupNr.get() < HALONR_MAX && Tp->PS[i].SubRankInGr < INT_MAX) /* particle is in a subhalo */
444 Tp->PS[i].SubhaloNr.set(Tp->PS[i].SubhaloNr.get() + subnr_prev);
445
446  /* obtain the final rank counter of each processor */
447 long long *rank_list = (long long *)Mem.mymalloc("rank_list", NTask * sizeof(long long));
448 MPI_Allgather(&rank, 1, MPI_LONG_LONG, rank_list, 1, MPI_LONG_LONG, Communicator);
449
450 if(prev_non_empty_task >= 0)
451 {
452 long long rank = rank_list[prev_non_empty_task];
453
454 for(int i = 0; i < Tp->NumPart; i++)
455 {
456 if(Tp->PS[i].GroupNr.get() < HALONR_MAX && Tp->PS[i].SubRankInGr < INT_MAX)
457 {
458 if(i == 0)
459 {
460 if(prev_non_empty_task >= 0)
461 if(Tp->PS[i].GroupNr.get() != aux_last_element[prev_non_empty_task].GroupNr.get() ||
462 Tp->PS[i].SubRankInGr !=
463 aux_last_element[prev_non_empty_task].SubRankInGr) /* we are the first particle of a new subhalo */
464 break;
465 }
466 else if(Tp->PS[i].GroupNr.get() != Tp->PS[i - 1].GroupNr.get() ||
467 Tp->PS[i].SubRankInGr != Tp->PS[i - 1].SubRankInGr) /* we are the first particle of a new subhalo */
468 break;
469
470 Tp->PS[i].RankInSubhalo.set(rank++);
471 }
472 }
473 }
474
475 Mem.myfree(rank_list);
476 Mem.myfree(subnr_list);
477 Mem.myfree(aux_last_element);
478 Mem.myfree(num_list);
479
480 /* now bring back into starting order */
481 mycxxsort_parallel(Tp->PS, Tp->PS + Tp->NumPart, fof_compare_subfind_data_OriginTask_OriginIndex, Communicator);
482#endif
483
484 /* note: the following will destroy the value of the Potential, which is not needed any more at this point */
485 for(int i = 0; i < Tp->NumPart; i++)
486 {
487#if defined(RANDOMIZE_DOMAINCENTER) && defined(PERIODIC)
488 Tp->PS[i].u.Key =
489 peano_hilbert_key(Tp->P[i].IntPos[0] - Tp->CurrentShiftVector[0], Tp->P[i].IntPos[1] - Tp->CurrentShiftVector[1],
490 Tp->P[i].IntPos[2] - Tp->CurrentShiftVector[2], BITS_FOR_POSITIONS);
491#else
492 Tp->PS[i].u.Key = peano_hilbert_key(Tp->P[i].IntPos[0], Tp->P[i].IntPos[1], Tp->P[i].IntPos[2], BITS_FOR_POSITIONS);
493#endif
494#ifdef SUBFIND
495  /* make sure that for particles not in a group we have no binding energy and no SubRankInGr, so that they are ordered only by the key
496  */
497 if(Tp->PS[i].GroupNr.get() == HALONR_MAX)
498 {
499 Tp->PS[i].SubRankInGr = 0;
500 Tp->PS[i].v.DM_BindingEnergy = 0;
501 }
502#endif
503 }
504
505#ifndef LEAN
506 mycxxsort(Tp->PS, Tp->PS + Tp->NumPart, fof_compare_subfind_data_Type);
507#endif
508
509 for(int i = 0, off = 0; i < NTYPES; i++)
510 {
511 mycxxsort_parallel(Tp->PS + off, Tp->PS + off + ntype[i], fof_compare_subfind_data_GroupNr_SubNr_Egy_Key, Communicator);
512
513 off += ntype[i];
514 }
515
516 for(int i = 0; i < Tp->NumPart; i++)
517 {
518 Tp->PS[i].TargetTask = ThisTask;
519 Tp->PS[i].TargetIndex = i;
520 }
521
522 /* now bring back into starting order */
523 mycxxsort_parallel(Tp->PS, Tp->PS + Tp->NumPart, fof_compare_subfind_data_OriginTask_OriginIndex, Communicator);
524
525 /* finally, reorder both P[] and PS[] */
526 FoFDomain->particle_exchange_based_on_PS(Communicator);
527}
528
529/* calculate linking length based on mean particle separation */
530template <typename partset>
531double fof<partset>::fof_get_comoving_linking_length(void)
532{
533 int ndm = 0;
534 long long ndmtot;
535 double mass = 0, masstot;
536
537 for(int i = 0; i < Tp->NumPart; i++)
538 if(is_type_primary_link_type(Tp->P[i].getType()))
539 {
540 ndm++;
541 mass += Tp->P[i].getMass();
542 }
543 sumup_large_ints(1, &ndm, &ndmtot, Communicator);
544 MPI_Allreduce(&mass, &masstot, 1, MPI_DOUBLE, MPI_SUM, Communicator);
545 double rhodm = (All.Omega0 - All.OmegaBaryon) * 3 * All.Hubble * All.Hubble / (8 * M_PI * All.G);
546
547 return FOF_LINKLENGTH * pow(masstot / ndmtot / rhodm, 1.0 / 3);
548}
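
In equation form, the function above returns a fixed fraction of the mean comoving separation of the primary (dark matter) particles,

\[
  \ell_{\rm link} = b \left( \frac{\bar m}{\bar\rho_{\rm dm}} \right)^{1/3},
  \qquad
  \bar m = \frac{M_{\rm dm,tot}}{N_{\rm dm,tot}},
  \qquad
  \bar\rho_{\rm dm} = \left(\Omega_0 - \Omega_{\rm baryon}\right)\,\frac{3 H_0^2}{8\pi G},
\]

where b is the compile-time constant FOF_LINKLENGTH (conventionally of order 0.2), and H_0 and G are All.Hubble and All.G expressed in internal code units.
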
549
550template <typename partset>
551void fof<partset>::fof_compile_catalogue(double inner_distance)
552{
553 /* sort according to MinID, this brings particles belonging to the same group together */
554 mycxxsort(FOF_PList, FOF_PList + Tp->NumPart, fof_compare_FOF_PList_MinID);
555
556  /* we now use the auxiliary FOF_GList structure to determine the group lengths.
557  * The Count field accumulates the length of each group piece held locally; particles of the
558  * same group residing on other processors are added in later through the exchange below.
559  */
560 for(int i = 0; i < Tp->NumPart; i++)
561 {
562 FOF_GList[i].MinID = FOF_PList[i].MinID;
563 FOF_GList[i].MinIDTask = FOF_PList[i].MinIDTask;
564 FOF_GList[i].Count = 1;
565#if defined(LIGHTCONE_PARTICLES_GROUPS)
566 FOF_GList[i].DistanceOrigin = FOF_PList[i].DistanceOrigin;
567#endif
568 }
569
570 /* Now we are going to eliminate duplicates in FOF_GList with respect to MinID, i.e. each group
571 * piece on the local processor will be squashed to one entry. Some groups may be present as
572 * pieces on several different processors.
573 */
574 if(Tp->NumPart)
575 NgroupsExt = 1;
576 else
577 NgroupsExt = 0;
578
579 for(int i = 1, start = 0; i < Tp->NumPart; i++)
580 {
581 if(FOF_GList[i].MinID.get() == FOF_GList[start].MinID.get())
582 {
583 if(FOF_GList[i].MinIDTask != FOF_GList[start].MinIDTask)
584 Terminate("FOF_GList[i].MinIDTask=%d != FOF_GList[start].MinIDTask=%d", FOF_GList[i].MinIDTask,
585 FOF_GList[start].MinIDTask);
586
587 FOF_GList[start].Count += FOF_GList[i].Count;
588
589#if defined(LIGHTCONE_PARTICLES_GROUPS)
590 if(FOF_GList[start].DistanceOrigin != FOF_GList[i].DistanceOrigin)
591 Terminate(
592 "start=%d i=%d Tp->NumPart=%d FOF_GList[start].DistanceOrigin=%g != FOF_GList[i].DistanceOrigin=%g MinID=%lld "
593 "MinIDTask=%d\n",
594 start, i, Tp->NumPart, FOF_GList[start].DistanceOrigin, FOF_GList[i].DistanceOrigin,
595 (long long)FOF_GList[i].MinID.get(), FOF_GList[i].MinIDTask);
596#endif
597 }
598 else
599 {
600 start = NgroupsExt;
601 FOF_GList[start] = FOF_GList[i];
602 NgroupsExt++;
603 }
604 }
605
606 /* we resize FOF_GList, which has shrunk */
607 FOF_GList = (fof_group_list *)Mem.myrealloc_movable(FOF_GList, sizeof(fof_group_list) * NgroupsExt);
608
609 /* sort the group pieces according to task */
610 mycxxsort(FOF_GList, FOF_GList + NgroupsExt, fof_compare_FOF_GList_MinIDTask);
611
612 int *Send_count = (int *)Mem.mymalloc("Send_count", sizeof(int) * NTask);
613 int *Send_offset = (int *)Mem.mymalloc("Send_offset", sizeof(int) * NTask);
614 int *Recv_count = (int *)Mem.mymalloc("Recv_count", sizeof(int) * NTask);
615 int *Recv_offset = (int *)Mem.mymalloc("Recv_offset", sizeof(int) * NTask);
616
617 /* count how many group pieces we have for each task */
618 for(int i = 0; i < NTask; i++)
619 Send_count[i] = 0;
620 for(int i = 0; i < NgroupsExt; i++)
621 Send_count[FOF_GList[i].MinIDTask]++;
622
623 /* inform everybody about how much they have to receive */
624 myMPI_Alltoall(Send_count, 1, MPI_INT, Recv_count, 1, MPI_INT, Communicator);
625
626 /* count how many we get and prepare offset tables */
627 int nimport = 0;
628 Recv_offset[0] = 0, Send_offset[0] = 0;
629 for(int j = 0; j < NTask; j++)
630 {
631 if(j == ThisTask) /* we will not exchange the ones that are local */
632 Recv_count[j] = 0;
633 nimport += Recv_count[j];
634
635 if(j > 0)
636 {
637 Send_offset[j] = Send_offset[j - 1] + Send_count[j - 1];
638 Recv_offset[j] = Recv_offset[j - 1] + Recv_count[j - 1];
639 }
640 }
641
642 /* allocate some temporary storage for foreign group pieces */
643 fof_group_list *get_FOF_GList = (fof_group_list *)Mem.mymalloc("get_FOF_GList", nimport * sizeof(fof_group_list));
644
645 /* get them */
646 for(int ngrp = 1; ngrp < (1 << PTask); ngrp++)
647 {
648 int recvTask = ThisTask ^ ngrp;
649
650 if(recvTask < NTask)
651 {
652 if(Send_count[recvTask] > 0 || Recv_count[recvTask] > 0)
653 {
654 /* get the group info */
655 myMPI_Sendrecv(&FOF_GList[Send_offset[recvTask]], Send_count[recvTask] * sizeof(fof_group_list), MPI_BYTE, recvTask,
656 TAG_DENS_A, &get_FOF_GList[Recv_offset[recvTask]], Recv_count[recvTask] * sizeof(fof_group_list), MPI_BYTE,
657 recvTask, TAG_DENS_A, Communicator, MPI_STATUS_IGNORE);
658 }
659 }
660 }
661
662  /* for the incoming pieces, we re-purpose MinIDTask as an ascending index, which we later use to reestablish the order
663  * we had */
664 for(int i = 0; i < nimport; i++)
665 get_FOF_GList[i].MinIDTask = i;
666
667 /* sort both the local group pieces and the incoming group pieces so that we can match them efficiently */
668 mycxxsort(FOF_GList, FOF_GList + NgroupsExt, fof_compare_FOF_GList_MinID);
669 mycxxsort(get_FOF_GList, get_FOF_GList + nimport, fof_compare_FOF_GList_MinID);
670
671  /* Merge the imported group pieces with the local group pieces.
672  * For all local group pieces in FOF_GList, the particle counts contributed by other processors
673  * are added into Count.
674  */
675 int start = 0;
676 for(int i = 0; i < nimport; i++)
677 {
678 while(FOF_GList[start].MinID.get() < get_FOF_GList[i].MinID.get())
679 {
680 start++;
681 if(start >= NgroupsExt)
682 Terminate("start=%d >= NgroupsExt=%d", start, NgroupsExt);
683 }
684
685 if(FOF_GList[start].MinIDTask != ThisTask)
686 Terminate("FOF_GList[start].MinIDTask=%d != ThisTask=%d", FOF_GList[start].MinIDTask, ThisTask);
687
688 if(FOF_GList[start].MinID.get() != get_FOF_GList[i].MinID.get())
689 Terminate(
690 "FOF_GList[start].MinID != get_FOF_GList[i].MinID start=%d i=%d FOF_GList[start].MinID=%llu get_FOF_GList[i].MinID=%llu\n",
691 start, i, (long long)FOF_GList[start].MinID.get(), (long long)get_FOF_GList[i].MinID.get());
692
693 FOF_GList[start].Count += get_FOF_GList[i].Count;
694 }
695
696 /* copy the size information back into the received list, to inform the group pieces on originating processors */
697 start = 0;
698 for(int i = 0; i < nimport; i++)
699 {
700 while(FOF_GList[start].MinID.get() < get_FOF_GList[i].MinID.get())
701 {
702 start++;
703 if(start >= NgroupsExt)
704 Terminate("start >= NgroupsExt");
705 }
706
707 get_FOF_GList[i].Count = FOF_GList[start].Count;
708 }
709
710  /* Sort the imported list according to MinIDTask. This reestablishes the order we had before the previous sort.
711  * We also sort the local group pieces according to MinIDTask, so that we can fill in the exported info
712  * at the right place again.
713  */
714 mycxxsort(get_FOF_GList, get_FOF_GList + nimport, fof_compare_FOF_GList_MinIDTask);
715 mycxxsort(FOF_GList, FOF_GList + NgroupsExt, fof_compare_FOF_GList_MinIDTask);
716
717 /* fix the value of MinIDTask again that we had temporarily overwritten */
718 for(int i = 0; i < nimport; i++)
719 get_FOF_GList[i].MinIDTask = ThisTask;
720
721 /* bring the data back to the originating processors */
722 for(int ngrp = 1; ngrp < (1 << PTask); ngrp++)
723 {
724 int recvTask = ThisTask ^ ngrp;
725
726 if(recvTask < NTask)
727 {
728 if(Send_count[recvTask] > 0 || Recv_count[recvTask] > 0)
729 {
730 /* get the group info */
731 myMPI_Sendrecv(&get_FOF_GList[Recv_offset[recvTask]], Recv_count[recvTask] * sizeof(fof_group_list), MPI_BYTE, recvTask,
732 TAG_DENS_A, &FOF_GList[Send_offset[recvTask]], Send_count[recvTask] * sizeof(fof_group_list), MPI_BYTE,
733 recvTask, TAG_DENS_A, Communicator, MPI_STATUS_IGNORE);
734 }
735 }
736 }
737
738 /* free our temporary list */
739 Mem.myfree(get_FOF_GList);
740
741  /* Now we determine how many groups we have above the group size limit.
742  * Groups that are too small, or that are not guaranteed to be in the current lightcone segment,
743  * are eliminated. Local groups are counted, and their lengths summed, to get the total number of particles in stored groups.
744  */
745#if defined(LIGHTCONE_PARTICLES_GROUPS)
746 double save_distance = 0;
747 if(inner_distance > 0)
748 save_distance = inner_distance + Tp->LinkL;
749#endif
750
751 Ngroups = 0, Nids = 0;
752 for(int i = 0; i < NgroupsExt; i++)
753 {
754 if(FOF_GList[i].Count < FOF_GROUP_MIN_LEN)
755 {
756 FOF_GList[i] = FOF_GList[NgroupsExt - 1];
757 NgroupsExt--;
758 i--;
759 }
760#if defined(LIGHTCONE_PARTICLES_GROUPS)
761 else if(FOF_GList[i].DistanceOrigin < save_distance)
762 {
763 FOF_GList[i] = FOF_GList[NgroupsExt - 1];
764 NgroupsExt--;
765 i--;
766 }
767#endif
768 else
769 {
770 if(FOF_GList[i].MinIDTask == ThisTask)
771 {
772 Ngroups++;
773 Nids += FOF_GList[i].Count;
774 }
775 }
776 }
777
778 Mem.myfree(Recv_offset);
779 Mem.myfree(Recv_count);
780 Mem.myfree(Send_offset);
781 Mem.myfree(Send_count);
782
783 /* we resize FOF_GList again, which has shrunk */
784 FOF_GList = (fof_group_list *)Mem.myrealloc_movable(FOF_GList, sizeof(fof_group_list) * NgroupsExt);
785}
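
Both exchange loops in fof_compile_catalogue() (and the analogous loop in fof_add_in_properties_of_group_segments() below) use the same hypercube communication pattern: in round ngrp, task i exchanges data with task i ^ ngrp. Every pair of tasks (i, j) meets in exactly one round, namely ngrp = i ^ j, and within one round all pairs are disjoint, so each round is a set of independent point-to-point exchanges; partners with rank >= NTask are simply skipped when NTask is not a power of two. A small self-contained check of this pairing property (illustrative only, not part of fof.cc):

#include <cassert>

void check_hypercube_pairing(int PTask) /* PTask: smallest integer with (1 << PTask) >= NTask */
{
  int NTask = 1 << PTask;

  for(int i = 0; i < NTask; i++)
    for(int j = 0; j < NTask; j++)
      if(i != j)
        {
          int ngrp = i ^ j;        /* the unique round in which i and j are paired */
          assert((i ^ ngrp) == j); /* i's partner in that round is j ... */
          assert((j ^ ngrp) == i); /* ... and j's partner is i */
          assert(ngrp >= 1 && ngrp < (1 << PTask));
        }
}
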
786
787template <typename partset>
788void fof<partset>::fof_assign_group_numbers(void)
789{
790 mpi_printf("FOF: start assigning group numbers\n");
791
792 double t0 = Logs.second();
793
794 for(int i = 0; i < NgroupsExt; i++)
795 Group[i].OriginTask = ThisTask;
796
797 // carry out a parallel sort that brings the group segments into order of the total group length, with the primary segment coming
798 // first
799 mycxxsort_parallel(Group, Group + NgroupsExt, fof_compare_Group_Len_MinID_DiffOriginTaskMinIDTask, Communicator);
800
801 /* assign group numbers to all local groups */
802 int ngr = 0;
803 for(int i = 0; i < NgroupsExt; i++)
804 {
805 if(Group[i].OriginTask == Group[i].MinIDTask)
806 ngr++;
807
808 Group[i].GroupNr = ngr - 1;
809 }
810
811 /* now need to count how many groups there are on earlier CPUs, so that we can
812 * increase the assigned group numbers accordingly
813 */
814 int *ngr_list = (int *)Mem.mymalloc("ngr_list", sizeof(int) * NTask);
815 MPI_Allgather(&ngr, 1, MPI_INT, ngr_list, 1, MPI_INT, Communicator);
816
817 long long ngr_sum = 0;
818 for(int j = 0; j < ThisTask; j++)
819 ngr_sum += ngr_list[j];
820
821 Mem.myfree(ngr_list);
822
823 /* increase the group numbers with the cumulative count on earlier processors */
824 for(int i = 0; i < NgroupsExt; i++)
825 Group[i].GroupNr += ngr_sum;
826
827  /* check that we have consistent group numbers */
828 sumup_large_ints(1, &ngr, &ngr_sum, Communicator);
829 if(ngr_sum != TotNgroups)
830 Terminate("inconsistency ngr_sum=%lld\n", ngr_sum);
831
832 /* bring the group list back into the original order */
833 mycxxsort_parallel(Group, Group + NgroupsExt, fof_compare_Group_OriginTask_MinID, Communicator);
834
835 /* Let's now mark all particles that are not in any group by assigning them a fiducial maximum (unused)
836 * group number
837 */
838 for(int i = 0; i < Tp->NumPart; i++)
839 Tp->PS[i].GroupNr.set(HALONR_MAX);
840
841 long long Nids_old = Nids;
842
843 /* we now aim to assign the group number also to the particles that make up groups,
844 * in the auxiliary PS array
845 */
846 Nids = 0;
847 for(int i = 0, start = 0; i < NgroupsExt; i++)
848 {
849 while(FOF_PList[start].MinID.get() < Group[i].MinID)
850 {
851 start++;
852 if(start > Tp->NumPart)
853 Terminate("start > Tp->NumPart");
854 }
855
856 if(FOF_PList[start].MinID.get() != Group[i].MinID)
857 Terminate("FOF_PList[start=%d].MinID=%lld != Group[i=%d].MinID=%lld", start, (long long)FOF_PList[start].MinID.get(), i,
858 (long long)Group[i].MinID);
859
860 int lenloc;
861 for(lenloc = 0; start + lenloc < Tp->NumPart;)
862 if(FOF_PList[start + lenloc].MinID.get() == Group[i].MinID)
863 {
864 Tp->PS[FOF_PList[start + lenloc].Pindex].GroupNr.set(Group[i].GroupNr);
865 Nids++;
866 lenloc++;
867 }
868 else
869 break;
870
871 start += lenloc;
872 }
873
874 long long totNids;
875 sumup_longs(1, &Nids, &totNids, Communicator);
876
877 if(totNids != TotNids)
878 Terminate("Task=%d Nids=%lld Nids_old=%lld totNids=%lld TotNids=%lld\n", ThisTask, Nids, Nids_old, totNids, TotNids);
879
880 double t1 = Logs.second();
881
882 mpi_printf("FOF: Assigning of group numbers took = %g sec\n", Logs.timediff(t0, t1));
883}
884
885/* This function computes the Group[].OffsetType variable, which gives for each type how many
886 * particles of the same type come before it in earlier groups in the array.
887 */
888template <typename partset>
889void fof<partset>::fof_assign_group_offset(void)
890{
891  /* Tell everybody how many particles the groups stored by each processor contain */
892
893 int gtype_loc[NTYPES]; /* particles of each type associated with locally stored groups */
894 long long gtype_previous[NTYPES];
895
896 for(int i = 0; i < NTYPES; i++)
897 gtype_loc[i] = 0;
898
899 for(int i = 0; i < Ngroups; i++)
900 for(int j = 0; j < NTYPES; j++)
901 gtype_loc[j] += Group[i].LenType[j];
902
903 int *gtype_all = (int *)Mem.mymalloc("gtype_all", NTYPES * NTask * sizeof(int));
904 MPI_Allgather(gtype_loc, NTYPES, MPI_INT, gtype_all, NTYPES, MPI_INT, Communicator);
905
906 for(int i = 0; i < NTYPES; i++)
907 gtype_previous[i] = 0;
908
909 for(int i = 0; i < ThisTask; i++)
910 for(int j = 0; j < NTYPES; j++)
911 gtype_previous[j] += gtype_all[i * NTYPES + j];
912
913 if(Ngroups > 0)
914 for(int j = 0; j < NTYPES; j++)
915 Group[0].OffsetType[j] = gtype_previous[j];
916
917 for(int i = 1; i < Ngroups; i++)
918 for(int j = 0; j < NTYPES; j++)
919 Group[i].OffsetType[j] = Group[i - 1].OffsetType[j] + Group[i - 1].LenType[j];
920
921 Mem.myfree(gtype_all);
922}
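
In other words, OffsetType[] is a global exclusive prefix sum of LenType[], taken separately for each particle type and running over all groups in storage order across all tasks. For one type, assuming for illustration two tasks storing groups of lengths {5, 3} and {4}, the resulting offsets are {0, 5} and {8}. A minimal sketch of the per-task part of such a scan (hypothetical helper, not part of fof.cc):

/* 'start' is the total count of this type in groups stored on all previous tasks,
 * corresponding to gtype_previous[] obtained from the MPI_Allgather above;
 * returns the running total to be used as 'start' by the next task. */
long long exclusive_prefix_sum(const int *len, long long *offset, int ngroups, long long start)
{
  long long sum = start;
  for(int i = 0; i < ngroups; i++)
    {
      offset[i] = sum;
      sum += len[i];
    }
  return sum;
}
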
923
924template <typename partset>
925void fof<partset>::fof_compute_group_properties(int gr, int start, int len)
926{
927 int start_index = FOF_PList[start].Pindex;
928
929 Group[gr].Mass = 0;
930 Group[gr].Ascale = 0;
931#ifdef STARFORMATION
932 Group[gr].Sfr = 0;
933#endif
934#if defined(SUBFIND_ORPHAN_TREATMENT)
935 Group[gr].LenPrevMostBnd = 0;
936#endif
937
938 for(int k = 0; k < 3; k++)
939 {
940 Group[gr].CM[k] = 0;
941 Group[gr].Vel[k] = 0;
942 Group[gr].FirstIntPos[k] = Tp->P[start_index].IntPos[k];
943 }
944
945 for(int k = 0; k < NTYPES; k++)
946 {
947 Group[gr].LenType[k] = 0;
948 Group[gr].MassType[k] = 0;
949 }
950
951 for(int k = 0; k < len; k++)
952 {
953 int index = FOF_PList[start + k].Pindex;
954
955 Group[gr].Mass += Tp->P[index].getMass();
956 int type = Tp->P[index].getType();
957
958 Group[gr].Ascale += Tp->P[index].getMass() * Tp->P[index].getAscale();
959
960 Group[gr].LenType[type]++;
961 Group[gr].MassType[type] += Tp->P[index].getMass();
962
963#if defined(SUBFIND_ORPHAN_TREATMENT)
964 if(Tp->P[index].ID.is_previously_most_bound())
965 Group[gr].LenPrevMostBnd++;
966#endif
967
968#ifdef STARFORMATION
969 if(Tp->P[index].getType() == 0)
970 Group[gr].Sfr += Tp->SphP[index].Sfr;
971#endif
972
973 double xyz[3];
974 Tp->nearest_image_intpos_to_pos(Tp->P[index].IntPos, Tp->P[start_index].IntPos,
975 xyz); /* converts the integer distance to floating point */
976
977 for(int j = 0; j < 3; j++)
978 {
979 Group[gr].CM[j] += Tp->P[index].getMass() * xyz[j];
980 Group[gr].Vel[j] += Tp->P[index].getMass() * Tp->P[index].Vel[j];
981 }
982 }
983}
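
Note that the centre of mass is accumulated above as mass-weighted offsets relative to the group's first particle, with nearest_image_intpos_to_pos() selecting the nearest periodic image of each offset; the sum is only divided by the group mass and shifted back to absolute coordinates later, in fof_finish_group_properties(). This keeps the result well defined for groups that straddle the periodic box boundary. A minimal one-dimensional sketch of the same idea using plain double positions (illustrative only, not part of fof.cc):

#include <cmath>

double periodic_center_of_mass_1d(const double *pos, const double *mass, int n, double box)
{
  double ref  = pos[0]; /* reference particle, analogous to FirstIntPos */
  double msum = 0, dsum = 0;

  for(int i = 0; i < n; i++)
    {
      double d = pos[i] - ref;
      d -= box * std::round(d / box); /* nearest periodic image of the offset */
      dsum += mass[i] * d;
      msum += mass[i];
    }

  double cm = ref + dsum / msum;
  cm -= box * std::floor(cm / box); /* map back into the primary box */
  return cm;
}
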
984
985template <typename partset>
986void fof<partset>::fof_add_in_properties_of_group_segments(void)
987{
988 int *Send_count = (int *)Mem.mymalloc("Send_count", sizeof(int) * NTask);
989 int *Send_offset = (int *)Mem.mymalloc("Send_offset", sizeof(int) * NTask);
990 int *Recv_count = (int *)Mem.mymalloc("Recv_count", sizeof(int) * NTask);
991 int *Recv_offset = (int *)Mem.mymalloc("Recv_offset", sizeof(int) * NTask);
992
993 /* sort the groups according to task */
994 mycxxsort(Group, Group + NgroupsExt, fof_compare_Group_MinIDTask);
995
996 /* count how many we have of each task */
997 for(int i = 0; i < NTask; i++)
998 Send_count[i] = 0;
999 for(int i = 0; i < NgroupsExt; i++)
1000 Send_count[Group[i].MinIDTask]++;
1001
1002 myMPI_Alltoall(Send_count, 1, MPI_INT, Recv_count, 1, MPI_INT, Communicator);
1003
1004 int nimport = 0;
1005 Recv_offset[0] = 0, Send_offset[0] = 0;
1006
1007 for(int j = 0; j < NTask; j++)
1008 {
1009 if(j == ThisTask) /* we will not exchange the ones that are local */
1010 Recv_count[j] = 0;
1011 nimport += Recv_count[j];
1012
1013 if(j > 0)
1014 {
1015 Send_offset[j] = Send_offset[j - 1] + Send_count[j - 1];
1016 Recv_offset[j] = Recv_offset[j - 1] + Recv_count[j - 1];
1017 }
1018 }
1019
1020 group_properties *get_Group = (group_properties *)Mem.mymalloc("get_Group", sizeof(group_properties) * nimport);
1021
1022 for(int ngrp = 1; ngrp < (1 << PTask); ngrp++)
1023 {
1024 int recvTask = ThisTask ^ ngrp;
1025
1026 if(recvTask < NTask)
1027 {
1028 if(Send_count[recvTask] > 0 || Recv_count[recvTask] > 0)
1029 {
1030 /* get the group data */
1031 myMPI_Sendrecv(&Group[Send_offset[recvTask]], Send_count[recvTask] * sizeof(group_properties), MPI_BYTE, recvTask,
1032 TAG_DENS_A, &get_Group[Recv_offset[recvTask]], Recv_count[recvTask] * sizeof(group_properties), MPI_BYTE,
1033 recvTask, TAG_DENS_A, Communicator, MPI_STATUS_IGNORE);
1034 }
1035 }
1036 }
1037
1038 /* sort the groups again according to MinID */
1039 mycxxsort(Group, Group + NgroupsExt, fof_compare_Group_MinID);
1040 mycxxsort(get_Group, get_Group + nimport, fof_compare_Group_MinID);
1041
1042 int start = 0;
1043 /* now add in the partial imported group data to the main ones */
1044 for(int i = 0; i < nimport; i++)
1045 {
1046 while(Group[start].MinID < get_Group[i].MinID)
1047 {
1048 start++;
1049 if(start >= NgroupsExt)
1050 Terminate("start >= NgroupsExt");
1051 }
1052
1053 Group[start].Mass += get_Group[i].Mass;
1054 Group[start].Ascale += get_Group[i].Ascale;
1055
1056 for(int j = 0; j < NTYPES; j++)
1057 {
1058 Group[start].LenType[j] += get_Group[i].LenType[j];
1059 Group[start].MassType[j] += get_Group[i].MassType[j];
1060 }
1061#if defined(SUBFIND_ORPHAN_TREATMENT)
1062 Group[start].LenPrevMostBnd += get_Group[i].LenPrevMostBnd;
1063#endif
1064#ifdef STARFORMATION
1065 Group[start].Sfr += get_Group[i].Sfr;
1066#endif
1067
1068 double tmpxyz[3] = {get_Group[i].CM[0] / get_Group[i].Mass, get_Group[i].CM[1] / get_Group[i].Mass,
1069 get_Group[i].CM[2] / get_Group[i].Mass};
1070
1071 MyIntPosType delta[3];
1072 Tp->pos_to_signedintpos(tmpxyz, (MySignedIntPosType *)delta);
1073
1074 delta[0] += get_Group[i].FirstIntPos[0];
1075 delta[1] += get_Group[i].FirstIntPos[1];
1076 delta[2] += get_Group[i].FirstIntPos[2];
1077
1078 Tp->constrain_intpos(delta); /* will only do something if we have a stretched box */
1079
1080 double xyz[3];
1081 Tp->nearest_image_intpos_to_pos(delta, Group[start].FirstIntPos, xyz); /* converts the integer distance to floating point */
1082
1083 for(int j = 0; j < 3; j++)
1084 {
1085 Group[start].CM[j] += get_Group[i].Mass * xyz[j];
1086 Group[start].Vel[j] += get_Group[i].Vel[j];
1087 }
1088 }
1089
1090 Mem.myfree(get_Group);
1091
1092 Mem.myfree(Recv_offset);
1093 Mem.myfree(Recv_count);
1094 Mem.myfree(Send_offset);
1095 Mem.myfree(Send_count);
1096}
1097
1098template <typename partset>
1099void fof<partset>::fof_finish_group_properties(void)
1100{
1101 for(int i = 0; i < NgroupsExt; i++)
1102 {
1103 if(Group[i].MinIDTask == ThisTask)
1104 {
1105 double cm[3];
1106 for(int j = 0; j < 3; j++)
1107 {
1108 Group[i].Vel[j] /= Group[i].Mass;
1109 cm[j] = Group[i].CM[j] / Group[i].Mass;
1110 }
1111
1112 Group[i].Ascale /= Group[i].Mass;
1113
1114 MyIntPosType delta[3];
1115 Tp->pos_to_signedintpos(cm, (MySignedIntPosType *)delta);
1116
1117 delta[0] += Group[i].FirstIntPos[0];
1118 delta[1] += Group[i].FirstIntPos[1];
1119 delta[2] += Group[i].FirstIntPos[2];
1120
1121 Tp->constrain_intpos(delta); /* will only do something if we have a stretched box */
1122
1123 fof_get_halo_position(delta, cm);
1124
1125 Group[i].CM[0] = cm[0];
1126 Group[i].CM[1] = cm[1];
1127 Group[i].CM[2] = cm[2];
1128
1129  /* define the group position as the CM. In case Subfind is used, this will be overwritten
1130  * with the position of the potential minimum
1131  */
1132 for(int j = 0; j < 3; j++)
1133 Group[i].Pos[j] = Group[i].CM[j];
1134
1135 for(int j = 0; j < 3; j++)
1136 Group[i].IntPos[j] = delta[j];
1137 }
1138 }
1139
1140 int ngr = NgroupsExt;
1141
1142 /* eliminate the non-local groups */
1143 for(int i = 0; i < ngr; i++)
1144 {
1145 if(Group[i].MinIDTask != ThisTask)
1146 {
1147 Group[i] = Group[ngr - 1];
1148 i--;
1149 ngr--;
1150 }
1151 }
1152
1153 if(ngr != Ngroups)
1154 Terminate("ngr != Ngroups");
1155
1156 mycxxsort(Group, Group + Ngroups, fof_compare_Group_MinID);
1157}
1158
1159#if defined(LIGHTCONE_PARTICLES_GROUPS)
1160
1161template <>
1162double fof<simparticles>::fof_distance_to_origin(int i)
1163{
1164 return 0; // dummy information for ordinary timeslices
1165}
1166
1167template <>
1168double fof<lcparticles>::fof_distance_to_origin(int i)
1169{
1170 return Tp->signedintpos_to_distanceorigin((MySignedIntPosType *)Tp->P[i].IntPos);
1171}
1172
1173#endif
1174
1175template <>
1176void fof<simparticles>::fof_get_halo_position(MyIntPosType *intpos, double *pos)
1177{
1178 Tp->intpos_to_pos(intpos, pos);
1179}
1180
1181#if defined(LIGHTCONE) && defined(LIGHTCONE_PARTICLES_GROUPS)
1182template <>
1183void fof<lcparticles>::fof_get_halo_position(MyIntPosType *intpos, double *pos)
1184{
1185 MyIntPosType origin[3] = {0, 0, 0};
1186
1187 Tp->nearest_image_intpos_to_pos(intpos, origin, pos);
1188}
1189#endif
1190
1191/* now make sure that the following classes are really instantiated, otherwise we may get a linking problem */
1192#include "../data/simparticles.h"
1193template class fof<simparticles>;
1194
1195#if defined(LIGHTCONE) && defined(LIGHTCONE_PARTICLES_GROUPS)
1196#include "../data/lcparticles.h"
1197template class fof<lcparticles>;
1198#endif
1199
1200#endif /* of FOF */