12#include "gadgetconfig.h"
16#include <gsl/gsl_math.h>
24#include "../data/allvars.h"
25#include "../data/dtypes.h"
26#include "../data/intposconvert.h"
27#include "../data/mymalloc.h"
28#include "../domain/domain.h"
29#include "../fof/fof.h"
30#include "../gravtree/gravtree.h"
31#include "../logs/timer.h"
32#include "../main/simulation.h"
33#include "../mpi_utils/mpi_utils.h"
34#include "../sort/cxxsort.h"
35#include "../sort/parallel_sort.h"
36#include "../sort/peano.h"
37#include "../subfind/subfind.h"
38#include "../system/system.h"

template <typename partset>
void fof<partset>::subfind_processing(domain<partset> *SubDomain, domain_options mode)
{
  double t0 = Logs.second();

  if(mode == COLL_SUBFIND)
    {
      /* sanity check: when a group is processed collectively, it must be the only group in the set,
         and it is stored on the root task of the processor subset */
      if(SubThisTask == 0 && Ngroups != 1)
        Terminate("Ngroups=%d != 1 SubNTask=%d SubThisTask=%d", Ngroups, SubNTask, SubThisTask);

      if(SubThisTask != 0 && Ngroups != 0)
        Terminate("Ngroups=%d != 0 SubNTask=%d SubThisTask=%d", Ngroups, SubNTask, SubThisTask);

      subfind_collective_printf("SUBFIND: root-task=%d: Collectively doing halo %lld of length %lld on %d processors.\n", ThisTask,
                                Group[0].GroupNr, (long long)Group[0].Len, SubNTask);

      if(SubThisTask == 0)
        {
          GroupNr = Group[0].GroupNr;
          Ascale  = Group[0].Ascale;
        }

      /* flag the particles that belong to the halo that is being processed */
      for(int i = 0; i < Tp->NumPart; i++)
        if(Tp->PS[i].GroupNr.get() == GroupNr)
          Tp->PS[i].DomainFlag = 1;
        else
          Tp->PS[i].DomainFlag = 0;
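
      /* Note: GroupNr and Ascale are member variables of the fof class, so the broadcasts below
         make the root task's values for the selected halo available on every task of the
         processor subset. */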
      MPI_Bcast(&GroupNr, 1, MPI_LONG_LONG, 0, SubComm);
      MPI_Bcast(&Ascale, 1, MPI_DOUBLE, 0, SubComm);
    }
  else
    {
      /* serial mode: all local particles take part in the subdomain decomposition */
      for(int i = 0; i < Tp->NumPart; i++)
        Tp->PS[i].DomainFlag = 1;

      if(SubNTask != 1)
        Terminate("Strange: SubNTask=%d Ngroups=%d SubThisTask=%d (expect to be a single processor here)", SubNTask, Ngroups,
                  SubThisTask);
    }

  /* carry out the subdomain decomposition for the particles flagged above */
  double ta = Logs.second();

  SubDomain->domain_free();
  SubDomain->domain_decomposition(mode);

  double tb = Logs.second();

  mpi_printf("SUBFIND: subdomain decomposition took %g sec\n", Logs.timediff(ta, tb));

  for(int i = 0; i < Tp->NumPart; i++)
    Tp->PS[i].SubRankInGr = INT_MAX;
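
  /* The INT_MAX assigned above serves as a "not yet assigned" sentinel: particles presumably keep
     this value until they receive a proper rank within a subhalo later in the subfind processing. */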

  /* sort the particles by group number (and, without SUBFIND_HBT, by density); this temporarily
     breaks the association with SphP[] and other particle arrays */
  submp = (submp_data *)Mem.mymalloc_movable(&submp, "submp", sizeof(submp_data) * Tp->NumPart);
  for(int i = 0; i < Tp->NumPart; i++)
    {
      submp[i].index   = i;
      submp[i].GroupNr = Tp->PS[i].GroupNr.get();
#ifndef SUBFIND_HBT
      submp[i].DM_Density = Tp->PS[i].u.s.u.DM_Density;
#endif
    }
  mycxxsort(submp, submp + Tp->NumPart, subfind_compare_submp_GroupNr_DM_Density);
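
  /* After this sort the entries of submp[] are contiguous per group; judging from the comparator's
     name, particles within a group are additionally ordered by DM_Density, which lets the per-group
     index lists below be built with a single forward scan. */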

  /* list of the indices of the particles belonging to the group that is currently processed */
  IndexList = (int *)Mem.mymalloc_movable(&IndexList, "IndexList", Tp->NumPart * sizeof(int));

  /* create a communicator for each individual processor */
  MPI_Comm SingleComm;
  int thistask;
  MPI_Comm_rank(SubDomain->Communicator, &thistask);
  MPI_Comm_split(SubDomain->Communicator, thistask, thistask, &SingleComm);
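
  /* Using the local rank as the color in MPI_Comm_split places every rank in a communicator of its
     own, so the serial subfind algorithms can reuse the normal domain machinery on a single task. */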

  /* set up a domain decomposition object that lives on this single-processor communicator */
  domain<partset> SingleDomain(SingleComm, Tp);

  if(SingleDomain.NumNodes != 0)
    Terminate("SingleDomain.NumNodes=%d\n", SingleDomain.NumNodes);

  double taa = Logs.second();
  SingleDomain.domain_decomposition(mode);
  double tbb = Logs.second();

  mpi_printf("SUBFIND: serial subfind subdomain decomposition took %g sec\n", Logs.timediff(taa, tbb));

  double ta0 = Logs.second();

  if(mode == COLL_SUBFIND)
    {
      /* determine the number of local particles of the group and fill the list of their indices */
      NumPartGroup = 0;
      for(int i = 0; i < Tp->NumPart; i++)
        if(Tp->PS[i].GroupNr.get() == GroupNr)
          IndexList[NumPartGroup++] = i;

      /* process the single group that is spread over the processor subset */
#ifdef SUBFIND_HBT
      subfind_hbt_single_group(SubDomain, &SingleDomain, mode, 0);
#else
      subfind_process_single_group(SubDomain, &SingleDomain, mode, 0);
#endif
    }
  else
    {
      int i = 0;

      for(int gr = 0; gr < Ngroups; gr++) /* process all local groups one after the other */
        {
          GroupNr = Group[gr].GroupNr;
          Ascale  = Group[gr].Ascale;

          /* collect the group's particle indices; since submp[] is sorted by GroupNr, a single
             forward scan over submp[] suffices */
          NumPartGroup = 0;
          for(; i < Tp->NumPart; i++)
            if(Tp->PS[submp[i].index].GroupNr.get() == GroupNr)
              IndexList[NumPartGroup++] = submp[i].index;
            else
              break;

          /* process the local group with Group[] index 'gr' */
#ifdef SUBFIND_HBT
          subfind_hbt_single_group(SubDomain, &SingleDomain, mode, gr);
#else
          subfind_process_single_group(SubDomain, &SingleDomain, mode, gr);
#endif
        }

      double tb0 = Logs.second();
      mpi_printf("SUBFIND: subfind_hbt_single_group() processing for Ngroups=%d took %g sec\n", Ngroups, Logs.timediff(ta0, tb0));
    }
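
  /* Clean-up. The Mem allocator is stack-like, so the buffers are (as elsewhere in the code)
     released in reverse order of their allocation, IndexList before submp. */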
  SingleDomain.domain_free();

  MPI_Comm_free(&SingleComm);

  Mem.myfree(IndexList);
  Mem.myfree(submp);

  double t1 = Logs.second();

  if(mode == COLL_SUBFIND)
    subfind_collective_printf("SUBFIND: root-task=%d: Collective processing of halo %lld took %g\n", ThisTask,
                              (long long)Group[0].GroupNr, Logs.timediff(t0, t1));
  else
    mpi_printf("SUBFIND: root-task=%d: Serial processing of halo %lld took %g\n", ThisTask, (long long)Group[0].GroupNr,
               Logs.timediff(t0, t1));
}
201#include "../data/simparticles.h"
202template class fof<simparticles>;
204#if defined(LIGHTCONE) && defined(LIGHTCONE_PARTICLES_GROUPS)
205#include "../data/lcparticles.h"
206template class fof<lcparticles>;