12#include "gadgetconfig.h"
16#include <gsl/gsl_rng.h>
27#include "../data/allvars.h"
28#include "../data/dtypes.h"
29#include "../data/intposconvert.h"
30#include "../data/mymalloc.h"
31#include "../io/hdf5_util.h"
32#include "../io/snap_io.h"
33#include "../lightcone/lightcone.h"
34#include "../lightcone/lightcone_particle_io.h"
35#include "../main/main.h"
36#include "../main/simulation.h"
37#include "../mergertree/io_readtrees_mbound.h"
38#include "../sort/cxxsort.h"
39#include "../sort/parallel_sort.h"
40#include "../system/system.h"
41#include "../time_integration/driftfac.h"
43#if defined(REARRANGE_OPTION) && defined(MERGERTREE)
/* Stand-alone "rearrange snapshot" driver (the trailing symbol index gives the
 * elided signature: void rearrange_snapshot(int argc, char **argv)).
 * NOTE(review): lossy extraction — the integers fused to the line starts
 * (43, 47, 50, ...) appear to be original file line numbers, and gaps in that
 * numbering show dropped lines (the function header, the argc guard in front
 * of Terminate, the definition of `conenr`).  Recover the missing text from
 * upstream before compiling; only comments are added here. */
/* abort unless both <firstnum> and <lastnum> were supplied on the command line */
47 Terminate(
"too few arguments: Snapshot rearrange requires <firstnum> <lastnum>");
/* range of snapshot dump numbers to process, parsed from argv[3]/argv[4] */
50 int firstnum = atoi(argv[3]);
51 int lastnum = atoi(argv[4]);
/* hand off to the shared driver, instantiated for ordinary snapshot particles;
 * `conenr` is defined on an elided line — presumably a dummy value for
 * snapshots (no lightcone number applies) — TODO confirm against upstream */
56 rearrange_generic<simparticles>(
Sp, conenr, firstnum, lastnum);
/* Specialization of rearrange_read for ordinary snapshot particles: loads dump
 * <num> into Tp.
 * NOTE(review): lossy extraction — the `template <>` introducer required for
 * an explicit specialization and the whole function body are elided here
 * (presumably a snapshot I/O object reading the most-bound-particle snapshot;
 * cf. the MOST_BOUND_PARTICLE_SNAPHOT entry in the trailing symbol index).
 * `conenr` is presumably unused in the snapshot case — TODO confirm. */
61void sim::rearrange_read<simparticles>(
simparticles &Tp,
int num,
int conenr)
/* Specialization of rearrange_write for ordinary snapshot particles: writes
 * the rearranged particle data of dump <num> back out.
 * NOTE(review): lossy extraction — the `template <>` introducer and the whole
 * function body are elided (presumably a snapshot I/O object writing the
 * reordered most-bound-particle snapshot; cf. the
 * MOST_BOUND_PARTICLE_SNAPHOT_REORDERED entry in the trailing symbol index).
 * `conenr` is presumably unused in the snapshot case — TODO confirm. */
70void sim::rearrange_write<simparticles>(
simparticles &Tp,
int num,
int conenr)
78#if defined(LIGHTCONE) && defined(LIGHTCONE_PARTICLES) && defined(REARRANGE_OPTION)
/* Stand-alone "rearrange lightcone" driver (the trailing symbol index gives
 * the elided signature: void rearrange_lightcone(int argc, char **argv)).
 * NOTE(review): lossy extraction — fused leading integers are original line
 * numbers; gaps mean dropped lines (function header, the argc guard, the
 * error branches of the two `if` statements — which presumably call endrun()
 * per the symbol index —, and the closing `#endif` of LIGHTCONE_MASSMAPS). */
/* abort unless <conenr> <firstnum> <lastnum> were supplied */
83 Terminate(
"too few arguments: Lightcone rearrange requires <conenr> <firstnum> <lastnum>");
/* which lightcone to process, and the range of conefile numbers */
85 int conenr = atoi(argv[3]);
86 int firstnum = atoi(argv[4]);
87 int lastnum = atoi(argv[5]);
/* initialize the lightcone output-time tables; a non-zero return signals
 * failure (handling branch elided) */
92 if(LightCone.lightcone_init_times())
95#ifdef LIGHTCONE_MASSMAPS
96 LightCone.lightcone_init_massmaps();
97 if(LightCone.lightcone_massmap_report_boundaries())
/* linklength is zero here; it is only passed through to the integer-position
 * converter set-up */
101 double linklength = 0;
102 LightCone.lightcone_init_intposconverter(linklength);
/* hand off to the shared driver, instantiated for lightcone particles */
104 rearrange_generic<lcparticles>(Lp, conenr, firstnum, lastnum);
/* Specialization of rearrange_read for lightcone particles: reads conefile
 * <num> of lightcone <conenr> into Tp via a lightcone I/O object.
 * NOTE(review): lossy extraction — the `template <>` introducer, the braces,
 * and the construction of `LcIO` (presumably a lightcone_particle_io instance,
 * given the include at the top of the file) are elided. */
109void sim::rearrange_read<lcparticles>(lcparticles &Tp,
int num,
int conenr)
113 LcIO.lightcone_read(num, conenr);
/* Specialization of rearrange_write for lightcone particles: writes the
 * rearranged data of conefile <num>, lightcone <conenr>.  The trailing `true`
 * flag presumably selects the "reordered" output variant — TODO confirm
 * against the lightcone_save declaration.
 * NOTE(review): lossy extraction — the `template <>` introducer, the braces,
 * and the construction of `LcIO` are elided. */
118void sim::rearrange_write<lcparticles>(lcparticles &Tp,
int num,
int conenr)
122 LcIO.lightcone_save(num, conenr,
true);
/* rearrange_generic<partset>: matches merger-tree halo most-bound particle IDs
 * against the particle IDs held in Tp, tags each matched particle with its
 * TreeID, and processes the output files firstnum..lastnum one by one (per the
 * trailing symbol index the elided signature is:
 * void rearrange_generic(partset &Tp, int conenr, int firstnr, int lastnr)).
 * NOTE(review): lossy extraction — fused leading integers are original line
 * numbers; the numbering gaps mean dropped statements (braces, parallel sort
 * calls, the declarations of `nreq`, `i_halo`, `i_cone`, the
 * rearrange_read/rearrange_write calls, Mem.myfree(halos), and the tail
 * arguments of MPI_Issend).  Only comments are added here. */
127template <
typename partset>
/* read the most-bound IDs of all tree halos (the MtreeIO object itself is
 * constructed on an elided line; cf. the io_readtrees_mbound include) */
133 MtreeIO.read_trees_mostbound();
/* first/last SubMostBoundID held on this rank — min/max only if HaloIDdata is
 * ID-sorted at this point (presumably established by an elided parallel sort);
 * 0 doubles as a dummy when this rank holds no halos */
140 long long halominID = (MergerTree.Nhalos > 0) ? MergerTree.HaloIDdata[0].SubMostBoundID : 0;
141 long long halomaxID = (MergerTree.Nhalos > 0) ? MergerTree.HaloIDdata[MergerTree.Nhalos - 1].SubMostBoundID : 0;
/* gather every rank's halo ID range and halo count, so each rank can later
 * decide which partners could possibly hold matching particles */
143 long long *list_halominID = (
long long *)
Mem.mymalloc(
"list_halominID",
NTask *
sizeof(
long long));
144 long long *list_halomaxID = (
long long *)
Mem.mymalloc(
"list_halomaxID",
NTask *
sizeof(
long long));
145 int *list_Nhalos = (
int *)
Mem.mymalloc(
"list_Nhalos",
NTask *
sizeof(
int));
147 MPI_Allgather(&halominID, 1, MPI_LONG_LONG, list_halominID, 1, MPI_LONG_LONG,
Communicator);
148 MPI_Allgather(&halomaxID, 1, MPI_LONG_LONG, list_halomaxID, 1, MPI_LONG_LONG,
Communicator);
149 MPI_Allgather(&MergerTree.Nhalos, 1, MPI_INT, list_Nhalos, 1, MPI_INT,
Communicator);
/* local shorthand for the halo-ID record type exchanged below */
151 typedef mergertree::treehalo_ids_type treehalo_ids_type;
/* process each requested file number in turn; the elided start of the loop
 * body presumably reads the file (rearrange_read) and ID-sorts Tp.P */
153 for(
int num = firstnum; num <= lastnum; num++)
/* first/last particle ID on this rank — again min/max only if Tp.P is
 * ID-sorted here; 0 is the empty-rank dummy */
163 long long coneminID = (Tp.NumPart > 0) ? Tp.P[0].ID.get() : 0;
164 long long conemaxID = (Tp.NumPart > 0) ? Tp.P[Tp.NumPart - 1].ID.get() : 0;
/* gather per-rank particle ID ranges and counts, mirroring the halo lists */
166 long long *list_coneminID = (
long long *)
Mem.mymalloc(
"list_coneminID",
NTask *
sizeof(
long long));
167 long long *list_conemaxID = (
long long *)
Mem.mymalloc(
"list_conemaxID",
NTask *
sizeof(
long long));
168 int *list_NumPart = (
int *)
Mem.mymalloc(
"list_NumPart",
NTask *
sizeof(
int));
170 MPI_Allgather(&coneminID, 1, MPI_LONG_LONG, list_coneminID, 1, MPI_LONG_LONG,
Communicator);
171 MPI_Allgather(&conemaxID, 1, MPI_LONG_LONG, list_conemaxID, 1, MPI_LONG_LONG,
Communicator);
172 MPI_Allgather(&Tp.NumPart, 1, MPI_INT, list_NumPart, 1, MPI_INT,
Communicator);
/* loop body elided — presumably resets each particle's TreeID to a
 * "no match" default before the exchange below; TODO confirm */
177 for(
int n = 0; n < Tp.NumPart; n++)
/* one request slot per potential partner; `nreq` (declaration elided) counts
 * the sends actually posted and is consumed by MPI_Waitall below */
180 MPI_Request *requests = (MPI_Request *)
Mem.mymalloc(
"requests",
NTask *
sizeof(MPI_Request));
/* post a non-blocking synchronous send of our whole halo-ID table to every
 * rank whose particle ID interval overlaps our halo ID interval */
184 for(
int task = 0; task <
NTask; task++)
186 if(MergerTree.Nhalos > 0 && list_NumPart[task] > 0)
187 if(!(halomaxID < list_coneminID[task] || halominID > list_conemaxID[task]))
189 MPI_Issend(MergerTree.HaloIDdata, MergerTree.Nhalos *
sizeof(mergertree::treehalo_ids_type), MPI_BYTE, task,
TAG_N,
/* symmetrically, receive the halo tables of every rank whose halo ID interval
 * overlaps our particle ID interval, and match them against our particles */
194 for(
int task = 0; task <
NTask; task++)
196 if(list_Nhalos[task] > 0 && Tp.NumPart > 0)
197 if(!(list_halomaxID[task] < coneminID || list_halominID[task] > conemaxID))
199 treehalo_ids_type *halos = (treehalo_ids_type *)
Mem.mymalloc(
"halos", list_Nhalos[task] *
sizeof(treehalo_ids_type));
201 MPI_Recv(halos, list_Nhalos[task] *
sizeof(treehalo_ids_type), MPI_BYTE, task,
TAG_N,
Communicator, MPI_STATUS_IGNORE);
/* two-pointer merge over two ID-sorted sequences: on an ID match the particle
 * inherits the halo's TreeID; otherwise the lagging side advances (the
 * i_halo++/i_cone++ else-branches and the matching Mem.myfree(halos) are
 * partly elided) */
206 while(i_halo < list_Nhalos[task] && i_cone < Tp.NumPart)
208 if(halos[i_halo].SubMostBoundID == Tp.P[i_cone].ID.get())
210 Tp.P[i_cone++].TreeID = halos[i_halo++].TreeID;
212 else if(halos[i_halo].SubMostBoundID < Tp.P[i_cone].ID.get())
/* wait for all posted sends to complete before the buffers are released */
223 MPI_Waitall(nreq, requests, MPI_STATUSES_IGNORE);
/* free per-file scratch strictly in reverse allocation order — the LIFO
 * pattern suggests Mem is a stack-like allocator; TODO confirm */
225 Mem.myfree(requests);
226 Mem.myfree(list_NumPart);
227 Mem.myfree(list_conemaxID);
228 Mem.myfree(list_coneminID);
/* rebuild per-tree halo counts and offsets for the new particle order; the
 * elided tail of the num-loop presumably writes the file (rearrange_write)
 * and frees Tp's particle storage */
233 rearrange_fill_treetable<partset>(Tp);
/* release the outer gather lists, again in reverse allocation order */
242 Mem.myfree(list_Nhalos);
243 Mem.myfree(list_halomaxID);
244 Mem.myfree(list_halominID);
/* rearrange_fill_treetable<partset>: after the particles have been tagged and
 * redistributed, recomputes for every merger tree how many particles now
 * belong to it (HaloCount) and the global offset of its first particle
 * (FirstHalo).  Per the trailing symbol index the elided signature is:
 * void rearrange_fill_treetable(partset &Tp).
 * NOTE(review): lossy extraction — fused leading integers are original line
 * numbers; numbering gaps mean dropped statements (loop braces, the
 * computation of `recvTask`, the Recv_count exchange — presumably via
 * myMPI_Alltoall, which appears in the trailing symbol index —, the
 * declarations/resets of `target_task` and `cumul`, and the head of the
 * Terminate(...) call).  Only comments are added here. */
247template <
typename partset>
/* per-rank bookkeeping for redistributing the particles' TreeID values */
253 int *Send_count = (
int *)
Mem.mymalloc(
"Send_count",
sizeof(
int) *
NTask);
254 int *Send_offset = (
int *)
Mem.mymalloc(
"Send_offset",
sizeof(
int) *
NTask);
255 int *Recv_count = (
int *)
Mem.mymalloc(
"Recv_count",
sizeof(
int) *
NTask);
/* loop body elided — presumably zeroes Send_count[] */
257 for(
int i = 0; i <
NTask; i++)
/* flat copy of every local particle's TreeID, to be routed to the ranks that
 * own the corresponding TreeTable entries */
260 long long *TreeID_list = (
long long *)
Mem.mymalloc(
"TreeID_list",
sizeof(
long long) * Tp.NumPart);
/* largest TreeID stored on this rank; -1 when the rank holds no trees */
262 long long maxTreeID = (MergerTree.Ntrees > 0) ? MergerTree.TreeTable[MergerTree.Ntrees - 1].TreeID : -1;
/* gather every rank's maximum TreeID — this defines the rank ownership ranges
 * used for routing below (assumes TreeIDs are distributed in rank order —
 * TODO confirm) */
264 long long *maxTreeID_list = (
long long *)
Mem.mymalloc(
"maxTreeID_list",
sizeof(
long long) *
NTask);
265 MPI_Allgather(&maxTreeID,
sizeof(
long long), MPI_BYTE, maxTreeID_list,
sizeof(
long long), MPI_BYTE,
Communicator);
/* walk the (TreeID-sorted, judging by the monotone target_task advance)
 * particles and count how many TreeIDs go to each destination rank */
269 for(
int i = 0; i < Tp.NumPart; i++)
271 TreeID_list[i] = Tp.P[i].TreeID;
/* advance the destination rank until its ID range covers this TreeID
 * (`target_task` is declared/reset on an elided line) */
273 while(target_task <
NTask - 1 && TreeID_list[i] > maxTreeID_list[target_task])
276 Send_count[target_task]++;
/* Recv_count is presumably filled from Send_count by an elided
 * myMPI_Alltoall exchange before these totals are formed */
281 int nexport = 0, nimport = 0;
284 for(
int j = 0; j <
NTask; j++)
286 nexport += Send_count[j];
287 nimport += Recv_count[j];
/* exclusive prefix sum of Send_count (the j == 0 base case is elided) */
290 Send_offset[j] = Send_offset[j - 1] + Send_count[j - 1];
/* reset all per-tree counters before tallying the incoming TreeIDs */
293 for(
int i = 0; i < MergerTree.Ntrees; i++)
294 MergerTree.TreeTable[i].HaloCount = 0;
/* pairwise hypercube exchange over all 2^PTask partner offsets; `recvTask`
 * is computed on an elided line (presumably ThisTask XOR ngrp) */
297 for(
int ngrp = 0; ngrp < (1 <<
PTask); ngrp++)
303 if(Send_count[recvTask] > 0 || Recv_count[recvTask] > 0)
305 long long *treeid_tmp = (
long long *)
Mem.mymalloc(
"treeid_tmp",
sizeof(
long long) * Recv_count[recvTask]);
/* swap this rank's outgoing TreeIDs for the partner's, in raw bytes */
307 myMPI_Sendrecv(&TreeID_list[Send_offset[recvTask]], Send_count[recvTask] *
sizeof(
long long), MPI_BYTE, recvTask,
308 TAG_DENS_A, treeid_tmp, Recv_count[recvTask] *
sizeof(
long long), MPI_BYTE, recvTask,
TAG_DENS_A,
/* tally each received TreeID into the owning TreeTable slot; -1 marks
 * particles that matched no tree and are skipped */
311 for(
int i = 0; i < Recv_count[recvTask]; i++)
313 if(treeid_tmp[i] != -1)
/* local slot index — assumes this rank's TreeIDs are contiguous starting at
 * TreeTable[0].TreeID; out-of-range means the routing above went wrong */
315 int off = treeid_tmp[i] - MergerTree.TreeTable[0].TreeID;
317 if(off < 0 || off >= MergerTree.Ntrees)
"strange: off=%d MergerTree.Ntrees=%d treeid_tmp[i]=%lld MergerTree.TreeTable[0].TreeID=%lld i=%d "
"Recv_count[recvTask]=%d ",
off, MergerTree.Ntrees, treeid_tmp[i], MergerTree.TreeTable[0].TreeID, i, Recv_count[recvTask]);
323 MergerTree.TreeTable[off].HaloCount++;
327 Mem.myfree(treeid_tmp);
/* release exchange scratch in reverse allocation order */
332 Mem.myfree(maxTreeID_list);
333 Mem.myfree(TreeID_list);
335 Mem.myfree(Recv_count);
336 Mem.myfree(Send_offset);
337 Mem.myfree(Send_count);
/* local exclusive prefix sum of HaloCount gives rank-local FirstHalo offsets */
341 if(MergerTree.Ntrees > 0)
342 MergerTree.TreeTable[0].FirstHalo = 0;
344 for(
int i = 1; i < MergerTree.Ntrees; i++)
345 MergerTree.TreeTable[i].FirstHalo = MergerTree.TreeTable[i - 1].FirstHalo + MergerTree.TreeTable[i - 1].HaloCount;
/* total count on this rank (`cumul` declared/reset on an elided line) */
348 for(
int i = 0; i < MergerTree.Ntrees; i++)
349 cumul += MergerTree.TreeTable[i].HaloCount;
/* gather per-rank totals so each rank can add the grand total of all lower
 * ranks to its local offsets */
351 long long *cumul_list = (
long long *)
Mem.mymalloc(
"cumul_list",
sizeof(
long long) *
NTask);
353 MPI_Allgather(&cumul,
sizeof(
long long), MPI_BYTE, cumul_list,
sizeof(
long long), MPI_BYTE,
Communicator);
/* loop head elided — presumably sums cumul_list over ranks below ThisTask
 * after resetting `cumul`; TODO confirm */
357 cumul += cumul_list[i];
/* shift local offsets into the global numbering */
359 for(
int i = 0; i < MergerTree.Ntrees; i++)
360 MergerTree.TreeTable[i].FirstHalo += cumul;
362 Mem.myfree(cumul_list);
global_data_all_processes All
void init_drift_table(void)
void mpi_printf(const char *fmt,...)
void rearrange_write(partset &Tp, int num, int conenr)
void rearrange_generic(partset &Tp, int conenr, int firstnr, int lastnr)
void rearrange_snapshot(int argc, char **argv)
void rearrange_lightcone(int argc, char **argv)
void rearrange_read(partset &Tp, int num, int conenr)
void rearrange_fill_treetable(partset &Tp)
void endrun(void)
This function aborts the simulations.
@ MOST_BOUND_PARTICLE_SNAPHOT
@ MOST_BOUND_PARTICLE_SNAPHOT_REORDERED
int myMPI_Alltoall(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm)
int myMPI_Sendrecv(void *sendbuf, size_t sendcount, MPI_Datatype sendtype, int dest, int sendtag, void *recvbuf, size_t recvcount, MPI_Datatype recvtype, int source, int recvtag, MPI_Comm comm, MPI_Status *status)
double mycxxsort_parallel(T *begin, T *end, Comp comp, MPI_Comm comm)