GADGET-4
shared_mem_handler.cc
/*******************************************************************************
 * \copyright This file is part of the GADGET4 N-body/SPH code developed
 * \copyright by Volker Springel. Copyright (C) 2014-2020 by Volker Springel
 * \copyright (vspringel@mpa-garching.mpg.de) and all contributing authors.
 *******************************************************************************/

#include "gadgetconfig.h" /* compile-time configuration options (LEAN, PMGRID, etc.) used below */

#include <hdf5.h>
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <cstring>

#include "../gravtree/gravtree.h"
#include "../ngbtree/ngbtree.h"
#include "../time_integration/driftfac.h"

#include "../mpi_utils/shared_mem_handler.h"

typedef gravtree<simparticles> gtree;
typedef ngbtree ntree;

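/* The last rank of each shared-memory island (rank Island_NTask - 1 in SharedMemComm) acts as a
 * dedicated "ghost" task: it does not evolve particles itself, but keeps tables of byte offsets
 * (relative to Mem.Base) under which the tree and particle data of the worker ranks can be found
 * in the common shared-memory region, and it answers remote fetch requests on their behalf.
 */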
void shmem::prepare_offset_table(void *p, ptrdiff_t *&offset_tab)  // called by ghost task
{
  ptrdiff_t off = ((char *)p - Mem.Base);

  offset_tab = (ptrdiff_t *)Mem.mymalloc("offset_tab", Island_NTask * sizeof(ptrdiff_t));

  MPI_Gather(&off, sizeof(ptrdiff_t), MPI_BYTE, offset_tab, sizeof(ptrdiff_t), MPI_BYTE, Island_NTask - 1, SharedMemComm);
}

void shmem::inform_offset_table(void *p)  // called by worker tasks
{
  ptrdiff_t off = ((char *)p - Mem.Base);

  MPI_Gather(&off, sizeof(ptrdiff_t), MPI_BYTE, NULL, sizeof(ptrdiff_t), MPI_BYTE, Island_NTask - 1, SharedMemComm);
}

void shmem::free_offset_table(ptrdiff_t *&offset_tab) { Mem.myfree(offset_tab); }

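/* Illustrative sketch (not part of GADGET-4): how the three helpers above pair up. Every worker
 * task contributes the offset of one of its arrays relative to the common base of the shared
 * memory segment, and the ghost task (by convention the last rank of the island communicator)
 * gathers these offsets so that it can later reconstruct the workers' addresses within its own
 * mapping of the segment. The names demo_collect_offsets, island_comm, shm_base and my_array
 * are hypothetical.
 */
static void demo_collect_offsets(MPI_Comm island_comm, char *shm_base, void *my_array)
{
  int ntask, thistask;
  MPI_Comm_size(island_comm, &ntask);
  MPI_Comm_rank(island_comm, &thistask);

  int ghost = ntask - 1;  // the last island rank plays the ghost role

  ptrdiff_t off = (char *)my_array - shm_base;  // offset of this rank's array within the segment

  ptrdiff_t *offset_tab = NULL;
  if(thistask == ghost)
    offset_tab = (ptrdiff_t *)malloc(ntask * sizeof(ptrdiff_t));  // only the root needs a receive buffer

  /* workers pass NULL as receive buffer; only the ghost root obtains the full table */
  MPI_Gather(&off, sizeof(ptrdiff_t), MPI_BYTE, offset_tab, sizeof(ptrdiff_t), MPI_BYTE, ghost, island_comm);

  if(thistask == ghost)
    {
      /* the ghost can now turn shm_base + offset_tab[task] into a pointer to that worker's array */
      free(offset_tab);
    }
}
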
void shmem::shared_memory_handler(void)
{
  simparticles Dp{MPI_COMM_WORLD}; /* dummy needed to access drift functions for ngbtree */

  /* first, we wait for the parameter All.MaxMemSize, so that we can initialize the memory handler */
  MPI_Bcast(All.get_data_ptr(), All.get_data_size(), MPI_BYTE, 0, MPI_COMM_WORLD);

  Mem.mymalloc_init(All.MaxMemSize, RST_BEGIN);
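
  /* the ghost task now enters an endless service loop: it waits for messages from its own island's
   * workers or from tasks on other nodes, and dispatches on the message tag */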
  while(true)
    {
      /* wait for an incoming message */
      MPI_Status status;
      MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);

      int source = status.MPI_SOURCE;
      int tag    = status.MPI_TAG;

      int length;
      MPI_Get_count(&status, MPI_BYTE, &length);

      /* now pick it up */
      char *message = (char *)Mem.mymalloc("message", length);
      MPI_Recv(message, length, MPI_BYTE, source, tag, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

      if(tag == TAG_METDATA)  // signals that we are synchronizing addresses and values for tree access
        {
          int handle = *((int *)message);
          Mem.myfree(message);

          MPI_Recv(&All.Ti_Current, sizeof(All.Ti_Current), MPI_BYTE, source, TAG_METDATA + 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
          MPI_Recv(&tree_info[handle].Bd, sizeof(bookkeeping_data), MPI_BYTE, source, TAG_METDATA + 2, MPI_COMM_WORLD,
                   MPI_STATUS_IGNORE);

          intposconvert *convfac = &Dp;
          MPI_Recv(convfac, sizeof(intposconvert), MPI_BYTE, source, TAG_METDATA + 3, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

          prepare_offset_table(NULL, tree_info[handle].TopNodes_offsets);
          prepare_offset_table(NULL, tree_info[handle].Nodes_offsets);
          prepare_offset_table(NULL, tree_info[handle].Nextnode_offsets);
          prepare_offset_table(NULL, tree_info[handle].Points_offsets);
          prepare_offset_table(NULL, tree_info[handle].P_offsets);
          prepare_offset_table(NULL, tree_info[handle].SphP_offsets);
          prepare_offset_table(NULL, tree_info[handle].Foreign_Nodes_offsets);
          prepare_offset_table(NULL, tree_info[handle].Foreign_Points_offsets);

          MPI_Barrier(SharedMemComm);  // this barrier is in principle superfluous, but on some systems, the MPI_Gather
                                       // in prepare_offset_table() can return prematurely before all data has arrived
        }
      else if(tag == TAG_HEADER)  // signals that we are freeing addresses we stored for tree access
        {
          int handle = *((int *)message);
          Mem.myfree(message);

          free_offset_table(tree_info[handle].Foreign_Points_offsets);
          free_offset_table(tree_info[handle].Foreign_Nodes_offsets);
          free_offset_table(tree_info[handle].SphP_offsets);
          free_offset_table(tree_info[handle].P_offsets);
          free_offset_table(tree_info[handle].Points_offsets);
          free_offset_table(tree_info[handle].Nextnode_offsets);
          free_offset_table(tree_info[handle].Nodes_offsets);
          free_offset_table(tree_info[handle].TopNodes_offsets);
        }
      else if(tag >= TAG_FETCH_SPH_DENSITY && tag < TAG_FETCH_SPH_DENSITY + MAX_TREE_INFOS)  // fetch from SPH density tree
        {
          int handle = tag - TAG_FETCH_SPH_DENSITY;
          deal_with_sph_node_request(message, length, source, handle, &Dp);
          Mem.myfree(message);
        }
      else if(tag >= TAG_FETCH_SPH_HYDRO && tag < TAG_FETCH_SPH_HYDRO + MAX_TREE_INFOS)  // fetch from SPH hydro tree
        {
          int handle = tag - TAG_FETCH_SPH_HYDRO;
          deal_with_sph_node_request(message, length, source, handle, &Dp);
          Mem.myfree(message);
        }
      else if(tag >= TAG_FETCH_SPH_TREETIMESTEP && tag < TAG_FETCH_SPH_TREETIMESTEP + MAX_TREE_INFOS)  // fetch from SPH timestep tree
        {
          int handle = tag - TAG_FETCH_SPH_TREETIMESTEP;
          deal_with_sph_node_request(message, length, source, handle, &Dp);
          Mem.myfree(message);
        }
      else if(tag >= TAG_FETCH_GRAVTREE && tag < TAG_FETCH_GRAVTREE + MAX_TREE_INFOS)  // fetch from gravity tree
        {
          int handle = tag - TAG_FETCH_GRAVTREE;
          deal_with_gravity_node_request(message, length, source, handle);
          Mem.myfree(message);
        }
      else if(tag == TAG_KEY)  // request to terminate gracefully
        {
          H5Eset_auto(H5E_DEFAULT, NULL, NULL);
          MPI_Finalize();
          exit(0);
        }
      else if(tag == TAG_TABLE_ALLOC)  // take over storage for TableData
        {
          size_t tab_len = *((size_t *)message);
          Mem.myfree(message);

          TableData = (char *)Mem.mymalloc("table", tab_len);
          MPI_Recv(TableData, tab_len, MPI_BYTE, source, TAG_DMOM, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
          ptrdiff_t off = ((char *)TableData - Mem.Base);
          MPI_Bcast(&off, sizeof(ptrdiff_t), MPI_BYTE, Island_ThisTask, SharedMemComm);
        }
      else if(tag == TAG_TABLE_FREE)
        {
          Mem.myfree(message);
          Mem.myfree(TableData);
        }
      else if(tag == TAG_EWALD_ALLOC)  // take over storage for EwaldData
        {
          size_t tab_len = *((size_t *)message);
          Mem.myfree(message);

          EwaldData = (char *)Mem.mymalloc("table", tab_len);
          MPI_Recv(EwaldData, tab_len, MPI_BYTE, source, TAG_DMOM, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
          ptrdiff_t off = ((char *)EwaldData - Mem.Base);
          MPI_Bcast(&off, sizeof(ptrdiff_t), MPI_BYTE, Island_ThisTask, SharedMemComm);
        }
      else if(tag == TAG_TOPNODE_ALLOC)  // take over the storage of common top-level tree data
        {
          int handle = num_tree_info++;
          if(num_tree_info > MAX_TREE_INFOS)
            Terminate("num_tree_info > MAX_TREE_INFOS");

          size_t *sizep   = ((size_t *)message);
          size_t sizes[4] = {sizep[0], sizep[1], sizep[2], sizep[3]};
          Mem.myfree(message);

          tree_info[handle].NodeLevel_storage   = (char *)Mem.mymalloc("NodeLevel_storage", sizes[0]);
          tree_info[handle].NodeSibling_storage = (char *)Mem.mymalloc("NodeSibling_storage", sizes[1]);
          tree_info[handle].NodeIndex_storage   = (char *)Mem.mymalloc("NodeIndex_storage", sizes[2]);
          tree_info[handle].TopNodes_storage    = (char *)Mem.mymalloc("TopNodes_storage", sizes[3]);

          ptrdiff_t off[4] = {
              ((char *)tree_info[handle].NodeLevel_storage - Mem.Base), ((char *)tree_info[handle].NodeSibling_storage - Mem.Base),
              ((char *)tree_info[handle].NodeIndex_storage - Mem.Base), ((char *)tree_info[handle].TopNodes_storage - Mem.Base)};

          MPI_Send(off, 4 * sizeof(ptrdiff_t), MPI_BYTE, source, TAG_TOPNODE_OFFSET, MPI_COMM_WORLD);

          MPI_Send(&handle, 1, MPI_INT, source, TAG_N, MPI_COMM_WORLD);
        }
      else if(tag == TAG_TOPNODE_FREE)  // free the top-level storage for a tree again
        {
          int handle = *((int *)message);
          Mem.myfree(message);

          num_tree_info--;
          if(handle != num_tree_info)
            Terminate("unexpected handle");

          Mem.myfree(tree_info[handle].TopNodes_storage);
          Mem.myfree(tree_info[handle].NodeIndex_storage);
          Mem.myfree(tree_info[handle].NodeSibling_storage);
          Mem.myfree(tree_info[handle].NodeLevel_storage);
        }
      else if(tag == TAG_DRIFT_INIT)  // make the shared memory handler update All and init the local drift tables
        {
          memcpy(All.get_data_ptr(), message, All.get_data_size());
          Mem.myfree(message);
          Driftfac.init_drift_table();
        }
      else if(tag == TAG_ALL_UPDATE)  // make the shared memory handler update the contents of the All structure
        {
          memcpy(All.get_data_ptr(), message, All.get_data_size());
          Mem.myfree(message);
        }
    }
}
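
/* Illustrative sketch (not part of GADGET-4): the sending side of the TAG_TABLE_ALLOC handover
 * served in the loop above, as implied by the handler code. A worker that wants the ghost task
 * to take over a read-only table first sends the table size, then the table contents, and then
 * takes part in the island-wide broadcast through which the ghost announces the offset of its
 * copy relative to its Mem.Base. The names demo_handover_table, ghost_world_rank and
 * ghost_island_rank are hypothetical; the actual sending code lives elsewhere in GADGET-4 and
 * may differ in detail.
 */
static ptrdiff_t demo_handover_table(char *table, size_t tab_len, int ghost_world_rank, int ghost_island_rank,
                                     MPI_Comm island_comm)
{
  /* announce the size; the tag selects the TAG_TABLE_ALLOC branch of the handler */
  MPI_Send(&tab_len, sizeof(tab_len), MPI_BYTE, ghost_world_rank, TAG_TABLE_ALLOC, MPI_COMM_WORLD);

  /* ship the table contents, matching the MPI_Recv with tag TAG_DMOM in the handler */
  MPI_Send(table, tab_len, MPI_BYTE, ghost_world_rank, TAG_DMOM, MPI_COMM_WORLD);

  /* every island member learns where the ghost stored the table in the shared segment */
  ptrdiff_t off;
  MPI_Bcast(&off, sizeof(ptrdiff_t), MPI_BYTE, ghost_island_rank, island_comm);

  return off; /* the caller can now release its own copy and address the table via this offset */
}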

void shmem::deal_with_sph_node_request(char *message, int length, int source, int handle, simparticles *Sp)
{
#ifndef LEAN
  bookkeeping_data &Bdat = tree_info[handle].Bd;

  // we got the list of requested nodes
  ntree::node_req *node_req_recv = (ntree::node_req *)message;
  int nrecv                      = length / sizeof(ntree::node_req);

  /* each request also carries the rank that was actually targeted in the simulation communicator;
   * below we translate it to the corresponding rank within our shared-memory block
   */

  /* now prepare answer message by reading from shared memory */
  /******************* prepare tree answer ********************/

  ntree::node_count_info *node_info_recv =
      (ntree::node_count_info *)Mem.mymalloc("node_info_recv", nrecv * sizeof(ntree::node_count_info));

  /* first let's count how many nodes and particles are hanging below in each case */
  int n_recvpoints = 0;
  int n_recvnodes  = 0;

  for(int i = 0; i < nrecv; i++)
    {
      node_info_recv[i].count_nodes = 0;
      node_info_recv[i].count_parts = 0;

      int no      = node_req_recv[i].foreignnode;
      int task    = node_req_recv[i].foreigntask;
      int shmrank = GetShmRankForSimulCommRank[task];  // corresponding local shared memory rank, stays fixed below

      if(no < Bdat.MaxPart || no >= Bdat.MaxPart + Bdat.MaxNodes)
        Terminate("not an internal node");

      ngbnode *nop = ((ngbnode *)get_basenodep(no, shmrank, handle)) + no;

      int p = nop->nextnode;

      while(p != nop->sibling)
        {
          if(p < 0)
            Terminate("p=%d < 0", p);

          if(p < Bdat.MaxPart) /* a local particle */
            {
              node_info_recv[i].count_parts++;
              p = get_nextnodep(shmrank, handle)[p];
            }
          else if(p < Bdat.MaxPart + Bdat.MaxNodes) /* an internal node */
            {
              node_info_recv[i].count_nodes++;
              p = (((ngbnode *)get_basenodep(p, shmrank, handle)) + p)->sibling;
            }
          else if(p >= Bdat.ImportedNodeOffset && p < Bdat.EndOfTreePoints) /* an imported tree point */
            {
              node_info_recv[i].count_parts++;
              p = get_nextnodep(shmrank, handle)[p - Bdat.MaxNodes];
            }
          else
            Terminate("p=%d MaxPart=%d MaxNodes=%d", p, Bdat.MaxPart, Bdat.MaxNodes);
        }

      if(node_info_recv[i].count_parts == 0 && node_info_recv[i].count_nodes == 0)
        Terminate("strange: have we requested an empty node?\n");

      n_recvpoints += node_info_recv[i].count_parts;
      n_recvnodes += node_info_recv[i].count_nodes;
    }

  foreign_sphpoint_data *exportbuf_points = (foreign_sphpoint_data *)Mem.mymalloc_movable(
      &exportbuf_points, "exportbuf_points", n_recvpoints * sizeof(foreign_sphpoint_data));
  ngbnode *exportbuf_nodes = (ngbnode *)Mem.mymalloc_movable(&exportbuf_nodes, "exportbuf_nodes", n_recvnodes * sizeof(ngbnode));

  n_recvpoints = 0;
  n_recvnodes  = 0;

  for(int i = 0; i < nrecv; i++)
    {
      int no      = node_req_recv[i].foreignnode;
      int task    = node_req_recv[i].foreigntask;
      int shmrank = GetShmRankForSimulCommRank[task];  // corresponding local shared memory rank, stays fixed below

      ngbnode *nop = ((ngbnode *)get_basenodep(no, shmrank, handle)) + no;

      int p = nop->nextnode;

      while(p != nop->sibling)
        {
          if(p < Bdat.MaxPart) /* a local particle */
            {
              int off = n_recvpoints++;

              foreign_sphpoint_data *expoints = &exportbuf_points[off];

              particle_data *ptr         = (particle_data *)get_Pp(shmrank, handle) + p;
              sph_particle_data *sph_ptr = (sph_particle_data *)get_SphPp(shmrank, handle) + p;

              particle_data p_copy;
              sph_particle_data sphp_copy;

              if(ptr->get_Ti_Current() != All.Ti_Current)
                {
                  /* Because of possible lightcone output, a shared-memory fetch is not allowed to drift
                   * the original particle, since this rank does not have a lightcone buffer; only the
                   * owning rank can do that. We therefore drift a private copy of the particle, with
                   * lightcone access disabled.
                   */
#ifndef LEAN
                  while(ptr->access.test_and_set(std::memory_order_acquire))
                    ;  // acquire spin lock
#endif
                  p_copy    = *ptr;
                  sphp_copy = *sph_ptr;

#ifndef LEAN
                  ptr->access.clear(std::memory_order_release);  // release spin lock
#endif
                  p_copy.access.clear(std::memory_order_release);  // clear spin lock in copy

                  /* use the copy from now on */

                  ptr     = &p_copy;
                  sph_ptr = &sphp_copy;

                  // the final flag tells the drift to not consider lightcone crossings
                  Sp->drift_particle(ptr, sph_ptr, All.Ti_Current, true);
                }

              expoints->IntPos[0]    = ptr->IntPos[0];
              expoints->IntPos[1]    = ptr->IntPos[1];
              expoints->IntPos[2]    = ptr->IntPos[2];
              expoints->Mass         = ptr->getMass();
              expoints->TimeBinHydro = ptr->TimeBinHydro;
              expoints->SphCore      = *sph_ptr;

              expoints->Nextnode = -1;

              p = get_nextnodep(shmrank, handle)[p];
            }
          else if(p < Bdat.MaxPart + Bdat.MaxNodes) /* an internal node */
            {
              int off = n_recvnodes++;

              ngbnode *sourcep = (((ngbnode *)get_basenodep(p, shmrank, handle)) + p);
              ngbnode *exnodes = &exportbuf_nodes[off];

              if(sourcep->Ti_Current != All.Ti_Current)
                sourcep->drift_node(All.Ti_Current, Sp);

              *exnodes = *sourcep;

              exnodes->cannot_be_opened_locally = 1;
              exnodes->nextnode                 = -1;
              exnodes->sibling                  = -1;
              exnodes->OriginTask               = task;
              exnodes->OriginNode               = p;

              p = sourcep->sibling;
            }
          else if(p >= Bdat.ImportedNodeOffset) /* an imported treepoint particle */
            {
              Terminate("not expected here");
            }
        }
    }

  /************************************************************/

  MPI_Send(node_info_recv, nrecv * sizeof(ntree::node_count_info), MPI_BYTE, source, TAG_N, MPI_COMM_WORLD);

  /* now transfer the points and nodes */
  if(n_recvpoints > 0)
    MPI_Send(exportbuf_points, n_recvpoints * sizeof(foreign_sphpoint_data), MPI_BYTE, source, TAG_PDATA, MPI_COMM_WORLD);

  if(n_recvnodes > 0)
    MPI_Send(exportbuf_nodes, n_recvnodes * sizeof(ngbnode), MPI_BYTE, source, TAG_SPHDATA, MPI_COMM_WORLD);

  Mem.myfree(exportbuf_nodes);
  Mem.myfree(exportbuf_points);
  Mem.myfree(node_info_recv);
#endif
}
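
/* Illustrative sketch (not part of GADGET-4): the spin-lock protected copy used above when a
 * particle has to be drifted without modifying the owner's data. The lock is taken with
 * test_and_set(acquire), the record is copied, the lock on the original is dropped with
 * clear(release), and the private copy's own flag is cleared so that it starts out unlocked.
 * The type demo_record and the function demo_snapshot are hypothetical.
 */
#include <atomic>

struct demo_record
{
  double pos[3];
  std::atomic_flag access;  // assumed to be cleared once right after construction, as for particle_data
};

static void demo_snapshot(demo_record *src, demo_record *dst)
{
  while(src->access.test_and_set(std::memory_order_acquire))
    ;  // spin until we own the lock on the original

  /* raw byte copy, as done for gravnode below: the std::atomic_flag member deletes the implicit
   * copy operations, so the trivially laid-out record is copied with memcpy instead */
  memcpy(static_cast<void *>(dst), static_cast<void *>(src), sizeof(demo_record));

  src->access.clear(std::memory_order_release);  // release the lock on the original

  dst->access.clear(std::memory_order_release);  // make sure the private copy starts out unlocked
}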

void shmem::deal_with_gravity_node_request(char *message, int length, int source, int handle)
{
  bookkeeping_data &Bdat = tree_info[handle].Bd;

  // we got the list of requested nodes
  gtree::node_req *node_req_recv = (gtree::node_req *)message;
  int nrecv                      = length / sizeof(gtree::node_req);

  /* each request also carries the rank that was actually targeted in the simulation communicator;
   * below we translate it to the corresponding rank within our shared-memory block
   */

  /* now prepare answer message by reading from shared memory */
  /******************* prepare tree answer ********************/

  gtree::node_count_info *node_info_recv =
      (gtree::node_count_info *)Mem.mymalloc("node_info_recv", nrecv * sizeof(gtree::node_count_info));

  /* first let's count how many nodes and particles are hanging below in each case */
  int n_recvpoints = 0;
  int n_recvnodes  = 0;

  for(int i = 0; i < nrecv; i++)
    {
      node_info_recv[i].count_nodes = 0;
      node_info_recv[i].count_parts = 0;

      int no      = node_req_recv[i].foreignnode;
      int task    = node_req_recv[i].foreigntask;
      int shmrank = GetShmRankForSimulCommRank[task];  // corresponding local shared memory rank, stays fixed below

      if(no < Bdat.MaxPart || no >= Bdat.MaxPart + Bdat.MaxNodes)
        Terminate("not an internal node");

      gravnode *nop = ((gravnode *)get_basenodep(no, shmrank, handle)) + no;

      int p = nop->nextnode;

      while(p != nop->sibling)
        {
          if(p < 0)
            Terminate("p=%d < 0", p);

          if(p < Bdat.MaxPart) /* a local particle */
            {
              node_info_recv[i].count_parts++;
              p = get_nextnodep(shmrank, handle)[p];
            }
          else if(p < Bdat.MaxPart + Bdat.MaxNodes) /* an internal node */
            {
              node_info_recv[i].count_nodes++;
              p = (((gravnode *)get_basenodep(p, shmrank, handle)) + p)->sibling;
            }
          else if(p >= Bdat.ImportedNodeOffset && p < Bdat.EndOfTreePoints) /* an imported tree point */
            {
              node_info_recv[i].count_parts++;
              p = get_nextnodep(shmrank, handle)[p - Bdat.MaxNodes];
            }
          else
            Terminate("p=%d MaxPart=%d MaxNodes=%d", p, Bdat.MaxPart, Bdat.MaxNodes);
        }

      if(node_info_recv[i].count_parts == 0 && node_info_recv[i].count_nodes == 0)
        Terminate("strange: have we requested an empty node?\n");

      n_recvpoints += node_info_recv[i].count_parts;
      n_recvnodes += node_info_recv[i].count_nodes;
    }

  foreign_gravpoint_data *exportbuf_points = (foreign_gravpoint_data *)Mem.mymalloc_movable(
      &exportbuf_points, "exportbuf_points", n_recvpoints * sizeof(foreign_gravpoint_data));
  gravnode *exportbuf_nodes = (gravnode *)Mem.mymalloc_movable(&exportbuf_nodes, "exportbuf_nodes", n_recvnodes * sizeof(gravnode));

  n_recvpoints = 0;
  n_recvnodes  = 0;

  for(int i = 0; i < nrecv; i++)
    {
      int no      = node_req_recv[i].foreignnode;
      int task    = node_req_recv[i].foreigntask;
      int shmrank = GetShmRankForSimulCommRank[task];  // corresponding local shared memory rank, stays fixed below

      gravnode *nop = ((gravnode *)get_basenodep(no, shmrank, handle)) + no;

      int p = nop->nextnode;

      while(p != nop->sibling)
        {
          if(p < Bdat.MaxPart) /* a local particle */
            {
              int off = n_recvpoints++;

              foreign_gravpoint_data *expoints = &exportbuf_points[off];

              particle_data *ptr = (particle_data *)get_Pp(shmrank, handle) + p;

              expoints->IntPos[0] = ptr->IntPos[0];
              expoints->IntPos[1] = ptr->IntPos[1];
              expoints->IntPos[2] = ptr->IntPos[2];
              expoints->Mass      = ptr->getMass();
              expoints->Type      = ptr->getType();
              expoints->OldAcc    = ptr->OldAcc;
#if NSOFTCLASSES > 1
              expoints->SofteningClass = ptr->getSofteningClass();
#endif
#if defined(PMGRID) && defined(PLACEHIGHRESREGION)
              expoints->InsideOutsideFlag = ptr->InsideOutsideFlag;
#endif
              expoints->Nextnode = -1;

              p = get_nextnodep(shmrank, handle)[p];
            }
          else if(p < Bdat.MaxPart + Bdat.MaxNodes) /* an internal node */
            {
              int off = n_recvnodes++;

              gravnode *exnodes = &exportbuf_nodes[off];
              gravnode *sourcep = (((gravnode *)get_basenodep(p, shmrank, handle)) + p);

              memcpy(static_cast<void *>(exnodes), static_cast<void *>(sourcep),
                     sizeof(gravnode));  // we cannot do *exnodes = *sourcep; because our std::atomic_flag member
                                         // has deleted copy operators

              exnodes->cannot_be_opened_locally = 1;
              exnodes->nextnode                 = -1;
              exnodes->sibling                  = -1;
              exnodes->OriginTask               = task;
              exnodes->OriginNode               = p;

              p = sourcep->sibling;
            }
          else if(p >= Bdat.ImportedNodeOffset) /* an imported treepoint particle */
            {
              int off = n_recvpoints++;

              foreign_gravpoint_data *expoints = &exportbuf_points[off];
              int n                            = p - Bdat.ImportedNodeOffset;
              gravpoint_data *pointsp          = ((gravpoint_data *)get_pointsp(shmrank, handle)) + n;

              expoints->IntPos[0] = pointsp->IntPos[0];
              expoints->IntPos[1] = pointsp->IntPos[1];
              expoints->IntPos[2] = pointsp->IntPos[2];
              expoints->Mass      = pointsp->Mass;
              expoints->Type      = pointsp->Type;
              expoints->OldAcc    = pointsp->OldAcc;
#if NSOFTCLASSES > 1
              expoints->SofteningClass = pointsp->SofteningClass;
#endif
#if defined(PMGRID) && defined(PLACEHIGHRESREGION)
              expoints->InsideOutsideFlag = pointsp->InsideOutsideFlag;
#endif
              expoints->Nextnode = -1;

              p = get_nextnodep(shmrank, handle)[p - Bdat.MaxNodes];
            }
        }
    }

  /************************************************************/

  MPI_Send(node_info_recv, nrecv * sizeof(gtree::node_count_info), MPI_BYTE, source, TAG_N, MPI_COMM_WORLD);

  /* now transfer the points and nodes */
  if(n_recvpoints > 0)
    MPI_Send(exportbuf_points, n_recvpoints * sizeof(foreign_gravpoint_data), MPI_BYTE, source, TAG_PDATA, MPI_COMM_WORLD);

  if(n_recvnodes > 0)
    MPI_Send(exportbuf_nodes, n_recvnodes * sizeof(gravnode), MPI_BYTE, source, TAG_SPHDATA, MPI_COMM_WORLD);

  Mem.myfree(exportbuf_nodes);
  Mem.myfree(exportbuf_points);
  Mem.myfree(node_info_recv);
}
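
/* Illustrative sketch (not part of GADGET-4): the requesting side of the gravity-tree fetch
 * protocol served by deal_with_gravity_node_request() above, as implied by the handler code.
 * The requester ships a list of node requests whose tag encodes the tree handle, receives the
 * per-node counts with tag TAG_N, and then picks up the particle and node payloads. The function
 * name demo_fetch_gravity_nodes and the argument ghost_world_rank are hypothetical; the real
 * fetch code lives in the gravity tree implementation and may differ in detail.
 */
static void demo_fetch_gravity_nodes(gtree::node_req *reqs, int nreq, int handle, int ghost_world_rank)
{
  /* send the list of requested nodes; the tag selects the tree_info slot on the ghost */
  MPI_Send(reqs, nreq * sizeof(gtree::node_req), MPI_BYTE, ghost_world_rank, TAG_FETCH_GRAVTREE + handle, MPI_COMM_WORLD);

  /* learn how many particles and daughter nodes hang below each requested node */
  gtree::node_count_info *info = (gtree::node_count_info *)Mem.mymalloc("info", nreq * sizeof(gtree::node_count_info));
  MPI_Recv(info, nreq * sizeof(gtree::node_count_info), MPI_BYTE, ghost_world_rank, TAG_N, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

  int npoints = 0, nnodes = 0;
  for(int i = 0; i < nreq; i++)
    {
      npoints += info[i].count_parts;
      nnodes += info[i].count_nodes;
    }

  /* the payload follows in at most two further messages, matching the sends in the handler */
  foreign_gravpoint_data *points =
      (foreign_gravpoint_data *)Mem.mymalloc("points", npoints * sizeof(foreign_gravpoint_data));
  gravnode *nodes = (gravnode *)Mem.mymalloc("nodes", nnodes * sizeof(gravnode));

  if(npoints > 0)
    MPI_Recv(points, npoints * sizeof(foreign_gravpoint_data), MPI_BYTE, ghost_world_rank, TAG_PDATA, MPI_COMM_WORLD,
             MPI_STATUS_IGNORE);

  if(nnodes > 0)
    MPI_Recv(nodes, nnodes * sizeof(gravnode), MPI_BYTE, ghost_world_rank, TAG_SPHDATA, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

  /* ... unpacking of 'points' and 'nodes' into the local tree would go here ... */

  Mem.myfree(nodes);
  Mem.myfree(points);
  Mem.myfree(info);
}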