PICurv 0.1.0
A Parallel Particle-In-Cell Solver for Curvilinear LES
ParticleMotion.c
1// ParticleMotion.c
2
3#include "ParticleMotion.h"
4
5// Define a buffer size for error messages if not already available
6#ifndef ERROR_MSG_BUFFER_SIZE
7#define ERROR_MSG_BUFFER_SIZE 256 // Or use PETSC_MAX_PATH_LEN if appropriate
8#endif
9
10/**
11 * @brief Updates a particle's position based on its velocity and the timestep dt (stored in user->simCtx->dt).
12 *
13 * @param[in] user Pointer to the UserCtx (must contain user->simCtx->dt).
14 * @param[in,out] position Pointer to the particle's current position (Cmpnts).
15 * @param[in] velocity Pointer to the particle's velocity (Cmpnts).
16 *
17 * @return PetscErrorCode Returns 0 on success, or an error code on failure.
18 */
19PetscErrorCode UpdateParticlePosition(UserCtx *user, Cmpnts *position, const Cmpnts *velocity)
20{
21 PetscFunctionBeginUser; // PETSc macro for error/stack tracing
22
23 PetscReal dt = user->simCtx->dt;
24
25 /* Update the position with velocity * dt */
26 position->x += velocity->x * dt;
27 position->y += velocity->y * dt;
28 position->z += velocity->z * dt;
29
30 PetscFunctionReturn(0);
31}
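/* Editor's sketch (not part of the original source): a minimal single-particle use of
   UpdateParticlePosition, assuming 'user' carries a valid simCtx with dt already set.

     Cmpnts x = {0.0, 0.0, 0.0};
     Cmpnts v = {1.0, 0.5, -0.25};
     PetscErrorCode ierr = UpdateParticlePosition(user, &x, &v); CHKERRQ(ierr);
     // x is now (1.0*dt, 0.5*dt, -0.25*dt)
*/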
32
33#undef __FUNCT__
34#define __FUNCT__ "UpdateAllParticlePositions"
35/**
36 * @brief Loops over all local particles in the DMSwarm, updating their positions
37 * based on velocity and the global timestep user->simCtx->dt.
38 * @param[in,out] user Pointer to UserCtx (must contain simCtx->dt and the swarm).
39 *
40 * @return PetscErrorCode Returns 0 on success, or an error code on failure.
41 */
42PetscErrorCode UpdateAllParticlePositions(UserCtx *user)
43{
44 PetscErrorCode ierr;
45 DM swarm = user->swarm;
46 PetscInt nLocal, p;
47 PetscReal *pos = NULL;
48 PetscReal *vel = NULL;
49 PetscMPIInt rank;
50 Cmpnts temp_pos, temp_vel;
51
52 PetscFunctionBeginUser; // PETSc macro for error/stack tracing
53
54 ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank); CHKERRQ(ierr);
55
57
58 // 1) Get the number of local particles
59 ierr = DMSwarmGetLocalSize(swarm, &nLocal); CHKERRQ(ierr);
60 if (nLocal == 0) {
61 LOG_ALLOW(LOCAL,LOG_DEBUG,"[Rank %d] No particles to move/transport. \n",rank);
62 PetscFunctionReturn(0); // nothing to do, no fields held
63 }
64 // 2) Access the "position" and "velocity" fields
65 ierr = DMSwarmGetField(swarm, "position", NULL, NULL, (void**)&pos); CHKERRQ(ierr);
66 ierr = DMSwarmGetField(swarm, "velocity", NULL, NULL, (void**)&vel); CHKERRQ(ierr);
67
68 LOG_ALLOW(GLOBAL,LOG_DEBUG,"[Rank %d] Number of particles to update: %d.\n",rank,nLocal);
69
70 // 3) Loop over all local particles, updating each position by velocity * dt
71 for (p = 0; p < nLocal; p++) {
72 // update temporary position struct
73 temp_pos.x = pos[3*p];
74 temp_pos.y = pos[3*p + 1];
75 temp_pos.z = pos[3*p + 2];
76
77 // update temporary velocity struct
78 temp_vel.x = vel[3*p];
79 temp_vel.y = vel[3*p + 1];
80 temp_vel.z = vel[3*p + 2];
81
82 ierr = UpdateParticlePosition(user, &temp_pos, &temp_vel); CHKERRQ(ierr);
83
84 // update swarm from temporary position struct
85 pos[3*p] = temp_pos.x;
86 pos[3*p + 1] = temp_pos.y;
87 pos[3*p + 2] = temp_pos.z;
88
89 vel[3*p] = temp_vel.x;
90 vel[3*p + 1] = temp_vel.y;
91 vel[3*p + 2] = temp_vel.z;
92 }
93
94 // 4) Restore the fields
95 ierr = DMSwarmRestoreField(swarm, "position", NULL, NULL, (void**)&pos); CHKERRQ(ierr);
96 ierr = DMSwarmRestoreField(swarm, "velocity", NULL, NULL, (void**)&vel); CHKERRQ(ierr);
97
98 LOG_ALLOW(LOCAL,LOG_DEBUG,"Particle moved/transported successfully on Rank %d.\n",rank);
99
101
102 PetscFunctionReturn(0);
103}
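/* Editor's sketch (not part of the original source): how UpdateAllParticlePositions might
   sit inside a time-stepping loop. 'nsteps' is a placeholder, and the particle-location and
   migration phase is only indicated by a comment to show the intended ordering.

     for (PetscInt step = 0; step < nsteps; ++step) {
       ierr = UpdateAllParticlePositions(user); CHKERRQ(ierr);   // advect all local particles by user->simCtx->dt
       // ... re-locate particles in the grid and migrate them between ranks here ...
     }
*/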
104
105
106/**
107 * @brief Checks if a particle position is within the bounds of a given bounding box.
108 *
109 * @param bbox Pointer to the BoundingBox structure.
110 * @param pos Pointer to the particle's position (Cmpnts).
111 *
112 * @return PetscBool PETSC_TRUE if the particle is inside or on the boundary, PETSC_FALSE otherwise.
113 */
114static inline PetscBool IsParticleInBox(const BoundingBox *bbox, const Cmpnts *pos) {
115 return (pos->x >= bbox->min_coords.x && pos->x <= bbox->max_coords.x &&
116 pos->y >= bbox->min_coords.y && pos->y <= bbox->max_coords.y &&
117 pos->z >= bbox->min_coords.z && pos->z <= bbox->max_coords.z);
118}
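/* Editor's sketch (not part of the original source): IsParticleInBox treats the box as a
   closed interval in each direction, so a point exactly on a face counts as inside.

     BoundingBox box = {.min_coords = {0.0, 0.0, 0.0}, .max_coords = {1.0, 1.0, 1.0}};
     Cmpnts pt = {1.0, 0.5, 0.0};
     PetscBool inside = IsParticleInBox(&box, &pt);   // PETSC_TRUE (pt lies on the boundary)
*/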
119
120/**
121 * @brief Checks for particles outside the global physical domain and removes them.
122 *
123 * This function iterates through all particles local to the current MPI rank. It checks
124 * if a particle's position is inside ANY of the bounding boxes that define the subdomains
125 * of the MPI ranks. The list of all bounding boxes must be available on all ranks.
126 *
127 * If a particle is not found within any of the provided bounding boxes, it is considered
128 * to be "out of bounds" and is permanently removed from the simulation using
129 * DMSwarmRemovePointAtIndex().
130 *
131 * This function uses a robust "Restore-Remove-Reacquire" pattern for modifying the swarm.
132 * When a particle is removed, all pointers to swarm data are restored, the removal operation
133 * is performed, and then the pointers are reacquired if the loop needs to continue. This is
134 * the safest way to modify the swarm's structure during iteration.
135 *
136 * @note This function is often redundant if a robust particle location scheme (e.g., one that
137 * sets a `LOST` status) and a corresponding cleanup function (e.g.,
138 * `CheckAndRemoveLostParticles`) are already in use. Using both can introduce
139 * complexity.
140 *
141 * @warning This function contains a collective MPI operation (MPI_Allreduce). To avoid
142 * deadlocks, it MUST be called by ALL ranks in the communicator, even if they
143 * have no work to do. Do not place this call inside rank-specific conditional
144 * logic in your main loop.
145 *
146 * @param[in,out] user Pointer to the UserCtx structure containing the swarm and MPI info.
147 * @param[out] removedCountLocal Pointer to store the number of particles removed on THIS rank.
148 * @param[out] removedCountGlobal Pointer to store the total number of particles removed ACROSS ALL ranks.
149 * @param[in] bboxlist An array of BoundingBox structures for ALL MPI ranks, indexed 0 to (size-1).
150 * This array must be up-to-date and available on all ranks.
151 *
152 * @return PetscErrorCode 0 on success, or a non-zero PETSc error code on failure.
153 */
154/*
155PetscErrorCode CheckAndRemoveOutOfBoundsParticles(UserCtx *user,
156 PetscInt *removedCountLocal,
157 PetscInt *removedCountGlobal,
158 const BoundingBox *bboxlist)
159{
160 PetscErrorCode ierr;
161 DM swarm = user->swarm;
162 PetscInt nLocalInitial;
163 PetscReal *pos_p = NULL;
164 PetscInt64 *pid_p = NULL; // For better logging
165 PetscInt local_removed_count = 0;
166 PetscMPIInt global_removed_count_mpi = 0;
167 PetscMPIInt rank, size;
168
169 PetscFunctionBeginUser;
170 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
171 ierr = MPI_Comm_size(PETSC_COMM_WORLD, &size); CHKERRQ(ierr);
172 LOG_ALLOW(LOCAL, LOG_INFO, "[Rank %d] Checking for out-of-bounds particles...\n", rank);
173
174 // Initialize output parameters
175 *removedCountLocal = 0;
176 if (removedCountGlobal) *removedCountGlobal = 0;
177
178 ierr = DMSwarmGetLocalSize(swarm, &nLocalInitial); CHKERRQ(ierr);
179
180 if(nLocalInitial > 0){
181 // Get access to swarm fields. Getting PID makes for better log messages.
182 ierr = DMSwarmGetField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr);
183 ierr = DMSwarmGetField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr);
184
185 // --- Iterate BACKWARDS to handle index changes safely during removal ---
186 for (PetscInt p = nLocalInitial - 1; p >= 0; p--) {
187 PetscBool isInsideAnyBox = PETSC_FALSE;
188
189 // Create a temporary structure for the particle's position to pass to the check function.
190 // This assumes a helper function like IsParticleInBox exists. If not, inline the logic.
191 Cmpnts current_pos = {pos_p[3*p + 0], pos_p[3*p + 1], pos_p[3*p + 2]};
192
193 // Check if the particle is inside ANY of the rank bounding boxes
194 for (PetscMPIInt proc = 0; proc < size; proc++) {
195 if (IsParticleInBox(&bboxlist[proc], &current_pos)) {
196 isInsideAnyBox = PETSC_TRUE;
197 break; // Efficiently exit inner loop once a containing box is found
198 }
199 }
200
201 if (!isInsideAnyBox) {
202 LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Removing out-of-bounds particle [PID %lld] at local index %d. Pos: (%g, %g, %g)\n",
203 rank, (long long)pid_p[p], p, current_pos.x, current_pos.y, current_pos.z);
204
205 // --- Safe Removal Pattern: Restore -> Remove -> Reacquire ---
206
207 // 1. Restore all fields BEFORE modifying the swarm structure.
208 ierr = DMSwarmRestoreField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr);
209 ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr);
210
211 // 2. Remove the particle at the current local index 'p'.
212 ierr = DMSwarmRemovePointAtIndex(swarm, p); CHKERRQ(ierr);
213 local_removed_count++;
214
215 // 3. After removal, the swarm is modified. We MUST re-acquire pointers if we are to continue.
216 PetscInt nLocalCurrent;
217 ierr = DMSwarmGetLocalSize(swarm, &nLocalCurrent); CHKERRQ(ierr);
218 if (nLocalCurrent > 0) { // If there are still particles left to check
219 ierr = DMSwarmGetField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr);
220 ierr = DMSwarmGetField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr);
221 } else {
222 // All particles on this rank have been removed. Invalidate pointers and exit the loop.
223 pos_p = NULL;
224 pid_p = NULL;
225 // break;
226 }
227 }
228 } // End of backwards loop
229
230 // Restore fields if the loop completed without a final removal (i.e., pointers are still valid).
231 if (pos_p) {
232 ierr = DMSwarmRestoreField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr);
233 }
234 if (pid_p) {
235 ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr);
236 }
237 } // End of if (nLocalInitial > 0)
238
239 // Get the *final* local size for logging
240 PetscInt nLocalFinal;
241 ierr = DMSwarmGetLocalSize(swarm, &nLocalFinal); CHKERRQ(ierr);
242 LOG_ALLOW(LOCAL, LOG_INFO, "[Rank %d] Finished removing %d out-of-bounds particles. Final local size: %d.\n", rank, local_removed_count, nLocalFinal);
243
244 // --- Synchronize counts across all ranks ---
245 *removedCountLocal = local_removed_count;
246 if (removedCountGlobal) {
247 ierr = MPI_Allreduce(&local_removed_count, &global_removed_count_mpi, 1, MPI_INT, MPI_SUM, PetscObjectComm((PetscObject)swarm)); CHKERRQ(ierr);
248 *removedCountGlobal = global_removed_count_mpi;
249 LOG_ALLOW_SYNC(GLOBAL, LOG_INFO, "[Rank %d] Removed %d out-of-bounds particles globally.\n", rank, *removedCountGlobal);
250 }
251
252 PetscFunctionReturn(0);
253}
254*/
255
256/**
257 * @brief Checks for particles outside the global physical domain and removes them.
258 *
259 * This function iterates through all particles local to the current MPI rank. It determines
260 * if a particle's physical position is outside of ALL subdomains owned by the MPI processes.
261 * To perform this check, it requires a list of the bounding boxes for every MPI rank's
262 * subdomain.
263 *
264 * If a particle is not found within any of the subdomains, it is considered "out of bounds"
265 * and is permanently removed from the simulation using DMSwarmRemovePointAtIndex().
266 *
267 * The function is carefully designed to handle modifications to the DMSwarm during iteration
268 * safely. It does this by:
269 * 1. Iterating BACKWARDS through the local particle list, so removing an element at
270 * index 'p' does not affect the indices of subsequent elements to be checked (p-1, p-2, ...).
271 * 2. Using a robust "Restore-Remove-Reacquire" pattern. When a particle is removed,
272 * all pointers to swarm data are first restored, the removal operation is performed,
273 * and then the pointers are re-acquired for the remainder of the loop.
274 *
275 * @warning This function contains a collective MPI operation (MPI_Allreduce) at the end.
276 * To avoid deadlocks, it MUST be called by ALL ranks in the communicator, even if
277 * a rank has no local particles. Do not place this call inside rank-specific
278 * conditional logic in your main loop.
279 *
280 * @note This function can be redundant if a robust particle location scheme (e.g., one that
281 * sets a `LOST` status) and a corresponding cleanup function are already in use.
282 *
283 * @param[in,out] user Pointer to the UserCtx structure containing the swarm and MPI info.
284 * @param[out] removedCountLocal Pointer to an integer that will store the number of particles removed on THIS rank.
285 * @param[out] removedCountGlobal Pointer to an integer that will store the total number of particles removed ACROSS ALL ranks.
286 * @param[in] bboxlist An array of BoundingBox structures for ALL MPI ranks, indexed 0 to (size-1).
287 * This array must be up-to-date and available on every rank.
288 *
289 * @return PetscErrorCode 0 on success, or a non-zero PETSc error code on failure.
290 */
291PetscErrorCode CheckAndRemoveOutOfBoundsParticles(UserCtx *user,
292 PetscInt *removedCountLocal,
293 PetscInt *removedCountGlobal,
294 const BoundingBox *bboxlist)
295{
296 PetscErrorCode ierr;
297 DM swarm = user->swarm;
298 PetscInt nLocalInitial;
299 PetscReal *pos_p = NULL;
300 PetscInt64 *pid_p = NULL; // For better logging
301 PetscInt local_removed_count = 0;
302 PetscMPIInt global_removed_count_mpi = 0;
303 PetscMPIInt rank, size;
304
305 PetscFunctionBeginUser;
306 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
307 ierr = MPI_Comm_size(PETSC_COMM_WORLD, &size); CHKERRQ(ierr);
308 LOG_ALLOW(LOCAL, LOG_INFO, "[Rank %d] Checking for out-of-bounds particles...", rank);
309
310 // Initialize output parameters to ensure clean state
311 *removedCountLocal = 0;
312 if (removedCountGlobal) *removedCountGlobal = 0;
313
314 ierr = DMSwarmGetLocalSize(swarm, &nLocalInitial); CHKERRQ(ierr);
315
316 // Only proceed if there are particles to check on this rank.
317 // All ranks will still participate in the final collective MPI_Allreduce.
318 if (nLocalInitial > 0) {
319 // Get access to swarm fields once before the loop begins.
320 ierr = DMSwarmGetField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr);
321 ierr = DMSwarmGetField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr);
322
323 // --- Iterate BACKWARDS to handle index changes safely during removal ---
324 for (PetscInt p = nLocalInitial - 1; p >= 0; p--) {
325 PetscBool isInsideAnyBox = PETSC_FALSE;
326 Cmpnts current_pos = {pos_p[3*p + 0], pos_p[3*p + 1], pos_p[3*p + 2]};
327
328 // Check if the particle is inside ANY of the rank bounding boxes
329 for (PetscMPIInt proc = 0; proc < size; proc++) {
330 if (IsParticleInBox(&bboxlist[proc], &current_pos)) {
331 isInsideAnyBox = PETSC_TRUE;
332 break; // Particle is inside a valid domain, stop checking.
333 }
334 }
335
336 if (!isInsideAnyBox) {
337 LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Removing out-of-bounds particle [PID %lld] at local index %d. Pos: (%g, %g, %g)\n",
338 rank, (long long)pid_p[p], p, current_pos.x, current_pos.y, current_pos.z);
339
340 // --- Safe Removal Pattern: Restore -> Remove -> Reacquire ---
341 // This is the fix for the double-restore bug. Pointers are managed carefully
342 // within this block and then restored cleanly after the loop.
343
344 // 1. Restore all fields BEFORE modifying the swarm structure. This invalidates pos_p and pid_p.
345 ierr = DMSwarmRestoreField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr);
346 ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr);
347
348 // 2. Remove the particle at the current local index 'p'.
349 ierr = DMSwarmRemovePointAtIndex(swarm, p); CHKERRQ(ierr);
350 local_removed_count++;
351
352 // 3. After removal, re-acquire pointers ONLY if the loop is not finished.
353 PetscInt nLocalCurrent;
354 ierr = DMSwarmGetLocalSize(swarm, &nLocalCurrent); CHKERRQ(ierr);
355
356 if (nLocalCurrent > 0 && p > 0) { // Check if there are particles left AND iterations left
357 ierr = DMSwarmGetField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr);
358 ierr = DMSwarmGetField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr);
359 } else {
360 // All remaining particles were removed OR this was the last particle (p=0).
361 // Invalidate pointers to prevent the final restore call and exit the loop.
362 pos_p = NULL;
363 pid_p = NULL;
364 break;
365 }
366 }
367 } // End of backwards loop
368
369 // At the end, restore any valid pointers. This handles three cases:
370 // 1. No particles were removed: restores the original pointers.
371 // 2. Particles were removed mid-loop: restores the pointers from the last re-acquisition.
372 // 3. All particles were removed: pointers are NULL, so nothing is done.
373 if (pos_p) { ierr = DMSwarmRestoreField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr); }
374 if (pid_p) { ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr); }
375 } // End of if (nLocalInitial > 0)
376
377 PetscInt nLocalFinal;
378 ierr = DMSwarmGetLocalSize(swarm, &nLocalFinal); CHKERRQ(ierr);
379 LOG_ALLOW(LOCAL, LOG_INFO, "[Rank %d] Finished removing %d out-of-bounds particles. Final local size: %d.\n", rank, local_removed_count, nLocalFinal);
380
381 // --- Synchronize counts across all ranks ---
382 *removedCountLocal = local_removed_count;
383 if (removedCountGlobal) {
384 ierr = MPI_Allreduce(&local_removed_count, &global_removed_count_mpi, 1, MPI_INT, MPI_SUM, PetscObjectComm((PetscObject)swarm)); CHKERRQ(ierr);
385 *removedCountGlobal = global_removed_count_mpi;
386 // Use a synchronized log message so only one rank prints the global total.
387 LOG_ALLOW_SYNC(GLOBAL, LOG_INFO, "[Rank %d] Removed %d out-of-bounds particles globally.\n", rank, *removedCountGlobal);
388 }
389
390 PetscFunctionReturn(0);
391}
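/* Editor's sketch (not part of the original source): typical collective use of
   CheckAndRemoveOutOfBoundsParticles. 'bboxlist' must hold one BoundingBox per rank and be
   identical on every rank, and the call must be made by ALL ranks to avoid deadlock.

     PetscInt removedLocal = 0, removedGlobal = 0;
     ierr = CheckAndRemoveOutOfBoundsParticles(user, &removedLocal, &removedGlobal, bboxlist); CHKERRQ(ierr);
     // removedGlobal now holds the total number of particles discarded across all ranks
*/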
392
393/**
394 * @brief Removes particles that have been definitively flagged as LOST by the location algorithm.
395 *
396 * This function is the designated cleanup utility for particles that have exited the
397 * physical domain or could not be located for any other reason. It should be called
398 * after a particle location and migration phase is complete.
399 *
400 * It iterates through all locally owned particles and checks their `DMSwarm_location_status`
401 * field. If a particle's status is `LOST`, it is permanently removed from the simulation
402 * using `DMSwarmRemovePointAtIndex`.
403 *
404 * The function uses a robust "Restore-Remove-Reacquire" pattern for modifying the swarm.
405 * When a particle is removed, all pointers to swarm data are restored, the removal operation
406 * is performed, and then the pointers are reacquired if the loop needs to continue.
407 *
408 * @warning This function contains a collective MPI operation (MPI_Allreduce). To avoid
409 * deadlocks, it MUST be called by ALL ranks in the communicator. This implementation
410 * is designed to be safe by ensuring all ranks, even those with zero local
411 * particles, participate in the collective call.
412 *
413 * @param[in,out] user Pointer to the UserCtx structure containing the swarm.
414 * @param[out] removedCountLocal Pointer to store the number of particles removed on THIS rank.
415 * @param[out] removedCountGlobal Pointer to store the total number of particles removed ACROSS ALL ranks.
416 *
417 * @return PetscErrorCode 0 on success, or a non-zero PETSc error code on failure.
418 */
419/*
420PetscErrorCode CheckAndRemoveLostParticles(UserCtx *user,
421 PetscInt *removedCountLocal,
422 PetscInt *removedCountGlobal)
423{
424 PetscErrorCode ierr;
425 DM swarm = user->swarm;
426 PetscInt nLocalInitial;
427 PetscInt *status_p = NULL;
428 PetscInt64 *pid_p = NULL; // For better logging
429 PetscReal *pos_p = NULL; // For better logging
430 PetscInt local_removed_count = 0;
431 PetscMPIInt global_removed_count_mpi = 0;
432 PetscMPIInt rank;
433
434 PetscFunctionBeginUser;
435 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
436 LOG_ALLOW(LOCAL, LOG_INFO, "Rank %d: Checking for and removing LOST particles...\n", rank);
437
438 // Initialize output parameters
439 *removedCountLocal = 0;
440 if (removedCountGlobal) *removedCountGlobal = 0;
441
442 ierr = DMSwarmGetLocalSize(swarm, &nLocalInitial); CHKERRQ(ierr);
443
444 // Guard the local processing loop. If a rank has no particles, it will skip
445 // this block entirely but will still proceed to the collective MPI_Allreduce call.
446 // This is the critical fix to prevent deadlocks.
447 if (nLocalInitial > 0) {
448 // Get access to swarm fields. We get all three for robust processing and logging.
449 ierr = DMSwarmGetField(swarm, "DMSwarm_location_status", NULL, NULL, (void **)&status_p); CHKERRQ(ierr);
450 ierr = DMSwarmGetField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr);
451 ierr = DMSwarmGetField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr);
452
453 // --- Iterate BACKWARDS to handle index changes safely during removal ---
454 for (PetscInt p = nLocalInitial - 1; p >= 0; p--) {
455 if (status_p[p] == LOST) {
456 LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Removing LOST particle [PID %ld] at local index %d. Position: (%.4f, %.4f, %.4f).\n",rank, pid_p[p],p,pos_p[3*p], pos_p[3*p+1], pos_p[3*p+2]);
457
458 // --- Safe Removal Pattern: Restore -> Remove -> Reacquire ---
459
460 // 1. Restore all fields BEFORE modifying the swarm structure.
461 ierr = DMSwarmRestoreField(swarm, "DMSwarm_location_status", NULL, NULL, (void **)&status_p); CHKERRQ(ierr);
462 ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr);
463 ierr = DMSwarmRestoreField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr);
464
465 // 2. Remove the particle at the current local index 'p'.
466 ierr = DMSwarmRemovePointAtIndex(swarm, p); CHKERRQ(ierr);
467 local_removed_count++;
468
469 // 3. After removal, the swarm is modified. We MUST re-acquire pointers if we are to continue.
470 PetscInt nLocalCurrent;
471 ierr = DMSwarmGetLocalSize(swarm, &nLocalCurrent); CHKERRQ(ierr);
472 if (nLocalCurrent > 0) { // If there are still particles left to check
473 ierr = DMSwarmGetField(swarm, "DMSwarm_location_status", NULL, NULL, (void **)&status_p); CHKERRQ(ierr);
474 ierr = DMSwarmGetField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr);
475 ierr = DMSwarmGetField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr);
476 } else {
477 // All particles on this rank have been removed. Invalidate pointers and exit the loop.
478 status_p = NULL;
479 pid_p = NULL;
480 pos_p = NULL;
481 break;
482 }
483 }
484 } // End of backwards loop
485
486 // Restore fields if the loop completed without a final removal (i.e., pointers are still valid).
487 if (status_p) { ierr = DMSwarmRestoreField(swarm, "DMSwarm_location_status", NULL, NULL, (void **)&status_p); CHKERRQ(ierr); }
488 if (pid_p) { ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr); }
489 if (pos_p) { ierr = DMSwarmRestoreField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr); }
490 } // End of if (nLocalInitial > 0)
491
492 // Get the *final* local size for logging
493 PetscInt nLocalFinal;
494 ierr = DMSwarmGetLocalSize(swarm, &nLocalFinal); CHKERRQ(ierr);
495 LOG_ALLOW(LOCAL, LOG_INFO, "Rank %d: Finished removing %d LOST particles. Final local size: %d.\n", rank, local_removed_count, nLocalFinal);
496
497 // --- Synchronize counts across all ranks ---
498 // ALL ranks MUST reach this point to participate in the collective call.
499 *removedCountLocal = local_removed_count;
500 if (removedCountGlobal) {
501 ierr = MPI_Allreduce(&local_removed_count, &global_removed_count_mpi, 1, MPI_INT, MPI_SUM, PetscObjectComm((PetscObject)swarm)); CHKERRQ(ierr);
502 *removedCountGlobal = global_removed_count_mpi;
503 LOG_ALLOW_SYNC(GLOBAL, LOG_INFO, "[Rank %d] Removed %d LOST particles globally.\n", rank, *removedCountGlobal);
504 }
505
506 PetscFunctionReturn(0);
507}
508*/
509
510#undef __FUNCT__
511#define __FUNCT__ "CheckAndRemoveLostParticles"
512/**
513 * @brief Removes particles that have been definitively flagged as LOST by the location algorithm.
514 *
515 * This function is the designated cleanup utility for particles that have exited the
516 * physical domain or could not be located for any other reason. It should be called
517 * after a particle location and migration phase is complete.
518 *
519 * It iterates through all locally owned particles and checks their `DMSwarm_location_status`
520 * field. If a particle's status is `LOST`, it is permanently removed from the simulation
521 * using `DMSwarmRemovePointAtIndex`.
522 *
523 * The function is carefully designed to handle modifications to the DMSwarm during iteration
524 * safely. It does this by:
525 * 1. Iterating BACKWARDS through the local particle list, so removing an element at
526 * index 'p' does not affect the indices of subsequent elements to be checked (p-1, p-2, ...).
527 * 2. Using a robust "Restore-Remove-Reacquire" pattern. When a particle is removed,
528 * all pointers to swarm data are first restored, the removal operation is performed,
529 * and then the pointers are re-acquired for the remainder of the loop.
530 *
531 * @warning This function contains a collective MPI operation (MPI_Allreduce) at the end.
532 * To avoid deadlocks, it MUST be called by ALL ranks in the communicator, even if
533 * a rank has no local particles. Do not place this call inside rank-specific
534 * conditional logic in your main loop.
535 *
536 * @param[in,out] user Pointer to the UserCtx structure containing the swarm.
537 * @param[out] removedCountLocal Pointer to an integer that will store the number of particles removed on THIS rank.
538 * @param[out] removedCountGlobal Pointer to an integer that will store the total number of particles removed ACROSS ALL ranks.
539 *
540 * @return PetscErrorCode 0 on success, or a non-zero PETSc error code on failure.
541 */
542PetscErrorCode CheckAndRemoveLostParticles(UserCtx *user,
543 PetscInt *removedCountLocal,
544 PetscInt *removedCountGlobal)
545{
546 PetscErrorCode ierr;
547 DM swarm = user->swarm;
548 PetscInt nLocalInitial;
549 PetscInt *status_p = NULL;
550 PetscInt64 *pid_p = NULL; // For better logging
551 PetscReal *pos_p = NULL; // For better logging
552 PetscInt local_removed_count = 0;
553 PetscMPIInt global_removed_count_mpi = 0;
554 PetscMPIInt rank;
555
556 PetscFunctionBeginUser;
558 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
559 LOG_ALLOW(LOCAL, LOG_INFO, "Rank %d: Checking for and removing LOST particles...", rank);
560
561 // Initialize output parameters to ensure clean state
562 *removedCountLocal = 0;
563 if (removedCountGlobal) *removedCountGlobal = 0;
564
565 ierr = DMSwarmGetLocalSize(swarm, &nLocalInitial); CHKERRQ(ierr);
566
567 // Only proceed if there are particles to check on this rank.
568 // All ranks will still participate in the final collective MPI_Allreduce.
569 if (nLocalInitial > 0) {
570 // Get access to all swarm fields once before the loop begins.
571 ierr = DMSwarmGetField(swarm, "DMSwarm_location_status", NULL, NULL, (void **)&status_p); CHKERRQ(ierr);
572 ierr = DMSwarmGetField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr);
573 ierr = DMSwarmGetField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr);
574
575 // --- Iterate BACKWARDS to handle index changes safely during removal ---
576 for (PetscInt p = nLocalInitial - 1; p >= 0; p--) {
577 if (status_p[p] == LOST) {
578 LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Removing LOST particle [PID %lld] at local index %d. Position: (%.4f, %.4f, %.4f).\n",
579 rank, (long long)pid_p[p], p, pos_p[3*p], pos_p[3*p+1], pos_p[3*p+2]);
580
581 // --- Safe Removal Pattern: Restore -> Remove -> Reacquire ---
582 // This is the fix for the double-restore bug. Pointers are managed carefully
583 // within this block and then restored cleanly after the loop.
584
585 // 1. Restore all fields BEFORE modifying the swarm structure. This invalidates all pointers.
586 ierr = DMSwarmRestoreField(swarm, "DMSwarm_location_status", NULL, NULL, (void **)&status_p); CHKERRQ(ierr);
587 ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr);
588 ierr = DMSwarmRestoreField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr);
589
590 // 2. Remove the particle at the current local index 'p'.
591 ierr = DMSwarmRemovePointAtIndex(swarm, p); CHKERRQ(ierr);
592 local_removed_count++;
593
594 // 3. After removal, re-acquire pointers ONLY if the loop is not finished.
595 PetscInt nLocalCurrent;
596 ierr = DMSwarmGetLocalSize(swarm, &nLocalCurrent); CHKERRQ(ierr);
597
598 if (nLocalCurrent > 0 && p > 0) { // Check if there are particles left AND iterations left
599 ierr = DMSwarmGetField(swarm, "DMSwarm_location_status", NULL, NULL, (void **)&status_p); CHKERRQ(ierr);
600 ierr = DMSwarmGetField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr);
601 ierr = DMSwarmGetField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr);
602 } else {
603 // All remaining particles were removed OR this was the last particle (p=0).
604 // Invalidate pointers to prevent the final restore call and exit the loop.
605 status_p = NULL;
606 pid_p = NULL;
607 pos_p = NULL;
608 break;
609 }
610 }
611 } // End of backwards loop
612
613 // At the end, restore any valid pointers. This handles three cases:
614 // 1. No particles were removed: restores the original pointers.
615 // 2. Particles were removed mid-loop: restores the pointers from the last re-acquisition.
616 // 3. All particles were removed: pointers are NULL, so nothing is done.
617 if (status_p) { ierr = DMSwarmRestoreField(swarm, "DMSwarm_location_status", NULL, NULL, (void **)&status_p); CHKERRQ(ierr); }
618 if (pid_p) { ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr); }
619 if (pos_p) { ierr = DMSwarmRestoreField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr); }
620 } // End of if (nLocalInitial > 0)
621
622 PetscInt nLocalFinal;
623 ierr = DMSwarmGetLocalSize(swarm, &nLocalFinal); CHKERRQ(ierr);
624 LOG_ALLOW(LOCAL, LOG_INFO, "Rank %d: Finished removing %d LOST particles. Final local size: %d.\n", rank, local_removed_count, nLocalFinal);
625
626 // --- Synchronize counts across all ranks ---
627 *removedCountLocal = local_removed_count;
628 if (removedCountGlobal) {
629 ierr = MPI_Allreduce(&local_removed_count, &global_removed_count_mpi, 1, MPI_INT, MPI_SUM, PetscObjectComm((PetscObject)swarm)); CHKERRQ(ierr);
630 *removedCountGlobal = global_removed_count_mpi;
631 // Use a synchronized log message so only one rank prints the global total.
632 LOG_ALLOW_SYNC(GLOBAL, LOG_INFO, "[Rank %d] Removed %d LOST particles globally.\n", rank, *removedCountGlobal);
633 }
634
636 PetscFunctionReturn(0);
637}
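/* Editor's sketch (not part of the original source): CheckAndRemoveLostParticles is meant
   to run right after the location/migration phase has tagged unlocatable particles as LOST,
   and, like the out-of-bounds variant, must be called collectively by every rank.

     PetscInt lostLocal = 0, lostGlobal = 0;
     ierr = CheckAndRemoveLostParticles(user, &lostLocal, &lostGlobal); CHKERRQ(ierr);
*/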
638
639/**
640 * @brief Sets the target rank field ("DMSwarm_rank") for particles scheduled for migration.
641 *
642 * @param user Pointer to UserCtx (contains the swarm).
643 * @param migrationList Array of MigrationInfo structs containing local indices and target ranks.
644 * @param migrationCount Number of particles in the migrationList.
645 *
646 * @return PetscErrorCode 0 on success, non-zero on failure.
647 */
648PetscErrorCode SetMigrationRanks(UserCtx* user, const MigrationInfo *migrationList, PetscInt migrationCount)
649{
650 PetscErrorCode ierr;
651 DM swarm = user->swarm;
652 PetscInt p_idx;
653 PetscInt *rankField = NULL; // Field storing target rank
654
655 PetscFunctionBeginUser;
656
657 // Get write access to the target-rank field ("DMSwarm_rank") used by DMSwarmMigrate
658 ierr = DMSwarmGetField(swarm, "DMSwarm_rank", NULL, NULL, (void **)&rankField); CHKERRQ(ierr);
659
660 // Set the target rank for migrating particles
661 for(p_idx = 0; p_idx < migrationCount; ++p_idx) {
662 rankField[migrationList[p_idx].local_index] = migrationList[p_idx].target_rank;
663 }
664
665 ierr = DMSwarmRestoreField(swarm, "DMSwarm_rank", NULL, NULL, (void **)&rankField); CHKERRQ(ierr);
666 PetscFunctionReturn(0);
667}
668
669/**
670 * @brief Performs particle migration based on the pre-populated "DMSwarm_rank" field.
671 *
672 * Assumes SetMigrationRanks has already been called to mark particles with their target ranks.
673 * Calls DMSwarmMigrate to execute the communication and removal of un-migrated particles.
674 *
675 * @param user Pointer to the UserCtx structure containing the swarm.
676 *
677 * @return PetscErrorCode 0 on success, non-zero on failure.
678 */
679PetscErrorCode PerformMigration(UserCtx *user)
680{
681 PetscErrorCode ierr;
682 DM swarm = user->swarm;
683 PetscMPIInt rank;
684
685 PetscFunctionBeginUser;
686 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
687 LOG_ALLOW(GLOBAL, LOG_INFO, "Rank %d: Starting DMSwarmMigrate...\n", rank);
688
689 // Perform the migration. The second argument (PETSC_TRUE) tells DMSwarmMigrate to
690 // remove the sent particles from this rank once they have been communicated.
691 ierr = DMSwarmMigrate(swarm, PETSC_TRUE); CHKERRQ(ierr);
692
693 LOG_ALLOW(GLOBAL, LOG_INFO, "Rank %d: Migration complete.\n", rank);
694 PetscFunctionReturn(0);
695}
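/* Editor's sketch (not part of the original source): the intended two-step migration, using
   the MigrationInfo fields (local_index, target_rank) referenced in SetMigrationRanks. The
   way 'list' is filled here is purely illustrative.

     MigrationInfo list[2];
     list[0].local_index = 5;  list[0].target_rank = 1;   // send local particle 5 to rank 1
     list[1].local_index = 9;  list[1].target_rank = 2;   // send local particle 9 to rank 2
     ierr = SetMigrationRanks(user, list, 2); CHKERRQ(ierr);
     ierr = PerformMigration(user);           CHKERRQ(ierr);
*/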
696
697//-----------------------------------------------------------------------------
698// MODULE (COUNT): Calculates Particle Count Per Cell - REVISED FOR DMDAVecGetArray
699//-----------------------------------------------------------------------------
700/**
701 * @brief Counts particles in each cell of the DMDA 'da' and stores the result in user->ParticleCount.
702 * @ingroup scatter_module
703 *
704 * Zeros the user->ParticleCount vector, then iterates through local particles.
705 * Reads the **GLOBAL** cell index (I, J, K) stored in the "DMSwarm_CellID" field.
706 * Uses DMDAVecGetArray to access the local portion of the count vector and increments
707 * the count at the global index (I, J, K) if it belongs to the local patch (including ghosts).
708 *
709 * @param[in,out] user Pointer to the UserCtx structure containing da, swarm, and ParticleCount.
710 *
711 * @return PetscErrorCode Returns 0 on success, non-zero on failure.
712
713 */
714PetscErrorCode CalculateParticleCountPerCell(UserCtx *user) {
715 PetscErrorCode ierr;
716 DM da = user->da;
717 DM swarm = user->swarm;
718 Vec countVec = user->ParticleCount;
719 PetscInt nlocal, p;
720 PetscInt *global_cell_id_arr; // Read GLOBAL cell IDs
721 PetscScalar ***count_arr_3d; // Use 3D accessor
722 PetscInt64 *PID_arr;
723 PetscMPIInt rank;
724 char msg[ERROR_MSG_BUFFER_SIZE];
725 PetscInt particles_counted_locally = 0;
726
727 PetscFunctionBeginUser;
728 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
729
730 // --- Input Validation ---
731 if (!da) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "UserCtx->da is NULL.");
732 if (!swarm) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "UserCtx->swarm is NULL.");
733 if (!countVec) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "UserCtx->ParticleCount is NULL.");
734 // Check DOF of da
735 PetscInt count_dof;
736 ierr = DMDAGetInfo(da, NULL, NULL, NULL, NULL, NULL, NULL, NULL, &count_dof, NULL, NULL, NULL, NULL, NULL); CHKERRQ(ierr);
737 if (count_dof != 1) { PetscSNPrintf(msg, sizeof(msg), "countDM must have DOF=1, got %d.", count_dof); SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, msg); }
738
739 // --- Zero the count vector ---
740 ierr = VecSet(countVec, 0.0); CHKERRQ(ierr);
741
742 // --- Get Particle Data ---
743 LOG_ALLOW(GLOBAL,LOG_DEBUG, "CalculateParticleCountPerCell: Accessing particle data.\n");
744 ierr = DMSwarmGetLocalSize(swarm, &nlocal); CHKERRQ(ierr);
745 ierr = DMSwarmGetField(swarm,"DMSwarm_CellID", NULL, NULL, (void **)&global_cell_id_arr); CHKERRQ(ierr);
746 ierr = DMSwarmGetField(swarm,"DMSwarm_pid",NULL,NULL,(void **)&PID_arr);CHKERRQ(ierr);
747
748 // --- Get Grid Vector Array using DMDA accessor ---
749 LOG_ALLOW(GLOBAL, LOG_DEBUG, "CalculateParticleCountPerCell: Accessing ParticleCount vector array (using DMDAVecGetArray).\n");
750 ierr = DMDAVecGetArray(da, countVec, &count_arr_3d); CHKERRQ(ierr);
751
752 // Get local owned range for validation/logging if needed, but not for indexing with DMDAVecGetArray
753 PetscInt xs, ys, zs, xm, ym, zm;
754 ierr = DMDAGetCorners(da, &xs, &ys, &zs, &xm, &ym, &zm); CHKERRQ(ierr);
755
756 // --- Accumulate Counts Locally ---
757 LOG_ALLOW(LOCAL, LOG_DEBUG, "CalculateParticleCountPerCell (Rank %d): Processing %d local particles using GLOBAL CellIDs.\n",rank,nlocal);
758 for (p = 0; p < nlocal; p++) {
759 // Read the GLOBAL indices stored for this particle
760 PetscInt64 i = global_cell_id_arr[p * 3 + 0]; // Global i index
761 PetscInt64 j = global_cell_id_arr[p * 3 + 1]; // Global j index
762 PetscInt64 k = global_cell_id_arr[p * 3 + 2]; // Global k index
763
764 // *** Bounds check is implicitly handled by DMDAVecGetArray for owned+ghost region ***
765 // However, accessing outside this region using global indices WILL cause an error.
766 // A preliminary check might still be wise if global IDs could be wild.
767 // We rely on LocateAllParticles to provide valid global indices [0..IM-1] etc.
768 // *** ADD PRINTF HERE ***
769 LOG_LOOP_ALLOW(LOCAL,LOG_DEBUG,p,100,"[Rank %d CalcCount] Read CellID for p=%d, PID = %ld: (%ld, %ld, %ld)\n", rank, p,PID_arr[p],i, j, k);
770 // *** END PRINTF ***
771 // *** Access the local array using GLOBAL indices ***
772 // DMDAVecGetArray allows this, mapping global (I,J,K) to the correct
773 // location within the local ghosted array segment.
774 // This assumes (I,J,K) corresponds to a cell within the local owned+ghost region.
775 // If (I,J,K) is for a cell owned by *another* rank (and not in the ghost region
776 // of this rank), accessing count_arr_3d[K][J][I] will likely lead to a crash
777 // or incorrect results. This highlights why storing LOCAL indices is preferred
778 // for parallel runs. But proceeding with GLOBAL as requested:
779 if (i >= xs && i < xs + xm &&
780 j >= ys && j < ys + ym && // Adjust based on actual ghost width
781 k >= zs && k < zs + zm ) // This check prevents definite crashes but doesn't guarantee ownership
782 {
783 // Increment count at the location corresponding to GLOBAL index (I,J,K)
784 // LOG_ALLOW(LOCAL, LOG_DEBUG, "CalculateParticleCountPerCell (Rank %d): Particle %d with global CellID (%d, %d, %d) incremented with a particle.\n",rank, p, i, j, k);
785 count_arr_3d[k][j][i] += 1.0;
786 particles_counted_locally++;
787 } else {
788 // This particle's global ID is likely outside the range this rank handles (even ghosts)
789 LOG_ALLOW(LOCAL, LOG_DEBUG, "CalculateParticleCountPerCell (Rank %d): Skipping particle %ld with global CellID (%ld, %ld, %ld) - likely outside local+ghost range.\n",rank, PID_arr[p] , i, j, k);
790 }
791 }
792 LOG_ALLOW(LOCAL, LOG_DEBUG, "CalculateParticleCountPerCell (Rank %d): Local counting finished. Processed %d particles locally.\n", rank, particles_counted_locally);
793
794 // --- Restore Access ---
795 ierr = DMDAVecRestoreArray(da, countVec, &count_arr_3d); CHKERRQ(ierr);
796 ierr = DMSwarmRestoreField(swarm,"DMSwarm_CellID", NULL, NULL, (void **)&global_cell_id_arr); CHKERRQ(ierr);
797 ierr = DMSwarmRestoreField(swarm,"DMSwarm_pid",NULL,NULL,(void **)&PID_arr);CHKERRQ(ierr);
798
799 // --- Assemble Global Vector ---
800 LOG_ALLOW(GLOBAL, LOG_DEBUG, "CalculateParticleCountPerCell: Assembling global ParticleCount vector.\n");
801 ierr = VecAssemblyBegin(countVec); CHKERRQ(ierr);
802 ierr = VecAssemblyEnd(countVec); CHKERRQ(ierr);
803
804 // --- Verification Logging ---
805 PetscReal total_counted_particles = 0.0, max_count_in_cell = 0.0;
806 ierr = VecSum(countVec, &total_counted_particles); CHKERRQ(ierr);
807 PetscInt max_idx_global = -1;
808 ierr = VecMax(countVec, &max_idx_global, &max_count_in_cell); CHKERRQ(ierr);
809 LOG_ALLOW(GLOBAL, LOG_INFO, "CalculateParticleCountPerCell: Total counted globally = %.0f, Max count in cell = %.0f\n",
810 total_counted_particles, max_count_in_cell);
811
812 // --- ADD THIS DEBUGGING BLOCK ---
813 if (max_idx_global >= 0) { // Check if VecMax found a location
814 // Need to convert the flat global index back to 3D global index (I, J, K)
815 // Get global grid dimensions (Nodes, NOT Cells IM/JM/KM)
816 PetscInt M, N, P;
817 ierr = DMDAGetInfo(da, NULL, &M, &N, &P, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); CHKERRQ(ierr);
818 // Note: Assuming DOF=1 for countVec, index mapping uses node dimensions M,N,P from DMDA creation (IM+1, etc)
819 // Re-check if your DMDA uses cell counts (IM) or node counts (IM+1) for Vec layout. Let's assume Node counts M,N,P.
820 PetscInt Kmax = max_idx_global / (M * N);
821 PetscInt Jmax = (max_idx_global % (M * N)) / M;
822 PetscInt Imax = max_idx_global % M;
823 LOG_ALLOW(GLOBAL, LOG_INFO, " -> Max count located at global index (I,J,K) = (%d, %d, %d) [Flat index: %d]\n",
824 (int)Imax, (int)Jmax, (int)Kmax, (int)max_idx_global);
825
826 // Also, let's explicitly check the count at (0,0,0)
827 PetscScalar count_at_origin = 0.0;
828 PetscScalar ***count_arr_for_check;
829 ierr = DMDAVecGetArrayRead(da, countVec, &count_arr_for_check); CHKERRQ(ierr);
830 // Check bounds before accessing - crucial if using global indices
831 PetscInt xs, ys, zs, xm, ym, zm;
832 ierr = DMDAGetCorners(da, &xs, &ys, &zs, &xm, &ym, &zm); CHKERRQ(ierr);
833 if (0 >= xs && 0 < xs+xm && 0 >= ys && 0 < ys+ym && 0 >= zs && 0 < zs+zm) {
834 count_at_origin = count_arr_for_check[0][0][0]; // Access using global index (0,0,0)
835 } else {
836 // Origin is not on this rank (relevant for parallel, but check anyway)
837 count_at_origin = -999.0; // Indicate it wasn't accessible locally
838 }
839 ierr = DMDAVecRestoreArrayRead(da, countVec, &count_arr_for_check); CHKERRQ(ierr);
840 LOG_ALLOW(GLOBAL, LOG_INFO, " -> Count at global index (0,0,0) = %.1f\n", count_at_origin);
841
842 } else {
843 LOG_ALLOW(GLOBAL, LOG_WARNING, " -> VecMax did not return a location for the maximum value.\n");
844 }
845 // --- END DEBUGGING BLOCK ---
846
847 LOG_ALLOW(GLOBAL, LOG_INFO, "CalculateParticleCountPerCell: Particle counting complete.\n");
848
849 PetscFunctionReturn(0);
850}
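/* Editor's note (not part of the original source): the flat-index decomposition in the
   debugging block above assumes the natural ordering flat = K*(M*N) + J*M + I. For example,
   with M = 10 and N = 8, flat = 347 gives K = 347/80 = 4, J = (347%80)/10 = 2, I = 347%10 = 7,
   and indeed 4*80 + 2*10 + 7 = 347. */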
851
852// NOTE: AccumulateParticleField also needs the same modification if it is to handle global CellIDs
853// and use DMDAVecGetArray/DMDAVecGetArrayDOF
854
855// --- Helper function to resize swarm globally (add or remove) ---
856// This assumes removing excess particles means removing the globally last ones.
857PetscErrorCode ResizeSwarmGlobally(DM swarm, PetscInt N_target)
858{
859 PetscErrorCode ierr;
860 PetscInt N_current, nlocal_current;
861 PetscMPIInt rank;
862 MPI_Comm comm;
863
864 PetscFunctionBeginUser;
865 ierr = PetscObjectGetComm((PetscObject)swarm, &comm); CHKERRQ(ierr);
866 ierr = MPI_Comm_rank(comm, &rank); CHKERRQ(ierr);
867 ierr = DMSwarmGetSize(swarm, &N_current); CHKERRQ(ierr);
868 ierr = DMSwarmGetLocalSize(swarm, &nlocal_current); CHKERRQ(ierr);
869
870 PetscInt delta = N_target - N_current;
871
872 if (delta == 0) {
873 PetscFunctionReturn(0); // Nothing to do
874 }
875
876 if (delta < 0) { // Remove particles
877 PetscInt num_to_remove_global = -delta;
878 LOG_ALLOW(GLOBAL, LOG_INFO, "ResizeSwarmGlobally: Current size %d > target size %d. Removing %d particles globally.\n", N_current, N_target, num_to_remove_global);
879
880 // --- Strategy: Remove the globally last 'num_to_remove_global' particles ---
881 // Each rank needs to determine how many of its *local* particles fall
882 // within the range of global indices [N_target, N_current - 1].
883
884 PetscInt rstart = 0;
885 PetscInt rend;
886 // Global range owned by this rank [rstart, rend)
887
888 ierr = MPI_Exscan(&nlocal_current, &rstart, 1, MPIU_INT, MPI_SUM, comm); CHKERRMPI(ierr); // Use CHKERRMPI for MPI calls
889
890 rend = rstart + nlocal_current;
891
892
893 // Calculate how many local particles have global indices >= N_target
894 PetscInt nlocal_remove_count = 0;
895 if (rend > N_target) { // If this rank owns any particles slated for removal
896 PetscInt start_remove_local_idx = (N_target > rstart) ? (N_target - rstart) : 0;
897 nlocal_remove_count = nlocal_current - start_remove_local_idx;
898 }
899
900 if (nlocal_remove_count < 0) nlocal_remove_count = 0; // Sanity check
901 if (nlocal_remove_count > nlocal_current) nlocal_remove_count = nlocal_current; // Sanity check
902
903 LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Global range [%d, %d). Target size %d. Need to remove %d local particles (from end).\n", rank, rstart, rend, N_target, nlocal_remove_count);
904
905 // Remove the last 'nlocal_remove_count' particles *locally* by iterating backwards
906 PetscInt removal_ops_done = 0;
907 for (PetscInt p = nlocal_current - 1; p >= 0 && removal_ops_done < nlocal_remove_count; --p) {
908 ierr = DMSwarmRemovePointAtIndex(swarm, p); CHKERRQ(ierr);
909 removal_ops_done++;
910 }
911
912 if (removal_ops_done != nlocal_remove_count) {
913 SETERRQ(comm, PETSC_ERR_PLIB, "Rank %d: Failed to remove the expected number of local particles (%d != %d)", rank, removal_ops_done, nlocal_remove_count);
914 }
915 LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Removed %d local particles.\n", rank, removal_ops_done);
916
917 // Barrier to ensure all removals are done before size check
918 ierr = MPI_Barrier(comm); CHKERRMPI(ierr);
919
920 } else { // delta > 0: Add particles
921 PetscInt num_to_add_global = delta;
922 LOG_ALLOW(GLOBAL, LOG_INFO, "ResizeSwarmGlobally: Current size %d < target size %d. Adding %d particles globally.\n", N_current, N_target, num_to_add_global);
923 ierr = DMSwarmAddNPoints(swarm, (rank == 0) ? num_to_add_global : 0); CHKERRQ(ierr); // DMSwarmAddNPoints grows the LOCAL swarm, so only rank 0 adds the global difference
924 // Note: Added particles will have uninitialized field data. Reading will overwrite.
925 }
926
927 // Verify final size
928 PetscInt N_final;
929 ierr = DMSwarmGetSize(swarm, &N_final); CHKERRQ(ierr);
930 if (N_final != N_target) {
931 SETERRQ(comm, PETSC_ERR_PLIB, "Failed to resize swarm: expected %d particles, got %d", N_target, N_final);
932 }
933 LOG_ALLOW(GLOBAL, LOG_DEBUG, "ResizeSwarmGlobally: Swarm successfully resized to %d particles.\n", N_final);
934 PetscFunctionReturn(0);
935}
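/* Editor's note (not part of the original source): a worked example of the removal split
   above. With N_current = 100 split as 60 + 40 over two ranks, the owned global ranges are
   [0,60) and [60,100). For N_target = 75, rank 0 removes nothing (rend = 60 <= 75), while
   rank 1 gets start_remove_local_idx = 75 - 60 = 15 and removes 40 - 15 = 25 particles from
   its local end, leaving exactly 75 particles globally. */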
936
937/**
938 * @brief Checks particle count from a saved file and resizes the swarm globally.
939 *
940 * This function uses a robust parallel pattern: only Rank 0 reads the reference
941 * position file to determine the total number of particles saved (`N_file`).
942 * This count is then broadcast to all other ranks. Finally, each rank compares
943 * N_file with the current swarm size and participates in resizing if necessary.
944 *
945 * @param[in,out] user Pointer to the UserCtx structure containing the DMSwarm.
946 * @param[in] ti Time index for constructing the file name.
947 * @param[in] ext File extension (e.g., "dat").
948 *
949 * @return PetscErrorCode 0 on success, non-zero on failure.
950 */
951PetscErrorCode PreCheckAndResizeSwarm(UserCtx *user,
952 PetscInt ti,
953 const char *ext)
954{
955 PetscErrorCode ierr;
956 char filename[PETSC_MAX_PATH_LEN];
957 PetscInt N_file = 0; // The number of particles determined from the file
958 PetscInt N_current = 0;
959 MPI_Comm comm;
960 PetscMPIInt rank;
961 const char *refFieldName = "position";
962 const PetscInt bs = 3;
963
964 // NOTE: Your filename format has a hardcoded "_0" which is typical for
965 // PETSc when writing a parallel object from a single rank.
966 // If you ever write in parallel, PETSc might create one file per rank.
967 // The current logic assumes a single file written by one process.
968 const int placeholder_int = 0;
969
970 PetscFunctionBeginUser;
971 ierr = PetscObjectGetComm((PetscObject)user->swarm, &comm); CHKERRQ(ierr);
972 ierr = MPI_Comm_rank(comm, &rank); CHKERRQ(ierr);
973
974 // --- Construct filename using the specified format ---
975 // results/%s%05<PetscInt_FMT>_%d.%s
976 ierr = PetscSNPrintf(filename, sizeof(filename), "results/%s%05" PetscInt_FMT "_%d.%s",
977 refFieldName, ti, placeholder_int, ext); CHKERRQ(ierr);
978 // Note: Make sure the "results" directory exists or handle directory creation elsewhere.
979
980 LOG_ALLOW(GLOBAL, LOG_INFO, "PreCheckAndResizeSwarm: Checking particle count for timestep %d using ref file '%s'.\n", ti, filename);
981
982 // --- Rank 0 reads the file to determine the size ---
983 if (rank == 0) {
984 PetscBool fileExists = PETSC_FALSE;
985 ierr = PetscTestFile(filename, 'r', &fileExists); CHKERRQ(ierr);
986
987 if (!fileExists) {
988 // Set a special value to indicate file not found, then broadcast it.
989 N_file = -1;
990 LOG_ALLOW(GLOBAL, LOG_ERROR, "Rank 0: Mandatory reference file '%s' not found for timestep %d.\n", filename, ti);
991 } else {
992 PetscViewer viewer;
993 Vec tmpVec;
994 PetscInt vecSize;
995
996 ierr = VecCreate(PETSC_COMM_SELF, &tmpVec); CHKERRQ(ierr); // Create a SEQUENTIAL vector
997 ierr = PetscViewerBinaryOpen(PETSC_COMM_SELF, filename, FILE_MODE_READ, &viewer); CHKERRQ(ierr);
998 ierr = VecLoad(tmpVec, viewer); CHKERRQ(ierr);
999 ierr = PetscViewerDestroy(&viewer); CHKERRQ(ierr);
1000
1001 ierr = VecGetSize(tmpVec, &vecSize); CHKERRQ(ierr);
1002 ierr = VecDestroy(&tmpVec); CHKERRQ(ierr);
1003
1004 if (vecSize % bs != 0) {
1005 N_file = -2; // Special error code for bad file format
1006 LOG_ALLOW(GLOBAL, LOG_ERROR, "Rank 0: Vector size %d from file '%s' is not divisible by block size %d.\n", vecSize, filename, bs);
1007 } else {
1008 N_file = vecSize / bs;
1009 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Rank 0: Found %d particles in file.\n", N_file);
1010 }
1011 }
1012 }
1013
1014 // --- Broadcast the particle count (or error code) from Rank 0 to all other ranks ---
1015 ierr = MPI_Bcast(&N_file, 1, MPIU_INT, 0, comm); CHKERRMPI(ierr);
1016
1017 // --- All ranks check for errors and abort if necessary ---
1018 if (N_file == -1) {
1019 SETERRQ(comm, PETSC_ERR_FILE_OPEN, "Mandatory reference file '%s' not found for timestep %d (as determined by Rank 0).", filename, ti);
1020 }
1021 if (N_file == -2) {
1022 SETERRQ(comm, PETSC_ERR_FILE_READ, "Reference file '%s' has incorrect format (as determined by Rank 0).", filename);
1023 }
1024 if (N_file < 0) {
1025 SETERRQ(comm, PETSC_ERR_PLIB, "Received invalid particle count %d from Rank 0.", N_file);
1026 }
1027
1028
1029 // --- Now all ranks have the correct N_file, compare and resize if needed ---
1030 ierr = DMSwarmGetSize(user->swarm, &N_current); CHKERRQ(ierr);
1031
1032 if (N_file != N_current) {
1033 LOG_ALLOW(GLOBAL, LOG_INFO, "Swarm size %d differs from file size %d. Resizing swarm globally.\n", N_current, N_file);
1034 ierr = ResizeSwarmGlobally(user->swarm, N_file); CHKERRQ(ierr);
1035 } else {
1036 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Swarm size (%d) already matches file size. No resize needed.\n", N_current);
1037 }
1038
1039 // Also update the context
1040 user->simCtx->np = N_file;
1041
1042 PetscFunctionReturn(0);
1043}
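/* Editor's sketch (not part of the original source): intended call order when restarting
   from saved data. 'ReadAllSwarmFields' is a hypothetical reader name used only to show
   where the pre-check fits; the "dat" extension matches the example in the doc comment.

     ierr = PreCheckAndResizeSwarm(user, ti, "dat"); CHKERRQ(ierr);   // swarm size now matches the file
     // ierr = ReadAllSwarmFields(user, ti); CHKERRQ(ierr);           // then load position, velocity, ...
*/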
1044
1045/**
1046 * @brief Re-initializes the positions of particles currently on this rank if this rank owns
1047 * part of the designated inlet surface.
1048 *
1049 * This function is intended for `user->ParticleInitialization == 0` (Surface Initialization mode)
1050 * and is typically called after an initial migration step (e.g., in `PerformInitialSetup`).
1051 * It ensures that all particles that should originate from the inlet surface and are now
1052 * on the correct MPI rank are properly distributed across that rank's portion of the inlet.
1053 *
1054 * @param user Pointer to the UserCtx structure, containing simulation settings and grid information.
1055 * @param currentTime Current simulation time (used for logging).
1056 * @param step Current simulation step number (used for logging).
1057 * @return PetscErrorCode 0 on success, non-zero on failure.
1058 */
1059PetscErrorCode ReinitializeParticlesOnInletSurface(UserCtx *user, PetscReal currentTime, PetscInt step)
1060{
1061 PetscErrorCode ierr;
1062 PetscMPIInt rank; // MPI rank of the current process
1063 DM swarm = user->swarm; // The particle swarm DM
1064 PetscReal *positions_field = NULL; // Pointer to swarm field for physical positions
1065 PetscReal *pos_phy_field = NULL; // Pointer to swarm field for physical positions (backup)
1066 PetscInt64 *particleIDs = NULL; // Pointer to swarm field for Particle IDs (for logging)
1067 PetscInt *cell_ID_field = NULL; // Pointer to swarm field for Cell IDs (for resetting after migration)
1068 const Cmpnts ***coor_nodes_local_array; // Read-only access to local node coordinates
1069 Vec Coor_local; // Local vector for node coordinates
1070 DMDALocalInfo info; // Local grid information (node-based) from user->da
1071 PetscInt xs_gnode_rank, ys_gnode_rank, zs_gnode_rank; // Local starting node indices (incl. ghosts) of rank's DA
1072 PetscInt IM_nodes_global, JM_nodes_global, KM_nodes_global; // Global node counts
1073
1074 PetscRandom rand_logic_reinit_i, rand_logic_reinit_j, rand_logic_reinit_k; // RNGs for re-placement
1075 PetscInt nlocal_current; // Number of particles currently on this rank
1076 PetscInt particles_actually_reinitialized_count = 0; // Counter for logging
1077 PetscBool can_this_rank_service_inlet = PETSC_FALSE; // Flag
1078
1079 PetscFunctionBeginUser;
1080
1081 // This function is only relevant for surface initialization mode and if an inlet face is defined.
1082 if (user->simCtx->ParticleInitialization != 0 || !user->inletFaceDefined) {
1083 PetscFunctionReturn(0);
1084 }
1085
1086 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
1087 ierr = DMSwarmGetLocalSize(swarm, &nlocal_current); CHKERRQ(ierr);
1088
1089 // If no particles on this rank, nothing to do.
1090 if (nlocal_current == 0) {
1091 LOG_ALLOW(LOCAL, LOG_DEBUG, "[T=%.4f, Step=%d] Rank %d has no local particles to re-initialize on inlet.\n", currentTime, step, rank);
1092 PetscFunctionReturn(0);
1093 }
1094
1095 // Get DMDA information for the node-centered coordinate grid (user->da)
1096 ierr = DMDAGetLocalInfo(user->da, &info); CHKERRQ(ierr);
1097 ierr = DMDAGetInfo(user->da, NULL, &IM_nodes_global, &JM_nodes_global, &KM_nodes_global, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL); CHKERRQ(ierr);
1098 ierr = DMDAGetGhostCorners(user->da, &xs_gnode_rank, &ys_gnode_rank, &zs_gnode_rank, NULL, NULL, NULL); CHKERRQ(ierr);
1099
1100 // Check if this rank is responsible for (part of) the designated inlet surface
1101 ierr = CanRankServiceInletFace(user, &info, IM_nodes_global, JM_nodes_global, KM_nodes_global, &can_this_rank_service_inlet); CHKERRQ(ierr);
1102
1103 if (!can_this_rank_service_inlet) {
1104 LOG_ALLOW(LOCAL, LOG_DEBUG, "[T=%.4f, Step=%d] Rank %d cannot service inlet face %d. Skipping re-initialization of %d particles.\n", currentTime, step, rank, user->identifiedInletBCFace, nlocal_current);
1105 PetscFunctionReturn(0);
1106 }
1107
1108 LOG_ALLOW(GLOBAL, LOG_INFO, "[T=%.4f, Step=%d] Rank %d is on inlet face %d. Attempting to re-place %d local particles.\n", currentTime, step, rank, user->identifiedInletBCFace, nlocal_current);
1109
1110 // Get coordinate array and swarm fields for modification
1111 ierr = DMGetCoordinatesLocal(user->da, &Coor_local); CHKERRQ(ierr);
1112 ierr = DMDAVecGetArrayRead(user->fda, Coor_local, (void*)&coor_nodes_local_array); CHKERRQ(ierr);
1113 ierr = DMSwarmGetField(swarm, "position", NULL, NULL, (void**)&positions_field); CHKERRQ(ierr);
1114 ierr = DMSwarmGetField(swarm, "pos_phy", NULL, NULL, (void**)&pos_phy_field); CHKERRQ(ierr);
1115 ierr = DMSwarmGetField(swarm, "DMSwarm_pid", NULL, NULL, (void**)&particleIDs); CHKERRQ(ierr); // For logging
1116 ierr = DMSwarmGetField(swarm, "DMSwarm_CellID", NULL, NULL, (void**)&cell_ID_field); CHKERRQ(ierr);
1117
1118 // Initialize fresh RNGs for this re-placement to ensure good distribution
1119 ierr = InitializeLogicalSpaceRNGs(&rand_logic_reinit_i, &rand_logic_reinit_j, &rand_logic_reinit_k); CHKERRQ(ierr);
1120 // Optional: Seed RNGs for deterministic behavior if required, e.g., based on rank and step.
1121 // PetscRandomSetSeed(rand_logic_i, (unsigned long)rank*1000 + step + 100); PetscRandomSeed(rand_logic_i); // Example
1122
1123 // Loop over all particles currently local to this rank
1124 for (PetscInt p = 0; p < nlocal_current; p++) {
1125 PetscInt ci_metric_lnode, cj_metric_lnode, ck_metric_lnode; // Local node indices (of rank's DA patch) for cell origin
1126 PetscReal xi_metric_logic, eta_metric_logic, zta_metric_logic; // Intra-cell logical coordinates
1127 Cmpnts phys_coords; // To store newly calculated physical coordinates
1128
1129 // Get random cell on this rank's portion of the inlet and random logical coords within it
1130 ierr = GetRandomCellAndLogicOnInletFace(user, &info, xs_gnode_rank, ys_gnode_rank, zs_gnode_rank,
1131 IM_nodes_global, JM_nodes_global, KM_nodes_global,
1132 &rand_logic_reinit_i, &rand_logic_reinit_j, &rand_logic_reinit_k,
1133 &ci_metric_lnode, &cj_metric_lnode, &ck_metric_lnode,
1134 &xi_metric_logic, &eta_metric_logic, &zta_metric_logic); CHKERRQ(ierr);
1135
1136 // Convert these logical coordinates to physical coordinates
1137 ierr = MetricLogicalToPhysical(user, coor_nodes_local_array,
1138 ci_metric_lnode, cj_metric_lnode, ck_metric_lnode,
1139 xi_metric_logic, eta_metric_logic, zta_metric_logic,
1140 &phys_coords); CHKERRQ(ierr);
1141
1142 // Update the particle's position in the swarm fields
1143 positions_field[3*p+0] = phys_coords.x;
1144 positions_field[3*p+1] = phys_coords.y;
1145 positions_field[3*p+2] = phys_coords.z;
1146 pos_phy_field[3*p+0] = phys_coords.x;
1147 pos_phy_field[3*p+1] = phys_coords.y;
1148 pos_phy_field[3*p+2] = phys_coords.z;
1149 particles_actually_reinitialized_count++;
1150
1151 cell_ID_field[3*p+0] = -1;
1152 cell_ID_field[3*p+1] = -1;
1153 cell_ID_field[3*p+2] = -1;
1154
1155 LOG_LOOP_ALLOW(LOCAL, LOG_DEBUG, p, (nlocal_current > 20 ? nlocal_current/10 : 1), // Sampled logging
 1156                    "ReInit - Rank %d: PID %ld (idx %ld) RE-PLACED. CellOriginNode(locDAIdx):(%d,%d,%d). LogicCoords: (%.2e,%.2e,%.2e). PhysCoords: (%.6f,%.6f,%.6f).\n",
1157 rank, particleIDs[p], (long)p,
1158 ci_metric_lnode, cj_metric_lnode, ck_metric_lnode,
1159 xi_metric_logic, eta_metric_logic, zta_metric_logic,
1160 phys_coords.x, phys_coords.y, phys_coords.z);
1161 }
1162
1163 // Logging summary of re-initialization
1164 if (particles_actually_reinitialized_count > 0) {
1165 LOG_ALLOW(GLOBAL, LOG_INFO, "[T=%.4f, Step=%d] Rank %d (on inlet face %d) successfully re-initialized %d of %d local particles.\n", currentTime, step, rank, user->identifiedInletBCFace, particles_actually_reinitialized_count, nlocal_current);
1166 } else if (nlocal_current > 0) { // This case should ideally not be hit if can_this_rank_service_inlet was true and particles were present.
1167 LOG_ALLOW(GLOBAL, LOG_WARNING, "[T=%.4f, Step=%d] Rank %d claimed to service inlet face %d, but re-initialized 0 of %d local particles. This may indicate an issue if particles were expected to be re-placed.\n", currentTime, step, rank, user->identifiedInletBCFace, nlocal_current);
1168 }
1169
1170 // Cleanup: Destroy RNGs and restore swarm fields/coordinate array
1171 ierr = PetscRandomDestroy(&rand_logic_reinit_i); CHKERRQ(ierr);
1172 ierr = PetscRandomDestroy(&rand_logic_reinit_j); CHKERRQ(ierr);
1173 ierr = PetscRandomDestroy(&rand_logic_reinit_k); CHKERRQ(ierr);
1174
1175 ierr = DMSwarmRestoreField(swarm, "position", NULL, NULL, (void**)&positions_field); CHKERRQ(ierr);
1176 ierr = DMSwarmRestoreField(swarm, "pos_phy", NULL, NULL, (void**)&pos_phy_field); CHKERRQ(ierr);
1177 ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void**)&particleIDs); CHKERRQ(ierr);
1178 ierr = DMSwarmRestoreField(swarm, "DMSwarm_CellID", NULL, NULL, (void**)&cell_ID_field); CHKERRQ(ierr);
1179 ierr = DMDAVecRestoreArrayRead(user->fda, Coor_local, (void*)&coor_nodes_local_array); CHKERRQ(ierr);
1180
1181
1182 PetscFunctionReturn(0);
1183}
1184
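/*
 * Illustrative usage sketch (not part of the solver): a typical per-timestep call
 * site for the inlet re-initialization above. The driver loop, the derivation of
 * the current time from user->simCtx->dt, and the decision of when to re-inject
 * (e.g. only for surface-based particle initialization) are assumptions of this
 * sketch.
 *
 * @code
 *   PetscReal t = user->simCtx->dt * (PetscReal)step;   // current physical time (assumed convention)
 *   ierr = ReinitializeParticlesOnInletSurface(user, t, step); CHKERRQ(ierr);
 * @endcode
 */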
1185// Comparison function needed for qsort
1186static int compare_PetscInt64(const void *a, const void *b) {
1187 PetscInt64 val_a = *(const PetscInt64*)a;
1188 PetscInt64 val_b = *(const PetscInt64*)b;
1189 if (val_a < val_b) return -1;
1190 if (val_a > val_b) return 1;
1191 return 0;
1192}
1193
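/*
 * Illustrative sketch: the comparator above can be used with the C standard
 * library's qsort (requires <stdlib.h>). The snapshot helper below uses
 * PetscSortInt64 instead, so this is only shown as the portable alternative.
 *
 * @code
 *   PetscInt64 pids[4] = {42, 7, 19, 3};
 *   qsort(pids, 4, sizeof(PetscInt64), compare_PetscInt64);   // pids becomes {3, 7, 19, 42}
 * @endcode
 */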
1194/**
1195 * @brief Creates a sorted snapshot of all Particle IDs (PIDs) from a raw data array.
1196 * @ingroup ParticleUtils
1197 *
1198 * This function is a crucial helper for the migration process. It captures the state of
1199 * which particles are on the current MPI rank *before* migration occurs by taking a
1200 * pointer to the swarm's raw PID data array. The resulting sorted array can then be used
1201 * with an efficient binary search to quickly identify newcomer particles after migration.
1202 *
1203 * This function does NOT call DMSwarmGetField/RestoreField. It is the caller's
1204 * responsibility to acquire the `pid_field` pointer before calling and restore it afterward.
1205 *
1206 * @param[in] pid_field A read-only pointer to the raw array of PIDs for the local swarm.
1207 * @param[in] n_local The number of particles currently on the local rank.
1208 * @param[out] pids_snapshot_out A pointer to a `PetscInt64*` array. This function will
1209 * allocate memory for this array, and the caller is
1210 * responsible for freeing it with `PetscFree()` when it
1211 * is no longer needed.
1212 *
1213 * @return PetscErrorCode 0 on success, or a non-zero PETSc error code on failure.
1214 */
1215PetscErrorCode GetLocalPIDSnapshot(const PetscInt64 pid_field[],
1216 PetscInt n_local,
1217 PetscInt64 **pids_snapshot_out)
1218{
1219 PetscErrorCode ierr;
1220 PetscMPIInt rank;
1221
1222 PetscFunctionBeginUser;
1223
1224 // --- 1. Input Validation ---
1225 if (!pids_snapshot_out) {
1226 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "Output pointer pids_snapshot_out is NULL.");
1227 }
1228 // If n_local > 0, pid_field must not be NULL.
1229 if (n_local > 0 && !pid_field) {
1230 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "Input pid_field pointer is NULL for n_local > 0.");
1231 }
1232
1233 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
1234 LOG_ALLOW(LOCAL, LOG_DEBUG, "[Rank %d]: Creating PID snapshot for %d local particles.\n", rank, n_local);
1235
1236 // If there are no local particles, the snapshot is empty (NULL).
1237 if (n_local == 0) {
1238 *pids_snapshot_out = NULL;
1239 PetscFunctionReturn(0);
1240 }
1241
1242 // --- 2. Allocate Memory for the Snapshot ---
1243 ierr = PetscMalloc1(n_local, pids_snapshot_out); CHKERRQ(ierr);
1244
1245 // --- 3. Copy Data ---
1246 // Perform a fast memory copy from the provided array to our new snapshot array.
1247 ierr = PetscMemcpy(*pids_snapshot_out, pid_field, n_local * sizeof(PetscInt64)); CHKERRQ(ierr);
1248 LOG_ALLOW(LOCAL, LOG_DEBUG, "[Rank %d]: Copied %d PIDs.\n", rank, n_local);
1249
1250 // --- 4. Sort the Snapshot Array ---
1251 // Sorting enables fast binary search lookups later.
1252 ierr = PetscSortInt64(n_local, *pids_snapshot_out); CHKERRQ(ierr);
1253 LOG_ALLOW(LOCAL, LOG_DEBUG, "[Rank %d]: PID snapshot sorted successfully.\n", rank);
1254
1255 PetscFunctionReturn(0);
1256}
1257
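/*
 * Illustrative usage sketch (not part of the solver) for GetLocalPIDSnapshot:
 * the caller owns both the DMSwarm field access and the returned array. The
 * variable names (swarm, n_before, pids_before) are assumptions of this sketch.
 *
 * @code
 *   PetscInt    n_before;
 *   PetscInt64 *pid_field = NULL, *pids_before = NULL;
 *   ierr = DMSwarmGetLocalSize(swarm, &n_before); CHKERRQ(ierr);
 *   ierr = DMSwarmGetField(swarm, "DMSwarm_pid", NULL, NULL, (void**)&pid_field); CHKERRQ(ierr);
 *   ierr = GetLocalPIDSnapshot(pid_field, n_before, &pids_before); CHKERRQ(ierr);
 *   ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void**)&pid_field); CHKERRQ(ierr);
 *   // ... migration happens here ...
 *   ierr = FlagNewcomersForLocation(swarm, n_before, pids_before); CHKERRQ(ierr);
 *   ierr = PetscFree(pids_before); CHKERRQ(ierr);
 * @endcode
 */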
1258
1259/**
1260 * @brief Safely adds a new migration task to a dynamically sized list.
1261 *
1262 * This utility function manages a dynamic array of MigrationInfo structs. It appends
1263 * a new entry to the list and automatically doubles the array's capacity using
1264 * `PetscRealloc` if the current capacity is exceeded. This prevents buffer overflows
1265 * and avoids the need to know the number of migrating particles in advance.
1266 *
1267 * @param[in,out] migration_list_p A pointer to the MigrationInfo array pointer. The function
1268 * will update this pointer if the array is reallocated.
1269 * @param[in,out] capacity_p A pointer to an integer holding the current allocated
1270 * capacity of the list (in number of elements). This will be
1271 * updated upon reallocation.
1272 * @param[in,out] count_p A pointer to an integer holding the current number of
1273 * items in the list. This will be incremented by one.
1274 * @param[in] particle_local_idx The local index (from 0 to nlocal-1) of the particle
1275 * that needs to be migrated.
1276 * @param[in] destination_rank The target MPI rank for the particle.
1277 *
1278 * @return PetscErrorCode 0 on success, or a non-zero PETSc error code on failure (e.g., from memory allocation).
1279 */
1280PetscErrorCode AddToMigrationList(MigrationInfo **migration_list_p,
1281 PetscInt *capacity_p,
1282 PetscInt *count_p,
1283 PetscInt particle_local_idx,
1284 PetscMPIInt destination_rank)
1285{
1286 PetscErrorCode ierr;
1287 PetscMPIInt rank;
1288
1289 PetscFunctionBeginUser;
1290
1291 // --- 1. Input Validation ---
1292 if (!migration_list_p || !capacity_p || !count_p) {
1293 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "Null pointer provided to AddToMigrationList for list management.");
1294 }
1295
1296 // --- 2. Check if the list needs to be resized ---
1297 if (*count_p >= *capacity_p) {
1298 PetscInt old_capacity = *capacity_p;
1299 // Start with a reasonable base capacity, then double for subsequent reallocations.
1300 PetscInt new_capacity = (old_capacity == 0) ? 16 : old_capacity * 2;
1301
1302 // Use PetscRealloc for safe memory reallocation.
1303 // It handles allocating new memory, copying old data, and freeing the old block.
1304 // The first argument to PetscRealloc is the new size in BYTES.
1305 ierr = PetscRealloc(new_capacity * sizeof(MigrationInfo), migration_list_p); CHKERRQ(ierr);
1306
1307 *capacity_p = new_capacity; // Update the capacity tracker
1308
1309 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
1310 LOG_ALLOW(LOCAL, LOG_DEBUG, "AddToMigrationList [Rank %d]: Reallocated migrationList capacity from %d to %d.\n",
1311 rank, old_capacity, new_capacity);
1312 }
1313
1314 // --- 3. Add the new migration data to the list ---
1315 // Dereference the pointer-to-a-pointer to get the actual array.
1316 MigrationInfo *list = *migration_list_p;
1317
1318 list[*count_p].local_index = particle_local_idx;
1319 list[*count_p].target_rank = destination_rank;
1320
1321 // --- 4. Increment the count of items in the list ---
1322 (*count_p)++;
1323
1324 PetscFunctionReturn(0);
1325}
1326
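/*
 * Illustrative usage sketch (not part of the solver) for AddToMigrationList:
 * build the list incrementally, hand it to the migration helpers, then free it.
 * The per-particle decision (must_leave, target_rank) is a placeholder for the
 * logic in the settlement orchestrator further below.
 *
 * @code
 *   MigrationInfo *migrationList = NULL;
 *   PetscInt       listCapacity  = 0, migrationCount = 0;
 *   for (PetscInt p = 0; p < nlocal; ++p) {
 *     if (must_leave[p]) {  // hypothetical per-particle flag
 *       ierr = AddToMigrationList(&migrationList, &listCapacity, &migrationCount,
 *                                 p, target_rank[p]); CHKERRQ(ierr);
 *     }
 *   }
 *   ierr = SetMigrationRanks(user, migrationList, migrationCount); CHKERRQ(ierr);
 *   ierr = PerformMigration(user); CHKERRQ(ierr);
 *   ierr = PetscFree(migrationList); CHKERRQ(ierr);
 * @endcode
 */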
1327/**
1328 * @brief Identifies newly arrived particles after migration and flags them for a location search.
1329 * @ingroup ParticleMotion
1330 *
1331 * This function is a critical component of the iterative migration process managed by
1332 * the main particle settlement orchestrator (e.g., `SettleParticles`). After a
1333 * `DMSwarmMigrate` call, each rank's local particle list is a new mix of resident
1334 * particles and newly received ones. This function's job is to efficiently identify
1335 * these "newcomers" and set their `DMSwarm_location_status` field to `NEEDS_LOCATION`.
1336 *
1337 * This ensures that in the subsequent pass of the migration `do-while` loop, only the
1338 * newly arrived particles are processed by the expensive location algorithm, preventing
1339 * redundant work on particles that are already settled on the current rank.
1340 *
1341 * The identification is done by comparing the PIDs of particles currently on the rank
1342 * against a "snapshot" of PIDs taken *before* the migration occurred.
1343 *
1344 * @param[in] swarm The DMSwarm object, which has just completed a migration.
1345 * @param[in] n_local_before The number of particles that were on this rank *before* the
1346 * migration was performed.
1347 * @param[in] pids_before A pre-sorted array of the PIDs that were on this rank before
1348 * the migration. This is used for fast lookups.
1349 *
1350 * @return PetscErrorCode 0 on success, or a non-zero PETSc error code on failure.
1351 *
1352 * @note This function assumes the `pids_before` array is sorted in ascending order to
1353 * enable the use of an efficient binary search.
1354 */
1355PetscErrorCode FlagNewcomersForLocation(DM swarm,
1356 PetscInt n_local_before,
1357 const PetscInt64 pids_before[])
1358{
1359 PetscErrorCode ierr;
1360 PetscMPIInt rank;
1361 PetscInt n_local_after;
1362 PetscInt newcomer_count = 0;
1363
1364 // Pointers to the swarm data fields we will read and modify
1365 PetscInt64 *pid_field_after = NULL;
1366 PetscInt *status_field_after = NULL;
1367 PetscInt *cell_field_after = NULL;
1368
1369 PetscFunctionBeginUser;
1370
1371 // --- 1. Input Validation and Basic Setup ---
1372 if (!swarm) {
1373 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Input DMSwarm is NULL in FlagNewcomersForLocation.");
1374 }
1375 // If n_local_before > 0, the corresponding PID array must not be null.
1376 if (n_local_before > 0 && !pids_before) {
1377 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "Input pids_before array is NULL for n_local_before > 0.");
1378 }
1379
1380 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
1381
1382 // Get the number of particles on this rank *after* the migration.
1383 ierr = DMSwarmGetLocalSize(swarm, &n_local_after); CHKERRQ(ierr);
1384
1385 LOG_ALLOW(LOCAL, LOG_DEBUG, "FlagNewcomersForLocation [Rank %d]: Checking for newcomers. Size before: %d, Size after: %d\n",
1386 rank, n_local_before, n_local_after);
1387
1388 // If there are no particles now, there's nothing to do.
1389 if (n_local_after == 0) {
1390 PetscFunctionReturn(0);
1391 }
1392
1393 // --- 2. Access Swarm Data ---
1394 // Get read-only access to the PIDs and read-write access to the status field.
1395 ierr = DMSwarmGetField(swarm, "DMSwarm_pid", NULL, NULL, (void**)&pid_field_after); CHKERRQ(ierr);
1396 ierr = DMSwarmGetField(swarm, "DMSwarm_location_status", NULL, NULL, (void**)&status_field_after); CHKERRQ(ierr);
1397 ierr = DMSwarmGetField(swarm, "DMSwarm_CellID", NULL, NULL, (void**)&cell_field_after); CHKERRQ(ierr);
1398 if (!pid_field_after || !status_field_after) {
1399 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Failed to get required swarm fields in FlagNewcomersForLocation.");
1400 }
1401
1402 // --- 3. Identify and Flag Newcomers ---
1403 // Loop through all particles currently on this rank.
1404 for (PetscInt p_idx = 0; p_idx < n_local_after; ++p_idx) {
1405 PetscInt64 current_pid = pid_field_after[p_idx];
1406 PetscBool is_found_in_before_list;
1407
1408 // Use our custom, efficient helper function for the lookup.
1409 ierr = BinarySearchInt64(n_local_before, pids_before, current_pid, &is_found_in_before_list); CHKERRQ(ierr);
1410
1411 // If the PID was NOT found in the "before" list, it must be a newcomer.
1412 if (!is_found_in_before_list) {
1413 // Flag it for processing in the next pass of the migration loop.
1414 status_field_after[p_idx] = NEEDS_LOCATION;
1415 // cell_field_after[3*p_idx+0] = -1;
1416 // cell_field_after[3*p_idx+1] = -1;
1417 // cell_field_after[3*p_idx+2] = -1;
1418 newcomer_count++;
1419
1420 LOG_ALLOW(LOCAL, LOG_DEBUG, "[Rank %d]: Flagged newcomer PID %ld at local index %d as NEEDS_LOCATION.\n",
1421 rank, current_pid, p_idx);
1422 }
1423 }
1424
1425 // --- 4. Restore Swarm Fields ---
1426 // Release the locks on the swarm data arrays.
1427 ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void**)&pid_field_after); CHKERRQ(ierr);
1428 ierr = DMSwarmRestoreField(swarm, "DMSwarm_location_status", NULL, NULL, (void**)&status_field_after); CHKERRQ(ierr);
1429 ierr = DMSwarmRestoreField(swarm, "DMSwarm_CellID", NULL, NULL, (void**)&cell_field_after); CHKERRQ(ierr);
1430
1431 if (newcomer_count > 0) {
1432 LOG_ALLOW(LOCAL, LOG_INFO, "[Rank %d]: Identified and flagged %d newcomers.\n", rank, newcomer_count);
1433 }
1434
1435 PetscFunctionReturn(0);
1436}
1437
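/*
 * Minimal sketch of the membership test used above: a PID absent from the sorted
 * pre-migration snapshot is, by definition, a newcomer. The sample values are
 * made up for illustration.
 *
 * @code
 *   const PetscInt64 pids_before[3] = {3, 7, 19};   // sorted snapshot
 *   PetscBool        found;
 *   ierr = BinarySearchInt64(3, pids_before, 42, &found); CHKERRQ(ierr);
 *   // found == PETSC_FALSE: PID 42 arrived during migration -> flag it NEEDS_LOCATION.
 * @endcode
 */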
1438/**
1439 * @brief Provides a fast, heuristic-based guess for a particle's owner rank using bounding boxes.
1440 * @ingroup ParticleLocation
1441 *
1442 * This function is part of the "Guess and Verify" strategy, called only for "lost"
1443 * particles. It attempts to find a candidate owner by checking which rank's bounding box
1444 * contains the particle's physical position.
1445 *
1446 * To optimize the search, it uses the particle's position relative to the local
1447 * bounding box to intelligently check the most likely neighboring ranks first.
1448 * For example, if a particle's x-coordinate is less than the local minimum x, it
1449 * will check the -X neighbor first. If no owner is found in the immediate neighbors,
1450 * it performs a full search of all other ranks as a fallback.
1451 *
1452 * @param[in] user Pointer to the UserCtx, which must contain the pre-computed
1453 * `bbox` (local), `neighbors` struct, and the global `bboxlist`.
1454 * @param[in] particle A pointer to the particle whose owner is being guessed.
1455 * @param[in] bboxlist An array of BoundingBox structures for ALL MPI ranks, indexed 0 to (size-1).
1456 * This array must be up-to-date and available on all ranks.
1457 * @param[out] guess_rank_out A pointer to a PetscMPIInt. Set to the candidate owner's rank
1458 * if found, otherwise set to -1 (or MPI_PROC_NULL).
1459 *
1460 * @return PetscErrorCode 0 on success, or a non-zero PETSc error code on failure.
1461 */
1462PetscErrorCode GuessParticleOwnerWithBBox(UserCtx *user,
1463                                          const Particle *particle,
1464 const BoundingBox *bboxlist,
1465 PetscMPIInt *guess_rank_out)
1466{
1467 PetscErrorCode ierr;
1468 PetscMPIInt rank, size;
1469 const RankNeighbors *neighbors = &user->neighbors; // Use a direct pointer for clarity
1470 const BoundingBox *localBBox = &user->bbox;
1471
1472 PetscFunctionBeginUser;
1473
1474 // --- 1. Input Validation and Setup ---
1475 if (!user || !particle || !guess_rank_out || !bboxlist) {
1476 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "Null pointer provided to GuessParticleOwnerWithBBox.");
1477 }
1478    if (!localBBox || !neighbors) {
1479        SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Required user->bbox or user->neighbors is not initialized.");
1480 }
1481
1482 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
1483 ierr = MPI_Comm_size(PETSC_COMM_WORLD, &size); CHKERRQ(ierr);
1484
1485 *guess_rank_out = MPI_PROC_NULL; // Default to "not found"
1486
1487 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld]: Starting guess for particle at (%.3f, %.3f, %.3f).\n",
1488 particle->PID, particle->loc.x, particle->loc.y, particle->loc.z);
1489
1490 // *** THE PRIMARY FIX ***
1491 // --- Step 0: Check if the particle is inside the CURRENT rank's bounding box FIRST. ---
1492 // This handles the common case of initial placement where a particle is "lost" but physically local.
1493 if (IsParticleInBox(localBBox, &particle->loc)) {
1494 *guess_rank_out = rank;
1495 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld]: Fast path guess SUCCESS. Particle is within the local (Rank %d) bounding box.\n",
1496 particle->PID, rank);
1497 PetscFunctionReturn(0); // Found it, we're done.
1498 }
1499 // --- 2. Fast Path: Check Immediate Neighbors Based on Exit Direction ---
1500
1501 // Determine likely exit direction(s) to prioritize neighbor check
1502 PetscBool exit_xm = particle->loc.x < localBBox->min_coords.x;
1503 PetscBool exit_xp = particle->loc.x > localBBox->max_coords.x;
1504 PetscBool exit_ym = particle->loc.y < localBBox->min_coords.y;
1505 PetscBool exit_yp = particle->loc.y > localBBox->max_coords.y;
1506 PetscBool exit_zm = particle->loc.z < localBBox->min_coords.z;
1507 PetscBool exit_zp = particle->loc.z > localBBox->max_coords.z;
1508
1509 if (exit_xm && neighbors->rank_xm != MPI_PROC_NULL && IsParticleInBox(&bboxlist[neighbors->rank_xm], &particle->loc)) {
1510 *guess_rank_out = neighbors->rank_xm;
1511    } else if (exit_xp && neighbors->rank_xp != MPI_PROC_NULL && IsParticleInBox(&bboxlist[neighbors->rank_xp], &particle->loc)) {
1512 *guess_rank_out = neighbors->rank_xp;
1513 } else if (exit_ym && neighbors->rank_ym != MPI_PROC_NULL && IsParticleInBox(&bboxlist[neighbors->rank_ym], &particle->loc)) {
1514 *guess_rank_out = neighbors->rank_ym;
1515 } else if (exit_yp && neighbors->rank_yp != MPI_PROC_NULL && IsParticleInBox(&bboxlist[neighbors->rank_yp], &particle->loc)) {
1516 *guess_rank_out = neighbors->rank_yp;
1517 } else if (exit_zm && neighbors->rank_zm != MPI_PROC_NULL && IsParticleInBox(&bboxlist[neighbors->rank_zm], &particle->loc)) {
1518 *guess_rank_out = neighbors->rank_zm;
1519 } else if (exit_zp && neighbors->rank_zp != MPI_PROC_NULL && IsParticleInBox(&bboxlist[neighbors->rank_zp], &particle->loc)) {
1520 *guess_rank_out = neighbors->rank_zp;
1521 }
1522 // Note: This does not handle corner/edge neighbors, which is why the fallback is essential.
1523
1524 if (*guess_rank_out != MPI_PROC_NULL) {
1525 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld]: Fast path guess SUCCESS. Found in immediate neighbor Rank %d.\n",
1526 particle->PID, *guess_rank_out);
1527 PetscFunctionReturn(0); // Found it, we're done.
1528 }
1529
1530 // --- 3. Robust Fallback: Check All Other Ranks ---
1531 // If we get here, the particle was not in any of the immediate face neighbors' boxes.
1532 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld]: Not in immediate face neighbors. Starting global fallback search.\n",
1533 particle->PID);
1534
1535 for (PetscMPIInt r = 0; r < size; ++r) {
1536 if (r == rank) continue; // Don't check ourselves.
1537
1538 if (IsParticleInBox(&bboxlist[r], &particle->loc)) {
1539 PetscBool is_in = PETSC_TRUE;
1540            // Detailed diagnostic print of the containment check, useful when debugging cross-rank particle placement.
1541 LOG_ALLOW(LOCAL,LOG_DEBUG, "[Guess BBox | Rank %d] Checking PID %lld at (%.4f, %.4f, %.4f) against Rank %d's box: [(%.4f, %.4f, %.4f) to (%.4f, %.4f, %.4f)]. Result: %s\n",
1542 (int)rank, (long long)particle->PID,
1543 particle->loc.x, particle->loc.y, particle->loc.z,
1544 (int)r,
1545 bboxlist[r].min_coords.x, bboxlist[r].min_coords.y, bboxlist[r].min_coords.z,
1546 bboxlist[r].max_coords.x, bboxlist[r].max_coords.y, bboxlist[r].max_coords.z,
1547 is_in ? "INSIDE" : "OUTSIDE");
1548
1549 *guess_rank_out = r;
1550 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld]: Fallback search SUCCESS. Found in Rank %d.\n",
1551 particle->PID, *guess_rank_out);
1552 PetscFunctionReturn(0); // Found it, we're done.
1553 }
1554 }
1555
1556 // If the code reaches here, the particle was not found in any rank's bounding box.
1557 LOG_ALLOW(LOCAL, LOG_WARNING, "[PID %ld]: Guess FAILED. Particle not found in any rank's bounding box.\n",
1558 particle->PID);
1559
1560 // The guess_rank_out will remain -1, signaling failure to the caller.
1561 PetscFunctionReturn(0);
1562}
1563
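/*
 * Illustrative usage sketch (not part of the solver) of the bounding-box guess
 * above: the caller treats the result as a hint, not a verdict. A guess naming
 * another rank marks the particle for migration; a guess of the local rank or
 * MPI_PROC_NULL still requires the robust cell walk. The variable names mirror
 * the orchestrator below but are assumptions of this sketch.
 *
 * @code
 *   PetscMPIInt            guess  = MPI_PROC_NULL;
 *   ParticleLocationStatus status = NEEDS_LOCATION;
 *   ierr = GuessParticleOwnerWithBBox(user, &particle, bboxlist, &guess); CHKERRQ(ierr);
 *   if (guess != MPI_PROC_NULL && guess != rank) {
 *     particle.destination_rank = guess;   // hand off to the guessed owner
 *   } else {
 *     ierr = LocateParticleOrFindMigrationTarget_TEST(user, &particle, &status); CHKERRQ(ierr);
 *   }
 * @endcode
 */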
1564#undef __FUNCT__
1565#define __FUNCT__ "LocateAllParticlesInGrid_TEST"
1566/**
1567 * @brief Orchestrates the complete particle location and migration process for one timestep.
1568 * @ingroup ParticleLocation
1569 *
1570 * This function is the master orchestrator for ensuring every particle is on its correct
1571 * MPI rank and has a valid host cell index. It is designed to be called once per
1572 * timestep after particle positions have been updated.
1573 *
1574 * The function uses a robust, iterative "Guess and Verify" strategy within a
1575 * do-while loop to handle complex particle motion across processor boundaries,
1576 * especially on curvilinear grids.
1577 *
1578 * 1. **State Snapshot:** At the start of each pass, it captures a list of all Particle IDs (PIDs)
1579 * on the current rank.
1580 * 2. **"Guess" (Heuristic):** For particles that are "lost" (no valid host cell),
1581 * it first attempts a fast, bounding-box-based guess to find a potential new owner rank.
1582 * 3. **"Verify" (Robust Walk):** For all other particles, or if the guess fails,
1583 * it uses a robust cell-walking algorithm (`LocateParticleOrFindMigrationTarget`)
1584 * that determines the particle's status: located locally, needs migration, or is lost.
1585 * 4. **Migration:** After identifying all migrating particles on a pass, it performs the
1586 * MPI communication using the `SetMigrationRanks` and `PerformMigration` helpers.
1587 * 5. **Newcomer Flagging:** After migration, it uses the PID snapshot from step 1 to
1588 * efficiently identify newly arrived particles and flag them for location on the next pass.
1589 * 6. **Iteration:** The process repeats in a `do-while` loop until a pass occurs where
1590 * no particles migrate, ensuring the entire swarm is in a stable, consistent state.
1591 *
1592 * @param[in,out] user Pointer to the UserCtx, containing the swarm and all necessary
1593 * domain topology information (bboxlist, RankCellInfoMap, etc.).
1594 * @param[in] bboxlist An array of BoundingBox structures for ALL MPI ranks, indexed 0 to (size-1).
1595 * This array must be up-to-date and available on all ranks.
1596 * @return PetscErrorCode 0 on success, or a non-zero PETSc error code on failure.
1597 */
1598PetscErrorCode LocateAllParticlesInGrid_TEST(UserCtx *user, BoundingBox *bboxlist)
1599 {
1600 PetscErrorCode ierr;
1601 PetscInt passes = 0;
1602 const PetscInt MAX_MIGRATION_PASSES = 10; // Safety break for runaway loops
1603 PetscInt global_migrations_this_pass;
1604 PetscMPIInt rank;
1605
1606 PetscFunctionBeginUser;
1608 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
1610 LOG_ALLOW(GLOBAL, LOG_INFO, "LocateAllParticlesInGrid (Orchestrator) - Beginning particle settlement process.\n");
1611
1612 // This loop ensures that particles that jump across multiple ranks are
1613 // handled correctly in successive, iterative handoffs.
1614 do {
1615 passes++;
1616 LOG_ALLOW_SYNC(GLOBAL, LOG_INFO, "[Rank %d] Starting migration pass %d.\n", rank, passes);
1617
1618 // --- STAGE 1: PER-PASS INITIALIZATION ---
1619 MigrationInfo *migrationList = NULL;
1620 PetscInt local_migration_count = 0;
1621 PetscInt migrationListCapacity = 0;
1622 PetscInt nlocal_before;
1623 PetscInt64 *pids_before_snapshot = NULL;
1624 PetscInt local_lost_count = 0;
1625
1626 ierr = DMSwarmGetLocalSize(user->swarm, &nlocal_before); CHKERRQ(ierr);
1627 LOG_ALLOW(LOCAL, LOG_DEBUG, "[Rank %d] Pass %d begins with %d local particles.\n", rank, passes, nlocal_before);
1628
1629
1630 // --- STAGE 2: PRE-MIGRATION SNAPSHOT & MAIN PROCESSING LOOP ---
1631 if (nlocal_before > 0) {
1632 // Get pointers to all fields needed for this pass
1633 PetscReal *pos_p, *weights_p, *vel_p;
1634 PetscInt *cell_p, *status_p;
1635 PetscInt64 *pid_p;
1636 ierr = DMSwarmGetField(user->swarm, "position", NULL, NULL, (void**)&pos_p); CHKERRQ(ierr);
1637 ierr = DMSwarmGetField(user->swarm, "velocity", NULL, NULL, (void**)&vel_p); CHKERRQ(ierr);
1638 ierr = DMSwarmGetField(user->swarm, "weight", NULL, NULL, (void**)&weights_p); CHKERRQ(ierr);
1639 ierr = DMSwarmGetField(user->swarm, "DMSwarm_CellID", NULL, NULL, (void**)&cell_p); CHKERRQ(ierr);
1640 ierr = DMSwarmGetField(user->swarm, "DMSwarm_pid", NULL, NULL, (void**)&pid_p); CHKERRQ(ierr);
1641 ierr = DMSwarmGetField(user->swarm, "DMSwarm_location_status", NULL, NULL, (void**)&status_p); CHKERRQ(ierr);
1642
1643 // Create a sorted snapshot of current PIDs to identify newcomers after migration.
1644 // This helper requires a raw pointer, which we just acquired.
1645 ierr = GetLocalPIDSnapshot(pid_p, nlocal_before, &pids_before_snapshot); CHKERRQ(ierr);
1646
1647 for (PetscInt p_idx = 0; p_idx < nlocal_before; p_idx++) {
1648
1649 // OPTIMIZATION: Skip particles already settled in a previous pass of this do-while loop.
 1650
 1651            LOG_LOOP_ALLOW(LOCAL, LOG_DEBUG, p_idx, (nlocal_before > 20 ? nlocal_before/10 : 1), // sampled per-particle state dump
 1652                           "p_idx=%d, PID=%ld, status=%d, cell=(%d, %d, %d)\n",
1653 p_idx,
1654 (long)pid_p[p_idx],
1655 status_p[p_idx],
1656 cell_p[3*p_idx],
1657 cell_p[3*p_idx+1],
1658 cell_p[3*p_idx+2]);
1659
1660 if (status_p[p_idx] == ACTIVE_AND_LOCATED) {
1661 LOG_ALLOW(LOCAL,LOG_DEBUG," [rank %d][PID %ld] skipped in pass %d as it is already located at (%d,%d,%d).\n",rank,pid_p[p_idx],passes,cell_p[3*p_idx],cell_p[3*p_idx + 1],cell_p[3*p_idx + 2]);
1662 continue;
1663 }
1664
1665 // UNPACK: Create a temporary C struct for easier processing using our helper.
1666 Particle current_particle;
1667
1668 // LOG_ALLOW(LOCAL,LOG_DEBUG,"about to unpack p_idx=%d (PID=%ld)\n",p_idx, (long)pid_p[p_idx]);
1669
1670 ierr = UnpackSwarmFields(p_idx, pid_p, weights_p, pos_p, cell_p, vel_p, status_p, &current_particle); CHKERRQ(ierr);
1671
1672 // LOG_ALLOW(LOCAL,LOG_DEBUG,"unpacked p_idx=%d → cell[0]=%d, status=%d\n",p_idx, current_particle.cell[0], current_particle.location_status);
1673
1674 ParticleLocationStatus final_status = (ParticleLocationStatus)status_p[p_idx];
1675
1676
1677 // CASE 1: Particle has a valid prior cell index.
1678 // It has moved, so we only need to run the robust walk from its last known location.
1679 if (current_particle.cell[0] >= 0) {
1680 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld] has valid prior cell. Strategy: Robust Walk from previous cell.\n", current_particle.PID);
1681 ierr = LocateParticleOrFindMigrationTarget_TEST(user, &current_particle, &final_status); CHKERRQ(ierr);
1682 }
1683
1684 /*
1685 // --- "GUESS" FAST PATH for lost particles ---
1686 if (current_particle.cell[0] < 0) {
 1687     LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld] is lost or uninitialized (cell=%d), attempting fast guess.\n", current_particle.PID, current_particle.cell[0]);
1688 ierr = GuessParticleOwnerWithBBox(user, &current_particle, bboxlist, &destination_rank); CHKERRQ(ierr);
1689 if (destination_rank != MPI_PROC_NULL && destination_rank != rank) {
1690 final_status = MIGRATING_OUT;
1691 // The particle struct's destination rank must be updated for consistency
1692 current_particle.destination_rank = destination_rank;
1693 }
1694 }
1695
1696 LOG_ALLOW(LOCAL,LOG_DEBUG,"[PID %ld] Particle status after Initial Guess:%d \n",current_particle.PID,final_status);
1697
1698 // --- "VERIFY" ROBUST WALK if guess didn't resolve it ---
 1699     if (final_status == NEEDS_LOCATION || final_status == UNINITIALIZED) {
1700 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld] Not resolved by guess, starting robust walk.\n", current_particle.PID);
1701 // This function will update the particle's status and destination rank internally.
1702 ierr = LocateParticleOrFindMigrationTarget_TEST(user, &current_particle, &final_status); CHKERRQ(ierr);
1703 destination_rank = current_particle.destination_rank; // Retrieve the result
1704 }
1705
1706 // --- PROCESS THE FINAL STATUS AND TAKE ACTION ---
1707 if (final_status == MIGRATING_OUT) {
1708 status_p[p_idx] = MIGRATING_OUT; // Mark for removal by DMSwarm
1709 ierr = AddToMigrationList(&migrationList, &migrationListCapacity, &local_migration_count, p_idx, destination_rank); CHKERRQ(ierr);
1710 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld] at local index %d marked for migration to rank %d.\n",current_particle.PID, p_idx, destination_rank);
1711 } else {
1712 // Particle's final status is either LOCATED or LOST; update its state in the swarm arrays.
1713 current_particle.location_status = final_status;
1714 // PACK: Use the helper to write results back to the swarm arrays.
1715 ierr = UpdateSwarmFields(p_idx, &current_particle, weights_p, cell_p, status_p); CHKERRQ(ierr);
1716 }
1717 */
1718 // CASE 2: Particle is "lost" (cell = -1). Strategy: Guess -> Verify.
1719 else {
1720 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld] has invalid cell. Strategy: Guess Owner -> Find Cell.\n",current_particle.PID);
1721
1722 PetscMPIInt guessed_owner_rank = MPI_PROC_NULL;
1723 ierr = GuessParticleOwnerWithBBox(user, &current_particle, bboxlist, &guessed_owner_rank); CHKERRQ(ierr);
1724
1725 // If the guess finds a DIFFERENT rank, we can mark for migration and skip the walk.
1726 if (guessed_owner_rank != MPI_PROC_NULL && guessed_owner_rank != rank) {
1727 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld] Guess SUCCESS: Found migration target Rank %d. Finalizing.\n", current_particle.PID, guessed_owner_rank);
1728 final_status = MIGRATING_OUT;
1729 current_particle.destination_rank = guessed_owner_rank;
1730 }
1731 else {
1732
1733 // This block runs if the guess either failed (rank is NULL) or found the particle is local (rank is self).
1734 // In BOTH cases, the situation is unresolved, and we MUST fall back to the robust walk.
1735 if (guessed_owner_rank == rank) {
1736 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld] Guess determined particle is local. Proceeding to robust walk to find cell.\n", current_particle.PID);
1737 } else { // guessed_owner_rank == MPI_PROC_NULL
1738 LOG_ALLOW(LOCAL, LOG_WARNING, "[PID %ld] Guess FAILED to find an owner. Proceeding to robust walk for definitive search.\n", current_particle.PID);
1739 }
1740
1741 ierr = LocateParticleOrFindMigrationTarget_TEST(user, &current_particle, &final_status); CHKERRQ(ierr);
1742 }
1743 }
1744
1745 // --- PROCESS THE FINAL, DEFINITIVE STATUS ---
1746 current_particle.location_status = final_status;
1747 ierr = UpdateSwarmFields(p_idx, &current_particle, weights_p, cell_p, status_p); CHKERRQ(ierr);
1748
1749 if (final_status == MIGRATING_OUT) {
1750 ierr = AddToMigrationList(&migrationList, &migrationListCapacity, &local_migration_count, p_idx, current_particle.destination_rank); CHKERRQ(ierr);
1751 } else if (final_status == LOST) {
1752 local_lost_count++;
1753 }
1754
1755 } // End of main particle processing loop
1756
1757 // Restore all the fields acquired for this pass.
1758 ierr = DMSwarmRestoreField(user->swarm, "position", NULL, NULL, (void**)&pos_p); CHKERRQ(ierr);
1759 ierr = DMSwarmRestoreField(user->swarm, "velocity", NULL, NULL, (void**)&vel_p); CHKERRQ(ierr);
1760 ierr = DMSwarmRestoreField(user->swarm, "weight", NULL, NULL, (void**)&weights_p); CHKERRQ(ierr);
1761 ierr = DMSwarmRestoreField(user->swarm, "DMSwarm_CellID", NULL, NULL, (void**)&cell_p); CHKERRQ(ierr);
1762 ierr = DMSwarmRestoreField(user->swarm, "DMSwarm_pid", NULL, NULL, (void**)&pid_p); CHKERRQ(ierr);
1763 ierr = DMSwarmRestoreField(user->swarm, "DMSwarm_location_status", NULL, NULL, (void**)&status_p); CHKERRQ(ierr);
1764 }
1765
1766 // --- STAGE 3: ACTION & MPI COMMUNICATION ---
1767 LOG_ALLOW(LOCAL, LOG_INFO, "[Rank %d] Pass %d: Identified %d particles to migrate out.\n", rank, passes, local_migration_count);
1768
 1769     // Synchronize and decide: determine whether any rank wants to migrate. This
 1770     // collective call is safe because all ranks have finished their local work
 1771     // and can participate.
1772 ierr = MPI_Allreduce(&local_migration_count, &global_migrations_this_pass, 1, MPIU_INT, MPI_SUM, PETSC_COMM_WORLD); CHKERRQ(ierr);
1773
 1774     if (global_migrations_this_pass > 0) {
1775
1776 LOG_ALLOW(GLOBAL, LOG_INFO, "Pass %d: Migrating %d particles globally.\n", passes, global_migrations_this_pass);
1777
1778 ierr = SetMigrationRanks(user, migrationList, local_migration_count); CHKERRQ(ierr);
1779 ierr = PerformMigration(user); CHKERRQ(ierr);
1780
1781 // --- STAGE 4: POST-MIGRATION RESET ---
1782 // Identify newly arrived particles and flag them with NEEDS_LOCATION so they are
1783 // processed in the next pass. This uses the snapshot taken in STAGE 2.
1784 ierr = FlagNewcomersForLocation(user->swarm, nlocal_before, pids_before_snapshot); CHKERRQ(ierr);
1785 }
1786 // --- STAGE 5: LOOP SYNCHRONIZATION AND CLEANUP ---
1787
 1788       ierr = PetscFree(pids_before_snapshot); CHKERRQ(ierr);
 1789       ierr = PetscFree(migrationList); CHKERRQ(ierr);
1790
1791 LOG_ALLOW(GLOBAL, LOG_INFO, "End of LocateAllParticlesInGrid pass %d. Total particles migrated globally: %d.\n", passes, global_migrations_this_pass);
1792
1793 } while (global_migrations_this_pass > 0 && passes < MAX_MIGRATION_PASSES);
1794
1795 // --- FINAL CHECKS ---
1796 if (passes >= MAX_MIGRATION_PASSES) {
1797 SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_CONV_FAILED, "Particle migration failed to converge after %d passes. Check for particles oscillating between ranks.", MAX_MIGRATION_PASSES);
1798 }
1799
1800 LOG_ALLOW(GLOBAL, LOG_INFO, "Particle Location completed in %d passes.\n", passes);
1803 PetscFunctionReturn(0);
1804}
1805
1806/**
1807 * This function is designed to be called at the end of a full timestep, after all
1808 * particle-based calculations are complete. It prepares the swarm for the next
1809 * timestep by ensuring that after the next position update, every particle will be
1810 * re-evaluated by the LocateAllParticlesInGrid orchestrator.
1811 *
1812 * It iterates through all locally owned particles and sets their
1813 * `DMSwarm_location_status` field to `NEEDS_LOCATION`.
1814 *
1815 * @param[in,out] user Pointer to the UserCtx containing the swarm.
1816 * @return PetscErrorCode 0 on success, or a non-zero PETSc error code on failure.
1817 */
1818PetscErrorCode ResetAllParticleStatuses(UserCtx *user)
1819 {
1820 PetscErrorCode ierr;
1821 PetscInt n_local;
1822 PetscInt *status_p;
1823
1824 PetscFunctionBeginUser;
1825
1826 ierr = DMSwarmGetLocalSize(user->swarm, &n_local); CHKERRQ(ierr);
1827
1828 if (n_local > 0) {
1829 // Get write access to the status field
1830 ierr = DMSwarmGetField(user->swarm, "DMSwarm_location_status", NULL, NULL, (void**)&status_p); CHKERRQ(ierr);
1831
1832 for (PetscInt p = 0; p < n_local; ++p) {
1833 // Only reset particles that are considered settled. This is a small optimization
1834 // to avoid changing the status of a LOST particle, though resetting all would also be fine.
1835 if (status_p[p] == ACTIVE_AND_LOCATED) {
1836 status_p[p] = NEEDS_LOCATION;
1837 }
1838 }
1839
1840 // Restore the field
1841 ierr = DMSwarmRestoreField(user->swarm, "DMSwarm_location_status", NULL, NULL, (void**)&status_p); CHKERRQ(ierr);
1842 }
1843
1844 PetscFunctionReturn(0);
1845}
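/*
 * Putting the pieces together: a minimal, illustrative per-timestep driver for
 * the routines in this file. The loop bounds, the bboxlist variable, and the
 * choice to recompute per-cell particle counts every step are assumptions of
 * this sketch, not the solver's actual time loop.
 *
 * @code
 *   for (PetscInt step = 0; step < nsteps; ++step) {
 *     ierr = UpdateAllParticlePositions(user); CHKERRQ(ierr);              // advect by dt
 *     ierr = LocateAllParticlesInGrid_TEST(user, bboxlist); CHKERRQ(ierr); // settle ranks & host cells
 *     ierr = CalculateParticleCountPerCell(user); CHKERRQ(ierr);           // optional diagnostics
 *     ierr = ResetAllParticleStatuses(user); CHKERRQ(ierr);                // mark settled particles for re-location
 *   }
 * @endcode
 */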
PetscErrorCode CanRankServiceInletFace(UserCtx *user, const DMDALocalInfo *info, PetscInt IM_nodes_global, PetscInt JM_nodes_global, PetscInt KM_nodes_global, PetscBool *can_service_inlet_out)
Determines if the current MPI rank owns any part of the globally defined inlet face,...
Definition Boundaries.c:24
PetscErrorCode GetRandomCellAndLogicOnInletFace(UserCtx *user, const DMDALocalInfo *info, PetscInt xs_gnode_rank, PetscInt ys_gnode_rank, PetscInt zs_gnode_rank, PetscInt IM_nodes_global, PetscInt JM_nodes_global, PetscInt KM_nodes_global, PetscRandom *rand_logic_i_ptr, PetscRandom *rand_logic_j_ptr, PetscRandom *rand_logic_k_ptr, PetscInt *ci_metric_lnode_out, PetscInt *cj_metric_lnode_out, PetscInt *ck_metric_lnode_out, PetscReal *xi_metric_logic_out, PetscReal *eta_metric_logic_out, PetscReal *zta_metric_logic_out)
Assuming the current rank services the inlet face, this function selects a random cell (owned by this...
Definition Boundaries.c:225
PetscErrorCode MetricLogicalToPhysical(UserCtx *user, const Cmpnts ***X, PetscInt i, PetscInt j, PetscInt k, PetscReal xi, PetscReal eta, PetscReal zta, Cmpnts *Xp)
Public wrapper: map (cell index, ξ,η,ζ) to (x,y,z).
Definition Metric.c:68
PetscErrorCode UpdateParticlePosition(UserCtx *user, Cmpnts *position, const Cmpnts *velocity)
Updates a particle's position based on its velocity and the timestep dt (stored in user->dt).
static int compare_PetscInt64(const void *a, const void *b)
PetscErrorCode GuessParticleOwnerWithBBox(UserCtx *user, const Particle *particle, const BoundingBox *bboxlist, PetscMPIInt *guess_rank_out)
Provides a fast, heuristic-based guess for a particle's owner rank using bounding boxes.
PetscErrorCode ResizeSwarmGlobally(DM swarm, PetscInt N_target)
PetscErrorCode AddToMigrationList(MigrationInfo **migration_list_p, PetscInt *capacity_p, PetscInt *count_p, PetscInt particle_local_idx, PetscMPIInt destination_rank)
Safely adds a new migration task to a dynamically sized list.
PetscErrorCode SetMigrationRanks(UserCtx *user, const MigrationInfo *migrationList, PetscInt migrationCount)
Sets the target rank field (DMSwarmPICField_rank) for particles scheduled for migration.
PetscErrorCode CheckAndRemoveOutOfBoundsParticles(UserCtx *user, PetscInt *removedCountLocal, PetscInt *removedCountGlobal, const BoundingBox *bboxlist)
Checks for particles outside the global physical domain and removes them.
PetscErrorCode GetLocalPIDSnapshot(const PetscInt64 pid_field[], PetscInt n_local, PetscInt64 **pids_snapshot_out)
Creates a sorted snapshot of all Particle IDs (PIDs) from a raw data array.
PetscErrorCode LocateAllParticlesInGrid_TEST(UserCtx *user, BoundingBox *bboxlist)
Orchestrates the complete particle location and migration process for one timestep.
PetscErrorCode UpdateAllParticlePositions(UserCtx *user)
Loops over all local particles in the DMSwarm, updating their positions based on velocity and the glo...
#define ERROR_MSG_BUFFER_SIZE
PetscErrorCode ResetAllParticleStatuses(UserCtx *user)
This function is designed to be called at the end of a full timestep, after all particle-based calcul...
PetscErrorCode ReinitializeParticlesOnInletSurface(UserCtx *user, PetscReal currentTime, PetscInt step)
Re-initializes the positions of particles currently on this rank if this rank owns part of the design...
PetscErrorCode CheckAndRemoveLostParticles(UserCtx *user, PetscInt *removedCountLocal, PetscInt *removedCountGlobal)
Removes particles that have been definitively flagged as LOST by the location algorithm.
PetscErrorCode FlagNewcomersForLocation(DM swarm, PetscInt n_local_before, const PetscInt64 pids_before[])
Identifies newly arrived particles after migration and flags them for a location search.
static PetscBool IsParticleInBox(const BoundingBox *bbox, const Cmpnts *pos)
Checks if a particle position is within the bounds of a given bounding box.
PetscErrorCode PreCheckAndResizeSwarm(UserCtx *user, PetscInt ti, const char *ext)
Checks particle count from a saved file and resizes the swarm globally.
PetscErrorCode PerformMigration(UserCtx *user)
Performs particle migration based on the pre-populated DMSwarmPICField_rank field.
PetscErrorCode UnpackSwarmFields(PetscInt i, const PetscInt64 *PIDs, const PetscReal *weights, const PetscReal *positions, const PetscInt *cellIndices, PetscReal *velocities, PetscInt *LocStatus, Particle *particle)
Initializes a Particle struct with data from DMSwarm fields.
PetscErrorCode InitializeLogicalSpaceRNGs(PetscRandom *rand_logic_i, PetscRandom *rand_logic_j, PetscRandom *rand_logic_k)
Initializes random number generators for logical space operations [0.0, 1.0).
PetscErrorCode UpdateSwarmFields(PetscInt i, const Particle *particle, PetscReal *weights, PetscInt *cellIndices, PetscInt *status_field)
Updates DMSwarm fields with data from a Particle struct.
PetscErrorCode CalculateParticleCountPerCell(UserCtx *user)
Counts particles in each cell of the DMDA 'da' and stores the result in user->ParticleCount.
#define LOG_LOOP_ALLOW(scope, level, iterVar, interval, fmt,...)
Logs a message inside a loop, but only every interval iterations.
Definition logging.h:319
#define LOG_ALLOW_SYNC(scope, level, fmt,...)
Definition logging.h:274
#define LOCAL
Logging scope definitions for controlling message output.
Definition logging.h:44
#define GLOBAL
Scope for global logging across all processes.
Definition logging.h:45
#define LOG_ALLOW(scope, level, fmt,...)
Logging macro that checks both the log level and whether the calling function is in the allowed-funct...
Definition logging.h:207
#define LOG_FUNC_TIMER_END_EVENT(eventID, scope)
Ends timing a function by:
Definition logging.h:462
#define PROFILE_FUNCTION_END
Marks the end of a profiled code block.
Definition logging.h:767
#define LOG_FUNC_TIMER_BEGIN_EVENT(eventID, scope)
Begins timing a function by:
Definition logging.h:436
@ LOG_ERROR
Critical errors that may halt the program.
Definition logging.h:29
@ LOG_INFO
Informational messages about program execution.
Definition logging.h:32
@ LOG_WARNING
Non-critical issues that warrant attention.
Definition logging.h:30
@ LOG_DEBUG
Detailed debugging information.
Definition logging.h:33
#define PROFILE_FUNCTION_BEGIN
Marks the beginning of a profiled code block (typically a function).
Definition logging.h:758
PetscLogEvent EVENT_GlobalParticleLocation
Definition logging.c:35
PetscErrorCode BinarySearchInt64(PetscInt n, const PetscInt64 arr[], PetscInt64 key, PetscBool *found)
Performs a binary search for a key in a sorted array of PetscInt64.
Definition setup.c:2017
PetscMPIInt rank_zm
Definition variables.h:178
PetscBool inletFaceDefined
Definition variables.h:649
PetscMPIInt rank_yp
Definition variables.h:177
BCFace identifiedInletBCFace
Definition variables.h:650
PetscInt cell[3]
Definition variables.h:166
SimCtx * simCtx
Back-pointer to the master simulation context.
Definition variables.h:633
ParticleLocationStatus
Defines the state of a particle with respect to its location and migration status during the iterativ...
Definition variables.h:134
@ LOST
Definition variables.h:138
@ NEEDS_LOCATION
Definition variables.h:135
@ ACTIVE_AND_LOCATED
Definition variables.h:136
@ MIGRATING_OUT
Definition variables.h:137
PetscMPIInt rank_ym
Definition variables.h:177
PetscMPIInt rank_xp
Definition variables.h:176
PetscInt local_index
Definition variables.h:189
Cmpnts max_coords
Maximum x, y, z coordinates of the bounding box.
Definition variables.h:155
PetscReal dt
Definition variables.h:527
RankNeighbors neighbors
Definition variables.h:642
PetscInt np
Definition variables.h:585
Cmpnts min_coords
Minimum x, y, z coordinates of the bounding box.
Definition variables.h:154
PetscScalar x
Definition variables.h:100
Cmpnts loc
Definition variables.h:167
PetscMPIInt destination_rank
Definition variables.h:171
ParticleLocationStatus location_status
Definition variables.h:170
PetscMPIInt rank_xm
Definition variables.h:176
PetscScalar z
Definition variables.h:100
Vec ParticleCount
Definition variables.h:697
PetscMPIInt rank_zp
Definition variables.h:178
PetscScalar y
Definition variables.h:100
PetscInt ParticleInitialization
Definition variables.h:589
BoundingBox bbox
Definition variables.h:641
PetscInt64 PID
Definition variables.h:165
Defines a 3D axis-aligned bounding box.
Definition variables.h:153
A 3D point or vector with PetscScalar components.
Definition variables.h:99
Information needed to migrate a single particle between MPI ranks.
Definition variables.h:188
Defines a particle's core properties for Lagrangian tracking.
Definition variables.h:164
Stores the MPI ranks of neighboring subdomains.
Definition variables.h:175
User-defined context containing data specific to a single computational grid level.
Definition variables.h:630
Head of a generic C-style linked list.
Definition variables.h:341
PetscErrorCode LocateParticleOrFindMigrationTarget_TEST(UserCtx *user, Particle *particle, ParticleLocationStatus *status_out)
Locates a particle's host cell or identifies its migration target using a robust walk search.