PICurv 0.1.0
A Parallel Particle-In-Cell Solver for Curvilinear LES
ParticleMotion.c
1// ParticleMotion.c
2
3#include "ParticleMotion.h"
4
5// Define a buffer size for error messages if not already available
6#ifndef ERROR_MSG_BUFFER_SIZE
7#define ERROR_MSG_BUFFER_SIZE 256 // Or use PETSC_MAX_PATH_LEN if appropriate
8#endif
9
10
11#undef __FUNCT__
12#define __FUNCT__ "UpdateParticlePosition"
13/**
 14 * @brief Updates a particle's position based on its velocity and the timestep dt (stored in user->simCtx->dt).
 15 *
 16 * @param[in]     user     Pointer to the UserCtx (must contain simCtx->dt).
17 * @param[in,out] position Pointer to the particle's current position (Cmpnts).
18 * @param[in] velocity Pointer to the particle's velocity (Cmpnts).
19 *
20 * @return PetscErrorCode Returns 0 on success, or an error code on failure.
21 */
22PetscErrorCode UpdateParticlePosition(UserCtx *user, Cmpnts *position, const Cmpnts *velocity)
23{
24 PetscFunctionBeginUser; // PETSc macro for error/stack tracing
26
27 PetscReal dt = user->simCtx->dt;
28
29 /* Update the position with velocity * dt */
30 position->x += velocity->x * dt;
31 position->y += velocity->y * dt;
32 position->z += velocity->z * dt;
33
34
36 PetscFunctionReturn(0);
37}
38
39#undef __FUNCT__
40#define __FUNCT__ "UpdateAllParticlePositions"
41/**
42 * @brief Loops over all local particles in the DMSwarm, updating their positions
 43 *        based on velocity and the global timestep user->simCtx->dt.
 44 * @param[in,out] user Pointer to UserCtx (must contain simCtx->dt).
45 *
46 * @return PetscErrorCode Returns 0 on success, or an error code on failure.
47 */
 48PetscErrorCode UpdateAllParticlePositions(UserCtx *user)
 49{
50 PetscErrorCode ierr;
51 DM swarm = user->swarm;
52 PetscInt nLocal, p;
53 PetscReal *pos = NULL;
54 PetscReal *vel = NULL;
55 PetscMPIInt rank;
56 Cmpnts temp_pos, temp_vel;
57
 58    PetscFunctionBeginUser; // PETSc macro for error/stack tracing
 59
 60    ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank); CHKERRQ(ierr);
61
63
64 // 1) Get the number of local particles
65 ierr = DMSwarmGetLocalSize(swarm, &nLocal); CHKERRQ(ierr);
66 if (nLocal == 0) {
67 LOG_ALLOW(LOCAL,LOG_DEBUG,"[Rank %d] No particles to move/transport. \n",rank);
68 PetscFunctionReturn(0); // nothing to do, no fields held
69 }
70 // 2) Access the "position" and "velocity" fields
71 ierr = DMSwarmGetField(swarm, "position", NULL, NULL, (void**)&pos); CHKERRQ(ierr);
72 ierr = DMSwarmGetField(swarm, "velocity", NULL, NULL, (void**)&vel); CHKERRQ(ierr);
73
 74    LOG_ALLOW(GLOBAL,LOG_DEBUG,"[Rank %d] No. of particles to update: %d.\n",rank,nLocal);
75
76 // 3) Loop over all local particles, updating each position by velocity * dt
77 for (p = 0; p < nLocal; p++) {
78 // update temporary position struct
79 temp_pos.x = pos[3*p];
80 temp_pos.y = pos[3*p + 1];
81 temp_pos.z = pos[3*p + 2];
82
83 // update temporary velocity struct
84 temp_vel.x = vel[3*p];
85 temp_vel.y = vel[3*p + 1];
86 temp_vel.z = vel[3*p + 2];
87
88 ierr = UpdateParticlePosition(user, &temp_pos, &temp_vel); CHKERRQ(ierr);
89
90 // update swarm from temporary position struct
91 pos[3*p] = temp_pos.x;
92 pos[3*p + 1] = temp_pos.y;
93 pos[3*p + 2] = temp_pos.z;
94
95 vel[3*p] = temp_vel.x;
96 vel[3*p + 1] = temp_vel.y;
97 vel[3*p + 2] = temp_vel.z;
98 }
99
100 // 4) Restore the fields
101 ierr = DMSwarmRestoreField(swarm, "position", NULL, NULL, (void**)&pos); CHKERRQ(ierr);
102 ierr = DMSwarmRestoreField(swarm, "velocity", NULL, NULL, (void**)&vel); CHKERRQ(ierr);
103
 104    LOG_ALLOW(LOCAL,LOG_DEBUG,"Particles moved/transported successfully on Rank %d.\n",rank);
105
107
108 PetscFunctionReturn(0);
109}
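
// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of the original file): a minimal
// per-timestep advection sequence built from routines defined in this file
// and assumed to be declared in ParticleMotion.h. It presumes a fully
// initialized UserCtx (swarm, simCtx->dt); the enclosing time loop and the
// location/migration phase are not shown.
// ---------------------------------------------------------------------------
static PetscErrorCode AdvectParticlesOneStep_Example(UserCtx *user)
{
  PetscErrorCode ierr;
  PetscInt       removedLocal = 0, removedGlobal = 0;

  PetscFunctionBeginUser;
  // 1) Push every local particle forward by simCtx->dt.
  ierr = UpdateAllParticlePositions(user); CHKERRQ(ierr);
  // 2) Collectively remove particles flagged LOST by the location algorithm.
  //    Every rank must make this call (it contains an MPI_Allreduce).
  ierr = CheckAndRemoveLostParticles(user, &removedLocal, &removedGlobal); CHKERRQ(ierr);
  PetscFunctionReturn(0);
}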
110
111
112/**
113 * @brief Checks if a particle position is within the bounds of a given bounding box.
114 *
115 * @param bbox Pointer to the BoundingBox structure.
116 * @param pos Pointer to the particle's position (Cmpnts).
117 *
118 * @return PetscBool PETSC_TRUE if the particle is inside or on the boundary, PETSC_FALSE otherwise.
119 */
120static inline PetscBool IsParticleInBox(const BoundingBox *bbox, const Cmpnts *pos) {
121 return (pos->x >= bbox->min_coords.x && pos->x <= bbox->max_coords.x &&
122 pos->y >= bbox->min_coords.y && pos->y <= bbox->max_coords.y &&
123 pos->z >= bbox->min_coords.z && pos->z <= bbox->max_coords.z);
124}
125
126
127#undef __FUNCT__
128#define __FUNCT__ "CheckAndRemoveOutOfBoundsParticles"
129
130/**
131 * @brief Checks for particles outside the global physical domain and removes them.
132 *
133 * This function iterates through all particles local to the current MPI rank. It determines
134 * if a particle's physical position is outside of ALL subdomains owned by the MPI processes.
135 * To perform this check, it requires a list of the bounding boxes for every MPI rank's
136 * subdomain.
137 *
138 * If a particle is not found within any of the subdomains, it is considered "out of bounds"
139 * and is permanently removed from the simulation using DMSwarmRemovePointAtIndex().
140 *
141 * The function is carefully designed to handle modifications to the DMSwarm during iteration
142 * safely. It does this by:
143 * 1. Iterating BACKWARDS through the local particle list, so removing an element at
144 * index 'p' does not affect the indices of subsequent elements to be checked (p-1, p-2, ...).
145 * 2. Using a robust "Restore-Remove-Reacquire" pattern. When a particle is removed,
146 * all pointers to swarm data are first restored, the removal operation is performed,
147 * and then the pointers are re-acquired for the remainder of the loop.
148 *
149 * @warning This function contains a collective MPI operation (MPI_Allreduce) at the end.
150 * To avoid deadlocks, it MUST be called by ALL ranks in the communicator, even if
151 * a rank has no local particles. Do not place this call inside rank-specific
152 * conditional logic in your main loop.
153 *
 154 * @note This function may be redundant when the robust particle location scheme (one that
 155 *       sets a `LOST` status) and its corresponding cleanup function (CheckAndRemoveLostParticles) are in use.
156 *
157 * @param[in,out] user Pointer to the UserCtx structure containing the swarm and MPI info.
158 * @param[out] removedCountLocal Pointer to an integer that will store the number of particles removed on THIS rank.
159 * @param[out] removedCountGlobal Pointer to an integer that will store the total number of particles removed ACROSS ALL ranks.
160 * @param[in] bboxlist An array of BoundingBox structures for ALL MPI ranks, indexed 0 to (size-1).
161 * This array must be up-to-date and available on every rank.
162 *
163 * @return PetscErrorCode 0 on success, or a non-zero PETSc error code on failure.
164 */
165PetscErrorCode CheckAndRemoveOutOfBoundsParticles(UserCtx *user,
166                                                  PetscInt *removedCountLocal,
167 PetscInt *removedCountGlobal,
168 const BoundingBox *bboxlist)
169{
170 PetscErrorCode ierr;
171 DM swarm = user->swarm;
172 PetscInt nLocalInitial;
173 PetscReal *pos_p = NULL;
174 PetscInt64 *pid_p = NULL; // For better logging
175 PetscInt local_removed_count = 0;
176 PetscMPIInt global_removed_count_mpi = 0;
177 PetscMPIInt rank, size;
178
179 PetscFunctionBeginUser;
180 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
181 ierr = MPI_Comm_size(PETSC_COMM_WORLD, &size); CHKERRQ(ierr);
182 LOG_ALLOW(LOCAL, LOG_INFO, "[Rank %d] Checking for out-of-bounds particles...", rank);
183
184 // Initialize output parameters to ensure clean state
185 *removedCountLocal = 0;
186 if (removedCountGlobal) *removedCountGlobal = 0;
187
188 ierr = DMSwarmGetLocalSize(swarm, &nLocalInitial); CHKERRQ(ierr);
189
190 // Only proceed if there are particles to check on this rank.
191 // All ranks will still participate in the final collective MPI_Allreduce.
192 if (nLocalInitial > 0) {
193 // Get access to swarm fields once before the loop begins.
194 ierr = DMSwarmGetField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr);
195 ierr = DMSwarmGetField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr);
196
197 // --- Iterate BACKWARDS to handle index changes safely during removal ---
198 for (PetscInt p = nLocalInitial - 1; p >= 0; p--) {
199 PetscBool isInsideAnyBox = PETSC_FALSE;
200 Cmpnts current_pos = {pos_p[3*p + 0], pos_p[3*p + 1], pos_p[3*p + 2]};
201
202 // Check if the particle is inside ANY of the rank bounding boxes
203 for (PetscMPIInt proc = 0; proc < size; proc++) {
204 if (IsParticleInBox(&bboxlist[proc], &current_pos)) {
205 isInsideAnyBox = PETSC_TRUE;
206 break; // Particle is inside a valid domain, stop checking.
207 }
208 }
209
210 if (!isInsideAnyBox) {
211 LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Removing out-of-bounds particle [PID %lld] at local index %d. Pos: (%g, %g, %g)\n",
212 rank, (long long)pid_p[p], p, current_pos.x, current_pos.y, current_pos.z);
213
214 // --- Safe Removal Pattern: Restore -> Remove -> Reacquire ---
215 // This is the fix for the double-restore bug. Pointers are managed carefully
216 // within this block and then restored cleanly after the loop.
217
218 // 1. Restore all fields BEFORE modifying the swarm structure. This invalidates pos_p and pid_p.
219 ierr = DMSwarmRestoreField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr);
220 ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr);
221
222 // 2. Remove the particle at the current local index 'p'.
223 ierr = DMSwarmRemovePointAtIndex(swarm, p); CHKERRQ(ierr);
224 local_removed_count++;
225
226 // 3. After removal, re-acquire pointers ONLY if the loop is not finished.
227 PetscInt nLocalCurrent;
228 ierr = DMSwarmGetLocalSize(swarm, &nLocalCurrent); CHKERRQ(ierr);
229
230 if (nLocalCurrent > 0 && p > 0) { // Check if there are particles left AND iterations left
231 ierr = DMSwarmGetField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr);
232 ierr = DMSwarmGetField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr);
233 } else {
234 // All remaining particles were removed OR this was the last particle (p=0).
235 // Invalidate pointers to prevent the final restore call and exit the loop.
236 pos_p = NULL;
237 pid_p = NULL;
238 break;
239 }
240 }
241 } // End of backwards loop
242
243 // At the end, restore any valid pointers. This handles three cases:
244 // 1. No particles were removed: restores the original pointers.
245 // 2. Particles were removed mid-loop: restores the pointers from the last re-acquisition.
246 // 3. All particles were removed: pointers are NULL, so nothing is done.
247 if (pos_p) { ierr = DMSwarmRestoreField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr); }
248 if (pid_p) { ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr); }
249 } // End of if (nLocalInitial > 0)
250
251 PetscInt nLocalFinal;
252 ierr = DMSwarmGetLocalSize(swarm, &nLocalFinal); CHKERRQ(ierr);
253 LOG_ALLOW(LOCAL, LOG_INFO, "[Rank %d] Finished removing %d out-of-bounds particles. Final local size: %d.\n", rank, local_removed_count, nLocalFinal);
254
255 // --- Synchronize counts across all ranks ---
256 *removedCountLocal = local_removed_count;
257 if (removedCountGlobal) {
258 ierr = MPI_Allreduce(&local_removed_count, &global_removed_count_mpi, 1, MPI_INT, MPI_SUM, PetscObjectComm((PetscObject)swarm)); CHKERRQ(ierr);
259 *removedCountGlobal = global_removed_count_mpi;
260 // Use a synchronized log message so only one rank prints the global total.
261 LOG_ALLOW_SYNC(GLOBAL, LOG_INFO, "[Rank %d] Removed %d out-of-bounds particles globally.\n", rank, *removedCountGlobal);
262 }
263
264 PetscFunctionReturn(0);
265}
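
// ---------------------------------------------------------------------------
// Usage sketch (illustrative only): calling the collective out-of-bounds
// cleanup. `bboxlist` is assumed to be the per-rank bounding-box array that
// the surrounding codebase gathers during domain setup; it must be identical
// on every rank, and EVERY rank must make this call to avoid deadlocking in
// the MPI_Allreduce.
// ---------------------------------------------------------------------------
static PetscErrorCode RemoveEscapedParticles_Example(UserCtx *user, const BoundingBox *bboxlist)
{
  PetscErrorCode ierr;
  PetscInt       removedLocal = 0, removedGlobal = 0;

  PetscFunctionBeginUser;
  ierr = CheckAndRemoveOutOfBoundsParticles(user, &removedLocal, &removedGlobal, bboxlist); CHKERRQ(ierr);
  if (removedGlobal > 0) {
    LOG_ALLOW(GLOBAL, LOG_INFO, "Example: %d particles left the physical domain this step.\n", removedGlobal);
  }
  PetscFunctionReturn(0);
}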
266
267#undef __FUNCT__
268#define __FUNCT__ "CheckAndRemoveLostParticles"
269/**
270 * @brief Removes particles that have been definitively flagged as LOST by the location algorithm.
271 *
272 * This function is the designated cleanup utility for particles that have exited the
273 * physical domain or could not be located for any other reason. It should be called
274 * after a particle location and migration phase is complete.
275 *
276 * It iterates through all locally owned particles and checks their `DMSwarm_location_status`
277 * field. If a particle's status is `LOST`, it is permanently removed from the simulation
278 * using `DMSwarmRemovePointAtIndex`.
279 *
280 * The function is carefully designed to handle modifications to the DMSwarm during iteration
281 * safely. It does this by:
282 * 1. Iterating BACKWARDS through the local particle list, so removing an element at
283 * index 'p' does not affect the indices of subsequent elements to be checked (p-1, p-2, ...).
284 * 2. Using a robust "Restore-Remove-Reacquire" pattern. When a particle is removed,
285 * all pointers to swarm data are first restored, the removal operation is performed,
286 * and then the pointers are re-acquired for the remainder of the loop.
287 *
288 * @warning This function contains a collective MPI operation (MPI_Allreduce) at the end.
289 * To avoid deadlocks, it MUST be called by ALL ranks in the communicator, even if
290 * a rank has no local particles. Do not place this call inside rank-specific
291 * conditional logic in your main loop.
292 *
293 * @param[in,out] user Pointer to the UserCtx structure containing the swarm.
294 * @param[out] removedCountLocal Pointer to an integer that will store the number of particles removed on THIS rank.
295 * @param[out] removedCountGlobal Pointer to an integer that will store the total number of particles removed ACROSS ALL ranks.
296 *
297 * @return PetscErrorCode 0 on success, or a non-zero PETSc error code on failure.
298 */
299PetscErrorCode CheckAndRemoveLostParticles(UserCtx *user,
300                                           PetscInt *removedCountLocal,
301 PetscInt *removedCountGlobal)
302{
303 PetscErrorCode ierr;
304 DM swarm = user->swarm;
305 PetscInt nLocalInitial;
306 PetscInt *status_p = NULL;
307 PetscInt64 *pid_p = NULL; // For better logging
308 PetscReal *pos_p = NULL; // For better logging
309 PetscInt local_removed_count = 0;
310 PetscMPIInt global_removed_count_mpi = 0;
311 PetscMPIInt rank;
312
313 PetscFunctionBeginUser;
315 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
316 LOG_ALLOW(LOCAL, LOG_INFO, "Rank %d: Checking for and removing LOST particles...", rank);
317
318 // Initialize output parameters to ensure clean state
319 *removedCountLocal = 0;
320 if (removedCountGlobal) *removedCountGlobal = 0;
321
322 ierr = DMSwarmGetLocalSize(swarm, &nLocalInitial); CHKERRQ(ierr);
323
324 // Only proceed if there are particles to check on this rank.
325 // All ranks will still participate in the final collective MPI_Allreduce.
326 if (nLocalInitial > 0) {
327 // Get access to all swarm fields once before the loop begins.
328 ierr = DMSwarmGetField(swarm, "DMSwarm_location_status", NULL, NULL, (void **)&status_p); CHKERRQ(ierr);
329 ierr = DMSwarmGetField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr);
330 ierr = DMSwarmGetField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr);
331
332 // --- Iterate BACKWARDS to handle index changes safely during removal ---
333 for (PetscInt p = nLocalInitial - 1; p >= 0; p--) {
334 if (status_p[p] == LOST) {
335 LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Removing LOST particle [PID %lld] at local index %d. Position: (%.4f, %.4f, %.4f).\n",
336 rank, (long long)pid_p[p], p, pos_p[3*p], pos_p[3*p+1], pos_p[3*p+2]);
337
338 // --- Safe Removal Pattern: Restore -> Remove -> Reacquire ---
339 // This is the fix for the double-restore bug. Pointers are managed carefully
340 // within this block and then restored cleanly after the loop.
341
342 // 1. Restore all fields BEFORE modifying the swarm structure. This invalidates all pointers.
343 ierr = DMSwarmRestoreField(swarm, "DMSwarm_location_status", NULL, NULL, (void **)&status_p); CHKERRQ(ierr);
344 ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr);
345 ierr = DMSwarmRestoreField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr);
346
347 // 2. Remove the particle at the current local index 'p'.
348 ierr = DMSwarmRemovePointAtIndex(swarm, p); CHKERRQ(ierr);
349 local_removed_count++;
350
351 // 3. After removal, re-acquire pointers ONLY if the loop is not finished.
352 PetscInt nLocalCurrent;
353 ierr = DMSwarmGetLocalSize(swarm, &nLocalCurrent); CHKERRQ(ierr);
354
355 if (nLocalCurrent > 0 && p > 0) { // Check if there are particles left AND iterations left
356 ierr = DMSwarmGetField(swarm, "DMSwarm_location_status", NULL, NULL, (void **)&status_p); CHKERRQ(ierr);
357 ierr = DMSwarmGetField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr);
358 ierr = DMSwarmGetField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr);
359 } else {
360 // All remaining particles were removed OR this was the last particle (p=0).
361 // Invalidate pointers to prevent the final restore call and exit the loop.
362 status_p = NULL;
363 pid_p = NULL;
364 pos_p = NULL;
365 break;
366 }
367 }
368 } // End of backwards loop
369
370 // At the end, restore any valid pointers. This handles three cases:
371 // 1. No particles were removed: restores the original pointers.
372 // 2. Particles were removed mid-loop: restores the pointers from the last re-acquisition.
373 // 3. All particles were removed: pointers are NULL, so nothing is done.
374 if (status_p) { ierr = DMSwarmRestoreField(swarm, "DMSwarm_location_status", NULL, NULL, (void **)&status_p); CHKERRQ(ierr); }
375 if (pid_p) { ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr); }
376 if (pos_p) { ierr = DMSwarmRestoreField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr); }
377 } // End of if (nLocalInitial > 0)
378
379 PetscInt nLocalFinal;
380 ierr = DMSwarmGetLocalSize(swarm, &nLocalFinal); CHKERRQ(ierr);
381 LOG_ALLOW(LOCAL, LOG_INFO, "Rank %d: Finished removing %d LOST particles. Final local size: %d.\n", rank, local_removed_count, nLocalFinal);
382
383 // --- Synchronize counts across all ranks ---
384 *removedCountLocal = local_removed_count;
385 if (removedCountGlobal) {
386 ierr = MPI_Allreduce(&local_removed_count, &global_removed_count_mpi, 1, MPI_INT, MPI_SUM, PetscObjectComm((PetscObject)swarm)); CHKERRQ(ierr);
387 *removedCountGlobal = global_removed_count_mpi;
388 // Use a synchronized log message so only one rank prints the global total.
389 LOG_ALLOW_SYNC(GLOBAL, LOG_INFO, "[Rank %d] Removed %d LOST particles globally.\n", rank, *removedCountGlobal);
390 }
391
393 PetscFunctionReturn(0);
394}
395
396
397#undef __FUNCT__
398#define __FUNCT__ "SetMigrationRanks"
399/**
 400 * @brief Sets the target rank field ("DMSwarm_rank") for particles scheduled for migration.
 401 *
 402 * @param user           Pointer to UserCtx (contains the swarm).
403 * @param migrationList Array of MigrationInfo structs containing local indices and target ranks.
404 * @param migrationCount Number of particles in the migrationList.
405 *
406 * @return PetscErrorCode 0 on success, non-zero on failure.
407 */
408PetscErrorCode SetMigrationRanks(UserCtx* user, const MigrationInfo *migrationList, PetscInt migrationCount)
409{
410 PetscErrorCode ierr;
411 DM swarm = user->swarm;
412 PetscInt p_idx;
413 PetscInt *rankField = NULL; // Field storing target rank
414
415 PetscFunctionBeginUser;
417
 418    // Access the "DMSwarm_rank" field that stores each particle's target rank (errors if the field does not exist)
419 ierr = DMSwarmGetField(swarm, "DMSwarm_rank", NULL, NULL, (void **)&rankField); CHKERRQ(ierr);
420
421 // Set the target rank for migrating particles
422 for(p_idx = 0; p_idx < migrationCount; ++p_idx) {
423 rankField[migrationList[p_idx].local_index] = migrationList[p_idx].target_rank;
424 }
425
426 ierr = DMSwarmRestoreField(swarm, "DMSwarm_rank", NULL, NULL, (void **)&rankField); CHKERRQ(ierr);
427
429 PetscFunctionReturn(0);
430}
431
432#undef __FUNCT__
433#define __FUNCT__ "PerformMigration"
434
435/**
 436 * @brief Performs particle migration based on the pre-populated "DMSwarm_rank" field.
 437 *
 438 * Assumes SetMigrationRanks has already been called to mark particles with their target ranks.
 439 * Calls DMSwarmMigrate to execute the communication; migrated particles are removed from the sending rank.
440 *
441 * @param user Pointer to the UserCtx structure containing the swarm.
442 *
443 * @return PetscErrorCode 0 on success, non-zero on failure.
444 */
445PetscErrorCode PerformMigration(UserCtx *user)
446{
447 PetscErrorCode ierr;
448 DM swarm = user->swarm;
449 PetscMPIInt rank;
450
451 PetscFunctionBeginUser;
453 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
454 LOG_ALLOW(GLOBAL, LOG_INFO, "Rank %d: Starting DMSwarmMigrate...\n", rank);
455
 456    // Perform the migration. The second argument (PETSC_TRUE) tells DMSwarmMigrate to
 457    // remove particles from this rank's local swarm once they have been sent to their target rank.
458 ierr = DMSwarmMigrate(swarm, PETSC_TRUE); CHKERRQ(ierr);
459
460 LOG_ALLOW(GLOBAL, LOG_INFO, "Rank %d: Migration complete.\n", rank);
462 PetscFunctionReturn(0);
463}
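
// ---------------------------------------------------------------------------
// Usage sketch (illustrative only): the intended two-step migration pipeline.
// A migration list (built, e.g., with AddToMigrationList during the location
// pass) is first written into the "DMSwarm_rank" field, then the actual
// communication is triggered. Both calls are collective and must be made by
// every rank, even if its migrationCount is zero.
// ---------------------------------------------------------------------------
static PetscErrorCode MigrateMarkedParticles_Example(UserCtx *user,
                                                     const MigrationInfo *migrationList,
                                                     PetscInt migrationCount)
{
  PetscErrorCode ierr;

  PetscFunctionBeginUser;
  ierr = SetMigrationRanks(user, migrationList, migrationCount); CHKERRQ(ierr); // mark target ranks
  ierr = PerformMigration(user); CHKERRQ(ierr);                                 // move the particles
  PetscFunctionReturn(0);
}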
464
465//-----------------------------------------------------------------------------
466// MODULE (COUNT): Calculates Particle Count Per Cell - REVISED FOR DMDAVecGetArray
467//-----------------------------------------------------------------------------
468
469#undef __FUNCT__
470#define __FUNCT__ "CalculateParticleCountPerCell"
471/**
472 * @brief Counts particles in each cell of the DMDA 'da' and stores the result in user->ParticleCount.
473 * @ingroup scatter_module
474 *
475 * Zeros the user->ParticleCount vector, then iterates through local particles.
476 * Reads the **GLOBAL** cell index (I, J, K) stored in the "DMSwarm_CellID" field.
477 * Uses DMDAVecGetArray to access the local portion of the count vector and increments
478 * the count at the global index (I, J, K) if it belongs to the local patch (including ghosts).
479 *
480 * @param[in,out] user Pointer to the UserCtx structure containing da, swarm, and ParticleCount.
481 *
482 * @return PetscErrorCode Returns 0 on success, non-zero on failure.
483
484 */
485PetscErrorCode CalculateParticleCountPerCell(UserCtx *user) {
486    PetscErrorCode ierr;
487 DM da = user->da;
488 DM swarm = user->swarm;
489 Vec countVec = user->ParticleCount;
490 PetscInt nlocal, p;
491 PetscInt *global_cell_id_arr; // Read GLOBAL cell IDs
492 PetscScalar ***count_arr_3d; // Use 3D accessor
493 PetscInt64 *PID_arr;
494 PetscMPIInt rank;
495 char msg[ERROR_MSG_BUFFER_SIZE];
496 PetscInt particles_counted_locally = 0;
497
498 PetscFunctionBeginUser;
500 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
501
502 // --- Input Validation ---
503 if (!da) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "UserCtx->da is NULL.");
504 if (!swarm) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "UserCtx->swarm is NULL.");
505 if (!countVec) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "UserCtx->ParticleCount is NULL.");
506 // Check DOF of da
507 PetscInt count_dof;
508 ierr = DMDAGetInfo(da, NULL, NULL, NULL, NULL, NULL, NULL, NULL, &count_dof, NULL, NULL, NULL, NULL, NULL); CHKERRQ(ierr);
509 if (count_dof != 1) { PetscSNPrintf(msg, sizeof(msg), "countDM must have DOF=1, got %d.", count_dof); SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, msg); }
510
511 // --- Zero the count vector ---
512 ierr = VecSet(countVec, 0.0); CHKERRQ(ierr);
513
514 // --- Get Particle Data ---
515 LOG_ALLOW(GLOBAL,LOG_DEBUG, "Accessing particle data.\n");
516 ierr = DMSwarmGetLocalSize(swarm, &nlocal); CHKERRQ(ierr);
517 ierr = DMSwarmGetField(swarm,"DMSwarm_CellID", NULL, NULL, (void **)&global_cell_id_arr); CHKERRQ(ierr);
518 ierr = DMSwarmGetField(swarm,"DMSwarm_pid",NULL,NULL,(void **)&PID_arr);CHKERRQ(ierr);
519
520 // --- Get Grid Vector Array using DMDA accessor ---
521 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Accessing ParticleCount vector array (using DMDAVecGetArray).\n");
522 ierr = DMDAVecGetArray(da, countVec, &count_arr_3d); CHKERRQ(ierr);
523
524 // Get local owned range for validation/logging if needed, but not for indexing with DMDAVecGetArray
525 PetscInt xs, ys, zs, xm, ym, zm;
526 ierr = DMDAGetCorners(da, &xs, &ys, &zs, &xm, &ym, &zm); CHKERRQ(ierr);
527
528 // --- Accumulate Counts Locally ---
529 LOG_ALLOW(LOCAL, LOG_DEBUG, "CalculateParticleCountPerCell (Rank %d): Processing %d local particles using GLOBAL CellIDs.\n",rank,nlocal);
530 for (p = 0; p < nlocal; p++) {
531 // Read the GLOBAL indices stored for this particle
532 PetscInt64 i_geom = global_cell_id_arr[p * 3 + 0]; // Global i index
533 PetscInt64 j_geom = global_cell_id_arr[p * 3 + 1]; // Global j index
534 PetscInt64 k_geom = global_cell_id_arr[p * 3 + 2]; // Global k index
535
536 // Apply the shift to ensure ParticleCount follows the indexing convention for cell-centered data in this codebase.
537 PetscInt i = (PetscInt)i_geom + 1; // Shift for cell-centered
538 PetscInt j = (PetscInt)j_geom + 1; // Shift for cell-centered
539 PetscInt k = (PetscInt)k_geom + 1; // Shift for cell-centered
540
541 // *** Bounds check is implicitly handled by DMDAVecGetArray for owned+ghost region ***
542 // However, accessing outside this region using global indices WILL cause an error.
543 // A preliminary check might still be wise if global IDs could be wild.
544 // We rely on LocateAllParticles to provide valid global indices [0..IM-1] etc.
545
546 LOG_LOOP_ALLOW(LOCAL,LOG_VERBOSE,p,100,"[Rank %d] Read CellID for p=%d, PID = %ld: (%ld, %ld, %ld)\n", rank, p,PID_arr[p],i, j, k);
547
548 // *** Access the local array using GLOBAL indices ***
549 // DMDAVecGetArray allows this, mapping global (I,J,K) to the correct
550 // location within the local ghosted array segment.
551 // This assumes (I,J,K) corresponds to a cell within the local owned+ghost region.
552 // If (I,J,K) is for a cell owned by *another* rank (and not in the ghost region
553 // of this rank), accessing count_arr_3d[K][J][I] will likely lead to a crash
554 // or incorrect results. This highlights why storing LOCAL indices is preferred
555 // for parallel runs. But proceeding with GLOBAL as requested:
556 if (i >= xs && i < xs + xm &&
557 j >= ys && j < ys + ym && // Adjust based on actual ghost width
558 k >= zs && k < zs + zm ) // This check prevents definite crashes but doesn't guarantee ownership
559 {
560
561 // Increment count at the location corresponding to GLOBAL index (I,J,K)
562 // LOG_ALLOW(LOCAL, LOG_DEBUG, "CalculateParticleCountPerCell (Rank %d): Particle %d with global CellID (%d, %d, %d) incremented with a particle.\n",rank, p, i, j, k);
563 count_arr_3d[k][j][i] += 1.0;
564 particles_counted_locally++;
565 } else {
566 // This particle's global ID is likely outside the range this rank handles (even ghosts)
567 LOG_ALLOW(LOCAL, LOG_DEBUG, "CalculateParticleCountPerCell (Rank %d): Skipping particle %ld with global CellID (%ld, %ld, %ld) - likely outside local+ghost range.\n",rank, PID_arr[p] , i, j, k);
568 }
569 }
570 LOG_ALLOW(LOCAL, LOG_DEBUG, "CalculateParticleCountPerCell (Rank %d): Local counting finished. Processed %d particles locally.\n", rank, particles_counted_locally);
571
572 // --- Restore Access ---
573 ierr = DMDAVecRestoreArray(da, countVec, &count_arr_3d); CHKERRQ(ierr);
574 ierr = DMSwarmRestoreField(swarm,"DMSwarm_CellID", NULL, NULL, (void **)&global_cell_id_arr); CHKERRQ(ierr);
575 ierr = DMSwarmRestoreField(swarm,"DMSwarm_pid",NULL,NULL,(void **)&PID_arr);CHKERRQ(ierr);
576
577 // --- Assemble Global Vector ---
578 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Assembling global ParticleCount vector.\n");
579 ierr = VecAssemblyBegin(countVec); CHKERRQ(ierr);
580 ierr = VecAssemblyEnd(countVec); CHKERRQ(ierr);
581
582 // --- Verification Logging ---
583 PetscReal total_counted_particles = 0.0, max_count_in_cell = 0.0;
584 ierr = VecSum(countVec, &total_counted_particles); CHKERRQ(ierr);
585 PetscInt max_idx_global = -1;
586 ierr = VecMax(countVec, &max_idx_global, &max_count_in_cell); CHKERRQ(ierr);
587 LOG_ALLOW(GLOBAL, LOG_INFO, "Total counted globally = %.0f, Max count in cell = %.0f\n",
588 total_counted_particles, max_count_in_cell);
589
 590    // --- Debugging block: locate the maximum count and check the origin cell ---
591 if (max_idx_global >= 0) { // Check if VecMax found a location
592 // Need to convert the flat global index back to 3D global index (I, J, K)
593 // Get global grid dimensions (Nodes, NOT Cells IM/JM/KM)
594 PetscInt M, N, P;
595 ierr = DMDAGetInfo(da, NULL, &M, &N, &P, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); CHKERRQ(ierr);
596 // Note: Assuming DOF=1 for countVec, index mapping uses node dimensions M,N,P from DMDA creation (IM+1, etc)
597 // Re-check if your DMDA uses cell counts (IM) or node counts (IM+1) for Vec layout. Let's assume Node counts M,N,P.
598 PetscInt Kmax = max_idx_global / (M * N);
599 PetscInt Jmax = (max_idx_global % (M * N)) / M;
600 PetscInt Imax = max_idx_global % M;
601 LOG_ALLOW(GLOBAL, LOG_INFO, " -> Max count located at global index (I,J,K) = (%d, %d, %d) [Flat index: %d]\n",
602 (int)Imax, (int)Jmax, (int)Kmax, (int)max_idx_global);
603
604 // Also, let's explicitly check the count at (0,0,0)
605 PetscScalar count_at_origin = 0.0;
606 PetscScalar ***count_arr_for_check;
607 ierr = DMDAVecGetArrayRead(da, countVec, &count_arr_for_check); CHKERRQ(ierr);
608 // Check bounds before accessing - crucial if using global indices
609 PetscInt xs, ys, zs, xm, ym, zm;
610 ierr = DMDAGetCorners(da, &xs, &ys, &zs, &xm, &ym, &zm); CHKERRQ(ierr);
611 if (0 >= xs && 0 < xs+xm && 0 >= ys && 0 < ys+ym && 0 >= zs && 0 < zs+zm) {
612 count_at_origin = count_arr_for_check[0][0][0]; // Access using global index (0,0,0)
613 } else {
614 // Origin is not on this rank (relevant for parallel, but check anyway)
615 count_at_origin = -999.0; // Indicate it wasn't accessible locally
616 }
617 ierr = DMDAVecRestoreArrayRead(da, countVec, &count_arr_for_check); CHKERRQ(ierr);
618 LOG_ALLOW(GLOBAL, LOG_INFO, " -> Count at global index (0,0,0) = %.1f\n", count_at_origin);
619
620 } else {
621 LOG_ALLOW(GLOBAL, LOG_WARNING, " -> VecMax did not return a location for the maximum value.\n");
622 }
 623    // --- End debugging block ---
624
625 LOG_ALLOW(GLOBAL, LOG_INFO, "Particle counting complete.\n");
626
627
629 PetscFunctionReturn(0);
630}
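
// ---------------------------------------------------------------------------
// Usage sketch (illustrative only): recompute the per-cell counts after a
// location/migration phase and inspect one locally owned cell. The indices
// (i,j,k) are GLOBAL and already shifted to the cell-centered convention
// described above; user->da and user->ParticleCount are assumed to be set up.
// ---------------------------------------------------------------------------
static PetscErrorCode InspectCellCount_Example(UserCtx *user, PetscInt i, PetscInt j, PetscInt k)
{
  PetscErrorCode ierr;
  PetscScalar ***cnt;
  PetscInt       xs, ys, zs, xm, ym, zm;

  PetscFunctionBeginUser;
  ierr = CalculateParticleCountPerCell(user); CHKERRQ(ierr);
  ierr = DMDAGetCorners(user->da, &xs, &ys, &zs, &xm, &ym, &zm); CHKERRQ(ierr);
  ierr = DMDAVecGetArrayRead(user->da, user->ParticleCount, &cnt); CHKERRQ(ierr);
  if (i >= xs && i < xs + xm && j >= ys && j < ys + ym && k >= zs && k < zs + zm) {
    LOG_ALLOW(LOCAL, LOG_INFO, "Example: cell (%d,%d,%d) holds %.0f particles.\n",
              (int)i, (int)j, (int)k, (double)cnt[k][j][i]);
  }
  ierr = DMDAVecRestoreArrayRead(user->da, user->ParticleCount, &cnt); CHKERRQ(ierr);
  PetscFunctionReturn(0);
}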
631
632
633
634#undef __FUNCT__
635#define __FUNCT__ "ResizeSwarmGlobally"
636// --- Helper function to resize swarm globally (add or remove) ---
637// This assumes removing excess particles means removing the globally last ones.
638PetscErrorCode ResizeSwarmGlobally(DM swarm, PetscInt N_target)
639{
640 PetscErrorCode ierr;
641 PetscInt N_current, nlocal_current;
642 PetscMPIInt rank;
643 MPI_Comm comm;
644
645 PetscFunctionBeginUser;
647 ierr = PetscObjectGetComm((PetscObject)swarm, &comm); CHKERRQ(ierr);
648 ierr = MPI_Comm_rank(comm, &rank); CHKERRQ(ierr);
649 ierr = DMSwarmGetSize(swarm, &N_current); CHKERRQ(ierr);
650 ierr = DMSwarmGetLocalSize(swarm, &nlocal_current); CHKERRQ(ierr);
651
652 PetscInt delta = N_target - N_current;
653
654 if (delta == 0) {
656 PetscFunctionReturn(0); // Nothing to do
657 }
658
659 if (delta < 0) { // Remove particles
660 PetscInt num_to_remove_global = -delta;
661 LOG_ALLOW(GLOBAL, LOG_INFO, "Current size %d > target size %d. Removing %d particles globally.\n", N_current, N_target, num_to_remove_global);
662
663 // --- Strategy: Remove the globally last 'num_to_remove_global' particles ---
664 // Each rank needs to determine how many of its *local* particles fall
665 // within the range of global indices [N_target, N_current - 1].
666
667 PetscInt rstart = 0;
668 PetscInt rend;
669 // Global range owned by this rank [rstart, rend)
670
671 ierr = MPI_Exscan(&nlocal_current, &rstart, 1, MPIU_INT, MPI_SUM, comm); CHKERRMPI(ierr); // Use CHKERRMPI for MPI calls
672
673 rend = rstart + nlocal_current;
674
675
676 // Calculate how many local particles have global indices >= N_target
677 PetscInt nlocal_remove_count = 0;
678 if (rend > N_target) { // If this rank owns any particles slated for removal
679 PetscInt start_remove_local_idx = (N_target > rstart) ? (N_target - rstart) : 0;
680 nlocal_remove_count = nlocal_current - start_remove_local_idx;
681 }
682
683 if (nlocal_remove_count < 0) nlocal_remove_count = 0; // Sanity check
684 if (nlocal_remove_count > nlocal_current) nlocal_remove_count = nlocal_current; // Sanity check
685
686 LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Global range [%d, %d). Target size %d. Need to remove %d local particles (from end).\n", rank, rstart, rend, N_target, nlocal_remove_count);
687
688 // Remove the last 'nlocal_remove_count' particles *locally* by iterating backwards
689 PetscInt removal_ops_done = 0;
690 for (PetscInt p = nlocal_current - 1; p >= 0 && removal_ops_done < nlocal_remove_count; --p) {
691 ierr = DMSwarmRemovePointAtIndex(swarm, p); CHKERRQ(ierr);
692 removal_ops_done++;
693 }
694
695 if (removal_ops_done != nlocal_remove_count) {
696 SETERRQ(comm, PETSC_ERR_PLIB, "Rank %d: Failed to remove the expected number of local particles (%d != %d)", rank, removal_ops_done, nlocal_remove_count);
697 }
698 LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Removed %d local particles.\n", rank, removal_ops_done);
699
700 // Barrier to ensure all removals are done before size check
701 ierr = MPI_Barrier(comm); CHKERRMPI(ierr);
702
703 } else { // delta > 0: Add particles
704 PetscInt num_to_add_global = delta;
705 LOG_ALLOW(GLOBAL, LOG_INFO, "Current size %d < target size %d. Adding %d particles globally.\n", N_current, N_target, num_to_add_global);
706 ierr = DMSwarmAddNPoints(swarm, num_to_add_global); CHKERRQ(ierr);
707 // Note: Added particles will have uninitialized field data. Reading will overwrite.
708 }
709
710 // Verify final size
711 PetscInt N_final;
712 ierr = DMSwarmGetSize(swarm, &N_final); CHKERRQ(ierr);
713 if (N_final != N_target) {
714 SETERRQ(comm, PETSC_ERR_PLIB, "Failed to resize swarm: expected %d particles, got %d", N_target, N_final);
715 }
716 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Swarm successfully resized to %d particles.\n", N_final);
718 PetscFunctionReturn(0);
719}
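
// ---------------------------------------------------------------------------
// Worked example of the removal-range arithmetic used above (illustrative
// only, not called by the solver). With three ranks holding 4, 5 and 3
// particles (N_current = 12) and N_target = 10, the owned global ranges are
// [0,4), [4,9) and [9,12); only the last rank has rend > N_target and removes
// nlocal - (N_target - rstart) = 3 - 1 = 2 particles from its end.
// ---------------------------------------------------------------------------
static PetscInt LocalRemovalCount_Example(PetscInt rstart, PetscInt nlocal, PetscInt N_target)
{
  PetscInt rend = rstart + nlocal;   // this rank owns global indices [rstart, rend)
  if (rend <= N_target) return 0;    // none of this rank's particles lie beyond the target size
  PetscInt start_remove_local = (N_target > rstart) ? (N_target - rstart) : 0;
  return nlocal - start_remove_local; // local particles with global index >= N_target
}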
720
721#undef __FUNCT__
722#define __FUNCT__ "PreCheckAndResizeSwarm"
723/**
724 * @brief Checks particle count from a saved file and resizes the swarm globally.
725 *
726 * This function uses a robust parallel pattern: only Rank 0 reads the reference
727 * position file to determine the total number of particles saved (`N_file`).
728 * This count is then broadcast to all other ranks. Finally, each rank compares
729 * N_file with the current swarm size and participates in resizing if necessary.
730 *
731 * @param[in,out] user Pointer to the UserCtx structure containing the DMSwarm.
732 * @param[in] ti Time index for constructing the file name.
733 * @param[in] ext File extension (e.g., "dat").
734 *
735 * @return PetscErrorCode 0 on success, non-zero on failure.
736 */
737PetscErrorCode PreCheckAndResizeSwarm(UserCtx *user,
738 PetscInt ti,
739 const char *ext)
740{
741 PetscErrorCode ierr;
742 char filename[PETSC_MAX_PATH_LEN];
743 PetscInt N_file = 0; // The number of particles determined from the file
744 PetscInt N_current = 0;
745 MPI_Comm comm;
746 PetscMPIInt rank;
747 const char *refFieldName = "position";
748 const PetscInt bs = 3;
749 SimCtx *simCtx = user->simCtx;
750 char *source_path;
751
 752    // NOTE: The filename format has a hardcoded "_0", which is typical when a
 753    // parallel object is written from a single rank.
 754    // If the data were ever written in parallel, PETSc may create one file per rank.
 755    // The current logic assumes a single file written by one process.
756 const int placeholder_int = 0;
757
758 // Setup the I/O environment
759
760
761 PetscFunctionBeginUser;
763 ierr = PetscObjectGetComm((PetscObject)user->swarm, &comm); CHKERRQ(ierr);
764 ierr = MPI_Comm_rank(comm, &rank); CHKERRQ(ierr);
765
766 // First, determine the top-level source directory based on the execution mode.
767 if (simCtx->exec_mode == EXEC_MODE_SOLVER) {
768 source_path = simCtx->restart_dir;
769 } else if (simCtx->exec_mode == EXEC_MODE_POSTPROCESSOR) {
770 source_path = simCtx->pps->source_dir;
771 } else {
772 SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_ARG_WRONGSTATE, "Invalid execution mode for reading simulation fields.");
773 }
774
775 // Set the current I/O directory context
776 ierr = PetscSNPrintf(simCtx->_io_context_buffer, sizeof(simCtx->_io_context_buffer),
777 "%s/%s", source_path, simCtx->particle_subdir); CHKERRQ(ierr);
778
780
781 // --- Construct filename using the specified format ---
782 ierr = PetscSNPrintf(filename, sizeof(filename), "%s/%s%05" PetscInt_FMT "_%d.%s",
783 simCtx->current_io_directory,refFieldName, ti, placeholder_int, ext); CHKERRQ(ierr);
784
785 LOG_ALLOW(GLOBAL, LOG_INFO, "Checking particle count for timestep %d using ref file '%s'.\n", ti, filename);
786
787 // --- Rank 0 reads the file to determine the size ---
788 if (rank == 0) {
789 PetscBool fileExists = PETSC_FALSE;
790 ierr = PetscTestFile(filename, 'r', &fileExists); CHKERRQ(ierr);
791
792 if (!fileExists) {
793 // Set a special value to indicate file not found, then broadcast it.
794 N_file = -1;
795 LOG_ALLOW(GLOBAL, LOG_ERROR, "Rank 0: Mandatory reference file '%s' not found for timestep %d.\n", filename, ti);
796 } else {
797 PetscViewer viewer;
798 Vec tmpVec;
799 PetscInt vecSize;
800
801 ierr = VecCreate(PETSC_COMM_SELF, &tmpVec); CHKERRQ(ierr); // Create a SEQUENTIAL vector
802 ierr = PetscViewerBinaryOpen(PETSC_COMM_SELF, filename, FILE_MODE_READ, &viewer); CHKERRQ(ierr);
803 ierr = VecLoad(tmpVec, viewer); CHKERRQ(ierr);
804 ierr = PetscViewerDestroy(&viewer); CHKERRQ(ierr);
805
806 ierr = VecGetSize(tmpVec, &vecSize); CHKERRQ(ierr);
807 ierr = VecDestroy(&tmpVec); CHKERRQ(ierr);
808
809 if (vecSize % bs != 0) {
810 N_file = -2; // Special error code for bad file format
811 LOG_ALLOW(GLOBAL, LOG_ERROR, "Rank 0: Vector size %d from file '%s' is not divisible by block size %d.\n", vecSize, filename, bs);
812 } else {
813 N_file = vecSize / bs;
814 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Rank 0: Found %d particles in file.\n", N_file);
815 }
816 }
817 }
818
819 // --- Broadcast the particle count (or error code) from Rank 0 to all other ranks ---
820 ierr = MPI_Bcast(&N_file, 1, MPIU_INT, 0, comm); CHKERRMPI(ierr);
821
822 // --- All ranks check for errors and abort if necessary ---
823 if (N_file == -1) {
824 SETERRQ(comm, PETSC_ERR_FILE_OPEN, "Mandatory reference file '%s' not found for timestep %d (as determined by Rank 0).", filename, ti);
825 }
826 if (N_file == -2) {
827 SETERRQ(comm, PETSC_ERR_FILE_READ, "Reference file '%s' has incorrect format (as determined by Rank 0).", filename);
828 }
829 if (N_file < 0) {
830 SETERRQ(comm, PETSC_ERR_PLIB, "Received invalid particle count %d from Rank 0.", N_file);
831 }
832
833
834 // --- Now all ranks have the correct N_file, compare and resize if needed ---
835 ierr = DMSwarmGetSize(user->swarm, &N_current); CHKERRQ(ierr);
836
837 if (N_file != N_current) {
838 LOG_ALLOW(GLOBAL, LOG_INFO, "Swarm size %d differs from file size %d. Resizing swarm globally.\n", N_current, N_file);
839 ierr = ResizeSwarmGlobally(user->swarm, N_file); CHKERRQ(ierr);
840 } else {
841 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Swarm size (%d) already matches file size. No resize needed.\n", N_current);
842 }
843
844 // Also update the context
845 user->simCtx->np = N_file;
846
848 PetscFunctionReturn(0);
849}
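
// ---------------------------------------------------------------------------
// Usage sketch (illustrative only): a typical restart/post-processing read
// path. The swarm is resized to match the particle count stored on disk for
// timestep `ti` BEFORE any per-particle fields are loaded; the subsequent
// field-reading routine is whatever this codebase provides and is not shown.
// The "dat" extension mirrors the example in the documentation above.
// ---------------------------------------------------------------------------
static PetscErrorCode PrepareSwarmForRead_Example(UserCtx *user, PetscInt ti)
{
  PetscErrorCode ierr;

  PetscFunctionBeginUser;
  ierr = PreCheckAndResizeSwarm(user, ti, "dat"); CHKERRQ(ierr);
  PetscFunctionReturn(0);
}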
850
851
852#undef __FUNCT__
853#define __FUNCT__ "ReinitializeParticlesOnInletSurface"
854/**
855 * @brief Re-initializes the positions of particles currently on this rank if this rank owns
856 * part of the designated inlet surface.
857 *
 858 * This function is intended for `user->simCtx->ParticleInitialization == 0` or `3` (surface initialization modes)
859 * and is typically called after an initial migration step (e.g., in `PerformInitialSetup`).
860 * It ensures that all particles that should originate from the inlet surface and are now
861 * on the correct MPI rank are properly distributed across that rank's portion of the inlet.
862 *
863 * @param user Pointer to the UserCtx structure, containing simulation settings and grid information.
864 * @param currentTime Current simulation time (used for logging).
865 * @param step Current simulation step number (used for logging).
866 * @return PetscErrorCode 0 on success, non-zero on failure.
867 */
868PetscErrorCode ReinitializeParticlesOnInletSurface(UserCtx *user, PetscReal currentTime, PetscInt step)
869{
870 PetscErrorCode ierr;
871 PetscMPIInt rank; // MPI rank of the current process
872 DM swarm = user->swarm; // The particle swarm DM
873 PetscReal *positions_field = NULL; // Pointer to swarm field for physical positions
874 PetscInt64 *particleIDs = NULL; // Pointer to swarm field for Particle IDs (for logging)
875 PetscInt *cell_ID_field = NULL; // Pointer to swarm field for Cell IDs (for resetting after migration)
876 const Cmpnts ***coor_nodes_local_array; // Read-only access to local node coordinates
877 Vec Coor_local; // Local vector for node coordinates
878 DMDALocalInfo info; // Local grid information (node-based) from user->da
879 PetscInt xs_gnode_rank, ys_gnode_rank, zs_gnode_rank; // Local starting node indices (incl. ghosts) of rank's DA
880 PetscInt IM_nodes_global, JM_nodes_global, KM_nodes_global; // Global node counts
881
882 PetscRandom rand_logic_reinit_i, rand_logic_reinit_j, rand_logic_reinit_k; // RNGs for re-placement
883 PetscInt nlocal_current; // Number of particles currently on this rank
884 PetscInt particles_actually_reinitialized_count = 0; // Counter for logging
885 PetscBool can_this_rank_service_inlet = PETSC_FALSE; // Flag
886
887 PetscFunctionBeginUser;
888
890
891 // This function is only relevant for surface initialization mode and if an inlet face is defined.
892 if ((user->simCtx->ParticleInitialization != 0 && user->simCtx->ParticleInitialization !=3) || !user->inletFaceDefined) {
893 PetscFunctionReturn(0);
894 }
895
896 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
897 ierr = DMSwarmGetLocalSize(swarm, &nlocal_current); CHKERRQ(ierr);
898
899 // If no particles on this rank, nothing to do.
900 if (nlocal_current == 0) {
901 LOG_ALLOW(LOCAL, LOG_DEBUG, "[T=%.4f, Step=%d] Rank %d has no local particles to re-initialize on inlet.\n", currentTime, step, rank);
902 PetscFunctionReturn(0);
903 }
904
905 // Get DMDA information for the node-centered coordinate grid (user->da)
906 ierr = DMDAGetLocalInfo(user->da, &info); CHKERRQ(ierr);
907 ierr = DMDAGetInfo(user->da, NULL, &IM_nodes_global, &JM_nodes_global, &KM_nodes_global, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL); CHKERRQ(ierr);
908 ierr = DMDAGetGhostCorners(user->da, &xs_gnode_rank, &ys_gnode_rank, &zs_gnode_rank, NULL, NULL, NULL); CHKERRQ(ierr);
909
910 // Modification to IM_nodes_global etc. to account for 1-cell halo in each direction.
911 IM_nodes_global -= 1; JM_nodes_global -= 1; KM_nodes_global -= 1;
912
913 const PetscInt IM_cells_global = IM_nodes_global > 0 ? IM_nodes_global - 1 : 0;
914 const PetscInt JM_cells_global = JM_nodes_global > 0 ? JM_nodes_global - 1 : 0;
915 const PetscInt KM_cells_global = KM_nodes_global > 0 ? KM_nodes_global - 1 : 0;
916
917
918
919 // Check if this rank is responsible for (part of) the designated inlet surface
920 ierr = CanRankServiceInletFace(user, &info, IM_nodes_global, JM_nodes_global, KM_nodes_global, &can_this_rank_service_inlet); CHKERRQ(ierr);
921
922 // Get coordinate array and swarm fields for modification
923 ierr = DMGetCoordinatesLocal(user->da, &Coor_local); CHKERRQ(ierr);
924 ierr = DMDAVecGetArrayRead(user->fda, Coor_local, (void*)&coor_nodes_local_array); CHKERRQ(ierr);
925 ierr = DMSwarmGetField(swarm, "position", NULL, NULL, (void**)&positions_field); CHKERRQ(ierr);
926 ierr = DMSwarmGetField(swarm, "DMSwarm_pid", NULL, NULL, (void**)&particleIDs); CHKERRQ(ierr); // For logging
927 ierr = DMSwarmGetField(swarm, "DMSwarm_CellID", NULL, NULL, (void**)&cell_ID_field); CHKERRQ(ierr);
928
929 if (!can_this_rank_service_inlet) {
930 LOG_ALLOW(LOCAL, LOG_DEBUG, "[T=%.4f, Step=%d] Rank %d cannot service inlet face %s. Skipping re-initialization of %d particles.\n", currentTime, step, rank, BCFaceToString(user->identifiedInletBCFace), nlocal_current);
931
932 // FALLBACK ACTION: Reset position fields to Inlet center for migration and cell ID to -1 for safety.
933 LOG_ALLOW(LOCAL, LOG_DEBUG, "[T=%.4f, Step=%d] Rank %d is resetting %d local particles to inlet center (%.6f, %.6f, %.6f) for migration.\n", currentTime, step, rank, nlocal_current, user->simCtx->CMx_c, user->simCtx->CMy_c, user->simCtx->CMz_c);
934
935 for(PetscInt p = 0; p < nlocal_current; p++){
936 positions_field[3*p+0] = user->simCtx->CMx_c;
937 positions_field[3*p+1] = user->simCtx->CMy_c;
938 positions_field[3*p+2] = user->simCtx->CMz_c;
939
940 cell_ID_field[3*p+0] = -1;
941 cell_ID_field[3*p+1] = -1;
942 cell_ID_field[3*p+2] = -1;
943 }
944
945 // Cleanup: restore swarm fields/coordinate array
946 ierr = DMDAVecRestoreArrayRead(user->fda, Coor_local, (void*)&coor_nodes_local_array); CHKERRQ(ierr);
947 ierr = DMSwarmRestoreField(swarm, "position", NULL, NULL, (void**)&positions_field); CHKERRQ(ierr);
948 ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void**)&particleIDs); CHKERRQ(ierr); // For logging
949 ierr = DMSwarmRestoreField(swarm, "DMSwarm_CellID", NULL, NULL, (void**)&cell_ID_field); CHKERRQ(ierr);
951 PetscFunctionReturn(0);
952 }
953
954 LOG_ALLOW(GLOBAL, LOG_INFO, "[T=%.4f, Step=%d] Rank %d is on inlet face %s. Attempting to re-place %d local particles.\n", currentTime, step, rank, BCFaceToString(user->identifiedInletBCFace), nlocal_current);
955
956 // Initialize fresh RNGs for this re-placement to ensure good distribution
957 ierr = InitializeLogicalSpaceRNGs(&rand_logic_reinit_i, &rand_logic_reinit_j, &rand_logic_reinit_k); CHKERRQ(ierr);
958 // Optional: Seed RNGs for deterministic behavior if required, e.g., based on rank and step.
959 // PetscRandomSetSeed(rand_logic_i, (unsigned long)rank*1000 + step + 100); PetscRandomSeed(rand_logic_i); // Example
960
961 // Loop over all particles currently local to this rank
962 for (PetscInt p = 0; p < nlocal_current; p++) {
963 PetscInt ci_metric_lnode, cj_metric_lnode, ck_metric_lnode; // Local node indices (of rank's DA patch) for cell origin
964 PetscReal xi_metric_logic, eta_metric_logic, zta_metric_logic; // Intra-cell logical coordinates
965 Cmpnts phys_coords; // To store newly calculated physical coordinates
966
967 if(user->simCtx->ParticleInitialization == 0){
968 // Get random cell on this rank's portion of the inlet and random logical coords within it
969 ierr = GetRandomCellAndLogicalCoordsOnInletFace(user, &info, xs_gnode_rank, ys_gnode_rank, zs_gnode_rank,
970 IM_nodes_global, JM_nodes_global, KM_nodes_global,
971 &rand_logic_reinit_i, &rand_logic_reinit_j, &rand_logic_reinit_k,
972 &ci_metric_lnode, &cj_metric_lnode, &ck_metric_lnode,
973 &xi_metric_logic, &eta_metric_logic, &zta_metric_logic); CHKERRQ(ierr);
974 }else if(user->simCtx->ParticleInitialization == 3){
975 PetscBool garbage_flag = PETSC_FALSE;
976 ierr = GetDeterministicFaceGridLocation(user, &info, xs_gnode_rank, ys_gnode_rank, zs_gnode_rank,
977 IM_cells_global, JM_cells_global, KM_cells_global,
978 particleIDs[p],
979 &ci_metric_lnode, &cj_metric_lnode, &ck_metric_lnode,
980 &xi_metric_logic, &eta_metric_logic, &zta_metric_logic,&garbage_flag); CHKERRQ(ierr);
981 }else{
982 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "ReinitializeParticlesOnInletSurface only supports ParticleInitialization modes 0 and 3.");
983 }
984 // Convert these logical coordinates to physical coordinates
985 ierr = MetricLogicalToPhysical(user, coor_nodes_local_array,
986 ci_metric_lnode, cj_metric_lnode, ck_metric_lnode,
987 xi_metric_logic, eta_metric_logic, zta_metric_logic,
988 &phys_coords); CHKERRQ(ierr);
989
990 // Update the particle's position in the swarm fields
991 positions_field[3*p+0] = phys_coords.x;
992 positions_field[3*p+1] = phys_coords.y;
993 positions_field[3*p+2] = phys_coords.z;
994
995 particles_actually_reinitialized_count++;
996
997 cell_ID_field[3*p+0] = -1;
998 cell_ID_field[3*p+1] = -1;
999 cell_ID_field[3*p+2] = -1;
1000
1001 LOG_LOOP_ALLOW(LOCAL, LOG_VERBOSE, p, (nlocal_current > 20 ? nlocal_current/10 : 1), // Sampled logging
1002 "Rank %d: PID %ld (idx %ld) RE-PLACED. CellOriginNode(locDAIdx):(%d,%d,%d). LogicCoords: (%.2e,%.2f,%.2f). PhysCoords: (%.6f,%.6f,%.6f).\n",
1003 rank, particleIDs[p], (long)p,
1004 ci_metric_lnode, cj_metric_lnode, ck_metric_lnode,
1005 xi_metric_logic, eta_metric_logic, zta_metric_logic,
1006 phys_coords.x, phys_coords.y, phys_coords.z);
1007 }
1008
1009 // Logging summary of re-initialization
1010 if (particles_actually_reinitialized_count > 0) {
1011 LOG_ALLOW(GLOBAL, LOG_INFO, "[T=%.4f, Step=%d] Rank %d (on inlet face %d) successfully re-initialized %d of %d local particles.\n", currentTime, step, rank, user->identifiedInletBCFace, particles_actually_reinitialized_count, nlocal_current);
1012 } else if (nlocal_current > 0) { // This case should ideally not be hit if can_this_rank_service_inlet was true and particles were present.
1013 LOG_ALLOW(GLOBAL, LOG_WARNING, "[T=%.4f, Step=%d] Rank %d claimed to service inlet face %d, but re-initialized 0 of %d local particles. This may indicate an issue if particles were expected to be re-placed.\n", currentTime, step, rank, user->identifiedInletBCFace, nlocal_current);
1014 }
1015
1016 // Cleanup: Destroy RNGs and restore swarm fields/coordinate array
1017 ierr = PetscRandomDestroy(&rand_logic_reinit_i); CHKERRQ(ierr);
1018 ierr = PetscRandomDestroy(&rand_logic_reinit_j); CHKERRQ(ierr);
1019 ierr = PetscRandomDestroy(&rand_logic_reinit_k); CHKERRQ(ierr);
1020
1021 ierr = DMSwarmRestoreField(swarm, "position", NULL, NULL, (void**)&positions_field); CHKERRQ(ierr);
1022 ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void**)&particleIDs); CHKERRQ(ierr);
1023 ierr = DMSwarmRestoreField(swarm, "DMSwarm_CellID", NULL, NULL, (void**)&cell_ID_field); CHKERRQ(ierr);
1024 ierr = DMDAVecRestoreArrayRead(user->fda, Coor_local, (void*)&coor_nodes_local_array); CHKERRQ(ierr);
1025
1026
1028 PetscFunctionReturn(0);
1029}
1030
1031// Comparison function needed for qsort
1032static int compare_PetscInt64(const void *a, const void *b) {
1033 PetscInt64 val_a = *(const PetscInt64*)a;
1034 PetscInt64 val_b = *(const PetscInt64*)b;
1035 if (val_a < val_b) return -1;
1036 if (val_a > val_b) return 1;
1037 return 0;
1038}
1039
1040#undef __FUNCT__
1041#define __FUNCT__ "GetLocalPIDSnapshot"
1042/**
1043 * @brief Creates a sorted snapshot of all Particle IDs (PIDs) from a raw data array.
1044 * @ingroup ParticleUtils
1045 *
1046 * This function is a crucial helper for the migration process. It captures the state of
1047 * which particles are on the current MPI rank *before* migration occurs by taking a
1048 * pointer to the swarm's raw PID data array. The resulting sorted array can then be used
1049 * with an efficient binary search to quickly identify newcomer particles after migration.
1050 *
1051 * This function does NOT call DMSwarmGetField/RestoreField. It is the caller's
1052 * responsibility to acquire the `pid_field` pointer before calling and restore it afterward.
1053 *
1054 * @param[in] pid_field A read-only pointer to the raw array of PIDs for the local swarm.
1055 * @param[in] n_local The number of particles currently on the local rank.
1056 * @param[out] pids_snapshot_out A pointer to a `PetscInt64*` array. This function will
1057 * allocate memory for this array, and the caller is
1058 * responsible for freeing it with `PetscFree()` when it
1059 * is no longer needed.
1060 *
1061 * @return PetscErrorCode 0 on success, or a non-zero PETSc error code on failure.
1062 */
1063PetscErrorCode GetLocalPIDSnapshot(const PetscInt64 pid_field[],
1064 PetscInt n_local,
1065 PetscInt64 **pids_snapshot_out)
1066{
1067 PetscErrorCode ierr;
1068 PetscMPIInt rank;
1069
1070 PetscFunctionBeginUser;
1071
1073
1074 // --- 1. Input Validation ---
1075 if (!pids_snapshot_out) {
1076 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "Output pointer pids_snapshot_out is NULL.");
1077 }
1078 // If n_local > 0, pid_field must not be NULL.
1079 if (n_local > 0 && !pid_field) {
1080 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "Input pid_field pointer is NULL for n_local > 0.");
1081 }
1082
1083 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
1084 LOG_ALLOW(LOCAL, LOG_DEBUG, "[Rank %d]: Creating PID snapshot for %d local particles.\n", rank, n_local);
1085
1086 // If there are no local particles, the snapshot is empty (NULL).
1087 if (n_local == 0) {
1088 *pids_snapshot_out = NULL;
1089
1091 PetscFunctionReturn(0);
1092 }
1093
1094 // --- 2. Allocate Memory for the Snapshot ---
1095 ierr = PetscMalloc1(n_local, pids_snapshot_out); CHKERRQ(ierr);
1096
1097 // --- 3. Copy Data ---
1098 // Perform a fast memory copy from the provided array to our new snapshot array.
1099 ierr = PetscMemcpy(*pids_snapshot_out, pid_field, n_local * sizeof(PetscInt64)); CHKERRQ(ierr);
1100 LOG_ALLOW(LOCAL, LOG_DEBUG, "[Rank %d]: Copied %d PIDs.\n", rank, n_local);
1101
1102 // --- 4. Sort the Snapshot Array ---
1103 // Sorting enables fast binary search lookups later.
1104 ierr = PetscSortInt64(n_local, *pids_snapshot_out); CHKERRQ(ierr);
1105 LOG_ALLOW(LOCAL, LOG_DEBUG, "[Rank %d]: PID snapshot sorted successfully.\n", rank);
1106
1107
1109 PetscFunctionReturn(0);
1110}
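
// ---------------------------------------------------------------------------
// Usage sketch (illustrative only): taking a pre-migration PID snapshot. The
// caller owns both the field access (Get/Restore) and the snapshot memory,
// which must eventually be released with PetscFree().
// ---------------------------------------------------------------------------
static PetscErrorCode SnapshotPIDs_Example(DM swarm, PetscInt *n_before_out, PetscInt64 **pids_before_out)
{
  PetscErrorCode ierr;
  PetscInt64    *pid_field = NULL;
  PetscInt       n_local;

  PetscFunctionBeginUser;
  ierr = DMSwarmGetLocalSize(swarm, &n_local); CHKERRQ(ierr);
  ierr = DMSwarmGetField(swarm, "DMSwarm_pid", NULL, NULL, (void**)&pid_field); CHKERRQ(ierr);
  ierr = GetLocalPIDSnapshot(pid_field, n_local, pids_before_out); CHKERRQ(ierr);
  ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void**)&pid_field); CHKERRQ(ierr);
  *n_before_out = n_local;
  // After migration, pass (n_local, *pids_before_out) to FlagNewcomersForLocation,
  // then free the snapshot with PetscFree(*pids_before_out).
  PetscFunctionReturn(0);
}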
1111
1112#undef __FUNCT__
1113#define __FUNCT__ "AddToMigrationList"
1114/**
1115 * @brief Safely adds a new migration task to a dynamically sized list.
1116 *
1117 * This utility function manages a dynamic array of MigrationInfo structs. It appends
1118 * a new entry to the list and automatically doubles the array's capacity using
1119 * `PetscRealloc` if the current capacity is exceeded. This prevents buffer overflows
1120 * and avoids the need to know the number of migrating particles in advance.
1121 *
1122 * @param[in,out] migration_list_p A pointer to the MigrationInfo array pointer. The function
1123 * will update this pointer if the array is reallocated.
1124 * @param[in,out] capacity_p A pointer to an integer holding the current allocated
1125 * capacity of the list (in number of elements). This will be
1126 * updated upon reallocation.
1127 * @param[in,out] count_p A pointer to an integer holding the current number of
1128 * items in the list. This will be incremented by one.
1129 * @param[in] particle_local_idx The local index (from 0 to nlocal-1) of the particle
1130 * that needs to be migrated.
1131 * @param[in] destination_rank The target MPI rank for the particle.
1132 *
1133 * @return PetscErrorCode 0 on success, or a non-zero PETSc error code on failure (e.g., from memory allocation).
1134 */
1135PetscErrorCode AddToMigrationList(MigrationInfo **migration_list_p,
1136 PetscInt *capacity_p,
1137 PetscInt *count_p,
1138 PetscInt particle_local_idx,
1139 PetscMPIInt destination_rank)
1140{
1141 PetscErrorCode ierr;
1142 PetscMPIInt rank;
1143
1144 PetscFunctionBeginUser;
1145
1147
1148 // --- 1. Input Validation ---
1149 if (!migration_list_p || !capacity_p || !count_p) {
1150 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "Null pointer provided to AddToMigrationList for list management.");
1151 }
1152
1153 // --- 2. Check if the list needs to be resized ---
1154 if (*count_p >= *capacity_p) {
1155 PetscInt old_capacity = *capacity_p;
1156 // Start with a reasonable base capacity, then double for subsequent reallocations.
1157 PetscInt new_capacity = (old_capacity == 0) ? 16 : old_capacity * 2;
1158
1159 // Use PetscRealloc for safe memory reallocation.
1160 // It handles allocating new memory, copying old data, and freeing the old block.
1161 // The first argument to PetscRealloc is the new size in BYTES.
1162 ierr = PetscRealloc(new_capacity * sizeof(MigrationInfo), migration_list_p); CHKERRQ(ierr);
1163
1164 *capacity_p = new_capacity; // Update the capacity tracker
1165
1166 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
1167 LOG_ALLOW(LOCAL, LOG_DEBUG, "[Rank %d]: Reallocated migrationList capacity from %d to %d.\n",
1168 rank, old_capacity, new_capacity);
1169 }
1170
1171 // --- 3. Add the new migration data to the list ---
1172 // Dereference the pointer-to-a-pointer to get the actual array.
1173 MigrationInfo *list = *migration_list_p;
1174
1175 list[*count_p].local_index = particle_local_idx;
1176 list[*count_p].target_rank = destination_rank;
1177
1178 // --- 4. Increment the count of items in the list ---
1179 (*count_p)++;
1180
1181
1183 PetscFunctionReturn(0);
1184}
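
// ---------------------------------------------------------------------------
// Usage sketch (illustrative only): building a migration list incrementally.
// `target_ranks` stands in for the output of the caller's location logic
// (one destination rank per local particle); it is an assumption of this
// example, not something produced by this file.
// ---------------------------------------------------------------------------
static PetscErrorCode BuildMigrationList_Example(UserCtx *user,
                                                 const PetscMPIInt target_ranks[],
                                                 MigrationInfo **list_p,
                                                 PetscInt *count_p)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank;
  PetscInt       nLocal, capacity = 0;

  PetscFunctionBeginUser;
  *list_p = NULL; *count_p = 0;
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
  ierr = DMSwarmGetLocalSize(user->swarm, &nLocal); CHKERRQ(ierr);
  for (PetscInt p = 0; p < nLocal; ++p) {
    if (target_ranks[p] != rank) {  // particle p belongs on another rank
      ierr = AddToMigrationList(list_p, &capacity, count_p, p, target_ranks[p]); CHKERRQ(ierr);
    }
  }
  // The caller then runs SetMigrationRanks()/PerformMigration() and frees the
  // list with PetscFree(*list_p) once the migration is complete.
  PetscFunctionReturn(0);
}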
1185
1186
1187#undef __FUNCT__
1188#define __FUNCT__ "FlagNewcomersForLocation"
1189/**
1190 * @brief Identifies newly arrived particles after migration and flags them for a location search.
1191 * @ingroup ParticleMotion
1192 *
1193 * This function is a critical component of the iterative migration process managed by
1194 * the main particle settlement orchestrator (e.g., `SettleParticles`). After a
1195 * `DMSwarmMigrate` call, each rank's local particle list is a new mix of resident
1196 * particles and newly received ones. This function's job is to efficiently identify
1197 * these "newcomers" and set their `DMSwarm_location_status` field to `NEEDS_LOCATION`.
1198 *
1199 * This ensures that in the subsequent pass of the migration `do-while` loop, only the
1200 * newly arrived particles are processed by the expensive location algorithm, preventing
1201 * redundant work on particles that are already settled on the current rank.
1202 *
1203 * The identification is done by comparing the PIDs of particles currently on the rank
1204 * against a "snapshot" of PIDs taken *before* the migration occurred.
1205 *
1206 * @param[in] swarm The DMSwarm object, which has just completed a migration.
1207 * @param[in] n_local_before The number of particles that were on this rank *before* the
1208 * migration was performed.
1209 * @param[in] pids_before A pre-sorted array of the PIDs that were on this rank before
1210 * the migration. This is used for fast lookups.
1211 *
1212 * @return PetscErrorCode 0 on success, or a non-zero PETSc error code on failure.
1213 *
1214 * @note This function assumes the `pids_before` array is sorted in ascending order to
1215 * enable the use of an efficient binary search.
1216 */
1217PetscErrorCode FlagNewcomersForLocation(DM swarm,
1218 PetscInt n_local_before,
1219 const PetscInt64 pids_before[])
1220{
1221 PetscErrorCode ierr;
1222 PetscMPIInt rank;
1223 PetscInt n_local_after;
1224 PetscInt newcomer_count = 0;
1225
1226 // Pointers to the swarm data fields we will read and modify
1227 PetscInt64 *pid_field_after = NULL;
1228 PetscInt *status_field_after = NULL;
1229 PetscInt *cell_field_after = NULL;
1230
1231 PetscFunctionBeginUser;
1232
1234
1235 // --- 1. Input Validation and Basic Setup ---
1236 if (!swarm) {
1237 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Input DMSwarm is NULL in FlagNewcomersForLocation.");
1238 }
1239 // If n_local_before > 0, the corresponding PID array must not be null.
1240 if (n_local_before > 0 && !pids_before) {
1241 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "Input pids_before array is NULL for n_local_before > 0.");
1242 }
1243
1244 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
1245
1246 // Get the number of particles on this rank *after* the migration.
1247 ierr = DMSwarmGetLocalSize(swarm, &n_local_after); CHKERRQ(ierr);
1248
1249 LOG_ALLOW(LOCAL, LOG_DEBUG, "[Rank %d]: Checking for newcomers. Size before: %d, Size after: %d\n",
1250 rank, n_local_before, n_local_after);
1251
1252 // If there are no particles now, there's nothing to do.
1253 if (n_local_after == 0) {
1254 PetscFunctionReturn(0);
1255 }
1256
1257 // --- 2. Access Swarm Data ---
1258 // Get read-only access to the PIDs and read-write access to the status field.
1259 ierr = DMSwarmGetField(swarm, "DMSwarm_pid", NULL, NULL, (void**)&pid_field_after); CHKERRQ(ierr);
1260 ierr = DMSwarmGetField(swarm, "DMSwarm_location_status", NULL, NULL, (void**)&status_field_after); CHKERRQ(ierr);
1261 ierr = DMSwarmGetField(swarm, "DMSwarm_CellID", NULL, NULL, (void**)&cell_field_after); CHKERRQ(ierr);
1262 if (!pid_field_after || !status_field_after) {
1263 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Failed to get required swarm fields in FlagNewcomersForLocation.");
1264 }
1265
1266 // --- 3. Identify and Flag Newcomers ---
1267 // Loop through all particles currently on this rank.
1268 for (PetscInt p_idx = 0; p_idx < n_local_after; ++p_idx) {
1269 PetscInt64 current_pid = pid_field_after[p_idx];
1270 PetscBool is_found_in_before_list;
1271
1272 // Use our custom, efficient helper function for the lookup.
1273 ierr = BinarySearchInt64(n_local_before, pids_before, current_pid, &is_found_in_before_list); CHKERRQ(ierr);
1274
1275 // If the PID was NOT found in the "before" list, it must be a newcomer.
1276 if (!is_found_in_before_list) {
1277 // Flag it for processing in the next pass of the migration loop.
1278 status_field_after[p_idx] = NEEDS_LOCATION;
1279 // cell_field_after[3*p_idx+0] = -1;
1280 // cell_field_after[3*p_idx+1] = -1;
1281 // cell_field_after[3*p_idx+2] = -1;
1282 newcomer_count++;
1283
1284 LOG_ALLOW(LOCAL, LOG_VERBOSE, "[Rank %d]: Flagged newcomer PID %ld at local index %d as NEEDS_LOCATION.\n",
1285 rank, current_pid, p_idx);
1286 }
1287 }
1288
1289 // --- 4. Restore Swarm Fields ---
1290 // Release the locks on the swarm data arrays.
1291 ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void**)&pid_field_after); CHKERRQ(ierr);
1292 ierr = DMSwarmRestoreField(swarm, "DMSwarm_location_status", NULL, NULL, (void**)&status_field_after); CHKERRQ(ierr);
1293 ierr = DMSwarmRestoreField(swarm, "DMSwarm_CellID", NULL, NULL, (void**)&cell_field_after); CHKERRQ(ierr);
1294
1295 if (newcomer_count > 0) {
1296 LOG_ALLOW(LOCAL, LOG_INFO, "[Rank %d]: Identified and flagged %d newcomers.\n", rank, newcomer_count);
1297 }
1298
1299
1301 PetscFunctionReturn(0);
1302}
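/*
 * Illustrative sequence sketch (excluded from compilation): the snapshot -> migrate ->
 * flag ordering that FlagNewcomersForLocation() relies on. GetLocalPIDSnapshot(),
 * SetMigrationRanks() and PerformMigration() are the helpers declared in
 * ParticleMotion.h; `migrationList`/`migrationCount` are assumed to have been built
 * with AddToMigrationList() as sketched above.
 */
#if 0
static PetscErrorCode Example_MigrateAndFlag(UserCtx *user, MigrationInfo *migrationList, PetscInt migrationCount)
{
  PetscErrorCode ierr;
  PetscInt       nBefore;
  PetscInt64    *pidsBefore = NULL;
  PetscInt64    *pid_p      = NULL;

  PetscFunctionBeginUser;
  /* 1. Snapshot (sorted) the PIDs resident on this rank BEFORE migration. */
  ierr = DMSwarmGetLocalSize(user->swarm, &nBefore); CHKERRQ(ierr);
  ierr = DMSwarmGetField(user->swarm, "DMSwarm_pid", NULL, NULL, (void**)&pid_p); CHKERRQ(ierr);
  ierr = GetLocalPIDSnapshot(pid_p, nBefore, &pidsBefore); CHKERRQ(ierr);
  ierr = DMSwarmRestoreField(user->swarm, "DMSwarm_pid", NULL, NULL, (void**)&pid_p); CHKERRQ(ierr);

  /* 2. Perform the MPI migration of the flagged particles. */
  ierr = SetMigrationRanks(user, migrationList, migrationCount); CHKERRQ(ierr);
  ierr = PerformMigration(user); CHKERRQ(ierr);

  /* 3. Any PID not present in the snapshot is a newcomer: flag it NEEDS_LOCATION. */
  ierr = FlagNewcomersForLocation(user->swarm, nBefore, pidsBefore); CHKERRQ(ierr);

  ierr = PetscFree(pidsBefore); CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif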
1303
1304#undef __FUNCT__
1305#define __FUNCT__ "GuessParticleOwnerWithBBox"
1306/**
1307 * @brief Provides a fast, heuristic-based guess for a particle's owner rank using bounding boxes.
1308 * @ingroup ParticleLocation
1309 *
1310 * This function is part of the "Guess and Verify" strategy, called only for "lost"
1311 * particles. It attempts to find a candidate owner by checking which rank's bounding box
1312 * contains the particle's physical position.
1313 *
1314 * To optimize the search, it uses the particle's position relative to the local
1315 * bounding box to intelligently check the most likely neighboring ranks first.
1316 * For example, if a particle's x-coordinate is less than the local minimum x, it
1317 * will check the -X neighbor first. If no owner is found in the immediate neighbors,
1318 * it performs a full search of all other ranks as a fallback.
1319 *
1320 * @param[in] user Pointer to the UserCtx, which must contain the pre-computed
1321 * local `bbox` and the `neighbors` struct.
1322 * @param[in] particle A pointer to the particle whose owner is being guessed.
1323 * @param[in] bboxlist An array of BoundingBox structures for ALL MPI ranks, indexed 0 to (size-1).
1324 * This array must be up-to-date and available on all ranks.
1325 * @param[out] guess_rank_out A pointer to a PetscMPIInt. Set to the candidate owner's rank
1326 * if found, otherwise set to MPI_PROC_NULL.
1327 *
1328 * @return PetscErrorCode 0 on success, or a non-zero PETSc error code on failure.
1329 */
1330PetscErrorCode GuessParticleOwnerWithBBox(UserCtx *user,
1331                                          const Particle *particle,
1332 const BoundingBox *bboxlist,
1333 PetscMPIInt *guess_rank_out)
1334{
1335 PetscErrorCode ierr;
1336 PetscMPIInt rank, size;
1337 const RankNeighbors *neighbors = &user->neighbors; // Use a direct pointer for clarity
1338 const BoundingBox *localBBox = &user->bbox;
1339
1340 PetscFunctionBeginUser;
1341
1343
1344 // --- 1. Input Validation and Setup ---
1345 if (!user || !particle || !guess_rank_out || !bboxlist) {
1346 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "Null pointer provided to GuessParticleOwnerWithBBox.");
1347 }
1348 if (!localBBox || !neighbors) {
1349 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Required user->bbox or user->neighbors is not initialized.");
1350 }
1351
1352 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
1353 ierr = MPI_Comm_size(PETSC_COMM_WORLD, &size); CHKERRQ(ierr);
1354
1355 *guess_rank_out = MPI_PROC_NULL; // Default to "not found"
1356
1357 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld]: Starting guess for particle at (%.3f, %.3f, %.3f).\n",
1358 particle->PID, particle->loc.x, particle->loc.y, particle->loc.z);
1359
1360 // --- Step 0: Check if the particle is inside the CURRENT rank's bounding box FIRST. ---
1361 // This handles the common case of initial placement where a particle is "lost" but physically local.
1362 if (IsParticleInBox(localBBox, &particle->loc)) {
1363 *guess_rank_out = rank;
1364 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld]: Fast path guess SUCCESS. Particle is within the local (Rank %d) bounding box.\n",
1365 particle->PID, rank);
1366
1368 PetscFunctionReturn(0); // Found it, we're done.
1369 }
1370 // --- 2. Fast Path: Check Immediate Neighbors Based on Exit Direction ---
1371
1372 // Determine likely exit direction(s) to prioritize neighbor check
1373 PetscBool exit_xm = particle->loc.x < localBBox->min_coords.x;
1374 PetscBool exit_xp = particle->loc.x > localBBox->max_coords.x;
1375 PetscBool exit_ym = particle->loc.y < localBBox->min_coords.y;
1376 PetscBool exit_yp = particle->loc.y > localBBox->max_coords.y;
1377 PetscBool exit_zm = particle->loc.z < localBBox->min_coords.z;
1378 PetscBool exit_zp = particle->loc.z > localBBox->max_coords.z;
1379
1380 if (exit_xm && neighbors->rank_xm != MPI_PROC_NULL && IsParticleInBox(&bboxlist[neighbors->rank_xm], &particle->loc)) {
1381 *guess_rank_out = neighbors->rank_xm;
1382 } else if (exit_xp && neighbors->rank_xp != MPI_PROC_NULL && IsParticleInBox(&bboxlist[neighbors->rank_xp], &particle->loc)) {
1383 *guess_rank_out = neighbors->rank_xp;
1384 } else if (exit_ym && neighbors->rank_ym != MPI_PROC_NULL && IsParticleInBox(&bboxlist[neighbors->rank_ym], &particle->loc)) {
1385 *guess_rank_out = neighbors->rank_ym;
1386 } else if (exit_yp && neighbors->rank_yp != MPI_PROC_NULL && IsParticleInBox(&bboxlist[neighbors->rank_yp], &particle->loc)) {
1387 *guess_rank_out = neighbors->rank_yp;
1388 } else if (exit_zm && neighbors->rank_zm != MPI_PROC_NULL && IsParticleInBox(&bboxlist[neighbors->rank_zm], &particle->loc)) {
1389 *guess_rank_out = neighbors->rank_zm;
1390 } else if (exit_zp && neighbors->rank_zp != MPI_PROC_NULL && IsParticleInBox(&bboxlist[neighbors->rank_zp], &particle->loc)) {
1391 *guess_rank_out = neighbors->rank_zp;
1392 }
1393 // Note: This does not handle corner/edge neighbors, which is why the fallback is essential.
1394
1395 if (*guess_rank_out != MPI_PROC_NULL) {
1396 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld]: Fast path guess SUCCESS. Found in immediate neighbor Rank %d.\n",
1397 particle->PID, *guess_rank_out);
1398
1400 PetscFunctionReturn(0); // Found it, we're done.
1401 }
1402
1403 // --- 3. Robust Fallback: Check All Other Ranks ---
1404 // If we get here, the particle was not in any of the immediate face neighbors' boxes.
1405 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld]: Not in immediate face neighbors. Starting global fallback search.\n",
1406 particle->PID);
1407
1408 for (PetscMPIInt r = 0; r < size; ++r) {
1409 if (r == rank) continue; // Don't check ourselves.
1410
1411 if (IsParticleInBox(&bboxlist[r], &particle->loc)) {
1412 PetscBool is_in = PETSC_TRUE; // containment already established by the IsParticleInBox check above
1413 // Detailed diagnostic print of the containment test, useful when debugging cross-rank ownership.
1414 LOG_ALLOW(LOCAL,LOG_VERBOSE, "[Rank %d] Checking PID %lld at (%.4f, %.4f, %.4f) against Rank %d's box: [(%.4f, %.4f, %.4f) to (%.4f, %.4f, %.4f)]. Result: %s\n",
1415 (int)rank, (long long)particle->PID,
1416 particle->loc.x, particle->loc.y, particle->loc.z,
1417 (int)r,
1418 bboxlist[r].min_coords.x, bboxlist[r].min_coords.y, bboxlist[r].min_coords.z,
1419 bboxlist[r].max_coords.x, bboxlist[r].max_coords.y, bboxlist[r].max_coords.z,
1420 is_in ? "INSIDE" : "OUTSIDE");
1421
1422 *guess_rank_out = r;
1423 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld]: Fallback search SUCCESS. Found in Rank %d.\n",
1424 particle->PID, *guess_rank_out);
1425
1427 PetscFunctionReturn(0); // Found it, we're done.
1428 }
1429 }
1430
1431 // If the code reaches here, the particle was not found in any rank's bounding box.
1432 LOG_ALLOW(LOCAL, LOG_WARNING, "[PID %ld]: Guess FAILED. Particle not found in any rank's bounding box.\n",
1433 particle->PID);
1434
1435 // guess_rank_out remains MPI_PROC_NULL, signaling failure to the caller.
1437 PetscFunctionReturn(0);
1438}
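/*
 * For reference, the guess above reduces to a per-rank axis-aligned containment test.
 * A minimal sketch of such a check is given here; the actual IsParticleInBox() helper
 * used above is defined earlier in this file and may differ in details such as
 * boundary tolerances.
 */
#if 0
static PetscBool Example_PointInAABB(const BoundingBox *bbox, const Cmpnts *pos)
{
  return (PetscBool)(pos->x >= bbox->min_coords.x && pos->x <= bbox->max_coords.x &&
                     pos->y >= bbox->min_coords.y && pos->y <= bbox->max_coords.y &&
                     pos->z >= bbox->min_coords.z && pos->z <= bbox->max_coords.z);
}
#endif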
1439
1440#undef __FUNCT__
1441#define __FUNCT__ "LocateAllParticlesInGrid"
1442/**
1443 * @brief Orchestrates the complete particle location and migration process for one timestep.
1444 * @ingroup ParticleLocation
1445 *
1446 * This function is the master orchestrator for ensuring every particle is on its correct
1447 * MPI rank and has a valid host cell index. It is designed to be called once per
1448 * timestep after particle positions have been updated.
1449 *
1450 * The function uses a robust, iterative "Guess and Verify" strategy within a
1451 * do-while loop to handle complex particle motion across processor boundaries,
1452 * especially on curvilinear grids.
1453 *
1454 * 1. **State Snapshot:** At the start of each pass, it captures a list of all Particle IDs (PIDs)
1455 * on the current rank.
1456 * 2. **"Guess" (Heuristic):** For particles that are "lost" (no valid host cell),
1457 * it first attempts a fast, bounding-box-based guess to find a potential new owner rank.
1458 * 3. **"Verify" (Robust Walk):** For all other particles, or if the guess fails,
1459 * it uses a robust cell-walking algorithm (`LocateParticleOrFindMigrationTarget`)
1460 * that determines the particle's status: located locally, needs migration, or is lost.
1461 * 4. **Migration:** After identifying all migrating particles on a pass, it performs the
1462 * MPI communication using the `SetMigrationRanks` and `PerformMigration` helpers.
1463 * 5. **Newcomer Flagging:** After migration, it uses the PID snapshot from step 1 to
1464 * efficiently identify newly arrived particles and flag them for location on the next pass.
1465 * 6. **Iteration:** The process repeats in a `do-while` loop until a pass occurs where
1466 * no particles migrate, ensuring the entire swarm is in a stable, consistent state.
1467 *
1468 * @param[in,out] user Pointer to the UserCtx, containing the swarm and all necessary
1469 * domain topology information (bboxlist, RankCellInfoMap, etc.).
1470 * @param[in] bboxlist An array of BoundingBox structures for ALL MPI ranks, indexed 0 to (size-1).
1471 * This array must be up-to-date and available on all ranks.
1472 * @return PetscErrorCode 0 on success, or a non-zero PETSc error code on failure.
1473 */
1474PetscErrorCode LocateAllParticlesInGrid(UserCtx *user,BoundingBox *bboxlist)
1475{
1476 PetscErrorCode ierr;
1477 PetscInt passes = 0;
1478 const PetscInt MAX_MIGRATION_PASSES = 10; // Safety break for runaway loops
1479 PetscInt global_migrations_this_pass;
1480 PetscMPIInt rank;
1481
1482 PetscFunctionBeginUser;
1484 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
1485 LOG_ALLOW(GLOBAL, LOG_INFO, "LocateAllParticlesInGrid (Orchestrator) - Beginning particle settlement process.\n");
1486
1487 // This loop ensures that particles that jump across multiple ranks are
1488 // handled correctly in successive, iterative handoffs.
1489 do {
1490 passes++;
1491 LOG_ALLOW_SYNC(GLOBAL, LOG_INFO, "[Rank %d] Starting migration pass %d.\n", rank, passes);
1492
1493 // --- STAGE 1: PER-PASS INITIALIZATION ---
1494 MigrationInfo *migrationList = NULL;
1495 PetscInt local_migration_count = 0;
1496 PetscInt migrationListCapacity = 0;
1497 PetscInt nlocal_before;
1498 PetscInt64 *pids_before_snapshot = NULL;
1499 PetscInt local_lost_count = 0;
1500
1501 ierr = DMSwarmGetLocalSize(user->swarm, &nlocal_before); CHKERRQ(ierr);
1502 LOG_ALLOW(LOCAL, LOG_DEBUG, "[Rank %d] Pass %d begins with %d local particles.\n", rank, passes, nlocal_before);
1503
1504
1505 // --- STAGE 2: PRE-MIGRATION SNAPSHOT & MAIN PROCESSING LOOP ---
1506 if (nlocal_before > 0) {
1507 // Get pointers to all fields needed for this pass
1508 PetscReal *pos_p, *weights_p, *vel_p;
1509 PetscInt *cell_p, *status_p;
1510 PetscInt64 *pid_p;
1511 ierr = DMSwarmGetField(user->swarm, "position", NULL, NULL, (void**)&pos_p); CHKERRQ(ierr);
1512 ierr = DMSwarmGetField(user->swarm, "velocity", NULL, NULL, (void**)&vel_p); CHKERRQ(ierr);
1513 ierr = DMSwarmGetField(user->swarm, "weight", NULL, NULL, (void**)&weights_p); CHKERRQ(ierr);
1514 ierr = DMSwarmGetField(user->swarm, "DMSwarm_CellID", NULL, NULL, (void**)&cell_p); CHKERRQ(ierr);
1515 ierr = DMSwarmGetField(user->swarm, "DMSwarm_pid", NULL, NULL, (void**)&pid_p); CHKERRQ(ierr);
1516 ierr = DMSwarmGetField(user->swarm, "DMSwarm_location_status", NULL, NULL, (void**)&status_p); CHKERRQ(ierr);
1517
1518 // Create a sorted snapshot of current PIDs to identify newcomers after migration.
1519 // This helper requires a raw pointer, which we just acquired.
1520 ierr = GetLocalPIDSnapshot(pid_p, nlocal_before, &pids_before_snapshot); CHKERRQ(ierr);
1521
1522 for (PetscInt p_idx = 0; p_idx < nlocal_before; p_idx++) {
1523
1524 // OPTIMIZATION: Skip particles already settled in a previous pass of this do-while loop.
1525
1526 LOG_LOOP_ALLOW(LOCAL, LOG_DEBUG, p_idx, 10, /* interval: report every 10th particle */
1527                "Local Particle idx=%d, PID=%ld, status=%s, cell=(%d, %d, %d)\n",
1528                p_idx,
1529                (long)pid_p[p_idx],
1530                ParticleLocationStatusToString((ParticleLocationStatus)status_p[p_idx]),
1531                cell_p[3*p_idx],
1532                cell_p[3*p_idx+1],
1533                cell_p[3*p_idx+2]);
1534
1535 if (status_p[p_idx] == ACTIVE_AND_LOCATED) {
1536 LOG_ALLOW(LOCAL,LOG_VERBOSE," [rank %d][PID %ld] skipped in pass %d as it is already located at (%d,%d,%d).\n",rank,pid_p[p_idx],passes,cell_p[3*p_idx],cell_p[3*p_idx + 1],cell_p[3*p_idx + 2]);
1537 continue;
1538 }
1539
1540 // UNPACK: Create a temporary C struct for easier processing using our helper.
1541 Particle current_particle;
1542
1543 // LOG_ALLOW(LOCAL,LOG_DEBUG,"about to unpack p_idx=%d (PID=%ld)\n",p_idx, (long)pid_p[p_idx]);
1544
1545 ierr = UnpackSwarmFields(p_idx, pid_p, weights_p, pos_p, cell_p, vel_p, status_p, &current_particle); CHKERRQ(ierr);
1546
1547 // LOG_ALLOW(LOCAL,LOG_DEBUG,"unpacked p_idx=%d → cell[0]=%d, status=%s\n",p_idx, current_particle.cell[0], ParticleLocationStatusToString((ParticleLocationStatus)current_particle.location_status));
1548
1549 ParticleLocationStatus final_status = (ParticleLocationStatus)status_p[p_idx];
1550
1551
1552 // CASE 1: Particle has a valid prior cell index.
1553 // It has moved, so we only need to run the robust walk from its last known location.
1554 if (current_particle.cell[0] >= 0) {
1555 LOG_ALLOW(LOCAL, LOG_VERBOSE, "[PID %ld] has valid prior cell. Strategy: Robust Walk from previous cell.\n", current_particle.PID);
1556 ierr = LocateParticleOrFindMigrationTarget(user, &current_particle, &final_status); CHKERRQ(ierr);
1557 }
1558
1559 /*
1560 // --- "GUESS" FAST PATH for lost particles ---
1561 if (current_particle.cell[0] < 0) {
1562 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld] is lost or uninitialized (cell=%d), attempting fast guess.\n",current_particle.PID, current_particle.cell[0]);
1563 ierr = GuessParticleOwnerWithBBox(user, &current_particle, bboxlist, &destination_rank); CHKERRQ(ierr);
1564 if (destination_rank != MPI_PROC_NULL && destination_rank != rank) {
1565 final_status = MIGRATING_OUT;
1566 // The particle struct's destination rank must be updated for consistency
1567 current_particle.destination_rank = destination_rank;
1568 }
1569 }
1570
1571 LOG_ALLOW(LOCAL,LOG_DEBUG,"[PID %ld] Particle status after Initial Guess:%d \n",current_particle.PID,final_status);
1572
1573 // --- "VERIFY" ROBUST WALK if guess didn't resolve it ---
1574 if (final_status == NEEDS_LOCATION || final_status == UNINITIALIZED) {
1575 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld] Not resolved by guess, starting robust walk.\n", current_particle.PID);
1576 // This function will update the particle's status and destination rank internally.
1577 ierr = LocateParticleOrFindMigrationTarget(user, &current_particle, &final_status); CHKERRQ(ierr);
1578 destination_rank = current_particle.destination_rank; // Retrieve the result
1579 }
1580
1581 // --- PROCESS THE FINAL STATUS AND TAKE ACTION ---
1582 if (final_status == MIGRATING_OUT) {
1583 status_p[p_idx] = MIGRATING_OUT; // Mark for removal by DMSwarm
1584 ierr = AddToMigrationList(&migrationList, &migrationListCapacity, &local_migration_count, p_idx, destination_rank); CHKERRQ(ierr);
1585 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld] at local index %d marked for migration to rank %d.\n",current_particle.PID, p_idx, destination_rank);
1586 } else {
1587 // Particle's final status is either LOCATED or LOST; update its state in the swarm arrays.
1588 current_particle.location_status = final_status;
1589 // PACK: Use the helper to write results back to the swarm arrays.
1590 ierr = UpdateSwarmFields(p_idx, &current_particle, weights_p, cell_p, status_p); CHKERRQ(ierr);
1591 }
1592 */
1593 // CASE 2: Particle is "lost" (cell = -1). Strategy: Guess -> Verify.
1594 else {
1595 LOG_ALLOW(LOCAL, LOG_VERBOSE, "[PID %ld] has invalid cell. Strategy: Guess Owner -> Find Cell.\n",current_particle.PID);
1596
1597 PetscMPIInt guessed_owner_rank = MPI_PROC_NULL;
1598 ierr = GuessParticleOwnerWithBBox(user, &current_particle, bboxlist, &guessed_owner_rank); CHKERRQ(ierr);
1599
1600 // If the guess finds a DIFFERENT rank, we can mark for migration and skip the walk.
1601 if (guessed_owner_rank != MPI_PROC_NULL && guessed_owner_rank != rank) {
1602 LOG_ALLOW(LOCAL, LOG_VERBOSE, "[PID %ld] Guess SUCCESS: Found migration target Rank %d. Finalizing.\n", current_particle.PID, guessed_owner_rank);
1603 final_status = MIGRATING_OUT;
1604 current_particle.destination_rank = guessed_owner_rank;
1605 }
1606 else {
1607
1608 // This block runs if the guess either failed (rank is NULL) or found the particle is local (rank is self).
1609 // In BOTH cases, the situation is unresolved, and we MUST fall back to the robust walk.
1610 if (guessed_owner_rank == rank) {
1611 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld] Guess determined particle is local. Proceeding to robust walk to find cell.\n", current_particle.PID);
1612 } else { // guessed_owner_rank == MPI_PROC_NULL
1613 LOG_ALLOW(LOCAL, LOG_WARNING, "[PID %ld] Guess FAILED to find an owner. Proceeding to robust walk for definitive search.\n", current_particle.PID);
1614 }
1615
1616 ierr = LocateParticleOrFindMigrationTarget(user, &current_particle, &final_status); CHKERRQ(ierr);
1617 }
1618 }
1619
1620 // --- PROCESS THE FINAL, DEFINITIVE STATUS ---
1621 current_particle.location_status = final_status;
1622 ierr = UpdateSwarmFields(p_idx, &current_particle, weights_p, cell_p, status_p); CHKERRQ(ierr);
1623
1624 if (final_status == MIGRATING_OUT) {
1625 ierr = AddToMigrationList(&migrationList, &migrationListCapacity, &local_migration_count, p_idx, current_particle.destination_rank); CHKERRQ(ierr);
1626 } else if (final_status == LOST) {
1627 local_lost_count++;
1628 }
1629
1630 } // End of main particle processing loop
1631
1632 // Restore all the fields acquired for this pass.
1633 ierr = DMSwarmRestoreField(user->swarm, "position", NULL, NULL, (void**)&pos_p); CHKERRQ(ierr);
1634 ierr = DMSwarmRestoreField(user->swarm, "velocity", NULL, NULL, (void**)&vel_p); CHKERRQ(ierr);
1635 ierr = DMSwarmRestoreField(user->swarm, "weight", NULL, NULL, (void**)&weights_p); CHKERRQ(ierr);
1636 ierr = DMSwarmRestoreField(user->swarm, "DMSwarm_CellID", NULL, NULL, (void**)&cell_p); CHKERRQ(ierr);
1637 ierr = DMSwarmRestoreField(user->swarm, "DMSwarm_pid", NULL, NULL, (void**)&pid_p); CHKERRQ(ierr);
1638 ierr = DMSwarmRestoreField(user->swarm, "DMSwarm_location_status", NULL, NULL, (void**)&status_p); CHKERRQ(ierr);
1639 }
1640
1641 // --- STAGE 3: ACTION & MPI COMMUNICATION ---
1642 LOG_ALLOW(LOCAL, LOG_INFO, "[Rank %d] Pass %d: Identified %d particles to migrate out.\n", rank, passes, local_migration_count);
1643
1644 // Synchronize and decide:
1645 // FIRST, determine if any rank wants to migrate. This call is safe because
1646 // all ranks have finished their local work and can participate.
1647 ierr = MPI_Allreduce(&local_migration_count, &global_migrations_this_pass, 1, MPIU_INT, MPI_SUM, PETSC_COMM_WORLD); CHKERRQ(ierr);
1648
1649 if (global_migrations_this_pass > 0) {
1650
1651 LOG_ALLOW(GLOBAL, LOG_INFO, "Pass %d: Migrating %d particles globally.\n", passes, global_migrations_this_pass);
1652
1653 ierr = SetMigrationRanks(user, migrationList, local_migration_count); CHKERRQ(ierr);
1654 ierr = PerformMigration(user); CHKERRQ(ierr);
1655
1656 // --- STAGE 4: POST-MIGRATION RESET ---
1657 // Identify newly arrived particles and flag them with NEEDS_LOCATION so they are
1658 // processed in the next pass. This uses the snapshot taken in STAGE 2.
1659 ierr = FlagNewcomersForLocation(user->swarm, nlocal_before, pids_before_snapshot); CHKERRQ(ierr);
1660 }
1661 // --- STAGE 5: LOOP SYNCHRONIZATION AND CLEANUP ---
1662
1663 ierr = PetscFree(pids_before_snapshot); CHKERRQ(ierr);
1664 ierr = PetscFree(migrationList); CHKERRQ(ierr);
1665
1666 LOG_ALLOW(GLOBAL, LOG_INFO, "End of pass %d. Total particles migrated globally: %d.\n", passes, global_migrations_this_pass);
1667
1668 } while (global_migrations_this_pass > 0 && passes < MAX_MIGRATION_PASSES);
1669
1670 // --- FINAL CHECKS ---
1671 if (passes >= MAX_MIGRATION_PASSES) {
1672 SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_CONV_FAILED, "Particle migration failed to converge after %d passes. Check for particles oscillating between ranks.", MAX_MIGRATION_PASSES);
1673 }
1674
1675 LOG_ALLOW(GLOBAL, LOG_INFO, "Particle Location completed in %d passes.\n", passes);
1676
1678 PetscFunctionReturn(0);
1679}
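/*
 * Minimal sketch (excluded from compilation) of the per-pass termination test used in
 * the do-while loop above: each rank contributes its local migration count, and the
 * loop continues only while the global sum is non-zero.
 */
#if 0
static PetscErrorCode Example_GlobalMigrationCount(PetscInt localCount, PetscInt *globalCount)
{
  PetscErrorCode ierr;
  PetscFunctionBeginUser;
  ierr = MPI_Allreduce(&localCount, globalCount, 1, MPIU_INT, MPI_SUM, PETSC_COMM_WORLD); CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif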
1680
1681
1682#undef __FUNCT__
1683#define __FUNCT__ "ResetAllParticleStatuses"
1684/**
1685 * This function is designed to be called at the end of a full timestep, after all
1686 * particle-based calculations are complete. It prepares the swarm for the next
1687 * timestep by ensuring that after the next position update, every particle will be
1688 * re-evaluated by the LocateAllParticlesInGrid orchestrator.
1689 *
1690 * It iterates through all locally owned particles and sets their
1691 * `DMSwarm_location_status` field to `NEEDS_LOCATION`.
1692 *
1693 * @param[in,out] user Pointer to the UserCtx containing the swarm.
1694 * @return PetscErrorCode 0 on success, or a non-zero PETSc error code on failure.
1695 */
1696PetscErrorCode ResetAllParticleStatuses(UserCtx *user)
1697{
1698 PetscErrorCode ierr;
1699 PetscInt n_local;
1700 PetscInt *status_p;
1701
1702 PetscFunctionBeginUser;
1703
1705
1706 ierr = DMSwarmGetLocalSize(user->swarm, &n_local); CHKERRQ(ierr);
1707
1708 if (n_local > 0) {
1709 // Get write access to the status field
1710 ierr = DMSwarmGetField(user->swarm, "DMSwarm_location_status", NULL, NULL, (void**)&status_p); CHKERRQ(ierr);
1711
1712 for (PetscInt p = 0; p < n_local; ++p) {
1713 // Only reset particles that are considered settled. This is a small optimization
1714 // to avoid changing the status of a LOST particle, though resetting all would also be fine.
1715 if (status_p[p] == ACTIVE_AND_LOCATED) {
1716 status_p[p] = NEEDS_LOCATION;
1717 }
1718 }
1719
1720 // Restore the field
1721 ierr = DMSwarmRestoreField(user->swarm, "DMSwarm_location_status", NULL, NULL, (void**)&status_p); CHKERRQ(ierr);
1722 }
1723
1724
1726 PetscFunctionReturn(0);
1727}
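/*
 * Illustrative per-timestep driver sketch (excluded from compilation): the intended
 * call order for the routines in this file. The surrounding solver loop, interpolation
 * and I/O steps are omitted; `bboxlist` is assumed to be the up-to-date array of
 * per-rank bounding boxes required by LocateAllParticlesInGrid(), and
 * CheckAndRemoveLostParticles() is the cleanup helper declared in ParticleMotion.h.
 */
#if 0
static PetscErrorCode Example_AdvanceParticlesOneStep(UserCtx *user, BoundingBox *bboxlist)
{
  PetscErrorCode ierr;
  PetscInt       lostLocal = 0, lostGlobal = 0;

  PetscFunctionBeginUser;
  ierr = UpdateAllParticlePositions(user);                           CHKERRQ(ierr); /* x += u * dt         */
  ierr = LocateAllParticlesInGrid(user, bboxlist);                   CHKERRQ(ierr); /* settle owners/cells */
  ierr = CheckAndRemoveLostParticles(user, &lostLocal, &lostGlobal); CHKERRQ(ierr); /* drop LOST particles */
  /* ... interpolate fields to particles, accumulate statistics, write output ... */
  ierr = ResetAllParticleStatuses(user);                             CHKERRQ(ierr); /* mark NEEDS_LOCATION */
  PetscFunctionReturn(0);
}
#endif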