PICurv 0.1.0
A Parallel Particle-In-Cell Solver for Curvilinear LES
Loading...
Searching...
No Matches
ParticleMotion.c
Go to the documentation of this file.
1// ParticleMotion.c
2
3#include "ParticleMotion.h"
4
5// Define a buffer size for error messages if not already available
6#ifndef ERROR_MSG_BUFFER_SIZE
7#define ERROR_MSG_BUFFER_SIZE 256 // Or use PETSC_MAX_PATH_LEN if appropriate
8#endif
9
10#undef __FUNCT__
11#define __FUNCT__ "GenerateGaussianNoise"
12/**
13 * @brief Generates two independent standard normal random variables N(0,1)
14 * using the Box-Muller transform.
15 *
16 * @param[in] rnd The PETSc Random context (Uniform [0,1)).
17 * @param[out] n1 First Gaussian number.
18 * @param[out] n2 Second Gaussian number.
19 *
20 * @return PetscErrorCode
21 */
22PetscErrorCode GenerateGaussianNoise(PetscRandom rnd, PetscReal *n1, PetscReal *n2)
23{
24 PetscErrorCode ierr;
25 PetscScalar val1, val2;
26 PetscReal u1, u2;
27 PetscReal magnitude, theta;
28
29 PetscFunctionBeginUser;
30
31 // 1. Get two independent uniform random numbers from the generator
32 // PetscRandomGetValue returns a PetscScalar (which might be complex).
33 // We take the Real part to ensure this works in both Real and Complex builds.
34 ierr = PetscRandomGetValue(rnd, &val1); CHKERRQ(ierr);
35 ierr = PetscRandomGetValue(rnd, &val2); CHKERRQ(ierr);
36
37 u1 = PetscRealPart(val1);
38 u2 = PetscRealPart(val2);
39
40 // 2. Safety Check: log(0) is undefined (infinity).
41 // If the RNG returns exactly 0.0, bump it to a tiny epsilon.
42 if (u1 <= 0.0) u1 = 1.0e-14;
43
44 // 3. Box-Muller Transform
45 // Formula: R = sqrt(-2 * ln(u1)), Theta = 2 * PI * u2
46 magnitude = PetscSqrtReal(-2.0 * PetscLogReal(u1));
47 theta = 2.0 * PETSC_PI * u2;
48
49 // 4. Calculate independent Normal variables
50 *n1 = magnitude * PetscCosReal(theta);
51 *n2 = magnitude * PetscSinReal(theta);
52
53 PetscFunctionReturn(0);
54}
55
56#undef __FUNCT__
57#define __FUNCT__ "CalculateBrownianDisplacement"
58/**
59 * @brief Calculates the stochastic displacement vector (Brownian motion) for a single particle.
60 * Equation: dX_stoch = sqrt(2 * Gamma_eff * dt) * N(0,1)
61 *
62 * @param[in] user Pointer to UserCtx (access to dt and BrownianMotionRNG).
63 * @param[in] diff_eff The effective diffusivity (Gamma + Gamma_t) at the particle's location.
64 * @param[out] displacement Pointer to a Cmpnts struct to store the resulting (dx, dy, dz).
65 *
66 * @return PetscErrorCode
67 */
68PetscErrorCode CalculateBrownianDisplacement(UserCtx *user, PetscReal diff_eff, Cmpnts *displacement)
69{
70 PetscErrorCode ierr;
71 PetscReal dt = user->simCtx->dt;
72 PetscReal sigma;
73 PetscReal n_x, n_y, n_z, n_unused;
74
75 PetscFunctionBeginUser;
76
77 // 1. Initialize output to zero for safety
78 displacement->x = 0.0;
79 displacement->y = 0.0;
80 displacement->z = 0.0;
81
82 // 2. Physical check: Diffusivity cannot be negative.
83 // If 0, there is no Brownian motion.
84 if (diff_eff <= 1.0e-12) {
85 PetscFunctionReturn(0);
86 }
87
88 // 3. Calculate the Scaling Factor (Standard Deviation)
89 // Formula: sigma = sqrt(2 * D * dt)
90 // Note: dt is inside the root because variance scales linearly with time.
91 sigma = PetscSqrtReal(2.0 * diff_eff * dt);
92
93 // 4. Generate 3 Independent Gaussian Random Numbers
94 // GenerateGaussianNoise produces 2 numbers at a time. We call it twice.
95
96 // Get noise for X and Y
97 ierr = GenerateGaussianNoise(user->simCtx->BrownianMotionRNG, &n_x, &n_y); CHKERRQ(ierr);
98
99 // Get noise for Z (n_unused is discarded, but cheap to generate)
100 ierr = GenerateGaussianNoise(user->simCtx->BrownianMotionRNG, &n_z, &n_unused); CHKERRQ(ierr);
101
102 // 5. Calculate final stochastic displacement
103 displacement->x = sigma * n_x;
104 displacement->y = sigma * n_y;
105 displacement->z = sigma * n_z;
106
107 PetscFunctionReturn(0);
108}
109
110#undef __FUNCT__
111#define __FUNCT__ "UpdateParticlePosition"
112/**
113 * @brief Updates a particle's position based on its velocity and the timestep dt (stored in user->dt).
114 *
115 * @param[in] user Pointer to your UserCtx (must contain user->dt).
116 * @param[in,out] particle Pointer to the particle struct (contains, pos,vel,diffusivity etc).
117 *
118 * @return PetscErrorCode Returns 0 on success, or an error code on failure.
119 */
120PetscErrorCode UpdateParticlePosition(UserCtx *user, Particle *particle)
121{
122 PetscFunctionBeginUser; // PETSc macro for error/stack tracing
124
125 PetscErrorCode ierr;
126 PetscReal dt = user->simCtx->dt;
127 Cmpnts brownian_disp;
128
129 // 2. Calculate the stochastic kick
130 ierr = CalculateBrownianDisplacement(user,particle->diffusivity, &brownian_disp); CHKERRQ(ierr);
131
132 // --- Update Position ---
133 // X_new = X_old + ((U_convection + U_diffusivitygradient) * dt) + dX_brownian
134
135 particle->loc.x += ((particle->vel.x + particle->diffusivitygradient.x) * dt) + brownian_disp.x;
136 particle->loc.y += ((particle->vel.y + particle->diffusivitygradient.y) * dt) + brownian_disp.y;
137 particle->loc.z += ((particle->vel.z + particle->diffusivitygradient.z) * dt) + brownian_disp.z;
138
140 PetscFunctionReturn(0);
141}
142
143#undef __FUNCT__
144#define __FUNCT__ "UpdateAllParticlePositions"
145/**
146 * @brief Loops over all local particles in the DMSwarm, updating their positions
147 * based on velocity and the global timestep user->dt.
148 * @param[in,out] user Pointer to UserCtx (must contain dt).
149 *
150 * @return PetscErrorCode Returns 0 on success, or an error code on failure.
151 */
153{
154 PetscErrorCode ierr;
155 DM swarm = user->swarm;
156 PetscInt nLocal, p;
157 PetscReal *pos = NULL;
158 PetscReal *vel = NULL;
159 PetscReal *diffusivity = NULL;
160 PetscReal *diffusivitygradient = NULL;
161 PetscReal *psi = NULL;
162 PetscReal *weights = NULL;
163 PetscInt *cell = NULL;
164 PetscInt *status = NULL;
165 PetscInt *pid = NULL;
166 PetscMPIInt rank;
167
168 ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
169
170 PetscFunctionBeginUser; // PETSc macro for error/stack tracing
171
173
174 // 1) Get the number of local particles
175 ierr = DMSwarmGetLocalSize(swarm, &nLocal); CHKERRQ(ierr);
176 if (nLocal == 0) {
177 LOG_ALLOW(LOCAL,LOG_DEBUG,"[Rank %d] No particles to move/transport. \n",rank);
178 PetscFunctionReturn(0); // nothing to do, no fields held
179 }
180 // 2) Access the "position" and "velocity" fields
181 ierr = DMSwarmGetField(swarm, "position", NULL, NULL, (void**)&pos); CHKERRQ(ierr);
182 ierr = DMSwarmGetField(swarm, "velocity", NULL, NULL, (void**)&vel); CHKERRQ(ierr);
183 ierr = DMSwarmGetField(swarm, "Diffusivity", NULL, NULL, (void**)&diffusivity); CHKERRQ(ierr);
184 ierr = DMSwarmGetField(swarm, "DiffusivityGradient", NULL, NULL, (void**)&diffusivitygradient); CHKERRQ(ierr);
185 ierr = DMSwarmGetField(swarm, "Psi", NULL, NULL, (void**)&psi); CHKERRQ(ierr);
186 ierr = DMSwarmGetField(swarm, "weight", NULL, NULL, (void**)&weights); CHKERRQ(ierr);
187 ierr = DMSwarmGetField(swarm, "DMSwarm_CellID", NULL, NULL, (void**)&cell); CHKERRQ(ierr);
188 ierr = DMSwarmGetField(swarm, "DMSwarm_location_status", NULL, NULL, (void**)&status); CHKERRQ(ierr);
189 ierr = DMSwarmGetField(swarm, "DMSwarm_pid", NULL, NULL, (void**)&pid); CHKERRQ(ierr);
190
191 LOG_ALLOW(GLOBAL,LOG_DEBUG," [Rank %d] No.of Particles to update: %d.\n",rank,nLocal);
192
193 // 3) Loop over all local particles, updating each position by velocity * dt
194 for (p = 0; p < nLocal; p++) {
195 // update temporary particle struct
196 Particle particle;
197
198 // Unpack: Use the helper to read from swarm arrays into the particle struct
199 ierr = UnpackSwarmFields(p, pid, weights, pos, cell, vel, status, diffusivity, diffusivitygradient, psi, &particle); CHKERRQ(ierr);
200
201 // Update position based on velocity and Brownian motion
202 ierr = UpdateParticlePosition(user, &particle); CHKERRQ(ierr);
203
204 // Update swarm fields
205 ierr = UpdateSwarmFields(p, &particle, pos, vel, weights, cell, status, diffusivity, diffusivitygradient, psi); CHKERRQ(ierr);
206 }
207
208 // 4) Restore the fields
209 ierr = DMSwarmRestoreField(swarm, "position", NULL, NULL, (void**)&pos); CHKERRQ(ierr);
210 ierr = DMSwarmRestoreField(swarm, "velocity", NULL, NULL, (void**)&vel); CHKERRQ(ierr);
211 ierr = DMSwarmRestoreField(swarm, "Diffusivity", NULL, NULL, (void**)&diffusivity); CHKERRQ(ierr);
212 ierr = DMSwarmRestoreField(swarm, "DiffusivityGradient", NULL, NULL, (void**)&diffusivitygradient); CHKERRQ(ierr);
213 ierr = DMSwarmRestoreField(swarm, "Psi", NULL, NULL, (void**)&psi); CHKERRQ(ierr);
214 ierr = DMSwarmRestoreField(swarm, "weight", NULL, NULL, (void**)&weights); CHKERRQ(ierr);
215 ierr = DMSwarmRestoreField(swarm, "DMSwarm_CellID", NULL, NULL, (void**)&cell); CHKERRQ(ierr);
216 ierr = DMSwarmRestoreField(swarm, "DMSwarm_location_status", NULL, NULL, (void**)&status); CHKERRQ(ierr);
217 ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void**)&pid); CHKERRQ(ierr);
218
219
220 LOG_ALLOW(LOCAL,LOG_DEBUG,"Particle moved/transported successfully on Rank %d.\n",rank);
221
223
224 PetscFunctionReturn(0);
225}
226
227
228/**
229 * @brief Checks if a particle position is within the bounds of a given bounding box.
230 *
231 * @param bbox Pointer to the BoundingBox structure.
232 * @param pos Pointer to the particle's position (Cmpnts).
233 *
234 * @return PetscBool PETSC_TRUE if the particle is inside or on the boundary, PETSC_FALSE otherwise.
235 */
236static inline PetscBool IsParticleInBox(const BoundingBox *bbox, const Cmpnts *pos) {
237 return (pos->x >= bbox->min_coords.x && pos->x <= bbox->max_coords.x &&
238 pos->y >= bbox->min_coords.y && pos->y <= bbox->max_coords.y &&
239 pos->z >= bbox->min_coords.z && pos->z <= bbox->max_coords.z);
240}
241
242
243#undef __FUNCT__
244#define __FUNCT__ "CheckAndRemoveOutOfBoundsParticles"
245
246/**
247 * @brief Checks for particles outside the global physical domain and removes them.
248 *
249 * This function iterates through all particles local to the current MPI rank. It determines
250 * if a particle's physical position is outside of ALL subdomains owned by the MPI processes.
251 * To perform this check, it requires a list of the bounding boxes for every MPI rank's
252 * subdomain.
253 *
254 * If a particle is not found within any of the subdomains, it is considered "out of bounds"
255 * and is permanently removed from the simulation using DMSwarmRemovePointAtIndex().
256 *
257 * The function is carefully designed to handle modifications to the DMSwarm during iteration
258 * safely. It does this by:
259 * 1. Iterating BACKWARDS through the local particle list, so removing an element at
260 * index 'p' does not affect the indices of subsequent elements to be checked (p-1, p-2, ...).
261 * 2. Using a robust "Restore-Remove-Reacquire" pattern. When a particle is removed,
262 * all pointers to swarm data are first restored, the removal operation is performed,
263 * and then the pointers are re-acquired for the remainder of the loop.
264 *
265 * @warning This function contains a collective MPI operation (MPI_Allreduce) at the end.
266 * To avoid deadlocks, it MUST be called by ALL ranks in the communicator, even if
267 * a rank has no local particles. Do not place this call inside rank-specific
268 * conditional logic in your main loop.
269 *
270 * @note This function can be redundant as the robust particle location scheme (e.g., one that
271 * sets a `LOST` status) and a corresponding cleanup function are already in use.
272 *
273 * @param[in,out] user Pointer to the UserCtx structure containing the swarm and MPI info.
274 * @param[out] removedCountLocal Pointer to an integer that will store the number of particles removed on THIS rank.
275 * @param[out] removedCountGlobal Pointer to an integer that will store the total number of particles removed ACROSS ALL ranks.
276 * @param[in] bboxlist An array of BoundingBox structures for ALL MPI ranks, indexed 0 to (size-1).
277 * This array must be up-to-date and available on every rank.
278 *
279 * @return PetscErrorCode 0 on success, or a non-zero PETSc error code on failure.
280 */
282 PetscInt *removedCountLocal,
283 PetscInt *removedCountGlobal,
284 const BoundingBox *bboxlist)
285{
286 PetscErrorCode ierr;
287 DM swarm = user->swarm;
288 PetscInt nLocalInitial;
289 PetscReal *pos_p = NULL;
290 PetscInt64 *pid_p = NULL; // For better logging
291 PetscInt local_removed_count = 0;
292 PetscMPIInt global_removed_count_mpi = 0;
293 PetscMPIInt rank, size;
294
295 PetscFunctionBeginUser;
296 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
297 ierr = MPI_Comm_size(PETSC_COMM_WORLD, &size); CHKERRQ(ierr);
298 LOG_ALLOW(LOCAL, LOG_INFO, "[Rank %d] Checking for out-of-bounds particles...", rank);
299
300 // Initialize output parameters to ensure clean state
301 *removedCountLocal = 0;
302 if (removedCountGlobal) *removedCountGlobal = 0;
303
304 ierr = DMSwarmGetLocalSize(swarm, &nLocalInitial); CHKERRQ(ierr);
305
306 // Only proceed if there are particles to check on this rank.
307 // All ranks will still participate in the final collective MPI_Allreduce.
308 if (nLocalInitial > 0) {
309 // Get access to swarm fields once before the loop begins.
310 ierr = DMSwarmGetField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr);
311 ierr = DMSwarmGetField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr);
312
313 // --- Iterate BACKWARDS to handle index changes safely during removal ---
314 for (PetscInt p = nLocalInitial - 1; p >= 0; p--) {
315 PetscBool isInsideAnyBox = PETSC_FALSE;
316 Cmpnts current_pos = {pos_p[3*p + 0], pos_p[3*p + 1], pos_p[3*p + 2]};
317
318 // Check if the particle is inside ANY of the rank bounding boxes
319 for (PetscMPIInt proc = 0; proc < size; proc++) {
320 if (IsParticleInBox(&bboxlist[proc], &current_pos)) {
321 isInsideAnyBox = PETSC_TRUE;
322 break; // Particle is inside a valid domain, stop checking.
323 }
324 }
325
326 if (!isInsideAnyBox) {
327 LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Removing out-of-bounds particle [PID %lld] at local index %d. Pos: (%g, %g, %g)\n",
328 rank, (long long)pid_p[p], p, current_pos.x, current_pos.y, current_pos.z);
329
330 // --- Safe Removal Pattern: Restore -> Remove -> Reacquire ---
331 // This is the fix for the double-restore bug. Pointers are managed carefully
332 // within this block and then restored cleanly after the loop.
333
334 // 1. Restore all fields BEFORE modifying the swarm structure. This invalidates pos_p and pid_p.
335 ierr = DMSwarmRestoreField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr);
336 ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr);
337
338 // 2. Remove the particle at the current local index 'p'.
339 ierr = DMSwarmRemovePointAtIndex(swarm, p); CHKERRQ(ierr);
340 local_removed_count++;
341
342 // 3. After removal, re-acquire pointers ONLY if the loop is not finished.
343 PetscInt nLocalCurrent;
344 ierr = DMSwarmGetLocalSize(swarm, &nLocalCurrent); CHKERRQ(ierr);
345
346 if (nLocalCurrent > 0 && p > 0) { // Check if there are particles left AND iterations left
347 ierr = DMSwarmGetField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr);
348 ierr = DMSwarmGetField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr);
349 } else {
350 // All remaining particles were removed OR this was the last particle (p=0).
351 // Invalidate pointers to prevent the final restore call and exit the loop.
352 pos_p = NULL;
353 pid_p = NULL;
354 break;
355 }
356 }
357 } // End of backwards loop
358
359 // At the end, restore any valid pointers. This handles three cases:
360 // 1. No particles were removed: restores the original pointers.
361 // 2. Particles were removed mid-loop: restores the pointers from the last re-acquisition.
362 // 3. All particles were removed: pointers are NULL, so nothing is done.
363 if (pos_p) { ierr = DMSwarmRestoreField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr); }
364 if (pid_p) { ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr); }
365 } // End of if (nLocalInitial > 0)
366
367 PetscInt nLocalFinal;
368 ierr = DMSwarmGetLocalSize(swarm, &nLocalFinal); CHKERRQ(ierr);
369 LOG_ALLOW(LOCAL, LOG_INFO, "[Rank %d] Finished removing %d out-of-bounds particles. Final local size: %d.\n", rank, local_removed_count, nLocalFinal);
370
371 // --- Synchronize counts across all ranks ---
372 *removedCountLocal = local_removed_count;
373 if (removedCountGlobal) {
374 ierr = MPI_Allreduce(&local_removed_count, &global_removed_count_mpi, 1, MPI_INT, MPI_SUM, PetscObjectComm((PetscObject)swarm)); CHKERRQ(ierr);
375 *removedCountGlobal = global_removed_count_mpi;
376 // Use a synchronized log message so only one rank prints the global total.
377 LOG_ALLOW_SYNC(GLOBAL, LOG_INFO, "[Rank %d] Removed %d out-of-bounds particles globally.\n", rank, *removedCountGlobal);
378 }
379
380 PetscFunctionReturn(0);
381}
382
383#undef __FUNCT__
384#define __FUNCT__ "CheckAndRemoveLostParticles"
385/**
386 * @brief Removes particles that have been definitively flagged as LOST by the location algorithm.
387 *
388 * This function is the designated cleanup utility for particles that have exited the
389 * physical domain or could not be located for any other reason. It should be called
390 * after a particle location and migration phase is complete.
391 *
392 * It iterates through all locally owned particles and checks their `DMSwarm_location_status`
393 * field. If a particle's status is `LOST`, it is permanently removed from the simulation
394 * using `DMSwarmRemovePointAtIndex`.
395 *
396 * The function is carefully designed to handle modifications to the DMSwarm during iteration
397 * safely. It does this by:
398 * 1. Iterating BACKWARDS through the local particle list, so removing an element at
399 * index 'p' does not affect the indices of subsequent elements to be checked (p-1, p-2, ...).
400 * 2. Using a robust "Restore-Remove-Reacquire" pattern. When a particle is removed,
401 * all pointers to swarm data are first restored, the removal operation is performed,
402 * and then the pointers are re-acquired for the remainder of the loop.
403 *
404 * @warning This function contains a collective MPI operation (MPI_Allreduce) at the end.
405 * To avoid deadlocks, it MUST be called by ALL ranks in the communicator, even if
406 * a rank has no local particles. Do not place this call inside rank-specific
407 * conditional logic in your main loop.
408 *
409 * @param[in,out] user Pointer to the UserCtx structure containing the swarm.
410 * @param[out] removedCountLocal Pointer to an integer that will store the number of particles removed on THIS rank.
411 * @param[out] removedCountGlobal Pointer to an integer that will store the total number of particles removed ACROSS ALL ranks.
412 *
413 * @return PetscErrorCode 0 on success, or a non-zero PETSc error code on failure.
414 */
416 PetscInt *removedCountLocal,
417 PetscInt *removedCountGlobal)
418{
419 PetscErrorCode ierr;
420 DM swarm = user->swarm;
421 PetscInt nLocalInitial;
422 PetscInt *status_p = NULL;
423 PetscInt64 *pid_p = NULL; // For better logging
424 PetscReal *pos_p = NULL; // For better logging
425 PetscInt local_removed_count = 0;
426 PetscMPIInt global_removed_count_mpi = 0;
427 PetscMPIInt rank;
428
429 PetscFunctionBeginUser;
431 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
432 LOG_ALLOW(LOCAL, LOG_INFO, "Rank %d: Checking for and removing LOST particles...\n", rank);
433
434 // Initialize output parameters to ensure clean state
435 *removedCountLocal = 0;
436 if (removedCountGlobal) *removedCountGlobal = 0;
437
438 ierr = DMSwarmGetLocalSize(swarm, &nLocalInitial); CHKERRQ(ierr);
439
440 // Only proceed if there are particles to check on this rank.
441 // All ranks will still participate in the final collective MPI_Allreduce.
442 if (nLocalInitial > 0) {
443 // Get access to all swarm fields once before the loop begins.
444 ierr = DMSwarmGetField(swarm, "DMSwarm_location_status", NULL, NULL, (void **)&status_p); CHKERRQ(ierr);
445 ierr = DMSwarmGetField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr);
446 ierr = DMSwarmGetField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr);
447
448 // --- Iterate BACKWARDS to handle index changes safely during removal ---
449 for (PetscInt p = nLocalInitial - 1; p >= 0; p--) {
450 if (status_p[p] == LOST) {
451 LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Removing LOST particle [PID %lld] at local index %d. Position: (%.4f, %.4f, %.4f).\n",
452 rank, (long long)pid_p[p], p, pos_p[3*p], pos_p[3*p+1], pos_p[3*p+2]);
453
454 // --- Safe Removal Pattern: Restore -> Remove -> Reacquire ---
455 // This is the fix for the double-restore bug. Pointers are managed carefully
456 // within this block and then restored cleanly after the loop.
457
458 // 1. Restore all fields BEFORE modifying the swarm structure. This invalidates all pointers.
459 ierr = DMSwarmRestoreField(swarm, "DMSwarm_location_status", NULL, NULL, (void **)&status_p); CHKERRQ(ierr);
460 ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr);
461 ierr = DMSwarmRestoreField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr);
462
463 // 2. Remove the particle at the current local index 'p'.
464 ierr = DMSwarmRemovePointAtIndex(swarm, p); CHKERRQ(ierr);
465 local_removed_count++;
466
467 // 3. After removal, re-acquire pointers ONLY if the loop is not finished.
468 PetscInt nLocalCurrent;
469 ierr = DMSwarmGetLocalSize(swarm, &nLocalCurrent); CHKERRQ(ierr);
470
471 if (nLocalCurrent > 0 && p > 0) { // Check if there are particles left AND iterations left
472 ierr = DMSwarmGetField(swarm, "DMSwarm_location_status", NULL, NULL, (void **)&status_p); CHKERRQ(ierr);
473 ierr = DMSwarmGetField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr);
474 ierr = DMSwarmGetField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr);
475 } else {
476 // All remaining particles were removed OR this was the last particle (p=0).
477 // Invalidate pointers to prevent the final restore call and exit the loop.
478 status_p = NULL;
479 pid_p = NULL;
480 pos_p = NULL;
481 break;
482 }
483 }
484 } // End of backwards loop
485
486 // At the end, restore any valid pointers. This handles three cases:
487 // 1. No particles were removed: restores the original pointers.
488 // 2. Particles were removed mid-loop: restores the pointers from the last re-acquisition.
489 // 3. All particles were removed: pointers are NULL, so nothing is done.
490 if (status_p) { ierr = DMSwarmRestoreField(swarm, "DMSwarm_location_status", NULL, NULL, (void **)&status_p); CHKERRQ(ierr); }
491 if (pid_p) { ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr); }
492 if (pos_p) { ierr = DMSwarmRestoreField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr); }
493 } // End of if (nLocalInitial > 0)
494
495 PetscInt nLocalFinal;
496 ierr = DMSwarmGetLocalSize(swarm, &nLocalFinal); CHKERRQ(ierr);
497 LOG_ALLOW(LOCAL, LOG_INFO, "Rank %d: Finished removing %d LOST particles. Final local size: %d.\n", rank, local_removed_count, nLocalFinal);
498
499 // --- Synchronize counts across all ranks ---
500 *removedCountLocal = local_removed_count;
501 if (removedCountGlobal) {
502 ierr = MPI_Allreduce(&local_removed_count, &global_removed_count_mpi, 1, MPI_INT, MPI_SUM, PetscObjectComm((PetscObject)swarm)); CHKERRQ(ierr);
503 *removedCountGlobal = global_removed_count_mpi;
504 // Use a synchronized log message so only one rank prints the global total.
505 LOG_ALLOW_SYNC(GLOBAL, LOG_INFO, "[Rank %d] Removed %d LOST particles globally.\n", rank, *removedCountGlobal);
506 }
507
509 PetscFunctionReturn(0);
510}
511
512
513#undef __FUNCT__
514#define __FUNCT__ "SetMigrationRanks"
515/**
516 * @brief Sets the target rank field (DMSwarmPICField_rank) for particles scheduled for migration.
517 *
518 * @param user Pointer to UserCtx pa(contains swarm).
519 * @param migrationList Array of MigrationInfo structs containing local indices and target ranks.
520 * @param migrationCount Number of particles in the migrationList.
521 *
522 * @return PetscErrorCode 0 on success, non-zero on failure.
523 */
524PetscErrorCode SetMigrationRanks(UserCtx* user, const MigrationInfo *migrationList, PetscInt migrationCount)
525{
526 PetscErrorCode ierr;
527 DM swarm = user->swarm;
528 PetscInt p_idx;
529 PetscInt *rankField = NULL; // Field storing target rank
530
531 PetscFunctionBeginUser;
533
534 // Ensure the migration rank field exists
535 ierr = DMSwarmGetField(swarm, "DMSwarm_rank", NULL, NULL, (void **)&rankField); CHKERRQ(ierr);
536
537 // Set the target rank for migrating particles
538 for(p_idx = 0; p_idx < migrationCount; ++p_idx) {
539 rankField[migrationList[p_idx].local_index] = migrationList[p_idx].target_rank;
540 }
541
542 ierr = DMSwarmRestoreField(swarm, "DMSwarm_rank", NULL, NULL, (void **)&rankField); CHKERRQ(ierr);
543
545 PetscFunctionReturn(0);
546}
547
548#undef __FUNCT__
549#define __FUNCT__ "PerformMigration"
550
551/**
552 * @brief Performs particle migration based on the pre-populated DMSwarmPICField_rank field.
553 *
554 * Assumes SetMigrationRanks has already been called to mark particles with their target ranks.
555 * Calls DMSwarmMigrate to execute the communication and removal of un-migrated particles.
556 *
557 * @param user Pointer to the UserCtx structure containing the swarm.
558 *
559 * @return PetscErrorCode 0 on success, non-zero on failure.
560 */
561PetscErrorCode PerformMigration(UserCtx *user)
562{
563 PetscErrorCode ierr;
564 DM swarm = user->swarm;
565 PetscMPIInt rank;
566
567 PetscFunctionBeginUser;
569 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
570 LOG_ALLOW(GLOBAL, LOG_INFO, "Rank %d: Starting DMSwarmMigrate...\n", rank);
571
572 // Perform the migration - PETSC_TRUE removes particles that fail to land
573 // in a valid cell on the target rank (or were marked with an invalid rank).
574 ierr = DMSwarmMigrate(swarm, PETSC_TRUE); CHKERRQ(ierr);
575
576 LOG_ALLOW(GLOBAL, LOG_INFO, "Rank %d: Migration complete.\n", rank);
578 PetscFunctionReturn(0);
579}
580
581//-----------------------------------------------------------------------------
582// MODULE (COUNT): Calculates Particle Count Per Cell - REVISED FOR DMDAVecGetArray
583//-----------------------------------------------------------------------------
584
585#undef __FUNCT__
586#define __FUNCT__ "CalculateParticleCountPerCell"
587/**
588 * @brief Counts particles in each cell of the DMDA 'da' and stores the result in user->ParticleCount.
589 * @ingroup scatter_module
590 *
591 * Zeros the user->ParticleCount vector, then iterates through local particles.
592 * Reads the **GLOBAL** cell index (I, J, K) stored in the "DMSwarm_CellID" field.
593 * Uses DMDAVecGetArray to access the local portion of the count vector and increments
594 * the count at the global index (I, J, K) if it belongs to the local patch (including ghosts).
595 *
596 * @param[in,out] user Pointer to the UserCtx structure containing da, swarm, and ParticleCount.
597 *
598 * @return PetscErrorCode Returns 0 on success, non-zero on failure.
599
600 */
602 PetscErrorCode ierr;
603 DM da = user->da;
604 DM swarm = user->swarm;
605 Vec countVec = user->ParticleCount;
606 Vec localcountVec = user->lParticleCount;
607 PetscInt nlocal, p;
608 PetscInt *global_cell_id_arr; // Read GLOBAL cell IDs
609 PetscScalar ***count_arr_3d; // Use 3D accessor
610 PetscInt64 *PID_arr;
611 PetscMPIInt rank;
612 char msg[ERROR_MSG_BUFFER_SIZE];
613 PetscInt particles_counted_locally = 0;
614
615 PetscFunctionBeginUser;
617 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
618
619 // --- Input Validation ---
620 if (!da) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "UserCtx->da is NULL.");
621 if (!swarm) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "UserCtx->swarm is NULL.");
622 if (!countVec) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "UserCtx->ParticleCount is NULL.");
623 // Check DOF of da
624 PetscInt count_dof;
625 ierr = DMDAGetInfo(da, NULL, NULL, NULL, NULL, NULL, NULL, NULL, &count_dof, NULL, NULL, NULL, NULL, NULL); CHKERRQ(ierr);
626 if (count_dof != 1) { PetscSNPrintf(msg, sizeof(msg), "countDM must have DOF=1, got %d.", count_dof); SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, msg); }
627
628 // --- Zero the local count vector ---
629 ierr = VecSet(localcountVec, 0.0); CHKERRQ(ierr);
630
631 // --- Get Particle Data ---
632 LOG_ALLOW(GLOBAL,LOG_DEBUG, "Accessing particle data.\n");
633 ierr = DMSwarmGetLocalSize(swarm, &nlocal); CHKERRQ(ierr);
634 ierr = DMSwarmGetField(swarm,"DMSwarm_CellID", NULL, NULL, (void **)&global_cell_id_arr); CHKERRQ(ierr);
635 ierr = DMSwarmGetField(swarm,"DMSwarm_pid",NULL,NULL,(void **)&PID_arr);CHKERRQ(ierr);
636
637 // --- Get Grid Vector Array using DMDA accessor ---
638 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Accessing ParticleCount vector array (using DMDAVecGetArray).\n");
639 ierr = DMDAVecGetArray(da, localcountVec, &count_arr_3d); CHKERRQ(ierr);
640
641 // Get local owned + ghosted range for writing into ghost slots.
642 PetscInt gxs, gys, gzs, gxm, gym, gzm;
643 ierr = DMDAGetGhostCorners(da, &gxs, &gys, &gzs, &gxm, &gym, &gzm); CHKERRQ(ierr);
644
645 // --- Accumulate Counts Locally ---
646 LOG_ALLOW(LOCAL, LOG_DEBUG, "CalculateParticleCountPerCell (Rank %d): Processing %d local particles using GLOBAL CellIDs.\n",rank,nlocal);
647 for (p = 0; p < nlocal; p++) {
648 // Read the GLOBAL indices stored for this particle
649 PetscInt i_geom = global_cell_id_arr[p * 3 + 0]; // Global i index
650 PetscInt j_geom = global_cell_id_arr[p * 3 + 1]; // Global j index
651 PetscInt k_geom = global_cell_id_arr[p * 3 + 2]; // Global k index
652
653 // Apply the shift to ensure ParticleCount follows the indexing convention for cell-centered data in this codebase.
654 PetscInt i = (PetscInt)i_geom + 1; // Shift for cell-centered
655 PetscInt j = (PetscInt)j_geom + 1; // Shift for cell-centered
656 PetscInt k = (PetscInt)k_geom + 1; // Shift for cell-centered
657
658 // *** Bounds check is implicitly handled by DMDAVecGetArray for owned+ghost region ***
659 // However, accessing outside this region using global indices WILL cause an error.
660 // A preliminary check might still be wise if global IDs could be wild.
661 // We rely on LocateAllParticles to provide valid global indices [0..IM-1] etc.
662
663 LOG_LOOP_ALLOW(LOCAL,LOG_VERBOSE,p,100,"[Rank %d] Read CellID for p=%d, PID = %ld: (%ld, %ld, %ld)\n", rank, p,PID_arr[p],i, j, k);
664
665 // Check if the global index (i,j,k) falls within the local + ghost range
666 if (i >= gxs && i < gxs + gxm &&
667 j >= gys && j < gys + gym && // Adjust based on actual ghost width
668 k >= gzs && k < gzs + gzm ) // This check prevents definite crashes but doesn't guarantee ownership
669 {
670
671 // Increment count at the location corresponding to GLOBAL index (I,J,K)
672 // LOG_ALLOW(LOCAL, LOG_DEBUG, "CalculateParticleCountPerCell (Rank %d): Particle %d with global CellID (%d, %d, %d) incremented with a particle.\n",rank, p, i, j, k);
673 count_arr_3d[k][j][i] += 1.0;
674 particles_counted_locally++;
675 } else {
676 // This particle's global ID is likely outside the range this rank handles (even ghosts)
677 // note: this is not necessarily an error if the particle is legitimately outside the local+ghost region
678 LOG_ALLOW(LOCAL, LOG_VERBOSE, "(Rank %d): Skipping particle %ld with global CellID (%ld, %ld, %ld) - likely outside local+ghost range.\n",rank, PID_arr[p] , i, j, k);
679 }
680 }
681 LOG_ALLOW(LOCAL, LOG_DEBUG, "(Rank %d): Local counting finished. Processed %d particles locally.\n", rank, particles_counted_locally);
682
683 // --- Restore Access ---
684 ierr = DMDAVecRestoreArray(da, localcountVec, &count_arr_3d); CHKERRQ(ierr);
685 ierr = DMSwarmRestoreField(swarm,"DMSwarm_CellID", NULL, NULL, (void **)&global_cell_id_arr); CHKERRQ(ierr);
686 ierr = DMSwarmRestoreField(swarm,"DMSwarm_pid",NULL,NULL,(void **)&PID_arr);CHKERRQ(ierr);
687
688 // --- Assemble Global Vector ---
689 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Assembling global ParticleCount vector.\n");
690 ierr = VecZeroEntries(countVec); CHKERRQ(ierr); // Ensure global vector is zeroed before accumulation
691 ierr = DMLocalToGlobalBegin(da, localcountVec, ADD_VALUES, countVec); CHKERRQ(ierr);
692 ierr = DMLocalToGlobalEnd(da, localcountVec, ADD_VALUES, countVec); CHKERRQ(ierr);
693 /*
694 * OPTIONAL: Synchronize Ghosts for Stencil Operations
695 * If a future function needs to read ParticleCount from neighbor cells (e.g., density smoothing
696 * or gradient calculations), uncomment the following lines to update the ghost slots
697 * in user->lParticleCount with the final summed values.
698 *
699 ierr = UpdateLocalGhosts(user,"ParticleCount"); CHKERRQ(ierr);
700 */
701
702 // --- Verification Logging ---
703 PetscReal total_counted_particles = 0.0, max_count_in_cell = 0.0;
704 ierr = VecSum(countVec, &total_counted_particles); CHKERRQ(ierr);
705 PetscInt max_idx_global = -1;
706 ierr = VecMax(countVec, &max_idx_global, &max_count_in_cell); CHKERRQ(ierr);
707 LOG_ALLOW(GLOBAL, LOG_INFO, "Total counted globally = %.0f, Max count in cell = %.0f\n",
708 total_counted_particles, max_count_in_cell);
709
710 // --- ADD THIS DEBUGGING BLOCK ---
711 if (max_idx_global >= 0) { // Check if VecMax found a location
712 // Need to convert the flat global index back to 3D global index (I, J, K)
713 // Get global grid dimensions (Nodes, NOT Cells IM/JM/KM)
714 PetscInt M, N, P;
715 ierr = DMDAGetInfo(da, NULL, &M, &N, &P, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); CHKERRQ(ierr);
716 // Note: Assuming DOF=1 for countVec, index mapping uses node dimensions M,N,P from DMDA creation (IM+1, etc)
717 // Re-check if your DMDA uses cell counts (IM) or node counts (IM+1) for Vec layout. Let's assume Node counts M,N,P.
718 PetscInt Kmax = max_idx_global / (M * N);
719 PetscInt Jmax = (max_idx_global % (M * N)) / M;
720 PetscInt Imax = max_idx_global % M;
721 LOG_ALLOW(GLOBAL, LOG_INFO, " -> Max count located at global index (I,J,K) = (%d, %d, %d) [Flat index: %d]\n",
722 (int)Imax, (int)Jmax, (int)Kmax, (int)max_idx_global);
723
724 // Also, let's explicitly check the count at (0,0,0)
725 PetscScalar count_at_origin = 0.0;
726 PetscScalar ***count_arr_for_check;
727 ierr = DMDAVecGetArrayRead(da, countVec, &count_arr_for_check); CHKERRQ(ierr);
728 // Check bounds before accessing - crucial if using global indices
729 PetscInt xs, ys, zs, xm, ym, zm;
730 ierr = DMDAGetCorners(da, &xs, &ys, &zs, &xm, &ym, &zm); CHKERRQ(ierr);
731 if (0 >= xs && 0 < xs+xm && 0 >= ys && 0 < ys+ym && 0 >= zs && 0 < zs+zm) {
732 count_at_origin = count_arr_for_check[0][0][0]; // Access using global index (0,0,0)
733 } else {
734 // Origin is not on this rank (relevant for parallel, but check anyway)
735 count_at_origin = -999.0; // Indicate it wasn't accessible locally
736 }
737 ierr = DMDAVecRestoreArrayRead(da, countVec, &count_arr_for_check); CHKERRQ(ierr);
738 LOG_ALLOW(GLOBAL, LOG_INFO, " -> Count at global index (0,0,0) = %.1f\n", count_at_origin);
739
740 } else {
741 LOG_ALLOW(GLOBAL, LOG_WARNING, " -> VecMax did not return a location for the maximum value.\n");
742 }
743 // --- END DEBUGGING BLOCK ---
744
745 LOG_ALLOW(GLOBAL, LOG_INFO, "Particle counting complete.\n");
746
747
749 PetscFunctionReturn(0);
750}
751
752
753
754#undef __FUNCT__
755#define __FUNCT__ "ResizeSwarmGlobally"
756// --- Helper function to resize swarm globally (add or remove) ---
757// This assumes removing excess particles means removing the globally last ones.
758PetscErrorCode ResizeSwarmGlobally(DM swarm, PetscInt N_target)
759{
760 PetscErrorCode ierr;
761 PetscInt N_current, nlocal_current;
762 PetscMPIInt rank;
763 MPI_Comm comm;
764
765 PetscFunctionBeginUser;
767 ierr = PetscObjectGetComm((PetscObject)swarm, &comm); CHKERRQ(ierr);
768 ierr = MPI_Comm_rank(comm, &rank); CHKERRQ(ierr);
769 ierr = DMSwarmGetSize(swarm, &N_current); CHKERRQ(ierr);
770 ierr = DMSwarmGetLocalSize(swarm, &nlocal_current); CHKERRQ(ierr);
771
772 PetscInt delta = N_target - N_current;
773
774 if (delta == 0) {
776 PetscFunctionReturn(0); // Nothing to do
777 }
778
779 if (delta < 0) { // Remove particles
780 PetscInt num_to_remove_global = -delta;
781 LOG_ALLOW(GLOBAL, LOG_INFO, "Current size %d > target size %d. Removing %d particles globally.\n", N_current, N_target, num_to_remove_global);
782
783 // --- Strategy: Remove the globally last 'num_to_remove_global' particles ---
784 // Each rank needs to determine how many of its *local* particles fall
785 // within the range of global indices [N_target, N_current - 1].
786
787 PetscInt rstart = 0;
788 PetscInt rend;
789 // Global range owned by this rank [rstart, rend)
790
791 ierr = MPI_Exscan(&nlocal_current, &rstart, 1, MPIU_INT, MPI_SUM, comm); CHKERRMPI(ierr); // Use CHKERRMPI for MPI calls
792
793 rend = rstart + nlocal_current;
794
795
796 // Calculate how many local particles have global indices >= N_target
797 PetscInt nlocal_remove_count = 0;
798 if (rend > N_target) { // If this rank owns any particles slated for removal
799 PetscInt start_remove_local_idx = (N_target > rstart) ? (N_target - rstart) : 0;
800 nlocal_remove_count = nlocal_current - start_remove_local_idx;
801 }
802
803 if (nlocal_remove_count < 0) nlocal_remove_count = 0; // Sanity check
804 if (nlocal_remove_count > nlocal_current) nlocal_remove_count = nlocal_current; // Sanity check
805
806 LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Global range [%d, %d). Target size %d. Need to remove %d local particles (from end).\n", rank, rstart, rend, N_target, nlocal_remove_count);
807
808 // Remove the last 'nlocal_remove_count' particles *locally* by iterating backwards
809 PetscInt removal_ops_done = 0;
810 for (PetscInt p = nlocal_current - 1; p >= 0 && removal_ops_done < nlocal_remove_count; --p) {
811 ierr = DMSwarmRemovePointAtIndex(swarm, p); CHKERRQ(ierr);
812 removal_ops_done++;
813 }
814
815 if (removal_ops_done != nlocal_remove_count) {
816 SETERRQ(comm, PETSC_ERR_PLIB, "Rank %d: Failed to remove the expected number of local particles (%d != %d)", rank, removal_ops_done, nlocal_remove_count);
817 }
818 LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Removed %d local particles.\n", rank, removal_ops_done);
819
820 // Barrier to ensure all removals are done before size check
821 ierr = MPI_Barrier(comm); CHKERRMPI(ierr);
822
823 } else { // delta > 0: Add particles
824 PetscInt num_to_add_global = delta;
825 LOG_ALLOW(GLOBAL, LOG_INFO, "Current size %d < target size %d. Adding %d particles globally.\n", N_current, N_target, num_to_add_global);
826 ierr = DMSwarmAddNPoints(swarm, num_to_add_global); CHKERRQ(ierr);
827 // Note: Added particles will have uninitialized field data. Reading will overwrite.
828 }
829
830 // Verify final size
831 PetscInt N_final;
832 ierr = DMSwarmGetSize(swarm, &N_final); CHKERRQ(ierr);
833 if (N_final != N_target) {
834 SETERRQ(comm, PETSC_ERR_PLIB, "Failed to resize swarm: expected %d particles, got %d", N_target, N_final);
835 }
836 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Swarm successfully resized to %d particles.\n", N_final);
838 PetscFunctionReturn(0);
839}
840
#undef __FUNCT__
#define __FUNCT__ "PreCheckAndResizeSwarm"
/**
 * @brief Checks particle count from a saved file and resizes the swarm globally.
 *
 * This function uses a robust parallel pattern: only Rank 0 reads the reference
 * position file to determine the total number of particles saved (`N_file`).
 * This count is then broadcast to all other ranks. Finally, each rank compares
 * N_file with the current swarm size and participates in resizing if necessary.
 *
 * Error signaling: Rank 0 encodes failures as negative sentinel counts
 * (-1 = file missing, -2 = bad format) so that the broadcast still happens on
 * every rank; all ranks then raise the matching SETERRQ collectively. This
 * avoids a deadlock that would occur if Rank 0 errored out before the Bcast.
 *
 * @param[in,out] user Pointer to the UserCtx structure containing the DMSwarm.
 * @param[in]     ti   Time index for constructing the file name.
 * @param[in]     ext  File extension (e.g., "dat").
 *
 * @return PetscErrorCode 0 on success, non-zero on failure.
 */
PetscErrorCode PreCheckAndResizeSwarm(UserCtx *user,
                                      PetscInt ti,
                                      const char *ext)
{
    PetscErrorCode ierr;
    char filename[PETSC_MAX_PATH_LEN];
    PetscInt N_file = 0; // The number of particles determined from the file
    PetscInt N_current = 0; // Current global swarm size, queried below
    MPI_Comm comm;
    PetscMPIInt rank;
    const char *refFieldName = "position"; // Reference field whose file defines the particle count
    const PetscInt bs = 3; // Block size: 3 coordinates per particle in the position Vec
    SimCtx *simCtx = user->simCtx;
    char *source_path; // Top-level input directory; assigned in the exec-mode dispatch below

    // NOTE: Your filename format has a hardcoded "_0" which is typical for
    // PETSc when writing a parallel object from a single rank.
    // If you ever write in parallel, PETSc might create one file per rank.
    // The current logic assumes a single file written by one process.
    const int placeholder_int = 0;

    // Setup the I/O environment


    PetscFunctionBeginUser;
    ierr = PetscObjectGetComm((PetscObject)user->swarm, &comm); CHKERRQ(ierr);
    ierr = MPI_Comm_rank(comm, &rank); CHKERRQ(ierr);

    // First, determine the top-level source directory based on the execution mode.
    if (simCtx->exec_mode == EXEC_MODE_SOLVER) {
        source_path = simCtx->restart_dir;
    } else if (simCtx->exec_mode == EXEC_MODE_POSTPROCESSOR) {
        source_path = simCtx->pps->source_dir;
    } else {
        SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_ARG_WRONGSTATE, "Invalid execution mode for reading simulation fields.");
    }

    // Set the current I/O directory context
    // NOTE(review): this writes "<source_path>/<particle_subdir>" into
    // _io_context_buffer, but the filename below reads current_io_directory.
    // This assumes current_io_directory aliases (or is derived from)
    // _io_context_buffer — TODO confirm against SimCtx's definition.
    ierr = PetscSNPrintf(simCtx->_io_context_buffer, sizeof(simCtx->_io_context_buffer),
                         "%s/%s", source_path, simCtx->particle_subdir); CHKERRQ(ierr);


    // --- Construct filename using the specified format ---
    // All ranks build the same name so any rank can report it in error paths.
    ierr = PetscSNPrintf(filename, sizeof(filename), "%s/%s%05" PetscInt_FMT "_%d.%s",
                         simCtx->current_io_directory,refFieldName, ti, placeholder_int, ext); CHKERRQ(ierr);

    LOG_ALLOW(GLOBAL, LOG_INFO, "Checking particle count for timestep %d using ref file '%s'.\n", ti, filename);

    // --- Rank 0 reads the file to determine the size ---
    if (rank == 0) {
        PetscBool fileExists = PETSC_FALSE;
        ierr = PetscTestFile(filename, 'r', &fileExists); CHKERRQ(ierr);

        if (!fileExists) {
            // Set a special value to indicate file not found, then broadcast it.
            N_file = -1;
            LOG_ALLOW(GLOBAL, LOG_ERROR, "Rank 0: Mandatory reference file '%s' not found for timestep %d.\n", filename, ti);
        } else {
            PetscViewer viewer;
            Vec tmpVec;
            PetscInt vecSize;

            // Load into a throwaway SEQUENTIAL vector purely to read its size;
            // the actual field data is read elsewhere after the resize.
            ierr = VecCreate(PETSC_COMM_SELF, &tmpVec); CHKERRQ(ierr); // Create a SEQUENTIAL vector
            ierr = PetscViewerBinaryOpen(PETSC_COMM_SELF, filename, FILE_MODE_READ, &viewer); CHKERRQ(ierr);
            ierr = VecLoad(tmpVec, viewer); CHKERRQ(ierr);
            ierr = PetscViewerDestroy(&viewer); CHKERRQ(ierr);

            ierr = VecGetSize(tmpVec, &vecSize); CHKERRQ(ierr);
            ierr = VecDestroy(&tmpVec); CHKERRQ(ierr);

            if (vecSize % bs != 0) {
                N_file = -2; // Special error code for bad file format
                LOG_ALLOW(GLOBAL, LOG_ERROR, "Rank 0: Vector size %d from file '%s' is not divisible by block size %d.\n", vecSize, filename, bs);
            } else {
                N_file = vecSize / bs;
                LOG_ALLOW(GLOBAL, LOG_DEBUG, "Rank 0: Found %d particles in file.\n", N_file);
            }
        }
    }

    // --- Broadcast the particle count (or error code) from Rank 0 to all other ranks ---
    ierr = MPI_Bcast(&N_file, 1, MPIU_INT, 0, comm); CHKERRMPI(ierr);

    // --- All ranks check for errors and abort if necessary ---
    // Raising the error on every rank (rather than only rank 0) keeps the
    // abort collective and avoids hanging the other ranks.
    if (N_file == -1) {
        SETERRQ(comm, PETSC_ERR_FILE_OPEN, "Mandatory reference file '%s' not found for timestep %d (as determined by Rank 0).", filename, ti);
    }
    if (N_file == -2) {
        SETERRQ(comm, PETSC_ERR_FILE_READ, "Reference file '%s' has incorrect format (as determined by Rank 0).", filename);
    }
    if (N_file < 0) {
        SETERRQ(comm, PETSC_ERR_PLIB, "Received invalid particle count %d from Rank 0.", N_file);
    }


    // --- Now all ranks have the correct N_file, compare and resize if needed ---
    ierr = DMSwarmGetSize(user->swarm, &N_current); CHKERRQ(ierr);

    if (N_file != N_current) {
        LOG_ALLOW(GLOBAL, LOG_INFO, "Swarm size %d differs from file size %d. Resizing swarm globally.\n", N_current, N_file);
        ierr = ResizeSwarmGlobally(user->swarm, N_file); CHKERRQ(ierr);
    } else {
        LOG_ALLOW(GLOBAL, LOG_DEBUG, "Swarm size (%d) already matches file size. No resize needed.\n", N_current);
    }

    // Also update the context
    user->simCtx->np = N_file;

    PetscFunctionReturn(0);
}
970
971
972#undef __FUNCT__
973#define __FUNCT__ "ReinitializeParticlesOnInletSurface"
974/**
975 * @brief Re-initializes the positions of particles currently on this rank if this rank owns
976 * part of the designated inlet surface.
977 *
978 * This function is intended for `user->ParticleInitialization == 0 or 3` (Surface Initialization modes)
979 * and is typically called after an initial migration step (e.g., in `PerformInitialSetup`).
980 * It ensures that all particles that should originate from the inlet surface and are now
981 * on the correct MPI rank are properly distributed across that rank's portion of the inlet.
982 *
983 * @param user Pointer to the UserCtx structure, containing simulation settings and grid information.
984 * @param currentTime Current simulation time (used for logging).
985 * @param step Current simulation step number (used for logging).
986 * @return PetscErrorCode 0 on success, non-zero on failure.
987 */
988PetscErrorCode ReinitializeParticlesOnInletSurface(UserCtx *user, PetscReal currentTime, PetscInt step)
989{
990 PetscErrorCode ierr;
991 PetscMPIInt rank; // MPI rank of the current process
992 DM swarm = user->swarm; // The particle swarm DM
993 PetscReal *positions_field = NULL; // Pointer to swarm field for physical positions
994 PetscInt64 *particleIDs = NULL; // Pointer to swarm field for Particle IDs (for logging)
995 PetscInt *cell_ID_field = NULL; // Pointer to swarm field for Cell IDs (for resetting after migration)
996 const Cmpnts ***coor_nodes_local_array; // Read-only access to local node coordinates
997 Vec Coor_local; // Local vector for node coordinates
998 DMDALocalInfo info; // Local grid information (node-based) from user->da
999 PetscInt xs_gnode_rank, ys_gnode_rank, zs_gnode_rank; // Local starting node indices (incl. ghosts) of rank's DA
1000 PetscInt IM_nodes_global, JM_nodes_global, KM_nodes_global; // Global node counts
1001
1002 PetscRandom rand_logic_reinit_i, rand_logic_reinit_j, rand_logic_reinit_k; // RNGs for re-placement
1003 PetscInt nlocal_current; // Number of particles currently on this rank
1004 PetscInt particles_actually_reinitialized_count = 0; // Counter for logging
1005 PetscBool can_this_rank_service_inlet = PETSC_FALSE; // Flag
1006
1007 PetscFunctionBeginUser;
1008
1010
1011 // This function is only relevant for surface initialization mode and if an inlet face is defined.
1012 if ((user->simCtx->ParticleInitialization != 0 && user->simCtx->ParticleInitialization !=3) || !user->inletFaceDefined) {
1013 PetscFunctionReturn(0);
1014 }
1015
1016 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
1017 ierr = DMSwarmGetLocalSize(swarm, &nlocal_current); CHKERRQ(ierr);
1018
1019 // If no particles on this rank, nothing to do.
1020 if (nlocal_current == 0) {
1021 LOG_ALLOW(LOCAL, LOG_DEBUG, "[T=%.4f, Step=%d] Rank %d has no local particles to re-initialize on inlet.\n", currentTime, step, rank);
1022 PetscFunctionReturn(0);
1023 }
1024
1025 // Get DMDA information for the node-centered coordinate grid (user->da)
1026 ierr = DMDAGetLocalInfo(user->da, &info); CHKERRQ(ierr);
1027 ierr = DMDAGetInfo(user->da, NULL, &IM_nodes_global, &JM_nodes_global, &KM_nodes_global, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL); CHKERRQ(ierr);
1028 ierr = DMDAGetCorners(user->da, &xs_gnode_rank, &ys_gnode_rank, &zs_gnode_rank, NULL, NULL, NULL); CHKERRQ(ierr);
1029
1030 // Modification to IM_nodes_global etc. to account for 1-cell halo in each direction.
1031 IM_nodes_global -= 1; JM_nodes_global -= 1; KM_nodes_global -= 1;
1032
1033 const PetscInt IM_cells_global = IM_nodes_global > 0 ? IM_nodes_global - 1 : 0;
1034 const PetscInt JM_cells_global = JM_nodes_global > 0 ? JM_nodes_global - 1 : 0;
1035 const PetscInt KM_cells_global = KM_nodes_global > 0 ? KM_nodes_global - 1 : 0;
1036
1037
1038
1039 // Check if this rank is responsible for (part of) the designated inlet surface
1040 ierr = CanRankServiceInletFace(user, &info, IM_nodes_global, JM_nodes_global, KM_nodes_global, &can_this_rank_service_inlet); CHKERRQ(ierr);
1041
1042 // Get coordinate array and swarm fields for modification
1043 ierr = DMGetCoordinatesLocal(user->da, &Coor_local); CHKERRQ(ierr);
1044 ierr = DMDAVecGetArrayRead(user->fda, Coor_local, (void*)&coor_nodes_local_array); CHKERRQ(ierr);
1045 ierr = DMSwarmGetField(swarm, "position", NULL, NULL, (void**)&positions_field); CHKERRQ(ierr);
1046 ierr = DMSwarmGetField(swarm, "DMSwarm_pid", NULL, NULL, (void**)&particleIDs); CHKERRQ(ierr); // For logging
1047 ierr = DMSwarmGetField(swarm, "DMSwarm_CellID", NULL, NULL, (void**)&cell_ID_field); CHKERRQ(ierr);
1048
1049 if (!can_this_rank_service_inlet) {
1050 LOG_ALLOW(LOCAL, LOG_DEBUG, "[T=%.4f, Step=%d] Rank %d cannot service inlet face %s. Skipping re-initialization of %d particles.\n", currentTime, step, rank, BCFaceToString(user->identifiedInletBCFace), nlocal_current);
1051
1052 // FALLBACK ACTION: Reset position fields to Inlet center for migration and cell ID to -1 for safety.
1053 LOG_ALLOW(LOCAL, LOG_DEBUG, "[T=%.4f, Step=%d] Rank %d is resetting %d local particles to inlet center (%.6f, %.6f, %.6f) for migration.\n", currentTime, step, rank, nlocal_current, user->simCtx->CMx_c, user->simCtx->CMy_c, user->simCtx->CMz_c);
1054
1055 for(PetscInt p = 0; p < nlocal_current; p++){
1056 positions_field[3*p+0] = user->simCtx->CMx_c;
1057 positions_field[3*p+1] = user->simCtx->CMy_c;
1058 positions_field[3*p+2] = user->simCtx->CMz_c;
1059
1060 cell_ID_field[3*p+0] = -1;
1061 cell_ID_field[3*p+1] = -1;
1062 cell_ID_field[3*p+2] = -1;
1063 }
1064
1065 // Cleanup: restore swarm fields/coordinate array
1066 ierr = DMDAVecRestoreArrayRead(user->fda, Coor_local, (void*)&coor_nodes_local_array); CHKERRQ(ierr);
1067 ierr = DMSwarmRestoreField(swarm, "position", NULL, NULL, (void**)&positions_field); CHKERRQ(ierr);
1068 ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void**)&particleIDs); CHKERRQ(ierr); // For logging
1069 ierr = DMSwarmRestoreField(swarm, "DMSwarm_CellID", NULL, NULL, (void**)&cell_ID_field); CHKERRQ(ierr);
1071 PetscFunctionReturn(0);
1072 }
1073
1074 LOG_ALLOW(GLOBAL, LOG_INFO, "[T=%.4f, Step=%d] Rank %d is on inlet face %s. Attempting to re-place %d local particles.\n", currentTime, step, rank, BCFaceToString(user->identifiedInletBCFace), nlocal_current);
1075
1076 // Initialize fresh RNGs for this re-placement to ensure good distribution
1077 ierr = InitializeLogicalSpaceRNGs(&rand_logic_reinit_i, &rand_logic_reinit_j, &rand_logic_reinit_k); CHKERRQ(ierr);
1078 // Optional: Seed RNGs for deterministic behavior if required, e.g., based on rank and step.
1079 // PetscRandomSetSeed(rand_logic_i, (unsigned long)rank*1000 + step + 100); PetscRandomSeed(rand_logic_i); // Example
1080
1081 // Loop over all particles currently local to this rank
1082 for (PetscInt p = 0; p < nlocal_current; p++) {
1083 PetscInt ci_metric_lnode, cj_metric_lnode, ck_metric_lnode; // Local node indices (of rank's DA patch) for cell origin
1084 PetscReal xi_metric_logic, eta_metric_logic, zta_metric_logic; // Intra-cell logical coordinates
1085 Cmpnts phys_coords = {0.0,0.0,0.0}; // To store newly calculated physical coordinates
1086 PetscBool particle_was_placed = PETSC_FALSE;
1087
1089 // Get random cell on this rank's portion of the inlet and random logical coords within it
1090 ierr = GetRandomCellAndLogicalCoordsOnInletFace(user, &info, xs_gnode_rank, ys_gnode_rank, zs_gnode_rank,
1091 IM_nodes_global, JM_nodes_global, KM_nodes_global,
1092 &rand_logic_reinit_i, &rand_logic_reinit_j, &rand_logic_reinit_k,
1093 &ci_metric_lnode, &cj_metric_lnode, &ck_metric_lnode,
1094 &xi_metric_logic, &eta_metric_logic, &zta_metric_logic); CHKERRQ(ierr);
1095
1096 // Convert these logical coordinates to physical coordinates
1097
1098 ierr = MetricLogicalToPhysical(user, coor_nodes_local_array,
1099 ci_metric_lnode, cj_metric_lnode, ck_metric_lnode,
1100 xi_metric_logic, eta_metric_logic, zta_metric_logic,
1101 &phys_coords); CHKERRQ(ierr);
1102
1103 // Update the particle's position in the swarm fields
1104 positions_field[3*p+0] = phys_coords.x;
1105 positions_field[3*p+1] = phys_coords.y;
1106 positions_field[3*p+2] = phys_coords.z;
1107 particle_was_placed = PETSC_TRUE;
1108
1110 PetscBool placement_flag = PETSC_FALSE;
1111 ierr = GetDeterministicFaceGridLocation(user, &info, xs_gnode_rank, ys_gnode_rank, zs_gnode_rank,
1112 IM_cells_global, JM_cells_global, KM_cells_global,
1113 particleIDs[p],
1114 &ci_metric_lnode, &cj_metric_lnode, &ck_metric_lnode,
1115 &xi_metric_logic, &eta_metric_logic, &zta_metric_logic,&placement_flag); CHKERRQ(ierr);
1116
1117
1118 if(placement_flag){
1119 // Convert these logical coordinates to physical coordinates
1120 ierr = MetricLogicalToPhysical(user, coor_nodes_local_array,
1121 ci_metric_lnode, cj_metric_lnode, ck_metric_lnode,
1122 xi_metric_logic, eta_metric_logic, zta_metric_logic,
1123 &phys_coords); CHKERRQ(ierr);
1124
1125 // Update the particle's position in the swarm fields
1126 positions_field[3*p+0] = phys_coords.x;
1127 positions_field[3*p+1] = phys_coords.y;
1128 positions_field[3*p+2] = phys_coords.z;
1129 particle_was_placed = PETSC_TRUE;
1130 } else{
1131 // Deterministic placement failed (particle migrated to rank where formula says it doesn't belong)
1132 // Fall back to random placement on this rank's portion of inlet surface
1133 LOG_ALLOW(GLOBAL, LOG_WARNING, "Rank %d: Particle PID %ld deterministic placement failed (belongs to different rank). Falling back to random placement.\n", rank, particleIDs[p]);
1134
1135 ierr = GetRandomCellAndLogicalCoordsOnInletFace(user, &info, xs_gnode_rank, ys_gnode_rank, zs_gnode_rank,
1136 IM_nodes_global, JM_nodes_global, KM_nodes_global,
1137 &rand_logic_reinit_i, &rand_logic_reinit_j, &rand_logic_reinit_k,
1138 &ci_metric_lnode, &cj_metric_lnode, &ck_metric_lnode,
1139 &xi_metric_logic, &eta_metric_logic, &zta_metric_logic); CHKERRQ(ierr);
1140
1141 // Convert to physical coordinates
1142 ierr = MetricLogicalToPhysical(user, coor_nodes_local_array,
1143 ci_metric_lnode, cj_metric_lnode, ck_metric_lnode,
1144 xi_metric_logic, eta_metric_logic, zta_metric_logic,
1145 &phys_coords); CHKERRQ(ierr);
1146
1147 // Update particle position
1148 positions_field[3*p+0] = phys_coords.x;
1149 positions_field[3*p+1] = phys_coords.y;
1150 positions_field[3*p+2] = phys_coords.z;
1151 particle_was_placed = PETSC_TRUE;
1152 }
1153
1154 } else{
1155 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "ReinitializeParticlesOnInletSurface only supports ParticleInitialization modes 0 and 3.");
1156 }
1157
1158 if(particle_was_placed){
1159 particles_actually_reinitialized_count++;
1160
1161 cell_ID_field[3*p+0] = -1;
1162 cell_ID_field[3*p+1] = -1;
1163 cell_ID_field[3*p+2] = -1;
1164
1165 LOG_LOOP_ALLOW(LOCAL, LOG_VERBOSE, p, (nlocal_current > 20 ? nlocal_current/10 : 1), // Sampled logging
1166 "Rank %d: PID %ld (idx %ld) RE-PLACED. CellOriginNode(locDAIdx):(%d,%d,%d). LogicCoords: (%.2e,%.2f,%.2f). PhysCoords: (%.6f,%.6f,%.6f).\n",
1167 rank, particleIDs[p], (long)p,
1168 ci_metric_lnode, cj_metric_lnode, ck_metric_lnode,
1169 xi_metric_logic, eta_metric_logic, zta_metric_logic,
1170 phys_coords.x, phys_coords.y, phys_coords.z);
1171 }
1172 }
1173
1174 // Logging summary of re-initialization
1175 if (particles_actually_reinitialized_count > 0) {
1176 LOG_ALLOW(GLOBAL, LOG_INFO, "[T=%.4f, Step=%d] Rank %d (on inlet face %d) successfully re-initialized %d of %d local particles.\n", currentTime, step, rank, user->identifiedInletBCFace, particles_actually_reinitialized_count, nlocal_current);
1177 } else if (nlocal_current > 0) { // This case should ideally not be hit if can_this_rank_service_inlet was true and particles were present.
1178 LOG_ALLOW(GLOBAL, LOG_WARNING, "[T=%.4f, Step=%d] Rank %d claimed to service inlet face %d, but re-initialized 0 of %d local particles. This may indicate an issue if particles were expected to be re-placed.\n", currentTime, step, rank, user->identifiedInletBCFace, nlocal_current);
1179 }
1180
1181 // Cleanup: Destroy RNGs and restore swarm fields/coordinate array
1182 ierr = PetscRandomDestroy(&rand_logic_reinit_i); CHKERRQ(ierr);
1183 ierr = PetscRandomDestroy(&rand_logic_reinit_j); CHKERRQ(ierr);
1184 ierr = PetscRandomDestroy(&rand_logic_reinit_k); CHKERRQ(ierr);
1185
1186 ierr = DMSwarmRestoreField(swarm, "position", NULL, NULL, (void**)&positions_field); CHKERRQ(ierr);
1187 ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void**)&particleIDs); CHKERRQ(ierr);
1188 ierr = DMSwarmRestoreField(swarm, "DMSwarm_CellID", NULL, NULL, (void**)&cell_ID_field); CHKERRQ(ierr);
1189 ierr = DMDAVecRestoreArrayRead(user->fda, Coor_local, (void*)&coor_nodes_local_array); CHKERRQ(ierr);
1190
1191
1193 PetscFunctionReturn(0);
1194}
1195
1196// Comparison function needed for qsort
1197static int compare_PetscInt64(const void *a, const void *b) {
1198 PetscInt64 val_a = *(const PetscInt64*)a;
1199 PetscInt64 val_b = *(const PetscInt64*)b;
1200 if (val_a < val_b) return -1;
1201 if (val_a > val_b) return 1;
1202 return 0;
1203}
1204
1205#undef __FUNCT__
1206#define __FUNCT__ "GetLocalPIDSnapshot"
1207/**
1208 * @brief Creates a sorted snapshot of all Particle IDs (PIDs) from a raw data array.
1209 * @ingroup ParticleUtils
1210 *
1211 * This function is a crucial helper for the migration process. It captures the state of
1212 * which particles are on the current MPI rank *before* migration occurs by taking a
1213 * pointer to the swarm's raw PID data array. The resulting sorted array can then be used
1214 * with an efficient binary search to quickly identify newcomer particles after migration.
1215 *
1216 * This function does NOT call DMSwarmGetField/RestoreField. It is the caller's
1217 * responsibility to acquire the `pid_field` pointer before calling and restore it afterward.
1218 *
1219 * @param[in] pid_field A read-only pointer to the raw array of PIDs for the local swarm.
1220 * @param[in] n_local The number of particles currently on the local rank.
1221 * @param[out] pids_snapshot_out A pointer to a `PetscInt64*` array. This function will
1222 * allocate memory for this array, and the caller is
1223 * responsible for freeing it with `PetscFree()` when it
1224 * is no longer needed.
1225 *
1226 * @return PetscErrorCode 0 on success, or a non-zero PETSc error code on failure.
1227 */
1228PetscErrorCode GetLocalPIDSnapshot(const PetscInt64 pid_field[],
1229 PetscInt n_local,
1230 PetscInt64 **pids_snapshot_out)
1231{
1232 PetscErrorCode ierr;
1233 PetscMPIInt rank;
1234
1235 PetscFunctionBeginUser;
1236
1238
1239 // --- 1. Input Validation ---
1240 if (!pids_snapshot_out) {
1241 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "Output pointer pids_snapshot_out is NULL.");
1242 }
1243 // If n_local > 0, pid_field must not be NULL.
1244 if (n_local > 0 && !pid_field) {
1245 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "Input pid_field pointer is NULL for n_local > 0.");
1246 }
1247
1248 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
1249 LOG_ALLOW(LOCAL, LOG_DEBUG, "[Rank %d]: Creating PID snapshot for %d local particles.\n", rank, n_local);
1250
1251 // If there are no local particles, the snapshot is empty (NULL).
1252 if (n_local == 0) {
1253 *pids_snapshot_out = NULL;
1254
1256 PetscFunctionReturn(0);
1257 }
1258
1259 // --- 2. Allocate Memory for the Snapshot ---
1260 ierr = PetscMalloc1(n_local, pids_snapshot_out); CHKERRQ(ierr);
1261
1262 // --- 3. Copy Data ---
1263 // Perform a fast memory copy from the provided array to our new snapshot array.
1264 ierr = PetscMemcpy(*pids_snapshot_out, pid_field, n_local * sizeof(PetscInt64)); CHKERRQ(ierr);
1265 LOG_ALLOW(LOCAL, LOG_DEBUG, "[Rank %d]: Copied %d PIDs.\n", rank, n_local);
1266
1267 // --- 4. Sort the Snapshot Array ---
1268 // Sorting enables fast binary search lookups later.
1269 ierr = PetscSortInt64(n_local, *pids_snapshot_out); CHKERRQ(ierr);
1270 LOG_ALLOW(LOCAL, LOG_DEBUG, "[Rank %d]: PID snapshot sorted successfully.\n", rank);
1271
1272
1274 PetscFunctionReturn(0);
1275}
1276
1277#undef __FUNCT__
1278#define __FUNCT__ "AddToMigrationList"
1279/**
1280 * @brief Safely adds a new migration task to a dynamically sized list.
1281 *
1282 * This utility function manages a dynamic array of MigrationInfo structs. It appends
1283 * a new entry to the list and automatically doubles the array's capacity using
1284 * `PetscRealloc` if the current capacity is exceeded. This prevents buffer overflows
1285 * and avoids the need to know the number of migrating particles in advance.
1286 *
1287 * @param[in,out] migration_list_p A pointer to the MigrationInfo array pointer. The function
1288 * will update this pointer if the array is reallocated.
1289 * @param[in,out] capacity_p A pointer to an integer holding the current allocated
1290 * capacity of the list (in number of elements). This will be
1291 * updated upon reallocation.
1292 * @param[in,out] count_p A pointer to an integer holding the current number of
1293 * items in the list. This will be incremented by one.
1294 * @param[in] particle_local_idx The local index (from 0 to nlocal-1) of the particle
1295 * that needs to be migrated.
1296 * @param[in] destination_rank The target MPI rank for the particle.
1297 *
1298 * @return PetscErrorCode 0 on success, or a non-zero PETSc error code on failure (e.g., from memory allocation).
1299 */
1300PetscErrorCode AddToMigrationList(MigrationInfo **migration_list_p,
1301 PetscInt *capacity_p,
1302 PetscInt *count_p,
1303 PetscInt particle_local_idx,
1304 PetscMPIInt destination_rank)
1305{
1306 PetscErrorCode ierr;
1307 PetscMPIInt rank;
1308
1309 PetscFunctionBeginUser;
1310
1312
1313 // --- 1. Input Validation ---
1314 if (!migration_list_p || !capacity_p || !count_p) {
1315 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "Null pointer provided to AddToMigrationList for list management.");
1316 }
1317
1318 // --- 2. Check if the list needs to be resized ---
1319 if (*count_p >= *capacity_p) {
1320 PetscInt old_capacity = *capacity_p;
1321 // Start with a reasonable base capacity, then double for subsequent reallocations.
1322 PetscInt new_capacity = (old_capacity == 0) ? 16 : old_capacity * 2;
1323
1324 // Use PetscRealloc for safe memory reallocation.
1325 // It handles allocating new memory, copying old data, and freeing the old block.
1326 // The first argument to PetscRealloc is the new size in BYTES.
1327 ierr = PetscRealloc(new_capacity * sizeof(MigrationInfo), migration_list_p); CHKERRQ(ierr);
1328
1329 *capacity_p = new_capacity; // Update the capacity tracker
1330
1331 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
1332 LOG_ALLOW(LOCAL, LOG_DEBUG, "[Rank %d]: Reallocated migrationList capacity from %d to %d.\n",
1333 rank, old_capacity, new_capacity);
1334 }
1335
1336 // --- 3. Add the new migration data to the list ---
1337 // Dereference the pointer-to-a-pointer to get the actual array.
1338 MigrationInfo *list = *migration_list_p;
1339
1340 list[*count_p].local_index = particle_local_idx;
1341 list[*count_p].target_rank = destination_rank;
1342
1343 // --- 4. Increment the count of items in the list ---
1344 (*count_p)++;
1345
1346
1348 PetscFunctionReturn(0);
1349}
1350
1351
1352#undef __FUNCT__
1353#define __FUNCT__ "FlagNewComersForLocation"
1354/**
1355 * @brief Identifies newly arrived particles after migration and flags them for a location search.
1356 * @ingroup ParticleMotion
1357 *
1358 * This function is a critical component of the iterative migration process managed by
1359 * the main particle settlement orchestrator (e.g., `LocateAllParticlesInGrid`). After a
1360 * `DMSwarmMigrate` call, each rank's local particle list is a new mix of resident
1361 * particles and newly received ones. This function's job is to efficiently identify
1362 * these "newcomers" and set their `DMSwarm_location_status` field to `NEEDS_LOCATION`.
1363 *
1364 * This ensures that in the subsequent pass of the migration `do-while` loop, only the
1365 * newly arrived particles are processed by the expensive location algorithm, preventing
1366 * redundant work on particles that are already settled on the current rank.
1367 *
1368 * The identification is done by comparing the PIDs of particles currently on the rank
1369 * against a "snapshot" of PIDs taken *before* the migration occurred.
1370 *
1371 * @param[in] swarm The DMSwarm object, which has just completed a migration.
1372 * @param[in] n_local_before The number of particles that were on this rank *before* the
1373 * migration was performed.
1374 * @param[in] pids_before A pre-sorted array of the PIDs that were on this rank before
1375 * the migration. This is used for fast lookups.
1376 *
1377 * @return PetscErrorCode 0 on success, or a non-zero PETSc error code on failure.
1378 *
1379 * @note This function assumes the `pids_before` array is sorted in ascending order to
1380 * enable the use of an efficient binary search.
1381 */
1382PetscErrorCode FlagNewcomersForLocation(DM swarm,
1383 PetscInt n_local_before,
1384 const PetscInt64 pids_before[])
1385{
1386 PetscErrorCode ierr;
1387 PetscMPIInt rank;
1388 PetscInt n_local_after;
1389 PetscInt newcomer_count = 0;
1390
1391 // Pointers to the swarm data fields we will read and modify
1392 PetscInt64 *pid_field_after = NULL;
1393 PetscInt *status_field_after = NULL;
1394 PetscInt *cell_field_after = NULL;
1395
1396 PetscFunctionBeginUser;
1397
1399
1400 // --- 1. Input Validation and Basic Setup ---
1401 if (!swarm) {
1402 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Input DMSwarm is NULL in FlagNewcomersForLocation.");
1403 }
1404 // If n_local_before > 0, the corresponding PID array must not be null.
1405 if (n_local_before > 0 && !pids_before) {
1406 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "Input pids_before array is NULL for n_local_before > 0.");
1407 }
1408
1409 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
1410
1411 // Get the number of particles on this rank *after* the migration.
1412 ierr = DMSwarmGetLocalSize(swarm, &n_local_after); CHKERRQ(ierr);
1413
1414 LOG_ALLOW(LOCAL, LOG_DEBUG, "[Rank %d]: Checking for newcomers. Size before: %d, Size after: %d\n",
1415 rank, n_local_before, n_local_after);
1416
1417 // If there are no particles now, there's nothing to do.
1418 if (n_local_after == 0) {
1419 PetscFunctionReturn(0);
1420 }
1421
1422 // --- 2. Access Swarm Data ---
1423 // Get read-only access to the PIDs and read-write access to the status field.
1424 ierr = DMSwarmGetField(swarm, "DMSwarm_pid", NULL, NULL, (void**)&pid_field_after); CHKERRQ(ierr);
1425 ierr = DMSwarmGetField(swarm, "DMSwarm_location_status", NULL, NULL, (void**)&status_field_after); CHKERRQ(ierr);
1426 ierr = DMSwarmGetField(swarm, "DMSwarm_CellID", NULL, NULL, (void**)&cell_field_after); CHKERRQ(ierr);
1427 if (!pid_field_after || !status_field_after) {
1428 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Failed to get required swarm fields in FlagNewcomersForLocation.");
1429 }
1430
1431 // --- 3. Identify and Flag Newcomers ---
1432 // Loop through all particles currently on this rank.
1433 for (PetscInt p_idx = 0; p_idx < n_local_after; ++p_idx) {
1434 PetscInt64 current_pid = pid_field_after[p_idx];
1435 PetscBool is_found_in_before_list;
1436
1437 // Use our custom, efficient helper function for the lookup.
1438 ierr = BinarySearchInt64(n_local_before, pids_before, current_pid, &is_found_in_before_list); CHKERRQ(ierr);
1439
1440 // If the PID was NOT found in the "before" list, it must be a newcomer.
1441 if (!is_found_in_before_list) {
1442 // Flag it for processing in the next pass of the migration loop.
1443 status_field_after[p_idx] = NEEDS_LOCATION;
1444 // cell_field_after[3*p_idx+0] = -1;
1445 // cell_field_after[3*p_idx+1] = -1;
1446 // cell_field_after[3*p_idx+2] = -1;
1447 newcomer_count++;
1448
1449 LOG_ALLOW(LOCAL, LOG_VERBOSE, "[Rank %d]: Flagged newcomer PID %ld at local index %d as NEEDS_LOCATION.\n",
1450 rank, current_pid, p_idx);
1451 }
1452 }
1453
1454 // --- 4. Restore Swarm Fields ---
1455 // Release the locks on the swarm data arrays.
1456 ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void**)&pid_field_after); CHKERRQ(ierr);
1457 ierr = DMSwarmRestoreField(swarm, "DMSwarm_location_status", NULL, NULL, (void**)&status_field_after); CHKERRQ(ierr);
1458 ierr = DMSwarmRestoreField(swarm, "DMSwarm_CellID", NULL, NULL, (void**)&cell_field_after); CHKERRQ(ierr);
1459
1460 if (newcomer_count > 0) {
1461 LOG_ALLOW(LOCAL, LOG_INFO, "[Rank %d]: Identified and flagged %d newcomers.\n", rank, newcomer_count);
1462 }
1463
1464
1466 PetscFunctionReturn(0);
1467}
1468
1469#undef __FUNCT__
1470#define __FUNCT__ "MigrateRestartParticlesUsingCellID"
1471/**
1472 * @brief Fast-path migration for restart particles using preloaded Cell IDs.
1473 *
1474 * This function provides an optimized migration path specifically for particles
1475 * loaded from restart files. Unlike the standard `LocateAllParticlesInGrid()`
1476 * which performs expensive walking searches, this function leverages the fact that
1477 * restart particles already have valid global Cell IDs loaded from disk.
1478 *
1479 * **How It Works:**
1480 * 1. Iterates through all local particles.
1481 * 2. For each particle with a valid Cell ID (ci, cj, ck):
1482 * - Calls `FindOwnerOfCell(ci, cj, ck)` to determine the correct rank.
1483 * - If owner differs from current rank, adds to migration list.
1484 * - If owner matches current rank, the existing `ACTIVE_AND_LOCATED` status is preserved.
1485 * 3. Uses existing `SetMigrationRanks()` and `PerformMigration()` infrastructure.
1486 * 4. Achieves **single-pass direct migration** (no multi-hop, no walking searches).
1487 *
1488 * @param[in,out] user Pointer to UserCtx containing the swarm and RankCellInfoMap.
1489 * The function updates particle status fields and performs migration.
1490 *
1491 * @return PetscErrorCode 0 on success, non-zero on failure.
1492 */
1494{
1495 PetscErrorCode ierr;
1496 DM swarm = user->swarm;
1497 PetscInt nlocal;
1498 PetscInt *cell_p = NULL;
1499 PetscInt64 *pid_p = NULL;
1500 PetscMPIInt rank;
1501
1502 MigrationInfo *migrationList = NULL;
1503 PetscInt local_migration_count = 0;
1504 PetscInt migrationListCapacity = 0;
1505 PetscInt global_migration_count = 0;
1506
1507 PetscFunctionBeginUser;
1509 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
1510
1511 ierr = DMSwarmGetLocalSize(swarm, &nlocal); CHKERRQ(ierr);
1512 LOG_ALLOW(LOCAL, LOG_DEBUG, "Checking %d restart particles for direct migration using CellIDs.\n", nlocal);
1513
1514 if (nlocal > 0) {
1515 ierr = DMSwarmGetField(swarm, "DMSwarm_CellID", NULL, NULL, (void**)&cell_p); CHKERRQ(ierr);
1516 ierr = DMSwarmGetField(swarm, "DMSwarm_pid", NULL, NULL, (void**)&pid_p); CHKERRQ(ierr);
1517
1518 // Note: We do NOT need to modify the status field here.
1519 // We trust the loaded status (ACTIVE_AND_LOCATED) is correct for the destination rank.
1520
1521 for (PetscInt p_idx = 0; p_idx < nlocal; ++p_idx) {
1522 PetscInt ci = cell_p[3*p_idx + 0];
1523 PetscInt cj = cell_p[3*p_idx + 1];
1524 PetscInt ck = cell_p[3*p_idx + 2];
1525
1526 /* Skip particles with invalid Cell IDs (will be handled by LocateAllParticles) */
1527 if (ci < 0 || cj < 0 || ck < 0) {
1528 continue;
1529 }
1530
1531 PetscMPIInt owner_rank;
1532 ierr = FindOwnerOfCell(user, ci, cj, ck, &owner_rank); CHKERRQ(ierr);
1533
1534 if (owner_rank != -1 && owner_rank != rank) {
1535 /* Particle belongs to another rank - migrate it */
1536 ierr = AddToMigrationList(&migrationList, &migrationListCapacity, &local_migration_count,
1537 p_idx, owner_rank); CHKERRQ(ierr);
1538
1539 LOG_ALLOW(LOCAL, LOG_VERBOSE, "[PID %ld] Direct migration: Cell (%d,%d,%d) belongs to Rank %d (Current: %d).\n",
1540 (long)pid_p[p_idx], ci, cj, ck, owner_rank, rank);
1541 }
1542 }
1543
1544 ierr = DMSwarmRestoreField(swarm, "DMSwarm_CellID", NULL, NULL, (void**)&cell_p); CHKERRQ(ierr);
1545 ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void**)&pid_p); CHKERRQ(ierr);
1546 }
1547
1548 /* Check if any rank needs to migrate particles */
1549 ierr = MPI_Allreduce(&local_migration_count, &global_migration_count, 1, MPIU_INT, MPI_SUM, PETSC_COMM_WORLD); CHKERRQ(ierr);
1550
1551 if (global_migration_count > 0) {
1552 LOG_ALLOW(GLOBAL, LOG_INFO, "Fast restart migration: Directly migrating %d particles using CellIDs.\n", global_migration_count);
1553 ierr = SetMigrationRanks(user, migrationList, local_migration_count); CHKERRQ(ierr);
1554 ierr = PerformMigration(user); CHKERRQ(ierr);
1555 /* We do NOT flag newcomers here. We trust their loaded status (ACTIVE_AND_LOCATED) */
1556 /* is valid for their destination rank. */
1557 } else {
1558 LOG_ALLOW(GLOBAL, LOG_INFO, "Fast restart migration: All particles are already on correct ranks.\n");
1559 }
1560
1561 ierr = PetscFree(migrationList); CHKERRQ(ierr);
1562
1564 PetscFunctionReturn(0);
1565}
1566
1567#undef __FUNCT__
1568#define __FUNCT__ "GuessParticleOwnerWithBBox"
1569/**
1570 * @brief Provides a fast, heuristic-based guess for a particle's owner rank using bounding boxes.
1571 * @ingroup ParticleLocation
1572 *
1573 * This function is part of the "Guess and Verify" strategy, called only for "lost"
1574 * particles. It attempts to find a candidate owner by checking which rank's bounding box
1575 * contains the particle's physical position.
1576 *
1577 * To optimize the search, it uses the particle's position relative to the local
1578 * bounding box to intelligently check the most likely neighboring ranks first.
1579 * For example, if a particle's x-coordinate is less than the local minimum x, it
1580 * will check the -X neighbor first. If no owner is found in the immediate neighbors,
1581 * it performs a full search of all other ranks as a fallback.
1582 *
1583 * @param[in] user Pointer to the UserCtx, which must contain the pre-computed
1584 * `bbox` (local), `neighbors` struct, and the global `bboxlist`.
1585 * @param[in] particle A pointer to the particle whose owner is being guessed.
1586 * @param[in] bboxlist An array of BoundingBox structures for ALL MPI ranks, indexed 0 to (size-1).
1587 * This array must be up-to-date and available on all ranks.
1588 * @param[out] guess_rank_out A pointer to a PetscMPIInt. Set to the candidate owner's rank
1589 * if found, otherwise set to -1 (or MPI_PROC_NULL).
1590 *
1591 * @return PetscErrorCode 0 on success, or a non-zero PETSc error code on failure.
1592 */
1594 const Particle *particle,
1595 const BoundingBox *bboxlist,
1596 PetscMPIInt *guess_rank_out)
1597{
1598 PetscErrorCode ierr;
1599 PetscMPIInt rank, size;
1600 const RankNeighbors *neighbors = &user->neighbors; // Use a direct pointer for clarity
1601 const BoundingBox *localBBox = &user->bbox;
1602
1603 PetscFunctionBeginUser;
1604
1606
1607 // --- 1. Input Validation and Setup ---
1608 if (!user || !particle || !guess_rank_out || !bboxlist) {
1609 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "Null pointer provided to GuessParticleOwnerWithBBox.");
1610 }
1611 if (!localBBox|| !neighbors) {
1612 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Required user->bboxl or user->neighbors is not initialized.");
1613 }
1614
1615 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
1616 ierr = MPI_Comm_size(PETSC_COMM_WORLD, &size); CHKERRQ(ierr);
1617
1618 *guess_rank_out = MPI_PROC_NULL; // Default to "not found"
1619
1620 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld]: Starting guess for particle at (%.3f, %.3f, %.3f).\n",
1621 particle->PID, particle->loc.x, particle->loc.y, particle->loc.z);
1622
1623 // --- Step 0: Check if the particle is inside the CURRENT rank's bounding box FIRST. ---
1624 // This handles the common case of initial placement where a particle is "lost" but physically local.
1625 if (IsParticleInBox(localBBox, &particle->loc)) {
1626 *guess_rank_out = rank;
1627 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld]: Fast path guess SUCCESS. Particle is within the local (Rank %d) bounding box.\n",
1628 particle->PID, rank);
1629
1631 PetscFunctionReturn(0); // Found it, we're done.
1632 }
1633 // --- 2. Fast Path: Check Immediate Neighbors Based on Exit Direction ---
1634
1635 // Determine likely exit direction(s) to prioritize neighbor check
1636 PetscBool exit_xm = particle->loc.x < localBBox->min_coords.x;
1637 PetscBool exit_xp = particle->loc.x > localBBox->max_coords.x;
1638 PetscBool exit_ym = particle->loc.y < localBBox->min_coords.y;
1639 PetscBool exit_yp = particle->loc.y > localBBox->max_coords.y;
1640 PetscBool exit_zm = particle->loc.z < localBBox->min_coords.z;
1641 PetscBool exit_zp = particle->loc.z > localBBox->max_coords.z;
1642
1643 if (exit_xm && neighbors->rank_xm != MPI_PROC_NULL && IsParticleInBox(&bboxlist[neighbors->rank_xm], &particle->loc)) {
1644 *guess_rank_out = neighbors->rank_xm;
1645 } else if (exit_xp&& neighbors->rank_xp != MPI_PROC_NULL && IsParticleInBox(&bboxlist[neighbors->rank_xp], &particle->loc)) {
1646 *guess_rank_out = neighbors->rank_xp;
1647 } else if (exit_ym && neighbors->rank_ym != MPI_PROC_NULL && IsParticleInBox(&bboxlist[neighbors->rank_ym], &particle->loc)) {
1648 *guess_rank_out = neighbors->rank_ym;
1649 } else if (exit_yp && neighbors->rank_yp != MPI_PROC_NULL && IsParticleInBox(&bboxlist[neighbors->rank_yp], &particle->loc)) {
1650 *guess_rank_out = neighbors->rank_yp;
1651 } else if (exit_zm && neighbors->rank_zm != MPI_PROC_NULL && IsParticleInBox(&bboxlist[neighbors->rank_zm], &particle->loc)) {
1652 *guess_rank_out = neighbors->rank_zm;
1653 } else if (exit_zp && neighbors->rank_zp != MPI_PROC_NULL && IsParticleInBox(&bboxlist[neighbors->rank_zp], &particle->loc)) {
1654 *guess_rank_out = neighbors->rank_zp;
1655 }
1656 // Note: This does not handle corner/edge neighbors, which is why the fallback is essential.
1657
1658 if (*guess_rank_out != MPI_PROC_NULL) {
1659 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld]: Fast path guess SUCCESS. Found in immediate neighbor Rank %d.\n",
1660 particle->PID, *guess_rank_out);
1661
1663 PetscFunctionReturn(0); // Found it, we're done.
1664 }
1665
1666 // --- 3. Robust Fallback: Check All Other Ranks ---
1667 // If we get here, the particle was not in any of the immediate face neighbors' boxes.
1668 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld]: Not in immediate face neighbors. Starting global fallback search.\n",
1669 particle->PID);
1670
1671 for (PetscMPIInt r = 0; r < size; ++r) {
1672 if (r == rank) continue; // Don't check ourselves.
1673
1674 if (IsParticleInBox(&bboxlist[r], &particle->loc)) {
1675 PetscBool is_in = PETSC_TRUE;
1676 // This detailed, synchronized print will solve the mystery
1677 LOG_ALLOW(LOCAL,LOG_VERBOSE, "[Rank %d] Checking PID %lld at (%.4f, %.4f, %.4f) against Rank %d's box: [(%.4f, %.4f, %.4f) to (%.4f, %.4f, %.4f)]. Result: %s\n",
1678 (int)rank, (long long)particle->PID,
1679 particle->loc.x, particle->loc.y, particle->loc.z,
1680 (int)r,
1681 bboxlist[r].min_coords.x, bboxlist[r].min_coords.y, bboxlist[r].min_coords.z,
1682 bboxlist[r].max_coords.x, bboxlist[r].max_coords.y, bboxlist[r].max_coords.z,
1683 is_in ? "INSIDE" : "OUTSIDE");
1684
1685 *guess_rank_out = r;
1686 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld]: Fallback search SUCCESS. Found in Rank %d.\n",
1687 particle->PID, *guess_rank_out);
1688
1690 PetscFunctionReturn(0); // Found it, we're done.
1691 }
1692 }
1693
1694 // If the code reaches here, the particle was not found in any rank's bounding box.
1695 LOG_ALLOW(LOCAL, LOG_WARNING, "[PID %ld]: Guess FAILED. Particle not found in any rank's bounding box.\n",
1696 particle->PID);
1697
1698 // The guess_rank_out will remain -1, signaling failure to the caller.
1700 PetscFunctionReturn(0);
1701}
1702
1703#undef __FUNCT__
1704#define __FUNCT__ "LocateAllParticlesInGrid"
1705/**
1706 * @brief Orchestrates the complete particle location and migration process for one timestep.
1707 * @ingroup ParticleLocation
1708 *
1709 * This function is the master orchestrator for ensuring every particle is on its correct
1710 * MPI rank and has a valid host cell index. It is designed to be called once per
1711 * timestep after particle positions have been updated.
1712 *
1713 * The function uses a robust, iterative "Guess and Verify" strategy within a
1714 * do-while loop to handle complex particle motion across processor boundaries,
1715 * especially on curvilinear grids.
1716 *
1717 * 1. **State Snapshot:** At the start of each pass, it captures a list of all Particle IDs (PIDs)
1718 * on the current rank.
1719 * 2. **"Guess" (Heuristic):** For particles that are "lost" (no valid host cell),
1720 * it first attempts a fast, bounding-box-based guess to find a potential new owner rank.
1721 * 3. **"Verify" (Robust Walk):** For all other particles, or if the guess fails,
1722 * it uses a robust cell-walking algorithm (`LocateParticleOrFindMigrationTarget`)
1723 * that determines the particle's status: located locally, needs migration, or is lost.
1724 * 4. **Migration:** After identifying all migrating particles on a pass, it performs the
1725 * MPI communication using the `SetMigrationRanks` and `PerformMigration` helpers.
1726 * 5. **Newcomer Flagging:** After migration, it uses the PID snapshot from step 1 to
1727 * efficiently identify newly arrived particles and flag them for location on the next pass.
1728 * 6. **Iteration:** The process repeats in a `do-while` loop until a pass occurs where
1729 * no particles migrate, ensuring the entire swarm is in a stable, consistent state.
1730 *
1731 * @param[in,out] user Pointer to the UserCtx, containing the swarm and all necessary
1732 * domain topology information (bboxlist, RankCellInfoMap, etc.).
1733 * @param[in] bboxlist An array of BoundingBox structures for ALL MPI ranks, indexed 0 to (size-1).
1734 * This array must be up-to-date and available on all ranks.
1735 * @return PetscErrorCode 0 on success, or a non-zero PETSc error code on failure.
1736 */
1737PetscErrorCode LocateAllParticlesInGrid(UserCtx *user,BoundingBox *bboxlist)
1738{
1739 PetscErrorCode ierr;
1740 PetscInt passes = 0;
1741 const PetscInt MAX_MIGRATION_PASSES = 50; // Safety break for runaway loops
1742 PetscInt global_migrations_this_pass;
1743 PetscMPIInt rank;
1744 PetscInt total_migrated_this_timestep = 0;
1745
1746 PetscFunctionBeginUser;
1748 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
1749 LOG_ALLOW(GLOBAL, LOG_INFO, "LocateAllParticlesInGrid (Orchestrator) - Beginning particle settlement process.\n");
1750
1751 // This loop ensures that particles that jump across multiple ranks are
1752 // handled correctly in successive, iterative handoffs.
1753 do {
1754 passes++;
1755 LOG_ALLOW_SYNC(GLOBAL, LOG_INFO, "[Rank %d] Starting migration pass %d.\n", rank, passes);
1756
1757 // --- STAGE 1: PER-PASS INITIALIZATION ---
1758 MigrationInfo *migrationList = NULL;
1759 PetscInt local_migration_count = 0;
1760 PetscInt migrationListCapacity = 0;
1761 PetscInt nlocal_before;
1762 PetscInt64 *pids_before_snapshot = NULL;
1763 PetscInt local_lost_count = 0;
1764
1765 ierr = DMSwarmGetLocalSize(user->swarm, &nlocal_before); CHKERRQ(ierr);
1766 LOG_ALLOW(LOCAL, LOG_DEBUG, "[Rank %d] Pass %d begins with %d local particles.\n", rank, passes, nlocal_before);
1767
1768
1769 // --- STAGE 2: PRE-MIGRATION SNAPSHOT & MAIN PROCESSING LOOP ---
1770 if (nlocal_before > 0) {
1771 // Get pointers to all fields needed for this pass
1772 PetscReal *pos_p, *weights_p, *vel_p;
1773 PetscInt *cell_p, *status_p;
1774 PetscInt64 *pid_p;
1775 ierr = DMSwarmGetField(user->swarm, "position", NULL, NULL, (void**)&pos_p); CHKERRQ(ierr);
1776 ierr = DMSwarmGetField(user->swarm, "velocity", NULL, NULL, (void**)&vel_p); CHKERRQ(ierr);
1777 ierr = DMSwarmGetField(user->swarm, "weight", NULL, NULL, (void**)&weights_p); CHKERRQ(ierr);
1778 ierr = DMSwarmGetField(user->swarm, "DMSwarm_CellID", NULL, NULL, (void**)&cell_p); CHKERRQ(ierr);
1779 ierr = DMSwarmGetField(user->swarm, "DMSwarm_pid", NULL, NULL, (void**)&pid_p); CHKERRQ(ierr);
1780 ierr = DMSwarmGetField(user->swarm, "DMSwarm_location_status", NULL, NULL, (void**)&status_p); CHKERRQ(ierr);
1781
1782 // Create a sorted snapshot of current PIDs to identify newcomers after migration.
1783 // This helper requires a raw pointer, which we just acquired.
1784 ierr = GetLocalPIDSnapshot(pid_p, nlocal_before, &pids_before_snapshot); CHKERRQ(ierr);
1785
1786 for (PetscInt p_idx = 0; p_idx < nlocal_before; p_idx++) {
1787
1788 // OPTIMIZATION: Skip particles already settled in a previous pass of this do-while loop.
1789
1791 "Local Particle idx=%d, PID=%ld, status=%s, cell=(%d, %d, %d)\n",
1792 p_idx,
1793 (long)pid_p[p_idx],
1795 cell_p[3*p_idx],
1796 cell_p[3*p_idx+1],
1797 cell_p[3*p_idx+2]);
1798
1799 if (status_p[p_idx] == ACTIVE_AND_LOCATED) {
1800 LOG_ALLOW(LOCAL,LOG_VERBOSE," [rank %d][PID %ld] skipped in pass %d as it is already located at (%d,%d,%d).\n",rank,pid_p[p_idx],passes,cell_p[3*p_idx],cell_p[3*p_idx + 1],cell_p[3*p_idx + 2]);
1801 continue;
1802 }
1803
1804 // UNPACK: Create a temporary C struct for easier processing using our helper.
1805 Particle current_particle;
1806
1807 // LOG_ALLOW(LOCAL,LOG_DEBUG,"about to unpack p_idx=%d (PID=%ld)\n",p_idx, (long)pid_p[p_idx]);
1808
1809 ierr = UnpackSwarmFields(p_idx, pid_p, weights_p, pos_p, cell_p, vel_p, status_p,NULL,NULL,NULL,&current_particle); CHKERRQ(ierr);
1810
1811 // LOG_ALLOW(LOCAL,LOG_DEBUG,"unpacked p_idx=%d → cell[0]=%d, status=%s\n",p_idx, current_particle.cell[0], ParticleLocationStatusToString((ParticleLocationStatus)current_particle.location_status));
1812
1813 ParticleLocationStatus final_status = (ParticleLocationStatus)status_p[p_idx];
1814
1815
1816 // CASE 1: Particle has a valid prior cell index.
1817 // It has moved, so we only need to run the robust walk from its last known location.
1818 if (current_particle.cell[0] >= 0) {
1819 LOG_ALLOW(LOCAL, LOG_VERBOSE, "[PID %ld] has valid prior cell. Strategy: Robust Walk from previous cell.\n", current_particle.PID);
1820 ierr = LocateParticleOrFindMigrationTarget(user, &current_particle, &final_status); CHKERRQ(ierr);
1821 }
1822
1823 /*
1824 // --- "GUESS" FAST PATH for lost particles ---
1825 if (current_particle.cell[0] < 0) {
1826 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld] is lost or uninitialzied (cell=%d), attempting fast guess.\n",current_particle.PID, current_particle.cell[0]);
1827 ierr = GuessParticleOwnerWithBBox(user, &current_particle, bboxlist, &destination_rank); CHKERRQ(ierr);
1828 if (destination_rank != MPI_PROC_NULL && destination_rank != rank) {
1829 final_status = MIGRATING_OUT;
1830 // The particle struct's destination rank must be updated for consistency
1831 current_particle.destination_rank = destination_rank;
1832 }
1833 }
1834
1835 LOG_ALLOW(LOCAL,LOG_DEBUG,"[PID %ld] Particle status after Initial Guess:%d \n",current_particle.PID,final_status);
1836
1837 // --- "VERIFY" ROBUST WALK if guess didn't resolve it ---
1838 if (final_status == NEEDS_LOCATION || UNINITIALIZED) {
1839 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld] Not resolved by guess, starting robust walk.\n", current_particle.PID);
1840 // This function will update the particle's status and destination rank internally.
1841 ierr = LocateParticleOrFindMigrationTarget(user, &current_particle, &final_status); CHKERRQ(ierr);
1842 destination_rank = current_particle.destination_rank; // Retrieve the result
1843 }
1844
1845 // --- PROCESS THE FINAL STATUS AND TAKE ACTION ---
1846 if (final_status == MIGRATING_OUT) {
1847 status_p[p_idx] = MIGRATING_OUT; // Mark for removal by DMSwarm
1848 ierr = AddToMigrationList(&migrationList, &migrationListCapacity, &local_migration_count, p_idx, destination_rank); CHKERRQ(ierr);
1849 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld] at local index %d marked for migration to rank %d.\n",current_particle.PID, p_idx, destination_rank);
1850 } else {
1851 // Particle's final status is either LOCATED or LOST; update its state in the swarm arrays.
1852 current_particle.location_status = final_status;
1853 // PACK: Use the helper to write results back to the swarm arrays.
1854 ierr = UpdateSwarmFields(p_idx, &current_particle, pos_p, vel_p, weights_p, cell_p, status_p,NULL,NULL,NULL); CHKERRQ(ierr);
1855 }
1856 */
1857 // CASE 2: Particle is "lost" (cell = -1). Strategy: Guess -> Verify.
1858 else {
1859 LOG_ALLOW(LOCAL, LOG_VERBOSE, "[PID %ld] has invalid cell. Strategy: Guess Owner -> Find Cell.\n",current_particle.PID);
1860
1861 PetscMPIInt guessed_owner_rank = MPI_PROC_NULL;
1862 ierr = GuessParticleOwnerWithBBox(user, &current_particle, bboxlist, &guessed_owner_rank); CHKERRQ(ierr);
1863
1864 // If the guess finds a DIFFERENT rank, we can mark for migration and skip the walk.
1865 if (guessed_owner_rank != MPI_PROC_NULL && guessed_owner_rank != rank) {
1866 LOG_ALLOW(LOCAL, LOG_VERBOSE, "[PID %ld] Guess SUCCESS: Found migration target Rank %d. Finalizing.\n", current_particle.PID, guessed_owner_rank);
1867 final_status = MIGRATING_OUT;
1868 current_particle.destination_rank = guessed_owner_rank;
1869 }
1870 else {
1871
1872 // This block runs if the guess either failed (rank is NULL) or found the particle is local (rank is self).
1873 // In BOTH cases, the situation is unresolved, and we MUST fall back to the robust walk.
1874 if (guessed_owner_rank == rank) {
1875 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld] Guess determined particle is local. Proceeding to robust walk to find cell.\n", current_particle.PID);
1876 } else { // guessed_owner_rank == MPI_PROC_NULL
1877 LOG_ALLOW(LOCAL, LOG_WARNING, "[PID %ld] Guess FAILED to find an owner. Proceeding to robust walk for definitive search.\n", current_particle.PID);
1878 }
1879
1880 ierr = LocateParticleOrFindMigrationTarget(user, &current_particle, &final_status); CHKERRQ(ierr);
1881 }
1882 }
1883
1884 // --- PROCESS THE FINAL, DEFINITIVE STATUS ---
1885 current_particle.location_status = final_status;
1886 ierr = UpdateSwarmFields(p_idx, &current_particle, pos_p, vel_p, weights_p, cell_p, status_p,NULL,NULL,NULL); CHKERRQ(ierr);
1887
1888 if (final_status == MIGRATING_OUT) {
1889 ierr = AddToMigrationList(&migrationList, &migrationListCapacity, &local_migration_count, p_idx, current_particle.destination_rank); CHKERRQ(ierr);
1890 } else if (final_status == LOST) {
1891 local_lost_count++;
1892 }
1893
1894 } // End of main particle processing loop
1895
1896 // Restore all the fields acquired for this pass.
1897 ierr = DMSwarmRestoreField(user->swarm, "position", NULL, NULL, (void**)&pos_p); CHKERRQ(ierr);
1898 ierr = DMSwarmRestoreField(user->swarm, "velocity", NULL, NULL, (void**)&vel_p); CHKERRQ(ierr);
1899 ierr = DMSwarmRestoreField(user->swarm, "weight", NULL, NULL, (void**)&weights_p); CHKERRQ(ierr);
1900 ierr = DMSwarmRestoreField(user->swarm, "DMSwarm_CellID", NULL, NULL, (void**)&cell_p); CHKERRQ(ierr);
1901 ierr = DMSwarmRestoreField(user->swarm, "DMSwarm_pid", NULL, NULL, (void**)&pid_p); CHKERRQ(ierr);
1902 ierr = DMSwarmRestoreField(user->swarm, "DMSwarm_location_status", NULL, NULL, (void**)&status_p); CHKERRQ(ierr);
1903 }
1904
1905 // --- STAGE 3: ACTION & MPI COMMUNICATION ---
1906 LOG_ALLOW(LOCAL, LOG_INFO, "[Rank %d] Pass %d: Identified %d particles to migrate out.\n", rank, passes, local_migration_count);
1907
1908 // --- STAGE 3: SYNCHRONIZE AND DECIDE ---
1909 // FIRST, determine if any rank wants to migrate. This call is safe because
1910 // all ranks have finished their local work and can participate.
1911 ierr = MPI_Allreduce(&local_migration_count, &global_migrations_this_pass, 1, MPIU_INT, MPI_SUM, PETSC_COMM_WORLD); CHKERRQ(ierr);
1912
1913 total_migrated_this_timestep += global_migrations_this_pass;
1914
1915 if(global_migrations_this_pass > 0 ){
1916
1917 LOG_ALLOW(GLOBAL, LOG_INFO, "Pass %d: Migrating %d particles globally.\n", passes, global_migrations_this_pass);
1918
1919 ierr = SetMigrationRanks(user, migrationList, local_migration_count); CHKERRQ(ierr);
1920 ierr = PerformMigration(user); CHKERRQ(ierr);
1921
1922 // --- STAGE 4: POST-MIGRATION RESET ---
1923 // Identify newly arrived particles and flag them with NEEDS_LOCATION so they are
1924 // processed in the next pass. This uses the snapshot taken in STAGE 2.
1925 ierr = FlagNewcomersForLocation(user->swarm, nlocal_before, pids_before_snapshot); CHKERRQ(ierr);
1926 }
1927 // --- STAGE 5: LOOP SYNCHRONIZATION AND CLEANUP ---
1928
1929 ierr = PetscFree(pids_before_snapshot);
1930 ierr = PetscFree(migrationList);
1931
1932 LOG_ALLOW(GLOBAL, LOG_INFO, "End of pass %d. Total particles migrated globally: %d.\n", passes, global_migrations_this_pass);
1933
1934 } while (global_migrations_this_pass > 0 && passes < MAX_MIGRATION_PASSES);
1935
1936 // --- FINAL CHECKS ---
1937 if (passes >= MAX_MIGRATION_PASSES) {
1938 SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_CONV_FAILED, "Particle migration failed to converge after %d passes. Check for particles oscillating between ranks.", MAX_MIGRATION_PASSES);
1939 }
1940
1941 user->simCtx->particlesMigratedLastStep = total_migrated_this_timestep;
1942 user->simCtx->migrationPassesLastStep = passes;
1943
1944 LOG_ALLOW(GLOBAL, LOG_INFO, "Particle Location completed in %d passes.\n", passes);
1945
1947 PetscFunctionReturn(0);
1948}
1949
1950
1951#undef __FUNCT__
1952#define __FUNCT__ "ResetAllParticleStatuses"
1953/**
1954 * This function is designed to be called at the end of a full timestep, after all
1955 * particle-based calculations are complete. It prepares the swarm for the next
1956 * timestep by ensuring that after the next position update, every particle will be
1957 * re-evaluated by the LocateAllParticlesInGrid orchestrator.
1958 *
1959 * It iterates through all locally owned particles and sets their
1960 * `DMSwarm_location_status` field to `NEEDS_LOCATION`.
1961 *
1962 * @param[in,out] user Pointer to the UserCtx containing the swarm.
1963 * @return PetscErrorCode 0 on success, or a non-zero PETSc error code on failure.
1964 */
1966{
1967 PetscErrorCode ierr;
1968 PetscInt n_local;
1969 PetscInt *status_p;
1970
1971 PetscFunctionBeginUser;
1972
1974
1975 ierr = DMSwarmGetLocalSize(user->swarm, &n_local); CHKERRQ(ierr);
1976
1977 if (n_local > 0) {
1978 // Get write access to the status field
1979 ierr = DMSwarmGetField(user->swarm, "DMSwarm_location_status", NULL, NULL, (void**)&status_p); CHKERRQ(ierr);
1980
1981 for (PetscInt p = 0; p < n_local; ++p) {
1982 // Only reset particles that are considered settled. This is a small optimization
1983 // to avoid changing the status of a LOST particle, though resetting all would also be fine.
1984 if (status_p[p] == ACTIVE_AND_LOCATED) {
1985 status_p[p] = NEEDS_LOCATION;
1986 }
1987 }
1988
1989 // Restore the field
1990 ierr = DMSwarmRestoreField(user->swarm, "DMSwarm_location_status", NULL, NULL, (void**)&status_p); CHKERRQ(ierr);
1991 }
1992
1993
1995 PetscFunctionReturn(0);
1996}
PetscErrorCode GetRandomCellAndLogicalCoordsOnInletFace(UserCtx *user, const DMDALocalInfo *info, PetscInt xs_gnode_rank, PetscInt ys_gnode_rank, PetscInt zs_gnode_rank, PetscInt IM_nodes_global, PetscInt JM_nodes_global, PetscInt KM_nodes_global, PetscRandom *rand_logic_i_ptr, PetscRandom *rand_logic_j_ptr, PetscRandom *rand_logic_k_ptr, PetscInt *ci_metric_lnode_out, PetscInt *cj_metric_lnode_out, PetscInt *ck_metric_lnode_out, PetscReal *xi_metric_logic_out, PetscReal *eta_metric_logic_out, PetscReal *zta_metric_logic_out)
Assuming the current rank services the inlet face, this function selects a random cell (owned by this...
Definition Boundaries.c:467
PetscErrorCode CanRankServiceInletFace(UserCtx *user, const DMDALocalInfo *info, PetscInt IM_nodes_global, PetscInt JM_nodes_global, PetscInt KM_nodes_global, PetscBool *can_service_inlet_out)
Determines if the current MPI rank owns any part of the globally defined inlet face,...
Definition Boundaries.c:25
PetscErrorCode GetDeterministicFaceGridLocation(UserCtx *user, const DMDALocalInfo *info, PetscInt xs_gnode_rank, PetscInt ys_gnode_rank, PetscInt zs_gnode_rank, PetscInt IM_cells_global, PetscInt JM_cells_global, PetscInt KM_cells_global, PetscInt64 particle_global_id, PetscInt *ci_metric_lnode_out, PetscInt *cj_metric_lnode_out, PetscInt *ck_metric_lnode_out, PetscReal *xi_metric_logic_out, PetscReal *eta_metric_logic_out, PetscReal *zta_metric_logic_out, PetscBool *placement_successful_out)
Places particles in a deterministic grid/raster pattern on a specified domain face.
Definition Boundaries.c:268
PetscErrorCode MetricLogicalToPhysical(UserCtx *user, const Cmpnts ***X, PetscInt i, PetscInt j, PetscInt k, PetscReal xi, PetscReal eta, PetscReal zta, Cmpnts *Xp)
Public wrapper: map (cell index, ξ,η,ζ) to (x,y,z).
Definition Metric.c:77
static int compare_PetscInt64(const void *a, const void *b)
PetscErrorCode GuessParticleOwnerWithBBox(UserCtx *user, const Particle *particle, const BoundingBox *bboxlist, PetscMPIInt *guess_rank_out)
Provides a fast, heuristic-based guess for a particle's owner rank using bounding boxes.
PetscErrorCode GenerateGaussianNoise(PetscRandom rnd, PetscReal *n1, PetscReal *n2)
Generates two independent standard normal random variables N(0,1) using the Box-Muller transform.
PetscErrorCode ResizeSwarmGlobally(DM swarm, PetscInt N_target)
PetscErrorCode AddToMigrationList(MigrationInfo **migration_list_p, PetscInt *capacity_p, PetscInt *count_p, PetscInt particle_local_idx, PetscMPIInt destination_rank)
Safely adds a new migration task to a dynamically sized list.
PetscErrorCode SetMigrationRanks(UserCtx *user, const MigrationInfo *migrationList, PetscInt migrationCount)
Sets the target rank field (DMSwarmPICField_rank) for particles scheduled for migration.
PetscErrorCode CheckAndRemoveOutOfBoundsParticles(UserCtx *user, PetscInt *removedCountLocal, PetscInt *removedCountGlobal, const BoundingBox *bboxlist)
Checks for particles outside the global physical domain and removes them.
PetscErrorCode GetLocalPIDSnapshot(const PetscInt64 pid_field[], PetscInt n_local, PetscInt64 **pids_snapshot_out)
Creates a sorted snapshot of all Particle IDs (PIDs) from a raw data array.
PetscErrorCode MigrateRestartParticlesUsingCellID(UserCtx *user)
Fast-path migration for restart particles using preloaded Cell IDs.
PetscErrorCode UpdateAllParticlePositions(UserCtx *user)
Loops over all local particles in the DMSwarm, updating their positions based on velocity and the glo...
PetscErrorCode CalculateBrownianDisplacement(UserCtx *user, PetscReal diff_eff, Cmpnts *displacement)
Calculates the stochastic displacement vector (Brownian motion) for a single particle.
PetscErrorCode LocateAllParticlesInGrid(UserCtx *user, BoundingBox *bboxlist)
Orchestrates the complete particle location and migration process for one timestep.
#define ERROR_MSG_BUFFER_SIZE
PetscErrorCode UpdateParticlePosition(UserCtx *user, Particle *particle)
Updates a particle's position based on its velocity and the timestep dt (stored in user->dt).
PetscErrorCode ResetAllParticleStatuses(UserCtx *user)
This function is designed to be called at the end of a full timestep, after all particle-based calcul...
PetscErrorCode ReinitializeParticlesOnInletSurface(UserCtx *user, PetscReal currentTime, PetscInt step)
Re-initializes the positions of particles currently on this rank if this rank owns part of the design...
PetscErrorCode CheckAndRemoveLostParticles(UserCtx *user, PetscInt *removedCountLocal, PetscInt *removedCountGlobal)
Removes particles that have been definitively flagged as LOST by the location algorithm.
PetscErrorCode FlagNewcomersForLocation(DM swarm, PetscInt n_local_before, const PetscInt64 pids_before[])
Identifies newly arrived particles after migration and flags them for a location search.
static PetscBool IsParticleInBox(const BoundingBox *bbox, const Cmpnts *pos)
Checks if a particle position is within the bounds of a given bounding box.
PetscErrorCode PreCheckAndResizeSwarm(UserCtx *user, PetscInt ti, const char *ext)
Checks particle count from a saved file and resizes the swarm globally.
PetscErrorCode PerformMigration(UserCtx *user)
Performs particle migration based on the pre-populated DMSwarmPICField_rank field.
Header file for Particle Motion and migration related functions.
PetscErrorCode UnpackSwarmFields(PetscInt i, const PetscInt64 *PIDs, const PetscReal *weights, const PetscReal *positions, const PetscInt *cellIndices, PetscReal *velocities, PetscInt *LocStatus, PetscReal *diffusivity, Cmpnts *diffusivitygradient, PetscReal *psi, Particle *particle)
Initializes a Particle struct with data from DMSwarm fields.
PetscErrorCode UpdateSwarmFields(PetscInt i, const Particle *particle, PetscReal *positions, PetscReal *velocities, PetscReal *weights, PetscInt *cellIndices, PetscInt *status, PetscReal *diffusivity, Cmpnts *diffusivitygradient, PetscReal *psi)
Updates DMSwarm data arrays from a Particle struct.
PetscErrorCode CalculateParticleCountPerCell(UserCtx *user)
Counts particles in each cell of the DMDA 'da' and stores the result in user->ParticleCount.
#define LOG_LOOP_ALLOW(scope, level, iterVar, interval, fmt,...)
Logs a message inside a loop, but only every interval iterations.
Definition logging.h:312
#define LOG_ALLOW_SYNC(scope, level, fmt,...)
----— DEBUG ---------------------------------------— #define LOG_ALLOW(scope, level,...
Definition logging.h:267
#define LOCAL
Logging scope definitions for controlling message output.
Definition logging.h:45
#define GLOBAL
Scope for global logging across all processes.
Definition logging.h:46
const char * BCFaceToString(BCFace face)
Helper function to convert BCFace enum to a string representation.
Definition logging.c:643
#define LOG_ALLOW(scope, level, fmt,...)
Logging macro that checks both the log level and whether the calling function is in the allowed-funct...
Definition logging.h:200
#define PROFILE_FUNCTION_END
Marks the end of a profiled code block.
Definition logging.h:746
const char * ParticleLocationStatusToString(ParticleLocationStatus level)
A function that outputs the name of the current level in the ParticleLocation enum.
Definition logging.c:938
@ LOG_ERROR
Critical errors that may halt the program.
Definition logging.h:28
@ LOG_INFO
Informational messages about program execution.
Definition logging.h:31
@ LOG_WARNING
Non-critical issues that warrant attention.
Definition logging.h:29
@ LOG_DEBUG
Detailed debugging information.
Definition logging.h:32
@ LOG_VERBOSE
Extremely detailed logs, typically for development use only.
Definition logging.h:34
#define PROFILE_FUNCTION_BEGIN
Marks the beginning of a profiled code block (typically a function).
Definition logging.h:737
PetscErrorCode InitializeLogicalSpaceRNGs(PetscRandom *rand_logic_i, PetscRandom *rand_logic_j, PetscRandom *rand_logic_k)
Initializes random number generators for logical space operations [0.0, 1.0).
Definition setup.c:2693
PetscErrorCode BinarySearchInt64(PetscInt n, const PetscInt64 arr[], PetscInt64 key, PetscBool *found)
Performs a binary search for a key in a sorted array of PetscInt64.
Definition setup.c:2411
PetscMPIInt rank_zm
Definition variables.h:182
Cmpnts vel
Definition variables.h:169
PetscBool inletFaceDefined
Definition variables.h:747
PetscMPIInt rank_yp
Definition variables.h:181
BCFace identifiedInletBCFace
Definition variables.h:748
PetscInt cell[3]
Definition variables.h:167
SimCtx * simCtx
Back-pointer to the master simulation context.
Definition variables.h:731
@ PARTICLE_INIT_SURFACE_RANDOM
Random placement on the inlet face.
Definition variables.h:466
@ PARTICLE_INIT_SURFACE_EDGES
Deterministic placement at inlet face edges.
Definition variables.h:469
ParticleLocationStatus
Defines the state of a particle with respect to its location and migration status during the iterativ...
Definition variables.h:135
@ LOST
Definition variables.h:139
@ NEEDS_LOCATION
Definition variables.h:136
@ ACTIVE_AND_LOCATED
Definition variables.h:137
@ MIGRATING_OUT
Definition variables.h:138
PetscMPIInt rank_ym
Definition variables.h:181
PetscReal CMy_c
Definition variables.h:642
PetscMPIInt rank_xp
Definition variables.h:180
PetscInt local_index
Definition variables.h:193
Cmpnts max_coords
Maximum x, y, z coordinates of the bounding box.
Definition variables.h:156
Cmpnts diffusivitygradient
Definition variables.h:174
PetscReal dt
Definition variables.h:599
RankNeighbors neighbors
Definition variables.h:740
PetscInt np
Definition variables.h:676
Cmpnts min_coords
Minimum x, y, z coordinates of the bounding box.
Definition variables.h:155
PetscScalar x
Definition variables.h:101
Cmpnts loc
Definition variables.h:168
char * current_io_directory
Definition variables.h:611
PetscMPIInt destination_rank
Definition variables.h:172
Vec lParticleCount
Definition variables.h:800
ParticleLocationStatus location_status
Definition variables.h:171
char particle_subdir[PETSC_MAX_PATH_LEN]
Definition variables.h:608
PetscMPIInt rank_xm
Definition variables.h:180
char source_dir[PETSC_MAX_PATH_LEN]
Definition variables.h:501
PetscReal CMz_c
Definition variables.h:642
ParticleInitializationType ParticleInitialization
Definition variables.h:680
PetscScalar z
Definition variables.h:101
Vec ParticleCount
Definition variables.h:800
PetscInt particlesMigratedLastStep
Definition variables.h:684
PetscMPIInt rank_zp
Definition variables.h:182
PetscReal diffusivity
Definition variables.h:173
PetscRandom BrownianMotionRNG
Definition variables.h:687
PostProcessParams * pps
Definition variables.h:715
PetscInt migrationPassesLastStep
Definition variables.h:683
PetscScalar y
Definition variables.h:101
@ EXEC_MODE_SOLVER
Definition variables.h:558
@ EXEC_MODE_POSTPROCESSOR
Definition variables.h:559
char _io_context_buffer[PETSC_MAX_PATH_LEN]
Definition variables.h:610
ExecutionMode exec_mode
Definition variables.h:603
BoundingBox bbox
Definition variables.h:739
PetscInt64 PID
Definition variables.h:166
char restart_dir[PETSC_MAX_PATH_LEN]
Definition variables.h:605
PetscReal CMx_c
Definition variables.h:642
Defines a 3D axis-aligned bounding box.
Definition variables.h:154
A 3D point or vector with PetscScalar components.
Definition variables.h:100
Information needed to migrate a single particle between MPI ranks.
Definition variables.h:192
Defines a particle's core properties for Lagrangian tracking.
Definition variables.h:165
Stores the MPI ranks of neighboring subdomains.
Definition variables.h:179
The master context for the entire simulation.
Definition variables.h:585
User-defined context containing data specific to a single computational grid level.
Definition variables.h:728
Head of a generic C-style linked list.
Definition variables.h:372
PetscErrorCode LocateParticleOrFindMigrationTarget(UserCtx *user, Particle *particle, ParticleLocationStatus *status_out)
Locates a particle's host cell or identifies its migration target using a robust walk search.
PetscErrorCode FindOwnerOfCell(UserCtx *user, PetscInt i, PetscInt j, PetscInt k, PetscMPIInt *owner_rank)
Finds the MPI rank that owns a given global cell index.