PICurv 0.1.0
A Parallel Particle-In-Cell Solver for Curvilinear LES
Loading...
Searching...
No Matches
ParticleMotion.c
Go to the documentation of this file.
1// ParticleMotion.c
2
3#include "ParticleMotion.h"
4
5// Define a buffer size for error messages if not already available
6#ifndef ERROR_MSG_BUFFER_SIZE
7#define ERROR_MSG_BUFFER_SIZE 256 // Or use PETSC_MAX_PATH_LEN if appropriate
8#endif
9
10#undef __FUNCT__
11#define __FUNCT__ "GenerateGaussianNoise"
12/**
13 * @brief Internal helper implementation: `GenerateGaussianNoise()`.
14 * @details Local to this translation unit.
15 */
16PetscErrorCode GenerateGaussianNoise(PetscRandom rnd, PetscReal *n1, PetscReal *n2)
17{
18 PetscErrorCode ierr;
19 PetscScalar val1, val2;
20 PetscReal u1, u2;
21 PetscReal magnitude, theta;
22
23 PetscFunctionBeginUser;
24
25 // 1. Get two independent uniform random numbers from the generator
26 // PetscRandomGetValue returns a PetscScalar (which might be complex).
27 // We take the Real part to ensure this works in both Real and Complex builds.
28 ierr = PetscRandomGetValue(rnd, &val1); CHKERRQ(ierr);
29 ierr = PetscRandomGetValue(rnd, &val2); CHKERRQ(ierr);
30
31 u1 = PetscRealPart(val1);
32 u2 = PetscRealPart(val2);
33
34 // 2. Safety Check: log(0) is undefined (infinity).
35 // If the RNG returns exactly 0.0, bump it to a tiny epsilon.
36 if (u1 <= 0.0) u1 = 1.0e-14;
37
38 // 3. Box-Muller Transform
39 // Formula: R = sqrt(-2 * ln(u1)), Theta = 2 * PI * u2
40 magnitude = PetscSqrtReal(-2.0 * PetscLogReal(u1));
41 theta = 2.0 * PETSC_PI * u2;
42
43 // 4. Calculate independent Normal variables
44 *n1 = magnitude * PetscCosReal(theta);
45 *n2 = magnitude * PetscSinReal(theta);
46
47 PetscFunctionReturn(0);
48}
49
50#undef __FUNCT__
51#define __FUNCT__ "CalculateBrownianDisplacement"
52/**
53 * @brief Internal helper implementation: `CalculateBrownianDisplacement()`.
54 * @details Local to this translation unit.
55 */
56PetscErrorCode CalculateBrownianDisplacement(UserCtx *user, PetscReal diff_eff, Cmpnts *displacement)
57{
58 PetscErrorCode ierr;
59 PetscReal dt = user->simCtx->dt;
60 PetscReal sigma;
61 PetscReal n_x, n_y, n_z, gaussian_dummy;
62
63 PetscFunctionBeginUser;
64
65 // 1. Initialize output to zero for safety
66 displacement->x = 0.0;
67 displacement->y = 0.0;
68 displacement->z = 0.0;
69
70 // 2. Physical check: Diffusivity cannot be negative.
71 // If 0, there is no Brownian motion.
72 if (diff_eff <= 1.0e-12) {
73 PetscFunctionReturn(0);
74 }
75
76 // 3. Calculate the Scaling Factor (Standard Deviation)
77 // Formula: sigma = sqrt(2 * D * dt)
78 // Note: dt is inside the root because variance scales linearly with time.
79 sigma = PetscSqrtReal(2.0 * diff_eff * dt);
80
81 // 4. Generate 3 Independent Gaussian Random Numbers
82 // GenerateGaussianNoise produces 2 numbers at a time. We call it twice.
83
84 // Get noise for X and Y
85 ierr = GenerateGaussianNoise(user->simCtx->BrownianMotionRNG, &n_x, &n_y); CHKERRQ(ierr);
86
87 // Get noise for Z (second sample is intentionally discarded here).
88 ierr = GenerateGaussianNoise(user->simCtx->BrownianMotionRNG, &n_z, &gaussian_dummy); CHKERRQ(ierr);
89
90 // 5. Calculate final stochastic displacement
91 displacement->x = sigma * n_x;
92 displacement->y = sigma * n_y;
93 displacement->z = sigma * n_z;
94
95 PetscFunctionReturn(0);
96}
97
98#undef __FUNCT__
99#define __FUNCT__ "UpdateParticlePosition"
100/**
101 * @brief Internal helper implementation: `UpdateParticlePosition()`.
102 * @details Local to this translation unit.
103 */
104PetscErrorCode UpdateParticlePosition(UserCtx *user, Particle *particle)
105{
106 PetscFunctionBeginUser; // PETSc macro for error/stack tracing
108
109 PetscErrorCode ierr;
110 PetscReal dt = user->simCtx->dt;
111 Cmpnts brownian_disp;
112
113 // 2. Calculate the stochastic kick
114 ierr = CalculateBrownianDisplacement(user,particle->diffusivity, &brownian_disp); CHKERRQ(ierr);
115
116 // --- Update Position ---
117 // X_new = X_old + ((U_convection + U_diffusivitygradient) * dt) + dX_brownian
118
119 particle->loc.x += ((particle->vel.x + particle->diffusivitygradient.x) * dt) + brownian_disp.x;
120 particle->loc.y += ((particle->vel.y + particle->diffusivitygradient.y) * dt) + brownian_disp.y;
121 particle->loc.z += ((particle->vel.z + particle->diffusivitygradient.z) * dt) + brownian_disp.z;
122
124 PetscFunctionReturn(0);
125}
126
127#undef __FUNCT__
128#define __FUNCT__ "UpdateAllParticlePositions"
129/**
130 * @brief Internal helper implementation: `UpdateAllParticlePositions()`.
131 * @details Local to this translation unit.
132 */
134{
135 PetscErrorCode ierr;
136 DM swarm = user->swarm;
137 PetscInt nLocal, p;
138 PetscReal *pos = NULL;
139 PetscReal *vel = NULL;
140 PetscReal *diffusivity = NULL;
141 Cmpnts *diffusivitygradient = NULL;
142 PetscReal *psi = NULL;
143 PetscReal *weights = NULL;
144 PetscInt *cell = NULL;
145 PetscInt *status = NULL;
146 PetscInt64 *pid = NULL;
147 PetscMPIInt rank;
148
149 ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
150
151 PetscFunctionBeginUser; // PETSc macro for error/stack tracing
152
154
155 // 1) Get the number of local particles
156 ierr = DMSwarmGetLocalSize(swarm, &nLocal); CHKERRQ(ierr);
157 if (nLocal == 0) {
158 LOG_ALLOW(LOCAL,LOG_DEBUG,"[Rank %d] No particles to move/transport. \n",rank);
159 PetscFunctionReturn(0); // nothing to do, no fields held
160 }
161 // 2) Access the "position" and "velocity" fields
162 ierr = DMSwarmGetField(swarm, "position", NULL, NULL, (void**)&pos); CHKERRQ(ierr);
163 ierr = DMSwarmGetField(swarm, "velocity", NULL, NULL, (void**)&vel); CHKERRQ(ierr);
164 ierr = DMSwarmGetField(swarm, "Diffusivity", NULL, NULL, (void**)&diffusivity); CHKERRQ(ierr);
165 ierr = DMSwarmGetField(swarm, "DiffusivityGradient", NULL, NULL, (void**)&diffusivitygradient); CHKERRQ(ierr);
166 ierr = DMSwarmGetField(swarm, "Psi", NULL, NULL, (void**)&psi); CHKERRQ(ierr);
167 ierr = DMSwarmGetField(swarm, "weight", NULL, NULL, (void**)&weights); CHKERRQ(ierr);
168 ierr = DMSwarmGetField(swarm, "DMSwarm_CellID", NULL, NULL, (void**)&cell); CHKERRQ(ierr);
169 ierr = DMSwarmGetField(swarm, "DMSwarm_location_status", NULL, NULL, (void**)&status); CHKERRQ(ierr);
170 ierr = DMSwarmGetField(swarm, "DMSwarm_pid", NULL, NULL, (void**)&pid); CHKERRQ(ierr);
171
172 LOG_ALLOW(GLOBAL,LOG_DEBUG," [Rank %d] No.of Particles to update: %" PetscInt_FMT ".\n",rank,nLocal);
173
174 // 3) Loop over all local particles, updating each position by velocity * dt
175 for (p = 0; p < nLocal; p++) {
176 // update temporary particle struct
177 Particle particle;
178
179 // Unpack: Use the helper to read from swarm arrays into the particle struct
180 ierr = UnpackSwarmFields(p, pid, weights, pos, cell, vel, status, diffusivity, diffusivitygradient, psi, &particle); CHKERRQ(ierr);
181
182 // Update position based on velocity and Brownian motion
183 ierr = UpdateParticlePosition(user, &particle); CHKERRQ(ierr);
184
185 // Update swarm fields
186 ierr = UpdateSwarmFields(p, &particle, pos, vel, weights, cell, status, diffusivity, diffusivitygradient, psi); CHKERRQ(ierr);
187 }
188
189 // 4) Restore the fields
190 ierr = DMSwarmRestoreField(swarm, "position", NULL, NULL, (void**)&pos); CHKERRQ(ierr);
191 ierr = DMSwarmRestoreField(swarm, "velocity", NULL, NULL, (void**)&vel); CHKERRQ(ierr);
192 ierr = DMSwarmRestoreField(swarm, "Diffusivity", NULL, NULL, (void**)&diffusivity); CHKERRQ(ierr);
193 ierr = DMSwarmRestoreField(swarm, "DiffusivityGradient", NULL, NULL, (void**)&diffusivitygradient); CHKERRQ(ierr);
194 ierr = DMSwarmRestoreField(swarm, "Psi", NULL, NULL, (void**)&psi); CHKERRQ(ierr);
195 ierr = DMSwarmRestoreField(swarm, "weight", NULL, NULL, (void**)&weights); CHKERRQ(ierr);
196 ierr = DMSwarmRestoreField(swarm, "DMSwarm_CellID", NULL, NULL, (void**)&cell); CHKERRQ(ierr);
197 ierr = DMSwarmRestoreField(swarm, "DMSwarm_location_status", NULL, NULL, (void**)&status); CHKERRQ(ierr);
198 ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void**)&pid); CHKERRQ(ierr);
199
200
201 LOG_ALLOW(LOCAL,LOG_DEBUG,"Particle moved/transported successfully on Rank %d.\n",rank);
202
204
205 PetscFunctionReturn(0);
206}
207
208
209/**
210 * @brief Internal helper implementation: `IsParticleInBox()`.
211 * @details Local to this translation unit.
212 */
213static inline PetscBool IsParticleInBox(const BoundingBox *bbox, const Cmpnts *pos) {
214 return (pos->x >= bbox->min_coords.x && pos->x <= bbox->max_coords.x &&
215 pos->y >= bbox->min_coords.y && pos->y <= bbox->max_coords.y &&
216 pos->z >= bbox->min_coords.z && pos->z <= bbox->max_coords.z);
217}
218
219
220#undef __FUNCT__
221#define __FUNCT__ "CheckAndRemoveOutOfBoundsParticles"
222
223/**
224 * @brief Internal helper implementation: `CheckAndRemoveOutOfBoundsParticles()`.
225 * @details Local to this translation unit.
226 */
228 PetscInt *removedCountLocal,
229 PetscInt *removedCountGlobal,
230 const BoundingBox *bboxlist)
231{
232 PetscErrorCode ierr;
233 DM swarm = user->swarm;
234 PetscInt nLocalInitial;
235 PetscReal *pos_p = NULL;
236 PetscInt64 *pid_p = NULL; // For better logging
237 PetscInt local_removed_count = 0;
238 PetscMPIInt global_removed_count_mpi = 0;
239 PetscMPIInt rank, size;
240
241 PetscFunctionBeginUser;
242 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
243 ierr = MPI_Comm_size(PETSC_COMM_WORLD, &size); CHKERRQ(ierr);
244 LOG_ALLOW(LOCAL, LOG_INFO, "[Rank %d] Checking for out-of-bounds particles...", rank);
245
246 // Initialize output parameters to ensure clean state
247 *removedCountLocal = 0;
248 if (removedCountGlobal) *removedCountGlobal = 0;
249
250 ierr = DMSwarmGetLocalSize(swarm, &nLocalInitial); CHKERRQ(ierr);
251
252 // Only proceed if there are particles to check on this rank.
253 // All ranks will still participate in the final collective MPI_Allreduce.
254 if (nLocalInitial > 0) {
255 // Get access to swarm fields once before the loop begins.
256 ierr = DMSwarmGetField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr);
257 ierr = DMSwarmGetField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr);
258
259 // --- Iterate BACKWARDS to handle index changes safely during removal ---
260 for (PetscInt p = nLocalInitial - 1; p >= 0; p--) {
261 PetscBool isInsideAnyBox = PETSC_FALSE;
262 Cmpnts current_pos = {pos_p[3*p + 0], pos_p[3*p + 1], pos_p[3*p + 2]};
263
264 // Check if the particle is inside ANY of the rank bounding boxes
265 for (PetscMPIInt proc = 0; proc < size; proc++) {
266 if (IsParticleInBox(&bboxlist[proc], &current_pos)) {
267 isInsideAnyBox = PETSC_TRUE;
268 break; // Particle is inside a valid domain, stop checking.
269 }
270 }
271
272 if (!isInsideAnyBox) {
273 LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Removing out-of-bounds particle [PID %lld] at local index %d. Pos: (%g, %g, %g)\n",
274 rank, (long long)pid_p[p], p, current_pos.x, current_pos.y, current_pos.z);
275
276 // --- Safe Removal Pattern: Restore -> Remove -> Reacquire ---
277 // This is the fix for the double-restore bug. Pointers are managed carefully
278 // within this block and then restored cleanly after the loop.
279
280 // 1. Restore all fields BEFORE modifying the swarm structure. This invalidates pos_p and pid_p.
281 ierr = DMSwarmRestoreField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr);
282 ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr);
283
284 // 2. Remove the particle at the current local index 'p'.
285 ierr = DMSwarmRemovePointAtIndex(swarm, p); CHKERRQ(ierr);
286 local_removed_count++;
287
288 // 3. After removal, re-acquire pointers ONLY if the loop is not finished.
289 PetscInt nLocalCurrent;
290 ierr = DMSwarmGetLocalSize(swarm, &nLocalCurrent); CHKERRQ(ierr);
291
292 if (nLocalCurrent > 0 && p > 0) { // Check if there are particles left AND iterations left
293 ierr = DMSwarmGetField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr);
294 ierr = DMSwarmGetField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr);
295 } else {
296 // All remaining particles were removed OR this was the last particle (p=0).
297 // Invalidate pointers to prevent the final restore call and exit the loop.
298 pos_p = NULL;
299 pid_p = NULL;
300 break;
301 }
302 }
303 } // End of backwards loop
304
305 // At the end, restore any valid pointers. This handles three cases:
306 // 1. No particles were removed: restores the original pointers.
307 // 2. Particles were removed mid-loop: restores the pointers from the last re-acquisition.
308 // 3. All particles were removed: pointers are NULL, so nothing is done.
309 if (pos_p) { ierr = DMSwarmRestoreField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr); }
310 if (pid_p) { ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr); }
311 } // End of if (nLocalInitial > 0)
312
313 PetscInt nLocalFinal;
314 ierr = DMSwarmGetLocalSize(swarm, &nLocalFinal); CHKERRQ(ierr);
315 LOG_ALLOW(LOCAL, LOG_INFO, "[Rank %d] Finished removing %d out-of-bounds particles. Final local size: %d.\n", rank, local_removed_count, nLocalFinal);
316
317 // --- Synchronize counts across all ranks ---
318 *removedCountLocal = local_removed_count;
319 if (removedCountGlobal) {
320 ierr = MPI_Allreduce(&local_removed_count, &global_removed_count_mpi, 1, MPI_INT, MPI_SUM, PetscObjectComm((PetscObject)swarm)); CHKERRQ(ierr);
321 *removedCountGlobal = global_removed_count_mpi;
322 // Use a synchronized log message so only one rank prints the global total.
323 LOG_ALLOW_SYNC(GLOBAL, LOG_INFO, "[Rank %d] Removed %d out-of-bounds particles globally.\n", rank, *removedCountGlobal);
324 }
325
326 PetscFunctionReturn(0);
327}
328
329#undef __FUNCT__
330#define __FUNCT__ "CheckAndRemoveLostParticles"
331/**
332 * @brief Internal helper implementation: `CheckAndRemoveLostParticles()`.
333 * @details Local to this translation unit.
334 */
336 PetscInt *removedCountLocal,
337 PetscInt *removedCountGlobal)
338{
339 PetscErrorCode ierr;
340 DM swarm = user->swarm;
341 PetscInt nLocalInitial;
342 PetscInt *status_p = NULL;
343 PetscInt64 *pid_p = NULL; // For better logging
344 PetscReal *pos_p = NULL; // For better logging
345 PetscInt local_removed_count = 0;
346 PetscMPIInt global_removed_count_mpi = 0;
347 PetscMPIInt rank;
348
349 PetscFunctionBeginUser;
351 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
352 LOG_ALLOW(LOCAL, LOG_INFO, "Rank %d: Checking for and removing LOST particles...\n", rank);
353
354 // Initialize output parameters to ensure clean state
355 *removedCountLocal = 0;
356 if (removedCountGlobal) *removedCountGlobal = 0;
357
358 ierr = DMSwarmGetLocalSize(swarm, &nLocalInitial); CHKERRQ(ierr);
359
360 // Only proceed if there are particles to check on this rank.
361 // All ranks will still participate in the final collective MPI_Allreduce.
362 if (nLocalInitial > 0) {
363 // Get access to all swarm fields once before the loop begins.
364 ierr = DMSwarmGetField(swarm, "DMSwarm_location_status", NULL, NULL, (void **)&status_p); CHKERRQ(ierr);
365 ierr = DMSwarmGetField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr);
366 ierr = DMSwarmGetField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr);
367
368 // --- Iterate BACKWARDS to handle index changes safely during removal ---
369 for (PetscInt p = nLocalInitial - 1; p >= 0; p--) {
370 if (status_p[p] == LOST) {
371 LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Removing LOST particle [PID %lld] at local index %d. Position: (%.4f, %.4f, %.4f).\n",
372 rank, (long long)pid_p[p], p, pos_p[3*p], pos_p[3*p+1], pos_p[3*p+2]);
373
374 // --- Safe Removal Pattern: Restore -> Remove -> Reacquire ---
375 // This is the fix for the double-restore bug. Pointers are managed carefully
376 // within this block and then restored cleanly after the loop.
377
378 // 1. Restore all fields BEFORE modifying the swarm structure. This invalidates all pointers.
379 ierr = DMSwarmRestoreField(swarm, "DMSwarm_location_status", NULL, NULL, (void **)&status_p); CHKERRQ(ierr);
380 ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr);
381 ierr = DMSwarmRestoreField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr);
382
383 // 2. Remove the particle at the current local index 'p'.
384 ierr = DMSwarmRemovePointAtIndex(swarm, p); CHKERRQ(ierr);
385 local_removed_count++;
386
387 // 3. After removal, re-acquire pointers ONLY if the loop is not finished.
388 PetscInt nLocalCurrent;
389 ierr = DMSwarmGetLocalSize(swarm, &nLocalCurrent); CHKERRQ(ierr);
390
391 if (nLocalCurrent > 0 && p > 0) { // Check if there are particles left AND iterations left
392 ierr = DMSwarmGetField(swarm, "DMSwarm_location_status", NULL, NULL, (void **)&status_p); CHKERRQ(ierr);
393 ierr = DMSwarmGetField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr);
394 ierr = DMSwarmGetField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr);
395 } else {
396 // All remaining particles were removed OR this was the last particle (p=0).
397 // Invalidate pointers to prevent the final restore call and exit the loop.
398 status_p = NULL;
399 pid_p = NULL;
400 pos_p = NULL;
401 break;
402 }
403 }
404 } // End of backwards loop
405
406 // At the end, restore any valid pointers. This handles three cases:
407 // 1. No particles were removed: restores the original pointers.
408 // 2. Particles were removed mid-loop: restores the pointers from the last re-acquisition.
409 // 3. All particles were removed: pointers are NULL, so nothing is done.
410 if (status_p) { ierr = DMSwarmRestoreField(swarm, "DMSwarm_location_status", NULL, NULL, (void **)&status_p); CHKERRQ(ierr); }
411 if (pid_p) { ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void **)&pid_p); CHKERRQ(ierr); }
412 if (pos_p) { ierr = DMSwarmRestoreField(swarm, "position", NULL, NULL, (void **)&pos_p); CHKERRQ(ierr); }
413 } // End of if (nLocalInitial > 0)
414
415 PetscInt nLocalFinal;
416 ierr = DMSwarmGetLocalSize(swarm, &nLocalFinal); CHKERRQ(ierr);
417 LOG_ALLOW(LOCAL, LOG_INFO, "Rank %d: Finished removing %d LOST particles. Final local size: %d.\n", rank, local_removed_count, nLocalFinal);
418
419 // --- Synchronize counts across all ranks ---
420 *removedCountLocal = local_removed_count;
421 if (removedCountGlobal) {
422 ierr = MPI_Allreduce(&local_removed_count, &global_removed_count_mpi, 1, MPI_INT, MPI_SUM, PetscObjectComm((PetscObject)swarm)); CHKERRQ(ierr);
423 *removedCountGlobal = global_removed_count_mpi;
424 // Use a synchronized log message so only one rank prints the global total.
425 LOG_ALLOW_SYNC(GLOBAL, LOG_INFO, "[Rank %d] Removed %d LOST particles globally.\n", rank, *removedCountGlobal);
426 }
427
429 PetscFunctionReturn(0);
430}
431
432
433#undef __FUNCT__
434#define __FUNCT__ "SetMigrationRanks"
435/**
436 * @brief Internal helper implementation: `SetMigrationRanks()`.
437 * @details Local to this translation unit.
438 */
439PetscErrorCode SetMigrationRanks(UserCtx* user, const MigrationInfo *migrationList, PetscInt migrationCount)
440{
441 PetscErrorCode ierr;
442 DM swarm = user->swarm;
443 PetscInt p_idx;
444 PetscInt *rankField = NULL; // Field storing target rank
445
446 PetscFunctionBeginUser;
448
449 // Ensure the migration rank field exists
450 ierr = DMSwarmGetField(swarm, "DMSwarm_rank", NULL, NULL, (void **)&rankField); CHKERRQ(ierr);
451
452 // Set the target rank for migrating particles
453 for(p_idx = 0; p_idx < migrationCount; ++p_idx) {
454 rankField[migrationList[p_idx].local_index] = migrationList[p_idx].target_rank;
455 }
456
457 ierr = DMSwarmRestoreField(swarm, "DMSwarm_rank", NULL, NULL, (void **)&rankField); CHKERRQ(ierr);
458
460 PetscFunctionReturn(0);
461}
462
463#undef __FUNCT__
464#define __FUNCT__ "PerformMigration"
465
466/**
467 * @brief Implementation of \ref PerformMigration().
468 * @details Full API contract (arguments, ownership, side effects) is documented with
469 * the header declaration in `include/ParticleMotion.h`.
470 * @see PerformMigration()
471 */
472PetscErrorCode PerformMigration(UserCtx *user)
473{
474 PetscErrorCode ierr;
475 DM swarm = user->swarm;
476 PetscMPIInt rank;
477
478 PetscFunctionBeginUser;
480 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
481 LOG_ALLOW(GLOBAL, LOG_INFO, "Rank %d: Starting DMSwarmMigrate...\n", rank);
482
483 // Perform the migration - PETSC_TRUE removes particles that fail to land
484 // in a valid cell on the target rank (or were marked with an invalid rank).
485 ierr = DMSwarmMigrate(swarm, PETSC_TRUE); CHKERRQ(ierr);
486
487 LOG_ALLOW(GLOBAL, LOG_INFO, "Rank %d: Migration complete.\n", rank);
489 PetscFunctionReturn(0);
490}
491
492//-----------------------------------------------------------------------------
493// MODULE (COUNT): Calculates Particle Count Per Cell - REVISED FOR DMDAVecGetArray
494//-----------------------------------------------------------------------------
495
496#undef __FUNCT__
497#define __FUNCT__ "CalculateParticleCountPerCell"
498/**
499 * @brief Implementation of \ref CalculateParticleCountPerCell().
500 * @details Full API contract (arguments, ownership, side effects) is documented with
501 * the header declaration in `include/logging.h`.
502 * @see CalculateParticleCountPerCell()
503 */
505 PetscErrorCode ierr;
506 DM da = user->da;
507 DM swarm = user->swarm;
508 Vec countVec = user->ParticleCount;
509 Vec localcountVec = user->lParticleCount;
510 PetscInt nlocal, p;
511 PetscInt *global_cell_id_arr; // Read GLOBAL cell IDs
512 PetscScalar ***count_arr_3d; // Use 3D accessor
513 PetscInt64 *PID_arr;
514 PetscMPIInt rank;
515 char msg[ERROR_MSG_BUFFER_SIZE];
516 PetscInt particles_counted_locally = 0;
517
518 PetscFunctionBeginUser;
520 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
521
522 // --- Input Validation ---
523 if (!da) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "UserCtx->da is NULL.");
524 if (!swarm) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "UserCtx->swarm is NULL.");
525 if (!countVec) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "UserCtx->ParticleCount is NULL.");
526 // Check DOF of da
527 PetscInt count_dof;
528 ierr = DMDAGetInfo(da, NULL, NULL, NULL, NULL, NULL, NULL, NULL, &count_dof, NULL, NULL, NULL, NULL, NULL); CHKERRQ(ierr);
529 if (count_dof != 1) {
530 PetscSNPrintf(msg, sizeof(msg), "countDM must have DOF=1, got %" PetscInt_FMT ".", count_dof);
531 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "%s", msg);
532 }
533
534 // --- Zero the local count vector ---
535 ierr = VecSet(localcountVec, 0.0); CHKERRQ(ierr);
536
537 // --- Get Particle Data ---
538 LOG_ALLOW(GLOBAL,LOG_DEBUG, "Accessing particle data.\n");
539 ierr = DMSwarmGetLocalSize(swarm, &nlocal); CHKERRQ(ierr);
540 ierr = DMSwarmGetField(swarm,"DMSwarm_CellID", NULL, NULL, (void **)&global_cell_id_arr); CHKERRQ(ierr);
541 ierr = DMSwarmGetField(swarm,"DMSwarm_pid",NULL,NULL,(void **)&PID_arr);CHKERRQ(ierr);
542
543 // --- Get Grid Vector Array using DMDA accessor ---
544 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Accessing ParticleCount vector array (using DMDAVecGetArray).\n");
545 ierr = DMDAVecGetArray(da, localcountVec, &count_arr_3d); CHKERRQ(ierr);
546
547 // Get local owned + ghosted range for writing into ghost slots.
548 PetscInt gxs, gys, gzs, gxm, gym, gzm;
549 ierr = DMDAGetGhostCorners(da, &gxs, &gys, &gzs, &gxm, &gym, &gzm); CHKERRQ(ierr);
550
551 // --- Accumulate Counts Locally ---
552 LOG_ALLOW(LOCAL, LOG_DEBUG, "CalculateParticleCountPerCell (Rank %d): Processing %" PetscInt_FMT " local particles using GLOBAL CellIDs.\n",rank,nlocal);
553 for (p = 0; p < nlocal; p++) {
554 // Read the GLOBAL indices stored for this particle
555 PetscInt i_geom = global_cell_id_arr[p * 3 + 0]; // Global i index
556 PetscInt j_geom = global_cell_id_arr[p * 3 + 1]; // Global j index
557 PetscInt k_geom = global_cell_id_arr[p * 3 + 2]; // Global k index
558
559 // Apply the shift to ensure ParticleCount follows the indexing convention for cell-centered data in this codebase.
560 PetscInt i = (PetscInt)i_geom + 1; // Shift for cell-centered
561 PetscInt j = (PetscInt)j_geom + 1; // Shift for cell-centered
562 PetscInt k = (PetscInt)k_geom + 1; // Shift for cell-centered
563
564 // *** Bounds check is implicitly handled by DMDAVecGetArray for owned+ghost region ***
565 // However, accessing outside this region using global indices WILL cause an error.
566 // A preliminary check might still be wise if global IDs could be wild.
567 // We rely on LocateAllParticles to provide valid global indices [0..IM-1] etc.
568
570 "[Rank %d] Read CellID for p=%" PetscInt_FMT ", PID = %" PetscInt64_FMT ": (%" PetscInt_FMT ", %" PetscInt_FMT ", %" PetscInt_FMT ")\n",
571 rank, p, PID_arr[p], i, j, k);
572
573 // Check if the global index (i,j,k) falls within the local + ghost range
574 if (i >= gxs && i < gxs + gxm &&
575 j >= gys && j < gys + gym && // Adjust based on actual ghost width
576 k >= gzs && k < gzs + gzm ) // This check prevents definite crashes but doesn't guarantee ownership
577 {
578
579 // Increment count at the location corresponding to GLOBAL index (I,J,K)
580 // LOG_ALLOW(LOCAL, LOG_DEBUG, "CalculateParticleCountPerCell (Rank %d): Particle %d with global CellID (%d, %d, %d) incremented with a particle.\n",rank, p, i, j, k);
581 count_arr_3d[k][j][i] += 1.0;
582 particles_counted_locally++;
583 } else {
584 // This particle's global ID is likely outside the range this rank handles (even ghosts)
585 // note: this is not necessarily an error if the particle is legitimately outside the local+ghost region
587 "(Rank %d): Skipping particle %" PetscInt64_FMT " with global CellID (%" PetscInt_FMT ", %" PetscInt_FMT ", %" PetscInt_FMT ") - likely outside local+ghost range.\n",
588 rank, PID_arr[p], i, j, k);
589 }
590 }
591 LOG_ALLOW(LOCAL, LOG_DEBUG, "(Rank %d): Local counting finished. Processed %" PetscInt_FMT " particles locally.\n", rank, particles_counted_locally);
592
593 // --- Restore Access ---
594 ierr = DMDAVecRestoreArray(da, localcountVec, &count_arr_3d); CHKERRQ(ierr);
595 ierr = DMSwarmRestoreField(swarm,"DMSwarm_CellID", NULL, NULL, (void **)&global_cell_id_arr); CHKERRQ(ierr);
596 ierr = DMSwarmRestoreField(swarm,"DMSwarm_pid",NULL,NULL,(void **)&PID_arr);CHKERRQ(ierr);
597
598 // --- Assemble Global Vector ---
599 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Assembling global ParticleCount vector.\n");
600 ierr = VecZeroEntries(countVec); CHKERRQ(ierr); // Ensure global vector is zeroed before accumulation
601 ierr = DMLocalToGlobalBegin(da, localcountVec, ADD_VALUES, countVec); CHKERRQ(ierr);
602 ierr = DMLocalToGlobalEnd(da, localcountVec, ADD_VALUES, countVec); CHKERRQ(ierr);
603 /*
604 * OPTIONAL: Synchronize Ghosts for Stencil Operations
605 * If a future function needs to read ParticleCount from neighbor cells (e.g., density smoothing
606 * or gradient calculations), uncomment the following lines to update the ghost slots
607 * in user->lParticleCount with the final summed values.
608 *
609 ierr = UpdateLocalGhosts(user,"ParticleCount"); CHKERRQ(ierr);
610 */
611
612 // --- Verification Logging ---
613 PetscReal total_counted_particles = 0.0, max_count_in_cell = 0.0;
614 ierr = VecSum(countVec, &total_counted_particles); CHKERRQ(ierr);
615 PetscInt max_idx_global = -1;
616 ierr = VecMax(countVec, &max_idx_global, &max_count_in_cell); CHKERRQ(ierr);
617 LOG_ALLOW(GLOBAL, LOG_INFO, "Total counted globally = %.0f, Max count in cell = %.0f\n",
618 total_counted_particles, max_count_in_cell);
619
620 // --- ADD THIS DEBUGGING BLOCK ---
621 if (max_idx_global >= 0) { // Check if VecMax found a location
622 // Need to convert the flat global index back to 3D global index (I, J, K)
623 // Get global grid dimensions (Nodes, NOT Cells IM/JM/KM)
624 PetscInt M, N, P;
625 ierr = DMDAGetInfo(da, NULL, &M, &N, &P, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); CHKERRQ(ierr);
626 // Note: Assuming DOF=1 for countVec, index mapping uses node dimensions M,N,P from DMDA creation (IM+1, etc)
627 // Re-check if your DMDA uses cell counts (IM) or node counts (IM+1) for Vec layout. Let's assume Node counts M,N,P.
628 PetscInt Kmax = max_idx_global / (M * N);
629 PetscInt Jmax = (max_idx_global % (M * N)) / M;
630 PetscInt Imax = max_idx_global % M;
631 LOG_ALLOW(GLOBAL, LOG_INFO, " -> Max count located at global index (I,J,K) = (%d, %d, %d) [Flat index: %d]\n",
632 (int)Imax, (int)Jmax, (int)Kmax, (int)max_idx_global);
633
634 // Also, let's explicitly check the count at (0,0,0)
635 PetscScalar count_at_origin = 0.0;
636 PetscScalar ***count_arr_for_check;
637 ierr = DMDAVecGetArrayRead(da, countVec, &count_arr_for_check); CHKERRQ(ierr);
638 // Check bounds before accessing - crucial if using global indices
639 PetscInt xs, ys, zs, xm, ym, zm;
640 ierr = DMDAGetCorners(da, &xs, &ys, &zs, &xm, &ym, &zm); CHKERRQ(ierr);
641 if (0 >= xs && 0 < xs+xm && 0 >= ys && 0 < ys+ym && 0 >= zs && 0 < zs+zm) {
642 count_at_origin = count_arr_for_check[0][0][0]; // Access using global index (0,0,0)
643 } else {
644 // Origin is not on this rank (relevant for parallel, but check anyway)
645 count_at_origin = -999.0; // Indicate it wasn't accessible locally
646 }
647 ierr = DMDAVecRestoreArrayRead(da, countVec, &count_arr_for_check); CHKERRQ(ierr);
648 LOG_ALLOW(GLOBAL, LOG_INFO, " -> Count at global index (0,0,0) = %.1f\n", count_at_origin);
649
650 } else {
651 LOG_ALLOW(GLOBAL, LOG_WARNING, " -> VecMax did not return a location for the maximum value.\n");
652 }
653 // --- END DEBUGGING BLOCK ---
654
655 LOG_ALLOW(GLOBAL, LOG_INFO, "Particle counting complete.\n");
656
657
659 PetscFunctionReturn(0);
660}
661
662
663
664#undef __FUNCT__
665#define __FUNCT__ "ResizeSwarmGlobally"
666/**
667 * @brief Implementation of \ref ResizeSwarmGlobally().
668 * @details Full API contract (arguments, ownership, side effects) is documented with
669 * the header declaration in `include/ParticleMotion.h`.
670 * @see ResizeSwarmGlobally()
671 */
672
673PetscErrorCode ResizeSwarmGlobally(DM swarm, PetscInt N_target)
674{
675 PetscErrorCode ierr;
676 PetscInt N_current, nlocal_current;
677 PetscMPIInt rank;
678 MPI_Comm comm;
679
680 PetscFunctionBeginUser;
682 ierr = PetscObjectGetComm((PetscObject)swarm, &comm); CHKERRQ(ierr);
683 ierr = MPI_Comm_rank(comm, &rank); CHKERRQ(ierr);
684 ierr = DMSwarmGetSize(swarm, &N_current); CHKERRQ(ierr);
685 ierr = DMSwarmGetLocalSize(swarm, &nlocal_current); CHKERRQ(ierr);
686
687 PetscInt delta = N_target - N_current;
688
689 if (delta == 0) {
691 PetscFunctionReturn(0); // Nothing to do
692 }
693
694 if (delta < 0) { // Remove particles
695 PetscInt num_to_remove_global = -delta;
696 LOG_ALLOW(GLOBAL, LOG_INFO, "Current size %d > target size %d. Removing %d particles globally.\n", N_current, N_target, num_to_remove_global);
697
698 // --- Strategy: Remove the globally last 'num_to_remove_global' particles ---
699 // Each rank needs to determine how many of its *local* particles fall
700 // within the range of global indices [N_target, N_current - 1].
701
702 PetscInt rstart = 0;
703 PetscInt rend;
704 // Global range owned by this rank [rstart, rend)
705
706 ierr = MPI_Exscan(&nlocal_current, &rstart, 1, MPIU_INT, MPI_SUM, comm); CHKERRMPI(ierr); // Use CHKERRMPI for MPI calls
707
708 rend = rstart + nlocal_current;
709
710
711 // Calculate how many local particles have global indices >= N_target
712 PetscInt nlocal_remove_count = 0;
713 if (rend > N_target) { // If this rank owns any particles slated for removal
714 PetscInt start_remove_local_idx = (N_target > rstart) ? (N_target - rstart) : 0;
715 nlocal_remove_count = nlocal_current - start_remove_local_idx;
716 }
717
718 if (nlocal_remove_count < 0) nlocal_remove_count = 0; // Sanity check
719 if (nlocal_remove_count > nlocal_current) nlocal_remove_count = nlocal_current; // Sanity check
720
721 LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Global range [%d, %d). Target size %d. Need to remove %d local particles (from end).\n", rank, rstart, rend, N_target, nlocal_remove_count);
722
723 // Remove the last 'nlocal_remove_count' particles *locally* by iterating backwards
724 PetscInt removal_ops_done = 0;
725 for (PetscInt p = nlocal_current - 1; p >= 0 && removal_ops_done < nlocal_remove_count; --p) {
726 ierr = DMSwarmRemovePointAtIndex(swarm, p); CHKERRQ(ierr);
727 removal_ops_done++;
728 }
729
730 if (removal_ops_done != nlocal_remove_count) {
731 SETERRQ(comm, PETSC_ERR_PLIB, "Rank %d: Failed to remove the expected number of local particles (%d != %d)", rank, removal_ops_done, nlocal_remove_count);
732 }
733 LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Removed %d local particles.\n", rank, removal_ops_done);
734
735 // Barrier to ensure all removals are done before size check
736 ierr = MPI_Barrier(comm); CHKERRMPI(ierr);
737
738 } else { // delta > 0: Add particles
739 PetscInt num_to_add_global = delta;
740 LOG_ALLOW(GLOBAL, LOG_INFO, "Current size %d < target size %d. Adding %d particles globally.\n", N_current, N_target, num_to_add_global);
741 ierr = DMSwarmAddNPoints(swarm, num_to_add_global); CHKERRQ(ierr);
742 // Note: Added particles will have uninitialized field data. Reading will overwrite.
743 }
744
745 // Verify final size
746 PetscInt N_final;
747 ierr = DMSwarmGetSize(swarm, &N_final); CHKERRQ(ierr);
748 if (N_final != N_target) {
749 SETERRQ(comm, PETSC_ERR_PLIB, "Failed to resize swarm: expected %d particles, got %d", N_target, N_final);
750 }
751 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Swarm successfully resized to %d particles.\n", N_final);
753 PetscFunctionReturn(0);
754}
755
#undef __FUNCT__
#define __FUNCT__ "PreCheckAndResizeSwarm"
/**
 * @brief Determines the particle count stored on disk for timestep `ti` and
 *        resizes the swarm to match it before field data is loaded.
 *
 * Rank 0 opens the reference "position" vector file, derives the particle count
 * from the vector length (block size 3 = x,y,z per particle), and broadcasts
 * either the count or a negative error code to all ranks. If the count differs
 * from the swarm's current global size, ResizeSwarmGlobally() is invoked.
 * Finally user->simCtx->np is updated to the file's count.
 *
 * @param user UserCtx holding the swarm and the simulation context (SimCtx).
 * @param ti   Timestep index used to build the reference filename.
 * @param ext  File extension of the data files (without the dot).
 *
 * @return 0 on success. Errors collectively if the mandatory reference file is
 *         missing (PETSC_ERR_FILE_OPEN) or malformed (PETSC_ERR_FILE_READ).
 */
PetscErrorCode PreCheckAndResizeSwarm(UserCtx *user,
                                      PetscInt ti,
                                      const char *ext)
{
    PetscErrorCode ierr;
    char filename[PETSC_MAX_PATH_LEN];
    PetscInt N_file = 0;                    // The number of particles determined from the file
    PetscInt N_current = 0;                 // Current global swarm size
    MPI_Comm comm;
    PetscMPIInt rank;
    const char *refFieldName = "position";  // Field whose file serves as the size reference
    const PetscInt bs = 3;                  // Block size: 3 scalars (x,y,z) per particle
    SimCtx *simCtx = user->simCtx;
    char *source_path;                      // Top-level directory the data is read from

    // NOTE: Your filename format has a hardcoded "_0" which is typical for
    // PETSc when writing a parallel object from a single rank.
    // If you ever write in parallel, PETSc might create one file per rank.
    // The current logic assumes a single file written by one process.
    const int placeholder_int = 0;

    PetscFunctionBeginUser;
    ierr = PetscObjectGetComm((PetscObject)user->swarm, &comm); CHKERRQ(ierr);
    ierr = MPI_Comm_rank(comm, &rank); CHKERRQ(ierr);

    // First, determine the top-level source directory based on the execution mode.
    if (simCtx->exec_mode == EXEC_MODE_SOLVER) {
        source_path = simCtx->restart_dir;
    } else if (simCtx->exec_mode == EXEC_MODE_POSTPROCESSOR) {
        source_path = simCtx->pps->source_dir;
    } else {
        SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_ARG_WRONGSTATE, "Invalid execution mode for reading simulation fields.");
    }

    // Set the current I/O directory context (e.g. "<source>/<particle_subdir>")
    ierr = PetscSNPrintf(simCtx->_io_context_buffer, sizeof(simCtx->_io_context_buffer),
                         "%s/%s", source_path, simCtx->particle_subdir); CHKERRQ(ierr);

    // --- Construct filename using the specified format ---
    // Layout: <dir>/<field><5-digit timestep>_<0>.<ext>
    ierr = PetscSNPrintf(filename, sizeof(filename), "%s/%s%05" PetscInt_FMT "_%d.%s",
                         simCtx->current_io_directory,refFieldName, ti, placeholder_int, ext); CHKERRQ(ierr);

    LOG_ALLOW(GLOBAL, LOG_INFO, "Checking particle count for timestep %d using ref file '%s'.\n", ti, filename);

    // --- Rank 0 reads the file to determine the size ---
    if (rank == 0) {
        PetscBool fileExists = PETSC_FALSE;
        ierr = PetscTestFile(filename, 'r', &fileExists); CHKERRQ(ierr);

        if (!fileExists) {
            // Set a special value to indicate file not found, then broadcast it.
            N_file = -1;
            LOG_ALLOW(GLOBAL, LOG_ERROR, "Rank 0: Mandatory reference file '%s' not found for timestep %d.\n", filename, ti);
        } else {
            PetscViewer viewer;
            Vec tmpVec;
            PetscInt vecSize;

            // Load the vector sequentially just to inspect its length; the data
            // itself is discarded immediately afterwards.
            ierr = VecCreate(PETSC_COMM_SELF, &tmpVec); CHKERRQ(ierr); // Create a SEQUENTIAL vector
            ierr = PetscViewerBinaryOpen(PETSC_COMM_SELF, filename, FILE_MODE_READ, &viewer); CHKERRQ(ierr);
            ierr = VecLoad(tmpVec, viewer); CHKERRQ(ierr);
            ierr = PetscViewerDestroy(&viewer); CHKERRQ(ierr);

            ierr = VecGetSize(tmpVec, &vecSize); CHKERRQ(ierr);
            ierr = VecDestroy(&tmpVec); CHKERRQ(ierr);

            if (vecSize % bs != 0) {
                N_file = -2; // Special error code for bad file format
                LOG_ALLOW(GLOBAL, LOG_ERROR, "Rank 0: Vector size %d from file '%s' is not divisible by block size %d.\n", vecSize, filename, bs);
            } else {
                N_file = vecSize / bs;
                LOG_ALLOW(GLOBAL, LOG_DEBUG, "Rank 0: Found %d particles in file.\n", N_file);
            }
        }
    }

    // --- Broadcast the particle count (or error code) from Rank 0 to all other ranks ---
    ierr = MPI_Bcast(&N_file, 1, MPIU_INT, 0, comm); CHKERRMPI(ierr);

    // --- All ranks check for errors and abort collectively if necessary ---
    if (N_file == -1) {
        SETERRQ(comm, PETSC_ERR_FILE_OPEN, "Mandatory reference file '%s' not found for timestep %d (as determined by Rank 0).", filename, ti);
    }
    if (N_file == -2) {
        SETERRQ(comm, PETSC_ERR_FILE_READ, "Reference file '%s' has incorrect format (as determined by Rank 0).", filename);
    }
    if (N_file < 0) {
        SETERRQ(comm, PETSC_ERR_PLIB, "Received invalid particle count %d from Rank 0.", N_file);
    }

    // --- Now all ranks have the correct N_file, compare and resize if needed ---
    ierr = DMSwarmGetSize(user->swarm, &N_current); CHKERRQ(ierr);

    if (N_file != N_current) {
        LOG_ALLOW(GLOBAL, LOG_INFO, "Swarm size %d differs from file size %d. Resizing swarm globally.\n", N_current, N_file);
        ierr = ResizeSwarmGlobally(user->swarm, N_file); CHKERRQ(ierr);
    } else {
        LOG_ALLOW(GLOBAL, LOG_DEBUG, "Swarm size (%d) already matches file size. No resize needed.\n", N_current);
    }

    // Also update the context so the rest of the solver sees the new count.
    user->simCtx->np = N_file;

    PetscFunctionReturn(0);
}
875
876
877#undef __FUNCT__
878#define __FUNCT__ "ReinitializeParticlesOnInletSurface"
879/**
880 * @brief Internal helper implementation: `ReinitializeParticlesOnInletSurface()`.
881 * @details Local to this translation unit.
882 */
883PetscErrorCode ReinitializeParticlesOnInletSurface(UserCtx *user, PetscReal currentTime, PetscInt step)
884{
885 PetscErrorCode ierr;
886 PetscMPIInt rank; // MPI rank of the current process
887 DM swarm = user->swarm; // The particle swarm DM
888 PetscReal *positions_field = NULL; // Pointer to swarm field for physical positions
889 PetscInt64 *particleIDs = NULL; // Pointer to swarm field for Particle IDs (for logging)
890 PetscInt *cell_ID_field = NULL; // Pointer to swarm field for Cell IDs (for resetting after migration)
891 const Cmpnts ***coor_nodes_local_array; // Read-only access to local node coordinates
892 Vec Coor_local; // Local vector for node coordinates
893 DMDALocalInfo info; // Local grid information (node-based) from user->da
894 PetscInt xs_gnode_rank, ys_gnode_rank, zs_gnode_rank; // Local starting node indices (incl. ghosts) of rank's DA
895 PetscInt IM_nodes_global, JM_nodes_global, KM_nodes_global; // Global node counts
896
897 PetscRandom rand_logic_reinit_i, rand_logic_reinit_j, rand_logic_reinit_k; // RNGs for re-placement
898 PetscInt nlocal_current; // Number of particles currently on this rank
899 PetscInt particles_actually_reinitialized_count = 0; // Counter for logging
900 PetscBool can_this_rank_service_inlet = PETSC_FALSE; // Flag
901
902 PetscFunctionBeginUser;
903
905
906 // This function is only relevant for surface initialization mode and if an inlet face is defined.
907 if ((user->simCtx->ParticleInitialization != 0 && user->simCtx->ParticleInitialization !=3) || !user->inletFaceDefined) {
908 PetscFunctionReturn(0);
909 }
910
911 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
912 ierr = DMSwarmGetLocalSize(swarm, &nlocal_current); CHKERRQ(ierr);
913
914 // If no particles on this rank, nothing to do.
915 if (nlocal_current == 0) {
916 LOG_ALLOW(LOCAL, LOG_DEBUG, "[T=%.4f, Step=%d] Rank %d has no local particles to re-initialize on inlet.\n", currentTime, step, rank);
917 PetscFunctionReturn(0);
918 }
919
920 // Get DMDA information for the node-centered coordinate grid (user->da)
921 ierr = DMDAGetLocalInfo(user->da, &info); CHKERRQ(ierr);
922 ierr = DMDAGetInfo(user->da, NULL, &IM_nodes_global, &JM_nodes_global, &KM_nodes_global, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL); CHKERRQ(ierr);
923 ierr = DMDAGetCorners(user->da, &xs_gnode_rank, &ys_gnode_rank, &zs_gnode_rank, NULL, NULL, NULL); CHKERRQ(ierr);
924
925 // Modification to IM_nodes_global etc. to account for 1-cell halo in each direction.
926 IM_nodes_global -= 1; JM_nodes_global -= 1; KM_nodes_global -= 1;
927
928 const PetscInt IM_cells_global = IM_nodes_global > 0 ? IM_nodes_global - 1 : 0;
929 const PetscInt JM_cells_global = JM_nodes_global > 0 ? JM_nodes_global - 1 : 0;
930 const PetscInt KM_cells_global = KM_nodes_global > 0 ? KM_nodes_global - 1 : 0;
931
932
933
934 // Check if this rank is responsible for (part of) the designated inlet surface
935 ierr = CanRankServiceInletFace(user, &info, IM_nodes_global, JM_nodes_global, KM_nodes_global, &can_this_rank_service_inlet); CHKERRQ(ierr);
936
937 // Get coordinate array and swarm fields for modification
938 ierr = DMGetCoordinatesLocal(user->da, &Coor_local); CHKERRQ(ierr);
939 ierr = DMDAVecGetArrayRead(user->fda, Coor_local, (void*)&coor_nodes_local_array); CHKERRQ(ierr);
940 ierr = DMSwarmGetField(swarm, "position", NULL, NULL, (void**)&positions_field); CHKERRQ(ierr);
941 ierr = DMSwarmGetField(swarm, "DMSwarm_pid", NULL, NULL, (void**)&particleIDs); CHKERRQ(ierr); // For logging
942 ierr = DMSwarmGetField(swarm, "DMSwarm_CellID", NULL, NULL, (void**)&cell_ID_field); CHKERRQ(ierr);
943
944 if (!can_this_rank_service_inlet) {
945 LOG_ALLOW(LOCAL, LOG_DEBUG, "[T=%.4f, Step=%d] Rank %d cannot service inlet face %s. Skipping re-initialization of %d particles.\n", currentTime, step, rank, BCFaceToString(user->identifiedInletBCFace), nlocal_current);
946
947 // FALLBACK ACTION: Reset position fields to Inlet center for migration and cell ID to -1 for safety.
948 LOG_ALLOW(LOCAL, LOG_DEBUG, "[T=%.4f, Step=%d] Rank %d is resetting %d local particles to inlet center (%.6f, %.6f, %.6f) for migration.\n", currentTime, step, rank, nlocal_current, user->simCtx->CMx_c, user->simCtx->CMy_c, user->simCtx->CMz_c);
949
950 for(PetscInt p = 0; p < nlocal_current; p++){
951 positions_field[3*p+0] = user->simCtx->CMx_c;
952 positions_field[3*p+1] = user->simCtx->CMy_c;
953 positions_field[3*p+2] = user->simCtx->CMz_c;
954
955 cell_ID_field[3*p+0] = -1;
956 cell_ID_field[3*p+1] = -1;
957 cell_ID_field[3*p+2] = -1;
958 }
959
960 // Cleanup: restore swarm fields/coordinate array
961 ierr = DMDAVecRestoreArrayRead(user->fda, Coor_local, (void*)&coor_nodes_local_array); CHKERRQ(ierr);
962 ierr = DMSwarmRestoreField(swarm, "position", NULL, NULL, (void**)&positions_field); CHKERRQ(ierr);
963 ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void**)&particleIDs); CHKERRQ(ierr); // For logging
964 ierr = DMSwarmRestoreField(swarm, "DMSwarm_CellID", NULL, NULL, (void**)&cell_ID_field); CHKERRQ(ierr);
966 PetscFunctionReturn(0);
967 }
968
969 LOG_ALLOW(GLOBAL, LOG_INFO, "[T=%.4f, Step=%d] Rank %d is on inlet face %s. Attempting to re-place %d local particles.\n", currentTime, step, rank, BCFaceToString(user->identifiedInletBCFace), nlocal_current);
970
971 // Initialize fresh RNGs for this re-placement to ensure good distribution
972 ierr = InitializeLogicalSpaceRNGs(&rand_logic_reinit_i, &rand_logic_reinit_j, &rand_logic_reinit_k); CHKERRQ(ierr);
973 // Optional: Seed RNGs for deterministic behavior if required, e.g., based on rank and step.
974 // PetscRandomSetSeed(rand_logic_i, (unsigned long)rank*1000 + step + 100); PetscRandomSeed(rand_logic_i); // Example
975
976 // Loop over all particles currently local to this rank
977 for (PetscInt p = 0; p < nlocal_current; p++) {
978 PetscInt ci_metric_lnode, cj_metric_lnode, ck_metric_lnode; // Local node indices (of rank's DA patch) for cell origin
979 PetscReal xi_metric_logic, eta_metric_logic, zta_metric_logic; // Intra-cell logical coordinates
980 Cmpnts phys_coords = {0.0,0.0,0.0}; // To store newly calculated physical coordinates
981 PetscBool particle_was_placed = PETSC_FALSE;
982
984 // Get random cell on this rank's portion of the inlet and random logical coords within it
985 ierr = GetRandomCellAndLogicalCoordsOnInletFace(user, &info, xs_gnode_rank, ys_gnode_rank, zs_gnode_rank,
986 IM_nodes_global, JM_nodes_global, KM_nodes_global,
987 &rand_logic_reinit_i, &rand_logic_reinit_j, &rand_logic_reinit_k,
988 &ci_metric_lnode, &cj_metric_lnode, &ck_metric_lnode,
989 &xi_metric_logic, &eta_metric_logic, &zta_metric_logic); CHKERRQ(ierr);
990
991 // Convert these logical coordinates to physical coordinates
992
993 ierr = MetricLogicalToPhysical(user, coor_nodes_local_array,
994 ci_metric_lnode, cj_metric_lnode, ck_metric_lnode,
995 xi_metric_logic, eta_metric_logic, zta_metric_logic,
996 &phys_coords); CHKERRQ(ierr);
997
998 // Update the particle's position in the swarm fields
999 positions_field[3*p+0] = phys_coords.x;
1000 positions_field[3*p+1] = phys_coords.y;
1001 positions_field[3*p+2] = phys_coords.z;
1002 particle_was_placed = PETSC_TRUE;
1003
1005 PetscBool placement_flag = PETSC_FALSE;
1006 ierr = GetDeterministicFaceGridLocation(user, &info, xs_gnode_rank, ys_gnode_rank, zs_gnode_rank,
1007 IM_cells_global, JM_cells_global, KM_cells_global,
1008 particleIDs[p],
1009 &ci_metric_lnode, &cj_metric_lnode, &ck_metric_lnode,
1010 &xi_metric_logic, &eta_metric_logic, &zta_metric_logic,&placement_flag); CHKERRQ(ierr);
1011
1012
1013 if(placement_flag){
1014 // Convert these logical coordinates to physical coordinates
1015 ierr = MetricLogicalToPhysical(user, coor_nodes_local_array,
1016 ci_metric_lnode, cj_metric_lnode, ck_metric_lnode,
1017 xi_metric_logic, eta_metric_logic, zta_metric_logic,
1018 &phys_coords); CHKERRQ(ierr);
1019
1020 // Update the particle's position in the swarm fields
1021 positions_field[3*p+0] = phys_coords.x;
1022 positions_field[3*p+1] = phys_coords.y;
1023 positions_field[3*p+2] = phys_coords.z;
1024 particle_was_placed = PETSC_TRUE;
1025 } else{
1026 // Deterministic placement failed (particle migrated to rank where formula says it doesn't belong)
1027 // Fall back to random placement on this rank's portion of inlet surface
1028 LOG_ALLOW(GLOBAL, LOG_WARNING, "Rank %d: Particle PID %ld deterministic placement failed (belongs to different rank). Falling back to random placement.\n", rank, particleIDs[p]);
1029
1030 ierr = GetRandomCellAndLogicalCoordsOnInletFace(user, &info, xs_gnode_rank, ys_gnode_rank, zs_gnode_rank,
1031 IM_nodes_global, JM_nodes_global, KM_nodes_global,
1032 &rand_logic_reinit_i, &rand_logic_reinit_j, &rand_logic_reinit_k,
1033 &ci_metric_lnode, &cj_metric_lnode, &ck_metric_lnode,
1034 &xi_metric_logic, &eta_metric_logic, &zta_metric_logic); CHKERRQ(ierr);
1035
1036 // Convert to physical coordinates
1037 ierr = MetricLogicalToPhysical(user, coor_nodes_local_array,
1038 ci_metric_lnode, cj_metric_lnode, ck_metric_lnode,
1039 xi_metric_logic, eta_metric_logic, zta_metric_logic,
1040 &phys_coords); CHKERRQ(ierr);
1041
1042 // Update particle position
1043 positions_field[3*p+0] = phys_coords.x;
1044 positions_field[3*p+1] = phys_coords.y;
1045 positions_field[3*p+2] = phys_coords.z;
1046 particle_was_placed = PETSC_TRUE;
1047 }
1048
1049 } else{
1050 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "ReinitializeParticlesOnInletSurface only supports ParticleInitialization modes 0 and 3.");
1051 }
1052
1053 if(particle_was_placed){
1054 particles_actually_reinitialized_count++;
1055
1056 cell_ID_field[3*p+0] = -1;
1057 cell_ID_field[3*p+1] = -1;
1058 cell_ID_field[3*p+2] = -1;
1059
1060 LOG_LOOP_ALLOW(LOCAL, LOG_VERBOSE, p, (nlocal_current > 20 ? nlocal_current/10 : 1), // Sampled logging
1061 "Rank %d: PID %ld (idx %ld) RE-PLACED. CellOriginNode(locDAIdx):(%d,%d,%d). LogicCoords: (%.2e,%.2f,%.2f). PhysCoords: (%.6f,%.6f,%.6f).\n",
1062 rank, particleIDs[p], (long)p,
1063 ci_metric_lnode, cj_metric_lnode, ck_metric_lnode,
1064 xi_metric_logic, eta_metric_logic, zta_metric_logic,
1065 phys_coords.x, phys_coords.y, phys_coords.z);
1066 }
1067 }
1068
1069 // Logging summary of re-initialization
1070 if (particles_actually_reinitialized_count > 0) {
1071 LOG_ALLOW(GLOBAL, LOG_INFO, "[T=%.4f, Step=%d] Rank %d (on inlet face %d) successfully re-initialized %d of %d local particles.\n", currentTime, step, rank, user->identifiedInletBCFace, particles_actually_reinitialized_count, nlocal_current);
1072 } else if (nlocal_current > 0) { // This case should ideally not be hit if can_this_rank_service_inlet was true and particles were present.
1073 LOG_ALLOW(GLOBAL, LOG_WARNING, "[T=%.4f, Step=%d] Rank %d claimed to service inlet face %d, but re-initialized 0 of %d local particles. This may indicate an issue if particles were expected to be re-placed.\n", currentTime, step, rank, user->identifiedInletBCFace, nlocal_current);
1074 }
1075
1076 // Cleanup: Destroy RNGs and restore swarm fields/coordinate array
1077 ierr = PetscRandomDestroy(&rand_logic_reinit_i); CHKERRQ(ierr);
1078 ierr = PetscRandomDestroy(&rand_logic_reinit_j); CHKERRQ(ierr);
1079 ierr = PetscRandomDestroy(&rand_logic_reinit_k); CHKERRQ(ierr);
1080
1081 ierr = DMSwarmRestoreField(swarm, "position", NULL, NULL, (void**)&positions_field); CHKERRQ(ierr);
1082 ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void**)&particleIDs); CHKERRQ(ierr);
1083 ierr = DMSwarmRestoreField(swarm, "DMSwarm_CellID", NULL, NULL, (void**)&cell_ID_field); CHKERRQ(ierr);
1084 ierr = DMDAVecRestoreArrayRead(user->fda, Coor_local, (void*)&coor_nodes_local_array); CHKERRQ(ierr);
1085
1086
1088 PetscFunctionReturn(0);
1089}
1090
1091#undef __FUNCT__
1092#define __FUNCT__ "GetLocalPIDSnapshot"
1093/**
1094 * @brief Internal helper implementation: `GetLocalPIDSnapshot()`.
1095 * @details Local to this translation unit.
1096 */
1097PetscErrorCode GetLocalPIDSnapshot(const PetscInt64 pid_field[],
1098 PetscInt n_local,
1099 PetscInt64 **pids_snapshot_out)
1100{
1101 PetscErrorCode ierr;
1102 PetscMPIInt rank;
1103
1104 PetscFunctionBeginUser;
1105
1107
1108 // --- 1. Input Validation ---
1109 if (!pids_snapshot_out) {
1110 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "Output pointer pids_snapshot_out is NULL.");
1111 }
1112 // If n_local > 0, pid_field must not be NULL.
1113 if (n_local > 0 && !pid_field) {
1114 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "Input pid_field pointer is NULL for n_local > 0.");
1115 }
1116
1117 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
1118 LOG_ALLOW(LOCAL, LOG_DEBUG, "[Rank %d]: Creating PID snapshot for %d local particles.\n", rank, n_local);
1119
1120 // If there are no local particles, the snapshot is empty (NULL).
1121 if (n_local == 0) {
1122 *pids_snapshot_out = NULL;
1123
1125 PetscFunctionReturn(0);
1126 }
1127
1128 // --- 2. Allocate Memory for the Snapshot ---
1129 ierr = PetscMalloc1(n_local, pids_snapshot_out); CHKERRQ(ierr);
1130
1131 // --- 3. Copy Data ---
1132 // Perform a fast memory copy from the provided array to our new snapshot array.
1133 ierr = PetscMemcpy(*pids_snapshot_out, pid_field, n_local * sizeof(PetscInt64)); CHKERRQ(ierr);
1134 LOG_ALLOW(LOCAL, LOG_DEBUG, "[Rank %d]: Copied %d PIDs.\n", rank, n_local);
1135
1136 // --- 4. Sort the Snapshot Array ---
1137 // Sorting enables fast binary search lookups later.
1138 ierr = PetscSortInt64(n_local, *pids_snapshot_out); CHKERRQ(ierr);
1139 LOG_ALLOW(LOCAL, LOG_DEBUG, "[Rank %d]: PID snapshot sorted successfully.\n", rank);
1140
1141
1143 PetscFunctionReturn(0);
1144}
1145
1146#undef __FUNCT__
1147#define __FUNCT__ "AddToMigrationList"
1148/**
1149 * @brief Internal helper implementation: `AddToMigrationList()`.
1150 * @details Local to this translation unit.
1151 */
1152PetscErrorCode AddToMigrationList(MigrationInfo **migration_list_p,
1153 PetscInt *capacity_p,
1154 PetscInt *count_p,
1155 PetscInt particle_local_idx,
1156 PetscMPIInt destination_rank)
1157{
1158 PetscErrorCode ierr;
1159 PetscMPIInt rank;
1160
1161 PetscFunctionBeginUser;
1162
1164
1165 // --- 1. Input Validation ---
1166 if (!migration_list_p || !capacity_p || !count_p) {
1167 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "Null pointer provided to AddToMigrationList for list management.");
1168 }
1169
1170 // --- 2. Check if the list needs to be resized ---
1171 if (*count_p >= *capacity_p) {
1172 PetscInt old_capacity = *capacity_p;
1173 // Start with a reasonable base capacity, then double for subsequent reallocations.
1174 PetscInt new_capacity = (old_capacity == 0) ? 16 : old_capacity * 2;
1175
1176 // Use PetscRealloc for safe memory reallocation.
1177 // It handles allocating new memory, copying old data, and freeing the old block.
1178 // The first argument to PetscRealloc is the new size in BYTES.
1179 ierr = PetscRealloc(new_capacity * sizeof(MigrationInfo), migration_list_p); CHKERRQ(ierr);
1180
1181 *capacity_p = new_capacity; // Update the capacity tracker
1182
1183 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
1184 LOG_ALLOW(LOCAL, LOG_DEBUG, "[Rank %d]: Reallocated migrationList capacity from %d to %d.\n",
1185 rank, old_capacity, new_capacity);
1186 }
1187
1188 // --- 3. Add the new migration data to the list ---
1189 // Dereference the pointer-to-a-pointer to get the actual array.
1190 MigrationInfo *list = *migration_list_p;
1191
1192 list[*count_p].local_index = particle_local_idx;
1193 list[*count_p].target_rank = destination_rank;
1194
1195 // --- 4. Increment the count of items in the list ---
1196 (*count_p)++;
1197
1198
1200 PetscFunctionReturn(0);
1201}
1202
1203
1204#undef __FUNCT__
1205#define __FUNCT__ "FlagNewComersForLocation"
1206/**
1207 * @brief Internal helper implementation: `FlagNewcomersForLocation()`.
1208 * @details Local to this translation unit.
1209 */
1210PetscErrorCode FlagNewcomersForLocation(DM swarm,
1211 PetscInt n_local_before,
1212 const PetscInt64 pids_before[])
1213{
1214 PetscErrorCode ierr;
1215 PetscMPIInt rank;
1216 PetscInt n_local_after;
1217 PetscInt newcomer_count = 0;
1218
1219 // Pointers to the swarm data fields we will read and modify
1220 PetscInt64 *pid_field_after = NULL;
1221 PetscInt *status_field_after = NULL;
1222 PetscInt *cell_field_after = NULL;
1223
1224 PetscFunctionBeginUser;
1225
1227
1228 // --- 1. Input Validation and Basic Setup ---
1229 if (!swarm) {
1230 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Input DMSwarm is NULL in FlagNewcomersForLocation.");
1231 }
1232 // If n_local_before > 0, the corresponding PID array must not be null.
1233 if (n_local_before > 0 && !pids_before) {
1234 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "Input pids_before array is NULL for n_local_before > 0.");
1235 }
1236
1237 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
1238
1239 // Get the number of particles on this rank *after* the migration.
1240 ierr = DMSwarmGetLocalSize(swarm, &n_local_after); CHKERRQ(ierr);
1241
1242 LOG_ALLOW(LOCAL, LOG_DEBUG, "[Rank %d]: Checking for newcomers. Size before: %d, Size after: %d\n",
1243 rank, n_local_before, n_local_after);
1244
1245 // If there are no particles now, there's nothing to do.
1246 if (n_local_after == 0) {
1247 PetscFunctionReturn(0);
1248 }
1249
1250 // --- 2. Access Swarm Data ---
1251 // Get read-only access to the PIDs and read-write access to the status field.
1252 ierr = DMSwarmGetField(swarm, "DMSwarm_pid", NULL, NULL, (void**)&pid_field_after); CHKERRQ(ierr);
1253 ierr = DMSwarmGetField(swarm, "DMSwarm_location_status", NULL, NULL, (void**)&status_field_after); CHKERRQ(ierr);
1254 ierr = DMSwarmGetField(swarm, "DMSwarm_CellID", NULL, NULL, (void**)&cell_field_after); CHKERRQ(ierr);
1255 if (!pid_field_after || !status_field_after) {
1256 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Failed to get required swarm fields in FlagNewcomersForLocation.");
1257 }
1258
1259 // --- 3. Identify and Flag Newcomers ---
1260 // Loop through all particles currently on this rank.
1261 for (PetscInt p_idx = 0; p_idx < n_local_after; ++p_idx) {
1262 PetscInt64 current_pid = pid_field_after[p_idx];
1263 PetscBool is_found_in_before_list;
1264
1265 // Use our custom, efficient helper function for the lookup.
1266 ierr = BinarySearchInt64(n_local_before, pids_before, current_pid, &is_found_in_before_list); CHKERRQ(ierr);
1267
1268 // If the PID was NOT found in the "before" list, it must be a newcomer.
1269 if (!is_found_in_before_list) {
1270 // Flag it for processing in the next pass of the migration loop.
1271 status_field_after[p_idx] = NEEDS_LOCATION;
1272 // cell_field_after[3*p_idx+0] = -1;
1273 // cell_field_after[3*p_idx+1] = -1;
1274 // cell_field_after[3*p_idx+2] = -1;
1275 newcomer_count++;
1276
1277 LOG_ALLOW(LOCAL, LOG_VERBOSE, "[Rank %d]: Flagged newcomer PID %ld at local index %d as NEEDS_LOCATION.\n",
1278 rank, current_pid, p_idx);
1279 }
1280 }
1281
1282 // --- 4. Restore Swarm Fields ---
1283 // Release the locks on the swarm data arrays.
1284 ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void**)&pid_field_after); CHKERRQ(ierr);
1285 ierr = DMSwarmRestoreField(swarm, "DMSwarm_location_status", NULL, NULL, (void**)&status_field_after); CHKERRQ(ierr);
1286 ierr = DMSwarmRestoreField(swarm, "DMSwarm_CellID", NULL, NULL, (void**)&cell_field_after); CHKERRQ(ierr);
1287
1288 if (newcomer_count > 0) {
1289 LOG_ALLOW(LOCAL, LOG_INFO, "[Rank %d]: Identified and flagged %d newcomers.\n", rank, newcomer_count);
1290 }
1291
1292
1294 PetscFunctionReturn(0);
1295}
1296
1297#undef __FUNCT__
1298#define __FUNCT__ "MigrateRestartParticlesUsingCellID"
1299/**
1300 * @brief Internal helper implementation: `MigrateRestartParticlesUsingCellID()`.
1301 * @details Local to this translation unit.
1302 */
1304{
1305 PetscErrorCode ierr;
1306 DM swarm = user->swarm;
1307 PetscInt nlocal;
1308 PetscInt *cell_p = NULL;
1309 PetscInt64 *pid_p = NULL;
1310 PetscMPIInt rank;
1311
1312 MigrationInfo *migrationList = NULL;
1313 PetscInt local_migration_count = 0;
1314 PetscInt migrationListCapacity = 0;
1315 PetscInt global_migration_count = 0;
1316
1317 PetscFunctionBeginUser;
1319 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
1320
1321 ierr = DMSwarmGetLocalSize(swarm, &nlocal); CHKERRQ(ierr);
1322 LOG_ALLOW(LOCAL, LOG_DEBUG, "Checking %d restart particles for direct migration using CellIDs.\n", nlocal);
1323
1324 if (nlocal > 0) {
1325 ierr = DMSwarmGetField(swarm, "DMSwarm_CellID", NULL, NULL, (void**)&cell_p); CHKERRQ(ierr);
1326 ierr = DMSwarmGetField(swarm, "DMSwarm_pid", NULL, NULL, (void**)&pid_p); CHKERRQ(ierr);
1327
1328 // Note: We do NOT need to modify the status field here.
1329 // We trust the loaded status (ACTIVE_AND_LOCATED) is correct for the destination rank.
1330
1331 for (PetscInt p_idx = 0; p_idx < nlocal; ++p_idx) {
1332 PetscInt ci = cell_p[3*p_idx + 0];
1333 PetscInt cj = cell_p[3*p_idx + 1];
1334 PetscInt ck = cell_p[3*p_idx + 2];
1335
1336 /* Skip particles with invalid Cell IDs (will be handled by LocateAllParticles) */
1337 if (ci < 0 || cj < 0 || ck < 0) {
1338 continue;
1339 }
1340
1341 PetscMPIInt owner_rank;
1342 ierr = FindOwnerOfCell(user, ci, cj, ck, &owner_rank); CHKERRQ(ierr);
1343
1344 if (owner_rank != -1 && owner_rank != rank) {
1345 /* Particle belongs to another rank - migrate it */
1346 ierr = AddToMigrationList(&migrationList, &migrationListCapacity, &local_migration_count,
1347 p_idx, owner_rank); CHKERRQ(ierr);
1348
1349 LOG_ALLOW(LOCAL, LOG_VERBOSE, "[PID %ld] Direct migration: Cell (%d,%d,%d) belongs to Rank %d (Current: %d).\n",
1350 (long)pid_p[p_idx], ci, cj, ck, owner_rank, rank);
1351 }
1352 }
1353
1354 ierr = DMSwarmRestoreField(swarm, "DMSwarm_CellID", NULL, NULL, (void**)&cell_p); CHKERRQ(ierr);
1355 ierr = DMSwarmRestoreField(swarm, "DMSwarm_pid", NULL, NULL, (void**)&pid_p); CHKERRQ(ierr);
1356 }
1357
1358 /* Check if any rank needs to migrate particles */
1359 ierr = MPI_Allreduce(&local_migration_count, &global_migration_count, 1, MPIU_INT, MPI_SUM, PETSC_COMM_WORLD); CHKERRQ(ierr);
1360
1361 if (global_migration_count > 0) {
1362 LOG_ALLOW(GLOBAL, LOG_INFO, "Fast restart migration: Directly migrating %d particles using CellIDs.\n", global_migration_count);
1363 ierr = SetMigrationRanks(user, migrationList, local_migration_count); CHKERRQ(ierr);
1364 ierr = PerformMigration(user); CHKERRQ(ierr);
1365 /* We do NOT flag newcomers here. We trust their loaded status (ACTIVE_AND_LOCATED) */
1366 /* is valid for their destination rank. */
1367 } else {
1368 LOG_ALLOW(GLOBAL, LOG_INFO, "Fast restart migration: All particles are already on correct ranks.\n");
1369 }
1370
1371 ierr = PetscFree(migrationList); CHKERRQ(ierr);
1372
1374 PetscFunctionReturn(0);
1375}
1376
1377#undef __FUNCT__
1378#define __FUNCT__ "GuessParticleOwnerWithBBox"
1379/**
1380 * @brief Internal helper implementation: `GuessParticleOwnerWithBBox()`.
1381 * @details Local to this translation unit.
1382 * @note Testing status:
1383 * The current direct surface reaches this helper through orchestrator
1384 * tests, but direction-complete immediate-neighbor coverage and the
1385 * explicit "not found in any rank" path are still targeted for future
1386 * bespoke tests.
1387 */
1388static PetscErrorCode GuessParticleOwnerWithBBox(UserCtx *user,
1389 const Particle *particle,
1390 const BoundingBox *bboxlist,
1391 PetscMPIInt *guess_rank_out)
1392{
1393 PetscErrorCode ierr;
1394 PetscMPIInt rank, size;
1395 const RankNeighbors *neighbors = &user->neighbors; // Use a direct pointer for clarity
1396 const BoundingBox *localBBox = &user->bbox;
1397
1398 PetscFunctionBeginUser;
1399
1401
1402 // --- 1. Input Validation and Setup ---
1403 if (!user || !particle || !guess_rank_out || !bboxlist) {
1404 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "Null pointer provided to GuessParticleOwnerWithBBox.");
1405 }
1406 if (!localBBox|| !neighbors) {
1407 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Required user->bboxl or user->neighbors is not initialized.");
1408 }
1409
1410 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
1411 ierr = MPI_Comm_size(PETSC_COMM_WORLD, &size); CHKERRQ(ierr);
1412
1413 *guess_rank_out = MPI_PROC_NULL; // Default to "not found"
1414
1415 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld]: Starting guess for particle at (%.3f, %.3f, %.3f).\n",
1416 particle->PID, particle->loc.x, particle->loc.y, particle->loc.z);
1417
1418 // --- Step 0: Check if the particle is inside the CURRENT rank's bounding box FIRST. ---
1419 // This handles the common case of initial placement where a particle is "lost" but physically local.
1420 if (IsParticleInBox(localBBox, &particle->loc)) {
1421 *guess_rank_out = rank;
1422 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld]: Fast path guess SUCCESS. Particle is within the local (Rank %d) bounding box.\n",
1423 particle->PID, rank);
1424
1426 PetscFunctionReturn(0); // Found it, we're done.
1427 }
1428 // --- 2. Fast Path: Check Immediate Neighbors Based on Exit Direction ---
1429
1430 // Determine likely exit direction(s) to prioritize neighbor check
1431 PetscBool exit_xm = particle->loc.x < localBBox->min_coords.x;
1432 PetscBool exit_xp = particle->loc.x > localBBox->max_coords.x;
1433 PetscBool exit_ym = particle->loc.y < localBBox->min_coords.y;
1434 PetscBool exit_yp = particle->loc.y > localBBox->max_coords.y;
1435 PetscBool exit_zm = particle->loc.z < localBBox->min_coords.z;
1436 PetscBool exit_zp = particle->loc.z > localBBox->max_coords.z;
1437
1438 if (exit_xm && neighbors->rank_xm != MPI_PROC_NULL && IsParticleInBox(&bboxlist[neighbors->rank_xm], &particle->loc)) {
1439 *guess_rank_out = neighbors->rank_xm;
1440 } else if (exit_xp&& neighbors->rank_xp != MPI_PROC_NULL && IsParticleInBox(&bboxlist[neighbors->rank_xp], &particle->loc)) {
1441 *guess_rank_out = neighbors->rank_xp;
1442 } else if (exit_ym && neighbors->rank_ym != MPI_PROC_NULL && IsParticleInBox(&bboxlist[neighbors->rank_ym], &particle->loc)) {
1443 *guess_rank_out = neighbors->rank_ym;
1444 } else if (exit_yp && neighbors->rank_yp != MPI_PROC_NULL && IsParticleInBox(&bboxlist[neighbors->rank_yp], &particle->loc)) {
1445 *guess_rank_out = neighbors->rank_yp;
1446 } else if (exit_zm && neighbors->rank_zm != MPI_PROC_NULL && IsParticleInBox(&bboxlist[neighbors->rank_zm], &particle->loc)) {
1447 *guess_rank_out = neighbors->rank_zm;
1448 } else if (exit_zp && neighbors->rank_zp != MPI_PROC_NULL && IsParticleInBox(&bboxlist[neighbors->rank_zp], &particle->loc)) {
1449 *guess_rank_out = neighbors->rank_zp;
1450 }
1451 // Note: This does not handle corner/edge neighbors, which is why the fallback is essential.
1452
1453 if (*guess_rank_out != MPI_PROC_NULL) {
1454 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld]: Fast path guess SUCCESS. Found in immediate neighbor Rank %d.\n",
1455 particle->PID, *guess_rank_out);
1456
1458 PetscFunctionReturn(0); // Found it, we're done.
1459 }
1460
1461 // --- 3. Robust Fallback: Check All Other Ranks ---
1462 // If we get here, the particle was not in any of the immediate face neighbors' boxes.
1463 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld]: Not in immediate face neighbors. Starting global fallback search.\n",
1464 particle->PID);
1465
1466 for (PetscMPIInt r = 0; r < size; ++r) {
1467 if (r == rank) continue; // Don't check ourselves.
1468
1469 if (IsParticleInBox(&bboxlist[r], &particle->loc)) {
1470 PetscBool is_in = PETSC_TRUE;
1471 // This detailed, synchronized print will solve the mystery
1472 LOG_ALLOW(LOCAL,LOG_VERBOSE, "[Rank %d] Checking PID %lld at (%.4f, %.4f, %.4f) against Rank %d's box: [(%.4f, %.4f, %.4f) to (%.4f, %.4f, %.4f)]. Result: %s\n",
1473 (int)rank, (long long)particle->PID,
1474 particle->loc.x, particle->loc.y, particle->loc.z,
1475 (int)r,
1476 bboxlist[r].min_coords.x, bboxlist[r].min_coords.y, bboxlist[r].min_coords.z,
1477 bboxlist[r].max_coords.x, bboxlist[r].max_coords.y, bboxlist[r].max_coords.z,
1478 is_in ? "INSIDE" : "OUTSIDE");
1479
1480 *guess_rank_out = r;
1481 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld]: Fallback search SUCCESS. Found in Rank %d.\n",
1482 particle->PID, *guess_rank_out);
1483
1485 PetscFunctionReturn(0); // Found it, we're done.
1486 }
1487 }
1488
1489 // If the code reaches here, the particle was not found in any rank's bounding box.
1490 LOG_ALLOW(LOCAL, LOG_WARNING, "[PID %ld]: Guess FAILED. Particle not found in any rank's bounding box.\n",
1491 particle->PID);
1492
1493 // The guess_rank_out will remain -1, signaling failure to the caller.
1495 PetscFunctionReturn(0);
1496}
1497
1498#undef __FUNCT__
1499#define __FUNCT__ "LocateAllParticlesInGrid"
1500/**
1501 * @brief Implementation of \ref LocateAllParticlesInGrid().
1502 * @details Full API contract (arguments, ownership, side effects) is documented with
1503 * the header declaration in `include/ParticleMotion.h`.
1504 * @see LocateAllParticlesInGrid()
1505 */
1506PetscErrorCode LocateAllParticlesInGrid(UserCtx *user,BoundingBox *bboxlist)
1507{
1508 PetscErrorCode ierr;
1509 PetscInt passes = 0;
1510 const PetscInt MAX_MIGRATION_PASSES = 50; // Safety break for runaway loops
1511 PetscInt global_migrations_this_pass;
1512 PetscMPIInt rank;
1513 PetscInt total_migrated_this_timestep = 0;
1514
1515 PetscFunctionBeginUser;
1517 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
1518 ierr = ResetSearchMetrics(user->simCtx); CHKERRQ(ierr);
1519 LOG_ALLOW(GLOBAL, LOG_INFO, "LocateAllParticlesInGrid (Orchestrator) - Beginning particle settlement process.\n");
1520
1521 // This loop ensures that particles that jump across multiple ranks are
1522 // handled correctly in successive, iterative handoffs.
1523 do {
1524 passes++;
1526 LOG_ALLOW_SYNC(GLOBAL, LOG_INFO, "[Rank %d] Starting migration pass %d.\n", rank, passes);
1527
1528 // --- STAGE 1: PER-PASS INITIALIZATION ---
1529 MigrationInfo *migrationList = NULL;
1530 PetscInt local_migration_count = 0;
1531 PetscInt migrationListCapacity = 0;
1532 PetscInt nlocal_before;
1533 PetscInt64 *pids_before_snapshot = NULL;
1534 PetscInt local_lost_count = 0;
1535
1536 ierr = DMSwarmGetLocalSize(user->swarm, &nlocal_before); CHKERRQ(ierr);
1537 if (passes == 1) {
1538 user->simCtx->searchMetrics.searchPopulation += (PetscInt64)nlocal_before;
1539 }
1540 LOG_ALLOW(LOCAL, LOG_DEBUG, "[Rank %d] Pass %d begins with %d local particles.\n", rank, passes, nlocal_before);
1541
1542
1543 // --- STAGE 2: PRE-MIGRATION SNAPSHOT & MAIN PROCESSING LOOP ---
1544 if (nlocal_before > 0) {
1545 // Get pointers to all fields needed for this pass
1546 PetscReal *pos_p, *weights_p, *vel_p;
1547 PetscInt *cell_p, *status_p;
1548 PetscInt64 *pid_p;
1549 ierr = DMSwarmGetField(user->swarm, "position", NULL, NULL, (void**)&pos_p); CHKERRQ(ierr);
1550 ierr = DMSwarmGetField(user->swarm, "velocity", NULL, NULL, (void**)&vel_p); CHKERRQ(ierr);
1551 ierr = DMSwarmGetField(user->swarm, "weight", NULL, NULL, (void**)&weights_p); CHKERRQ(ierr);
1552 ierr = DMSwarmGetField(user->swarm, "DMSwarm_CellID", NULL, NULL, (void**)&cell_p); CHKERRQ(ierr);
1553 ierr = DMSwarmGetField(user->swarm, "DMSwarm_pid", NULL, NULL, (void**)&pid_p); CHKERRQ(ierr);
1554 ierr = DMSwarmGetField(user->swarm, "DMSwarm_location_status", NULL, NULL, (void**)&status_p); CHKERRQ(ierr);
1555
1556 // Create a sorted snapshot of current PIDs to identify newcomers after migration.
1557 // This helper requires a raw pointer, which we just acquired.
1558 ierr = GetLocalPIDSnapshot(pid_p, nlocal_before, &pids_before_snapshot); CHKERRQ(ierr);
1559
1560 for (PetscInt p_idx = 0; p_idx < nlocal_before; p_idx++) {
1561
1562 // OPTIMIZATION: Skip particles already settled in a previous pass of this do-while loop.
1563
1565 "Local Particle idx=%d, PID=%ld, status=%s, cell=(%d, %d, %d)\n",
1566 p_idx,
1567 (long)pid_p[p_idx],
1569 cell_p[3*p_idx],
1570 cell_p[3*p_idx+1],
1571 cell_p[3*p_idx+2]);
1572
1573 if (status_p[p_idx] == ACTIVE_AND_LOCATED) {
1574 LOG_ALLOW(LOCAL,LOG_VERBOSE," [rank %d][PID %ld] skipped in pass %d as it is already located at (%d,%d,%d).\n",rank,pid_p[p_idx],passes,cell_p[3*p_idx],cell_p[3*p_idx + 1],cell_p[3*p_idx + 2]);
1575 continue;
1576 }
1577
1578 // UNPACK: Create a temporary C struct for easier processing using our helper.
1579 Particle current_particle;
1580
1581 // LOG_ALLOW(LOCAL,LOG_DEBUG,"about to unpack p_idx=%d (PID=%ld)\n",p_idx, (long)pid_p[p_idx]);
1582
1583 ierr = UnpackSwarmFields(p_idx, pid_p, weights_p, pos_p, cell_p, vel_p, status_p,NULL,NULL,NULL,&current_particle); CHKERRQ(ierr);
1584
1585 // LOG_ALLOW(LOCAL,LOG_DEBUG,"unpacked p_idx=%d → cell[0]=%d, status=%s\n",p_idx, current_particle.cell[0], ParticleLocationStatusToString((ParticleLocationStatus)current_particle.location_status));
1586
1587 ParticleLocationStatus final_status = (ParticleLocationStatus)status_p[p_idx];
1588
1589
1590 // CASE 1: Particle has a valid prior cell index.
1591 // It has moved, so we only need to run the robust walk from its last known location.
1592 if (current_particle.cell[0] >= 0) {
1593 LOG_ALLOW(LOCAL, LOG_VERBOSE, "[PID %ld] has valid prior cell. Strategy: Robust Walk from previous cell.\n", current_particle.PID);
1594 ierr = LocateParticleOrFindMigrationTarget(user, &current_particle, &final_status); CHKERRQ(ierr);
1595 }
1596
1597 /*
1598 // --- "GUESS" FAST PATH for lost particles ---
1599 if (current_particle.cell[0] < 0) {
1600 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld] is lost or uninitialzied (cell=%d), attempting fast guess.\n",current_particle.PID, current_particle.cell[0]);
1601 ierr = GuessParticleOwnerWithBBox(user, &current_particle, bboxlist, &destination_rank); CHKERRQ(ierr);
1602 if (destination_rank != MPI_PROC_NULL && destination_rank != rank) {
1603 final_status = MIGRATING_OUT;
1604 // The particle struct's destination rank must be updated for consistency
1605 current_particle.destination_rank = destination_rank;
1606 }
1607 }
1608
1609 LOG_ALLOW(LOCAL,LOG_DEBUG,"[PID %ld] Particle status after Initial Guess:%d \n",current_particle.PID,final_status);
1610
1611 // --- "VERIFY" ROBUST WALK if guess didn't resolve it ---
1612 if (final_status == NEEDS_LOCATION || UNINITIALIZED) {
1613 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld] Not resolved by guess, starting robust walk.\n", current_particle.PID);
1614 // This function will update the particle's status and destination rank internally.
1615 ierr = LocateParticleOrFindMigrationTarget(user, &current_particle, &final_status); CHKERRQ(ierr);
1616 destination_rank = current_particle.destination_rank; // Retrieve the result
1617 }
1618
1619 // --- PROCESS THE FINAL STATUS AND TAKE ACTION ---
1620 if (final_status == MIGRATING_OUT) {
1621 status_p[p_idx] = MIGRATING_OUT; // Mark for removal by DMSwarm
1622 ierr = AddToMigrationList(&migrationList, &migrationListCapacity, &local_migration_count, p_idx, destination_rank); CHKERRQ(ierr);
1623 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld] at local index %d marked for migration to rank %d.\n",current_particle.PID, p_idx, destination_rank);
1624 } else {
1625 // Particle's final status is either LOCATED or LOST; update its state in the swarm arrays.
1626 current_particle.location_status = final_status;
1627 // PACK: Use the helper to write results back to the swarm arrays.
1628 ierr = UpdateSwarmFields(p_idx, &current_particle, pos_p, vel_p, weights_p, cell_p, status_p,NULL,NULL,NULL); CHKERRQ(ierr);
1629 }
1630 */
1631 // CASE 2: Particle is "lost" (cell = -1). Strategy: Guess -> Verify.
1632 else {
1633 LOG_ALLOW(LOCAL, LOG_VERBOSE, "[PID %ld] has invalid cell. Strategy: Guess Owner -> Find Cell.\n",current_particle.PID);
1634
1635 PetscMPIInt guessed_owner_rank = MPI_PROC_NULL;
1636 ierr = GuessParticleOwnerWithBBox(user, &current_particle, bboxlist, &guessed_owner_rank); CHKERRQ(ierr);
1637
1638 // If the guess finds a DIFFERENT rank, we can mark for migration and skip the walk.
1639 if (guessed_owner_rank != MPI_PROC_NULL && guessed_owner_rank != rank) {
1641 LOG_ALLOW(LOCAL, LOG_VERBOSE, "[PID %ld] Guess SUCCESS: Found migration target Rank %d. Finalizing.\n", current_particle.PID, guessed_owner_rank);
1642 final_status = MIGRATING_OUT;
1643 current_particle.destination_rank = guessed_owner_rank;
1644 }
1645 else {
1647
1648 // This block runs if the guess either failed (rank is NULL) or found the particle is local (rank is self).
1649 // In BOTH cases, the situation is unresolved, and we MUST fall back to the robust walk.
1650 if (guessed_owner_rank == rank) {
1651 LOG_ALLOW(LOCAL, LOG_DEBUG, "[PID %ld] Guess determined particle is local. Proceeding to robust walk to find cell.\n", current_particle.PID);
1652 } else { // guessed_owner_rank == MPI_PROC_NULL
1653 LOG_ALLOW(LOCAL, LOG_WARNING, "[PID %ld] Guess FAILED to find an owner. Proceeding to robust walk for definitive search.\n", current_particle.PID);
1654 }
1655
1656 ierr = LocateParticleOrFindMigrationTarget(user, &current_particle, &final_status); CHKERRQ(ierr);
1657 }
1658 }
1659
1660 // --- PROCESS THE FINAL, DEFINITIVE STATUS ---
1661 current_particle.location_status = final_status;
1662 ierr = UpdateSwarmFields(p_idx, &current_particle, pos_p, vel_p, weights_p, cell_p, status_p,NULL,NULL,NULL); CHKERRQ(ierr);
1663
1664 if (final_status == MIGRATING_OUT) {
1665 ierr = AddToMigrationList(&migrationList, &migrationListCapacity, &local_migration_count, p_idx, current_particle.destination_rank); CHKERRQ(ierr);
1666 } else if (final_status == LOST) {
1667 local_lost_count++;
1669 } else if (final_status == ACTIVE_AND_LOCATED) {
1671 }
1672
1673 } // End of main particle processing loop
1674
1675 // Restore all the fields acquired for this pass.
1676 ierr = DMSwarmRestoreField(user->swarm, "position", NULL, NULL, (void**)&pos_p); CHKERRQ(ierr);
1677 ierr = DMSwarmRestoreField(user->swarm, "velocity", NULL, NULL, (void**)&vel_p); CHKERRQ(ierr);
1678 ierr = DMSwarmRestoreField(user->swarm, "weight", NULL, NULL, (void**)&weights_p); CHKERRQ(ierr);
1679 ierr = DMSwarmRestoreField(user->swarm, "DMSwarm_CellID", NULL, NULL, (void**)&cell_p); CHKERRQ(ierr);
1680 ierr = DMSwarmRestoreField(user->swarm, "DMSwarm_pid", NULL, NULL, (void**)&pid_p); CHKERRQ(ierr);
1681 ierr = DMSwarmRestoreField(user->swarm, "DMSwarm_location_status", NULL, NULL, (void**)&status_p); CHKERRQ(ierr);
1682 }
1683
1684 // --- STAGE 3: ACTION & MPI COMMUNICATION ---
1685 LOG_ALLOW(LOCAL, LOG_INFO, "[Rank %d] Pass %d: Identified %d particles to migrate out.\n", rank, passes, local_migration_count);
1686
1687 // --- STAGE 3: SYNCHRONIZE AND DECIDE ---
1688 // FIRST, determine if any rank wants to migrate. This call is safe because
1689 // all ranks have finished their local work and can participate.
1690 ierr = MPI_Allreduce(&local_migration_count, &global_migrations_this_pass, 1, MPIU_INT, MPI_SUM, PETSC_COMM_WORLD); CHKERRQ(ierr);
1691
1692 total_migrated_this_timestep += global_migrations_this_pass;
1693
1694 if(global_migrations_this_pass > 0 ){
1695
1696 LOG_ALLOW(GLOBAL, LOG_INFO, "Pass %d: Migrating %d particles globally.\n", passes, global_migrations_this_pass);
1697
1698 ierr = SetMigrationRanks(user, migrationList, local_migration_count); CHKERRQ(ierr);
1699 ierr = PerformMigration(user); CHKERRQ(ierr);
1700
1701 // --- STAGE 4: POST-MIGRATION RESET ---
1702 // Identify newly arrived particles and flag them with NEEDS_LOCATION so they are
1703 // processed in the next pass. This uses the snapshot taken in STAGE 2.
1704 ierr = FlagNewcomersForLocation(user->swarm, nlocal_before, pids_before_snapshot); CHKERRQ(ierr);
1705 }
1706 // --- STAGE 5: LOOP SYNCHRONIZATION AND CLEANUP ---
1707
1708 ierr = PetscFree(pids_before_snapshot);
1709 ierr = PetscFree(migrationList);
1710
1711 LOG_ALLOW(GLOBAL, LOG_INFO, "End of pass %d. Total particles migrated globally: %d.\n", passes, global_migrations_this_pass);
1712
1713 } while (global_migrations_this_pass > 0 && passes < MAX_MIGRATION_PASSES);
1714
1715 // --- FINAL CHECKS ---
1716 if (passes >= MAX_MIGRATION_PASSES) {
1717 SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_CONV_FAILED, "Particle migration failed to converge after %d passes. Check for particles oscillating between ranks.", MAX_MIGRATION_PASSES);
1718 }
1719
1720 user->simCtx->particlesMigratedLastStep = total_migrated_this_timestep;
1721 user->simCtx->migrationPassesLastStep = passes;
1722 user->simCtx->searchMetrics.maxParticlePassDepth = PetscMax(user->simCtx->searchMetrics.maxParticlePassDepth, (PetscInt64)passes);
1724
1725 LOG_ALLOW(GLOBAL, LOG_INFO, "Particle Location completed in %d passes.\n", passes);
1726
1728 PetscFunctionReturn(0);
1729}
1730
1731
1732#undef __FUNCT__
1733#define __FUNCT__ "ResetAllParticleStatuses"
1734/**
1735 * @brief Implementation of \ref ResetAllParticleStatuses().
1736 * @details Full API contract (arguments, ownership, side effects) is documented with
1737 * the header declaration in `include/ParticleMotion.h`.
1738 * @see ResetAllParticleStatuses()
1739 */
1741{
1742 PetscErrorCode ierr;
1743 PetscInt n_local;
1744 PetscInt *status_p;
1745
1746 PetscFunctionBeginUser;
1747
1749
1750 ierr = DMSwarmGetLocalSize(user->swarm, &n_local); CHKERRQ(ierr);
1751
1752 if (n_local > 0) {
1753 // Get write access to the status field
1754 ierr = DMSwarmGetField(user->swarm, "DMSwarm_location_status", NULL, NULL, (void**)&status_p); CHKERRQ(ierr);
1755
1756 for (PetscInt p = 0; p < n_local; ++p) {
1757 // Only reset particles that are considered settled. This is a small optimization
1758 // to avoid changing the status of a LOST particle, though resetting all would also be fine.
1759 if (status_p[p] == ACTIVE_AND_LOCATED) {
1760 status_p[p] = NEEDS_LOCATION;
1761 }
1762 }
1763
1764 // Restore the field
1765 ierr = DMSwarmRestoreField(user->swarm, "DMSwarm_location_status", NULL, NULL, (void**)&status_p); CHKERRQ(ierr);
1766 }
1767
1768
1770 PetscFunctionReturn(0);
1771}
PetscErrorCode GetRandomCellAndLogicalCoordsOnInletFace(UserCtx *user, const DMDALocalInfo *info, PetscInt xs_gnode_rank, PetscInt ys_gnode_rank, PetscInt zs_gnode_rank, PetscInt IM_nodes_global, PetscInt JM_nodes_global, PetscInt KM_nodes_global, PetscRandom *rand_logic_i_ptr, PetscRandom *rand_logic_j_ptr, PetscRandom *rand_logic_k_ptr, PetscInt *ci_metric_lnode_out, PetscInt *cj_metric_lnode_out, PetscInt *ck_metric_lnode_out, PetscReal *xi_metric_logic_out, PetscReal *eta_metric_logic_out, PetscReal *zta_metric_logic_out)
Assuming the current rank services the inlet face, this function selects a random cell (owned by this...
Definition Boundaries.c:399
PetscErrorCode CanRankServiceInletFace(UserCtx *user, const DMDALocalInfo *info, PetscInt IM_nodes_global, PetscInt JM_nodes_global, PetscInt KM_nodes_global, PetscBool *can_service_inlet_out)
Determines if the current MPI rank owns any part of the globally defined inlet face,...
Definition Boundaries.c:11
PetscErrorCode GetDeterministicFaceGridLocation(UserCtx *user, const DMDALocalInfo *info, PetscInt xs_gnode_rank, PetscInt ys_gnode_rank, PetscInt zs_gnode_rank, PetscInt IM_cells_global, PetscInt JM_cells_global, PetscInt KM_cells_global, PetscInt64 particle_global_id, PetscInt *ci_metric_lnode_out, PetscInt *cj_metric_lnode_out, PetscInt *ck_metric_lnode_out, PetscReal *xi_metric_logic_out, PetscReal *eta_metric_logic_out, PetscReal *zta_metric_logic_out, PetscBool *placement_successful_out)
Places particles in a deterministic grid/raster pattern on a specified domain face.
Definition Boundaries.c:212
PetscErrorCode MetricLogicalToPhysical(UserCtx *user, const Cmpnts ***X, PetscInt i, PetscInt j, PetscInt k, PetscReal xi, PetscReal eta, PetscReal zta, Cmpnts *Xp)
Public interface for MetricLogicalToPhysical().
Definition Metric.c:77
PetscErrorCode GenerateGaussianNoise(PetscRandom rnd, PetscReal *n1, PetscReal *n2)
Internal helper implementation: GenerateGaussianNoise().
PetscErrorCode ResizeSwarmGlobally(DM swarm, PetscInt N_target)
Implementation of ResizeSwarmGlobally().
PetscErrorCode AddToMigrationList(MigrationInfo **migration_list_p, PetscInt *capacity_p, PetscInt *count_p, PetscInt particle_local_idx, PetscMPIInt destination_rank)
Internal helper implementation: AddToMigrationList().
PetscErrorCode SetMigrationRanks(UserCtx *user, const MigrationInfo *migrationList, PetscInt migrationCount)
Internal helper implementation: SetMigrationRanks().
PetscErrorCode CheckAndRemoveOutOfBoundsParticles(UserCtx *user, PetscInt *removedCountLocal, PetscInt *removedCountGlobal, const BoundingBox *bboxlist)
Internal helper implementation: CheckAndRemoveOutOfBoundsParticles().
PetscErrorCode GetLocalPIDSnapshot(const PetscInt64 pid_field[], PetscInt n_local, PetscInt64 **pids_snapshot_out)
Internal helper implementation: GetLocalPIDSnapshot().
PetscErrorCode MigrateRestartParticlesUsingCellID(UserCtx *user)
Internal helper implementation: MigrateRestartParticlesUsingCellID().
PetscErrorCode UpdateAllParticlePositions(UserCtx *user)
Internal helper implementation: UpdateAllParticlePositions().
PetscErrorCode CalculateParticleCountPerCell(UserCtx *user)
Implementation of CalculateParticleCountPerCell().
PetscErrorCode CalculateBrownianDisplacement(UserCtx *user, PetscReal diff_eff, Cmpnts *displacement)
Internal helper implementation: CalculateBrownianDisplacement().
PetscErrorCode LocateAllParticlesInGrid(UserCtx *user, BoundingBox *bboxlist)
Implementation of LocateAllParticlesInGrid().
static PetscErrorCode GuessParticleOwnerWithBBox(UserCtx *user, const Particle *particle, const BoundingBox *bboxlist, PetscMPIInt *guess_rank_out)
Internal helper implementation: GuessParticleOwnerWithBBox().
#define ERROR_MSG_BUFFER_SIZE
PetscErrorCode UpdateParticlePosition(UserCtx *user, Particle *particle)
Internal helper implementation: UpdateParticlePosition().
PetscErrorCode ResetAllParticleStatuses(UserCtx *user)
Implementation of ResetAllParticleStatuses().
PetscErrorCode ReinitializeParticlesOnInletSurface(UserCtx *user, PetscReal currentTime, PetscInt step)
Internal helper implementation: ReinitializeParticlesOnInletSurface().
PetscErrorCode CheckAndRemoveLostParticles(UserCtx *user, PetscInt *removedCountLocal, PetscInt *removedCountGlobal)
Internal helper implementation: CheckAndRemoveLostParticles().
PetscErrorCode FlagNewcomersForLocation(DM swarm, PetscInt n_local_before, const PetscInt64 pids_before[])
Internal helper implementation: FlagNewcomersForLocation().
static PetscBool IsParticleInBox(const BoundingBox *bbox, const Cmpnts *pos)
Internal helper implementation: IsParticleInBox().
PetscErrorCode PreCheckAndResizeSwarm(UserCtx *user, PetscInt ti, const char *ext)
Internal helper implementation: PreCheckAndResizeSwarm().
PetscErrorCode PerformMigration(UserCtx *user)
Implementation of PerformMigration().
Header file for Particle Motion and migration related functions.
PetscErrorCode UnpackSwarmFields(PetscInt i, const PetscInt64 *PIDs, const PetscReal *weights, const PetscReal *positions, const PetscInt *cellIndices, PetscReal *velocities, PetscInt *LocStatus, PetscReal *diffusivity, Cmpnts *diffusivitygradient, PetscReal *psi, Particle *particle)
Initializes a Particle struct with data from DMSwarm fields.
PetscErrorCode UpdateSwarmFields(PetscInt i, const Particle *particle, PetscReal *positions, PetscReal *velocities, PetscReal *weights, PetscInt *cellIndices, PetscInt *status, PetscReal *diffusivity, Cmpnts *diffusivitygradient, PetscReal *psi)
Updates DMSwarm data arrays from a Particle struct.
#define LOG_LOOP_ALLOW(scope, level, iterVar, interval, fmt,...)
Logs a message inside a loop, but only every interval iterations.
Definition logging.h:297
#define LOG_ALLOW_SYNC(scope, level, fmt,...)
Synchronized logging macro that checks both the log level and whether the calling function is in the ...
Definition logging.h:252
#define LOCAL
Logging scope definitions for controlling message output.
Definition logging.h:44
#define GLOBAL
Scope for global logging across all processes.
Definition logging.h:45
const char * BCFaceToString(BCFace face)
Helper function to convert BCFace enum to a string representation.
Definition logging.c:669
#define LOG_ALLOW(scope, level, fmt,...)
Logging macro that checks both the log level and whether the calling function is in the allowed-funct...
Definition logging.h:199
#define PROFILE_FUNCTION_END
Marks the end of a profiled code block.
Definition logging.h:779
const char * ParticleLocationStatusToString(ParticleLocationStatus level)
A function that outputs the name of the current level in the ParticleLocation enum.
Definition logging.c:973
@ LOG_ERROR
Critical errors that may halt the program.
Definition logging.h:28
@ LOG_INFO
Informational messages about program execution.
Definition logging.h:30
@ LOG_WARNING
Non-critical issues that warrant attention.
Definition logging.h:29
@ LOG_DEBUG
Detailed debugging information.
Definition logging.h:31
@ LOG_VERBOSE
Extremely detailed logs, typically for development use only.
Definition logging.h:33
#define PROFILE_FUNCTION_BEGIN
Marks the beginning of a profiled code block (typically a function).
Definition logging.h:770
PetscErrorCode ResetSearchMetrics(SimCtx *simCtx)
Resets the aggregate per-timestep search instrumentation counters.
Definition logging.c:2012
PetscErrorCode InitializeLogicalSpaceRNGs(PetscRandom *rand_logic_i, PetscRandom *rand_logic_j, PetscRandom *rand_logic_k)
Initializes random number generators for logical space operations [0.0, 1.0).
Definition setup.c:2725
PetscErrorCode BinarySearchInt64(PetscInt n, const PetscInt64 arr[], PetscInt64 key, PetscBool *found)
Performs a binary search for a key in a sorted array of PetscInt64.
Definition setup.c:2448
PetscMPIInt rank_zm
Definition variables.h:182
Cmpnts vel
Definition variables.h:169
PetscBool inletFaceDefined
Definition variables.h:830
PetscMPIInt rank_yp
Definition variables.h:181
PetscInt64 searchLocatedCount
Definition variables.h:224
PetscInt64 searchLostCount
Definition variables.h:225
BCFace identifiedInletBCFace
Definition variables.h:831
PetscInt cell[3]
Definition variables.h:167
SimCtx * simCtx
Back-pointer to the master simulation context.
Definition variables.h:814
@ PARTICLE_INIT_SURFACE_RANDOM
Random placement on the inlet face.
Definition variables.h:509
@ PARTICLE_INIT_SURFACE_EDGES
Deterministic placement at inlet face edges.
Definition variables.h:512
ParticleLocationStatus
Defines the state of a particle with respect to its location and migration status during the iterativ...
Definition variables.h:135
@ LOST
Definition variables.h:139
@ NEEDS_LOCATION
Definition variables.h:136
@ ACTIVE_AND_LOCATED
Definition variables.h:137
@ MIGRATING_OUT
Definition variables.h:138
PetscMPIInt rank_ym
Definition variables.h:181
PetscReal CMy_c
Definition variables.h:705
PetscMPIInt rank_xp
Definition variables.h:180
PetscInt local_index
Definition variables.h:193
Cmpnts max_coords
Maximum x, y, z coordinates of the bounding box.
Definition variables.h:156
Cmpnts diffusivitygradient
Definition variables.h:174
PetscInt64 searchPopulation
Definition variables.h:223
PetscReal dt
Definition variables.h:658
RankNeighbors neighbors
Definition variables.h:823
PetscInt currentSettlementPass
Definition variables.h:235
PetscInt np
Definition variables.h:739
Cmpnts min_coords
Minimum x, y, z coordinates of the bounding box.
Definition variables.h:155
PetscScalar x
Definition variables.h:101
Cmpnts loc
Definition variables.h:168
char * current_io_directory
Definition variables.h:670
PetscMPIInt destination_rank
Definition variables.h:172
Vec lParticleCount
Definition variables.h:882
PetscInt64 bboxGuessFallbackCount
Definition variables.h:233
ParticleLocationStatus location_status
Definition variables.h:171
PetscInt64 bboxGuessSuccessCount
Definition variables.h:232
char particle_subdir[PETSC_MAX_PATH_LEN]
Definition variables.h:667
PetscMPIInt rank_xm
Definition variables.h:180
PetscInt64 maxParticlePassDepth
Definition variables.h:234
char source_dir[PETSC_MAX_PATH_LEN]
Definition variables.h:555
PetscReal CMz_c
Definition variables.h:705
ParticleInitializationType ParticleInitialization
Definition variables.h:743
PetscScalar z
Definition variables.h:101
Vec ParticleCount
Definition variables.h:882
PetscInt particlesMigratedLastStep
Definition variables.h:749
PetscMPIInt rank_zp
Definition variables.h:182
SearchMetricsState searchMetrics
Definition variables.h:752
PetscReal diffusivity
Definition variables.h:173
PetscRandom BrownianMotionRNG
Definition variables.h:753
PostProcessParams * pps
Definition variables.h:798
PetscInt migrationPassesLastStep
Definition variables.h:748
PetscScalar y
Definition variables.h:101
@ EXEC_MODE_SOLVER
Definition variables.h:616
@ EXEC_MODE_POSTPROCESSOR
Definition variables.h:617
char _io_context_buffer[PETSC_MAX_PATH_LEN]
Definition variables.h:669
ExecutionMode exec_mode
Definition variables.h:662
BoundingBox bbox
Definition variables.h:822
PetscInt64 PID
Definition variables.h:166
char restart_dir[PETSC_MAX_PATH_LEN]
Definition variables.h:664
PetscReal CMx_c
Definition variables.h:705
Defines a 3D axis-aligned bounding box.
Definition variables.h:154
A 3D point or vector with PetscScalar components.
Definition variables.h:100
Information needed to migrate a single particle between MPI ranks.
Definition variables.h:192
Defines a particle's core properties for Lagrangian tracking.
Definition variables.h:165
Stores the MPI ranks of neighboring subdomains.
Definition variables.h:179
The master context for the entire simulation.
Definition variables.h:643
User-defined context containing data specific to a single computational grid level.
Definition variables.h:811
Head of a generic C-style linked list.
Definition variables.h:413
PetscErrorCode LocateParticleOrFindMigrationTarget(UserCtx *user, Particle *particle, ParticleLocationStatus *status_out)
Locates a particle's host cell or identifies its migration target using a robust walk search.
PetscErrorCode FindOwnerOfCell(UserCtx *user, PetscInt i, PetscInt j, PetscInt k, PetscMPIInt *owner_rank)
Finds the MPI rank that owns a given global cell index.