PICurv 0.1.0
A Parallel Particle-In-Cell Solver for Curvilinear LES
Loading...
Searching...
No Matches
grid.c
Go to the documentation of this file.
1// in src/grid.c
2
3#include "grid.h"
4#include "logging.h"
5
6#define BBOX_TOLERANCE 1e-6
7
8#undef __FUNCT__
9#define __FUNCT__ "ParseAndSetGridInputs"
10/**
11 * @brief Determines the grid source and calls the appropriate parsing routine.
12 *
13 * This function acts as a router. It checks the global `simCtx->generate_grid`
14 * flag (accessed via the `user->simCtx` back-pointer) to decide whether to
15 * call the parser for a programmatically generated grid or for a grid defined
16 * in a file.
17 *
18 * @param user Pointer to the `UserCtx` for a specific block. The function will
19 * populate the geometric fields within this struct.
20 * @return PetscErrorCode 0 on success, or a PETSc error code on failure.
21 */
22static PetscErrorCode ParseAndSetGridInputs(UserCtx *user)
23{
24 PetscErrorCode ierr;
25 SimCtx *simCtx = user->simCtx; // Get the global context via the back-pointer
26
27 PetscFunctionBeginUser;
28
30 if(strcmp(simCtx->eulerianSource,"analytical")==0){
31 ierr = SetAnalyticalGridInfo(user); CHKERRQ(ierr);
32 }else{ // eulerianSource is "solve" or "load"
33 if (simCtx->generate_grid) {
34 LOG_ALLOW_SYNC(GLOBAL, LOG_DEBUG, "Rank %d: Block %d is programmatically generated. Calling generation parser.\n", simCtx->rank, user->_this);
35 ierr = ReadGridGenerationInputs(user); CHKERRQ(ierr);
36 } else {
37 LOG_ALLOW_SYNC(GLOBAL, LOG_DEBUG, "Rank %d: Block %d is file-based. Calling file parser.\n", simCtx->rank, user->_this);
38 ierr = ReadGridFile(user); CHKERRQ(ierr);
39 }
40 }
41
43
44 PetscFunctionReturn(0);
45}
46
47
48#undef __FUNCT__
49#define __FUNCT__ "DefineAllGridDimensions"
50/**
51 * @brief Orchestrates the parsing and setting of grid dimensions for all blocks.
52 *
53 * This function serves as the high-level entry point for defining the geometric
54 * properties of each grid block in the simulation. It iterates through every
55 * block defined by `simCtx->block_number`.
56 *
57 * For each block, it performs two key actions:
58 * 1. It explicitly sets the block's index (`_this`) in the corresponding `UserCtx`
59 * struct for the finest multigrid level. This makes the context "self-aware".
60 * 2. It calls a helper function (`ParseAndSetGridInputs`) to handle the detailed
61 * work of parsing options or files to populate the rest of the geometric
62 * properties for that specific block (e.g., `IM`, `Min_X`, `rx`).
63 *
64 * @param simCtx The master SimCtx, which contains the number of blocks and the
65 * UserCtx hierarchy to be configured.
66 * @return PetscErrorCode 0 on success, or a PETSc error code on failure.
67 */
68PetscErrorCode DefineAllGridDimensions(SimCtx *simCtx)
69{
70 PetscErrorCode ierr;
71 PetscInt nblk = simCtx->block_number;
72 UserCtx *finest_users;
73
74 PetscFunctionBeginUser;
75
77
78 if (simCtx->usermg.mglevels == 0) {
79 SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_ARG_WRONGSTATE, "MG levels not set. Cannot get finest_users.");
80 }
81 // Get the UserCtx array for the finest grid level
82 finest_users = simCtx->usermg.mgctx[simCtx->usermg.mglevels - 1].user;
83
84 LOG_ALLOW(GLOBAL, LOG_INFO, "Defining grid dimensions for %d blocks...\n", nblk);
85
86 // Loop over each block to configure its grid dimensions and geometry.
87 for (PetscInt bi = 0; bi < nblk; bi++) {
88 LOG_ALLOW_SYNC(GLOBAL, LOG_DEBUG, "Rank %d: --- Configuring Geometry for Block %d ---\n", simCtx->rank, bi);
89
90 // Before calling any helpers, set the block index in the context.
91 // This makes the UserCtx self-aware of which block it represents.
92 LOG_ALLOW(GLOBAL,LOG_DEBUG,"finest_users->_this = %d, bi = %d\n",finest_users[bi]._this,bi);
93 //finest_user[bi]._this = bi;
94
95 // Call the helper function for this specific block. It can now derive
96 // all necessary information from the UserCtx pointer it receives.
97 ierr = ParseAndSetGridInputs(&finest_users[bi]); CHKERRQ(ierr);
98 }
99
101
102 PetscFunctionReturn(0);
103}
104
105#undef __FUNCT__
106#define __FUNCT__ "InitializeSingleGridDM"
107/**
108 * @brief Creates the DMDA objects (da and fda) for a single UserCtx.
109 *
110 * This function is a direct adaptation of the core logic in `MGDACreate`. It
111 * creates the scalar (`da`) and vector (`fda`) DMs for a single grid level.
112 *
113 * If a `coarse_user` context is provided, it performs the critical processor
114 * alignment calculation from the legacy code. This ensures the new (fine) DM
115 * aligns with the coarse DM for multigrid efficiency. If `coarse_user` is NULL,
116 * it creates the DM with a default PETSc decomposition, intended for the
117 * coarsest grid level.
118 *
119 * @param user The UserCtx for which the DMs will be created. Its IM, JM, KM fields must be pre-populated.
120 * @param coarse_user The UserCtx of the next-coarser grid level, or NULL if `user` is the coarsest level.
121 * @return PetscErrorCode 0 on success, or a PETSc error code on failure.
122 */
123static PetscErrorCode InitializeSingleGridDM(UserCtx *user, UserCtx *coarse_user)
124{
125 PetscErrorCode ierr;
126 SimCtx *simCtx = user->simCtx;
127
128 DMBoundaryType xperiod = (simCtx->i_periodic) ? DM_BOUNDARY_PERIODIC : DM_BOUNDARY_NONE;
129 DMBoundaryType yperiod = (simCtx->j_periodic) ? DM_BOUNDARY_PERIODIC : DM_BOUNDARY_NONE;
130 DMBoundaryType zperiod = (simCtx->k_periodic) ? DM_BOUNDARY_PERIODIC : DM_BOUNDARY_NONE;
131 PetscInt stencil_width = (simCtx->i_periodic || simCtx->j_periodic || simCtx->k_periodic) ? 3:2; // Stencil width is 2 in the legacy code
132
133 PetscInt *lx = NULL, *ly = NULL, *lz = NULL;
134 PetscInt m, n, p;
135
136 PetscFunctionBeginUser;
137
139
140 if (coarse_user) {
141 // --- This is a FINE grid; it must be aligned with the COARSE grid ---
142 LOG_ALLOW_SYNC(LOCAL, LOG_DEBUG, "Rank %d: [Aligning DM] for block %d level %d (size %dx%dx%d) with level %d\n", simCtx->rank, user->_this, user->thislevel, user->IM, user->JM, user->KM, coarse_user->thislevel);
143
144 DMDAGetInfo(coarse_user->da, NULL, NULL, NULL, NULL, &m, &n, &p, NULL, NULL, NULL, NULL, NULL, NULL);
145 LOG_ALLOW_SYNC(LOCAL, LOG_TRACE, "Rank %d: Coarse grid processor decomposition is %d x %d x %d\n", simCtx->rank, m, n, p);
146
147 // This is the core logic from MGDACreate to ensure processor alignment.
148 PetscInt *lx_contrib, *ly_contrib, *lz_contrib;
149 ierr = PetscMalloc3(m, &lx_contrib, n, &ly_contrib, p, &lz_contrib); CHKERRQ(ierr);
150 ierr = PetscMemzero(lx_contrib, m * sizeof(PetscInt)); CHKERRQ(ierr);
151 ierr = PetscMemzero(ly_contrib, n * sizeof(PetscInt)); CHKERRQ(ierr);
152 ierr = PetscMemzero(lz_contrib, p * sizeof(PetscInt)); CHKERRQ(ierr);
153
154 DMDALocalInfo info;
155 DMDAGetLocalInfo(coarse_user->da, &info);
156 PetscInt xs = info.xs, xe = info.xs + info.xm, mx = info.mx;
157 PetscInt ys = info.ys, ye = info.ys + info.ym, my = info.my;
158 PetscInt zs = info.zs, ze = info.zs + info.zm, mz = info.mz;
159
160 PetscMPIInt rank;
161 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
162 PetscInt proc_i = rank % m;
163 PetscInt proc_j = (rank / m) % n;
164 PetscInt proc_k = rank / (m * n);
165
166 // --- X-Direction Logic (Identical to MGDACreate) ---
167 if (user->isc) lx_contrib[proc_i] = (xe - xs);
168 else {
169 if (m == 1) lx_contrib[0] = user->IM + 1;
170 else if (xs == 0) lx_contrib[0] = 2 * xe - 1;
171 else if (xe == mx) lx_contrib[proc_i] = user->IM + 1 - (2 * xs - 1);
172 else lx_contrib[proc_i] = (xe - xs) * 2;
173 }
174
175 // --- Y-Direction Logic (Identical to MGDACreate) ---
176 if (user->jsc) ly_contrib[proc_j] = (ye - ys);
177 else {
178 if (n == 1) ly_contrib[0] = user->JM + 1;
179 else if (ys == 0) ly_contrib[0] = 2 * ye - 1;
180 else if (ye == my) ly_contrib[proc_j] = user->JM + 1 - (2 * ys - 1);
181 else ly_contrib[proc_j] = (ye - ys) * 2;
182 }
183
184 // --- Z-Direction Logic (Identical to MGDACreate) ---
185 if (user->ksc) lz_contrib[proc_k] = (ze - zs);
186 else {
187 if (p == 1) lz_contrib[0] = user->KM + 1;
188 else if (zs == 0) lz_contrib[0] = 2 * ze - 1;
189 else if (ze == mz) lz_contrib[proc_k] = user->KM + 1 - (2 * zs - 1);
190 else lz_contrib[proc_k] = (ze - zs) * 2;
191 }
192 LOG_ALLOW_SYNC(LOCAL, LOG_VERBOSE, "Rank %d: Calculated this rank's node contribution to fine grid: lx=%d, ly=%d, lz=%d\n", simCtx->rank, lx_contrib[proc_i], ly_contrib[proc_j], lz_contrib[proc_k]);
193
194 // Allocate the final distribution arrays and Allreduce to get the global distribution
195 ierr = PetscMalloc3(m, &lx, n, &ly, p, &lz); CHKERRQ(ierr);
196 ierr = MPI_Allreduce(lx_contrib, lx, m, MPIU_INT, MPI_MAX, PETSC_COMM_WORLD); CHKERRQ(ierr);
197 ierr = MPI_Allreduce(ly_contrib, ly, n, MPIU_INT, MPI_MAX, PETSC_COMM_WORLD); CHKERRQ(ierr);
198 ierr = MPI_Allreduce(lz_contrib, lz, p, MPIU_INT, MPI_MAX, PETSC_COMM_WORLD); CHKERRQ(ierr);
199
200 ierr = PetscFree3(lx_contrib, ly_contrib, lz_contrib); CHKERRQ(ierr);
201
202 } else {
203 // --- CASE 2: This is the COARSEST grid; use default or user-specified decomposition ---
204 if(simCtx->exec_mode == EXEC_MODE_SOLVER){
205
206 LOG_ALLOW_SYNC(LOCAL, LOG_DEBUG, "Rank %d: Creating coarsest DM for block %d level %d (size %dx%dx%d)\n", simCtx->rank, user->_this, user->thislevel, user->IM, user->JM, user->KM);
207 m = simCtx->da_procs_x;
208 n = simCtx->da_procs_y;
209 p = simCtx->da_procs_z;
210
211 } else if(simCtx->exec_mode == EXEC_MODE_POSTPROCESSOR){
212
213 LOG_ALLOW(GLOBAL,LOG_ERROR,"Currently Only Single Rank is supported. \n");
214
215 m = n = p = PETSC_DECIDE;
216
217 }
218 // lx, ly, lz are NULL, so DMDACreate3d will use the m,n,p values.
219 }
220
221 // --- Create the DMDA for the current UserCtx ---
222 LOG_ALLOW_SYNC(LOCAL, LOG_DEBUG, "Rank %d: Calling DMDACreate3d...\n", simCtx->rank);
223 ierr = DMDACreate3d(PETSC_COMM_WORLD, xperiod, yperiod, zperiod, DMDA_STENCIL_BOX,
224 user->IM + 1, user->JM + 1, user->KM + 1,
225 m, n, p,
226 1, stencil_width, lx, ly, lz, &user->da); CHKERRQ(ierr);
227
228 if (coarse_user) {
229 ierr = PetscFree3(lx, ly, lz); CHKERRQ(ierr);
230 }
231
232 // --- Standard DM setup applicable to all levels ---
233 ierr = DMSetUp(user->da); CHKERRQ(ierr);
234 ierr = DMGetCoordinateDM(user->da, &user->fda); CHKERRQ(ierr);
235 ierr = DMDASetUniformCoordinates(user->da, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0); CHKERRQ(ierr);
236 ierr = DMDAGetLocalInfo(user->da, &user->info); CHKERRQ(ierr);
237 LOG_ALLOW_SYNC(LOCAL, LOG_DEBUG, "Rank %d: DM creation for block %d level %d complete.\n", simCtx->rank, user->_this, user->thislevel);
238
240
241 PetscFunctionReturn(0);
242}
243
244
245#undef __FUNCT__
246#define __FUNCT__ "InitializeAllGridDMs"
247/**
248 * @brief Orchestrates the creation of DMDA objects for every block and multigrid level.
249 *
250 * This function systematically builds the entire DMDA hierarchy. It first
251 * calculates the dimensions (IM, JM, KM) for all coarse grids based on the
252 * finest grid's dimensions and the semi-coarsening flags. It then iterates
253 * from the coarsest to the finest level, calling a powerful helper function
254 * (`InitializeSingleGridDM`) to create the DMs for each block, ensuring that
255 * finer grids are properly aligned with their coarser parents for multigrid efficiency.
256 *
257 * @param simCtx The master SimCtx, containing the configured UserCtx hierarchy.
258 * @return PetscErrorCode 0 on success, or a PETSc error code on failure.
259 */
260PetscErrorCode InitializeAllGridDMs(SimCtx *simCtx)
261{
262 PetscErrorCode ierr;
263 UserMG *usermg = &simCtx->usermg;
264 MGCtx *mgctx = usermg->mgctx;
265 PetscInt nblk = simCtx->block_number;
266
267 PetscFunctionBeginUser;
268
270
271 LOG_ALLOW(GLOBAL,LOG_INFO, "Pre-scanning BCs to identify domain periodicity.\n");
272 ierr = DeterminePeriodicity(simCtx); CHKERRQ(ierr);
273
274 LOG_ALLOW(GLOBAL, LOG_INFO, "Creating DMDA objects for all levels and blocks...\n");
275
276 // --- Part 1: Calculate Coarse Grid Dimensions & VALIDATE ---
277 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Calculating and validating coarse grid dimensions...\n");
278 for (PetscInt level = usermg->mglevels - 2; level >= 0; level--) {
279 for (PetscInt bi = 0; bi < nblk; bi++) {
280 UserCtx *user_coarse = &mgctx[level].user[bi];
281 UserCtx *user_fine = &mgctx[level + 1].user[bi];
282
283 user_coarse->IM = user_fine->isc ? user_fine->IM : (user_fine->IM + 1) / 2;
284 user_coarse->JM = user_fine->jsc ? user_fine->JM : (user_fine->JM + 1) / 2;
285 user_coarse->KM = user_fine->ksc ? user_fine->KM : (user_fine->KM + 1) / 2;
286
287 LOG_ALLOW_SYNC(LOCAL, LOG_TRACE, "Rank %d: Block %d, Level %d dims calculated: %d x %d x %d\n",
288 simCtx->rank, bi, level, user_coarse->IM, user_coarse->JM, user_coarse->KM);
289
290 // Validation check from legacy MGDACreate to ensure coarsening is possible
291 PetscInt check_i = user_coarse->IM * (2 - user_coarse->isc) - (user_fine->IM + 1 - user_coarse->isc);
292 PetscInt check_j = user_coarse->JM * (2 - user_coarse->jsc) - (user_fine->JM + 1 - user_coarse->jsc);
293 PetscInt check_k = user_coarse->KM * (2 - user_coarse->ksc) - (user_fine->KM + 1 - user_coarse->ksc);
294
295 if (check_i + check_j + check_k != 0) {
296 // SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_ARG_WRONG,
297 // "Grid at level %d, block %d cannot be coarsened from %dx%dx%d to %dx%dx%d with the given semi-coarsening flags. Check grid dimensions.",
298 // level, bi, user_fine->IM, user_fine->JM, user_fine->KM, user_coarse->IM, user_coarse->JM, user_coarse->KM);
299 LOG(GLOBAL,LOG_WARNING,"WARNING: Grid at level %d, block %d can't be consistently coarsened further.\n", level, bi);
300 }
301 }
302 }
303
304 // --- Part 2: Create DMs from Coarse to Fine for each Block ---
305 for (PetscInt bi = 0; bi < nblk; bi++) {
306 LOG_ALLOW_SYNC(GLOBAL, LOG_DEBUG, "--- Creating DMs for Block %d ---\n", bi);
307
308 // Create the coarsest level DM first (passing NULL for the coarse_user)
309 ierr = InitializeSingleGridDM(&mgctx[0].user[bi], NULL); CHKERRQ(ierr);
310
311 // Create finer level DMs, passing the next-coarser context for alignment
312 for (PetscInt level = 1; level < usermg->mglevels; level++) {
313 ierr = InitializeSingleGridDM(&mgctx[level].user[bi], &mgctx[level-1].user[bi]); CHKERRQ(ierr);
314 }
315 }
316
317 // --- Optional: View the finest DM for debugging verification ---
318 if (get_log_level() >= LOG_DEBUG) {
319 LOG_ALLOW_SYNC(GLOBAL, LOG_INFO, "--- Viewing Finest DMDA (Level %d, Block 0) ---\n", usermg->mglevels - 1);
320 ierr = DMView(mgctx[usermg->mglevels - 1].user[0].da, PETSC_VIEWER_STDOUT_WORLD); CHKERRQ(ierr);
321 }
322
323 LOG_ALLOW(GLOBAL, LOG_INFO, "DMDA object creation complete.\n");
324
326
327 PetscFunctionReturn(0);
328}
329
330// Forward declarations for the static helper functions within this file.
331static PetscErrorCode SetFinestLevelCoordinates(UserCtx *user);
332static PetscErrorCode GenerateAndSetCoordinates(UserCtx *user);
333static PetscErrorCode ReadAndSetCoordinates(UserCtx *user, FILE *fd);
334static PetscErrorCode RestrictCoordinates(UserCtx *coarse_user, UserCtx *fine_user);
335
336#undef __FUNCT__
337#define __FUNCT__ "AssignAllGridCoordinates"
338/**
339 * @brief Orchestrates the assignment of physical coordinates to all DMDA objects.
340 *
341 * This function manages the entire process of populating the coordinate vectors
342 * for every DMDA across all multigrid levels and blocks. It follows a two-part
343 * strategy that is essential for multigrid methods:
344 *
345 * 1. **Populate Finest Level:** It first loops through each block and calls a
346 * helper (`SetFinestLevelCoordinates`) to set the physical coordinates for
347 * the highest-resolution grid (the finest multigrid level).
348 * 2. **Restrict to Coarser Levels:** It then iterates downwards from the finest
349 * level, calling a helper (`RestrictCoordinates`) to copy the coordinate
350 * values from the fine grid nodes to their corresponding parent nodes on the
351 * coarser grids. This ensures all levels represent the exact same geometry.
352 *
353 * @param simCtx The master SimCtx, containing the configured UserCtx hierarchy.
354 * @return PetscErrorCode 0 on success, or a PETSc error code on failure.
355 */
356PetscErrorCode AssignAllGridCoordinates(SimCtx *simCtx)
357{
358 PetscErrorCode ierr;
359 UserMG *usermg = &simCtx->usermg;
360 PetscInt nblk = simCtx->block_number;
361
362 PetscFunctionBeginUser;
363
365
366 LOG_ALLOW(GLOBAL, LOG_INFO, "Assigning physical coordinates to all grid DMs...\n");
367
368 // --- Part 1: Populate the Finest Grid Level ---
369 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Setting coordinates for the finest grid level (%d)...\n", usermg->mglevels - 1);
370 for (PetscInt bi = 0; bi < nblk; bi++) {
371 UserCtx *fine_user = &usermg->mgctx[usermg->mglevels - 1].user[bi];
372 ierr = SetFinestLevelCoordinates(fine_user); CHKERRQ(ierr);
373 LOG_ALLOW(GLOBAL,LOG_TRACE,"The Finest level coordinates for block %d have been set.\n",bi);
375 ierr = LOG_FIELD_MIN_MAX(fine_user,"Coordinates");
376 }
377 }
378 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Finest level coordinates have been set for all blocks.\n");
379
380 // --- Part 2: Restrict Coordinates to Coarser Levels ---
381 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Restricting coordinates to coarser grid levels...\n");
382 for (PetscInt level = usermg->mglevels - 2; level >= 0; level--) {
383 for (PetscInt bi = 0; bi < nblk; bi++) {
384 UserCtx *coarse_user = &usermg->mgctx[level].user[bi];
385 UserCtx *fine_user = &usermg->mgctx[level + 1].user[bi];
386 ierr = RestrictCoordinates(coarse_user, fine_user); CHKERRQ(ierr);
387
388 LOG_ALLOW(GLOBAL,LOG_TRACE,"Coordinates restricted to block %d level %d.\n",bi,level);
390 ierr = LOG_FIELD_MIN_MAX(coarse_user,"Coordinates");
391 }
392 }
393 }
394
395 LOG_ALLOW(GLOBAL, LOG_INFO, "Physical coordinates assigned to all grid levels and blocks.\n");
396
398
399 PetscFunctionReturn(0);
400}
401
402
403#undef __FUNCT__
404#define __FUNCT__ "SetFinestLevelCoordinates"
/**
 * @brief A router that populates the coordinates for a single finest-level DMDA.
 *
 * Orchestrates coordinate setting for one block. If `simCtx->generate_grid`
 * is set the coordinates are computed programmatically; otherwise rank 0
 * opens the grid file, skips the header once, and `ReadAndSetCoordinates`
 * reads and broadcasts the block's data.
 *
 * After the local coordinate vector is populated, a LocalToGlobal followed by
 * a GlobalToLocal scatter pushes the owned values into the global vector and
 * pulls them back so ghost-node coordinates are correct on every rank.
 *
 * @param user The UserCtx for a specific block on the finest level.
 * @return PetscErrorCode 0 on success, or a PETSc error code on failure.
 */
static PetscErrorCode SetFinestLevelCoordinates(UserCtx *user)
{
    PetscErrorCode ierr;
    SimCtx *simCtx = user->simCtx;

    PetscFunctionBeginUser;

    LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Setting finest level coordinates for block %d...\n", simCtx->rank, user->_this);

    if (simCtx->generate_grid) {
        ierr = GenerateAndSetCoordinates(user); CHKERRQ(ierr);
    } else {

        FILE *grid_file_handle = NULL;
        // Only Rank 0 opens the file; all other ranks pass a NULL handle down.
        if (simCtx->rank == 0) {
            grid_file_handle = fopen(simCtx->grid_file, "r");
            if (!grid_file_handle) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_FILE_OPEN, "Cannot open file: %s", simCtx->grid_file);

            // Skip the entire header section once so the stream sits at the
            // first coordinate value.
            // NOTE(review): nblk + 2 lines are skipped, but the stated layout
            // (1 line for nblk + 1 per block) sums to nblk + 1 — confirm the
            // file format has one extra header line.
            PetscInt headerLines = simCtx->block_number + 2; // 1 for nblk, plus one for each block's dims
            char dummy_buffer[2048];
            for (PetscInt s = 0; s < headerLines; ++s) {
                if (!fgets(dummy_buffer, sizeof(dummy_buffer), grid_file_handle)) {
                    SETERRQ(PETSC_COMM_SELF, PETSC_ERR_FILE_READ, "Unexpected EOF while skipping grid header");
                }
            }
            LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank 0: Skipped %d header lines, now at coordinate data.\n", headerLines);
        }

        // Collective call: rank 0 reads from the handle, then broadcasts to
        // all ranks (the handle is NULL on ranks != 0 by design).
        ierr = ReadAndSetCoordinates(user, grid_file_handle); CHKERRQ(ierr);

        // Only Rank 0, which opened the file, should close it.
        if (simCtx->rank == 0) {
            fclose(grid_file_handle);
        }
    }

    // After populating the local coordinate vector, scatter local -> global
    // -> local so ghost node coordinates are communicated across ranks.
    // Note: the vectors come from `da` but are laid out on the coordinate DM
    // `fda`, which is why the scatters use `fda`.
    Vec gCoor, lCoor;
    ierr = DMGetCoordinates(user->da, &gCoor); CHKERRQ(ierr);
    ierr = DMGetCoordinatesLocal(user->da, &lCoor); CHKERRQ(ierr);

    LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Scattering coordinates to update ghost nodes for block %d...\n", simCtx->rank, user->_this);
    ierr = DMLocalToGlobalBegin(user->fda, lCoor, INSERT_VALUES, gCoor); CHKERRQ(ierr);
    ierr = DMLocalToGlobalEnd(user->fda, lCoor, INSERT_VALUES, gCoor); CHKERRQ(ierr);

    ierr = DMGlobalToLocalBegin(user->fda, gCoor, INSERT_VALUES, lCoor); CHKERRQ(ierr);
    ierr = DMGlobalToLocalEnd(user->fda, gCoor, INSERT_VALUES, lCoor); CHKERRQ(ierr);

    PetscFunctionReturn(0);
}
481/**
482 * @brief Computes a stretched coordinate along one dimension.
483 *
484 * This function computes a coordinate based on a geometric stretching ratio.
485 * If the ratio (r) is 1.0, a uniform distribution is used:
486 * x(i) = L * (i/N)
487 *
488 * If r != 1.0, a geometric stretching is applied:
489 * x(i) = L * [ (r^(i/N) - 1 ) / (r - 1) ]
490 *
491 * Here:
492 * - i : The current index along the dimension.
493 * - N : The total number of divisions along that dimension.
494 * - L : The length of the domain along that dimension.
495 * - r : The stretching ratio. r > 1.0 stretches the grid in a geometric fashion
496 * increasing spacing away from the start, whereas 0 < r < 1.0 would
497 * cluster points near the start.
498 *
499 * @param[in] i Current index (0 <= i <= N).
500 * @param[in] N Number of segments along the dimension.
501 * @param[in] L Total length of the domain.
502 * @param[in] r Stretching ratio.
503 *
504 * @return PetscReal The computed coordinate at index i.
505 *
506 * @note This function does not return a PetscErrorCode because it
507 * does not allocate memory or call PETSc routines that can fail.
508 * It is just a helper calculation function.
509 **/
510static inline PetscReal ComputeStretchedCoord(PetscInt i, PetscInt N, PetscReal L, PetscReal r)
511{
512 if (N <=1) return 0.0;
513 PetscReal fraction = (PetscReal)i / ((PetscReal)N - 1.0);
514 if (PetscAbsReal(r - 1.0) < 1.0e-9) { // Use a tolerance for float comparison
515 return L * fraction;
516 } else {
517 return L * (PetscPowReal(r, fraction) - 1.0) / (r - 1.0);
518 }
519}
520
521#undef __FUNCT__
522#define __FUNCT__ "GenerateAndSetCoordinates"
/**
 * @brief Programmatically generates and sets grid coordinates based on user parameters.
 *
 * Populates the local coordinate vector of the provided `UserCtx` from the
 * block's bounding box (`Min_X`..`Max_X`, etc.) and per-direction stretching
 * ratios (`rx`, `ry`, `rz`) via `ComputeStretchedCoord`. Only node indices
 * below IM/JM/KM are written; the DMDA's extra top layer of nodes (the DM has
 * IM+1 nodes per direction) is left at the 0.0 set by `VecSet` — see the
 * "unphysical layer" note below.
 *
 * @param user The UserCtx for a specific block.
 * @return PetscErrorCode 0 on success, or a PETSc error code on failure.
 */
static PetscErrorCode GenerateAndSetCoordinates(UserCtx *user)
{
    PetscErrorCode ierr;
    DMDALocalInfo info;
    Cmpnts ***coor;    // 3D view of the coordinate vector, indexed [k][j][i]
    Vec lCoor;         // local (ghosted) coordinate vector

    PetscFunctionBeginUser;

    LOG_ALLOW_SYNC(LOCAL, LOG_DEBUG, "Rank %d: Generating coordinates for block %d...\n", user->simCtx->rank, user->_this);

    ierr = DMDAGetLocalInfo(user->da, &info); CHKERRQ(ierr);
    ierr = DMGetCoordinatesLocal(user->da, &lCoor); CHKERRQ(ierr);

    // Owned index range of this rank (end-exclusive).
    PetscInt xs = info.xs, xe = info.xs + info.xm;
    PetscInt ys = info.ys, ye = info.ys + info.ym;
    PetscInt zs = info.zs, ze = info.zs + info.zm;

    LOG_ALLOW_SYNC(LOCAL, LOG_TRACE, "Rank %d: Local Info for block %d - X range - [%d,%d], Y range - [%d,%d], Z range - [%d,%d]\n",
        user->simCtx->rank, user->_this, xs, xe, ys, ye, zs, ze);

    LOG_ALLOW_SYNC(LOCAL, LOG_TRACE, "Rank %d: Local Info for block %d - X domain - [%.4f,%.4f], Y range - [%.4f,%.4f], Z range - [%.4f,%.4f]\n",
        user->simCtx->rank, user->_this, user->Min_X,user->Max_X,user->Min_Y,user->Max_Y,user->Min_Z,user->Max_Z);

    // Zero the whole vector first so nodes skipped below stay at 0.0.
    ierr = VecSet(lCoor, 0.0); CHKERRQ(ierr);
    ierr = DMDAVecGetArray(user->fda, lCoor, &coor); CHKERRQ(ierr);

    // Physical extent per direction.
    PetscReal Lx = user->Max_X - user->Min_X;
    PetscReal Ly = user->Max_Y - user->Min_Y;
    PetscReal Lz = user->Max_Z - user->Min_Z;

    // Loop over the local nodes owned by this process; only indices below
    // IM/JM/KM are physical nodes, the rest belong to the DM's extra layer.
    for (PetscInt k = zs; k < ze; k++) {
        for (PetscInt j = ys; j < ye; j++) {
            for (PetscInt i = xs; i < xe; i++) {
                if(k < user->KM && j < user->JM && i < user->IM){
                    coor[k][j][i].x = user->Min_X + ComputeStretchedCoord(i, user->IM, Lx, user->rx);
                    coor[k][j][i].y = user->Min_Y + ComputeStretchedCoord(j, user->JM, Ly, user->ry);
                    coor[k][j][i].z = user->Min_Z + ComputeStretchedCoord(k, user->KM, Lz, user->rz);
                }
            }
        }
    }

    /// DEBUG: This verifies the presence of a last "unphysical" layer of coordinates.
    /*
    PetscInt KM = user->KM;
    for (PetscInt j = ys; j < ye; j++){
      for(PetscInt i = xs; i < xe; i++){
	LOG_ALLOW(GLOBAL,LOG_DEBUG,"coor[%d][%d][%d].(x,y,z) = %le,%le,%le",KM,j,i,coor[KM][j][i].x,coor[KM][j][i].y,coor[KM][j][i].z);
      }
    }
    */

    ierr = DMDAVecRestoreArray(user->fda, lCoor, &coor); CHKERRQ(ierr);

    PetscFunctionReturn(0);
}
597#undef __FUNCT__
598#define __FUNCT__ "ReadAndSetCoordinates"
/**
 * @brief Reads physical coordinates from a file and populates the DMDA for a specific block.
 *
 * Handles the collective read of an interleaved (X Y Z per line) multi-block
 * grid file. The file header must already have been skipped by the caller
 * (see SetFinestLevelCoordinates), which passes the positioned handle here
 * (non-NULL on rank 0 only).
 *
 * Parallel flow:
 * 1. Every rank allocates a buffer `gc` of 3 * IM*JM*KM reals.
 * 2. Rank 0 reads the current block's IM*JM*KM nodes from `fd` in k-j-i order.
 * 3. The buffer is broadcast to all ranks with MPI_Bcast.
 * 4. Each rank copies its owned node range (indices below IM/JM/KM only; the
 *    DMDA's extra top layer of nodes is left untouched) into the local
 *    coordinate vector.
 *
 * NOTE(review): if fscanf fails mid-read, rank 0 returns via SETERRQ without
 * freeing `gc`, and the other ranks are already waiting in the collective
 * MPI_Bcast — on error this path leaks the buffer and can deadlock. Legacy
 * behavior preserved; confirm whether an abort-on-error policy makes this
 * acceptable.
 *
 * @param user The UserCtx for a specific block. Its `_this` field must be set,
 *             and its IM, JM, KM fields must be correctly pre-populated.
 * @param fd   File handle positioned at this block's coordinate data
 *             (rank 0 only; NULL elsewhere).
 * @return PetscErrorCode 0 on success, or a PETSc error code on failure.
 */
static PetscErrorCode ReadAndSetCoordinates(UserCtx *user, FILE *fd)
{
    PetscErrorCode ierr;
    SimCtx *simCtx = user->simCtx;
    PetscMPIInt rank = simCtx->rank;
    PetscInt block_index = user->_this;
    PetscInt IM = user->IM, JM = user->JM, KM = user->KM;
    DMDALocalInfo info;
    Cmpnts ***coor;
    Vec lCoor;
    PetscReal *gc = NULL; // Global coordinate buffer, allocated on all ranks

    PetscFunctionBeginUser;

    LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Reading interleaved coordinates from file for block %d...\n",
              simCtx->rank, block_index);

    // 1. Allocate the buffer on ALL ranks to receive the broadcast data.
    //    IM*JM*KM physical nodes (NOT the DMDA's (IM+1)... node count).
    // PetscInt n_nodes = (IM + 1) * (JM + 1) * (KM + 1);
    PetscInt n_nodes = (IM) * (JM) * (KM);
    ierr = PetscMalloc1(3 * n_nodes, &gc); CHKERRQ(ierr);

    // 2. Only Rank 0 opens the file and reads the data.
    if (rank == 0) {
        if (!fd) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_FILE_OPEN, "Recieved a NULL file handle.\n"); // (sic: "Recieved" typo in runtime string)

        // Read the coordinate data for the CURRENT block, slowest index k,
        // fastest index i, 3 reals (x y z) per node.
        for (PetscInt k = 0; k < KM; k++) {
            for (PetscInt j = 0; j < JM; j++) {
                for (PetscInt i = 0; i < IM; i++) {
                    PetscInt base_index = 3 * ((k * (JM) + j) * (IM) + i);
                    if (fscanf(fd, "%le %le %le\n", &gc[base_index], &gc[base_index + 1], &gc[base_index + 2]) != 3) {
                        SETERRQ(PETSC_COMM_SELF, PETSC_ERR_FILE_READ, "Error reading coordinates for node (i,j,k)=(%d,%d,%d) in block %d", i, j, k, block_index);
                    }
                }
            }
        }

    }

    // 3. Broadcast the coordinate block for the current block to all other processes.
    ierr = MPI_Bcast(gc, 3 * n_nodes, MPIU_REAL, 0, PETSC_COMM_WORLD); CHKERRQ(ierr);

    // 4. Each rank populates its local portion of the coordinate vector,
    //    skipping the DMDA's extra top layer (indices == IM/JM/KM).
    ierr = DMDAGetLocalInfo(user->da, &info); CHKERRQ(ierr);
    ierr = DMGetCoordinatesLocal(user->da, &lCoor); CHKERRQ(ierr);
    ierr = DMDAVecGetArray(user->fda, lCoor, &coor); CHKERRQ(ierr);

    for (PetscInt k = info.zs; k < info.zs + info.zm; k++) {
        for (PetscInt j = info.ys; j < info.ys + info.ym; j++) {
            for (PetscInt i = info.xs; i < info.xs + info.xm; i++) {
                if(k< KM && j < JM && i < IM){
                    PetscInt base_idx = 3 * ((k * (JM) + j) * (IM) + i);
                    coor[k][j][i].x = gc[base_idx];
                    coor[k][j][i].y = gc[base_idx + 1];
                    coor[k][j][i].z = gc[base_idx + 2];
                }
            }
        }
    }

    // 5. Clean up and restore.
    ierr = DMDAVecRestoreArray(user->fda, lCoor, &coor); CHKERRQ(ierr);
    ierr = PetscFree(gc); CHKERRQ(ierr);

    PetscFunctionReturn(0);
}
692
693#undef __FUNCT__
694#define __FUNCT__ "RestrictCoordinates"
/**
 * @brief Populates coarse grid coordinates by restricting from a fine grid.
 *
 * Direct adaptation of the coordinate restriction loop from the legacy
 * `MG_Initial`. Each coarse node receives the coordinates of its parent node
 * on the fine grid: the parent index is `2*i` in a fully coarsened direction
 * and `i` in a semi-coarsened one (flags `isc`/`jsc`/`ksc` on the coarse
 * UserCtx). After the copy, a local->global->local scatter refreshes the
 * coarse grid's ghost-node coordinates.
 *
 * @param coarse_user The UserCtx for the coarse grid (destination).
 * @param fine_user   The UserCtx for the fine grid (source).
 * @return PetscErrorCode
 */
static PetscErrorCode RestrictCoordinates(UserCtx *coarse_user, UserCtx *fine_user)
{
    PetscErrorCode ierr;
    Vec c_lCoor, f_lCoor; // Coarse and Fine local coordinate vectors
    Cmpnts ***c_coor;
    const Cmpnts ***f_coor; // Use const for read-only access
    DMDALocalInfo c_info;
    PetscInt ih, jh, kh; // Fine-grid indices corresponding to coarse-grid i,j,k

    PetscFunctionBeginUser;

    LOG_ALLOW_SYNC(LOCAL, LOG_DEBUG, "Rank %d: Restricting coords from level %d to level %d for block %d\n",
                   fine_user->simCtx->rank, fine_user->thislevel, coarse_user->thislevel, coarse_user->_this);

    ierr = DMDAGetLocalInfo(coarse_user->da, &c_info); CHKERRQ(ierr);

    ierr = DMGetCoordinatesLocal(coarse_user->da, &c_lCoor); CHKERRQ(ierr);
    ierr = DMGetCoordinatesLocal(fine_user->da, &f_lCoor); CHKERRQ(ierr);

    ierr = DMDAVecGetArray(coarse_user->fda, c_lCoor, &c_coor); CHKERRQ(ierr);
    ierr = DMDAVecGetArrayRead(fine_user->fda, f_lCoor, &f_coor); CHKERRQ(ierr);

    // Get the local owned range of the coarse grid (end-exclusive).
    PetscInt xs = c_info.xs, xe = c_info.xs + c_info.xm;
    PetscInt ys = c_info.ys, ye = c_info.ys + c_info.ym;
    PetscInt zs = c_info.zs, ze = c_info.zs + c_info.zm;

    // Get the global dimensions of the coarse grid.
    PetscInt mx = c_info.mx, my = c_info.my, mz = c_info.mz;

    // If this process owns the maximum boundary node, contract the loop by one
    // to prevent the index doubling `2*i` from going out of bounds.
    // This is also ensuring we do not manipulate the unphysical layer of coors present in the finest level.
    if (xe == mx) xe--;
    if (ye == my) ye--;
    if (ze == mz) ze--;

    for (PetscInt k = zs; k < ze; k++) {
        for (PetscInt j = ys; j < ye; j++) {
            for (PetscInt i = xs; i < xe; i++) {
                // Determine the corresponding parent node index on the FINE grid,
                // respecting the semi-coarsening flags of the coarse UserCtx.
                // NOTE(review): reads f_coor at up to 2*(local max index);
                // presumes the fine ghost region covers these — confirm
                // stencil width guarantees this on all decompositions.
                ih = coarse_user->isc ? i : 2 * i;
                jh = coarse_user->jsc ? j : 2 * j;
                kh = coarse_user->ksc ? k : 2 * k;

                // LOG_ALLOW(GLOBAL,LOG_DEBUG," [kh][ih][jh] = %d,%d,%d - k,j,i = %d,%d,%d.\n",kh,jh,ih,k,j,i);

                c_coor[k][j][i] = f_coor[kh][jh][ih];
            }
        }
    }

    ierr = DMDAVecRestoreArray(coarse_user->fda, c_lCoor, &c_coor); CHKERRQ(ierr);
    ierr = DMDAVecRestoreArrayRead(fine_user->fda, f_lCoor, &f_coor); CHKERRQ(ierr);

    // After populating the local portion, scatter local -> global -> local
    // to update the coarse grid's ghost regions.
    Vec c_gCoor;
    ierr = DMGetCoordinates(coarse_user->da, &c_gCoor); CHKERRQ(ierr);
    ierr = DMLocalToGlobalBegin(coarse_user->fda, c_lCoor, INSERT_VALUES, c_gCoor); CHKERRQ(ierr);
    ierr = DMLocalToGlobalEnd(coarse_user->fda, c_lCoor, INSERT_VALUES, c_gCoor); CHKERRQ(ierr);
    ierr = DMGlobalToLocalBegin(coarse_user->fda, c_gCoor, INSERT_VALUES, c_lCoor); CHKERRQ(ierr);
    ierr = DMGlobalToLocalEnd(coarse_user->fda, c_gCoor, INSERT_VALUES, c_lCoor); CHKERRQ(ierr);

    PetscFunctionReturn(0);
}
778
779#undef __FUNCT__
780#define __FUNCT__ "ComputeLocalBoundingBox"
781/**
782 * @brief Computes the local bounding box of the grid on the current process.
783 *
784 * This function calculates the minimum and maximum coordinates (x, y, z) of the
785 * local grid points owned by the current MPI process. It iterates over the local
786 * portion of the grid, examines each grid point's coordinates, and updates the
787 * minimum and maximum values accordingly.
788 *
789 * The computed bounding box is stored in the provided `localBBox` structure,
790 * and the `user->bbox` field is also updated with this bounding box for
791 * consistency within the user context.
792 *
793 * @param[in] user Pointer to the user-defined context containing grid information.
794 * This context must be properly initialized before calling this function.
795 * @param[out] localBBox Pointer to the BoundingBox structure where the computed local bounding box will be stored.
796 * The structure should be allocated by the caller.
797 *
798 * @return PetscErrorCode Returns `0` on success, non-zero on failure.
799 */
800PetscErrorCode ComputeLocalBoundingBox(UserCtx *user, BoundingBox *localBBox)
801{
802 PetscErrorCode ierr;
803 PetscInt i, j, k;
804 PetscMPIInt rank;
805 PetscInt xs, ys, zs, xe, ye, ze;
806 DMDALocalInfo info;
807 Vec coordinates;
808 Cmpnts ***coordArray;
809 Cmpnts minCoords, maxCoords;
810
811 PetscFunctionBeginUser;
812
814
815 // Start of function execution
816 LOG_ALLOW(GLOBAL, LOG_INFO, "Entering the function.\n");
817
818 // Validate input Pointers
819 if (!user) {
820 LOG_ALLOW(LOCAL, LOG_ERROR, "Input 'user' Pointer is NULL.\n");
821 return PETSC_ERR_ARG_NULL;
822 }
823 if (!localBBox) {
824 LOG_ALLOW(LOCAL, LOG_ERROR, "Output 'localBBox' Pointer is NULL.\n");
825 return PETSC_ERR_ARG_NULL;
826 }
827
828 // Get MPI rank
829 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
830
831 // Get the local coordinates vector from the DMDA
832 ierr = DMGetCoordinatesLocal(user->da, &coordinates);
833 if (ierr) {
834 LOG_ALLOW(LOCAL, LOG_ERROR, "Error getting local coordinates vector.\n");
835 return ierr;
836 }
837
838 if (!coordinates) {
839 LOG_ALLOW(LOCAL, LOG_ERROR, "Coordinates vector is NULL.\n");
840 return PETSC_ERR_ARG_NULL;
841 }
842
843 // Access the coordinate array for reading
844 ierr = DMDAVecGetArrayRead(user->fda, coordinates, &coordArray);
845 if (ierr) {
846 LOG_ALLOW(LOCAL, LOG_ERROR, "Error accessing coordinate array.\n");
847 return ierr;
848 }
849
850 // Get the local grid information (indices and sizes)
851 ierr = DMDAGetLocalInfo(user->da, &info);
852 if (ierr) {
853 LOG_ALLOW(LOCAL, LOG_ERROR, "Error getting DMDA local info.\n");
854 return ierr;
855 }
856
857
858 xs = info.gxs; xe = xs + info.gxm;
859 ys = info.gys; ye = ys + info.gym;
860 zs = info.gzs; ze = zs + info.gzm;
861
862 /*
863 xs = info.xs; xe = xs + info.xm;
864 ys = info.ys; ye = ys + info.ym;
865 zs = info.zs; ze = zs + info.zm;
866 */
867
868 // Initialize min and max coordinates with extreme values
869 minCoords.x = minCoords.y = minCoords.z = PETSC_MAX_REAL;
870 maxCoords.x = maxCoords.y = maxCoords.z = PETSC_MIN_REAL;
871
872 LOG_ALLOW(LOCAL, LOG_TRACE, "[Rank %d] Grid indices (Including Ghosts): xs=%d, xe=%d, ys=%d, ye=%d, zs=%d, ze=%d.\n",rank, xs, xe, ys, ye, zs, ze);
873
874 // Iterate over the local grid to find min and max coordinates
875 for (k = zs; k < ze; k++) {
876 for (j = ys; j < ye; j++) {
877 for (i = xs; i < xe; i++) {
878 // Only consider nodes within the physical domain.
879 if(i < user->IM && j < user->JM && k < user->KM){
880 Cmpnts coord = coordArray[k][j][i];
881
882 // Update min and max coordinates
883 if (coord.x < minCoords.x) minCoords.x = coord.x;
884 if (coord.y < minCoords.y) minCoords.y = coord.y;
885 if (coord.z < minCoords.z) minCoords.z = coord.z;
886
887 if (coord.x > maxCoords.x) maxCoords.x = coord.x;
888 if (coord.y > maxCoords.y) maxCoords.y = coord.y;
889 if (coord.z > maxCoords.z) maxCoords.z = coord.z;
890 }
891 }
892 }
893 }
894
895
896 // Add tolerance to bboxes.
897 minCoords.x = minCoords.x - BBOX_TOLERANCE;
898 minCoords.y = minCoords.y - BBOX_TOLERANCE;
899 minCoords.z = minCoords.z - BBOX_TOLERANCE;
900
901 maxCoords.x = maxCoords.x + BBOX_TOLERANCE;
902 maxCoords.y = maxCoords.y + BBOX_TOLERANCE;
903 maxCoords.z = maxCoords.z + BBOX_TOLERANCE;
904
905 LOG_ALLOW(LOCAL,LOG_DEBUG," Tolerance added to the limits: %.8e .\n",(PetscReal)BBOX_TOLERANCE);
906
907 // Log the computed min and max coordinates
908 LOG_ALLOW(LOCAL, LOG_INFO,"[Rank %d] Bounding Box Ranges = X[%.6f, %.6f], Y[%.6f,%.6f], Z[%.6f, %.6f].\n",rank,minCoords.x, maxCoords.x,minCoords.y, maxCoords.y, minCoords.z, maxCoords.z);
909
910
911
912 // Restore the coordinate array
913 ierr = DMDAVecRestoreArrayRead(user->fda, coordinates, &coordArray);
914 if (ierr) {
915 LOG_ALLOW(LOCAL, LOG_ERROR, "Error restoring coordinate array.\n");
916 return ierr;
917 }
918
919 // Set the local bounding box
920 localBBox->min_coords = minCoords;
921 localBBox->max_coords = maxCoords;
922
923 // Update the bounding box inside the UserCtx for consistency
924 user->bbox = *localBBox;
925
926 LOG_ALLOW(GLOBAL, LOG_INFO, "Exiting the function successfully.\n");
927
929
930 PetscFunctionReturn(0);
931}
932
933#undef __FUNCT__
934#define __FUNCT__ "GatherAllBoundingBoxes"
935
936/**
937 * @brief Gathers local bounding boxes from all MPI processes to rank 0.
938 *
939 * Each rank computes its local bounding box, then all ranks
940 * participate in an MPI_Gather to send their BoundingBox to rank 0.
941 * Rank 0 allocates the result array and returns it via allBBoxes.
942 *
943 * @param[in] user Pointer to UserCtx (must be non-NULL).
944 * @param[out] allBBoxes On rank 0, receives malloc’d array of size `size`.
945 * On other ranks, set to NULL.
946 * @return PetscErrorCode
947 */
948PetscErrorCode GatherAllBoundingBoxes(UserCtx *user, BoundingBox **allBBoxes)
949{
950 PetscErrorCode ierr;
951 PetscMPIInt rank, size;
952 BoundingBox *bboxArray = NULL;
953 BoundingBox localBBox;
954
955 PetscFunctionBeginUser;
956
958
959 /* Validate */
960 if (!user || !allBBoxes) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL,
961 "GatherAllBoundingBoxes: NULL pointer");
962
963 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRMPI(ierr);
964 ierr = MPI_Comm_size(PETSC_COMM_WORLD, &size); CHKERRMPI(ierr);
965
966 /* Compute local bbox */
967 ierr = ComputeLocalBoundingBox(user, &localBBox); CHKERRQ(ierr);
968
969 /* Ensure everyone is synchronized before the gather */
970 MPI_Barrier(PETSC_COMM_WORLD);
972 "Rank %d: about to MPI_Gather(localBBox)\n", rank);
973
974 /* Allocate on root */
975 if (rank == 0) {
976 bboxArray = (BoundingBox*)malloc(size * sizeof(BoundingBox));
977 if (!bboxArray) SETERRABORT(PETSC_COMM_WORLD, PETSC_ERR_MEM,
978 "GatherAllBoundingBoxes: malloc failed");
979 }
980
981 /* Collective: every rank must call */
982 ierr = MPI_Gather(&localBBox, sizeof(BoundingBox), MPI_BYTE,
983 bboxArray, sizeof(BoundingBox), MPI_BYTE,
984 0, PETSC_COMM_WORLD);
985 CHKERRMPI(ierr);
986
987 MPI_Barrier(PETSC_COMM_WORLD);
989 "Rank %d: completed MPI_Gather(localBBox)\n", rank);
990
991 /* Return result */
992 if (rank == 0) {
993 *allBBoxes = bboxArray;
994 } else {
995 *allBBoxes = NULL;
996 }
997
999
1000 PetscFunctionReturn(0);
1001}
1002
1003#undef __FUNCT__
1004#define __FUNCT__ "BroadcastAllBoundingBoxes"
1005
1006/**
1007 * @brief Broadcasts the bounding box list from rank 0 to all ranks.
1008 *
1009 * After GatherAllBoundingBoxes, rank 0 has an array of `size` boxes.
1010 * This routine makes sure every rank ends up with its own malloc’d copy.
1011 *
1012 * @param[in] user Pointer to UserCtx (unused here, but kept for signature).
1013 * @param[in,out] bboxlist On entry: rank 0’s array; on exit: every rank’s array.
1014 * @return PetscErrorCode
1015 */
1016PetscErrorCode BroadcastAllBoundingBoxes(UserCtx *user, BoundingBox **bboxlist)
1017{
1018 PetscErrorCode ierr;
1019 PetscMPIInt rank, size;
1020
1021 PetscFunctionBeginUser;
1022
1024
1025 if (!bboxlist) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL,
1026 "BroadcastAllBoundingBoxes: NULL pointer");
1027
1028 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRMPI(ierr);
1029 ierr = MPI_Comm_size(PETSC_COMM_WORLD, &size); CHKERRMPI(ierr);
1030
1031 /* Non-root ranks must allocate before the Bcast */
1032 if (rank != 0) {
1033 *bboxlist = (BoundingBox*)malloc(size * sizeof(BoundingBox));
1034 if (!*bboxlist) SETERRABORT(PETSC_COMM_WORLD, PETSC_ERR_MEM,
1035 "BroadcastAllBoundingBoxes: malloc failed");
1036 }
1037
1038 MPI_Barrier(PETSC_COMM_WORLD);
1040 "Rank %d: about to MPI_Bcast(%d boxes)\n", rank, size);
1041
1042 /* Collective: every rank must call */
1043 ierr = MPI_Bcast(*bboxlist, size * sizeof(BoundingBox), MPI_BYTE,
1044 0, PETSC_COMM_WORLD);
1045 CHKERRMPI(ierr);
1046
1047 MPI_Barrier(PETSC_COMM_WORLD);
1049 "Rank %d: completed MPI_Bcast(%d boxes)\n", rank, size);
1050
1051
1053
1054 PetscFunctionReturn(0);
1055}
1056
1057#undef __FUNCT__
1058#define __FUNCT__ "CalculateInletProperties"
1059/**
1060 * @brief Calculates the center and area of the primary INLET face.
1061 *
1062 * This function identifies the primary INLET face from the boundary face
1063 * configurations, computes its geometric center and total area using a
1064 * generic utility function, and stores these results in the simulation context.
1065 *
1066 * @param user Pointer to the UserCtx containing boundary face information.
1067 * @return PetscErrorCode
1068 */
1070{
1071 PetscErrorCode ierr;
1072 BCFace inlet_face_id = -1;
1073 PetscBool inlet_found = PETSC_FALSE;
1074
1075 PetscFunctionBeginUser;
1077
1078 // 1. Identify the primary inlet face from the configuration
1079 for (int i = 0; i < 6; i++) {
1080 if (user->boundary_faces[i].mathematical_type == INLET) {
1081 inlet_face_id = user->boundary_faces[i].face_id;
1082 inlet_found = PETSC_TRUE;
1083 break; // Use the first inlet found
1084 }
1085 }
1086
1087 if (!inlet_found) {
1088 LOG_ALLOW(GLOBAL, LOG_INFO, "No INLET face found. Skipping inlet center calculation.\n");
1090 PetscFunctionReturn(0);
1091 }
1092
1093 Cmpnts inlet_center;
1094 PetscReal inlet_area;
1095
1096 // 2. Call the generic utility to compute the center and area of any face.
1097 ierr = CalculateFaceCenterAndArea(user,inlet_face_id,&inlet_center,&inlet_area); CHKERRQ(ierr);
1098
1099 // 3. Store results in the SimCtx
1100 user->simCtx->CMx_c = inlet_center.x;
1101 user->simCtx->CMy_c = inlet_center.y;
1102 user->simCtx->CMz_c = inlet_center.z;
1103 user->simCtx->AreaInSum = inlet_area;
1104
1106 "Rank[%d] Inlet Center: (%.6f, %.6f, %.6f), Area: %.6f\n",
1107 inlet_center.x, inlet_center.y, inlet_center.z, inlet_area);
1108
1110 PetscFunctionReturn(0);
1111
1112}
1113
1114#undef __FUNCT__
1115#define __FUNCT__ "CalculateOutletProperties"
1116/**
1117 * @brief Calculates the center and area of the primary OUTLET face.
1118 *
1119 * This function identifies the primary OUTLET face from the boundary face
1120 * configurations, computes its geometric center and total area using a
1121 * generic utility function, and stores these results in the simulation context.
1122 * @param user Pointer to the UserCtx containing boundary face information.
1123 * @return PetscErrorCode
1124 */
1126{
1127 PetscErrorCode ierr;
1128 BCFace outlet_face_id = -1;
1129 PetscBool outlet_found = PETSC_FALSE;
1130 PetscFunctionBeginUser;
1132 // 1. Identify the primary outlet face from the configuration
1133 for (int i = 0; i < 6; i++) {
1134 if (user->boundary_faces[i].mathematical_type == OUTLET) {
1135 outlet_face_id = user->boundary_faces[i].face_id;
1136 outlet_found = PETSC_TRUE;
1137 break; // Use the first outlet found
1138 }
1139 }
1140 if (!outlet_found) {
1141 LOG_ALLOW(GLOBAL, LOG_INFO, "No OUTLET face found. Skipping outlet center calculation.\n");
1143 PetscFunctionReturn(0);
1144 }
1145 PetscReal outlet_area;
1146 Cmpnts outlet_center;
1147 // 2. Call the generic utility to compute the center and area of any face
1148 ierr = CalculateFaceCenterAndArea(user,outlet_face_id,&outlet_center,&outlet_area); CHKERRQ(ierr);
1149 // 3. Store results in the SimCtx
1150 user->simCtx->AreaOutSum = outlet_area;
1151
1153 "Outlet Center: (%.6f, %.6f, %.6f), Area: %.6f\n",
1154 outlet_center.x, outlet_center.y, outlet_center.z, outlet_area);
1155
1157 PetscFunctionReturn(0);
1158}
1159
1160#undef __FUNCT__
1161#define __FUNCT__ "CalculateFaceCenterAndArea"
1162/**
1163 * @brief Calculates the geometric center and total area of a specified boundary face.
1164 *
1165 * This function computes two key properties of a boundary face in the computational domain:
1166 * 1. **Geometric Center**: The average (x,y,z) position of all physical nodes on the face
1167 * 2. **Total Area**: The sum of face area vector magnitudes from all non-solid cells adjacent to the face
1168 *
1169 * @section architecture Indexing Architecture
1170 *
1171 * The solver uses different indexing conventions for different field types:
1172 *
1173 * **Node-Centered Fields (Coordinates):**
1174 * - Direct indexing: Node n stored at coor[n]
1175 * - For mx=26: Physical nodes [0-24], Dummy at [25]
1176 * - For mz=98: Physical nodes [0-96], Dummy at [97]
1177 *
1178 * **Face-Centered Fields (Metrics: csi, eta, zet):**
1179 * - Direct indexing: Face n stored at csi/eta/zet[n]
1180 * - For mx=26: Physical faces [0-24], Dummy at [25]
1181 * - For mz=98: Physical faces [0-96], Dummy at [97]
1182 * - Face at index k bounds cells k-1 and k
1183 *
1184 * **Cell-Centered Fields (nvert):**
1185 * - Shifted indexing: Physical cell c stored at nvert[c+1]
1186 * - For mx=26 (25 cells): Cell 0→nvert[1], Cell 23→nvert[24]
1187 * - For mz=98 (96 cells): Cell 0→nvert[1], Cell 95→nvert[96]
1188 * - nvert[0] and nvert[mx-1] are ghost values
1189 *
1190 * @section face_geometry Face-to-Index Mapping
1191 *
1192 * Example for a domain with mx=26, my=26, mz=98:
1193 *
1194 * | Face ID | Node Index | Face Metric | Adjacent Cell (shifted) | Physical Extent |
1195 * |---------------|------------|------------------|-------------------------|-----------------|
1196 * | BC_FACE_NEG_X | i=0 | csi[k][j][0] | nvert[k][j][1] (Cell 0) | j∈[0,24], k∈[0,96] |
1197 * | BC_FACE_POS_X | i=24 | csi[k][j][24] | nvert[k][j][24] (Cell 23)| j∈[0,24], k∈[0,96] |
1198 * | BC_FACE_NEG_Y | j=0 | eta[k][0][i] | nvert[k][1][i] (Cell 0) | i∈[0,24], k∈[0,96] |
1199 * | BC_FACE_POS_Y | j=24 | eta[k][24][i] | nvert[k][24][i] (Cell 23)| i∈[0,24], k∈[0,96] |
1200 * | BC_FACE_NEG_Z | k=0 | zet[0][j][i] | nvert[1][j][i] (Cell 0) | i∈[0,24], j∈[0,24] |
1201 * | BC_FACE_POS_Z | k=96 | zet[96][j][i] | nvert[96][j][i] (Cell 95)| i∈[0,24], j∈[0,24] |
1202 *
1203 * @section algorithm Algorithm
1204 *
1205 * The function performs two separate computations with different loop bounds:
1206 *
1207 * **1. Center Calculation (uses ALL physical nodes):**
1208 * - Loop over all physical nodes on the face (excluding dummy indices)
1209 * - Accumulate coordinate sums: Σx, Σy, Σz
1210 * - Count number of nodes
1211 * - Average: center = (Σx/n, Σy/n, Σz/n)
1212 *
1213 * **2. Area Calculation (uses INTERIOR cells only):**
1214 * - Loop over interior cell range to avoid accessing ghost values in nvert
1215 * - For each face adjacent to a fluid cell (nvert < 0.1):
1216 * - Compute area magnitude: |csi/eta/zet| = √(x² + y² + z²)
1217 * - Accumulate to total area
1218 *
1219 * @section loop_bounds Loop Bound Details
1220 *
1221 * **Why different bounds for center vs. area?**
1222 *
1223 * For BC_FACE_NEG_X at i=0 with my=26, mz=98:
1224 *
1225 * *Center calculation (coordinates):*
1226 * - j ∈ [ys, j_max): Includes j=[0,24] (25 nodes), excludes dummy at j=25
1227 * - k ∈ [zs, k_max): Includes k=[0,96] (97 nodes), excludes dummy at k=97
1228 * - Total: 25 × 97 = 2,425 nodes
1229 *
1230 * *Area calculation (nvert checks):*
1231 * - j ∈ [lys, lye): j=[1,24] (24 values), excludes boundaries
1232 * - k ∈ [lzs, lze): k=[1,96] (96 values), excludes boundaries
1233 * - Why restricted?
1234 * - At j=0: nvert[k][0][1] is ghost (no cell at j=-1)
1235 * - At j=25: nvert[k][25][1] is ghost (no cell at j=24, index 25 is dummy)
1236 * - At k=0: nvert[0][j][1] is ghost (no cell at k=-1)
1237 * - At k=97: nvert[97][j][1] is ghost (no cell at k=96, index 97 is dummy)
1238 * - Total: 24 × 96 = 2,304 interior cells adjacent to face
1239 *
1240 * @section area_formulas Area Calculation Formulas
1241 *
1242 * Face area contributions are computed from metric tensor magnitudes:
1243 * - **i-faces (±Xi)**: Area = |csi| = √(csi_x² + csi_y² + csi_z²)
1244 * - **j-faces (±Eta)**: Area = |eta| = √(eta_x² + eta_y² + eta_z²)
1245 * - **k-faces (±Zeta)**: Area = |zet| = √(zet_x² + zet_y² + zet_z²)
1246 *
1247 * @param[in] user Pointer to UserCtx containing grid info, DMs, and field vectors
1248 * @param[in] face_id Enum identifying which boundary face to analyze (BC_FACE_NEG_X, etc.)
1249 * @param[out] face_center Pointer to Cmpnts structure to store computed geometric center (x,y,z)
1250 * @param[out] face_area Pointer to PetscReal to store computed total face area
1251 *
1252 * @return PetscErrorCode Returns 0 on success, non-zero PETSc error code on failure
1253 *
1254 * @note This function uses MPI_Allreduce, so it must be called collectively by all ranks
1255 * @note Only ranks that own the specified boundary face contribute to the calculation
1256 * @note Center calculation includes ALL physical nodes on the face
1257 * @note Area calculation ONLY includes faces adjacent to fluid cells (nvert < 0.1)
1258 * @note Dummy/unused indices (e.g., k=97, j=25 for standard test case) are excluded
1259 *
1260 * @warning Assumes grid and field arrays have been properly initialized
1261 * @warning Incorrect face_id values will result in zero contribution from all ranks
1262 *
1263 * @see CanRankServiceFace() for determining rank ownership of boundary faces
1264 * @see BCFace enum for valid face_id values
1265 * @see LOG_FIELD_ANATOMY() for debugging field indexing
1266 *
1267 * @par Example Usage:
1268 * @code
1269 * Cmpnts inlet_center;
1270 * PetscReal inlet_area;
1271 * ierr = CalculateFaceCenterAndArea(user, BC_FACE_NEG_Z, &inlet_center, &inlet_area);
1272 * PetscPrintf(PETSC_COMM_WORLD, "Inlet center: (%.4f, %.4f, %.4f), Area: %.6f\n",
1273 * inlet_center.x, inlet_center.y, inlet_center.z, inlet_area);
1274 * @endcode
1275 */
1276PetscErrorCode CalculateFaceCenterAndArea(UserCtx *user, BCFace face_id,
1277 Cmpnts *face_center, PetscReal *face_area)
1278{
1279 PetscErrorCode ierr;
1280 DMDALocalInfo info;
1281
1282 // ========================================================================
1283 // Local accumulators for this rank's contribution
1284 // ========================================================================
1285 PetscReal local_sum[3] = {0.0, 0.0, 0.0}; ///< Local sum of (x,y,z) coordinates
1286 PetscReal localAreaSum = 0.0; ///< Local sum of face area magnitudes
1287 PetscCount local_n_points = 0; ///< Local count of nodes
1288
1289 // ========================================================================
1290 // Global accumulators after MPI reduction
1291 // ========================================================================
1292 PetscReal global_sum[3] = {0.0, 0.0, 0.0}; ///< Global sum of coordinates
1293 PetscReal globalAreaSum = 0.0; ///< Global sum of areas
1294 PetscCount global_n_points = 0; ///< Global count of nodes
1295
1296 // ========================================================================
1297 // Grid information and array pointers
1298 // ========================================================================
1299 DM da = user->da;
1300 info = user->info;
1301
1302 // Rank's owned range in global indices
1303 PetscInt xs = info.xs, xe = info.xs + info.xm; ///< i-range: [xs, xe)
1304 PetscInt ys = info.ys, ye = info.ys + info.ym; ///< j-range: [ys, ye)
1305 PetscInt zs = info.zs, ze = info.zs + info.zm; ///< k-range: [zs, ze)
1306
1307 // Global domain dimensions (total allocated, includes dummy at end)
1308 PetscInt mx = info.mx, my = info.my, mz = info.mz;
1309 PetscInt IM = user->IM; ///< Physical domain size in i (exclude dummy)
1310 PetscInt JM = user->JM; ///< Physical domain size in j (exclude dummy)
1311 PetscInt KM = user->KM; ///< Physical domain size in k (exclude dummy)
1312
1313 // ========================================================================
1314 // Interior loop bounds (adjusted to avoid ghost/boundary cells)
1315 // These are used for nvert checks where we need valid cell indices
1316 // ========================================================================
1317 PetscInt lxs = xs; if(xs == 0) lxs = xs + 1; ///< Start at 1 if on -Xi boundary
1318 PetscInt lxe = xe; if(xe == mx) lxe = xe - 1; ///< End at mx-1 if on +Xi boundary
1319 PetscInt lys = ys; if(ys == 0) lys = ys + 1; ///< Start at 1 if on -Eta boundary
1320 PetscInt lye = ye; if(ye == my) lye = ye - 1; ///< End at my-1 if on +Eta boundary
1321 PetscInt lzs = zs; if(zs == 0) lzs = zs + 1; ///< Start at 1 if on -Zeta boundary
1322 PetscInt lze = ze; if(ze == mz) lze = ze - 1; ///< End at mz-1 if on +Zeta boundary
1323
1324 // ========================================================================
1325 // Physical node bounds (exclude dummy indices at mx-1, my-1, mz-1)
1326 // These are used for coordinate loops where we want ALL physical nodes
1327 // ========================================================================
1328 PetscInt i_max = (xe == mx) ? mx - 1 : xe; ///< Exclude dummy at i=mx-1 (e.g., i=25)
1329 PetscInt j_max = (ye == my) ? my - 1 : ye; ///< Exclude dummy at j=my-1 (e.g., j=25)
1330 PetscInt k_max = (ze == mz) ? mz - 1 : ze; ///< Exclude dummy at k=mz-1 (e.g., k=97)
1331
1332 // ========================================================================
1333 // Array pointers for field access
1334 // ========================================================================
1335 Vec lCoor; ///< Local ghosted coordinate vector
1336 Cmpnts ***coor; ///< Nodal coordinates [k][j][i]
1337 Cmpnts ***csi, ***eta, ***zet; ///< Face metric tensors [k][j][i]
1338 PetscReal ***nvert; ///< Cell blanking field [k][j][i] (shifted +1)
1339
1340 PetscFunctionBeginUser;
1342
1343 // ========================================================================
1344 // Step 1: Check if this rank owns the specified boundary face
1345 // ========================================================================
1346 PetscBool owns_face = PETSC_FALSE;
1347 ierr = CanRankServiceFace(&info,IM,JM,KM,face_id,&owns_face); CHKERRQ(ierr);
1348 if(owns_face){
1349 // ========================================================================
1350 // Step 2: Get read-only array access for all required fields
1351 // ========================================================================
1352 ierr = DMGetCoordinatesLocal(user->da, &lCoor); CHKERRQ(ierr);
1353 ierr = DMDAVecGetArrayRead(user->fda, lCoor, &coor); CHKERRQ(ierr);
1354 ierr = DMDAVecGetArrayRead(user->da, user->lNvert, &nvert); CHKERRQ(ierr);
1355 ierr = DMDAVecGetArrayRead(user->fda, user->lCsi, &csi); CHKERRQ(ierr);
1356 ierr = DMDAVecGetArrayRead(user->fda, user->lEta, &eta); CHKERRQ(ierr);
1357 ierr = DMDAVecGetArrayRead(user->fda, user->lZet, &zet); CHKERRQ(ierr);
1358
1359 // ========================================================================
1360 // Step 3: Loop over the specified face and accumulate center and area
1361 // ========================================================================
1362 switch (face_id) {
1363
1364 // ====================================================================
1365 // BC_FACE_NEG_X: Face at i=0 (bottom boundary in i-direction)
1366 // ====================================================================
1367 case BC_FACE_NEG_X:
1368 if (xs == 0) {
1369 PetscInt i = 0; // Face is at node index i=0
1370
1371 // ---- Part 1: Center calculation (ALL physical nodes) ----
1372 // Loop over ALL physical nodes on this face
1373 // For my=26, mz=98: j∈[0,24], k∈[0,96] → 25×97 = 2,425 nodes
1374 for (PetscInt k = zs; k < k_max; k++) {
1375 for (PetscInt j = ys; j < j_max; j++) {
1376 // Accumulate coordinates at node [k][j][0]
1377 local_sum[0] += coor[k][j][i].x;
1378 local_sum[1] += coor[k][j][i].y;
1379 local_sum[2] += coor[k][j][i].z;
1380 local_n_points++;
1381 }
1382 }
1383
1384 // ---- Part 2: Area calculation (INTERIOR cells only) ----
1385 // Loop over interior range where nvert checks are valid
1386 // For my=26, mz=98: j∈[1,24], k∈[1,96] → 24×96 = 2,304 cells
1387 for (PetscInt k = lzs; k < lze; k++) {
1388 for (PetscInt j = lys; j < lye; j++) {
1389 // Check if adjacent cell is fluid
1390 // nvert[k][j][i+1] = nvert[k][j][1] checks Cell 0
1391 // (Physical Cell 0 in j-k plane, stored at shifted index [1])
1392 if (nvert[k][j][i+1] < 0.1) {
1393 // Cell is fluid - add face area contribution
1394 // Face area = magnitude of csi metric at [k][j][0]
1395 localAreaSum += sqrt(csi[k][j][i].x * csi[k][j][i].x +
1396 csi[k][j][i].y * csi[k][j][i].y +
1397 csi[k][j][i].z * csi[k][j][i].z);
1398 }
1399 }
1400 }
1401 }
1402 break;
1403
1404 // ====================================================================
1405 // BC_FACE_POS_X: Face at i=IM-1 (top boundary in i-direction)
1406 // ====================================================================
1407 case BC_FACE_POS_X:
1408 if (xe == mx) {
1409 PetscInt i = mx - 2; // Last physical node (e.g., i=24 for mx=26)
1410
1411 // ---- Part 1: Center calculation (ALL physical nodes) ----
1412 for (PetscInt k = zs; k < k_max; k++) {
1413 for (PetscInt j = ys; j < j_max; j++) {
1414 local_sum[0] += coor[k][j][i].x;
1415 local_sum[1] += coor[k][j][i].y;
1416 local_sum[2] += coor[k][j][i].z;
1417 local_n_points++;
1418 }
1419 }
1420
1421 // ---- Part 2: Area calculation (INTERIOR cells only) ----
1422 for (PetscInt k = lzs; k < lze; k++) {
1423 for (PetscInt j = lys; j < lye; j++) {
1424 // Check if adjacent cell is fluid
1425 // nvert[k][j][i] = nvert[k][j][24] checks last cell (Cell 23)
1426 // (Physical Cell 23, stored at shifted index [24])
1427 if (nvert[k][j][i] < 0.1) {
1428 // Face area = magnitude of csi metric at [k][j][24]
1429 localAreaSum += sqrt(csi[k][j][i].x * csi[k][j][i].x +
1430 csi[k][j][i].y * csi[k][j][i].y +
1431 csi[k][j][i].z * csi[k][j][i].z);
1432 }
1433 }
1434 }
1435 }
1436 break;
1437
1438 // ====================================================================
1439 // BC_FACE_NEG_Y: Face at j=0 (bottom boundary in j-direction)
1440 // ====================================================================
1441 case BC_FACE_NEG_Y:
1442 if (ys == 0) {
1443 PetscInt j = 0; // Face is at node index j=0
1444
1445 // ---- Part 1: Center calculation (ALL physical nodes) ----
1446 // For mx=26, mz=98: i∈[0,24], k∈[0,96] → 25×97 = 2,425 nodes
1447 for (PetscInt k = zs; k < k_max; k++) {
1448 for (PetscInt i = xs; i < i_max; i++) {
1449 local_sum[0] += coor[k][j][i].x;
1450 local_sum[1] += coor[k][j][i].y;
1451 local_sum[2] += coor[k][j][i].z;
1452 local_n_points++;
1453 }
1454 }
1455
1456 // ---- Part 2: Area calculation (INTERIOR cells only) ----
1457 // For mx=26, mz=98: i∈[1,24], k∈[1,96] → 24×96 = 2,304 cells
1458 for (PetscInt k = lzs; k < lze; k++) {
1459 for (PetscInt i = lxs; i < lxe; i++) {
1460 // nvert[k][j+1][i] = nvert[k][1][i] checks Cell 0
1461 if (nvert[k][j+1][i] < 0.1) {
1462 // Face area = magnitude of eta metric at [k][0][i]
1463 localAreaSum += sqrt(eta[k][j][i].x * eta[k][j][i].x +
1464 eta[k][j][i].y * eta[k][j][i].y +
1465 eta[k][j][i].z * eta[k][j][i].z);
1466 }
1467 }
1468 }
1469 }
1470 break;
1471
1472 // ====================================================================
1473 // BC_FACE_POS_Y: Face at j=JM-1 (top boundary in j-direction)
1474 // ====================================================================
1475 case BC_FACE_POS_Y:
1476 if (ye == my) {
1477 PetscInt j = my - 2; // Last physical node (e.g., j=24 for my=26)
1478
1479 // ---- Part 1: Center calculation (ALL physical nodes) ----
1480 for (PetscInt k = zs; k < k_max; k++) {
1481 for (PetscInt i = xs; i < i_max; i++) {
1482 local_sum[0] += coor[k][j][i].x;
1483 local_sum[1] += coor[k][j][i].y;
1484 local_sum[2] += coor[k][j][i].z;
1485 local_n_points++;
1486 }
1487 }
1488
1489 // ---- Part 2: Area calculation (INTERIOR cells only) ----
1490 for (PetscInt k = lzs; k < lze; k++) {
1491 for (PetscInt i = lxs; i < lxe; i++) {
1492 // nvert[k][j][i] = nvert[k][24][i] checks last cell (Cell 23)
1493 if (nvert[k][j][i] < 0.1) {
1494 // Face area = magnitude of eta metric at [k][24][i]
1495 localAreaSum += sqrt(eta[k][j][i].x * eta[k][j][i].x +
1496 eta[k][j][i].y * eta[k][j][i].y +
1497 eta[k][j][i].z * eta[k][j][i].z);
1498 }
1499 }
1500 }
1501 }
1502 break;
1503
1504 // ====================================================================
1505 // BC_FACE_NEG_Z: Face at k=0 (inlet, bottom boundary in k-direction)
1506 // ====================================================================
1507 case BC_FACE_NEG_Z:
1508 if (zs == 0) {
1509 PetscInt k = 0; // Face is at node index k=0
1510
1511 // ---- Part 1: Center calculation (ALL physical nodes) ----
1512 // For mx=26, my=26: i∈[0,24], j∈[0,24] → 25×25 = 625 nodes
1513 for (PetscInt j = ys; j < j_max; j++) {
1514 for (PetscInt i = xs; i < i_max; i++) {
1515 local_sum[0] += coor[k][j][i].x;
1516 local_sum[1] += coor[k][j][i].y;
1517 local_sum[2] += coor[k][j][i].z;
1518 local_n_points++;
1519 }
1520 }
1521
1522 // ---- Part 2: Area calculation (INTERIOR cells only) ----
1523 // For mx=26, my=26: i∈[1,24], j∈[1,24] → 24×24 = 576 cells
1524 for (PetscInt j = lys; j < lye; j++) {
1525 for (PetscInt i = lxs; i < lxe; i++) {
1526 // nvert[k+1][j][i] = nvert[1][j][i] checks Cell 0
1527 // (Physical Cell 0 in i-j plane, stored at shifted index [1])
1528 if (nvert[k+1][j][i] < 0.1) {
1529 // Face area = magnitude of zet metric at [0][j][i]
1530 localAreaSum += sqrt(zet[k][j][i].x * zet[k][j][i].x +
1531 zet[k][j][i].y * zet[k][j][i].y +
1532 zet[k][j][i].z * zet[k][j][i].z);
1533 }
1534 }
1535 }
1536 }
1537 break;
1538
1539 // ====================================================================
1540 // BC_FACE_POS_Z: Face at k=KM-1 (outlet, top boundary in k-direction)
1541 // ====================================================================
1542 case BC_FACE_POS_Z:
1543 if (ze == mz) {
1544 PetscInt k = mz - 2; // Last physical node (e.g., k=96 for mz=98)
1545
1546 // ---- Part 1: Center calculation (ALL physical nodes) ----
1547 // For mx=26, my=26: i∈[0,24], j∈[0,24] → 25×25 = 625 nodes
1548 for (PetscInt j = ys; j < j_max; j++) {
1549 for (PetscInt i = xs; i < i_max; i++) {
1550 local_sum[0] += coor[k][j][i].x;
1551 local_sum[1] += coor[k][j][i].y;
1552 local_sum[2] += coor[k][j][i].z;
1553 local_n_points++;
1554 }
1555 }
1556
1557 // ---- Part 2: Area calculation (INTERIOR cells only) ----
1558 // For mx=26, my=26: i∈[1,24], j∈[1,24] → 24×24 = 576 cells
1559 for (PetscInt j = lys; j < lye; j++) {
1560 for (PetscInt i = lxs; i < lxe; i++) {
1561 // nvert[k][j][i] = nvert[96][j][i] checks last cell (Cell 95)
1562 // (Physical Cell 95, stored at shifted index [96])
1563 if (nvert[k][j][i] < 0.1) {
1564 // Face area = magnitude of zet metric at [96][j][i]
1565 localAreaSum += sqrt(zet[k][j][i].x * zet[k][j][i].x +
1566 zet[k][j][i].y * zet[k][j][i].y +
1567 zet[k][j][i].z * zet[k][j][i].z);
1568 }
1569 }
1570 }
1571 }
1572 break;
1573
1574 default:
1575 SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE,
1576 "Unknown face_id %d in CalculateFaceCenterAndArea", face_id);
1577 }
1578
1579 // ========================================================================
1580 // Step 4: Restore array access (release pointers)
1581 // ========================================================================
1582 ierr = DMDAVecRestoreArrayRead(user->fda, lCoor, &coor); CHKERRQ(ierr);
1583 ierr = DMDAVecRestoreArrayRead(user->da, user->lNvert, &nvert); CHKERRQ(ierr);
1584 ierr = DMDAVecRestoreArrayRead(user->fda, user->lCsi, &csi); CHKERRQ(ierr);
1585 ierr = DMDAVecRestoreArrayRead(user->fda, user->lEta, &eta); CHKERRQ(ierr);
1586 ierr = DMDAVecRestoreArrayRead(user->fda, user->lZet, &zet); CHKERRQ(ierr);
1587 }
1588 // ========================================================================
1589 // Step 5: Perform MPI reductions to get global sums
1590 // ========================================================================
1591 // Sum coordinate contributions from all ranks
1592 ierr = MPI_Allreduce(local_sum, global_sum, 3, MPI_DOUBLE, MPI_SUM,
1593 PETSC_COMM_WORLD); CHKERRQ(ierr);
1594
1595 // Sum node counts from all ranks
1596 ierr = MPI_Allreduce(&local_n_points, &global_n_points, 1, MPI_COUNT, MPI_SUM,
1597 PETSC_COMM_WORLD); CHKERRQ(ierr);
1598
1599 // Sum area contributions from all ranks
1600 ierr = MPI_Allreduce(&localAreaSum, &globalAreaSum, 1, MPI_DOUBLE, MPI_SUM,
1601 PETSC_COMM_WORLD); CHKERRQ(ierr);
1602
1603 // ========================================================================
1604 // Step 6: Calculate geometric center by averaging coordinates
1605 // ========================================================================
1606 if (global_n_points > 0) {
1607 face_center->x = global_sum[0] / global_n_points;
1608 face_center->y = global_sum[1] / global_n_points;
1609 face_center->z = global_sum[2] / global_n_points;
1611 "Calculated center for Face %s: (x=%.4f, y=%.4f, z=%.4f) from %lld nodes\n",
1612 BCFaceToString(face_id),
1613 face_center->x, face_center->y, face_center->z,
1614 (long long)global_n_points);
1615 } else {
1616 // No nodes found - this should not happen for a valid face
1618 "WARNING: Face %s identified but no grid points found. Center not calculated.\n",
1619 BCFaceToString(face_id));
1620 face_center->x = face_center->y = face_center->z = 0.0;
1621 }
1622
1623 // ========================================================================
1624 // Step 7: Return computed total area
1625 // ========================================================================
1626 *face_area = globalAreaSum;
1628 "Calculated area for Face %s: Area=%.6f\n",
1629 BCFaceToString(face_id), *face_area);
1630
1631 PetscFunctionReturn(0);
1632}
PetscErrorCode SetAnalyticalGridInfo(UserCtx *user)
Sets the grid domain and resolution for analytical solution cases.
PetscErrorCode CanRankServiceFace(const DMDALocalInfo *info, PetscInt IM_nodes_global, PetscInt JM_nodes_global, PetscInt KM_nodes_global, BCFace face_id, PetscBool *can_service_out)
Determines if the current MPI rank owns any part of a specified global face.
Definition Boundaries.c:151
PetscErrorCode DefineAllGridDimensions(SimCtx *simCtx)
Orchestrates the parsing and setting of grid dimensions for all blocks.
Definition grid.c:68
PetscErrorCode CalculateOutletProperties(UserCtx *user)
Calculates the center and area of the primary OUTLET face.
Definition grid.c:1125
#define BBOX_TOLERANCE
Definition grid.c:6
PetscErrorCode BroadcastAllBoundingBoxes(UserCtx *user, BoundingBox **bboxlist)
Broadcasts the bounding box list from rank 0 to all ranks.
Definition grid.c:1016
static PetscErrorCode ParseAndSetGridInputs(UserCtx *user)
Determines the grid source and calls the appropriate parsing routine.
Definition grid.c:22
static PetscReal ComputeStretchedCoord(PetscInt i, PetscInt N, PetscReal L, PetscReal r)
Computes a stretched coordinate along one dimension.
Definition grid.c:510
static PetscErrorCode RestrictCoordinates(UserCtx *coarse_user, UserCtx *fine_user)
Populates coarse grid coordinates by restricting from a fine grid.
Definition grid.c:708
static PetscErrorCode InitializeSingleGridDM(UserCtx *user, UserCtx *coarse_user)
Creates the DMDA objects (da and fda) for a single UserCtx.
Definition grid.c:123
PetscErrorCode CalculateFaceCenterAndArea(UserCtx *user, BCFace face_id, Cmpnts *face_center, PetscReal *face_area)
Calculates the geometric center and total area of a specified boundary face.
Definition grid.c:1276
PetscErrorCode InitializeAllGridDMs(SimCtx *simCtx)
Orchestrates the creation of DMDA objects for every block and multigrid level.
Definition grid.c:260
PetscErrorCode AssignAllGridCoordinates(SimCtx *simCtx)
Orchestrates the assignment of physical coordinates to all DMDA objects.
Definition grid.c:356
static PetscErrorCode GenerateAndSetCoordinates(UserCtx *user)
Programmatically generates and sets grid coordinates based on user parameters.
Definition grid.c:533
static PetscErrorCode ReadAndSetCoordinates(UserCtx *user, FILE *fd)
Reads physical coordinates from a file and populates the DMDA for a specific block.
Definition grid.c:621
static PetscErrorCode SetFinestLevelCoordinates(UserCtx *user)
A router that populates the coordinates for a single finest-level DMDA.
Definition grid.c:420
PetscErrorCode ComputeLocalBoundingBox(UserCtx *user, BoundingBox *localBBox)
Computes the local bounding box of the grid on the current process.
Definition grid.c:800
PetscErrorCode CalculateInletProperties(UserCtx *user)
Calculates the center and area of the primary INLET face.
Definition grid.c:1069
#define __FUNCT__
Definition grid.c:9
PetscErrorCode GatherAllBoundingBoxes(UserCtx *user, BoundingBox **allBBoxes)
Gathers local bounding boxes from all MPI processes to rank 0.
Definition grid.c:948
Public interface for grid, solver, and metric setup routines.
PetscErrorCode ReadGridFile(UserCtx *user)
Sets grid dimensions from a file for a SINGLE block using a one-time read cache.
Definition io.c:177
PetscErrorCode ReadGridGenerationInputs(UserCtx *user)
Parses command-line options for a programmatically generated grid for a SINGLE block.
Definition io.c:77
PetscErrorCode DeterminePeriodicity(SimCtx *simCtx)
Scans all block-specific boundary condition files to determine a globally consistent periodicity for ...
Definition io.c:645
Logging utilities and macros for PETSc-based applications.
PetscBool is_function_allowed(const char *functionName)
Checks if a given function is in the allow-list.
Definition logging.c:157
#define LOG_ALLOW_SYNC(scope, level, fmt,...)
----— DEBUG ---------------------------------------— #define LOG_ALLOW(scope, level,...
Definition logging.h:267
#define LOCAL
Logging scope definitions for controlling message output.
Definition logging.h:45
#define GLOBAL
Scope for global logging across all processes.
Definition logging.h:46
const char * BCFaceToString(BCFace face)
Helper function to convert BCFace enum to a string representation.
Definition logging.c:643
#define LOG_ALLOW(scope, level, fmt,...)
Logging macro that checks both the log level and whether the calling function is in the allowed-funct...
Definition logging.h:200
#define PROFILE_FUNCTION_END
Marks the end of a profiled code block.
Definition logging.h:746
#define LOG(scope, level, fmt,...)
Logging macro for PETSc-based applications with scope control.
Definition logging.h:84
PetscErrorCode LOG_FIELD_MIN_MAX(UserCtx *user, const char *fieldName)
Computes and logs the local and global min/max values of a 3-component vector field.
Definition logging.c:1293
LogLevel get_log_level()
Retrieves the current logging level from the environment variable LOG_LEVEL.
Definition logging.c:39
@ LOG_ERROR
Critical errors that may halt the program.
Definition logging.h:28
@ LOG_TRACE
Very fine-grained tracing information for in-depth debugging.
Definition logging.h:33
@ LOG_INFO
Informational messages about program execution.
Definition logging.h:31
@ LOG_WARNING
Non-critical issues that warrant attention.
Definition logging.h:29
@ LOG_DEBUG
Detailed debugging information.
Definition logging.h:32
@ LOG_VERBOSE
Extremely detailed logs, typically for development use only.
Definition logging.h:34
#define PROFILE_FUNCTION_BEGIN
Marks the beginning of a profiled code block (typically a function).
Definition logging.h:737
PetscInt isc
Definition variables.h:741
@ INLET
Definition variables.h:217
@ OUTLET
Definition variables.h:216
UserCtx * user
Definition variables.h:474
PetscMPIInt rank
Definition variables.h:588
BoundaryFaceConfig boundary_faces[6]
Definition variables.h:746
PetscInt block_number
Definition variables.h:649
PetscInt da_procs_z
Definition variables.h:655
Vec lNvert
Definition variables.h:755
SimCtx * simCtx
Back-pointer to the master simulation context.
Definition variables.h:731
PetscReal CMy_c
Definition variables.h:642
PetscReal Min_X
Definition variables.h:738
PetscInt ksc
Definition variables.h:741
PetscInt KM
Definition variables.h:737
Vec lZet
Definition variables.h:776
UserMG usermg
Definition variables.h:698
PetscInt da_procs_y
Definition variables.h:655
Cmpnts max_coords
Maximum x, y, z coordinates of the bounding box.
Definition variables.h:156
PetscInt _this
Definition variables.h:741
PetscReal ry
Definition variables.h:742
PetscInt k_periodic
Definition variables.h:650
PetscInt jsc
Definition variables.h:741
PetscReal Max_Y
Definition variables.h:738
Cmpnts min_coords
Minimum x, y, z coordinates of the bounding box.
Definition variables.h:155
PetscScalar x
Definition variables.h:101
char grid_file[PETSC_MAX_PATH_LEN]
Definition variables.h:654
PetscReal rz
Definition variables.h:742
Vec lCsi
Definition variables.h:776
PetscReal CMz_c
Definition variables.h:642
PetscBool generate_grid
Definition variables.h:651
PetscInt thislevel
Definition variables.h:792
char eulerianSource[PETSC_MAX_PATH_LEN]
Definition variables.h:604
PetscScalar z
Definition variables.h:101
PetscInt JM
Definition variables.h:737
PetscInt mglevels
Definition variables.h:481
PetscReal Min_Z
Definition variables.h:738
PetscInt da_procs_x
Definition variables.h:655
PetscReal Max_X
Definition variables.h:738
PetscReal Min_Y
Definition variables.h:738
PetscInt i_periodic
Definition variables.h:650
PetscReal AreaOutSum
Definition variables.h:663
DMDALocalInfo info
Definition variables.h:735
PetscScalar y
Definition variables.h:101
@ EXEC_MODE_SOLVER
Definition variables.h:558
@ EXEC_MODE_POSTPROCESSOR
Definition variables.h:559
PetscInt IM
Definition variables.h:737
Vec lEta
Definition variables.h:776
MGCtx * mgctx
Definition variables.h:484
BCType mathematical_type
Definition variables.h:295
PetscReal rx
Definition variables.h:742
ExecutionMode exec_mode
Definition variables.h:603
BoundingBox bbox
Definition variables.h:739
PetscReal Max_Z
Definition variables.h:738
PetscReal AreaInSum
Definition variables.h:663
PetscReal CMx_c
Definition variables.h:642
BCFace
Identifies the six logical faces of a structured computational block.
Definition variables.h:203
@ BC_FACE_NEG_X
Definition variables.h:204
@ BC_FACE_POS_Z
Definition variables.h:206
@ BC_FACE_POS_Y
Definition variables.h:205
@ BC_FACE_NEG_Z
Definition variables.h:206
@ BC_FACE_POS_X
Definition variables.h:204
@ BC_FACE_NEG_Y
Definition variables.h:205
PetscInt j_periodic
Definition variables.h:650
Defines a 3D axis-aligned bounding box.
Definition variables.h:154
A 3D point or vector with PetscScalar components.
Definition variables.h:100
Context for Multigrid operations.
Definition variables.h:473
The master context for the entire simulation.
Definition variables.h:585
User-defined context containing data specific to a single computational grid level.
Definition variables.h:728
User-level context for managing the entire multigrid hierarchy.
Definition variables.h:480