PICurv 0.1.0
A Parallel Particle-In-Cell Solver for Curvilinear LES
Loading...
Searching...
No Matches
grid.c File Reference
#include "grid.h"
#include "logging.h"
Include dependency graph for grid.c:

Go to the source code of this file.

Macros

#define BBOX_TOLERANCE   1e-6
 
#define __FUNCT__   "ParseAndSetGridInputs"
 
#define __FUNCT__   "DefineAllGridDimensions"
 
#define __FUNCT__   "InitializeSingleGridDM"
 
#define __FUNCT__   "InitializeAllGridDMs"
 
#define __FUNCT__   "AssignAllGridCoordinates"
 
#define __FUNCT__   "SetFinestLevelCoordinates"
 
#define __FUNCT__   "GenerateAndSetCoordinates"
 
#define __FUNCT__   "ReadAndSetCoordinates"
 
#define __FUNCT__   "RestrictCoordinates"
 
#define __FUNCT__   "ComputeLocalBoundingBox"
 
#define __FUNCT__   "GatherAllBoundingBoxes"
 
#define __FUNCT__   "BroadcastAllBoundingBoxes"
 
#define __FUNCT__   "CalculateInletProperties"
 
#define __FUNCT__   "CalculateOutletProperties"
 
#define __FUNCT__   "CalculateFaceCenterAndArea"
 

Functions

static PetscErrorCode ParseAndSetGridInputs (UserCtx *user)
 Determines the grid source and calls the appropriate parsing routine.
 
PetscErrorCode DefineAllGridDimensions (SimCtx *simCtx)
 Orchestrates the parsing and setting of grid dimensions for all blocks.
 
static PetscErrorCode InitializeSingleGridDM (UserCtx *user, UserCtx *coarse_user)
 Creates the DMDA objects (da and fda) for a single UserCtx.
 
PetscErrorCode InitializeAllGridDMs (SimCtx *simCtx)
 Orchestrates the creation of DMDA objects for every block and multigrid level.
 
static PetscErrorCode SetFinestLevelCoordinates (UserCtx *user)
 A router that populates the coordinates for a single finest-level DMDA.
 
static PetscErrorCode GenerateAndSetCoordinates (UserCtx *user)
 Programmatically generates and sets grid coordinates based on user parameters.
 
static PetscErrorCode ReadAndSetCoordinates (UserCtx *user, FILE *fd)
 Reads physical coordinates from a file and populates the DMDA for a specific block.
 
static PetscErrorCode RestrictCoordinates (UserCtx *coarse_user, UserCtx *fine_user)
 Populates coarse grid coordinates by restricting from a fine grid.
 
PetscErrorCode AssignAllGridCoordinates (SimCtx *simCtx)
 Orchestrates the assignment of physical coordinates to all DMDA objects.
 
static PetscReal ComputeStretchedCoord (PetscInt i, PetscInt N, PetscReal L, PetscReal r)
 Computes a stretched coordinate along one dimension.
 
PetscErrorCode ComputeLocalBoundingBox (UserCtx *user, BoundingBox *localBBox)
 Computes the local bounding box of the grid on the current process.
 
PetscErrorCode GatherAllBoundingBoxes (UserCtx *user, BoundingBox **allBBoxes)
 Gathers local bounding boxes from all MPI processes to rank 0.
 
PetscErrorCode BroadcastAllBoundingBoxes (UserCtx *user, BoundingBox **bboxlist)
 Broadcasts the bounding box list from rank 0 to all ranks.
 
PetscErrorCode CalculateInletProperties (UserCtx *user)
 Calculates the center and area of the primary INLET face.
 
PetscErrorCode CalculateOutletProperties (UserCtx *user)
 Calculates the center and area of the primary OUTLET face.
 
PetscErrorCode CalculateFaceCenterAndArea (UserCtx *user, BCFace face_id, Cmpnts *face_center, PetscReal *face_area)
 Calculates the geometric center and total area of a specified boundary face.
 

Macro Definition Documentation

◆ BBOX_TOLERANCE

#define BBOX_TOLERANCE   1e-6

Definition at line 6 of file grid.c.

◆ __FUNCT__ [1/15]

#define __FUNCT__   "ParseAndSetGridInputs"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [2/15]

#define __FUNCT__   "DefineAllGridDimensions"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [3/15]

#define __FUNCT__   "InitializeSingleGridDM"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [4/15]

#define __FUNCT__   "InitializeAllGridDMs"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [5/15]

#define __FUNCT__   "AssignAllGridCoordinates"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [6/15]

#define __FUNCT__   "SetFinestLevelCoordinates"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [7/15]

#define __FUNCT__   "GenerateAndSetCoordinates"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [8/15]

#define __FUNCT__   "ReadAndSetCoordinates"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [9/15]

#define __FUNCT__   "RestrictCoordinates"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [10/15]

#define __FUNCT__   "ComputeLocalBoundingBox"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [11/15]

#define __FUNCT__   "GatherAllBoundingBoxes"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [12/15]

#define __FUNCT__   "BroadcastAllBoundingBoxes"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [13/15]

#define __FUNCT__   "CalculateInletProperties"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [14/15]

#define __FUNCT__   "CalculateOutletProperties"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [15/15]

#define __FUNCT__   "CalculateFaceCenterAndArea"

Definition at line 9 of file grid.c.

Function Documentation

◆ ParseAndSetGridInputs()

static PetscErrorCode ParseAndSetGridInputs ( UserCtx * user)
static

Determines the grid source and calls the appropriate parsing routine.

This function acts as a router. It checks the global simCtx->generate_grid flag (accessed via the user->simCtx back-pointer) to decide whether to call the parser for a programmatically generated grid or for a grid defined in a file.

Parameters
user	Pointer to the UserCtx for a specific block. The function will populate the geometric fields within this struct.
Returns
PetscErrorCode 0 on success, or a PETSc error code on failure.

Definition at line 22 of file grid.c.

23{
24 PetscErrorCode ierr;
25 SimCtx *simCtx = user->simCtx; // Get the global context via the back-pointer
26
27 PetscFunctionBeginUser;
28
30 if(strcmp(simCtx->eulerianSource,"analytical")==0){
31 ierr = SetAnalyticalGridInfo(user); CHKERRQ(ierr);
32 }else{ // eulerianSource is "solve" or "load"
33 if (simCtx->generate_grid) {
34 LOG_ALLOW_SYNC(GLOBAL, LOG_DEBUG, "Rank %d: Block %d is programmatically generated. Calling generation parser.\n", simCtx->rank, user->_this);
35 ierr = ReadGridGenerationInputs(user); CHKERRQ(ierr);
36 } else {
37 LOG_ALLOW_SYNC(GLOBAL, LOG_DEBUG, "Rank %d: Block %d is file-based. Calling file parser.\n", simCtx->rank, user->_this);
38 ierr = ReadGridFile(user); CHKERRQ(ierr);
39 }
40 }
41
43
44 PetscFunctionReturn(0);
45}
PetscErrorCode SetAnalyticalGridInfo(UserCtx *user)
Sets the grid domain and resolution for analytical solution cases.
PetscErrorCode ReadGridFile(UserCtx *user)
Sets grid dimensions from a file for a SINGLE block using a one-time read cache.
Definition io.c:177
PetscErrorCode ReadGridGenerationInputs(UserCtx *user)
Parses command-line options for a programmatically generated grid for a SINGLE block.
Definition io.c:77
#define LOG_ALLOW_SYNC(scope, level, fmt,...)
----— DEBUG ---------------------------------------— #define LOG_ALLOW(scope, level,...
Definition logging.h:267
#define GLOBAL
Scope for global logging across all processes.
Definition logging.h:46
#define PROFILE_FUNCTION_END
Marks the end of a profiled code block.
Definition logging.h:746
@ LOG_DEBUG
Detailed debugging information.
Definition logging.h:32
#define PROFILE_FUNCTION_BEGIN
Marks the beginning of a profiled code block (typically a function).
Definition logging.h:737
PetscMPIInt rank
Definition variables.h:588
SimCtx * simCtx
Back-pointer to the master simulation context.
Definition variables.h:731
PetscInt _this
Definition variables.h:741
PetscBool generate_grid
Definition variables.h:651
char eulerianSource[PETSC_MAX_PATH_LEN]
Definition variables.h:604
The master context for the entire simulation.
Definition variables.h:585
Here is the call graph for this function:
Here is the caller graph for this function:

◆ DefineAllGridDimensions()

PetscErrorCode DefineAllGridDimensions ( SimCtx * simCtx)

Orchestrates the parsing and setting of grid dimensions for all blocks.

This function serves as the high-level entry point for defining the geometric properties of each grid block in the simulation. It iterates through every block defined by simCtx->block_number.

For each block, it performs two key actions:

  1. It explicitly sets the block's index (_this) in the corresponding UserCtx struct for the finest multigrid level. This makes the context "self-aware".
  2. It calls a helper function (ParseAndSetGridInputs) to handle the detailed work of parsing options or files to populate the rest of the geometric properties for that specific block (e.g., IM, Min_X, rx).
Parameters
simCtx	The master SimCtx, which contains the number of blocks and the UserCtx hierarchy to be configured.
Returns
PetscErrorCode 0 on success, or a PETSc error code on failure.

Definition at line 68 of file grid.c.

69{
70 PetscErrorCode ierr;
71 PetscInt nblk = simCtx->block_number;
72 UserCtx *finest_users;
73
74 PetscFunctionBeginUser;
75
77
78 if (simCtx->usermg.mglevels == 0) {
79 SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_ARG_WRONGSTATE, "MG levels not set. Cannot get finest_users.");
80 }
81 // Get the UserCtx array for the finest grid level
82 finest_users = simCtx->usermg.mgctx[simCtx->usermg.mglevels - 1].user;
83
84 LOG_ALLOW(GLOBAL, LOG_INFO, "Defining grid dimensions for %d blocks...\n", nblk);
85
86 // Loop over each block to configure its grid dimensions and geometry.
87 for (PetscInt bi = 0; bi < nblk; bi++) {
88 LOG_ALLOW_SYNC(GLOBAL, LOG_DEBUG, "Rank %d: --- Configuring Geometry for Block %d ---\n", simCtx->rank, bi);
89
90 // Before calling any helpers, set the block index in the context.
91 // This makes the UserCtx self-aware of which block it represents.
92 LOG_ALLOW(GLOBAL,LOG_DEBUG,"finest_users->_this = %d, bi = %d\n",finest_users[bi]._this,bi);
93 //finest_user[bi]._this = bi;
94
95 // Call the helper function for this specific block. It can now derive
96 // all necessary information from the UserCtx pointer it receives.
97 ierr = ParseAndSetGridInputs(&finest_users[bi]); CHKERRQ(ierr);
98 }
99
101
102 PetscFunctionReturn(0);
103}
static PetscErrorCode ParseAndSetGridInputs(UserCtx *user)
Determines the grid source and calls the appropriate parsing routine.
Definition grid.c:22
#define LOG_ALLOW(scope, level, fmt,...)
Logging macro that checks both the log level and whether the calling function is in the allowed-funct...
Definition logging.h:200
@ LOG_INFO
Informational messages about program execution.
Definition logging.h:31
UserCtx * user
Definition variables.h:474
PetscInt block_number
Definition variables.h:649
UserMG usermg
Definition variables.h:698
PetscInt mglevels
Definition variables.h:481
MGCtx * mgctx
Definition variables.h:484
User-defined context containing data specific to a single computational grid level.
Definition variables.h:728
Here is the call graph for this function:
Here is the caller graph for this function:

◆ InitializeSingleGridDM()

static PetscErrorCode InitializeSingleGridDM ( UserCtx * user,
UserCtx * coarse_user 
)
static

Creates the DMDA objects (da and fda) for a single UserCtx.

This function is a direct adaptation of the core logic in MGDACreate. It creates the scalar (da) and vector (fda) DMs for a single grid level.

If a coarse_user context is provided, it performs the critical processor alignment calculation from the legacy code. This ensures the new (fine) DM aligns with the coarse DM for multigrid efficiency. If coarse_user is NULL, it creates the DM with a default PETSc decomposition, intended for the coarsest grid level.

Parameters
user	The UserCtx for which the DMs will be created. Its IM, JM, KM fields must be pre-populated.
coarse_user	The UserCtx of the next-coarser grid level, or NULL if user is the coarsest level.
Returns
PetscErrorCode 0 on success, or a PETSc error code on failure.

Definition at line 123 of file grid.c.

124{
125 PetscErrorCode ierr;
126 SimCtx *simCtx = user->simCtx;
127
128 DMBoundaryType xperiod = (simCtx->i_periodic) ? DM_BOUNDARY_PERIODIC : DM_BOUNDARY_NONE;
129 DMBoundaryType yperiod = (simCtx->j_periodic) ? DM_BOUNDARY_PERIODIC : DM_BOUNDARY_NONE;
130 DMBoundaryType zperiod = (simCtx->k_periodic) ? DM_BOUNDARY_PERIODIC : DM_BOUNDARY_NONE;
131 PetscInt stencil_width = (simCtx->i_periodic || simCtx->j_periodic || simCtx->k_periodic) ? 3:2; // Stencil width is 2 in the legacy code
132
133 PetscInt *lx = NULL, *ly = NULL, *lz = NULL;
134 PetscInt m, n, p;
135
136 PetscFunctionBeginUser;
137
139
140 if (coarse_user) {
141 // --- This is a FINE grid; it must be aligned with the COARSE grid ---
142 LOG_ALLOW_SYNC(LOCAL, LOG_DEBUG, "Rank %d: [Aligning DM] for block %d level %d (size %dx%dx%d) with level %d\n", simCtx->rank, user->_this, user->thislevel, user->IM, user->JM, user->KM, coarse_user->thislevel);
143
144 DMDAGetInfo(coarse_user->da, NULL, NULL, NULL, NULL, &m, &n, &p, NULL, NULL, NULL, NULL, NULL, NULL);
145 LOG_ALLOW_SYNC(LOCAL, LOG_TRACE, "Rank %d: Coarse grid processor decomposition is %d x %d x %d\n", simCtx->rank, m, n, p);
146
147 // This is the core logic from MGDACreate to ensure processor alignment.
148 PetscInt *lx_contrib, *ly_contrib, *lz_contrib;
149 ierr = PetscMalloc3(m, &lx_contrib, n, &ly_contrib, p, &lz_contrib); CHKERRQ(ierr);
150 ierr = PetscMemzero(lx_contrib, m * sizeof(PetscInt)); CHKERRQ(ierr);
151 ierr = PetscMemzero(ly_contrib, n * sizeof(PetscInt)); CHKERRQ(ierr);
152 ierr = PetscMemzero(lz_contrib, p * sizeof(PetscInt)); CHKERRQ(ierr);
153
154 DMDALocalInfo info;
155 DMDAGetLocalInfo(coarse_user->da, &info);
156 PetscInt xs = info.xs, xe = info.xs + info.xm, mx = info.mx;
157 PetscInt ys = info.ys, ye = info.ys + info.ym, my = info.my;
158 PetscInt zs = info.zs, ze = info.zs + info.zm, mz = info.mz;
159
160 PetscMPIInt rank;
161 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
162 PetscInt proc_i = rank % m;
163 PetscInt proc_j = (rank / m) % n;
164 PetscInt proc_k = rank / (m * n);
165
166 // --- X-Direction Logic (Identical to MGDACreate) ---
167 if (user->isc) lx_contrib[proc_i] = (xe - xs);
168 else {
169 if (m == 1) lx_contrib[0] = user->IM + 1;
170 else if (xs == 0) lx_contrib[0] = 2 * xe - 1;
171 else if (xe == mx) lx_contrib[proc_i] = user->IM + 1 - (2 * xs - 1);
172 else lx_contrib[proc_i] = (xe - xs) * 2;
173 }
174
175 // --- Y-Direction Logic (Identical to MGDACreate) ---
176 if (user->jsc) ly_contrib[proc_j] = (ye - ys);
177 else {
178 if (n == 1) ly_contrib[0] = user->JM + 1;
179 else if (ys == 0) ly_contrib[0] = 2 * ye - 1;
180 else if (ye == my) ly_contrib[proc_j] = user->JM + 1 - (2 * ys - 1);
181 else ly_contrib[proc_j] = (ye - ys) * 2;
182 }
183
184 // --- Z-Direction Logic (Identical to MGDACreate) ---
185 if (user->ksc) lz_contrib[proc_k] = (ze - zs);
186 else {
187 if (p == 1) lz_contrib[0] = user->KM + 1;
188 else if (zs == 0) lz_contrib[0] = 2 * ze - 1;
189 else if (ze == mz) lz_contrib[proc_k] = user->KM + 1 - (2 * zs - 1);
190 else lz_contrib[proc_k] = (ze - zs) * 2;
191 }
192 LOG_ALLOW_SYNC(LOCAL, LOG_VERBOSE, "Rank %d: Calculated this rank's node contribution to fine grid: lx=%d, ly=%d, lz=%d\n", simCtx->rank, lx_contrib[proc_i], ly_contrib[proc_j], lz_contrib[proc_k]);
193
194 // Allocate the final distribution arrays and Allreduce to get the global distribution
195 ierr = PetscMalloc3(m, &lx, n, &ly, p, &lz); CHKERRQ(ierr);
196 ierr = MPI_Allreduce(lx_contrib, lx, m, MPIU_INT, MPI_MAX, PETSC_COMM_WORLD); CHKERRQ(ierr);
197 ierr = MPI_Allreduce(ly_contrib, ly, n, MPIU_INT, MPI_MAX, PETSC_COMM_WORLD); CHKERRQ(ierr);
198 ierr = MPI_Allreduce(lz_contrib, lz, p, MPIU_INT, MPI_MAX, PETSC_COMM_WORLD); CHKERRQ(ierr);
199
200 ierr = PetscFree3(lx_contrib, ly_contrib, lz_contrib); CHKERRQ(ierr);
201
202 } else {
203 // --- CASE 2: This is the COARSEST grid; use default or user-specified decomposition ---
204 if(simCtx->exec_mode == EXEC_MODE_SOLVER){
205
206 LOG_ALLOW_SYNC(LOCAL, LOG_DEBUG, "Rank %d: Creating coarsest DM for block %d level %d (size %dx%dx%d)\n", simCtx->rank, user->_this, user->thislevel, user->IM, user->JM, user->KM);
207 m = simCtx->da_procs_x;
208 n = simCtx->da_procs_y;
209 p = simCtx->da_procs_z;
210
211 } else if(simCtx->exec_mode == EXEC_MODE_POSTPROCESSOR){
212
213 LOG_ALLOW(GLOBAL,LOG_ERROR,"Currently Only Single Rank is supported. \n");
214
215 m = n = p = PETSC_DECIDE;
216
217 }
218 // lx, ly, lz are NULL, so DMDACreate3d will use the m,n,p values.
219 }
220
221 // --- Create the DMDA for the current UserCtx ---
222 LOG_ALLOW_SYNC(LOCAL, LOG_DEBUG, "Rank %d: Calling DMDACreate3d...\n", simCtx->rank);
223 ierr = DMDACreate3d(PETSC_COMM_WORLD, xperiod, yperiod, zperiod, DMDA_STENCIL_BOX,
224 user->IM + 1, user->JM + 1, user->KM + 1,
225 m, n, p,
226 1, stencil_width, lx, ly, lz, &user->da); CHKERRQ(ierr);
227
228 if (coarse_user) {
229 ierr = PetscFree3(lx, ly, lz); CHKERRQ(ierr);
230 }
231
232 // --- Standard DM setup applicable to all levels ---
233 ierr = DMSetUp(user->da); CHKERRQ(ierr);
234 ierr = DMGetCoordinateDM(user->da, &user->fda); CHKERRQ(ierr);
235 ierr = DMDASetUniformCoordinates(user->da, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0); CHKERRQ(ierr);
236 ierr = DMDAGetLocalInfo(user->da, &user->info); CHKERRQ(ierr);
237 LOG_ALLOW_SYNC(LOCAL, LOG_DEBUG, "Rank %d: DM creation for block %d level %d complete.\n", simCtx->rank, user->_this, user->thislevel);
238
240
241 PetscFunctionReturn(0);
242}
#define LOCAL
Logging scope definitions for controlling message output.
Definition logging.h:45
@ LOG_ERROR
Critical errors that may halt the program.
Definition logging.h:28
@ LOG_TRACE
Very fine-grained tracing information for in-depth debugging.
Definition logging.h:33
@ LOG_VERBOSE
Extremely detailed logs, typically for development use only.
Definition logging.h:34
PetscInt isc
Definition variables.h:741
PetscInt da_procs_z
Definition variables.h:655
PetscInt ksc
Definition variables.h:741
PetscInt KM
Definition variables.h:737
PetscInt da_procs_y
Definition variables.h:655
PetscInt k_periodic
Definition variables.h:650
PetscInt jsc
Definition variables.h:741
PetscInt thislevel
Definition variables.h:792
PetscInt JM
Definition variables.h:737
PetscInt da_procs_x
Definition variables.h:655
PetscInt i_periodic
Definition variables.h:650
DMDALocalInfo info
Definition variables.h:735
@ EXEC_MODE_SOLVER
Definition variables.h:558
@ EXEC_MODE_POSTPROCESSOR
Definition variables.h:559
PetscInt IM
Definition variables.h:737
ExecutionMode exec_mode
Definition variables.h:603
PetscInt j_periodic
Definition variables.h:650
Here is the caller graph for this function:

◆ InitializeAllGridDMs()

PetscErrorCode InitializeAllGridDMs ( SimCtx * simCtx)

Orchestrates the creation of DMDA objects for every block and multigrid level.

This function systematically builds the entire DMDA hierarchy. It first calculates the dimensions (IM, JM, KM) for all coarse grids based on the finest grid's dimensions and the semi-coarsening flags. It then iterates from the coarsest to the finest level, calling a powerful helper function (InitializeSingleGridDM) to create the DMs for each block, ensuring that finer grids are properly aligned with their coarser parents for multigrid efficiency.

Parameters
simCtx	The master SimCtx, containing the configured UserCtx hierarchy.
Returns
PetscErrorCode 0 on success, or a PETSc error code on failure.

Definition at line 260 of file grid.c.

261{
262 PetscErrorCode ierr;
263 UserMG *usermg = &simCtx->usermg;
264 MGCtx *mgctx = usermg->mgctx;
265 PetscInt nblk = simCtx->block_number;
266
267 PetscFunctionBeginUser;
268
270
271 LOG_ALLOW(GLOBAL,LOG_INFO, "Pre-scanning BCs to identify domain periodicity.\n");
272 ierr = DeterminePeriodicity(simCtx); CHKERRQ(ierr);
273
274 LOG_ALLOW(GLOBAL, LOG_INFO, "Creating DMDA objects for all levels and blocks...\n");
275
276 // --- Part 1: Calculate Coarse Grid Dimensions & VALIDATE ---
277 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Calculating and validating coarse grid dimensions...\n");
278 for (PetscInt level = usermg->mglevels - 2; level >= 0; level--) {
279 for (PetscInt bi = 0; bi < nblk; bi++) {
280 UserCtx *user_coarse = &mgctx[level].user[bi];
281 UserCtx *user_fine = &mgctx[level + 1].user[bi];
282
283 user_coarse->IM = user_fine->isc ? user_fine->IM : (user_fine->IM + 1) / 2;
284 user_coarse->JM = user_fine->jsc ? user_fine->JM : (user_fine->JM + 1) / 2;
285 user_coarse->KM = user_fine->ksc ? user_fine->KM : (user_fine->KM + 1) / 2;
286
287 LOG_ALLOW_SYNC(LOCAL, LOG_TRACE, "Rank %d: Block %d, Level %d dims calculated: %d x %d x %d\n",
288 simCtx->rank, bi, level, user_coarse->IM, user_coarse->JM, user_coarse->KM);
289
290 // Validation check from legacy MGDACreate to ensure coarsening is possible
291 PetscInt check_i = user_coarse->IM * (2 - user_coarse->isc) - (user_fine->IM + 1 - user_coarse->isc);
292 PetscInt check_j = user_coarse->JM * (2 - user_coarse->jsc) - (user_fine->JM + 1 - user_coarse->jsc);
293 PetscInt check_k = user_coarse->KM * (2 - user_coarse->ksc) - (user_fine->KM + 1 - user_coarse->ksc);
294
295 if (check_i + check_j + check_k != 0) {
296 // SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_ARG_WRONG,
297 // "Grid at level %d, block %d cannot be coarsened from %dx%dx%d to %dx%dx%d with the given semi-coarsening flags. Check grid dimensions.",
298 // level, bi, user_fine->IM, user_fine->JM, user_fine->KM, user_coarse->IM, user_coarse->JM, user_coarse->KM);
299 LOG(GLOBAL,LOG_WARNING,"WARNING: Grid at level %d, block %d can't be consistently coarsened further.\n", level, bi);
300 }
301 }
302 }
303
304 // --- Part 2: Create DMs from Coarse to Fine for each Block ---
305 for (PetscInt bi = 0; bi < nblk; bi++) {
306 LOG_ALLOW_SYNC(GLOBAL, LOG_DEBUG, "--- Creating DMs for Block %d ---\n", bi);
307
308 // Create the coarsest level DM first (passing NULL for the coarse_user)
309 ierr = InitializeSingleGridDM(&mgctx[0].user[bi], NULL); CHKERRQ(ierr);
310
311 // Create finer level DMs, passing the next-coarser context for alignment
312 for (PetscInt level = 1; level < usermg->mglevels; level++) {
313 ierr = InitializeSingleGridDM(&mgctx[level].user[bi], &mgctx[level-1].user[bi]); CHKERRQ(ierr);
314 }
315 }
316
317 // --- Optional: View the finest DM for debugging verification ---
318 if (get_log_level() >= LOG_DEBUG) {
319 LOG_ALLOW_SYNC(GLOBAL, LOG_INFO, "--- Viewing Finest DMDA (Level %d, Block 0) ---\n", usermg->mglevels - 1);
320 ierr = DMView(mgctx[usermg->mglevels - 1].user[0].da, PETSC_VIEWER_STDOUT_WORLD); CHKERRQ(ierr);
321 }
322
323 LOG_ALLOW(GLOBAL, LOG_INFO, "DMDA object creation complete.\n");
324
326
327 PetscFunctionReturn(0);
328}
static PetscErrorCode InitializeSingleGridDM(UserCtx *user, UserCtx *coarse_user)
Creates the DMDA objects (da and fda) for a single UserCtx.
Definition grid.c:123
PetscErrorCode DeterminePeriodicity(SimCtx *simCtx)
Scans all block-specific boundary condition files to determine a globally consistent periodicity for ...
Definition io.c:645
#define LOG(scope, level, fmt,...)
Logging macro for PETSc-based applications with scope control.
Definition logging.h:84
LogLevel get_log_level()
Retrieves the current logging level from the environment variable LOG_LEVEL.
Definition logging.c:39
@ LOG_WARNING
Non-critical issues that warrant attention.
Definition logging.h:29
Context for Multigrid operations.
Definition variables.h:473
User-level context for managing the entire multigrid hierarchy.
Definition variables.h:480
Here is the call graph for this function:
Here is the caller graph for this function:

◆ SetFinestLevelCoordinates()

static PetscErrorCode SetFinestLevelCoordinates ( UserCtx * user)
static

A router that populates the coordinates for a single finest-level DMDA.

This function orchestrates the coordinate setting for one block. It checks the global generate_grid flag and calls the appropriate helper for either programmatic generation or reading from a file.

After the local coordinate vector is populated by a helper, this function performs the necessary DMLocalToGlobal and DMGlobalToLocal scatters to ensure that the ghost node coordinate values are correctly communicated and updated across all MPI ranks.

Parameters
user	The UserCtx for a specific block on the finest level.
Returns
PetscErrorCode 0 on success, or a PETSc error code on failure.

Definition at line 420 of file grid.c.

421{
422 PetscErrorCode ierr;
423 SimCtx *simCtx = user->simCtx;
424
425 PetscFunctionBeginUser;
426
428
429 LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Setting finest level coordinates for block %d...\n", simCtx->rank, user->_this);
430
431 if (simCtx->generate_grid) {
432 ierr = GenerateAndSetCoordinates(user); CHKERRQ(ierr);
433 } else {
434
435 FILE *grid_file_handle = NULL;
436 // Only Rank 0 opens the file.
437 if (simCtx->rank == 0) {
438 grid_file_handle = fopen(simCtx->grid_file, "r");
439 if (!grid_file_handle) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_FILE_OPEN, "Cannot open file: %s", simCtx->grid_file);
440
441 // Now, on Rank 0, we skip the entire header section once.
442 // This is the logic from your modern code's AssignGridCoordinates.
443 PetscInt headerLines = simCtx->block_number + 2; // 1 for nblk, plus one for each block's dims
444 char dummy_buffer[2048];
445 for (PetscInt s = 0; s < headerLines; ++s) {
446 if (!fgets(dummy_buffer, sizeof(dummy_buffer), grid_file_handle)) {
447 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_FILE_READ, "Unexpected EOF while skipping grid header");
448 }
449 }
450 LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank 0: Skipped %d header lines, now at coordinate data.\n", headerLines);
451 }
452
453 // We now call the coordinate reader, passing the file handle.
454 // It's responsible for reading its block's data and broadcasting.
455 ierr = ReadAndSetCoordinates(user, grid_file_handle); CHKERRQ(ierr);
456
457 // Only Rank 0, which opened the file, should close it.
458 if (simCtx->rank == 0) {
459 fclose(grid_file_handle);
460 }
461 }
462
463 // After populating the local coordinate vector, we must perform a
464 // Local-to-Global and then Global-to-Local scatter to correctly
465 // populate the ghost node coordinates across process boundaries.
466 Vec gCoor, lCoor;
467 ierr = DMGetCoordinates(user->da, &gCoor); CHKERRQ(ierr);
468 ierr = DMGetCoordinatesLocal(user->da, &lCoor); CHKERRQ(ierr);
469
470 LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Scattering coordinates to update ghost nodes for block %d...\n", simCtx->rank, user->_this);
471 ierr = DMLocalToGlobalBegin(user->fda, lCoor, INSERT_VALUES, gCoor); CHKERRQ(ierr);
472 ierr = DMLocalToGlobalEnd(user->fda, lCoor, INSERT_VALUES, gCoor); CHKERRQ(ierr);
473
474 ierr = DMGlobalToLocalBegin(user->fda, gCoor, INSERT_VALUES, lCoor); CHKERRQ(ierr);
475 ierr = DMGlobalToLocalEnd(user->fda, gCoor, INSERT_VALUES, lCoor); CHKERRQ(ierr);
476
478
479 PetscFunctionReturn(0);
480}
static PetscErrorCode GenerateAndSetCoordinates(UserCtx *user)
Programmatically generates and sets grid coordinates based on user parameters.
Definition grid.c:533
static PetscErrorCode ReadAndSetCoordinates(UserCtx *user, FILE *fd)
Reads physical coordinates from a file and populates the DMDA for a specific block.
Definition grid.c:621
char grid_file[PETSC_MAX_PATH_LEN]
Definition variables.h:654
Here is the call graph for this function:
Here is the caller graph for this function:

◆ GenerateAndSetCoordinates()

static PetscErrorCode GenerateAndSetCoordinates ( UserCtx * user)
static

Programmatically generates and sets grid coordinates based on user parameters.

This function populates the local coordinate vector of the provided UserCtx using the geometric properties (Min_X, Max_X, rx, etc.) that were parsed from command-line options. It supports non-linear grid stretching.

Parameters
user	The UserCtx for a specific block.
Returns
PetscErrorCode 0 on success, or a PETSc error code on failure.

DEBUG: This verifies the presence of a last "unphysical" layer of coordinates.

Definition at line 533 of file grid.c.

534{
535 PetscErrorCode ierr;
536 DMDALocalInfo info;
537 Cmpnts ***coor;
538 Vec lCoor;
539
540 PetscFunctionBeginUser;
541
543
544 LOG_ALLOW_SYNC(LOCAL, LOG_DEBUG, "Rank %d: Generating coordinates for block %d...\n", user->simCtx->rank, user->_this);
545
546 ierr = DMDAGetLocalInfo(user->da, &info); CHKERRQ(ierr);
547 ierr = DMGetCoordinatesLocal(user->da, &lCoor); CHKERRQ(ierr);
548
549 PetscInt xs = info.xs, xe = info.xs + info.xm;
550 PetscInt ys = info.ys, ye = info.ys + info.ym;
551 PetscInt zs = info.zs, ze = info.zs + info.zm;
552
553 LOG_ALLOW_SYNC(LOCAL, LOG_TRACE, "Rank %d: Local Info for block %d - X range - [%d,%d], Y range - [%d,%d], Z range - [%d,%d]\n",
554 user->simCtx->rank, user->_this, xs, xe, ys, ye, zs, ze);
555
556 LOG_ALLOW_SYNC(LOCAL, LOG_TRACE, "Rank %d: Local Info for block %d - X domain - [%.4f,%.4f], Y range - [%.4f,%.4f], Z range - [%.4f,%.4f]\n",
557 user->simCtx->rank, user->_this, user->Min_X,user->Max_X,user->Min_Y,user->Max_Y,user->Min_Z,user->Max_Z);
558
559 ierr = VecSet(lCoor, 0.0); CHKERRQ(ierr);
560 ierr = DMDAVecGetArray(user->fda, lCoor, &coor); CHKERRQ(ierr);
561
562 PetscReal Lx = user->Max_X - user->Min_X;
563 PetscReal Ly = user->Max_Y - user->Min_Y;
564 PetscReal Lz = user->Max_Z - user->Min_Z;
565
566 // Loop over the local nodes, including ghost nodes, owned by this process.
567 for (PetscInt k = zs; k < ze; k++) {
568 for (PetscInt j = ys; j < ye; j++) {
569 for (PetscInt i = xs; i < xe; i++) {
570 if(k < user->KM && j < user->JM && i < user->IM){
571 coor[k][j][i].x = user->Min_X + ComputeStretchedCoord(i, user->IM, Lx, user->rx);
572 coor[k][j][i].y = user->Min_Y + ComputeStretchedCoord(j, user->JM, Ly, user->ry);
573 coor[k][j][i].z = user->Min_Z + ComputeStretchedCoord(k, user->KM, Lz, user->rz);
574 }
575 }
576 }
577 }
578
579 /// DEBUG: This verifies the presence of a last "unphysical" layer of coordinates.
580 /*
581 PetscInt KM = user->KM;
582 for (PetscInt j = ys; j < ye; j++){
583 for(PetscInt i = xs; i < xe; i++){
584 LOG_ALLOW(GLOBAL,LOG_DEBUG,"coor[%d][%d][%d].(x,y,z) = %le,%le,%le",KM,j,i,coor[KM][j][i].x,coor[KM][j][i].y,coor[KM][j][i].z);
585 }
586 }
587 */
588
589
590
591 ierr = DMDAVecRestoreArray(user->fda, lCoor, &coor); CHKERRQ(ierr);
592
594
595 PetscFunctionReturn(0);
596}
static PetscReal ComputeStretchedCoord(PetscInt i, PetscInt N, PetscReal L, PetscReal r)
Computes a stretched coordinate along one dimension.
Definition grid.c:510
PetscReal Min_X
Definition variables.h:738
PetscReal ry
Definition variables.h:742
PetscReal Max_Y
Definition variables.h:738
PetscScalar x
Definition variables.h:101
PetscReal rz
Definition variables.h:742
PetscScalar z
Definition variables.h:101
PetscReal Min_Z
Definition variables.h:738
PetscReal Max_X
Definition variables.h:738
PetscReal Min_Y
Definition variables.h:738
PetscScalar y
Definition variables.h:101
PetscReal rx
Definition variables.h:742
PetscReal Max_Z
Definition variables.h:738
A 3D point or vector with PetscScalar components.
Definition variables.h:100
Here is the call graph for this function:
Here is the caller graph for this function:

◆ ReadAndSetCoordinates()

static PetscErrorCode ReadAndSetCoordinates ( UserCtx user,
FILE *  fd 
)
static

Reads physical coordinates from a file and populates the DMDA for a specific block.

This function handles the collective read of an interleaved (X Y Z per line) multi-block grid file. It assumes the file header (nblk, dimensions) has already been processed by ReadGridFile.

The process is robust for parallel execution:

  1. Rank 0 opens the grid file.
  2. It intelligently skips past the header section and the coordinate data for all blocks preceding the current block being processed (user->_this).
  3. It then reads the entire coordinate data for the current block into a single contiguous buffer gc.
  4. This global buffer gc is broadcast to all other MPI ranks.
  5. Each rank then loops through its local (owned + ghost) node indices, calculates the corresponding index in the global gc array, and copies the (x,y,z) coordinates into its local PETSc coordinate vector.
Parameters
user — The UserCtx for a specific block. Its _this field must be set, and its IM, JM, KM fields must be correctly pre-populated.
fd — Open file handle to the grid file, positioned at this block's coordinate data. Significant on rank 0 only, where it must not be NULL.
Returns
PetscErrorCode 0 on success, or a PETSc error code on failure.

Definition at line 621 of file grid.c.

622{
623 PetscErrorCode ierr;
624 SimCtx *simCtx = user->simCtx;
625 PetscMPIInt rank = simCtx->rank;
626 PetscInt block_index = user->_this;
627 PetscInt IM = user->IM, JM = user->JM, KM = user->KM;
628 DMDALocalInfo info;
629 Cmpnts ***coor;
630 Vec lCoor;
631 PetscReal *gc = NULL; // Global coordinate buffer, allocated on all ranks
632
633 PetscFunctionBeginUser;
634
636
637 LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Reading interleaved coordinates from file for block %d...\n",
638 simCtx->rank, block_index);
639
640 // 1. Allocate the buffer on ALL ranks to receive the broadcast data.
641 // PetscInt n_nodes = (IM + 1) * (JM + 1) * (KM + 1);
642 PetscInt n_nodes = (IM) * (JM) * (KM);
643 ierr = PetscMalloc1(3 * n_nodes, &gc); CHKERRQ(ierr);
644
645 // 2. Only Rank 0 opens the file and reads the data.
646 if (rank == 0) {
647 if (!fd) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_FILE_OPEN, "Received a NULL file handle.\n");
648
649 // Read the coordinate data for the CURRENT block.
650 for (PetscInt k = 0; k < KM; k++) {
651 for (PetscInt j = 0; j < JM; j++) {
652 for (PetscInt i = 0; i < IM; i++) {
653 PetscInt base_index = 3 * ((k * (JM) + j) * (IM) + i);
654 if (fscanf(fd, "%le %le %le\n", &gc[base_index], &gc[base_index + 1], &gc[base_index + 2]) != 3) {
655 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_FILE_READ, "Error reading coordinates for node (i,j,k)=(%d,%d,%d) in block %d", i, j, k, block_index);
656 }
657 }
658 }
659 }
660
661 }
662
663 // 3. Broadcast the coordinate block for the current block to all other processes.
// Collective call: every rank (including rank 0) must reach this MPI_Bcast.
664 ierr = MPI_Bcast(gc, 3 * n_nodes, MPIU_REAL, 0, PETSC_COMM_WORLD); CHKERRQ(ierr);
665
666 // 4. Each rank populates its local portion of the coordinate vector.
667 ierr = DMDAGetLocalInfo(user->da, &info); CHKERRQ(ierr);
668 ierr = DMGetCoordinatesLocal(user->da, &lCoor); CHKERRQ(ierr);
669 ierr = DMDAVecGetArray(user->fda, lCoor, &coor); CHKERRQ(ierr);
670
// Guard against the DMDA's extra "unphysical" layer: only indices strictly
// below IM/JM/KM have data in the gc buffer (which was sized IM*JM*KM).
671 for (PetscInt k = info.zs; k < info.zs + info.zm; k++) {
672 for (PetscInt j = info.ys; j < info.ys + info.ym; j++) {
673 for (PetscInt i = info.xs; i < info.xs + info.xm; i++) {
674 if(k< KM && j < JM && i < IM){
675 PetscInt base_idx = 3 * ((k * (JM) + j) * (IM) + i);
676 coor[k][j][i].x = gc[base_idx];
677 coor[k][j][i].y = gc[base_idx + 1];
678 coor[k][j][i].z = gc[base_idx + 2];
679 }
680 }
681 }
682 }
683
684 // 5. Clean up and restore.
685 ierr = DMDAVecRestoreArray(user->fda, lCoor, &coor); CHKERRQ(ierr);
686 ierr = PetscFree(gc); CHKERRQ(ierr);
687
689
690 PetscFunctionReturn(0);
691}
Here is the caller graph for this function:

◆ RestrictCoordinates()

static PetscErrorCode RestrictCoordinates ( UserCtx coarse_user,
UserCtx fine_user 
)
static

Populates coarse grid coordinates by restricting from a fine grid.

This function is a direct adaptation of the coordinate restriction loop from the legacy MG_Initial function. It ensures that the physical location of a coarse grid node is identical to its corresponding parent node on the fine grid. The mapping from coarse to fine index (i -> ih) is determined by the semi-coarsening flags (isc, jsc, ksc) stored in the UserCtx.

Parameters
coarse_userThe UserCtx for the coarse grid (destination).
fine_userThe UserCtx for the fine grid (source).
Returns
PetscErrorCode

Definition at line 708 of file grid.c.

709{
710 PetscErrorCode ierr;
711 Vec c_lCoor, f_lCoor; // Coarse and Fine local coordinate vectors
712 Cmpnts ***c_coor;
713 const Cmpnts ***f_coor; // Use const for read-only access
714 DMDALocalInfo c_info;
715 PetscInt ih, jh, kh; // Fine-grid indices corresponding to coarse-grid i,j,k
716
717 PetscFunctionBeginUser;
718
720
721 LOG_ALLOW_SYNC(LOCAL, LOG_DEBUG, "Rank %d: Restricting coords from level %d to level %d for block %d\n",
722 fine_user->simCtx->rank, fine_user->thislevel, coarse_user->thislevel, coarse_user->_this);
723
724 ierr = DMDAGetLocalInfo(coarse_user->da, &c_info); CHKERRQ(ierr);
725
726 ierr = DMGetCoordinatesLocal(coarse_user->da, &c_lCoor); CHKERRQ(ierr);
727 ierr = DMGetCoordinatesLocal(fine_user->da, &f_lCoor); CHKERRQ(ierr);
728
729 ierr = DMDAVecGetArray(coarse_user->fda, c_lCoor, &c_coor); CHKERRQ(ierr);
730 ierr = DMDAVecGetArrayRead(fine_user->fda, f_lCoor, &f_coor); CHKERRQ(ierr);
731
732 // Get the local owned range of the coarse grid.
733 PetscInt xs = c_info.xs, xe = c_info.xs + c_info.xm;
734 PetscInt ys = c_info.ys, ye = c_info.ys + c_info.ym;
735 PetscInt zs = c_info.zs, ze = c_info.zs + c_info.zm;
736
737 // Get the global dimensions of the coarse grid.
738 PetscInt mx = c_info.mx, my = c_info.my, mz = c_info.mz;
739
740 // If this process owns the maximum boundary node, contract the loop by one
741 // to prevent the index doubling `2*i` from going out of bounds.
742 // This is also ensuring we do not manipulate the unphysical layer of coors present in the finest level.
743 if (xe == mx) xe--;
744 if (ye == my) ye--;
745 if (ze == mz) ze--;
746
747 for (PetscInt k = zs; k < ze; k++) {
748 for (PetscInt j = ys; j < ye; j++) {
749 for (PetscInt i = xs; i < xe; i++) {
750 // Determine the corresponding parent node index on the FINE grid,
751 // respecting the semi-coarsening flags of the FINE grid's UserCtx.
752 ih = coarse_user->isc ? i : 2 * i;
753 jh = coarse_user->jsc ? j : 2 * j;
754 kh = coarse_user->ksc ? k : 2 * k;
755
756 // LOG_ALLOW(GLOBAL,LOG_DEBUG," [kh][ih][jh] = %d,%d,%d - k,j,i = %d,%d,%d.\n",kh,jh,ih,k,j,i);
757
// Struct copy: the coarse node inherits the exact physical position of
// its parent fine-grid node, so both levels describe the same geometry.
758 c_coor[k][j][i] = f_coor[kh][jh][ih];
759 }
760 }
761 }
762
763 ierr = DMDAVecRestoreArray(coarse_user->fda, c_lCoor, &c_coor); CHKERRQ(ierr);
764 ierr = DMDAVecRestoreArrayRead(fine_user->fda, f_lCoor, &f_coor); CHKERRQ(ierr);
765
766 // After populating the local portion, scatter to update ghost regions.
// Round trip: local -> global pushes each rank's owned values into the global
// coordinate vector; global -> local then refreshes every rank's ghost nodes.
767 Vec c_gCoor;
768 ierr = DMGetCoordinates(coarse_user->da, &c_gCoor); CHKERRQ(ierr);
769 ierr = DMLocalToGlobalBegin(coarse_user->fda, c_lCoor, INSERT_VALUES, c_gCoor); CHKERRQ(ierr);
770 ierr = DMLocalToGlobalEnd(coarse_user->fda, c_lCoor, INSERT_VALUES, c_gCoor); CHKERRQ(ierr);
771 ierr = DMGlobalToLocalBegin(coarse_user->fda, c_gCoor, INSERT_VALUES, c_lCoor); CHKERRQ(ierr);
772 ierr = DMGlobalToLocalEnd(coarse_user->fda, c_gCoor, INSERT_VALUES, c_lCoor); CHKERRQ(ierr);
773
775
776 PetscFunctionReturn(0);
777}
Here is the caller graph for this function:

◆ AssignAllGridCoordinates()

PetscErrorCode AssignAllGridCoordinates ( SimCtx simCtx)

Orchestrates the assignment of physical coordinates to all DMDA objects.

This function manages the entire process of populating the coordinate vectors for every DMDA across all multigrid levels and blocks. It follows a two-part strategy that is essential for multigrid methods:

  1. Populate Finest Level: It first loops through each block and calls a helper (SetFinestLevelCoordinates) to set the physical coordinates for the highest-resolution grid (the finest multigrid level).
  2. Restrict to Coarser Levels: It then iterates downwards from the finest level, calling a helper (RestrictCoordinates) to copy the coordinate values from the fine grid nodes to their corresponding parent nodes on the coarser grids. This ensures all levels represent the exact same geometry.
Parameters
simCtxThe master SimCtx, containing the configured UserCtx hierarchy.
Returns
PetscErrorCode 0 on success, or a PETSc error code on failure.

Definition at line 356 of file grid.c.

357{
358 PetscErrorCode ierr;
359 UserMG *usermg = &simCtx->usermg;
360 PetscInt nblk = simCtx->block_number;
361
362 PetscFunctionBeginUser;
363
365
366 LOG_ALLOW(GLOBAL, LOG_INFO, "Assigning physical coordinates to all grid DMs...\n");
367
368 // --- Part 1: Populate the Finest Grid Level ---
369 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Setting coordinates for the finest grid level (%d)...\n", usermg->mglevels - 1);
370 for (PetscInt bi = 0; bi < nblk; bi++) {
371 UserCtx *fine_user = &usermg->mgctx[usermg->mglevels - 1].user[bi];
372 ierr = SetFinestLevelCoordinates(fine_user); CHKERRQ(ierr);
373 LOG_ALLOW(GLOBAL,LOG_TRACE,"The Finest level coordinates for block %d have been set.\n",bi);
// NOTE(review): line 374 is elided in this listing; it presumably opens a
// conditional (e.g. an is_function_allowed() guard) that the brace below closes
// — confirm against the full grid.c source.
375 ierr = LOG_FIELD_MIN_MAX(fine_user,"Coordinates");
376 }
377 }
378 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Finest level coordinates have been set for all blocks.\n");
379
380 // --- Part 2: Restrict Coordinates to Coarser Levels ---
// Walk DOWN the hierarchy (finest-1 .. 0): each coarse level copies node
// positions from the level above it, so all levels share the same geometry.
381 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Restricting coordinates to coarser grid levels...\n");
382 for (PetscInt level = usermg->mglevels - 2; level >= 0; level--) {
383 for (PetscInt bi = 0; bi < nblk; bi++) {
384 UserCtx *coarse_user = &usermg->mgctx[level].user[bi];
385 UserCtx *fine_user = &usermg->mgctx[level + 1].user[bi];
386 ierr = RestrictCoordinates(coarse_user, fine_user); CHKERRQ(ierr);
387
388 LOG_ALLOW(GLOBAL,LOG_TRACE,"Coordinates restricted to block %d level %d.\n",bi,level);
// NOTE(review): line 389 (elided) likely mirrors the guard around the
// min/max logging in Part 1 above — confirm in the full source.
390 ierr = LOG_FIELD_MIN_MAX(coarse_user,"Coordinates");
391 }
392 }
393 }
394
395 LOG_ALLOW(GLOBAL, LOG_INFO, "Physical coordinates assigned to all grid levels and blocks.\n");
396
398
399 PetscFunctionReturn(0);
400}
static PetscErrorCode RestrictCoordinates(UserCtx *coarse_user, UserCtx *fine_user)
Populates coarse grid coordinates by restricting from a fine grid.
Definition grid.c:708
static PetscErrorCode SetFinestLevelCoordinates(UserCtx *user)
A router that populates the coordinates for a single finest-level DMDA.
Definition grid.c:420
#define __FUNCT__
Definition grid.c:9
PetscBool is_function_allowed(const char *functionName)
Checks if a given function is in the allow-list.
Definition logging.c:157
PetscErrorCode LOG_FIELD_MIN_MAX(UserCtx *user, const char *fieldName)
Computes and logs the local and global min/max values of a 3-component vector field.
Definition logging.c:1293
Here is the call graph for this function:
Here is the caller graph for this function:

◆ ComputeStretchedCoord()

static PetscReal ComputeStretchedCoord ( PetscInt  i,
PetscInt  N,
PetscReal  L,
PetscReal  r 
)
inlinestatic

Computes a stretched coordinate along one dimension.

This function computes a coordinate based on a geometric stretching ratio. If the ratio (r) is 1.0, a uniform distribution is used: x(i) = L * (i/(N-1))

If r != 1.0, a geometric stretching is applied: x(i) = L * [ (r^(i/(N-1)) - 1 ) / (r - 1) ]

Here:

  • i : The current index along the dimension.
  • N : The total number of divisions along that dimension.
  • L : The length of the domain along that dimension.
  • r : The stretching ratio. r > 1.0 increases the spacing geometrically away from the start (clustering points near the start), whereas 0 < r < 1.0 decreases the spacing away from the start (clustering points near the end).
Parameters
[in] i — Current index (0 <= i <= N-1).
[in]NNumber of segments along the dimension.
[in]LTotal length of the domain.
[in]rStretching ratio.
Returns
PetscReal The computed coordinate at index i.
Note
This function does not return a PetscErrorCode because it does not allocate memory or call PETSc routines that can fail. It is just a helper calculation function.

Definition at line 510 of file grid.c.

511{
512 if (N <=1) return 0.0;
513 PetscReal fraction = (PetscReal)i / ((PetscReal)N - 1.0);
514 if (PetscAbsReal(r - 1.0) < 1.0e-9) { // Use a tolerance for float comparison
515 return L * fraction;
516 } else {
517 return L * (PetscPowReal(r, fraction) - 1.0) / (r - 1.0);
518 }
519}
Here is the caller graph for this function:

◆ ComputeLocalBoundingBox()

PetscErrorCode ComputeLocalBoundingBox ( UserCtx user,
BoundingBox localBBox 
)

Computes the local bounding box of the grid on the current process.

This function calculates the minimum and maximum coordinates (x, y, z) of the local grid points owned by the current MPI process. It iterates over the local portion of the grid, examines each grid point's coordinates, and updates the minimum and maximum values accordingly.

The computed bounding box is stored in the provided localBBox structure, and the user->bbox field is also updated with this bounding box for consistency within the user context.

Parameters
[in]userPointer to the user-defined context containing grid information. This context must be properly initialized before calling this function.
[out]localBBoxPointer to the BoundingBox structure where the computed local bounding box will be stored. The structure should be allocated by the caller.
Returns
PetscErrorCode Returns 0 on success, non-zero on failure.

Definition at line 800 of file grid.c.

801{
802 PetscErrorCode ierr;
803 PetscInt i, j, k;
804 PetscMPIInt rank;
805 PetscInt xs, ys, zs, xe, ye, ze;
806 DMDALocalInfo info;
807 Vec coordinates;
808 Cmpnts ***coordArray;
809 Cmpnts minCoords, maxCoords;
810
811 PetscFunctionBeginUser;
812
814
815 // Start of function execution
816 LOG_ALLOW(GLOBAL, LOG_INFO, "Entering the function.\n");
817
818 // Validate input Pointers
// NOTE(review): these early raw 'return's bypass PetscFunctionReturn() and any
// epilogue at the end of the function — confirm this is intended.
819 if (!user) {
820 LOG_ALLOW(LOCAL, LOG_ERROR, "Input 'user' Pointer is NULL.\n");
821 return PETSC_ERR_ARG_NULL;
822 }
823 if (!localBBox) {
824 LOG_ALLOW(LOCAL, LOG_ERROR, "Output 'localBBox' Pointer is NULL.\n");
825 return PETSC_ERR_ARG_NULL;
826 }
827
828 // Get MPI rank
829 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
830
831 // Get the local coordinates vector from the DMDA
832 ierr = DMGetCoordinatesLocal(user->da, &coordinates);
833 if (ierr) {
834 LOG_ALLOW(LOCAL, LOG_ERROR, "Error getting local coordinates vector.\n");
835 return ierr;
836 }
837
838 if (!coordinates) {
839 LOG_ALLOW(LOCAL, LOG_ERROR, "Coordinates vector is NULL.\n");
840 return PETSC_ERR_ARG_NULL;
841 }
842
843 // Access the coordinate array for reading
844 ierr = DMDAVecGetArrayRead(user->fda, coordinates, &coordArray);
845 if (ierr) {
846 LOG_ALLOW(LOCAL, LOG_ERROR, "Error accessing coordinate array.\n");
847 return ierr;
848 }
849
850 // Get the local grid information (indices and sizes)
851 ierr = DMDAGetLocalInfo(user->da, &info);
852 if (ierr) {
853 LOG_ALLOW(LOCAL, LOG_ERROR, "Error getting DMDA local info.\n");
854 return ierr;
855 }
856
857
// Deliberately use the GHOSTED range (gxs/gxm, ...) so the bounding box covers
// every node this rank can see, not just the nodes it owns; the owned-only
// variant is kept commented out below for reference.
858 xs = info.gxs; xe = xs + info.gxm;
859 ys = info.gys; ye = ys + info.gym;
860 zs = info.gzs; ze = zs + info.gzm;
861
862 /*
863 xs = info.xs; xe = xs + info.xm;
864 ys = info.ys; ye = ys + info.ym;
865 zs = info.zs; ze = zs + info.zm;
866 */
867
868 // Initialize min and max coordinates with extreme values
869 minCoords.x = minCoords.y = minCoords.z = PETSC_MAX_REAL;
870 maxCoords.x = maxCoords.y = maxCoords.z = PETSC_MIN_REAL;
871
872 LOG_ALLOW(LOCAL, LOG_TRACE, "[Rank %d] Grid indices (Including Ghosts): xs=%d, xe=%d, ys=%d, ye=%d, zs=%d, ze=%d.\n",rank, xs, xe, ys, ye, zs, ze);
873
874 // Iterate over the local grid to find min and max coordinates
875 for (k = zs; k < ze; k++) {
876 for (j = ys; j < ye; j++) {
877 for (i = xs; i < xe; i++) {
878 // Only consider nodes within the physical domain.
879 if(i < user->IM && j < user->JM && k < user->KM){
880 Cmpnts coord = coordArray[k][j][i];
881
882 // Update min and max coordinates
883 if (coord.x < minCoords.x) minCoords.x = coord.x;
884 if (coord.y < minCoords.y) minCoords.y = coord.y;
885 if (coord.z < minCoords.z) minCoords.z = coord.z;
886
887 if (coord.x > maxCoords.x) maxCoords.x = coord.x;
888 if (coord.y > maxCoords.y) maxCoords.y = coord.y;
889 if (coord.z > maxCoords.z) maxCoords.z = coord.z;
890 }
891 }
892 }
893 }
894
895
896 // Add tolerance to bboxes.
// The symmetric BBOX_TOLERANCE pad guards later point-in-box tests against
// floating-point round-off at the box faces.
897 minCoords.x = minCoords.x - BBOX_TOLERANCE;
898 minCoords.y = minCoords.y - BBOX_TOLERANCE;
899 minCoords.z = minCoords.z - BBOX_TOLERANCE;
900
901 maxCoords.x = maxCoords.x + BBOX_TOLERANCE;
902 maxCoords.y = maxCoords.y + BBOX_TOLERANCE;
903 maxCoords.z = maxCoords.z + BBOX_TOLERANCE;
904
905 LOG_ALLOW(LOCAL,LOG_DEBUG," Tolerance added to the limits: %.8e .\n",(PetscReal)BBOX_TOLERANCE);
906
907 // Log the computed min and max coordinates
908 LOG_ALLOW(LOCAL, LOG_INFO,"[Rank %d] Bounding Box Ranges = X[%.6f, %.6f], Y[%.6f,%.6f], Z[%.6f, %.6f].\n",rank,minCoords.x, maxCoords.x,minCoords.y, maxCoords.y, minCoords.z, maxCoords.z);
909
910
911
912 // Restore the coordinate array
913 ierr = DMDAVecRestoreArrayRead(user->fda, coordinates, &coordArray);
914 if (ierr) {
915 LOG_ALLOW(LOCAL, LOG_ERROR, "Error restoring coordinate array.\n");
916 return ierr;
917 }
918
919 // Set the local bounding box
920 localBBox->min_coords = minCoords;
921 localBBox->max_coords = maxCoords;
922
923 // Update the bounding box inside the UserCtx for consistency
924 user->bbox = *localBBox;
925
926 LOG_ALLOW(GLOBAL, LOG_INFO, "Exiting the function successfully.\n");
927
929
930 PetscFunctionReturn(0);
931}
#define BBOX_TOLERANCE
Definition grid.c:6
Cmpnts max_coords
Maximum x, y, z coordinates of the bounding box.
Definition variables.h:156
Cmpnts min_coords
Minimum x, y, z coordinates of the bounding box.
Definition variables.h:155
BoundingBox bbox
Definition variables.h:739
Here is the caller graph for this function:

◆ GatherAllBoundingBoxes()

PetscErrorCode GatherAllBoundingBoxes ( UserCtx user,
BoundingBox **  allBBoxes 
)

Gathers local bounding boxes from all MPI processes to rank 0.

Each rank computes its local bounding box, then all ranks participate in an MPI_Gather to send their BoundingBox to rank 0. Rank 0 allocates the result array and returns it via allBBoxes.

Parameters
[in]userPointer to UserCtx (must be non-NULL).
[out] allBBoxes — On rank 0, receives a malloc'd array containing one BoundingBox per MPI rank (array length equal to the communicator size). On other ranks, set to NULL.
Returns
PetscErrorCode

Definition at line 948 of file grid.c.

949{
950 PetscErrorCode ierr;
951 PetscMPIInt rank, size;
952 BoundingBox *bboxArray = NULL;
953 BoundingBox localBBox;
954
955 PetscFunctionBeginUser;
956
958
959 /* Validate */
960 if (!user || !allBBoxes) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL,
961 "GatherAllBoundingBoxes: NULL pointer");
962
963 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRMPI(ierr);
964 ierr = MPI_Comm_size(PETSC_COMM_WORLD, &size); CHKERRMPI(ierr);
965
966 /* Compute local bbox */
967 ierr = ComputeLocalBoundingBox(user, &localBBox); CHKERRQ(ierr);
968
969 /* Ensure everyone is synchronized before the gather */
970 MPI_Barrier(PETSC_COMM_WORLD);
/* NOTE(review): line 971 (elided in this listing) is presumably the opening of
 * a LOG_ALLOW(...) call whose format/argument continuation appears below. */
972 "Rank %d: about to MPI_Gather(localBBox)\n", rank);
973
974 /* Allocate on root */
/* Ownership: on rank 0 the caller receives this malloc'd array via *allBBoxes
 * and is responsible for free()ing it. */
975 if (rank == 0) {
976 bboxArray = (BoundingBox*)malloc(size * sizeof(BoundingBox));
977 if (!bboxArray) SETERRABORT(PETSC_COMM_WORLD, PETSC_ERR_MEM,
978 "GatherAllBoundingBoxes: malloc failed");
979 }
980
981 /* Collective: every rank must call */
/* BoundingBox is shipped as raw bytes (MPI_BYTE); this assumes an identical
 * struct layout on every rank — true for a homogeneous MPI job. */
982 ierr = MPI_Gather(&localBBox, sizeof(BoundingBox), MPI_BYTE,
983 bboxArray, sizeof(BoundingBox), MPI_BYTE,
984 0, PETSC_COMM_WORLD);
985 CHKERRMPI(ierr);
986
987 MPI_Barrier(PETSC_COMM_WORLD);
/* NOTE(review): line 988 (elided) is presumably the matching LOG_ALLOW opener. */
989 "Rank %d: completed MPI_Gather(localBBox)\n", rank);
990
991 /* Return result */
992 if (rank == 0) {
993 *allBBoxes = bboxArray;
994 } else {
995 *allBBoxes = NULL;
996 }
997
999
1000 PetscFunctionReturn(0);
1001}
PetscErrorCode ComputeLocalBoundingBox(UserCtx *user, BoundingBox *localBBox)
Computes the local bounding box of the grid on the current process.
Definition grid.c:800
Defines a 3D axis-aligned bounding box.
Definition variables.h:154
Here is the call graph for this function:
Here is the caller graph for this function:

◆ BroadcastAllBoundingBoxes()

PetscErrorCode BroadcastAllBoundingBoxes ( UserCtx user,
BoundingBox **  bboxlist 
)

Broadcasts the bounding box list from rank 0 to all ranks.

Broadcasts the bounding box information collected on rank 0 to all other ranks.

After GatherAllBoundingBoxes, rank 0 has an array of size boxes. This routine makes sure every rank ends up with its own malloc’d copy.

Parameters
[in]userPointer to UserCtx (unused here, but kept for signature).
[in,out]bboxlistOn entry: rank 0’s array; on exit: every rank’s array.
Returns
PetscErrorCode

Definition at line 1016 of file grid.c.

1017{
1018 PetscErrorCode ierr;
1019 PetscMPIInt rank, size;
1020
1021 PetscFunctionBeginUser;
1022
1024
1025 if (!bboxlist) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL,
1026 "BroadcastAllBoundingBoxes: NULL pointer");
1027
1028 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRMPI(ierr);
1029 ierr = MPI_Comm_size(PETSC_COMM_WORLD, &size); CHKERRMPI(ierr);
1030
1031 /* Non-root ranks must allocate before the Bcast */
/* Rank 0 is assumed to already hold a valid *bboxlist (from
 * GatherAllBoundingBoxes); each other rank ends up with its own malloc'd copy,
 * which the caller must eventually free(). */
1032 if (rank != 0) {
1033 *bboxlist = (BoundingBox*)malloc(size * sizeof(BoundingBox));
1034 if (!*bboxlist) SETERRABORT(PETSC_COMM_WORLD, PETSC_ERR_MEM,
1035 "BroadcastAllBoundingBoxes: malloc failed");
1036 }
1037
1038 MPI_Barrier(PETSC_COMM_WORLD);
/* NOTE(review): line 1039 (elided in this listing) is presumably the opening
 * of a LOG_ALLOW(...) call whose continuation appears below. */
1040 "Rank %d: about to MPI_Bcast(%d boxes)\n", rank, size);
1041
1042 /* Collective: every rank must call */
/* Raw-byte broadcast of the whole array; assumes identical BoundingBox layout
 * on all ranks (homogeneous MPI job). */
1043 ierr = MPI_Bcast(*bboxlist, size * sizeof(BoundingBox), MPI_BYTE,
1044 0, PETSC_COMM_WORLD);
1045 CHKERRMPI(ierr);
1046
1047 MPI_Barrier(PETSC_COMM_WORLD);
/* NOTE(review): line 1048 (elided) is presumably the matching LOG_ALLOW opener. */
1049 "Rank %d: completed MPI_Bcast(%d boxes)\n", rank, size);
1050
1051
1053
1054 PetscFunctionReturn(0);
1055}
Here is the caller graph for this function:

◆ CalculateInletProperties()

PetscErrorCode CalculateInletProperties ( UserCtx user)

Calculates the center and area of the primary INLET face.

This function identifies the primary INLET face from the boundary face configurations, computes its geometric center and total area using a generic utility function, and stores these results in the simulation context.

Parameters
userPointer to the UserCtx containing boundary face information.
Returns
PetscErrorCode

Definition at line 1069 of file grid.c.

1070{
1071 PetscErrorCode ierr;
1072 BCFace inlet_face_id = -1;
1073 PetscBool inlet_found = PETSC_FALSE;
1074
1075 PetscFunctionBeginUser;
1077
1078 // 1. Identify the primary inlet face from the configuration
1079 for (int i = 0; i < 6; i++) {
1080 if (user->boundary_faces[i].mathematical_type == INLET) {
1081 inlet_face_id = user->boundary_faces[i].face_id;
1082 inlet_found = PETSC_TRUE;
1083 break; // Use the first inlet found
1084 }
1085 }
1086
1087 if (!inlet_found) {
1088 LOG_ALLOW(GLOBAL, LOG_INFO, "No INLET face found. Skipping inlet center calculation.\n");
1090 PetscFunctionReturn(0);
1091 }
1092
1093 Cmpnts inlet_center;
1094 PetscReal inlet_area;
1095
1096 // 2. Call the generic utility to compute the center and area of any face.
1097 ierr = CalculateFaceCenterAndArea(user,inlet_face_id,&inlet_center,&inlet_area); CHKERRQ(ierr);
1098
1099 // 3. Store results in the SimCtx
1100 user->simCtx->CMx_c = inlet_center.x;
1101 user->simCtx->CMy_c = inlet_center.y;
1102 user->simCtx->CMz_c = inlet_center.z;
1103 user->simCtx->AreaInSum = inlet_area;
1104
// BUGFIX: the format string consumes a %d (rank) plus four %.6f values, but the
// rank argument was missing from the vararg list — pass it explicitly so the
// printf-style expansion is well-defined.
1106 "Rank[%d] Inlet Center: (%.6f, %.6f, %.6f), Area: %.6f\n",
1107 user->simCtx->rank, inlet_center.x, inlet_center.y, inlet_center.z, inlet_area);
1108
1110 PetscFunctionReturn(0);
1111
1112}
PetscErrorCode CalculateFaceCenterAndArea(UserCtx *user, BCFace face_id, Cmpnts *face_center, PetscReal *face_area)
Calculates the geometric center and total area of a specified boundary face.
Definition grid.c:1276
@ INLET
Definition variables.h:217
BoundaryFaceConfig boundary_faces[6]
Definition variables.h:746
PetscReal CMy_c
Definition variables.h:642
PetscReal CMz_c
Definition variables.h:642
BCType mathematical_type
Definition variables.h:295
PetscReal AreaInSum
Definition variables.h:663
PetscReal CMx_c
Definition variables.h:642
BCFace
Identifies the six logical faces of a structured computational block.
Definition variables.h:203
Here is the call graph for this function:
Here is the caller graph for this function:

◆ CalculateOutletProperties()

PetscErrorCode CalculateOutletProperties ( UserCtx user)

Calculates the center and area of the primary OUTLET face.

This function identifies the primary OUTLET face from the boundary face configurations, computes its geometric center and total area using a generic utility function, and stores these results in the simulation context.

Parameters
userPointer to the UserCtx containing boundary face information.
Returns
PetscErrorCode

Definition at line 1125 of file grid.c.

1126{
1127 PetscErrorCode ierr;
1128 BCFace outlet_face_id = -1;
1129 PetscBool outlet_found = PETSC_FALSE;
1130 PetscFunctionBeginUser;
1132 // 1. Identify the primary outlet face from the configuration
1133 for (int i = 0; i < 6; i++) {
1134 if (user->boundary_faces[i].mathematical_type == OUTLET) {
1135 outlet_face_id = user->boundary_faces[i].face_id;
1136 outlet_found = PETSC_TRUE;
1137 break; // Use the first outlet found
1138 }
1139 }
1140 if (!outlet_found) {
1141 LOG_ALLOW(GLOBAL, LOG_INFO, "No OUTLET face found. Skipping outlet center calculation.\n");
1143 PetscFunctionReturn(0);
1144 }
1145 PetscReal outlet_area;
1146 Cmpnts outlet_center;
1147 // 2. Call the generic utility to compute the center and area of any face
1148 ierr = CalculateFaceCenterAndArea(user,outlet_face_id,&outlet_center,&outlet_area); CHKERRQ(ierr);
1149 // 3. Store results in the SimCtx
// Only the total area is persisted; the computed center is logged but not
// stored in SimCtx — NOTE(review): confirm this asymmetry with the inlet
// handling is intentional.
1150 user->simCtx->AreaOutSum = outlet_area;
1151
1153 "Outlet Center: (%.6f, %.6f, %.6f), Area: %.6f\n",
1154 outlet_center.x, outlet_center.y, outlet_center.z, outlet_area);
1155
1157 PetscFunctionReturn(0);
1158}
@ OUTLET
Definition variables.h:216
PetscReal AreaOutSum
Definition variables.h:663
Here is the call graph for this function:
Here is the caller graph for this function:

◆ CalculateFaceCenterAndArea()

PetscErrorCode CalculateFaceCenterAndArea ( UserCtx user,
BCFace  face_id,
Cmpnts face_center,
PetscReal *  face_area 
)

Calculates the geometric center and total area of a specified boundary face.

This function computes two key properties of a boundary face in the computational domain:

  1. Geometric Center: The average (x,y,z) position of all physical nodes on the face
  2. Total Area: The sum of face area vector magnitudes from all non-solid cells adjacent to the face

Indexing Architecture

The solver uses different indexing conventions for different field types:

Node-Centered Fields (Coordinates):

  • Direct indexing: Node n stored at coor[n]
  • For mx=26: Physical nodes [0-24], Dummy at [25]
  • For mz=98: Physical nodes [0-96], Dummy at [97]

Face-Centered Fields (Metrics: csi, eta, zet):

  • Direct indexing: Face n stored at csi/eta/zet[n]
  • For mx=26: Physical faces [0-24], Dummy at [25]
  • For mz=98: Physical faces [0-96], Dummy at [97]
  • Face at index k bounds cells k-1 and k

Cell-Centered Fields (nvert):

  • Shifted indexing: Physical cell c stored at nvert[c+1]
  • For mx=26 (25 cells): Cell 0→nvert[1], Cell 23→nvert[24]
  • For mz=98 (96 cells): Cell 0→nvert[1], Cell 95→nvert[96]
  • nvert[0] and nvert[mx-1] are ghost values

Face-to-Index Mapping

Example for a domain with mx=26, my=26, mz=98:

Face ID Node Index Face Metric Adjacent Cell (shifted) Physical Extent
BC_FACE_NEG_X i=0 csi[k][j][0] nvert[k][j][1] (Cell 0) j∈[0,24], k∈[0,96]
BC_FACE_POS_X i=24 csi[k][j][24] nvert[k][j][24] (Cell 23) j∈[0,24], k∈[0,96]
BC_FACE_NEG_Y j=0 eta[k][0][i] nvert[k][1][i] (Cell 0) i∈[0,24], k∈[0,96]
BC_FACE_POS_Y j=24 eta[k][24][i] nvert[k][24][i] (Cell 23) i∈[0,24], k∈[0,96]
BC_FACE_NEG_Z k=0 zet[0][j][i] nvert[1][j][i] (Cell 0) i∈[0,24], j∈[0,24]
BC_FACE_POS_Z k=96 zet[96][j][i] nvert[96][j][i] (Cell 95) i∈[0,24], j∈[0,24]

Algorithm

The function performs two separate computations with different loop bounds:

1. Center Calculation (uses ALL physical nodes):

  • Loop over all physical nodes on the face (excluding dummy indices)
  • Accumulate coordinate sums: Σx, Σy, Σz
  • Count number of nodes
  • Average: center = (Σx/n, Σy/n, Σz/n)

2. Area Calculation (uses INTERIOR cells only):

  • Loop over interior cell range to avoid accessing ghost values in nvert
  • For each face adjacent to a fluid cell (nvert < 0.1):
    • Compute area magnitude: |csi/eta/zet| = √(x² + y² + z²)
    • Accumulate to total area

Loop Bound Details

Why different bounds for center vs. area?

For BC_FACE_NEG_X at i=0 with my=26, mz=98:

Center calculation (coordinates):

  • j ∈ [ys, j_max): Includes j=[0,24] (25 nodes), excludes dummy at j=25
  • k ∈ [zs, k_max): Includes k=[0,96] (97 nodes), excludes dummy at k=97
  • Total: 25 × 97 = 2,425 nodes

Area calculation (nvert checks):

  • j ∈ [lys, lye): j=[1,24] (24 values), excludes boundaries
  • k ∈ [lzs, lze): k=[1,96] (96 values), excludes boundaries
  • Why restricted?
    • At j=0: nvert[k][0][1] is ghost (no cell at j=-1)
    • At j=25: nvert[k][25][1] is ghost (no cell at j=24, index 25 is dummy)
    • At k=0: nvert[0][j][1] is ghost (no cell at k=-1)
    • At k=97: nvert[97][j][1] is ghost (no cell at k=96, index 97 is dummy)
  • Total: 24 × 96 = 2,304 interior cells adjacent to face

Area Calculation Formulas

Face area contributions are computed from metric tensor magnitudes:

  • i-faces (±Xi): Area = |csi| = √(csi_x² + csi_y² + csi_z²)
  • j-faces (±Eta): Area = |eta| = √(eta_x² + eta_y² + eta_z²)
  • k-faces (±Zeta): Area = |zet| = √(zet_x² + zet_y² + zet_z²)
Parameters
[in]userPointer to UserCtx containing grid info, DMs, and field vectors
[in]face_idEnum identifying which boundary face to analyze (BC_FACE_NEG_X, etc.)
[out]face_centerPointer to Cmpnts structure to store computed geometric center (x,y,z)
[out]face_areaPointer to PetscReal to store computed total face area
Returns
PetscErrorCode Returns 0 on success, non-zero PETSc error code on failure
Note
This function uses MPI_Allreduce, so it must be called collectively by all ranks
Only ranks that own the specified boundary face contribute to the calculation
Center calculation includes ALL physical nodes on the face
Area calculation ONLY includes faces adjacent to fluid cells (nvert < 0.1)
Dummy/unused indices (e.g., k=97, j=25 for standard test case) are excluded
Warning
Assumes grid and field arrays have been properly initialized
Incorrect face_id values will result in zero contribution from all ranks
See also
CanRankServiceFace() for determining rank ownership of boundary faces
BCFace enum for valid face_id values
LOG_FIELD_ANATOMY() for debugging field indexing
Example Usage:
Cmpnts inlet_center;
PetscReal inlet_area;
ierr = CalculateFaceCenterAndArea(user, BC_FACE_NEG_Z, &inlet_center, &inlet_area);
PetscPrintf(PETSC_COMM_WORLD, "Inlet center: (%.4f, %.4f, %.4f), Area: %.6f\n",
inlet_center.x, inlet_center.y, inlet_center.z, inlet_area);
PetscErrorCode CalculateFaceCenterAndArea(UserCtx *user, BCFace face_id, Cmpnts *face_center, PetscReal *face_area)
Calculates the geometric center and total area of a specified boundary face.
Definition grid.c:1276
@ BC_FACE_NEG_Z
Definition variables.h:206

< Local sum of (x,y,z) coordinates

< Local sum of face area magnitudes

< Local count of nodes

< Global sum of coordinates

< Global sum of areas

< Global count of nodes

< i-range: [xs, xe)

< j-range: [ys, ye)

< k-range: [zs, ze)

< Physical domain size in i (exclude dummy)

< Physical domain size in j (exclude dummy)

< Physical domain size in k (exclude dummy)

< Start at 1 if on -Xi boundary

< End at mx-1 if on +Xi boundary

< Start at 1 if on -Eta boundary

< End at my-1 if on +Eta boundary

< Start at 1 if on -Zeta boundary

< End at mz-1 if on +Zeta boundary

< Exclude dummy at i=mx-1 (e.g., i=25)

< Exclude dummy at j=my-1 (e.g., j=25)

< Exclude dummy at k=mz-1 (e.g., k=97)

< Local ghosted coordinate vector

< Nodal coordinates [k][j][i]

< Face metric tensors [k][j][i]

< Cell blanking field [k][j][i] (shifted +1)

Definition at line 1276 of file grid.c.

1278{
 // NOTE(review): this rendered listing omits original lines 1341, 1610, 1617 and 1627
 // (logging macros stripped by the doc generator); the bare string literals near the
 // end are the surviving continuation lines of those calls.
1279 PetscErrorCode ierr;
1280 DMDALocalInfo info;
1281
1282 // ========================================================================
1283 // Local accumulators for this rank's contribution
1284 // ========================================================================
1285 PetscReal local_sum[3] = {0.0, 0.0, 0.0}; ///< Local sum of (x,y,z) coordinates
1286 PetscReal localAreaSum = 0.0; ///< Local sum of face area magnitudes
1287 PetscCount local_n_points = 0; ///< Local count of nodes
1288
1289 // ========================================================================
1290 // Global accumulators after MPI reduction
1291 // ========================================================================
1292 PetscReal global_sum[3] = {0.0, 0.0, 0.0}; ///< Global sum of coordinates
1293 PetscReal globalAreaSum = 0.0; ///< Global sum of areas
1294 PetscCount global_n_points = 0; ///< Global count of nodes
1295
1296 // ========================================================================
1297 // Grid information and array pointers
1298 // ========================================================================
1299 DM da = user->da; // NOTE(review): 'da' is unused below — every call passes user->da/user->fda directly; candidate for removal.
1300 info = user->info;
1301
1302 // Rank's owned range in global indices
1303 PetscInt xs = info.xs, xe = info.xs + info.xm; ///< i-range: [xs, xe)
1304 PetscInt ys = info.ys, ye = info.ys + info.ym; ///< j-range: [ys, ye)
1305 PetscInt zs = info.zs, ze = info.zs + info.zm; ///< k-range: [zs, ze)
1306
1307 // Global domain dimensions (total allocated, includes dummy at end)
1308 PetscInt mx = info.mx, my = info.my, mz = info.mz;
1309 PetscInt IM = user->IM; ///< Physical domain size in i (exclude dummy)
1310 PetscInt JM = user->JM; ///< Physical domain size in j (exclude dummy)
1311 PetscInt KM = user->KM; ///< Physical domain size in k (exclude dummy)
1312
1313 // ========================================================================
1314 // Interior loop bounds (adjusted to avoid ghost/boundary cells)
1315 // These are used for nvert checks where we need valid cell indices
1316 // ========================================================================
1317 PetscInt lxs = xs; if(xs == 0) lxs = xs + 1; ///< Start at 1 if on -Xi boundary
1318 PetscInt lxe = xe; if(xe == mx) lxe = xe - 1; ///< End at mx-1 if on +Xi boundary
1319 PetscInt lys = ys; if(ys == 0) lys = ys + 1; ///< Start at 1 if on -Eta boundary
1320 PetscInt lye = ye; if(ye == my) lye = ye - 1; ///< End at my-1 if on +Eta boundary
1321 PetscInt lzs = zs; if(zs == 0) lzs = zs + 1; ///< Start at 1 if on -Zeta boundary
1322 PetscInt lze = ze; if(ze == mz) lze = ze - 1; ///< End at mz-1 if on +Zeta boundary
1323
1324 // ========================================================================
1325 // Physical node bounds (exclude dummy indices at mx-1, my-1, mz-1)
1326 // These are used for coordinate loops where we want ALL physical nodes
1327 // ========================================================================
1328 PetscInt i_max = (xe == mx) ? mx - 1 : xe; ///< Exclude dummy at i=mx-1 (e.g., i=25)
1329 PetscInt j_max = (ye == my) ? my - 1 : ye; ///< Exclude dummy at j=my-1 (e.g., j=25)
1330 PetscInt k_max = (ze == mz) ? mz - 1 : ze; ///< Exclude dummy at k=mz-1 (e.g., k=97)
1331
1332 // ========================================================================
1333 // Array pointers for field access
1334 // ========================================================================
1335 Vec lCoor; ///< Local ghosted coordinate vector
1336 Cmpnts ***coor; ///< Nodal coordinates [k][j][i]
1337 Cmpnts ***csi, ***eta, ***zet; ///< Face metric tensors [k][j][i]
1338 PetscReal ***nvert; ///< Cell blanking field [k][j][i] (shifted +1)
1339
1340 PetscFunctionBeginUser;
1342
1343 // ========================================================================
1344 // Step 1: Check if this rank owns the specified boundary face
1345 // ========================================================================
1346 PetscBool owns_face = PETSC_FALSE;
1347 ierr = CanRankServiceFace(&info,IM,JM,KM,face_id,&owns_face); CHKERRQ(ierr);
1348 if(owns_face){
1349 // ========================================================================
1350 // Step 2: Get read-only array access for all required fields
1351 // ========================================================================
1352 ierr = DMGetCoordinatesLocal(user->da, &lCoor); CHKERRQ(ierr);
1353 ierr = DMDAVecGetArrayRead(user->fda, lCoor, &coor); CHKERRQ(ierr);
1354 ierr = DMDAVecGetArrayRead(user->da, user->lNvert, &nvert); CHKERRQ(ierr);
1355 ierr = DMDAVecGetArrayRead(user->fda, user->lCsi, &csi); CHKERRQ(ierr);
1356 ierr = DMDAVecGetArrayRead(user->fda, user->lEta, &eta); CHKERRQ(ierr);
1357 ierr = DMDAVecGetArrayRead(user->fda, user->lZet, &zet); CHKERRQ(ierr);
1358
1359 // ========================================================================
1360 // Step 3: Loop over the specified face and accumulate center and area
1361 // ========================================================================
1362 switch (face_id) {
1363
1364 // ====================================================================
1365 // BC_FACE_NEG_X: Face at i=0 (bottom boundary in i-direction)
1366 // ====================================================================
1367 case BC_FACE_NEG_X:
1368 if (xs == 0) {
1369 PetscInt i = 0; // Face is at node index i=0
1370
1371 // ---- Part 1: Center calculation (ALL physical nodes) ----
1372 // Loop over ALL physical nodes on this face
1373 // For my=26, mz=98: j∈[0,24], k∈[0,96] → 25×97 = 2,425 nodes
1374 for (PetscInt k = zs; k < k_max; k++) {
1375 for (PetscInt j = ys; j < j_max; j++) {
1376 // Accumulate coordinates at node [k][j][0]
1377 local_sum[0] += coor[k][j][i].x;
1378 local_sum[1] += coor[k][j][i].y;
1379 local_sum[2] += coor[k][j][i].z;
1380 local_n_points++;
1381 }
1382 }
1383
1384 // ---- Part 2: Area calculation (INTERIOR cells only) ----
1385 // Loop over interior range where nvert checks are valid
1386 // For my=26, mz=98: j∈[1,24], k∈[1,96] → 24×96 = 2,304 cells
1387 for (PetscInt k = lzs; k < lze; k++) {
1388 for (PetscInt j = lys; j < lye; j++) {
1389 // Check if adjacent cell is fluid
1390 // nvert[k][j][i+1] = nvert[k][j][1] checks Cell 0
1391 // (Physical Cell 0 in j-k plane, stored at shifted index [1])
1392 if (nvert[k][j][i+1] < 0.1) {
1393 // Cell is fluid - add face area contribution
1394 // Face area = magnitude of csi metric at [k][j][0]
1395 localAreaSum += sqrt(csi[k][j][i].x * csi[k][j][i].x +
1396 csi[k][j][i].y * csi[k][j][i].y +
1397 csi[k][j][i].z * csi[k][j][i].z);
1398 }
1399 }
1400 }
1401 }
1402 break;
1403
1404 // ====================================================================
1405 // BC_FACE_POS_X: Face at i=IM-1 (top boundary in i-direction)
1406 // ====================================================================
1407 case BC_FACE_POS_X:
1408 if (xe == mx) {
1409 PetscInt i = mx - 2; // Last physical node (e.g., i=24 for mx=26)
1410
1411 // ---- Part 1: Center calculation (ALL physical nodes) ----
1412 for (PetscInt k = zs; k < k_max; k++) {
1413 for (PetscInt j = ys; j < j_max; j++) {
1414 local_sum[0] += coor[k][j][i].x;
1415 local_sum[1] += coor[k][j][i].y;
1416 local_sum[2] += coor[k][j][i].z;
1417 local_n_points++;
1418 }
1419 }
1420
1421 // ---- Part 2: Area calculation (INTERIOR cells only) ----
1422 for (PetscInt k = lzs; k < lze; k++) {
1423 for (PetscInt j = lys; j < lye; j++) {
1424 // Check if adjacent cell is fluid
1425 // nvert[k][j][i] = nvert[k][j][24] checks last cell (Cell 23)
1426 // (Physical Cell 23, stored at shifted index [24])
1427 if (nvert[k][j][i] < 0.1) {
1428 // Face area = magnitude of csi metric at [k][j][24]
1429 localAreaSum += sqrt(csi[k][j][i].x * csi[k][j][i].x +
1430 csi[k][j][i].y * csi[k][j][i].y +
1431 csi[k][j][i].z * csi[k][j][i].z);
1432 }
1433 }
1434 }
1435 }
1436 break;
1437
1438 // ====================================================================
1439 // BC_FACE_NEG_Y: Face at j=0 (bottom boundary in j-direction)
1440 // ====================================================================
1441 case BC_FACE_NEG_Y:
1442 if (ys == 0) {
1443 PetscInt j = 0; // Face is at node index j=0
1444
1445 // ---- Part 1: Center calculation (ALL physical nodes) ----
1446 // For mx=26, mz=98: i∈[0,24], k∈[0,96] → 25×97 = 2,425 nodes
1447 for (PetscInt k = zs; k < k_max; k++) {
1448 for (PetscInt i = xs; i < i_max; i++) {
1449 local_sum[0] += coor[k][j][i].x;
1450 local_sum[1] += coor[k][j][i].y;
1451 local_sum[2] += coor[k][j][i].z;
1452 local_n_points++;
1453 }
1454 }
1455
1456 // ---- Part 2: Area calculation (INTERIOR cells only) ----
1457 // For mx=26, mz=98: i∈[1,24], k∈[1,96] → 24×96 = 2,304 cells
1458 for (PetscInt k = lzs; k < lze; k++) {
1459 for (PetscInt i = lxs; i < lxe; i++) {
1460 // nvert[k][j+1][i] = nvert[k][1][i] checks Cell 0
1461 if (nvert[k][j+1][i] < 0.1) {
1462 // Face area = magnitude of eta metric at [k][0][i]
1463 localAreaSum += sqrt(eta[k][j][i].x * eta[k][j][i].x +
1464 eta[k][j][i].y * eta[k][j][i].y +
1465 eta[k][j][i].z * eta[k][j][i].z);
1466 }
1467 }
1468 }
1469 }
1470 break;
1471
1472 // ====================================================================
1473 // BC_FACE_POS_Y: Face at j=JM-1 (top boundary in j-direction)
1474 // ====================================================================
1475 case BC_FACE_POS_Y:
1476 if (ye == my) {
1477 PetscInt j = my - 2; // Last physical node (e.g., j=24 for my=26)
1478
1479 // ---- Part 1: Center calculation (ALL physical nodes) ----
1480 for (PetscInt k = zs; k < k_max; k++) {
1481 for (PetscInt i = xs; i < i_max; i++) {
1482 local_sum[0] += coor[k][j][i].x;
1483 local_sum[1] += coor[k][j][i].y;
1484 local_sum[2] += coor[k][j][i].z;
1485 local_n_points++;
1486 }
1487 }
1488
1489 // ---- Part 2: Area calculation (INTERIOR cells only) ----
1490 for (PetscInt k = lzs; k < lze; k++) {
1491 for (PetscInt i = lxs; i < lxe; i++) {
1492 // nvert[k][j][i] = nvert[k][24][i] checks last cell (Cell 23)
1493 if (nvert[k][j][i] < 0.1) {
1494 // Face area = magnitude of eta metric at [k][24][i]
1495 localAreaSum += sqrt(eta[k][j][i].x * eta[k][j][i].x +
1496 eta[k][j][i].y * eta[k][j][i].y +
1497 eta[k][j][i].z * eta[k][j][i].z);
1498 }
1499 }
1500 }
1501 }
1502 break;
1503
1504 // ====================================================================
1505 // BC_FACE_NEG_Z: Face at k=0 (inlet, bottom boundary in k-direction)
1506 // ====================================================================
1507 case BC_FACE_NEG_Z:
1508 if (zs == 0) {
1509 PetscInt k = 0; // Face is at node index k=0
1510
1511 // ---- Part 1: Center calculation (ALL physical nodes) ----
1512 // For mx=26, my=26: i∈[0,24], j∈[0,24] → 25×25 = 625 nodes
1513 for (PetscInt j = ys; j < j_max; j++) {
1514 for (PetscInt i = xs; i < i_max; i++) {
1515 local_sum[0] += coor[k][j][i].x;
1516 local_sum[1] += coor[k][j][i].y;
1517 local_sum[2] += coor[k][j][i].z;
1518 local_n_points++;
1519 }
1520 }
1521
1522 // ---- Part 2: Area calculation (INTERIOR cells only) ----
1523 // For mx=26, my=26: i∈[1,24], j∈[1,24] → 24×24 = 576 cells
1524 for (PetscInt j = lys; j < lye; j++) {
1525 for (PetscInt i = lxs; i < lxe; i++) {
1526 // nvert[k+1][j][i] = nvert[1][j][i] checks Cell 0
1527 // (Physical Cell 0 in i-j plane, stored at shifted index [1])
1528 if (nvert[k+1][j][i] < 0.1) {
1529 // Face area = magnitude of zet metric at [0][j][i]
1530 localAreaSum += sqrt(zet[k][j][i].x * zet[k][j][i].x +
1531 zet[k][j][i].y * zet[k][j][i].y +
1532 zet[k][j][i].z * zet[k][j][i].z);
1533 }
1534 }
1535 }
1536 }
1537 break;
1538
1539 // ====================================================================
1540 // BC_FACE_POS_Z: Face at k=KM-1 (outlet, top boundary in k-direction)
1541 // ====================================================================
1542 case BC_FACE_POS_Z:
1543 if (ze == mz) {
1544 PetscInt k = mz - 2; // Last physical node (e.g., k=96 for mz=98)
1545
1546 // ---- Part 1: Center calculation (ALL physical nodes) ----
1547 // For mx=26, my=26: i∈[0,24], j∈[0,24] → 25×25 = 625 nodes
1548 for (PetscInt j = ys; j < j_max; j++) {
1549 for (PetscInt i = xs; i < i_max; i++) {
1550 local_sum[0] += coor[k][j][i].x;
1551 local_sum[1] += coor[k][j][i].y;
1552 local_sum[2] += coor[k][j][i].z;
1553 local_n_points++;
1554 }
1555 }
1556
1557 // ---- Part 2: Area calculation (INTERIOR cells only) ----
1558 // For mx=26, my=26: i∈[1,24], j∈[1,24] → 24×24 = 576 cells
1559 for (PetscInt j = lys; j < lye; j++) {
1560 for (PetscInt i = lxs; i < lxe; i++) {
1561 // nvert[k][j][i] = nvert[96][j][i] checks last cell (Cell 95)
1562 // (Physical Cell 95, stored at shifted index [96])
1563 if (nvert[k][j][i] < 0.1) {
1564 // Face area = magnitude of zet metric at [96][j][i]
1565 localAreaSum += sqrt(zet[k][j][i].x * zet[k][j][i].x +
1566 zet[k][j][i].y * zet[k][j][i].y +
1567 zet[k][j][i].z * zet[k][j][i].z);
1568 }
1569 }
1570 }
1571 }
1572 break;
1573
1574 default:
1575 SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE,
1576 "Unknown face_id %d in CalculateFaceCenterAndArea", face_id);
1577 }
1578
1579 // ========================================================================
1580 // Step 4: Restore array access (release pointers)
1581 // ========================================================================
1582 ierr = DMDAVecRestoreArrayRead(user->fda, lCoor, &coor); CHKERRQ(ierr);
1583 ierr = DMDAVecRestoreArrayRead(user->da, user->lNvert, &nvert); CHKERRQ(ierr);
1584 ierr = DMDAVecRestoreArrayRead(user->fda, user->lCsi, &csi); CHKERRQ(ierr);
1585 ierr = DMDAVecRestoreArrayRead(user->fda, user->lEta, &eta); CHKERRQ(ierr);
1586 ierr = DMDAVecRestoreArrayRead(user->fda, user->lZet, &zet); CHKERRQ(ierr);
1587 }
1588 // ========================================================================
1589 // Step 5: Perform MPI reductions to get global sums
1590 // ========================================================================
1591 // Sum coordinate contributions from all ranks
 // NOTE(review): raw MPI_Allreduce returns an MPI error code; PETSc convention is
 // CHKERRMPI(ierr), and MPIU_REAL instead of MPI_DOUBLE keeps this correct when
 // PetscReal is not double (single/__float128 builds) — TODO confirm project convention.
1592 ierr = MPI_Allreduce(local_sum, global_sum, 3, MPI_DOUBLE, MPI_SUM,
1593 PETSC_COMM_WORLD); CHKERRQ(ierr);
1594
1595 // Sum node counts from all ranks
 // NOTE(review): assumes PetscCount is layout-compatible with MPI_COUNT's MPI_Count — verify.
1596 ierr = MPI_Allreduce(&local_n_points, &global_n_points, 1, MPI_COUNT, MPI_SUM,
1597 PETSC_COMM_WORLD); CHKERRQ(ierr);
1598
1599 // Sum area contributions from all ranks
1600 ierr = MPI_Allreduce(&localAreaSum, &globalAreaSum, 1, MPI_DOUBLE, MPI_SUM,
1601 PETSC_COMM_WORLD); CHKERRQ(ierr);
1602
1603 // ========================================================================
1604 // Step 6: Calculate geometric center by averaging coordinates
1605 // ========================================================================
1606 if (global_n_points > 0) {
1607 face_center->x = global_sum[0] / global_n_points;
1608 face_center->y = global_sum[1] / global_n_points;
1609 face_center->z = global_sum[2] / global_n_points;
1611 "Calculated center for Face %s: (x=%.4f, y=%.4f, z=%.4f) from %lld nodes\n",
1612 BCFaceToString(face_id),
1613 face_center->x, face_center->y, face_center->z,
1614 (long long)global_n_points);
1615 } else {
1616 // No nodes found - this should not happen for a valid face
1618 "WARNING: Face %s identified but no grid points found. Center not calculated.\n",
1619 BCFaceToString(face_id));
1620 face_center->x = face_center->y = face_center->z = 0.0;
1621 }
1622
1623 // ========================================================================
1624 // Step 7: Return computed total area
1625 // ========================================================================
1626 *face_area = globalAreaSum;
1628 "Calculated area for Face %s: Area=%.6f\n",
1629 BCFaceToString(face_id), *face_area);
1630
1631 PetscFunctionReturn(0);
1632}
PetscErrorCode CanRankServiceFace(const DMDALocalInfo *info, PetscInt IM_nodes_global, PetscInt JM_nodes_global, PetscInt KM_nodes_global, BCFace face_id, PetscBool *can_service_out)
Determines if the current MPI rank owns any part of a specified global face.
Definition Boundaries.c:151
const char * BCFaceToString(BCFace face)
Helper function to convert BCFace enum to a string representation.
Definition logging.c:643
Vec lNvert
Definition variables.h:755
Vec lZet
Definition variables.h:776
Vec lCsi
Definition variables.h:776
Vec lEta
Definition variables.h:776
@ BC_FACE_NEG_X
Definition variables.h:204
@ BC_FACE_POS_Z
Definition variables.h:206
@ BC_FACE_POS_Y
Definition variables.h:205
@ BC_FACE_POS_X
Definition variables.h:204
@ BC_FACE_NEG_Y
Definition variables.h:205
Here is the call graph for this function:
Here is the caller graph for this function: