PICurv 0.1.0
A Parallel Particle-In-Cell Solver for Curvilinear LES
grid.c File Reference
#include "grid.h"
#include "logging.h"
Include dependency graph for grid.c:

Go to the source code of this file.

Macros

#define BBOX_TOLERANCE   1e-6
 
#define __FUNCT__   "ParseAndSetGridInputs"
 
#define __FUNCT__   "DefineAllGridDimensions"
 
#define __FUNCT__   "InitializeSingleGridDM"
 
#define __FUNCT__   "InitializeAllGridDMs"
 
#define __FUNCT__   "AssignAllGridCoordinates"
 
#define __FUNCT__   "SetFinestLevelCoordinates"
 
#define __FUNCT__   "GenerateAndSetCoordinates"
 
#define __FUNCT__   "ReadAndSetCoordinates"
 
#define __FUNCT__   "RestrictCoordinates"
 
#define __FUNCT__   "ComputeLocalBoundingBox"
 
#define __FUNCT__   "GatherAllBoundingBoxes"
 
#define __FUNCT__   "BroadcastAllBoundingBoxes"
 
#define __FUNCT__   "CalculateInletProperties"
 
#define __FUNCT__   "CalculateOutletProperties"
 
#define __FUNCT__   "CalculateFaceCenterAndArea"
 

Functions

static PetscErrorCode ParseAndSetGridInputs (UserCtx *user)
 Determines the grid source and calls the appropriate parsing routine.
 
PetscErrorCode DefineAllGridDimensions (SimCtx *simCtx)
 Orchestrates the parsing and setting of grid dimensions for all blocks.
 
static PetscErrorCode InitializeSingleGridDM (UserCtx *user, UserCtx *coarse_user)
 Creates the DMDA objects (da and fda) for a single UserCtx.
 
PetscErrorCode InitializeAllGridDMs (SimCtx *simCtx)
 Orchestrates the creation of DMDA objects for every block and multigrid level.
 
static PetscErrorCode SetFinestLevelCoordinates (UserCtx *user)
 A router that populates the coordinates for a single finest-level DMDA.
 
static PetscErrorCode GenerateAndSetCoordinates (UserCtx *user)
 Programmatically generates and sets grid coordinates based on user parameters.
 
static PetscErrorCode ReadAndSetCoordinates (UserCtx *user, FILE *fd)
 Reads physical coordinates from a file and populates the DMDA for a specific block.
 
static PetscErrorCode RestrictCoordinates (UserCtx *coarse_user, UserCtx *fine_user)
 Populates coarse grid coordinates by restricting from a fine grid.
 
PetscErrorCode AssignAllGridCoordinates (SimCtx *simCtx)
 Orchestrates the assignment of physical coordinates to all DMDA objects.
 
static PetscReal ComputeStretchedCoord (PetscInt i, PetscInt N, PetscReal L, PetscReal r)
 Computes a stretched coordinate along one dimension.
 
PetscErrorCode ComputeLocalBoundingBox (UserCtx *user, BoundingBox *localBBox)
 Computes the local bounding box of the grid on the current process.
 
PetscErrorCode GatherAllBoundingBoxes (UserCtx *user, BoundingBox **allBBoxes)
 Gathers local bounding boxes from all MPI processes to rank 0.
 
PetscErrorCode BroadcastAllBoundingBoxes (UserCtx *user, BoundingBox **bboxlist)
 Broadcasts the bounding box list from rank 0 to all ranks.
 
PetscErrorCode CalculateInletProperties (UserCtx *user)
 Calculates the center and area of the primary INLET face.
 
PetscErrorCode CalculateOutletProperties (UserCtx *user)
 Calculates the center and area of the primary OUTLET face.
 
PetscErrorCode CalculateFaceCenterAndArea (UserCtx *user, BCFace face_id, Cmpnts *face_center, PetscReal *face_area)
 Calculates the geometric center and total area of a specified boundary face.
 

Macro Definition Documentation

◆ BBOX_TOLERANCE

#define BBOX_TOLERANCE   1e-6

Definition at line 6 of file grid.c.

◆ __FUNCT__ [1/15]

#define __FUNCT__   "ParseAndSetGridInputs"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [2/15]

#define __FUNCT__   "DefineAllGridDimensions"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [3/15]

#define __FUNCT__   "InitializeSingleGridDM"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [4/15]

#define __FUNCT__   "InitializeAllGridDMs"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [5/15]

#define __FUNCT__   "AssignAllGridCoordinates"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [6/15]

#define __FUNCT__   "SetFinestLevelCoordinates"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [7/15]

#define __FUNCT__   "GenerateAndSetCoordinates"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [8/15]

#define __FUNCT__   "ReadAndSetCoordinates"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [9/15]

#define __FUNCT__   "RestrictCoordinates"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [10/15]

#define __FUNCT__   "ComputeLocalBoundingBox"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [11/15]

#define __FUNCT__   "GatherAllBoundingBoxes"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [12/15]

#define __FUNCT__   "BroadcastAllBoundingBoxes"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [13/15]

#define __FUNCT__   "CalculateInletProperties"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [14/15]

#define __FUNCT__   "CalculateOutletProperties"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [15/15]

#define __FUNCT__   "CalculateFaceCenterAndArea"

Definition at line 9 of file grid.c.

Function Documentation

◆ ParseAndSetGridInputs()

static PetscErrorCode ParseAndSetGridInputs (UserCtx *user)
static

Determines the grid source and calls the appropriate parsing routine.

This function acts as a router. For an "analytical" Eulerian source it dispatches to the analytical grid setup (falling back to the programmatic parser when the analytical type does not require custom geometry); otherwise it checks the global simCtx->generate_grid flag (accessed via the user->simCtx back-pointer) to decide whether to call the parser for a programmatically generated grid or for a grid defined in a file.

Parameters
user : Pointer to the UserCtx for a specific block. The function will populate the geometric fields within this struct.
Returns
PetscErrorCode 0 on success, or a PETSc error code on failure.

Definition at line 22 of file grid.c.

23{
24 PetscErrorCode ierr;
25 SimCtx *simCtx = user->simCtx; // Get the global context via the back-pointer
26
27 PetscFunctionBeginUser;
28
29 PROFILE_FUNCTION_BEGIN;
30 if(strcmp(simCtx->eulerianSource,"analytical")==0){
31 if(AnalyticalTypeRequiresCustomGeometry(simCtx->AnalyticalSolutionType)){
32 ierr = SetAnalyticalGridInfo(user); CHKERRQ(ierr);
33 } else {
34 LOG_ALLOW_SYNC(GLOBAL, LOG_DEBUG,
35 "Rank %d: Analytical type '%s' uses programmatic grid fallback for block %d.\n",
36 simCtx->rank, simCtx->AnalyticalSolutionType, user->_this);
37 ierr = ReadGridGenerationInputs(user); CHKERRQ(ierr);
38 }
39 }else{ // eulerianSource is "solve" or "load"
40 if (simCtx->generate_grid) {
41 LOG_ALLOW_SYNC(GLOBAL, LOG_DEBUG, "Rank %d: Block %d is programmatically generated. Calling generation parser.\n", simCtx->rank, user->_this);
42 ierr = ReadGridGenerationInputs(user); CHKERRQ(ierr);
43 } else {
44 LOG_ALLOW_SYNC(GLOBAL, LOG_DEBUG, "Rank %d: Block %d is file-based. Calling file parser.\n", simCtx->rank, user->_this);
45 ierr = ReadGridFile(user); CHKERRQ(ierr);
46 }
47 }
48
49 PROFILE_FUNCTION_END;
50
51 PetscFunctionReturn(0);
52}
Here is the call graph for this function:
Here is the caller graph for this function:

◆ DefineAllGridDimensions()

PetscErrorCode DefineAllGridDimensions (SimCtx *simCtx)

Orchestrates the parsing and setting of grid dimensions for all blocks.

This function serves as the high-level entry point for defining the geometric properties of each grid block in the simulation. It iterates through every block defined by simCtx->block_number.

For each block, it performs two key actions:

  1. It explicitly sets the block's index (_this) in the corresponding UserCtx struct for the finest multigrid level. This makes the context "self-aware".
  2. It calls a helper function (ParseAndSetGridInputs) to handle the detailed work of parsing options or files to populate the rest of the geometric properties for that specific block (e.g., IM, Min_X, rx).
Parameters
simCtx : The master SimCtx, which contains the number of blocks and the UserCtx hierarchy to be configured.
Returns
PetscErrorCode 0 on success, or a PETSc error code on failure.

Definition at line 75 of file grid.c.

76{
77 PetscErrorCode ierr;
78 PetscInt nblk = simCtx->block_number;
79 UserCtx *finest_users;
80
81 PetscFunctionBeginUser;
82
83 PROFILE_FUNCTION_BEGIN;
84
85 if (simCtx->usermg.mglevels == 0) {
86 SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_ARG_WRONGSTATE, "MG levels not set. Cannot get finest_users.");
87 }
88 // Get the UserCtx array for the finest grid level
89 finest_users = simCtx->usermg.mgctx[simCtx->usermg.mglevels - 1].user;
90
91 LOG_ALLOW(GLOBAL, LOG_INFO, "Defining grid dimensions for %d blocks...\n", nblk);
92 if (strcmp(simCtx->eulerianSource, "analytical") == 0 &&
93 AnalyticalTypeRequiresCustomGeometry(simCtx->AnalyticalSolutionType)) {
94 LOG_ALLOW(GLOBAL, LOG_INFO,
95 "Analytical type '%s' requires custom geometry; preloading finest-grid IM/JM/KM once.\n",
96 simCtx->AnalyticalSolutionType);
97 ierr = PopulateFinestUserGridResolutionFromOptions(finest_users, nblk); CHKERRQ(ierr);
98 }
99
100 // Loop over each block to configure its grid dimensions and geometry.
101 for (PetscInt bi = 0; bi < nblk; bi++) {
102 LOG_ALLOW_SYNC(GLOBAL, LOG_DEBUG, "Rank %d: --- Configuring Geometry for Block %d ---\n", simCtx->rank, bi);
103
104 // Before calling any helpers, set the block index in the context.
105 // This makes the UserCtx self-aware of which block it represents.
106 LOG_ALLOW(GLOBAL,LOG_DEBUG,"finest_users->_this = %d, bi = %d\n",finest_users[bi]._this,bi);
107 //finest_user[bi]._this = bi;
108
109 // Call the helper function for this specific block. It can now derive
110 // all necessary information from the UserCtx pointer it receives.
111 ierr = ParseAndSetGridInputs(&finest_users[bi]); CHKERRQ(ierr);
112 }
113
114 PROFILE_FUNCTION_END;
115
116 PetscFunctionReturn(0);
117}
Here is the call graph for this function:
Here is the caller graph for this function:

◆ InitializeSingleGridDM()

static PetscErrorCode InitializeSingleGridDM (UserCtx *user, UserCtx *coarse_user)
static

Creates the DMDA objects (da and fda) for a single UserCtx.

This function is a direct adaptation of the core logic in MGDACreate. It creates the scalar (da) and vector (fda) DMs for a single grid level.

If a coarse_user context is provided, it performs the critical processor alignment calculation from the legacy code. This ensures the new (fine) DM aligns with the coarse DM for multigrid efficiency. If coarse_user is NULL, it creates the DM with a default PETSc decomposition, intended for the coarsest grid level.

Parameters
user : The UserCtx for which the DMs will be created. Its IM, JM, KM fields must be pre-populated.
coarse_user : The UserCtx of the next-coarser grid level, or NULL if user is the coarsest level.
Returns
PetscErrorCode 0 on success, or a PETSc error code on failure.

Definition at line 137 of file grid.c.

138{
139 PetscErrorCode ierr;
140 SimCtx *simCtx = user->simCtx;
141
142 DMBoundaryType xperiod = (simCtx->i_periodic) ? DM_BOUNDARY_PERIODIC : DM_BOUNDARY_NONE;
143 DMBoundaryType yperiod = (simCtx->j_periodic) ? DM_BOUNDARY_PERIODIC : DM_BOUNDARY_NONE;
144 DMBoundaryType zperiod = (simCtx->k_periodic) ? DM_BOUNDARY_PERIODIC : DM_BOUNDARY_NONE;
145 PetscInt stencil_width = (simCtx->i_periodic || simCtx->j_periodic || simCtx->k_periodic) ? 3 : 2; // 2 as in the legacy code; widened to 3 when any direction is periodic
146
147 PetscInt *lx = NULL, *ly = NULL, *lz = NULL;
148 PetscInt m, n, p;
149
150 PetscFunctionBeginUser;
151
152 PROFILE_FUNCTION_BEGIN;
153
154 if (coarse_user) {
155 // --- This is a FINE grid; it must be aligned with the COARSE grid ---
156 LOG_ALLOW_SYNC(LOCAL, LOG_DEBUG, "Rank %d: [Aligning DM] for block %d level %d (size %dx%dx%d) with level %d\n", simCtx->rank, user->_this, user->thislevel, user->IM, user->JM, user->KM, coarse_user->thislevel);
157
158 DMDAGetInfo(coarse_user->da, NULL, NULL, NULL, NULL, &m, &n, &p, NULL, NULL, NULL, NULL, NULL, NULL);
159 LOG_ALLOW_SYNC(LOCAL, LOG_TRACE, "Rank %d: Coarse grid processor decomposition is %d x %d x %d\n", simCtx->rank, m, n, p);
160
161 // This is the core logic from MGDACreate to ensure processor alignment.
162 PetscInt *lx_contrib, *ly_contrib, *lz_contrib;
163 ierr = PetscMalloc3(m, &lx_contrib, n, &ly_contrib, p, &lz_contrib); CHKERRQ(ierr);
164 ierr = PetscMemzero(lx_contrib, m * sizeof(PetscInt)); CHKERRQ(ierr);
165 ierr = PetscMemzero(ly_contrib, n * sizeof(PetscInt)); CHKERRQ(ierr);
166 ierr = PetscMemzero(lz_contrib, p * sizeof(PetscInt)); CHKERRQ(ierr);
167
168 DMDALocalInfo info;
169 DMDAGetLocalInfo(coarse_user->da, &info);
170 PetscInt xs = info.xs, xe = info.xs + info.xm, mx = info.mx;
171 PetscInt ys = info.ys, ye = info.ys + info.ym, my = info.my;
172 PetscInt zs = info.zs, ze = info.zs + info.zm, mz = info.mz;
173
174 PetscMPIInt rank;
175 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
176 PetscInt proc_i = rank % m;
177 PetscInt proc_j = (rank / m) % n;
178 PetscInt proc_k = rank / (m * n);
179
180 // --- X-Direction Logic (Identical to MGDACreate) ---
181 if (user->isc) lx_contrib[proc_i] = (xe - xs);
182 else {
183 if (m == 1) lx_contrib[0] = user->IM + 1;
184 else if (xs == 0) lx_contrib[0] = 2 * xe - 1;
185 else if (xe == mx) lx_contrib[proc_i] = user->IM + 1 - (2 * xs - 1);
186 else lx_contrib[proc_i] = (xe - xs) * 2;
187 }
188
189 // --- Y-Direction Logic (Identical to MGDACreate) ---
190 if (user->jsc) ly_contrib[proc_j] = (ye - ys);
191 else {
192 if (n == 1) ly_contrib[0] = user->JM + 1;
193 else if (ys == 0) ly_contrib[0] = 2 * ye - 1;
194 else if (ye == my) ly_contrib[proc_j] = user->JM + 1 - (2 * ys - 1);
195 else ly_contrib[proc_j] = (ye - ys) * 2;
196 }
197
198 // --- Z-Direction Logic (Identical to MGDACreate) ---
199 if (user->ksc) lz_contrib[proc_k] = (ze - zs);
200 else {
201 if (p == 1) lz_contrib[0] = user->KM + 1;
202 else if (zs == 0) lz_contrib[0] = 2 * ze - 1;
203 else if (ze == mz) lz_contrib[proc_k] = user->KM + 1 - (2 * zs - 1);
204 else lz_contrib[proc_k] = (ze - zs) * 2;
205 }
206 LOG_ALLOW_SYNC(LOCAL, LOG_VERBOSE, "Rank %d: Calculated this rank's node contribution to fine grid: lx=%d, ly=%d, lz=%d\n", simCtx->rank, lx_contrib[proc_i], ly_contrib[proc_j], lz_contrib[proc_k]);
207
208 // Allocate the final distribution arrays and Allreduce to get the global distribution
209 ierr = PetscMalloc3(m, &lx, n, &ly, p, &lz); CHKERRQ(ierr);
210 ierr = MPI_Allreduce(lx_contrib, lx, m, MPIU_INT, MPI_MAX, PETSC_COMM_WORLD); CHKERRQ(ierr);
211 ierr = MPI_Allreduce(ly_contrib, ly, n, MPIU_INT, MPI_MAX, PETSC_COMM_WORLD); CHKERRQ(ierr);
212 ierr = MPI_Allreduce(lz_contrib, lz, p, MPIU_INT, MPI_MAX, PETSC_COMM_WORLD); CHKERRQ(ierr);
213
214 ierr = PetscFree3(lx_contrib, ly_contrib, lz_contrib); CHKERRQ(ierr);
215
216 } else {
217 // --- CASE 2: This is the COARSEST grid; use default or user-specified decomposition ---
218 if(simCtx->exec_mode == EXEC_MODE_SOLVER){
219
220 LOG_ALLOW_SYNC(LOCAL, LOG_DEBUG, "Rank %d: Creating coarsest DM for block %d level %d (size %dx%dx%d)\n", simCtx->rank, user->_this, user->thislevel, user->IM, user->JM, user->KM);
221 m = simCtx->da_procs_x;
222 n = simCtx->da_procs_y;
223 p = simCtx->da_procs_z;
224
225 } else if(simCtx->exec_mode == EXEC_MODE_POSTPROCESSOR){
226
227 LOG_ALLOW(GLOBAL,LOG_ERROR,"Currently Only Single Rank is supported. \n");
228
229 m = n = p = PETSC_DECIDE;
230
231 }
232 // lx, ly, lz are NULL, so DMDACreate3d will use the m,n,p values.
233 }
234
235 // --- Create the DMDA for the current UserCtx ---
236 LOG_ALLOW_SYNC(LOCAL, LOG_DEBUG, "Rank %d: Calling DMDACreate3d...\n", simCtx->rank);
237 ierr = DMDACreate3d(PETSC_COMM_WORLD, xperiod, yperiod, zperiod, DMDA_STENCIL_BOX,
238 user->IM + 1, user->JM + 1, user->KM + 1,
239 m, n, p,
240 1, stencil_width, lx, ly, lz, &user->da); CHKERRQ(ierr);
241
242 if (coarse_user) {
243 ierr = PetscFree3(lx, ly, lz); CHKERRQ(ierr);
244 }
245
246 // --- Standard DM setup applicable to all levels ---
247 ierr = DMSetUp(user->da); CHKERRQ(ierr);
248 ierr = DMGetCoordinateDM(user->da, &user->fda); CHKERRQ(ierr);
249 ierr = DMDASetUniformCoordinates(user->da, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0); CHKERRQ(ierr);
250 ierr = DMDAGetLocalInfo(user->da, &user->info); CHKERRQ(ierr);
251 LOG_ALLOW_SYNC(LOCAL, LOG_DEBUG, "Rank %d: DM creation for block %d level %d complete.\n", simCtx->rank, user->_this, user->thislevel);
252
253 PROFILE_FUNCTION_END;
254
255 PetscFunctionReturn(0);
256}
Here is the caller graph for this function:

◆ InitializeAllGridDMs()

PetscErrorCode InitializeAllGridDMs (SimCtx *simCtx)

Orchestrates the creation of DMDA objects for every block and multigrid level.

This function systematically builds the entire DMDA hierarchy. It first calculates the dimensions (IM, JM, KM) for all coarse grids based on the finest grid's dimensions and the semi-coarsening flags. It then iterates from the coarsest to the finest level, calling a powerful helper function (InitializeSingleGridDM) to create the DMs for each block, ensuring that finer grids are properly aligned with their coarser parents for multigrid efficiency.

Parameters
simCtx : The master SimCtx, containing the configured UserCtx hierarchy.
Returns
PetscErrorCode 0 on success, or a PETSc error code on failure.

Definition at line 274 of file grid.c.

275{
276 PetscErrorCode ierr;
277 UserMG *usermg = &simCtx->usermg;
278 MGCtx *mgctx = usermg->mgctx;
279 PetscInt nblk = simCtx->block_number;
280
281 PetscFunctionBeginUser;
282
283 PROFILE_FUNCTION_BEGIN;
284
285 LOG_ALLOW(GLOBAL,LOG_INFO, "Pre-scanning BCs to identify domain periodicity.\n");
286 ierr = DeterminePeriodicity(simCtx); CHKERRQ(ierr);
287
288 LOG_ALLOW(GLOBAL, LOG_INFO, "Creating DMDA objects for all levels and blocks...\n");
289
290 // --- Part 1: Calculate Coarse Grid Dimensions & VALIDATE ---
291 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Calculating and validating coarse grid dimensions...\n");
292 for (PetscInt level = usermg->mglevels - 2; level >= 0; level--) {
293 for (PetscInt bi = 0; bi < nblk; bi++) {
294 UserCtx *user_coarse = &mgctx[level].user[bi];
295 UserCtx *user_fine = &mgctx[level + 1].user[bi];
296
297 user_coarse->IM = user_fine->isc ? user_fine->IM : (user_fine->IM + 1) / 2;
298 user_coarse->JM = user_fine->jsc ? user_fine->JM : (user_fine->JM + 1) / 2;
299 user_coarse->KM = user_fine->ksc ? user_fine->KM : (user_fine->KM + 1) / 2;
300
301 LOG_ALLOW_SYNC(LOCAL, LOG_TRACE, "Rank %d: Block %d, Level %d dims calculated: %d x %d x %d\n",
302 simCtx->rank, bi, level, user_coarse->IM, user_coarse->JM, user_coarse->KM);
303
304 // Validation check from legacy MGDACreate to ensure coarsening is possible
305 PetscInt check_i = user_coarse->IM * (2 - user_coarse->isc) - (user_fine->IM + 1 - user_coarse->isc);
306 PetscInt check_j = user_coarse->JM * (2 - user_coarse->jsc) - (user_fine->JM + 1 - user_coarse->jsc);
307 PetscInt check_k = user_coarse->KM * (2 - user_coarse->ksc) - (user_fine->KM + 1 - user_coarse->ksc);
308
309 if (check_i + check_j + check_k != 0) {
310 // SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_ARG_WRONG,
311 // "Grid at level %d, block %d cannot be coarsened from %dx%dx%d to %dx%dx%d with the given semi-coarsening flags. Check grid dimensions.",
312 // level, bi, user_fine->IM, user_fine->JM, user_fine->KM, user_coarse->IM, user_coarse->JM, user_coarse->KM);
313 LOG(GLOBAL,LOG_WARNING,"WARNING: Grid at level %d, block %d can't be consistently coarsened further.\n", level, bi);
314 }
315 }
316 }
317
318 // --- Part 2: Create DMs from Coarse to Fine for each Block ---
319 for (PetscInt bi = 0; bi < nblk; bi++) {
320 LOG_ALLOW_SYNC(GLOBAL, LOG_DEBUG, "--- Creating DMs for Block %d ---\n", bi);
321
322 // Create the coarsest level DM first (passing NULL for the coarse_user)
323 ierr = InitializeSingleGridDM(&mgctx[0].user[bi], NULL); CHKERRQ(ierr);
324
325 // Create finer level DMs, passing the next-coarser context for alignment
326 for (PetscInt level = 1; level < usermg->mglevels; level++) {
327 ierr = InitializeSingleGridDM(&mgctx[level].user[bi], &mgctx[level-1].user[bi]); CHKERRQ(ierr);
328 }
329 }
330
331 // --- Optional: View the finest DM for debugging verification ---
332 if (get_log_level() >= LOG_DEBUG) {
333 LOG_ALLOW_SYNC(GLOBAL, LOG_INFO, "--- Viewing Finest DMDA (Level %d, Block 0) ---\n", usermg->mglevels - 1);
334 ierr = DMView(mgctx[usermg->mglevels - 1].user[0].da, PETSC_VIEWER_STDOUT_WORLD); CHKERRQ(ierr);
335 }
336
337 LOG_ALLOW(GLOBAL, LOG_INFO, "DMDA object creation complete.\n");
338
339 PROFILE_FUNCTION_END;
340
341 PetscFunctionReturn(0);
342}
Here is the call graph for this function:
Here is the caller graph for this function:

◆ SetFinestLevelCoordinates()

static PetscErrorCode SetFinestLevelCoordinates (UserCtx *user)
static

A router that populates the coordinates for a single finest-level DMDA.

This function orchestrates the coordinate setting for one block. It checks the global generate_grid flag and calls the appropriate helper for either programmatic generation or reading from a file.

After the local coordinate vector is populated by a helper, this function performs the necessary DMLocalToGlobal and DMGlobalToLocal scatters to ensure that the ghost node coordinate values are correctly communicated and updated across all MPI ranks.

Parameters
user : The UserCtx for a specific block on the finest level.
Returns
PetscErrorCode 0 on success, or a PETSc error code on failure.

Definition at line 434 of file grid.c.

435{
436 PetscErrorCode ierr;
437 SimCtx *simCtx = user->simCtx;
438
439 PetscFunctionBeginUser;
440
441 PROFILE_FUNCTION_BEGIN;
442
443 LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Setting finest level coordinates for block %d...\n", simCtx->rank, user->_this);
444
445 if (simCtx->generate_grid) {
446 ierr = GenerateAndSetCoordinates(user); CHKERRQ(ierr);
447 } else {
448
449 FILE *grid_file_handle = NULL;
450 // Only Rank 0 opens the file.
451 if (simCtx->rank == 0) {
452 grid_file_handle = fopen(simCtx->grid_file, "r");
453 if (!grid_file_handle) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_FILE_OPEN, "Cannot open file: %s", simCtx->grid_file);
454
455 // Now, on Rank 0, we skip the entire header section once.
456 // This is the logic from your modern code's AssignGridCoordinates.
457 PetscInt headerLines = simCtx->block_number + 2; // 1 for nblk, plus one for each block's dims
458 char dummy_buffer[2048];
459 for (PetscInt s = 0; s < headerLines; ++s) {
460 if (!fgets(dummy_buffer, sizeof(dummy_buffer), grid_file_handle)) {
461 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_FILE_READ, "Unexpected EOF while skipping grid header");
462 }
463 }
464 LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank 0: Skipped %d header lines, now at coordinate data.\n", headerLines);
465 }
466
467 // We now call the coordinate reader, passing the file handle.
468 // It's responsible for reading its block's data and broadcasting.
469 ierr = ReadAndSetCoordinates(user, grid_file_handle); CHKERRQ(ierr);
470
471 // Only Rank 0, which opened the file, should close it.
472 if (simCtx->rank == 0) {
473 fclose(grid_file_handle);
474 }
475 }
476
477 // After populating the local coordinate vector, we must perform a
478 // Local-to-Global and then Global-to-Local scatter to correctly
479 // populate the ghost node coordinates across process boundaries.
480 Vec gCoor, lCoor;
481 ierr = DMGetCoordinates(user->da, &gCoor); CHKERRQ(ierr);
482 ierr = DMGetCoordinatesLocal(user->da, &lCoor); CHKERRQ(ierr);
483
484 LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Scattering coordinates to update ghost nodes for block %d...\n", simCtx->rank, user->_this);
485 ierr = DMLocalToGlobalBegin(user->fda, lCoor, INSERT_VALUES, gCoor); CHKERRQ(ierr);
486 ierr = DMLocalToGlobalEnd(user->fda, lCoor, INSERT_VALUES, gCoor); CHKERRQ(ierr);
487
488 ierr = DMGlobalToLocalBegin(user->fda, gCoor, INSERT_VALUES, lCoor); CHKERRQ(ierr);
489 ierr = DMGlobalToLocalEnd(user->fda, gCoor, INSERT_VALUES, lCoor); CHKERRQ(ierr);
490
491 PROFILE_FUNCTION_END;
492
493 PetscFunctionReturn(0);
494}
Here is the call graph for this function:
Here is the caller graph for this function:

◆ GenerateAndSetCoordinates()

static PetscErrorCode GenerateAndSetCoordinates (UserCtx *user)
static

Programmatically generates and sets grid coordinates based on user parameters.

This function populates the local coordinate vector of the provided UserCtx using the geometric properties (Min_X, Max_X, rx, etc.) that were parsed from command-line options. It supports non-linear grid stretching.

Parameters
user : The UserCtx for a specific block.
Returns
PetscErrorCode 0 on success, or a PETSc error code on failure.

DEBUG: This verifies the presence of a last "unphysical" layer of coordinates.

Definition at line 547 of file grid.c.

548{
549 PetscErrorCode ierr;
550 DMDALocalInfo info;
551 Cmpnts ***coor;
552 Vec lCoor;
553
554 PetscFunctionBeginUser;
555
556 PROFILE_FUNCTION_BEGIN;
557
558 LOG_ALLOW_SYNC(LOCAL, LOG_DEBUG, "Rank %d: Generating coordinates for block %d...\n", user->simCtx->rank, user->_this);
559
560 ierr = DMDAGetLocalInfo(user->da, &info); CHKERRQ(ierr);
561 ierr = DMGetCoordinatesLocal(user->da, &lCoor); CHKERRQ(ierr);
562
563 PetscInt xs = info.xs, xe = info.xs + info.xm;
564 PetscInt ys = info.ys, ye = info.ys + info.ym;
565 PetscInt zs = info.zs, ze = info.zs + info.zm;
566
567 LOG_ALLOW_SYNC(LOCAL, LOG_TRACE, "Rank %d: Local Info for block %d - X range - [%d,%d], Y range - [%d,%d], Z range - [%d,%d]\n",
568 user->simCtx->rank, user->_this, xs, xe, ys, ye, zs, ze);
569
570 LOG_ALLOW_SYNC(LOCAL, LOG_TRACE, "Rank %d: Local Info for block %d - X domain - [%.4f,%.4f], Y range - [%.4f,%.4f], Z range - [%.4f,%.4f]\n",
571 user->simCtx->rank, user->_this, user->Min_X,user->Max_X,user->Min_Y,user->Max_Y,user->Min_Z,user->Max_Z);
572
573 ierr = VecSet(lCoor, 0.0); CHKERRQ(ierr);
574 ierr = DMDAVecGetArray(user->fda, lCoor, &coor); CHKERRQ(ierr);
575
576 PetscReal Lx = user->Max_X - user->Min_X;
577 PetscReal Ly = user->Max_Y - user->Min_Y;
578 PetscReal Lz = user->Max_Z - user->Min_Z;
579
580 // Loop over the local nodes, including ghost nodes, owned by this process.
581 for (PetscInt k = zs; k < ze; k++) {
582 for (PetscInt j = ys; j < ye; j++) {
583 for (PetscInt i = xs; i < xe; i++) {
584 if(k < user->KM && j < user->JM && i < user->IM){
585 coor[k][j][i].x = user->Min_X + ComputeStretchedCoord(i, user->IM, Lx, user->rx);
586 coor[k][j][i].y = user->Min_Y + ComputeStretchedCoord(j, user->JM, Ly, user->ry);
587 coor[k][j][i].z = user->Min_Z + ComputeStretchedCoord(k, user->KM, Lz, user->rz);
588 }
589 }
590 }
591 }
592
593 /// DEBUG: This verifies the presence of a last "unphysical" layer of coordinates.
594 /*
595 PetscInt KM = user->KM;
596 for (PetscInt j = ys; j < ye; j++){
597 for(PetscInt i = xs; i < xe; i++){
598 LOG_ALLOW(GLOBAL,LOG_DEBUG,"coor[%d][%d][%d].(x,y,z) = %le,%le,%le",KM,j,i,coor[KM][j][i].x,coor[KM][j][i].y,coor[KM][j][i].z);
599 }
600 }
601 */
602
603
604
605 ierr = DMDAVecRestoreArray(user->fda, lCoor, &coor); CHKERRQ(ierr);
606
606
607 PROFILE_FUNCTION_END;
608
609 PetscFunctionReturn(0);
610}
Here is the call graph for this function:
Here is the caller graph for this function:

◆ ReadAndSetCoordinates()

static PetscErrorCode ReadAndSetCoordinates (UserCtx *user, FILE *fd)
static

Reads physical coordinates from a file and populates the DMDA for a specific block.

This function handles the collective read of an interleaved (X Y Z per line) multi-block grid file. It assumes the file header (nblk, dimensions) has already been processed by ReadGridFile.

The process is robust for parallel execution:

  1. The caller (SetFinestLevelCoordinates) opens the grid file on rank 0 and skips past the header section before passing the file handle in.
  2. Rank 0 reads the entire coordinate data for the current block (user->_this) into a single contiguous buffer gc.
  3. This global buffer gc is broadcast to all other MPI ranks.
  4. Each rank then loops through its local (owned + ghost) node indices, calculates the corresponding index in the global gc array, and copies the (x,y,z) coordinates into its local PETSc coordinate vector.
Parameters
user : The UserCtx for a specific block. Its _this field must be set, and its IM, JM, KM fields must be correctly pre-populated.
fd : File handle for the grid file, opened and positioned past the header by the caller; only meaningful on rank 0.
Returns
PetscErrorCode 0 on success, or a PETSc error code on failure.

Definition at line 635 of file grid.c.

636{
637 PetscErrorCode ierr;
638 SimCtx *simCtx = user->simCtx;
639 PetscMPIInt rank = simCtx->rank;
640 PetscInt block_index = user->_this;
641 PetscInt IM = user->IM, JM = user->JM, KM = user->KM;
642 DMDALocalInfo info;
643 Cmpnts ***coor;
644 Vec lCoor;
645 PetscReal *gc = NULL; // Global coordinate buffer, allocated on all ranks
646
647 PetscFunctionBeginUser;
648
649 PROFILE_FUNCTION_BEGIN;
650
651 LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Reading interleaved coordinates from file for block %d...\n",
652 simCtx->rank, block_index);
653
654 // 1. Allocate the buffer on ALL ranks to receive the broadcast data.
655 // PetscInt n_nodes = (IM + 1) * (JM + 1) * (KM + 1);
656 PetscInt n_nodes = (IM) * (JM) * (KM);
657 ierr = PetscMalloc1(3 * n_nodes, &gc); CHKERRQ(ierr);
658
659 // 2. Only Rank 0 opens the file and reads the data.
660 if (rank == 0) {
661 if (!fd) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_FILE_OPEN, "Received a NULL file handle.\n");
662
663 // Read the coordinate data for the CURRENT block.
664 for (PetscInt k = 0; k < KM; k++) {
665 for (PetscInt j = 0; j < JM; j++) {
666 for (PetscInt i = 0; i < IM; i++) {
667 PetscInt base_index = 3 * ((k * (JM) + j) * (IM) + i);
668 if (fscanf(fd, "%le %le %le\n", &gc[base_index], &gc[base_index + 1], &gc[base_index + 2]) != 3) {
669 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_FILE_READ, "Error reading coordinates for node (i,j,k)=(%d,%d,%d) in block %d", i, j, k, block_index);
670 }
671 }
672 }
673 }
674
675 }
676
677 // 3. Broadcast the coordinate block for the current block to all other processes.
678 ierr = MPI_Bcast(gc, 3 * n_nodes, MPIU_REAL, 0, PETSC_COMM_WORLD); CHKERRQ(ierr);
679
680 // 4. Each rank populates its local portion of the coordinate vector.
681 ierr = DMDAGetLocalInfo(user->da, &info); CHKERRQ(ierr);
682 ierr = DMGetCoordinatesLocal(user->da, &lCoor); CHKERRQ(ierr);
683 ierr = DMDAVecGetArray(user->fda, lCoor, &coor); CHKERRQ(ierr);
684
685 for (PetscInt k = info.zs; k < info.zs + info.zm; k++) {
686 for (PetscInt j = info.ys; j < info.ys + info.ym; j++) {
687 for (PetscInt i = info.xs; i < info.xs + info.xm; i++) {
688 if(k< KM && j < JM && i < IM){
689 PetscInt base_idx = 3 * ((k * (JM) + j) * (IM) + i);
690 coor[k][j][i].x = gc[base_idx];
691 coor[k][j][i].y = gc[base_idx + 1];
692 coor[k][j][i].z = gc[base_idx + 2];
693 }
694 }
695 }
696 }
697
698 // 5. Clean up and restore.
699 ierr = DMDAVecRestoreArray(user->fda, lCoor, &coor); CHKERRQ(ierr);
700 ierr = PetscFree(gc); CHKERRQ(ierr);
701
702 PROFILE_FUNCTION_END;
703
704 PetscFunctionReturn(0);
705}
Here is the caller graph for this function:
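For reference, a sketch of the interleaved grid-file layout this reader expects, inferred from the fscanf loop above and the header-skipping logic in SetFinestLevelCoordinates. The exact header contents are an assumption; the code only requires block_number + 2 header lines before the coordinate data:

    <header line>            a leading format/comment line (assumed)
    nblk                     number of blocks
    IM JM KM                 one dimensions line per block
    x y z                    then, per block, IM*JM*KM coordinate lines,
    x y z                    one "X Y Z" triple per node, with i varying
    ...                      fastest, then j, then k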

◆ RestrictCoordinates()

static PetscErrorCode RestrictCoordinates (UserCtx *coarse_user, UserCtx *fine_user)
static

Populates coarse grid coordinates by restricting from a fine grid.

This function is a direct adaptation of the coordinate restriction loop from the legacy MG_Initial function. It ensures that the physical location of a coarse grid node is identical to its corresponding parent node on the fine grid. The mapping from coarse to fine index (i -> ih) is determined by the semi-coarsening flags (isc, jsc, ksc) stored in the UserCtx.

Parameters
coarse_user : The UserCtx for the coarse grid (destination).
fine_user : The UserCtx for the fine grid (source).
Returns
PetscErrorCode

Definition at line 722 of file grid.c.

723{
724 PetscErrorCode ierr;
725 Vec c_lCoor, f_lCoor; // Coarse and Fine local coordinate vectors
726 Cmpnts ***c_coor;
727 const Cmpnts ***f_coor; // Use const for read-only access
728 DMDALocalInfo c_info;
729 PetscInt ih, jh, kh; // Fine-grid indices corresponding to coarse-grid i,j,k
730
731 PetscFunctionBeginUser;
732
733 PROFILE_FUNCTION_BEGIN;
734
735 LOG_ALLOW_SYNC(LOCAL, LOG_DEBUG, "Rank %d: Restricting coords from level %d to level %d for block %d\n",
736 fine_user->simCtx->rank, fine_user->thislevel, coarse_user->thislevel, coarse_user->_this);
737
738 ierr = DMDAGetLocalInfo(coarse_user->da, &c_info); CHKERRQ(ierr);
739
740 ierr = DMGetCoordinatesLocal(coarse_user->da, &c_lCoor); CHKERRQ(ierr);
741 ierr = DMGetCoordinatesLocal(fine_user->da, &f_lCoor); CHKERRQ(ierr);
742
743 ierr = DMDAVecGetArray(coarse_user->fda, c_lCoor, &c_coor); CHKERRQ(ierr);
744 ierr = DMDAVecGetArrayRead(fine_user->fda, f_lCoor, &f_coor); CHKERRQ(ierr);
745
746 // Get the local owned range of the coarse grid.
747 PetscInt xs = c_info.xs, xe = c_info.xs + c_info.xm;
748 PetscInt ys = c_info.ys, ye = c_info.ys + c_info.ym;
749 PetscInt zs = c_info.zs, ze = c_info.zs + c_info.zm;
750
751 // Get the global dimensions of the coarse grid.
752 PetscInt mx = c_info.mx, my = c_info.my, mz = c_info.mz;
753
754 // If this process owns the maximum boundary node, contract the loop by one
755 // to prevent the index doubling `2*i` from going out of bounds.
756 // This is also ensuring we do not manipulate the unphysical layer of coors present in the finest level.
757 if (xe == mx) xe--;
758 if (ye == my) ye--;
759 if (ze == mz) ze--;
760
761 for (PetscInt k = zs; k < ze; k++) {
762 for (PetscInt j = ys; j < ye; j++) {
763 for (PetscInt i = xs; i < xe; i++) {
764 // Determine the corresponding parent node index on the FINE grid,
765 // respecting the semi-coarsening flags of the FINE grid's UserCtx.
766 ih = coarse_user->isc ? i : 2 * i;
767 jh = coarse_user->jsc ? j : 2 * j;
768 kh = coarse_user->ksc ? k : 2 * k;
769
770 // LOG_ALLOW(GLOBAL,LOG_DEBUG," [kh][ih][jh] = %d,%d,%d - k,j,i = %d,%d,%d.\n",kh,jh,ih,k,j,i);
771
772 c_coor[k][j][i] = f_coor[kh][jh][ih];
773 }
774 }
775 }
776
777 ierr = DMDAVecRestoreArray(coarse_user->fda, c_lCoor, &c_coor); CHKERRQ(ierr);
778 ierr = DMDAVecRestoreArrayRead(fine_user->fda, f_lCoor, &f_coor); CHKERRQ(ierr);
779
780 // After populating the local portion, scatter to update ghost regions.
781 Vec c_gCoor;
782 ierr = DMGetCoordinates(coarse_user->da, &c_gCoor); CHKERRQ(ierr);
783 ierr = DMLocalToGlobalBegin(coarse_user->fda, c_lCoor, INSERT_VALUES, c_gCoor); CHKERRQ(ierr);
784 ierr = DMLocalToGlobalEnd(coarse_user->fda, c_lCoor, INSERT_VALUES, c_gCoor); CHKERRQ(ierr);
785 ierr = DMGlobalToLocalBegin(coarse_user->fda, c_gCoor, INSERT_VALUES, c_lCoor); CHKERRQ(ierr);
786 ierr = DMGlobalToLocalEnd(coarse_user->fda, c_gCoor, INSERT_VALUES, c_lCoor); CHKERRQ(ierr);
787
788 PROFILE_FUNCTION_END;
789
790 PetscFunctionReturn(0);
791}
Here is the caller graph for this function:

◆ AssignAllGridCoordinates()

PetscErrorCode AssignAllGridCoordinates (SimCtx *simCtx)

Orchestrates the assignment of physical coordinates to all DMDA objects.

This function manages the entire process of populating the coordinate vectors for every DMDA across all multigrid levels and blocks. It follows a two-part strategy that is essential for multigrid methods:

  1. Populate Finest Level: It first loops through each block and calls a helper (SetFinestLevelCoordinates) to set the physical coordinates for the highest-resolution grid (the finest multigrid level).
  2. Restrict to Coarser Levels: It then iterates downwards from the finest level, calling a helper (RestrictCoordinates) to copy the coordinate values from the fine grid nodes to their corresponding parent nodes on the coarser grids. This ensures all levels represent the exact same geometry.
Parameters
simCtx : The master SimCtx, containing the configured UserCtx hierarchy.
Returns
PetscErrorCode 0 on success, or a PETSc error code on failure.

Definition at line 370 of file grid.c.

371{
372 PetscErrorCode ierr;
373 UserMG *usermg = &simCtx->usermg;
374 PetscInt nblk = simCtx->block_number;
375
376 PetscFunctionBeginUser;
377
378 PROFILE_FUNCTION_BEGIN;
379
380 LOG_ALLOW(GLOBAL, LOG_INFO, "Assigning physical coordinates to all grid DMs...\n");
381
382 // --- Part 1: Populate the Finest Grid Level ---
383 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Setting coordinates for the finest grid level (%d)...\n", usermg->mglevels - 1);
384 for (PetscInt bi = 0; bi < nblk; bi++) {
385 UserCtx *fine_user = &usermg->mgctx[usermg->mglevels - 1].user[bi];
386 ierr = SetFinestLevelCoordinates(fine_user); CHKERRQ(ierr);
387 LOG_ALLOW(GLOBAL,LOG_TRACE,"The Finest level coordinates for block %d have been set.\n",bi);
388 if(is_function_allowed(__FUNCT__)){
389 ierr = LOG_FIELD_MIN_MAX(fine_user,"Coordinates");
390 }
391 }
392 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Finest level coordinates have been set for all blocks.\n");
393
394 // --- Part 2: Restrict Coordinates to Coarser Levels ---
395 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Restricting coordinates to coarser grid levels...\n");
396 for (PetscInt level = usermg->mglevels - 2; level >= 0; level--) {
397 for (PetscInt bi = 0; bi < nblk; bi++) {
398 UserCtx *coarse_user = &usermg->mgctx[level].user[bi];
399 UserCtx *fine_user = &usermg->mgctx[level + 1].user[bi];
400 ierr = RestrictCoordinates(coarse_user, fine_user); CHKERRQ(ierr);
401
402 LOG_ALLOW(GLOBAL,LOG_TRACE,"Coordinates restricted to block %d level %d.\n",bi,level);
403 if(is_function_allowed(__FUNCT__)){
404 ierr = LOG_FIELD_MIN_MAX(coarse_user,"Coordinates");
405 }
406 }
407 }
408
409 LOG_ALLOW(GLOBAL, LOG_INFO, "Physical coordinates assigned to all grid levels and blocks.\n");
410
411 PROFILE_FUNCTION_END;
412
413 PetscFunctionReturn(0);
414}
Here is the call graph for this function:
Here is the caller graph for this function:
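Taken together, the three public orchestrators documented in this file (DefineAllGridDimensions, InitializeAllGridDMs, AssignAllGridCoordinates) form the grid-setup phase. A minimal driver sketch, assuming simCtx and its UserCtx/MGCtx hierarchy have already been allocated and mglevels set; the wrapper name SetupGridHierarchy is hypothetical, not part of grid.c:

    PetscErrorCode SetupGridHierarchy(SimCtx *simCtx)
    {
        PetscErrorCode ierr;
        PetscFunctionBeginUser;
        /* 1. Parse grid dimensions/geometry for every block (finest level). */
        ierr = DefineAllGridDimensions(simCtx); CHKERRQ(ierr);
        /* 2. Build the DMDA hierarchy, coarsest to finest, with aligned decompositions. */
        ierr = InitializeAllGridDMs(simCtx); CHKERRQ(ierr);
        /* 3. Fill finest-level coordinates, then restrict them to coarser levels. */
        ierr = AssignAllGridCoordinates(simCtx); CHKERRQ(ierr);
        PetscFunctionReturn(0);
    }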

◆ ComputeStretchedCoord()

static PetscReal ComputeStretchedCoord (PetscInt i, PetscInt N, PetscReal L, PetscReal r)
inlinestatic

Computes a stretched coordinate along one dimension.

This function computes a coordinate based on a geometric stretching ratio. If the ratio (r) is 1.0, a uniform distribution is used: x(i) = L * (i/(N-1))

If r != 1.0, a geometric stretching is applied: x(i) = L * [ (r^(i/(N-1)) - 1) / (r - 1) ]

Here:

  • i : The current index along the dimension.
  • N : The number of nodes along that dimension (the index runs from 0 to N-1).
  • L : The length of the domain along that dimension.
  • r : The stretching ratio. r > 1.0 stretches the grid in a geometric fashion increasing spacing away from the start, whereas 0 < r < 1.0 would cluster points near the start.
Parameters
[in] i : Current index (0 <= i <= N-1).
[in] N : Number of nodes along the dimension.
[in] L : Total length of the domain.
[in] r : Stretching ratio.
Returns
PetscReal The computed coordinate at index i.
Note
This function does not return a PetscErrorCode because it does not allocate memory or call PETSc routines that can fail. It is just a helper calculation function.

Definition at line 524 of file grid.c.

525{
526 if (N <=1) return 0.0;
527 PetscReal fraction = (PetscReal)i / ((PetscReal)N - 1.0);
528 if (PetscAbsReal(r - 1.0) < 1.0e-9) { // Use a tolerance for float comparison
529 return L * fraction;
530 } else {
531 return L * (PetscPowReal(r, fraction) - 1.0) / (r - 1.0);
532 }
533}
Here is the caller graph for this function:
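A quick numeric check of the formula above (illustrative only; since the helper is static, such a loop would have to live inside grid.c itself). With N = 5 nodes, L = 1.0 and r = 2.0, the fraction is i/4 and x(i) = 2^(i/4) - 1, so nodes cluster near the start of the interval:

    /* Expected output: 0.0000, 0.1892, 0.4142, 0.6818, 1.0000
     * (spacing grows with i, consistent with r > 1). */
    for (PetscInt i = 0; i < 5; i++) {
        PetscReal x = ComputeStretchedCoord(i, 5, 1.0, 2.0);
        PetscPrintf(PETSC_COMM_SELF, "x(%d) = %.4f\n", (int)i, (double)x);
    }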

◆ ComputeLocalBoundingBox()

PetscErrorCode ComputeLocalBoundingBox (UserCtx *user, BoundingBox *localBBox)

Computes the local bounding box of the grid on the current process.

This function calculates the minimum and maximum coordinates (x, y, z) of the local grid points owned by the current MPI process. It iterates over the local portion of the grid, examines each grid point's coordinates, and updates the minimum and maximum values accordingly.

The computed bounding box is stored in the provided localBBox structure, and the user->bbox field is also updated with this bounding box for consistency within the user context.

Parameters
[in] user : Pointer to the user-defined context containing grid information. This context must be properly initialized before calling this function.
[out] localBBox : Pointer to the BoundingBox structure where the computed local bounding box will be stored. The structure should be allocated by the caller.
Returns
PetscErrorCode Returns 0 on success, non-zero on failure.

Definition at line 814 of file grid.c.

815{
816 PetscErrorCode ierr;
817 PetscInt i, j, k;
818 PetscMPIInt rank;
819 PetscInt xs, ys, zs, xe, ye, ze;
820 DMDALocalInfo info;
821 Vec coordinates;
822 Cmpnts ***coordArray;
823 Cmpnts minCoords, maxCoords;
824
825 PetscFunctionBeginUser;
826
827 PROFILE_FUNCTION_BEGIN;
828
829 // Start of function execution
830 LOG_ALLOW(GLOBAL, LOG_INFO, "Entering the function.\n");
831
832 // Validate input Pointers
833 if (!user) {
834 LOG_ALLOW(LOCAL, LOG_ERROR, "Input 'user' Pointer is NULL.\n");
835 return PETSC_ERR_ARG_NULL;
836 }
837 if (!localBBox) {
838 LOG_ALLOW(LOCAL, LOG_ERROR, "Output 'localBBox' Pointer is NULL.\n");
839 return PETSC_ERR_ARG_NULL;
840 }
841
842 // Get MPI rank
843 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
844
845 // Get the local coordinates vector from the DMDA
846 ierr = DMGetCoordinatesLocal(user->da, &coordinates);
847 if (ierr) {
848 LOG_ALLOW(LOCAL, LOG_ERROR, "Error getting local coordinates vector.\n");
849 return ierr;
850 }
851
852 if (!coordinates) {
853 LOG_ALLOW(LOCAL, LOG_ERROR, "Coordinates vector is NULL.\n");
854 return PETSC_ERR_ARG_NULL;
855 }
856
857 // Access the coordinate array for reading
858 ierr = DMDAVecGetArrayRead(user->fda, coordinates, &coordArray);
859 if (ierr) {
860 LOG_ALLOW(LOCAL, LOG_ERROR, "Error accessing coordinate array.\n");
861 return ierr;
862 }
863
864 // Get the local grid information (indices and sizes)
865 ierr = DMDAGetLocalInfo(user->da, &info);
866 if (ierr) {
867 LOG_ALLOW(LOCAL, LOG_ERROR, "Error getting DMDA local info.\n");
868 return ierr;
869 }
870
871
872 xs = info.gxs; xe = xs + info.gxm;
873 ys = info.gys; ye = ys + info.gym;
874 zs = info.gzs; ze = zs + info.gzm;
875
876 /*
877 xs = info.xs; xe = xs + info.xm;
878 ys = info.ys; ye = ys + info.ym;
879 zs = info.zs; ze = zs + info.zm;
880 */
881
882 // Initialize min and max coordinates with extreme values
883 minCoords.x = minCoords.y = minCoords.z = PETSC_MAX_REAL;
884 maxCoords.x = maxCoords.y = maxCoords.z = PETSC_MIN_REAL;
885
886 LOG_ALLOW(LOCAL, LOG_TRACE, "[Rank %d] Grid indices (Including Ghosts): xs=%d, xe=%d, ys=%d, ye=%d, zs=%d, ze=%d.\n",rank, xs, xe, ys, ye, zs, ze);
887
888 // Iterate over the local grid to find min and max coordinates
889 for (k = zs; k < ze; k++) {
890 for (j = ys; j < ye; j++) {
891 for (i = xs; i < xe; i++) {
892 // Only consider nodes within the physical domain.
893 if(i < user->IM && j < user->JM && k < user->KM){
894 Cmpnts coord = coordArray[k][j][i];
895
896 // Update min and max coordinates
897 if (coord.x < minCoords.x) minCoords.x = coord.x;
898 if (coord.y < minCoords.y) minCoords.y = coord.y;
899 if (coord.z < minCoords.z) minCoords.z = coord.z;
900
901 if (coord.x > maxCoords.x) maxCoords.x = coord.x;
902 if (coord.y > maxCoords.y) maxCoords.y = coord.y;
903 if (coord.z > maxCoords.z) maxCoords.z = coord.z;
904 }
905 }
906 }
907 }
908
909
910 // Add tolerance to bboxes.
911 minCoords.x = minCoords.x - BBOX_TOLERANCE;
912 minCoords.y = minCoords.y - BBOX_TOLERANCE;
913 minCoords.z = minCoords.z - BBOX_TOLERANCE;
914
915 maxCoords.x = maxCoords.x + BBOX_TOLERANCE;
916 maxCoords.y = maxCoords.y + BBOX_TOLERANCE;
917 maxCoords.z = maxCoords.z + BBOX_TOLERANCE;
918
919 LOG_ALLOW(LOCAL,LOG_DEBUG," Tolerance added to the limits: %.8e .\n",(PetscReal)BBOX_TOLERANCE);
920
921 // Log the computed min and max coordinates
922 LOG_ALLOW(LOCAL, LOG_INFO,"[Rank %d] Bounding Box Ranges = X[%.6f, %.6f], Y[%.6f,%.6f], Z[%.6f, %.6f].\n",rank,minCoords.x, maxCoords.x,minCoords.y, maxCoords.y, minCoords.z, maxCoords.z);
923
924
925
926 // Restore the coordinate array
927 ierr = DMDAVecRestoreArrayRead(user->fda, coordinates, &coordArray);
928 if (ierr) {
929 LOG_ALLOW(LOCAL, LOG_ERROR, "Error restoring coordinate array.\n");
930 return ierr;
931 }
932
933 // Set the local bounding box
934 localBBox->min_coords = minCoords;
935 localBBox->max_coords = maxCoords;
936
937 // Update the bounding box inside the UserCtx for consistency
938 user->bbox = *localBBox;
939
940 LOG_ALLOW(GLOBAL, LOG_INFO, "Exiting the function successfully.\n");
941
942 PROFILE_FUNCTION_END;
943
944 PetscFunctionReturn(0);
945}
Here is the caller graph for this function:

◆ GatherAllBoundingBoxes()

PetscErrorCode GatherAllBoundingBoxes (UserCtx *user, BoundingBox **allBBoxes)

Gathers local bounding boxes from all MPI processes to rank 0.

Each rank computes its local bounding box, then all ranks participate in an MPI_Gather to send their BoundingBox to rank 0. Rank 0 allocates the result array and returns it via allBBoxes.

Parameters
[in] user : Pointer to UserCtx (must be non-NULL).
[out] allBBoxes : On rank 0, receives a malloc'd array with one BoundingBox per MPI rank. On other ranks, set to NULL.
Returns
PetscErrorCode

Definition at line 962 of file grid.c.

963{
964 PetscErrorCode ierr;
965 PetscMPIInt rank, size;
966 BoundingBox *bboxArray = NULL;
967 BoundingBox localBBox;
968
969 PetscFunctionBeginUser;
970
971 PROFILE_FUNCTION_BEGIN;
972
973 /* Validate */
974 if (!user || !allBBoxes) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL,
975 "GatherAllBoundingBoxes: NULL pointer");
976
977 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRMPI(ierr);
978 ierr = MPI_Comm_size(PETSC_COMM_WORLD, &size); CHKERRMPI(ierr);
979
980 /* Compute local bbox */
981 ierr = ComputeLocalBoundingBox(user, &localBBox); CHKERRQ(ierr);
982
983 /* Ensure everyone is synchronized before the gather */
984 MPI_Barrier(PETSC_COMM_WORLD);
986 "Rank %d: about to MPI_Gather(localBBox)\n", rank);
987
988 /* Allocate on root */
989 if (rank == 0) {
990 bboxArray = (BoundingBox*)malloc(size * sizeof(BoundingBox));
991 if (!bboxArray) SETERRABORT(PETSC_COMM_WORLD, PETSC_ERR_MEM,
992 "GatherAllBoundingBoxes: malloc failed");
993 }
994
995 /* Collective: every rank must call */
996 ierr = MPI_Gather(&localBBox, sizeof(BoundingBox), MPI_BYTE,
997 bboxArray, sizeof(BoundingBox), MPI_BYTE,
998 0, PETSC_COMM_WORLD);
999 CHKERRMPI(ierr);
1000
1001 MPI_Barrier(PETSC_COMM_WORLD);
1003 "Rank %d: completed MPI_Gather(localBBox)\n", rank);
1004
1005 /* Return result */
1006 if (rank == 0) {
1007 *allBBoxes = bboxArray;
1008 } else {
1009 *allBBoxes = NULL;
1010 }
1011
1012 PROFILE_FUNCTION_END;
1013
1014 PetscFunctionReturn(0);
1015}
Here is the call graph for this function:
Here is the caller graph for this function:

◆ BroadcastAllBoundingBoxes()

PetscErrorCode BroadcastAllBoundingBoxes (UserCtx *user, BoundingBox **bboxlist)

Broadcasts the bounding box list from rank 0 to all ranks.

Broadcasts the bounding box information collected on rank 0 to all other ranks.

After GatherAllBoundingBoxes, only rank 0 holds the array of size boxes (one per MPI rank). This routine makes sure every rank ends up with its own malloc'd copy.

Parameters
[in] user : Pointer to UserCtx (unused here, but kept for signature consistency).
[in,out] bboxlist : On entry: rank 0's array; on exit: every rank's own copy.
Returns
PetscErrorCode

Definition at line 1030 of file grid.c.

1031{
1032 PetscErrorCode ierr;
1033 PetscMPIInt rank, size;
1034
1035 PetscFunctionBeginUser;
1036
1037 PROFILE_FUNCTION_BEGIN;
1038
1039 if (!bboxlist) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL,
1040 "BroadcastAllBoundingBoxes: NULL pointer");
1041
1042 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRMPI(ierr);
1043 ierr = MPI_Comm_size(PETSC_COMM_WORLD, &size); CHKERRMPI(ierr);
1044
1045 /* Non-root ranks must allocate before the Bcast */
1046 if (rank != 0) {
1047 *bboxlist = (BoundingBox*)malloc(size * sizeof(BoundingBox));
1048 if (!*bboxlist) SETERRABORT(PETSC_COMM_WORLD, PETSC_ERR_MEM,
1049 "BroadcastAllBoundingBoxes: malloc failed");
1050 }
1051
1052 MPI_Barrier(PETSC_COMM_WORLD);
1054 "Rank %d: about to MPI_Bcast(%d boxes)\n", rank, size);
1055
1056 /* Collective: every rank must call */
1057 ierr = MPI_Bcast(*bboxlist, size * sizeof(BoundingBox), MPI_BYTE,
1058 0, PETSC_COMM_WORLD);
1059 CHKERRMPI(ierr);
1060
1061 MPI_Barrier(PETSC_COMM_WORLD);
1063 "Rank %d: completed MPI_Bcast(%d boxes)\n", rank, size);
1064
1065
1066 PROFILE_FUNCTION_END;
1067
1068 PetscFunctionReturn(0);
1069}
Here is the caller graph for this function:
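A typical usage sketch for the gather/broadcast pair (hypothetical call site; the particle-location use is one plausible reason every rank needs the full list in a particle-in-cell code):

    BoundingBox *bboxlist = NULL;

    /* Collective on PETSC_COMM_WORLD: rank 0 gathers one box per rank... */
    ierr = GatherAllBoundingBoxes(user, &bboxlist); CHKERRQ(ierr);
    /* ...then every rank receives its own malloc'd copy of the full list. */
    ierr = BroadcastAllBoundingBoxes(user, &bboxlist); CHKERRQ(ierr);

    /* bboxlist[r] now bounds rank r's subdomain on every process,
     * e.g. for deciding which ranks might own a given particle. */
    free(bboxlist);  /* allocated with malloc, so free(), not PetscFree() */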

◆ CalculateInletProperties()

PetscErrorCode CalculateInletProperties (UserCtx *user)

Calculates the center and area of the primary INLET face.

This function identifies the primary INLET face from the boundary face configurations, computes its geometric center and total area using a generic utility function, and stores these results in the simulation context.

Parameters
user : Pointer to the UserCtx containing boundary face information.
Returns
PetscErrorCode

Definition at line 1083 of file grid.c.

1084{
1085 PetscErrorCode ierr;
1086 BCFace inlet_face_id = -1;
1087 PetscBool inlet_found = PETSC_FALSE;
1088
1089 PetscFunctionBeginUser;
1090 PROFILE_FUNCTION_BEGIN;
1091
1092 // 1. Identify the primary inlet face from the configuration
1093 for (int i = 0; i < 6; i++) {
1094 if (user->boundary_faces[i].mathematical_type == INLET) {
1095 inlet_face_id = user->boundary_faces[i].face_id;
1096 inlet_found = PETSC_TRUE;
1097 break; // Use the first inlet found
1098 }
1099 }
1100
1101 if (!inlet_found) {
1102 LOG_ALLOW(GLOBAL, LOG_INFO, "No INLET face found. Skipping inlet center calculation.\n");
1103 PROFILE_FUNCTION_END;
1104 PetscFunctionReturn(0);
1105 }
1106
1107 Cmpnts inlet_center;
1108 PetscReal inlet_area;
1109
1110 // 2. Call the generic utility to compute the center and area of any face.
1111 ierr = CalculateFaceCenterAndArea(user,inlet_face_id,&inlet_center,&inlet_area); CHKERRQ(ierr);
1112
1113 // 3. Store results in the SimCtx
1114 user->simCtx->CMx_c = inlet_center.x;
1115 user->simCtx->CMy_c = inlet_center.y;
1116 user->simCtx->CMz_c = inlet_center.z;
1117 user->simCtx->AreaInSum = inlet_area;
1118
1120 "Rank[%d] Inlet Center: (%.6f, %.6f, %.6f), Area: %.6f\n",
1121 inlet_center.x, inlet_center.y, inlet_center.z, inlet_area);
1122
1123 PROFILE_FUNCTION_END;
1124 PetscFunctionReturn(0);
1125
1126}
Here is the call graph for this function:
Here is the caller graph for this function:

◆ CalculateOutletProperties()

PetscErrorCode CalculateOutletProperties (UserCtx *user)

Calculates the center and area of the primary OUTLET face.

This function identifies the primary OUTLET face from the boundary face configurations, computes its geometric center and total area using a generic utility function, and stores these results in the simulation context.

Parameters
user : Pointer to the UserCtx containing boundary face information.
Returns
PetscErrorCode

Definition at line 1139 of file grid.c.

1140{
1141 PetscErrorCode ierr;
1142 BCFace outlet_face_id = -1;
1143 PetscBool outlet_found = PETSC_FALSE;
1144 PetscFunctionBeginUser;
1146 // 1. Identify the primary outlet face from the configuration
1147 for (int i = 0; i < 6; i++) {
1148 if (user->boundary_faces[i].mathematical_type == OUTLET) {
1149 outlet_face_id = user->boundary_faces[i].face_id;
1150 outlet_found = PETSC_TRUE;
1151 break; // Use the first outlet found
1152 }
1153 }
1154 if (!outlet_found) {
1155 LOG_ALLOW(GLOBAL, LOG_INFO, "No OUTLET face found. Skipping outlet center calculation.\n");
1157 PetscFunctionReturn(0);
1158 }
1159 PetscReal outlet_area;
1160 Cmpnts outlet_center;
1161 // 2. Call the generic utility to compute the center and area of any face
1162 ierr = CalculateFaceCenterAndArea(user,outlet_face_id,&outlet_center,&outlet_area); CHKERRQ(ierr);
1163 // 3. Store results in the SimCtx
1164 user->simCtx->AreaOutSum = outlet_area;
1165
1167 "Outlet Center: (%.6f, %.6f, %.6f), Area: %.6f\n",
1168 outlet_center.x, outlet_center.y, outlet_center.z, outlet_area);
1169
1171 PetscFunctionReturn(0);
1172}

◆ CalculateFaceCenterAndArea()

PetscErrorCode CalculateFaceCenterAndArea ( UserCtx *  user,
BCFace  face_id,
Cmpnts *  face_center,
PetscReal *  face_area 
)

Calculates the geometric center and total area of a specified boundary face.

This function computes two key properties of a boundary face in the computational domain:

  1. Geometric Center: The average (x,y,z) position of all physical nodes on the face
  2. Total Area: The sum of face area vector magnitudes from all non-solid cells adjacent to the face

Indexing Architecture

The solver uses different indexing conventions for different field types:

Node-Centered Fields (Coordinates):

  • Direct indexing: Node n stored at coor[n]
  • For mx=26: Physical nodes [0-24], Dummy at [25]
  • For mz=98: Physical nodes [0-96], Dummy at [97]

Face-Centered Fields (Metrics: csi, eta, zet):

  • Direct indexing: Face n stored at csi/eta/zet[n]
  • For mx=26: Physical faces [0-24], Dummy at [25]
  • For mz=98: Physical faces [0-96], Dummy at [97]
  • Face at index k bounds cells k-1 and k

Cell-Centered Fields (nvert):

  • Shifted indexing: Physical cell c stored at nvert[c+1]
  • For mx=26 (25 cells): Cell 0→nvert[1], Cell 23→nvert[24]
  • For mz=98 (96 cells): Cell 0→nvert[1], Cell 95→nvert[96]
  • nvert[0] and nvert[mx-1] are ghost values

Face-to-Index Mapping

Example for a domain with mx=26, my=26, mz=98:

Face ID         Node Index   Face Metric      Adjacent Cell (shifted)      Physical Extent
BC_FACE_NEG_X   i=0          csi[k][j][0]     nvert[k][j][1]   (Cell 0)    j∈[0,24], k∈[0,96]
BC_FACE_POS_X   i=24         csi[k][j][24]    nvert[k][j][24]  (Cell 23)   j∈[0,24], k∈[0,96]
BC_FACE_NEG_Y   j=0          eta[k][0][i]     nvert[k][1][i]   (Cell 0)    i∈[0,24], k∈[0,96]
BC_FACE_POS_Y   j=24         eta[k][24][i]    nvert[k][24][i]  (Cell 23)   i∈[0,24], k∈[0,96]
BC_FACE_NEG_Z   k=0          zet[0][j][i]     nvert[1][j][i]   (Cell 0)    i∈[0,24], j∈[0,24]
BC_FACE_POS_Z   k=96         zet[96][j][i]    nvert[96][j][i]  (Cell 95)   i∈[0,24], j∈[0,24]

Algorithm

The function performs two separate computations with different loop bounds:

1. Center Calculation (uses ALL physical nodes):

  • Loop over all physical nodes on the face (excluding dummy indices)
  • Accumulate coordinate sums: Σx, Σy, Σz
  • Count number of nodes
  • Average: center = (Σx/n, Σy/n, Σz/n)

2. Area Calculation (uses INTERIOR cells only):

  • Loop over interior cell range to avoid accessing ghost values in nvert
  • For each face adjacent to a fluid cell (nvert < 0.1):
    • Compute area magnitude: |csi/eta/zet| = √(x² + y² + z²)
    • Accumulate to total area

Loop Bound Details

Why different bounds for center vs. area?

For BC_FACE_NEG_X at i=0 with my=26, mz=98:

Center calculation (coordinates):

  • j ∈ [ys, j_max): Includes j=[0,24] (25 nodes), excludes dummy at j=25
  • k ∈ [zs, k_max): Includes k=[0,96] (97 nodes), excludes dummy at k=97
  • Total: 25 × 97 = 2,425 nodes

Area calculation (nvert checks):

  • j ∈ [lys, lye): j=[1,24] (24 values), excludes boundaries
  • k ∈ [lzs, lze): k=[1,96] (96 values), excludes boundaries
  • Why restricted?
    • At j=0: nvert[k][0][1] is ghost (no cell at j=-1)
    • At j=25: nvert[k][25][1] is ghost (no cell at j=24, index 25 is dummy)
    • At k=0: nvert[0][j][1] is ghost (no cell at k=-1)
    • At k=97: nvert[97][j][1] is ghost (no cell at k=96, index 97 is dummy)
  • Total: 24 × 96 = 2,304 interior cells adjacent to face

Area Calculation Formulas

Face area contributions are computed from metric tensor magnitudes:

  • i-faces (±Xi): Area = |csi| = √(csi_x² + csi_y² + csi_z²)
  • j-faces (±Eta): Area = |eta| = √(eta_x² + eta_y² + eta_z²)
  • k-faces (±Zeta): Area = |zet| = √(zet_x² + zet_y² + zet_z²)
Parameters
[in]	user	Pointer to UserCtx containing grid info, DMs, and field vectors
[in]	face_id	Enum identifying which boundary face to analyze (BC_FACE_NEG_X, etc.)
[out]	face_center	Pointer to Cmpnts structure to store the computed geometric center (x,y,z)
[out]	face_area	Pointer to PetscReal to store the computed total face area
Returns
PetscErrorCode Returns 0 on success, non-zero PETSc error code on failure
Note
This function uses MPI_Allreduce, so it must be called collectively by all ranks
Only ranks that own the specified boundary face contribute to the calculation
Center calculation includes ALL physical nodes on the face
Area calculation ONLY includes faces adjacent to fluid cells (nvert < 0.1)
Dummy/unused indices (e.g., k=97, j=25 for standard test case) are excluded
Warning
Assumes grid and field arrays have been properly initialized
Incorrect face_id values will result in zero contribution from all ranks
See also
CanRankServiceFace() for determining rank ownership of boundary faces
BCFace enum for valid face_id values
LOG_FIELD_ANATOMY() for debugging field indexing
Example Usage:
Cmpnts inlet_center;
PetscReal inlet_area;
ierr = CalculateFaceCenterAndArea(user, BC_FACE_NEG_Z, &inlet_center, &inlet_area); CHKERRQ(ierr);
PetscPrintf(PETSC_COMM_WORLD, "Inlet center: (%.4f, %.4f, %.4f), Area: %.6f\n",
inlet_center.x, inlet_center.y, inlet_center.z, inlet_area);

Definition at line 1290 of file grid.c.

1292{
1293 PetscErrorCode ierr;
1294 DMDALocalInfo info;
1295
1296 // ========================================================================
1297 // Local accumulators for this rank's contribution
1298 // ========================================================================
1299 PetscReal local_sum[3] = {0.0, 0.0, 0.0}; ///< Local sum of (x,y,z) coordinates
1300 PetscReal localAreaSum = 0.0; ///< Local sum of face area magnitudes
1301 PetscCount local_n_points = 0; ///< Local count of nodes
1302
1303 // ========================================================================
1304 // Global accumulators after MPI reduction
1305 // ========================================================================
1306 PetscReal global_sum[3] = {0.0, 0.0, 0.0}; ///< Global sum of coordinates
1307 PetscReal globalAreaSum = 0.0; ///< Global sum of areas
1308 PetscCount global_n_points = 0; ///< Global count of nodes
1309
1310 // ========================================================================
1311 // Grid information and array pointers
1312 // ========================================================================
1313 DM da = user->da;
1314 info = user->info;
1315
1316 // Rank's owned range in global indices
1317 PetscInt xs = info.xs, xe = info.xs + info.xm; ///< i-range: [xs, xe)
1318 PetscInt ys = info.ys, ye = info.ys + info.ym; ///< j-range: [ys, ye)
1319 PetscInt zs = info.zs, ze = info.zs + info.zm; ///< k-range: [zs, ze)
1320
1321 // Global domain dimensions (total allocated, includes dummy at end)
1322 PetscInt mx = info.mx, my = info.my, mz = info.mz;
1323 PetscInt IM = user->IM; ///< Physical domain size in i (exclude dummy)
1324 PetscInt JM = user->JM; ///< Physical domain size in j (exclude dummy)
1325 PetscInt KM = user->KM; ///< Physical domain size in k (exclude dummy)
1326
1327 // ========================================================================
1328 // Interior loop bounds (adjusted to avoid ghost/boundary cells)
1329 // These are used for nvert checks where we need valid cell indices
1330 // ========================================================================
1331 PetscInt lxs = xs; if(xs == 0) lxs = xs + 1; ///< Start at 1 if on -Xi boundary
1332 PetscInt lxe = xe; if(xe == mx) lxe = xe - 1; ///< End at mx-1 if on +Xi boundary
1333 PetscInt lys = ys; if(ys == 0) lys = ys + 1; ///< Start at 1 if on -Eta boundary
1334 PetscInt lye = ye; if(ye == my) lye = ye - 1; ///< End at my-1 if on +Eta boundary
1335 PetscInt lzs = zs; if(zs == 0) lzs = zs + 1; ///< Start at 1 if on -Zeta boundary
1336 PetscInt lze = ze; if(ze == mz) lze = ze - 1; ///< End at mz-1 if on +Zeta boundary
1337
1338 // ========================================================================
1339 // Physical node bounds (exclude dummy indices at mx-1, my-1, mz-1)
1340 // These are used for coordinate loops where we want ALL physical nodes
1341 // ========================================================================
1342 PetscInt i_max = (xe == mx) ? mx - 1 : xe; ///< Exclude dummy at i=mx-1 (e.g., i=25)
1343 PetscInt j_max = (ye == my) ? my - 1 : ye; ///< Exclude dummy at j=my-1 (e.g., j=25)
1344 PetscInt k_max = (ze == mz) ? mz - 1 : ze; ///< Exclude dummy at k=mz-1 (e.g., k=97)
1345
1346 // ========================================================================
1347 // Array pointers for field access
1348 // ========================================================================
1349 Vec lCoor; ///< Local ghosted coordinate vector
1350 Cmpnts ***coor; ///< Nodal coordinates [k][j][i]
1351 Cmpnts ***csi, ***eta, ***zet; ///< Face metric tensors [k][j][i]
1352 PetscReal ***nvert; ///< Cell blanking field [k][j][i] (shifted +1)
1353
1354 PetscFunctionBeginUser;
1356
1357 // ========================================================================
1358 // Step 1: Check if this rank owns the specified boundary face
1359 // ========================================================================
1360 PetscBool owns_face = PETSC_FALSE;
1361 ierr = CanRankServiceFace(&info,IM,JM,KM,face_id,&owns_face); CHKERRQ(ierr);
1362 if(owns_face){
1363 // ========================================================================
1364 // Step 2: Get read-only array access for all required fields
1365 // ========================================================================
1366 ierr = DMGetCoordinatesLocal(user->da, &lCoor); CHKERRQ(ierr);
1367 ierr = DMDAVecGetArrayRead(user->fda, lCoor, &coor); CHKERRQ(ierr);
1368 ierr = DMDAVecGetArrayRead(user->da, user->lNvert, &nvert); CHKERRQ(ierr);
1369 ierr = DMDAVecGetArrayRead(user->fda, user->lCsi, &csi); CHKERRQ(ierr);
1370 ierr = DMDAVecGetArrayRead(user->fda, user->lEta, &eta); CHKERRQ(ierr);
1371 ierr = DMDAVecGetArrayRead(user->fda, user->lZet, &zet); CHKERRQ(ierr);
1372
1373 // ========================================================================
1374 // Step 3: Loop over the specified face and accumulate center and area
1375 // ========================================================================
1376 switch (face_id) {
1377
1378 // ====================================================================
1379 // BC_FACE_NEG_X: Face at i=0 (bottom boundary in i-direction)
1380 // ====================================================================
1381 case BC_FACE_NEG_X:
1382 if (xs == 0) {
1383 PetscInt i = 0; // Face is at node index i=0
1384
1385 // ---- Part 1: Center calculation (ALL physical nodes) ----
1386 // Loop over ALL physical nodes on this face
1387 // For my=26, mz=98: j∈[0,24], k∈[0,96] → 25×97 = 2,425 nodes
1388 for (PetscInt k = zs; k < k_max; k++) {
1389 for (PetscInt j = ys; j < j_max; j++) {
1390 // Accumulate coordinates at node [k][j][0]
1391 local_sum[0] += coor[k][j][i].x;
1392 local_sum[1] += coor[k][j][i].y;
1393 local_sum[2] += coor[k][j][i].z;
1394 local_n_points++;
1395 }
1396 }
1397
1398 // ---- Part 2: Area calculation (INTERIOR cells only) ----
1399 // Loop over interior range where nvert checks are valid
1400 // For my=26, mz=98: j∈[1,24], k∈[1,96] → 24×96 = 2,304 cells
1401 for (PetscInt k = lzs; k < lze; k++) {
1402 for (PetscInt j = lys; j < lye; j++) {
1403 // Check if adjacent cell is fluid
1404 // nvert[k][j][i+1] = nvert[k][j][1] checks Cell 0
1405 // (Physical Cell 0 in j-k plane, stored at shifted index [1])
1406 if (nvert[k][j][i+1] < 0.1) {
1407 // Cell is fluid - add face area contribution
1408 // Face area = magnitude of csi metric at [k][j][0]
1409 localAreaSum += sqrt(csi[k][j][i].x * csi[k][j][i].x +
1410 csi[k][j][i].y * csi[k][j][i].y +
1411 csi[k][j][i].z * csi[k][j][i].z);
1412 }
1413 }
1414 }
1415 }
1416 break;
1417
1418 // ====================================================================
1419 // BC_FACE_POS_X: Face at i=IM-1 (top boundary in i-direction)
1420 // ====================================================================
1421 case BC_FACE_POS_X:
1422 if (xe == mx) {
1423 PetscInt i = mx - 2; // Last physical node (e.g., i=24 for mx=26)
1424
1425 // ---- Part 1: Center calculation (ALL physical nodes) ----
1426 for (PetscInt k = zs; k < k_max; k++) {
1427 for (PetscInt j = ys; j < j_max; j++) {
1428 local_sum[0] += coor[k][j][i].x;
1429 local_sum[1] += coor[k][j][i].y;
1430 local_sum[2] += coor[k][j][i].z;
1431 local_n_points++;
1432 }
1433 }
1434
1435 // ---- Part 2: Area calculation (INTERIOR cells only) ----
1436 for (PetscInt k = lzs; k < lze; k++) {
1437 for (PetscInt j = lys; j < lye; j++) {
1438 // Check if adjacent cell is fluid
1439 // nvert[k][j][i] = nvert[k][j][24] checks last cell (Cell 23)
1440 // (Physical Cell 23, stored at shifted index [24])
1441 if (nvert[k][j][i] < 0.1) {
1442 // Face area = magnitude of csi metric at [k][j][24]
1443 localAreaSum += sqrt(csi[k][j][i].x * csi[k][j][i].x +
1444 csi[k][j][i].y * csi[k][j][i].y +
1445 csi[k][j][i].z * csi[k][j][i].z);
1446 }
1447 }
1448 }
1449 }
1450 break;
1451
1452 // ====================================================================
1453 // BC_FACE_NEG_Y: Face at j=0 (bottom boundary in j-direction)
1454 // ====================================================================
1455 case BC_FACE_NEG_Y:
1456 if (ys == 0) {
1457 PetscInt j = 0; // Face is at node index j=0
1458
1459 // ---- Part 1: Center calculation (ALL physical nodes) ----
1460 // For mx=26, mz=98: i∈[0,24], k∈[0,96] → 25×97 = 2,425 nodes
1461 for (PetscInt k = zs; k < k_max; k++) {
1462 for (PetscInt i = xs; i < i_max; i++) {
1463 local_sum[0] += coor[k][j][i].x;
1464 local_sum[1] += coor[k][j][i].y;
1465 local_sum[2] += coor[k][j][i].z;
1466 local_n_points++;
1467 }
1468 }
1469
1470 // ---- Part 2: Area calculation (INTERIOR cells only) ----
1471 // For mx=26, mz=98: i∈[1,24], k∈[1,96] → 24×96 = 2,304 cells
1472 for (PetscInt k = lzs; k < lze; k++) {
1473 for (PetscInt i = lxs; i < lxe; i++) {
1474 // nvert[k][j+1][i] = nvert[k][1][i] checks Cell 0
1475 if (nvert[k][j+1][i] < 0.1) {
1476 // Face area = magnitude of eta metric at [k][0][i]
1477 localAreaSum += sqrt(eta[k][j][i].x * eta[k][j][i].x +
1478 eta[k][j][i].y * eta[k][j][i].y +
1479 eta[k][j][i].z * eta[k][j][i].z);
1480 }
1481 }
1482 }
1483 }
1484 break;
1485
1486 // ====================================================================
1487 // BC_FACE_POS_Y: Face at j=JM-1 (top boundary in j-direction)
1488 // ====================================================================
1489 case BC_FACE_POS_Y:
1490 if (ye == my) {
1491 PetscInt j = my - 2; // Last physical node (e.g., j=24 for my=26)
1492
1493 // ---- Part 1: Center calculation (ALL physical nodes) ----
1494 for (PetscInt k = zs; k < k_max; k++) {
1495 for (PetscInt i = xs; i < i_max; i++) {
1496 local_sum[0] += coor[k][j][i].x;
1497 local_sum[1] += coor[k][j][i].y;
1498 local_sum[2] += coor[k][j][i].z;
1499 local_n_points++;
1500 }
1501 }
1502
1503 // ---- Part 2: Area calculation (INTERIOR cells only) ----
1504 for (PetscInt k = lzs; k < lze; k++) {
1505 for (PetscInt i = lxs; i < lxe; i++) {
1506 // nvert[k][j][i] = nvert[k][24][i] checks last cell (Cell 23)
1507 if (nvert[k][j][i] < 0.1) {
1508 // Face area = magnitude of eta metric at [k][24][i]
1509 localAreaSum += sqrt(eta[k][j][i].x * eta[k][j][i].x +
1510 eta[k][j][i].y * eta[k][j][i].y +
1511 eta[k][j][i].z * eta[k][j][i].z);
1512 }
1513 }
1514 }
1515 }
1516 break;
1517
1518 // ====================================================================
1519 // BC_FACE_NEG_Z: Face at k=0 (inlet, bottom boundary in k-direction)
1520 // ====================================================================
1521 case BC_FACE_NEG_Z:
1522 if (zs == 0) {
1523 PetscInt k = 0; // Face is at node index k=0
1524
1525 // ---- Part 1: Center calculation (ALL physical nodes) ----
1526 // For mx=26, my=26: i∈[0,24], j∈[0,24] → 25×25 = 625 nodes
1527 for (PetscInt j = ys; j < j_max; j++) {
1528 for (PetscInt i = xs; i < i_max; i++) {
1529 local_sum[0] += coor[k][j][i].x;
1530 local_sum[1] += coor[k][j][i].y;
1531 local_sum[2] += coor[k][j][i].z;
1532 local_n_points++;
1533 }
1534 }
1535
1536 // ---- Part 2: Area calculation (INTERIOR cells only) ----
1537 // For mx=26, my=26: i∈[1,24], j∈[1,24] → 24×24 = 576 cells
1538 for (PetscInt j = lys; j < lye; j++) {
1539 for (PetscInt i = lxs; i < lxe; i++) {
1540 // nvert[k+1][j][i] = nvert[1][j][i] checks Cell 0
1541 // (Physical Cell 0 in i-j plane, stored at shifted index [1])
1542 if (nvert[k+1][j][i] < 0.1) {
1543 // Face area = magnitude of zet metric at [0][j][i]
1544 localAreaSum += sqrt(zet[k][j][i].x * zet[k][j][i].x +
1545 zet[k][j][i].y * zet[k][j][i].y +
1546 zet[k][j][i].z * zet[k][j][i].z);
1547 }
1548 }
1549 }
1550 }
1551 break;
1552
1553 // ====================================================================
1554 // BC_FACE_POS_Z: Face at k=KM-1 (outlet, top boundary in k-direction)
1555 // ====================================================================
1556 case BC_FACE_POS_Z:
1557 if (ze == mz) {
1558 PetscInt k = mz - 2; // Last physical node (e.g., k=96 for mz=98)
1559
1560 // ---- Part 1: Center calculation (ALL physical nodes) ----
1561 // For mx=26, my=26: i∈[0,24], j∈[0,24] → 25×25 = 625 nodes
1562 for (PetscInt j = ys; j < j_max; j++) {
1563 for (PetscInt i = xs; i < i_max; i++) {
1564 local_sum[0] += coor[k][j][i].x;
1565 local_sum[1] += coor[k][j][i].y;
1566 local_sum[2] += coor[k][j][i].z;
1567 local_n_points++;
1568 }
1569 }
1570
1571 // ---- Part 2: Area calculation (INTERIOR cells only) ----
1572 // For mx=26, my=26: i∈[1,24], j∈[1,24] → 24×24 = 576 cells
1573 for (PetscInt j = lys; j < lye; j++) {
1574 for (PetscInt i = lxs; i < lxe; i++) {
1575 // nvert[k][j][i] = nvert[96][j][i] checks last cell (Cell 95)
1576 // (Physical Cell 95, stored at shifted index [96])
1577 if (nvert[k][j][i] < 0.1) {
1578 // Face area = magnitude of zet metric at [96][j][i]
1579 localAreaSum += sqrt(zet[k][j][i].x * zet[k][j][i].x +
1580 zet[k][j][i].y * zet[k][j][i].y +
1581 zet[k][j][i].z * zet[k][j][i].z);
1582 }
1583 }
1584 }
1585 }
1586 break;
1587
1588 default:
1589 SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE,
1590 "Unknown face_id %d in CalculateFaceCenterAndArea", face_id);
1591 }
1592
1593 // ========================================================================
1594 // Step 4: Restore array access (release pointers)
1595 // ========================================================================
1596 ierr = DMDAVecRestoreArrayRead(user->fda, lCoor, &coor); CHKERRQ(ierr);
1597 ierr = DMDAVecRestoreArrayRead(user->da, user->lNvert, &nvert); CHKERRQ(ierr);
1598 ierr = DMDAVecRestoreArrayRead(user->fda, user->lCsi, &csi); CHKERRQ(ierr);
1599 ierr = DMDAVecRestoreArrayRead(user->fda, user->lEta, &eta); CHKERRQ(ierr);
1600 ierr = DMDAVecRestoreArrayRead(user->fda, user->lZet, &zet); CHKERRQ(ierr);
1601 }
1602 // ========================================================================
1603 // Step 5: Perform MPI reductions to get global sums
1604 // ========================================================================
1605 // Sum coordinate contributions from all ranks
1606 ierr = MPI_Allreduce(local_sum, global_sum, 3, MPI_DOUBLE, MPI_SUM,
1607 PETSC_COMM_WORLD); CHKERRQ(ierr);
1608
1609 // Sum node counts from all ranks
1610 ierr = MPI_Allreduce(&local_n_points, &global_n_points, 1, MPI_COUNT, MPI_SUM,
1611 PETSC_COMM_WORLD); CHKERRQ(ierr);
1612
1613 // Sum area contributions from all ranks
1614 ierr = MPI_Allreduce(&localAreaSum, &globalAreaSum, 1, MPI_DOUBLE, MPI_SUM,
1615 PETSC_COMM_WORLD); CHKERRQ(ierr);
1616
1617 // ========================================================================
1618 // Step 6: Calculate geometric center by averaging coordinates
1619 // ========================================================================
1620 if (global_n_points > 0) {
1621 face_center->x = global_sum[0] / global_n_points;
1622 face_center->y = global_sum[1] / global_n_points;
1623 face_center->z = global_sum[2] / global_n_points;
1625 "Calculated center for Face %s: (x=%.4f, y=%.4f, z=%.4f) from %lld nodes\n",
1626 BCFaceToString(face_id),
1627 face_center->x, face_center->y, face_center->z,
1628 (long long)global_n_points);
1629 } else {
1630 // No nodes found - this should not happen for a valid face
1632 "WARNING: Face %s identified but no grid points found. Center not calculated.\n",
1633 BCFaceToString(face_id));
1634 face_center->x = face_center->y = face_center->z = 0.0;
1635 }
1636
1637 // ========================================================================
1638 // Step 7: Return computed total area
1639 // ========================================================================
1640 *face_area = globalAreaSum;
1642 "Calculated area for Face %s: Area=%.6f\n",
1643 BCFaceToString(face_id), *face_area);
1644
1645 PetscFunctionReturn(0);
1646}