PICurv 0.1.0
A Parallel Particle-In-Cell Solver for Curvilinear LES
grid.c File Reference
#include "grid.h"
#include "logging.h"
Include dependency graph for grid.c:

Go to the source code of this file.

Macros

#define BBOX_TOLERANCE   1e-6
 
#define __FUNCT__   "ParseAndSetGridInputs"
 
#define __FUNCT__   "DefineAllGridDimensions"
 
#define __FUNCT__   "InitializeSingleGridDM"
 
#define __FUNCT__   "InitializeAllGridDMs"
 
#define __FUNCT__   "AssignAllGridCoordinates"
 
#define __FUNCT__   "SetFinestLevelCoordinates"
 
#define __FUNCT__   "GenerateAndSetCoordinates"
 
#define __FUNCT__   "ReadAndSetCoordinates"
 
#define __FUNCT__   "RestrictCoordinates"
 
#define __FUNCT__   "ComputeLocalBoundingBox"
 
#define __FUNCT__   "GatherAllBoundingBoxes"
 
#define __FUNCT__   "BroadcastAllBoundingBoxes"
 
#define __FUNCT__   "CalculateInletCenter"
 

Functions

static PetscErrorCode ParseAndSetGridInputs (UserCtx *user)
 Determines the grid source and calls the appropriate parsing routine.
 
PetscErrorCode DefineAllGridDimensions (SimCtx *simCtx)
 Orchestrates the parsing and setting of grid dimensions for all blocks.
 
static PetscErrorCode InitializeSingleGridDM (UserCtx *user, UserCtx *coarse_user)
 Creates the DMDA objects (da and fda) for a single UserCtx.
 
PetscErrorCode InitializeAllGridDMs (SimCtx *simCtx)
 Orchestrates the creation of DMDA objects for every block and multigrid level.
 
static PetscErrorCode SetFinestLevelCoordinates (UserCtx *user)
 A router that populates the coordinates for a single finest-level DMDA.
 
static PetscErrorCode GenerateAndSetCoordinates (UserCtx *user)
 Programmatically generates and sets grid coordinates based on user parameters.
 
static PetscErrorCode ReadAndSetCoordinates (UserCtx *user, FILE *fd)
 Reads physical coordinates from a file and populates the DMDA for a specific block.
 
static PetscErrorCode RestrictCoordinates (UserCtx *coarse_user, UserCtx *fine_user)
 Populates coarse grid coordinates by restricting from a fine grid.
 
PetscErrorCode AssignAllGridCoordinates (SimCtx *simCtx)
 Orchestrates the assignment of physical coordinates to all DMDA objects.
 
static PetscReal ComputeStretchedCoord (PetscInt i, PetscInt N, PetscReal L, PetscReal r)
 Computes a stretched coordinate along one dimension.
 
PetscErrorCode ComputeLocalBoundingBox (UserCtx *user, BoundingBox *localBBox)
 Computes the local bounding box of the grid on the current process.
 
PetscErrorCode GatherAllBoundingBoxes (UserCtx *user, BoundingBox **allBBoxes)
 Gathers local bounding boxes from all MPI processes to rank 0.
 
PetscErrorCode BroadcastAllBoundingBoxes (UserCtx *user, BoundingBox **bboxlist)
 Broadcasts the bounding box list from rank 0 to all ranks.
 
PetscErrorCode CalculateInletCenter (UserCtx *user)
 Calculates the geometric center of the primary inlet face.
 

Macro Definition Documentation

◆ BBOX_TOLERANCE

#define BBOX_TOLERANCE   1e-6

Definition at line 6 of file grid.c.

◆ __FUNCT__ [1/13]

#define __FUNCT__   "ParseAndSetGridInputs"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [2/13]

#define __FUNCT__   "DefineAllGridDimensions"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [3/13]

#define __FUNCT__   "InitializeSingleGridDM"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [4/13]

#define __FUNCT__   "InitializeAllGridDMs"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [5/13]

#define __FUNCT__   "AssignAllGridCoordinates"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [6/13]

#define __FUNCT__   "SetFinestLevelCoordinates"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [7/13]

#define __FUNCT__   "GenerateAndSetCoordinates"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [8/13]

#define __FUNCT__   "ReadAndSetCoordinates"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [9/13]

#define __FUNCT__   "RestrictCoordinates"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [10/13]

#define __FUNCT__   "ComputeLocalBoundingBox"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [11/13]

#define __FUNCT__   "GatherAllBoundingBoxes"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [12/13]

#define __FUNCT__   "BroadcastAllBoundingBoxes"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [13/13]

#define __FUNCT__   "CalculateInletCenter"

Definition at line 9 of file grid.c.

Function Documentation

◆ ParseAndSetGridInputs()

static PetscErrorCode ParseAndSetGridInputs ( UserCtx * user)
static

Determines the grid source and calls the appropriate parsing routine.

This function acts as a router. It checks the global simCtx->generate_grid flag (accessed via the user->simCtx back-pointer) to decide whether to call the parser for a programmatically generated grid or for a grid defined in a file.

Parameters
user : Pointer to the UserCtx for a specific block. The function will populate the geometric fields within this struct.
Returns
PetscErrorCode 0 on success, or a PETSc error code on failure.

Definition at line 22 of file grid.c.

23{
24 PetscErrorCode ierr;
25 SimCtx *simCtx = user->simCtx; // Get the global context via the back-pointer
26
27 PetscFunctionBeginUser;
28
30
31 if (simCtx->generate_grid) {
32 LOG_ALLOW_SYNC(GLOBAL, LOG_DEBUG, "Rank %d: Block %d is programmatically generated. Calling generation parser.\n", simCtx->rank, user->_this);
33 ierr = ReadGridGenerationInputs(user); CHKERRQ(ierr);
34 } else {
35 LOG_ALLOW_SYNC(GLOBAL, LOG_DEBUG, "Rank %d: Block %d is file-based. Calling file parser.\n", simCtx->rank, user->_this);
36 ierr = ReadGridFile(user); CHKERRQ(ierr);
37 }
38
40
41 PetscFunctionReturn(0);
42}
PetscErrorCode ReadGridFile(UserCtx *user)
Sets grid dimensions from a file for a SINGLE block using a one-time read cache.
Definition io.c:177
PetscErrorCode ReadGridGenerationInputs(UserCtx *user)
Parses command-line options for a programmatically generated grid for a SINGLE block.
Definition io.c:77
#define LOG_ALLOW_SYNC(scope, level, fmt,...)
----— DEBUG ---------------------------------------— #define LOG_ALLOW(scope, level,...
Definition logging.h:268
#define GLOBAL
Scope for global logging across all processes.
Definition logging.h:47
#define PROFILE_FUNCTION_END
Marks the end of a profiled code block.
Definition logging.h:740
@ LOG_DEBUG
Detailed debugging information.
Definition logging.h:33
#define PROFILE_FUNCTION_BEGIN
Marks the beginning of a profiled code block (typically a function).
Definition logging.h:731
PetscMPIInt rank
Definition variables.h:541
SimCtx * simCtx
Back-pointer to the master simulation context.
Definition variables.h:664
PetscInt _this
Definition variables.h:674
PetscBool generate_grid
Definition variables.h:595
The master context for the entire simulation.
Definition variables.h:538
Here is the call graph for this function:
Here is the caller graph for this function:

◆ DefineAllGridDimensions()

PetscErrorCode DefineAllGridDimensions ( SimCtx * simCtx)

Orchestrates the parsing and setting of grid dimensions for all blocks.

This function serves as the high-level entry point for defining the geometric properties of each grid block in the simulation. It iterates through every block defined by simCtx->block_number.

For each block, it performs two key actions:

  1. It explicitly sets the block's index (_this) in the corresponding UserCtx struct for the finest multigrid level. This makes the context "self-aware".
  2. It calls a helper function (ParseAndSetGridInputs) to handle the detailed work of parsing options or files to populate the rest of the geometric properties for that specific block (e.g., IM, Min_X, rx).
Parameters
simCtx : The master SimCtx, which contains the number of blocks and the UserCtx hierarchy to be configured.
Returns
PetscErrorCode 0 on success, or a PETSc error code on failure.

Definition at line 65 of file grid.c.

66{
67 PetscErrorCode ierr;
68 PetscInt nblk = simCtx->block_number;
69 UserCtx *finest_users;
70
71 PetscFunctionBeginUser;
72
74
75 if (simCtx->usermg.mglevels == 0) {
76 SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_ARG_WRONGSTATE, "MG levels not set. Cannot get finest_users.");
77 }
78 // Get the UserCtx array for the finest grid level
79 finest_users = simCtx->usermg.mgctx[simCtx->usermg.mglevels - 1].user;
80
81 LOG_ALLOW(GLOBAL, LOG_INFO, "Defining grid dimensions for %d blocks...\n", nblk);
82
83 // Loop over each block to configure its grid dimensions and geometry.
84 for (PetscInt bi = 0; bi < nblk; bi++) {
85 LOG_ALLOW_SYNC(GLOBAL, LOG_DEBUG, "Rank %d: --- Configuring Geometry for Block %d ---\n", simCtx->rank, bi);
86
87 // Before calling any helpers, set the block index in the context.
88 // This makes the UserCtx self-aware of which block it represents.
89 LOG_ALLOW(GLOBAL,LOG_DEBUG,"finest_users->_this = %d, bi = %d\n",finest_users[bi]._this,bi);
90 //finest_user[bi]._this = bi;
91
92 // Call the helper function for this specific block. It can now derive
93 // all necessary information from the UserCtx pointer it receives.
94 ierr = ParseAndSetGridInputs(&finest_users[bi]); CHKERRQ(ierr);
95 }
96
98
99 PetscFunctionReturn(0);
100}
static PetscErrorCode ParseAndSetGridInputs(UserCtx *user)
Determines the grid source and calls the appropriate parsing routine.
Definition grid.c:22
#define LOG_ALLOW(scope, level, fmt,...)
Logging macro that checks both the log level and whether the calling function is in the allowed-funct...
Definition logging.h:201
@ LOG_INFO
Informational messages about program execution.
Definition logging.h:32
UserCtx * user
Definition variables.h:427
PetscInt block_number
Definition variables.h:593
UserMG usermg
Definition variables.h:631
PetscInt mglevels
Definition variables.h:434
MGCtx * mgctx
Definition variables.h:437
User-defined context containing data specific to a single computational grid level.
Definition variables.h:661
Here is the call graph for this function:
Here is the caller graph for this function:
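
A minimal usage sketch of the grid-setup sequence that this function begins. It assumes a SimCtx whose multigrid hierarchy (usermg.mglevels, usermg.mgctx) has already been allocated and whose command-line options have been parsed; SetupGridHierarchy is a hypothetical wrapper name, not a routine in grid.c.

#include "grid.h"  /* DefineAllGridDimensions, InitializeAllGridDMs, AssignAllGridCoordinates (assumed declared here) */

/* Hypothetical wrapper: run the three grid orchestrators in the order
 * implied by this file's documentation. */
static PetscErrorCode SetupGridHierarchy(SimCtx *simCtx)
{
    PetscErrorCode ierr;
    PetscFunctionBeginUser;
    /* 1. Parse per-block dimensions and geometry (IM, JM, KM, Min_X, rx, ...). */
    ierr = DefineAllGridDimensions(simCtx); CHKERRQ(ierr);
    /* 2. Create the DMDA objects (da/fda) for every block and multigrid level. */
    ierr = InitializeAllGridDMs(simCtx); CHKERRQ(ierr);
    /* 3. Populate coordinates on the finest level and restrict them downward. */
    ierr = AssignAllGridCoordinates(simCtx); CHKERRQ(ierr);
    PetscFunctionReturn(0);
}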

◆ InitializeSingleGridDM()

static PetscErrorCode InitializeSingleGridDM ( UserCtx * user,
UserCtx * coarse_user 
)
static

Creates the DMDA objects (da and fda) for a single UserCtx.

This function is a direct adaptation of the core logic in MGDACreate. It creates the scalar (da) and vector (fda) DMs for a single grid level.

If a coarse_user context is provided, it performs the critical processor alignment calculation from the legacy code. This ensures the new (fine) DM aligns with the coarse DM for multigrid efficiency. If coarse_user is NULL, it creates the DM with a default PETSc decomposition, intended for the coarsest grid level.

Parameters
user : The UserCtx for which the DMs will be created. Its IM, JM, KM fields must be pre-populated.
coarse_user : The UserCtx of the next-coarser grid level, or NULL if user is the coarsest level.
Returns
PetscErrorCode 0 on success, or a PETSc error code on failure.

Definition at line 120 of file grid.c.

121{
122 PetscErrorCode ierr;
123 SimCtx *simCtx = user->simCtx;
124
125 DMBoundaryType xperiod = (simCtx->i_periodic) ? DM_BOUNDARY_PERIODIC : DM_BOUNDARY_NONE;
126 DMBoundaryType yperiod = (simCtx->j_periodic) ? DM_BOUNDARY_PERIODIC : DM_BOUNDARY_NONE;
127 DMBoundaryType zperiod = (simCtx->k_periodic) ? DM_BOUNDARY_PERIODIC : DM_BOUNDARY_NONE;
128 PetscInt stencil_width = 2; // Stencil width is 2 in the legacy code
129
130 PetscInt *lx = NULL, *ly = NULL, *lz = NULL;
131 PetscInt m, n, p;
132
133 PetscFunctionBeginUser;
134
136
137 if (coarse_user) {
138 // --- This is a FINE grid; it must be aligned with the COARSE grid ---
139 LOG_ALLOW_SYNC(LOCAL, LOG_DEBUG, "Rank %d: [Aligning DM] for block %d level %d (size %dx%dx%d) with level %d\n", simCtx->rank, user->_this, user->thislevel, user->IM, user->JM, user->KM, coarse_user->thislevel);
140
141 DMDAGetInfo(coarse_user->da, NULL, NULL, NULL, NULL, &m, &n, &p, NULL, NULL, NULL, NULL, NULL, NULL);
142 LOG_ALLOW_SYNC(LOCAL, LOG_TRACE, "Rank %d: Coarse grid processor decomposition is %d x %d x %d\n", simCtx->rank, m, n, p);
143
144 // This is the core logic from MGDACreate to ensure processor alignment.
145 PetscInt *lx_contrib, *ly_contrib, *lz_contrib;
146 ierr = PetscMalloc3(m, &lx_contrib, n, &ly_contrib, p, &lz_contrib); CHKERRQ(ierr);
147 ierr = PetscMemzero(lx_contrib, m * sizeof(PetscInt)); CHKERRQ(ierr);
148 ierr = PetscMemzero(ly_contrib, n * sizeof(PetscInt)); CHKERRQ(ierr);
149 ierr = PetscMemzero(lz_contrib, p * sizeof(PetscInt)); CHKERRQ(ierr);
150
151 DMDALocalInfo info;
152 DMDAGetLocalInfo(coarse_user->da, &info);
153 PetscInt xs = info.xs, xe = info.xs + info.xm, mx = info.mx;
154 PetscInt ys = info.ys, ye = info.ys + info.ym, my = info.my;
155 PetscInt zs = info.zs, ze = info.zs + info.zm, mz = info.mz;
156
157 PetscMPIInt rank;
158 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
159 PetscInt proc_i = rank % m;
160 PetscInt proc_j = (rank / m) % n;
161 PetscInt proc_k = rank / (m * n);
162
163 // --- X-Direction Logic (Identical to MGDACreate) ---
164 if (user->isc) lx_contrib[proc_i] = (xe - xs);
165 else {
166 if (m == 1) lx_contrib[0] = user->IM + 1;
167 else if (xs == 0) lx_contrib[0] = 2 * xe - 1;
168 else if (xe == mx) lx_contrib[proc_i] = user->IM + 1 - (2 * xs - 1);
169 else lx_contrib[proc_i] = (xe - xs) * 2;
170 }
171
172 // --- Y-Direction Logic (Identical to MGDACreate) ---
173 if (user->jsc) ly_contrib[proc_j] = (ye - ys);
174 else {
175 if (n == 1) ly_contrib[0] = user->JM + 1;
176 else if (ys == 0) ly_contrib[0] = 2 * ye - 1;
177 else if (ye == my) ly_contrib[proc_j] = user->JM + 1 - (2 * ys - 1);
178 else ly_contrib[proc_j] = (ye - ys) * 2;
179 }
180
181 // --- Z-Direction Logic (Identical to MGDACreate) ---
182 if (user->ksc) lz_contrib[proc_k] = (ze - zs);
183 else {
184 if (p == 1) lz_contrib[0] = user->KM + 1;
185 else if (zs == 0) lz_contrib[0] = 2 * ze - 1;
186 else if (ze == mz) lz_contrib[proc_k] = user->KM + 1 - (2 * zs - 1);
187 else lz_contrib[proc_k] = (ze - zs) * 2;
188 }
189 LOG_ALLOW_SYNC(LOCAL, LOG_VERBOSE, "Rank %d: Calculated this rank's node contribution to fine grid: lx=%d, ly=%d, lz=%d\n", simCtx->rank, lx_contrib[proc_i], ly_contrib[proc_j], lz_contrib[proc_k]);
190
191 // Allocate the final distribution arrays and Allreduce to get the global distribution
192 ierr = PetscMalloc3(m, &lx, n, &ly, p, &lz); CHKERRQ(ierr);
193 ierr = MPI_Allreduce(lx_contrib, lx, m, MPIU_INT, MPI_MAX, PETSC_COMM_WORLD); CHKERRQ(ierr);
194 ierr = MPI_Allreduce(ly_contrib, ly, n, MPIU_INT, MPI_MAX, PETSC_COMM_WORLD); CHKERRQ(ierr);
195 ierr = MPI_Allreduce(lz_contrib, lz, p, MPIU_INT, MPI_MAX, PETSC_COMM_WORLD); CHKERRQ(ierr);
196
197 ierr = PetscFree3(lx_contrib, ly_contrib, lz_contrib); CHKERRQ(ierr);
198
199 } else {
200 // --- CASE 2: This is the COARSEST grid; use default or user-specified decomposition ---
201 LOG_ALLOW_SYNC(LOCAL, LOG_DEBUG, "Rank %d: Creating coarsest DM for block %d level %d (size %dx%dx%d)\n", simCtx->rank, user->_this, user->thislevel, user->IM, user->JM, user->KM);
202 m = simCtx->da_procs_x;
203 n = simCtx->da_procs_y;
204 p = simCtx->da_procs_z;
205 // lx, ly, lz are NULL, so DMDACreate3d will use the m,n,p values.
206 }
207
208 // --- Create the DMDA for the current UserCtx ---
209 LOG_ALLOW_SYNC(LOCAL, LOG_DEBUG, "Rank %d: Calling DMDACreate3d...\n", simCtx->rank);
210 ierr = DMDACreate3d(PETSC_COMM_WORLD, xperiod, yperiod, zperiod, DMDA_STENCIL_BOX,
211 user->IM + 1, user->JM + 1, user->KM + 1,
212 m, n, p,
213 1, stencil_width, lx, ly, lz, &user->da); CHKERRQ(ierr);
214
215 if (coarse_user) {
216 ierr = PetscFree3(lx, ly, lz); CHKERRQ(ierr);
217 }
218
219 // --- Standard DM setup applicable to all levels ---
220 ierr = DMSetUp(user->da); CHKERRQ(ierr);
221 ierr = DMGetCoordinateDM(user->da, &user->fda); CHKERRQ(ierr);
222 ierr = DMDASetUniformCoordinates(user->da, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0); CHKERRQ(ierr);
223 ierr = DMDAGetLocalInfo(user->da, &user->info); CHKERRQ(ierr);
224 LOG_ALLOW_SYNC(LOCAL, LOG_DEBUG, "Rank %d: DM creation for block %d level %d complete.\n", simCtx->rank, user->_this, user->thislevel);
225
227
228 PetscFunctionReturn(0);
229}
#define LOCAL
Logging scope definitions for controlling message output.
Definition logging.h:46
@ LOG_TRACE
Very fine-grained tracing information for in-depth debugging.
Definition logging.h:34
@ LOG_VERBOSE
Extremely detailed logs, typically for development use only.
Definition logging.h:35
PetscInt isc
Definition variables.h:674
PetscInt da_procs_z
Definition variables.h:599
PetscInt ksc
Definition variables.h:674
PetscInt KM
Definition variables.h:670
PetscInt da_procs_y
Definition variables.h:599
PetscInt k_periodic
Definition variables.h:594
PetscInt jsc
Definition variables.h:674
PetscInt thislevel
Definition variables.h:721
PetscInt JM
Definition variables.h:670
PetscInt da_procs_x
Definition variables.h:599
PetscInt i_periodic
Definition variables.h:594
DMDALocalInfo info
Definition variables.h:668
PetscInt IM
Definition variables.h:670
PetscInt j_periodic
Definition variables.h:594
Here is the caller graph for this function:
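
To make the processor-alignment rule concrete, the sketch below reproduces the X-direction contribution formula from the listing above as a standalone, PETSc-free calculation. The ownership ranges and sizes are illustrative values chosen for the example, not taken from the solver.

#include <stdio.h>

/* Fine-grid node count contributed by one rank in X, mirroring the
 * X-direction branch above. The inputs describe the COARSE grid:
 * [xs, xe) is the rank's owned node range, mx the global node count,
 * m the number of ranks in X; IM is the FINE grid's cell count and
 * isc the semi-coarsening flag (1 = X is not coarsened). */
static int FineNodesInX(int xs, int xe, int mx, int m, int IM, int isc)
{
    if (isc)      return xe - xs;                   /* same resolution in X */
    if (m == 1)   return IM + 1;                    /* single rank owns all nodes */
    if (xs == 0)  return 2 * xe - 1;                /* first rank */
    if (xe == mx) return IM + 1 - (2 * xs - 1);     /* last rank */
    return (xe - xs) * 2;                           /* interior rank */
}

int main(void)
{
    /* Coarse grid: mx = 9 nodes split over m = 3 ranks as [0,3), [3,6), [6,9).
     * Fine grid: IM = 16 cells, i.e. 17 nodes. */
    int ranges[3][2] = { {0, 3}, {3, 6}, {6, 9} };
    int total = 0;
    for (int r = 0; r < 3; r++) {
        int n = FineNodesInX(ranges[r][0], ranges[r][1], 9, 3, 16, 0);
        printf("rank %d contributes %d fine nodes in X\n", r, n);
        total += n;
    }
    printf("total = %d (equals IM + 1 = 17, so the fine DM tiles exactly)\n", total);
    return 0;
}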

◆ InitializeAllGridDMs()

PetscErrorCode InitializeAllGridDMs ( SimCtx * simCtx)

Orchestrates the creation of DMDA objects for every block and multigrid level.

This function systematically builds the entire DMDA hierarchy. It first calculates the dimensions (IM, JM, KM) for all coarse grids based on the finest grid's dimensions and the semi-coarsening flags. It then iterates from the coarsest to the finest level, calling a powerful helper function (InitializeSingleGridDM) to create the DMs for each block, ensuring that finer grids are properly aligned with their coarser parents for multigrid efficiency.

Parameters
simCtx : The master SimCtx, containing the configured UserCtx hierarchy.
Returns
PetscErrorCode 0 on success, or a PETSc error code on failure.

Definition at line 247 of file grid.c.

248{
249 PetscErrorCode ierr;
250 UserMG *usermg = &simCtx->usermg;
251 MGCtx *mgctx = usermg->mgctx;
252 PetscInt nblk = simCtx->block_number;
253
254 PetscFunctionBeginUser;
255
257
258 LOG_ALLOW(GLOBAL, LOG_INFO, "Creating DMDA objects for all levels and blocks...\n");
259
260 // --- Part 1: Calculate Coarse Grid Dimensions & VALIDATE ---
261 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Calculating and validating coarse grid dimensions...\n");
262 for (PetscInt level = usermg->mglevels - 2; level >= 0; level--) {
263 for (PetscInt bi = 0; bi < nblk; bi++) {
264 UserCtx *user_coarse = &mgctx[level].user[bi];
265 UserCtx *user_fine = &mgctx[level + 1].user[bi];
266
267 user_coarse->IM = user_fine->isc ? user_fine->IM : (user_fine->IM + 1) / 2;
268 user_coarse->JM = user_fine->jsc ? user_fine->JM : (user_fine->JM + 1) / 2;
269 user_coarse->KM = user_fine->ksc ? user_fine->KM : (user_fine->KM + 1) / 2;
270
271 LOG_ALLOW_SYNC(LOCAL, LOG_TRACE, "Rank %d: Block %d, Level %d dims calculated: %d x %d x %d\n",
272 simCtx->rank, bi, level, user_coarse->IM, user_coarse->JM, user_coarse->KM);
273
274 // Validation check from legacy MGDACreate to ensure coarsening is possible
275 PetscInt check_i = user_coarse->IM * (2 - user_coarse->isc) - (user_fine->IM + 1 - user_coarse->isc);
276 PetscInt check_j = user_coarse->JM * (2 - user_coarse->jsc) - (user_fine->JM + 1 - user_coarse->jsc);
277 PetscInt check_k = user_coarse->KM * (2 - user_coarse->ksc) - (user_fine->KM + 1 - user_coarse->ksc);
278
279 if (check_i + check_j + check_k != 0) {
280 // SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_ARG_WRONG,
281 // "Grid at level %d, block %d cannot be coarsened from %dx%dx%d to %dx%dx%d with the given semi-coarsening flags. Check grid dimensions.",
282 // level, bi, user_fine->IM, user_fine->JM, user_fine->KM, user_coarse->IM, user_coarse->JM, user_coarse->KM);
283 LOG(GLOBAL,LOG_WARNING,"WARNING: Grid at level %d, block %d can't be consistently coarsened further.\n", level, bi);
284 }
285 }
286 }
287
288 // --- Part 2: Create DMs from Coarse to Fine for each Block ---
289 for (PetscInt bi = 0; bi < nblk; bi++) {
290 LOG_ALLOW_SYNC(GLOBAL, LOG_DEBUG, "--- Creating DMs for Block %d ---\n", bi);
291
292 // Create the coarsest level DM first (passing NULL for the coarse_user)
293 ierr = InitializeSingleGridDM(&mgctx[0].user[bi], NULL); CHKERRQ(ierr);
294
295 // Create finer level DMs, passing the next-coarser context for alignment
296 for (PetscInt level = 1; level < usermg->mglevels; level++) {
297 ierr = InitializeSingleGridDM(&mgctx[level].user[bi], &mgctx[level-1].user[bi]); CHKERRQ(ierr);
298 }
299 }
300
301 // --- Optional: View the finest DM for debugging verification ---
302 if (get_log_level() >= LOG_DEBUG) {
303 LOG_ALLOW_SYNC(GLOBAL, LOG_INFO, "--- Viewing Finest DMDA (Level %d, Block 0) ---\n", usermg->mglevels - 1);
304 ierr = DMView(mgctx[usermg->mglevels - 1].user[0].da, PETSC_VIEWER_STDOUT_WORLD); CHKERRQ(ierr);
305 }
306
307 LOG_ALLOW(GLOBAL, LOG_INFO, "DMDA object creation complete.\n");
308
310
311 PetscFunctionReturn(0);
312}
static PetscErrorCode InitializeSingleGridDM(UserCtx *user, UserCtx *coarse_user)
Creates the DMDA objects (da and fda) for a single UserCtx.
Definition grid.c:120
#define LOG(scope, level, fmt,...)
Logging macro for PETSc-based applications with scope control.
Definition logging.h:85
LogLevel get_log_level()
Retrieves the current logging level from the environment variable LOG_LEVEL.
Definition logging.c:39
@ LOG_WARNING
Non-critical issues that warrant attention.
Definition logging.h:30
Context for Multigrid operations.
Definition variables.h:426
User-level context for managing the entire multigrid hierarchy.
Definition variables.h:433
Here is the call graph for this function:
Here is the caller graph for this function:
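
A small standalone check of the coarse-dimension formula and the legacy validation test used in Part 1 of the listing above; the block size and semi-coarsening flags are illustrative.

#include <stdio.h>

int main(void)
{
    /* Fine block of 65 x 65 x 32 cells, semi-coarsened in Z (ksc = 1). */
    int IM_f = 65, JM_f = 65, KM_f = 32;
    int isc = 0, jsc = 0, ksc = 1;

    /* Coarse dimensions, as computed in InitializeAllGridDMs. */
    int IM_c = isc ? IM_f : (IM_f + 1) / 2;   /* 33 */
    int JM_c = jsc ? JM_f : (JM_f + 1) / 2;   /* 33 */
    int KM_c = ksc ? KM_f : (KM_f + 1) / 2;   /* 32 (kept: semi-coarsened) */

    /* Legacy MGDACreate consistency check: all three must be zero for the
     * coarse grid to refine back to the fine grid exactly. */
    int check_i = IM_c * (2 - isc) - (IM_f + 1 - isc);
    int check_j = JM_c * (2 - jsc) - (JM_f + 1 - jsc);
    int check_k = KM_c * (2 - ksc) - (KM_f + 1 - ksc);

    printf("coarse dims: %d x %d x %d, checks: %d %d %d\n",
           IM_c, JM_c, KM_c, check_i, check_j, check_k);
    return 0;
}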

◆ SetFinestLevelCoordinates()

static PetscErrorCode SetFinestLevelCoordinates ( UserCtx * user)
static

A router that populates the coordinates for a single finest-level DMDA.

This function orchestrates the coordinate setting for one block. It checks the global generate_grid flag and calls the appropriate helper for either programmatic generation or reading from a file.

After the local coordinate vector is populated by a helper, this function performs the necessary DMLocalToGlobal and DMGlobalToLocal scatters to ensure that the ghost node coordinate values are correctly communicated and updated across all MPI ranks.

Parameters
user : The UserCtx for a specific block on the finest level.
Returns
PetscErrorCode 0 on success, or a PETSc error code on failure.

Definition at line 404 of file grid.c.

405{
406 PetscErrorCode ierr;
407 SimCtx *simCtx = user->simCtx;
408
409 PetscFunctionBeginUser;
410
412
413 LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Setting finest level coordinates for block %d...\n", simCtx->rank, user->_this);
414
415 if (simCtx->generate_grid) {
416 ierr = GenerateAndSetCoordinates(user); CHKERRQ(ierr);
417 } else {
418
419 FILE *grid_file_handle = NULL;
420 // Only Rank 0 opens the file.
421 if (simCtx->rank == 0) {
422 grid_file_handle = fopen(simCtx->grid_file, "r");
423 if (!grid_file_handle) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_FILE_OPEN, "Cannot open file: %s", simCtx->grid_file);
424
425 // Now, on Rank 0, we skip the entire header section once.
426 // This is the logic from your modern code's AssignGridCoordinates.
427 PetscInt headerLines = simCtx->block_number + 2; // 1 for nblk, plus one for each block's dims
428 char dummy_buffer[2048];
429 for (PetscInt s = 0; s < headerLines; ++s) {
430 if (!fgets(dummy_buffer, sizeof(dummy_buffer), grid_file_handle)) {
431 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_FILE_READ, "Unexpected EOF while skipping grid header");
432 }
433 }
434 LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank 0: Skipped %d header lines, now at coordinate data.\n", headerLines);
435 }
436
437 // We now call the coordinate reader, passing the file handle.
438 // It's responsible for reading its block's data and broadcasting.
439 ierr = ReadAndSetCoordinates(user, grid_file_handle); CHKERRQ(ierr);
440
441 // Only Rank 0, which opened the file, should close it.
442 if (simCtx->rank == 0) {
443 fclose(grid_file_handle);
444 }
445 }
446
447 // After populating the local coordinate vector, we must perform a
448 // Local-to-Global and then Global-to-Local scatter to correctly
449 // populate the ghost node coordinates across process boundaries.
450 Vec gCoor, lCoor;
451 ierr = DMGetCoordinates(user->da, &gCoor); CHKERRQ(ierr);
452 ierr = DMGetCoordinatesLocal(user->da, &lCoor); CHKERRQ(ierr);
453
454 LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Scattering coordinates to update ghost nodes for block %d...\n", simCtx->rank, user->_this);
455 ierr = DMLocalToGlobalBegin(user->fda, lCoor, INSERT_VALUES, gCoor); CHKERRQ(ierr);
456 ierr = DMLocalToGlobalEnd(user->fda, lCoor, INSERT_VALUES, gCoor); CHKERRQ(ierr);
457
458 ierr = DMGlobalToLocalBegin(user->fda, gCoor, INSERT_VALUES, lCoor); CHKERRQ(ierr);
459 ierr = DMGlobalToLocalEnd(user->fda, gCoor, INSERT_VALUES, lCoor); CHKERRQ(ierr);
460
462
463 PetscFunctionReturn(0);
464}
static PetscErrorCode GenerateAndSetCoordinates(UserCtx *user)
Programmatically generates and sets grid coordinates based on user parameters.
Definition grid.c:517
static PetscErrorCode ReadAndSetCoordinates(UserCtx *user, FILE *fd)
Reads physical coordinates from a file and populates the DMDA for a specific block.
Definition grid.c:601
char grid_file[PETSC_MAX_PATH_LEN]
Definition variables.h:598
Here is the call graph for this function:
Here is the caller graph for this function:
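
The ghost-node update at the end of this routine is a generic DMDA coordinate idiom; the sketch below isolates it as a standalone helper. UpdateCoordinateGhosts is a hypothetical name used for illustration, with da the scalar DM and fda the coordinate DM obtained from DMGetCoordinateDM, as in InitializeSingleGridDM.

#include <petscdmda.h>

/* Hypothetical helper: push locally written coordinate values into the
 * global vector, then pull them back so ghost nodes agree on all ranks. */
static PetscErrorCode UpdateCoordinateGhosts(DM da, DM fda)
{
    PetscErrorCode ierr;
    Vec            gCoor, lCoor;

    PetscFunctionBeginUser;
    ierr = DMGetCoordinates(da, &gCoor); CHKERRQ(ierr);        /* global, no ghosts  */
    ierr = DMGetCoordinatesLocal(da, &lCoor); CHKERRQ(ierr);   /* local, with ghosts */

    ierr = DMLocalToGlobalBegin(fda, lCoor, INSERT_VALUES, gCoor); CHKERRQ(ierr);
    ierr = DMLocalToGlobalEnd(fda, lCoor, INSERT_VALUES, gCoor); CHKERRQ(ierr);
    ierr = DMGlobalToLocalBegin(fda, gCoor, INSERT_VALUES, lCoor); CHKERRQ(ierr);
    ierr = DMGlobalToLocalEnd(fda, gCoor, INSERT_VALUES, lCoor); CHKERRQ(ierr);
    PetscFunctionReturn(0);
}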

◆ GenerateAndSetCoordinates()

static PetscErrorCode GenerateAndSetCoordinates ( UserCtx * user)
static

Programmatically generates and sets grid coordinates based on user parameters.

This function populates the local coordinate vector of the provided UserCtx using the geometric properties (Min_X, Max_X, rx, etc.) that were parsed from command-line options. It supports non-linear grid stretching.

Parameters
user : The UserCtx for a specific block.
Returns
PetscErrorCode 0 on success, or a PETSc error code on failure.

DEBUG: This verifies the presence of a last "unphysical" layer of coordinates.

Definition at line 517 of file grid.c.

518{
519 PetscErrorCode ierr;
520 DMDALocalInfo info;
521 Cmpnts ***coor;
522 Vec lCoor;
523
524 PetscFunctionBeginUser;
525
527
528 LOG_ALLOW_SYNC(LOCAL, LOG_DEBUG, "Rank %d: Generating coordinates for block %d...\n", user->simCtx->rank, user->_this);
529
530 ierr = DMDAGetLocalInfo(user->da, &info); CHKERRQ(ierr);
531 ierr = DMGetCoordinatesLocal(user->da, &lCoor); CHKERRQ(ierr);
532
533 PetscInt xs = info.xs, xe = info.xs + info.xm;
534 PetscInt ys = info.ys, ye = info.ys + info.ym;
535 PetscInt zs = info.zs, ze = info.zs + info.zm;
536
537 LOG_ALLOW_SYNC(LOCAL, LOG_TRACE, "Rank %d: Local Info for block %d - X range - [%d,%d], Y range - [%d,%d], Z range - [%d,%d]\n",
538 user->simCtx->rank, user->_this, xs, xe, ys, ye, zs, ze);
539 ierr = VecSet(lCoor, 0.0); CHKERRQ(ierr);
540 ierr = DMDAVecGetArray(user->fda, lCoor, &coor); CHKERRQ(ierr);
541
542 PetscReal Lx = user->Max_X - user->Min_X;
543 PetscReal Ly = user->Max_Y - user->Min_Y;
544 PetscReal Lz = user->Max_Z - user->Min_Z;
545
546 // Loop over the local nodes, including ghost nodes, owned by this process.
547 for (PetscInt k = zs; k < ze; k++) {
548 for (PetscInt j = ys; j < ye; j++) {
549 for (PetscInt i = xs; i < xe; i++) {
550 if(k < user->KM && j < user->JM && i < user->IM){
551 coor[k][j][i].x = user->Min_X + ComputeStretchedCoord(i, user->IM, Lx, user->rx);
552 coor[k][j][i].y = user->Min_Y + ComputeStretchedCoord(j, user->JM, Ly, user->ry);
553 coor[k][j][i].z = user->Min_Z + ComputeStretchedCoord(k, user->KM, Lz, user->rz);
554 }
555 }
556 }
557 }
558
559 /// DEBUG: This verifies the presence of a last "unphysical" layer of coordinates.
560 /*
561 PetscInt KM = user->KM;
562 for (PetscInt j = ys; j < ye; j++){
563 for(PetscInt i = xs; i < xe; i++){
564 LOG_ALLOW(GLOBAL,LOG_DEBUG,"coor[%d][%d][%d].(x,y,z) = %le,%le,%le",KM,j,i,coor[KM][j][i].x,coor[KM][j][i].y,coor[KM][j][i].z);
565 }
566 }
567 */
568
569
570
571 ierr = DMDAVecRestoreArray(user->fda, lCoor, &coor); CHKERRQ(ierr);
572
574
575 PetscFunctionReturn(0);
576}
static PetscReal ComputeStretchedCoord(PetscInt i, PetscInt N, PetscReal L, PetscReal r)
Computes a stretched coordinate along one dimension.
Definition grid.c:494
PetscReal Min_X
Definition variables.h:671
PetscReal ry
Definition variables.h:675
PetscReal Max_Y
Definition variables.h:671
PetscScalar x
Definition variables.h:101
PetscReal rz
Definition variables.h:675
PetscScalar z
Definition variables.h:101
PetscReal Min_Z
Definition variables.h:671
PetscReal Max_X
Definition variables.h:671
PetscReal Min_Y
Definition variables.h:671
PetscScalar y
Definition variables.h:101
PetscReal rx
Definition variables.h:675
PetscReal Max_Z
Definition variables.h:671
A 3D point or vector with PetscScalar components.
Definition variables.h:100
Here is the call graph for this function:
Here is the caller graph for this function:

◆ ReadAndSetCoordinates()

static PetscErrorCode ReadAndSetCoordinates ( UserCtx * user,
FILE *  fd 
)
static

Reads physical coordinates from a file and populates the DMDA for a specific block.

This function handles the collective read of an interleaved (X Y Z per line) multi-block grid file. It assumes the file header (nblk, dimensions) has already been processed by ReadGridFile.

The process is robust for parallel execution:

  1. Rank 0 opens the grid file.
  2. It intelligently skips past the header section and the coordinate data for all blocks preceding the current block being processed (user->_this).
  3. It then reads the entire coordinate data for the current block into a single contiguous buffer gc.
  4. This global buffer gc is broadcast to all other MPI ranks.
  5. Each rank then loops through its local (owned + ghost) node indices, calculates the corresponding index in the global gc array, and copies the (x,y,z) coordinates into its local PETSc coordinate vector.
Parameters
user : The UserCtx for a specific block. Its _this field must be set, and its IM, JM, KM fields must be correctly pre-populated.
Returns
PetscErrorCode 0 on success, or a PETSc error code on failure.

Definition at line 601 of file grid.c.

602{
603 PetscErrorCode ierr;
604 SimCtx *simCtx = user->simCtx;
605 PetscMPIInt rank = simCtx->rank;
606 PetscInt block_index = user->_this;
607 PetscInt IM = user->IM, JM = user->JM, KM = user->KM;
608 DMDALocalInfo info;
609 Cmpnts ***coor;
610 Vec lCoor;
611 PetscReal *gc = NULL; // Global coordinate buffer, allocated on all ranks
612
613 PetscFunctionBeginUser;
614
616
617 LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Reading interleaved coordinates from file for block %d...\n",
618 simCtx->rank, block_index);
619
620 // 1. Allocate the buffer on ALL ranks to receive the broadcast data.
621 // PetscInt n_nodes = (IM + 1) * (JM + 1) * (KM + 1);
622 PetscInt n_nodes = (IM) * (JM) * (KM);
623 ierr = PetscMalloc1(3 * n_nodes, &gc); CHKERRQ(ierr);
624
625 // 2. Only Rank 0 opens the file and reads the data.
626 if (rank == 0) {
627 if (!fd) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_FILE_OPEN, "Recieved a NULL file handle.\n");
628
629 // Read the coordinate data for the CURRENT block.
630 for (PetscInt k = 0; k < KM; k++) {
631 for (PetscInt j = 0; j < JM; j++) {
632 for (PetscInt i = 0; i < IM; i++) {
633 PetscInt base_index = 3 * ((k * (JM) + j) * (IM) + i);
634 if (fscanf(fd, "%le %le %le\n", &gc[base_index], &gc[base_index + 1], &gc[base_index + 2]) != 3) {
635 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_FILE_READ, "Error reading coordinates for node (i,j,k)=(%d,%d,%d) in block %d", i, j, k, block_index);
636 }
637 }
638 }
639 }
640
641 }
642
643 // 3. Broadcast the coordinate block for the current block to all other processes.
644 ierr = MPI_Bcast(gc, 3 * n_nodes, MPIU_REAL, 0, PETSC_COMM_WORLD); CHKERRQ(ierr);
645
646 // 4. Each rank populates its local portion of the coordinate vector.
647 ierr = DMDAGetLocalInfo(user->da, &info); CHKERRQ(ierr);
648 ierr = DMGetCoordinatesLocal(user->da, &lCoor); CHKERRQ(ierr);
649 ierr = DMDAVecGetArray(user->fda, lCoor, &coor); CHKERRQ(ierr);
650
651 for (PetscInt k = info.zs; k < info.zs + info.zm; k++) {
652 for (PetscInt j = info.ys; j < info.ys + info.ym; j++) {
653 for (PetscInt i = info.xs; i < info.xs + info.xm; i++) {
654 if(k< KM && j < JM && i < IM){
655 PetscInt base_idx = 3 * ((k * (JM) + j) * (IM) + i);
656 coor[k][j][i].x = gc[base_idx];
657 coor[k][j][i].y = gc[base_idx + 1];
658 coor[k][j][i].z = gc[base_idx + 2];
659 }
660 }
661 }
662 }
663
664 // 5. Clean up and restore.
665 ierr = DMDAVecRestoreArray(user->fda, lCoor, &coor); CHKERRQ(ierr);
666 ierr = PetscFree(gc); CHKERRQ(ierr);
667
669
670 PetscFunctionReturn(0);
671}
Here is the caller graph for this function:
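
The flat buffer gc used above stores three values per node with i varying fastest, then j, then k. The sketch below is a PETSc-free illustration of that layout with made-up block sizes.

#include <stdio.h>
#include <stdlib.h>

/* Offset of the x-component of node (i,j,k) in the flat coordinate buffer,
 * matching base_index in ReadAndSetCoordinates. */
static size_t NodeBase(int i, int j, int k, int IM, int JM)
{
    return 3 * (((size_t)k * JM + j) * IM + i);
}

int main(void)
{
    int IM = 4, JM = 3, KM = 2;   /* illustrative block size */
    double *gc = calloc(3 * (size_t)IM * JM * KM, sizeof(double));
    if (!gc) return 1;

    /* Fill in the same order the file is read: k outermost, then j, then i. */
    for (int k = 0; k < KM; k++)
        for (int j = 0; j < JM; j++)
            for (int i = 0; i < IM; i++) {
                size_t b = NodeBase(i, j, k, IM, JM);
                gc[b] = i; gc[b + 1] = j; gc[b + 2] = k;   /* stand-ins for X Y Z */
            }

    size_t b = NodeBase(2, 1, 1, IM, JM);
    printf("node (2,1,1) -> gc[%zu..%zu] = (%g, %g, %g)\n",
           b, b + 2, gc[b], gc[b + 1], gc[b + 2]);
    free(gc);
    return 0;
}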

◆ RestrictCoordinates()

static PetscErrorCode RestrictCoordinates ( UserCtx * coarse_user,
UserCtx * fine_user 
)
static

Populates coarse grid coordinates by restricting from a fine grid.

This function is a direct adaptation of the coordinate restriction loop from the legacy MG_Initial function. It ensures that the physical location of a coarse grid node is identical to its corresponding parent node on the fine grid. The mapping from coarse to fine index (i -> ih) is determined by the semi-coarsening flags (isc, jsc, ksc) stored in the UserCtx.

Parameters
coarse_user : The UserCtx for the coarse grid (destination).
fine_user : The UserCtx for the fine grid (source).
Returns
PetscErrorCode

Definition at line 688 of file grid.c.

689{
690 PetscErrorCode ierr;
691 Vec c_lCoor, f_lCoor; // Coarse and Fine local coordinate vectors
692 Cmpnts ***c_coor;
693 const Cmpnts ***f_coor; // Use const for read-only access
694 DMDALocalInfo c_info;
695 PetscInt ih, jh, kh; // Fine-grid indices corresponding to coarse-grid i,j,k
696
697 PetscFunctionBeginUser;
698
700
701 LOG_ALLOW_SYNC(LOCAL, LOG_DEBUG, "Rank %d: Restricting coords from level %d to level %d for block %d\n",
702 fine_user->simCtx->rank, fine_user->thislevel, coarse_user->thislevel, coarse_user->_this);
703
704 ierr = DMDAGetLocalInfo(coarse_user->da, &c_info); CHKERRQ(ierr);
705
706 ierr = DMGetCoordinatesLocal(coarse_user->da, &c_lCoor); CHKERRQ(ierr);
707 ierr = DMGetCoordinatesLocal(fine_user->da, &f_lCoor); CHKERRQ(ierr);
708
709 ierr = DMDAVecGetArray(coarse_user->fda, c_lCoor, &c_coor); CHKERRQ(ierr);
710 ierr = DMDAVecGetArrayRead(fine_user->fda, f_lCoor, &f_coor); CHKERRQ(ierr);
711
712 // Get the local owned range of the coarse grid.
713 PetscInt xs = c_info.xs, xe = c_info.xs + c_info.xm;
714 PetscInt ys = c_info.ys, ye = c_info.ys + c_info.ym;
715 PetscInt zs = c_info.zs, ze = c_info.zs + c_info.zm;
716
717 // Get the global dimensions of the coarse grid.
718 PetscInt mx = c_info.mx, my = c_info.my, mz = c_info.mz;
719
720 // If this process owns the maximum boundary node, contract the loop by one
721 // to prevent the index doubling `2*i` from going out of bounds.
722 // This is also ensuring we do not manipulate the unphysical layer of coors present in the finest level.
723 if (xe == mx) xe--;
724 if (ye == my) ye--;
725 if (ze == mz) ze--;
726
727 for (PetscInt k = zs; k < ze; k++) {
728 for (PetscInt j = ys; j < ye; j++) {
729 for (PetscInt i = xs; i < xe; i++) {
730 // Determine the corresponding parent node index on the FINE grid,
731 // respecting the semi-coarsening flags of the FINE grid's UserCtx.
732 ih = coarse_user->isc ? i : 2 * i;
733 jh = coarse_user->jsc ? j : 2 * j;
734 kh = coarse_user->ksc ? k : 2 * k;
735
736 // LOG_ALLOW(GLOBAL,LOG_DEBUG," [kh][ih][jh] = %d,%d,%d - k,j,i = %d,%d,%d.\n",kh,jh,ih,k,j,i);
737
738 c_coor[k][j][i] = f_coor[kh][jh][ih];
739 }
740 }
741 }
742
743 ierr = DMDAVecRestoreArray(coarse_user->fda, c_lCoor, &c_coor); CHKERRQ(ierr);
744 ierr = DMDAVecRestoreArrayRead(fine_user->fda, f_lCoor, &f_coor); CHKERRQ(ierr);
745
746 // After populating the local portion, scatter to update ghost regions.
747 Vec c_gCoor;
748 ierr = DMGetCoordinates(coarse_user->da, &c_gCoor); CHKERRQ(ierr);
749 ierr = DMLocalToGlobalBegin(coarse_user->fda, c_lCoor, INSERT_VALUES, c_gCoor); CHKERRQ(ierr);
750 ierr = DMLocalToGlobalEnd(coarse_user->fda, c_lCoor, INSERT_VALUES, c_gCoor); CHKERRQ(ierr);
751 ierr = DMGlobalToLocalBegin(coarse_user->fda, c_gCoor, INSERT_VALUES, c_lCoor); CHKERRQ(ierr);
752 ierr = DMGlobalToLocalEnd(coarse_user->fda, c_gCoor, INSERT_VALUES, c_lCoor); CHKERRQ(ierr);
753
755
756 PetscFunctionReturn(0);
757}
Here is the caller graph for this function:
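
The coarse-to-fine parent-index map is the heart of this routine: full coarsening doubles the index, semi-coarsening keeps it unchanged. A tiny PETSc-free illustration with made-up flags:

#include <stdio.h>

int main(void)
{
    int isc = 0;   /* fully coarsened direction: parent index is doubled  */
    int ksc = 1;   /* semi-coarsened direction: parent index is unchanged */

    for (int idx = 0; idx < 4; idx++) {
        int ih = isc ? idx : 2 * idx;
        int kh = ksc ? idx : 2 * idx;
        printf("coarse index %d -> fine parent %d (i-direction), %d (k-direction)\n",
               idx, ih, kh);
    }
    return 0;
}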

◆ AssignAllGridCoordinates()

PetscErrorCode AssignAllGridCoordinates ( SimCtx * simCtx)

Orchestrates the assignment of physical coordinates to all DMDA objects.

This function manages the entire process of populating the coordinate vectors for every DMDA across all multigrid levels and blocks. It follows a two-part strategy that is essential for multigrid methods:

  1. Populate Finest Level: It first loops through each block and calls a helper (SetFinestLevelCoordinates) to set the physical coordinates for the highest-resolution grid (the finest multigrid level).
  2. Restrict to Coarser Levels: It then iterates downwards from the finest level, calling a helper (RestrictCoordinates) to copy the coordinate values from the fine grid nodes to their corresponding parent nodes on the coarser grids. This ensures all levels represent the exact same geometry.
Parameters
simCtx : The master SimCtx, containing the configured UserCtx hierarchy.
Returns
PetscErrorCode 0 on success, or a PETSc error code on failure.

Definition at line 340 of file grid.c.

341{
342 PetscErrorCode ierr;
343 UserMG *usermg = &simCtx->usermg;
344 PetscInt nblk = simCtx->block_number;
345
346 PetscFunctionBeginUser;
347
349
350 LOG_ALLOW(GLOBAL, LOG_INFO, "Assigning physical coordinates to all grid DMs...\n");
351
352 // --- Part 1: Populate the Finest Grid Level ---
353 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Setting coordinates for the finest grid level (%d)...\n", usermg->mglevels - 1);
354 for (PetscInt bi = 0; bi < nblk; bi++) {
355 UserCtx *fine_user = &usermg->mgctx[usermg->mglevels - 1].user[bi];
356 ierr = SetFinestLevelCoordinates(fine_user); CHKERRQ(ierr);
357 LOG_ALLOW(GLOBAL,LOG_TRACE,"The Finest level coordinates for block %d have been set.\n",bi);
358 if(is_function_allowed(__FUNCT__)){
359 ierr = LOG_FIELD_MIN_MAX(fine_user,"Coordinates");
360 }
361 }
362 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Finest level coordinates have been set for all blocks.\n");
363
364 // --- Part 2: Restrict Coordinates to Coarser Levels ---
365 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Restricting coordinates to coarser grid levels...\n");
366 for (PetscInt level = usermg->mglevels - 2; level >= 0; level--) {
367 for (PetscInt bi = 0; bi < nblk; bi++) {
368 UserCtx *coarse_user = &usermg->mgctx[level].user[bi];
369 UserCtx *fine_user = &usermg->mgctx[level + 1].user[bi];
370 ierr = RestrictCoordinates(coarse_user, fine_user); CHKERRQ(ierr);
371
372 LOG_ALLOW(GLOBAL,LOG_TRACE,"Coordinates restricted to block %d level %d.\n",bi,level);
373 if(is_function_allowed(__FUNCT__)){
374 ierr = LOG_FIELD_MIN_MAX(coarse_user,"Coordinates");
375 }
376 }
377 }
378
379 LOG_ALLOW(GLOBAL, LOG_INFO, "Physical coordinates assigned to all grid levels and blocks.\n");
380
382
383 PetscFunctionReturn(0);
384}
static PetscErrorCode RestrictCoordinates(UserCtx *coarse_user, UserCtx *fine_user)
Populates coarse grid coordinates by restricting from a fine grid.
Definition grid.c:688
static PetscErrorCode SetFinestLevelCoordinates(UserCtx *user)
A router that populates the coordinates for a single finest-level DMDA.
Definition grid.c:404
#define __FUNCT__
Definition grid.c:9
PetscBool is_function_allowed(const char *functionName)
Checks if a given function is in the allow-list.
Definition logging.c:157
PetscErrorCode LOG_FIELD_MIN_MAX(UserCtx *user, const char *fieldName)
Computes and logs the local and global min/max values of a 3-component vector field.
Definition logging.c:1273
Here is the call graph for this function:
Here is the caller graph for this function:

◆ ComputeStretchedCoord()

static PetscReal ComputeStretchedCoord ( PetscInt  i,
PetscInt  N,
PetscReal  L,
PetscReal  r 
)
inline static

Computes a stretched coordinate along one dimension.

This function computes a coordinate based on a geometric stretching ratio. Writing s = i/(N-1) for the normalized position (the fraction actually used in the implementation below), a uniform distribution is used when the ratio (r) is 1.0: x(i) = L * s

If r != 1.0, a geometric stretching is applied: x(i) = L * [ (r^s - 1) / (r - 1) ]

Here:

  • i : The current index along the dimension.
  • N : The total number of divisions along that dimension.
  • L : The length of the domain along that dimension.
  • r : The stretching ratio. r > 1.0 stretches the grid geometrically, increasing the spacing away from the start (and thus clustering points near the start), whereas 0 < r < 1.0 clusters points near the far end of the dimension.
Parameters
[in] i : Current index (0 <= i <= N).
[in] N : Number of segments along the dimension.
[in] L : Total length of the domain.
[in] r : Stretching ratio.
Returns
PetscReal The computed coordinate at index i.
Note
This function does not return a PetscErrorCode because it does not allocate memory or call PETSc routines that can fail. It is just a helper calculation function.

Definition at line 494 of file grid.c.

495{
496 if (N <=1) return 0.0;
497 PetscReal fraction = (PetscReal)i / ((PetscReal)N - 1.0);
498 if (PetscAbsReal(r - 1.0) < 1.0e-9) { // Use a tolerance for float comparison
499 return L * fraction;
500 } else {
501 return L * (PetscPowReal(r, fraction) - 1.0) / (r - 1.0);
502 }
503}
Here is the caller graph for this function:
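
A standalone reimplementation of the same formula for quick experimentation; it mirrors the listing above (including the i/(N-1) fraction) but is not the library routine itself. Compile with -lm.

#include <stdio.h>
#include <math.h>

static double StretchedCoord(int i, int N, double L, double r)
{
    if (N <= 1) return 0.0;
    double s = (double)i / ((double)N - 1.0);
    if (fabs(r - 1.0) < 1.0e-9) return L * s;          /* uniform spacing */
    return L * (pow(r, s) - 1.0) / (r - 1.0);          /* geometric stretching */
}

int main(void)
{
    int    N = 5;     /* five nodes across the dimension */
    double L = 1.0;
    printf("  i   uniform (r=1.0)   stretched (r=4.0)\n");
    for (int i = 0; i < N; i++)
        printf("%3d   %15.4f   %17.4f\n",
               i, StretchedCoord(i, N, L, 1.0), StretchedCoord(i, N, L, 4.0));
    /* With r = 4.0 the spacing grows away from i = 0, clustering nodes near the start. */
    return 0;
}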

◆ ComputeLocalBoundingBox()

PetscErrorCode ComputeLocalBoundingBox ( UserCtx * user,
BoundingBox * localBBox 
)

Computes the local bounding box of the grid on the current process.

This function calculates the minimum and maximum coordinates (x, y, z) of the local grid points owned by the current MPI process. It iterates over the local portion of the grid, examines each grid point's coordinates, and updates the minimum and maximum values accordingly.

The computed bounding box is stored in the provided localBBox structure, and the user->bbox field is also updated with this bounding box for consistency within the user context.

Parameters
[in] user : Pointer to the user-defined context containing grid information. This context must be properly initialized before calling this function.
[out] localBBox : Pointer to the BoundingBox structure where the computed local bounding box will be stored. The structure should be allocated by the caller.
Returns
PetscErrorCode Returns 0 on success, non-zero on failure.

Definition at line 780 of file grid.c.

781{
782 PetscErrorCode ierr;
783 PetscInt i, j, k;
784 PetscMPIInt rank;
785 PetscInt xs, ys, zs, xe, ye, ze;
786 DMDALocalInfo info;
787 Vec coordinates;
788 Cmpnts ***coordArray;
789 Cmpnts minCoords, maxCoords;
790
791 PetscFunctionBeginUser;
792
794
795 // Start of function execution
796 LOG_ALLOW(GLOBAL, LOG_INFO, "Entering the function.\n");
797
798 // Validate input Pointers
799 if (!user) {
800 LOG_ALLOW(LOCAL, LOG_ERROR, "Input 'user' Pointer is NULL.\n");
801 return PETSC_ERR_ARG_NULL;
802 }
803 if (!localBBox) {
804 LOG_ALLOW(LOCAL, LOG_ERROR, "Output 'localBBox' Pointer is NULL.\n");
805 return PETSC_ERR_ARG_NULL;
806 }
807
808 // Get MPI rank
809 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
810
811 // Get the local coordinates vector from the DMDA
812 ierr = DMGetCoordinatesLocal(user->da, &coordinates);
813 if (ierr) {
814 LOG_ALLOW(LOCAL, LOG_ERROR, "Error getting local coordinates vector.\n");
815 return ierr;
816 }
817
818 if (!coordinates) {
819 LOG_ALLOW(LOCAL, LOG_ERROR, "Coordinates vector is NULL.\n");
820 return PETSC_ERR_ARG_NULL;
821 }
822
823 // Access the coordinate array for reading
824 ierr = DMDAVecGetArrayRead(user->fda, coordinates, &coordArray);
825 if (ierr) {
826 LOG_ALLOW(LOCAL, LOG_ERROR, "Error accessing coordinate array.\n");
827 return ierr;
828 }
829
830 // Get the local grid information (indices and sizes)
831 ierr = DMDAGetLocalInfo(user->da, &info);
832 if (ierr) {
833 LOG_ALLOW(LOCAL, LOG_ERROR, "Error getting DMDA local info.\n");
834 return ierr;
835 }
836
837
838 xs = info.gxs; xe = xs + info.gxm;
839 ys = info.gys; ye = ys + info.gym;
840 zs = info.gzs; ze = zs + info.gzm;
841
842 /*
843 xs = info.xs; xe = xs + info.xm;
844 ys = info.ys; ye = ys + info.ym;
845 zs = info.zs; ze = zs + info.zm;
846 */
847
848 // Initialize min and max coordinates with extreme values
849 minCoords.x = minCoords.y = minCoords.z = PETSC_MAX_REAL;
850 maxCoords.x = maxCoords.y = maxCoords.z = PETSC_MIN_REAL;
851
852 LOG_ALLOW(LOCAL, LOG_TRACE, "[Rank %d] Grid indices (Including Ghosts): xs=%d, xe=%d, ys=%d, ye=%d, zs=%d, ze=%d.\n",rank, xs, xe, ys, ye, zs, ze);
853
854 // Iterate over the local grid to find min and max coordinates
855 for (k = zs; k < ze; k++) {
856 for (j = ys; j < ye; j++) {
857 for (i = xs; i < xe; i++) {
858 // Only consider nodes within the physical domain.
859 if(i < user->IM && j < user->JM && k < user->KM){
860 Cmpnts coord = coordArray[k][j][i];
861
862 // Update min and max coordinates
863 if (coord.x < minCoords.x) minCoords.x = coord.x;
864 if (coord.y < minCoords.y) minCoords.y = coord.y;
865 if (coord.z < minCoords.z) minCoords.z = coord.z;
866
867 if (coord.x > maxCoords.x) maxCoords.x = coord.x;
868 if (coord.y > maxCoords.y) maxCoords.y = coord.y;
869 if (coord.z > maxCoords.z) maxCoords.z = coord.z;
870 }
871 }
872 }
873 }
874
875
876 // Add tolerance to bboxes.
877 minCoords.x = minCoords.x - BBOX_TOLERANCE;
878 minCoords.y = minCoords.y - BBOX_TOLERANCE;
879 minCoords.z = minCoords.z - BBOX_TOLERANCE;
880
881 maxCoords.x = maxCoords.x + BBOX_TOLERANCE;
882 maxCoords.y = maxCoords.y + BBOX_TOLERANCE;
883 maxCoords.z = maxCoords.z + BBOX_TOLERANCE;
884
885 LOG_ALLOW(LOCAL,LOG_DEBUG," Tolerance added to the limits: %.8e .\n",(PetscReal)BBOX_TOLERANCE);
886
887 // Log the computed min and max coordinates
888 LOG_ALLOW(LOCAL, LOG_INFO,"[Rank %d] Bounding Box Ranges = X[%.6f, %.6f], Y[%.6f,%.6f], Z[%.6f, %.6f].\n",rank,minCoords.x, maxCoords.x,minCoords.y, maxCoords.y, minCoords.z, maxCoords.z);
889
890
891
892 // Restore the coordinate array
893 ierr = DMDAVecRestoreArrayRead(user->fda, coordinates, &coordArray);
894 if (ierr) {
895 LOG_ALLOW(LOCAL, LOG_ERROR, "Error restoring coordinate array.\n");
896 return ierr;
897 }
898
899 // Set the local bounding box
900 localBBox->min_coords = minCoords;
901 localBBox->max_coords = maxCoords;
902
903 // Update the bounding box inside the UserCtx for consistency
904 user->bbox = *localBBox;
905
906 LOG_ALLOW(GLOBAL, LOG_INFO, "Exiting the function successfully.\n");
907
909
910 PetscFunctionReturn(0);
911}
#define BBOX_TOLERANCE
Definition grid.c:6
@ LOG_ERROR
Critical errors that may halt the program.
Definition logging.h:29
Cmpnts max_coords
Maximum x, y, z coordinates of the bounding box.
Definition variables.h:156
Cmpnts min_coords
Minimum x, y, z coordinates of the bounding box.
Definition variables.h:155
BoundingBox bbox
Definition variables.h:672
Here is the caller graph for this function:

◆ GatherAllBoundingBoxes()

PetscErrorCode GatherAllBoundingBoxes ( UserCtx * user,
BoundingBox **  allBBoxes 
)

Gathers local bounding boxes from all MPI processes to rank 0.

Each rank computes its local bounding box, then all ranks participate in an MPI_Gather to send their BoundingBox to rank 0. Rank 0 allocates the result array and returns it via allBBoxes.

Parameters
[in] user : Pointer to UserCtx (must be non-NULL).
[out] allBBoxes : On rank 0, receives a malloc'd array of length size (one BoundingBox per MPI rank). On other ranks, set to NULL.
Returns
PetscErrorCode

Definition at line 928 of file grid.c.

929{
930 PetscErrorCode ierr;
931 PetscMPIInt rank, size;
932 BoundingBox *bboxArray = NULL;
933 BoundingBox localBBox;
934
935 PetscFunctionBeginUser;
936
938
939 /* Validate */
940 if (!user || !allBBoxes) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL,
941 "GatherAllBoundingBoxes: NULL pointer");
942
943 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRMPI(ierr);
944 ierr = MPI_Comm_size(PETSC_COMM_WORLD, &size); CHKERRMPI(ierr);
945
946 /* Compute local bbox */
947 ierr = ComputeLocalBoundingBox(user, &localBBox); CHKERRQ(ierr);
948
949 /* Ensure everyone is synchronized before the gather */
950 MPI_Barrier(PETSC_COMM_WORLD);
952 "Rank %d: about to MPI_Gather(localBBox)\n", rank);
953
954 /* Allocate on root */
955 if (rank == 0) {
956 bboxArray = (BoundingBox*)malloc(size * sizeof(BoundingBox));
957 if (!bboxArray) SETERRABORT(PETSC_COMM_WORLD, PETSC_ERR_MEM,
958 "GatherAllBoundingBoxes: malloc failed");
959 }
960
961 /* Collective: every rank must call */
962 ierr = MPI_Gather(&localBBox, sizeof(BoundingBox), MPI_BYTE,
963 bboxArray, sizeof(BoundingBox), MPI_BYTE,
964 0, PETSC_COMM_WORLD);
965 CHKERRMPI(ierr);
966
967 MPI_Barrier(PETSC_COMM_WORLD);
969 "Rank %d: completed MPI_Gather(localBBox)\n", rank);
970
971 /* Return result */
972 if (rank == 0) {
973 *allBBoxes = bboxArray;
974 } else {
975 *allBBoxes = NULL;
976 }
977
979
980 PetscFunctionReturn(0);
981}
PetscErrorCode ComputeLocalBoundingBox(UserCtx *user, BoundingBox *localBBox)
Computes the local bounding box of the grid on the current process.
Definition grid.c:780
Defines a 3D axis-aligned bounding box.
Definition variables.h:154
Here is the call graph for this function:
Here is the caller graph for this function:

◆ BroadcastAllBoundingBoxes()

PetscErrorCode BroadcastAllBoundingBoxes ( UserCtx * user,
BoundingBox **  bboxlist 
)

Broadcasts the bounding box list from rank 0 to all ranks.

Broadcasts the bounding box information collected on rank 0 to all other ranks.

After GatherAllBoundingBoxes, rank 0 holds an array of size bounding boxes (one per MPI rank). This routine ensures every rank ends up with its own malloc'd copy of that array.

Parameters
[in] user : Pointer to UserCtx (unused here, but kept for signature).
[in,out] bboxlist : On entry: rank 0's array; on exit: every rank's array.
Returns
PetscErrorCode

Definition at line 996 of file grid.c.

997{
998 PetscErrorCode ierr;
999 PetscMPIInt rank, size;
1000
1001 PetscFunctionBeginUser;
1002
1004
1005 if (!bboxlist) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL,
1006 "BroadcastAllBoundingBoxes: NULL pointer");
1007
1008 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRMPI(ierr);
1009 ierr = MPI_Comm_size(PETSC_COMM_WORLD, &size); CHKERRMPI(ierr);
1010
1011 /* Non-root ranks must allocate before the Bcast */
1012 if (rank != 0) {
1013 *bboxlist = (BoundingBox*)malloc(size * sizeof(BoundingBox));
1014 if (!*bboxlist) SETERRABORT(PETSC_COMM_WORLD, PETSC_ERR_MEM,
1015 "BroadcastAllBoundingBoxes: malloc failed");
1016 }
1017
1018 MPI_Barrier(PETSC_COMM_WORLD);
1020 "Rank %d: about to MPI_Bcast(%d boxes)\n", rank, size);
1021
1022 /* Collective: every rank must call */
1023 ierr = MPI_Bcast(*bboxlist, size * sizeof(BoundingBox), MPI_BYTE,
1024 0, PETSC_COMM_WORLD);
1025 CHKERRMPI(ierr);
1026
1027 MPI_Barrier(PETSC_COMM_WORLD);
1029 "Rank %d: completed MPI_Bcast(%d boxes)\n", rank, size);
1030
1031
1033
1034 PetscFunctionReturn(0);
1035}
Here is the caller graph for this function:
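
A minimal usage sketch of the gather/broadcast pair. It assumes user points to a fully initialized UserCtx on every rank and that both routines are declared in grid.h; ExchangeBoundingBoxes is a hypothetical wrapper name. Each rank must eventually free(*bboxlist), matching the malloc performed inside these routines.

#include <stdlib.h>
#include "grid.h"   /* GatherAllBoundingBoxes, BroadcastAllBoundingBoxes (assumed declared here) */

/* Hypothetical wrapper: after this call, every rank holds the bounding
 * boxes of all ranks in (*bboxlist)[0 .. size-1]. */
static PetscErrorCode ExchangeBoundingBoxes(UserCtx *user, BoundingBox **bboxlist,
                                            PetscMPIInt *size)
{
    PetscErrorCode ierr;
    PetscFunctionBeginUser;
    ierr = MPI_Comm_size(PETSC_COMM_WORLD, size); CHKERRMPI(ierr);
    ierr = GatherAllBoundingBoxes(user, bboxlist); CHKERRQ(ierr);    /* rank 0 now owns the array */
    ierr = BroadcastAllBoundingBoxes(user, bboxlist); CHKERRQ(ierr); /* now every rank owns a copy */
    PetscFunctionReturn(0);
}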

◆ CalculateInletCenter()

PetscErrorCode CalculateInletCenter ( UserCtx * user)

Calculates the geometric center of the primary inlet face.

This function identifies the first face designated as an INLET in the boundary condition configuration. It then iterates over all grid nodes on that physical face across all MPI processes, calculates the average of their coordinates, and stores the result in the user's SimCtx (CMx_c, CMy_c, CMz_c).

This provides an automatic, robust way to determine the center for profiles like parabolic flow, removing the need for manual user input.

Parameters
user : The main UserCtx struct, containing BC config and the grid coordinate vector.
Returns
PetscErrorCode 0 on success.

Definition at line 1054 of file grid.c.

1055{
1056 PetscErrorCode ierr;
1057 BCFace inlet_face_id = -1;
1058 PetscBool inlet_found = PETSC_FALSE;
1059
1060 PetscReal local_sum[3] = {0.0, 0.0, 0.0};
1061 PetscReal global_sum[3] = {0.0, 0.0, 0.0};
1062 PetscCount local_n_points = 0;
1063 PetscCount global_n_points = 0;
1064
1065 DM da = user->da;
1066 DMDALocalInfo info = user->info;
1067 PetscInt xs = info.xs, xe = info.xs + info.xm;
1068 PetscInt ys = info.ys, ye = info.ys + info.ym;
1069 PetscInt zs = info.zs, ze = info.zs + info.zm;
1070 PetscInt mx = info.mx, my = info.my, mz = info.mz;
1071 Vec lCoor;
1072 Cmpnts ***coor;
1073
1074
1075 PetscFunctionBeginUser;
1076
1078
1079 // 1. Identify the primary inlet face from the configuration
1080 for (int i = 0; i < 6; i++) {
1081 if (user->boundary_faces[i].mathematical_type == INLET) {
1082 inlet_face_id = user->boundary_faces[i].face_id;
1083 inlet_found = PETSC_TRUE;
1084 break; // Use the first inlet found
1085 }
1086 }
1087
1088 if (!inlet_found) {
1089 LOG_ALLOW(GLOBAL, LOG_INFO, "No INLET face found. Skipping inlet center calculation.\n");
1090 PetscFunctionReturn(0);
1091 }
1092
1093 // 2. Get the nodal coordinates
1094 ierr = DMGetCoordinatesLocal(user->da,&lCoor);
1095 ierr = DMDAVecGetArrayRead(user->fda, lCoor, &coor); CHKERRQ(ierr);
1096
1097 // 3. Loop over the identified inlet face and sum local coordinates
1098 switch (inlet_face_id) {
1099 case BC_FACE_NEG_X:
1100 if (xs == 0) {
1101 for (PetscInt k = zs; k < ze; k++) for (PetscInt j = ys; j < ye; j++) {
1102 if(j < user->JM && k < user->KM){ // Ensure within physical domain
1103 local_sum[0] += coor[k][j][0].x;
1104 local_sum[1] += coor[k][j][0].y;
1105 local_sum[2] += coor[k][j][0].z;
1106 local_n_points++;
1107 }
1108 }
1109 }
1110 break;
1111 case BC_FACE_POS_X:
1112 if (xe == mx) { // another check could be if (xe > user->IM - 1)
1113 for (PetscInt k = zs; k < ze; k++) for (PetscInt j = ys; j < ye; j++) {
1114 if(j < user->JM && k < user->KM){ // Ensure within physical domain
1115 local_sum[0] += coor[k][j][mx-2].x; // mx-1 is the ghost layer
1116 local_sum[1] += coor[k][j][mx-2].y; // mx-2 = IM - 1.
1117 local_sum[2] += coor[k][j][mx-2].z;
1118 local_n_points++;
1119 }
1120 }
1121 }
1122 break;
1123 case BC_FACE_NEG_Y:
1124 if (ys == 0) {
1125 for (PetscInt k = zs; k < ze; k++) for (PetscInt i = xs; i < xe; i++) {
1126 if(i < user->IM && k < user->KM){ // Ensure within physical domain
1127 local_sum[0] += coor[k][0][i].x;
1128 local_sum[1] += coor[k][0][i].y;
1129 local_sum[2] += coor[k][0][i].z;
1130 local_n_points++;
1131 }
1132 }
1133 }
1134 break;
1135 case BC_FACE_POS_Y:
1136 if (ye == my) { // another check could be if (ye > user->JM - 1)
1137 for (PetscInt k = zs; k < ze; k++) for (PetscInt i = xs; i < xe; i++) {
1138 local_sum[0] += coor[k][my-2][i].x; // my-1 is the ghost layer
1139 local_sum[1] += coor[k][my-2][i].y; // my-2 = JM - 1.
1140 local_sum[2] += coor[k][my-2][i].z;
1141 local_n_points++;
1142 }
1143 }
1144 break;
1145 case BC_FACE_NEG_Z:
1146 if (zs == 0) {
1147 for (PetscInt j = ys; j < ye; j++) for (PetscInt i = xs; i < xe; i++) {
1148 if(i < user->IM && j < user->JM){ // Ensure within physical domain
1149 local_sum[0] += coor[0][j][i].x;
1150 local_sum[1] += coor[0][j][i].y;
1151 local_sum[2] += coor[0][j][i].z;
1152 local_n_points++;
1153 }
1154 }
1155 }
1156 break;
1157 case BC_FACE_POS_Z:
1158 if (ze == mz) { // another check could be if (ze > user->KM - 1)
1159 for (PetscInt j = ys; j < ye; j++) for (PetscInt i = xs; i < xe; i++) {
1160 if(i < user->IM && j < user->JM){ // Ensure within physical domain
1161 local_sum[0] += coor[mz-2][j][i].x; // mz-1 is the ghost layer
1162 local_sum[1] += coor[mz-2][j][i].y; // mz-2 = KM - 1.
1163 local_sum[2] += coor[mz-2][j][i].z;
1164 local_n_points++;
1165 }
1166 }
1167 }
1168 break;
1169 }
1170
1171 ierr = DMDAVecRestoreArrayRead(user->fda, lCoor, &coor); CHKERRQ(ierr);
1172
1173 // 4. Perform MPI Allreduce to get global sums
1174 ierr = MPI_Allreduce(local_sum, global_sum, 3, MPI_DOUBLE, MPI_SUM, PETSC_COMM_WORLD); CHKERRQ(ierr);
1175 ierr = MPI_Allreduce(&local_n_points, &global_n_points, 1, MPI_INT, MPI_SUM, PETSC_COMM_WORLD); CHKERRQ(ierr);
1176
1177 // 5. Calculate average and store in SimCtx
1178 if (global_n_points > 0) {
1179 user->simCtx->CMx_c = global_sum[0] / global_n_points;
1180 user->simCtx->CMy_c = global_sum[1] / global_n_points;
1181 user->simCtx->CMz_c = global_sum[2] / global_n_points;
1182 LOG_ALLOW(GLOBAL, LOG_INFO, "Calculated inlet center for Face %s: (x=%.4f, y=%.4f, z=%.4f)\n",
1183 BCFaceToString((BCFace)inlet_face_id), user->simCtx->CMx_c, user->simCtx->CMy_c, user->simCtx->CMz_c);
1184 } else {
1185 LOG_ALLOW(GLOBAL, LOG_WARNING, "WARNING: Inlet face was identified but no grid points found on it. Center not calculated.\n");
1186 }
1187
1189
1190 PetscFunctionReturn(0);
1191}
const char * BCFaceToString(BCFace face)
Helper function to convert BCFace enum to a string representation.
Definition logging.c:643
@ INLET
Definition variables.h:214
BoundaryFaceConfig boundary_faces[6]
Definition variables.h:679
PetscReal CMy_c
Definition variables.h:589
PetscReal CMz_c
Definition variables.h:589
BCType mathematical_type
Definition variables.h:273
PetscReal CMx_c
Definition variables.h:589
BCFace
Identifies the six logical faces of a structured computational block.
Definition variables.h:200
@ BC_FACE_NEG_X
Definition variables.h:201
@ BC_FACE_POS_Z
Definition variables.h:203
@ BC_FACE_POS_Y
Definition variables.h:202
@ BC_FACE_NEG_Z
Definition variables.h:203
@ BC_FACE_POS_X
Definition variables.h:201
@ BC_FACE_NEG_Y
Definition variables.h:202
Here is the call graph for this function:
Here is the caller graph for this function:
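
As a rough illustration of how the stored center could feed a parabolic inlet profile: the quadratic fall-off and the inlet_radius and u_peak parameters below are assumptions for illustration only and are not part of grid.c. The example assumes the inlet face is normal to X, so the profile varies with (y, z).

#include <petscsys.h>

/* Hypothetical profile helper: returns a velocity that is u_peak at the
 * computed inlet center (CMy_c, CMz_c) and falls to zero at a distance
 * of inlet_radius from it. */
static PetscReal ParabolicInletVelocity(PetscReal y, PetscReal z,
                                        PetscReal CMy_c, PetscReal CMz_c,
                                        PetscReal inlet_radius, PetscReal u_peak)
{
    PetscReal r2     = (y - CMy_c) * (y - CMy_c) + (z - CMz_c) * (z - CMz_c);
    PetscReal factor = 1.0 - r2 / (inlet_radius * inlet_radius);
    if (factor < 0.0) factor = 0.0;   /* outside the assumed inlet radius */
    return u_peak * factor;
}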