PICurv 0.1.0
A Parallel Particle-In-Cell Solver for Curvilinear LES
Loading...
Searching...
No Matches
Macros | Functions
grid.c File Reference
#include "grid.h"
#include "logging.h"
Include dependency graph for grid.c:

Go to the source code of this file.

Macros

#define BBOX_TOLERANCE   1e-6
 
#define __FUNCT__   "ParseAndSetGridInputs"
 
#define __FUNCT__   "DefineAllGridDimensions"
 
#define __FUNCT__   "InitializeSingleGridDM"
 
#define __FUNCT__   "InitializeAllGridDMs"
 
#define __FUNCT__   "AssignAllGridCoordinates"
 
#define __FUNCT__   "SetFinestLevelCoordinates"
 
#define __FUNCT__   "GenerateAndSetCoordinates"
 
#define __FUNCT__   "ReadAndSetCoordinates"
 
#define __FUNCT__   "RestrictCoordinates"
 
#define __FUNCT__   "ComputeLocalBoundingBox"
 
#define __FUNCT__   "GatherAllBoundingBoxes"
 
#define __FUNCT__   "BroadcastAllBoundingBoxes"
 
#define __FUNCT__   "CalculateInletProperties"
 
#define __FUNCT__   "CalculateOutletProperties"
 
#define __FUNCT__   "CalculateFaceCenterAndArea"
 

Functions

static PetscErrorCode ParseAndSetGridInputs (UserCtx *user)
 Internal helper implementation: ParseAndSetGridInputs().
 
PetscErrorCode DefineAllGridDimensions (SimCtx *simCtx)
 Internal helper implementation: DefineAllGridDimensions().
 
static PetscErrorCode InitializeSingleGridDM (UserCtx *user, UserCtx *coarse_user)
 Internal helper implementation: InitializeSingleGridDM().
 
PetscErrorCode InitializeAllGridDMs (SimCtx *simCtx)
 Internal helper implementation: InitializeAllGridDMs().
 
static PetscErrorCode SetFinestLevelCoordinates (UserCtx *user)
 Internal helper implementation: SetFinestLevelCoordinates().
 
static PetscErrorCode GenerateAndSetCoordinates (UserCtx *user)
 Internal helper implementation: GenerateAndSetCoordinates().
 
static PetscErrorCode ReadAndSetCoordinates (UserCtx *user, FILE *fd)
 Internal helper implementation: ReadAndSetCoordinates().
 
static PetscErrorCode RestrictCoordinates (UserCtx *coarse_user, UserCtx *fine_user)
 Internal helper implementation: RestrictCoordinates().
 
PetscErrorCode AssignAllGridCoordinates (SimCtx *simCtx)
 Internal helper implementation: AssignAllGridCoordinates().
 
static PetscReal ComputeStretchedCoord (PetscInt i, PetscInt N, PetscReal L, PetscReal r)
 Internal helper implementation: ComputeStretchedCoord().
 
PetscErrorCode ComputeLocalBoundingBox (UserCtx *user, BoundingBox *localBBox)
 Implementation of ComputeLocalBoundingBox().
 
PetscErrorCode GatherAllBoundingBoxes (UserCtx *user, BoundingBox **allBBoxes)
 Implementation of GatherAllBoundingBoxes().
 
PetscErrorCode BroadcastAllBoundingBoxes (UserCtx *user, BoundingBox **bboxlist)
 Internal helper implementation: BroadcastAllBoundingBoxes().
 
PetscErrorCode CalculateInletProperties (UserCtx *user)
 Implementation of CalculateInletProperties().
 
PetscErrorCode CalculateOutletProperties (UserCtx *user)
 Implementation of CalculateOutletProperties().
 
PetscErrorCode CalculateFaceCenterAndArea (UserCtx *user, BCFace face_id, Cmpnts *face_center, PetscReal *face_area)
 Implementation of CalculateFaceCenterAndArea().
 

Macro Definition Documentation

◆ BBOX_TOLERANCE

#define BBOX_TOLERANCE   1e-6

Definition at line 6 of file grid.c.

◆ __FUNCT__ [1/15]

#define __FUNCT__   "ParseAndSetGridInputs"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [2/15]

#define __FUNCT__   "DefineAllGridDimensions"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [3/15]

#define __FUNCT__   "InitializeSingleGridDM"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [4/15]

#define __FUNCT__   "InitializeAllGridDMs"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [5/15]

#define __FUNCT__   "AssignAllGridCoordinates"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [6/15]

#define __FUNCT__   "SetFinestLevelCoordinates"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [7/15]

#define __FUNCT__   "GenerateAndSetCoordinates"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [8/15]

#define __FUNCT__   "ReadAndSetCoordinates"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [9/15]

#define __FUNCT__   "RestrictCoordinates"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [10/15]

#define __FUNCT__   "ComputeLocalBoundingBox"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [11/15]

#define __FUNCT__   "GatherAllBoundingBoxes"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [12/15]

#define __FUNCT__   "BroadcastAllBoundingBoxes"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [13/15]

#define __FUNCT__   "CalculateInletProperties"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [14/15]

#define __FUNCT__   "CalculateOutletProperties"

Definition at line 9 of file grid.c.

◆ __FUNCT__ [15/15]

#define __FUNCT__   "CalculateFaceCenterAndArea"

Definition at line 9 of file grid.c.

Function Documentation

◆ ParseAndSetGridInputs()

static PetscErrorCode ParseAndSetGridInputs ( UserCtx user)
static

Internal helper implementation: ParseAndSetGridInputs().

Local to this translation unit.

Definition at line 14 of file grid.c.

15{
16 PetscErrorCode ierr;
17 SimCtx *simCtx = user->simCtx; // Get the global context via the back-pointer
18
19 PetscFunctionBeginUser;
20
22 if(strcmp(simCtx->eulerianSource,"analytical")==0 &&
24 ierr = SetAnalyticalGridInfo(user); CHKERRQ(ierr);
25 } else if (simCtx->generate_grid) {
26 if (strcmp(simCtx->eulerianSource, "analytical") == 0) {
28 "Rank %d: Analytical type '%s' uses programmatic grid ingestion for block %d.\n",
29 simCtx->rank, simCtx->AnalyticalSolutionType, user->_this);
30 } else {
31 LOG_ALLOW_SYNC(GLOBAL, LOG_DEBUG, "Rank %d: Block %d is programmatically generated. Calling generation parser.\n", simCtx->rank, user->_this);
32 }
33 ierr = ReadGridGenerationInputs(user); CHKERRQ(ierr);
34 } else {
35 if (strcmp(simCtx->eulerianSource, "analytical") == 0) {
37 "Rank %d: Analytical type '%s' uses file-based grid ingestion for block %d.\n",
38 simCtx->rank, simCtx->AnalyticalSolutionType, user->_this);
39 } else {
40 LOG_ALLOW_SYNC(GLOBAL, LOG_DEBUG, "Rank %d: Block %d is file-based. Calling file parser.\n", simCtx->rank, user->_this);
41 }
42 ierr = ReadGridFile(user); CHKERRQ(ierr);
43 }
44
46
47 PetscFunctionReturn(0);
48}
PetscBool AnalyticalTypeRequiresCustomGeometry(const char *analytical_type)
Reports whether an analytical type requires custom geometry/decomposition logic.
PetscErrorCode SetAnalyticalGridInfo(UserCtx *user)
Sets the grid domain and resolution for analytical solution cases.
PetscErrorCode ReadGridFile(UserCtx *user)
Sets grid dimensions from a file for a SINGLE block using a one-time read cache.
Definition io.c:219
PetscErrorCode ReadGridGenerationInputs(UserCtx *user)
Parses command-line options for a programmatically generated grid for a SINGLE block.
Definition io.c:85
#define LOG_ALLOW_SYNC(scope, level, fmt,...)
Synchronized logging macro that checks both the log level and whether the calling function is in the allowed-function list before emitting output.
Definition logging.h:252
#define GLOBAL
Scope for global logging across all processes.
Definition logging.h:45
#define PROFILE_FUNCTION_END
Marks the end of a profiled code block.
Definition logging.h:779
@ LOG_DEBUG
Detailed debugging information.
Definition logging.h:31
#define PROFILE_FUNCTION_BEGIN
Marks the beginning of a profiled code block (typically a function).
Definition logging.h:770
PetscMPIInt rank
Definition variables.h:646
SimCtx * simCtx
Back-pointer to the master simulation context.
Definition variables.h:814
PetscInt _this
Definition variables.h:824
PetscBool generate_grid
Definition variables.h:714
char eulerianSource[PETSC_MAX_PATH_LEN]
Definition variables.h:663
char AnalyticalSolutionType[PETSC_MAX_PATH_LEN]
Definition variables.h:676
The master context for the entire simulation.
Definition variables.h:643
Here is the call graph for this function:
Here is the caller graph for this function:

◆ DefineAllGridDimensions()

PetscErrorCode DefineAllGridDimensions ( SimCtx simCtx)

Internal helper implementation: DefineAllGridDimensions().

Orchestrates the parsing and setting of grid dimensions for all blocks.

Local to this translation unit.

Definition at line 57 of file grid.c.

58{
59 PetscErrorCode ierr;
60 PetscInt nblk = simCtx->block_number;
61 UserCtx *finest_users;
62
63 PetscFunctionBeginUser;
64
66
67 if (simCtx->usermg.mglevels == 0) {
68 SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_ARG_WRONGSTATE, "MG levels not set. Cannot get finest_users.");
69 }
70 // Get the UserCtx array for the finest grid level
71 finest_users = simCtx->usermg.mgctx[simCtx->usermg.mglevels - 1].user;
72
73 LOG_ALLOW(GLOBAL, LOG_INFO, "Defining grid dimensions for %d blocks...\n", nblk);
74 if (strcmp(simCtx->eulerianSource, "analytical") == 0 &&
77 "Analytical type '%s' requires custom geometry; preloading finest-grid IM/JM/KM once.\n",
79 ierr = PopulateFinestUserGridResolutionFromOptions(finest_users, nblk); CHKERRQ(ierr);
80 }
81
82 // Loop over each block to configure its grid dimensions and geometry.
83 for (PetscInt bi = 0; bi < nblk; bi++) {
84 LOG_ALLOW_SYNC(GLOBAL, LOG_DEBUG, "Rank %d: --- Configuring Geometry for Block %d ---\n", simCtx->rank, bi);
85
86 // Before calling any helpers, set the block index in the context.
87 // This makes the UserCtx self-aware of which block it represents.
88 LOG_ALLOW(GLOBAL,LOG_DEBUG,"finest_users->_this = %d, bi = %d\n",finest_users[bi]._this,bi);
89 //finest_user[bi]._this = bi;
90
91 // Call the helper function for this specific block. It can now derive
92 // all necessary information from the UserCtx pointer it receives.
93 ierr = ParseAndSetGridInputs(&finest_users[bi]); CHKERRQ(ierr);
94 }
95
97
98 PetscFunctionReturn(0);
99}
static PetscErrorCode ParseAndSetGridInputs(UserCtx *user)
Internal helper implementation: ParseAndSetGridInputs().
Definition grid.c:14
PetscErrorCode PopulateFinestUserGridResolutionFromOptions(UserCtx *finest_users, PetscInt nblk)
Parses grid resolution arrays (-im, -jm, -km) once and applies them to all finest-grid blocks.
Definition io.c:170
#define LOG_ALLOW(scope, level, fmt,...)
Logging macro that checks both the log level and whether the calling function is in the allowed-function list before emitting output.
Definition logging.h:199
@ LOG_INFO
Informational messages about program execution.
Definition logging.h:30
UserCtx * user
Definition variables.h:528
PetscInt block_number
Definition variables.h:712
UserMG usermg
Definition variables.h:764
PetscInt mglevels
Definition variables.h:535
MGCtx * mgctx
Definition variables.h:538
User-defined context containing data specific to a single computational grid level.
Definition variables.h:811
Here is the call graph for this function:
Here is the caller graph for this function:

◆ InitializeSingleGridDM()

static PetscErrorCode InitializeSingleGridDM ( UserCtx user,
UserCtx coarse_user 
)
static

Internal helper implementation: InitializeSingleGridDM().

Local to this translation unit.

Definition at line 107 of file grid.c.

108{
109 PetscErrorCode ierr;
110 SimCtx *simCtx = user->simCtx;
111
112 DMBoundaryType xperiod = (simCtx->i_periodic) ? DM_BOUNDARY_PERIODIC : DM_BOUNDARY_NONE;
113 DMBoundaryType yperiod = (simCtx->j_periodic) ? DM_BOUNDARY_PERIODIC : DM_BOUNDARY_NONE;
114 DMBoundaryType zperiod = (simCtx->k_periodic) ? DM_BOUNDARY_PERIODIC : DM_BOUNDARY_NONE;
115 PetscInt stencil_width = (simCtx->i_periodic || simCtx->j_periodic || simCtx->k_periodic) ? 3:2; // Stencil width is 2 in the legacy code
116
117 PetscInt *lx = NULL, *ly = NULL, *lz = NULL;
118 PetscInt m, n, p;
119
120 PetscFunctionBeginUser;
121
123
124 if (coarse_user) {
125 // --- This is a FINE grid; it must be aligned with the COARSE grid ---
126 LOG_ALLOW_SYNC(LOCAL, LOG_DEBUG, "Rank %d: [Aligning DM] for block %d level %d (size %dx%dx%d) with level %d\n", simCtx->rank, user->_this, user->thislevel, user->IM, user->JM, user->KM, coarse_user->thislevel);
127
128 DMDAGetInfo(coarse_user->da, NULL, NULL, NULL, NULL, &m, &n, &p, NULL, NULL, NULL, NULL, NULL, NULL);
129 LOG_ALLOW_SYNC(LOCAL, LOG_TRACE, "Rank %d: Coarse grid processor decomposition is %d x %d x %d\n", simCtx->rank, m, n, p);
130
131 // This is the core logic from MGDACreate to ensure processor alignment.
132 PetscInt *lx_contrib, *ly_contrib, *lz_contrib;
133 ierr = PetscMalloc3(m, &lx_contrib, n, &ly_contrib, p, &lz_contrib); CHKERRQ(ierr);
134 ierr = PetscMemzero(lx_contrib, m * sizeof(PetscInt)); CHKERRQ(ierr);
135 ierr = PetscMemzero(ly_contrib, n * sizeof(PetscInt)); CHKERRQ(ierr);
136 ierr = PetscMemzero(lz_contrib, p * sizeof(PetscInt)); CHKERRQ(ierr);
137
138 DMDALocalInfo info;
139 DMDAGetLocalInfo(coarse_user->da, &info);
140 PetscInt xs = info.xs, xe = info.xs + info.xm, mx = info.mx;
141 PetscInt ys = info.ys, ye = info.ys + info.ym, my = info.my;
142 PetscInt zs = info.zs, ze = info.zs + info.zm, mz = info.mz;
143
144 PetscMPIInt rank;
145 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
146 PetscInt proc_i = rank % m;
147 PetscInt proc_j = (rank / m) % n;
148 PetscInt proc_k = rank / (m * n);
149
150 // --- X-Direction Logic (Identical to MGDACreate) ---
151 if (user->isc) lx_contrib[proc_i] = (xe - xs);
152 else {
153 if (m == 1) lx_contrib[0] = user->IM + 1;
154 else if (xs == 0) lx_contrib[0] = 2 * xe - 1;
155 else if (xe == mx) lx_contrib[proc_i] = user->IM + 1 - (2 * xs - 1);
156 else lx_contrib[proc_i] = (xe - xs) * 2;
157 }
158
159 // --- Y-Direction Logic (Identical to MGDACreate) ---
160 if (user->jsc) ly_contrib[proc_j] = (ye - ys);
161 else {
162 if (n == 1) ly_contrib[0] = user->JM + 1;
163 else if (ys == 0) ly_contrib[0] = 2 * ye - 1;
164 else if (ye == my) ly_contrib[proc_j] = user->JM + 1 - (2 * ys - 1);
165 else ly_contrib[proc_j] = (ye - ys) * 2;
166 }
167
168 // --- Z-Direction Logic (Identical to MGDACreate) ---
169 if (user->ksc) lz_contrib[proc_k] = (ze - zs);
170 else {
171 if (p == 1) lz_contrib[0] = user->KM + 1;
172 else if (zs == 0) lz_contrib[0] = 2 * ze - 1;
173 else if (ze == mz) lz_contrib[proc_k] = user->KM + 1 - (2 * zs - 1);
174 else lz_contrib[proc_k] = (ze - zs) * 2;
175 }
176 LOG_ALLOW_SYNC(LOCAL, LOG_VERBOSE, "Rank %d: Calculated this rank's node contribution to fine grid: lx=%d, ly=%d, lz=%d\n", simCtx->rank, lx_contrib[proc_i], ly_contrib[proc_j], lz_contrib[proc_k]);
177
178 // Allocate the final distribution arrays and Allreduce to get the global distribution
179 ierr = PetscMalloc3(m, &lx, n, &ly, p, &lz); CHKERRQ(ierr);
180 ierr = MPI_Allreduce(lx_contrib, lx, m, MPIU_INT, MPI_MAX, PETSC_COMM_WORLD); CHKERRQ(ierr);
181 ierr = MPI_Allreduce(ly_contrib, ly, n, MPIU_INT, MPI_MAX, PETSC_COMM_WORLD); CHKERRQ(ierr);
182 ierr = MPI_Allreduce(lz_contrib, lz, p, MPIU_INT, MPI_MAX, PETSC_COMM_WORLD); CHKERRQ(ierr);
183
184 ierr = PetscFree3(lx_contrib, ly_contrib, lz_contrib); CHKERRQ(ierr);
185
186 } else {
187 // --- CASE 2: This is the COARSEST grid; use default or user-specified decomposition ---
188 if(simCtx->exec_mode == EXEC_MODE_SOLVER){
189
190 LOG_ALLOW_SYNC(LOCAL, LOG_DEBUG, "Rank %d: Creating coarsest DM for block %d level %d (size %dx%dx%d)\n", simCtx->rank, user->_this, user->thislevel, user->IM, user->JM, user->KM);
191 m = simCtx->da_procs_x;
192 n = simCtx->da_procs_y;
193 p = simCtx->da_procs_z;
194
195 } else if(simCtx->exec_mode == EXEC_MODE_POSTPROCESSOR){
196
197 LOG_ALLOW(GLOBAL,LOG_ERROR,"Currently Only Single Rank is supported. \n");
198
199 m = n = p = PETSC_DECIDE;
200
201 }
202 // lx, ly, lz are NULL, so DMDACreate3d will use the m,n,p values.
203 }
204
205 // --- Create the DMDA for the current UserCtx ---
206 LOG_ALLOW_SYNC(LOCAL, LOG_DEBUG, "Rank %d: Calling DMDACreate3d...\n", simCtx->rank);
207 ierr = DMDACreate3d(PETSC_COMM_WORLD, xperiod, yperiod, zperiod, DMDA_STENCIL_BOX,
208 user->IM + 1, user->JM + 1, user->KM + 1,
209 m, n, p,
210 1, stencil_width, lx, ly, lz, &user->da); CHKERRQ(ierr);
211
212 if (coarse_user) {
213 ierr = PetscFree3(lx, ly, lz); CHKERRQ(ierr);
214 }
215
216 // --- Standard DM setup applicable to all levels ---
217 ierr = DMSetUp(user->da); CHKERRQ(ierr);
218 ierr = DMGetCoordinateDM(user->da, &user->fda); CHKERRQ(ierr);
219 ierr = DMDASetUniformCoordinates(user->da, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0); CHKERRQ(ierr);
220 ierr = DMDAGetLocalInfo(user->da, &user->info); CHKERRQ(ierr);
221 LOG_ALLOW_SYNC(LOCAL, LOG_DEBUG, "Rank %d: DM creation for block %d level %d complete.\n", simCtx->rank, user->_this, user->thislevel);
222
224
225 PetscFunctionReturn(0);
226}
#define LOCAL
Logging scope definitions for controlling message output.
Definition logging.h:44
@ LOG_ERROR
Critical errors that may halt the program.
Definition logging.h:28
@ LOG_TRACE
Very fine-grained tracing information for in-depth debugging.
Definition logging.h:32
@ LOG_VERBOSE
Extremely detailed logs, typically for development use only.
Definition logging.h:33
PetscInt isc
Definition variables.h:824
PetscInt da_procs_z
Definition variables.h:718
PetscInt ksc
Definition variables.h:824
PetscInt KM
Definition variables.h:820
PetscInt da_procs_y
Definition variables.h:718
PetscInt k_periodic
Definition variables.h:713
PetscInt jsc
Definition variables.h:824
PetscInt thislevel
Definition variables.h:874
PetscInt JM
Definition variables.h:820
PetscInt da_procs_x
Definition variables.h:718
PetscInt i_periodic
Definition variables.h:713
DMDALocalInfo info
Definition variables.h:818
@ EXEC_MODE_SOLVER
Definition variables.h:616
@ EXEC_MODE_POSTPROCESSOR
Definition variables.h:617
PetscInt IM
Definition variables.h:820
ExecutionMode exec_mode
Definition variables.h:662
PetscInt j_periodic
Definition variables.h:713
Here is the caller graph for this function:

◆ InitializeAllGridDMs()

PetscErrorCode InitializeAllGridDMs ( SimCtx simCtx)

Internal helper implementation: InitializeAllGridDMs().

Orchestrates the creation of DMDA objects for every block and multigrid level.

Local to this translation unit.

Definition at line 235 of file grid.c.

236{
237 PetscErrorCode ierr;
238 UserMG *usermg = &simCtx->usermg;
239 MGCtx *mgctx = usermg->mgctx;
240 PetscInt nblk = simCtx->block_number;
241
242 PetscFunctionBeginUser;
243
245
246 LOG_ALLOW(GLOBAL,LOG_INFO, "Pre-scanning BCs to identify domain periodicity.\n");
247 ierr = DeterminePeriodicity(simCtx); CHKERRQ(ierr);
248
249 LOG_ALLOW(GLOBAL, LOG_INFO, "Creating DMDA objects for all levels and blocks...\n");
250
251 // --- Part 1: Calculate Coarse Grid Dimensions & VALIDATE ---
252 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Calculating and validating coarse grid dimensions...\n");
253 for (PetscInt level = usermg->mglevels - 2; level >= 0; level--) {
254 for (PetscInt bi = 0; bi < nblk; bi++) {
255 UserCtx *user_coarse = &mgctx[level].user[bi];
256 UserCtx *user_fine = &mgctx[level + 1].user[bi];
257
258 user_coarse->IM = user_fine->isc ? user_fine->IM : (user_fine->IM + 1) / 2;
259 user_coarse->JM = user_fine->jsc ? user_fine->JM : (user_fine->JM + 1) / 2;
260 user_coarse->KM = user_fine->ksc ? user_fine->KM : (user_fine->KM + 1) / 2;
261
262 LOG_ALLOW_SYNC(LOCAL, LOG_TRACE, "Rank %d: Block %d, Level %d dims calculated: %d x %d x %d\n",
263 simCtx->rank, bi, level, user_coarse->IM, user_coarse->JM, user_coarse->KM);
264
265 // Validation check from legacy MGDACreate to ensure coarsening is possible
266 PetscInt check_i = user_coarse->IM * (2 - user_coarse->isc) - (user_fine->IM + 1 - user_coarse->isc);
267 PetscInt check_j = user_coarse->JM * (2 - user_coarse->jsc) - (user_fine->JM + 1 - user_coarse->jsc);
268 PetscInt check_k = user_coarse->KM * (2 - user_coarse->ksc) - (user_fine->KM + 1 - user_coarse->ksc);
269
270 if (check_i + check_j + check_k != 0) {
271 // SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_ARG_WRONG,
272 // "Grid at level %d, block %d cannot be coarsened from %dx%dx%d to %dx%dx%d with the given semi-coarsening flags. Check grid dimensions.",
273 // level, bi, user_fine->IM, user_fine->JM, user_fine->KM, user_coarse->IM, user_coarse->JM, user_coarse->KM);
274 LOG(GLOBAL,LOG_WARNING,"WARNING: Grid at level %d, block %d can't be consistently coarsened further.\n", level, bi);
275 }
276 }
277 }
278
279 // --- Part 2: Create DMs from Coarse to Fine for each Block ---
280 for (PetscInt bi = 0; bi < nblk; bi++) {
281 LOG_ALLOW_SYNC(GLOBAL, LOG_DEBUG, "--- Creating DMs for Block %d ---\n", bi);
282
283 // Create the coarsest level DM first (passing NULL for the coarse_user)
284 ierr = InitializeSingleGridDM(&mgctx[0].user[bi], NULL); CHKERRQ(ierr);
285
286 // Create finer level DMs, passing the next-coarser context for alignment
287 for (PetscInt level = 1; level < usermg->mglevels; level++) {
288 ierr = InitializeSingleGridDM(&mgctx[level].user[bi], &mgctx[level-1].user[bi]); CHKERRQ(ierr);
289 }
290 }
291
292 // --- Optional: View the finest DM for debugging verification ---
293 if (get_log_level() >= LOG_DEBUG) {
294 LOG_ALLOW_SYNC(GLOBAL, LOG_INFO, "--- Viewing Finest DMDA (Level %d, Block 0) ---\n", usermg->mglevels - 1);
295 ierr = DMView(mgctx[usermg->mglevels - 1].user[0].da, PETSC_VIEWER_STDOUT_WORLD); CHKERRQ(ierr);
296 }
297
298 LOG_ALLOW(GLOBAL, LOG_INFO, "DMDA object creation complete.\n");
299
301
302 PetscFunctionReturn(0);
303}
static PetscErrorCode InitializeSingleGridDM(UserCtx *user, UserCtx *coarse_user)
Internal helper implementation: InitializeSingleGridDM().
Definition grid.c:107
PetscErrorCode DeterminePeriodicity(SimCtx *simCtx)
Scans all block-specific boundary condition files to determine a globally consistent periodicity for ...
Definition io.c:634
#define LOG(scope, level, fmt,...)
Logging macro for PETSc-based applications with scope control.
Definition logging.h:83
LogLevel get_log_level()
Retrieves the current logging level from the environment variable LOG_LEVEL.
Definition logging.c:84
@ LOG_WARNING
Non-critical issues that warrant attention.
Definition logging.h:29
Context for Multigrid operations.
Definition variables.h:527
User-level context for managing the entire multigrid hierarchy.
Definition variables.h:534
Here is the call graph for this function:
Here is the caller graph for this function:

◆ SetFinestLevelCoordinates()

static PetscErrorCode SetFinestLevelCoordinates ( UserCtx user)
static

Internal helper implementation: SetFinestLevelCoordinates().

Local to this translation unit.

Definition at line 370 of file grid.c.

371{
372 PetscErrorCode ierr;
373 SimCtx *simCtx = user->simCtx;
374
375 PetscFunctionBeginUser;
376
378
379 LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Setting finest level coordinates for block %d...\n", simCtx->rank, user->_this);
380
381 if (simCtx->generate_grid) {
382 ierr = GenerateAndSetCoordinates(user); CHKERRQ(ierr);
383 } else {
384
385 FILE *grid_file_handle = NULL;
386 // Only Rank 0 opens the file.
387 if (simCtx->rank == 0) {
388 grid_file_handle = fopen(simCtx->grid_file, "r");
389 if (!grid_file_handle) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_FILE_OPEN, "Cannot open file: %s", simCtx->grid_file);
390
391 // Now, on Rank 0, we skip the entire header section once.
392 // This is the logic from your modern code's AssignGridCoordinates.
393 PetscInt headerLines = simCtx->block_number + 2; // 1 for nblk, plus one for each block's dims
394 char dummy_buffer[2048];
395 for (PetscInt s = 0; s < headerLines; ++s) {
396 if (!fgets(dummy_buffer, sizeof(dummy_buffer), grid_file_handle)) {
397 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_FILE_READ, "Unexpected EOF while skipping grid header");
398 }
399 }
400 LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank 0: Skipped %d header lines, now at coordinate data.\n", headerLines);
401 }
402
403 // We now call the coordinate reader, passing the file handle.
404 // It's responsible for reading its block's data and broadcasting.
405 ierr = ReadAndSetCoordinates(user, grid_file_handle); CHKERRQ(ierr);
406
407 // Only Rank 0, which opened the file, should close it.
408 if (simCtx->rank == 0) {
409 fclose(grid_file_handle);
410 }
411 }
412
413 // After populating the local coordinate vector, we must perform a
414 // Local-to-Global and then Global-to-Local scatter to correctly
415 // populate the ghost node coordinates across process boundaries.
416 Vec gCoor, lCoor;
417 ierr = DMGetCoordinates(user->da, &gCoor); CHKERRQ(ierr);
418 ierr = DMGetCoordinatesLocal(user->da, &lCoor); CHKERRQ(ierr);
419
420 LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Scattering coordinates to update ghost nodes for block %d...\n", simCtx->rank, user->_this);
421 ierr = DMLocalToGlobalBegin(user->fda, lCoor, INSERT_VALUES, gCoor); CHKERRQ(ierr);
422 ierr = DMLocalToGlobalEnd(user->fda, lCoor, INSERT_VALUES, gCoor); CHKERRQ(ierr);
423
424 ierr = DMGlobalToLocalBegin(user->fda, gCoor, INSERT_VALUES, lCoor); CHKERRQ(ierr);
425 ierr = DMGlobalToLocalEnd(user->fda, gCoor, INSERT_VALUES, lCoor); CHKERRQ(ierr);
426
428
429 PetscFunctionReturn(0);
430}
static PetscErrorCode GenerateAndSetCoordinates(UserCtx *user)
Internal helper implementation: GenerateAndSetCoordinates().
Definition grid.c:452
static PetscErrorCode ReadAndSetCoordinates(UserCtx *user, FILE *fd)
Internal helper implementation: ReadAndSetCoordinates().
Definition grid.c:522
char grid_file[PETSC_MAX_PATH_LEN]
Definition variables.h:717
Here is the call graph for this function:
Here is the caller graph for this function:

◆ GenerateAndSetCoordinates()

static PetscErrorCode GenerateAndSetCoordinates ( UserCtx user)
static

Internal helper implementation: GenerateAndSetCoordinates().

Local to this translation unit.

DEBUG: This verifies the presence of a last "unphysical" layer of coordinates.

Definition at line 452 of file grid.c.

453{
454 PetscErrorCode ierr;
455 DMDALocalInfo info;
456 Cmpnts ***coor;
457 Vec lCoor;
458
459 PetscFunctionBeginUser;
460
462
463 LOG_ALLOW_SYNC(LOCAL, LOG_DEBUG, "Rank %d: Generating coordinates for block %d...\n", user->simCtx->rank, user->_this);
464
465 ierr = DMDAGetLocalInfo(user->da, &info); CHKERRQ(ierr);
466 ierr = DMGetCoordinatesLocal(user->da, &lCoor); CHKERRQ(ierr);
467
468 PetscInt xs = info.xs, xe = info.xs + info.xm;
469 PetscInt ys = info.ys, ye = info.ys + info.ym;
470 PetscInt zs = info.zs, ze = info.zs + info.zm;
471
472 LOG_ALLOW_SYNC(LOCAL, LOG_TRACE, "Rank %d: Local Info for block %d - X range - [%d,%d], Y range - [%d,%d], Z range - [%d,%d]\n",
473 user->simCtx->rank, user->_this, xs, xe, ys, ye, zs, ze);
474
475 LOG_ALLOW_SYNC(LOCAL, LOG_TRACE, "Rank %d: Local Info for block %d - X domain - [%.4f,%.4f], Y range - [%.4f,%.4f], Z range - [%.4f,%.4f]\n",
476 user->simCtx->rank, user->_this, user->Min_X,user->Max_X,user->Min_Y,user->Max_Y,user->Min_Z,user->Max_Z);
477
478 ierr = VecSet(lCoor, 0.0); CHKERRQ(ierr);
479 ierr = DMDAVecGetArray(user->fda, lCoor, &coor); CHKERRQ(ierr);
480
481 PetscReal Lx = user->Max_X - user->Min_X;
482 PetscReal Ly = user->Max_Y - user->Min_Y;
483 PetscReal Lz = user->Max_Z - user->Min_Z;
484
485 // Loop over the local nodes, including ghost nodes, owned by this process.
486 for (PetscInt k = zs; k < ze; k++) {
487 for (PetscInt j = ys; j < ye; j++) {
488 for (PetscInt i = xs; i < xe; i++) {
489 if(k < user->KM && j < user->JM && i < user->IM){
490 coor[k][j][i].x = user->Min_X + ComputeStretchedCoord(i, user->IM, Lx, user->rx);
491 coor[k][j][i].y = user->Min_Y + ComputeStretchedCoord(j, user->JM, Ly, user->ry);
492 coor[k][j][i].z = user->Min_Z + ComputeStretchedCoord(k, user->KM, Lz, user->rz);
493 }
494 }
495 }
496 }
497
498 /// DEBUG: This verifies the presence of a last "unphysical" layer of coordinates.
499 /*
500 PetscInt KM = user->KM;
501 for (PetscInt j = ys; j < ye; j++){
502 for(PetscInt i = xs; i < xe; i++){
503 LOG_ALLOW(GLOBAL,LOG_DEBUG,"coor[%d][%d][%d].(x,y,z) = %le,%le,%le",KM,j,i,coor[KM][j][i].x,coor[KM][j][i].y,coor[KM][j][i].z);
504 }
505 }
506 */
507
508
509
510 ierr = DMDAVecRestoreArray(user->fda, lCoor, &coor); CHKERRQ(ierr);
511
513
514 PetscFunctionReturn(0);
515}
static PetscReal ComputeStretchedCoord(PetscInt i, PetscInt N, PetscReal L, PetscReal r)
Internal helper implementation: ComputeStretchedCoord().
Definition grid.c:435
PetscReal Min_X
Definition variables.h:821
PetscReal ry
Definition variables.h:825
PetscReal Max_Y
Definition variables.h:821
PetscScalar x
Definition variables.h:101
PetscReal rz
Definition variables.h:825
PetscScalar z
Definition variables.h:101
PetscReal Min_Z
Definition variables.h:821
PetscReal Max_X
Definition variables.h:821
PetscReal Min_Y
Definition variables.h:821
PetscScalar y
Definition variables.h:101
PetscReal rx
Definition variables.h:825
PetscReal Max_Z
Definition variables.h:821
A 3D point or vector with PetscScalar components.
Definition variables.h:100
Here is the call graph for this function:
Here is the caller graph for this function:

◆ ReadAndSetCoordinates()

static PetscErrorCode ReadAndSetCoordinates ( UserCtx user,
FILE *  fd 
)
static

Internal helper implementation: ReadAndSetCoordinates().

Local to this translation unit.

Definition at line 522 of file grid.c.

523{
524 PetscErrorCode ierr;
525 SimCtx *simCtx = user->simCtx;
526 PetscMPIInt rank = simCtx->rank;
527 PetscInt block_index = user->_this;
528 PetscInt IM = user->IM, JM = user->JM, KM = user->KM;
529 DMDALocalInfo info;
530 Cmpnts ***coor;
531 Vec lCoor;
532 PetscReal *gc = NULL; // Global coordinate buffer, allocated on all ranks
533
534 PetscFunctionBeginUser;
535
537
538 LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Reading interleaved coordinates from file for block %d...\n",
539 simCtx->rank, block_index);
540
541 // 1. Allocate the buffer on ALL ranks to receive the broadcast data.
542 // PetscInt n_nodes = (IM + 1) * (JM + 1) * (KM + 1);
543 PetscInt n_nodes = (IM) * (JM) * (KM);
544 ierr = PetscMalloc1(3 * n_nodes, &gc); CHKERRQ(ierr);
545
546 // 2. Only Rank 0 opens the file and reads the data.
547 if (rank == 0) {
548 if (!fd) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_FILE_OPEN, "Recieved a NULL file handle.\n");
549
550 // Read the coordinate data for the CURRENT block.
551 for (PetscInt k = 0; k < KM; k++) {
552 for (PetscInt j = 0; j < JM; j++) {
553 for (PetscInt i = 0; i < IM; i++) {
554 PetscInt base_index = 3 * ((k * (JM) + j) * (IM) + i);
555 if (fscanf(fd, "%le %le %le\n", &gc[base_index], &gc[base_index + 1], &gc[base_index + 2]) != 3) {
556 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_FILE_READ, "Error reading coordinates for node (i,j,k)=(%d,%d,%d) in block %d", i, j, k, block_index);
557 }
558 }
559 }
560 }
561
562 }
563
564 // 3. Broadcast the coordinate block for the current block to all other processes.
565 ierr = MPI_Bcast(gc, 3 * n_nodes, MPIU_REAL, 0, PETSC_COMM_WORLD); CHKERRQ(ierr);
566
567 // 4. Each rank populates its local portion of the coordinate vector.
568 ierr = DMDAGetLocalInfo(user->da, &info); CHKERRQ(ierr);
569 ierr = DMGetCoordinatesLocal(user->da, &lCoor); CHKERRQ(ierr);
570 ierr = DMDAVecGetArray(user->fda, lCoor, &coor); CHKERRQ(ierr);
571
572 for (PetscInt k = info.zs; k < info.zs + info.zm; k++) {
573 for (PetscInt j = info.ys; j < info.ys + info.ym; j++) {
574 for (PetscInt i = info.xs; i < info.xs + info.xm; i++) {
575 if(k< KM && j < JM && i < IM){
576 PetscInt base_idx = 3 * ((k * (JM) + j) * (IM) + i);
577 coor[k][j][i].x = gc[base_idx];
578 coor[k][j][i].y = gc[base_idx + 1];
579 coor[k][j][i].z = gc[base_idx + 2];
580 }
581 }
582 }
583 }
584
585 // 5. Clean up and restore.
586 ierr = DMDAVecRestoreArray(user->fda, lCoor, &coor); CHKERRQ(ierr);
587 ierr = PetscFree(gc); CHKERRQ(ierr);
588
590
591 PetscFunctionReturn(0);
592}
Here is the caller graph for this function:

◆ RestrictCoordinates()

static PetscErrorCode RestrictCoordinates ( UserCtx coarse_user,
UserCtx fine_user 
)
static

Internal helper implementation: RestrictCoordinates().

Local to this translation unit.

Definition at line 600 of file grid.c.

601{
602 PetscErrorCode ierr;
603 Vec c_lCoor, f_lCoor; // Coarse and Fine local coordinate vectors
604 Cmpnts ***c_coor;
605 const Cmpnts ***f_coor; // Use const for read-only access
606 DMDALocalInfo c_info;
607 PetscInt ih, jh, kh; // Fine-grid indices corresponding to coarse-grid i,j,k
608
609 PetscFunctionBeginUser;
610
612
613 LOG_ALLOW_SYNC(LOCAL, LOG_DEBUG, "Rank %d: Restricting coords from level %d to level %d for block %d\n",
614 fine_user->simCtx->rank, fine_user->thislevel, coarse_user->thislevel, coarse_user->_this);
615
616 ierr = DMDAGetLocalInfo(coarse_user->da, &c_info); CHKERRQ(ierr);
617
618 ierr = DMGetCoordinatesLocal(coarse_user->da, &c_lCoor); CHKERRQ(ierr);
619 ierr = DMGetCoordinatesLocal(fine_user->da, &f_lCoor); CHKERRQ(ierr);
620
621 ierr = DMDAVecGetArray(coarse_user->fda, c_lCoor, &c_coor); CHKERRQ(ierr);
622 ierr = DMDAVecGetArrayRead(fine_user->fda, f_lCoor, &f_coor); CHKERRQ(ierr);
623
624 // Get the local owned range of the coarse grid.
625 PetscInt xs = c_info.xs, xe = c_info.xs + c_info.xm;
626 PetscInt ys = c_info.ys, ye = c_info.ys + c_info.ym;
627 PetscInt zs = c_info.zs, ze = c_info.zs + c_info.zm;
628
629 // Get the global dimensions of the coarse grid.
630 PetscInt mx = c_info.mx, my = c_info.my, mz = c_info.mz;
631
632 // If this process owns the maximum boundary node, contract the loop by one
633 // to prevent the index doubling `2*i` from going out of bounds.
634 // This is also ensuring we do not manipulate the unphysical layer of coors present in the finest level.
635 if (xe == mx) xe--;
636 if (ye == my) ye--;
637 if (ze == mz) ze--;
638
639 for (PetscInt k = zs; k < ze; k++) {
640 for (PetscInt j = ys; j < ye; j++) {
641 for (PetscInt i = xs; i < xe; i++) {
642 // Determine the corresponding parent node index on the FINE grid,
643 // respecting the semi-coarsening flags of the FINE grid's UserCtx.
644 ih = coarse_user->isc ? i : 2 * i;
645 jh = coarse_user->jsc ? j : 2 * j;
646 kh = coarse_user->ksc ? k : 2 * k;
647
648 // LOG_ALLOW(GLOBAL,LOG_DEBUG," [kh][ih][jh] = %d,%d,%d - k,j,i = %d,%d,%d.\n",kh,jh,ih,k,j,i);
649
650 c_coor[k][j][i] = f_coor[kh][jh][ih];
651 }
652 }
653 }
654
655 ierr = DMDAVecRestoreArray(coarse_user->fda, c_lCoor, &c_coor); CHKERRQ(ierr);
656 ierr = DMDAVecRestoreArrayRead(fine_user->fda, f_lCoor, &f_coor); CHKERRQ(ierr);
657
658 // After populating the local portion, scatter to update ghost regions.
659 Vec c_gCoor;
660 ierr = DMGetCoordinates(coarse_user->da, &c_gCoor); CHKERRQ(ierr);
661 ierr = DMLocalToGlobalBegin(coarse_user->fda, c_lCoor, INSERT_VALUES, c_gCoor); CHKERRQ(ierr);
662 ierr = DMLocalToGlobalEnd(coarse_user->fda, c_lCoor, INSERT_VALUES, c_gCoor); CHKERRQ(ierr);
663 ierr = DMGlobalToLocalBegin(coarse_user->fda, c_gCoor, INSERT_VALUES, c_lCoor); CHKERRQ(ierr);
664 ierr = DMGlobalToLocalEnd(coarse_user->fda, c_gCoor, INSERT_VALUES, c_lCoor); CHKERRQ(ierr);
665
667
668 PetscFunctionReturn(0);
669}
Here is the caller graph for this function:

◆ AssignAllGridCoordinates()

PetscErrorCode AssignAllGridCoordinates ( SimCtx simCtx)

Public API implementation: AssignAllGridCoordinates().

Orchestrates the assignment of physical coordinates to all DMDA objects.

Declared in include/grid.h; this function is not static and is callable from other translation units.

Definition at line 317 of file grid.c.

318{
319 PetscErrorCode ierr;
320 UserMG *usermg = &simCtx->usermg;
321 PetscInt nblk = simCtx->block_number;
322
323 PetscFunctionBeginUser;
324
326
327 LOG_ALLOW(GLOBAL, LOG_INFO, "Assigning physical coordinates to all grid DMs...\n");
328
329 // --- Part 1: Populate the Finest Grid Level ---
330 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Setting coordinates for the finest grid level (%d)...\n", usermg->mglevels - 1);
331 for (PetscInt bi = 0; bi < nblk; bi++) {
332 UserCtx *fine_user = &usermg->mgctx[usermg->mglevels - 1].user[bi];
333 ierr = SetFinestLevelCoordinates(fine_user); CHKERRQ(ierr);
334 LOG_ALLOW(GLOBAL,LOG_TRACE,"The Finest level coordinates for block %d have been set.\n",bi);
336 ierr = LOG_FIELD_MIN_MAX(fine_user,"Coordinates");
337 }
338 }
339 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Finest level coordinates have been set for all blocks.\n");
340
341 // --- Part 2: Restrict Coordinates to Coarser Levels ---
342 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Restricting coordinates to coarser grid levels...\n");
343 for (PetscInt level = usermg->mglevels - 2; level >= 0; level--) {
344 for (PetscInt bi = 0; bi < nblk; bi++) {
345 UserCtx *coarse_user = &usermg->mgctx[level].user[bi];
346 UserCtx *fine_user = &usermg->mgctx[level + 1].user[bi];
347 ierr = RestrictCoordinates(coarse_user, fine_user); CHKERRQ(ierr);
348
349 LOG_ALLOW(GLOBAL,LOG_TRACE,"Coordinates restricted to block %d level %d.\n",bi,level);
351 ierr = LOG_FIELD_MIN_MAX(coarse_user,"Coordinates");
352 }
353 }
354 }
355
356 LOG_ALLOW(GLOBAL, LOG_INFO, "Physical coordinates assigned to all grid levels and blocks.\n");
357
359
360 PetscFunctionReturn(0);
361}
static PetscErrorCode RestrictCoordinates(UserCtx *coarse_user, UserCtx *fine_user)
Internal helper implementation: RestrictCoordinates().
Definition grid.c:600
static PetscErrorCode SetFinestLevelCoordinates(UserCtx *user)
Internal helper implementation: SetFinestLevelCoordinates().
Definition grid.c:370
#define __FUNCT__
Definition grid.c:9
PetscBool is_function_allowed(const char *functionName)
Checks if a given function is in the allow-list.
Definition logging.c:183
PetscErrorCode LOG_FIELD_MIN_MAX(UserCtx *user, const char *fieldName)
Computes and logs the local and global min/max values of a 3-component vector field.
Definition logging.c:1367
Here is the call graph for this function:
Here is the caller graph for this function:

◆ ComputeStretchedCoord()

static PetscReal ComputeStretchedCoord ( PetscInt  i,
PetscInt  N,
PetscReal  L,
PetscReal  r 
)
inlinestatic

Internal helper implementation: ComputeStretchedCoord().

Local to this translation unit.

Definition at line 435 of file grid.c.

436{
437 if (N <=1) return 0.0;
438 PetscReal fraction = (PetscReal)i / ((PetscReal)N - 1.0);
439 if (PetscAbsReal(r - 1.0) < 1.0e-9) { // Use a tolerance for float comparison
440 return L * fraction;
441 } else {
442 return L * (PetscPowReal(r, fraction) - 1.0) / (r - 1.0);
443 }
444}
Here is the caller graph for this function:

◆ ComputeLocalBoundingBox()

PetscErrorCode ComputeLocalBoundingBox ( UserCtx user,
BoundingBox localBBox 
)

Implementation of ComputeLocalBoundingBox().

Computes the local bounding box of the grid on the current process.

Full API contract (arguments, ownership, side effects) is documented with the header declaration in include/grid.h.

See also
ComputeLocalBoundingBox()

Definition at line 679 of file grid.c.

680{
681 PetscErrorCode ierr;
682 PetscInt i, j, k;
683 PetscMPIInt rank;
684 PetscInt xs, ys, zs, xe, ye, ze;
685 DMDALocalInfo info;
686 Vec coordinates;
687 Cmpnts ***coordArray;
688 Cmpnts minCoords, maxCoords;
689
690 PetscFunctionBeginUser;
691
693
694 // Start of function execution
695 LOG_ALLOW(GLOBAL, LOG_INFO, "Entering the function.\n");
696
697 // Validate input Pointers
698 if (!user) {
699 LOG_ALLOW(LOCAL, LOG_ERROR, "Input 'user' Pointer is NULL.\n");
700 return PETSC_ERR_ARG_NULL;
701 }
702 if (!localBBox) {
703 LOG_ALLOW(LOCAL, LOG_ERROR, "Output 'localBBox' Pointer is NULL.\n");
704 return PETSC_ERR_ARG_NULL;
705 }
706
707 // Get MPI rank
708 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
709
710 // Get the local coordinates vector from the DMDA
711 ierr = DMGetCoordinatesLocal(user->da, &coordinates);
712 if (ierr) {
713 LOG_ALLOW(LOCAL, LOG_ERROR, "Error getting local coordinates vector.\n");
714 return ierr;
715 }
716
717 if (!coordinates) {
718 LOG_ALLOW(LOCAL, LOG_ERROR, "Coordinates vector is NULL.\n");
719 return PETSC_ERR_ARG_NULL;
720 }
721
722 // Access the coordinate array for reading
723 ierr = DMDAVecGetArrayRead(user->fda, coordinates, &coordArray);
724 if (ierr) {
725 LOG_ALLOW(LOCAL, LOG_ERROR, "Error accessing coordinate array.\n");
726 return ierr;
727 }
728
729 // Get the local grid information (indices and sizes)
730 ierr = DMDAGetLocalInfo(user->da, &info);
731 if (ierr) {
732 LOG_ALLOW(LOCAL, LOG_ERROR, "Error getting DMDA local info.\n");
733 return ierr;
734 }
735
736
737 xs = info.gxs; xe = xs + info.gxm;
738 ys = info.gys; ye = ys + info.gym;
739 zs = info.gzs; ze = zs + info.gzm;
740
741 /*
742 xs = info.xs; xe = xs + info.xm;
743 ys = info.ys; ye = ys + info.ym;
744 zs = info.zs; ze = zs + info.zm;
745 */
746
747 // Initialize min and max coordinates with extreme values
748 minCoords.x = minCoords.y = minCoords.z = PETSC_MAX_REAL;
749 maxCoords.x = maxCoords.y = maxCoords.z = PETSC_MIN_REAL;
750
751 LOG_ALLOW(LOCAL, LOG_TRACE, "[Rank %d] Grid indices (Including Ghosts): xs=%d, xe=%d, ys=%d, ye=%d, zs=%d, ze=%d.\n",rank, xs, xe, ys, ye, zs, ze);
752
753 // Iterate over the local grid to find min and max coordinates
754 for (k = zs; k < ze; k++) {
755 for (j = ys; j < ye; j++) {
756 for (i = xs; i < xe; i++) {
757 // Only consider nodes within the physical domain.
758 if(i < user->IM && j < user->JM && k < user->KM){
759 Cmpnts coord = coordArray[k][j][i];
760
761 // Update min and max coordinates
762 if (coord.x < minCoords.x) minCoords.x = coord.x;
763 if (coord.y < minCoords.y) minCoords.y = coord.y;
764 if (coord.z < minCoords.z) minCoords.z = coord.z;
765
766 if (coord.x > maxCoords.x) maxCoords.x = coord.x;
767 if (coord.y > maxCoords.y) maxCoords.y = coord.y;
768 if (coord.z > maxCoords.z) maxCoords.z = coord.z;
769 }
770 }
771 }
772 }
773
774
775 // Add tolerance to bboxes.
776 minCoords.x = minCoords.x - BBOX_TOLERANCE;
777 minCoords.y = minCoords.y - BBOX_TOLERANCE;
778 minCoords.z = minCoords.z - BBOX_TOLERANCE;
779
780 maxCoords.x = maxCoords.x + BBOX_TOLERANCE;
781 maxCoords.y = maxCoords.y + BBOX_TOLERANCE;
782 maxCoords.z = maxCoords.z + BBOX_TOLERANCE;
783
784 LOG_ALLOW(LOCAL,LOG_DEBUG," Tolerance added to the limits: %.8e .\n",(PetscReal)BBOX_TOLERANCE);
785
786 // Log the computed min and max coordinates
787 LOG_ALLOW(LOCAL, LOG_INFO,"[Rank %d] Bounding Box Ranges = X[%.6f, %.6f], Y[%.6f,%.6f], Z[%.6f, %.6f].\n",rank,minCoords.x, maxCoords.x,minCoords.y, maxCoords.y, minCoords.z, maxCoords.z);
788
789
790
791 // Restore the coordinate array
792 ierr = DMDAVecRestoreArrayRead(user->fda, coordinates, &coordArray);
793 if (ierr) {
794 LOG_ALLOW(LOCAL, LOG_ERROR, "Error restoring coordinate array.\n");
795 return ierr;
796 }
797
798 // Set the local bounding box
799 localBBox->min_coords = minCoords;
800 localBBox->max_coords = maxCoords;
801
802 // Update the bounding box inside the UserCtx for consistency
803 user->bbox = *localBBox;
804
805 LOG_ALLOW(GLOBAL, LOG_INFO, "Exiting the function successfully.\n");
806
808
809 PetscFunctionReturn(0);
810}
#define BBOX_TOLERANCE
Definition grid.c:6
Cmpnts max_coords
Maximum x, y, z coordinates of the bounding box.
Definition variables.h:156
Cmpnts min_coords
Minimum x, y, z coordinates of the bounding box.
Definition variables.h:155
BoundingBox bbox
Definition variables.h:822
Here is the caller graph for this function:

◆ GatherAllBoundingBoxes()

PetscErrorCode GatherAllBoundingBoxes ( UserCtx user,
BoundingBox **  allBBoxes 
)

Implementation of GatherAllBoundingBoxes().

Gathers local bounding boxes from all MPI processes to rank 0.

Full API contract (arguments, ownership, side effects) is documented with the header declaration in include/grid.h.

See also
GatherAllBoundingBoxes()

Definition at line 821 of file grid.c.

822{
823 PetscErrorCode ierr;
824 PetscMPIInt rank, size;
825 BoundingBox *bboxArray = NULL;
826 BoundingBox localBBox;
827
828 PetscFunctionBeginUser;
829
831
832 /* Validate */
833 if (!user || !allBBoxes) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL,
834 "GatherAllBoundingBoxes: NULL pointer");
835
836 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRMPI(ierr);
837 ierr = MPI_Comm_size(PETSC_COMM_WORLD, &size); CHKERRMPI(ierr);
838
839 /* Compute local bbox */
840 ierr = ComputeLocalBoundingBox(user, &localBBox); CHKERRQ(ierr);
841
842 /* Ensure everyone is synchronized before the gather */
843 MPI_Barrier(PETSC_COMM_WORLD);
845 "Rank %d: about to MPI_Gather(localBBox)\n", rank);
846
847 /* Allocate on root */
848 if (rank == 0) {
849 bboxArray = (BoundingBox*)malloc(size * sizeof(BoundingBox));
850 if (!bboxArray) SETERRABORT(PETSC_COMM_WORLD, PETSC_ERR_MEM,
851 "GatherAllBoundingBoxes: malloc failed");
852 }
853
854 /* Collective: every rank must call */
855 ierr = MPI_Gather(&localBBox, sizeof(BoundingBox), MPI_BYTE,
856 bboxArray, sizeof(BoundingBox), MPI_BYTE,
857 0, PETSC_COMM_WORLD);
858 CHKERRMPI(ierr);
859
860 MPI_Barrier(PETSC_COMM_WORLD);
862 "Rank %d: completed MPI_Gather(localBBox)\n", rank);
863
864 /* Return result */
865 if (rank == 0) {
866 *allBBoxes = bboxArray;
867 } else {
868 *allBBoxes = NULL;
869 }
870
872
873 PetscFunctionReturn(0);
874}
PetscErrorCode ComputeLocalBoundingBox(UserCtx *user, BoundingBox *localBBox)
Implementation of ComputeLocalBoundingBox().
Definition grid.c:679
Defines a 3D axis-aligned bounding box.
Definition variables.h:154
Here is the call graph for this function:
Here is the caller graph for this function:

◆ BroadcastAllBoundingBoxes()

PetscErrorCode BroadcastAllBoundingBoxes ( UserCtx user,
BoundingBox **  bboxlist 
)

Public API implementation: BroadcastAllBoundingBoxes().

Broadcasts the bounding box information collected on rank 0 to all other ranks.

Declared in include/grid.h; this function is not static and is callable from other translation units.

Definition at line 883 of file grid.c.

884{
885 PetscErrorCode ierr;
886 (void)user;
887 PetscMPIInt rank, size;
888
889 PetscFunctionBeginUser;
890
892
893 if (!bboxlist) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL,
894 "BroadcastAllBoundingBoxes: NULL pointer");
895
896 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRMPI(ierr);
897 ierr = MPI_Comm_size(PETSC_COMM_WORLD, &size); CHKERRMPI(ierr);
898
899 /* Non-root ranks must allocate before the Bcast */
900 if (rank != 0) {
901 *bboxlist = (BoundingBox*)malloc(size * sizeof(BoundingBox));
902 if (!*bboxlist) SETERRABORT(PETSC_COMM_WORLD, PETSC_ERR_MEM,
903 "BroadcastAllBoundingBoxes: malloc failed");
904 }
905
906 MPI_Barrier(PETSC_COMM_WORLD);
908 "Rank %d: about to MPI_Bcast(%d boxes)\n", rank, size);
909
910 /* Collective: every rank must call */
911 ierr = MPI_Bcast(*bboxlist, size * sizeof(BoundingBox), MPI_BYTE,
912 0, PETSC_COMM_WORLD);
913 CHKERRMPI(ierr);
914
915 MPI_Barrier(PETSC_COMM_WORLD);
917 "Rank %d: completed MPI_Bcast(%d boxes)\n", rank, size);
918
919
921
922 PetscFunctionReturn(0);
923}
Here is the caller graph for this function:

◆ CalculateInletProperties()

PetscErrorCode CalculateInletProperties ( UserCtx user)

Implementation of CalculateInletProperties().

Calculates the center and area of the primary INLET face.

Full API contract (arguments, ownership, side effects) is documented with the header declaration in include/grid.h.

See also
CalculateInletProperties()

Definition at line 933 of file grid.c.

934{
935 PetscErrorCode ierr;
936 BCFace inlet_face_id = -1;
937 PetscBool inlet_found = PETSC_FALSE;
938
939 PetscFunctionBeginUser;
941
942 // 1. Identify the primary inlet face from the configuration
943 for (int i = 0; i < 6; i++) {
944 if (user->boundary_faces[i].mathematical_type == INLET) {
945 inlet_face_id = user->boundary_faces[i].face_id;
946 inlet_found = PETSC_TRUE;
947 break; // Use the first inlet found
948 }
949 }
950
951 if (!inlet_found) {
952 LOG_ALLOW(GLOBAL, LOG_INFO, "No INLET face found. Skipping inlet center calculation.\n");
954 PetscFunctionReturn(0);
955 }
956
957 Cmpnts inlet_center;
958 PetscReal inlet_area;
959
960 // 2. Call the generic utility to compute the center and area of any face.
961 ierr = CalculateFaceCenterAndArea(user,inlet_face_id,&inlet_center,&inlet_area); CHKERRQ(ierr);
962
963 // 3. Store results in the SimCtx
964 user->simCtx->CMx_c = inlet_center.x;
965 user->simCtx->CMy_c = inlet_center.y;
966 user->simCtx->CMz_c = inlet_center.z;
967 user->simCtx->AreaInSum = inlet_area;
968
970 "Rank[%d] Inlet Center: (%.6f, %.6f, %.6f), Area: %.6f\n",
971 user->simCtx->rank, inlet_center.x, inlet_center.y, inlet_center.z, inlet_area);
972
974 PetscFunctionReturn(0);
975
976}
PetscErrorCode CalculateFaceCenterAndArea(UserCtx *user, BCFace face_id, Cmpnts *face_center, PetscReal *face_area)
Implementation of CalculateFaceCenterAndArea().
Definition grid.c:1029
@ INLET
Definition variables.h:258
BoundaryFaceConfig boundary_faces[6]
Definition variables.h:829
PetscReal CMy_c
Definition variables.h:705
PetscReal CMz_c
Definition variables.h:705
BCType mathematical_type
Definition variables.h:336
PetscReal AreaInSum
Definition variables.h:726
PetscReal CMx_c
Definition variables.h:705
BCFace
Identifies the six logical faces of a structured computational block.
Definition variables.h:244
Here is the call graph for this function:
Here is the caller graph for this function:

◆ CalculateOutletProperties()

PetscErrorCode CalculateOutletProperties ( UserCtx user)

Implementation of CalculateOutletProperties().

Calculates the center and area of the primary OUTLET face.

Full API contract (arguments, ownership, side effects) is documented with the header declaration in include/grid.h.

See also
CalculateOutletProperties()

Definition at line 986 of file grid.c.

987{
988 PetscErrorCode ierr;
989 BCFace outlet_face_id = -1;
990 PetscBool outlet_found = PETSC_FALSE;
991 PetscFunctionBeginUser;
993 // 1. Identify the primary outlet face from the configuration
994 for (int i = 0; i < 6; i++) {
995 if (user->boundary_faces[i].mathematical_type == OUTLET) {
996 outlet_face_id = user->boundary_faces[i].face_id;
997 outlet_found = PETSC_TRUE;
998 break; // Use the first outlet found
999 }
1000 }
1001 if (!outlet_found) {
1002 LOG_ALLOW(GLOBAL, LOG_INFO, "No OUTLET face found. Skipping outlet center calculation.\n");
1004 PetscFunctionReturn(0);
1005 }
1006 PetscReal outlet_area;
1007 Cmpnts outlet_center;
1008 // 2. Call the generic utility to compute the center and area of any face
1009 ierr = CalculateFaceCenterAndArea(user,outlet_face_id,&outlet_center,&outlet_area); CHKERRQ(ierr);
1010 // 3. Store results in the SimCtx
1011 user->simCtx->AreaOutSum = outlet_area;
1012
1014 "Outlet Center: (%.6f, %.6f, %.6f), Area: %.6f\n",
1015 outlet_center.x, outlet_center.y, outlet_center.z, outlet_area);
1016
1018 PetscFunctionReturn(0);
1019}
@ OUTLET
Definition variables.h:257
PetscReal AreaOutSum
Definition variables.h:726
Here is the call graph for this function:
Here is the caller graph for this function:

◆ CalculateFaceCenterAndArea()

PetscErrorCode CalculateFaceCenterAndArea ( UserCtx user,
BCFace  face_id,
Cmpnts face_center,
PetscReal *  face_area 
)

Implementation of CalculateFaceCenterAndArea().

Calculates the geometric center and total area of a specified boundary face.

Full API contract (arguments, ownership, side effects) is documented with the header declaration in include/grid.h.

See also
CalculateFaceCenterAndArea()

< Local sum of (x,y,z) coordinates

< Local sum of face area magnitudes

< Local count of nodes

< Global sum of coordinates

< Global sum of areas

< Global count of nodes

< i-range: [xs, xe)

< j-range: [ys, ye)

< k-range: [zs, ze)

< Physical domain size in i (exclude dummy)

< Physical domain size in j (exclude dummy)

< Physical domain size in k (exclude dummy)

< Start at 1 if on -Xi boundary

< End at mx-1 if on +Xi boundary

< Start at 1 if on -Eta boundary

< End at my-1 if on +Eta boundary

< Start at 1 if on -Zeta boundary

< End at mz-1 if on +Zeta boundary

< Exclude dummy at i=mx-1 (e.g., i=25)

< Exclude dummy at j=my-1 (e.g., j=25)

< Exclude dummy at k=mz-1 (e.g., k=97)

< Local ghosted coordinate vector

< Nodal coordinates [k][j][i]

< Face metric tensors [k][j][i]

< Cell blanking field [k][j][i] (shifted +1)

Definition at line 1029 of file grid.c.

1031{
1032 PetscErrorCode ierr;
1033 DMDALocalInfo info;
1034
1035 // ========================================================================
1036 // Local accumulators for this rank's contribution
1037 // ========================================================================
1038 PetscReal local_sum[3] = {0.0, 0.0, 0.0}; ///< Local sum of (x,y,z) coordinates
1039 PetscReal localAreaSum = 0.0; ///< Local sum of face area magnitudes
1040 PetscCount local_n_points = 0; ///< Local count of nodes
1041
1042 // ========================================================================
1043 // Global accumulators after MPI reduction
1044 // ========================================================================
1045 PetscReal global_sum[3] = {0.0, 0.0, 0.0}; ///< Global sum of coordinates
1046 PetscReal globalAreaSum = 0.0; ///< Global sum of areas
1047 PetscCount global_n_points = 0; ///< Global count of nodes
1048
1049 // ========================================================================
1050 // Grid information and array pointers
1051 // ========================================================================
1052 info = user->info;
1053
1054 // Rank's owned range in global indices
1055 PetscInt xs = info.xs, xe = info.xs + info.xm; ///< i-range: [xs, xe)
1056 PetscInt ys = info.ys, ye = info.ys + info.ym; ///< j-range: [ys, ye)
1057 PetscInt zs = info.zs, ze = info.zs + info.zm; ///< k-range: [zs, ze)
1058
1059 // Global domain dimensions (total allocated, includes dummy at end)
1060 PetscInt mx = info.mx, my = info.my, mz = info.mz;
1061 PetscInt IM = user->IM; ///< Physical domain size in i (exclude dummy)
1062 PetscInt JM = user->JM; ///< Physical domain size in j (exclude dummy)
1063 PetscInt KM = user->KM; ///< Physical domain size in k (exclude dummy)
1064
1065 // ========================================================================
1066 // Interior loop bounds (adjusted to avoid ghost/boundary cells)
1067 // These are used for nvert checks where we need valid cell indices
1068 // ========================================================================
1069 PetscInt lxs = xs; if(xs == 0) lxs = xs + 1; ///< Start at 1 if on -Xi boundary
1070 PetscInt lxe = xe; if(xe == mx) lxe = xe - 1; ///< End at mx-1 if on +Xi boundary
1071 PetscInt lys = ys; if(ys == 0) lys = ys + 1; ///< Start at 1 if on -Eta boundary
1072 PetscInt lye = ye; if(ye == my) lye = ye - 1; ///< End at my-1 if on +Eta boundary
1073 PetscInt lzs = zs; if(zs == 0) lzs = zs + 1; ///< Start at 1 if on -Zeta boundary
1074 PetscInt lze = ze; if(ze == mz) lze = ze - 1; ///< End at mz-1 if on +Zeta boundary
1075
1076 // ========================================================================
1077 // Physical node bounds (exclude dummy indices at mx-1, my-1, mz-1)
1078 // These are used for coordinate loops where we want ALL physical nodes
1079 // ========================================================================
1080 PetscInt i_max = (xe == mx) ? mx - 1 : xe; ///< Exclude dummy at i=mx-1 (e.g., i=25)
1081 PetscInt j_max = (ye == my) ? my - 1 : ye; ///< Exclude dummy at j=my-1 (e.g., j=25)
1082 PetscInt k_max = (ze == mz) ? mz - 1 : ze; ///< Exclude dummy at k=mz-1 (e.g., k=97)
1083
1084 // ========================================================================
1085 // Array pointers for field access
1086 // ========================================================================
1087 Vec lCoor; ///< Local ghosted coordinate vector
1088 Cmpnts ***coor; ///< Nodal coordinates [k][j][i]
1089 Cmpnts ***csi, ***eta, ***zet; ///< Face metric tensors [k][j][i]
1090 PetscReal ***nvert; ///< Cell blanking field [k][j][i] (shifted +1)
1091
1092 PetscFunctionBeginUser;
1094
1095 // ========================================================================
1096 // Step 1: Check if this rank owns the specified boundary face
1097 // ========================================================================
1098 PetscBool owns_face = PETSC_FALSE;
1099 ierr = CanRankServiceFace(&info,IM,JM,KM,face_id,&owns_face); CHKERRQ(ierr);
1100 if(owns_face){
1101 // ========================================================================
1102 // Step 2: Get read-only array access for all required fields
1103 // ========================================================================
1104 ierr = DMGetCoordinatesLocal(user->da, &lCoor); CHKERRQ(ierr);
1105 ierr = DMDAVecGetArrayRead(user->fda, lCoor, &coor); CHKERRQ(ierr);
1106 ierr = DMDAVecGetArrayRead(user->da, user->lNvert, &nvert); CHKERRQ(ierr);
1107 ierr = DMDAVecGetArrayRead(user->fda, user->lCsi, &csi); CHKERRQ(ierr);
1108 ierr = DMDAVecGetArrayRead(user->fda, user->lEta, &eta); CHKERRQ(ierr);
1109 ierr = DMDAVecGetArrayRead(user->fda, user->lZet, &zet); CHKERRQ(ierr);
1110
1111 // ========================================================================
1112 // Step 3: Loop over the specified face and accumulate center and area
1113 // ========================================================================
1114 switch (face_id) {
1115
1116 // ====================================================================
1117 // BC_FACE_NEG_X: Face at i=0 (bottom boundary in i-direction)
1118 // ====================================================================
1119 case BC_FACE_NEG_X:
1120 if (xs == 0) {
1121 PetscInt i = 0; // Face is at node index i=0
1122
1123 // ---- Part 1: Center calculation (ALL physical nodes) ----
1124 // Loop over ALL physical nodes on this face
1125 // For my=26, mz=98: j∈[0,24], k∈[0,96] → 25×97 = 2,425 nodes
1126 for (PetscInt k = zs; k < k_max; k++) {
1127 for (PetscInt j = ys; j < j_max; j++) {
1128 // Accumulate coordinates at node [k][j][0]
1129 local_sum[0] += coor[k][j][i].x;
1130 local_sum[1] += coor[k][j][i].y;
1131 local_sum[2] += coor[k][j][i].z;
1132 local_n_points++;
1133 }
1134 }
1135
1136 // ---- Part 2: Area calculation (INTERIOR cells only) ----
1137 // Loop over interior range where nvert checks are valid
1138 // For my=26, mz=98: j∈[1,24], k∈[1,96] → 24×96 = 2,304 cells
1139 for (PetscInt k = lzs; k < lze; k++) {
1140 for (PetscInt j = lys; j < lye; j++) {
1141 // Check if adjacent cell is fluid
1142 // nvert[k][j][i+1] = nvert[k][j][1] checks Cell 0
1143 // (Physical Cell 0 in j-k plane, stored at shifted index [1])
1144 if (nvert[k][j][i+1] < 0.1) {
1145 // Cell is fluid - add face area contribution
1146 // Face area = magnitude of csi metric at [k][j][0]
1147 localAreaSum += sqrt(csi[k][j][i].x * csi[k][j][i].x +
1148 csi[k][j][i].y * csi[k][j][i].y +
1149 csi[k][j][i].z * csi[k][j][i].z);
1150 }
1151 }
1152 }
1153 }
1154 break;
1155
1156 // ====================================================================
1157 // BC_FACE_POS_X: Face at i=IM-1 (top boundary in i-direction)
1158 // ====================================================================
1159 case BC_FACE_POS_X:
1160 if (xe == mx) {
1161 PetscInt i = mx - 2; // Last physical node (e.g., i=24 for mx=26)
1162
1163 // ---- Part 1: Center calculation (ALL physical nodes) ----
1164 for (PetscInt k = zs; k < k_max; k++) {
1165 for (PetscInt j = ys; j < j_max; j++) {
1166 local_sum[0] += coor[k][j][i].x;
1167 local_sum[1] += coor[k][j][i].y;
1168 local_sum[2] += coor[k][j][i].z;
1169 local_n_points++;
1170 }
1171 }
1172
1173 // ---- Part 2: Area calculation (INTERIOR cells only) ----
1174 for (PetscInt k = lzs; k < lze; k++) {
1175 for (PetscInt j = lys; j < lye; j++) {
1176 // Check if adjacent cell is fluid
1177 // nvert[k][j][i] = nvert[k][j][24] checks last cell (Cell 23)
1178 // (Physical Cell 23, stored at shifted index [24])
1179 if (nvert[k][j][i] < 0.1) {
1180 // Face area = magnitude of csi metric at [k][j][24]
1181 localAreaSum += sqrt(csi[k][j][i].x * csi[k][j][i].x +
1182 csi[k][j][i].y * csi[k][j][i].y +
1183 csi[k][j][i].z * csi[k][j][i].z);
1184 }
1185 }
1186 }
1187 }
1188 break;
1189
1190 // ====================================================================
1191 // BC_FACE_NEG_Y: Face at j=0 (bottom boundary in j-direction)
1192 // ====================================================================
1193 case BC_FACE_NEG_Y:
1194 if (ys == 0) {
1195 PetscInt j = 0; // Face is at node index j=0
1196
1197 // ---- Part 1: Center calculation (ALL physical nodes) ----
1198 // For mx=26, mz=98: i∈[0,24], k∈[0,96] → 25×97 = 2,425 nodes
1199 for (PetscInt k = zs; k < k_max; k++) {
1200 for (PetscInt i = xs; i < i_max; i++) {
1201 local_sum[0] += coor[k][j][i].x;
1202 local_sum[1] += coor[k][j][i].y;
1203 local_sum[2] += coor[k][j][i].z;
1204 local_n_points++;
1205 }
1206 }
1207
1208 // ---- Part 2: Area calculation (INTERIOR cells only) ----
1209 // For mx=26, mz=98: i∈[1,24], k∈[1,96] → 24×96 = 2,304 cells
1210 for (PetscInt k = lzs; k < lze; k++) {
1211 for (PetscInt i = lxs; i < lxe; i++) {
1212 // nvert[k][j+1][i] = nvert[k][1][i] checks Cell 0
1213 if (nvert[k][j+1][i] < 0.1) {
1214 // Face area = magnitude of eta metric at [k][0][i]
1215 localAreaSum += sqrt(eta[k][j][i].x * eta[k][j][i].x +
1216 eta[k][j][i].y * eta[k][j][i].y +
1217 eta[k][j][i].z * eta[k][j][i].z);
1218 }
1219 }
1220 }
1221 }
1222 break;
1223
1224 // ====================================================================
1225 // BC_FACE_POS_Y: Face at j=JM-1 (top boundary in j-direction)
1226 // ====================================================================
1227 case BC_FACE_POS_Y:
1228 if (ye == my) {
1229 PetscInt j = my - 2; // Last physical node (e.g., j=24 for my=26)
1230
1231 // ---- Part 1: Center calculation (ALL physical nodes) ----
1232 for (PetscInt k = zs; k < k_max; k++) {
1233 for (PetscInt i = xs; i < i_max; i++) {
1234 local_sum[0] += coor[k][j][i].x;
1235 local_sum[1] += coor[k][j][i].y;
1236 local_sum[2] += coor[k][j][i].z;
1237 local_n_points++;
1238 }
1239 }
1240
1241 // ---- Part 2: Area calculation (INTERIOR cells only) ----
1242 for (PetscInt k = lzs; k < lze; k++) {
1243 for (PetscInt i = lxs; i < lxe; i++) {
1244 // nvert[k][j][i] = nvert[k][24][i] checks last cell (Cell 23)
1245 if (nvert[k][j][i] < 0.1) {
1246 // Face area = magnitude of eta metric at [k][24][i]
1247 localAreaSum += sqrt(eta[k][j][i].x * eta[k][j][i].x +
1248 eta[k][j][i].y * eta[k][j][i].y +
1249 eta[k][j][i].z * eta[k][j][i].z);
1250 }
1251 }
1252 }
1253 }
1254 break;
1255
1256 // ====================================================================
1257 // BC_FACE_NEG_Z: Face at k=0 (inlet, bottom boundary in k-direction)
1258 // ====================================================================
1259 case BC_FACE_NEG_Z:
1260 if (zs == 0) {
1261 PetscInt k = 0; // Face is at node index k=0
1262
1263 // ---- Part 1: Center calculation (ALL physical nodes) ----
1264 // For mx=26, my=26: i∈[0,24], j∈[0,24] → 25×25 = 625 nodes
1265 for (PetscInt j = ys; j < j_max; j++) {
1266 for (PetscInt i = xs; i < i_max; i++) {
1267 local_sum[0] += coor[k][j][i].x;
1268 local_sum[1] += coor[k][j][i].y;
1269 local_sum[2] += coor[k][j][i].z;
1270 local_n_points++;
1271 }
1272 }
1273
1274 // ---- Part 2: Area calculation (INTERIOR cells only) ----
1275 // For mx=26, my=26: i∈[1,24], j∈[1,24] → 24×24 = 576 cells
1276 for (PetscInt j = lys; j < lye; j++) {
1277 for (PetscInt i = lxs; i < lxe; i++) {
1278 // nvert[k+1][j][i] = nvert[1][j][i] checks Cell 0
1279 // (Physical Cell 0 in i-j plane, stored at shifted index [1])
1280 if (nvert[k+1][j][i] < 0.1) {
1281 // Face area = magnitude of zet metric at [0][j][i]
1282 localAreaSum += sqrt(zet[k][j][i].x * zet[k][j][i].x +
1283 zet[k][j][i].y * zet[k][j][i].y +
1284 zet[k][j][i].z * zet[k][j][i].z);
1285 }
1286 }
1287 }
1288 }
1289 break;
1290
1291 // ====================================================================
1292 // BC_FACE_POS_Z: Face at k=KM-1 (outlet, top boundary in k-direction)
1293 // ====================================================================
1294 case BC_FACE_POS_Z:
1295 if (ze == mz) {
1296 PetscInt k = mz - 2; // Last physical node (e.g., k=96 for mz=98)
1297
1298 // ---- Part 1: Center calculation (ALL physical nodes) ----
1299 // For mx=26, my=26: i∈[0,24], j∈[0,24] → 25×25 = 625 nodes
1300 for (PetscInt j = ys; j < j_max; j++) {
1301 for (PetscInt i = xs; i < i_max; i++) {
1302 local_sum[0] += coor[k][j][i].x;
1303 local_sum[1] += coor[k][j][i].y;
1304 local_sum[2] += coor[k][j][i].z;
1305 local_n_points++;
1306 }
1307 }
1308
1309 // ---- Part 2: Area calculation (INTERIOR cells only) ----
1310 // For mx=26, my=26: i∈[1,24], j∈[1,24] → 24×24 = 576 cells
1311 for (PetscInt j = lys; j < lye; j++) {
1312 for (PetscInt i = lxs; i < lxe; i++) {
1313 // nvert[k][j][i] = nvert[96][j][i] checks last cell (Cell 95)
1314 // (Physical Cell 95, stored at shifted index [96])
1315 if (nvert[k][j][i] < 0.1) {
1316 // Face area = magnitude of zet metric at [96][j][i]
1317 localAreaSum += sqrt(zet[k][j][i].x * zet[k][j][i].x +
1318 zet[k][j][i].y * zet[k][j][i].y +
1319 zet[k][j][i].z * zet[k][j][i].z);
1320 }
1321 }
1322 }
1323 }
1324 break;
1325
1326 default:
1327 SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE,
1328 "Unknown face_id %d in CalculateFaceCenterAndArea", face_id);
1329 }
1330
1331 // ========================================================================
1332 // Step 4: Restore array access (release pointers)
1333 // ========================================================================
1334 ierr = DMDAVecRestoreArrayRead(user->fda, lCoor, &coor); CHKERRQ(ierr);
1335 ierr = DMDAVecRestoreArrayRead(user->da, user->lNvert, &nvert); CHKERRQ(ierr);
1336 ierr = DMDAVecRestoreArrayRead(user->fda, user->lCsi, &csi); CHKERRQ(ierr);
1337 ierr = DMDAVecRestoreArrayRead(user->fda, user->lEta, &eta); CHKERRQ(ierr);
1338 ierr = DMDAVecRestoreArrayRead(user->fda, user->lZet, &zet); CHKERRQ(ierr);
1339 }
1340 // ========================================================================
1341 // Step 5: Perform MPI reductions to get global sums
1342 // ========================================================================
1343 // Sum coordinate contributions from all ranks
1344 ierr = MPI_Allreduce(local_sum, global_sum, 3, MPI_DOUBLE, MPI_SUM,
1345 PETSC_COMM_WORLD); CHKERRQ(ierr);
1346
1347 // Sum node counts from all ranks
1348 ierr = MPI_Allreduce(&local_n_points, &global_n_points, 1, MPI_COUNT, MPI_SUM,
1349 PETSC_COMM_WORLD); CHKERRQ(ierr);
1350
1351 // Sum area contributions from all ranks
1352 ierr = MPI_Allreduce(&localAreaSum, &globalAreaSum, 1, MPI_DOUBLE, MPI_SUM,
1353 PETSC_COMM_WORLD); CHKERRQ(ierr);
1354
1355 // ========================================================================
1356 // Step 6: Calculate geometric center by averaging coordinates
1357 // ========================================================================
1358 if (global_n_points > 0) {
1359 face_center->x = global_sum[0] / global_n_points;
1360 face_center->y = global_sum[1] / global_n_points;
1361 face_center->z = global_sum[2] / global_n_points;
1363 "Calculated center for Face %s: (x=%.4f, y=%.4f, z=%.4f) from %lld nodes\n",
1364 BCFaceToString(face_id),
1365 face_center->x, face_center->y, face_center->z,
1366 (long long)global_n_points);
1367 } else {
1368 // No nodes found - this should not happen for a valid face
1370 "WARNING: Face %s identified but no grid points found. Center not calculated.\n",
1371 BCFaceToString(face_id));
1372 face_center->x = face_center->y = face_center->z = 0.0;
1373 }
1374
1375 // ========================================================================
1376 // Step 7: Return computed total area
1377 // ========================================================================
1378 *face_area = globalAreaSum;
1380 "Calculated area for Face %s: Area=%.6f\n",
1381 BCFaceToString(face_id), *face_area);
1382
1383 PetscFunctionReturn(0);
1384}
PetscErrorCode CanRankServiceFace(const DMDALocalInfo *info, PetscInt IM_nodes_global, PetscInt JM_nodes_global, PetscInt KM_nodes_global, BCFace face_id, PetscBool *can_service_out)
Determines if the current MPI rank owns any part of a specified global face.
Definition Boundaries.c:126
const char * BCFaceToString(BCFace face)
Helper function to convert BCFace enum to a string representation.
Definition logging.c:669
Vec lNvert
Definition variables.h:837
Vec lZet
Definition variables.h:858
Vec lCsi
Definition variables.h:858
Vec lEta
Definition variables.h:858
@ BC_FACE_NEG_X
Definition variables.h:245
@ BC_FACE_POS_Z
Definition variables.h:247
@ BC_FACE_POS_Y
Definition variables.h:246
@ BC_FACE_NEG_Z
Definition variables.h:247
@ BC_FACE_POS_X
Definition variables.h:245
@ BC_FACE_NEG_Y
Definition variables.h:246
Here is the call graph for this function:
Here is the caller graph for this function: