PICurv 0.1.0
A Parallel Particle-In-Cell Solver for Curvilinear LES
setup.c
/**
 * @file  setup.c
 * @brief Setup code for running any simulation with the fdf-curvIB method:
 *        provides the routines to start a simulation with DMSwarm particles
 *        and DMDA grids.
 **/

#include "setup.h"

/**
 * @brief Allocates and populates the master SimulationContext object.
 *
 * This function serves as the single, authoritative entry point for all
 * simulation configuration. It merges the setup logic from both the legacy
 * FSI/IBM solver and the modern particle solver into a unified, robust
 * process.
 *
 * The function follows a strict sequence:
 * 1. **Allocate Context & Set Defaults:** It first allocates the `SimulationContext`
 *    and populates every field with a sane, hardcoded default value. This
 *    ensures the simulation starts from a known, predictable state.
 * 2. **Configure Logging System:** It configures the custom logging framework. It
 *    parses the `-func_config_file` option to load a list of function names
 *    allowed to produce log output. This configuration (the file path and the
 *    list of function names) is stored within the `SimulationContext` for
 *    later reference and cleanup.
 * 3. **Parse All Options:** It performs a comprehensive pass of `PetscOptionsGet...`
 *    calls for every possible command-line flag, overriding the default
 *    values set in step 1.
 * 4. **Log Summary:** After all options are parsed, it uses the now-active
 *    logging system to print a summary of the key simulation parameters.
 *
 * @param[in]  argc Argument count passed from `main`.
 * @param[in]  argv Argument vector passed from `main`.
 * @param[out] p_simCtx On success, this will point to the newly created and
 *                      fully configured `SimulationContext` pointer. The caller
 *                      is responsible for eventually destroying this object by
 *                      calling `FinalizeSimulation()`.
 *
 * @return PetscErrorCode Returns 0 on success, or a non-zero PETSc error code on failure.
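 *
 * @par Usage sketch (illustrative)
 * A minimal calling sequence from a host program; this assumes PETSc has
 * already been initialized and uses the `FinalizeSimulation()` counterpart
 * mentioned above:
 * @code
 * SimCtx *simCtx = NULL;
 * PetscErrorCode ierr;
 * ierr = CreateSimulationContext(argc, argv, &simCtx); CHKERRQ(ierr);
 * // ... set up grids and solvers, run the time loop ...
 * ierr = FinalizeSimulation(simCtx); CHKERRQ(ierr);
 * @endcode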
 */
PetscErrorCode CreateSimulationContext(int argc, char **argv, SimCtx **p_simCtx)
{
    PetscErrorCode ierr;
    SimCtx        *simCtx;

    PetscFunctionBeginUser;

    // === 1. Allocate the Context Struct and Set ALL Defaults ==================
    ierr = PetscNew(p_simCtx); CHKERRQ(ierr);
    simCtx = *p_simCtx;

    // --- Group 1: Parallelism & MPI ---
    simCtx->rank = 0; simCtx->size = 1;

    // --- Group 2: Simulation Control, Time, and I/O ---
    simCtx->step = 0; simCtx->ti = 0.0; simCtx->StartStep = 0; simCtx->StepsToRun = 10;
    simCtx->tiout = 10; simCtx->StartTime = 0.0; simCtx->dt = 0.001;
    simCtx->rstart_flg = PETSC_FALSE; simCtx->OnlySetup = PETSC_FALSE;
    simCtx->logviewer = NULL; simCtx->OutputFreq = simCtx->tiout;

    // --- Group 3: High-Level Physics & Model Selection Flags ---
    simCtx->immersed = 0; simCtx->movefsi = 0; simCtx->rotatefsi = 0;
    simCtx->sediment = 0; simCtx->rheology = 0; simCtx->invicid = 0;
    simCtx->TwoD = 0; simCtx->thin = 0; simCtx->moveframe = 0;
    simCtx->rotateframe = 0; simCtx->blank = 0;
    simCtx->dgf_x = 0; simCtx->dgf_y = 1; simCtx->dgf_z = 0;
    simCtx->dgf_ax = 1; simCtx->dgf_ay = 0; simCtx->dgf_az = 0;
    simCtx->st = 1.0;

    // --- Group 4: Specific Simulation Case Flags ---
    simCtx->cop = 0; simCtx->fish = 0; simCtx->fish_c = 0; simCtx->fishcyl = 0;
    simCtx->eel = 0; simCtx->pizza = 0; simCtx->turbine = 0; simCtx->Pipe = 0;
    simCtx->wing = 0; simCtx->hydro = 0; simCtx->MHV = 0; simCtx->LV = 0;

    // --- Group 5: Solver & Numerics Parameters ---
    simCtx->implicit = 0; simCtx->implicit_type = 0; simCtx->imp_MAX_IT = 50;
    simCtx->imp_atol = 1e-7; simCtx->imp_rtol = 1e-4; simCtx->imp_stol = 1.e-8;
    simCtx->mglevels = 3; simCtx->mg_MAX_IT = 30; simCtx->mg_idx = 1;
    simCtx->mg_preItr = 1; simCtx->mg_poItr = 1;
    simCtx->poisson = 0; simCtx->poisson_tol = 5.e-9;
    simCtx->STRONG_COUPLING = 0; simCtx->InitialGuessOne = 0; simCtx->central = 0;
    simCtx->ren = 100.0; simCtx->cfl = 0.1; simCtx->vnn = 0.1;
    simCtx->cdisx = 0.0; simCtx->cdisy = 0.0; simCtx->cdisz = 0.0;
    simCtx->FieldInitialization = 0;
    simCtx->InitialConstantContra.x = 0.0;
    simCtx->InitialConstantContra.y = 0.0;
    simCtx->InitialConstantContra.z = 0.0;

    // --- Group 6: Physical & Geometric Parameters ---
    simCtx->NumberOfBodies = 1; simCtx->Flux_in = 1.0; simCtx->angle = 0.0;
    simCtx->L_dim = 1.0; simCtx->St_exp = 0.5; simCtx->wavelength = 0.95;
    simCtx->max_angle = -54. * 3.1415926 / 180.;
    simCtx->CMx_c = 0.0; simCtx->CMy_c = 0.0; simCtx->CMz_c = 0.0;
    simCtx->regime = 1; simCtx->radi = 10;
    simCtx->chact_leng = 1.0;

    // --- Group 7: Grid, Domain, and Boundary Condition Settings ---
    simCtx->block_number = 1; simCtx->inletprofile = 1;
    simCtx->grid1d = 0; simCtx->Ogrid = 0; simCtx->channelz = 0;
    simCtx->i_periodic = 0; simCtx->j_periodic = 0; simCtx->k_periodic = 0;
    simCtx->blkpbc = 10; simCtx->pseudo_periodic = 0;
    strcpy(simCtx->grid_file, "config/grid.dat");
    simCtx->generate_grid = PETSC_FALSE;
    simCtx->da_procs_x = PETSC_DECIDE;
    simCtx->da_procs_y = PETSC_DECIDE;
    simCtx->da_procs_z = PETSC_DECIDE;
    simCtx->grid_rotation_angle = 0.0;
    simCtx->Croty = 0.0; simCtx->Crotz = 0.0;
    simCtx->num_bcs_files = 1;
    ierr = PetscMalloc1(1, &simCtx->bcs_files); CHKERRQ(ierr);
    ierr = PetscStrallocpy("config/bcs.dat", &simCtx->bcs_files[0]); CHKERRQ(ierr);
    simCtx->FluxInSum = 0.0; simCtx->FluxOutSum = 0.0; simCtx->Fluxsum = 0.0;
    simCtx->AreaOutSum = 0.0;
    simCtx->U_bc = 0.0; simCtx->ccc = 0;
    simCtx->ratio = 0.0;

    // --- Group 8: Turbulence Modeling (LES/RANS) ---
    simCtx->averaging = PETSC_FALSE; simCtx->les = 0; simCtx->rans = 0;
    simCtx->wallfunction = 0; simCtx->mixed = 0; simCtx->clark = 0;
    simCtx->dynamic_freq = 1; simCtx->max_cs = 0.5;
    simCtx->testfilter_ik = 0; simCtx->testfilter_1d = 0;
    simCtx->i_homo_filter = 0; simCtx->j_homo_filter = 0; simCtx->k_homo_filter = 0;

    // --- Group 9: Particle / DMSwarm Data & Settings ---
    simCtx->np = 0; simCtx->readFields = PETSC_FALSE;
    simCtx->dm_swarm = NULL; simCtx->bboxlist = NULL;
    simCtx->ParticleInitialization = 0;

    // --- Group 10: Immersed Boundary & FSI Data Object Pointers ---
    simCtx->ibm = NULL; simCtx->ibmv = NULL; simCtx->fsi = NULL;
    simCtx->rstart_fsi = PETSC_FALSE; simCtx->duplicate = 0;

    // --- Group 11: Logging and Custom Configuration ---
    strcpy(simCtx->allowedFile, "config/whitelist.dat");
    simCtx->useCfg = PETSC_FALSE;
    simCtx->allowedFuncs = NULL;
    simCtx->nAllowed = 0;
    simCtx->LoggingFrequency = 10;
    simCtx->summationRHS = 0.0;
    simCtx->MaxDiv = 0.0;
    simCtx->MaxDivFlatArg = 0; simCtx->MaxDivx = 0; simCtx->MaxDivy = 0; simCtx->MaxDivz = 0;
    strcpy(simCtx->criticalFuncsFile, "config/profile.cfg");
    simCtx->useCriticalFuncsCfg = PETSC_FALSE;
    simCtx->criticalFuncs = NULL;
    simCtx->nCriticalFuncs = 0;

    // --- Group 12: Post-Processing Information ---
    strcpy(simCtx->PostprocessingControlFile, "config/postprocess.cfg");
    ierr = PetscNew(&simCtx->pps); CHKERRQ(ierr);

    // === 2. Get MPI Info and Handle Config File =============================
    // -- Group 1: Parallelism & MPI Information
    ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &simCtx->rank); CHKERRQ(ierr);
    ierr = MPI_Comm_size(PETSC_COMM_WORLD, &simCtx->size); CHKERRQ(ierr);

    ierr = PetscOptionsInsertFile(PETSC_COMM_WORLD, NULL, "config/control.dat", PETSC_FALSE);
    if (ierr == PETSC_ERR_FILE_OPEN) {
        if (simCtx->rank == 0) {
            PetscPrintf(PETSC_COMM_SELF, "INFO: Could not open optional 'control.dat' file. Proceeding with defaults and command-line options.\n");
        }
        ierr = 0; // Clear the benign file-not-found error.
    } else {
        CHKERRQ(ierr); // A different error occurred, so we should stop.
    }
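
    /* Illustrative contents for the optional 'config/control.dat' read above
     * (one PETSc option per line, the format accepted by PetscOptionsInsertFile;
     * the values shown are examples, not defaults):
     *
     *   -dt 0.0005
     *   -totalsteps 200
     *   -nblk 1
     *   -les 1
     */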

    // === 3.A Configure Logging System ========================================
    // This logic determines the logging configuration and STORES it in simCtx
    // for later reference and cleanup.
    ierr = PetscOptionsGetString(NULL, NULL, "-func_config_file", simCtx->allowedFile, PETSC_MAX_PATH_LEN, &simCtx->useCfg); CHKERRQ(ierr);

    if (simCtx->useCfg) {
        ierr = LoadAllowedFunctionsFromFile(simCtx->allowedFile, &simCtx->allowedFuncs, &simCtx->nAllowed);
        if (ierr) {
            // Use direct PetscPrintf as the logging system isn't fully active yet.
            PetscPrintf(PETSC_COMM_SELF, "[%s] WARNING: Failed to load allowed functions from '%s'. Falling back to default list.\n", __func__, simCtx->allowedFile);
            simCtx->useCfg = PETSC_FALSE; // Mark as failed.
            ierr = 0; // Clear the error to allow fallback.
        }
    }
    if (!simCtx->useCfg) {
        // Fall back to default logging functions if no file was used or if loading failed.
        simCtx->nAllowed = 2;
        ierr = PetscMalloc1(simCtx->nAllowed, &simCtx->allowedFuncs); CHKERRQ(ierr);
        ierr = PetscStrallocpy("main", &simCtx->allowedFuncs[0]); CHKERRQ(ierr);
        ierr = PetscStrallocpy("CreateSimulationContext", &simCtx->allowedFuncs[1]); CHKERRQ(ierr);
    }
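
    /* Illustrative whitelist file for -func_config_file, assuming the loader
     * reads one function name per line (mirroring the fallback list built
     * above):
     *
     *   main
     *   CreateSimulationContext
     */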

    // Activate the configuration by passing it to the logging module's setup function.
    set_allowed_functions((const char**)simCtx->allowedFuncs, (size_t)simCtx->nAllowed);

    // Now that the logger is configured, we can use it.
    LOG_ALLOW(LOCAL, LOG_INFO, "Context created. Initializing on rank %d of %d.\n", simCtx->rank, simCtx->size);
    print_log_level(); // This will now correctly reflect the LOG_LEVEL environment variable.

    // === 3.B Configure Profiling System ======================================
    ierr = PetscOptionsGetString(NULL, NULL, "-profile_config_file", simCtx->criticalFuncsFile, PETSC_MAX_PATH_LEN, &simCtx->useCriticalFuncsCfg); CHKERRQ(ierr);
    if (simCtx->useCriticalFuncsCfg) {
        // Mirrors the logging path above: load the list of function names to profile.
        ierr = LoadAllowedFunctionsFromFile(simCtx->criticalFuncsFile, &simCtx->criticalFuncs, &simCtx->nCriticalFuncs);
        if (ierr) {
            PetscPrintf(PETSC_COMM_SELF, "[%s] WARNING: Failed to load critical profiling functions from '%s'. Falling back to default list.\n", __func__, simCtx->criticalFuncsFile);
            simCtx->useCriticalFuncsCfg = PETSC_FALSE;
            ierr = 0;
        }
    }
    if (!simCtx->useCriticalFuncsCfg) {
        // Fall back to a hardcoded default list if no file was given or loading failed.
        simCtx->nCriticalFuncs = 4;
        ierr = PetscMalloc1(simCtx->nCriticalFuncs, &simCtx->criticalFuncs); CHKERRQ(ierr);
        ierr = PetscStrallocpy("Flow_Solver", &simCtx->criticalFuncs[0]); CHKERRQ(ierr);
        ierr = PetscStrallocpy("AdvanceSimulation", &simCtx->criticalFuncs[1]); CHKERRQ(ierr);
        ierr = PetscStrallocpy("LocateAllParticlesInGrid_TEST", &simCtx->criticalFuncs[2]); CHKERRQ(ierr);
        ierr = PetscStrallocpy("InterpolateAllFieldsToSwarm", &simCtx->criticalFuncs[3]); CHKERRQ(ierr);
    }

    // Initialize the profiling system with the current, updated simulation context.
    ierr = ProfilingInitialize(simCtx); CHKERRQ(ierr);

    // === 4. Parse All Command Line Options ==================================
    LOG_ALLOW(GLOBAL, LOG_INFO, "Parsing command-line options...\n");

    // --- Group 2
    LOG_ALLOW(GLOBAL, LOG_DEBUG, "Parsing Group 2: Simulation Control, Time and I/O.\n");
    // Read the integer step number to start from.
    ierr = PetscOptionsGetInt(NULL, NULL, "-rstart", &simCtx->StartStep, &simCtx->rstart_flg); CHKERRQ(ierr);

    // Read the physical time to start from.
    // The default is already 0.0, so this will only be non-zero if the user provides it.
    ierr = PetscOptionsGetReal(NULL, NULL, "-ti", &simCtx->StartTime, NULL); CHKERRQ(ierr);

    ierr = PetscOptionsGetInt(NULL, NULL, "-totalsteps", &simCtx->StepsToRun, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetBool(NULL, NULL, "-only_setup", &simCtx->OnlySetup, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetReal(NULL, NULL, "-dt", &simCtx->dt, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-tio", &simCtx->tiout, NULL); CHKERRQ(ierr);
    simCtx->OutputFreq = simCtx->tiout;
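
    // Illustrative restart invocation using the flags parsed above (binary
    // name hypothetical; -tio semantics inferred from the 'tiout'/OutputFreq
    // fields): ./picsolver -rstart 100 -ti 0.1 -totalsteps 50 -dt 0.001 -tio 10
    // resumes at step 100 (t = 0.1), runs 50 steps, and writes every 10 steps.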

    // --- Group 3
    LOG_ALLOW(GLOBAL, LOG_DEBUG, "Parsing Group 3: High-Level Physics & Model Selection Flags\n");
    ierr = PetscOptionsGetInt(NULL, NULL, "-imm", &simCtx->immersed, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-fsi", &simCtx->movefsi, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-rfsi", &simCtx->rotatefsi, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-sediment", &simCtx->sediment, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-rheology", &simCtx->rheology, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-inv", &simCtx->invicid, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-TwoD", &simCtx->TwoD, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-thin", &simCtx->thin, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-mframe", &simCtx->moveframe, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-rframe", &simCtx->rotateframe, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-blk", &simCtx->blank, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-dgf_z", &simCtx->dgf_z, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-dgf_y", &simCtx->dgf_y, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-dgf_x", &simCtx->dgf_x, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-dgf_az", &simCtx->dgf_az, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-dgf_ay", &simCtx->dgf_ay, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-dgf_ax", &simCtx->dgf_ax, NULL); CHKERRQ(ierr);

    // --- Group 4
    LOG_ALLOW(GLOBAL, LOG_DEBUG, "Parsing Group 4: Specific Simulation Case Flags\n");
    ierr = PetscOptionsGetInt(NULL, NULL, "-cop", &simCtx->cop, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-fish", &simCtx->fish, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-pizza", &simCtx->pizza, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-turbine", &simCtx->turbine, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-fishcyl", &simCtx->fishcyl, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-eel", &simCtx->eel, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-cstart", &simCtx->fish_c, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-wing", &simCtx->wing, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-mhv", &simCtx->MHV, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-hydro", &simCtx->hydro, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-lv", &simCtx->LV, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-Pipe", &simCtx->Pipe, NULL); CHKERRQ(ierr);

    // --- Group 5
    LOG_ALLOW(GLOBAL, LOG_DEBUG, "Parsing Group 5: Solver & Numerics Parameters\n");
    ierr = PetscOptionsGetInt(NULL, NULL, "-imp", &simCtx->implicit, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-imp_type", &simCtx->implicit_type, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-imp_MAX_IT", &simCtx->imp_MAX_IT, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetReal(NULL, NULL, "-imp_atol", &simCtx->imp_atol, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetReal(NULL, NULL, "-imp_rtol", &simCtx->imp_rtol, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetReal(NULL, NULL, "-imp_stol", &simCtx->imp_stol, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-central", &simCtx->central, NULL); CHKERRQ(ierr);

    // --- Multigrid Options ---
    ierr = PetscOptionsGetInt(NULL, NULL, "-mg_level", &simCtx->mglevels, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-mg_max_it", &simCtx->mg_MAX_IT, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-mg_idx", &simCtx->mg_idx, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-mg_pre_it", &simCtx->mg_preItr, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-mg_post_it", &simCtx->mg_poItr, NULL); CHKERRQ(ierr);

    // --- Other Solver Options ---
    ierr = PetscOptionsGetInt(NULL, NULL, "-poisson", &simCtx->poisson, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetReal(NULL, NULL, "-poisson_tol", &simCtx->poisson_tol, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-str", &simCtx->STRONG_COUPLING, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-init1", &simCtx->InitialGuessOne, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetReal(NULL, NULL, "-ren", &simCtx->ren, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetReal(NULL, NULL, "-cfl", &simCtx->cfl, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetReal(NULL, NULL, "-vnn", &simCtx->vnn, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-finit", &simCtx->FieldInitialization, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetReal(NULL, NULL, "-ucont_x", &simCtx->InitialConstantContra.x, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetReal(NULL, NULL, "-ucont_y", &simCtx->InitialConstantContra.y, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetReal(NULL, NULL, "-ucont_z", &simCtx->InitialConstantContra.z, NULL); CHKERRQ(ierr);
    // NOTE: cdisx, cdisy, cdisz haven't been parsed; add if necessary.

    // --- Group 6
    LOG_ALLOW(GLOBAL, LOG_DEBUG, "Parsing Group 6: Physical & Geometric Parameters\n");
    ierr = PetscOptionsGetInt(NULL, NULL, "-body", &simCtx->NumberOfBodies, NULL); CHKERRQ(ierr);
    // NOTE: Flux_in and angle are not parsed in the original code; they are set programmatically. We follow that here.
    ierr = PetscOptionsGetReal(NULL, NULL, "-St_exp", &simCtx->St_exp, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetReal(NULL, NULL, "-wlngth", &simCtx->wavelength, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetReal(NULL, NULL, "-x_c", &simCtx->CMx_c, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetReal(NULL, NULL, "-y_c", &simCtx->CMy_c, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetReal(NULL, NULL, "-z_c", &simCtx->CMz_c, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-reg", &simCtx->regime, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-radi", &simCtx->radi, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetReal(NULL, NULL, "-chact_leng", &simCtx->chact_leng, NULL); CHKERRQ(ierr);
    // NOTE: L_dim and max_angle are calculated based on other flags (like MHV) in the legacy code.
    // We defer that logic to a later setup stage and do not parse them directly.

    // --- Group 7
    LOG_ALLOW(GLOBAL, LOG_DEBUG, "Parsing Group 7: Grid, Domain, and Boundary Condition Settings\n");
    ierr = PetscOptionsGetInt(NULL, NULL, "-nblk", &simCtx->block_number, NULL); CHKERRQ(ierr); // This is also a modern option
    ierr = PetscOptionsGetInt(NULL, NULL, "-inlet", &simCtx->inletprofile, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-Ogrid", &simCtx->Ogrid, NULL); CHKERRQ(ierr);
    // NOTE: channelz was not parsed, likely set programmatically. We omit its parsing call.
    ierr = PetscOptionsGetInt(NULL, NULL, "-grid1d", &simCtx->grid1d, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetBool(NULL, NULL, "-grid", &simCtx->generate_grid, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetString(NULL, NULL, "-grid_file", simCtx->grid_file, PETSC_MAX_PATH_LEN, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-da_processors_x", &simCtx->da_procs_x, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-da_processors_y", &simCtx->da_procs_y, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-da_processors_z", &simCtx->da_procs_z, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-i_periodic", &simCtx->i_periodic, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-j_periodic", &simCtx->j_periodic, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-k_periodic", &simCtx->k_periodic, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-pbc_domain", &simCtx->blkpbc, NULL); CHKERRQ(ierr);
    // NOTE: pseudo_periodic was not parsed. We omit its parsing call.
    ierr = PetscOptionsGetReal(NULL, NULL, "-grid_rotation_angle", &simCtx->grid_rotation_angle, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetReal(NULL, NULL, "-Croty", &simCtx->Croty, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetReal(NULL, NULL, "-Crotz", &simCtx->Crotz, NULL); CHKERRQ(ierr);

    PetscBool bcs_flg;
    char file_list_str[PETSC_MAX_PATH_LEN * 10]; // Buffer for comma-separated list

    ierr = PetscOptionsGetString(NULL, NULL, "-bcs_files", file_list_str, sizeof(file_list_str), &bcs_flg); CHKERRQ(ierr);
    ierr = PetscOptionsGetReal(NULL, NULL, "-U_bc", &simCtx->U_bc, NULL); CHKERRQ(ierr);

    if (bcs_flg) {
        LOG_ALLOW(GLOBAL, LOG_DEBUG, "Found -bcs_files option, overriding default.\n");

        // A. Clean up the default memory we allocated in Phase 1.
        ierr = PetscFree(simCtx->bcs_files[0]); CHKERRQ(ierr);
        ierr = PetscFree(simCtx->bcs_files); CHKERRQ(ierr);
        simCtx->num_bcs_files = 0;
        simCtx->bcs_files = NULL;

        // B. Parse the user-provided comma-separated list.
        char *token;
        char *str_copy;
        ierr = PetscStrallocpy(file_list_str, &str_copy); CHKERRQ(ierr);

        // First pass: count the number of files.
        token = strtok(str_copy, ",");
        while (token) {
            simCtx->num_bcs_files++;
            token = strtok(NULL, ",");
        }
        ierr = PetscFree(str_copy); CHKERRQ(ierr);

        // Second pass: allocate memory and store the filenames.
        ierr = PetscMalloc1(simCtx->num_bcs_files, &simCtx->bcs_files); CHKERRQ(ierr);
        ierr = PetscStrallocpy(file_list_str, &str_copy); CHKERRQ(ierr);
        token = strtok(str_copy, ",");
        for (PetscInt i = 0; i < simCtx->num_bcs_files; i++) {
            ierr = PetscStrallocpy(token, &simCtx->bcs_files[i]); CHKERRQ(ierr);
            token = strtok(NULL, ",");
        }
        ierr = PetscFree(str_copy); CHKERRQ(ierr);
    }
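
    // Illustrative multi-block invocation (binary name hypothetical); the list
    // length must match -nblk, which is enforced later in this function:
    //   mpiexec -n 4 ./picsolver -nblk 2 -bcs_files "config/bcs0.dat,config/bcs1.dat"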

    // --- Group 8
    LOG_ALLOW(GLOBAL, LOG_DEBUG, "Parsing Group 8: Turbulence Modeling (LES/RANS)\n");
    ierr = PetscOptionsGetInt(NULL, NULL, "-les", &simCtx->les, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-rans", &simCtx->rans, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-wallfunction", &simCtx->wallfunction, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-mixed", &simCtx->mixed, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-clark", &simCtx->clark, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-dynamic_freq", &simCtx->dynamic_freq, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetReal(NULL, NULL, "-max_cs", &simCtx->max_cs, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-testfilter_ik", &simCtx->testfilter_ik, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-testfilter_1d", &simCtx->testfilter_1d, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-i_homo_filter", &simCtx->i_homo_filter, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-j_homo_filter", &simCtx->j_homo_filter, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-k_homo_filter", &simCtx->k_homo_filter, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetBool(NULL, NULL, "-averaging", &simCtx->averaging, NULL); CHKERRQ(ierr);

    // --- Group 9
    LOG_ALLOW(GLOBAL, LOG_DEBUG, "Parsing Group 9: Particle / DMSwarm Data & Settings\n");
    ierr = PetscOptionsGetInt(NULL, NULL, "-numParticles", &simCtx->np, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetBool(NULL, NULL, "-read_fields", &simCtx->readFields, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-pinit", &simCtx->ParticleInitialization, NULL); CHKERRQ(ierr);

    // --- Group 10
    LOG_ALLOW(GLOBAL, LOG_DEBUG, "Parsing Group 10: Immersed Boundary & FSI Data Object Pointers\n");
    ierr = PetscOptionsGetBool(NULL, NULL, "-rs_fsi", &simCtx->rstart_fsi, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-duplicate", &simCtx->duplicate, NULL); CHKERRQ(ierr);

    // --- Group 11
    LOG_ALLOW(GLOBAL, LOG_DEBUG, "Parsing Group 11: Top-Level Managers & Custom Configuration\n");
    ierr = PetscOptionsGetInt(NULL, NULL, "-logfreq", &simCtx->LoggingFrequency, NULL); CHKERRQ(ierr);

    if (simCtx->num_bcs_files != simCtx->block_number) {
        SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_ARG_INCOMP, "Number of BC files (%d) does not match number of blocks (%d). Use -bcs_files \"file1.dat,file2.dat,...\".", simCtx->num_bcs_files, simCtx->block_number);
    }

    // --- Group 12
    LOG_ALLOW(GLOBAL, LOG_DEBUG, "Parsing Group 12: Post-Processing Information.\n");
    // This logic determines the post-processing configuration and STORES it in
    // simCtx for later reference and cleanup. (simCtx->pps was already
    // allocated when the defaults were set in Phase 1.)
    ierr = PetscOptionsGetString(NULL, NULL, "-postprocessing_config_file", simCtx->PostprocessingControlFile, PETSC_MAX_PATH_LEN, NULL); CHKERRQ(ierr);
    ierr = ParsePostProcessingSettings(simCtx); CHKERRQ(ierr);

    // === 5. Log Summary and Finalize Setup ==================================
    LOG_ALLOW(GLOBAL, LOG_DEBUG, "-- Console Output Functions [Total : %d] : --\n", simCtx->nAllowed);
    for (PetscInt i = 0; i < simCtx->nAllowed; ++i) {
        LOG_ALLOW(GLOBAL, LOG_DEBUG, "   [%2d] «%s»\n", i, simCtx->allowedFuncs[i]);
    }

    LOG_ALLOW(GLOBAL, LOG_INFO, "Configuration complete. Key parameters:\n");
    LOG_ALLOW(GLOBAL, LOG_INFO, "  - Run mode: %s\n", simCtx->OnlySetup ? "SETUP ONLY" : "Full Simulation");
    LOG_ALLOW(GLOBAL, LOG_INFO, "  - Time steps: %d (from %d to %d)\n", simCtx->StepsToRun, simCtx->StartStep, simCtx->StartStep + simCtx->StepsToRun);
    LOG_ALLOW(GLOBAL, LOG_INFO, "  - Time step size (dt): %g\n", simCtx->dt);
    LOG_ALLOW(GLOBAL, LOG_INFO, "  - Immersed Boundary: %s\n", simCtx->immersed ? "ENABLED" : "DISABLED");
    LOG_ALLOW(GLOBAL, LOG_INFO, "  - Particles: %d\n", simCtx->np);

    // --- Initialize PETSc's internal performance logging stage ---
    ierr = PetscLogDefaultBegin(); CHKERRQ(ierr);

    LOG_ALLOW(GLOBAL, LOG_DEBUG, "Finished CreateSimulationContext successfully on rank %d.\n", simCtx->rank);

    PetscFunctionReturn(0);
}
/**
 * @brief Allocates the memory for the UserMG and UserCtx hierarchy.
 *
 * This function performs the foundational memory allocation for the solver's
 * data structures. It reads the number of multigrid levels and grid blocks
 * from the master SimulationContext, then allocates the arrays for MGCtx and
 * UserCtx.
 *
 * It performs three critical tasks:
 * 1. Allocates the MGCtx array within the UserMG struct.
 * 2. For each level, allocates the UserCtx array for all blocks.
 * 3. Sets the `simCtx` back-pointer in every single UserCtx, linking it to the
 *    master configuration.
 *
 * @param simCtx The master SimulationContext, which contains configuration and
 *               will store the resulting allocated hierarchy in its `usermg` member.
 * @return PetscErrorCode
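 *
 * @par Illustrative access pattern
 * After allocation, any context can reach the master configuration and its
 * multigrid neighbours through the pointers set below:
 * @code
 * UserCtx *user    = &simCtx->usermg.mgctx[level].user[bi];
 * SimCtx  *master  = user->simCtx;   // back-pointer to the master context
 * UserCtx *coarser = user->user_c;   // valid for level > 0
 * UserCtx *finer   = user->user_f;   // valid for level < mglevels-1
 * @endcode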
 */
static PetscErrorCode AllocateContextHierarchy(SimCtx *simCtx)
{
    PetscErrorCode ierr;
    UserMG        *usermg = &simCtx->usermg;
    MGCtx         *mgctx;
    PetscInt       nblk   = simCtx->block_number;
    PetscBool      found;
    PetscFunctionBeginUser;

    LOG_ALLOW(GLOBAL, LOG_INFO, "Allocating context hierarchy for %d levels and %d blocks...\n", simCtx->mglevels, nblk);

    // Store the number of levels in the UserMG struct itself
    usermg->mglevels = simCtx->mglevels;

    // --- 1. Allocate the array of MGCtx structs ---
    ierr = PetscMalloc(usermg->mglevels * sizeof(MGCtx), &usermg->mgctx); CHKERRQ(ierr);
    mgctx = usermg->mgctx;
    LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Allocated MGCtx array of size %d.\n", simCtx->rank, usermg->mglevels);

    // --- 2. Parse semi-coarsening options (logic from MG_Initial) ---
    // These flags determine if a dimension is coarsened in the multigrid hierarchy.
    PetscInt *isc, *jsc, *ksc;
    ierr = PetscMalloc3(nblk, &isc, nblk, &jsc, nblk, &ksc); CHKERRQ(ierr);
    // Set defaults to FALSE (full coarsening)
    for (PetscInt i = 0; i < nblk; ++i) {
        isc[i] = 0; jsc[i] = 0; ksc[i] = 0;
    }

    // Use a temporary variable for the 'count' argument to the parsing function.
    // This protects the original 'nblk', which is needed for the loop bounds.
    PetscInt n_opts_found = nblk;
    ierr = PetscOptionsGetIntArray(NULL, NULL, "-mg_i_semi", isc, &n_opts_found, &found); CHKERRQ(ierr);

    n_opts_found = nblk; // Reset the temp variable before the next call
    ierr = PetscOptionsGetIntArray(NULL, NULL, "-mg_j_semi", jsc, &n_opts_found, &found); CHKERRQ(ierr);

    n_opts_found = nblk; // Reset the temp variable before the next call
    ierr = PetscOptionsGetIntArray(NULL, NULL, "-mg_k_semi", ksc, &n_opts_found, &found); CHKERRQ(ierr);
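
    // Illustrative: for a two-block run, "-mg_k_semi 1,1" supplies one entry
    // per block (the defaults above are 0, i.e. full coarsening); the
    // per-block values are copied into each UserCtx below.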

    // --- 3. Loop over levels and blocks to allocate UserCtx arrays ---
    for (PetscInt level = 0; level < simCtx->mglevels; level++) {

        LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Setting up MG Level %d...\n", simCtx->rank, level);
        // Allocate the array of UserCtx structs for this level
        ierr = PetscMalloc(nblk * sizeof(UserCtx), &mgctx[level].user); CHKERRQ(ierr);
        // It's good practice to zero out the memory to avoid uninitialized values
        ierr = PetscMemzero(mgctx[level].user, nblk * sizeof(UserCtx)); CHKERRQ(ierr);
        mgctx[level].thislevel = level;

        for (PetscInt bi = 0; bi < nblk; bi++) {
            UserCtx *currentUser = &mgctx[level].user[bi];
            LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Initializing UserCtx for Level %d, Block %d.\n", simCtx->rank, level, bi);

            // --- CRITICAL STEP: Set the back-pointer to the master context ---
            currentUser->simCtx = simCtx;

            // Initialize other per-context values
            currentUser->thislevel = level;
            currentUser->_this = bi; // Store the block index
            currentUser->mglevels = usermg->mglevels;

            // Assign semi-coarsening flags
            currentUser->isc = isc[bi];
            currentUser->jsc = jsc[bi];
            currentUser->ksc = ksc[bi];

            // Link to finer/coarser contexts for multigrid operations
            if (level > 0) {
                currentUser->user_c = &mgctx[level-1].user[bi];
                LOG_ALLOW_SYNC(GLOBAL, LOG_DEBUG, "Rank %d: -> Linked to coarser context (user_c).\n", simCtx->rank);
            }
            if (level < usermg->mglevels - 1) {
                currentUser->user_f = &mgctx[level+1].user[bi];
                LOG_ALLOW_SYNC(GLOBAL, LOG_DEBUG, "Rank %d: -> Linked to finer context (user_f).\n", simCtx->rank);
            }
        }
    }

    // Log a summary of the parsed flags on each rank.
    if (get_log_level() >= LOG_DEBUG && nblk > 0) {
        LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Final semi-coarsening configuration view:\n", simCtx->rank);
        for (PetscInt bi = 0; bi < nblk; ++bi) {
            LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d:   Block %d: i-semi=%d, j-semi=%d, k-semi=%d\n", simCtx->rank, bi, isc[bi], jsc[bi], ksc[bi]);
        }
    }

    // Clean up temporary arrays
    ierr = PetscFree3(isc, jsc, ksc); CHKERRQ(ierr);

    LOG_ALLOW(GLOBAL, LOG_INFO, "Context hierarchy allocation complete.\n");
    PetscFunctionReturn(0);
}

/**
 * @brief Initializes per-context solver state flags across the multigrid hierarchy.
 */
static PetscErrorCode SetupSolverParameters(SimCtx *simCtx)
{
    PetscFunctionBeginUser;

    LOG_ALLOW(GLOBAL, LOG_INFO, "-- Setting up solver parameters --\n");

    UserMG  *usermg = &simCtx->usermg;
    MGCtx   *mgctx  = usermg->mgctx;
    PetscInt nblk   = simCtx->block_number;

    for (PetscInt level = usermg->mglevels-1; level >= 0; level--) {
        for (PetscInt bi = 0; bi < nblk; bi++) {
            UserCtx *user = &mgctx[level].user[bi];
            LOG_ALLOW_SYNC(LOCAL, LOG_DEBUG, "Rank %d: Setting up parameters for level %d, block %d\n", simCtx->rank, level, bi);

            user->assignedA = PETSC_FALSE;
            user->multinullspace = PETSC_FALSE;
        }
    }
    PetscFunctionReturn(0);
}

/**
 * @brief The main orchestrator for setting up all grid-related components.
 *
 * Runs the setup phases in order: context hierarchy allocation, grid
 * dimensioning, DM creation, coordinate assignment, vector creation,
 * solver parameters, and grid metrics.
 */
PetscErrorCode SetupGridAndSolvers(SimCtx *simCtx)
{
    PetscErrorCode ierr;
    PetscFunctionBeginUser;

    LOG_ALLOW(GLOBAL, LOG_INFO, "--- Starting Grid and Solvers Setup ---\n");

    // Phase 1: Allocate the UserMG and UserCtx hierarchy
    ierr = AllocateContextHierarchy(simCtx); CHKERRQ(ierr);

    // Subsequent phases: grid dimensions, DMs, coordinates, vectors, parameters, metrics
    ierr = DefineAllGridDimensions(simCtx); CHKERRQ(ierr);
    ierr = InitializeAllGridDMs(simCtx); CHKERRQ(ierr);
    ierr = AssignAllGridCoordinates(simCtx); CHKERRQ(ierr);
    ierr = CreateAndInitializeAllVectors(simCtx); CHKERRQ(ierr);
    ierr = SetupSolverParameters(simCtx); CHKERRQ(ierr);
    ierr = CalculateAllGridMetrics(simCtx); CHKERRQ(ierr);

    LOG_ALLOW(GLOBAL, LOG_INFO, "--- Grid and Solvers Setup Complete ---\n");
    PetscFunctionReturn(0);
}

/**
 * @brief Creates and initializes all PETSc Vec objects for all fields.
 *
 * This function iterates through every UserCtx in the multigrid and multi-block
 * hierarchy. For each context, it creates the comprehensive set of global and
 * local PETSc Vecs required by the flow solver (e.g., Ucont, P, Nvert, metrics,
 * turbulence fields, etc.). Each vector is initialized to zero.
 *
 * @param simCtx The master SimCtx, containing the configured UserCtx hierarchy.
 * @return PetscErrorCode
 */
PetscErrorCode CreateAndInitializeAllVectors(SimCtx *simCtx)
{
    PetscErrorCode ierr;
    UserMG        *usermg = &simCtx->usermg;
    MGCtx         *mgctx  = usermg->mgctx;
    PetscInt       nblk   = simCtx->block_number;

    PetscFunctionBeginUser;

    LOG_ALLOW(GLOBAL, LOG_INFO, "Creating and initializing all simulation vectors...\n");

    for (PetscInt level = usermg->mglevels-1; level >= 0; level--) {
        for (PetscInt bi = 0; bi < nblk; bi++) {
            UserCtx *user = &mgctx[level].user[bi];

            if (!user->da || !user->fda) {
                SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_ARG_WRONGSTATE, "DMs not properly initialized in UserCtx before vector creation.");
            }

            LOG_ALLOW_SYNC(LOCAL, LOG_DEBUG, "Rank %d: Creating vectors for level %d, block %d\n", simCtx->rank, level, bi);

            // --- Group A: Primary Flow Fields (Global and Local) ---
            // These are the core solution variables.
            ierr = DMCreateGlobalVector(user->fda, &user->Ucont); CHKERRQ(ierr); ierr = VecSet(user->Ucont, 0.0); CHKERRQ(ierr);
            ierr = DMCreateGlobalVector(user->fda, &user->Ucat); CHKERRQ(ierr); ierr = VecSet(user->Ucat, 0.0); CHKERRQ(ierr);
            ierr = DMCreateGlobalVector(user->da, &user->P); CHKERRQ(ierr); ierr = VecSet(user->P, 0.0); CHKERRQ(ierr);
            ierr = DMCreateGlobalVector(user->da, &user->Nvert); CHKERRQ(ierr); ierr = VecSet(user->Nvert, 0.0); CHKERRQ(ierr);

            ierr = DMCreateLocalVector(user->fda, &user->lUcont); CHKERRQ(ierr); ierr = VecSet(user->lUcont, 0.0); CHKERRQ(ierr);
            ierr = DMCreateLocalVector(user->fda, &user->lUcat); CHKERRQ(ierr); ierr = VecSet(user->lUcat, 0.0); CHKERRQ(ierr);
            ierr = DMCreateLocalVector(user->da, &user->lP); CHKERRQ(ierr); ierr = VecSet(user->lP, 0.0); CHKERRQ(ierr);
            ierr = DMCreateLocalVector(user->da, &user->lNvert); CHKERRQ(ierr); ierr = VecSet(user->lNvert, 0.0); CHKERRQ(ierr);

            // --- Group B: Time-Stepping & Workspace Fields (Finest Level Only) ---
            if (level == usermg->mglevels - 1) {
                ierr = VecDuplicate(user->Ucont, &user->Ucont_o); CHKERRQ(ierr); ierr = VecSet(user->Ucont_o, 0.0); CHKERRQ(ierr);
                ierr = VecDuplicate(user->Ucont, &user->Ucont_rm1); CHKERRQ(ierr); ierr = VecSet(user->Ucont_rm1, 0.0); CHKERRQ(ierr);
                ierr = VecDuplicate(user->Ucat, &user->Ucat_o); CHKERRQ(ierr); ierr = VecSet(user->Ucat_o, 0.0); CHKERRQ(ierr);
                ierr = VecDuplicate(user->P, &user->P_o); CHKERRQ(ierr); ierr = VecSet(user->P_o, 0.0); CHKERRQ(ierr);
                ierr = VecDuplicate(user->lUcont, &user->lUcont_o); CHKERRQ(ierr); ierr = VecSet(user->lUcont_o, 0.0); CHKERRQ(ierr);
                ierr = VecDuplicate(user->lUcont, &user->lUcont_rm1); CHKERRQ(ierr); ierr = VecSet(user->lUcont_rm1, 0.0); CHKERRQ(ierr);
                ierr = DMCreateLocalVector(user->da, &user->lNvert_o); CHKERRQ(ierr); ierr = VecSet(user->lNvert_o, 0.0); CHKERRQ(ierr);
                ierr = VecDuplicate(user->Nvert, &user->Nvert_o); CHKERRQ(ierr); ierr = VecSet(user->Nvert_o, 0.0); CHKERRQ(ierr);
            }

            // --- Group C: Grid Metrics (Cell-Centered) ---
            ierr = DMCreateGlobalVector(user->fda, &user->Csi); CHKERRQ(ierr); ierr = VecSet(user->Csi, 0.0); CHKERRQ(ierr);
            ierr = VecDuplicate(user->Csi, &user->Eta); CHKERRQ(ierr); ierr = VecSet(user->Eta, 0.0); CHKERRQ(ierr);
            ierr = VecDuplicate(user->Csi, &user->Zet); CHKERRQ(ierr); ierr = VecSet(user->Zet, 0.0); CHKERRQ(ierr);
            ierr = DMCreateGlobalVector(user->da, &user->Aj); CHKERRQ(ierr); ierr = VecSet(user->Aj, 0.0); CHKERRQ(ierr);
            ierr = VecDuplicate(user->Aj, &user->Phi); CHKERRQ(ierr); ierr = VecSet(user->Phi, 0.0); CHKERRQ(ierr);

            ierr = DMCreateLocalVector(user->fda, &user->lCsi); CHKERRQ(ierr); ierr = VecSet(user->lCsi, 0.0); CHKERRQ(ierr);
            ierr = VecDuplicate(user->lCsi, &user->lEta); CHKERRQ(ierr); ierr = VecSet(user->lEta, 0.0); CHKERRQ(ierr);
            ierr = VecDuplicate(user->lCsi, &user->lZet); CHKERRQ(ierr); ierr = VecSet(user->lZet, 0.0); CHKERRQ(ierr);
            ierr = DMCreateLocalVector(user->da, &user->lAj); CHKERRQ(ierr); ierr = VecSet(user->lAj, 0.0); CHKERRQ(ierr);
            ierr = VecDuplicate(user->lAj, &user->lPhi); CHKERRQ(ierr); ierr = VecSet(user->lPhi, 0.0); CHKERRQ(ierr);

            // --- Group D: Grid Metrics (Face-Centered) ---
            // Vector metrics are duplicated from Csi (DOF=3, fda-based)
            ierr = VecDuplicate(user->Csi, &user->ICsi); CHKERRQ(ierr); ierr = VecSet(user->ICsi, 0.0); CHKERRQ(ierr);
            ierr = VecDuplicate(user->Csi, &user->IEta); CHKERRQ(ierr); ierr = VecSet(user->IEta, 0.0); CHKERRQ(ierr);
            ierr = VecDuplicate(user->Csi, &user->IZet); CHKERRQ(ierr); ierr = VecSet(user->IZet, 0.0); CHKERRQ(ierr);
            ierr = VecDuplicate(user->Csi, &user->JCsi); CHKERRQ(ierr); ierr = VecSet(user->JCsi, 0.0); CHKERRQ(ierr);
            ierr = VecDuplicate(user->Csi, &user->JEta); CHKERRQ(ierr); ierr = VecSet(user->JEta, 0.0); CHKERRQ(ierr);
            ierr = VecDuplicate(user->Csi, &user->JZet); CHKERRQ(ierr); ierr = VecSet(user->JZet, 0.0); CHKERRQ(ierr);
            ierr = VecDuplicate(user->Csi, &user->KCsi); CHKERRQ(ierr); ierr = VecSet(user->KCsi, 0.0); CHKERRQ(ierr);
            ierr = VecDuplicate(user->Csi, &user->KEta); CHKERRQ(ierr); ierr = VecSet(user->KEta, 0.0); CHKERRQ(ierr);
            ierr = VecDuplicate(user->Csi, &user->KZet); CHKERRQ(ierr); ierr = VecSet(user->KZet, 0.0); CHKERRQ(ierr);
            // Scalar metrics are duplicated from Aj (DOF=1, da-based)
            ierr = VecDuplicate(user->Aj, &user->IAj); CHKERRQ(ierr); ierr = VecSet(user->IAj, 0.0); CHKERRQ(ierr);
            ierr = VecDuplicate(user->Aj, &user->JAj); CHKERRQ(ierr); ierr = VecSet(user->JAj, 0.0); CHKERRQ(ierr);
            ierr = VecDuplicate(user->Aj, &user->KAj); CHKERRQ(ierr); ierr = VecSet(user->KAj, 0.0); CHKERRQ(ierr);

            ierr = VecDuplicate(user->lCsi, &user->lICsi); CHKERRQ(ierr); ierr = VecSet(user->lICsi, 0.0); CHKERRQ(ierr);
            ierr = VecDuplicate(user->lCsi, &user->lIEta); CHKERRQ(ierr); ierr = VecSet(user->lIEta, 0.0); CHKERRQ(ierr);
            ierr = VecDuplicate(user->lCsi, &user->lIZet); CHKERRQ(ierr); ierr = VecSet(user->lIZet, 0.0); CHKERRQ(ierr);
            ierr = VecDuplicate(user->lCsi, &user->lJCsi); CHKERRQ(ierr); ierr = VecSet(user->lJCsi, 0.0); CHKERRQ(ierr);
            ierr = VecDuplicate(user->lCsi, &user->lJEta); CHKERRQ(ierr); ierr = VecSet(user->lJEta, 0.0); CHKERRQ(ierr);
            ierr = VecDuplicate(user->lCsi, &user->lJZet); CHKERRQ(ierr); ierr = VecSet(user->lJZet, 0.0); CHKERRQ(ierr);
            ierr = VecDuplicate(user->lCsi, &user->lKCsi); CHKERRQ(ierr); ierr = VecSet(user->lKCsi, 0.0); CHKERRQ(ierr);
            ierr = VecDuplicate(user->lCsi, &user->lKEta); CHKERRQ(ierr); ierr = VecSet(user->lKEta, 0.0); CHKERRQ(ierr);
            ierr = VecDuplicate(user->lCsi, &user->lKZet); CHKERRQ(ierr); ierr = VecSet(user->lKZet, 0.0); CHKERRQ(ierr);

            ierr = VecDuplicate(user->lAj, &user->lIAj); CHKERRQ(ierr); ierr = VecSet(user->lIAj, 0.0); CHKERRQ(ierr);
            ierr = VecDuplicate(user->lAj, &user->lJAj); CHKERRQ(ierr); ierr = VecSet(user->lJAj, 0.0); CHKERRQ(ierr);
            ierr = VecDuplicate(user->lAj, &user->lKAj); CHKERRQ(ierr); ierr = VecSet(user->lKAj, 0.0); CHKERRQ(ierr);

            // --- Group E: Cell/Face Center Coordinates and Grid Spacing ---
            ierr = DMCreateGlobalVector(user->fda, &user->Cent); CHKERRQ(ierr); ierr = VecSet(user->Cent, 0.0); CHKERRQ(ierr);
            ierr = DMCreateLocalVector(user->fda, &user->lCent); CHKERRQ(ierr); ierr = VecSet(user->lCent, 0.0); CHKERRQ(ierr);

            ierr = VecDuplicate(user->Cent, &user->GridSpace); CHKERRQ(ierr); ierr = VecSet(user->GridSpace, 0.0); CHKERRQ(ierr);
            ierr = VecDuplicate(user->lCent, &user->lGridSpace); CHKERRQ(ierr); ierr = VecSet(user->lGridSpace, 0.0); CHKERRQ(ierr);

            // Face-center coordinate vectors are GLOBAL to hold calculated values before scattering
            ierr = VecDuplicate(user->Cent, &user->Centx); CHKERRQ(ierr); ierr = VecSet(user->Centx, 0.0); CHKERRQ(ierr);
            ierr = VecDuplicate(user->Cent, &user->Centy); CHKERRQ(ierr); ierr = VecSet(user->Centy, 0.0); CHKERRQ(ierr);
            ierr = VecDuplicate(user->Cent, &user->Centz); CHKERRQ(ierr); ierr = VecSet(user->Centz, 0.0); CHKERRQ(ierr);

            if (level == usermg->mglevels - 1) {
                // --- Group F: Turbulence Models (Finest Level Only) ---
                if (simCtx->les || simCtx->rans) {
                    ierr = DMCreateGlobalVector(user->da, &user->Nu_t); CHKERRQ(ierr); ierr = VecSet(user->Nu_t, 0.0); CHKERRQ(ierr);
                    ierr = DMCreateLocalVector(user->da, &user->lNu_t); CHKERRQ(ierr); ierr = VecSet(user->lNu_t, 0.0); CHKERRQ(ierr);

                    // Note: Add any other vectors from the legacy MG_Initial here as needed,
                    // e.g. Rhs, Forcing, and further turbulence Vecs (K_Omega, CS, ...).
                }
                // --- Group G: Particle Methods ---
                if (simCtx->np > 0) {
                    ierr = DMCreateGlobalVector(user->da, &user->ParticleCount); CHKERRQ(ierr); ierr = VecSet(user->ParticleCount, 0.0); CHKERRQ(ierr);

                    ierr = DMCreateGlobalVector(user->da, &user->Psi); CHKERRQ(ierr); ierr = VecSet(user->Psi, 0.0); CHKERRQ(ierr);

                    LOG_ALLOW(GLOBAL, LOG_DEBUG, "ParticleCount & Scalar(Psi) created for %d particles.\n", simCtx->np);
                }
            }

            // --- Group H: Boundary Condition vectors needed by the legacy FormBCS ---
            ierr = DMCreateGlobalVector(user->fda, &user->Bcs.Ubcs); CHKERRQ(ierr);
            ierr = VecSet(user->Bcs.Ubcs, 0.0); CHKERRQ(ierr);
            ierr = DMCreateGlobalVector(user->fda, &user->Bcs.Uch); CHKERRQ(ierr);
            ierr = VecSet(user->Bcs.Uch, 0.0); CHKERRQ(ierr);

            if (level == usermg->mglevels - 1) {
                if (simCtx->exec_mode == EXEC_MODE_POSTPROCESSOR) {
                    LOG_ALLOW(LOCAL, LOG_DEBUG, "Post-processor mode detected. Allocating derived field vectors.\n");

                    ierr = VecDuplicate(user->P, &user->P_nodal); CHKERRQ(ierr);
                    ierr = VecSet(user->P_nodal, 0.0); CHKERRQ(ierr);

                    ierr = VecDuplicate(user->Ucat, &user->Ucat_nodal); CHKERRQ(ierr);
                    ierr = VecSet(user->Ucat_nodal, 0.0); CHKERRQ(ierr);

                    ierr = VecDuplicate(user->P, &user->Qcrit); CHKERRQ(ierr);
                    ierr = VecSet(user->Qcrit, 0.0); CHKERRQ(ierr);
                } else {
                    user->P_nodal = NULL;
                    user->Ucat_nodal = NULL;
                    user->Qcrit = NULL;
                }
            }
        }
    }

    LOG_ALLOW(GLOBAL, LOG_INFO, "All simulation vectors created and initialized.\n");
    PetscFunctionReturn(0);
}

/**
 * @brief Updates the local vector (including ghost points) from its corresponding global vector.
 *
 * This function identifies the correct global vector, local vector, and DM based on the
 * provided fieldName and performs the standard PETSc DMGlobalToLocalBegin/End sequence.
 * Includes optional debugging output (max norms before/after).
 *
 * @param user      The UserCtx structure containing the vectors and DMs.
 * @param fieldName The name of the field to update ("Ucat", "Ucont", "P", "Nvert", etc.).
 *
 * @return PetscErrorCode 0 on success, non-zero on failure.
 *
 * @note This function assumes the global vector associated with fieldName has already
 *       been populated with the desired data (including any boundary conditions).
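 *
 * @par Example
 * @code
 * // After filling the global pressure vector (including boundary values):
 * ierr = UpdateLocalGhosts(user, "P"); CHKERRQ(ierr); // refreshes user->lP
 * @endcode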
 */
PetscErrorCode UpdateLocalGhosts(UserCtx* user, const char *fieldName)
{
    PetscErrorCode ierr;
    PetscMPIInt    rank;
    Vec            globalVec = NULL;
    Vec            localVec  = NULL;
    DM             dm        = NULL; // The DM associated with this field pair

    PetscFunctionBeginUser; // Use User version for application code
    ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
    LOG_ALLOW(GLOBAL, LOG_INFO, "Rank %d: Starting ghost update for field '%s'.\n", rank, fieldName);

    // --- 1. Identify the correct Vectors and DM ---
    if (strcmp(fieldName, "Ucat") == 0) {
        globalVec = user->Ucat;
        localVec  = user->lUcat;
        dm        = user->fda;
    } else if (strcmp(fieldName, "Ucont") == 0) {
        globalVec = user->Ucont;
        localVec  = user->lUcont;
        dm        = user->fda;
    } else if (strcmp(fieldName, "P") == 0) {
        globalVec = user->P;
        localVec  = user->lP;
        dm        = user->da;
    } else if (strcmp(fieldName, "Csi") == 0) {
        globalVec = user->Csi;
        localVec  = user->lCsi;
        dm        = user->fda;
    } else if (strcmp(fieldName, "Eta") == 0) {
        globalVec = user->Eta;
        localVec  = user->lEta;
        dm        = user->fda;
    } else if (strcmp(fieldName, "Zet") == 0) {
        globalVec = user->Zet;
        localVec  = user->lZet;
        dm        = user->fda;
    } else if (strcmp(fieldName, "Nvert") == 0) {
        globalVec = user->Nvert;
        localVec  = user->lNvert;
        dm        = user->da;
        // Add other fields as needed
    } else if (strcmp(fieldName, "Aj") == 0) {
        globalVec = user->Aj;
        localVec  = user->lAj;
        dm        = user->da;
    } else if (strcmp(fieldName, "Cent") == 0) {
        globalVec = user->Cent;
        localVec  = user->lCent;
        dm        = user->fda;
    } else if (strcmp(fieldName, "GridSpace") == 0) {
        globalVec = user->GridSpace;
        localVec  = user->lGridSpace;
        dm        = user->fda;
    } else if (strcmp(fieldName, "ICsi") == 0) {
        globalVec = user->ICsi;
        localVec  = user->lICsi;
        dm        = user->fda;
    } else if (strcmp(fieldName, "IEta") == 0) {
        globalVec = user->IEta;
        localVec  = user->lIEta;
        dm        = user->fda;
    } else if (strcmp(fieldName, "IZet") == 0) {
        globalVec = user->IZet;
        localVec  = user->lIZet;
        dm        = user->fda;
    } else if (strcmp(fieldName, "JCsi") == 0) {
        globalVec = user->JCsi;
        localVec  = user->lJCsi;
        dm        = user->fda;
    } else if (strcmp(fieldName, "JEta") == 0) {
        globalVec = user->JEta;
        localVec  = user->lJEta;
        dm        = user->fda;
    } else if (strcmp(fieldName, "JZet") == 0) {
        globalVec = user->JZet;
        localVec  = user->lJZet;
        dm        = user->fda;
    } else if (strcmp(fieldName, "KCsi") == 0) {
        globalVec = user->KCsi;
        localVec  = user->lKCsi;
        dm        = user->fda;
    } else if (strcmp(fieldName, "KEta") == 0) {
        globalVec = user->KEta;
        localVec  = user->lKEta;
        dm        = user->fda;
    } else if (strcmp(fieldName, "KZet") == 0) {
        globalVec = user->KZet;
        localVec  = user->lKZet;
        dm        = user->fda;
    } else if (strcmp(fieldName, "IAj") == 0) {
        globalVec = user->IAj;
        localVec  = user->lIAj;
        dm        = user->da;
    } else if (strcmp(fieldName, "JAj") == 0) {
        globalVec = user->JAj;
        localVec  = user->lJAj;
        dm        = user->da;
    } else if (strcmp(fieldName, "KAj") == 0) {
        globalVec = user->KAj;
        localVec  = user->lKAj;
        dm        = user->da;
    } else if (strcmp(fieldName, "Phi") == 0) {
        globalVec = user->Phi;
        localVec  = user->lPhi;
        dm        = user->da;
    } else {
        SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_UNKNOWN_TYPE, "Field '%s' not recognized for ghost update.", fieldName);
    }

    // --- 2. Check if components were found ---
    if (!globalVec) {
        SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Global vector for field '%s' is NULL.", fieldName);
    }
    if (!localVec) {
        SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Local vector for field '%s' is NULL.", fieldName);
    }
    if (!dm) {
        SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "DM for field '%s' is NULL.", fieldName);
    }

    LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Identified components for '%s': DM=%p, GlobalVec=%p, LocalVec=%p.\n",
              rank, fieldName, (void*)dm, (void*)globalVec, (void*)localVec);

    // --- 3. Optional Debugging: Norm Before Update ---
    if (get_log_level() == LOG_DEBUG && is_function_allowed(__func__)) {
        PetscReal norm_global_before;
        ierr = VecNorm(globalVec, NORM_INFINITY, &norm_global_before); CHKERRQ(ierr);
        LOG_ALLOW(GLOBAL, LOG_INFO, "Max norm '%s' (Global) BEFORE Ghost Update: %g\n", fieldName, norm_global_before);
        // Optional: norm of the local vector before the update (may contain old ghost values)
        // PetscReal norm_local_before;
        // ierr = VecNorm(localVec, NORM_INFINITY, &norm_local_before); CHKERRQ(ierr);
        // LOG_ALLOW(GLOBAL, LOG_DEBUG, "Max norm '%s' (Local) BEFORE Ghost Update: %g\n", fieldName, norm_local_before);
    }

    // --- 4. Perform the Global-to-Local Transfer (Ghost Update) ---
    LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Calling DMGlobalToLocalBegin/End for '%s'.\n", rank, fieldName);
    ierr = DMGlobalToLocalBegin(dm, globalVec, INSERT_VALUES, localVec); CHKERRQ(ierr);
    ierr = DMGlobalToLocalEnd(dm, globalVec, INSERT_VALUES, localVec); CHKERRQ(ierr);
    LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Completed DMGlobalToLocalBegin/End for '%s'.\n", rank, fieldName);

    // --- 5. Optional Debugging: Norm After Update ---
    if (get_log_level() == LOG_DEBUG && is_function_allowed(__func__)) {
        PetscReal norm_local_after;
        ierr = VecNorm(localVec, NORM_INFINITY, &norm_local_after); CHKERRQ(ierr);
        LOG_ALLOW(GLOBAL, LOG_INFO, "Max norm '%s' (Local) AFTER Ghost Update: %g\n", fieldName, norm_local_after);

        // --- 6. Optional Debugging: Specific Point Checks (Example for Ucat on Rank 0/1) ---
        // (Keep this conditional if it's only for specific debug scenarios)
        if (strcmp(fieldName, "Ucat") == 0) { // Only do detailed checks for Ucat for now
            PetscMPIInt rank_test;
            MPI_Comm_rank(PETSC_COMM_WORLD, &rank_test);

            // Get local info needed for indexing checks
            DMDALocalInfo info_check;
            ierr = DMDAGetLocalInfo(dm, &info_check); CHKERRQ(ierr); // Use the correct dm

            // Buffer for array pointer
            Cmpnts ***lUcat_arr_test = NULL;
            PetscErrorCode ierr_test = 0;

            LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Testing '%s' access immediately after ghost update...\n", rank_test, fieldName);
            ierr_test = DMDAVecGetArrayDOFRead(dm, localVec, &lUcat_arr_test); // Use correct dm and localVec

            if (ierr_test) {
                LOG_ALLOW(LOCAL, LOG_ERROR, "Rank %d: ERROR %d getting '%s' array after ghost update!\n", rank_test, ierr_test, fieldName);
            } else if (!lUcat_arr_test) {
                LOG_ALLOW(LOCAL, LOG_ERROR, "Rank %d: ERROR NULL pointer getting '%s' array after ghost update!\n", rank_test, fieldName);
            }
            else {
                // Check owned interior point (e.g., first interior point)
                PetscInt k_int = info_check.zs + (info_check.zm > 1 ? 1 : 0); // Global k index (at least zs+1 if possible)
                PetscInt j_int = info_check.ys + (info_check.ym > 1 ? 1 : 0); // Global j index
                PetscInt i_int = info_check.xs + (info_check.xm > 1 ? 1 : 0); // Global i index

                // Ensure indices are within global bounds if the domain is very small:
                // clamp k_int to [1 .. mz-2]
                if (k_int >= info_check.mz - 1) {
                    k_int = info_check.mz - 2;
                }
                if (k_int < 1) {
                    k_int = 1;
                }

                // clamp j_int to [1 .. my-2]
                if (j_int >= info_check.my - 1) {
                    j_int = info_check.my - 2;
                }
                if (j_int < 1) {
                    j_int = 1;
                }

                // clamp i_int to [1 .. mx-2]
                if (i_int >= info_check.mx - 1) {
                    i_int = info_check.mx - 2;
                }
                if (i_int < 1) {
                    i_int = 1;
                }

                // Only attempt read if indices are actually owned (relevant for multi-rank)
                if (k_int >= info_check.zs && k_int < info_check.zs + info_check.zm &&
                    j_int >= info_check.ys && j_int < info_check.ys + info_check.ym &&
                    i_int >= info_check.xs && i_int < info_check.xs + info_check.xm)
                {
                    LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Attempting test read OWNED INTERIOR [%d][%d][%d] (Global)\n", rank_test, k_int, j_int, i_int);
                    Cmpnts test_val_owned_interior = lUcat_arr_test[k_int][j_int][i_int];
                    LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: SUCCESS reading owned interior: x=%g\n", rank_test, test_val_owned_interior.x);
                } else {
                    LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Skipping interior test read for non-owned index [%d][%d][%d].\n", rank_test, k_int, j_int, i_int);
                }

                // Check owned boundary point (e.g., first owned point)
                PetscInt k_bnd = info_check.zs; // Global k index
                PetscInt j_bnd = info_check.ys; // Global j index
                PetscInt i_bnd = info_check.xs; // Global i index
                LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Attempting test read OWNED BOUNDARY [%d][%d][%d] (Global)\n", rank_test, k_bnd, j_bnd, i_bnd);
                Cmpnts test_val_owned_boundary = lUcat_arr_test[k_bnd][j_bnd][i_bnd];
                LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: SUCCESS reading owned boundary: x=%g\n", rank_test, test_val_owned_boundary.x);

                // Check ghost point (e.g., one layer below in k, if applicable)
                if (info_check.zs > 0) { // Only if there's a rank below
                    PetscInt k_ghost = info_check.zs - 1;
                    PetscInt j_ghost = info_check.ys; // Use start of owned y, simple example
                    PetscInt i_ghost = info_check.xs; // Use start of owned x, simple example
                    LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Attempting test read GHOST [%d][%d][%d] (Global)\n", rank_test, k_ghost, j_ghost, i_ghost);
                    Cmpnts test_val_ghost = lUcat_arr_test[k_ghost][j_ghost][i_ghost];
                    LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: SUCCESS reading ghost: x=%g\n", rank_test, test_val_ghost.x);
                } else {
                    LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Skipping ghost test read (zs=0).\n", rank_test);
                }

                // Restore the array
                ierr_test = DMDAVecRestoreArrayDOFRead(dm, localVec, &lUcat_arr_test);
                if (ierr_test) { LOG_ALLOW(LOCAL, LOG_ERROR, "Rank %d: ERROR %d restoring '%s' array after test read!\n", rank_test, ierr_test, fieldName); }
                LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d: Finished testing '%s' access.\n", rank_test, fieldName);
            }
        } // end if Ucat
    } // end debug logging check

    LOG_ALLOW(GLOBAL, LOG_INFO, "Rank %d: Completed ghost update for field '%s'.\n", rank, fieldName);
    PetscFunctionReturn(0);
}

/**
 * @brief (Orchestrator) Sets up all boundary conditions for the simulation.
 */
PetscErrorCode SetupBoundaryConditions(SimCtx *simCtx)
{
    PetscErrorCode ierr;
    PetscFunctionBeginUser;

    LOG_ALLOW(GLOBAL, LOG_INFO, "--- Setting up Boundary Conditions ---\n");

    // --- Parse and adapt for each block on the finest level ---
    LOG_ALLOW(GLOBAL, LOG_INFO, "Parsing BC configuration files and adapting to the legacy system for the finest grid.\n");
    UserCtx *user_finest = simCtx->usermg.mgctx[simCtx->usermg.mglevels-1].user;
    for (PetscInt bi = 0; bi < simCtx->block_number; bi++) {
        const char *current_bc_filename = simCtx->bcs_files[bi];
        LOG_ALLOW(GLOBAL, LOG_DEBUG, " -> Processing Block %d using config file '%s'\n", bi, current_bc_filename);

        // This will populate user_finest[bi].boundary_faces
        ierr = ParseAllBoundaryConditions(&user_finest[bi], current_bc_filename); CHKERRQ(ierr);

        // Call the adapter to translate into the legacy format
        ierr = TranslateModernBCsToLegacy(&user_finest[bi]); CHKERRQ(ierr);
    }

    LOG_ALLOW(GLOBAL, LOG_INFO, "--- Boundary Conditions setup complete ---\n");

    PetscFunctionReturn(0);
}

/**
 * @brief Allocates a 3D array of PetscReal values using PetscCalloc.
 *
 * This function dynamically allocates memory for a 3D array of PetscReal values
 * with dimensions nz (layers) x ny (rows) x nx (columns). It uses PetscCalloc1
 * to ensure the memory is zero-initialized.
 *
 * The allocation is done in three steps:
 *   1. Allocate an array of nz pointers (one for each layer).
 *   2. Allocate a contiguous block for nz*ny row pointers and assign each layer's row pointers.
 *   3. Allocate a contiguous block for all nz*ny*nx PetscReal values.
 *
 * This setup allows the array to be accessed as array[k][j][i], and the memory
 * for the data is contiguous, which improves cache efficiency.
 *
 * @param[out] array Pointer to the 3D array to be allocated.
 * @param[in]  nz    Number of layers (z-direction).
 * @param[in]  ny    Number of rows (y-direction).
 * @param[in]  nx    Number of columns (x-direction).
 *
 * @return PetscErrorCode 0 on success, nonzero on failure.
1084 */
1085PetscErrorCode Allocate3DArrayScalar(PetscReal ****array, PetscInt nz, PetscInt ny, PetscInt nx)
1086{
1087 PetscErrorCode ierr;
1088 PetscReal ***data;
1089 PetscReal *dataContiguous;
1090 PetscInt k, j;
1091
1092 PetscFunctionBegin;
1093 /* Step 1: Allocate memory for an array of nz layer pointers (zero-initialized) */
1094 ierr = PetscCalloc1(nz, &data); CHKERRQ(ierr);
1095
1096 /* Step 2: Allocate memory for all row pointers (nz * ny pointers) */
1097 ierr = PetscCalloc1(nz * ny, &data[0]); CHKERRQ(ierr);
1098 for (k = 1; k < nz; k++) {
1099 data[k] = data[0] + k * ny;
1100 }
1101
1102 /* Step 3: Allocate one contiguous block for all data elements (nz*ny*nx) */
1103 ierr = PetscCalloc1(nz * ny * nx, &dataContiguous); CHKERRQ(ierr);
1104
1105 /* Build the 3D pointer structure: each row pointer gets the correct segment of data */
1106 for (k = 0; k < nz; k++) {
1107 for (j = 0; j < ny; j++) {
1108 data[k][j] = dataContiguous + (k * ny + j) * nx;
1109 /* Memory is already zeroed by PetscCalloc1, so no manual initialization is needed */
1110 }
1111 }
1112 *array = data;
1113 PetscFunctionReturn(0);
1114}
1115
1116/**
1117 * @brief Deallocates a 3D array of PetscReal values allocated by Allocate3DArrayScalar.
1118 *
1119 * This function frees the memory allocated for a 3D array of PetscReal values.
1120 * It assumes the memory was allocated using Allocate3DArrayScalar, which allocated
1121 * three separate memory blocks: one for the contiguous data, one for the row pointers,
1122 * and one for the layer pointers.
1123 *
1124 * @param[in] array Pointer to the 3D array to be deallocated.
1125 * @param[in] nz Number of layers (z-direction).
1126 * @param[in] ny Number of rows (y-direction).
1127 *
1128 * @return PetscErrorCode 0 on success, nonzero on failure.
1129 */
1130PetscErrorCode Deallocate3DArrayScalar(PetscReal ***array, PetscInt nz, PetscInt ny)
1131{
1132 PetscErrorCode ierr;
1133
1134 PetscFunctionBegin;
1135 if (!array || !array[0] || !array[0][0] ) { // Added more robust check
1136 LOG_ALLOW(GLOBAL, LOG_WARNING, "Deallocate3DArrayScalar called with potentially unallocated or NULL array.\n");
1137 if (array) {
1138 if (array[0]) { // Check if row pointers might exist
1139 // Cannot safely access array[0][0] if array[0] might be invalid/freed
1140 // Standard deallocation below assumes valid pointers.
1141 ierr = PetscFree(array[0]); CHKERRQ(ierr); // Free row pointers if they exist
1142 }
1143 ierr = PetscFree(array); CHKERRQ(ierr); // Free layer pointers if they exist
1144 }
1145 PetscFunctionReturn(0);
1146 }
1147
1148 // --- Standard Deallocation (assuming valid allocation) ---
1149
1150 /* 1. Free the contiguous block of PetscReal values.
1151 The starting address was stored in array[0][0]. */
1152 ierr = PetscFree(array[0][0]); CHKERRQ(ierr); // Free the ACTUAL DATA
1153
1154 /* 2. Free the contiguous block of row pointers.
1155 The starting address was stored in array[0]. */
1156 ierr = PetscFree(array[0]); CHKERRQ(ierr); // Free the ROW POINTERS
1157
1158 /* 3. Free the layer pointer array.
1159 The starting address is 'array' itself. */
1160 ierr = PetscFree(array); CHKERRQ(ierr); // Free the LAYER POINTERS
1161
1162 PetscFunctionReturn(0);
1163}
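
/*
 * A minimal usage sketch for the scalar allocate/deallocate pair above
 * (sizes and the variable name 'phi' are illustrative; error handling
 * abbreviated). The vector (Cmpnts) variants below follow the same pattern.
 *
 *   PetscReal ***phi = NULL;
 *   ierr = Allocate3DArrayScalar(&phi, nz, ny, nx); CHKERRQ(ierr);
 *   phi[k][j][i] = 1.0;   // k-major access; the data block is contiguous in i
 *   ierr = Deallocate3DArrayScalar(phi, nz, ny); CHKERRQ(ierr);
 */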
1164
1165/**
1166 * @brief Allocates a 3D array of Cmpnts structures using PetscCalloc.
1167 *
1168 * This function dynamically allocates memory for a 3D array of Cmpnts (vector) structures
1169 * with dimensions nz (layers) x ny (rows) x nx (columns). It uses PetscCalloc1 to ensure
1170 * that all allocated memory is zero-initialized.
1171 *
1172 * The allocation procedure is similar to Allocate3DArrayScalar:
1173 * 1. Allocate an array of nz pointers (one for each layer).
1174 * 2. Allocate a contiguous block for nz*ny row pointers.
1175 * 3. Allocate one contiguous block for nz*ny*nx Cmpnts structures.
1176 *
1177 * After allocation, the array can be accessed as array[k][j][i] and each element
1178 * (a Cmpnts structure) will have its x, y, and z fields initialized to 0.0.
1179 *
1180 * @param[out] array Pointer to the 3D array to be allocated.
1181 * @param[in] nz Number of layers in the z-direction.
1182 * @param[in] ny Number of rows in the y-direction.
1183 * @param[in] nx Number of columns in the x-direction.
1184 *
1185 * @return PetscErrorCode 0 on success, nonzero on failure.
1186 */
1187PetscErrorCode Allocate3DArrayVector(Cmpnts ****array, PetscInt nz, PetscInt ny, PetscInt nx)
1188{
1189 PetscErrorCode ierr;
1190 Cmpnts ***data;
1191 Cmpnts *dataContiguous;
1192 PetscInt k, j;
1193 PetscMPIInt rank;
1194
1195 PetscFunctionBegin;
1196
 1197 ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank); CHKERRQ(ierr);
1198
1199 /* Step 1: Allocate memory for nz layer pointers (zeroed) */
1200 ierr = PetscCalloc1(nz, &data); CHKERRQ(ierr);
1201
1202 LOG_ALLOW(LOCAL,LOG_DEBUG," [Rank %d] memory allocated for outermost layer (%d k-layer pointers).\n",rank,nz);
1203
1204 /* Step 2: Allocate memory for all row pointers (nz * ny pointers) */
1205 ierr = PetscCalloc1(nz * ny, &data[0]); CHKERRQ(ierr);
1206 for (k = 1; k < nz; k++) {
1207 data[k] = data[0] + k * ny;
1208 }
1209
1210 LOG_ALLOW(LOCAL,LOG_DEBUG,"[Rank %d] memory allocated for %dx%d row pointers.\n",rank,nz,ny);
1211
1212 /* Step 3: Allocate one contiguous block for nz*ny*nx Cmpnts structures (zeroed) */
1213 ierr = PetscCalloc1(nz * ny * nx, &dataContiguous); CHKERRQ(ierr);
1214
 1215 LOG_ALLOW(GLOBAL,LOG_DEBUG,"[Rank %d] memory allocated for contiguous block of %dx%dx%d Cmpnts structures.\n",rank,nz,ny,nx);
1216
1217 /* Build the 3D pointer structure for vector data */
1218 for (k = 0; k < nz; k++) {
1219 for (j = 0; j < ny; j++) {
1220 data[k][j] = dataContiguous + (k * ny + j) * nx;
1221 /* The PetscCalloc1 call has already initialized each Cmpnts to zero. */
1222 }
1223 }
1224
1225 LOG_ALLOW(GLOBAL,LOG_DEBUG,"[Rank %d] 3D pointer structure for vector data created. \n",rank);
1226
1227 *array = data;
1228 PetscFunctionReturn(0);
1229}
1230
1231/**
1232 * @brief Deallocates a 3D array of Cmpnts structures allocated by Allocate3DArrayVector.
1233 *
1234 * This function frees the memory allocated for a 3D array of Cmpnts structures.
1235 * It assumes the memory was allocated using Allocate3DArrayVector, which created three
1236 * separate memory blocks: one for the contiguous vector data, one for the row pointers,
1237 * and one for the layer pointers.
1238 *
1239 * @param[in] array Pointer to the 3D array to be deallocated.
1240 * @param[in] nz Number of layers in the z-direction.
1241 * @param[in] ny Number of rows in the y-direction.
1242 *
1243 * @return PetscErrorCode 0 on success, nonzero on failure.
1244 */
1245 PetscErrorCode Deallocate3DArrayVector(Cmpnts ***array, PetscInt nz, PetscInt ny)
1246{
1247 PetscErrorCode ierr;
1248
1249 PetscFunctionBegin;
1250 // If array is NULL or hasn't been allocated properly, just return.
1251 if (!array || !array[0] || !array[0][0] ) {
1252 LOG_ALLOW(GLOBAL, LOG_WARNING, "Deallocate3DArrayVector called with potentially unallocated or NULL array.\n");
1253 // Attempt to free what might exist, but be cautious
1254 if (array) {
1255 if (array[0]) { // Check if row pointers were allocated
1256 // We don't have a direct pointer to the contiguous data block
1257 // saved separately in this allocation scheme. The allocation relies
1258 // on array[0][0] pointing to it. If array[0] was freed first,
1259 // accessing array[0][0] is unsafe.
1260 // The allocation scheme where the contiguous data block is not
1261 // stored separately makes safe deallocation tricky if freeing
1262 // happens out of order or if parts are NULL.
1263
1264 // A SAFER ALLOCATION/DEALLOCATION would store the data pointer separately.
1265 // Given the current allocation scheme, the order MUST be:
1266 // 1. Free the data block (pointed to by array[0][0])
1267 // 2. Free the row pointer block (pointed to by array[0])
1268 // 3. Free the layer pointer block (pointed to by array)
1269
 1270 // Assuming the allocation succeeded and the pointers are valid, get the
 1271 // pointer to the contiguous data block *before* freeing the row pointers.
1272 Cmpnts *dataContiguous = array[0][0];
1273 ierr = PetscFree(dataContiguous); CHKERRQ(ierr); // Free data block
1274
1275 // Now free the row pointers block
1276 ierr = PetscFree(array[0]); CHKERRQ(ierr); // Free row pointers
1277
1278 }
1279 // Finally, free the array of layer pointers
1280 ierr = PetscFree(array); CHKERRQ(ierr);
1281 }
1282 PetscFunctionReturn(0); // Return gracefully if input was NULL initially
1283 }
1284
1285
1286 // --- Standard Deallocation (assuming valid allocation) ---
1287
1288 /* 1. Free the contiguous block of Cmpnts structures.
1289 The starting address was stored in array[0][0] by Allocate3DArrayVector. */
1290 ierr = PetscFree(array[0][0]); CHKERRQ(ierr); // Free the ACTUAL DATA
1291
1292 /* 2. Free the contiguous block of row pointers.
1293 The starting address was stored in array[0]. */
1294 ierr = PetscFree(array[0]); CHKERRQ(ierr); // Free the ROW POINTERS
1295
1296 /* 3. Free the layer pointer array.
1297 The starting address is 'array' itself. */
1298 ierr = PetscFree(array); CHKERRQ(ierr); // Free the LAYER POINTERS
1299
1300 PetscFunctionReturn(0);
1301}
1302
1303/**
1304 * @brief Gets the global starting index of cells owned by this rank and the number of such cells.
1305 *
1306 * A cell's global index is considered the same as its origin node's global index.
1307 * This function assumes a node-centered DMDA where `info_nodes` provides all necessary
1308 * information:
1309 * - `info_nodes->xs, ys, zs`: Global starting index of the first node owned by this rank (excluding ghosts).
1310 * - `info_nodes->xm, ym, zm`: Number of nodes owned by this rank in each dimension (excluding ghosts).
1311 * - `info_nodes->mx, my, mz`: Total number of global nodes in each dimension for the entire domain.
1312 *
1313 * A cell `C_k` (0-indexed) is defined by its origin node `N_k` and extends to node `N_{k+1}`.
1314 * Thus, the last node in the global domain cannot be an origin for a cell. The last possible
1315 * cell origin node index is `GlobalNodesInDim - 2`.
1316 *
1317 * @param[in] info_nodes Pointer to the DMDALocalInfo struct for the current rank.
1318 * This struct contains local ownership information (xs, xm, etc.)
1319 * and global domain dimensions (mx, my, mz for nodes).
1320 * @param[in] dim The dimension for which to get the cell range (0 for i/x, 1 for j/y, 2 for k/z).
1321 * @param[out] xs_cell_global_out Pointer to store the global index of the first cell whose origin node
1322 * is owned by this rank. If the rank owns no valid cell origins in this
1323 * dimension, this will be the rank's starting node index, but
1324 * `xm_cell_local_out` will be 0.
1325 * @param[out] xm_cell_local_out Pointer to store the number of cells for which this rank owns the
1326 * origin node AND that origin node is a valid cell origin within the
1327 * global domain.
1328 *
1329 * @return PetscErrorCode 0 on success, or an error code on failure.
1330 *
1331 * @note Example: If GlobalNodesInDim = 11 (nodes N0 to N10), there are 10 cells (C0 to C9).
1332 * The last cell, C9, has its origin at node N9. So, N9 (index 9) is the last valid
1333 * cell origin (GlobalNodesInDim - 2 = 11 - 2 = 9).
1334 * If a rank owns nodes N8, N9, N10 (xs=8, xm=3):
1335 * - First potential origin on rank = N8.
1336 * - Last potential origin on rank (node that is not the last owned node) = N9.
1337 * - Actual last origin this rank can form = min(N9, GlobalMaxOrigin=N9) = N9.
1338 * - Number of cells = (N9 - N8 + 1) = 2 cells (C8, C9).
1339 * If a rank owns only node N10 (xs=10, xm=1):
1340 * - First potential origin on rank = N10.
1341 * - Actual last origin rank can form = min(N9, GlobalMaxOrigin=N9) (since N10-1=N9).
1342 * - first_potential_origin_on_rank (N10) > actual_last_origin_this_rank_can_form (N9) => 0 cells.
1343 */
1344PetscErrorCode GetOwnedCellRange(const DMDALocalInfo *info_nodes,
1345 PetscInt dim,
1346 PetscInt *xs_cell_global_out,
1347 PetscInt *xm_cell_local_out)
1348{
 1349 PetscErrorCode ierr = 0; // No call below sets this; returned for consistency with other helpers.
1350 PetscInt xs_node_global_rank; // Global index of the first node owned by this rank in the specified dimension.
1351 PetscInt num_nodes_owned_rank; // Number of nodes owned by this rank in this dimension (local count, excluding ghosts).
1352 PetscInt GlobalNodesInDim_from_info; // Total number of global nodes in this dimension, from DMDALocalInfo.
1353
1354 PetscFunctionBeginUser;
1355
1356 // --- 1. Input Validation ---
1357 if (!info_nodes || !xs_cell_global_out || !xm_cell_local_out) {
1358 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "Null pointer passed to GetOwnedCellRange.");
1359 }
1360
1361 // --- 2. Extract Node Ownership and Global Dimension Information from DMDALocalInfo ---
1362 if (dim == 0) { // I-direction
1363 xs_node_global_rank = info_nodes->xs; // Starting owned node index (global)
1364 num_nodes_owned_rank = info_nodes->xm; // Number of nodes owned by this rank (local count)
1365 GlobalNodesInDim_from_info = info_nodes->mx; // Total global nodes in this dimension
1366 } else if (dim == 1) { // J-direction
1367 xs_node_global_rank = info_nodes->ys;
1368 num_nodes_owned_rank = info_nodes->ym;
1369 GlobalNodesInDim_from_info = info_nodes->my;
1370 } else if (dim == 2) { // K-direction
1371 xs_node_global_rank = info_nodes->zs;
1372 num_nodes_owned_rank = info_nodes->zm;
1373 GlobalNodesInDim_from_info = info_nodes->mz;
1374 } else {
1375 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid dimension %d in GetOwnedCellRange. Must be 0, 1, or 2.", dim);
1376 }
1377
 1378 // --- 3. Handle Edge Cases for Global Domain Size ---
 1379 // Negative global dimension is an error (should be caught by DMDA setup, but checked defensively first).
 1380 if (GlobalNodesInDim_from_info < 0) {
 1381 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "GlobalNodesInDim %d from DMDALocalInfo must be non-negative for dimension %d.", GlobalNodesInDim_from_info, dim);
 1382 }
 1383 // If the global domain has 0 or 1 node plane, no cells can be formed.
 1384 if (GlobalNodesInDim_from_info <= 1) {
 1385 *xs_cell_global_out = xs_node_global_rank; // Still report the rank's starting node
 1386 *xm_cell_local_out = 0; // But 0 cells
 1387 PetscFunctionReturn(0);
 1388 }
1389
1390 // --- 4. Determine Cell Ownership Based on Node Ownership ---
1391 // The first cell this rank *could* define has its origin at the first node this rank owns.
1392 *xs_cell_global_out = xs_node_global_rank;
1393
1394 // If the rank owns no nodes in this dimension, it can't form any cell origins.
1395 if (num_nodes_owned_rank == 0) {
1396 *xm_cell_local_out = 0;
1397 } else {
1398 // Calculate the global index of the last possible node that can serve as a cell origin.
1399 // If GlobalNodesInDim = N (nodes 0 to N-1), cells are C_0 to C_{N-2}.
1400 // The origin of cell C_{N-2} is node N_{N-2}.
1401 // So, the last valid cell origin node index is (GlobalNodesInDim - 2).
1402 PetscInt last_possible_origin_global_idx = GlobalNodesInDim_from_info - 2;
1403
1404 // Determine the range of nodes owned by this rank that could *potentially* be cell origins.
1405 // The first node owned by the rank is a potential origin.
1406 PetscInt first_potential_origin_on_rank = xs_node_global_rank;
1407
1408 // A node can be an origin if there's at least one node after it to form the cell.
1409 // So, the last node owned by the rank that could *potentially* be an origin is
1410 // the second-to-last node it owns: (xs_node_global_rank + num_nodes_owned_rank - 1) - 1
1411 // which simplifies to: xs_node_global_rank + num_nodes_owned_rank - 2.
1412 PetscInt last_potential_origin_on_rank = xs_node_global_rank + num_nodes_owned_rank - 2;
1413
1414 // The actual last origin this rank can provide is capped by the global domain limit.
1415 PetscInt actual_last_origin_this_rank_can_form = PetscMin(last_potential_origin_on_rank, last_possible_origin_global_idx);
1416
1417 // If the first potential origin this rank owns is already beyond the actual last origin it can form,
1418 // then this rank forms no valid cell origins. This happens if:
1419 // - num_nodes_owned_rank is 1 (so last_potential_origin_on_rank = first_potential_origin_on_rank - 1).
1420 // - The rank only owns nodes at the very end of the global domain (e.g., only the last global node).
1421 if (first_potential_origin_on_rank > actual_last_origin_this_rank_can_form) {
1422 *xm_cell_local_out = 0;
1423 } else {
1424 // The number of cells is the count of valid origins this rank owns.
1425 // (Count = Last Index - First Index + 1)
1426 *xm_cell_local_out = actual_last_origin_this_rank_can_form - first_potential_origin_on_rank + 1;
1427 }
1428 }
1429 PetscFunctionReturn(ierr);
1430}
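
/*
 * A minimal calling sketch (assumes a set-up node-centered DMDA in user->da;
 * the variable names cs/cm are illustrative):
 *
 *   DMDALocalInfo info;
 *   PetscInt      cs, cm;
 *   ierr = DMDAGetLocalInfo(user->da, &info); CHKERRQ(ierr);
 *   ierr = GetOwnedCellRange(&info, 0, &cs, &cm); CHKERRQ(ierr);
 *   // this rank owns the origins of cells [cs, cs+cm) in the i-direction
 */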
1431
1432/**
1433 * @brief Gets the global cell range for a rank, including boundary cells.
1434 *
1435 * This function first calls GetOwnedCellRange to get the conservative range of
1436 * fully-contained cells. It then extends this range by applying the
1437 * "Lower-Rank-Owns-Boundary" principle. A rank claims ownership of the
1438 * boundary cells it shares with neighbors in the positive (+x, +y, +z)
1439 * directions.
1440 *
1441 * This results in a final cell range that is gap-free and suitable for building
1442 * the definitive particle ownership map.
1443 *
1444 * @param[in] info_nodes Pointer to the DMDALocalInfo struct.
1445 * @param[in] neighbors Pointer to the RankNeighbors struct containing neighbor info.
1446 * @param[in] dim The dimension (0 for i/x, 1 for j/y, 2 for k/z).
1447 * @param[out] xs_cell_global_out Pointer to store the final starting cell index.
1448 * @param[out] xm_cell_local_out Pointer to store the final number of cells.
1449 *
1450 * @return PetscErrorCode 0 on success, or an error code on failure.
1451 */
1452PetscErrorCode GetGhostedCellRange(const DMDALocalInfo *info_nodes,
1453 const RankNeighbors *neighbors,
1454 PetscInt dim,
1455 PetscInt *xs_cell_global_out,
1456 PetscInt *xm_cell_local_out)
1457{
1458 PetscErrorCode ierr;
1459
1460 PetscFunctionBeginUser;
1461
1462 // --- 1. Get the base, conservative range from the original function ---
1463 ierr = GetOwnedCellRange(info_nodes, dim, xs_cell_global_out, xm_cell_local_out); CHKERRQ(ierr);
1464
1465 // --- 2. Apply the "Lower-Rank-Owns-Boundary" correction ---
1466 // A rank owns the boundary if it has a neighbor in the positive direction.
1467 // We check if the neighbor's rank is valid (not MPI_PROC_NULL, which is < 0).
1468 if (dim == 0 && neighbors->rank_xp > -1) {
1469 (*xm_cell_local_out)++;
1470 } else if (dim == 1 && neighbors->rank_yp > -1) {
1471 (*xm_cell_local_out)++;
1472 } else if (dim == 2 && neighbors->rank_zp > -1) {
1473 (*xm_cell_local_out)++;
1474 }
1475
1476 PetscFunctionReturn(0);
1477}
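
/*
 * Worked example (hypothetical decomposition): if GetOwnedCellRange reports
 * i-cells [4, 8) for this rank and a valid +x neighbor exists, the shared
 * boundary cell 8 is claimed here as well, yielding the gap-free range [4, 9).
 */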
1478
1479
1480/**
1481 * @brief Computes and stores the Cartesian neighbor ranks for the DMDA decomposition.
1482 *
1483 * This function retrieves the neighbor information from the primary DMDA (user->da)
1484 * and stores the face neighbors (xm, xp, ym, yp, zm, zp) in the user->neighbors structure.
1485 * It assumes a standard PETSc ordering for the neighbors array returned by DMDAGetNeighbors.
1486 * If DMDAGetNeighbors returns a negative rank that is not MPI_PROC_NULL (which can happen
1487 * in some PETSc/MPI configurations for non-periodic boundaries if not fully standard),
1488 * this function will sanitize it to MPI_PROC_NULL to prevent issues.
1489 *
1490 * @param[in,out] user Pointer to the UserCtx structure where neighbor info will be stored.
1491 *
1492 * @return PetscErrorCode 0 on success, non-zero on failure.
1493 */
 1494PetscErrorCode ComputeAndStoreNeighborRanks(UserCtx *user)
 1495{
1496 PetscErrorCode ierr;
1497 PetscMPIInt rank;
1498 PetscMPIInt size; // MPI communicator size
1499 const PetscMPIInt *neighbor_ranks_ptr; // Pointer to raw neighbor data from PETSc
1500
1501 PetscFunctionBeginUser;
1502 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
1503 ierr = MPI_Comm_size(PETSC_COMM_WORLD, &size); CHKERRQ(ierr); // Get MPI size for validation
1504
1505 LOG_ALLOW(GLOBAL, LOG_INFO, "Rank %d: Computing DMDA neighbor ranks.\n", rank);
1506
1507 if (!user || !user->da) {
1508 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "UserCtx or user->da is NULL in ComputeAndStoreNeighborRanks.");
1509 }
1510
1511 // Get the neighbor information from the DMDA
1512 // neighbor_ranks_ptr will point to an internal PETSc array of 27 ranks.
1513 ierr = DMDAGetNeighbors(user->da, &neighbor_ranks_ptr); CHKERRQ(ierr);
1514
1515 // Log the raw values from DMDAGetNeighbors for boundary-relevant directions for debugging
1516 LOG_ALLOW_SYNC(GLOBAL, LOG_DEBUG, "[Rank %d]Raw DMDAGetNeighbors: xm_raw=%d, xp_raw=%d, ym_raw=%d, yp_raw=%d, zm_raw=%d, zp_raw=%d. MPI_PROC_NULL is %d.\n",
1517 rank,
1518 neighbor_ranks_ptr[12], neighbor_ranks_ptr[14],
1519 neighbor_ranks_ptr[10], neighbor_ranks_ptr[16],
1520 neighbor_ranks_ptr[4], neighbor_ranks_ptr[22],
1521 (int)MPI_PROC_NULL);
1522
1523 // PETSc standard indices for 3D face neighbors from the 27-point stencil:
1524 // Index = k_offset*9 + j_offset*3 + i_offset (where offsets -1,0,1 map to 0,1,2)
1525 // Center: (i_off=1, j_off=1, k_off=1) => 1*9 + 1*3 + 1 = 13
1526 // X-min: (i_off=0, j_off=1, k_off=1) => 1*9 + 1*3 + 0 = 12
1527 // X-plus: (i_off=2, j_off=1, k_off=1) => 1*9 + 1*3 + 2 = 14
1528 // Y-min: (i_off=1, j_off=0, k_off=1) => 1*9 + 0*3 + 1 = 10
1529 // Y-plus: (i_off=1, j_off=2, k_off=1) => 1*9 + 2*3 + 1 = 16
1530 // Z-min: (i_off=1, j_off=1, k_off=0) => 0*9 + 1*3 + 1 = 4
1531 // Z-plus: (i_off=1, j_off=1, k_off=2) => 2*9 + 1*3 + 1 = 22
1532
1533 if (neighbor_ranks_ptr[13] != rank) {
1534 LOG_ALLOW(GLOBAL, LOG_WARNING, "Rank %d: DMDAGetNeighbors center index (13) is %d, expected current rank %d. Neighbor indexing might be non-standard or DMDA small.\n",
1535 rank, neighbor_ranks_ptr[13], rank);
1536 // This warning is important. If the center isn't the current rank, the offsets are likely wrong.
1537 // However, PETSc should ensure this unless the DM is too small for a 3x3x3 stencil.
1538 }
1539
1540 // Assign and sanitize each neighbor rank
1541 PetscMPIInt temp_neighbor;
1542
1543 temp_neighbor = neighbor_ranks_ptr[12]; // xm
1544 if (temp_neighbor < 0 || temp_neighbor >= size) {
1545 LOG_ALLOW(GLOBAL, LOG_WARNING, "[Rank %d] Correcting invalid xm neighbor %d to MPI_PROC_NULL (%d).\n", rank, temp_neighbor, (int)MPI_PROC_NULL);
1546 user->neighbors.rank_xm = MPI_PROC_NULL;
1547 } else {
1548 user->neighbors.rank_xm = temp_neighbor;
1549 }
1550
1551 temp_neighbor = neighbor_ranks_ptr[14]; // xp
1552 if (temp_neighbor < 0 || temp_neighbor >= size) {
1553 LOG_ALLOW(GLOBAL, LOG_WARNING, "[Rank %d] Correcting invalid xp neighbor %d to MPI_PROC_NULL (%d).\n", rank, temp_neighbor, (int)MPI_PROC_NULL);
1554 user->neighbors.rank_xp = MPI_PROC_NULL;
1555 } else {
1556 user->neighbors.rank_xp = temp_neighbor;
1557 }
1558
1559 temp_neighbor = neighbor_ranks_ptr[10]; // ym
1560 if (temp_neighbor < 0 || temp_neighbor >= size) {
1561 LOG_ALLOW(GLOBAL, LOG_WARNING, "[Rank %d] Correcting invalid ym neighbor %d to MPI_PROC_NULL (%d).\n", rank, temp_neighbor, (int)MPI_PROC_NULL);
1562 user->neighbors.rank_ym = MPI_PROC_NULL;
1563 } else {
1564 user->neighbors.rank_ym = temp_neighbor;
1565 }
1566
1567 temp_neighbor = neighbor_ranks_ptr[16]; // yp
1568 if (temp_neighbor < 0 || temp_neighbor >= size) {
 1569 // Raw index 16 corresponds to the yp neighbor (not zm) in the 27-point stencil.
1570 LOG_ALLOW(GLOBAL, LOG_WARNING, "[Rank %d] Correcting invalid yp neighbor (raw index 16) %d to MPI_PROC_NULL (%d).\n", rank, temp_neighbor, (int)MPI_PROC_NULL);
1571 user->neighbors.rank_yp = MPI_PROC_NULL;
1572 } else {
1573 user->neighbors.rank_yp = temp_neighbor;
1574 }
1575
1576 temp_neighbor = neighbor_ranks_ptr[4]; // zm
1577 if (temp_neighbor < 0 || temp_neighbor >= size) {
1578 LOG_ALLOW(GLOBAL, LOG_WARNING, "[Rank %d] Correcting invalid zm neighbor %d to MPI_PROC_NULL (%d).\n", rank, temp_neighbor, (int)MPI_PROC_NULL);
1579 user->neighbors.rank_zm = MPI_PROC_NULL;
1580 } else {
1581 user->neighbors.rank_zm = temp_neighbor;
1582 }
1583
1584 temp_neighbor = neighbor_ranks_ptr[22]; // zp
1585 if (temp_neighbor < 0 || temp_neighbor >= size) {
1586 LOG_ALLOW(GLOBAL, LOG_WARNING, "[Rank %d] Correcting invalid zp neighbor %d to MPI_PROC_NULL (%d).\n", rank, temp_neighbor, (int)MPI_PROC_NULL);
1587 user->neighbors.rank_zp = MPI_PROC_NULL;
1588 } else {
1589 user->neighbors.rank_zp = temp_neighbor;
1590 }
1591
1592 LOG_ALLOW_SYNC(GLOBAL, LOG_DEBUG, "[Rank %d] Stored user->neighbors: xm=%d, xp=%d, ym=%d, yp=%d, zm=%d, zp=%d\n", rank,
1593 user->neighbors.rank_xm, user->neighbors.rank_xp,
1594 user->neighbors.rank_ym, user->neighbors.rank_yp,
1595 user->neighbors.rank_zm, user->neighbors.rank_zp);
1596 PetscSynchronizedFlush(PETSC_COMM_WORLD, PETSC_STDOUT); // Ensure logs are flushed
1597
1598 // Note: neighbor_ranks_ptr memory is managed by PETSc, do not free it.
1599 PetscFunctionReturn(0);
1600}
1601
1602/**
1603 * @brief Sets the processor layout for a given DMDA based on PETSc options.
1604 *
1605 * Reads the desired number of processors in x, y, and z directions using
1606 * PETSc options (e.g., -dm_processors_x, -dm_processors_y, -dm_processors_z).
1607 * If an option is not provided for a direction, PETSC_DECIDE is used for that direction.
1608 * Applies the layout using DMDASetNumProcs.
1609 *
1610 * Also stores the retrieved/decided values in user->procs_x/y/z if user context is provided.
1611 *
1612 * @param dm The DMDA object to configure the layout for.
1613 * @param user Pointer to the UserCtx structure (optional, used to store layout values).
1614 *
1615 * @return PetscErrorCode 0 on success, non-zero on failure.
1616 */
1617PetscErrorCode SetDMDAProcLayout(DM dm, UserCtx *user)
1618{
1619 PetscErrorCode ierr;
1620 PetscMPIInt size, rank;
1621 PetscInt px = PETSC_DECIDE, py = PETSC_DECIDE, pz = PETSC_DECIDE;
1622 PetscBool px_set = PETSC_FALSE, py_set = PETSC_FALSE, pz_set = PETSC_FALSE;
1623
1624 PetscFunctionBeginUser;
1625 ierr = MPI_Comm_size(PetscObjectComm((PetscObject)dm), &size); CHKERRQ(ierr);
1626 ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)dm), &rank); CHKERRQ(ierr);
1627 LOG_ALLOW(GLOBAL, LOG_INFO, "Rank %d: Configuring DMDA processor layout for %d total processes.\n", rank, size);
1628
1629 // --- Read desired layout from options ---
1630 // Use names like "-dm_processors_x" which are somewhat standard in PETSc examples
1631 ierr = PetscOptionsGetInt(NULL, NULL, "-dm_processors_x", &px, &px_set); CHKERRQ(ierr);
1632 ierr = PetscOptionsGetInt(NULL, NULL, "-dm_processors_y", &py, &py_set); CHKERRQ(ierr);
1633 ierr = PetscOptionsGetInt(NULL, NULL, "-dm_processors_z", &pz, &pz_set); CHKERRQ(ierr);
1634
1635 // --- Validate User Input (Optional but Recommended) ---
1636 // Check if specified processor counts multiply to the total MPI size
1637 if (px_set && py_set && pz_set) {
1638 if (px * py * pz != size) {
1639 SETERRQ(PetscObjectComm((PetscObject)dm), PETSC_ERR_ARG_INCOMP,
1640 "Specified processor layout %d x %d x %d = %d does not match MPI size %d",
1641 px, py, pz, px * py * pz, size);
1642 }
1643 LOG_ALLOW(GLOBAL, LOG_INFO, "Using specified processor layout: %d x %d x %d\n", px, py, pz);
1644 } else if (px_set || py_set || pz_set) {
1645 // If only some are set, PETSC_DECIDE will be used for others
1646 LOG_ALLOW(GLOBAL, LOG_INFO, "Using partially specified processor layout: %d x %d x %d (PETSC_DECIDE for unspecified)\n", px, py, pz);
1647 } else {
1648 LOG_ALLOW(GLOBAL, LOG_INFO, "Using fully automatic processor layout (PETSC_DECIDE x PETSC_DECIDE x PETSC_DECIDE)\n");
1649 }
1650 // Additional checks: Ensure px, py, pz are positive if set
1651 if ((px_set && px <= 0) || (py_set && py <= 0) || (pz_set && pz <= 0)) {
1652 SETERRQ(PetscObjectComm((PetscObject)dm), PETSC_ERR_ARG_OUTOFRANGE, "Specified processor counts must be positive.");
1653 }
1654
1655
1656 // --- Apply the layout to the DMDA ---
1657 ierr = DMDASetNumProcs(dm, px, py, pz); CHKERRQ(ierr);
1658 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Rank %d: DMDASetNumProcs called with px=%d, py=%d, pz=%d.\n", rank, px, py, pz);
1659
1660 // --- Store the values in UserCtx (Optional) ---
1661 // Note: If PETSC_DECIDE was used, PETSc calculates the actual values during DMSetUp.
1662 // We store the *requested* values here. To get the *actual* values used,
1663 // you would need to call DMDAGetInfo after DMSetUp.
1664 /*
1665 if (user) {
1666 user->procs_x = px;
1667 user->procs_y = py;
1668 user->procs_z = pz;
1669 }
1670 */
1671
1672 PetscFunctionReturn(0);
1673}
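
/*
 * Example invocation (the executable name is illustrative), requesting an
 * explicit 2 x 2 x 2 layout on 8 ranks via the options parsed above:
 *
 *   mpiexec -n 8 ./picurv -dm_processors_x 2 -dm_processors_y 2 -dm_processors_z 2
 */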
1674
1675/**
1676 * @brief Sets up the full rank communication infrastructure for all blocks.
1677 *
1678 * This function orchestrates the setup of the parallel communication map. It uses the
1679 * existing helper functions to sequentially gather and broadcast the bounding box
1680 * information for each computational block. All ranks participate in each step,
1681 * assembling the final, unified multi-block list in parallel.
1682 *
1683 * @param[in,out] simCtx The master simulation context. After this call,
1684 * simCtx->bboxlist and the relevant user->neighbors fields
1685 * will be populated on all ranks.
1686 */
1687PetscErrorCode SetupDomainRankInfo(SimCtx *simCtx)
1688{
1689 PetscErrorCode ierr;
1690 PetscInt nblk = simCtx->block_number;
1691 PetscInt size = simCtx->size;
1692 BoundingBox *final_bboxlist = NULL;
1693
1694 PetscFunctionBeginUser;
1695
1696 LOG_ALLOW(GLOBAL, LOG_INFO, "Starting full rank communication setup for %d block(s).\n", nblk);
1697
1698 UserCtx *user_finest = simCtx->usermg.mgctx[simCtx->usermg.mglevels - 1].user;
1699
1700 // --- Step 1: Compute neighbor ranks (unchanged) ---
1701 for (int bi = 0; bi < nblk; bi++) {
1702 ierr = ComputeAndStoreNeighborRanks(&user_finest[bi]); CHKERRQ(ierr);
1703 }
1704 LOG_ALLOW(GLOBAL, LOG_INFO, "Neighbor ranks computed and stored for all blocks.\n");
1705
1706 // --- Step 2: Allocate the final, unified list on ALL ranks ---
1707 // Every rank will build this list in parallel.
1708 ierr = PetscMalloc1(size * nblk, &final_bboxlist); CHKERRQ(ierr);
1709
1710 // --- Step 3: Loop through each block, gather then broadcast its bbox list ---
1711 for (int bi = 0; bi < nblk; bi++) {
1712 // This is a temporary pointer for the current block's list.
1713 BoundingBox *block_bboxlist = NULL;
1714
1715 LOG_ALLOW(GLOBAL, LOG_INFO, "Processing bounding boxes for block %d...\n", bi);
1716
1717 // A) GATHER: On rank 0, block_bboxlist is allocated and filled. On others, it's NULL.
1718 ierr = GatherAllBoundingBoxes(&user_finest[bi], &block_bboxlist); CHKERRQ(ierr);
 1719 LOG_ALLOW(GLOBAL, LOG_DEBUG, " -> Gather complete for block %d.\n", bi);
1720
1721 // B) BROADCAST: On non-root ranks, block_bboxlist is allocated. Then, the data
1722 // from rank 0 is broadcast to all ranks. After this call, ALL ranks have
1723 // an identical, complete copy of the bounding boxes for the current block.
1724 ierr = BroadcastAllBoundingBoxes(&user_finest[bi], &block_bboxlist); CHKERRQ(ierr);
1725 LOG_ALLOW(GLOBAL, LOG_DEBUG, " -> Broadcast complete for block %d.\n", bi);
1726
1727 // C) ASSEMBLE: Every rank now copies the data for this block into the
1728 // correct segment of its final, unified list.
1729 for (int r = 0; r < size; r++) {
1730 // The layout is [r0b0, r1b0, ..., r(size-1)b0, r0b1, r1b1, ...]
1731 final_bboxlist[bi * size + r] = block_bboxlist[r];
1732 }
1733 LOG_ALLOW(GLOBAL, LOG_DEBUG, " -> Assembly into final list complete for block %d.\n", bi);
1734
1735 // D) CLEANUP: Free the temporary list for this block on ALL ranks before the next iteration.
 1736 // The gather/broadcast helpers allocate with malloc (not PetscMalloc), so plain free() is required.
1737 free(block_bboxlist);
1738 }
1739
1740 // --- Step 4: Assign the final pointer and run the last setup step ---
1741 simCtx->bboxlist = final_bboxlist;
1742 LOG_ALLOW(GLOBAL, LOG_INFO, "Final unified bboxlist created on all ranks and stored in SimCtx.\n");
1743
1744 ierr = SetupDomainCellDecompositionMap(&user_finest[0]); CHKERRQ(ierr);
 1745 LOG_ALLOW(GLOBAL, LOG_INFO, "Domain cell decomposition map created and broadcast.\n");
1746
1747 LOG_ALLOW(GLOBAL, LOG_INFO, "SetupDomainRankInfo: Completed successfully.\n");
1748 PetscFunctionReturn(0);
1749}
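
/*
 * Retrieval sketch for the unified list assembled above: given the layout
 * [r0b0, r1b0, ..., r(size-1)b0, r0b1, ...], the bounding box of rank r for
 * block bi is
 *
 *   BoundingBox bb = simCtx->bboxlist[bi * simCtx->size + r];
 */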
1750
1751/**
1752 * @brief Reconstructs Cartesian velocity (Ucat) at cell centers from contravariant
1753 * velocity (Ucont) defined on cell faces.
1754 *
1755 * This function performs the transformation from a contravariant velocity representation
1756 * (which is natural on a curvilinear grid) to a Cartesian (x,y,z) representation.
1757 * For each interior computational cell owned by the rank, it performs the following:
1758 *
1759 * 1. It averages the contravariant velocity components (U¹, U², U³) from the
1760 * surrounding faces to get an estimate of the contravariant velocity at the cell center.
1761 * 2. It averages the metric vectors (Csi, Eta, Zet) from the surrounding faces
1762 * to get an estimate of the metric tensor at the cell center. This tensor forms
1763 * the transformation matrix.
1764 * 3. It solves the linear system `[MetricTensor] * [ucat] = [ucont]` for the
1765 * Cartesian velocity vector `ucat = (u,v,w)` using Cramer's rule.
1766 * 4. The computed Cartesian velocity is stored in the global `user->Ucat` vector.
1767 *
1768 * The function operates on local, ghosted versions of the input vectors (`user->lUcont`,
1769 * `user->lCsi`, etc.) to ensure stencils are valid across processor boundaries.
1770 *
1771 * @param[in,out] user Pointer to the UserCtx structure. The function reads from
1772 * `user->lUcont`, `user->lCsi`, `user->lEta`, `user->lZet`, `user->lNvert`
1773 * and writes to the global `user->Ucat` vector.
1774 *
1775 * @return PetscErrorCode 0 on success.
1776 *
1777 * @note
1778 * - This function should be called AFTER `user->lUcont` and all local metric vectors
1779 * (`user->lCsi`, etc.) have been populated with up-to-date ghost values via `UpdateLocalGhosts`.
1780 * - It only computes `Ucat` for interior cells (not on physical boundaries) and for
1781 * cells not marked as solid/blanked by `user->lNvert`.
1782 * - The caller is responsible for subsequently applying boundary conditions to `user->Ucat`
1783 * and calling `UpdateLocalGhosts(user, "Ucat")` to populate `user->lUcat`.
1784 */
1785PetscErrorCode Contra2Cart(UserCtx *user)
1786{
1787 PetscErrorCode ierr;
1788 DMDALocalInfo info;
1789 Cmpnts ***lcsi_arr, ***leta_arr, ***lzet_arr; // Local metric arrays
1790 Cmpnts ***lucont_arr; // Local contravariant velocity array
1791 Cmpnts ***gucat_arr; // Global Cartesian velocity array
1792 PetscReal ***lnvert_arr; // Local Nvert array
1793 PetscReal ***laj_arr; // Local Jacobian Determinant inverse array
1794
1795 PetscFunctionBeginUser;
1796 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Starting Contravariant-to-Cartesian velocity transformation.\n");
1797
1798 // --- 1. Get DMDA Info and Check for Valid Inputs ---
1799 // All inputs (lUcont, lCsi, etc.) and outputs (Ucat) are on DMs from the UserCtx.
1800 // We get local info from fda, which governs the layout of most arrays here.
1801 ierr = DMDAGetLocalInfo(user->fda, &info); CHKERRQ(ierr);
1802 if (!user->lUcont || !user->lCsi || !user->lEta || !user->lZet || !user->lNvert || !user->Ucat) {
1803 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Contra2Cart requires lUcont, lCsi/Eta/Zet, lNvert, and Ucat to be non-NULL.");
1804 }
1805
1806
1807 // --- 2. Get Read-Only Array Access to Local Input Vectors (with ghosts) ---
1808 ierr = DMDAVecGetArrayRead(user->fda, user->lUcont, &lucont_arr); CHKERRQ(ierr);
1809 ierr = DMDAVecGetArrayRead(user->fda, user->lCsi, &lcsi_arr); CHKERRQ(ierr);
1810 ierr = DMDAVecGetArrayRead(user->fda, user->lEta, &leta_arr); CHKERRQ(ierr);
1811 ierr = DMDAVecGetArrayRead(user->fda, user->lZet, &lzet_arr); CHKERRQ(ierr);
1812 ierr = DMDAVecGetArrayRead(user->da, user->lNvert, &lnvert_arr); CHKERRQ(ierr);
1813 ierr = DMDAVecGetArrayRead(user->da, user->lAj, &laj_arr); CHKERRQ(ierr);
1814
1815 // --- 3. Get Write-Only Array Access to the Global Output Vector ---
1816 // We compute for local owned cells and write into the global vector.
1817 // PETSc handles mapping the global indices to the correct local memory locations.
1818 ierr = DMDAVecGetArray(user->fda, user->Ucat, &gucat_arr); CHKERRQ(ierr);
1819
1820
1821 // --- 4. Define Loop Bounds for INTERIOR Cells ---
1822 // We use adjusted bounds to avoid calculating Ucat on the physical domain boundaries,
1823 // as these are typically set explicitly by boundary condition functions.
1824 // The stencils use indices like i-1, j-1, k-1, so we must start loops at least at index 1.
1825 PetscInt i_start = (info.xs == 0) ? info.xs + 1 : info.xs;
1826 PetscInt i_end = (info.xs + info.xm == info.mx) ? info.xs + info.xm - 1 : info.xs + info.xm;
1827
1828 PetscInt j_start = (info.ys == 0) ? info.ys + 1 : info.ys;
1829 PetscInt j_end = (info.ys + info.ym == info.my) ? info.ys + info.ym - 1 : info.ys + info.ym;
1830
1831 PetscInt k_start = (info.zs == 0) ? info.zs + 1 : info.zs;
1832 PetscInt k_end = (info.zs + info.zm == info.mz) ? info.zs + info.zm - 1 : info.zs + info.zm;
1833
1834 // --- 5. Main Computation Loop ---
1835 // Loops over the GLOBAL indices of interior cells owned by this rank.
1836 for (PetscInt k_cell = k_start; k_cell < k_end; ++k_cell) {
1837 for (PetscInt j_cell = j_start; j_cell < j_end; ++j_cell) {
1838 for (PetscInt i_cell = i_start; i_cell < i_end; ++i_cell) {
1839
1840 // Check if the cell is a fluid cell (not solid/blanked)
1841 // if (lnvert_arr[k_cell][j_cell][i_cell] > 0.1) continue; // Skip solid/blanked cells
1842
1843 // Transformation matrix [mat] is the metric tensor at the cell center,
1844 // estimated by averaging metrics from adjacent faces.
1845 PetscReal mat[3][3];
1846
1847 // PetscReal aj_center = laj_arr[k_cell+1][j_cell+1][i_cell+1];
1848
1849 mat[0][0] = 0.5 * (lcsi_arr[k_cell][j_cell][i_cell-1].x + lcsi_arr[k_cell][j_cell][i_cell].x); //* aj_center;
1850 mat[0][1] = 0.5 * (lcsi_arr[k_cell][j_cell][i_cell-1].y + lcsi_arr[k_cell][j_cell][i_cell].y); //* aj_center;
1851 mat[0][2] = 0.5 * (lcsi_arr[k_cell][j_cell][i_cell-1].z + lcsi_arr[k_cell][j_cell][i_cell].z); //* aj_center;
1852
1853 mat[1][0] = 0.5 * (leta_arr[k_cell][j_cell-1][i_cell].x + leta_arr[k_cell][j_cell][i_cell].x); //* aj_center;
1854 mat[1][1] = 0.5 * (leta_arr[k_cell][j_cell-1][i_cell].y + leta_arr[k_cell][j_cell][i_cell].y); //* aj_center;
1855 mat[1][2] = 0.5 * (leta_arr[k_cell][j_cell-1][i_cell].z + leta_arr[k_cell][j_cell][i_cell].z); //* aj_center;
1856
1857 mat[2][0] = 0.5 * (lzet_arr[k_cell-1][j_cell][i_cell].x + lzet_arr[k_cell][j_cell][i_cell].x); //* aj_center;
1858 mat[2][1] = 0.5 * (lzet_arr[k_cell-1][j_cell][i_cell].y + lzet_arr[k_cell][j_cell][i_cell].y); //* aj_center;
1859 mat[2][2] = 0.5 * (lzet_arr[k_cell-1][j_cell][i_cell].z + lzet_arr[k_cell][j_cell][i_cell].z); //* aj_center;
1860
1861 // Contravariant velocity vector `q` at the cell center,
1862 // estimated by averaging face-based contravariant velocities.
1863 PetscReal q[3];
1864 q[0] = 0.5 * (lucont_arr[k_cell][j_cell][i_cell-1].x + lucont_arr[k_cell][j_cell][i_cell].x); // U¹ at cell center
1865 q[1] = 0.5 * (lucont_arr[k_cell][j_cell-1][i_cell].y + lucont_arr[k_cell][j_cell][i_cell].y); // U² at cell center
1866 q[2] = 0.5 * (lucont_arr[k_cell-1][j_cell][i_cell].z + lucont_arr[k_cell][j_cell][i_cell].z); // U³ at cell center
1867
1868 // Solve the 3x3 system `mat * ucat = q` using Cramer's rule.
1869 PetscReal det = mat[0][0] * (mat[1][1] * mat[2][2] - mat[1][2] * mat[2][1]) -
1870 mat[0][1] * (mat[1][0] * mat[2][2] - mat[1][2] * mat[2][0]) +
1871 mat[0][2] * (mat[1][0] * mat[2][1] - mat[1][1] * mat[2][0]);
1872
 1873 if (PetscAbsReal(det) < 1.0e-18) {
 1874 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_USER, "Transformation matrix determinant is near zero at cell (%d,%d,%d)", i_cell, j_cell, k_cell);
 1875 }
1876
1877 PetscReal det_inv = 1.0 / det;
1878
1879 PetscReal det0 = q[0] * (mat[1][1] * mat[2][2] - mat[1][2] * mat[2][1]) -
1880 q[1] * (mat[0][1] * mat[2][2] - mat[0][2] * mat[2][1]) +
1881 q[2] * (mat[0][1] * mat[1][2] - mat[0][2] * mat[1][1]);
1882
1883 PetscReal det1 = -q[0] * (mat[1][0] * mat[2][2] - mat[1][2] * mat[2][0]) +
1884 q[1] * (mat[0][0] * mat[2][2] - mat[0][2] * mat[2][0]) -
1885 q[2] * (mat[0][0] * mat[1][2] - mat[0][2] * mat[1][0]);
1886
1887 PetscReal det2 = q[0] * (mat[1][0] * mat[2][1] - mat[1][1] * mat[2][0]) -
1888 q[1] * (mat[0][0] * mat[2][1] - mat[0][1] * mat[2][0]) +
1889 q[2] * (mat[0][0] * mat[1][1] - mat[0][1] * mat[1][0]);
1890
1891 // Store computed Cartesian velocity in the GLOBAL Ucat array at the
1892 // array index corresponding to the cell's origin node.
1893 gucat_arr[k_cell][j_cell][i_cell].x = det0 * det_inv;
1894 gucat_arr[k_cell][j_cell][i_cell].y = det1 * det_inv;
1895 gucat_arr[k_cell][j_cell][i_cell].z = det2 * det_inv;
1896 }
1897 }
1898 }
1899
1900 // --- 6. Restore Array Access ---
1901 ierr = DMDAVecRestoreArrayRead(user->fda, user->lUcont, &lucont_arr); CHKERRQ(ierr);
1902 ierr = DMDAVecRestoreArrayRead(user->fda, user->lCsi, &lcsi_arr); CHKERRQ(ierr);
1903 ierr = DMDAVecRestoreArrayRead(user->fda, user->lEta, &leta_arr); CHKERRQ(ierr);
1904 ierr = DMDAVecRestoreArrayRead(user->fda, user->lZet, &lzet_arr); CHKERRQ(ierr);
1905 ierr = DMDAVecRestoreArrayRead(user->da, user->lNvert, &lnvert_arr); CHKERRQ(ierr);
1906 ierr = DMDAVecRestoreArrayRead(user->da, user->lAj, &laj_arr); CHKERRQ(ierr);
1907 ierr = DMDAVecRestoreArray(user->fda, user->Ucat, &gucat_arr); CHKERRQ(ierr);
1908
1909 LOG_ALLOW(GLOBAL, LOG_INFO, "Completed Contravariant-to-Cartesian velocity transformation. \n");
1910 PetscFunctionReturn(0);
1911}
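
/*
 * Typical call sequence around Contra2Cart, following the notes above (a
 * sketch; the exact field-name strings accepted by UpdateLocalGhosts and the
 * boundary-condition step depend on the case being run):
 *
 *   ierr = UpdateLocalGhosts(user, "Ucont"); CHKERRQ(ierr); // fill lUcont ghosts
 *   ierr = Contra2Cart(user); CHKERRQ(ierr);                // interior Ucat
 *   // ... apply physical boundary conditions to user->Ucat ...
 *   ierr = UpdateLocalGhosts(user, "Ucat"); CHKERRQ(ierr);  // populate lUcat
 */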
1912
1913
1914/**
1915 * @brief Creates and distributes a map of the domain's cell decomposition to all ranks.
1916 * @ingroup DomainInfo
1917 *
1918 * This function is a critical part of the simulation setup. It determines the global
1919 * cell ownership for each MPI rank and makes this information available to all
1920 * other ranks. This "decomposition map" is essential for the robust "Walk and Handoff"
1921 * particle migration strategy, allowing any rank to quickly identify the owner of a
1922 * target cell.
1923 *
1924 * The process involves:
1925 * 1. Each rank gets its own node ownership information from the DMDA.
1926 * 2. It converts this node information into cell ownership ranges using the
1927 * `GetOwnedCellRange` helper function.
1928 * 3. It participates in an `MPI_Allgather` collective operation to build a complete
1929 * array (`user->RankCellInfoMap`) containing the ownership information for every rank.
1930 *
1931 * This function should be called once during initialization after the primary DMDA
1932 * (user->da) has been set up.
1933 *
1934 * @param[in,out] user Pointer to the UserCtx structure. The function will allocate and
1935 * populate `user->RankCellInfoMap` and set `user->num_ranks`.
1936 *
1937 * @return PetscErrorCode 0 on success, or a non-zero PETSc error code on failure.
1938 * Errors can occur if input pointers are NULL or if MPI communication fails.
1939 */
1941{
1942 PetscErrorCode ierr;
1943 DMDALocalInfo local_node_info;
1944 RankCellInfo my_cell_info;
1945 PetscMPIInt rank, size;
1946
1947 PetscFunctionBeginUser;
1948
1949 // --- 1. Input Validation and MPI Info ---
1950 if (!user) {
1951 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "UserCtx pointer is NULL in SetupDomainCellDecompositionMap.");
1952 }
1953 if (!user->da) {
1954 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "user->da is not initialized in SetupDomainCellDecompositionMap.");
1955 }
1956
1957 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
1958 ierr = MPI_Comm_size(PETSC_COMM_WORLD, &size); CHKERRQ(ierr);
1959
1960 LOG_ALLOW(GLOBAL, LOG_INFO, "Setting up domain cell decomposition map for %d ranks.\n", size);
1961
1962 // --- 2. Determine Local Cell Ownership ---
1963 // Get the local node ownership information from the primary DMDA.
1964 ierr = DMDAGetLocalInfo(user->da, &local_node_info); CHKERRQ(ierr);
1965
1966 // Use the robust helper function to convert node ownership to cell ownership.
1967 // A cell's index is defined by its origin node.
1968
1969 ierr = GetGhostedCellRange(&local_node_info, &user->neighbors, 0, &my_cell_info.xs_cell, &my_cell_info.xm_cell); CHKERRQ(ierr);
1970 ierr = GetGhostedCellRange(&local_node_info, &user->neighbors, 1, &my_cell_info.ys_cell, &my_cell_info.ym_cell); CHKERRQ(ierr);
1971 ierr = GetGhostedCellRange(&local_node_info, &user->neighbors, 2, &my_cell_info.zs_cell, &my_cell_info.zm_cell); CHKERRQ(ierr);
1972
1973 /*
1974 ierr = GetOwnedCellRange(&local_node_info, 0, &my_cell_info.xs_cell, &my_cell_info.xm_cell); CHKERRQ(ierr);
1975 ierr = GetOwnedCellRange(&local_node_info, 1, &my_cell_info.ys_cell, &my_cell_info.ym_cell); CHKERRQ(ierr);
1976 ierr = GetOwnedCellRange(&local_node_info, 2, &my_cell_info.zs_cell, &my_cell_info.zm_cell); CHKERRQ(ierr);
1977 */
1978 // Log the calculated local ownership for debugging purposes.
1979 LOG_ALLOW(LOCAL, LOG_DEBUG, "[Rank %d] Owns cells: i[%d, %d), j[%d, %d), k[%d, %d)\n",
1980 rank, my_cell_info.xs_cell, my_cell_info.xs_cell + my_cell_info.xm_cell,
1981 my_cell_info.ys_cell, my_cell_info.ys_cell + my_cell_info.ym_cell,
1982 my_cell_info.zs_cell, my_cell_info.zs_cell + my_cell_info.zm_cell);
1983
1984 // --- 3. Allocate and Distribute the Global Map ---
1985 // Allocate memory for the global map that will hold information from all ranks.
1986 ierr = PetscMalloc1(size, &user->RankCellInfoMap); CHKERRQ(ierr);
1987
1988 // Perform the collective communication to gather the `RankCellInfo` struct from every rank.
1989 // Each rank sends its `my_cell_info` and receives the complete array in `user->RankCellInfoMap`.
1990 // We use MPI_BYTE to ensure portability across different systems and struct padding.
1991 ierr = MPI_Allgather(&my_cell_info, sizeof(RankCellInfo), MPI_BYTE,
1992 user->RankCellInfoMap, sizeof(RankCellInfo), MPI_BYTE,
1993 PETSC_COMM_WORLD); CHKERRQ(ierr);
1994
1995 LOG_ALLOW(GLOBAL, LOG_INFO, "Domain cell decomposition map created and distributed successfully.\n");
1996
1997 PetscFunctionReturn(0);
1998}
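
/*
 * Lookup sketch using the map built above (RankCellInfo field names as used
 * in this function; ci/cj/ck are an illustrative global cell index):
 *
 *   PetscMPIInt owner = MPI_PROC_NULL;
 *   for (PetscMPIInt r = 0; r < size; ++r) {
 *     const RankCellInfo *m = &user->RankCellInfoMap[r];
 *     if (ci >= m->xs_cell && ci < m->xs_cell + m->xm_cell &&
 *         cj >= m->ys_cell && cj < m->ys_cell + m->ym_cell &&
 *         ck >= m->zs_cell && ck < m->zs_cell + m->zm_cell) { owner = r; break; }
 *   }
 */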
1999
2000/**
2001 * @brief Performs a binary search for a key in a sorted array of PetscInt64.
2002 *
2003 * This is a standard binary search algorithm implemented as a PETSc-style helper function.
2004 * It efficiently determines if a given `key` exists within a `sorted` array.
2005 *
2006 * @param[in] n The number of elements in the array.
2007 * @param[in] arr A pointer to the sorted array of PetscInt64 values to be searched.
2008 * @param[in] key The PetscInt64 value to search for.
2009 * @param[out] found A pointer to a PetscBool that will be set to PETSC_TRUE if the key
2010 * is found, and PETSC_FALSE otherwise.
2011 *
2012 * @return PetscErrorCode 0 on success, or a non-zero PETSc error code on failure.
2013 *
2014 * @note The input array `arr` **must** be sorted in ascending order for the algorithm
2015 * to work correctly.
2016 */
2017PetscErrorCode BinarySearchInt64(PetscInt n, const PetscInt64 arr[], PetscInt64 key, PetscBool *found)
2018{
2019 PetscInt low = 0, high = n - 1;
2020
2021 PetscFunctionBeginUser;
2022
2023 // --- 1. Input Validation ---
2024 if (!found) {
 2025 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "Output pointer 'found' is NULL in BinarySearchInt64.");
2026 }
2027 if (n > 0 && !arr) {
2028 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "Input array 'arr' is NULL for n > 0.");
2029 }
2030
2031 // Initialize output
2032 *found = PETSC_FALSE;
2033
2034 // --- 2. Binary Search Algorithm ---
2035 while (low <= high) {
2036 // Use this form to prevent potential integer overflow on very large arrays
2037 PetscInt mid = low + (high - low) / 2;
2038
2039 if (arr[mid] == key) {
2040 *found = PETSC_TRUE; // Key found!
2041 break; // Exit the loop
2042 }
2043
2044 if (arr[mid] < key) {
2045 low = mid + 1; // Search in the right half
2046 } else {
2047 high = mid - 1; // Search in the left half
2048 }
2049 }
2050
2051 PetscFunctionReturn(0);
2052}
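
/*
 * Example (illustrative data; the array must be sorted in ascending order):
 *
 *   const PetscInt64 ids[] = {2, 5, 9, 42};
 *   PetscBool        found;
 *   ierr = BinarySearchInt64(4, ids, 9, &found); CHKERRQ(ierr);
 *   // found == PETSC_TRUE
 */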
2053
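/**
 * @brief Maps a global (i,j,k) node index to the PETSc global ordering.
 *
 * Computes the natural (application) flat index i + j*mx + k*mx*my and
 * converts it to the PETSc ordering through the DMDA's Application Ordering (AO).
 */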
2054static PetscInt Gidx(PetscInt i, PetscInt j, PetscInt k, UserCtx *user)
2055
2056{
2057 PetscInt nidx;
2058 DMDALocalInfo info = user->info;
2059
2060 PetscInt mx = info.mx, my = info.my;
2061
2062 AO ao;
2063 DMDAGetAO(user->da, &ao);
2064 nidx=i+j*mx+k*mx*my;
2065
2066 AOApplicationToPetsc(ao,1,&nidx);
2067
2068 return (nidx);
2069}
2070
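/**
 * @brief Computes the discrete divergence of the contravariant velocity field
 * and records its maximum magnitude and location.
 *
 * For each interior cell, the face-flux differences of lUcont are summed and
 * scaled by the inverse Jacobian (lAj). Cells adjacent to solid/blanked nodes
 * (lNvert sum > 0.1) and all physical boundary planes are zeroed. The global
 * maximum divergence, its flat index, and its (i,j,k) location are stored in
 * simCtx->MaxDiv, simCtx->MaxDivFlatArg, and simCtx->MaxDivx/y/z.
 *
 * @param[in,out] user Pointer to the UserCtx structure.
 * @return PetscErrorCode 0 on success.
 */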
2071PetscErrorCode Divergence(UserCtx *user )
2072{
2073 DM da = user->da, fda = user->fda;
2074 DMDALocalInfo info = user->info;
2075
2076 PetscInt ti = user->simCtx->step;
2077
2078 PetscInt xs = info.xs, xe = info.xs + info.xm;
2079 PetscInt ys = info.ys, ye = info.ys + info.ym;
2080 PetscInt zs = info.zs, ze = info.zs + info.zm;
2081 PetscInt mx = info.mx, my = info.my, mz = info.mz;
2082
2083 PetscInt lxs, lys, lzs, lxe, lye, lze;
2084 PetscInt i, j, k;
2085
2086 Vec Div;
2087 PetscReal ***div, ***aj, ***nvert,***p;
2088 Cmpnts ***ucont;
2089 PetscReal maxdiv;
2090
2091 lxs = xs; lxe = xe;
2092 lys = ys; lye = ye;
2093 lzs = zs; lze = ze;
2094
2095 if (xs==0) lxs = xs+1;
2096 if (ys==0) lys = ys+1;
2097 if (zs==0) lzs = zs+1;
2098
2099 if (xe==mx) lxe = xe-1;
2100 if (ye==my) lye = ye-1;
2101 if (ze==mz) lze = ze-1;
2102
2103 DMDAVecGetArray(fda,user->lUcont, &ucont);
2104 DMDAVecGetArray(da, user->lAj, &aj);
2105 VecDuplicate(user->P, &Div);
2106 DMDAVecGetArray(da, Div, &div);
2107 DMDAVecGetArray(da, user->lNvert, &nvert);
2108 DMDAVecGetArray(da, user->P, &p);
2109 for (k=lzs; k<lze; k++) {
2110 for (j=lys; j<lye; j++){
2111 for (i=lxs; i<lxe; i++) {
2112 if (k==10 && j==10 && i==1){
2113 LOG_ALLOW(LOCAL,LOG_INFO,"Pressure[10][10][1] = %f | Pressure[10][10][0] = %f \n ",p[k][j][i],p[k][j][i-1]);
2114 }
2115
2116 if (k==10 && j==10 && i==mx-3)
2117 LOG_ALLOW(LOCAL,LOG_INFO,"Pressure[10][10][%d] = %f | Pressure[10][10][%d] = %f \n ",mx-2,p[k][j][mx-2],mx-1,p[k][j][mx-1]);
2118 }
2119 }
2120 }
2121 DMDAVecRestoreArray(da, user->P, &p);
2122
2123
2124 for (k=lzs; k<lze; k++) {
2125 for (j=lys; j<lye; j++) {
2126 for (i=lxs; i<lxe; i++) {
2127 maxdiv = fabs((ucont[k][j][i].x - ucont[k][j][i-1].x +
2128 ucont[k][j][i].y - ucont[k][j-1][i].y +
2129 ucont[k][j][i].z - ucont[k-1][j][i].z)*aj[k][j][i]);
2130 if (nvert[k][j][i] + nvert[k+1][j][i] + nvert[k-1][j][i] +
2131 nvert[k][j+1][i] + nvert[k][j-1][i] +
2132 nvert[k][j][i+1] + nvert[k][j][i-1] > 0.1) maxdiv = 0.;
2133 div[k][j][i] = maxdiv;
2134
2135 }
2136 }
2137 }
2138
2139 if (zs==0) {
2140 k=0;
2141 for (j=ys; j<ye; j++) {
2142 for (i=xs; i<xe; i++) {
2143 div[k][j][i] = 0.;
2144 }
2145 }
2146 }
2147
2148 if (ze == mz) {
2149 k=mz-1;
2150 for (j=ys; j<ye; j++) {
2151 for (i=xs; i<xe; i++) {
2152 div[k][j][i] = 0.;
2153 }
2154 }
2155 }
2156
2157 if (xs==0) {
2158 i=0;
2159 for (k=zs; k<ze; k++) {
2160 for (j=ys; j<ye; j++) {
2161 div[k][j][i] = 0.;
2162 }
2163 }
2164 }
2165
2166 if (xe==mx) {
2167 i=mx-1;
2168 for (k=zs; k<ze; k++) {
2169 for (j=ys; j<ye; j++) {
2170 div[k][j][i] = 0;
2171 }
2172 }
2173 }
2174
2175 if (ys==0) {
2176 j=0;
2177 for (k=zs; k<ze; k++) {
2178 for (i=xs; i<xe; i++) {
2179 div[k][j][i] = 0.;
2180 }
2181 }
2182 }
2183
2184 if (ye==my) {
2185 j=my-1;
2186 for (k=zs; k<ze; k++) {
2187 for (i=xs; i<xe; i++) {
2188 div[k][j][i] = 0.;
2189 }
2190 }
2191 }
2192 DMDAVecRestoreArray(da, Div, &div);
2193 PetscInt MaxFlatIndex;
2194
2195 VecMax(Div, &MaxFlatIndex, &maxdiv);
2196
 2197 LOG_ALLOW(GLOBAL,LOG_INFO,"[Step %d] The maximum divergence is %e at flat index %d.\n",ti,maxdiv,MaxFlatIndex);
2198
2199 user->simCtx->MaxDivFlatArg = MaxFlatIndex;
2200 user->simCtx->MaxDiv = maxdiv;
2201
2202 for (k=zs; k<ze; k++) {
2203 for (j=ys; j<ye; j++) {
2204 for (i=xs; i<xe; i++) {
2205 if (Gidx(i,j,k,user) == MaxFlatIndex) {
 2206 LOG_ALLOW(GLOBAL,LOG_INFO,"[Step %d] The maximum divergence (%e) is at location [%d][%d][%d].\n", ti, maxdiv,k,j,i);
2207 user->simCtx->MaxDivz = k;
2208 user->simCtx->MaxDivy = j;
2209 user->simCtx->MaxDivx = i;
2210 }
2211 }
2212 }
2213 }
2214
2215
2216
2217 DMDAVecRestoreArray(da, user->lNvert, &nvert);
2218 DMDAVecRestoreArray(fda, user->lUcont, &ucont);
2219 DMDAVecRestoreArray(da, user->lAj, &aj);
2220 VecDestroy(&Div);
2221 return(0);
2222}
PetscErrorCode TranslateModernBCsToLegacy(UserCtx *user)
Definition Boundaries.c:391
PetscErrorCode CalculateAllGridMetrics(SimCtx *simCtx)
Orchestrates the calculation of all grid metrics.
Definition Metric.c:1639
PetscErrorCode DefineAllGridDimensions(SimCtx *simCtx)
Orchestrates the parsing and setting of grid dimensions for all blocks.
Definition grid.c:57
PetscErrorCode BroadcastAllBoundingBoxes(UserCtx *user, BoundingBox **bboxlist)
Broadcasts the bounding box information collected on rank 0 to all other ranks.
Definition grid.c:896
PetscErrorCode InitializeAllGridDMs(SimCtx *simCtx)
Orchestrates the creation of DMDA objects for every block and multigrid level.
Definition grid.c:230
PetscErrorCode AssignAllGridCoordinates(SimCtx *simCtx)
Orchestrates the assignment of physical coordinates to all DMDA objects.
Definition grid.c:317
PetscErrorCode GatherAllBoundingBoxes(UserCtx *user, BoundingBox **allBBoxes)
Gathers local bounding boxes from all MPI processes to rank 0.
Definition grid.c:837
PetscErrorCode ParsePostProcessingSettings(SimCtx *simCtx)
Initializes post-processing settings from a config file and command-line overrides.
Definition io.c:1935
PetscErrorCode ParseAllBoundaryConditions(UserCtx *user, const char *bcs_input_filename)
Parses the boundary conditions file to configure the type, handler, and any associated parameters for...
Definition io.c:339
void set_allowed_functions(const char **functionList, int count)
Sets the global list of function names that are allowed to log.
Definition logging.c:124
PetscBool is_function_allowed(const char *functionName)
Checks if a given function is in the allow-list.
Definition logging.c:159
#define LOG_ALLOW_SYNC(scope, level, fmt,...)
----— DEBUG ---------------------------------------— #define LOG_ALLOW(scope, level,...
Definition logging.h:274
#define LOCAL
Logging scope definitions for controlling message output.
Definition logging.h:44
#define GLOBAL
Scope for global logging across all processes.
Definition logging.h:45
#define LOG_ALLOW(scope, level, fmt,...)
Logging macro that checks both the log level and whether the calling function is in the allowed-funct...
Definition logging.h:207
PetscErrorCode print_log_level(void)
Prints the current logging level to the console.
Definition logging.c:86
PetscErrorCode LoadAllowedFunctionsFromFile(const char filename[], char ***funcsOut, PetscInt *nOut)
Load function names from a text file.
Definition logging.c:568
LogLevel get_log_level()
Retrieves the current logging level from the environment variable LOG_LEVEL.
Definition logging.c:49
PetscErrorCode ProfilingInitialize(SimCtx *simCtx)
Initializes the custom profiling system using configuration from SimCtx.
Definition logging.c:955
@ LOG_ERROR
Critical errors that may halt the program.
Definition logging.h:29
@ LOG_INFO
Informational messages about program execution.
Definition logging.h:32
@ LOG_WARNING
Non-critical issues that warrant attention.
Definition logging.h:30
@ LOG_DEBUG
Detailed debugging information.
Definition logging.h:33
PetscErrorCode GetOwnedCellRange(const DMDALocalInfo *info_nodes, PetscInt dim, PetscInt *xs_cell_global_out, PetscInt *xm_cell_local_out)
Gets the global starting index of cells owned by this rank and the number of such cells.
Definition setup.c:1344
PetscErrorCode SetupDomainRankInfo(SimCtx *simCtx)
Sets up the full rank communication infrastructure for all blocks.
Definition setup.c:1687
PetscErrorCode GetGhostedCellRange(const DMDALocalInfo *info_nodes, const RankNeighbors *neighbors, PetscInt dim, PetscInt *xs_cell_global_out, PetscInt *xm_cell_local_out)
Gets the global cell range for a rank, including boundary cells.
Definition setup.c:1452
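A sketch contrasting the owned and ghosted range queries (dim = 0 is assumed to select the i-direction; user->info and user->neighbors are the UserCtx members documented below):
    PetscInt       xs_own, xm_own, xs_gh, xm_gh;
    PetscErrorCode ierr;
    ierr = GetOwnedCellRange(&user->info, 0, &xs_own, &xm_own);                   CHKERRQ(ierr);
    ierr = GetGhostedCellRange(&user->info, &user->neighbors, 0, &xs_gh, &xm_gh); CHKERRQ(ierr);
    /* the ghosted range starts earlier or runs longer wherever a neighbor rank exists */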
PetscErrorCode Deallocate3DArrayVector(Cmpnts ***array, PetscInt nz, PetscInt ny)
Deallocates a 3D array of Cmpnts structures allocated by Allocate3DArrayVector.
Definition setup.c:1245
PetscErrorCode SetupGridAndSolvers(SimCtx *simCtx)
The main orchestrator for setting up all grid-related components.
Definition setup.c:577
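Read together with the grid entries above, a plausible delegation sequence is the following (inferred from this index only; the authoritative order lives in setup.c:577, and ierr/simCtx are assumed in scope):
    ierr = DefineAllGridDimensions(simCtx);  CHKERRQ(ierr); /* grid.c:57     */
    ierr = InitializeAllGridDMs(simCtx);     CHKERRQ(ierr); /* grid.c:230    */
    ierr = AssignAllGridCoordinates(simCtx); CHKERRQ(ierr); /* grid.c:317    */
    ierr = CalculateAllGridMetrics(simCtx);  CHKERRQ(ierr); /* Metric.c:1639 */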
static PetscErrorCode SetupSolverParameters(SimCtx *simCtx)
Sets solver-related parameters on the context (inferred from the name; undocumented in the source).
Definition setup.c:552
static PetscInt Gidx(PetscInt i, PetscInt j, PetscInt k, UserCtx *user)
Returns the global linear index for grid node (i, j, k) (inferred from the signature; undocumented in the source).
Definition setup.c:2054
PetscErrorCode CreateAndInitializeAllVectors(SimCtx *simCtx)
Creates and initializes all PETSc Vec objects for all fields.
Definition setup.c:610
PetscErrorCode ComputeAndStoreNeighborRanks(UserCtx *user)
Computes and stores the Cartesian neighbor ranks for the DMDA decomposition.
Definition setup.c:1494
PetscErrorCode Contra2Cart(UserCtx *user)
Reconstructs Cartesian velocity (Ucat) at cell centers from contravariant velocity (Ucont) defined on cell faces.
Definition setup.c:1785
PetscErrorCode Allocate3DArrayScalar(PetscReal ****array, PetscInt nz, PetscInt ny, PetscInt nx)
Allocates a 3D array of PetscReal values using PetscCalloc.
Definition setup.c:1085
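A sketch of the allocate/use/free cycle (nz/ny/nx and k/j/i are assumed in scope; the [k][j][i] indexing order is inferred from the nz, ny, nx argument order, and the Cmpnts variant below mirrors it):
    PetscReal    ***p = NULL;
    PetscErrorCode ierr;
    ierr = Allocate3DArrayScalar(&p, nz, ny, nx); CHKERRQ(ierr); /* zero-initialized via PetscCalloc */
    p[k][j][i] = 1.0;
    ierr = Deallocate3DArrayScalar(p, nz, ny);    CHKERRQ(ierr);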
PetscErrorCode CreateSimulationContext(int argc, char **argv, SimCtx **p_simCtx)
Allocates and populates the master SimulationContext object.
Definition setup.c:41
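A hedged driver sketch; only CreateSimulationContext's contract is documented above, and the FinalizeSimulation signature is assumed:
    int main(int argc, char **argv)
    {
        SimCtx        *simCtx = NULL;
        PetscErrorCode ierr;
        ierr = PetscInitialize(&argc, &argv, NULL, NULL); if (ierr) return (int)ierr;
        ierr = CreateSimulationContext(argc, argv, &simCtx); CHKERRQ(ierr);
        /* ... SetupGridAndSolvers(simCtx), time loop, etc. ... */
        ierr = FinalizeSimulation(simCtx); CHKERRQ(ierr); /* assumed signature */
        ierr = PetscFinalize();
        return (int)ierr;
    }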
PetscErrorCode SetDMDAProcLayout(DM dm, UserCtx *user)
Sets the processor layout for a given DMDA based on PETSc options.
Definition setup.c:1617
PetscErrorCode Divergence(UserCtx *user)
Computes the divergence of the flow field, feeding the MaxDiv diagnostics (inferred; undocumented in the source).
Definition setup.c:2071
PetscErrorCode BinarySearchInt64(PetscInt n, const PetscInt64 arr[], PetscInt64 key, PetscBool *found)
Performs a binary search for a key in a sorted array of PetscInt64.
Definition setup.c:2017
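A self-contained sketch (the array must be sorted ascending for the search to be valid):
    const PetscInt64 ids[4] = {3, 7, 10, 42};
    PetscBool        found  = PETSC_FALSE;
    PetscErrorCode   ierr;
    ierr = BinarySearchInt64(4, ids, 10, &found); CHKERRQ(ierr); /* found == PETSC_TRUE */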
static PetscErrorCode AllocateContextHierarchy(SimCtx *simCtx)
Allocates the memory for the UserMG and UserCtx hierarchy.
Definition setup.c:459
PetscErrorCode Allocate3DArrayVector(Cmpnts ****array, PetscInt nz, PetscInt ny, PetscInt nx)
Allocates a 3D array of Cmpnts structures using PetscCalloc.
Definition setup.c:1187
PetscErrorCode UpdateLocalGhosts(UserCtx *user, const char *fieldName)
Updates the local vector (including ghost points) from its corresponding global vector.
Definition setup.c:779
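A sketch of the usual pairing with Contra2Cart (the field-name keys "Ucont" and "Ucat" are assumptions about the lookup strings):
    PetscErrorCode ierr;
    ierr = UpdateLocalGhosts(user, "Ucont"); CHKERRQ(ierr); /* refresh lUcont ghosts first       */
    ierr = Contra2Cart(user);                CHKERRQ(ierr); /* rebuild Ucat at cell centers      */
    ierr = UpdateLocalGhosts(user, "Ucat");  CHKERRQ(ierr); /* propagate the reconstructed field */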
PetscErrorCode SetupBoundaryConditions(SimCtx *simCtx)
(Orchestrator) Sets up all boundary conditions for the simulation.
Definition setup.c:1035
PetscErrorCode SetupDomainCellDecompositionMap(UserCtx *user)
Creates and distributes a map of the domain's cell decomposition to all ranks.
Definition setup.c:1940
PetscErrorCode Deallocate3DArrayScalar(PetscReal ***array, PetscInt nz, PetscInt ny)
Deallocates a 3D array of PetscReal values allocated by Allocate3DArrayScalar.
Definition setup.c:1130
PetscMPIInt rank_zm
Definition variables.h:178
PetscInt MHV
Definition variables.h:540
PetscInt isc
Definition variables.h:643
PetscInt turbine
Definition variables.h:540
PetscInt fishcyl
Definition variables.h:540
PetscInt clark
Definition variables.h:579
PetscInt implicit_type
Definition variables.h:543
PetscInt movefsi
Definition variables.h:535
Vec lCent
Definition variables.h:673
Vec GridSpace
Definition variables.h:673
PetscInt moveframe
Definition variables.h:536
Vec P_nodal
Definition variables.h:702
Vec JCsi
Definition variables.h:676
Vec KAj
Definition variables.h:677
PetscInt TwoD
Definition variables.h:536
PetscInt pseudo_periodic
Definition variables.h:563
UserCtx * user
Definition variables.h:418
PetscInt fish_c
Definition variables.h:540
PetscInt ys_cell
Definition variables.h:183
PetscInt dgf_z
Definition variables.h:537
Vec JEta
Definition variables.h:676
PetscReal poisson_tol
Definition variables.h:547
Vec Zet
Definition variables.h:673
PetscInt xs_cell
Definition variables.h:183
PetscMPIInt rank
Definition variables.h:516
PetscInt mglevels
Definition variables.h:689
PetscInt fish
Definition variables.h:540
PetscInt LV
Definition variables.h:540
PetscReal angle
Definition variables.h:556
PetscMPIInt rank_yp
Definition variables.h:177
PetscInt thin
Definition variables.h:536
PetscInt grid1d
Definition variables.h:562
PetscInt block_number
Definition variables.h:562
Vec lIEta
Definition variables.h:675
PetscInt da_procs_z
Definition variables.h:568
PetscInt blkpbc
Definition variables.h:563
PetscInt sediment
Definition variables.h:535
Vec lIZet
Definition variables.h:675
UserCtx * user_f
Definition variables.h:690
PetscInt channelz
Definition variables.h:562
Vec lNvert
Definition variables.h:657
Vec Phi
Definition variables.h:657
SimCtx * simCtx
Back-pointer to the master simulation context.
Definition variables.h:633
PetscInt rans
Definition variables.h:578
PetscReal StartTime
Definition variables.h:526
PetscInt dgf_az
Definition variables.h:537
PetscReal FluxOutSum
Definition variables.h:571
PetscMPIInt rank_ym
Definition variables.h:177
PetscReal CMy_c
Definition variables.h:557
Vec IZet
Definition variables.h:675
PetscMPIInt rank_xp
Definition variables.h:176
Vec Centz
Definition variables.h:674
char ** bcs_files
Definition variables.h:570
PetscReal max_angle
Definition variables.h:556
Vec IEta
Definition variables.h:675
PetscReal imp_rtol
Definition variables.h:544
PetscInt ksc
Definition variables.h:643
PetscReal st
Definition variables.h:549
PetscInt duplicate
Definition variables.h:596
PetscInt tiout
Definition variables.h:525
PetscInt implicit
Definition variables.h:543
Vec lZet
Definition variables.h:673
PetscBool assignedA
Definition variables.h:669
UserMG usermg
Definition variables.h:599
char allowedFile[PETSC_MAX_PATH_LEN]
Definition variables.h:600
Vec Csi
Definition variables.h:673
PetscBool rstart_flg
Definition variables.h:528
PetscInt da_procs_y
Definition variables.h:568
PetscReal imp_atol
Definition variables.h:544
PetscInt testfilter_1d
Definition variables.h:581
PetscInt FieldInitialization
Definition variables.h:550
PetscReal ren
Definition variables.h:549
PetscReal Crotz
Definition variables.h:566
PetscInt InitialGuessOne
Definition variables.h:548
Vec lUcont_rm1
Definition variables.h:661
PetscReal wavelength
Definition variables.h:556
PetscInt mixed
Definition variables.h:579
PetscInt zm_cell
Definition variables.h:184
Vec lIAj
Definition variables.h:675
PetscInt zs_cell
Definition variables.h:183
IBMVNodes * ibmv
Definition variables.h:593
PetscInt _this
Definition variables.h:643
Vec lKEta
Definition variables.h:677
char criticalFuncsFile[PETSC_MAX_PATH_LEN]
Definition variables.h:609
PetscReal St_exp
Definition variables.h:556
PetscReal dt
Definition variables.h:527
PetscInt StepsToRun
Definition variables.h:524
PetscInt k_periodic
Definition variables.h:563
PetscInt inletprofile
Definition variables.h:562
Vec Ucat_nodal
Definition variables.h:703
RankNeighbors neighbors
Definition variables.h:642
PetscReal cdisy
Definition variables.h:549
char ** criticalFuncs
Definition variables.h:611
PetscBool rstart_fsi
Definition variables.h:595
PetscInt np
Definition variables.h:585
PetscInt jsc
Definition variables.h:643
PetscInt thislevel
Definition variables.h:419
PetscBool averaging
Definition variables.h:582
Vec lJCsi
Definition variables.h:676
PetscInt ccc
Definition variables.h:574
PetscReal ratio
Definition variables.h:575
PetscInt mg_idx
Definition variables.h:545
Vec Ucont
Definition variables.h:657
PetscInt StartStep
Definition variables.h:523
PetscInt mg_MAX_IT
Definition variables.h:545
PetscBool OnlySetup
Definition variables.h:529
PetscInt rotatefsi
Definition variables.h:535
Vec Ubcs
Boundary condition velocity values. (Comment: "An ugly hack, waste of memory")
Definition variables.h:120
PetscReal cdisz
Definition variables.h:549
Vec Qcrit
Definition variables.h:704
PetscScalar x
Definition variables.h:100
Vec JZet
Definition variables.h:676
PetscInt dgf_x
Definition variables.h:537
PetscInt pizza
Definition variables.h:540
PetscReal MaxDiv
Definition variables.h:606
Vec Centx
Definition variables.h:674
BCS Bcs
Definition variables.h:651
char grid_file[PETSC_MAX_PATH_LEN]
Definition variables.h:567
Vec lPhi
Definition variables.h:657
PetscReal max_cs
Definition variables.h:580
PetscInt invicid
Definition variables.h:536
char ** allowedFuncs
Definition variables.h:602
PetscInt xm_cell
Definition variables.h:184
Vec lUcont_o
Definition variables.h:660
RankCellInfo * RankCellInfoMap
Definition variables.h:696
PetscInt radi
Definition variables.h:558
PetscInt mg_poItr
Definition variables.h:545
PetscInt STRONG_COUPLING
Definition variables.h:548
PetscInt ym_cell
Definition variables.h:184
Vec Ucat_o
Definition variables.h:660
PetscInt MaxDivx
Definition variables.h:607
UserCtx * user_c
Definition variables.h:690
PetscInt poisson
Definition variables.h:546
PetscInt k_homo_filter
Definition variables.h:581
PetscInt MaxDivy
Definition variables.h:607
PetscInt NumberOfBodies
Definition variables.h:555
PetscInt Ogrid
Definition variables.h:562
PetscInt MaxDivz
Definition variables.h:607
BoundingBox * bboxlist
Definition variables.h:588
PetscInt j_homo_filter
Definition variables.h:581
Vec lKZet
Definition variables.h:677
Vec Eta
Definition variables.h:673
PetscInt eel
Definition variables.h:540
PetscInt MaxDivFlatArg
Definition variables.h:607
Vec lNu_t
Definition variables.h:680
PetscReal FluxInSum
Definition variables.h:571
PetscMPIInt rank_xm
Definition variables.h:176
Vec Nu_t
Definition variables.h:680
Vec lJEta
Definition variables.h:676
Vec lCsi
Definition variables.h:673
PetscReal CMz_c
Definition variables.h:557
Vec lGridSpace
Definition variables.h:673
PetscBool generate_grid
Definition variables.h:564
PetscInt thislevel
Definition variables.h:689
PetscReal imp_stol
Definition variables.h:544
PetscInt nAllowed
Definition variables.h:603
Vec ICsi
Definition variables.h:675
PetscScalar z
Definition variables.h:100
PetscInt OutputFreq
Definition variables.h:531
Vec lKCsi
Definition variables.h:677
Vec Ucat
Definition variables.h:657
Vec ParticleCount
Definition variables.h:697
Vec Ucont_o
Definition variables.h:660
PetscInt i_homo_filter
Definition variables.h:581
PetscInt wallfunction
Definition variables.h:579
PetscInt rheology
Definition variables.h:535
PetscReal Flux_in
Definition variables.h:556
PetscInt mglevels
Definition variables.h:425
PetscReal cdisx
Definition variables.h:549
PetscInt dgf_ax
Definition variables.h:537
PetscReal cfl
Definition variables.h:549
PetscInt mglevels
Definition variables.h:545
PetscInt num_bcs_files
Definition variables.h:569
DM dm_swarm
Definition variables.h:587
PetscBool useCfg
Definition variables.h:601
PetscBool readFields
Definition variables.h:586
PetscInt central
Definition variables.h:548
PetscBool useCriticalFuncsCfg
Definition variables.h:610
PetscReal Fluxsum
Definition variables.h:571
Vec lJZet
Definition variables.h:676
Vec Nvert_o
Definition variables.h:660
PetscReal Croty
Definition variables.h:566
Vec IAj
Definition variables.h:675
PetscReal grid_rotation_angle
Definition variables.h:565
PetscInt dynamic_freq
Definition variables.h:579
PetscInt da_procs_x
Definition variables.h:568
Vec JAj
Definition variables.h:676
PetscReal U_bc
Definition variables.h:573
Vec KEta
Definition variables.h:677
Cmpnts InitialConstantContra
Definition variables.h:551
PetscMPIInt rank_zp
Definition variables.h:178
Vec Ucont_rm1
Definition variables.h:661
PetscInt i_periodic
Definition variables.h:563
Vec lUcont
Definition variables.h:657
PetscInt step
Definition variables.h:521
PetscReal AreaOutSum
Definition variables.h:572
PetscInt dgf_ay
Definition variables.h:537
Vec lAj
Definition variables.h:673
Vec lICsi
Definition variables.h:675
PetscInt testfilter_ik
Definition variables.h:581
DMDALocalInfo info
Definition variables.h:637
PetscReal chact_leng
Definition variables.h:559
PetscInt hydro
Definition variables.h:540
Vec lUcat
Definition variables.h:657
PostProcessParams * pps
Definition variables.h:616
PetscScalar y
Definition variables.h:100
PetscMPIInt size
Definition variables.h:517
@ EXEC_MODE_POSTPROCESSOR
Definition variables.h:497
Vec lEta
Definition variables.h:673
Vec KZet
Definition variables.h:677
Vec Cent
Definition variables.h:673
PetscInt regime
Definition variables.h:558
PetscInt les
Definition variables.h:578
Vec Nvert
Definition variables.h:657
Vec KCsi
Definition variables.h:677
MGCtx * mgctx
Definition variables.h:428
PetscInt mg_preItr
Definition variables.h:545
Vec lNvert_o
Definition variables.h:660
Vec Centy
Definition variables.h:674
PetscViewer logviewer
Definition variables.h:530
PetscBool multinullspace
Definition variables.h:666
ExecutionMode exec_mode
Definition variables.h:532
PetscInt ParticleInitialization
Definition variables.h:589
PetscReal L_dim
Definition variables.h:556
PetscInt imp_MAX_IT
Definition variables.h:543
Vec lJAj
Definition variables.h:676
PetscInt cop
Definition variables.h:540
PetscReal ti
Definition variables.h:522
PetscInt Pipe
Definition variables.h:540
PetscReal vnn
Definition variables.h:549
PetscInt rotateframe
Definition variables.h:536
IBMNodes * ibm
Definition variables.h:592
PetscInt nCriticalFuncs
Definition variables.h:612
PetscReal summationRHS
Definition variables.h:605
Vec lKAj
Definition variables.h:677
PetscInt immersed
Definition variables.h:535
char PostprocessingControlFile[PETSC_MAX_PATH_LEN]
Definition variables.h:615
PetscInt blank
Definition variables.h:536
PetscInt dgf_y
Definition variables.h:537
PetscInt LoggingFrequency
Definition variables.h:604
PetscReal CMx_c
Definition variables.h:557
Vec Psi
Definition variables.h:698
Vec P_o
Definition variables.h:660
Vec Uch
Characteristic velocity for boundary conditions.
Definition variables.h:121
PetscInt j_periodic
Definition variables.h:563
PetscInt wing
Definition variables.h:540
FSInfo * fsi
Definition variables.h:594
Defines a 3D axis-aligned bounding box.
Definition variables.h:153
A 3D point or vector with PetscScalar components.
Definition variables.h:99
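Given the x, y, z members listed above, a Cmpnts value can be built and measured like this (designated initializers avoid assuming member order):
    Cmpnts      v    = {.x = 1.0, .y = 2.0, .z = 3.0};
    PetscScalar mag2 = v.x*v.x + v.y*v.y + v.z*v.z; /* squared magnitude */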
Context for Multigrid operations.
Definition variables.h:417
A lean struct to hold the global cell ownership range for a single MPI rank.
Definition variables.h:182
Stores the MPI ranks of neighboring subdomains.
Definition variables.h:175
The master context for the entire simulation.
Definition variables.h:513
User-defined context containing data specific to a single computational grid level.
Definition variables.h:630
User-level context for managing the entire multigrid hierarchy.
Definition variables.h:424