PICurv 0.1.0
A Parallel Particle-In-Cell Solver for Curvilinear LES
Loading...
Searching...
No Matches
momentumsolvers.h File Reference
#include "variables.h"
#include "logging.h"
#include "rhs.h"
#include "Boundaries.h"
Include dependency graph for momentumsolvers.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Macros

#define MOMENTUMSOLVERS_H
 

Functions

PetscErrorCode MomentumSolver_Explicit_RungeKutta4 (UserCtx *user, IBMNodes *ibm, FSInfo *fsi)
 Advances the momentum equations using an explicit 4th-order Runge-Kutta scheme.
 
PetscErrorCode MomentumSolver_DualTime_Picard_RK4 (UserCtx *user, IBMNodes *ibm, FSInfo *fsi)
 Solves the Momentum Equations using Dual-Time Stepping with a Fixed-Point RK4 Smoother.
 

Macro Definition Documentation

◆ MOMENTUMSOLVERS_H

#define MOMENTUMSOLVERS_H

Definition at line 2 of file momentumsolvers.h.

Function Documentation

◆ MomentumSolver_Explicit_RungeKutta4()

PetscErrorCode MomentumSolver_Explicit_RungeKutta4 ( UserCtx *user,
IBMNodes *ibm,
FSInfo *fsi 
)
extern

Advances the momentum equations using an explicit 4th-order Runge-Kutta scheme.

Parameters
user — Array of UserCtx structs for all blocks.
ibm — (Optional) Pointer to IBM data. Pass NULL if disabled.
fsi — (Optional) Pointer to FSI data. Pass NULL if disabled.
Returns
PetscErrorCode 0 on success.

Advances the momentum equations using an explicit 4th-order Runge-Kutta scheme.

This function computes an intermediate, non-divergence-free contravariant velocity field (Ucont) at time t_{n+1} for all computational blocks.

This is a minimally-edited version of the legacy solver. It retains its internal loop over all blocks and is intended to be called once per time step from the main FlowSolver orchestrator. All former global variables are now accessed via the SimCtx passed in through the first block's UserCtx.

Note
This does not employ a Backward Euler implicit treatment of the time derivative. The physical timestep itself is advanced explicitly. The dual-time stepping is not used here.
Parameters
user — The array of UserCtx structs for all blocks.
ibm — (Optional) Pointer to the full array of IBM data structures. Pass NULL if disabled.
fsi — (Optional) Pointer to the full array of FSI data structures. Pass NULL if disabled.
Returns
PetscErrorCode 0 on success.

Definition at line 82 of file momentumsolvers.c.

83{
84 PetscErrorCode ierr;
85 // --- Context Acquisition ---
86 // Get the master simulation context from the first block's UserCtx.
87 // This is the bridge to access all former global variables.
88 SimCtx *simCtx = user[0].simCtx;
89 PetscReal dt = simCtx->dt;
90 //PetscReal st = simCtx->st;
91 PetscInt istage;
92 PetscReal alfa[] = {0.25, 1.0/3.0, 0.5, 1.0};
93
94 PetscFunctionBeginUser;
95
97
98 LOG_ALLOW(GLOBAL, LOG_INFO, "Executing explicit momentum solver (Runge-Kutta) for %d block(s).\n",simCtx->block_number);
99
100 // --- 1. Pre-Loop Initialization (Legacy Logic) ---
101 // This block prepares boundary conditions and allocates the RHS vector for all blocks
102 // before the main RK loop begins. This logic is preserved from the original code.
103 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Preparing all blocks for Runge-Kutta solve...\n");
104 for (PetscInt bi = 0; bi < simCtx->block_number; bi++) {
105
106 ierr = ApplyBoundaryConditions(&user[bi]); CHKERRQ(ierr);
107
108 /*
109 // Immersed boundary interpolation (if enabled)
110 if (simCtx->immersed) {
111 LOG_ALLOW(LOCAL, LOG_DEBUG, " Performing pre-RK IBM interpolation for block %d.\n", bi);
112 for (PetscInt ibi = 0; ibi < simCtx->NumberOfBodies; ibi++) {
113 // The 'ibm' and 'fsi' pointers are passed directly from FlowSolver
114 ierr = ibm_interpolation_advanced(&user[bi], &ibm[ibi], ibi, 1); CHKERRQ(ierr);
115 }
116 }
117 */
118
119 // Allocate the persistent RHS vector for this block's context
120 ierr = VecDuplicate(user[bi].Ucont, &user[bi].Rhs); CHKERRQ(ierr);
121 }
122 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Pre-loop initialization complete.\n");
123
124 // --- 2. Main Runge-Kutta Loop ---
125 // The legacy code had an outer `pseudot` loop that only ran once. We preserve it.
126 for (PetscInt pseudot = 0; pseudot < 1; pseudot++) {
127 // Loop over each block to perform the RK stages
128 for (PetscInt bi = 0; bi < simCtx->block_number; bi++) {
129 for (istage = 0; istage < 4; istage++) {
130 LOG_ALLOW(LOCAL, LOG_DEBUG, " Block %d, RK Stage %d (alpha=%.4f)...\n", bi, istage, alfa[istage]);
131
132 // a. Calculate the Right-Hand Side (RHS) of the momentum equation.
133 ierr = ComputeRHS(&user[bi], user[bi].Rhs); CHKERRQ(ierr);
134
135 // b. Advance Ucont to the next intermediate stage using the RK coefficient.
136 // Ucont_new = Ucont_old + alpha * dt * RHS
137 ierr = VecWAXPY(user[bi].Ucont, alfa[istage] * dt, user[bi].Rhs, user[bi].Ucont_o); CHKERRQ(ierr);
138
139 // c. Update local ghost values for the new intermediate Ucont.
140 ierr = DMGlobalToLocalBegin(user[bi].fda, user[bi].Ucont, INSERT_VALUES, user[bi].lUcont); CHKERRQ(ierr);
141 ierr = DMGlobalToLocalEnd(user[bi].fda, user[bi].Ucont, INSERT_VALUES, user[bi].lUcont); CHKERRQ(ierr);
142
143 // d. Re-apply boundary conditions for the new intermediate velocity.
144 // This is crucial for the stability and accuracy of the multi-stage scheme.
145 ierr = ApplyBoundaryConditions(&user[bi]); CHKERRQ(ierr);
146
147 } // End of RK stages for one block
148
149 /*
150 // Final IBM Interpolation for the block (if enabled)
151 if (simCtx->immersed) {
152 LOG_ALLOW(LOCAL, LOG_DEBUG, " Performing post-RK IBM interpolation for block %d.\n", bi);
153 for (PetscInt ibi = 0; ibi < simCtx->NumberOfBodies; ibi++) {
154 ierr = ibm_interpolation_advanced(&user[bi], &ibm[ibi], ibi, 1); CHKERRQ(ierr);
155 }
156 }
157 */
158
159 } // End loop over blocks
160
161 // --- 3. Inter-Block Communication (Legacy Logic) ---
162 // This is called after all blocks have completed their RK stages.
163 if (simCtx->block_number > 1) {
164 // LOG_ALLOW(GLOBAL, LOG_DEBUG, "Updating multi-block interfaces after RK stages.\n");
165 // ierr = Block_Interface_U(user); CHKERRQ(ierr);
166 }
167
168 } // End of pseudo-time loop
169
170 // --- 4. Cleanup ---
171 // Destroy the RHS vectors that were created at the start of this function.
172 for (PetscInt bi = 0; bi < simCtx->block_number; bi++) {
173 ierr = VecDestroy(&user[bi].Rhs); CHKERRQ(ierr);
174 }
175
176 LOG_ALLOW(GLOBAL, LOG_INFO, "Runge-Kutta solve completed for all blocks.\n");
177
179
180 PetscFunctionReturn(0);
181}
PetscErrorCode ApplyBoundaryConditions(UserCtx *user)
Main master function to apply all boundary conditions for a time step.
#define LOCAL
Logging scope definitions for controlling message output.
Definition logging.h:45
#define GLOBAL
Scope for global logging across all processes.
Definition logging.h:46
#define LOG_ALLOW(scope, level, fmt,...)
Logging macro that checks both the log level and whether the calling function is in the allowed-funct...
Definition logging.h:200
#define PROFILE_FUNCTION_END
Marks the end of a profiled code block.
Definition logging.h:746
@ LOG_INFO
Informational messages about program execution.
Definition logging.h:31
@ LOG_DEBUG
Detailed debugging information.
Definition logging.h:32
#define PROFILE_FUNCTION_BEGIN
Marks the beginning of a profiled code block (typically a function).
Definition logging.h:737
PetscErrorCode ComputeRHS(UserCtx *user, Vec Rhs)
Computes the Right-Hand Side (RHS) of the momentum equations.
Definition rhs.c:1192
PetscInt block_number
Definition variables.h:649
SimCtx * simCtx
Back-pointer to the master simulation context.
Definition variables.h:731
PetscReal dt
Definition variables.h:599
The master context for the entire simulation.
Definition variables.h:585
Here is the call graph for this function:
Here is the caller graph for this function:

◆ MomentumSolver_DualTime_Picard_RK4()

PetscErrorCode MomentumSolver_DualTime_Picard_RK4 ( UserCtx *user,
IBMNodes *ibm,
FSInfo *fsi 
)

Solves the Momentum Equations using Dual-Time Stepping with a Fixed-Point RK4 Smoother.


GLOSSARY & THEORETICAL BASIS

  1. METHODOLOGY: Dual-Time Stepping (Pseudo-Time Integration) We aim to solve the implicit BDF equation: R_spatial(U) + dU/dt_physical = 0. We do this by introducing a fictitious "Pseudo-Time" (tau) and iterating to steady state: dU/d(tau) = - [ R_spatial(U) + BDF_Terms(U) ] When dU/d(tau) -> 0, the physical time step is satisfied.
  2. ALGORITHM: Fixed-Point Iteration with Explicit Runge-Kutta This is technically a Fixed-Point iteration on the operator: U_new = U_old + pseudo_dt_scaling * dt_pseudo * Total_Residual(U_old) We use a 4-Stage Explicit RK scheme (Jameson-Schmidt-Turkel coeffs) to smooth errors.
  3. STABILITY: Backtracking Line Search If a pseudo-time step causes the Residual or Solution Error to GROW (Divergence), the solver "Backtracks": it restores the previous solution, cuts the pseudo-time step scaling factor (pseudo_dt_scaling) in half, and retries the iteration.

VARIABLE MAPPING

– Physics Variables (Legacy Names Kept) – ti : Physical Time Step Index. dt : Physical Time Step size (Delta t). st : Pseudo-Time Step size (Delta tau). alfa : Runge-Kutta stage coefficients {1/4, 1/3, 1/2, 1}.

– Convergence & Solver Control (Renamed) – pseudo_iter : Counter for the inner dual-time loop. pseudo_dt_scaling : Adaptive scalar for the pseudo-time step (formerly lambda). delta_sol_norm : The L_inf norm of the change in solution (dU).

resid_norm : The L_inf norm of the Total Residual (RHS).


GLOSSARY & THEORETICAL BASIS

  1. METHODOLOGY: Dual-Time Stepping (Pseudo-Time Integration) We aim to solve the implicit BDF equation: R_spatial(U) + dU/dt_physical = 0. We do this by introducing a fictitious "Pseudo-Time" (tau) and iterating to steady state: dU/d(tau) = - [ R_spatial(U) + BDF_Terms(U) ] When dU/d(tau) -> 0, the physical time step is satisfied.
  2. ALGORITHM: Fixed-Point Iteration with Explicit Runge-Kutta This is technically a Fixed-Point iteration on the operator: U_new = U_old + pseudo_dt_scaling * dt_pseudo * Total_Residual(U_old) We use a 4-Stage Explicit RK scheme (Jameson-Schmidt-Turkel coeffs) to smooth errors.
  3. STABILITY: Backtracking Line Search If a pseudo-time step causes the Residual or Solution Error to GROW (Divergence), the solver "Backtracks": it restores the previous solution, cuts the pseudo-time step scaling factor (pseudo_dt_scaling) in half, and retries the iteration.

VARIABLE MAPPING

– Physics Variables (Legacy Names Kept) – ti : Physical Time Step Index. dt : Physical Time Step size (Delta t). st : Pseudo-Time Step size (Delta tau). alfa : Runge-Kutta stage coefficients {1/4, 1/3, 1/2, 1}.

– Convergence & Solver Control (Renamed) – pseudo_iter : Counter for the inner dual-time loop. pseudo_dt_scaling : Adaptive scalar for the pseudo-time step (formerly lambda). delta_sol_norm : The L_inf norm of the change in solution (dU).

resid_norm : The L_inf norm of the Total Residual (RHS).

Definition at line 224 of file momentumsolvers.c.

225{
226 // --- CONTEXT ACQUISITION BLOCK ---
227 SimCtx *simCtx = user[0].simCtx;
228 const PetscInt block_number = simCtx->block_number;
229
230 // Legacy Names (Physics/Time) - Kept for brevity in formulas
231 const PetscInt ti = simCtx->step;
232 const PetscReal dt = simCtx->dt; // Physical Time Step
233 const PetscReal cfl = simCtx->pseudo_cfl; // Initial CFL Scaling
234 const PetscReal alfa[] = {0.25, 1.0/3.0, 0.5, 1.0}; // RK4 Coefficients
235 Vec *pRhs; // Per-block backup of Rhs at last accepted pseudo-state.
236
237 // State flags
238 PetscBool force_restart = PETSC_FALSE;
239
240 // Renamed Solver Parameters
241 const PetscInt max_pseudo_steps = simCtx->mom_max_pseudo_steps; // Max Dual-Time Iterations
242 const PetscReal tol_abs_delta = simCtx->mom_atol; // Stop if |dU| < tol
243 const PetscReal tol_rtol_delta = simCtx->mom_rtol; // Stop if |dU|/|dU0| < tol
244 // --- END CONTEXT ACQUISITION BLOCK ---
245
246 PetscErrorCode ierr;
247 PetscMPIInt rank;
248 PetscInt istage, pseudo_iter, ibi;
249 PetscReal ts, te, cput;
250
251 // --- Global Convergence Metrics ---
252 PetscReal global_norm_delta = 10.0;
253 PetscReal global_rel_delta = 1.0;
254 PetscReal global_rel_resid = 1.0;
255
256 // --- Local Metric Arrays (Per Block) ---
257 PetscReal *delta_sol_norm_init, *delta_sol_norm_prev, *delta_sol_norm_curr, *delta_sol_rel_curr;
258 PetscReal *resid_norm_init, *resid_norm_prev, *resid_norm_curr, *resid_rel_curr;
259 PetscReal *pseudo_dt_scaling;
260
261 PetscFunctionBeginUser;
263 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
264 LOG_ALLOW(GLOBAL, LOG_INFO, "Executing Dual-Time Momentum Solver for %d block(s).\n", block_number);
265
266 // --- Allocate metric arrays ---
267 ierr = PetscMalloc2(block_number, &delta_sol_norm_init, block_number, &delta_sol_norm_prev); CHKERRQ(ierr);
268 ierr = PetscMalloc2(block_number, &delta_sol_norm_curr, block_number, &delta_sol_rel_curr); CHKERRQ(ierr);
269 ierr = PetscMalloc2(block_number, &resid_norm_init, block_number, &resid_norm_prev); CHKERRQ(ierr);
270 ierr = PetscMalloc2(block_number, &resid_norm_curr, block_number, &pseudo_dt_scaling); CHKERRQ(ierr);
271 ierr = PetscMalloc1(block_number, &resid_rel_curr); CHKERRQ(ierr);
272 ierr = PetscMalloc1(block_number, &pRhs); CHKERRQ(ierr);
273
274 ierr = PetscTime(&ts); CHKERRQ(ierr);
275
276 // --- 1. Pre-Loop Initialization ---
277
278 if (block_number > 1) {
279 // ierr = Block_Interface_U(user); CHKERRQ(ierr);
280 }
281
282 for (PetscInt bi = 0; bi < block_number; bi++) {
283 ierr = ApplyBoundaryConditions(&user[bi]); CHKERRQ(ierr);
284
285 // Immersed boundary interpolation (if enabled)
286
287 // Allocate workspace
288 ierr = VecDuplicate(user[bi].Ucont, &user[bi].Rhs); CHKERRQ(ierr);
289 ierr = VecDuplicate(user[bi].Rhs, &pRhs[bi]); CHKERRQ(ierr);
290 ierr = VecDuplicate(user[bi].Ucont, &user[bi].dUcont); CHKERRQ(ierr);
291 ierr = VecDuplicate(user[bi].Ucont, &user[bi].pUcont); CHKERRQ(ierr);
292
293 // Initialize Backup (pUcont) with current state
294 ierr = VecCopy(user[bi].Ucont, user[bi].pUcont); CHKERRQ(ierr);
295
296 // --- Calculate Initial Total Residual (Spatial + Temporal) ---
297 ierr = ComputeTotalResidual(&user[bi]); CHKERRQ(ierr);
298
299 // Backup Rhs with current state
300 ierr = VecCopy(user[bi].Rhs, pRhs[bi]); CHKERRQ(ierr);
301
302 // Compute Initial Norms
303 ierr = VecNorm(user[bi].Rhs, NORM_INFINITY, &resid_norm_init[bi]); CHKERRQ(ierr);
304
305 // Initialize history for backtracking logic
306 resid_norm_prev[bi] = resid_norm_init[bi]; // Meaningful ratio on first iteration
307 delta_sol_norm_prev[bi] = 1000.0;
308 pseudo_dt_scaling[bi] = cfl; // Initialize adaptive scalar
309
310 LOG_ALLOW(GLOBAL,LOG_INFO," Block %d | Max RHS = %.6f | Pseudo-CFL = %.4f .\n", bi, resid_norm_init[bi], cfl);
311 }
312
313 // --- 2. Main Pseudo-Time Iteration Loop ---
314 pseudo_iter = 0;
315 while ((force_restart || (global_norm_delta > tol_abs_delta || global_rel_delta > tol_rtol_delta) || pseudo_iter < 1)
316 && pseudo_iter < max_pseudo_steps)
317 {
318 pseudo_iter++;
319 force_restart = PETSC_FALSE;
320
321 for (PetscInt bi = 0; bi < block_number; bi++) {
322
323 // === 4-Stage Explicit Runge-Kutta Loop ===
324 for (istage = 0; istage < 4; istage++) {
325
326 LOG_ALLOW(GLOBAL,LOG_TRACE," Pseudo-Iter: %d | RK-Stage: %d\n", pseudo_iter, istage);
327
328 // RK Update: U_new = U_old + (Scaler * Alpha * dt) * Total_Residual
329 ierr = VecWAXPY(user[bi].Ucont,
330 pseudo_dt_scaling[bi] * alfa[istage] * dt,
331 user[bi].Rhs,
332 user[bi].pUcont); CHKERRQ(ierr);
333
334 // Sync Ghosts & Re-apply BCs for intermediate stage
335 ierr = UpdateLocalGhosts(&user[bi],"Ucont"); CHKERRQ(ierr);
336
337 //ierr = DMGlobalToLocalBegin(user[bi].fda, user[bi].Ucont, INSERT_VALUES, user[bi].lUcont); CHKERRQ(ierr);
338 //ierr = DMGlobalToLocalEnd(user[bi].fda, user[bi].Ucont, INSERT_VALUES, user[bi].lUcont); CHKERRQ(ierr);
339
340 ierr = ApplyBoundaryConditions(&user[bi]); CHKERRQ(ierr);
341
342 // --- Re-calculate Total Residual for next stage ---
343 ierr = ComputeTotalResidual(&user[bi]); CHKERRQ(ierr);
344
345 } // End RK Stages
346
347 // Immersed boundary interpolation (if enabled)
348
349 // === Convergence Metrics Calculation ===
350
351 // Calculate dU = U_current - U_backup
352 ierr = VecWAXPY(user[bi].dUcont, -1.0, user[bi].pUcont, user[bi].Ucont); CHKERRQ(ierr);
353
354 // Compute Infinity Norms
355 ierr = VecNorm(user[bi].dUcont, NORM_INFINITY, &delta_sol_norm_curr[bi]); CHKERRQ(ierr);
356 ierr = VecNorm(user[bi].Rhs, NORM_INFINITY, &resid_norm_curr[bi]); CHKERRQ(ierr);
357
358 // Normalize relative metrics
359 if (pseudo_iter == 1) {
360 delta_sol_norm_init[bi] = delta_sol_norm_curr[bi];
361 delta_sol_rel_curr[bi] = 1.0;
362 resid_rel_curr[bi] = 1.0;
363 // resid_norm_init[bi] set correctly before the loop — do not overwrite
364 } else {
365 if (delta_sol_norm_init[bi] > 1.0e-10)
366 delta_sol_rel_curr[bi] = delta_sol_norm_curr[bi] / delta_sol_norm_init[bi];
367 else
368 delta_sol_rel_curr[bi] = 0.0;
369
370 if(resid_norm_init[bi] > 1.0e-10)
371 resid_rel_curr[bi] = resid_norm_curr[bi] / resid_norm_init[bi];
372 else
373 resid_rel_curr[bi] = 0.0;
374 }
375
376 // --- File Logging ---
377 if (!rank) {
378 FILE *f;
379 char filen[128];
380 sprintf(filen, "%s/Momentum_Solver_Convergence_History_Block_%1d.log", simCtx->log_dir, bi);
381 if(simCtx->step == simCtx->StartStep + 1 && pseudo_iter == 1) f = fopen(filen, "w");
382 else f = fopen(filen, "a");
383
384 PetscFPrintf(PETSC_COMM_WORLD, f, "Step: %d | PseudoIter(k): %d| | Pseudo-cfl: %.4f |dUk|: %le | |dUk|/|dUprev|: %le | |Rk|: %le | |Rk|/|Rprev|: %le \n",
385 (int)ti, (int)pseudo_iter, pseudo_dt_scaling[bi], delta_sol_norm_curr[bi], delta_sol_rel_curr[bi], resid_norm_curr[bi],resid_rel_curr[bi]);
386 fclose(f);
387 }
388 } // End loop over blocks
389
390 // --- Update Global Convergence Criteria ---
391 global_norm_delta = -1.0e20;
392 global_rel_delta = -1.0e20;
393 global_rel_resid = -1.0e20;
394
395 for (PetscInt bi = 0; bi < block_number; bi++) {
396 global_norm_delta = PetscMax(delta_sol_norm_curr[bi], global_norm_delta);
397 global_rel_delta = PetscMax(delta_sol_rel_curr[bi], global_rel_delta);
398 global_rel_resid = PetscMax(resid_rel_curr[bi], global_rel_resid);
399 }
400 ierr = PetscTime(&te); CHKERRQ(ierr);
401 cput = te - ts;
402 LOG_ALLOW(GLOBAL, LOG_INFO, " Pseudo-Iter(k) %d: |dUk|=%e, |dUk|/|dU0| = %e, |Rk|/|R0| = %e, CPU=%.2fs\n",
403 pseudo_iter, global_norm_delta, global_rel_delta, global_rel_resid, cput);
404
405 // === Backtracking Line Search ===
406 for (PetscInt bi = 0; bi < block_number; bi++) {
407
408 PetscReal resid_ratio = resid_norm_curr[bi] / resid_norm_prev[bi];
409
410 PetscBool is_sol_nan, is_resid_nan, is_diverging;
411 // Check for NaNs in Solution and Residual
412 is_sol_nan = (PetscBool)PetscIsNanReal(delta_sol_norm_curr[bi]);
413 is_resid_nan = (PetscBool)PetscIsNanReal(resid_norm_curr[bi]);
414
415 // TRIGGER: Divergence Detected
416 is_diverging = (PetscBool)(pseudo_iter > 1 &&
417 resid_norm_curr[bi] > simCtx->mom_dt_rk4_residual_norm_noise_allowance_factor*resid_norm_prev[bi] &&
418 delta_sol_norm_curr[bi] > delta_sol_norm_prev[bi]
419 );
420
421 if(is_diverging || is_sol_nan || is_resid_nan) {
422
423 if(is_sol_nan){
424 LOG_ALLOW(LOCAL, LOG_WARNING, " Block %d: NaN detected in solution update norm.\n", bi);
425 }
426 if(is_resid_nan){
427 LOG_ALLOW(LOCAL, LOG_WARNING, " Block %d: NaN detected in residual norm.\n", bi);
428 }
429 if(is_diverging){
430 LOG_ALLOW(LOCAL, LOG_WARNING, " Block %d: Divergence detected (|R| and |dU| increasing).\n", bi);
431 }
432
433 if(pseudo_dt_scaling[bi] < simCtx->min_pseudo_cfl) {
434 SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_CONV_FAILED,
435 "Block %d: Solver diverged (NaN or explosion) and min time-step reached. Simulation aborted.", bi);
436 }
437
438 // For nan situations, we always backtrack.
439 force_restart = PETSC_TRUE;
440
441 // 1. Reduce Step Size
442 pseudo_dt_scaling[bi] *= 0.5*simCtx->pseudo_cfl_reduction_factor; // Aggressive deceleration (1/2 x reduction factor).
443 // NOTE: pseudo_iter-- is handled once after this loop to avoid multi-decrement for block_number > 1.
444
445 LOG_ALLOW(LOCAL, LOG_DEBUG, " Block %d: Backtracking triggered. Reducing scaling to %f.\n", bi, pseudo_dt_scaling[bi]);
446
447 // 3. RESTORE from Backup
448 ierr = VecCopy(user[bi].pUcont, user[bi].Ucont); CHKERRQ(ierr);
449
450 ierr = VecCopy(pRhs[bi], user[bi].Rhs); CHKERRQ(ierr);
451
452 // 4. SYNC Ghost Cells
453 ierr = UpdateLocalGhosts(&user[bi],"Ucont"); CHKERRQ(ierr);
454 //ierr = DMGlobalToLocalBegin(user[bi].fda, user[bi].Ucont, INSERT_VALUES, user[bi].lUcont); CHKERRQ(ierr);
455 //ierr = DMGlobalToLocalEnd(user[bi].fda, user[bi].Ucont, INSERT_VALUES, user[bi].lUcont); CHKERRQ(ierr);
456
457 // 5. Reset current norms to avoid polluting next loop check.
458 delta_sol_norm_curr[bi] = 1000.0;
459 resid_norm_curr[bi] = 1000.0;
460
461 }
462 else {
463 // SUCCESS: Step accepted. Update Backup and History.
464 ierr = VecCopy(user[bi].Ucont, user[bi].pUcont); CHKERRQ(ierr);
465 ierr = VecCopy(user[bi].Rhs, pRhs[bi]); CHKERRQ(ierr);
466 resid_norm_prev[bi] = resid_norm_curr[bi];
467 delta_sol_norm_prev[bi] = delta_sol_norm_curr[bi];
468
469 // Adaptive Ramping of solution speed (Pseudo-CFL)
470
471 PetscBool is_converging_fast;
472 is_converging_fast = (PetscBool)(resid_ratio < 0.9);
473 PetscBool is_stagnating;
474 is_stagnating = (PetscBool)(resid_ratio >= 0.98 && resid_ratio < 1.0);
475 PetscBool is_noisy;
476 is_noisy = (PetscBool)(resid_ratio >= 1.0 && resid_ratio < simCtx->mom_dt_rk4_residual_norm_noise_allowance_factor);
477
478 PetscReal current_cfl = pseudo_dt_scaling[bi];
479 PetscReal cfl_floor = 10*simCtx->min_pseudo_cfl; // Define a "low CFL" threshold for more aggressive acceleration.
480 PetscReal cfl_ceiling = 0.75*simCtx->max_pseudo_cfl; // Define a "high CFL" threshold for mild acceleration.
481
482 if(is_converging_fast){
483 // Residual is falling fast (good convergence)
484 pseudo_dt_scaling[bi] *= simCtx->pseudo_cfl_growth_factor;
485 } else if(is_stagnating && current_cfl < cfl_floor){
486 // Residual is falling very slow (weak convergence.) we accelerate at low CFL
487 pseudo_dt_scaling[bi] *= PetscMin(1.2*simCtx->pseudo_cfl_growth_factor, 1.1); // more aggressive acceleration at low CFL
488 } else if(is_stagnating && current_cfl >= cfl_floor && current_cfl < cfl_ceiling && !is_noisy){
489 // moderate CFL - normal acceleration
490 pseudo_dt_scaling[bi] *= simCtx->pseudo_cfl_growth_factor;
491 } else if(is_stagnating && current_cfl >= cfl_ceiling && !is_noisy){
492 // high CFL - mild acceleration (creep)
493 pseudo_dt_scaling[bi] *= PetscMax(0.95*simCtx->pseudo_cfl_growth_factor,1.005);
494 } else if(is_noisy){
495 // Residual is rising slightly (within noise allowance). we decelerate mildly.
496 if(current_cfl > cfl_ceiling){
497 // High CFL - more aggressive deceleration
498 pseudo_dt_scaling[bi] *= PetscMax(0.9*simCtx->pseudo_cfl_reduction_factor,0.9);
499 } else if(current_cfl > cfl_floor && current_cfl <= cfl_ceiling){
500 // Moderate CFL - mild deceleration
501 pseudo_dt_scaling[bi] *= simCtx->pseudo_cfl_reduction_factor;
502 } else if(current_cfl <= cfl_floor){
503 // Low CFL - gentle acceleration
504 // rise in residual likely due to noise, so we back off slightly.
505 pseudo_dt_scaling[bi] *= PetscMax(0.95*simCtx->pseudo_cfl_growth_factor,1.01);
506 }
507 }
508 // Clamp to max bound.
509 pseudo_dt_scaling[bi] = PetscMin(pseudo_dt_scaling[bi], simCtx->max_pseudo_cfl);
510 pseudo_dt_scaling[bi] = PetscMax(pseudo_dt_scaling[bi], simCtx->min_pseudo_cfl);
511 }
512 }
513
514 // Decrement once regardless of how many blocks backtracked.
515 if (force_restart) pseudo_iter--;
516
517 if (block_number > 1) {
518 // ierr = Block_Interface_U(user); CHKERRQ(ierr);
519 }
520 } // End while loop
521
522
523 // Smart CFL Carry Over
524
525 PetscReal local_min_cfl = 1.0e20;
526 PetscReal global_min_cfl;
527
528 PetscReal cfl_floor = 10*simCtx->min_pseudo_cfl;
529
530 // 1. Find the "Weakest Link" (lowest CFL) across local blocks
531 for (PetscInt bi = 0; bi < block_number; bi++) {
532 if (pseudo_dt_scaling[bi] < local_min_cfl) {
533 local_min_cfl = pseudo_dt_scaling[bi];
534 }
535 }
536
537 // 2. MPI Reduction (Crucial if blocks are distributed across ranks)
538 // We want the minimum CFL across the entire simulation, not just this processor.
539 ierr = MPI_Allreduce(&local_min_cfl, &global_min_cfl, 1, MPIU_REAL, MPI_MIN, PETSC_COMM_WORLD); CHKERRQ(ierr);
540
541 // 3. Apply Safety Factor & Update simCtx
542 // We use a factor (e.g., 0.85) to back off slightly for the start of the next timestep
543 // to accommodate the initial residual spike from the BDF term.
544
545 PetscReal cfl_carry_over_relaxation_factor = 1.0; // Could be parameterized in simCtx if desired.
546
547 PetscReal next_start_cfl;
548
549 if(global_min_cfl < cfl_floor){
550 // If the minimum CFL is very low, we back off more aggressively.
551 next_start_cfl = cfl_floor * 1.5;
552 } else{
553 next_start_cfl = global_min_cfl * cfl_carry_over_relaxation_factor;
554 }
555
556 next_start_cfl = PetscMin(next_start_cfl, simCtx->max_pseudo_cfl);
557 next_start_cfl = PetscMax(next_start_cfl, simCtx->min_pseudo_cfl);
558
559 // 5. Save for next Physical Time Step
560 if (!rank) {
561 LOG_ALLOW(GLOBAL, LOG_INFO, " CFL Adaptation: Step %d finished at CFL %.3f. Next step will start at CFL %.3f.\n",
562 simCtx->step, global_min_cfl, next_start_cfl);
563 }
564
565 // Update the context so the next call to MomentumSolver reads this new value
566 simCtx->pseudo_cfl = next_start_cfl;
567
568 // --- Final Cleanup ---
569 for (PetscInt bi = 0; bi < block_number; bi++) {
570 ierr = VecDestroy(&user[bi].Rhs); CHKERRQ(ierr);
571 ierr = VecDestroy(&user[bi].dUcont); CHKERRQ(ierr);
572 ierr = VecDestroy(&user[bi].pUcont); CHKERRQ(ierr);
573 ierr = VecDestroy(&pRhs[bi]); CHKERRQ(ierr);
574 }
575 ierr = PetscFree(pRhs); CHKERRQ(ierr);
576
577 ierr = PetscFree2(delta_sol_norm_init, delta_sol_norm_prev);CHKERRQ(ierr);
578 ierr = PetscFree2(delta_sol_norm_curr, delta_sol_rel_curr);CHKERRQ(ierr);
579 ierr = PetscFree2(resid_norm_init, resid_norm_prev);CHKERRQ(ierr);
580 ierr = PetscFree2(resid_norm_curr, pseudo_dt_scaling); CHKERRQ(ierr);
581 ierr = PetscFree(resid_rel_curr); CHKERRQ(ierr);
582
583 LOG_ALLOW(GLOBAL, LOG_INFO, "Momentum Solver converged/completed after %d pseudo-iterations.\n", pseudo_iter);
585 PetscFunctionReturn(0);
586}
@ LOG_TRACE
Very fine-grained tracing information for in-depth debugging.
Definition logging.h:33
@ LOG_WARNING
Non-critical issues that warrant attention.
Definition logging.h:29
static PetscErrorCode ComputeTotalResidual(UserCtx *user)
Computes the Total Residual for the Dual-Time stepping method.
PetscErrorCode UpdateLocalGhosts(UserCtx *user, const char *fieldName)
Updates the local vector (including ghost points) from its corresponding global vector.
Definition setup.c:1157
PetscReal mom_rtol
Definition variables.h:626
PetscReal pseudo_cfl_reduction_factor
Definition variables.h:633
PetscReal min_pseudo_cfl
Definition variables.h:634
PetscReal mom_atol
Definition variables.h:626
PetscInt StartStep
Definition variables.h:595
PetscReal max_pseudo_cfl
Definition variables.h:634
char log_dir[PETSC_MAX_PATH_LEN]
Definition variables.h:609
PetscReal pseudo_cfl_growth_factor
Definition variables.h:633
PetscReal mom_dt_rk4_residual_norm_noise_allowance_factor
Definition variables.h:635
PetscInt step
Definition variables.h:593
PetscInt mom_max_pseudo_steps
Definition variables.h:625
PetscReal pseudo_cfl
Definition variables.h:632
Here is the call graph for this function:
Here is the caller graph for this function: