PICurv 0.1.0
A Parallel Particle-In-Cell Solver for Curvilinear LES
Loading...
Searching...
No Matches
Boundaries.h File Reference
#include <petscpf.h>
#include <petscdmswarm.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <petsctime.h>
#include <petscsys.h>
#include <petscdmcomposite.h>
#include <petscsystypes.h>
#include "variables.h"
#include "ParticleSwarm.h"
#include "walkingsearch.h"
#include "grid.h"
#include "logging.h"
#include "io.h"
#include "interpolation.h"
#include "ParticleMotion.h"
#include "BC_Handlers.h"
#include "wallfunction.h"
Include dependency graph for Boundaries.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Functions

PetscErrorCode BoundarySystem_Validate (UserCtx *user)
 (Public) Validates the consistency and compatibility of the parsed boundary condition system.
 
PetscErrorCode BoundaryCondition_Create (BCHandlerType handler_type, BoundaryCondition **new_bc_ptr)
 (Private) Creates and configures a specific BoundaryCondition handler object.
 
PetscErrorCode BoundarySystem_Initialize (UserCtx *user, const char *bcs_filename)
 Initializes the entire boundary system.
 
PetscErrorCode PropagateBoundaryConfigToCoarserLevels (SimCtx *simCtx)
 Propagates boundary condition configuration from finest to all coarser multigrid levels.
 
PetscErrorCode BoundarySystem_ExecuteStep (UserCtx *user)
 Executes one full boundary condition update cycle for a time step.
 
PetscErrorCode BoundarySystem_RefreshUbcs (UserCtx *user)
 (Private) A lightweight execution engine that calls the UpdateUbcs() method on all relevant handlers.
 
PetscErrorCode BoundarySystem_Destroy (UserCtx *user)
 Cleans up and destroys all boundary system resources.
 
PetscErrorCode CanRankServiceInletFace (UserCtx *user, const DMDALocalInfo *info, PetscInt IM_nodes_global, PetscInt JM_nodes_global, PetscInt KM_nodes_global, PetscBool *can_service_inlet_out)
 Determines if the current MPI rank owns any part of the globally defined inlet face, making it responsible for placing particles on that portion of the surface.
 
PetscErrorCode CanRankServiceFace (const DMDALocalInfo *info, PetscInt IM_nodes_global, PetscInt JM_nodes_global, PetscInt KM_nodes_global, BCFace face_id, PetscBool *can_service_out)
 Determines if the current MPI rank owns any part of a specified global face.
 
PetscErrorCode GetDeterministicFaceGridLocation (UserCtx *user, const DMDALocalInfo *info, PetscInt xs_gnode_rank, PetscInt ys_gnode_rank, PetscInt zs_gnode_rank, PetscInt IM_cells_global, PetscInt JM_cells_global, PetscInt KM_cells_global, PetscInt64 particle_global_id, PetscInt *ci_metric_lnode_out, PetscInt *cj_metric_lnode_out, PetscInt *ck_metric_lnode_out, PetscReal *xi_metric_logic_out, PetscReal *eta_metric_logic_out, PetscReal *zta_metric_logic_out, PetscBool *placement_successful_out)
 Places particles in a deterministic grid/raster pattern on a specified domain face.
 
PetscErrorCode GetRandomCellAndLogicalCoordsOnInletFace (UserCtx *user, const DMDALocalInfo *info, PetscInt xs_gnode_rank, PetscInt ys_gnode_rank, PetscInt zs_gnode_rank, PetscInt IM_nodes_global, PetscInt JM_nodes_global, PetscInt KM_nodes_global, PetscRandom *rand_logic_i_ptr, PetscRandom *rand_logic_j_ptr, PetscRandom *rand_logic_k_ptr, PetscInt *ci_metric_lnode_out, PetscInt *cj_metric_lnode_out, PetscInt *ck_metric_lnode_out, PetscReal *xi_metric_logic_out, PetscReal *eta_metric_logic_out, PetscReal *zta_metric_logic_out)
 Assuming the current rank services the inlet face, this function selects a random cell (owned by this rank on that face) and random logical coordinates within that cell, suitable for placing a particle on the inlet surface.
 
PetscErrorCode TranslateModernBCsToLegacy (UserCtx *user)
 
PetscErrorCode EnforceRHSBoundaryConditions (UserCtx *user)
 (Private) A generic routine to copy data for a single, named field across periodic boundaries.
 
PetscErrorCode TransferPeriodicFieldByDirection (UserCtx *user, const char *field_name, char direction)
 (Private Worker) Copies periodic data for a SINGLE field in a SINGLE direction.
 
PetscErrorCode TransferPeriodicField (UserCtx *user, const char *field_name)
 (Private) A generic routine to copy data for a single, named field across periodic boundaries.
 
PetscErrorCode TransferPeriodicFaceField (UserCtx *user, const char *field_name)
 (Primitive) Copies periodic data from the interior to the local ghost cell region for a single field.
 
PetscErrorCode ApplyMetricsPeriodicBCs (UserCtx *user)
 (Orchestrator) Updates all metric-related fields in the local ghost cell regions for periodic boundaries.
 
PetscErrorCode ApplyPeriodicBCs (UserCtx *user)
 Applies periodic boundary conditions by copying data across domain boundaries for all relevant fields.
 
PetscErrorCode ApplyUcontPeriodicBCs (UserCtx *user)
 (Orchestrator) Updates the contravariant velocity field in the local ghost cell regions for periodic boundaries.
 
PetscErrorCode EnforceUcontPeriodicity (UserCtx *user)
 Enforces strict periodicity on the interior contravariant velocity field.
 
PetscErrorCode UpdateDummyCells (UserCtx *user)
 Updates the dummy cells (ghost nodes) on the faces of the local domain for NON-PERIODIC boundaries.
 
PetscErrorCode UpdateCornerNodes (UserCtx *user)
 Updates the corner and edge ghost nodes of the local domain by averaging.
 
PetscErrorCode UpdatePeriodicCornerNodes (UserCtx *user, PetscInt num_fields, const char *field_names[])
 (Orchestrator) Performs a sequential, deterministic periodic update for a list of fields.
 
PetscErrorCode ApplyWallFunction (UserCtx *user)
 Applies wall function modeling to near-wall velocities for all wall-type boundaries.
 
PetscErrorCode RefreshBoundaryGhostCells (UserCtx *user)
 (Public) Orchestrates the "light" refresh of all boundary ghost cells after the projection step.
 
PetscErrorCode ApplyBoundaryConditions (UserCtx *user)
 Main master function to apply all boundary conditions for a time step.
 

Function Documentation

◆ BoundarySystem_Validate()

PetscErrorCode BoundarySystem_Validate ( UserCtx user)

(Public) Validates the consistency and compatibility of the parsed boundary condition system.

This function is the main entry point for all boundary condition validation. It should be called from the main setup sequence AFTER the configuration file has been parsed by ParseAllBoundaryConditions but BEFORE any BoundaryCondition handler objects are created.

It acts as a dispatcher, calling specialized private sub-validators for different complex BC setups (like driven flow) to ensure the combination of mathematical_type and handler_type across all six faces is physically and numerically valid. This provides a "fail-fast" mechanism to prevent users from running improperly configured simulations.

Parameters
user — The UserCtx for a single block, containing the populated boundary_faces configuration.
Returns
PetscErrorCode 0 on success, non-zero PETSc error code on failure.

Definition at line 958 of file Boundaries.c.

959{
960 PetscErrorCode ierr;
961 PetscFunctionBeginUser;
962
963 LOG_ALLOW(GLOBAL, LOG_INFO, "Validating parsed boundary condition configuration...\n");
964
965 // --- Rule Set 1: Driven Flow Handler Consistency ---
966 // This specialized validator will check all rules related to driven flow handlers.
967 ierr = Validate_DrivenFlowConfiguration(user); CHKERRQ(ierr);
968
969 // --- Rule Set 2: (Future Extension) Overset Interface Consistency ---
970 // ierr = Validate_OversetConfiguration(user); CHKERRQ(ierr);
971
972 LOG_ALLOW(GLOBAL, LOG_INFO, "Boundary configuration is valid.\n");
973
974 PetscFunctionReturn(0);
975}
PetscErrorCode Validate_DrivenFlowConfiguration(UserCtx *user)
(Private) Validates all consistency rules for a driven flow (channel/pipe) setup.
Definition BC_Handlers.c:27
#define GLOBAL
Scope for global logging across all processes.
Definition logging.h:46
#define LOG_ALLOW(scope, level, fmt,...)
Logging macro that checks both the log level and whether the calling function is in the allowed-funct...
Definition logging.h:200
@ LOG_INFO
Informational messages about program execution.
Definition logging.h:31
Here is the call graph for this function:
Here is the caller graph for this function:

◆ BoundaryCondition_Create()

PetscErrorCode BoundaryCondition_Create ( BCHandlerType  handler_type,
BoundaryCondition **  new_bc_ptr 
)

(Private) Creates and configures a specific BoundaryCondition handler object.

This function acts as a factory. Based on the requested handler_type, it allocates a BoundaryCondition object and populates it with the correct set of function pointers corresponding to that specific behavior.

Parameters
handler_type — The specific handler to create (e.g., BC_HANDLER_WALL_NOSLIP).
[out] new_bc_ptr — A pointer to where the newly created BoundaryCondition object's address will be stored.
Returns
PetscErrorCode 0 on success.

Definition at line 866 of file Boundaries.c.

867{
868 PetscErrorCode ierr;
869 PetscFunctionBeginUser;
870
871 const char* handler_name = BCHandlerTypeToString(handler_type);
872 LOG_ALLOW(LOCAL, LOG_DEBUG, "Factory called for handler type %s. \n", handler_name);
873
874 ierr = PetscMalloc1(1, new_bc_ptr); CHKERRQ(ierr);
875 BoundaryCondition *bc = *new_bc_ptr;
876
877 bc->type = handler_type;
878 bc->priority = -1; // Default priority; can be overridden in specific handlers
879 bc->data = NULL;
880 bc->Initialize = NULL;
881 bc->PreStep = NULL;
882 bc->Apply = NULL;
883 bc->PostStep = NULL;
884 bc->UpdateUbcs = NULL;
885 bc->Destroy = NULL;
886
887 LOG_ALLOW(LOCAL, LOG_DEBUG, "Allocated generic handler object at address %p.\n", (void*)bc);
888
889 switch (handler_type) {
890
892 LOG_ALLOW(LOCAL, LOG_DEBUG, "Dispatching to Create_OutletConservation().\n");
893 ierr = Create_OutletConservation(bc); CHKERRQ(ierr);
894 break;
895
897 LOG_ALLOW(LOCAL, LOG_DEBUG, "Dispatching to Create_WallNoSlip().\n");
898 ierr = Create_WallNoSlip(bc); CHKERRQ(ierr);
899 break;
900
902 LOG_ALLOW(LOCAL, LOG_DEBUG, "Dispatching to Create_InletConstantVelocity().\n");
903 ierr = Create_InletConstantVelocity(bc); CHKERRQ(ierr);
904 break;
905
907 LOG_ALLOW(LOCAL,LOG_DEBUG,"Dispatching to Create_PeriodicGeometric().\n");
908 ierr = Create_PeriodicGeometric(bc);
909 break;
910
912 LOG_ALLOW(LOCAL,LOG_DEBUG,"Dispatching to Create_PeriodicDrivenConstant().\n");
914 break;
915
916 //case BC_HANDLER_PERIODIC_DRIVEN_INITIAL_FLUX:
917 // LOG_ALLOW(LOCAL,LOG_DEBUG,"Dispatching to Create_PeriodicDrivenInitial().\n");
918 // ierr = Create_PeriodicDrivenInitial(bc);
919 // break;
920
922 LOG_ALLOW(LOCAL, LOG_DEBUG, "Dispatching to Create_InletParabolicProfile().\n");
923 ierr = Create_InletParabolicProfile(bc); CHKERRQ(ierr);
924 break;
925 //Add cases for other handlers here in future phases
926
927 default:
928 LOG_ALLOW(GLOBAL, LOG_ERROR, "Handler type (%s) is not recognized or implemented in the factory.\n", handler_name);
929 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_UNKNOWN_TYPE, "Boundary handler type %d (%s) not recognized in factory.\n", handler_type, handler_name);
930 }
931
932 if(bc->priority < 0) {
933 LOG_ALLOW(GLOBAL, LOG_ERROR, "Handler type %d (%s) did not set a valid priority during creation.\n", handler_type, handler_name);
934 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_UNKNOWN_TYPE, "Boundary handler type %d (%s) did not set a valid priority during creation.\n", handler_type, handler_name);
935 }
936
937 LOG_ALLOW(LOCAL, LOG_DEBUG, "Successfully created and configured handler for %s.\n", handler_name);
938 PetscFunctionReturn(0);
939}
PetscErrorCode Create_PeriodicDrivenConstant(BoundaryCondition *bc)
(Handler Constructor) Creates and configures a BoundaryCondition object for a driven periodic flow wi...
PetscErrorCode Create_InletConstantVelocity(BoundaryCondition *bc)
Configures a BoundaryCondition object to behave as a constant velocity inlet.
PetscErrorCode Create_PeriodicGeometric(BoundaryCondition *bc)
PetscErrorCode Create_InletParabolicProfile(BoundaryCondition *bc)
(Handler Constructor) Populates a BoundaryCondition object with Parabolic Inlet behavior.
PetscErrorCode Create_WallNoSlip(BoundaryCondition *bc)
Configures a BoundaryCondition object to behave as a no-slip, stationary wall.
PetscErrorCode Create_OutletConservation(BoundaryCondition *bc)
(Handler Constructor) Populates a BoundaryCondition object with Outlet Conservation behavior.
const char * BCHandlerTypeToString(BCHandlerType handler_type)
Converts a BCHandlerType enum to its string representation.
Definition logging.c:748
#define LOCAL
Logging scope definitions for controlling message output.
Definition logging.h:45
@ LOG_ERROR
Critical errors that may halt the program.
Definition logging.h:28
@ LOG_DEBUG
Detailed debugging information.
Definition logging.h:32
The "virtual table" struct for a boundary condition handler object.
Definition variables.h:280
PetscErrorCode(* PreStep)(BoundaryCondition *self, BCContext *ctx, PetscReal *local_inflow, PetscReal *local_outflow)
Definition variables.h:285
BCHandlerType type
Definition variables.h:281
PetscErrorCode(* Destroy)(BoundaryCondition *self)
Definition variables.h:289
PetscErrorCode(* PostStep)(BoundaryCondition *self, BCContext *ctx,...)
Definition variables.h:287
PetscErrorCode(* Initialize)(BoundaryCondition *self, BCContext *ctx)
Definition variables.h:284
PetscErrorCode(* UpdateUbcs)(BoundaryCondition *self, BCContext *ctx)
Definition variables.h:288
PetscErrorCode(* Apply)(BoundaryCondition *self, BCContext *ctx)
Definition variables.h:286
BCPriorityType priority
Definition variables.h:282
@ BC_HANDLER_PERIODIC_GEOMETRIC
Definition variables.h:243
@ BC_HANDLER_INLET_PARABOLIC
Definition variables.h:236
@ BC_HANDLER_INLET_CONSTANT_VELOCITY
Definition variables.h:235
@ BC_HANDLER_PERIODIC_DRIVEN_CONSTANT_FLUX
Definition variables.h:245
@ BC_HANDLER_WALL_NOSLIP
Definition variables.h:232
@ BC_HANDLER_OUTLET_CONSERVATION
Definition variables.h:241
Here is the call graph for this function:
Here is the caller graph for this function:

◆ BoundarySystem_Initialize()

PetscErrorCode BoundarySystem_Initialize ( UserCtx user,
const char *  bcs_filename 
)

Initializes the entire boundary system.

Parameters
user — The main UserCtx struct.
bcs_filename — The path to the boundary conditions configuration file.

Initializes the entire boundary system.

Definition at line 987 of file Boundaries.c.

988{
989 PetscErrorCode ierr;
990 PetscFunctionBeginUser;
991
992 LOG_ALLOW(GLOBAL, LOG_INFO, "Starting creation and initialization of all boundary handlers.\n");
993
994 // =========================================================================
995 // Step 0: Clear any existing boundary handlers (if re-initializing).
996 // This ensures no memory leaks if this function is called multiple times.
997 // =========================================================================
998 for (int i = 0; i < 6; i++) {
999 BoundaryFaceConfig *face_cfg = &user->boundary_faces[i];
1000 if (face_cfg->handler) {
1001 LOG_ALLOW(LOCAL, LOG_DEBUG, "Destroying existing handler on Face %s before re-initialization.\n", BCFaceToString((BCFace)i));
1002 if (face_cfg->handler->Destroy) {
1003 ierr = face_cfg->handler->Destroy(face_cfg->handler); CHKERRQ(ierr);
1004 }
1005 ierr = PetscFree(face_cfg->handler); CHKERRQ(ierr);
1006 face_cfg->handler = NULL;
1007 }
1008 }
1009 // =========================================================================
1010
1011 // Step 0.1: Initiate flux sums to zero
1012 user->simCtx->FluxInSum = 0.0;
1013 user->simCtx->FluxOutSum = 0.0;
1014 user->simCtx->FarFluxInSum = 0.0;
1015 user->simCtx->FarFluxOutSum = 0.0;
1016 // =========================================================================
1017
1018 // Step 1: Parse the configuration file to determine user intent.
1019 // This function, defined in io.c, populates the configuration enums and parameter
1020 // lists within the user->boundary_faces array on all MPI ranks.
1021 ierr = ParseAllBoundaryConditions(user, bcs_filename); CHKERRQ(ierr);
1022 LOG_ALLOW(GLOBAL, LOG_INFO, "Configuration file '%s' parsed successfully.\n", bcs_filename);
1023
1024 // Step 1.1: Validate the parsed configuration to ensure there are no Boundary Condition conflicts
1025 ierr = BoundarySystem_Validate(user);
1026
1027 // Step 2: Create and Initialize the handler object for each of the 6 faces.
1028 for (int i = 0; i < 6; i++) {
1029 BoundaryFaceConfig *face_cfg = &user->boundary_faces[i];
1030
1031 const char *face_name = BCFaceToString(face_cfg->face_id);
1032 const char *type_name = BCTypeToString(face_cfg->mathematical_type);
1033 const char *handler_name = BCHandlerTypeToString(face_cfg->handler_type);
1034
1035 LOG_ALLOW(LOCAL, LOG_DEBUG, "Creating handler for Face %s with Type %s and handler '%s'.\n", face_name, type_name,handler_name);
1036
1037 // Use the private factory to construct the correct handler object based on the parsed type.
1038 // The factory returns a pointer to the new handler object, which we store in the config struct.
1039 ierr = BoundaryCondition_Create(face_cfg->handler_type, &face_cfg->handler); CHKERRQ(ierr);
1040
1041 // Step 3: Call the specific Initialize() method for the newly created handler.
1042 // This allows the handler to perform its own setup, like reading parameters from the
1043 // face_cfg->params list and setting the initial field values on its face.
1044 if (face_cfg->handler && face_cfg->handler->Initialize) {
1045 LOG_ALLOW(LOCAL, LOG_DEBUG, "Calling Initialize() method for handler %s(%s) on Face %s.\n",type_name,handler_name,face_name);
1046
1047 // Prepare the context needed by the Initialize() function.
1048 BCContext ctx = {
1049 .user = user,
1050 .face_id = face_cfg->face_id,
1051 .global_inflow_sum = &user->simCtx->FluxInSum, // Global flux sums are not relevant during initialization.
1052 .global_outflow_sum = &user->simCtx->FluxOutSum,
1053 .global_farfield_inflow_sum = &user->simCtx->FarFluxInSum,
1054 .global_farfield_outflow_sum = &user->simCtx->FarFluxOutSum
1055 };
1056
1057 ierr = face_cfg->handler->Initialize(face_cfg->handler, &ctx); CHKERRQ(ierr);
1058 } else {
1059 LOG_ALLOW(LOCAL, LOG_DEBUG, "Handler %s(%s) for Face %s has no Initialize() method, skipping.\n", type_name,handler_name,face_name);
1060 }
1061 }
1062 // =========================================================================
1063 // NO SYNCHRONIZATION NEEDED HERE
1064 // =========================================================================
1065 // Initialize() only reads parameters and allocates memory.
1066 // It does NOT modify field values (Ucat, Ucont, Ubcs).
1067 // Field values are set by:
1068 // 1. Initial conditions (before this function)
1069 // 2. Apply() during timestepping (after this function)
1070 // The first call to ApplyBoundaryConditions() will handle synchronization.
1071 // =========================================================================
1072
1073 // ====================================================================================
1074 // --- NEW: Step 4: Synchronize Vectors After Initialization ---
1075 // This is the CRITICAL fix. The Initialize() calls have modified local vector
1076 // arrays on some ranks but not others. We must now update the global vector state
1077 // and then update all local ghost regions to be consistent.
1078 // ====================================================================================
1079
1080 //LOG_ALLOW(GLOBAL, LOG_DEBUG, "Committing global boundary initializations to local vectors.\n");
1081
1082 // Commit changes from the global vectors (Ucat, Ucont) to the local vectors (lUcat, lUcont)
1083 // NOTE: The Apply functions modified Ucat and Ucont via GetArray, which works on the global
1084 // representation.
1085 /*
1086 ierr = DMGlobalToLocalBegin(user->fda, user->Ucat, INSERT_VALUES, user->lUcat); CHKERRQ(ierr);
1087 ierr = DMGlobalToLocalEnd(user->fda, user->Ucat, INSERT_VALUES, user->lUcat); CHKERRQ(ierr);
1088
1089 ierr = DMGlobalToLocalBegin(user->fda, user->Ucont, INSERT_VALUES, user->lUcont); CHKERRQ(ierr);
1090 ierr = DMGlobalToLocalEnd(user->fda, user->Ucont, INSERT_VALUES, user->lUcont); CHKERRQ(ierr);
1091
1092 // Now, update all local vectors (including ghost cells) from the newly consistent global vectors
1093
1094 ierr = DMLocalToGlobalBegin(user->fda, user->lUcat, INSERT_VALUES, user->Ucat); CHKERRQ(ierr);
1095 ierr = DMLocalToGlobalEnd(user->fda, user->lUcat, INSERT_VALUES, user->Ucat); CHKERRQ(ierr);
1096
1097 ierr = DMLocalToGlobalBegin(user->fda, user->lUcont, INSERT_VALUES, user->Ucont); CHKERRQ(ierr);
1098 ierr = DMLocalToGlobalEnd(user->fda, user->lUcont, INSERT_VALUES, user->Ucont); CHKERRQ(ierr);
1099 */
1100
1101 LOG_ALLOW(GLOBAL, LOG_INFO, "All boundary handlers created and initialized successfully.\n");
1102 PetscFunctionReturn(0);
1103}
PetscErrorCode BoundarySystem_Validate(UserCtx *user)
(Public) Validates the consistency and compatibility of the parsed boundary condition system.
Definition Boundaries.c:958
PetscErrorCode BoundaryCondition_Create(BCHandlerType handler_type, BoundaryCondition **new_bc_ptr)
(Private) Creates and configures a specific BoundaryCondition handler object.
Definition Boundaries.c:866
PetscErrorCode ParseAllBoundaryConditions(UserCtx *user, const char *bcs_input_filename)
Parses the boundary conditions file to configure the type, handler, and any associated parameters for...
Definition io.c:439
const char * BCFaceToString(BCFace face)
Helper function to convert BCFace enum to a string representation.
Definition logging.c:643
const char * BCTypeToString(BCType type)
Helper function to convert BCType enum to a string representation.
Definition logging.c:722
PetscReal FarFluxInSum
Definition variables.h:658
PetscReal FarFluxOutSum
Definition variables.h:658
BoundaryFaceConfig boundary_faces[6]
Definition variables.h:746
SimCtx * simCtx
Back-pointer to the master simulation context.
Definition variables.h:731
PetscReal FluxOutSum
Definition variables.h:658
BCHandlerType handler_type
Definition variables.h:296
UserCtx * user
Definition variables.h:271
PetscReal FluxInSum
Definition variables.h:658
BCType mathematical_type
Definition variables.h:295
BCFace
Identifies the six logical faces of a structured computational block.
Definition variables.h:203
BoundaryCondition * handler
Definition variables.h:298
Provides execution context for a boundary condition handler.
Definition variables.h:270
Holds the complete configuration for one of the six boundary faces.
Definition variables.h:293
Here is the call graph for this function:
Here is the caller graph for this function:

◆ PropagateBoundaryConfigToCoarserLevels()

PetscErrorCode PropagateBoundaryConfigToCoarserLevels ( SimCtx simCtx)

Propagates boundary condition configuration from finest to all coarser multigrid levels.

Coarser levels need BC type information for geometric operations (e.g., periodic corrections) but do NOT need full handler objects since timestepping only occurs at the finest level. This function copies the boundary_faces configuration down the hierarchy.

Parameters
simCtx — The master SimCtx containing the multigrid hierarchy.
Returns
PetscErrorCode 0 on success

Definition at line 1118 of file Boundaries.c.

1119{
1120 PetscErrorCode ierr;
1121 UserMG *usermg = &simCtx->usermg;
1122
1123 PetscFunctionBeginUser;
1125
1126 LOG_ALLOW(GLOBAL, LOG_INFO, "Propagating BC configuration from finest to coarser multigrid levels...\n");
1127
1128 // Loop from second-finest down to coarsest
1129 for (PetscInt level = usermg->mglevels - 2; level >= 0; level--) {
1130 for (PetscInt bi = 0; bi < simCtx->block_number; bi++) {
1131 UserCtx *user_coarse = &usermg->mgctx[level].user[bi];
1132 UserCtx *user_fine = &usermg->mgctx[level + 1].user[bi];
1133
1134 LOG_ALLOW_SYNC(LOCAL, LOG_DEBUG, "Rank %d: Copying BC config from level %d to level %d, block %d\n",
1135 simCtx->rank, level + 1, level, bi);
1136
1137 // Copy the 6 boundary face configurations
1138 for (int face_i = 0; face_i < 6; face_i++) {
1139 user_coarse->boundary_faces[face_i].face_id = user_fine->boundary_faces[face_i].face_id;
1140 user_coarse->boundary_faces[face_i].mathematical_type = user_fine->boundary_faces[face_i].mathematical_type;
1141 user_coarse->boundary_faces[face_i].handler_type = user_fine->boundary_faces[face_i].handler_type;
1142
1143 // Copy parameter list (deep copy)
1144 FreeBC_ParamList(user_coarse->boundary_faces[face_i].params); // Clear any existing
1145 user_coarse->boundary_faces[face_i].params = NULL;
1146
1147 BC_Param **dst_next = &user_coarse->boundary_faces[face_i].params;
1148 for (BC_Param *src = user_fine->boundary_faces[face_i].params; src; src = src->next) {
1149 BC_Param *new_param;
1150 ierr = PetscMalloc1(1, &new_param); CHKERRQ(ierr);
1151 ierr = PetscStrallocpy(src->key, &new_param->key); CHKERRQ(ierr);
1152 ierr = PetscStrallocpy(src->value, &new_param->value); CHKERRQ(ierr);
1153 new_param->next = NULL;
1154 *dst_next = new_param;
1155 dst_next = &new_param->next;
1156 }
1157
1158 // IMPORTANT: Do NOT create handler objects for coarser levels
1159 // Handlers are only needed at finest level for timestepping Apply() calls
1160 user_coarse->boundary_faces[face_i].handler = NULL;
1161 }
1162
1163 // Also propagate legacy compatibility fields if needed
1164 user_coarse->inletFaceDefined = user_fine->inletFaceDefined;
1165 user_coarse->identifiedInletBCFace = user_fine->identifiedInletBCFace;
1166 }
1167 }
1168
1169 LOG_ALLOW(GLOBAL, LOG_INFO, "BC configuration propagation complete.\n");
1170
1172 PetscFunctionReturn(0);
1173}
void FreeBC_ParamList(BC_Param *head)
Frees the memory allocated for a linked list of BC_Param structs.
Definition io.c:263
#define LOG_ALLOW_SYNC(scope, level, fmt,...)
----— DEBUG ---------------------------------------— #define LOG_ALLOW(scope, level,...
Definition logging.h:267
#define PROFILE_FUNCTION_END
Marks the end of a profiled code block.
Definition logging.h:746
#define PROFILE_FUNCTION_BEGIN
Marks the beginning of a profiled code block (typically a function).
Definition logging.h:737
UserCtx * user
Definition variables.h:474
PetscBool inletFaceDefined
Definition variables.h:747
PetscMPIInt rank
Definition variables.h:588
PetscInt block_number
Definition variables.h:649
BCFace identifiedInletBCFace
Definition variables.h:748
struct BC_Param_s * next
Definition variables.h:266
char * key
Definition variables.h:264
UserMG usermg
Definition variables.h:698
char * value
Definition variables.h:265
BC_Param * params
Definition variables.h:297
PetscInt mglevels
Definition variables.h:481
MGCtx * mgctx
Definition variables.h:484
A node in a linked list for storing key-value parameters from the bcs.dat file.
Definition variables.h:263
User-defined context containing data specific to a single computational grid level.
Definition variables.h:728
User-level context for managing the entire multigrid hierarchy.
Definition variables.h:480
Here is the call graph for this function:
Here is the caller graph for this function:

◆ BoundarySystem_ExecuteStep()

PetscErrorCode BoundarySystem_ExecuteStep ( UserCtx user)

Executes one full boundary condition update cycle for a time step.

Parameters
user — The main UserCtx struct.

Executes one full boundary condition update cycle for a time step.

This function orchestrates the application of boundary conditions across all faces using a priority-based system. Each priority group is executed atomically: handlers at a given priority complete their PreStep, Apply, and PostStep phases, with MPI communication between phases as needed. This ensures proper data flow for boundary conditions that depend on results from other boundaries.

Priority execution order (matches legacy):
0 (BC_PRIORITY_INLET): Inlets - Set inflow, measure flux
1 (BC_PRIORITY_FARFIELD): Farfield - Bidirectional flow, measure flux
2 (BC_PRIORITY_WALL): Walls/Symmetry - Set velocity/gradients
3 (BC_PRIORITY_OUTLET): Outlets - Apply conservation correction

NOTE: This function is called INSIDE the ApplyBoundaryConditions iteration loop. It does NOT handle:

  • Contra2Cart (done by caller)
  • UpdateDummyCells (done by caller)
  • DMGlobalToLocal syncs (done by caller)
Parameters
user — The UserCtx containing boundary configuration and state.
Returns
PetscErrorCode 0 on success

Definition at line 1207 of file Boundaries.c.

1208{
1209 PetscErrorCode ierr;
1210 PetscFunctionBeginUser;
1212
1213 LOG_ALLOW(LOCAL, LOG_DEBUG, "Starting.\n");
1214
1215 // =========================================================================
1216 // PRIORITY 0: INLETS
1217 // =========================================================================
1218
1219 PetscReal local_inflow_pre = 0.0;
1220 PetscReal local_inflow_post = 0.0;
1221 PetscReal global_inflow_pre = 0.0;
1222 PetscReal global_inflow_post = 0.0;
1223 PetscInt num_handlers[3] = {0,0,0};
1224
1225 LOG_ALLOW(LOCAL, LOG_TRACE, " (INLETS): Begin.\n");
1226
1227 // Phase 1: PreStep - Preparation (e.g., calculate profiles, read files)
1228 for (int i = 0; i < 6; i++) {
1229 BoundaryCondition *handler = user->boundary_faces[i].handler;
1230 if (!handler || handler->priority != BC_PRIORITY_INLET) continue;
1231 if (!handler->PreStep) continue;
1232
1233 num_handlers[0]++;
1234 BCContext ctx = {
1235 .user = user,
1236 .face_id = (BCFace)i,
1237 .global_inflow_sum = NULL,
1238 .global_outflow_sum = NULL,
1239 .global_farfield_inflow_sum = &user->simCtx->FarFluxInSum,
1240 .global_farfield_outflow_sum = &user->simCtx->FarFluxOutSum
1241 };
1242
1243 LOG_ALLOW(LOCAL, LOG_TRACE, " PreStep: Face %d (%s)\n", i, BCFaceToString((BCFace)i));
1244 ierr = handler->PreStep(handler, &ctx, &local_inflow_pre, NULL); CHKERRQ(ierr);
1245 }
1246
1247 // Optional: Global communication for PreStep (for debugging)
1248 if (local_inflow_pre != 0.0) {
1249 ierr = MPI_Allreduce(&local_inflow_pre, &global_inflow_pre, 1, MPIU_REAL,
1250 MPI_SUM, PETSC_COMM_WORLD); CHKERRQ(ierr);
1251 LOG_ALLOW(GLOBAL, LOG_TRACE, " PreStep predicted flux: %.6e\n", global_inflow_pre);
1252 }
1253
1254 // Phase 2: Apply - Set boundary conditions
1255 for (int i = 0; i < 6; i++) {
1256 BoundaryCondition *handler = user->boundary_faces[i].handler;
1257 if (!handler || handler->priority != BC_PRIORITY_INLET) continue;
1258 if(!handler->Apply) continue; // For example Periodic BCs
1259
1260 num_handlers[1]++;
1261
1262 BCContext ctx = {
1263 .user = user,
1264 .face_id = (BCFace)i,
1265 .global_inflow_sum = NULL,
1266 .global_outflow_sum = NULL,
1267 .global_farfield_inflow_sum = &user->simCtx->FarFluxInSum,
1268 .global_farfield_outflow_sum = &user->simCtx->FarFluxOutSum
1269 };
1270
1271 LOG_ALLOW(LOCAL, LOG_TRACE, " Apply: Face %d (%s)\n", i, BCFaceToString((BCFace)i));
1272 ierr = handler->Apply(handler, &ctx); CHKERRQ(ierr);
1273 }
1274
1275 // Phase 3: PostStep - Measure actual flux
1276 for (int i = 0; i < 6; i++) {
1277 BoundaryCondition *handler = user->boundary_faces[i].handler;
1278 if (!handler || handler->priority != BC_PRIORITY_INLET) continue;
1279 if (!handler->PostStep) continue;
1280
1281 num_handlers[2]++;
1282
1283 BCContext ctx = {
1284 .user = user,
1285 .face_id = (BCFace)i,
1286 .global_inflow_sum = NULL,
1287 .global_outflow_sum = NULL,
1288 .global_farfield_inflow_sum = &user->simCtx->FarFluxInSum,
1289 .global_farfield_outflow_sum = &user->simCtx->FarFluxOutSum
1290 };
1291
1292 LOG_ALLOW(LOCAL, LOG_TRACE, " PostStep: Face %d (%s)\n", i, BCFaceToString((BCFace)i));
1293 ierr = handler->PostStep(handler, &ctx, &local_inflow_post, NULL); CHKERRQ(ierr);
1294 }
1295
1296 // Phase 4: Global communication - Sum flux for other priorities to use
1297 ierr = MPI_Allreduce(&local_inflow_post, &global_inflow_post, 1, MPIU_REAL,
1298 MPI_SUM, PETSC_COMM_WORLD); CHKERRQ(ierr);
1299
1300 // Store for next priority levels
1301 user->simCtx->FluxInSum = global_inflow_post;
1302
1304 " (INLETS): %d Prestep(s), %d Application(s), %d Poststep(s), FluxInSum = %.6e\n",
1305 num_handlers[0],num_handlers[1],num_handlers[2], global_inflow_post);
1306
1307 // =========================================================================
1308 // PRIORITY 1: FARFIELD
1309 // =========================================================================
1310
1311 PetscReal local_farfield_in_pre = 0.0;
1312 PetscReal local_farfield_out_pre = 0.0;
1313 PetscReal local_farfield_in_post = 0.0;
1314 PetscReal local_farfield_out_post = 0.0;
1315 PetscReal global_farfield_in_pre = 0.0;
1316 PetscReal global_farfield_out_pre = 0.0;
1317 PetscReal global_farfield_in_post = 0.0;
1318 PetscReal global_farfield_out_post = 0.0;
1319 memset(num_handlers,0,sizeof(num_handlers));
1320
1321 LOG_ALLOW(LOCAL, LOG_TRACE, " (FARFIELD): Begin.\n");
1322
1323 // Phase 1: PreStep - Analyze flow direction, measure initial flux
1324 for (int i = 0; i < 6; i++) {
1325 BoundaryCondition *handler = user->boundary_faces[i].handler;
1326 if (!handler || handler->priority != BC_PRIORITY_FARFIELD) continue;
1327 if (!handler->PreStep) continue;
1328
1329 num_handlers[0]++;
1330 BCContext ctx = {
1331 .user = user,
1332 .face_id = (BCFace)i,
1333 .global_inflow_sum = &user->simCtx->FluxInSum, // Available from Priority 0
1334 .global_outflow_sum = NULL,
1335 .global_farfield_inflow_sum = &user->simCtx->FarFluxInSum,
1336 .global_farfield_outflow_sum = &user->simCtx->FarFluxOutSum
1337 };
1338
1339 LOG_ALLOW(LOCAL, LOG_TRACE, " PreStep: Face %d (%s)\n", i, BCFaceToString((BCFace)i));
1340 ierr = handler->PreStep(handler, &ctx, &local_farfield_in_pre, &local_farfield_out_pre);
1341 CHKERRQ(ierr);
1342 }
1343
1344 // Phase 2: Global communication (optional, for debugging)
1345 if (local_farfield_in_pre != 0.0 || local_farfield_out_pre != 0.0) {
1346 ierr = MPI_Allreduce(&local_farfield_in_pre, &global_farfield_in_pre, 1, MPIU_REAL,
1347 MPI_SUM, PETSC_COMM_WORLD); CHKERRQ(ierr);
1348 ierr = MPI_Allreduce(&local_farfield_out_pre, &global_farfield_out_pre, 1, MPIU_REAL,
1349 MPI_SUM, PETSC_COMM_WORLD); CHKERRQ(ierr);
1350
1352 " Farfield pre-analysis: In=%.6e, Out=%.6e\n",
1353 global_farfield_in_pre, global_farfield_out_pre);
1354 }
1355
1356 // Phase 3: Apply - Set farfield boundary conditions
1357 for (int i = 0; i < 6; i++) {
1358 BoundaryCondition *handler = user->boundary_faces[i].handler;
1359 if (!handler || handler->priority != BC_PRIORITY_FARFIELD) continue;
1360 if(!handler->Apply) continue; // For example Periodic BCs
1361
1362 num_handlers[1]++;
1363
1364 BCContext ctx = {
1365 .user = user,
1366 .face_id = (BCFace)i,
1367 .global_inflow_sum = &user->simCtx->FluxInSum,
1368 .global_outflow_sum = NULL,
1369 .global_farfield_inflow_sum = &user->simCtx->FarFluxInSum,
1370 .global_farfield_outflow_sum = &user->simCtx->FarFluxOutSum
1371 };
1372
1373 LOG_ALLOW(LOCAL, LOG_TRACE, " Apply: Face %d (%s)\n", i, BCFaceToString((BCFace)i));
1374 ierr = handler->Apply(handler, &ctx); CHKERRQ(ierr);
1375 }
1376
1377 // Phase 4: PostStep - Measure actual farfield fluxes
1378 for (int i = 0; i < 6; i++) {
1379 BoundaryCondition *handler = user->boundary_faces[i].handler;
1380 if (!handler || handler->priority != BC_PRIORITY_FARFIELD) continue;
1381 if (!handler->PostStep) continue;
1382
1383 num_handlers[2]++;
1384
1385 BCContext ctx = {
1386 .user = user,
1387 .face_id = (BCFace)i,
1388 .global_inflow_sum = &user->simCtx->FluxInSum,
1389 .global_outflow_sum = NULL,
1390 .global_farfield_inflow_sum = &user->simCtx->FarFluxInSum,
1391 .global_farfield_outflow_sum = &user->simCtx->FarFluxOutSum
1392 };
1393
1394 LOG_ALLOW(LOCAL, LOG_TRACE, " PostStep: Face %d (%s)\n", i, BCFaceToString((BCFace)i));
1395 ierr = handler->PostStep(handler, &ctx, &local_farfield_in_post, &local_farfield_out_post);
1396 CHKERRQ(ierr);
1397 }
1398
1399 // Phase 5: Global communication - Store for outlet priority
1400 if (num_handlers > 0) {
1401 ierr = MPI_Allreduce(&local_farfield_in_post, &global_farfield_in_post, 1, MPIU_REAL,
1402 MPI_SUM, PETSC_COMM_WORLD); CHKERRQ(ierr);
1403 ierr = MPI_Allreduce(&local_farfield_out_post, &global_farfield_out_post, 1, MPIU_REAL,
1404 MPI_SUM, PETSC_COMM_WORLD); CHKERRQ(ierr);
1405
1406 // Store for outlet handlers to use
1407 user->simCtx->FarFluxInSum = global_farfield_in_post;
1408 user->simCtx->FarFluxOutSum = global_farfield_out_post;
1409
1411 " (FARFIELD): %d Prestep(s), %d Application(s), %d Poststep(s) , InFlux=%.6e, OutFlux=%.6e\n",
1412 num_handlers[0],num_handlers[1],num_handlers[2], global_farfield_in_post, global_farfield_out_post);
1413 } else {
1414 // No farfield handlers - zero out the fluxes
1415 user->simCtx->FarFluxInSum = 0.0;
1416 user->simCtx->FarFluxOutSum = 0.0;
1417 }
1418
1419
1420 // =========================================================================
1421 // PRIORITY 2: WALLS
1422 // =========================================================================
1423
1424 memset(num_handlers,0,sizeof(num_handlers));
1425
1426 LOG_ALLOW(LOCAL, LOG_TRACE, " (WALLS): Begin.\n");
1427
1428 // Phase 1: PreStep - Preparation (usually no-op for walls)
1429 for (int i = 0; i < 6; i++) {
1430 BoundaryCondition *handler = user->boundary_faces[i].handler;
1431 if (!handler || handler->priority != BC_PRIORITY_WALL) continue;
1432 if (!handler->PreStep) continue;
1433
1434 num_handlers[0]++;
1435 BCContext ctx = {
1436 .user = user,
1437 .face_id = (BCFace)i,
1438 .global_inflow_sum = &user->simCtx->FluxInSum,
1439 .global_outflow_sum = NULL,
1440 .global_farfield_inflow_sum = &user->simCtx->FarFluxInSum,
1441 .global_farfield_outflow_sum = &user->simCtx->FarFluxOutSum
1442 };
1443
1444 LOG_ALLOW(LOCAL, LOG_TRACE, " PreStep: Face %d (%s)\n", i, BCFaceToString((BCFace)i));
1445 ierr = handler->PreStep(handler, &ctx, NULL, NULL); CHKERRQ(ierr);
1446 }
1447
1448 // No global communication needed for walls
1449
1450 // Phase 2: Apply - Set boundary conditions
1451 for (int i = 0; i < 6; i++) {
1452 BoundaryCondition *handler = user->boundary_faces[i].handler;
1453 if (!handler || handler->priority != BC_PRIORITY_WALL) continue;
1454 if(!handler->Apply) continue; // For example Periodic BCs
1455
1456 num_handlers[1]++;
1457
1458 BCContext ctx = {
1459 .user = user,
1460 .face_id = (BCFace)i,
1461 .global_inflow_sum = &user->simCtx->FluxInSum,
1462 .global_outflow_sum = NULL,
1463 .global_farfield_inflow_sum = &user->simCtx->FarFluxInSum,
1464 .global_farfield_outflow_sum = &user->simCtx->FarFluxOutSum
1465 };
1466
1467 LOG_ALLOW(LOCAL, LOG_TRACE, " Apply: Face %d (%s)\n", i, BCFaceToString((BCFace)i));
1468 ierr = handler->Apply(handler, &ctx); CHKERRQ(ierr);
1469 }
1470
1471 // Phase 3: PostStep - Post-application processing (usually no-op for walls)
1472 for (int i = 0; i < 6; i++) {
1473 BoundaryCondition *handler = user->boundary_faces[i].handler;
1474 if (!handler || handler->priority != BC_PRIORITY_WALL) continue;
1475 if (!handler->PostStep) continue;
1476
1477 num_handlers[2]++;
1478
1479 BCContext ctx = {
1480 .user = user,
1481 .face_id = (BCFace)i,
1482 .global_inflow_sum = &user->simCtx->FluxInSum,
1483 .global_outflow_sum = NULL,
1484 .global_farfield_inflow_sum = &user->simCtx->FarFluxInSum,
1485 .global_farfield_outflow_sum = &user->simCtx->FarFluxOutSum
1486 };
1487
1488 LOG_ALLOW(LOCAL, LOG_TRACE, " PostStep: Face %d (%s)\n", i, BCFaceToString((BCFace)i));
1489 ierr = handler->PostStep(handler, &ctx, NULL, NULL); CHKERRQ(ierr);
1490 }
1491
1492 // No global communication needed for walls
1493
1494 LOG_ALLOW(GLOBAL, LOG_INFO, " (WALLS): %d Prestep(s), %d Application(s), %d Poststep(s) applied.\n",
1495 num_handlers[0],num_handlers[1],num_handlers[2]);
1496
1497
1498 // =========================================================================
1499 // PRIORITY 3: OUTLETS
1500 // =========================================================================
1501
1502 PetscReal local_outflow_pre = 0.0;
1503 PetscReal local_outflow_post = 0.0;
1504 PetscReal global_outflow_pre = 0.0;
1505 PetscReal global_outflow_post = 0.0;
1506 memset(num_handlers,0,sizeof(num_handlers));
1507
1508 LOG_ALLOW(LOCAL, LOG_TRACE, " (OUTLETS): Begin.\n");
1509
1510 // Phase 1: PreStep - Measure uncorrected outflow (from ucat)
1511 for (int i = 0; i < 6; i++) {
1512 BoundaryCondition *handler = user->boundary_faces[i].handler;
1513 if (!handler || handler->priority != BC_PRIORITY_OUTLET) continue;
1514 if (!handler->PreStep) continue;
1515
1516 num_handlers[0]++;
1517 BCContext ctx = {
1518 .user = user,
1519 .face_id = (BCFace)i,
1520 .global_inflow_sum = &user->simCtx->FluxInSum, // From Priority 0
1521 .global_outflow_sum = NULL,
1522 .global_farfield_inflow_sum = &user->simCtx->FarFluxInSum,
1523 .global_farfield_outflow_sum = &user->simCtx->FarFluxOutSum
1524 };
1525
1526 LOG_ALLOW(LOCAL, LOG_TRACE, " PreStep: Face %d (%s)\n", i, BCFaceToString((BCFace)i));
1527 ierr = handler->PreStep(handler, &ctx, NULL, &local_outflow_pre); CHKERRQ(ierr);
1528 }
1529
1530 // Phase 2: Global communication - Get uncorrected outflow sum
1531 ierr = MPI_Allreduce(&local_outflow_pre, &global_outflow_pre, 1, MPIU_REAL,
1532 MPI_SUM, PETSC_COMM_WORLD); CHKERRQ(ierr);
1533
1534 // Calculate total inflow (inlet + farfield inflow)
1535 PetscReal total_inflow = user->simCtx->FluxInSum + user->simCtx->FarFluxInSum;
1536
1538 " Uncorrected outflow: %.6e, Total inflow: %.6e (Inlet: %.6e + Farfield: %.6e)\n",
1539 global_outflow_pre, total_inflow, user->simCtx->FluxInSum,
1540 user->simCtx->FarFluxInSum);
1541
1542 // Phase 3: Apply - Set corrected boundary conditions
1543 for (int i = 0; i < 6; i++) {
1544 BoundaryCondition *handler = user->boundary_faces[i].handler;
1545 if (!handler || handler->priority != BC_PRIORITY_OUTLET) continue;
1546 if(!handler->Apply) continue; // For example Periodic BCs
1547
1548 num_handlers[1]++;
1549
1550 BCContext ctx = {
1551 .user = user,
1552 .face_id = (BCFace)i,
1553 .global_inflow_sum = &user->simCtx->FluxInSum, // From Priority 0
1554 .global_outflow_sum = &global_outflow_pre, // From PreStep above
1555 .global_farfield_inflow_sum = &user->simCtx->FarFluxInSum,
1556 .global_farfield_outflow_sum = &user->simCtx->FarFluxOutSum
1557 };
1558
1559 LOG_ALLOW(LOCAL, LOG_TRACE, " Apply: Face %d (%s)\n", i, BCFaceToString((BCFace)i));
1560 ierr = handler->Apply(handler, &ctx); CHKERRQ(ierr);
1561 }
1562
1563 // Phase 4: PostStep - Measure corrected outflow (verification)
1564 for (int i = 0; i < 6; i++) {
1565 BoundaryCondition *handler = user->boundary_faces[i].handler;
1566 if (!handler || handler->priority != BC_PRIORITY_OUTLET) continue;
1567 if (!handler->PostStep) continue;
1568
1569 num_handlers[2]++;
1570
1571 BCContext ctx = {
1572 .user = user,
1573 .face_id = (BCFace)i,
1574 .global_inflow_sum = &user->simCtx->FluxInSum,
1575 .global_outflow_sum = &global_outflow_pre,
1576 .global_farfield_inflow_sum = &user->simCtx->FarFluxInSum,
1577 .global_farfield_outflow_sum = &user->simCtx->FarFluxOutSum
1578 };
1579
1580 LOG_ALLOW(LOCAL, LOG_TRACE, " PostStep: Face %d (%s)\n", i, BCFaceToString((BCFace)i));
1581 ierr = handler->PostStep(handler, &ctx, NULL, &local_outflow_post); CHKERRQ(ierr);
1582 }
1583
1584 // Phase 5: Global communication - Verify conservation
1585 ierr = MPI_Allreduce(&local_outflow_post, &global_outflow_post, 1, MPIU_REAL,
1586 MPI_SUM, PETSC_COMM_WORLD); CHKERRQ(ierr);
1587
1588 // Store for legacy compatibility and reporting
1589 user->simCtx->FluxOutSum = global_outflow_post;
1590
1591 // Conservation check (compare total outflow vs total inflow)
1592 PetscReal total_outflow = global_outflow_post + user->simCtx->FarFluxOutSum;
1593 PetscReal flux_error = PetscAbsReal(total_outflow - total_inflow);
1594 PetscReal relative_error = (total_inflow > 1e-16) ?
1595 flux_error / total_inflow : flux_error;
1596
1598 " (OUTLETS): %d Prestep(s), %d Application(s), %d Poststep(s), FluxOutSum = %.6e\n",
1599 num_handlers[0],num_handlers[1],num_handlers[2], global_outflow_post);
1601 " Conservation: Total In=%.6e, Total Out=%.6e, Error=%.3e (%.2e)%%)\n",
1602 total_inflow, total_outflow, flux_error, relative_error * 100.0);
1603
1604 if (relative_error > 1e-6) {
1606 " WARNING: Large mass conservation error (%.2e%%)!\n",
1607 relative_error * 100.0);
1608 }
1609
1610
1611 LOG_ALLOW(LOCAL, LOG_VERBOSE, "Complete.\n");
1612
1614 PetscFunctionReturn(0);
1615}
@ LOG_TRACE
Very fine-grained tracing information for in-depth debugging.
Definition logging.h:33
@ LOG_WARNING
Non-critical issues that warrant attention.
Definition logging.h:29
@ LOG_VERBOSE
Extremely detailed logs, typically for development use only.
Definition logging.h:34
@ BC_PRIORITY_OUTLET
Definition variables.h:255
@ BC_PRIORITY_FARFIELD
Definition variables.h:253
@ BC_PRIORITY_WALL
Definition variables.h:254
@ BC_PRIORITY_INLET
Definition variables.h:252
Here is the call graph for this function:
Here is the caller graph for this function:

◆ BoundarySystem_RefreshUbcs()

PetscErrorCode BoundarySystem_RefreshUbcs ( UserCtx user)

(Private) A lightweight execution engine that calls the UpdateUbcs() method on all relevant handlers.

This function's sole purpose is to re-evaluate the target boundary values (ubcs) for flow-dependent boundary conditions (e.g., Symmetry, Outlets) after the interior velocity field has changed, such as after the projection step.

It operates based on a "pull" model: it iterates through all boundary handlers and executes their UpdateUbcs method only if the handler has provided one. This makes the system extensible, as new flow-dependent handlers can be added without changing this engine. Handlers for fixed boundary conditions (e.g., a wall with a constant velocity) will have their UpdateUbcs pointer set to NULL and will be skipped automatically.

Note
This function is a critical part of the post-projection refresh. It intentionally does NOT modify ucont and does NOT perform flux balancing.
Parameters
userThe main UserCtx struct.
Returns
PetscErrorCode 0 on success.

Definition at line 1644 of file Boundaries.c.

1645{
1646 PetscErrorCode ierr;
1647 PetscFunctionBeginUser;
1648
1649 LOG_ALLOW(GLOBAL, LOG_TRACE, "Refreshing `ubcs` targets for flow-dependent boundaries...\n");
1650
1651 // Loop through all 6 faces of the domain
1652 for (int i = 0; i < 6; i++) {
1653 BoundaryCondition *handler = user->boundary_faces[i].handler;
1654
1655 // THE FILTER:
1656 // This is the core logic. We only act if a handler exists for the face
1657 // AND that handler has explicitly implemented the `UpdateUbcs` method.
1658 if (handler && handler->UpdateUbcs) {
1659
1660 const char *face_name = BCFaceToString((BCFace)i);
1661 LOG_ALLOW(LOCAL, LOG_TRACE, " Calling UpdateUbcs() for handler on Face %s.\n", face_name);
1662
1663 // Prepare the context. For this refresh step, we don't need to pass flux sums.
1664 BCContext ctx = {
1665 .user = user,
1666 .face_id = (BCFace)i,
1667 .global_inflow_sum = NULL,
1668 .global_outflow_sum = NULL,
1669 .global_farfield_inflow_sum = NULL,
1670 .global_farfield_outflow_sum = NULL
1671 };
1672
1673 // Call the handler's specific UpdateUbcs function pointer.
1674 ierr = handler->UpdateUbcs(handler, &ctx); CHKERRQ(ierr);
1675 }
1676 }
1677
1678 PetscFunctionReturn(0);
1679}
Here is the call graph for this function:
Here is the caller graph for this function:

◆ BoundarySystem_Destroy()

PetscErrorCode BoundarySystem_Destroy ( UserCtx user)

Cleans up and destroys all boundary system resources.

Parameters
userThe main UserCtx struct.

Cleans up and destroys all boundary system resources.

This function should be called once at the end of the simulation. It iterates through all created handlers and calls their respective Destroy methods to free any privately allocated data (like parameter lists or handler-specific data), and then frees the handler object itself. This prevents memory leaks.

Parameters
userThe main UserCtx struct containing the boundary system to be destroyed.
Returns
PetscErrorCode 0 on success.

Definition at line 1699 of file Boundaries.c.

1700{
1701 PetscErrorCode ierr;
1702 PetscFunctionBeginUser;
1703
1704
1705
1706 LOG_ALLOW(GLOBAL, LOG_INFO, "Starting destruction of all boundary handlers. \n");
1707
1708 for (int i = 0; i < 6; i++) {
1709 BoundaryFaceConfig *face_cfg = &user->boundary_faces[i];
1710 const char *face_name = BCFaceToString(face_cfg->face_id);
1711
1712 // --- Step 1: Free the parameter linked list associated with this face ---
1713 if (face_cfg->params) {
1714 LOG_ALLOW(LOCAL, LOG_DEBUG, " Freeing parameter list for Face %d (%s). \n", i, face_name);
1715 FreeBC_ParamList(face_cfg->params);
1716 face_cfg->params = NULL; // Good practice to nullify dangling pointers
1717 }
1718
1719 // --- Step 2: Destroy the handler object itself ---
1720 if (face_cfg->handler) {
1721 const char *handler_name = BCHandlerTypeToString(face_cfg->handler->type);
1722 LOG_ALLOW(LOCAL, LOG_DEBUG, " Destroying handler '%s' on Face %d (%s).\n", handler_name, i, face_name);
1723
1724 // Call the handler's specific cleanup function first, if it exists.
1725 // This will free any memory stored in the handler's private `data` pointer.
1726 if (face_cfg->handler->Destroy) {
1727 ierr = face_cfg->handler->Destroy(face_cfg->handler); CHKERRQ(ierr);
1728 }
1729
1730 // Finally, free the generic BoundaryCondition object itself.
1731 ierr = PetscFree(face_cfg->handler); CHKERRQ(ierr);
1732 face_cfg->handler = NULL;
1733 }
1734 }
1735
1736 LOG_ALLOW(GLOBAL, LOG_INFO, "Destruction complete.\n");
1737 PetscFunctionReturn(0);
1738}
Here is the call graph for this function:
Here is the caller graph for this function:

◆ CanRankServiceInletFace()

PetscErrorCode CanRankServiceInletFace ( UserCtx user,
const DMDALocalInfo *  info,
PetscInt  IM_nodes_global,
PetscInt  JM_nodes_global,
PetscInt  KM_nodes_global,
PetscBool *  can_service_inlet_out 
)

Determines if the current MPI rank owns any part of the globally defined inlet face, making it responsible for placing particles on that portion of the surface.

The determination is based on the rank's owned nodes (from DMDALocalInfo) and the global node counts, in conjunction with the user->identifiedInletBCFace. A rank can service an inlet face if it owns the cells adjacent to that global boundary and has a non-zero extent (owns cells) in the tangential dimensions of that face.

Parameters
userPointer to the UserCtx structure, containing identifiedInletBCFace.
infoPointer to the DMDALocalInfo for the current rank's DA (node-based).
IM_nodes_globalGlobal number of nodes in the I-direction (e.g., user->IM + 1 if user->IM is cell count).
JM_nodes_globalGlobal number of nodes in the J-direction.
KM_nodes_globalGlobal number of nodes in the K-direction.
[out]can_service_inlet_outPointer to a PetscBool; set to PETSC_TRUE if the rank services (part of) the inlet, PETSC_FALSE otherwise.
Returns
PetscErrorCode 0 on success, non-zero on failure.

Definition at line 25 of file Boundaries.c.

28{
29 PetscErrorCode ierr;
30 PetscMPIInt rank_for_logging; // For detailed debugging logs
31 PetscFunctionBeginUser;
33
34 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank_for_logging); CHKERRQ(ierr);
35
36 *can_service_inlet_out = PETSC_FALSE; // Default to no service
37
38 if (!user->inletFaceDefined) {
39 LOG_ALLOW(LOCAL, LOG_DEBUG, "[Rank %d]: Inlet face not defined in user context. Cannot service.\n", rank_for_logging);
40 PetscFunctionReturn(0);
41 }
42
43 // Get the range of cells owned by this rank in each dimension
44 PetscInt owned_start_cell_i, num_owned_cells_on_rank_i;
45 PetscInt owned_start_cell_j, num_owned_cells_on_rank_j;
46 PetscInt owned_start_cell_k, num_owned_cells_on_rank_k;
47
48 ierr = GetOwnedCellRange(info, 0, &owned_start_cell_i, &num_owned_cells_on_rank_i); CHKERRQ(ierr);
49 ierr = GetOwnedCellRange(info, 1, &owned_start_cell_j, &num_owned_cells_on_rank_j); CHKERRQ(ierr);
50 ierr = GetOwnedCellRange(info, 2, &owned_start_cell_k, &num_owned_cells_on_rank_k); CHKERRQ(ierr);
51
52 // Determine the global index of the last cell (0-indexed) in each direction.
53 // Example: If IM_nodes_global = 11 (nodes 0-10), there are 10 cells (0-9). Last cell index is 9.
54 // Formula: global_nodes - 1 (num cells) - 1 (0-indexed) = global_nodes - 2.
55 PetscInt last_global_cell_idx_i = (IM_nodes_global > 1) ? (IM_nodes_global - 2) : -1; // -1 if 0 or 1 node (i.e., 0 cells)
56 PetscInt last_global_cell_idx_j = (JM_nodes_global > 1) ? (JM_nodes_global - 2) : -1;
57 PetscInt last_global_cell_idx_k = (KM_nodes_global > 1) ? (KM_nodes_global - 2) : -1;
58
59 switch (user->identifiedInletBCFace) {
60 case BC_FACE_NEG_X: // Inlet on the global I-minimum face (face of cell C_i=0)
61 // Rank services if its first owned node is global node 0 (info->xs == 0),
62 // and it owns cells in I, J, and K directions.
63 if (info->xs == 0 && num_owned_cells_on_rank_i > 0 &&
64 num_owned_cells_on_rank_j > 0 && num_owned_cells_on_rank_k > 0) {
65 *can_service_inlet_out = PETSC_TRUE;
66 }
67 break;
68 case BC_FACE_POS_X: // Inlet on the global I-maximum face (face of cell C_i=last_global_cell_idx_i)
69 // Rank services if it owns the last cell in I-direction,
70 // and has extent in J and K.
71 if (last_global_cell_idx_i >= 0 && /* Check for valid global domain */
72 (owned_start_cell_i + num_owned_cells_on_rank_i - 1) == last_global_cell_idx_i && /* Rank's last cell is the global last cell */
73 num_owned_cells_on_rank_j > 0 && num_owned_cells_on_rank_k > 0) {
74 *can_service_inlet_out = PETSC_TRUE;
75 }
76 break;
77 case BC_FACE_NEG_Y:
78 if (info->ys == 0 && num_owned_cells_on_rank_j > 0 &&
79 num_owned_cells_on_rank_i > 0 && num_owned_cells_on_rank_k > 0) {
80 *can_service_inlet_out = PETSC_TRUE;
81 }
82 break;
83 case BC_FACE_POS_Y:
84 if (last_global_cell_idx_j >= 0 &&
85 (owned_start_cell_j + num_owned_cells_on_rank_j - 1) == last_global_cell_idx_j &&
86 num_owned_cells_on_rank_i > 0 && num_owned_cells_on_rank_k > 0) {
87 *can_service_inlet_out = PETSC_TRUE;
88 }
89 break;
90 case BC_FACE_NEG_Z:
91 if (info->zs == 0 && num_owned_cells_on_rank_k > 0 &&
92 num_owned_cells_on_rank_i > 0 && num_owned_cells_on_rank_j > 0) {
93 *can_service_inlet_out = PETSC_TRUE;
94 }
95 break;
96 case BC_FACE_POS_Z:
97 if (last_global_cell_idx_k >= 0 &&
98 (owned_start_cell_k + num_owned_cells_on_rank_k - 1) == last_global_cell_idx_k &&
99 num_owned_cells_on_rank_i > 0 && num_owned_cells_on_rank_j > 0) {
100 *can_service_inlet_out = PETSC_TRUE;
101 }
102 break;
103 default:
104 LOG_ALLOW(LOCAL, LOG_WARNING, "[Rank %d]: Unknown inlet face %s.\n", rank_for_logging, BCFaceToString((BCFace)user->identifiedInletBCFace));
105 break;
106 }
107
109 "[Rank %d] Check Service for Inlet %s:\n"
110 " - Local Domain: starts at cell (%d,%d,%d), has (%d,%d,%d) cells.\n"
111 " - Global Domain: has (%d,%d,%d) nodes, so last cell is (%d,%d,%d).\n",
112 rank_for_logging,
114 owned_start_cell_i, owned_start_cell_j, owned_start_cell_k,
115 num_owned_cells_on_rank_i, num_owned_cells_on_rank_j, num_owned_cells_on_rank_k,
116 IM_nodes_global, JM_nodes_global, KM_nodes_global,
117 last_global_cell_idx_i, last_global_cell_idx_j, last_global_cell_idx_k);
118
119 LOG_ALLOW(LOCAL, LOG_INFO,"[Rank %d] Inlet Face %s Service Check Result: %s | Owned Cells (I,J,K): (%d,%d,%d) | Starts at Cell (%d,%d,%d)\n",
120 rank_for_logging,
122 (*can_service_inlet_out) ? "CAN SERVICE" : "CANNOT SERVICE",
123 num_owned_cells_on_rank_i, num_owned_cells_on_rank_j, num_owned_cells_on_rank_k,
124 owned_start_cell_i, owned_start_cell_j, owned_start_cell_k);
125
127
128 PetscFunctionReturn(0);
129}
PetscErrorCode GetOwnedCellRange(const DMDALocalInfo *info_nodes, PetscInt dim, PetscInt *xs_cell_global, PetscInt *xm_cell_local)
Determines the global starting index and number of CELLS owned by the current processor in a specifie...
Definition setup.c:1756
@ BC_FACE_NEG_X
Definition variables.h:204
@ BC_FACE_POS_Z
Definition variables.h:206
@ BC_FACE_POS_Y
Definition variables.h:205
@ BC_FACE_NEG_Z
Definition variables.h:206
@ BC_FACE_POS_X
Definition variables.h:204
@ BC_FACE_NEG_Y
Definition variables.h:205
Here is the call graph for this function:
Here is the caller graph for this function:

◆ CanRankServiceFace()

PetscErrorCode CanRankServiceFace ( const DMDALocalInfo *  info,
PetscInt  IM_nodes_global,
PetscInt  JM_nodes_global,
PetscInt  KM_nodes_global,
BCFace  face_id,
PetscBool *  can_service_out 
)

Determines if the current MPI rank owns any part of a specified global face.

This function is a general utility for parallel boundary operations. It checks if the local domain of the current MPI rank is adjacent to a specified global boundary face. A rank "services" a face if it owns the cells adjacent to that global boundary and has a non-zero extent (i.e., owns at least one cell) in the tangential dimensions of that face.

Parameters
infoPointer to the DMDALocalInfo for the current rank's DA.
IM_nodes_globalGlobal number of nodes in the I-direction (e.g., user->IM + 1 if user->IM is cell count).
JM_nodes_globalGlobal number of nodes in the J-direction.
KM_nodes_globalGlobal number of nodes in the K-direction.
face_idThe specific global face (e.g., BC_FACE_NEG_Z) to check.
[out]can_service_outPointer to a PetscBool; set to PETSC_TRUE if the rank services the face, PETSC_FALSE otherwise.
Returns
PetscErrorCode 0 on success.

Definition at line 151 of file Boundaries.c.

153{
154 PetscErrorCode ierr;
155 PetscMPIInt rank_for_logging;
156 PetscFunctionBeginUser;
157
159
160 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank_for_logging); CHKERRQ(ierr);
161
162 *can_service_out = PETSC_FALSE; // Default to no service
163
164 // Get the range of cells owned by this rank
165 PetscInt owned_start_cell_i, num_owned_cells_on_rank_i;
166 PetscInt owned_start_cell_j, num_owned_cells_on_rank_j;
167 PetscInt owned_start_cell_k, num_owned_cells_on_rank_k;
168 ierr = GetOwnedCellRange(info, 0, &owned_start_cell_i, &num_owned_cells_on_rank_i); CHKERRQ(ierr);
169 ierr = GetOwnedCellRange(info, 1, &owned_start_cell_j, &num_owned_cells_on_rank_j); CHKERRQ(ierr);
170 ierr = GetOwnedCellRange(info, 2, &owned_start_cell_k, &num_owned_cells_on_rank_k); CHKERRQ(ierr);
171
172 // Determine the global index of the last cell (0-indexed) in each direction.
173 PetscInt last_global_cell_idx_i = (IM_nodes_global > 1) ? (IM_nodes_global - 2) : -1;
174 PetscInt last_global_cell_idx_j = (JM_nodes_global > 1) ? (JM_nodes_global - 2) : -1;
175 PetscInt last_global_cell_idx_k = (KM_nodes_global > 1) ? (KM_nodes_global - 2) : -1;
176
177 switch (face_id) {
178 case BC_FACE_NEG_X:
179 if (info->xs == 0 && num_owned_cells_on_rank_i > 0 &&
180 num_owned_cells_on_rank_j > 0 && num_owned_cells_on_rank_k > 0) {
181 *can_service_out = PETSC_TRUE;
182 }
183 break;
184 case BC_FACE_POS_X:
185 if (last_global_cell_idx_i >= 0 &&
186 (owned_start_cell_i + num_owned_cells_on_rank_i - 1) == last_global_cell_idx_i &&
187 num_owned_cells_on_rank_j > 0 && num_owned_cells_on_rank_k > 0) {
188 *can_service_out = PETSC_TRUE;
189 }
190 break;
191 case BC_FACE_NEG_Y:
192 if (info->ys == 0 && num_owned_cells_on_rank_j > 0 &&
193 num_owned_cells_on_rank_i > 0 && num_owned_cells_on_rank_k > 0) {
194 *can_service_out = PETSC_TRUE;
195 }
196 break;
197 case BC_FACE_POS_Y:
198 if (last_global_cell_idx_j >= 0 &&
199 (owned_start_cell_j + num_owned_cells_on_rank_j - 1) == last_global_cell_idx_j &&
200 num_owned_cells_on_rank_i > 0 && num_owned_cells_on_rank_k > 0) {
201 *can_service_out = PETSC_TRUE;
202 }
203 break;
204 case BC_FACE_NEG_Z:
205 if (info->zs == 0 && num_owned_cells_on_rank_k > 0 &&
206 num_owned_cells_on_rank_i > 0 && num_owned_cells_on_rank_j > 0) {
207 *can_service_out = PETSC_TRUE;
208 }
209 break;
210 case BC_FACE_POS_Z:
211 if (last_global_cell_idx_k >= 0 &&
212 (owned_start_cell_k + num_owned_cells_on_rank_k - 1) == last_global_cell_idx_k &&
213 num_owned_cells_on_rank_i > 0 && num_owned_cells_on_rank_j > 0) {
214 *can_service_out = PETSC_TRUE;
215 }
216 break;
217 default:
218 LOG_ALLOW(LOCAL, LOG_WARNING, "Rank %d: Unknown face enum %d. \n", rank_for_logging, face_id);
219 break;
220 }
221
222 LOG_ALLOW(LOCAL, LOG_DEBUG, "Rank %d check for face %s: Result=%s. \n",
223 rank_for_logging, BCFaceToString((BCFace)face_id), (*can_service_out ? "TRUE" : "FALSE"));
224
226
227 PetscFunctionReturn(0);
228}
Here is the call graph for this function:
Here is the caller graph for this function:

◆ GetDeterministicFaceGridLocation()

PetscErrorCode GetDeterministicFaceGridLocation ( UserCtx user,
const DMDALocalInfo *  info,
PetscInt  xs_gnode_rank,
PetscInt  ys_gnode_rank,
PetscInt  zs_gnode_rank,
PetscInt  IM_cells_global,
PetscInt  JM_cells_global,
PetscInt  KM_cells_global,
PetscInt64  particle_global_id,
PetscInt *  ci_metric_lnode_out,
PetscInt *  cj_metric_lnode_out,
PetscInt *  ck_metric_lnode_out,
PetscReal *  xi_metric_logic_out,
PetscReal *  eta_metric_logic_out,
PetscReal *  zta_metric_logic_out,
PetscBool *  placement_successful_out 
)

Places particles in a deterministic grid/raster pattern on a specified domain face.

This function creates a set of equidistant, parallel lines of particles near the four edges of the face specified by user->identifiedInletBCFace. The number of lines drawn from each edge is hardcoded within this function (default is 2).

For example, if grid_layers=2 on face BC_FACE_NEG_X, the function will create particle lines at:

  • y ~ 0*dy, y ~ 1*dy (parallel to the Z-axis, starting from the J=0 edge)
  • y ~ y_max, y ~ y_max-dy (parallel to the Z-axis, starting from the J=max edge)
  • z ~ 0*dz, z ~ 1*dz (parallel to the Y-axis, starting from the K=0 edge)
  • z ~ z_max, z ~ z_max-dz (parallel to the Y-axis, starting from the K=max edge)

The particle's final position is set just inside the target cell face to ensure it is correctly located. The total number of particles (simCtx->np) is distributed as evenly as possible among all generated lines.

The function includes extensive validation to stop with an error if the requested grid placement is geometrically impossible (e.g., in a 2D domain or if layers would overlap). It also issues warnings for non-fatal but potentially unintended configurations.

Parameters
userPointer to UserCtx, which must contain a valid identifiedInletBCFace.
infoPointer to DMDALocalInfo for the current rank's grid layout.
xs_gnode_rank,ys_gnode_rank,zs_gnode_rankLocal starting node indices (incl. ghosts) for the rank's DA.
IM_cells_global,JM_cells_global,KM_cells_globalGlobal cell counts.
particle_global_idThe unique global ID of the particle being placed (from 0 to np-1).
[out]ci_metric_lnode_outLocal I-node index of the selected cell's origin.
[out]cj_metric_lnode_outLocal J-node index of the selected cell's origin.
[out]ck_metric_lnode_outLocal K-node index of the selected cell's origin.
[out]xi_metric_logic_outLogical xi-coordinate [0,1] within the cell.
[out]eta_metric_logic_outLogical eta-coordinate [0,1] within the cell.
[out]zta_metric_logic_outLogical zta-coordinate [0,1] within the cell.
[out]placement_successful_outPETSC_TRUE if the point belongs to this rank, PETSC_FALSE otherwise.
Returns
PetscErrorCode

Definition at line 268 of file Boundaries.c.

276{
277 SimCtx *simCtx = user->simCtx;
278 PetscReal global_logic_i, global_logic_j, global_logic_k;
279 PetscErrorCode ierr;
280 PetscMPIInt rank_for_logging;
281
282 PetscFunctionBeginUser;
283 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank_for_logging); CHKERRQ(ierr);
284
285 *placement_successful_out = PETSC_FALSE; // Default to failure
286
287 // --- Step 1: Configuration and Input Validation ---
288
289 // *** Hardcoded number of grid layers. Change this value to alter the pattern. ***
290 const PetscInt grid_layers = 2;
291
293 "[Rank %d] Placing particle %lld on face %s with grid_layers=%d in global domain (%d,%d,%d) cells.\n",
294 rank_for_logging, (long long)particle_global_id, BCFaceToString(user->identifiedInletBCFace), grid_layers,
295 IM_cells_global, JM_cells_global, KM_cells_global);
296
297 const char *face_name = BCFaceToString(user->identifiedInletBCFace);
298
299 // Fatal Error Checks: Ensure the requested grid is geometrically possible.
300 // The total layers from opposite faces (2 * grid_layers) must be less than the domain size.
301 switch (user->identifiedInletBCFace) {
302 case BC_FACE_NEG_X: case BC_FACE_POS_X:
303 if (JM_cells_global <= 1 || KM_cells_global <= 1) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Cannot place grid on face %s for a 2D/1D domain (J-cells=%d, K-cells=%d).", face_name, JM_cells_global, KM_cells_global);
304 if (2 * grid_layers >= JM_cells_global || 2 * grid_layers >= KM_cells_global) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Grid layers (%d) from opposing J/K faces would overlap in this domain (J-cells=%d, K-cells=%d).", grid_layers, JM_cells_global, KM_cells_global);
305 break;
306 case BC_FACE_NEG_Y: case BC_FACE_POS_Y:
307 if (IM_cells_global <= 1 || KM_cells_global <= 1) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Cannot place grid on face %s for a 2D/1D domain (I-cells=%d, K-cells=%d).", face_name, IM_cells_global, KM_cells_global);
308 if (2 * grid_layers >= IM_cells_global || 2 * grid_layers >= KM_cells_global) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Grid layers (%d) from opposing I/K faces would overlap in this domain (I-cells=%d, K-cells=%d).", grid_layers, IM_cells_global, KM_cells_global);
309 break;
310 case BC_FACE_NEG_Z: case BC_FACE_POS_Z:
311 if (IM_cells_global <= 1 || JM_cells_global <= 1) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Cannot place grid on face %s for a 2D/1D domain (I-cells=%d, J-cells=%d).", face_name, IM_cells_global, JM_cells_global);
312 if (2 * grid_layers >= IM_cells_global || 2 * grid_layers >= JM_cells_global) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Grid layers (%d) from opposing I/J faces would overlap in this domain (I-cells=%d, J-cells=%d).", grid_layers, IM_cells_global, JM_cells_global);
313 break;
314 default: SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Invalid identifiedInletBCFace specified: %d", user->identifiedInletBCFace);
315 }
316
317 const PetscInt num_lines_total = 4 * grid_layers;
318 if (simCtx->np < num_lines_total) {
319 LOG_ALLOW(GLOBAL, LOG_WARNING, "Warning: Total particle count (%lld) is less than the number of grid lines requested (%d). Some lines may be empty.\n", (long long)simCtx->np, num_lines_total);
320 }
321 if (simCtx->np > 0 && simCtx->np % num_lines_total != 0) {
322 LOG_ALLOW(GLOBAL, LOG_WARNING, "Warning: Total particle count (%lld) is not evenly divisible by the number of grid lines (%d). Distribution will be uneven.\n", (long long)simCtx->np, num_lines_total);
323 }
324
325 // --- Step 2: Map global particle ID to a line and a point on that line ---
326 if (simCtx->np == 0) PetscFunctionReturn(0); // Nothing to do
327
328 LOG_ALLOW(LOCAL, LOG_TRACE, "[Rank %d] Distributing %lld particles over %d lines on face %s.\n",
329 rank_for_logging, (long long)simCtx->np, num_lines_total, face_name);
330
331 const PetscInt points_per_line = PetscMax(1, simCtx->np / num_lines_total);
332 PetscInt line_index = particle_global_id / points_per_line;
333 PetscInt point_index_on_line = particle_global_id % points_per_line;
334 line_index = PetscMin(line_index, num_lines_total - 1); // Clamp to handle uneven division
335
336 // Decode the line_index into an edge group (0-3) and a layer within that group (0 to grid_layers-1)
337 const PetscInt edge_group = line_index / grid_layers;
338 const PetscInt layer_index = line_index % grid_layers;
339
340 // --- Step 3: Calculate placement coordinates based on the decoded indices ---
341 const PetscReal layer_spacing_norm_i = (IM_cells_global > 0) ? 1.0 / (PetscReal)IM_cells_global : 0.0;
342 const PetscReal layer_spacing_norm_j = (JM_cells_global > 0) ? 1.0 / (PetscReal)JM_cells_global : 0.0;
343 const PetscReal layer_spacing_norm_k = (KM_cells_global > 0) ? 1.0 / (PetscReal)KM_cells_global : 0.0;
344
345 // Grid-aware epsilon: scale with minimum cell size to keep particles away from rank boundaries
346 const PetscReal min_layer_spacing = PetscMin(layer_spacing_norm_i, PetscMin(layer_spacing_norm_j, layer_spacing_norm_k));
347 const PetscReal epsilon = 0.5 * min_layer_spacing; // Keep particles 10% of cell width from boundaries
348
349 PetscReal variable_coord; // The coordinate that varies along a line
350 if (points_per_line <= 1) {
351 variable_coord = 0.5; // Place single point in the middle
352 } else {
353 variable_coord = ((PetscReal)point_index_on_line + 0.5)/ (PetscReal)(points_per_line);
354 }
355 variable_coord = PetscMin(1.0 - epsilon, PetscMax(epsilon, variable_coord)); // Clamp within [eps, 1-eps]
356
357 // Main logic switch to determine the three global logical coordinates
358 switch (user->identifiedInletBCFace) {
359 case BC_FACE_NEG_X:
360 global_logic_i = 0.5 * layer_spacing_norm_i; // Place near the face, in the middle of the first cell
361 if (edge_group == 0) { global_logic_j = (PetscReal)layer_index * layer_spacing_norm_j + epsilon; global_logic_k = variable_coord; }
362 else if (edge_group == 1) { global_logic_j = 1.0 - ((PetscReal)layer_index * layer_spacing_norm_j) - epsilon; global_logic_k = variable_coord; }
363 else if (edge_group == 2) { global_logic_k = (PetscReal)layer_index * layer_spacing_norm_k + epsilon; global_logic_j = variable_coord; }
364 else /* edge_group == 3 */ { global_logic_k = 1.0 - ((PetscReal)layer_index * layer_spacing_norm_k) - epsilon; global_logic_j = variable_coord; }
365 break;
366 case BC_FACE_POS_X:
367 global_logic_i = 1.0 - (0.5 * layer_spacing_norm_i); // Place near the face, in the middle of the last cell
368 if (edge_group == 0) { global_logic_j = (PetscReal)layer_index * layer_spacing_norm_j + epsilon; global_logic_k = variable_coord; }
369 else if (edge_group == 1) { global_logic_j = 1.0 - ((PetscReal)layer_index * layer_spacing_norm_j) - epsilon; global_logic_k = variable_coord; }
370 else if (edge_group == 2) { global_logic_k = (PetscReal)layer_index * layer_spacing_norm_k + epsilon; global_logic_j = variable_coord; }
371 else /* edge_group == 3 */ { global_logic_k = 1.0 - ((PetscReal)layer_index * layer_spacing_norm_k) - epsilon; global_logic_j = variable_coord; }
372 break;
373 case BC_FACE_NEG_Y:
374 global_logic_j = 0.5 * layer_spacing_norm_j;
375 if (edge_group == 0) { global_logic_i = (PetscReal)layer_index * layer_spacing_norm_i + epsilon; global_logic_k = variable_coord; }
376 else if (edge_group == 1) { global_logic_i = 1.0 - ((PetscReal)layer_index * layer_spacing_norm_i) - epsilon; global_logic_k = variable_coord; }
377 else if (edge_group == 2) { global_logic_k = (PetscReal)layer_index * layer_spacing_norm_k + epsilon; global_logic_i = variable_coord; }
378 else /* edge_group == 3 */ { global_logic_k = 1.0 - ((PetscReal)layer_index * layer_spacing_norm_k) - epsilon; global_logic_i = variable_coord; }
379 break;
380 case BC_FACE_POS_Y:
381 global_logic_j = 1.0 - (0.5 * layer_spacing_norm_j);
382 if (edge_group == 0) { global_logic_i = (PetscReal)layer_index * layer_spacing_norm_i + epsilon; global_logic_k = variable_coord; }
383 else if (edge_group == 1) { global_logic_i = 1.0 - ((PetscReal)layer_index * layer_spacing_norm_i) - epsilon; global_logic_k = variable_coord; }
384 else if (edge_group == 2) { global_logic_k = (PetscReal)layer_index * layer_spacing_norm_k + epsilon; global_logic_i = variable_coord; }
385 else /* edge_group == 3 */ { global_logic_k = 1.0 - ((PetscReal)layer_index * layer_spacing_norm_k) - epsilon; global_logic_i = variable_coord; }
386 break;
387 case BC_FACE_NEG_Z:
388 global_logic_k = 0.5 * layer_spacing_norm_k;
389 if (edge_group == 0) { global_logic_i = (PetscReal)layer_index * layer_spacing_norm_i + epsilon; global_logic_j = variable_coord; }
390 else if (edge_group == 1) { global_logic_i = 1.0 - ((PetscReal)layer_index * layer_spacing_norm_i) - epsilon; global_logic_j = variable_coord; }
391 else if (edge_group == 2) { global_logic_j = (PetscReal)layer_index * layer_spacing_norm_j + epsilon; global_logic_i = variable_coord; }
392 else /* edge_group == 3 */ { global_logic_j = 1.0 - ((PetscReal)layer_index * layer_spacing_norm_j) - epsilon; global_logic_i = variable_coord; }
393 break;
394 case BC_FACE_POS_Z:
395 global_logic_k = 1.0 - (0.5 * layer_spacing_norm_k);
396 if (edge_group == 0) { global_logic_i = (PetscReal)layer_index * layer_spacing_norm_i + epsilon; global_logic_j = variable_coord; }
397 else if (edge_group == 1) { global_logic_i = 1.0 - ((PetscReal)layer_index * layer_spacing_norm_i) - epsilon; global_logic_j = variable_coord; }
398 else if (edge_group == 2) { global_logic_j = (PetscReal)layer_index * layer_spacing_norm_j + epsilon; global_logic_i = variable_coord; }
399 else /* edge_group == 3 */ { global_logic_j = 1.0 - ((PetscReal)layer_index * layer_spacing_norm_j) - epsilon; global_logic_i = variable_coord; }
400 break;
401 }
402
404 "[Rank %d] Particle %lld assigned to line %d (edge group %d, layer %d) with variable_coord=%.4f.\n"
405 " -> Global logical coords: (i,j,k) = (%.6f, %.6f, %.6f)\n",
406 rank_for_logging, (long long)particle_global_id, line_index, edge_group, layer_index, variable_coord,
407 global_logic_i, global_logic_j, global_logic_k);
408
409 // --- Step 4: Convert global logical coordinate to global cell index and intra-cell logicals ---
410 PetscReal global_cell_coord_i = global_logic_i * IM_cells_global;
411 PetscInt I_g = (PetscInt)global_cell_coord_i;
412 *xi_metric_logic_out = global_cell_coord_i - I_g;
413
414 PetscReal global_cell_coord_j = global_logic_j * JM_cells_global;
415 PetscInt J_g = (PetscInt)global_cell_coord_j;
416 *eta_metric_logic_out = global_cell_coord_j - J_g;
417
418 PetscReal global_cell_coord_k = global_logic_k * KM_cells_global;
419 PetscInt K_g = (PetscInt)global_cell_coord_k;
420 *zta_metric_logic_out = global_cell_coord_k - K_g;
421
422 // --- Step 5: Check if this rank owns the target cell and finalize outputs ---
423 if ((I_g >= info->xs && I_g < info->xs + info->xm) &&
424 (J_g >= info->ys && J_g < info->ys + info->ym) &&
425 (K_g >= info->zs && K_g < info->zs + info->zm))
426 {
427 // Convert global cell index to the local node index for this rank's DA patch
428 *ci_metric_lnode_out = (I_g - info->xs) + xs_gnode_rank;
429 *cj_metric_lnode_out = (J_g - info->ys) + ys_gnode_rank;
430 *ck_metric_lnode_out = (K_g - info->zs) + zs_gnode_rank;
431 *placement_successful_out = PETSC_TRUE;
432 }
433
435 "[Rank %d] Particle %lld placement %s.\n",
436 rank_for_logging, (long long)particle_global_id,
437 (*placement_successful_out ? "SUCCESSFUL" : "NOT ON THIS RANK"));
438
439 if(*placement_successful_out){
440 LOG_ALLOW(LOCAL,LOG_TRACE,"Local cell origin node: (I,J,K) = (%d,%d,%d), intra-cell logicals: (xi,eta,zta)=(%.6f,%.6f,%.6f)\n",
441 *ci_metric_lnode_out, *cj_metric_lnode_out, *ck_metric_lnode_out,
442 *xi_metric_logic_out, *eta_metric_logic_out, *zta_metric_logic_out);
443 }
444
445 PetscFunctionReturn(0);
446}
PetscInt np
Definition variables.h:676
The master context for the entire simulation.
Definition variables.h:585
Here is the call graph for this function:
Here is the caller graph for this function:

◆ GetRandomCellAndLogicalCoordsOnInletFace()

PetscErrorCode GetRandomCellAndLogicalCoordsOnInletFace ( UserCtx *  user,
const DMDALocalInfo *  info,
PetscInt  xs_gnode_rank,
PetscInt  ys_gnode_rank,
PetscInt  zs_gnode_rank,
PetscInt  IM_nodes_global,
PetscInt  JM_nodes_global,
PetscInt  KM_nodes_global,
PetscRandom *  rand_logic_i_ptr,
PetscRandom *  rand_logic_j_ptr,
PetscRandom *  rand_logic_k_ptr,
PetscInt *  ci_metric_lnode_out,
PetscInt *  cj_metric_lnode_out,
PetscInt *  ck_metric_lnode_out,
PetscReal *  xi_metric_logic_out,
PetscReal *  eta_metric_logic_out,
PetscReal *  zta_metric_logic_out 
)

Assuming the current rank services the inlet face, this function selects a random cell (owned by this rank on that face) and random logical coordinates within that cell, suitable for placing a particle on the inlet surface.

It is the caller's responsibility to ensure CanRankServiceInletFace returned true.

Parameters
userPointer to UserCtx.
infoPointer to DMDALocalInfo for the current rank (node-based).
xs_gnode_rank,ys_gnode_rank,zs_gnode_rankLocal starting node indices (incl. ghosts) for the rank's DA.
IM_nodes_global,JM_nodes_global,KM_nodes_globalGlobal node counts.
rand_logic_i_ptr,rand_logic_j_ptr,rand_logic_k_ptrPointers to RNGs for logical coords.
[out]ci_metric_lnode_out,cj_metric_lnode_out,ck_metric_lnode_outLocal node indices of the selected cell's origin (these are local to the rank's DA including ghosts).
[out]xi_metric_logic_out,eta_metric_logic_out,zta_metric_logic_outLogical coords [0,1] within the cell.
Returns
PetscErrorCode

Definition at line 467 of file Boundaries.c.

474{
475 PetscErrorCode ierr = 0;
476 PetscReal r_val_i_sel, r_val_j_sel, r_val_k_sel;
477 PetscInt local_cell_idx_on_face_dim1 = 0; // 0-indexed relative to owned cells on face
478 PetscInt local_cell_idx_on_face_dim2 = 0;
479 PetscMPIInt rank_for_logging;
480
481 PetscFunctionBeginUser;
482
484
485 ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank_for_logging); CHKERRQ(ierr);
486
487 // Get number of cells this rank owns in each dimension (tangential to the face mainly)
488 PetscInt owned_start_cell_i, num_owned_cells_on_rank_i;
489 PetscInt owned_start_cell_j, num_owned_cells_on_rank_j;
490 PetscInt owned_start_cell_k, num_owned_cells_on_rank_k;
491
492 ierr = GetOwnedCellRange(info, 0, &owned_start_cell_i, &num_owned_cells_on_rank_i); CHKERRQ(ierr);
493 ierr = GetOwnedCellRange(info, 1, &owned_start_cell_j, &num_owned_cells_on_rank_j); CHKERRQ(ierr);
494 ierr = GetOwnedCellRange(info, 2, &owned_start_cell_k, &num_owned_cells_on_rank_k); CHKERRQ(ierr);
495
496 // Defaults for cell origin node (local index for the rank's DA patch, including ghosts)
497 *ci_metric_lnode_out = xs_gnode_rank; *cj_metric_lnode_out = ys_gnode_rank; *ck_metric_lnode_out = zs_gnode_rank;
498 // Defaults for logical coordinates
499 *xi_metric_logic_out = 0.5; *eta_metric_logic_out = 0.5; *zta_metric_logic_out = 0.5;
500
501 // Index of the last cell (0-indexed) in each global direction
502 PetscInt last_global_cell_idx_i = (IM_nodes_global > 1) ? (IM_nodes_global - 2) : -1;
503 PetscInt last_global_cell_idx_j = (JM_nodes_global > 1) ? (JM_nodes_global - 2) : -1;
504 PetscInt last_global_cell_idx_k = (KM_nodes_global > 1) ? (KM_nodes_global - 2) : -1;
505
506 LOG_ALLOW(LOCAL, LOG_INFO, "PARTICLE_INIT_DEBUG Rank %d: Inlet face %s.\n"
507 " Owned cells (i,j,k): (%d,%d,%d)\n"
508 " Global nodes (I,J,K): (%d,%d,%d)\n"
509 " info->xs,ys,zs (first owned node GLOBAL): (%d,%d,%d)\n"
510 " info->xm,ym,zm (num owned nodes GLOBAL): (%d,%d,%d)\n"
511 " xs_gnode_rank,ys_gnode_rank,zs_gnode_rank (DMDAGetCorners): (%d,%d,%d)\n"
512 " owned_start_cell (i,j,k) GLOBAL: (%d,%d,%d)\n"
513 " last_global_cell_idx (i,j,k): (%d,%d,%d)\n",
514 rank_for_logging, BCFaceToString((BCFace)user->identifiedInletBCFace),
515 num_owned_cells_on_rank_i,num_owned_cells_on_rank_j,num_owned_cells_on_rank_k,
516 IM_nodes_global,JM_nodes_global,KM_nodes_global,
517 info->xs, info->ys, info->zs,
518 info->xm, info->ym, info->zm,
519 xs_gnode_rank,ys_gnode_rank,zs_gnode_rank,
520 owned_start_cell_i, owned_start_cell_j, owned_start_cell_k,
521 last_global_cell_idx_i, last_global_cell_idx_j, last_global_cell_idx_k);
522
523
524 switch (user->identifiedInletBCFace) {
525 case BC_FACE_NEG_X: // Particle on -X face of cell C_0 (origin node N_0)
526 // Cell origin node is the first owned node in I by this rank (global index info->xs).
527 // Its local index within the rank's DA (incl ghosts) is xs_gnode_rank.
528 *ci_metric_lnode_out = xs_gnode_rank;
529 *xi_metric_logic_out = 1.0e-6;
530
531 // Tangential dimensions are J and K. Select an owned cell randomly on this face.
532 // num_owned_cells_on_rank_j/k must be > 0 (checked by CanRankServiceInletFace)
533 ierr = PetscRandomGetValueReal(*rand_logic_j_ptr, &r_val_j_sel); CHKERRQ(ierr);
534 local_cell_idx_on_face_dim1 = (PetscInt)(r_val_j_sel * num_owned_cells_on_rank_j); // Index among owned J-cells
535 local_cell_idx_on_face_dim1 = PetscMin(PetscMax(0, local_cell_idx_on_face_dim1), num_owned_cells_on_rank_j - 1);
536 *cj_metric_lnode_out = ys_gnode_rank + local_cell_idx_on_face_dim1; // Offset from start of rank's J-nodes
537
538 ierr = PetscRandomGetValueReal(*rand_logic_k_ptr, &r_val_k_sel); CHKERRQ(ierr);
539 local_cell_idx_on_face_dim2 = (PetscInt)(r_val_k_sel * num_owned_cells_on_rank_k);
540 local_cell_idx_on_face_dim2 = PetscMin(PetscMax(0, local_cell_idx_on_face_dim2), num_owned_cells_on_rank_k - 1);
541 *ck_metric_lnode_out = zs_gnode_rank + local_cell_idx_on_face_dim2;
542
543 ierr = PetscRandomGetValueReal(*rand_logic_j_ptr, eta_metric_logic_out); CHKERRQ(ierr);
544 ierr = PetscRandomGetValueReal(*rand_logic_k_ptr, zta_metric_logic_out); CHKERRQ(ierr);
545 break;
546
547 case BC_FACE_POS_X: // Particle on +X face of cell C_last_I (origin node N_last_I_origin)
548 // Origin node of the last I-cell is global_node_idx = last_global_cell_idx_i.
549 // Its local index in rank's DA: (last_global_cell_idx_i - info->xs) + xs_gnode_rank
550 *ci_metric_lnode_out = xs_gnode_rank + (last_global_cell_idx_i - info->xs);
551 *xi_metric_logic_out = 1.0 - 1.0e-6;
552
553 ierr = PetscRandomGetValueReal(*rand_logic_j_ptr, &r_val_j_sel); CHKERRQ(ierr);
554 local_cell_idx_on_face_dim1 = (PetscInt)(r_val_j_sel * num_owned_cells_on_rank_j);
555 local_cell_idx_on_face_dim1 = PetscMin(PetscMax(0, local_cell_idx_on_face_dim1), num_owned_cells_on_rank_j - 1);
556 *cj_metric_lnode_out = ys_gnode_rank + local_cell_idx_on_face_dim1;
557
558 ierr = PetscRandomGetValueReal(*rand_logic_k_ptr, &r_val_k_sel); CHKERRQ(ierr);
559 local_cell_idx_on_face_dim2 = (PetscInt)(r_val_k_sel * num_owned_cells_on_rank_k);
560 local_cell_idx_on_face_dim2 = PetscMin(PetscMax(0, local_cell_idx_on_face_dim2), num_owned_cells_on_rank_k - 1);
561 *ck_metric_lnode_out = zs_gnode_rank + local_cell_idx_on_face_dim2;
562
563 ierr = PetscRandomGetValueReal(*rand_logic_j_ptr, eta_metric_logic_out); CHKERRQ(ierr);
564 ierr = PetscRandomGetValueReal(*rand_logic_k_ptr, zta_metric_logic_out); CHKERRQ(ierr);
565 break;
566 // ... (Cases for Y and Z faces, following the same pattern) ...
567 case BC_FACE_NEG_Y:
568 *cj_metric_lnode_out = ys_gnode_rank;
569 *eta_metric_logic_out = 1.0e-6;
570 ierr = PetscRandomGetValueReal(*rand_logic_i_ptr, &r_val_i_sel); CHKERRQ(ierr);
571 local_cell_idx_on_face_dim1 = (PetscInt)(r_val_i_sel * num_owned_cells_on_rank_i);
572 local_cell_idx_on_face_dim1 = PetscMin(PetscMax(0, local_cell_idx_on_face_dim1), num_owned_cells_on_rank_i - 1);
573 *ci_metric_lnode_out = xs_gnode_rank + local_cell_idx_on_face_dim1;
574 ierr = PetscRandomGetValueReal(*rand_logic_k_ptr, &r_val_k_sel); CHKERRQ(ierr);
575 local_cell_idx_on_face_dim2 = (PetscInt)(r_val_k_sel * num_owned_cells_on_rank_k);
576 local_cell_idx_on_face_dim2 = PetscMin(PetscMax(0, local_cell_idx_on_face_dim2), num_owned_cells_on_rank_k - 1);
577 *ck_metric_lnode_out = zs_gnode_rank + local_cell_idx_on_face_dim2;
578 ierr = PetscRandomGetValueReal(*rand_logic_i_ptr, xi_metric_logic_out); CHKERRQ(ierr);
579 ierr = PetscRandomGetValueReal(*rand_logic_k_ptr, zta_metric_logic_out); CHKERRQ(ierr);
580 break;
581 case BC_FACE_POS_Y:
582 *cj_metric_lnode_out = ys_gnode_rank + (last_global_cell_idx_j - info->ys);
583 *eta_metric_logic_out = 1.0 - 1.0e-6;
584 ierr = PetscRandomGetValueReal(*rand_logic_i_ptr, &r_val_i_sel); CHKERRQ(ierr);
585 local_cell_idx_on_face_dim1 = (PetscInt)(r_val_i_sel * num_owned_cells_on_rank_i);
586 local_cell_idx_on_face_dim1 = PetscMin(PetscMax(0, local_cell_idx_on_face_dim1), num_owned_cells_on_rank_i - 1);
587 *ci_metric_lnode_out = xs_gnode_rank + local_cell_idx_on_face_dim1;
588 ierr = PetscRandomGetValueReal(*rand_logic_k_ptr, &r_val_k_sel); CHKERRQ(ierr);
589 local_cell_idx_on_face_dim2 = (PetscInt)(r_val_k_sel * num_owned_cells_on_rank_k);
590 local_cell_idx_on_face_dim2 = PetscMin(PetscMax(0, local_cell_idx_on_face_dim2), num_owned_cells_on_rank_k - 1);
591 *ck_metric_lnode_out = zs_gnode_rank + local_cell_idx_on_face_dim2;
592 ierr = PetscRandomGetValueReal(*rand_logic_i_ptr, xi_metric_logic_out); CHKERRQ(ierr);
593 ierr = PetscRandomGetValueReal(*rand_logic_k_ptr, zta_metric_logic_out); CHKERRQ(ierr);
594 break;
595 case BC_FACE_NEG_Z: // Your example case
596 *ck_metric_lnode_out = zs_gnode_rank; // Cell origin is the first owned node in K by this rank
597 *zta_metric_logic_out = 1.0e-6; // Place particle slightly inside this cell from its -Z face
598 // Tangential dimensions are I and J
599 ierr = PetscRandomGetValueReal(*rand_logic_i_ptr, &r_val_i_sel); CHKERRQ(ierr);
600 local_cell_idx_on_face_dim1 = (PetscInt)(r_val_i_sel * num_owned_cells_on_rank_i);
601 local_cell_idx_on_face_dim1 = PetscMin(PetscMax(0, local_cell_idx_on_face_dim1), num_owned_cells_on_rank_i - 1);
602 *ci_metric_lnode_out = xs_gnode_rank + local_cell_idx_on_face_dim1;
603
604 ierr = PetscRandomGetValueReal(*rand_logic_j_ptr, &r_val_j_sel); CHKERRQ(ierr);
605 local_cell_idx_on_face_dim2 = (PetscInt)(r_val_j_sel * num_owned_cells_on_rank_j);
606 local_cell_idx_on_face_dim2 = PetscMin(PetscMax(0, local_cell_idx_on_face_dim2), num_owned_cells_on_rank_j - 1);
607 *cj_metric_lnode_out = ys_gnode_rank + local_cell_idx_on_face_dim2;
608
609 ierr = PetscRandomGetValueReal(*rand_logic_i_ptr, xi_metric_logic_out); CHKERRQ(ierr); // Intra-cell logical for I
610 ierr = PetscRandomGetValueReal(*rand_logic_j_ptr, eta_metric_logic_out); CHKERRQ(ierr); // Intra-cell logical for J
611 break;
612 case BC_FACE_POS_Z:
613 *ck_metric_lnode_out = zs_gnode_rank + (last_global_cell_idx_k - info->zs);
614 *zta_metric_logic_out = 1.0 - 1.0e-6;
615 ierr = PetscRandomGetValueReal(*rand_logic_i_ptr, &r_val_i_sel); CHKERRQ(ierr);
616 local_cell_idx_on_face_dim1 = (PetscInt)(r_val_i_sel * num_owned_cells_on_rank_i);
617 local_cell_idx_on_face_dim1 = PetscMin(PetscMax(0, local_cell_idx_on_face_dim1), num_owned_cells_on_rank_i - 1);
618 *ci_metric_lnode_out = xs_gnode_rank + local_cell_idx_on_face_dim1;
619 ierr = PetscRandomGetValueReal(*rand_logic_j_ptr, &r_val_j_sel); CHKERRQ(ierr);
620 local_cell_idx_on_face_dim2 = (PetscInt)(r_val_j_sel * num_owned_cells_on_rank_j);
621 local_cell_idx_on_face_dim2 = PetscMin(PetscMax(0, local_cell_idx_on_face_dim2), num_owned_cells_on_rank_j - 1);
622 *cj_metric_lnode_out = ys_gnode_rank + local_cell_idx_on_face_dim2;
623 ierr = PetscRandomGetValueReal(*rand_logic_i_ptr, xi_metric_logic_out); CHKERRQ(ierr);
624 ierr = PetscRandomGetValueReal(*rand_logic_j_ptr, eta_metric_logic_out); CHKERRQ(ierr);
625 break;
626 default:
627 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "GetRandomCellAndLogicOnInletFace: Invalid user->identifiedInletBCFace %d. \n", user->identifiedInletBCFace);
628 }
629
630 PetscReal eps = 1.0e-7;
632 *eta_metric_logic_out = PetscMin(PetscMax(0.0, *eta_metric_logic_out), 1.0 - eps);
633 *zta_metric_logic_out = PetscMin(PetscMax(0.0, *zta_metric_logic_out), 1.0 - eps);
635 *xi_metric_logic_out = PetscMin(PetscMax(0.0, *xi_metric_logic_out), 1.0 - eps);
636 *zta_metric_logic_out = PetscMin(PetscMax(0.0, *zta_metric_logic_out), 1.0 - eps);
637 } else {
638 *xi_metric_logic_out = PetscMin(PetscMax(0.0, *xi_metric_logic_out), 1.0 - eps);
639 *eta_metric_logic_out = PetscMin(PetscMax(0.0, *eta_metric_logic_out), 1.0 - eps);
640 }
641
642 LOG_ALLOW(LOCAL, LOG_VERBOSE, "Rank %d: Target Cell Node =(%d,%d,%d). (xi,et,zt)=(%.2e,%.2f,%.2f). \n",
643 rank_for_logging, *ci_metric_lnode_out, *cj_metric_lnode_out, *ck_metric_lnode_out,
644 *xi_metric_logic_out, *eta_metric_logic_out, *zta_metric_logic_out);
645
647
648 PetscFunctionReturn(0);
649}
Here is the call graph for this function:
Here is the caller graph for this function:

◆ TranslateModernBCsToLegacy()

PetscErrorCode TranslateModernBCsToLegacy ( UserCtx *  user)

Definition at line 656 of file Boundaries.c.

657{
658 PetscFunctionBeginUser;
660 LOG_ALLOW(GLOBAL,LOG_DEBUG," Translating modern BC config to legacy integer codes...\n");
661
662 for (int i = 0; i < 6; i++) {
663 BCType modern_type = user->boundary_faces[i].mathematical_type;
664 user->bctype[i] = (int)modern_type;
665
666
667 BCFace current_face = (BCFace)i;
668 const char* face_str = BCFaceToString(current_face);
669 const char* bc_type_str = BCTypeToString(modern_type);
670 LOG_ALLOW(GLOBAL,LOG_TRACE," for face %s(%d), legacy type = %d & modern type = %s .\n",face_str,i,user->bctype[i],bc_type_str);
671 }
672 LOG_ALLOW(GLOBAL,LOG_DEBUG," -> Translation complete.\n");
674 PetscFunctionReturn(0);
675}
BCType
Defines the general mathematical/physical Category of a boundary.
Definition variables.h:210
PetscInt bctype[6]
Definition variables.h:751
Here is the call graph for this function:
Here is the caller graph for this function:

◆ EnforceRHSBoundaryConditions()

PetscErrorCode EnforceRHSBoundaryConditions ( UserCtx *  user)

(Private) A generic routine to copy data for a single, named field across periodic boundaries.

This function encapsulates all logic for a periodic transfer. Given a field name (e.g., "P", "Ucat"), it determines the field's data type (scalar/vector), retrieves the correct DMDA and Vecs from the UserCtx, and then performs the memory copy from the local ghost array to the global array.

This must be called AFTER the corresponding local ghost vector has been updated via DMGlobalToLocal.

Parameters
userThe main UserCtx struct, containing all grid info and field data.
field_nameA string identifier for the field to transfer.
Returns
PetscErrorCode 0 on success.

Enforces boundary conditions on the momentum equation's Right-Hand-Side (RHS) vector.

This function performs two critical roles based on the legacy implementation:

  1. Strong BC Enforcement for Physical Boundaries: For non-periodic boundaries (e.g., walls, inlets), it zeroes the normal component of the RHS in a "buffer" layer of cells just inside the domain (e.g., at i=mx-2). This strongly enforces Dirichlet conditions on velocity by preventing the time-stepping scheme from altering the boundary values set by ApplyBoundaryConditions.
  2. Ghost Cell Sanitization: For all boundary faces (i=0, i=mx-1, etc.), it zeroes out all components of the RHS. Since the RHS is a cell-centered quantity in this architecture, these locations correspond to ghost cells. This step sanitizes these unused locations, ensuring they do not contain garbage data that could affect diagnostics or other routines. This sanitization is performed for ALL boundary types, including periodic ones.

This function should be called immediately after the RHS vector is fully assembled (spatial + temporal terms) and before it is used in a time-stepping update.

Parameters
userThe UserCtx for the specific block being computed.
Returns
PetscErrorCode 0 on success.

(Private) A generic routine to copy data for a single, named field across periodic boundaries.

This function is a faithful and structured replication of the boundary logic from the legacy CalcRHS routine. It accounts for the specific staggered grid architecture where a dimension of size mx has mx-1 nodes (0 to mx-2) and mx-2 cells. The face-centered RHS vector is allocated to size mx.

This results in the following indexing for the x-component of the RHS:

  • i = 0 to mx-3: Internal physical faces.
  • i = mx-2: The final physical face, located at the boundary.
  • i = mx-1: A dummy/ghost location corresponding to the last element of the vector.

This function performs two distinct roles based on this layout:

  1. Strong BC Enforcement on Physical Boundary Faces: For non-periodic boundaries, it zeroes the RHS on the domain's outermost physical faces (e.g., i=0, i=mx-2). This enforces a zero time-derivative (dU/dt = 0), ensuring that Dirichlet velocity conditions are maintained.
  2. Dummy Location Sanitization: On the positive-side dummy locations (i=mx-1, j=my-1, k=mz-1), it unconditionally zeroes out all RHS components. This is a robust housekeeping step to ensure these architecturally unused locations do not contain garbage data.
Parameters
userThe UserCtx for the specific block being computed.
Returns
PetscErrorCode 0 on success.

Definition at line 705 of file Boundaries.c.

706{
707 PetscErrorCode ierr;
708 DMDALocalInfo info = user->info;
709 Cmpnts ***rhs;
710
711 // --- Grid extents for this MPI rank and global grid dimensions ---
712 const PetscInt xs = info.xs, xe = xs + info.xm;
713 const PetscInt ys = info.ys, ye = ys + info.ym;
714 const PetscInt zs = info.zs, ze = zs + info.zm;
715 const PetscInt mx = info.mx, my = info.my, mz = info.mz;
716
717 PetscFunctionBeginUser;
719
720 // Get a writable pointer to the local data of the global RHS vector.
721 ierr = DMDAVecGetArray(user->fda, user->Rhs, &rhs); CHKERRQ(ierr);
722
723 // ========================================================================
724 // --- I-DIRECTION (X-FACES) ---
725 // ========================================================================
726
727 // --- Negative X Face (i=0, the first physical face) ---
728 if (xs == 0) {
729 // This logic applies ONLY to physical (non-periodic) boundaries.
731 const PetscInt i = 0;
732 for (PetscInt k = zs; k < ze; k++) {
733 for (PetscInt j = ys; j < ye; j++) {
734 rhs[k][j][i].x = 0.0;
735 rhs[k][j][i].y = 0.0;
736 rhs[k][j][i].z = 0.0;
737 }
738 }
739 }
740 }
741
742 // --- Positive X Face (physical face i=mx-2, dummy location i=mx-1) ---
743 if (xe == mx) {
744 // Step 1: Enforce strong BC on the LAST PHYSICAL face (i=mx-2) for non-periodic cases.
746 const PetscInt i = mx - 2;
747 for (PetscInt k = zs; k < ze; k++) {
748 for (PetscInt j = ys; j < ye; j++) {
749 rhs[k][j][i].x = 0.0;
750 }
751 }
752 }
753 // Step 2: Unconditionally sanitize the DUMMY location (i=mx-1).
754 const PetscInt i = mx - 1;
755 for (PetscInt k = zs; k < ze; k++) {
756 for (PetscInt j = ys; j < ye; j++) {
757 rhs[k][j][i].x = 0.0;
758 rhs[k][j][i].y = 0.0;
759 rhs[k][j][i].z = 0.0;
760 }
761 }
762 }
763
764 // ========================================================================
765 // --- J-DIRECTION (Y-FACES) ---
766 // ========================================================================
767
768 // --- Negative Y Face (j=0, the first physical face) ---
769 if (ys == 0) {
771 const PetscInt j = 0;
772 for (PetscInt k = zs; k < ze; k++) {
773 for (PetscInt i = xs; i < xe; i++) {
774 rhs[k][j][i].x = 0.0;
775 rhs[k][j][i].y = 0.0;
776 rhs[k][j][i].z = 0.0;
777 }
778 }
779 }
780 }
781
782 // --- Positive Y Face (physical face j=my-2, dummy location j=my-1) ---
783 if (ye == my) {
785 const PetscInt j = my - 2;
786 for (PetscInt k = zs; k < ze; k++) {
787 for (PetscInt i = xs; i < xe; i++) {
788 rhs[k][j][i].y = 0.0;
789 }
790 }
791 }
792 const PetscInt j = my - 1;
793 for (PetscInt k = zs; k < ze; k++) {
794 for (PetscInt i = xs; i < xe; i++) {
795 rhs[k][j][i].x = 0.0;
796 rhs[k][j][i].y = 0.0;
797 rhs[k][j][i].z = 0.0;
798 }
799 }
800 }
801
802 // ========================================================================
803 // --- K-DIRECTION (Z-FACES) ---
804 // ========================================================================
805
806 // --- Negative Z Face (k=0, the first physical face) ---
807 if (zs == 0) {
809 const PetscInt k = 0;
810 for (PetscInt j = ys; j < ye; j++) {
811 for (PetscInt i = xs; i < xe; i++) {
812 rhs[k][j][i].x = 0.0;
813 rhs[k][j][i].y = 0.0;
814 rhs[k][j][i].z = 0.0;
815 }
816 }
817 }
818 }
819
820 // --- Positive Z Face (physical face k=mz-2, dummy location k=mz-1) ---
821 if (ze == mz) {
823 const PetscInt k = mz - 2;
824 for (PetscInt j = ys; j < ye; j++) {
825 for (PetscInt i = xs; i < xe; i++) {
826 rhs[k][j][i].z = 0.0;
827 }
828 }
829 }
830 const PetscInt k = mz - 1;
831 for (PetscInt j = ys; j < ye; j++) {
832 for (PetscInt i = xs; i < xe; i++) {
833 rhs[k][j][i].x = 0.0;
834 rhs[k][j][i].y = 0.0;
835 rhs[k][j][i].z = 0.0;
836 }
837 }
838 }
839
840 // --- Release the pointer to the local data ---
841 ierr = DMDAVecRestoreArray(user->fda, user->Rhs, &rhs); CHKERRQ(ierr);
842
843 LOG_ALLOW(LOCAL, LOG_TRACE, "Rank %d, Block %d: Finished enforcing RHS boundary conditions.\n",
844 user->simCtx->rank, user->_this);
845
847
848 PetscFunctionReturn(0);
849}
@ PERIODIC
Definition variables.h:219
Vec Rhs
Definition variables.h:763
PetscInt _this
Definition variables.h:741
PetscScalar x
Definition variables.h:101
PetscScalar z
Definition variables.h:101
DMDALocalInfo info
Definition variables.h:735
PetscScalar y
Definition variables.h:101
A 3D point or vector with PetscScalar components.
Definition variables.h:100
Here is the caller graph for this function:

◆ TransferPeriodicFieldByDirection()

PetscErrorCode TransferPeriodicFieldByDirection ( UserCtx user,
const char *  field_name,
char  direction 
)

(Private Worker) Copies periodic data for a SINGLE field in a SINGLE direction.

This is a low-level helper that performs the memory copy from the local ghost array to the global array for a specified field and direction ('i', 'j', or 'k'). It contains NO communication logic; that is handled by the orchestrator.

Parameters
userThe main UserCtx struct.
field_nameThe string identifier for the field to transfer (e.g., "Ucat").
directionThe character 'i', 'j', or 'k' specifying the direction.
Returns
PetscErrorCode 0 on success.

Definition at line 1754 of file Boundaries.c.

1755{
1756 PetscErrorCode ierr;
1757 DMDALocalInfo info = user->info;
1758 PetscInt xs = info.xs, xe = info.xs + info.xm;
1759 PetscInt ys = info.ys, ye = info.ys + info.ym;
1760 PetscInt zs = info.zs, ze = info.zs + info.zm;
1761 PetscInt mx = info.mx, my = info.my, mz = info.mz;
1762
1763 // --- Dispatcher to get DM, Vecs, and DoF for the specified field ---
1764 DM dm;
1765 Vec global_vec;
1766 Vec local_vec;
1767 PetscInt dof;
1768 // (This dispatcher is identical to your TransferPeriodicField function)
1769 if (strcmp(field_name, "Ucat") == 0) {
1770 dm = user->fda; global_vec = user->Ucat; local_vec = user->lUcat; dof = 3;
1771 } else if (strcmp(field_name, "P") == 0) {
1772 dm = user->da; global_vec = user->P; local_vec = user->lP; dof = 1;
1773 } else if (strcmp(field_name, "Nvert") == 0) {
1774 dm = user->da; global_vec = user->Nvert; local_vec = user->lNvert; dof = 1;
1775 } else {
1776 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_UNKNOWN_TYPE, "Unknown field name '%s'", field_name);
1777 }
1778
1779 PetscFunctionBeginUser;
1780
1781 // --- Execute the copy logic based on DoF and Direction ---
1782 if (dof == 1) { // --- Handle SCALAR fields (PetscReal) ---
1783 PetscReal ***g_array, ***l_array;
1784 ierr = DMDAVecGetArray(dm, global_vec, &g_array); CHKERRQ(ierr);
1785 ierr = DMDAVecGetArrayRead(dm, local_vec, (void*)&l_array); CHKERRQ(ierr); // Use Read for safety
1786
1787 switch (direction) {
1788 case 'i':
1789 if (user->boundary_faces[BC_FACE_NEG_X].mathematical_type == PERIODIC && xs == 0) for (PetscInt k=zs; k<ze; k++) for (PetscInt j=ys; j<ye; j++) g_array[k][j][xs] = l_array[k][j][xs-2];
1790 if (user->boundary_faces[BC_FACE_POS_X].mathematical_type == PERIODIC && xe == mx) for (PetscInt k=zs; k<ze; k++) for (PetscInt j=ys; j<ye; j++) g_array[k][j][xe-1] = l_array[k][j][xe+1];
1791 break;
1792 case 'j':
1793 if (user->boundary_faces[BC_FACE_NEG_Y].mathematical_type == PERIODIC && ys == 0) for (PetscInt k=zs; k<ze; k++) for (PetscInt i=xs; i<xe; i++) g_array[k][ys][i] = l_array[k][ys-2][i];
1794 if (user->boundary_faces[BC_FACE_POS_Y].mathematical_type == PERIODIC && ye == my) for (PetscInt k=zs; k<ze; k++) for (PetscInt i=xs; i<xe; i++) g_array[k][ye-1][i] = l_array[k][ye+1][i];
1795 break;
1796 case 'k':
1797 if (user->boundary_faces[BC_FACE_NEG_Z].mathematical_type == PERIODIC && zs == 0) for (PetscInt j=ys; j<ye; j++) for (PetscInt i=xs; i<xe; i++) g_array[zs][j][i] = l_array[zs-2][j][i];
1798 if (user->boundary_faces[BC_FACE_POS_Z].mathematical_type == PERIODIC && ze == mz) for (PetscInt j=ys; j<ye; j++) for (PetscInt i=xs; i<xe; i++) g_array[ze-1][j][i] = l_array[ze+1][j][i];
1799 break;
1800 default: SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Invalid direction '%c'", direction);
1801 }
1802 ierr = DMDAVecRestoreArray(dm, global_vec, &g_array); CHKERRQ(ierr);
1803 ierr = DMDAVecRestoreArrayRead(dm, local_vec, (void*)&l_array); CHKERRQ(ierr);
1804
1805 } else if (dof == 3) { // --- Handle VECTOR fields (Cmpnts) ---
1806 Cmpnts ***g_array, ***l_array;
1807 ierr = DMDAVecGetArray(dm, global_vec, &g_array); CHKERRQ(ierr);
1808 ierr = DMDAVecGetArrayRead(dm, local_vec, (void*)&l_array); CHKERRQ(ierr);
1809
1810 switch (direction) {
1811 case 'i':
1812 if (user->boundary_faces[BC_FACE_NEG_X].mathematical_type == PERIODIC && xs == 0) for (PetscInt k=zs; k<ze; k++) for (PetscInt j=ys; j<ye; j++) g_array[k][j][xs] = l_array[k][j][xs-2];
1813 if (user->boundary_faces[BC_FACE_POS_X].mathematical_type == PERIODIC && xe == mx) for (PetscInt k=zs; k<ze; k++) for (PetscInt j=ys; j<ye; j++) g_array[k][j][xe-1] = l_array[k][j][xe+1];
1814 break;
1815 case 'j':
1816 if (user->boundary_faces[BC_FACE_NEG_Y].mathematical_type == PERIODIC && ys == 0) for (PetscInt k=zs; k<ze; k++) for (PetscInt i=xs; i<xe; i++) g_array[k][ys][i] = l_array[k][ys-2][i];
1817 if (user->boundary_faces[BC_FACE_POS_Y].mathematical_type == PERIODIC && ye == my) for (PetscInt k=zs; k<ze; k++) for (PetscInt i=xs; i<xe; i++) g_array[k][ye-1][i] = l_array[k][ye+1][i];
1818 break;
1819 case 'k':
1820 if (user->boundary_faces[BC_FACE_NEG_Z].mathematical_type == PERIODIC && zs == 0) for (PetscInt j=ys; j<ye; j++) for (PetscInt i=xs; i<xe; i++) g_array[zs][j][i] = l_array[zs-2][j][i];
1821 if (user->boundary_faces[BC_FACE_POS_Z].mathematical_type == PERIODIC && ze == mz) for (PetscInt j=ys; j<ye; j++) for (PetscInt i=xs; i<xe; i++) g_array[ze-1][j][i] = l_array[ze+1][j][i];
1822 break;
1823 default: SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Invalid direction '%c'", direction);
1824 }
1825 ierr = DMDAVecRestoreArray(dm, global_vec, &g_array); CHKERRQ(ierr);
1826 ierr = DMDAVecRestoreArrayRead(dm, local_vec, (void*)&l_array); CHKERRQ(ierr);
1827 }
1828
1829 PetscFunctionReturn(0);
1830}
Vec lNvert
Definition variables.h:755
Vec Ucat
Definition variables.h:755
Vec lUcat
Definition variables.h:755
Vec Nvert
Definition variables.h:755
Here is the caller graph for this function:

◆ TransferPeriodicField()

PetscErrorCode TransferPeriodicField ( UserCtx user,
const char *  field_name 
)

(Private) A generic routine to copy data for a single, named field across periodic boundaries.

This function encapsulates all logic for a periodic transfer. Given a field name (e.g., "P", "Ucat"), it determines the field's data type (scalar/vector), retrieves the correct DMDA and Vecs from the UserCtx, and then performs the memory copy from the local ghost array to the global array.

This must be called AFTER the corresponding local ghost vector has been updated via DMGlobalToLocal.

Parameters
userThe main UserCtx struct, containing all grid info and field data.
field_nameA string identifier for the field to transfer.
Returns
PetscErrorCode 0 on success.

Definition at line 1847 of file Boundaries.c.

1848{
1849 PetscErrorCode ierr;
1850 DMDALocalInfo info = user->info;
1851 PetscInt xs = info.xs, xe = info.xs + info.xm;
1852 PetscInt ys = info.ys, ye = info.ys + info.ym;
1853 PetscInt zs = info.zs, ze = info.zs + info.zm;
1854 PetscInt mx = info.mx, my = info.my, mz = info.mz;
1855
1856 // --- Local variables to hold the specific details of the chosen field ---
1857 DM dm;
1858 Vec global_vec;
1859 Vec local_vec;
1860 PetscInt dof;
1861
1862 PetscFunctionBeginUser;
1864
1865 // --- STEP 1: Dispatcher - Set the specific DM, Vecs, and dof based on field_name ---
1866 if (strcmp(field_name, "Ucat") == 0) {
1867 dm = user->fda;
1868 global_vec = user->Ucat;
1869 local_vec = user->lUcat;
1870 dof = 3;
1871 } else if (strcmp(field_name, "P") == 0) {
1872 dm = user->da;
1873 global_vec = user->P;
1874 local_vec = user->lP;
1875 dof = 1;
1876 } else if (strcmp(field_name, "Nvert") == 0) {
1877 dm = user->da;
1878 global_vec = user->Nvert;
1879 local_vec = user->lNvert;
1880 dof = 1;
1881 } else if (strcmp(field_name, "Eddy Viscosity") == 0) {
1882 dm = user->da;
1883 global_vec = user->Nu_t;
1884 local_vec = user->lNu_t;
1885 dof = 1;
1886 }
1887 /*
1888 // Example for future extension:
1889 else if (strcmp(field_name, "Temperature") == 0) {
1890 dm = user->da; // Assuming Temperature is scalar
1891 global_vec = user->T;
1892 local_vec = user->lT;
1893 dof = 1;
1894 }
1895 */
1896 else {
1897 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_UNKNOWN_TYPE, "Unknown field name '%s' in TransferPeriodicFieldByName.", field_name);
1898 }
1899
1900 LOG_ALLOW(GLOBAL,LOG_TRACE,"Periodic Transform being performed for field: %s with %d DoF.\n",field_name,dof);
1901 // --- STEP 2: Execute the copy logic using the dispatched variables ---
1902 if (dof == 1) { // --- Handle SCALAR fields (PetscReal) ---
1903 PetscReal ***g_array, ***l_array;
1904 ierr = DMDAVecGetArray(dm, global_vec, &g_array); CHKERRQ(ierr);
1905 ierr = DMDAVecGetArray(dm, local_vec, &l_array); CHKERRQ(ierr);
1906
1907 if (user->boundary_faces[BC_FACE_NEG_X].mathematical_type == PERIODIC && xs == 0) for (PetscInt k=zs; k<ze; k++) for (PetscInt j=ys; j<ye; j++) g_array[k][j][xs] = l_array[k][j][xs-2];
1908 if (user->boundary_faces[BC_FACE_POS_X].mathematical_type == PERIODIC && xe == mx) for (PetscInt k=zs; k<ze; k++) for (PetscInt j=ys; j<ye; j++) g_array[k][j][xe-1] = l_array[k][j][xe+1];
1909 if (user->boundary_faces[BC_FACE_NEG_Y].mathematical_type == PERIODIC && ys == 0) for (PetscInt k=zs; k<ze; k++) for (PetscInt i=xs; i<xe; i++) g_array[k][ys][i] = l_array[k][ys-2][i];
1910 if (user->boundary_faces[BC_FACE_POS_Y].mathematical_type == PERIODIC && ye == my) for (PetscInt k=zs; k<ze; k++) for (PetscInt i=xs; i<xe; i++) g_array[k][ye-1][i] = l_array[k][ye+1][i];
1911 if (user->boundary_faces[BC_FACE_NEG_Z].mathematical_type == PERIODIC && zs == 0) for (PetscInt j=ys; j<ye; j++) for (PetscInt i=xs; i<xe; i++) g_array[zs][j][i] = l_array[zs-2][j][i];
1912 if (user->boundary_faces[BC_FACE_POS_Z].mathematical_type == PERIODIC && ze == mz) for (PetscInt j=ys; j<ye; j++) for (PetscInt i=xs; i<xe; i++) g_array[ze-1][j][i] = l_array[ze+1][j][i];
1913
1914 ierr = DMDAVecRestoreArray(dm, global_vec, &g_array); CHKERRQ(ierr);
1915 ierr = DMDAVecRestoreArray(dm, local_vec, &l_array); CHKERRQ(ierr);
1916
1917 } else if (dof == 3) { // --- Handle VECTOR fields (Cmpnts) ---
1918 Cmpnts ***g_array, ***l_array;
1919 ierr = DMDAVecGetArray(dm, global_vec, &g_array); CHKERRQ(ierr);
1920 ierr = DMDAVecGetArray(dm, local_vec, &l_array); CHKERRQ(ierr);
1921
1922 LOG_ALLOW(GLOBAL,LOG_VERBOSE,"Array %s read successfully (Global and Local).\n",field_name);
1923
1924 if (user->boundary_faces[BC_FACE_NEG_X].mathematical_type == PERIODIC && xs == 0) for (PetscInt k=zs; k<ze; k++) for (PetscInt j=ys; j<ye; j++) g_array[k][j][xs] = l_array[k][j][xs-2];
1925 if (user->boundary_faces[BC_FACE_POS_X].mathematical_type == PERIODIC && xe == mx) for (PetscInt k=zs; k<ze; k++) for (PetscInt j=ys; j<ye; j++) g_array[k][j][xe-1] = l_array[k][j][xe+1];
1926 if (user->boundary_faces[BC_FACE_NEG_Y].mathematical_type == PERIODIC && ys == 0) for (PetscInt k=zs; k<ze; k++) for (PetscInt i=xs; i<xe; i++) g_array[k][ys][i] = l_array[k][ys-2][i];
1927 if (user->boundary_faces[BC_FACE_POS_Y].mathematical_type == PERIODIC && ye == my) for (PetscInt k=zs; k<ze; k++) for (PetscInt i=xs; i<xe; i++) g_array[k][ye-1][i] = l_array[k][ye+1][i];
1928 if (user->boundary_faces[BC_FACE_NEG_Z].mathematical_type == PERIODIC && zs == 0) for (PetscInt j=ys; j<ye; j++) for (PetscInt i=xs; i<xe; i++) g_array[zs][j][i] = l_array[zs-2][j][i];
1929 if (user->boundary_faces[BC_FACE_POS_Z].mathematical_type == PERIODIC && ze == mz) for (PetscInt j=ys; j<ye; j++) for (PetscInt i=xs; i<xe; i++) g_array[ze-1][j][i] = l_array[ze+1][j][i];
1930
1931 ierr = DMDAVecRestoreArray(dm, global_vec, &g_array); CHKERRQ(ierr);
1932 ierr = DMDAVecRestoreArray(dm, local_vec, &l_array); CHKERRQ(ierr);
1933 }
1934 else{
1935 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_UNKNOWN_TYPE, "This function only accepts Fields with 1 or 3 DoF.");
1936 }
1937
1939 PetscFunctionReturn(0);
1940}
Vec lNu_t
Definition variables.h:783
Vec Nu_t
Definition variables.h:783
Here is the caller graph for this function:

◆ TransferPeriodicFaceField()

PetscErrorCode TransferPeriodicFaceField ( UserCtx user,
const char *  field_name 
)

(Primitive) Copies periodic data from the interior to the local ghost cell region for a single field.

This primitive function performs a direct memory copy for a specified field, updating all periodic ghost faces (i, j, and k). It reads data from just inside the periodic boundary and writes it to the corresponding local ghost cells.

The copy is "two-cells deep" to support wider computational stencils.

This function does NOT involve any MPI communication; it operates entirely on local PETSc vectors.

Parameters
userThe main UserCtx struct.
field_nameThe string identifier for the field to update (e.g., "Csi", "Ucont").
Returns
PetscErrorCode 0 on success.

Definition at line 1959 of file Boundaries.c.

1960{
1961 PetscErrorCode ierr;
1962 DMDALocalInfo info = user->info;
// Ghosted (local) index ranges: these include the ghost-cell halo, so the
// negative indices (-1) and the indices mx/my/mz written below are valid on
// ranks whose ghost region covers the periodic wrap.
1963 PetscInt gxs = info.gxs, gxe = info.gxs + info.gxm;
1964 PetscInt gys = info.gys, gye = info.gys + info.gym;
1965 PetscInt gzs = info.gzs, gze = info.gzs + info.gzm;
1966 PetscInt mx = info.mx, my = info.my, mz = info.mz;
1967
1968 // --- Dispatcher to get the correct DM, Vec, and DoF for the specified field ---
1969 DM dm;
1970 Vec local_vec;
1971 PetscInt dof;
1972 // (This dispatcher contains all 17 potential fields)
1973 if (strcmp(field_name, "Ucont") == 0) { dm = user->fda; local_vec = user->lUcont; dof = 3; }
1974 else if (strcmp(field_name, "Csi") == 0) { dm = user->fda; local_vec = user->lCsi; dof = 3; }
1975 else if (strcmp(field_name, "Eta") == 0) { dm = user->fda; local_vec = user->lEta; dof = 3; }
1976 else if (strcmp(field_name, "Zet") == 0) { dm = user->fda; local_vec = user->lZet; dof = 3; }
1977 else if (strcmp(field_name, "ICsi") == 0) { dm = user->fda; local_vec = user->lICsi; dof = 3; }
1978 else if (strcmp(field_name, "IEta") == 0) { dm = user->fda; local_vec = user->lIEta; dof = 3; }
1979 else if (strcmp(field_name, "IZet") == 0) { dm = user->fda; local_vec = user->lIZet; dof = 3; }
1980 else if (strcmp(field_name, "JCsi") == 0) { dm = user->fda; local_vec = user->lJCsi; dof = 3; }
1981 else if (strcmp(field_name, "JEta") == 0) { dm = user->fda; local_vec = user->lJEta; dof = 3; }
1982 else if (strcmp(field_name, "JZet") == 0) { dm = user->fda; local_vec = user->lJZet; dof = 3; }
1983 else if (strcmp(field_name, "KCsi") == 0) { dm = user->fda; local_vec = user->lKCsi; dof = 3; }
1984 else if (strcmp(field_name, "KEta") == 0) { dm = user->fda; local_vec = user->lKEta; dof = 3; }
1985 else if (strcmp(field_name, "KZet") == 0) { dm = user->fda; local_vec = user->lKZet; dof = 3; }
1986 else if (strcmp(field_name, "Aj") == 0) { dm = user->da; local_vec = user->lAj; dof = 1; }
1987 else if (strcmp(field_name, "IAj") == 0) { dm = user->da; local_vec = user->lIAj; dof = 1; }
1988 else if (strcmp(field_name, "JAj") == 0) { dm = user->da; local_vec = user->lJAj; dof = 1; }
1989 else if (strcmp(field_name, "KAj") == 0) { dm = user->da; local_vec = user->lKAj; dof = 1; }
1990 else {
1991 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_UNKNOWN_TYPE, "Unknown field name '%s' in TransferPeriodicFaceField.", field_name);
1992 }
1993
// NOTE(review): PetscFunctionBeginUser appears AFTER the executable dispatcher
// above (including a possible SETERRQ). PETSc convention requires it before
// the first executable statement — candidate cleanup in the source file.
1994 PetscFunctionBeginUser;
1995
1996 void *l_array_ptr;
1997 ierr = DMDAVecGetArray(dm, local_vec, &l_array_ptr); CHKERRQ(ierr);
1998
1999 // --- I-DIRECTION ---
// NOTE(review): original line 2000 — the `if (...) {` guard for this block —
// was lost in documentation extraction. Based on the sibling routines it is
// presumably a PERIODIC check on the i-faces; verify against the repository.
// The block copies the far-side interior (mx-2, mx-3) into the near ghost
// cells (0, -1), two cells deep to support wide stencils.
2001 for (PetscInt k=gzs; k<gze; k++) for (PetscInt j=gys; j<gye; j++) {
2002 if (dof == 1) {
2003 PetscReal ***arr = (PetscReal***)l_array_ptr;
2004 arr[k][j][0] = arr[k][j][mx-2];
2005 arr[k][j][-1] = arr[k][j][mx-3];
2006 } else {
2007 Cmpnts ***arr = (Cmpnts***)l_array_ptr;
2008 arr[k][j][0] = arr[k][j][mx-2];
2009 arr[k][j][-1] = arr[k][j][mx-3];
2010 }
2011 }
2012 }
// NOTE(review): guard line 2013 also lost in extraction (positive i-face).
// This block mirrors the near-side interior (1, 2) into the far ghost cells
// (mx-1, mx).
2014 for (PetscInt k=gzs; k<gze; k++) for (PetscInt j=gys; j<gye; j++) {
2015 if (dof == 1) {
2016 PetscReal ***arr = (PetscReal***)l_array_ptr;
2017 arr[k][j][mx-1] = arr[k][j][1];
2018 arr[k][j][mx] = arr[k][j][2];
2019 } else {
2020 Cmpnts ***arr = (Cmpnts***)l_array_ptr;
2021 arr[k][j][mx-1] = arr[k][j][1];
2022 arr[k][j][mx] = arr[k][j][2];
2023 }
2024 }
2025 }
2026
2027 // --- J-DIRECTION ---
// NOTE(review): guard line 2028 lost in extraction (negative j-face).
2029 for (PetscInt k=gzs; k<gze; k++) for (PetscInt i=gxs; i<gxe; i++) {
2030 if (dof == 1) {
2031 PetscReal ***arr = (PetscReal***)l_array_ptr;
2032 arr[k][0][i] = arr[k][my-2][i];
2033 arr[k][-1][i] = arr[k][my-3][i];
2034 } else {
2035 Cmpnts ***arr = (Cmpnts***)l_array_ptr;
2036 arr[k][0][i] = arr[k][my-2][i];
2037 arr[k][-1][i] = arr[k][my-3][i];
2038 }
2039 }
2040 }
// NOTE(review): guard line 2041 lost in extraction (positive j-face).
2042 for (PetscInt k=gzs; k<gze; k++) for (PetscInt i=gxs; i<gxe; i++) {
2043 if (dof == 1) {
2044 PetscReal ***arr = (PetscReal***)l_array_ptr;
2045 arr[k][my-1][i] = arr[k][1][i];
2046 arr[k][my][i] = arr[k][2][i];
2047 } else {
2048 Cmpnts ***arr = (Cmpnts***)l_array_ptr;
2049 arr[k][my-1][i] = arr[k][1][i];
2050 arr[k][my][i] = arr[k][2][i];
2051 }
2052 }
2053 }
2054
2055 // --- K-DIRECTION ---
// NOTE(review): guard line 2056 lost in extraction (negative k-face).
2057 for (PetscInt j=gys; j<gye; j++) for (PetscInt i=gxs; i<gxe; i++) {
2058 if (dof == 1) {
2059 PetscReal ***arr = (PetscReal***)l_array_ptr;
2060 arr[0][j][i] = arr[mz-2][j][i];
2061 arr[-1][j][i] = arr[mz-3][j][i];
2062 } else {
2063 Cmpnts ***arr = (Cmpnts***)l_array_ptr;
2064 arr[0][j][i] = arr[mz-2][j][i];
2065 arr[-1][j][i] = arr[mz-3][j][i];
2066 }
2067 }
2068 }
// NOTE(review): guard line 2069 lost in extraction (positive k-face).
2070 for (PetscInt j=gys; j<gye; j++) for (PetscInt i=gxs; i<gxe; i++) {
2071 if (dof == 1) {
2072 PetscReal ***arr = (PetscReal***)l_array_ptr;
2073 arr[mz-1][j][i] = arr[1][j][i];
2074 arr[mz][j][i] = arr[2][j][i];
2075 } else {
2076 Cmpnts ***arr = (Cmpnts***)l_array_ptr;
2077 arr[mz-1][j][i] = arr[1][j][i];
2078 arr[mz][j][i] = arr[2][j][i];
2079 }
2080 }
2081 }
2082
2083 ierr = DMDAVecRestoreArray(dm, local_vec, &l_array_ptr); CHKERRQ(ierr);
2084 PetscFunctionReturn(0);
2085}
Vec lIEta
Definition variables.h:778
Vec lIZet
Definition variables.h:778
Vec lZet
Definition variables.h:776
Vec lIAj
Definition variables.h:778
Vec lKEta
Definition variables.h:780
Vec lJCsi
Definition variables.h:779
Vec lKZet
Definition variables.h:780
Vec lJEta
Definition variables.h:779
Vec lCsi
Definition variables.h:776
Vec lKCsi
Definition variables.h:780
Vec lJZet
Definition variables.h:779
Vec lUcont
Definition variables.h:755
Vec lAj
Definition variables.h:776
Vec lICsi
Definition variables.h:778
Vec lEta
Definition variables.h:776
Vec lJAj
Definition variables.h:779
Vec lKAj
Definition variables.h:780
Here is the caller graph for this function:

◆ ApplyMetricsPeriodicBCs()

PetscErrorCode ApplyMetricsPeriodicBCs ( UserCtx user)

(Orchestrator) Updates all metric-related fields in the local ghost cell regions for periodic boundaries.

This function calls the TransferPeriodicFaceField primitive for each of the 16 metric fields that require a 2-cell deep periodic ghost cell update. This is a direct replacement for the legacy Update_Metrics_PBC function.

Parameters
userThe main UserCtx struct.
Returns
PetscErrorCode 0 on success.

Definition at line 2099 of file Boundaries.c.

2100{
2101 PetscErrorCode ierr;
2102 PetscFunctionBeginUser;
// NOTE(review): original line 2103 was dropped by the documentation extractor
// (likely a linked macro such as a profiling/log marker) — verify in the repo.
2104
// The 16 metric fields that need a 2-cell-deep periodic ghost refresh; each
// name is understood by the TransferPeriodicFaceField dispatcher.
2105 const char* metric_fields[] = {
2106 "Csi", "Eta", "Zet", "ICsi", "JCsi", "KCsi", "IEta", "JEta", "KEta",
2107 "IZet", "JZet", "KZet", "Aj", "IAj", "JAj", "KAj"
2108 };
2109 PetscInt num_fields = sizeof(metric_fields) / sizeof(metric_fields[0]);
2110
// Delegate each field to the periodic face-copy primitive; abort on first error.
2111 for (PetscInt i = 0; i < num_fields; i++) {
2112 ierr = TransferPeriodicFaceField(user, metric_fields[i]); CHKERRQ(ierr);
2113 LOG_ALLOW(GLOBAL,LOG_TRACE,"Periodic Transfer complete for %s.\n",metric_fields[i]);
2114 }
2115
// NOTE(review): original line 2116 also dropped in extraction.
2117 PetscFunctionReturn(0);
2118}
PetscErrorCode TransferPeriodicFaceField(UserCtx *user, const char *field_name)
(Primitive) Copies periodic data from the interior to the local ghost cell region for a single field.
Here is the call graph for this function:
Here is the caller graph for this function:

◆ ApplyPeriodicBCs()

PetscErrorCode ApplyPeriodicBCs ( UserCtx user)

Applies periodic boundary conditions by copying data across domain boundaries for all relevant fields.

This function orchestrates the periodic update. It first performs a single, collective ghost-cell exchange for all fields. Then, it calls a generic helper routine to perform the memory copy for each individual field by name.

Parameters
userThe main UserCtx struct.
Returns
PetscErrorCode 0 on success.

Definition at line 2132 of file Boundaries.c.

2133{
2134 PetscErrorCode ierr;
2135 PetscBool is_any_periodic = PETSC_FALSE;
2136
2137 PetscFunctionBeginUser;
2138
2140
2141 for (int i = 0; i < 6; i++) {
2142 if (user->boundary_faces[i].mathematical_type == PERIODIC) {
2143 is_any_periodic = PETSC_TRUE;
2144 break;
2145 }
2146 }
2147
2148 if (!is_any_periodic) {
2149 LOG_ALLOW(GLOBAL,LOG_TRACE, "No periodic boundaries defined; skipping ApplyPeriodicBCs.\n");
2151 PetscFunctionReturn(0);
2152 }
2153
2154 LOG_ALLOW(GLOBAL, LOG_TRACE, "Applying periodic boundary conditions for all fields.\s");
2155
2156 // STEP 1: Perform the collective communication for all fields at once.
2157 ierr = UpdateLocalGhosts(user, "Ucat"); CHKERRQ(ierr);
2158 ierr = UpdateLocalGhosts(user, "P"); CHKERRQ(ierr);
2159 ierr = UpdateLocalGhosts(user, "Nvert"); CHKERRQ(ierr);
2160 /* if (user->solve_temperature) { ierr = UpdateLocalGhosts(user, "Temperature"); CHKERRQ(ierr); } */
2161
2162 // STEP 2: Call the generic copy routine for each field by name.
2163 ierr = TransferPeriodicField(user, "Ucat"); CHKERRQ(ierr);
2164 ierr = TransferPeriodicField(user, "P"); CHKERRQ(ierr);
2165 ierr = TransferPeriodicField(user, "Nvert"); CHKERRQ(ierr);
2166
2167 // FUTURE EXTENSION: Adding a new scalar field like Temperature is now trivial.
2168 /*
2169 if (user->solve_temperature) {
2170 ierr = TransferPeriodicField(user, "Temperature"); CHKERRQ(ierr);
2171 }
2172 */
2173
2175 PetscFunctionReturn(0);
2176}
PetscErrorCode TransferPeriodicField(UserCtx *user, const char *field_name)
(Private) A generic routine to copy data for a single, named field across periodic boundaries.
PetscErrorCode UpdateLocalGhosts(UserCtx *user, const char *fieldName)
Updates the local vector (including ghost points) from its corresponding global vector.
Definition setup.c:1157
Here is the call graph for this function:
Here is the caller graph for this function:

◆ ApplyUcontPeriodicBCs()

PetscErrorCode ApplyUcontPeriodicBCs ( UserCtx user)

(Orchestrator) Updates the contravariant velocity field in the local ghost cell regions for periodic boundaries.

This function calls the TransferPeriodicFaceField primitive for the Ucont field. This is a direct replacement for the legacy Update_U_Cont_PBC function.

Parameters
userThe main UserCtx struct.
Returns
PetscErrorCode 0 on success.

Definition at line 2189 of file Boundaries.c.

2190{
2191 PetscErrorCode ierr;
2192 PetscFunctionBeginUser;
// NOTE(review): original line 2193 was dropped by the documentation extractor
// (likely a linked profiling/log macro) — verify against the repository.
2194
// Single-field delegation: refresh Ucont's periodic ghost faces via the
// generic face-copy primitive.
2195 ierr = TransferPeriodicFaceField(user, "Ucont"); CHKERRQ(ierr);
2196
// NOTE(review): original line 2197 also dropped in extraction.
2198 PetscFunctionReturn(0);
2199}
Here is the call graph for this function:

◆ EnforceUcontPeriodicity()

PetscErrorCode EnforceUcontPeriodicity ( UserCtx user)

Enforces strict periodicity on the interior contravariant velocity field.

This function is a "fix-up" routine for staggered grids. After a solver step, numerical inaccuracies can lead to small discrepancies between fluxes on opposing periodic boundaries. This function manually corrects this by copying the flux value from the first boundary face (retrieved from a ghost cell) to the last interior face.

This routine involves MPI communication to synchronize the grid before and after the copy.

Parameters
userThe main UserCtx struct.
Returns
PetscErrorCode 0 on success.

Definition at line 2216 of file Boundaries.c.

2217{
2218 PetscErrorCode ierr;
2219 DMDALocalInfo info = user->info;
// Ghosted (local) index ranges — the copies below read ghost indices mx/my/mz,
// which are only addressable through the local (halo-carrying) vector.
2220 PetscInt gxs = info.gxs, gxe = info.gxs + info.gxm;
2221 PetscInt gys = info.gys, gye = info.gys + info.gym;
2222 PetscInt gzs = info.gzs, gze = info.gzs + info.gzm;
2223 PetscInt mx = info.mx, my = info.my, mz = info.mz;
2224 Cmpnts ***ucont;
2225
2226 PetscFunctionBeginUser;
// NOTE(review): original line 2227 was dropped by the documentation extractor
// (likely a linked profiling/log macro) — verify against the repository.
2228
2229 // STEP 1: Ensure local ghost cells are up-to-date with current global state.
2230 ierr = DMGlobalToLocalBegin(user->fda, user->Ucont, INSERT_VALUES, user->lUcont); CHKERRQ(ierr);
2231 ierr = DMGlobalToLocalEnd(user->fda, user->Ucont, INSERT_VALUES, user->lUcont); CHKERRQ(ierr);
2232
2233 ierr = DMDAVecGetArray(user->fda, user->lUcont, &ucont); CHKERRQ(ierr);
2234
2235 // STEP 2: Perform the component-wise copy from ghost cells to the last interior faces.
// NOTE(review): original line 2236 — the `if (...) {` guard for this i-direction
// block — was lost in extraction; presumably a PERIODIC check. Verify in repo.
2237 for (PetscInt k=gzs; k<gze; k++) for (PetscInt j=gys; j<gye; j++) {
2238 ucont[k][j][mx-2].x = ucont[k][j][mx].x; // Note: ucont[mx] is ghost, gets value from ucont[0]
2239 }
2240 }
// NOTE(review): guard line 2241 also lost in extraction (j-direction).
2242 for (PetscInt k=gzs; k<gze; k++) for (PetscInt i=gxs; i<gxe; i++) {
2243 ucont[k][my-2][i].y = ucont[k][my][i].y; // Note: ucont[my] is ghost, gets value from ucont[0]
2244 }
2245 }
// NOTE(review): guard line 2246 also lost in extraction (k-direction).
2247 for (PetscInt j=gys; j<gye; j++) for (PetscInt i=gxs; i<gxe; i++) {
2248 ucont[mz-2][j][i].z = ucont[mz][j][i].z; // Note: ucont[mz] is ghost, gets value from ucont[0]
2249 }
2250 }
2251
2252 ierr = DMDAVecRestoreArray(user->fda, user->lUcont, &ucont); CHKERRQ(ierr);
2253
2254 // STEP 3: Communicate the changes made to the interior back to the global vector.
2255 ierr = DMLocalToGlobalBegin(user->fda, user->lUcont, INSERT_VALUES, user->Ucont); CHKERRQ(ierr);
2256 ierr = DMLocalToGlobalEnd(user->fda, user->lUcont, INSERT_VALUES, user->Ucont); CHKERRQ(ierr);
2257
// NOTE(review): original line 2258 dropped in extraction (likely profiling macro).
2259 PetscFunctionReturn(0);
2260}
Vec Ucont
Definition variables.h:755

◆ UpdateDummyCells()

PetscErrorCode UpdateDummyCells ( UserCtx user)

Updates the dummy cells (ghost nodes) on the faces of the local domain for NON-PERIODIC boundaries.

This function's role is to apply a second-order extrapolation to set the ghost cell values based on the boundary condition value (stored in ubcs) and the first interior cell.

NOTE: This function deliberately IGNORES periodic boundaries. It is part of a larger workflow where ApplyPeriodicBCs handles periodic faces first.

CRITICAL DETAIL: This function uses shrunken loop ranges (lxs, lxe, etc.) to intentionally update only the flat part of the faces, avoiding the edges and corners. The edges and corners are then handled separately by UpdateCornerNodes. This precisely replicates the logic of the original FormBCS function.

Parameters
userThe main UserCtx struct containing all necessary data.
Returns
PetscErrorCode 0 on success.

This function's role is to apply a second-order extrapolation to set the ghost cell values based on the boundary condition value (stored in ubcs) and the first interior cell.

NOTE: This function deliberately IGNORES periodic boundaries. It is part of a larger workflow where ApplyPeriodicBCs handles periodic faces first.

CRITICAL DETAIL: This function uses shrunken loop ranges (lxs, lxe, etc.) to intentionally update only the flat part of the faces, avoiding the edges and corners. The edges and corners are then handled separately by UpdateCornerNodes. This precisely replicates the logic of the original function.

Parameters
userThe main UserCtx struct containing all necessary data.
Returns
PetscErrorCode 0 on success.

Definition at line 2283 of file Boundaries.c.

2284{
2285 PetscErrorCode ierr;
2286 DM fda = user->fda;
2287 DMDALocalInfo info = user->info;
2288 PetscInt xs = info.xs, xe = info.xs + info.xm;
2289 PetscInt ys = info.ys, ye = info.ys + info.ym;
2290 PetscInt zs = info.zs, ze = info.zs + info.zm;
2291 PetscInt mx = info.mx, my = info.my, mz = info.mz;
2292
2293 // --- Calculate shrunken loop ranges to avoid edges and corners ---
2294 PetscInt lxs = xs, lxe = xe;
2295 PetscInt lys = ys, lye = ye;
2296 PetscInt lzs = zs, lze = ze;
2297
2298 if (xs == 0) lxs = xs + 1;
2299 if (ys == 0) lys = ys + 1;
2300 if (zs == 0) lzs = zs + 1;
2301
2302 if (xe == mx) lxe = xe - 1;
2303 if (ye == my) lye = ye - 1;
2304 if (ze == mz) lze = ze - 1;
2305
2306 Cmpnts ***ucat, ***ubcs;
2307 PetscFunctionBeginUser;
2308
2309 ierr = DMDAVecGetArray(fda, user->Bcs.Ubcs, &ubcs); CHKERRQ(ierr);
2310 ierr = DMDAVecGetArray(fda, user->Ucat, &ucat); CHKERRQ(ierr);
2311
2312 // -X Face
2313 if (user->boundary_faces[BC_FACE_NEG_X].mathematical_type != PERIODIC && xs == 0) {
2314 for (PetscInt k = lzs; k < lze; k++) for (PetscInt j = lys; j < lye; j++) {
2315 ucat[k][j][xs].x = 2.0 * ubcs[k][j][xs].x - ucat[k][j][xs + 1].x;
2316 ucat[k][j][xs].y = 2.0 * ubcs[k][j][xs].y - ucat[k][j][xs + 1].y;
2317 ucat[k][j][xs].z = 2.0 * ubcs[k][j][xs].z - ucat[k][j][xs + 1].z;
2318 }
2319 }
2320 // +X Face
2321 if (user->boundary_faces[BC_FACE_POS_X].mathematical_type != PERIODIC && xe == mx) {
2322 for (PetscInt k = lzs; k < lze; k++) for (PetscInt j = lys; j < lye; j++) {
2323 ucat[k][j][xe-1].x = 2.0 * ubcs[k][j][xe-1].x - ucat[k][j][xe - 2].x;
2324 ucat[k][j][xe-1].y = 2.0 * ubcs[k][j][xe-1].y - ucat[k][j][xe - 2].y;
2325 ucat[k][j][xe-1].z = 2.0 * ubcs[k][j][xe-1].z - ucat[k][j][xe - 2].z;
2326 }
2327 }
2328
2329 // -Y Face
2330 if (user->boundary_faces[BC_FACE_NEG_Y].mathematical_type != PERIODIC && ys == 0) {
2331 for (PetscInt k = lzs; k < lze; k++) for (PetscInt i = lxs; i < lxe; i++) {
2332 ucat[k][ys][i].x = 2.0 * ubcs[k][ys][i].x - ucat[k][ys + 1][i].x;
2333 ucat[k][ys][i].y = 2.0 * ubcs[k][ys][i].y - ucat[k][ys + 1][i].y;
2334 ucat[k][ys][i].z = 2.0 * ubcs[k][ys][i].z - ucat[k][ys + 1][i].z;
2335 }
2336 }
2337 // +Y Face
2338 if (user->boundary_faces[BC_FACE_POS_Y].mathematical_type != PERIODIC && ye == my) {
2339 for (PetscInt k = lzs; k < lze; k++) for (PetscInt i = lxs; i < lxe; i++) {
2340 ucat[k][ye-1][i].x = 2.0 * ubcs[k][ye-1][i].x - ucat[k][ye-2][i].x;
2341 ucat[k][ye-1][i].y = 2.0 * ubcs[k][ye-1][i].y - ucat[k][ye-2][i].y;
2342 ucat[k][ye-1][i].z = 2.0 * ubcs[k][ye-1][i].z - ucat[k][ye-2][i].z;
2343 }
2344 }
2345
2346 // -Z Face
2347 if (user->boundary_faces[BC_FACE_NEG_Z].mathematical_type != PERIODIC && zs == 0) {
2348 for (PetscInt j = lys; j < lye; j++) for (PetscInt i = lxs; i < lxe; i++) {
2349 ucat[zs][j][i].x = 2.0 * ubcs[zs][j][i].x - ucat[zs + 1][j][i].x;
2350 ucat[zs][j][i].y = 2.0 * ubcs[zs][j][i].y - ucat[zs + 1][j][i].y;
2351 ucat[zs][j][i].z = 2.0 * ubcs[zs][j][i].z - ucat[zs + 1][j][i].z;
2352 }
2353 }
2354 // +Z Face
2355 if (user->boundary_faces[BC_FACE_POS_Z].mathematical_type != PERIODIC && ze == mz) {
2356 for (PetscInt j = lys; j < lye; j++) for (PetscInt i = lxs; i < lxe; i++) {
2357 ucat[ze-1][j][i].x = 2.0 * ubcs[ze-1][j][i].x - ucat[ze-2][j][i].x;
2358 ucat[ze-1][j][i].y = 2.0 * ubcs[ze-1][j][i].y - ucat[ze-2][j][i].y;
2359 ucat[ze-1][j][i].z = 2.0 * ubcs[ze-1][j][i].z - ucat[ze-2][j][i].z;
2360 }
2361 }
2362
2363 ierr = DMDAVecRestoreArray(fda, user->Bcs.Ubcs, &ubcs); CHKERRQ(ierr);
2364 ierr = DMDAVecRestoreArray(fda, user->Ucat, &ucat); CHKERRQ(ierr);
2365
2366 PetscFunctionReturn(0);
2367}
Vec Ubcs
Boundary condition velocity values. (Comment: "An ugly hack, waste of memory")
Definition variables.h:121
BCS Bcs
Definition variables.h:749
Here is the caller graph for this function:

◆ UpdateCornerNodes()

PetscErrorCode UpdateCornerNodes ( UserCtx user)

Updates the corner and edge ghost nodes of the local domain by averaging.

This function should be called AFTER the face ghost nodes are finalized by both ApplyPeriodicBCs and UpdateDummyCells. It resolves the values at shared edges and corners by averaging the values of adjacent, previously-computed ghost nodes.

The logic is generic and works correctly regardless of the boundary types on the adjacent faces (e.g., it will correctly average a periodic face neighbor with a wall face neighbor).

Parameters
userThe main UserCtx struct containing all necessary data.
Returns
PetscErrorCode 0 on success.

Definition at line 2386 of file Boundaries.c.

2387{
2388 PetscErrorCode ierr;
2389 DM da = user->da, fda = user->fda; // da: scalar fields (P); fda: vector fields (Ucat)
2390 DMDALocalInfo info = user->info;
2391 PetscInt xs = info.xs, xe = info.xs + info.xm; // owned node range, i-direction
2392 PetscInt ys = info.ys, ye = info.ys + info.ym; // owned node range, j-direction
2393 PetscInt zs = info.zs, ze = info.zs + info.zm; // owned node range, k-direction
2394 PetscInt mx = info.mx, my = info.my, mz = info.mz; // global node counts
2395
2396 Cmpnts ***ucat;
2397 PetscReal ***p;
2398
2399 PetscFunctionBeginUser;
2400
2401 ierr = DMDAVecGetArray(fda, user->Ucat, &ucat); CHKERRQ(ierr);
2402 ierr = DMDAVecGetArray(da, user->P, &p); CHKERRQ(ierr);
2403
2404 // --- Update Edges and Corners by Averaging ---
2405 // The order of these blocks ensures that corners (where 3 faces meet) are
2406 // computed using data from edges (where 2 faces meet), which are computed first.
 // NOTE(review): the XY-edge blocks at the end loop over the FULL owned k-range
 // (including k=0 and k=mz-1), so a node where three faces meet is last written
 // there and ends up as the average of its two in-plane neighbors only — confirm
 // this overwrite order is intended.
2407// Edges connected to the -Z face (k=zs)
2408 if (zs == 0) {
2409 if (xs == 0) {
2410 for (PetscInt j = ys; j < ye; j++) {
2411 p[zs][j][xs] = 0.5 * (p[zs+1][j][xs] + p[zs][j][xs+1]);
2412 ucat[zs][j][xs].x = 0.5 * (ucat[zs+1][j][xs].x + ucat[zs][j][xs+1].x);
2413 ucat[zs][j][xs].y = 0.5 * (ucat[zs+1][j][xs].y + ucat[zs][j][xs+1].y);
2414 ucat[zs][j][xs].z = 0.5 * (ucat[zs+1][j][xs].z + ucat[zs][j][xs+1].z);
2415 }
2416 }
2417 if (xe == mx) {
2418 for (PetscInt j = ys; j < ye; j++) {
2419 p[zs][j][mx-1] = 0.5 * (p[zs+1][j][mx-1] + p[zs][j][mx-2]);
2420 ucat[zs][j][mx-1].x = 0.5 * (ucat[zs+1][j][mx-1].x + ucat[zs][j][mx-2].x);
2421 ucat[zs][j][mx-1].y = 0.5 * (ucat[zs+1][j][mx-1].y + ucat[zs][j][mx-2].y);
2422 ucat[zs][j][mx-1].z = 0.5 * (ucat[zs+1][j][mx-1].z + ucat[zs][j][mx-2].z);
2423 }
2424 }
2425 if (ys == 0) {
2426 for (PetscInt i = xs; i < xe; i++) {
2427 p[zs][ys][i] = 0.5 * (p[zs+1][ys][i] + p[zs][ys+1][i]);
2428 ucat[zs][ys][i].x = 0.5 * (ucat[zs+1][ys][i].x + ucat[zs][ys+1][i].x);
2429 ucat[zs][ys][i].y = 0.5 * (ucat[zs+1][ys][i].y + ucat[zs][ys+1][i].y);
2430 ucat[zs][ys][i].z = 0.5 * (ucat[zs+1][ys][i].z + ucat[zs][ys+1][i].z);
2431 }
2432 }
2433 if (ye == my) {
2434 for (PetscInt i = xs; i < xe; i++) {
2435 p[zs][my-1][i] = 0.5 * (p[zs+1][my-1][i] + p[zs][my-2][i]);
2436 ucat[zs][my-1][i].x = 0.5 * (ucat[zs+1][my-1][i].x + ucat[zs][my-2][i].x);
2437 ucat[zs][my-1][i].y = 0.5 * (ucat[zs+1][my-1][i].y + ucat[zs][my-2][i].y);
2438 ucat[zs][my-1][i].z = 0.5 * (ucat[zs+1][my-1][i].z + ucat[zs][my-2][i].z);
2439 }
2440 }
2441 }
2442
2443 // Edges connected to the +Z face (k=ze-1)
2444 if (ze == mz) {
2445 if (xs == 0) {
2446 for (PetscInt j = ys; j < ye; j++) {
2447 p[mz-1][j][xs] = 0.5 * (p[mz-2][j][xs] + p[mz-1][j][xs+1]);
2448 ucat[mz-1][j][xs].x = 0.5 * (ucat[mz-2][j][xs].x + ucat[mz-1][j][xs+1].x);
2449 ucat[mz-1][j][xs].y = 0.5 * (ucat[mz-2][j][xs].y + ucat[mz-1][j][xs+1].y);
2450 ucat[mz-1][j][xs].z = 0.5 * (ucat[mz-2][j][xs].z + ucat[mz-1][j][xs+1].z);
2451 }
2452 }
2453 if (xe == mx) {
2454 for (PetscInt j = ys; j < ye; j++) {
2455 p[mz-1][j][mx-1] = 0.5 * (p[mz-2][j][mx-1] + p[mz-1][j][mx-2]);
2456 ucat[mz-1][j][mx-1].x = 0.5 * (ucat[mz-2][j][mx-1].x + ucat[mz-1][j][mx-2].x);
2457 ucat[mz-1][j][mx-1].y = 0.5 * (ucat[mz-2][j][mx-1].y + ucat[mz-1][j][mx-2].y);
2458 ucat[mz-1][j][mx-1].z = 0.5 * (ucat[mz-2][j][mx-1].z + ucat[mz-1][j][mx-2].z);
2459 }
2460 }
2461 if (ys == 0) {
2462 for (PetscInt i = xs; i < xe; i++) {
2463 p[mz-1][ys][i] = 0.5 * (p[mz-2][ys][i] + p[mz-1][ys+1][i]);
2464 ucat[mz-1][ys][i].x = 0.5 * (ucat[mz-2][ys][i].x + ucat[mz-1][ys+1][i].x);
2465 ucat[mz-1][ys][i].y = 0.5 * (ucat[mz-2][ys][i].y + ucat[mz-1][ys+1][i].y);
2466 ucat[mz-1][ys][i].z = 0.5 * (ucat[mz-2][ys][i].z + ucat[mz-1][ys+1][i].z);
2467 }
2468 }
2469 if (ye == my) {
2470 for (PetscInt i = xs; i < xe; i++) {
2471 p[mz-1][my-1][i] = 0.5 * (p[mz-2][my-1][i] + p[mz-1][my-2][i]);
2472 ucat[mz-1][my-1][i].x = 0.5 * (ucat[mz-2][my-1][i].x + ucat[mz-1][my-2][i].x);
2473 ucat[mz-1][my-1][i].y = 0.5 * (ucat[mz-2][my-1][i].y + ucat[mz-1][my-2][i].y);
2474 ucat[mz-1][my-1][i].z = 0.5 * (ucat[mz-2][my-1][i].z + ucat[mz-1][my-2][i].z);
2475 }
2476 }
2477 }
2478
2479 // Remaining edges on the XY plane (that are not on Z faces)
2480 if (ys == 0) {
2481 if (xs == 0) {
2482 for (PetscInt k = zs; k < ze; k++) {
2483 p[k][ys][xs] = 0.5 * (p[k][ys+1][xs] + p[k][ys][xs+1]);
2484 ucat[k][ys][xs].x = 0.5 * (ucat[k][ys+1][xs].x + ucat[k][ys][xs+1].x);
2485 ucat[k][ys][xs].y = 0.5 * (ucat[k][ys+1][xs].y + ucat[k][ys][xs+1].y);
2486 ucat[k][ys][xs].z = 0.5 * (ucat[k][ys+1][xs].z + ucat[k][ys][xs+1].z);
2487 }
2488 }
2489 if (xe == mx) {
2490 for (PetscInt k = zs; k < ze; k++) {
2491 p[k][ys][mx-1] = 0.5 * (p[k][ys+1][mx-1] + p[k][ys][mx-2]);
2492 ucat[k][ys][mx-1].x = 0.5 * (ucat[k][ys+1][mx-1].x + ucat[k][ys][mx-2].x);
2493 ucat[k][ys][mx-1].y = 0.5 * (ucat[k][ys+1][mx-1].y + ucat[k][ys][mx-2].y);
2494 ucat[k][ys][mx-1].z = 0.5 * (ucat[k][ys+1][mx-1].z + ucat[k][ys][mx-2].z);
2495 }
2496 }
2497 }
2498
2499 if (ye == my) {
2500 if (xs == 0) {
2501 for (PetscInt k = zs; k < ze; k++) {
2502 p[k][my-1][xs] = 0.5 * (p[k][my-2][xs] + p[k][my-1][xs+1]);
2503 ucat[k][my-1][xs].x = 0.5 * (ucat[k][my-2][xs].x + ucat[k][my-1][xs+1].x);
2504 ucat[k][my-1][xs].y = 0.5 * (ucat[k][my-2][xs].y + ucat[k][my-1][xs+1].y);
2505 ucat[k][my-1][xs].z = 0.5 * (ucat[k][my-2][xs].z + ucat[k][my-1][xs+1].z);
2506 }
2507 }
2508 if (xe == mx) {
2509 for (PetscInt k = zs; k < ze; k++) {
2510 p[k][my-1][mx-1] = 0.5 * (p[k][my-2][mx-1] + p[k][my-1][mx-2]);
2511 ucat[k][my-1][mx-1].x = 0.5 * (ucat[k][my-2][mx-1].x + ucat[k][my-1][mx-2].x);
2512 ucat[k][my-1][mx-1].y = 0.5 * (ucat[k][my-2][mx-1].y + ucat[k][my-1][mx-2].y);
2513 ucat[k][my-1][mx-1].z = 0.5 * (ucat[k][my-2][mx-1].z + ucat[k][my-1][mx-2].z);
2514 }
2515 }
2516 }
2517
2518 ierr = DMDAVecRestoreArray(fda, user->Ucat, &ucat); CHKERRQ(ierr);
2519 ierr = DMDAVecRestoreArray(da, user->P, &p); CHKERRQ(ierr);
2520
2521 PetscFunctionReturn(0);
2522}
Here is the caller graph for this function:

◆ UpdatePeriodicCornerNodes()

PetscErrorCode UpdatePeriodicCornerNodes ( UserCtx * user,
PetscInt  num_fields,
const char *  field_names[] 
)

(Orchestrator) Performs a sequential, deterministic periodic update for a list of fields.

This function orchestrates the resolution of ambiguous periodic corners and edges. It takes an array of field names and updates them in a strict i-sync-j-sync-k order by calling the low-level worker TransferPeriodicFieldByDirection and the communication routine UpdateLocalGhosts.

Parameters
user — The main UserCtx struct.
num_fields — The number of fields in the field_names array.
field_names — An array of strings with the names of fields to update (e.g., ["Ucat", "P"]).
Returns
PetscErrorCode 0 on success.

Definition at line 2539 of file Boundaries.c.

2540{
2541 PetscErrorCode ierr;
2542 PetscFunctionBeginUser;
2543
2544 if (num_fields == 0) PetscFunctionReturn(0); // nothing to update
2545
 // Each directional pass must observe the ghost values produced by the previous
 // pass, so a full ghost synchronization of every field separates the i, j and
 // k transfers; this fixed i -> j -> k order makes corner/edge resolution
 // deterministic regardless of the MPI rank layout.
2546 // --- I-DIRECTION ---
2547 for (PetscInt i = 0; i < num_fields; i++) {
2548 ierr = TransferPeriodicFieldByDirection(user, field_names[i], 'i'); CHKERRQ(ierr);
2549 }
2550 // --- SYNC ---
2551 for (PetscInt i = 0; i < num_fields; i++) {
2552 ierr = UpdateLocalGhosts(user, field_names[i]); CHKERRQ(ierr);
2553 }
2554
2555 // --- J-DIRECTION ---
2556 for (PetscInt i = 0; i < num_fields; i++) {
2557 ierr = TransferPeriodicFieldByDirection(user, field_names[i], 'j'); CHKERRQ(ierr);
2558 }
2559 // --- SYNC ---
2560 for (PetscInt i = 0; i < num_fields; i++) {
2561 ierr = UpdateLocalGhosts(user, field_names[i]); CHKERRQ(ierr);
2562 }
2563
2564 // --- K-DIRECTION ---
2565 for (PetscInt i = 0; i < num_fields; i++) {
2566 ierr = TransferPeriodicFieldByDirection(user, field_names[i], 'k'); CHKERRQ(ierr);
2567 }
2568 // --- FINAL SYNC ---
2569 for (PetscInt i = 0; i < num_fields; i++) {
2570 ierr = UpdateLocalGhosts(user, field_names[i]); CHKERRQ(ierr);
2571 }
2572
2573 PetscFunctionReturn(0);
2574}
PetscErrorCode TransferPeriodicFieldByDirection(UserCtx *user, const char *field_name, char direction)
(Private Worker) Copies periodic data for a SINGLE field in a SINGLE direction.
Here is the call graph for this function:
Here is the caller graph for this function:

◆ ApplyWallFunction()

PetscErrorCode ApplyWallFunction ( UserCtx * user )

Applies wall function modeling to near-wall velocities for all wall-type boundaries.

This function implements log-law wall functions to model the near-wall velocity profile without fully resolving the viscous sublayer. It is applicable to ALL wall-type boundaries regardless of their specific boundary condition (no-slip, moving wall, slip, etc.), as determined by the mathematical_type being WALL.

MATHEMATICAL BACKGROUND: Wall functions bridge the gap between the wall (y=0) and the first computational cell center by using empirical log-law relationships:

  • Viscous sublayer (y+ < 11.81): u+ = y+
  • Log-law region (y+ > 11.81): u+ = (1/κ) * ln(E * y+) where u+ = u/u_Ï„, y+ = y*u_Ï„/ν, κ = 0.41 (von Karman constant), E = exp(κB)

IMPLEMENTATION DETAILS: Unlike standard boundary conditions that set ghost cell values, wall functions:

  1. Read velocity from the SECOND interior cell (i±2, j±2, k±2)
  2. Compute wall shear stress using log-law
  3. Modify velocity at the FIRST interior cell (i±1, j±1, k±1)
  4. Keep ghost cell boundary values (ubcs, ucont) at zero

WORKFLOW:

  • Called from ApplyBoundaryConditions after standard BC application
  • Operates on ucat (Cartesian velocity)
  • Updates ustar (friction velocity field) for diagnostics/turbulence models
  • Ghost cells remain zero; UpdateDummyCells handles extrapolation afterward

GEOMETRIC QUANTITIES: sb = wall-normal distance from wall to first interior cell center sc = wall-normal distance from wall to second interior cell center
These are computed from cell Jacobians (aj) and face area vectors

APPLICABILITY:

  • Requires simCtx->wallfunction = true
  • Only processes faces where mathematical_type == WALL
  • Skips solid-embedded cells (nvert >= 0.1)
Parameters
user — The UserCtx containing all simulation state and geometry
Returns
PetscErrorCode 0 on success
Note
This function modifies interior cell velocities, NOT ghost cells
Wall roughness (ks) is currently set to 1e-16 (smooth wall)
See also
wall_function_loglaw() in wallfunction.c for the actual log-law implementation
noslip() in wallfunction.c for the initial linear interpolation

Definition at line 2624 of file Boundaries.c.

2625{
2626 PetscErrorCode ierr;
2627 SimCtx *simCtx = user->simCtx;
2628 DMDALocalInfo *info = &user->info;
2629
2630 PetscFunctionBeginUser;
2631
2632 // =========================================================================
2633 // STEP 0: Early exit if wall functions are disabled
2634 // =========================================================================
2635 if (!simCtx->wallfunction) {
2636 PetscFunctionReturn(0);
2637 }
2638
2639 LOG_ALLOW(LOCAL, LOG_DEBUG, "Processing wall function boundaries.\n");
2640
2641 // =========================================================================
2642 // STEP 1: Get read/write access to all necessary field arrays
2643 // =========================================================================
2644 Cmpnts ***velocity_cartesian; // Cartesian velocity (modified)
2645 Cmpnts ***velocity_contravariant; // Contravariant velocity (set to zero at walls)
2646 Cmpnts ***velocity_boundary; // Boundary condition velocity (kept at zero)
2647 Cmpnts ***csi, ***eta, ***zet; // Metric tensor components (face normals)
2648 PetscReal ***node_vertex_flag; // Fluid/solid indicator (0=fluid, 1=solid)
2649 PetscReal ***cell_jacobian; // Grid Jacobian (1/volume)
2650 PetscReal ***friction_velocity; // u_tau (friction velocity field)
2651
2652 ierr = DMDAVecGetArray(user->fda, user->Ucat, &velocity_cartesian); CHKERRQ(ierr);
2653 ierr = DMDAVecGetArray(user->fda, user->Ucont, &velocity_contravariant); CHKERRQ(ierr);
2654 ierr = DMDAVecGetArray(user->fda, user->Bcs.Ubcs, &velocity_boundary); CHKERRQ(ierr);
2655 ierr = DMDAVecGetArrayRead(user->fda, user->lCsi, (const Cmpnts***)&csi); CHKERRQ(ierr);
2656 ierr = DMDAVecGetArrayRead(user->fda, user->lEta, (const Cmpnts***)&eta); CHKERRQ(ierr);
2657 ierr = DMDAVecGetArrayRead(user->fda, user->lZet, (const Cmpnts***)&zet); CHKERRQ(ierr);
2658 ierr = DMDAVecGetArrayRead(user->da, user->lNvert, (const PetscReal***)&node_vertex_flag); CHKERRQ(ierr);
2659 ierr = DMDAVecGetArrayRead(user->da, user->lAj, (const PetscReal***)&cell_jacobian); CHKERRQ(ierr);
2660 ierr = DMDAVecGetArray(user->da, user->lFriction_Velocity, &friction_velocity); CHKERRQ(ierr);
2661
2662 // =========================================================================
2663 // STEP 2: Define loop bounds (owned portion of the grid for this MPI rank)
2664 // =========================================================================
2665 PetscInt grid_start_i = info->xs, grid_end_i = info->xs + info->xm;
2666 PetscInt grid_start_j = info->ys, grid_end_j = info->ys + info->ym;
2667 PetscInt grid_start_k = info->zs, grid_end_k = info->zs + info->zm;
2668 PetscInt grid_size_i = info->mx, grid_size_j = info->my, grid_size_k = info->mz;
2669
2670 // Shrunken loop bounds: exclude domain edges and corners to avoid double-counting
2671 PetscInt loop_start_i = grid_start_i, loop_end_i = grid_end_i;
2672 PetscInt loop_start_j = grid_start_j, loop_end_j = grid_end_j;
2673 PetscInt loop_start_k = grid_start_k, loop_end_k = grid_end_k;
2674
2675 if (grid_start_i == 0) loop_start_i = grid_start_i + 1;
2676 if (grid_end_i == grid_size_i) loop_end_i = grid_end_i - 1;
2677 if (grid_start_j == 0) loop_start_j = grid_start_j + 1;
2678 if (grid_end_j == grid_size_j) loop_end_j = grid_end_j - 1;
2679 if (grid_start_k == 0) loop_start_k = grid_start_k + 1;
2680 if (grid_end_k == grid_size_k) loop_end_k = grid_end_k - 1;
2681
2682 // Wall roughness parameter (currently smooth wall)
2683 const PetscReal wall_roughness_height = 1.e-16;
2684
2685 // =========================================================================
2686 // STEP 3: Process each of the 6 domain faces
2687 // =========================================================================
2688 for (int face_index = 0; face_index < 6; face_index++) {
2689 BCFace current_face_id = (BCFace)face_index;
2690 BoundaryFaceConfig *face_config = &user->boundary_faces[current_face_id];
2691
2692 // Only process faces that are mathematical walls (applies to no-slip, moving, slip, etc.)
2693 if (face_config->mathematical_type != WALL) {
2694 continue;
2695 }
2696
2697 // Check if this MPI rank owns part of this face
2698 PetscBool rank_owns_this_face;
2699 ierr = CanRankServiceFace(info, user->IM, user->JM, user->KM,
2700 current_face_id, &rank_owns_this_face); CHKERRQ(ierr);
2701
2702 if (!rank_owns_this_face) {
2703 continue;
2704 }
2705
2706 LOG_ALLOW(LOCAL, LOG_TRACE, "Processing Face %d (%s)\n",
2707 current_face_id, BCFaceToString(current_face_id));
2708
2709 // =====================================================================
2710 // Process each face with appropriate indexing
2711 // =====================================================================
2712 switch(current_face_id) { // no default: the loop over face_index covers exactly the six enumerated faces
2713
2714 // =================================================================
2715 // NEGATIVE X FACE (i = 0, normal points in +X direction)
2716 // =================================================================
2717 case BC_FACE_NEG_X: {
2718 if (grid_start_i == 0) {
2719 const PetscInt ghost_cell_index = grid_start_i;
2720 const PetscInt first_interior_cell = grid_start_i + 1;
2721 const PetscInt second_interior_cell = grid_start_i + 2;
2722
2723 for (PetscInt k = loop_start_k; k < loop_end_k; k++) {
2724 for (PetscInt j = loop_start_j; j < loop_end_j; j++) {
2725
2726 // Skip if this is a solid cell (embedded boundary)
2727 if (node_vertex_flag[k][j][first_interior_cell] < 0.1) { // fluid cell only; nvert >= 0.1 marks solid
2728
2729 // Calculate face area from contravariant metric tensor
2730 PetscReal face_area = sqrt(
2731 csi[k][j][ghost_cell_index].x * csi[k][j][ghost_cell_index].x +
2732 csi[k][j][ghost_cell_index].y * csi[k][j][ghost_cell_index].y +
2733 csi[k][j][ghost_cell_index].z * csi[k][j][ghost_cell_index].z
2734 );
2735
2736 // Compute wall-normal distances using cell Jacobians
2737 // sb = distance from wall to first interior cell center
2738 // sc = distance from wall to second interior cell center
2739 PetscReal distance_to_first_cell = 0.5 / cell_jacobian[k][j][first_interior_cell] / face_area;
2740 PetscReal distance_to_second_cell = 2.0 * distance_to_first_cell +
2741 0.5 / cell_jacobian[k][j][second_interior_cell] / face_area;
2742
2743 // Compute unit normal vector pointing INTO the domain
2744 PetscReal wall_normal[3];
2745 wall_normal[0] = csi[k][j][ghost_cell_index].x / face_area;
2746 wall_normal[1] = csi[k][j][ghost_cell_index].y / face_area;
2747 wall_normal[2] = csi[k][j][ghost_cell_index].z / face_area;
2748
2749 // Define velocities for wall function calculation
2750 Cmpnts wall_velocity; // Ua = velocity at wall (zero for stationary wall)
2751 Cmpnts reference_velocity; // Uc = velocity at second interior cell
2752
2753 wall_velocity.x = wall_velocity.y = wall_velocity.z = 0.0;
2754 reference_velocity = velocity_cartesian[k][j][second_interior_cell];
2755
2756 // Step 1: Linear interpolation (provides initial guess)
2757 noslip(user, distance_to_second_cell, distance_to_first_cell,
2758 wall_velocity, reference_velocity,
2759 &velocity_cartesian[k][j][first_interior_cell],
2760 wall_normal[0], wall_normal[1], wall_normal[2]);
2761
2762 // Step 2: Apply log-law correction (improves near-wall velocity)
2763 wall_function_loglaw(user, wall_roughness_height,
2764 distance_to_second_cell, distance_to_first_cell,
2765 wall_velocity, reference_velocity,
2766 &velocity_cartesian[k][j][first_interior_cell],
2767 &friction_velocity[k][j][first_interior_cell],
2768 wall_normal[0], wall_normal[1], wall_normal[2]);
2769
2770 // Ensure ghost cell BC remains zero (required for proper extrapolation)
2771 velocity_boundary[k][j][ghost_cell_index].x = 0.0;
2772 velocity_boundary[k][j][ghost_cell_index].y = 0.0;
2773 velocity_boundary[k][j][ghost_cell_index].z = 0.0;
2774 velocity_contravariant[k][j][ghost_cell_index].x = 0.0; // zero the i-flux component at the -X boundary face
2775 }
2776 }
2777 }
2778 }
2779 } break;
2780
2781 // =================================================================
2782 // POSITIVE X FACE (i = mx-1, normal points in -X direction)
2783 // =================================================================
2784 case BC_FACE_POS_X: {
2785 if (grid_end_i == grid_size_i) {
2786 const PetscInt ghost_cell_index = grid_end_i - 1;
2787 const PetscInt first_interior_cell = grid_end_i - 2;
2788 const PetscInt second_interior_cell = grid_end_i - 3;
2789
2790 for (PetscInt k = loop_start_k; k < loop_end_k; k++) {
2791 for (PetscInt j = loop_start_j; j < loop_end_j; j++) {
2792
2793 if (node_vertex_flag[k][j][first_interior_cell] < 0.1) {
2794
2795 PetscReal face_area = sqrt( // metrics sampled at i = mx-2, unlike the ghost index used on the -X face
2796 csi[k][j][first_interior_cell].x * csi[k][j][first_interior_cell].x +
2797 csi[k][j][first_interior_cell].y * csi[k][j][first_interior_cell].y +
2798 csi[k][j][first_interior_cell].z * csi[k][j][first_interior_cell].z
2799 );
2800
2801 PetscReal distance_to_first_cell = 0.5 / cell_jacobian[k][j][first_interior_cell] / face_area;
2802 PetscReal distance_to_second_cell = 2.0 * distance_to_first_cell +
2803 0.5 / cell_jacobian[k][j][second_interior_cell] / face_area;
2804
2805 // Note: Normal flipped for +X face to point INTO domain
2806 PetscReal wall_normal[3];
2807 wall_normal[0] = -csi[k][j][first_interior_cell].x / face_area;
2808 wall_normal[1] = -csi[k][j][first_interior_cell].y / face_area;
2809 wall_normal[2] = -csi[k][j][first_interior_cell].z / face_area;
2810
2811 Cmpnts wall_velocity, reference_velocity;
2812 wall_velocity.x = wall_velocity.y = wall_velocity.z = 0.0;
2813 reference_velocity = velocity_cartesian[k][j][second_interior_cell];
2814
2815 noslip(user, distance_to_second_cell, distance_to_first_cell,
2816 wall_velocity, reference_velocity,
2817 &velocity_cartesian[k][j][first_interior_cell],
2818 wall_normal[0], wall_normal[1], wall_normal[2]);
2819
2820 wall_function_loglaw(user, wall_roughness_height,
2821 distance_to_second_cell, distance_to_first_cell,
2822 wall_velocity, reference_velocity,
2823 &velocity_cartesian[k][j][first_interior_cell],
2824 &friction_velocity[k][j][first_interior_cell],
2825 wall_normal[0], wall_normal[1], wall_normal[2]);
2826
2827 velocity_boundary[k][j][ghost_cell_index].x = 0.0;
2828 velocity_boundary[k][j][ghost_cell_index].y = 0.0;
2829 velocity_boundary[k][j][ghost_cell_index].z = 0.0;
2830 velocity_contravariant[k][j][first_interior_cell].x = 0.0; // NOTE(review): + faces zero ucont at the interior index, - faces at the ghost index — presumably the staggered-grid face location; confirm
2831 }
2832 }
2833 }
2834 }
2835 } break;
2836
2837 // =================================================================
2838 // NEGATIVE Y FACE (j = 0, normal points in +Y direction)
2839 // =================================================================
2840 case BC_FACE_NEG_Y: {
2841 if (grid_start_j == 0) {
2842 const PetscInt ghost_cell_index = grid_start_j;
2843 const PetscInt first_interior_cell = grid_start_j + 1;
2844 const PetscInt second_interior_cell = grid_start_j + 2;
2845
2846 for (PetscInt k = loop_start_k; k < loop_end_k; k++) {
2847 for (PetscInt i = loop_start_i; i < loop_end_i; i++) {
2848
2849 if (node_vertex_flag[k][first_interior_cell][i] < 0.1) {
2850
2851 PetscReal face_area = sqrt(
2852 eta[k][ghost_cell_index][i].x * eta[k][ghost_cell_index][i].x +
2853 eta[k][ghost_cell_index][i].y * eta[k][ghost_cell_index][i].y +
2854 eta[k][ghost_cell_index][i].z * eta[k][ghost_cell_index][i].z
2855 );
2856
2857 PetscReal distance_to_first_cell = 0.5 / cell_jacobian[k][first_interior_cell][i] / face_area;
2858 PetscReal distance_to_second_cell = 2.0 * distance_to_first_cell +
2859 0.5 / cell_jacobian[k][second_interior_cell][i] / face_area;
2860
2861 PetscReal wall_normal[3];
2862 wall_normal[0] = eta[k][ghost_cell_index][i].x / face_area;
2863 wall_normal[1] = eta[k][ghost_cell_index][i].y / face_area;
2864 wall_normal[2] = eta[k][ghost_cell_index][i].z / face_area;
2865
2866 Cmpnts wall_velocity, reference_velocity;
2867 wall_velocity.x = wall_velocity.y = wall_velocity.z = 0.0;
2868 reference_velocity = velocity_cartesian[k][second_interior_cell][i];
2869
2870 noslip(user, distance_to_second_cell, distance_to_first_cell,
2871 wall_velocity, reference_velocity,
2872 &velocity_cartesian[k][first_interior_cell][i],
2873 wall_normal[0], wall_normal[1], wall_normal[2]);
2874
2875 wall_function_loglaw(user, wall_roughness_height,
2876 distance_to_second_cell, distance_to_first_cell,
2877 wall_velocity, reference_velocity,
2878 &velocity_cartesian[k][first_interior_cell][i],
2879 &friction_velocity[k][first_interior_cell][i],
2880 wall_normal[0], wall_normal[1], wall_normal[2]);
2881
2882 velocity_boundary[k][ghost_cell_index][i].x = 0.0;
2883 velocity_boundary[k][ghost_cell_index][i].y = 0.0;
2884 velocity_boundary[k][ghost_cell_index][i].z = 0.0;
2885 velocity_contravariant[k][ghost_cell_index][i].y = 0.0; // zero the j-flux component at the -Y boundary face
2886 }
2887 }
2888 }
2889 }
2890 } break;
2891
2892 // =================================================================
2893 // POSITIVE Y FACE (j = my-1, normal points in -Y direction)
2894 // =================================================================
2895 case BC_FACE_POS_Y: {
2896 if (grid_end_j == grid_size_j) {
2897 const PetscInt ghost_cell_index = grid_end_j - 1;
2898 const PetscInt first_interior_cell = grid_end_j - 2;
2899 const PetscInt second_interior_cell = grid_end_j - 3;
2900
2901 for (PetscInt k = loop_start_k; k < loop_end_k; k++) {
2902 for (PetscInt i = loop_start_i; i < loop_end_i; i++) {
2903
2904 if (node_vertex_flag[k][first_interior_cell][i] < 0.1) {
2905
2906 PetscReal face_area = sqrt(
2907 eta[k][first_interior_cell][i].x * eta[k][first_interior_cell][i].x +
2908 eta[k][first_interior_cell][i].y * eta[k][first_interior_cell][i].y +
2909 eta[k][first_interior_cell][i].z * eta[k][first_interior_cell][i].z
2910 );
2911
2912 PetscReal distance_to_first_cell = 0.5 / cell_jacobian[k][first_interior_cell][i] / face_area;
2913 PetscReal distance_to_second_cell = 2.0 * distance_to_first_cell +
2914 0.5 / cell_jacobian[k][second_interior_cell][i] / face_area;
2915
2916 PetscReal wall_normal[3];
2917 wall_normal[0] = -eta[k][first_interior_cell][i].x / face_area;
2918 wall_normal[1] = -eta[k][first_interior_cell][i].y / face_area;
2919 wall_normal[2] = -eta[k][first_interior_cell][i].z / face_area;
2920
2921 Cmpnts wall_velocity, reference_velocity;
2922 wall_velocity.x = wall_velocity.y = wall_velocity.z = 0.0;
2923 reference_velocity = velocity_cartesian[k][second_interior_cell][i];
2924
2925 noslip(user, distance_to_second_cell, distance_to_first_cell,
2926 wall_velocity, reference_velocity,
2927 &velocity_cartesian[k][first_interior_cell][i],
2928 wall_normal[0], wall_normal[1], wall_normal[2]);
2929
2930 wall_function_loglaw(user, wall_roughness_height,
2931 distance_to_second_cell, distance_to_first_cell,
2932 wall_velocity, reference_velocity,
2933 &velocity_cartesian[k][first_interior_cell][i],
2934 &friction_velocity[k][first_interior_cell][i],
2935 wall_normal[0], wall_normal[1], wall_normal[2]);
2936
2937 velocity_boundary[k][ghost_cell_index][i].x = 0.0;
2938 velocity_boundary[k][ghost_cell_index][i].y = 0.0;
2939 velocity_boundary[k][ghost_cell_index][i].z = 0.0;
2940 velocity_contravariant[k][first_interior_cell][i].y = 0.0;
2941 }
2942 }
2943 }
2944 }
2945 } break;
2946
2947 // =================================================================
2948 // NEGATIVE Z FACE (k = 0, normal points in +Z direction)
2949 // =================================================================
2950 case BC_FACE_NEG_Z: {
2951 if (grid_start_k == 0) {
2952 const PetscInt ghost_cell_index = grid_start_k;
2953 const PetscInt first_interior_cell = grid_start_k + 1;
2954 const PetscInt second_interior_cell = grid_start_k + 2;
2955
2956 for (PetscInt j = loop_start_j; j < loop_end_j; j++) {
2957 for (PetscInt i = loop_start_i; i < loop_end_i; i++) {
2958
2959 if (node_vertex_flag[first_interior_cell][j][i] < 0.1) {
2960
2961 PetscReal face_area = sqrt(
2962 zet[ghost_cell_index][j][i].x * zet[ghost_cell_index][j][i].x +
2963 zet[ghost_cell_index][j][i].y * zet[ghost_cell_index][j][i].y +
2964 zet[ghost_cell_index][j][i].z * zet[ghost_cell_index][j][i].z
2965 );
2966
2967 PetscReal distance_to_first_cell = 0.5 / cell_jacobian[first_interior_cell][j][i] / face_area;
2968 PetscReal distance_to_second_cell = 2.0 * distance_to_first_cell +
2969 0.5 / cell_jacobian[second_interior_cell][j][i] / face_area;
2970
2971 PetscReal wall_normal[3];
2972 wall_normal[0] = zet[ghost_cell_index][j][i].x / face_area;
2973 wall_normal[1] = zet[ghost_cell_index][j][i].y / face_area;
2974 wall_normal[2] = zet[ghost_cell_index][j][i].z / face_area;
2975
2976 Cmpnts wall_velocity, reference_velocity;
2977 wall_velocity.x = wall_velocity.y = wall_velocity.z = 0.0;
2978 reference_velocity = velocity_cartesian[second_interior_cell][j][i];
2979
2980 noslip(user, distance_to_second_cell, distance_to_first_cell,
2981 wall_velocity, reference_velocity,
2982 &velocity_cartesian[first_interior_cell][j][i],
2983 wall_normal[0], wall_normal[1], wall_normal[2]);
2984
2985 wall_function_loglaw(user, wall_roughness_height,
2986 distance_to_second_cell, distance_to_first_cell,
2987 wall_velocity, reference_velocity,
2988 &velocity_cartesian[first_interior_cell][j][i],
2989 &friction_velocity[first_interior_cell][j][i],
2990 wall_normal[0], wall_normal[1], wall_normal[2]);
2991
2992 velocity_boundary[ghost_cell_index][j][i].x = 0.0;
2993 velocity_boundary[ghost_cell_index][j][i].y = 0.0;
2994 velocity_boundary[ghost_cell_index][j][i].z = 0.0;
2995 velocity_contravariant[ghost_cell_index][j][i].z = 0.0; // zero the k-flux component at the -Z boundary face
2996 }
2997 }
2998 }
2999 }
3000 } break;
3001
3002 // =================================================================
3003 // POSITIVE Z FACE (k = mz-1, normal points in -Z direction)
3004 // =================================================================
3005 case BC_FACE_POS_Z: {
3006 if (grid_end_k == grid_size_k) {
3007 const PetscInt ghost_cell_index = grid_end_k - 1;
3008 const PetscInt first_interior_cell = grid_end_k - 2;
3009 const PetscInt second_interior_cell = grid_end_k - 3;
3010
3011 for (PetscInt j = loop_start_j; j < loop_end_j; j++) {
3012 for (PetscInt i = loop_start_i; i < loop_end_i; i++) {
3013
3014 if (node_vertex_flag[first_interior_cell][j][i] < 0.1) {
3015
3016 PetscReal face_area = sqrt(
3017 zet[first_interior_cell][j][i].x * zet[first_interior_cell][j][i].x +
3018 zet[first_interior_cell][j][i].y * zet[first_interior_cell][j][i].y +
3019 zet[first_interior_cell][j][i].z * zet[first_interior_cell][j][i].z
3020 );
3021
3022 PetscReal distance_to_first_cell = 0.5 / cell_jacobian[first_interior_cell][j][i] / face_area;
3023 PetscReal distance_to_second_cell = 2.0 * distance_to_first_cell +
3024 0.5 / cell_jacobian[second_interior_cell][j][i] / face_area;
3025
3026 PetscReal wall_normal[3];
3027 wall_normal[0] = -zet[first_interior_cell][j][i].x / face_area;
3028 wall_normal[1] = -zet[first_interior_cell][j][i].y / face_area;
3029 wall_normal[2] = -zet[first_interior_cell][j][i].z / face_area;
3030
3031 Cmpnts wall_velocity, reference_velocity;
3032 wall_velocity.x = wall_velocity.y = wall_velocity.z = 0.0;
3033 reference_velocity = velocity_cartesian[second_interior_cell][j][i];
3034
3035 noslip(user, distance_to_second_cell, distance_to_first_cell,
3036 wall_velocity, reference_velocity,
3037 &velocity_cartesian[first_interior_cell][j][i],
3038 wall_normal[0], wall_normal[1], wall_normal[2]);
3039
3040 wall_function_loglaw(user, wall_roughness_height,
3041 distance_to_second_cell, distance_to_first_cell,
3042 wall_velocity, reference_velocity,
3043 &velocity_cartesian[first_interior_cell][j][i],
3044 &friction_velocity[first_interior_cell][j][i],
3045 wall_normal[0], wall_normal[1], wall_normal[2]);
3046
3047 velocity_boundary[ghost_cell_index][j][i].x = 0.0;
3048 velocity_boundary[ghost_cell_index][j][i].y = 0.0;
3049 velocity_boundary[ghost_cell_index][j][i].z = 0.0;
3050 velocity_contravariant[first_interior_cell][j][i].z = 0.0;
3051 }
3052 }
3053 }
3054 }
3055 } break;
3056 }
3057 }
3058
3059 // =========================================================================
3060 // STEP 4: Restore all arrays and release memory
3061 // =========================================================================
3062 ierr = DMDAVecRestoreArray(user->fda, user->Ucat, &velocity_cartesian); CHKERRQ(ierr);
3063 ierr = DMDAVecRestoreArray(user->fda, user->Ucont, &velocity_contravariant); CHKERRQ(ierr);
3064 ierr = DMDAVecRestoreArray(user->fda, user->Bcs.Ubcs, &velocity_boundary); CHKERRQ(ierr);
3065 ierr = DMDAVecRestoreArrayRead(user->fda, user->lCsi, (const Cmpnts***)&csi); CHKERRQ(ierr);
3066 ierr = DMDAVecRestoreArrayRead(user->fda, user->lEta, (const Cmpnts***)&eta); CHKERRQ(ierr);
3067 ierr = DMDAVecRestoreArrayRead(user->fda, user->lZet, (const Cmpnts***)&zet); CHKERRQ(ierr);
3068 ierr = DMDAVecRestoreArrayRead(user->da, user->lNvert, (const PetscReal***)&node_vertex_flag); CHKERRQ(ierr);
3069 ierr = DMDAVecRestoreArrayRead(user->da, user->lAj, (const PetscReal***)&cell_jacobian); CHKERRQ(ierr);
3070 ierr = DMDAVecRestoreArray(user->da, user->lFriction_Velocity, &friction_velocity); CHKERRQ(ierr);
3071
3072 LOG_ALLOW(LOCAL, LOG_DEBUG, "Complete.\n");
3073
3074 PetscFunctionReturn(0);
3075}
PetscErrorCode CanRankServiceFace(const DMDALocalInfo *info, PetscInt IM_nodes_global, PetscInt JM_nodes_global, PetscInt KM_nodes_global, BCFace face_id, PetscBool *can_service_out)
Determines if the current MPI rank owns any part of a specified global face.
Definition Boundaries.c:151
Vec lFriction_Velocity
Definition variables.h:750
@ WALL
Definition variables.h:213
PetscInt KM
Definition variables.h:737
PetscInt JM
Definition variables.h:737
PetscInt wallfunction
Definition variables.h:670
PetscInt IM
Definition variables.h:737
void wall_function_loglaw(UserCtx *user, double roughness_height, double distance_reference, double distance_boundary, Cmpnts velocity_wall, Cmpnts velocity_reference, Cmpnts *velocity_boundary, PetscReal *friction_velocity, double normal_x, double normal_y, double normal_z)
Applies log-law wall function with roughness correction.
void noslip(UserCtx *user, double distance_reference, double distance_boundary, Cmpnts velocity_wall, Cmpnts velocity_reference, Cmpnts *velocity_boundary, double normal_x, double normal_y, double normal_z)
Applies no-slip wall boundary condition with linear interpolation.
Here is the call graph for this function:
Here is the caller graph for this function:

◆ RefreshBoundaryGhostCells()

PetscErrorCode RefreshBoundaryGhostCells ( UserCtx user)

(Public) Orchestrates the "light" refresh of all boundary ghost cells after the projection step.

This function is the correct and complete replacement for the role that GhostNodeVelocity played when called from within the Projection function. Its purpose is to ensure that all ghost cells for ucat and p are made consistent with the final, divergence-free interior velocity field computed by the projection step.

This function is fundamentally different from ApplyBoundaryConditions because it does NOT modify the physical flux field (ucont) and does NOT apply physical models like the wall function. It is a purely geometric and data-consistency operation.

WORKFLOW:

  1. Calls the lightweight BoundarySystem_RefreshUbcs() engine. This re-calculates the ubcs target values ONLY for flow-dependent boundary conditions (like Symmetry or Outlets) using the newly updated interior ucat field.
  2. Calls the geometric updaters (UpdateDummyCells, ApplyPeriodicBCs, UpdateCornerNodes) in the correct, dependency-aware order to fill in all ghost cell values based on the now fully-refreshed ubcs targets.
  3. Performs a final synchronization of local PETSc vectors to ensure all MPI ranks are consistent before proceeding to the next time step.
Parameters
userThe main UserCtx struct, containing all simulation state.
Returns
PetscErrorCode 0 on success.

Definition at line 3106 of file Boundaries.c.

3107{
3108 PetscErrorCode ierr;
3109 PetscFunctionBeginUser;
3111
3112 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Starting post-projection refresh of boundary ghost cells.\n");
3113
3114 // -------------------------------------------------------------------------
3115 // STEP 1: Refresh Flow-Dependent Boundary Value Targets (`ubcs`)
3116 // -------------------------------------------------------------------------
3117 // This is the "physics" part of the refresh. It calls the lightweight engine
3118 // to update `ubcs` for any BCs (like Symmetry) that depend on the now-corrected
3119 // interior velocity field. This step does NOT touch `ucont`.
3120 ierr = BoundarySystem_RefreshUbcs(user); CHKERRQ(ierr);
3121 LOG_ALLOW(GLOBAL, LOG_VERBOSE, " `ubcs` targets refreshed.\n");
3122
3123 ierr = UpdateLocalGhosts(user,"Ucat");
3124
3125 // STEP 1.5: Apply Wall function if applicable.
3126 if(user->simCtx->wallfunction){
3127
3128 ierr = ApplyWallFunction(user); CHKERRQ(ierr);
3129
3130 }
3131 // -------------------------------------------------------------------------
3132 // STEP 2: Apply Geometric Ghost Cell Updates
3133 // -------------------------------------------------------------------------
3134 // With `ubcs` now fully up-to-date, we execute the purely geometric
3135 // operations to fill the entire ghost cell layer. The order is important.
3136
3137 // (a) Update the ghost cells on the faces of non-periodic boundaries.
3138 ierr = UpdateDummyCells(user); CHKERRQ(ierr);
3139 LOG_ALLOW(GLOBAL, LOG_VERBOSE, " Face ghost cells (dummy cells) updated.\n");
3140
3141 // (b) Handle periodic boundaries first. This is a direct data copy.
3142 // Ghost Update is done inside this function.s
3143 ierr = ApplyPeriodicBCs(user); CHKERRQ(ierr);
3144 LOG_ALLOW(GLOBAL, LOG_VERBOSE, " Periodic boundaries synchronized.\n");
3145
3146 // (c) Update the ghost cells at the edges and corners by averaging.
3147 ierr = UpdateCornerNodes(user); CHKERRQ(ierr);
3148 LOG_ALLOW(GLOBAL, LOG_VERBOSE, " Edge and corner ghost cells updated.\n");
3149
3150 // (d) Synchronize Ucat after setting corner nodes.
3151 ierr = UpdateLocalGhosts(user, "Ucat"); CHKERRQ(ierr);
3152
3153 // (e) Update the corners for periodic conditions sequentially
3154 // ensuring no race conditions are raised.
3155 const char* ucat_only[] = {"Ucat"};
3156 ierr = UpdatePeriodicCornerNodes(user,1,ucat_only);CHKERRQ(ierr);
3157
3158 LOG_ALLOW(GLOBAL, LOG_DEBUG, "Boundary ghost cell refresh complete.\n");
3160 PetscFunctionReturn(0);
3161}
PetscErrorCode ApplyPeriodicBCs(UserCtx *user)
Applies periodic boundary conditions by copying data across domain boundaries for all relevant fields...
PetscErrorCode ApplyWallFunction(UserCtx *user)
Applies wall function modeling to near-wall velocities for all wall-type boundaries.
PetscErrorCode UpdateDummyCells(UserCtx *user)
Updates the dummy cells (ghost nodes) on the faces of the local domain for NON-PERIODIC boundaries.
PetscErrorCode BoundarySystem_RefreshUbcs(UserCtx *user)
(Private) A lightweight execution engine that calls the UpdateUbcs() method on all relevant handlers.
PetscErrorCode UpdatePeriodicCornerNodes(UserCtx *user, PetscInt num_fields, const char *field_names[])
(Orchestrator) Performs a sequential, deterministic periodic update for a list of fields.
PetscErrorCode UpdateCornerNodes(UserCtx *user)
Updates the corner and edge ghost nodes of the local domain by averaging.
Here is the call graph for this function:
Here is the caller graph for this function:

◆ ApplyBoundaryConditions()

PetscErrorCode ApplyBoundaryConditions ( UserCtx user)

Main master function to apply all boundary conditions for a time step.

This function orchestrates the entire boundary condition workflow in a specific, dependency-aware order:

  1. Iterate Non-Periodic BCs: It enters an iterative loop to solve for the non-periodic boundary conditions. This allows complex, coupled conditions (like mass-conserving outlets that depend on inlet fluxes) to converge.
    • BoundarySystem_ExecuteStep calculates fluxes and sets boundary values (ubcs).
    • UpdateDummyCells uses these values to update the ghost cells.
  2. Apply Periodic BCs: Within each iteration, it then handles all periodic boundaries. This is a direct, non-iterative data copy that establishes the "wrap-around" state for all relevant fields. This provides a fixed constraint for the subsequent steps.
  3. Update Corner Nodes: After the loop, UpdateCornerNodes is called to resolve the values at ghost cell edges and corners, using the now-final values from both the periodic and non-periodic faces.
  4. Final Ghost Synchronization: Finally, it performs a global-to-local update on all key fields to ensure that every processor's ghost-cell data is fully consistent before the main solver proceeds.
Parameters
userThe main UserCtx struct containing the complete simulation state.
Returns
PetscErrorCode 0 on success.

Definition at line 3192 of file Boundaries.c.

3193{
3194 PetscErrorCode ierr;
3195 PetscFunctionBeginUser;
3197
3198 LOG_ALLOW(GLOBAL,LOG_TRACE,"Boundary Condition Application begins.\n");
3199
3200 // STEP 1: Main iteration loop for applying and converging non-periodic BCs.
3201 // The number of iterations (e.g., 3) allows information to propagate
3202 // between coupled boundaries, like an inlet and a conserving outlet.
3203 for (PetscInt iter = 0; iter < 3; iter++) {
3204 // (a) Execute the boundary system. This phase calculates fluxes across
3205 // the domain and then applies the physical logic for each non-periodic
3206 // handler, setting the `ubcs` (boundary value) array.
3207 ierr = BoundarySystem_ExecuteStep(user); CHKERRQ(ierr);
3208
3209 LOG_ALLOW(GLOBAL,LOG_VERBOSE,"Boundary Condition Setup Executed.\n");
3210
3211 // (b) Synchronize the updated ghost cells across all processors to ensure
3212 // all ucont values are current before updating the dummy cells.
3213 ierr = UpdateLocalGhosts(user, "Ucont"); CHKERRQ(ierr);
3214
3215 // (c) Convert updated Contravariant velocities to Cartesian velocities.
3216 ierr = Contra2Cart(user); CHKERRQ(ierr);
3217
3218 // (d) Synchronize the updated Cartesian velocities across all processors
3219 // to ensure all ucat values are current before updating the dummy cells.
3220 ierr = UpdateLocalGhosts(user, "Ucat"); CHKERRQ(ierr);
3221
3222 // (e) If Wall functions are enabled, apply them now to adjust near-wall velocities.
3223 if(user->simCtx->wallfunction){
3224 // Apply wall function adjustments to the boundary velocities.
3225 ierr = ApplyWallFunction(user); CHKERRQ(ierr);
3226
3227 // Synchronize the updated Cartesian velocities after wall function adjustments.
3228 ierr = UpdateLocalGhosts(user, "Ucat"); CHKERRQ(ierr);
3229
3230 LOG_ALLOW(GLOBAL,LOG_VERBOSE,"Wall Function Applied at Walls.\n");
3231 }
3232
3233 // (f) Update the first layer of ghost cells for non-periodic faces using
3234 // the newly computed `ubcs` values.
3235 ierr = UpdateDummyCells(user); CHKERRQ(ierr);
3236
3237 LOG_ALLOW(GLOBAL,LOG_VERBOSE,"Dummy Cells/Ghost Cells Updated.\n");
3238
3239 // (g) Handle all periodic boundaries. This is a parallel direct copy
3240 // that sets the absolute constraints for the rest of the solve.
3241 // There is a Ghost update happening inside this function.
3242 ierr = ApplyPeriodicBCs(user); CHKERRQ(ierr);
3243
3244 // (h) Update the corner and edge ghost nodes. This routine calculates
3245 // values for corners/edges by averaging their neighbors, which have been
3246 // finalized in the steps above (both periodic and non-periodic).
3247 ierr = UpdateCornerNodes(user); CHKERRQ(ierr);
3248
3249 // (i) Synchronize the updated edge and corner cells across all processors to ensure
3250 // consistency before the next iteration or finalization.
3251 ierr = UpdateLocalGhosts(user, "P"); CHKERRQ(ierr);
3252 ierr = UpdateLocalGhosts(user, "Ucat"); CHKERRQ(ierr);
3253 ierr = UpdateLocalGhosts(user, "Ucont"); CHKERRQ(ierr);
3254
3255 // (j) Ensure All the corners are synchronized with a well defined protocol in case of Periodic boundary conditions
3256 // To avoid race conditions.
3257 const char* all_fields[] = {"Ucat", "P", "Nvert"};
3258 ierr = UpdatePeriodicCornerNodes(user,3,all_fields); CHKERRQ(ierr);
3259
3260 }
3261
3262 // STEP 3: Final ghost node synchronization. This ensures all changes made
3263 // to the global vectors are reflected in the local ghost regions of all
3264 // processors, making the state fully consistent before the next solver stage.
3265 ierr = UpdateLocalGhosts(user, "P"); CHKERRQ(ierr);
3266 ierr = UpdateLocalGhosts(user, "Ucat"); CHKERRQ(ierr);
3267 ierr = UpdateLocalGhosts(user, "Ucont"); CHKERRQ(ierr);
3268
3270 PetscFunctionReturn(0);
3271}
PetscErrorCode BoundarySystem_ExecuteStep(UserCtx *user)
Executes all boundary condition handlers in priority order.
PetscErrorCode Contra2Cart(UserCtx *user)
Reconstructs Cartesian velocity (Ucat) at cell centers from contravariant velocity (Ucont) defined on...
Definition setup.c:2177
Here is the call graph for this function:
Here is the caller graph for this function: