PICurv 0.1.0
A Parallel Particle-In-Cell Solver for Curvilinear LES
postprocessing_kernels.c
#include <strings.h>    /* strcasecmp */
#include "logging.h"    /* LOG_ALLOW, GLOBAL, LOCAL, LOG_INFO, LOG_DEBUG */
#include "variables.h"  /* UserCtx, Cmpnts, and the solver's DM/Vec fields */
/**
 * @brief Computes node-centered data by averaging the 8 surrounding cell-centered values,
 * exactly replicating the legacy code's indexing and boundary handling.
 *
 * This kernel uses a stencil that averages the 8 cells from the corner (i,j,k) to
 * (i+1, j+1, k+1) and stores the result at the output node (i,j,k), matching the
 * legacy code's behavior. It operates on the full range of output nodes needed by
 * the subsampled grid, preventing zero-padding at the boundaries.
 *
 * @param user           The UserCtx, providing access to DMs and Vecs.
 * @param in_field_name  The string name of the source Vec (e.g., "P", "Ucat").
 * @param out_field_name The string name of the destination Vec (e.g., "P_nodal").
 * @return PetscErrorCode
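 *
 * Typical usage (illustrative sketch; field names are those registered below):
 * @code
 *   ierr = ComputeNodalAverage(user, "P",    "P_nodal");    CHKERRQ(ierr);
 *   ierr = ComputeNodalAverage(user, "Ucat", "Ucat_nodal"); CHKERRQ(ierr);
 * @endcode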
 */
PetscErrorCode ComputeNodalAverage(UserCtx* user, const char* in_field_name, const char* out_field_name)
{
    PetscErrorCode ierr;
    Vec            in_vec_local = NULL, out_vec_global = NULL;
    DM             dm_in = NULL, dm_out = NULL;
    PetscInt       dof = 0;

    PetscFunctionBeginUser;
    LOG_ALLOW(GLOBAL, LOG_INFO, "-> KERNEL: Running ComputeNodalAverage on '%s' -> '%s'.\n", in_field_name, out_field_name);

    // --- 1. Map string names to PETSc objects ---
    if      (strcasecmp(in_field_name, "P")    == 0) { in_vec_local = user->lP;    dm_in = user->da;  dof = 1; }
    else if (strcasecmp(in_field_name, "Ucat") == 0) { in_vec_local = user->lUcat; dm_in = user->fda; dof = 3; }
    // ... (add other fields as needed) ...
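    //     For instance, a scalar mask field could map as (hypothetical entry):
    //     else if (strcasecmp(in_field_name, "Nvert") == 0) { in_vec_local = user->lNvert; dm_in = user->da; dof = 1; }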
    else SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_ARG_WRONG, "Unknown input field name for nodal averaging: %s", in_field_name);

    if      (strcasecmp(out_field_name, "P_nodal")    == 0) { out_vec_global = user->P_nodal;    dm_out = user->da;  }
    else if (strcasecmp(out_field_name, "Ucat_nodal") == 0) { out_vec_global = user->Ucat_nodal; dm_out = user->fda; }
    // ... (add other fields as needed) ...
    else SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_ARG_WRONG, "Unknown output field name for nodal averaging: %s", out_field_name);

    // --- 2. Ensure Input Data Ghosts are Up-to-Date ---
    ierr = UpdateLocalGhosts(user, in_field_name); CHKERRQ(ierr);

    // --- 3. Get DMDA info and array pointers ---
    DMDALocalInfo info;
    ierr = DMDAGetLocalInfo(dm_out, &info); CHKERRQ(ierr);

    // Loop bounds over the output NODE locations: exclude the final node only
    // at the GLOBAL domain edge. At subdomain interfaces the stencil's
    // (i+1, j+1, k+1) cells are ghost values, so each rank can compute all of
    // its owned nodes; the resulting range matches the subsampled output grid.
    PetscInt i_end = (info.xs + info.xm == info.mx) ? info.mx - 1 : info.xs + info.xm;
    PetscInt j_end = (info.ys + info.ym == info.my) ? info.my - 1 : info.ys + info.ym;
    PetscInt k_end = (info.zs + info.zm == info.mz) ? info.mz - 1 : info.zs + info.zm;

    if (dof == 1) { // --- Scalar Field Averaging ---
        const PetscReal ***l_in_arr;
        PetscReal       ***g_out_arr;
        ierr = DMDAVecGetArrayRead(dm_in, in_vec_local, (void*)&l_in_arr); CHKERRQ(ierr);
        ierr = DMDAVecGetArray(dm_out, out_vec_global, (void*)&g_out_arr); CHKERRQ(ierr);

        for (PetscInt k = info.zs; k < k_end; k++) {
            for (PetscInt j = info.ys; j < j_end; j++) {
                for (PetscInt i = info.xs; i < i_end; i++) {
                    g_out_arr[k][j][i] = 0.125 * (l_in_arr[k][j][i]     + l_in_arr[k][j][i+1] +
                                                  l_in_arr[k][j+1][i]   + l_in_arr[k][j+1][i+1] +
                                                  l_in_arr[k+1][j][i]   + l_in_arr[k+1][j][i+1] +
                                                  l_in_arr[k+1][j+1][i] + l_in_arr[k+1][j+1][i+1]);
                }
            }
        }
        ierr = DMDAVecRestoreArrayRead(dm_in, in_vec_local, (void*)&l_in_arr); CHKERRQ(ierr);
        ierr = DMDAVecRestoreArray(dm_out, out_vec_global, (void*)&g_out_arr); CHKERRQ(ierr);

    } else if (dof == 3) { // --- Vector Field Averaging ---
        const Cmpnts ***l_in_arr;
        Cmpnts       ***g_out_arr;
        ierr = DMDAVecGetArrayRead(dm_in, in_vec_local, (void*)&l_in_arr); CHKERRQ(ierr);
        ierr = DMDAVecGetArray(dm_out, out_vec_global, (void*)&g_out_arr); CHKERRQ(ierr);

        for (PetscInt k = info.zs; k < k_end; k++) {
            for (PetscInt j = info.ys; j < j_end; j++) {
                for (PetscInt i = info.xs; i < i_end; i++) {
                    g_out_arr[k][j][i].x = 0.125 * (l_in_arr[k][j][i].x     + l_in_arr[k][j][i+1].x +
                                                    l_in_arr[k][j+1][i].x   + l_in_arr[k][j+1][i+1].x +
                                                    l_in_arr[k+1][j][i].x   + l_in_arr[k+1][j][i+1].x +
                                                    l_in_arr[k+1][j+1][i].x + l_in_arr[k+1][j+1][i+1].x);

                    g_out_arr[k][j][i].y = 0.125 * (l_in_arr[k][j][i].y     + l_in_arr[k][j][i+1].y +
                                                    l_in_arr[k][j+1][i].y   + l_in_arr[k][j+1][i+1].y +
                                                    l_in_arr[k+1][j][i].y   + l_in_arr[k+1][j][i+1].y +
                                                    l_in_arr[k+1][j+1][i].y + l_in_arr[k+1][j+1][i+1].y);

                    g_out_arr[k][j][i].z = 0.125 * (l_in_arr[k][j][i].z     + l_in_arr[k][j][i+1].z +
                                                    l_in_arr[k][j+1][i].z   + l_in_arr[k][j+1][i+1].z +
                                                    l_in_arr[k+1][j][i].z   + l_in_arr[k+1][j][i+1].z +
                                                    l_in_arr[k+1][j+1][i].z + l_in_arr[k+1][j+1][i+1].z);
                }
            }
        }
        ierr = DMDAVecRestoreArrayRead(dm_in, in_vec_local, (void*)&l_in_arr); CHKERRQ(ierr);
        ierr = DMDAVecRestoreArray(dm_out, out_vec_global, (void*)&g_out_arr); CHKERRQ(ierr);
    }
    PetscFunctionReturn(0);
}

/**
 * @brief Computes the Q-Criterion, a scalar field used to identify vortex cores.
 *
 * This function is self-contained. It ensures all its required input data
 * (Ucat and grid metrics) have up-to-date ghost values before proceeding with
 * the calculation. The result is stored in the global vector user->Qcrit.
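 *
 * The quantity computed is the standard Q-criterion,
 * \f$ Q = \tfrac{1}{2}\left(\|\Omega\|^2 - \|S\|^2\right) \f$,
 * where \f$S\f$ and \f$\Omega\f$ are the symmetric (strain-rate) and
 * antisymmetric (vorticity) parts of the velocity-gradient tensor; Q > 0
 * marks regions where rotation dominates strain.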
 *
 * @param user The UserCtx, providing access to all necessary data.
 * @return PetscErrorCode
 */
PetscErrorCode ComputeQCriterion(UserCtx* user)
{
    PetscErrorCode  ierr;
    DMDALocalInfo   info;
    const Cmpnts    ***lucat, ***lcsi, ***leta, ***lzet;
    const PetscReal ***laj, ***lnvert;
    PetscReal       ***gq;

    PetscFunctionBeginUser;
    LOG_ALLOW(GLOBAL, LOG_INFO, "-> KERNEL: Running ComputeQCriterion.\n");

    // --- 1. Ensure all required ghost values are up-to-date ---
    ierr = UpdateLocalGhosts(user, "Ucat");  CHKERRQ(ierr);
    ierr = UpdateLocalGhosts(user, "Csi");   CHKERRQ(ierr);
    ierr = UpdateLocalGhosts(user, "Eta");   CHKERRQ(ierr);
    ierr = UpdateLocalGhosts(user, "Zet");   CHKERRQ(ierr);
    ierr = UpdateLocalGhosts(user, "Aj");    CHKERRQ(ierr);
    ierr = UpdateLocalGhosts(user, "Nvert"); CHKERRQ(ierr);

    // --- 2. Get DMDA info and array pointers ---
    ierr = DMDAGetLocalInfo(user->da, &info); CHKERRQ(ierr);

    ierr = DMDAVecGetArrayRead(user->fda, user->lUcat,  (void*)&lucat);  CHKERRQ(ierr);
    ierr = DMDAVecGetArrayRead(user->fda, user->lCsi,   (void*)&lcsi);   CHKERRQ(ierr);
    ierr = DMDAVecGetArrayRead(user->fda, user->lEta,   (void*)&leta);   CHKERRQ(ierr);
    ierr = DMDAVecGetArrayRead(user->fda, user->lZet,   (void*)&lzet);   CHKERRQ(ierr);
    ierr = DMDAVecGetArrayRead(user->da,  user->lAj,    (void*)&laj);    CHKERRQ(ierr);
    ierr = DMDAVecGetArrayRead(user->da,  user->lNvert, (void*)&lnvert); CHKERRQ(ierr);
    ierr = DMDAVecGetArray(user->da, user->Qcrit, (void*)&gq); CHKERRQ(ierr);

    // --- 3. Define Loop Bounds for INTERIOR Cells ---
    PetscInt i_start = (info.xs == 0) ? 1 : info.xs;
    PetscInt i_end   = (info.xs + info.xm == info.mx) ? info.mx - 1 : info.xs + info.xm;
    PetscInt j_start = (info.ys == 0) ? 1 : info.ys;
    PetscInt j_end   = (info.ys + info.ym == info.my) ? info.my - 1 : info.ys + info.ym;
    PetscInt k_start = (info.zs == 0) ? 1 : info.zs;
    PetscInt k_end   = (info.zs + info.zm == info.mz) ? info.mz - 1 : info.zs + info.zm;
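    // The first and last points in each GLOBAL direction are skipped because
    // the central differences below reference (i-1, i+1) and their j/k
    // analogues; at interior subdomain boundaries the ghost layers cover the
    // stencil, so no owned points are lost there.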

    // --- 4. Main Computation Loop ---
    for (PetscInt k = k_start; k < k_end; k++) {
        for (PetscInt j = j_start; j < j_end; j++) {
            for (PetscInt i = i_start; i < i_end; i++) {

                // Velocity derivatives in computational space (central differences)
                PetscReal uc = 0.5 * (lucat[k][j][i+1].x - lucat[k][j][i-1].x);
                PetscReal vc = 0.5 * (lucat[k][j][i+1].y - lucat[k][j][i-1].y);
                PetscReal wc = 0.5 * (lucat[k][j][i+1].z - lucat[k][j][i-1].z);

                PetscReal ue = 0.5 * (lucat[k][j+1][i].x - lucat[k][j-1][i].x);
                PetscReal ve = 0.5 * (lucat[k][j+1][i].y - lucat[k][j-1][i].y);
                PetscReal we = 0.5 * (lucat[k][j+1][i].z - lucat[k][j-1][i].z);

                PetscReal uz = 0.5 * (lucat[k+1][j][i].x - lucat[k-1][j][i].x);
                PetscReal vz = 0.5 * (lucat[k+1][j][i].y - lucat[k-1][j][i].y);
                PetscReal wz = 0.5 * (lucat[k+1][j][i].z - lucat[k-1][j][i].z);

                // Average face metrics to the cell center and scale by aj
                PetscReal csi1 = 0.5 * (lcsi[k][j][i].x + lcsi[k][j][i-1].x) * laj[k][j][i];
                PetscReal csi2 = 0.5 * (lcsi[k][j][i].y + lcsi[k][j][i-1].y) * laj[k][j][i];
                PetscReal csi3 = 0.5 * (lcsi[k][j][i].z + lcsi[k][j][i-1].z) * laj[k][j][i];

                PetscReal eta1 = 0.5 * (leta[k][j][i].x + leta[k][j-1][i].x) * laj[k][j][i];
                PetscReal eta2 = 0.5 * (leta[k][j][i].y + leta[k][j-1][i].y) * laj[k][j][i];
                PetscReal eta3 = 0.5 * (leta[k][j][i].z + leta[k][j-1][i].z) * laj[k][j][i];

                PetscReal zet1 = 0.5 * (lzet[k][j][i].x + lzet[k-1][j][i].x) * laj[k][j][i];
                PetscReal zet2 = 0.5 * (lzet[k][j][i].y + lzet[k-1][j][i].y) * laj[k][j][i];
                PetscReal zet3 = 0.5 * (lzet[k][j][i].z + lzet[k-1][j][i].z) * laj[k][j][i];
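                // Chain rule: du_i/dx_j = (du_i/dcsi) csi_j + (du_i/deta) eta_j
                //                       + (du_i/dzet) zet_j, using the scaled
                // metric terms assembled above.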
                // Velocity-gradient tensor components d_ij = du_i/dx_j
                PetscReal d11 = uc * csi1 + ue * eta1 + uz * zet1;
                PetscReal d12 = uc * csi2 + ue * eta2 + uz * zet2;
                PetscReal d13 = uc * csi3 + ue * eta3 + uz * zet3;

                PetscReal d21 = vc * csi1 + ve * eta1 + vz * zet1;
                PetscReal d22 = vc * csi2 + ve * eta2 + vz * zet2;
                PetscReal d23 = vc * csi3 + ve * eta3 + vz * zet3;

                PetscReal d31 = wc * csi1 + we * eta1 + wz * zet1;
                PetscReal d32 = wc * csi2 + we * eta2 + wz * zet2;
                PetscReal d33 = wc * csi3 + we * eta3 + wz * zet3;

                // Strain-rate tensor S_ij = 0.5 * (d_ij + d_ji)
                PetscReal s11 = d11;
                PetscReal s12 = 0.5 * (d12 + d21);
                PetscReal s13 = 0.5 * (d13 + d31);
                PetscReal s22 = d22;
                PetscReal s23 = 0.5 * (d23 + d32);
                PetscReal s33 = d33;

                // Vorticity tensor Omega_ij = 0.5 * (d_ij - d_ji)
                PetscReal w12 = 0.5 * (d12 - d21);
                PetscReal w13 = 0.5 * (d13 - d31);
                PetscReal w23 = 0.5 * (d23 - d32);

                // Squared Frobenius norms of the two tensors
                PetscReal s_norm_sq = s11*s11 + s22*s22 + s33*s33 + 2.0*(s12*s12 + s13*s13 + s23*s23);
                PetscReal w_norm_sq = 2.0 * (w12*w12 + w13*w13 + w23*w23);

                gq[k][j][i] = 0.5 * (w_norm_sq - s_norm_sq);

                // Blank Q inside solid/immersed regions flagged by Nvert
                if (lnvert[k][j][i] > 0.1) {
                    gq[k][j][i] = 0.0;
                }
            }
        }
    }

    // --- 5. Restore arrays ---
    ierr = DMDAVecRestoreArrayRead(user->fda, user->lUcat,  (void*)&lucat);  CHKERRQ(ierr);
    ierr = DMDAVecRestoreArrayRead(user->fda, user->lCsi,   (void*)&lcsi);   CHKERRQ(ierr);
    ierr = DMDAVecRestoreArrayRead(user->fda, user->lEta,   (void*)&leta);   CHKERRQ(ierr);
    ierr = DMDAVecRestoreArrayRead(user->fda, user->lZet,   (void*)&lzet);   CHKERRQ(ierr);
    ierr = DMDAVecRestoreArrayRead(user->da,  user->lAj,    (void*)&laj);    CHKERRQ(ierr);
    ierr = DMDAVecRestoreArrayRead(user->da,  user->lNvert, (void*)&lnvert); CHKERRQ(ierr);
    ierr = DMDAVecRestoreArray(user->da, user->Qcrit, (void*)&gq); CHKERRQ(ierr);

    PetscFunctionReturn(0);
}

/**
 * @brief Normalizes a relative field by subtracting a reference value.
 *
 * This kernel samples the field at a reference grid point (ip, jp, kp) and
 * subtracts that value from the entire field. The reference point is
 * configurable via command-line options (-ip, -jp, -kp). The operation is
 * performed in-place on the provided field vector.
 *
 * @param user                The UserCtx, providing access to DMs and Vecs.
 * @param relative_field_name The string name of the field Vec to normalize (e.g., "P").
 * @return PetscErrorCode
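 *
 * Typical usage (illustrative sketch):
 * @code
 *   // Reference point selected on the command line: -ip 1 -jp 1 -kp 1
 *   ierr = NormalizeRelativeField(user, "P"); CHKERRQ(ierr);
 * @endcode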
 */
PetscErrorCode NormalizeRelativeField(UserCtx* user, const char* relative_field_name)
{
    PetscErrorCode ierr;
    Vec            P_vec = NULL;
    PetscMPIInt    rank;
    PetscInt       ip = 1, jp = 1, kp = 1; // Default reference point
    PetscReal      p_ref = 0.0;
    PetscInt       ref_point_global_idx[1];
    PetscScalar    ref_value_local[1];
    IS             is_from, is_to;
    VecScatter     scatter_ctx;
    Vec            ref_vec_seq;

    PetscFunctionBeginUser;
    LOG_ALLOW(GLOBAL, LOG_INFO, "-> KERNEL: Running NormalizeRelativeField on '%s'.\n", relative_field_name);

    // --- 1. Map string argument to the PETSc Vec ---
    if (strcasecmp(relative_field_name, "P") == 0) {
        P_vec = user->P;
    } else {
        SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "NormalizeRelativeField currently supports only the primary 'P' field, not '%s'.", relative_field_name);
    }

    // --- 2. Get reference point from options and calculate its global DA index ---
    ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-ip", &ip, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-jp", &jp, NULL); CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL, NULL, "-kp", &kp, NULL); CHKERRQ(ierr);

    // Convert the (i,j,k) logical grid coordinates to the global 1D index used by the DMDA vector
    ref_point_global_idx[0] = kp * (user->IM * user->JM) + jp * user->IM + ip;
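    // Worked example (hypothetical sizes): with IM = JM = 64 and the default
    // reference point (ip, jp, kp) = (1, 1, 1), the flattened index is
    // 1*(64*64) + 1*64 + 1 = 4161.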

    // --- 3. Scatter the single reference value out of the distributed vector ---
    // Create an Index Set (IS) for the source point (from the global vector)
    ierr = ISCreateGeneral(PETSC_COMM_WORLD, 1, ref_point_global_idx, PETSC_COPY_VALUES, &is_from); CHKERRQ(ierr);

    // Create a sequential vector on each rank to hold the result
    ierr = VecCreateSeq(PETSC_COMM_SELF, 1, &ref_vec_seq); CHKERRQ(ierr);

    // Create an Index Set for the destination point (index 0 of the sequential vector)
    PetscInt dest_idx[1] = {0};
    ierr = ISCreateGeneral(PETSC_COMM_SELF, 1, dest_idx, PETSC_COPY_VALUES, &is_to); CHKERRQ(ierr);

    // Create the scatter context and perform the scatter
    ierr = VecScatterCreate(P_vec, is_from, ref_vec_seq, is_to, &scatter_ctx); CHKERRQ(ierr);
    ierr = VecScatterBegin(scatter_ctx, P_vec, ref_vec_seq, INSERT_VALUES, SCATTER_FORWARD); CHKERRQ(ierr);
    ierr = VecScatterEnd(scatter_ctx, P_vec, ref_vec_seq, INSERT_VALUES, SCATTER_FORWARD); CHKERRQ(ierr);

    // Read the value on rank 0; the broadcast below distributes it to all ranks.
    if (rank == 0) {
        ierr = VecGetValues(ref_vec_seq, 1, dest_idx, ref_value_local); CHKERRQ(ierr);
        p_ref = ref_value_local[0];
        LOG_ALLOW(LOCAL, LOG_DEBUG, "%s reference point (%" PetscInt_FMT ", %" PetscInt_FMT ", %" PetscInt_FMT ") has value %g.\n", relative_field_name, ip, jp, kp, p_ref);
    }

    // --- 4. Broadcast the reference value from rank 0 to all other processes ---
    ierr = MPI_Bcast(&p_ref, 1, MPIU_REAL, 0, PETSC_COMM_WORLD); CHKERRQ(ierr);

    // --- 5. Perform the normalization (in-place shift) on the full distributed vector ---
    ierr = VecShift(P_vec, -p_ref); CHKERRQ(ierr);
    LOG_ALLOW(GLOBAL, LOG_DEBUG, "%s field normalized by subtracting %g.\n", relative_field_name, p_ref);

    // --- 6. Cleanup ---
    ierr = ISDestroy(&is_from); CHKERRQ(ierr);
    ierr = ISDestroy(&is_to); CHKERRQ(ierr);
    ierr = VecScatterDestroy(&scatter_ctx); CHKERRQ(ierr);
    ierr = VecDestroy(&ref_vec_seq); CHKERRQ(ierr);

    PetscFunctionReturn(0);
}

// ===========================================================================
// Particle Post-Processing Kernels
// ===========================================================================

/**
 * @brief Computes the specific kinetic energy (KE per unit mass) for each particle.
 *
 * This kernel calculates SKE = 0.5 * |velocity|^2. It requires that the
 * particle velocity field exists, and it populates the specific kinetic
 * energy field. The output field must be registered before this kernel is
 * called.
 *
 * @param user           The UserCtx containing the DMSwarm.
 * @param velocity_field The name of the input vector field for particle velocity.
 * @param ske_field      The name of the output scalar field to store specific KE.
 * @return PetscErrorCode
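 *
 * Typical usage (illustrative sketch; the field names here are assumptions,
 * standing in for whatever was registered with the swarms):
 * @code
 *   ierr = ComputeSpecificKE(user, "velocity", "specific_ke"); CHKERRQ(ierr);
 * @endcode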
 */
PetscErrorCode ComputeSpecificKE(UserCtx* user, const char* velocity_field, const char* ske_field)
{
    PetscErrorCode ierr;
    PetscInt       n_local;
    const PetscScalar (*vel_arr)[3]; // Access velocity as an array of 3-component vectors
    PetscScalar    *ske_arr;

    PetscFunctionBeginUser;
    LOG_ALLOW(GLOBAL, LOG_INFO, "-> KERNEL: Running ComputeSpecificKE ('%s' -> '%s').\n", velocity_field, ske_field);

    // Get local data arrays from the DMSwarm
    ierr = DMSwarmGetLocalSize(user->swarm, &n_local); CHKERRQ(ierr);
    if (n_local == 0) PetscFunctionReturn(0);

    // Get read-only access to velocity and write access to the output field
    ierr = DMSwarmGetField(user->swarm, velocity_field, NULL, NULL, (void**)&vel_arr); CHKERRQ(ierr);
    ierr = DMSwarmGetField(user->post_swarm, ske_field, NULL, NULL, (void**)&ske_arr); CHKERRQ(ierr);

    // Main computation loop: SKE = 0.5 * (u^2 + v^2 + w^2)
    for (PetscInt p = 0; p < n_local; p++) {
        const PetscScalar u = vel_arr[p][0];
        const PetscScalar v = vel_arr[p][1];
        const PetscScalar w = vel_arr[p][2];
        const PetscScalar vel_sq = u*u + v*v + w*w;
        ske_arr[p] = 0.5 * vel_sq;
    }

    // Restore arrays
    ierr = DMSwarmRestoreField(user->swarm, velocity_field, NULL, NULL, (void**)&vel_arr); CHKERRQ(ierr);
    ierr = DMSwarmRestoreField(user->post_swarm, ske_field, NULL, NULL, (void**)&ske_arr); CHKERRQ(ierr);

    PetscFunctionReturn(0);
}