PICurv 0.1.0
A Parallel Particle-In-Cell Solver for Curvilinear LES
postprocessing_kernels.h File Reference
#include "variables.h"
#include "logging.h"
#include "io.h"
Include dependency graph for postprocessing_kernels.h and the graph of files that directly or indirectly include this file are omitted here.


Functions

PetscErrorCode ComputeNodalAverage (UserCtx *user, const char *in_field_name, const char *out_field_name)
 Computes node-centered data by averaging 8 surrounding cell-centered values, exactly replicating the legacy code's indexing and boundary handling.
 
PetscErrorCode ComputeQCriterion (UserCtx *user)
 Computes the Q-Criterion, a scalar value identifying vortex cores.
 
PetscErrorCode NormalizeRelativeField (UserCtx *user, const char *relative_field_name)
 Normalizes a relative field by subtracting a reference value.
 
PetscErrorCode DimensionalizeField (UserCtx *user, const char *field_name)
 Scales a specified field from non-dimensional to dimensional units in-place.
 
PetscErrorCode DimensionalizeAllLoadedFields (UserCtx *user)
 Orchestrates the dimensionalization of all relevant fields loaded from a file.
 
PetscErrorCode ComputeSpecificKE (UserCtx *user, const char *velocity_field, const char *ske_field)
 Computes the specific kinetic energy (KE per unit mass) for each particle.
 

Function Documentation

◆ ComputeNodalAverage()

PetscErrorCode ComputeNodalAverage (UserCtx *user, const char *in_field_name, const char *out_field_name)

Computes node-centered data by averaging 8 surrounding cell-centered values, exactly replicating the legacy code's indexing and boundary handling.

This kernel uses a stencil that averages the 8 cells from the corner (i,j,k) to (i+1, j+1, k+1) and stores the result at the output node (i,j,k). This matches the legacy code's behavior. It operates on the full range of output nodes necessary for the subsampled grid, preventing zero-padding at the boundaries.
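A minimal usage sketch (assuming the relevant Vecs, e.g. user->lP and user->P_nodal, have already been created and populated for the current time step):

    PetscErrorCode ierr;
    // Hedged usage sketch: average the cell-centered pressure to the nodes,
    // then do the same for the Cartesian velocity (dof is inferred from the name).
    ierr = ComputeNodalAverage(user, "P", "P_nodal"); CHKERRQ(ierr);
    ierr = ComputeNodalAverage(user, "Ucat", "Ucat_nodal"); CHKERRQ(ierr);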

Parameters
    user            The UserCtx, providing access to DMs and Vecs.
    in_field_name   The string name of the source Vec (e.g., "P", "Ucat").
    out_field_name  The string name of the destination Vec (e.g., "P_nodal").
Returns
PetscErrorCode

Definition at line 166 of file postprocessing_kernels.c.

{
    PetscErrorCode ierr;
    Vec in_vec_local = NULL, out_vec_global = NULL;
    DM dm_in = NULL, dm_out = NULL;
    PetscInt dof = 0;

    PetscFunctionBeginUser;
    PROFILE_FUNCTION_BEGIN;
    LOG_ALLOW(GLOBAL, LOG_INFO, "-> KERNEL: Running ComputeNodalAverage on '%s' -> '%s'.\n", in_field_name, out_field_name);

    // --- 1. Map string names to PETSc objects ---
    if (strcasecmp(in_field_name, "P") == 0) { in_vec_local = user->lP; dm_in = user->da; dof = 1; }
    else if (strcasecmp(in_field_name, "Ucat") == 0) { in_vec_local = user->lUcat; dm_in = user->fda; dof = 3; }
    else if (strcasecmp(in_field_name, "Psi") == 0) { in_vec_local = user->lPsi; dm_in = user->da; dof = 1; }
    // ... (add other fields as needed) ...
    else SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_ARG_WRONG, "Unknown input field name for nodal averaging: %s", in_field_name);

    if (strcasecmp(out_field_name, "P_nodal") == 0) { out_vec_global = user->P_nodal; dm_out = user->da; }
    else if (strcasecmp(out_field_name, "Ucat_nodal") == 0) { out_vec_global = user->Ucat_nodal; dm_out = user->fda; }
    else if (strcasecmp(out_field_name, "Psi_nodal") == 0) { out_vec_global = user->Psi_nodal; dm_out = user->da; }
    // ... (add other fields as needed) ...
    else SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_ARG_WRONG, "Unknown output field name for nodal averaging: %s", out_field_name);

    // --- 2. Ensure input-data ghosts are up-to-date ---
    ierr = UpdateLocalGhosts(user, in_field_name); CHKERRQ(ierr);

    // --- 3. Get DMDA info and array pointers ---
    DMDALocalInfo info;
    ierr = DMDAGetLocalInfo(dm_out, &info); CHKERRQ(ierr);

    if (dof == 1) { // --- Scalar Field Averaging ---
        const PetscReal ***l_in_arr;
        PetscReal ***g_out_arr;
        ierr = DMDAVecGetArrayRead(dm_in, in_vec_local, (void*)&l_in_arr); CHKERRQ(ierr);
        ierr = DMDAVecGetArray(dm_out, out_vec_global, (void*)&g_out_arr); CHKERRQ(ierr);

        // Loop over the output NODE locations. The loop bounds match the required
        // size of the final subsampled grid.
        for (PetscInt k = info.zs; k < info.zs + info.zm - 1; k++) {
            for (PetscInt j = info.ys; j < info.ys + info.ym - 1; j++) {
                for (PetscInt i = info.xs; i < info.xs + info.xm - 1; i++) {
                    g_out_arr[k][j][i] = 0.125 * (l_in_arr[k][j][i]     + l_in_arr[k][j][i+1]   +
                                                  l_in_arr[k][j+1][i]   + l_in_arr[k][j+1][i+1] +
                                                  l_in_arr[k+1][j][i]   + l_in_arr[k+1][j][i+1] +
                                                  l_in_arr[k+1][j+1][i] + l_in_arr[k+1][j+1][i+1]);
                }
            }
        }
        ierr = DMDAVecRestoreArrayRead(dm_in, in_vec_local, (void*)&l_in_arr); CHKERRQ(ierr);
        ierr = DMDAVecRestoreArray(dm_out, out_vec_global, (void*)&g_out_arr); CHKERRQ(ierr);

    } else if (dof == 3) { // --- Vector Field Averaging ---
        const Cmpnts ***l_in_arr;
        Cmpnts ***g_out_arr;
        ierr = DMDAVecGetArrayRead(dm_in, in_vec_local, (void*)&l_in_arr); CHKERRQ(ierr);
        ierr = DMDAVecGetArray(dm_out, out_vec_global, (void*)&g_out_arr); CHKERRQ(ierr);

        for (PetscInt k = info.zs; k < info.zs + info.zm - 1; k++) {
            for (PetscInt j = info.ys; j < info.ys + info.ym - 1; j++) {
                for (PetscInt i = info.xs; i < info.xs + info.xm - 1; i++) {
                    g_out_arr[k][j][i].x = 0.125 * (l_in_arr[k][j][i].x     + l_in_arr[k][j][i+1].x   +
                                                    l_in_arr[k][j+1][i].x   + l_in_arr[k][j+1][i+1].x +
                                                    l_in_arr[k+1][j][i].x   + l_in_arr[k+1][j][i+1].x +
                                                    l_in_arr[k+1][j+1][i].x + l_in_arr[k+1][j+1][i+1].x);

                    g_out_arr[k][j][i].y = 0.125 * (l_in_arr[k][j][i].y     + l_in_arr[k][j][i+1].y   +
                                                    l_in_arr[k][j+1][i].y   + l_in_arr[k][j+1][i+1].y +
                                                    l_in_arr[k+1][j][i].y   + l_in_arr[k+1][j][i+1].y +
                                                    l_in_arr[k+1][j+1][i].y + l_in_arr[k+1][j+1][i+1].y);

                    g_out_arr[k][j][i].z = 0.125 * (l_in_arr[k][j][i].z     + l_in_arr[k][j][i+1].z   +
                                                    l_in_arr[k][j+1][i].z   + l_in_arr[k][j+1][i+1].z +
                                                    l_in_arr[k+1][j][i].z   + l_in_arr[k+1][j][i+1].z +
                                                    l_in_arr[k+1][j+1][i].z + l_in_arr[k+1][j+1][i+1].z);
                }
            }
        }
        ierr = DMDAVecRestoreArrayRead(dm_in, in_vec_local, (void*)&l_in_arr); CHKERRQ(ierr);
        ierr = DMDAVecRestoreArray(dm_out, out_vec_global, (void*)&g_out_arr); CHKERRQ(ierr);
    }
    PROFILE_FUNCTION_END;
    PetscFunctionReturn(0);
}

◆ ComputeQCriterion()

PetscErrorCode ComputeQCriterion (UserCtx *user)

Computes the Q-Criterion, a scalar value identifying vortex cores.

This function is self-contained. It ensures all its required input data (Ucat and grid metrics) have up-to-date ghost values before proceeding with the calculation. The result is stored in the global vector user->Qcrit.
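For reference, the quantity assembled in the loop below is the standard Q-criterion. In LaTeX:

    Q = \tfrac{1}{2}\left(\lVert \Omega \rVert^2 - \lVert S \rVert^2\right), \qquad
    S_{ij} = \tfrac{1}{2}\left(\frac{\partial u_i}{\partial x_j} + \frac{\partial u_j}{\partial x_i}\right), \qquad
    \Omega_{ij} = \tfrac{1}{2}\left(\frac{\partial u_i}{\partial x_j} - \frac{\partial u_j}{\partial x_i}\right)

The velocity gradient is evaluated by central differences in computational space and transformed through the curvilinear metrics (Csi, Eta, Zet) scaled by the Jacobian (Aj); regions with Q > 0 are rotation-dominated and flag vortex cores.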

Parameters
    user    The UserCtx, providing access to all necessary data.
Returns
PetscErrorCode

Definition at line 264 of file postprocessing_kernels.c.

{
    PetscErrorCode ierr;
    DMDALocalInfo info;
    const Cmpnts ***lucat, ***lcsi, ***leta, ***lzet;
    const PetscReal ***laj, ***lnvert;
    PetscReal ***gq;

    PetscFunctionBeginUser;
    PROFILE_FUNCTION_BEGIN;
    LOG_ALLOW(GLOBAL, LOG_INFO, "-> KERNEL: Running ComputeQCriterion.\n");

    // --- 1. Ensure all required ghost values are up-to-date ---
    ierr = UpdateLocalGhosts(user, "Ucat"); CHKERRQ(ierr);
    ierr = UpdateLocalGhosts(user, "Csi"); CHKERRQ(ierr);
    ierr = UpdateLocalGhosts(user, "Eta"); CHKERRQ(ierr);
    ierr = UpdateLocalGhosts(user, "Zet"); CHKERRQ(ierr);
    ierr = UpdateLocalGhosts(user, "Aj"); CHKERRQ(ierr);
    ierr = UpdateLocalGhosts(user, "Nvert"); CHKERRQ(ierr);

    // --- 2. Get DMDA info and array pointers ---
    ierr = DMDAGetLocalInfo(user->da, &info); CHKERRQ(ierr);

    ierr = DMDAVecGetArrayRead(user->fda, user->lUcat, (void*)&lucat); CHKERRQ(ierr);
    ierr = DMDAVecGetArrayRead(user->fda, user->lCsi, (void*)&lcsi); CHKERRQ(ierr);
    ierr = DMDAVecGetArrayRead(user->fda, user->lEta, (void*)&leta); CHKERRQ(ierr);
    ierr = DMDAVecGetArrayRead(user->fda, user->lZet, (void*)&lzet); CHKERRQ(ierr);
    ierr = DMDAVecGetArrayRead(user->da, user->lAj, (void*)&laj); CHKERRQ(ierr);
    ierr = DMDAVecGetArrayRead(user->da, user->lNvert, (void*)&lnvert); CHKERRQ(ierr);
    ierr = DMDAVecGetArray(user->da, user->Qcrit, (void*)&gq); CHKERRQ(ierr);

    // --- 3. Define loop bounds for INTERIOR cells ---
    PetscInt i_start = (info.xs == 0) ? 1 : info.xs;
    PetscInt i_end = (info.xs + info.xm == info.mx) ? info.mx - 1 : info.xs + info.xm;
    PetscInt j_start = (info.ys == 0) ? 1 : info.ys;
    PetscInt j_end = (info.ys + info.ym == info.my) ? info.my - 1 : info.ys + info.ym;
    PetscInt k_start = (info.zs == 0) ? 1 : info.zs;
    PetscInt k_end = (info.zs + info.zm == info.mz) ? info.mz - 1 : info.zs + info.zm;

    // --- 4. Main computation loop ---
    for (PetscInt k = k_start; k < k_end; k++) {
        for (PetscInt j = j_start; j < j_end; j++) {
            for (PetscInt i = i_start; i < i_end; i++) {

                // Velocity derivatives in computational space (central differences)
                PetscReal uc = 0.5 * (lucat[k][j][i+1].x - lucat[k][j][i-1].x);
                PetscReal vc = 0.5 * (lucat[k][j][i+1].y - lucat[k][j][i-1].y);
                PetscReal wc = 0.5 * (lucat[k][j][i+1].z - lucat[k][j][i-1].z);

                PetscReal ue = 0.5 * (lucat[k][j+1][i].x - lucat[k][j-1][i].x);
                PetscReal ve = 0.5 * (lucat[k][j+1][i].y - lucat[k][j-1][i].y);
                PetscReal we = 0.5 * (lucat[k][j+1][i].z - lucat[k][j-1][i].z);

                PetscReal uz = 0.5 * (lucat[k+1][j][i].x - lucat[k-1][j][i].x);
                PetscReal vz = 0.5 * (lucat[k+1][j][i].y - lucat[k-1][j][i].y);
                PetscReal wz = 0.5 * (lucat[k+1][j][i].z - lucat[k-1][j][i].z);

                // Average the metrics to the cell center (scaled by the Jacobian)
                PetscReal csi1 = 0.5 * (lcsi[k][j][i].x + lcsi[k][j][i-1].x) * laj[k][j][i];
                PetscReal csi2 = 0.5 * (lcsi[k][j][i].y + lcsi[k][j][i-1].y) * laj[k][j][i];
                PetscReal csi3 = 0.5 * (lcsi[k][j][i].z + lcsi[k][j][i-1].z) * laj[k][j][i];

                PetscReal eta1 = 0.5 * (leta[k][j][i].x + leta[k][j-1][i].x) * laj[k][j][i];
                PetscReal eta2 = 0.5 * (leta[k][j][i].y + leta[k][j-1][i].y) * laj[k][j][i];
                PetscReal eta3 = 0.5 * (leta[k][j][i].z + leta[k][j-1][i].z) * laj[k][j][i];

                PetscReal zet1 = 0.5 * (lzet[k][j][i].x + lzet[k-1][j][i].x) * laj[k][j][i];
                PetscReal zet2 = 0.5 * (lzet[k][j][i].y + lzet[k-1][j][i].y) * laj[k][j][i];
                PetscReal zet3 = 0.5 * (lzet[k][j][i].z + lzet[k-1][j][i].z) * laj[k][j][i];

                // Velocity gradient tensor components d_ij = du_i/dx_j
                PetscReal d11 = uc * csi1 + ue * eta1 + uz * zet1;
                PetscReal d12 = uc * csi2 + ue * eta2 + uz * zet2;
                PetscReal d13 = uc * csi3 + ue * eta3 + uz * zet3;

                PetscReal d21 = vc * csi1 + ve * eta1 + vz * zet1;
                PetscReal d22 = vc * csi2 + ve * eta2 + vz * zet2;
                PetscReal d23 = vc * csi3 + ve * eta3 + vz * zet3;

                PetscReal d31 = wc * csi1 + we * eta1 + wz * zet1;
                PetscReal d32 = wc * csi2 + we * eta2 + wz * zet2;
                PetscReal d33 = wc * csi3 + we * eta3 + wz * zet3;

                // Strain-rate tensor S_ij = 0.5 * (d_ij + d_ji)
                PetscReal s11 = d11;
                PetscReal s12 = 0.5 * (d12 + d21);
                PetscReal s13 = 0.5 * (d13 + d31);
                PetscReal s22 = d22;
                PetscReal s23 = 0.5 * (d23 + d32);
                PetscReal s33 = d33;

                // Vorticity tensor Omega_ij = 0.5 * (d_ij - d_ji)
                PetscReal w12 = 0.5 * (d12 - d21);
                PetscReal w13 = 0.5 * (d13 - d31);
                PetscReal w23 = 0.5 * (d23 - d32);

                // Squared Frobenius norms of the two tensors
                PetscReal s_norm_sq = s11*s11 + s22*s22 + s33*s33 + 2.0*(s12*s12 + s13*s13 + s23*s23);
                PetscReal w_norm_sq = 2.0 * (w12*w12 + w13*w13 + w23*w23);

                gq[k][j][i] = 0.5 * (w_norm_sq - s_norm_sq);

                // Blank cells flagged by the Nvert field (e.g., inside immersed bodies)
                if (lnvert[k][j][i] > 0.1) {
                    gq[k][j][i] = 0.0;
                }
            }
        }
    }

    // --- 5. Restore arrays ---
    ierr = DMDAVecRestoreArrayRead(user->fda, user->lUcat, (void*)&lucat); CHKERRQ(ierr);
    ierr = DMDAVecRestoreArrayRead(user->fda, user->lCsi, (void*)&lcsi); CHKERRQ(ierr);
    ierr = DMDAVecRestoreArrayRead(user->fda, user->lEta, (void*)&leta); CHKERRQ(ierr);
    ierr = DMDAVecRestoreArrayRead(user->fda, user->lZet, (void*)&lzet); CHKERRQ(ierr);
    ierr = DMDAVecRestoreArrayRead(user->da, user->lAj, (void*)&laj); CHKERRQ(ierr);
    ierr = DMDAVecRestoreArrayRead(user->da, user->lNvert, (void*)&lnvert); CHKERRQ(ierr);
    ierr = DMDAVecRestoreArray(user->da, user->Qcrit, (void*)&gq); CHKERRQ(ierr);

    PROFILE_FUNCTION_END;
    PetscFunctionReturn(0);
}

◆ NormalizeRelativeField()

PetscErrorCode NormalizeRelativeField (UserCtx *user, const char *relative_field_name)

Normalizes a relative field by subtracting a reference value.

This kernel reads the field value at a specific grid point (ip, jp, kp) and subtracts that value from the entire field, making the field relative to that reference location. The reference point is taken from the post-processing parameters (pps->reference), typically set via the command-line options -ip, -jp, and -kp. The operation is performed in-place on the provided field vector.
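A minimal usage sketch (assuming "P" has been loaded for the current time step and the reference indices have been supplied, e.g. via -ip 1 -jp 1 -kp 1):

    // Hedged usage sketch: shift P so its value at the reference node becomes zero.
    PetscErrorCode ierr;
    ierr = NormalizeRelativeField(user, "P"); CHKERRQ(ierr);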

Parameters
    user                 The UserCtx, providing access to DMs and Vecs.
    relative_field_name  The string name of the field Vec to normalize (e.g., "P").
Returns
PetscErrorCode

Definition at line 400 of file postprocessing_kernels.c.

{
    PetscErrorCode ierr;
    Vec P_vec = NULL;
    PetscMPIInt rank;
    PetscInt ip = 1, jp = 1, kp = 1; // Default reference point
    PetscReal p_ref = 0.0;
    PetscInt ref_point_global_idx[1];
    PetscScalar ref_value_local[1];
    IS is_from, is_to;
    VecScatter scatter_ctx;
    Vec ref_vec_seq;
    PostProcessParams *pps = user->simCtx->pps;

    // Fetch the reference point from the post-processing parameters.
    ip = pps->reference[0];
    jp = pps->reference[1];
    kp = pps->reference[2];

    PetscFunctionBeginUser;
    PROFILE_FUNCTION_BEGIN;
    LOG_ALLOW(GLOBAL, LOG_INFO, "-> KERNEL: Running NormalizeRelativeField on '%s'.\n", relative_field_name);

    // --- 1. Map string argument to the PETSc Vec ---
    if (strcasecmp(relative_field_name, "P") == 0) {
        P_vec = user->P;
    } else {
        SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "NormalizeRelativeField currently supports only the primary 'P' field, not '%s'.", relative_field_name);
    }

    // --- 2. Get the reference point and calculate its global DA index ---
    ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);

    // Convert the (i,j,k) logical grid coordinates to the global 1D index used by the DMDA vector
    ref_point_global_idx[0] = kp * (user->IM * user->JM) + jp * user->IM + ip;

    // --- 3. Robustly scatter the single reference value to rank 0 ---
    // Create an Index Set (IS) for the source point (from the global vector)
    ierr = ISCreateGeneral(PETSC_COMM_WORLD, 1, ref_point_global_idx, PETSC_COPY_VALUES, &is_from); CHKERRQ(ierr);

    // Create a sequential vector to hold the result
    ierr = VecCreateSeq(PETSC_COMM_SELF, 1, &ref_vec_seq); CHKERRQ(ierr);

    // Create an Index Set for the destination point (index 0 of the new sequential vector)
    PetscInt dest_idx[1] = {0};
    ierr = ISCreateGeneral(PETSC_COMM_SELF, 1, dest_idx, PETSC_COPY_VALUES, &is_to); CHKERRQ(ierr);

    // Create the scatter context and perform the scatter
    ierr = VecScatterCreate(P_vec, is_from, ref_vec_seq, is_to, &scatter_ctx); CHKERRQ(ierr);
    ierr = VecScatterBegin(scatter_ctx, P_vec, ref_vec_seq, INSERT_VALUES, SCATTER_FORWARD); CHKERRQ(ierr);
    ierr = VecScatterEnd(scatter_ctx, P_vec, ref_vec_seq, INSERT_VALUES, SCATTER_FORWARD); CHKERRQ(ierr);

    // On rank 0, read the scattered value out of the sequential vector.
    if (rank == 0) {
        ierr = VecGetValues(ref_vec_seq, 1, dest_idx, ref_value_local); CHKERRQ(ierr);
        p_ref = ref_value_local[0];
        LOG_ALLOW(LOCAL, LOG_DEBUG, "%s reference point (%" PetscInt_FMT ", %" PetscInt_FMT ", %" PetscInt_FMT ") has value %g.\n", relative_field_name, ip, jp, kp, p_ref);
    }

    // --- 4. Broadcast the reference value from rank 0 to all other processes ---
    ierr = MPI_Bcast(&p_ref, 1, MPIU_REAL, 0, PETSC_COMM_WORLD); CHKERRQ(ierr);

    // --- 5. Perform the normalization (in-place shift) on the full distributed vector ---
    ierr = VecShift(P_vec, -p_ref); CHKERRQ(ierr);
    LOG_ALLOW(GLOBAL, LOG_DEBUG, "%s field normalized by subtracting %g.\n", relative_field_name, p_ref);

    // --- 6. Cleanup ---
    ierr = ISDestroy(&is_from); CHKERRQ(ierr);
    ierr = ISDestroy(&is_to); CHKERRQ(ierr);
    ierr = VecScatterDestroy(&scatter_ctx); CHKERRQ(ierr);
    ierr = VecDestroy(&ref_vec_seq); CHKERRQ(ierr);

    PROFILE_FUNCTION_END;
    PetscFunctionReturn(0);
}

◆ DimensionalizeField()

PetscErrorCode DimensionalizeField (UserCtx *user, const char *field_name)

Scales a specified field from non-dimensional to dimensional units in-place.

This function acts as a dispatcher. It takes the string name of a field, identifies the corresponding PETSc Vec object and the correct physical scaling factor (e.g., U_ref for velocity, P_ref for pressure), and then performs an in-place VecScale operation. It correctly handles the different physical dimensions of Cartesian velocity vs. contravariant volume flux.
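A minimal usage sketch (assuming simCtx->scaling has been populated with U_ref, L_ref, and P_ref for the case):

    PetscErrorCode ierr;
    // Hedged usage sketch: each call scales one field in-place.
    ierr = DimensionalizeField(user, "Ucat");  CHKERRQ(ierr); // multiplied by U_ref
    ierr = DimensionalizeField(user, "P");     CHKERRQ(ierr); // multiplied by P_ref
    ierr = DimensionalizeField(user, "Ucont"); CHKERRQ(ierr); // multiplied by U_ref * L_ref^2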

Parameters
    [in,out]  user        The UserCtx containing the PETSc Vecs to be modified.
    [in]      field_name  The case-insensitive string name of the field to dimensionalize (e.g., "Ucat", "P", "Ucont", "Coordinates", "ParticlePosition", "ParticleVelocity").
Returns
PetscErrorCode

Definition at line 20 of file postprocessing_kernels.c.

{
    PetscErrorCode ierr;
    SimCtx *simCtx;
    Vec target_vec = NULL;
    PetscReal scale_factor = 1.0;
    char field_type[64] = "Unknown";
    PetscBool is_swarm_field = PETSC_FALSE; // Flag for special swarm handling
    const char *swarm_field_name = NULL;    // Name of the field within the swarm

    PetscFunctionBeginUser;
    PROFILE_FUNCTION_BEGIN;
    if (!user) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "UserCtx is NULL.");
    if (!field_name) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "field_name is NULL.");
    simCtx = user->simCtx;

    // --- 1. Identify the target Vec and the correct scaling factor ---
    if (strcasecmp(field_name, "Ucat") == 0) {
        target_vec = user->Ucat;
        scale_factor = simCtx->scaling.U_ref;
        strcpy(field_type, "Cartesian Velocity (L/T)");
    } else if (strcasecmp(field_name, "Ucont") == 0) {
        target_vec = user->Ucont;
        scale_factor = simCtx->scaling.U_ref * simCtx->scaling.L_ref * simCtx->scaling.L_ref;
        strcpy(field_type, "Contravariant Volume Flux (L^3/T)");
    } else if (strcasecmp(field_name, "P") == 0) {
        target_vec = user->P;
        scale_factor = simCtx->scaling.P_ref;
        strcpy(field_type, "Pressure (M L^-1 T^-2)");
    } else if (strcasecmp(field_name, "Coordinates") == 0) {
        ierr = DMGetCoordinates(user->da, &target_vec); CHKERRQ(ierr);
        scale_factor = simCtx->scaling.L_ref;
        strcpy(field_type, "Grid Coordinates (L)");
    } else if (strcasecmp(field_name, "ParticlePosition") == 0) {
        is_swarm_field = PETSC_TRUE;
        swarm_field_name = "position";
        scale_factor = simCtx->scaling.L_ref;
        strcpy(field_type, "Particle Position (L)");
    } else if (strcasecmp(field_name, "ParticleVelocity") == 0) {
        is_swarm_field = PETSC_TRUE;
        swarm_field_name = "velocity";
        scale_factor = simCtx->scaling.U_ref;
        strcpy(field_type, "Particle Velocity (L/T)");
    } else {
        LOG(GLOBAL, LOG_WARNING, "DimensionalizeField: Unknown or unhandled field_name '%s'. Field will not be scaled.\n", field_name);
        PROFILE_FUNCTION_END;
        PetscFunctionReturn(0);
    }

    // --- 2. Check for trivial scaling ---
    if (PetscAbsReal(scale_factor - 1.0) < PETSC_MACHINE_EPSILON) {
        LOG(GLOBAL, LOG_DEBUG, "DimensionalizeField: Scaling factor for '%s' is 1.0. Skipping operation.\n", field_name);
        PROFILE_FUNCTION_END;
        PetscFunctionReturn(0);
    }

    // --- 3. Perform the in-place scaling operation ---
    LOG(GLOBAL, LOG_INFO, "Scaling '%s' field (%s) by factor %.4e.\n", field_name, field_type, scale_factor);

    if (is_swarm_field) {
        // Special handling for DMSwarm fields
        ierr = DMSwarmCreateGlobalVectorFromField(user->swarm, swarm_field_name, &target_vec); CHKERRQ(ierr);
        ierr = VecScale(target_vec, scale_factor); CHKERRQ(ierr);
        ierr = DMSwarmDestroyGlobalVectorFromField(user->swarm, swarm_field_name, &target_vec); CHKERRQ(ierr);
    } else {
        // Standard handling for PETSc Vecs
        if (target_vec) {
            ierr = VecScale(target_vec, scale_factor); CHKERRQ(ierr);
        } else {
            SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_ARG_WRONGSTATE, "Target vector for field '%s' was not found or is NULL.", field_name);
        }
    }

    // --- 4. Post-scaling updates for special cases ---
    if (strcasecmp(field_name, "Coordinates") == 0) {
        // Refresh the ghosted local coordinate vector after scaling the global one.
        Vec l_coords;
        ierr = DMGetCoordinates(user->da, &target_vec); CHKERRQ(ierr); // Re-get the global vector
        ierr = DMGetCoordinatesLocal(user->da, &l_coords); CHKERRQ(ierr);
        ierr = DMGlobalToLocalBegin(user->fda, target_vec, INSERT_VALUES, l_coords); CHKERRQ(ierr);
        ierr = DMGlobalToLocalEnd(user->fda, target_vec, INSERT_VALUES, l_coords); CHKERRQ(ierr);
    }

    PROFILE_FUNCTION_END;
    PetscFunctionReturn(0);
}

◆ DimensionalizeAllLoadedFields()

PetscErrorCode DimensionalizeAllLoadedFields (UserCtx *user)

Orchestrates the dimensionalization of all relevant fields loaded from a file.

This function is intended to be called in the post-processor immediately after all solver output has been read into memory. It calls DimensionalizeField() for each of the core physical quantities to convert the entire loaded state from non-dimensional to dimensional units, preparing it for analysis and visualization.
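A usage sketch in a post-processing driver; the loader name ReadSimulationFields and the time-step variable ti are illustrative, not part of this API:

    // Hedged sketch: load one saved time step, then dimensionalize everything
    // once, before any analysis kernels or output writers run.
    ierr = ReadSimulationFields(user, ti);      CHKERRQ(ierr); // hypothetical loader
    ierr = DimensionalizeAllLoadedFields(user); CHKERRQ(ierr);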

Parameters
    [in,out]  user  The UserCtx containing all the fields to be dimensionalized.
Returns
PetscErrorCode

Definition at line 118 of file postprocessing_kernels.c.

{
    PetscErrorCode ierr;
    SimCtx *simCtx = user->simCtx;

    PetscFunctionBeginUser;
    PROFILE_FUNCTION_BEGIN;

    LOG(GLOBAL, LOG_INFO, "--- Converting all loaded fields to dimensional units ---\n");

    // Scale the grid itself first
    ierr = DimensionalizeField(user, "Coordinates"); CHKERRQ(ierr);

    // Scale primary fluid fields
    ierr = DimensionalizeField(user, "Ucat"); CHKERRQ(ierr);
    ierr = DimensionalizeField(user, "Ucont"); CHKERRQ(ierr);
    ierr = DimensionalizeField(user, "P"); CHKERRQ(ierr);

    // If particles are present, scale their fields
    if (simCtx->np > 0 && user->swarm) {
        ierr = DimensionalizeField(user, "ParticlePosition"); CHKERRQ(ierr);
        ierr = DimensionalizeField(user, "ParticleVelocity"); CHKERRQ(ierr);
    }

    LOG(GLOBAL, LOG_INFO, "--- Field dimensionalization complete ---\n");

    PROFILE_FUNCTION_END;
    PetscFunctionReturn(0);
}

◆ ComputeSpecificKE()

PetscErrorCode ComputeSpecificKE (UserCtx *user, const char *velocity_field, const char *ske_field)

Computes the specific kinetic energy (KE per unit mass) for each particle.

This kernel calculates SKE = 0.5 * |velocity|^2. It requires that the velocity field exists and will populate the specific kinetic energy field. The output field must be registered before this kernel is called.
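A minimal sketch of the register-then-compute sequence (the field name "SKE" is illustrative; "velocity" matches the swarm field name used elsewhere in this file):

    // Hedged sketch: the output field must exist on user->post_swarm first.
    ierr = DMSwarmRegisterPetscDatatypeField(user->post_swarm, "SKE", 1, PETSC_SCALAR); CHKERRQ(ierr);
    ierr = DMSwarmFinalizeFieldRegister(user->post_swarm); CHKERRQ(ierr);
    // ... (populate particles, run other kernels) ...
    ierr = ComputeSpecificKE(user, "velocity", "SKE"); CHKERRQ(ierr);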

Parameters
    user            The UserCtx containing the DMSwarm.
    velocity_field  The name of the input vector field for particle velocity.
    ske_field       The name of the output scalar field to store specific KE.
Returns
PetscErrorCode

Definition at line 493 of file postprocessing_kernels.c.

{
    PetscErrorCode ierr;
    PetscInt n_local;
    const PetscScalar (*vel_arr)[3]; // Access velocity as an array of 3-component vectors
    PetscScalar *ske_arr;

    PetscFunctionBeginUser;
    PROFILE_FUNCTION_BEGIN;
    LOG_ALLOW(GLOBAL, LOG_INFO, "-> KERNEL: Running ComputeSpecificKE ('%s' -> '%s').\n", velocity_field, ske_field);

    // Get local data arrays from the DMSwarm
    ierr = DMSwarmGetLocalSize(user->swarm, &n_local); CHKERRQ(ierr);
    if (n_local == 0) {
        PROFILE_FUNCTION_END;
        PetscFunctionReturn(0);
    }

    // Get read-only access to velocity and write access to the output field
    ierr = DMSwarmGetField(user->swarm, velocity_field, NULL, NULL, (void**)&vel_arr); CHKERRQ(ierr);
    ierr = DMSwarmGetField(user->post_swarm, ske_field, NULL, NULL, (void**)&ske_arr); CHKERRQ(ierr);

    // Main computation loop: SKE = 0.5 * |u|^2 per particle
    for (PetscInt p = 0; p < n_local; p++) {
        const PetscScalar u = vel_arr[p][0];
        const PetscScalar v = vel_arr[p][1];
        const PetscScalar w = vel_arr[p][2];
        const PetscScalar vel_sq = u*u + v*v + w*w;
        ske_arr[p] = 0.5 * vel_sq;
    }

    // Restore arrays
    ierr = DMSwarmRestoreField(user->swarm, velocity_field, NULL, NULL, (void**)&vel_arr); CHKERRQ(ierr);
    ierr = DMSwarmRestoreField(user->post_swarm, ske_field, NULL, NULL, (void**)&ske_arr); CHKERRQ(ierr);

    PROFILE_FUNCTION_END;
    PetscFunctionReturn(0);
}