Neko 0.9.0
A portable framework for high-order spectral element flow simulations
lambda2_kernel.h
#ifndef __MATH_LAMBDA2_KERNEL_H__
#define __MATH_LAMBDA2_KERNEL_H__
/*
 Copyright (c) 2021-2023, The Neko Authors
 All rights reserved.

 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions
 are met:

   * Redistributions of source code must retain the above copyright
     notice, this list of conditions and the following disclaimer.

   * Redistributions in binary form must reproduce the above
     copyright notice, this list of conditions and the following
     disclaimer in the documentation and/or other materials provided
     with the distribution.

   * Neither the name of the authors nor the names of its
     contributors may be used to endorse or promote products derived
     from this software without specific prior written permission.

 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 POSSIBILITY OF SUCH DAMAGE.
*/

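/* Descriptive note (added for this listing): eigen_val_calc returns the
 * middle eigenvalue of A = S^2 + O^2, where S and O are the symmetric
 * (strain-rate) and antisymmetric (rotation) parts of the velocity
 * gradient passed in as grad11..grad33. The characteristic cubic of A
 * is solved with the trigonometric form of Cardano's formula and the
 * median root is returned; this is the quantity used by the lambda_2
 * vortex criterion. */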
template< typename T>
__inline__ __device__ T eigen_val_calc(T grad11, T grad12, T grad13,
                                        T grad21, T grad22, T grad23,
                                        T grad31, T grad32, T grad33) {
  T s11 = grad11;
  T s22 = grad22;
  T s33 = grad33;
  T s12 = 0.5*(grad12+grad21);
  T s13 = 0.5*(grad13+grad31);
  T s23 = 0.5*(grad23+grad32);

  T o12 = 0.5*(grad12-grad21);
  T o13 = 0.5*(grad13-grad31);
  T o23 = 0.5*(grad23-grad32);

  T a11 = s11*s11 + s12*s12 + s13*s13 - o12*o12 - o13*o13;
  T a12 = s11 * s12 + s12 * s22 + s13 * s23 - o13 * o23;
  T a13 = s11 * s13 + s12 * s23 + s13 * s33 + o12 * o23;

  T a22 = s12*s12 + s22*s22 + s23*s23 - o12*o12 - o23*o23;
  T a23 = s12 * s13 + s22 * s23 + s23 * s33 - o12 * o13;
  T a33 = s13*s13 + s23*s23 + s33*s33 - o13*o13 - o23*o23;

  T B = -(a11 + a22 + a33);
  T C = -(a12*a12 + a13*a13 + a23*a23 - a11 * a22 - a11 * a33 - a22 * a33);
  T D = -(2.0 * a12 * a13 * a23 - a11 * a23*a23 - a22 * a13*a13
          - a33 * a12*a12 + a11 * a22 * a33);

  T q = (3.0 * C - B*B) / 9.0;
  T r = (9.0 * C * B - 27.0 * D - 2.0 * B*B*B) / 54.0;

  T theta = acos( r / sqrt(-q*q*q) );
  T pi = 4.0*atan(1.0);
  T eigen1 = 2.0 * sqrt(-q) * cos(theta / 3.0) - B / 3.0;
  T eigen2 = 2.0 * sqrt(-q) * cos((theta + 2.0 * pi) / 3.0) - B / 3.0;
  T eigen3 = 2.0 * sqrt(-q) * cos((theta + 4.0 * pi) / 3.0) - B / 3.0;

  if (eigen1 <= eigen2 && eigen2 <= eigen3)
    return eigen2;
  else if (eigen3 <= eigen2 && eigen2 <= eigen1)
    return eigen2;
  else if (eigen1 <= eigen3 && eigen3 <= eigen2)
    return eigen3;
  else if (eigen2 <= eigen3 && eigen3 <= eigen1)
    return eigen3;
  else if (eigen2 <= eigen1 && eigen1 <= eigen3)
    return eigen1;
  else if (eigen3 <= eigen1 && eigen1 <= eigen2)
    return eigen1;
  return 0.0;
}

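/* Descriptive note (added for this listing): lambda2_kernel_1d computes
 * lambda_2 for one spectral element per thread block using a flat 1D
 * thread index. The element's velocity values and the 1D derivative
 * matrices dx, dy, dz are first staged in shared memory; each thread
 * then visits points of the element in strides of CHUNKS, forms the
 * local (r,s,t) derivatives of u, v, w, maps them to physical space
 * with the metric terms drdx..dtdz and the inverse Jacobian, and
 * stores the resulting eigenvalue. */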
template< typename T, const int LX, const int CHUNKS >
__global__ void lambda2_kernel_1d(T * __restrict__ lambda2,
                                  const T * __restrict__ u,
                                  const T * __restrict__ v,
                                  const T * __restrict__ w,
                                  const T * __restrict__ dx,
                                  const T * __restrict__ dy,
                                  const T * __restrict__ dz,
                                  const T * __restrict__ drdx,
                                  const T * __restrict__ dsdx,
                                  const T * __restrict__ dtdx,
                                  const T * __restrict__ drdy,
                                  const T * __restrict__ dsdy,
                                  const T * __restrict__ dtdy,
                                  const T * __restrict__ drdz,
                                  const T * __restrict__ dsdz,
                                  const T * __restrict__ dtdz,
                                  const T * __restrict__ jacinv) {

  __shared__ T shu[LX * LX * LX];
  __shared__ T shv[LX * LX * LX];
  __shared__ T shw[LX * LX * LX];

  __shared__ T shdx[LX * LX];
  __shared__ T shdy[LX * LX];
  __shared__ T shdz[LX * LX];

  int i,j,k;

  const int e = blockIdx.x;
  const int ele = blockIdx.x*LX*LX*LX;
  const int iii = threadIdx.x;
  const int nchunks = (LX * LX * LX - 1) / CHUNKS + 1;

  if (iii < (LX * LX)) {
    shdx[iii] = dx[iii];
    shdy[iii] = dy[iii];
    shdz[iii] = dz[iii];
  }

  j = iii;
  while(j < (LX * LX * LX)) {
    shu[j] = u[j + ele];
    shv[j] = v[j + ele];
    shw[j] = w[j + ele];
    j = j + CHUNKS;
  }

  __syncthreads();

  for (int n = 0; n < nchunks; n++) {
    const int ijk = iii + n * CHUNKS;
    const int jk = ijk / LX;
    i = ijk - jk * LX;
    k = jk / LX;
    j = jk - k * LX;
    if ( i < LX && j < LX && k < LX ) {
      T rtmpu = 0.0;
      T stmpu = 0.0;
      T ttmpu = 0.0;

      T rtmpv = 0.0;
      T stmpv = 0.0;
      T ttmpv = 0.0;

      T rtmpw = 0.0;
      T stmpw = 0.0;
      T ttmpw = 0.0;

      for (int l = 0; l < LX; l++) {
        rtmpu += shdx[i + l * LX] * shu[l + j * LX + k * LX * LX];
        stmpu += shdy[j + l * LX] * shu[i + l * LX + k * LX * LX];
        ttmpu += shdz[k + l * LX] * shu[i + j * LX + l * LX * LX];

        rtmpv += shdx[i + l * LX] * shv[l + j * LX + k * LX * LX];
        stmpv += shdy[j + l * LX] * shv[i + l * LX + k * LX * LX];
        ttmpv += shdz[k + l * LX] * shv[i + j * LX + l * LX * LX];

        rtmpw += shdx[i + l * LX] * shw[l + j * LX + k * LX * LX];
        stmpw += shdy[j + l * LX] * shw[i + l * LX + k * LX * LX];
        ttmpw += shdz[k + l * LX] * shw[i + j * LX + l * LX * LX];
      }

      T jinv = jacinv[ijk + ele];

      T grad11 = jinv
        * (drdx[ijk + ele] * rtmpu
           + dsdx[ijk + ele] * stmpu
           + dtdx[ijk + ele] * ttmpu);

      T grad12 = jinv
        * (drdy[ijk + ele] * rtmpu
           + dsdy[ijk + ele] * stmpu
           + dtdy[ijk + ele] * ttmpu);

      T grad13 = jinv
        * (drdz[ijk + ele] * rtmpu
           + dsdz[ijk + ele] * stmpu
           + dtdz[ijk + ele] * ttmpu);

      T grad21 = jinv
        * (drdx[ijk + ele] * rtmpv
           + dsdx[ijk + ele] * stmpv
           + dtdx[ijk + ele] * ttmpv);

      T grad22 = jinv
        * (drdy[ijk + ele] * rtmpv
           + dsdy[ijk + ele] * stmpv
           + dtdy[ijk + ele] * ttmpv);

      T grad23 = jinv
        * (drdz[ijk + ele] * rtmpv
           + dsdz[ijk + ele] * stmpv
           + dtdz[ijk + ele] * ttmpv);

      T grad31 = jinv
        * (drdx[ijk + ele] * rtmpw
           + dsdx[ijk + ele] * stmpw
           + dtdx[ijk + ele] * ttmpw);

      T grad32 = jinv
        * (drdy[ijk + ele] * rtmpw
           + dsdy[ijk + ele] * stmpw
           + dtdy[ijk + ele] * ttmpw);

      T grad33 = jinv
        * (drdz[ijk + ele] * rtmpw
           + dsdz[ijk + ele] * stmpw
           + dtdz[ijk + ele] * ttmpw);

      lambda2[ijk + e*LX*LX*LX] = eigen_val_calc<T>( grad11, grad12, grad13,
                                                     grad21, grad22, grad23,
                                                     grad31, grad32, grad33);
    }
  }

}

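/* Descriptive note (added for this listing): lambda2_kernel_kstep does
 * the same computation with a 2D (i,j) thread block that marches
 * through the element plane by plane in k. Each thread holds its full
 * column of u, v, w in registers; at every k-level the current plane
 * is written to shared memory so the r- and s-derivatives can be
 * formed, while the t-derivative is accumulated directly from the
 * register column. __launch_bounds__ caps the threads per block at
 * LX*LX and asks the compiler to keep register usage low enough for
 * three resident blocks per multiprocessor. */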
template< typename T, const int LX >
__global__ void __launch_bounds__(LX*LX,3)
lambda2_kernel_kstep(T * __restrict__ lambda2,
                     const T * __restrict__ u,
                     const T * __restrict__ v,
                     const T * __restrict__ w,
                     const T * __restrict__ dx,
                     const T * __restrict__ dy,
                     const T * __restrict__ dz,
                     const T * __restrict__ drdx,
                     const T * __restrict__ dsdx,
                     const T * __restrict__ dtdx,
                     const T * __restrict__ drdy,
                     const T * __restrict__ dsdy,
                     const T * __restrict__ dtdy,
                     const T * __restrict__ drdz,
                     const T * __restrict__ dsdz,
                     const T * __restrict__ dtdz,
                     const T * __restrict__ jacinv) {

  __shared__ T shu[LX * LX];
  __shared__ T shv[LX * LX];
  __shared__ T shw[LX * LX];

  __shared__ T shdx[LX * LX];
  __shared__ T shdy[LX * LX];
  __shared__ T shdz[LX * LX];

  const int e = blockIdx.x;
  const int j = threadIdx.y;
  const int i = threadIdx.x;
  const int ij = i + j * LX;
  const int ele = e*LX*LX*LX;

  shdx[ij] = dx[ij];
  shdy[ij] = dy[ij];
  shdz[ij] = dz[ij];

  T ru[LX];
  T rv[LX];
  T rw[LX];

#pragma unroll LX
  for (int k = 0; k < LX; ++k) {
    ru[k] = u[ij + k*LX*LX + ele];
    rv[k] = v[ij + k*LX*LX + ele];
    rw[k] = w[ij + k*LX*LX + ele];
  }

  __syncthreads();

  #pragma unroll
  for (int k = 0; k < LX; ++k) {
    const int ijk = ij + k*LX*LX;
    const T jinv = jacinv[ijk+ele];
    T ttmpu = 0.0;
    T ttmpv = 0.0;
    T ttmpw = 0.0;
    shu[ij] = ru[k];
    shv[ij] = rv[k];
    shw[ij] = rw[k];
    for (int l = 0; l < LX; l++) {
      ttmpu += shdz[k+l*LX] * ru[l];
      ttmpv += shdz[k+l*LX] * rv[l];
      ttmpw += shdz[k+l*LX] * rw[l];
    }
    __syncthreads();

    T rtmpu = 0.0;
    T stmpu = 0.0;
    T rtmpv = 0.0;
    T stmpv = 0.0;
    T rtmpw = 0.0;
    T stmpw = 0.0;
#pragma unroll
    for (int l = 0; l < LX; l++) {
      rtmpu += shdx[i+l*LX] * shu[l+j*LX];
      stmpu += shdy[j+l*LX] * shu[i+l*LX];
      rtmpv += shdx[i+l*LX] * shv[l+j*LX];
      stmpv += shdy[j+l*LX] * shv[i+l*LX];
      rtmpw += shdx[i+l*LX] * shw[l+j*LX];
      stmpw += shdy[j+l*LX] * shw[i+l*LX];
    }

    T grad11 = jinv * (drdx[ijk + ele] * rtmpu
                       + dsdx[ijk + ele] * stmpu
                       + dtdx[ijk + ele] * ttmpu);

    T grad12 = jinv * (drdy[ijk + ele] * rtmpu
                       + dsdy[ijk + ele] * stmpu
                       + dtdy[ijk + ele] * ttmpu);

    T grad13 = jinv * (drdz[ijk + ele] * rtmpu
                       + dsdz[ijk + ele] * stmpu
                       + dtdz[ijk + ele] * ttmpu);

    T grad21 = jinv * (drdx[ijk + ele] * rtmpv
                       + dsdx[ijk + ele] * stmpv
                       + dtdx[ijk + ele] * ttmpv);

    T grad22 = jinv * (drdy[ijk + ele] * rtmpv
                       + dsdy[ijk + ele] * stmpv
                       + dtdy[ijk + ele] * ttmpv);

    T grad23 = jinv * (drdz[ijk + ele] * rtmpv
                       + dsdz[ijk + ele] * stmpv
                       + dtdz[ijk + ele] * ttmpv);

    T grad31 = jinv * (drdx[ijk + ele] * rtmpw
                       + dsdx[ijk + ele] * stmpw
                       + dtdx[ijk + ele] * ttmpw);

    T grad32 = jinv * (drdy[ijk + ele] * rtmpw
                       + dsdy[ijk + ele] * stmpw
                       + dtdy[ijk + ele] * ttmpw);

    T grad33 = jinv * (drdz[ijk + ele] * rtmpw
                       + dsdz[ijk + ele] * stmpw
                       + dtdz[ijk + ele] * ttmpw);

    lambda2[ijk + ele] = eigen_val_calc<T>( grad11, grad12, grad13,
                                            grad21, grad22, grad23,
                                            grad31, grad32, grad33);
    __syncthreads();
  }
}

#endif // __MATH_LAMBDA2_KERNEL_H__
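For reference, both kernels evaluate the same quantity at every grid point: the middle eigenvalue of a symmetric tensor built from the velocity gradient, i.e. the lambda_2 vortex criterion (points with a negative value are commonly flagged as vortex cores). In the notation of eigen_val_calc above, with grad_ij = du_i/dx_j:

S_{ij} = \tfrac{1}{2}\left(\partial_j u_i + \partial_i u_j\right), \qquad
\Omega_{ij} = \tfrac{1}{2}\left(\partial_j u_i - \partial_i u_j\right), \qquad
A = S^2 + \Omega^2,

\lambda^3 + B\lambda^2 + C\lambda + D = 0, \qquad
B = -\operatorname{tr} A, \quad
C = \text{sum of the principal } 2\times 2 \text{ minors of } A, \quad
D = -\det A,

q = \frac{3C - B^2}{9}, \qquad
r = \frac{9BC - 27D - 2B^3}{54}, \qquad
\theta = \arccos\frac{r}{\sqrt{-q^3}},

\lambda_k = 2\sqrt{-q}\,\cos\!\left(\frac{\theta + 2\pi k}{3}\right) - \frac{B}{3},
\qquad k = 0, 1, 2,

and the routine returns whichever of the three roots lies between the other two.

A minimal host-side launch sketch for the k-step variant follows. It only illustrates how the template parameters and the thread layout fit together; the wrapper name launch_lambda2_kstep, the explicit stream argument and the compile-time choice of LX are assumptions made for this example, not Neko's actual device interface.

// Illustrative wrapper (not part of Neko): launch lambda2_kernel_kstep
// for nelv elements with LX points per direction. All pointers are
// assumed to be device arrays laid out as the kernels expect:
// LX*LX*LX values per element for the fields and metric terms,
// LX*LX values for the 1D derivative matrices dx, dy, dz.
template <typename T, int LX>
void launch_lambda2_kstep(T *lambda2, const T *u, const T *v, const T *w,
                          const T *dx, const T *dy, const T *dz,
                          const T *drdx, const T *dsdx, const T *dtdx,
                          const T *drdy, const T *dsdy, const T *dtdy,
                          const T *drdz, const T *dsdz, const T *dtdz,
                          const T *jacinv, int nelv, cudaStream_t stream) {
  const dim3 nthrds(LX, LX, 1);   // one thread per (i,j) point of a plane
  const dim3 nblcks(nelv, 1, 1);  // one block per element
  lambda2_kernel_kstep<T, LX>
    <<<nblcks, nthrds, 0, stream>>>(lambda2, u, v, w, dx, dy, dz,
                                    drdx, dsdx, dtdx, drdy, dsdy, dtdy,
                                    drdz, dsdz, dtdz, jacinv);
}

The 1d variant would instead be launched with a flat thread block whose size matches CHUNKS, since both its shared-memory loads and its chunk loop stride by CHUNKS; the k-step variant keeps the shared-memory footprint at one plane per field, at the cost of one barrier per k-level.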