/*
 * Neko — a portable framework for high-order spectral element flow
 * simulations.  CUDA kernels for the Schwarz overlapping-domain routines.
 * File: schwarz_kernel.h
 */
#ifndef __MATH_SCHWARZ_KERNEL_H__
#define __MATH_SCHWARZ_KERNEL_H__
/*
 Copyright (c) 2021-2022, The Neko Authors
 All rights reserved.

 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions
 are met:

 * Redistributions of source code must retain the above copyright
 notice, this list of conditions and the following disclaimer.

 * Redistributions in binary form must reproduce the above
 copyright notice, this list of conditions and the following
 disclaimer in the documentation and/or other materials provided
 with the distribution.

 * Neither the name of the authors nor the names of its
 contributors may be used to endorse or promote products derived
 from this software without specific prior written permission.

 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 POSSIBILITY OF SUCH DAMAGE.
*/

/**
 * Blend selected planes of element array a1 with planes extracted from a2
 * (Schwarz "extrude" step).
 *
 * Layout: one CUDA block per spectral element; each element holds
 * NX*NX*NX values stored x-fastest.  Six NX*NX planes of a2 — at offsets
 * l2 and NX-1-l2 along each coordinate direction — are first staged in
 * shared memory.  Then the planes of a1 at offsets l1 and NX-1-l1 are
 * updated pointwise as a1 = f1*a1 + f2*plane(a2), skipping points that
 * lie on the element edges (only points interior in the two in-plane
 * directions are touched).
 *
 * Launch requirements: gridDim.x = number of elements,
 * blockDim.x >= NX*NX (so the staging pass covers a whole plane).
 */
template< typename T, const int NX>
__global__ void schwarz_extrude_kernel(T * a1,
                                       const int l1,
                                       const T f1,
                                       T * a2,
                                       const int l2,
                                       const T f2){

  /* Shared staging buffers: one NX*NX plane per face pair and direction. */
  __shared__ T face_x_lo[NX*NX], face_x_hi[NX*NX];
  __shared__ T face_y_lo[NX*NX], face_y_hi[NX*NX];
  __shared__ T face_z_lo[NX*NX], face_z_hi[NX*NX];

  const int tid  = threadIdx.x;
  const int elem = blockIdx.x*NX*NX*NX;  /* base offset of this element */

  /* Stage the six source planes of a2 into shared memory.  Only the
     first NX*NX threads participate; (p,q) are the in-plane coords. */
  if(tid < NX*NX){
    const int p    = tid%NX;
    const int q    = tid/NX;
    const int base = q*NX*NX + elem;     /* common term for x/y planes */

    face_x_lo[tid] = a2[(l2)      + p*NX + base];
    face_x_hi[tid] = a2[(NX-1-l2) + p*NX + base];

    face_y_lo[tid] = a2[p + (l2)*NX      + base];
    face_y_hi[tid] = a2[p + (NX-1-l2)*NX + base];

    face_z_lo[tid] = a2[p + q*NX + (l2)*NX*NX      + elem];
    face_z_hi[tid] = a2[p + q*NX + (NX-1-l2)*NX*NX + elem];
  }
  /* Barrier outside the branch: all threads reach it, and the staged
     planes become visible block-wide before the update pass. */
  __syncthreads();

  /* Grid-stride over the element's points; blend where a point sits on
     a destination plane and is interior in the other two directions. */
  for(int ijk = tid; ijk<NX*NX*NX; ijk+=blockDim.x){
    const int jk = ijk/NX;
    const int i  = ijk - NX*jk;
    const int k  = jk/NX;
    const int j  = jk - k*NX;
    const int pt = i + j*NX + k*NX*NX + elem;

    /* Planes normal to x: need (j,k) interior. */
    if(j > 0 && j < NX-1 && k > 0 && k < NX-1){
      if(i == l1)      a1[pt] = f1*a1[pt] + f2*face_x_lo[j + k*NX];
      if(i == NX-1-l1) a1[pt] = f1*a1[pt] + f2*face_x_hi[j + k*NX];
    }
    /* Planes normal to y: need (i,k) interior. */
    if(i > 0 && i < NX-1 && k > 0 && k < NX-1){
      if(j == l1)      a1[pt] = f1*a1[pt] + f2*face_y_lo[i + k*NX];
      if(j == NX-1-l1) a1[pt] = f1*a1[pt] + f2*face_y_hi[i + k*NX];
    }
    /* Planes normal to z: need (i,j) interior. */
    if(i > 0 && i < NX-1 && j > 0 && j < NX-1){
      if(k == l1)      a1[pt] = f1*a1[pt] + f2*face_z_lo[i + j*NX];
      if(k == NX-1-l1) a1[pt] = f1*a1[pt] + f2*face_z_hi[i + j*NX];
    }
  }
}
/**
 * Scatter a regular nx^3 element into an extended (nx+2)^3 element.
 *
 * The extended array `a` gets the data of `b` placed at interior
 * positions (i+1, j+1, k+1) with a one-point halo of zeros around it.
 *
 * Layout: one block per element; `b` holds nx*nx*nx values per element,
 * `a` holds (nx+2)^3 values per element, both stored x-fastest.
 * The barrier between the clear pass and the scatter pass is required:
 * different threads touch the same extended locations in the two passes.
 */
template< typename T>
__global__ void schwarz_toext3d_kernel(T * __restrict__ a,
                                       T *__restrict__ b,
                                       const int nx) {

  const int tid      = threadIdx.x;
  const int nxe      = nx+2;                    /* extended extent      */
  const int elem_ext = blockIdx.x*nxe*nxe*nxe;  /* offset into a        */
  const int elem_reg = blockIdx.x*nx*nx*nx;     /* offset into b        */

  /* Pass 1: clear the whole extended element, halo included. */
  for(int p = tid; p < nxe*nxe*nxe; p += blockDim.x)
    a[p+elem_ext] = 0.0;
  __syncthreads();

  /* Pass 2: copy the regular data into the extended interior. */
  for(int ijk = tid; ijk < nx*nx*nx; ijk += blockDim.x){
    const int jk = ijk / nx;
    const int i  = ijk - jk * nx;
    const int k  = jk / nx;
    const int j  = jk - k * nx;
    a[(i+1) + (j+1)*nxe + (k+1)*nxe*nxe + elem_ext] = b[ijk + elem_reg];
  }
}
/**
 * Gather the interior of an extended (nx+2)^3 element back into a
 * regular nx^3 element — the inverse of schwarz_toext3d_kernel.
 *
 * Each regular point (i, j, k) of `b` receives the extended point
 * (i+1, j+1, k+1) of `a`; the halo layer of `a` is ignored.
 *
 * Layout: one block per element, x-fastest storage; threads stride
 * over the nx*nx*nx points so any blockDim.x works.  No barrier is
 * needed: every thread reads and writes disjoint locations.
 */
template< typename T>
__global__ void schwarz_toreg3d_kernel(T * __restrict__ b,
                                       T *__restrict__ a,
                                       const int nx) {

  const int tid      = threadIdx.x;
  const int nxe      = nx+2;                    /* extended extent */
  const int elem_ext = blockIdx.x*nxe*nxe*nxe;  /* offset into a   */
  const int elem_reg = blockIdx.x*nx*nx*nx;     /* offset into b   */

  for(int ijk = tid; ijk < nx*nx*nx; ijk += blockDim.x){
    const int jk = ijk / nx;
    const int i  = ijk - jk * nx;
    const int k  = jk / nx;
    const int j  = jk - k * nx;
    b[ijk + elem_reg] = a[(i+1) + (j+1)*nxe + (k+1)*nxe*nxe + elem_ext];
  }
}

#endif // __MATH_SCHWARZ_KERNEL_H__