Neko 0.9.99
A portable framework for high-order spectral element flow simulations
pipecg_kernel.h
#ifndef __KRYLOV_PIPECG_KERNEL_H__
#define __KRYLOV_PIPECG_KERNEL_H__
/*
 Copyright (c) 2021-2022, The Neko Authors
 All rights reserved.

 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions
 are met:

   * Redistributions of source code must retain the above copyright
     notice, this list of conditions and the following disclaimer.

   * Redistributions in binary form must reproduce the above
     copyright notice, this list of conditions and the following
     disclaimer in the documentation and/or other materials provided
     with the distribution.

   * Neither the name of the authors nor the names of its
     contributors may be used to endorse or promote products derived
     from this software without specific prior written permission.

 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 POSSIBILITY OF SUCH DAMAGE.
*/

/**
 * Device kernel for the update of x and p in the pipelined
 * conjugate gradient method, replaying p_cur stored (alpha, beta)
 * coefficient pairs in a single pass over the data.
 */
template< typename T >
__global__ void cg_update_xp_kernel(T * __restrict__ x,
                                    T * __restrict__ p,
                                    T ** __restrict__ u,
                                    const T * alpha,
                                    const T * beta,
                                    const int p_cur,
                                    const int p_space,
                                    const int n) {

  const int idx = blockIdx.x * blockDim.x + threadIdx.x;
  const int str = blockDim.x * gridDim.x;

  for (int i = idx; i < n; i += str) {
    T tmp = 0.0;
    int p_prev = p_space;
    /* Replay the stored iterations: p <- beta_j * p + u_{j-1}
       (the u from before the batch lives in slot p_space) and
       accumulate the contribution alpha_j * p to x */
    for (int j = 0; j < p_cur; j++) {
      p[i] = beta[j] * p[i] + u[p_prev][i];
      tmp += alpha[j] * p[i];
      p_prev = j;
    }
    x[i] += tmp;
    /* Copy the u in slot p_space-1 into the extra slot p_space,
       which serves as the previous u for the next pass */
    u[p_space][i] = u[p_space-1][i];
  }
}
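The kernel above brings x and p up to date by applying a batch of p_cur accumulated coefficient pairs at once. A minimal sketch of how a host-side wrapper might launch it is shown below; the wrapper name, the fixed block size of 1024 threads and the double-precision instantiation are illustrative assumptions and not part of this header. Note that u must be a device-resident array of device pointers.

#include <cuda_runtime.h>
#include "pipecg_kernel.h"

/* Hypothetical host-side launcher (illustrative only); all pointer
   arguments are assumed to be device pointers allocated elsewhere. */
void launch_cg_update_xp(double *x_d, double *p_d, double **u_d,
                         const double *alpha_d, const double *beta_d,
                         int p_cur, int p_space, int n) {
  const dim3 nthrds(1024, 1, 1);
  const dim3 nblcks((n + 1024 - 1) / 1024, 1, 1);

  cg_update_xp_kernel<double>
    <<<nblcks, nthrds>>>(x_d, p_d, u_d, alpha_d, beta_d,
                         p_cur, p_space, n);
}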

/**
 * Device kernel for the fused vector updates and the three local
 * dot products (r.u, w.u and r.r, weighted by the multiplicity
 * array mult) of one pipelined conjugate gradient iteration.
 * Each block writes its partial sums to buf_h1, buf_h2 and buf_h3;
 * reduce_warp<T>() is expected to be provided by the including
 * translation unit.
 */
template< typename T >
__global__ void pipecg_vecops_kernel(T * __restrict__ p,
                                     T * __restrict__ q,
                                     T * __restrict__ r,
                                     T * __restrict__ s,
                                     T * __restrict__ u1,
                                     T * __restrict__ u2,
                                     T * __restrict__ w,
                                     T * __restrict__ z,
                                     T * __restrict__ ni,
                                     T * __restrict__ mi,
                                     const T alpha,
                                     const T beta,
                                     const T * mult,
                                     T * buf_h1,
                                     T * buf_h2,
                                     T * buf_h3,
                                     const int n) {

  const int idx = blockIdx.x * blockDim.x + threadIdx.x;
  const int str = blockDim.x * gridDim.x;

  const unsigned int lane = threadIdx.x % warpSize;
  const unsigned int wid = threadIdx.x / warpSize;

  __shared__ T buf1[64];
  __shared__ T buf2[64];
  __shared__ T buf3[64];
  T tmp1 = 0.0;
  T tmp2 = 0.0;
  T tmp3 = 0.0;

  /* Fused vector updates and per-thread partial dot products */
  for (int i = idx; i < n; i += str) {
    z[i] = beta * z[i] + ni[i];
    q[i] = beta * q[i] + mi[i];
    s[i] = beta * s[i] + w[i];
    r[i] = r[i] - alpha * s[i];
    u2[i] = u1[i] - alpha * q[i];
    w[i] = w[i] - alpha * z[i];
    tmp1 = tmp1 + r[i] * mult[i] * u2[i];
    tmp2 = tmp2 + w[i] * mult[i] * u2[i];
    tmp3 = tmp3 + r[i] * mult[i] * r[i];
  }

  /* Block-level reduction: warp reductions into shared memory,
     followed by a final warp reduction of the per-warp results */
  tmp1 = reduce_warp<T>(tmp1);
  tmp2 = reduce_warp<T>(tmp2);
  tmp3 = reduce_warp<T>(tmp3);
  if (lane == 0) {
    buf1[wid] = tmp1;
    buf2[wid] = tmp2;
    buf3[wid] = tmp3;
  }
  __syncthreads();

  tmp1 = (threadIdx.x < blockDim.x / warpSize) ? buf1[lane] : 0;
  tmp2 = (threadIdx.x < blockDim.x / warpSize) ? buf2[lane] : 0;
  tmp3 = (threadIdx.x < blockDim.x / warpSize) ? buf3[lane] : 0;
  if (wid == 0) {
    tmp1 = reduce_warp<T>(tmp1);
    tmp2 = reduce_warp<T>(tmp2);
    tmp3 = reduce_warp<T>(tmp3);
  }

  /* One partial result per block */
  if (threadIdx.x == 0) {
    buf_h1[blockIdx.x] = tmp1;
    buf_h2[blockIdx.x] = tmp2;
    buf_h3[blockIdx.x] = tmp3;
  }

}

#endif // __KRYLOV_PIPECG_KERNEL_H__
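pipecg_vecops_kernel fuses the six vector updates of one pipelined CG iteration with the three dot products it needs, but it only leaves one partial sum per block in buf_h1, buf_h2 and buf_h3. A host-side driver therefore has to launch it with a grid no larger than those buffers and finish the reductions itself (in a distributed run the summed values would additionally be combined across ranks). The sketch below illustrates this under assumed names and a double-precision instantiation; it is not the project's actual backend code.

#include <cuda_runtime.h>
#include "pipecg_kernel.h"

/* Hypothetical host-side driver for one pipelined CG iteration
   (illustrative only). All *_d arguments are device pointers,
   partials_h is a host buffer of at least nb entries, and nb must
   not exceed the number of entries in buf1_d, buf2_d and buf3_d. */
void launch_pipecg_vecops(double *p_d, double *q_d, double *r_d, double *s_d,
                          double *u1_d, double *u2_d, double *w_d, double *z_d,
                          double *ni_d, double *mi_d,
                          double alpha, double beta, const double *mult_d,
                          double *buf1_d, double *buf2_d, double *buf3_d,
                          double *partials_h, int nb, int n,
                          double red[3]) {
  const dim3 nthrds(1024, 1, 1);
  const dim3 nblcks(nb, 1, 1);

  pipecg_vecops_kernel<double>
    <<<nblcks, nthrds>>>(p_d, q_d, r_d, s_d, u1_d, u2_d, w_d, z_d,
                         ni_d, mi_d, alpha, beta, mult_d,
                         buf1_d, buf2_d, buf3_d, n);

  /* Finish the three reductions on the host by summing the
     per-block partial results written by the kernel. */
  red[0] = red[1] = red[2] = 0.0;
  cudaMemcpy(partials_h, buf1_d, nb * sizeof(double), cudaMemcpyDeviceToHost);
  for (int i = 0; i < nb; i++) red[0] += partials_h[i];
  cudaMemcpy(partials_h, buf2_d, nb * sizeof(double), cudaMemcpyDeviceToHost);
  for (int i = 0; i < nb; i++) red[1] += partials_h[i];
  cudaMemcpy(partials_h, buf3_d, nb * sizeof(double), cudaMemcpyDeviceToHost);
  for (int i = 0; i < nb; i++) red[2] += partials_h[i];
}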