diff --git a/ggml/src/ggml-cuda/unary.cu b/ggml/src/ggml-cuda/unary.cu
index bb048ba4b..8dd70bc7a 100644
--- a/ggml/src/ggml-cuda/unary.cu
+++ b/ggml/src/ggml-cuda/unary.cu
@@ -199,21 +199,21 @@ void ggml_cuda_op_log(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
 /* gated ops */
 
 template <float (*op)(float), typename T>
-static __global__ void unary_gated_op_kernel(const T * x, T * dst, const int k, const int n, const int o) {
-    const int i = blockDim.x*blockIdx.x + threadIdx.x;
+static __global__ void unary_gated_op_kernel(const T * x, T * dst, const int64_t k, const int64_t n, const int64_t o) {
+    const int64_t i = blockDim.x*blockIdx.x + threadIdx.x;
 
     if (i >= k) {
         return;
     }
 
     // perform base op on first half of row and multiply with gate in second half
-    const int j = (i / n) * o + (i % n);
+    const int64_t j = (i / n) * o + (i % n);
     dst[i] = (T)(op((float)x[j]) * (float)x[j + n]);
 }
 
 template <float (*op)(float), typename T>
-static void unary_gated_cuda(const T * x, T * dst, const int k, const int n, const int o, cudaStream_t stream) {
-    const int num_blocks = (k + CUDA_GLU_BLOCK_SIZE - 1) / CUDA_GLU_BLOCK_SIZE;
+static void unary_gated_cuda(const T * x, T * dst, const int64_t k, const int64_t n, const int64_t o, cudaStream_t stream) {
+    const int64_t num_blocks = (k + CUDA_GLU_BLOCK_SIZE - 1) / CUDA_GLU_BLOCK_SIZE;
     unary_gated_op_kernel<op><<<num_blocks, CUDA_GLU_BLOCK_SIZE, 0, stream>>>(x, dst, k, n, o);
 }
 
@@ -222,10 +222,12 @@ void ggml_cuda_op_unary_gated(ggml_backend_cuda_context & ctx, ggml_tensor * dst
     const ggml_tensor * src0 = dst->src[0];
     const void * src0_d = src0->data;
     void * dst_d = dst->data;
-    const int nc = src0->ne[0] / 2;
+    const int64_t nc = src0->ne[0] / 2;
     cudaStream_t stream = ctx.stream();
 
     GGML_ASSERT(ggml_is_contiguous_1(src0));
+    GGML_ASSERT(src0->nb[0] == ggml_element_size(src0));
+    GGML_ASSERT(ggml_is_contiguous(dst));
     GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16);
     GGML_ASSERT( dst->type == GGML_TYPE_F32 ||  dst->type == GGML_TYPE_F16);
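
Note on the thread-index computation: even though i is now declared int64_t, the right-hand side blockDim.x*blockIdx.x + threadIdx.x is still evaluated in 32-bit unsigned arithmetic, so the product can wrap before the widening assignment once k approaches 2^32. A defensive variant (a suggestion, not part of this patch) widens the first operand before multiplying:

    const int64_t i = int64_t(blockDim.x)*blockIdx.x + threadIdx.x;  // multiply in 64-bit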
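For reference, the kernel's index math maps each output element i back to the first half of its input row: with n = nc = ne[0]/2 and o the input row stride in elements, j = (i / n) * o + (i % n) selects the activation and j + n the matching gate. A minimal host-side sketch of the same semantics, using SiLU purely as an example base op (the kernel itself is templated over op):

    #include <cmath>
    #include <cstdint>

    static float silu(float x) { return x / (1.0f + std::exp(-x)); }

    // Host reference for unary_gated_op_kernel.
    // k: total output elements, n: half-row width, o: input row stride in elements.
    static void unary_gated_ref(const float * x, float * dst, int64_t k, int64_t n, int64_t o) {
        for (int64_t i = 0; i < k; ++i) {
            const int64_t j = (i / n) * o + (i % n); // base op input, first half of the row
            dst[i] = silu(x[j]) * x[j + n];          // gate, second half of the row
        }
    }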
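The two new asserts pin down the layout this flat indexing relies on: ggml_is_contiguous_1 together with nb[0] == ggml_element_size(src0) guarantees that elements are tightly packed within each src0 row (rows themselves may still be padded, which is exactly what o accounts for), and a fully contiguous dst allows writing through the flat index i. Assuming the usual ggml accessors, the kernel arguments would be derived roughly as follows (the caller body is cut off in this hunk, so this is a sketch, not the patch's code):

    const int64_t nc = src0->ne[0] / 2;                       // n: half-row width
    const int64_t o  = src0->nb[1] / ggml_element_size(src0); // input row stride in elements
    const int64_t k  = ggml_nelements(dst);                   // total output elements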