metal : sync ggml-metal (ref #1047)
Files changed:
- extra/sync-ggml.sh   +3 -0
- ggml-metal.h        +67 -0
- ggml-metal.m       +972 -0
- ggml-metal.metal  +1585 -0
extra/sync-ggml.sh CHANGED

@@ -5,6 +5,9 @@ cp -rpv ../ggml/src/ggml-cuda.h ./ggml-cuda.h
 cp -rpv ../ggml/src/ggml-cuda.cu ./ggml-cuda.cu
 cp -rpv ../ggml/src/ggml-opencl.h ./ggml-opencl.h
 cp -rpv ../ggml/src/ggml-opencl.cpp ./ggml-opencl.cpp
+cp -rpv ../ggml/src/ggml-metal.h ./ggml-metal.h
+cp -rpv ../ggml/src/ggml-metal.m ./ggml-metal.m
+cp -rpv ../ggml/src/ggml-metal.metal ./ggml-metal.metal
 cp -rpv ../ggml/include/ggml/ggml.h ./ggml.h
 cp -rpv ../ggml/examples/common.h ./examples/common.h
 cp -rpv ../ggml/examples/common.cpp ./examples/common.cpp
ggml-metal.h ADDED

@@ -0,0 +1,67 @@
+// An interface allowing to compute ggml_cgraph with Metal
+//
+// This is a fully functional interface that extends ggml with GPU support for Apple devices.
+// A similar interface can be created for other GPU backends (e.g. Vulkan, CUDA, OpenCL, etc.)
+//
+// How it works?
+//
+// As long as your program can create and evaluate a ggml_cgraph on the CPU, you can use this
+// interface to evaluate the same graph on the GPU. Instead of using ggml_graph_compute(), you
+// use ggml_metal_graph_compute() (or ggml_vulkan_graph_compute(), etc.)
+//
+// You only need to make sure that all memory buffers that you used during the graph creation
+// are mapped to the device memory with the ggml_metal_add_buffer() function. This mapping is
+// used during the graph evaluation to determine the arguments of the compute kernels.
+//
+// Synchronization between device and host memory (for example for input and output tensors)
+// is done with the ggml_metal_set_tensor() and ggml_metal_get_tensor() functions.
+//
+
+#pragma once
+
+#include <stddef.h>
+#include <stdbool.h>
+
+// max memory buffers that can be mapped to the device
+#define GGML_METAL_MAX_BUFFERS 16
+
+struct ggml_tensor;
+struct ggml_cgraph;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct ggml_metal_context;
+
+struct ggml_metal_context * ggml_metal_init(void);
+void ggml_metal_free(struct ggml_metal_context * ctx);
+
+// creates a mapping between a host memory buffer and a device memory buffer
+// - make sure to map all buffers used in the graph before calling ggml_metal_graph_compute
+// - the mapping is used during computation to determine the arguments of the compute kernels
+// - you don't need to keep the host memory buffer allocated as it is never accessed by Metal
+// - max_size specifies the maximum size of a tensor and is used to create shared views such
+//   that it is guaranteed that the tensor will fit in at least one of the views
+//
+bool ggml_metal_add_buffer(
+        struct ggml_metal_context * ctx,
+                       const char * name,
+                             void * data,
+                           size_t   size,
+                           size_t   max_size);
+
+// set data from host memory into the device
+void ggml_metal_set_tensor(struct ggml_metal_context * ctx, struct ggml_tensor * t);
+
+// get data from the device into host memory
+void ggml_metal_get_tensor(struct ggml_metal_context * ctx, struct ggml_tensor * t);
+
+// same as ggml_graph_compute but uses Metal
+// creates gf->n_threads command buffers in parallel
+void ggml_metal_graph_compute(struct ggml_metal_context * ctx, struct ggml_cgraph * gf);
+
+#ifdef __cplusplus
+}
+#endif
+
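The header comments above describe a complete round trip: build a ggml_cgraph on the CPU, map the host buffers, upload inputs, compute on the GPU, read back outputs. A minimal host-side sketch of that call sequence in C follows. The function name, the eval_buf arguments, and the way the graph and tensors are obtained are illustrative placeholders, not part of this commit.

#include "ggml.h"
#include "ggml-metal.h"

// Sketch of the intended call sequence (placeholder names throughout).
// 'eval_buf' is the host memory region the graph's tensors were allocated in;
// 'max_tensor_size' is the size of the largest tensor, which
// ggml_metal_add_buffer() needs for its view-splitting logic.
void run_graph_on_gpu(struct ggml_cgraph * gf,
                      void * eval_buf, size_t eval_buf_size, size_t max_tensor_size,
                      struct ggml_tensor * input, struct ggml_tensor * output) {
    struct ggml_metal_context * ctx_metal = ggml_metal_init();

    // map every host buffer used during graph creation
    ggml_metal_add_buffer(ctx_metal, "eval", eval_buf, eval_buf_size, max_tensor_size);

    // host -> device: upload the input tensor's data
    ggml_metal_set_tensor(ctx_metal, input);

    // evaluate the same graph on the GPU instead of calling ggml_graph_compute()
    ggml_metal_graph_compute(ctx_metal, gf);

    // device -> host: read the result back
    ggml_metal_get_tensor(ctx_metal, output);

    ggml_metal_free(ctx_metal);
}

Error handling is elided; ggml_metal_add_buffer() returns false on failure and would need to be checked in real code.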
ggml-metal.m ADDED

@@ -0,0 +1,972 @@
+#import "ggml-metal.h"
+
+#import "ggml.h"
+
+#import <Foundation/Foundation.h>
+
+#import <Metal/Metal.h>
+#import <MetalPerformanceShaders/MetalPerformanceShaders.h>
+
+#ifdef GGML_METAL_NDEBUG
+#define metal_printf(...)
+#else
+#define metal_printf(...) fprintf(stderr, __VA_ARGS__)
+#endif
+
+#define UNUSED(x) (void)(x)
+
+struct ggml_metal_buffer {
+    const char * name;
+
+    void   * data;
+    size_t   size;
+
+    id<MTLBuffer> metal;
+};
+
+struct ggml_metal_context {
+    float * logits;
+
+    id<MTLDevice>       device;
+    id<MTLCommandQueue> queue;
+    id<MTLLibrary>      library;
+
+    int n_buffers;
+    struct ggml_metal_buffer buffers[GGML_METAL_MAX_BUFFERS];
+
+    // custom kernels
+#define GGML_METAL_DECL_KERNEL(name) \
+    id<MTLFunction>             function_##name; \
+    id<MTLComputePipelineState> pipeline_##name
+
+    GGML_METAL_DECL_KERNEL(add);
+    GGML_METAL_DECL_KERNEL(mul);
+    GGML_METAL_DECL_KERNEL(mul_row); // TODO: avoid this extra kernel, instead extend the "mul" kernel to support broadcast
+    GGML_METAL_DECL_KERNEL(scale);
+    GGML_METAL_DECL_KERNEL(silu);
+    GGML_METAL_DECL_KERNEL(relu);
+    GGML_METAL_DECL_KERNEL(gelu);
+    GGML_METAL_DECL_KERNEL(soft_max);
+    GGML_METAL_DECL_KERNEL(diag_mask_inf);
+    GGML_METAL_DECL_KERNEL(get_rows_f16);
+    GGML_METAL_DECL_KERNEL(get_rows_q4_0);
+    GGML_METAL_DECL_KERNEL(get_rows_q4_1);
+    GGML_METAL_DECL_KERNEL(get_rows_q2_k);
+    GGML_METAL_DECL_KERNEL(get_rows_q3_k);
+    GGML_METAL_DECL_KERNEL(get_rows_q4_k);
+    GGML_METAL_DECL_KERNEL(get_rows_q5_k);
+    GGML_METAL_DECL_KERNEL(get_rows_q6_k);
+    GGML_METAL_DECL_KERNEL(rms_norm);
+    GGML_METAL_DECL_KERNEL(norm);
+    GGML_METAL_DECL_KERNEL(mul_mat_f16_f32);
+    GGML_METAL_DECL_KERNEL(mul_mat_q4_0_f32);
+    GGML_METAL_DECL_KERNEL(mul_mat_q4_1_f32);
+    GGML_METAL_DECL_KERNEL(mul_mat_q2_k_f32);
+    GGML_METAL_DECL_KERNEL(mul_mat_q3_k_f32);
+    GGML_METAL_DECL_KERNEL(mul_mat_q4_k_f32);
+    GGML_METAL_DECL_KERNEL(mul_mat_q5_k_f32);
+    GGML_METAL_DECL_KERNEL(mul_mat_q6_k_f32);
+    GGML_METAL_DECL_KERNEL(rope);
+    GGML_METAL_DECL_KERNEL(alibi_f32);
+    GGML_METAL_DECL_KERNEL(cpy_f32_f16);
+    GGML_METAL_DECL_KERNEL(cpy_f32_f32);
+    GGML_METAL_DECL_KERNEL(cpy_f16_f16);
+
+#undef GGML_METAL_DECL_KERNEL
+};
+
+// MSL code
+// TODO: move the contents here when ready
+//       for now it is easier to work in a separate file
+static NSString * const msl_library_source = @"see metal.metal";
+
+// Here to assist with NSBundle Path Hack
+@interface GGMLMetalClass : NSObject
+@end
+@implementation GGMLMetalClass
+@end
+
+struct ggml_metal_context * ggml_metal_init(void) {
+    fprintf(stderr, "%s: allocating\n", __func__);
+
+    struct ggml_metal_context * ctx = malloc(sizeof(struct ggml_metal_context));
+
+    ctx->device = MTLCreateSystemDefaultDevice();
+    ctx->queue  = [ctx->device newCommandQueue];
+    ctx->n_buffers = 0;
+
+    // determine if we can use MPS
+    if (MPSSupportsMTLDevice(ctx->device)) {
+        fprintf(stderr, "%s: using MPS\n", __func__);
+    } else {
+        fprintf(stderr, "%s: not using MPS\n", __func__);
+        GGML_ASSERT(false && "MPS not supported");
+    }
+
+#if 0
+    // compile from source string and show compile log
+    {
+        NSError * error = nil;
+
+        ctx->library = [ctx->device newLibraryWithSource:msl_library_source options:nil error:&error];
+        if (error) {
+            fprintf(stderr, "%s: error: %s\n", __func__, [[error description] UTF8String]);
+            exit(1);
+        }
+    }
+#else
+    UNUSED(msl_library_source);
+
+    // read the source from "ggml-metal.metal" into a string and use newLibraryWithSource
+    {
+        NSError * error = nil;
+
+        //NSString * path = [[NSBundle mainBundle] pathForResource:@"../../examples/metal/metal" ofType:@"metal"];
+        NSBundle * bundle = [NSBundle bundleForClass:[GGMLMetalClass class]];
+        NSString * path = [bundle pathForResource:@"ggml-metal" ofType:@"metal"];
+        fprintf(stderr, "%s: loading '%s'\n", __func__, [path UTF8String]);
+
+        NSString * src = [NSString stringWithContentsOfFile:path encoding:NSUTF8StringEncoding error:&error];
+        if (error) {
+            fprintf(stderr, "%s: error: %s\n", __func__, [[error description] UTF8String]);
+            exit(1);
+        }
+
+        ctx->library = [ctx->device newLibraryWithSource:src options:nil error:&error];
+        if (error) {
+            fprintf(stderr, "%s: error: %s\n", __func__, [[error description] UTF8String]);
+            exit(1);
+        }
+    }
+#endif
+
+    // load kernels
+    {
+#define GGML_METAL_ADD_KERNEL(name) \
+        ctx->function_##name = [ctx->library newFunctionWithName:@"kernel_"#name]; \
+        ctx->pipeline_##name = [ctx->device newComputePipelineStateWithFunction:ctx->function_##name error:nil]; \
+        fprintf(stderr, "%s: loaded %-32s %16p\n", __func__, "kernel_"#name, (void *) ctx->pipeline_##name);
+
+        GGML_METAL_ADD_KERNEL(add);
+        GGML_METAL_ADD_KERNEL(mul);
+        GGML_METAL_ADD_KERNEL(mul_row);
+        GGML_METAL_ADD_KERNEL(scale);
+        GGML_METAL_ADD_KERNEL(silu);
+        GGML_METAL_ADD_KERNEL(relu);
+        GGML_METAL_ADD_KERNEL(gelu);
+        GGML_METAL_ADD_KERNEL(soft_max);
+        GGML_METAL_ADD_KERNEL(diag_mask_inf);
+        GGML_METAL_ADD_KERNEL(get_rows_f16);
+        GGML_METAL_ADD_KERNEL(get_rows_q4_0);
+        GGML_METAL_ADD_KERNEL(get_rows_q4_1);
+        GGML_METAL_ADD_KERNEL(get_rows_q2_k);
+        GGML_METAL_ADD_KERNEL(get_rows_q3_k);
+        GGML_METAL_ADD_KERNEL(get_rows_q4_k);
+        GGML_METAL_ADD_KERNEL(get_rows_q5_k);
+        GGML_METAL_ADD_KERNEL(get_rows_q6_k);
+        GGML_METAL_ADD_KERNEL(rms_norm);
+        GGML_METAL_ADD_KERNEL(norm);
+        GGML_METAL_ADD_KERNEL(mul_mat_f16_f32);
+        GGML_METAL_ADD_KERNEL(mul_mat_q4_0_f32);
+        GGML_METAL_ADD_KERNEL(mul_mat_q4_1_f32);
+        GGML_METAL_ADD_KERNEL(mul_mat_q2_k_f32);
+        GGML_METAL_ADD_KERNEL(mul_mat_q3_k_f32);
+        GGML_METAL_ADD_KERNEL(mul_mat_q4_k_f32);
+        GGML_METAL_ADD_KERNEL(mul_mat_q5_k_f32);
+        GGML_METAL_ADD_KERNEL(mul_mat_q6_k_f32);
+        GGML_METAL_ADD_KERNEL(rope);
+        GGML_METAL_ADD_KERNEL(alibi_f32);
+        GGML_METAL_ADD_KERNEL(cpy_f32_f16);
+        GGML_METAL_ADD_KERNEL(cpy_f32_f32);
+        GGML_METAL_ADD_KERNEL(cpy_f16_f16);
+
+#undef GGML_METAL_ADD_KERNEL
+    }
+
+    fprintf(stderr, "%s: recommendedMaxWorkingSetSize = %8.2f MB\n", __func__, ctx->device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0);
+    fprintf(stderr, "%s: hasUnifiedMemory = %s\n", __func__, ctx->device.hasUnifiedMemory ? "true" : "false");
+    if (ctx->device.maxTransferRate != 0) {
+        fprintf(stderr, "%s: maxTransferRate = %8.2f MB/s\n", __func__, ctx->device.maxTransferRate / 1024.0 / 1024.0);
+    } else {
+        fprintf(stderr, "%s: maxTransferRate = built-in GPU\n", __func__);
+    }
+
+    return ctx;
+}
+
+void ggml_metal_free(struct ggml_metal_context * ctx) {
+    fprintf(stderr, "%s: deallocating\n", __func__);
+
+    free(ctx);
+}
+
+// finds the Metal buffer that contains the tensor data on the GPU device
+// the assumption is that there is 1-to-1 mapping between the host and device memory buffers, so we can find the
+// Metal buffer based on the host memory pointer
+//
+static id<MTLBuffer> ggml_metal_get_buffer(struct ggml_metal_context * ctx, struct ggml_tensor * t, size_t * offs) {
+    //fprintf(stderr, "%s: data tensor '%16s', offs_data = %8ld, offs_eval = %8ld, offs_cach = %8ld\n", __func__, t->name, offs_data, offs_eval, offs_cach);
+
+    const int64_t tsize = ggml_nbytes(t);
+
+    // find the view that contains the tensor fully
+    for (int i = 0; i < ctx->n_buffers; ++i) {
+        const int64_t ioffs = (int64_t) t->data - (int64_t) ctx->buffers[i].data;
+
+        if (ioffs >= 0 && ioffs + tsize <= (int64_t) ctx->buffers[i].size) {
+            *offs = (size_t) ioffs;
+
+            //fprintf(stderr, "%s: '%s' tensor '%16s', offs = %8ld\n", __func__, ctx->buffers[i].name, t->name, *offs);
+
+            return ctx->buffers[i].metal;
+        }
+    }
+
+    fprintf(stderr, "%s: error: buffer is nil\n", __func__);
+
+    return nil;
+}
+
+bool ggml_metal_add_buffer(
+        struct ggml_metal_context * ctx,
+                       const char * name,
+                             void * data,
+                           size_t   size,
+                           size_t   max_size) {
+    if (ctx->n_buffers >= GGML_METAL_MAX_BUFFERS) {
+        fprintf(stderr, "%s: too many buffers\n", __func__);
+        return false;
+    }
+
+    if (data) {
+        // verify that the buffer does not overlap with any of the existing buffers
+        for (int i = 0; i < ctx->n_buffers; ++i) {
+            const int64_t ioffs = (int64_t) data - (int64_t) ctx->buffers[i].data;
+
+            if (ioffs >= 0 && ioffs < (int64_t) ctx->buffers[i].size) {
+                fprintf(stderr, "%s: error: buffer '%s' overlaps with '%s'\n", __func__, name, ctx->buffers[i].name);
+                return false;
+            }
+        }
+
+        const size_t size_page = getpagesize();
+
+        size_t size_aligned = size;
+        if ((size_aligned % size_page) != 0) {
+            size_aligned += (size_page - (size_aligned % size_page));
+        }
+
+        // the buffer fits into the max buffer size allowed by the device
+        if (size_aligned <= ctx->device.maxBufferLength) {
+            ctx->buffers[ctx->n_buffers].name = name;
+            ctx->buffers[ctx->n_buffers].data = data;
+            ctx->buffers[ctx->n_buffers].size = size;
+
+            ctx->buffers[ctx->n_buffers].metal = [ctx->device newBufferWithBytesNoCopy:data length:size_aligned options:MTLResourceStorageModeShared deallocator:nil];
+
+            if (ctx->buffers[ctx->n_buffers].metal == nil) {
+                fprintf(stderr, "%s: failed to allocate '%-16s' buffer, size = %8.2f MB\n", __func__, name, size_aligned / 1024.0 / 1024.0);
+                return false;
+            }
+
+            fprintf(stderr, "%s: allocated '%-16s' buffer, size = %8.2f MB", __func__, name, size_aligned / 1024.0 / 1024.0);
+
+            ++ctx->n_buffers;
+        } else {
+            // this overlap between the views will guarantee that the tensor with the maximum size will fully fit into
+            // one of the views
+            const size_t size_ovlp = ((max_size + size_page - 1) / size_page + 1) * size_page; // round-up 2 pages just in case
+            const size_t size_step = ctx->device.maxBufferLength - size_ovlp;
+            const size_t size_view = ctx->device.maxBufferLength;
+
+            for (size_t i = 0; i < size; i += size_step) {
+                const size_t size_step_aligned = (i + size_view <= size) ? size_view : (size_aligned - i);
+
+                ctx->buffers[ctx->n_buffers].name = name;
+                ctx->buffers[ctx->n_buffers].data = (void *) ((uint8_t *) data + i);
+                ctx->buffers[ctx->n_buffers].size = size_step_aligned;
+
+                ctx->buffers[ctx->n_buffers].metal = [ctx->device newBufferWithBytesNoCopy:(void *) ((uint8_t *) data + i) length:size_step_aligned options:MTLResourceStorageModeShared deallocator:nil];
+
+                if (ctx->buffers[ctx->n_buffers].metal == nil) {
+                    fprintf(stderr, "%s: failed to allocate '%-16s' buffer, size = %8.2f MB\n", __func__, name, size_step_aligned / 1024.0 / 1024.0);
+                    return false;
+                }
+
+                fprintf(stderr, "%s: allocated '%-16s' buffer, size = %8.2f MB, offs = %12ld", __func__, name, size_step_aligned / 1024.0 / 1024.0, i);
+                if (i + size_step < size) {
+                    fprintf(stderr, "\n");
+                }
+
+                ++ctx->n_buffers;
+            }
+        }
+
+        fprintf(stderr, ", (%8.2f / %8.2f)",
+                ctx->device.currentAllocatedSize / 1024.0 / 1024.0,
+                ctx->device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0);
+
+        if (ctx->device.currentAllocatedSize > ctx->device.recommendedMaxWorkingSetSize) {
+            fprintf(stderr, ", warning: current allocated size is greater than the recommended max working set size\n");
+        } else {
+            fprintf(stderr, "\n");
+        }
+    }
+
+    return true;
+}
+
+void ggml_metal_set_tensor(
+        struct ggml_metal_context * ctx,
+        struct ggml_tensor * t) {
+    metal_printf("%s: set input for tensor '%s'\n", __func__, t->name);
+
+    size_t offs;
+    id<MTLBuffer> id_dst = ggml_metal_get_buffer(ctx, t, &offs);
+
+    memcpy((void *) ((uint8_t *) id_dst.contents + offs), t->data, ggml_nbytes(t));
+}
+
+void ggml_metal_get_tensor(
+        struct ggml_metal_context * ctx,
+        struct ggml_tensor * t) {
+    metal_printf("%s: extract results for tensor '%s'\n", __func__, t->name);
+
+    size_t offs;
+    id<MTLBuffer> id_src = ggml_metal_get_buffer(ctx, t, &offs);
+
+    memcpy(t->data, (void *) ((uint8_t *) id_src.contents + offs), ggml_nbytes(t));
+}
+
+void ggml_metal_graph_compute(
+        struct ggml_metal_context * ctx,
+        struct ggml_cgraph * gf) {
+    metal_printf("%s: evaluating graph\n", __func__);
+
+    // create multiple command buffers and enqueue them
+    // then, we encode the graph into the command buffers in parallel
+
+    const int n_cb = gf->n_threads;
+
+    NSMutableArray * command_buffers = [NSMutableArray arrayWithCapacity:n_cb];
+
+    for (int i = 0; i < n_cb; ++i) {
+        command_buffers[i] = [ctx->queue commandBuffer];
+
+        // enqueue the command buffers in order to specify their execution order
+        [command_buffers[i] enqueue];
+    }
+
+    // TODO: is this the best way to start threads?
+    dispatch_queue_t queue = dispatch_queue_create("llama.cpp", DISPATCH_QUEUE_CONCURRENT);
+
+    for (int cb_idx = 0; cb_idx < n_cb; ++cb_idx) {
+        const int n_nodes_per_cb = (gf->n_nodes + n_cb - 1) / n_cb;
+
+        dispatch_async(queue, ^{
+            size_t offs_src0 = 0;
+            size_t offs_src1 = 0;
+            size_t offs_dst  = 0;
+
+            id<MTLCommandBuffer> command_buffer = command_buffers[cb_idx];
+
+            id<MTLComputeCommandEncoder> encoder = nil;
+
+            const int node_start = (cb_idx + 0) * n_nodes_per_cb;
+            const int node_end   = (cb_idx == n_cb - 1) ? gf->n_nodes : (cb_idx + 1) * n_nodes_per_cb;
+
+            for (int i = node_start; i < node_end; ++i) {
+                metal_printf("%s: encoding node %3d, op = %8s\n", __func__, i, ggml_op_name(gf->nodes[i]->op));
+
+                struct ggml_tensor * src0 = gf->nodes[i]->src0;
+                struct ggml_tensor * src1 = gf->nodes[i]->src1;
+                struct ggml_tensor * dst  = gf->nodes[i];
+
+                const int64_t ne00 = src0 ? src0->ne[0] : 0;
+                const int64_t ne01 = src0 ? src0->ne[1] : 0;
+                const int64_t ne02 = src0 ? src0->ne[2] : 0;
+                const int64_t ne03 = src0 ? src0->ne[3] : 0;
+
+                const uint64_t nb00 = src0 ? src0->nb[0] : 0;
+                const uint64_t nb01 = src0 ? src0->nb[1] : 0;
+                const uint64_t nb02 = src0 ? src0->nb[2] : 0;
+                const uint64_t nb03 = src0 ? src0->nb[3] : 0;
+
+                const int64_t ne10 = src1 ? src1->ne[0] : 0;
+                const int64_t ne11 = src1 ? src1->ne[1] : 0;
+                const int64_t ne12 = src1 ? src1->ne[2] : 0;
+                const int64_t ne13 = src1 ? src1->ne[3] : 0; UNUSED(ne13);
+
+                const uint64_t nb10 = src1 ? src1->nb[0] : 0;
+                const uint64_t nb11 = src1 ? src1->nb[1] : 0;
+                const uint64_t nb12 = src1 ? src1->nb[2] : 0;
+                const uint64_t nb13 = src1 ? src1->nb[3] : 0; UNUSED(nb13);
+
+                const int64_t ne0 = dst ? dst->ne[0] : 0;
+                const int64_t ne1 = dst ? dst->ne[1] : 0;
+                const int64_t ne2 = dst ? dst->ne[2] : 0;
+                const int64_t ne3 = dst ? dst->ne[3] : 0;
+
+                const uint64_t nb0 = dst ? dst->nb[0] : 0;
+                const uint64_t nb1 = dst ? dst->nb[1] : 0;
+                const uint64_t nb2 = dst ? dst->nb[2] : 0;
+                const uint64_t nb3 = dst ? dst->nb[3] : 0;
+
+                const enum ggml_type src0t = src0 ? src0->type : GGML_TYPE_COUNT;
+                const enum ggml_type src1t = src1 ? src1->type : GGML_TYPE_COUNT;
+                const enum ggml_type dstt  = dst  ? dst->type  : GGML_TYPE_COUNT;
+
+                id<MTLBuffer> id_src0 = src0 ? ggml_metal_get_buffer(ctx, src0, &offs_src0) : nil;
+                id<MTLBuffer> id_src1 = src1 ? ggml_metal_get_buffer(ctx, src1, &offs_src1) : nil;
+                id<MTLBuffer> id_dst  = dst  ? ggml_metal_get_buffer(ctx, dst,  &offs_dst)  : nil;
+
+                //metal_printf("%s: op - %s\n", __func__, ggml_op_name(dst->op));
+                //if (src0) {
+                //    metal_printf("%s: src0 - %4s [%5lld, %5lld, %5lld], %d, %s\n", __func__, ggml_type_name(src0t), ne00, ne01, ne02,
+                //            ggml_is_contiguous(src0), src0->name);
+                //}
+                //if (src1) {
+                //    metal_printf("%s: src1 - %4s [%5lld, %5lld, %5lld], %d, %s\n", __func__, ggml_type_name(src1t), ne10, ne11, ne12,
+                //            ggml_is_contiguous(src1), src1->name);
+                //}
+                //if (dst) {
+                //    metal_printf("%s: dst  - %4s [%5lld, %5lld, %5lld], 1, %s\n", __func__, ggml_type_name(dstt), ne0, ne1, ne2,
+                //            dst->name);
+                //}
+
+                switch (dst->op) {
+                    case GGML_OP_RESHAPE:
+                    case GGML_OP_VIEW:
+                    case GGML_OP_TRANSPOSE:
+                    case GGML_OP_PERMUTE:
+                        {
+                            // noop
+                        } break;
+                    case GGML_OP_ADD:
+                        {
+                            if (encoder == nil) {
+                                encoder = [command_buffer computeCommandEncoder];
+                            }
+
+                            [encoder setComputePipelineState:ctx->pipeline_add];
+                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
+                            [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
+                            [encoder setBuffer:id_dst offset:offs_dst atIndex:2];
+
+                            const int64_t n = ggml_nelements(dst);
+
+                            [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
+                        } break;
+                    case GGML_OP_MUL:
+                        {
+                            if (encoder == nil) {
+                                encoder = [command_buffer computeCommandEncoder];
+                            }
+
+                            if (ggml_nelements(src1) == ne10) {
+                                // src1 is a row
+                                [encoder setComputePipelineState:ctx->pipeline_mul_row];
+                            } else {
+                                [encoder setComputePipelineState:ctx->pipeline_mul];
+                            }
+                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
+                            [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
+                            [encoder setBuffer:id_dst offset:offs_dst atIndex:2];
+                            [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];
+
+                            const int64_t n = ggml_nelements(dst);
+
+                            [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
+                        } break;
+                    case GGML_OP_SCALE:
+                        {
+                            if (encoder == nil) {
+                                encoder = [command_buffer computeCommandEncoder];
+                            }
+
+                            const float scale = *(const float *) src1->data;
+
+                            [encoder setComputePipelineState:ctx->pipeline_scale];
+                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
+                            [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
+                            [encoder setBytes:&scale length:sizeof(scale) atIndex:2];
+
+                            const int64_t n = ggml_nelements(dst);
+
+                            [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
+                        } break;
+                    case GGML_OP_SILU:
+                        {
+                            if (encoder == nil) {
+                                encoder = [command_buffer computeCommandEncoder];
+                            }
+
+                            [encoder setComputePipelineState:ctx->pipeline_silu];
+                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
+                            [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
+
+                            const int64_t n = ggml_nelements(dst);
+
+                            [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
+                        } break;
+                    case GGML_OP_RELU:
+                        {
+                            if (encoder == nil) {
+                                encoder = [command_buffer computeCommandEncoder];
+                            }
+
+                            [encoder setComputePipelineState:ctx->pipeline_relu];
+                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
+                            [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
+
+                            const int64_t n = ggml_nelements(dst);
+
+                            [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
+                        } break;
+                    case GGML_OP_GELU:
+                        {
+                            if (encoder == nil) {
+                                encoder = [command_buffer computeCommandEncoder];
+                            }
+
+                            [encoder setComputePipelineState:ctx->pipeline_gelu];
+                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
+                            [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
+
+                            const int64_t n = ggml_nelements(dst);
+
+                            [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
+                        } break;
+                    case GGML_OP_SOFT_MAX:
+                        {
+                            if (encoder == nil) {
+                                encoder = [command_buffer computeCommandEncoder];
+                            }
+
+                            const int nth = 32;
+
+                            [encoder setComputePipelineState:ctx->pipeline_soft_max];
+                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
+                            [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
+                            [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:2];
+                            [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:3];
+                            [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:4];
+                            [encoder setThreadgroupMemoryLength:nth*sizeof(float) atIndex:0];
+
+                            [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
+                        } break;
+                    case GGML_OP_DIAG_MASK_INF:
+                        {
+                            if (encoder == nil) {
+                                encoder = [command_buffer computeCommandEncoder];
+                            }
+
+                            const int n_past = ((int32_t *)(src1->data))[0];
+
+                            [encoder setComputePipelineState:ctx->pipeline_diag_mask_inf];
+                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
+                            [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
+                            [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:2];
+                            [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:3];
+                            [encoder setBytes:&n_past length:sizeof(int) atIndex:4];
+
+                            [encoder dispatchThreadgroups:MTLSizeMake(ne00, ne01, ne02) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
+                        } break;
+                    case GGML_OP_MUL_MAT:
+                        {
+                            // TODO: needs to be updated after PR: https://github.com/ggerganov/ggml/pull/224
+
+                            GGML_ASSERT(ne00 == ne10);
+                            GGML_ASSERT(ne02 == ne12);
+
+                            if (ggml_is_contiguous(src0) &&
+                                ggml_is_contiguous(src1) &&
+                                (src0t == GGML_TYPE_F32 || src0t == GGML_TYPE_F16) && ne11 > 1) {
+
+                                if (encoder != nil) {
+                                    [encoder endEncoding];
+                                    encoder = nil;
+                                }
+
+                                MPSDataType src0dt = src0t == GGML_TYPE_F32 ? MPSDataTypeFloat32 : MPSDataTypeFloat16;
+                                MPSDataType src1dt = src1t == GGML_TYPE_F32 ? MPSDataTypeFloat32 : MPSDataTypeFloat16;
+
+                                // for F32 x F32 we use MPS
+                                MPSMatrixDescriptor * desc0 = [MPSMatrixDescriptor
+                                    matrixDescriptorWithRows:ne01 columns:ne00 rowBytes:src0->nb[1] dataType:src0dt];
+
+                                MPSMatrixDescriptor * desc1 = [MPSMatrixDescriptor
+                                    matrixDescriptorWithRows:ne11 columns:ne10 rowBytes:src1->nb[1] dataType:src1dt];
+
+                                MPSMatrixDescriptor * desc = [MPSMatrixDescriptor
+                                    matrixDescriptorWithRows:ne1 columns:ne0 rowBytes:dst->nb[1] dataType:MPSDataTypeFloat32];
+
+                                MPSMatrixMultiplication * mul = [[MPSMatrixMultiplication alloc]
+                                    initWithDevice:ctx->device transposeLeft:false transposeRight:true
+                                        resultRows:ne11 resultColumns:ne01 interiorColumns:ne00 alpha:1.0 beta:0.0];
+
+                                // we need to do ne02 multiplications
+                                // TODO: is there a way to do this in parallel - currently very slow ..
+                                // TODO: might be possible to offload part of the computation to ANE using Accelerate's CBLAS
+                                for (int64_t i02 = 0; i02 < ne02; ++i02) {
+                                    size_t offs_src0_cur = offs_src0 + i02*nb02;
+                                    size_t offs_src1_cur = offs_src1 + i02*nb12;
+                                    size_t offs_dst_cur  = offs_dst  + i02*nb2;
+
+                                    MPSMatrix * mat_src0 = [[MPSMatrix alloc] initWithBuffer:id_src0 offset:offs_src0_cur descriptor:desc0];
+                                    MPSMatrix * mat_src1 = [[MPSMatrix alloc] initWithBuffer:id_src1 offset:offs_src1_cur descriptor:desc1];
+                                    MPSMatrix * mat_dst  = [[MPSMatrix alloc] initWithBuffer:id_dst  offset:offs_dst_cur  descriptor:desc ];
+
+                                    [mul encodeToCommandBuffer:command_buffer leftMatrix:mat_src1 rightMatrix:mat_src0 resultMatrix:mat_dst];
+                                }
+                            } else {
+                                if (encoder == nil) {
+                                    encoder = [command_buffer computeCommandEncoder];
+                                }
+
+                                int nth0 = 32;
+                                int nth1 = 1;
+
+                                // use custom matrix x vector kernel
+                                switch (src0t) {
+                                    case GGML_TYPE_F16:
+                                        {
+                                            GGML_ASSERT(ne02 == ne12);
+
+                                            nth0 = 64;
+                                            nth1 = 1;
+                                            [encoder setComputePipelineState:ctx->pipeline_mul_mat_f16_f32];
+                                        } break;
+                                    case GGML_TYPE_Q4_0:
+                                        {
+                                            GGML_ASSERT(ne02 == 1);
+                                            GGML_ASSERT(ne12 == 1);
+
+                                            nth0 = 8;
+                                            nth1 = 8;
+                                            [encoder setComputePipelineState:ctx->pipeline_mul_mat_q4_0_f32];
+                                        } break;
+                                    case GGML_TYPE_Q4_1:
+                                        {
+                                            GGML_ASSERT(ne02 == 1);
+                                            GGML_ASSERT(ne12 == 1);
+
+                                            nth0 = 8;
+                                            nth1 = 8;
+                                            [encoder setComputePipelineState:ctx->pipeline_mul_mat_q4_1_f32];
+                                        } break;
+                                    case GGML_TYPE_Q2_K:
+                                        {
+                                            GGML_ASSERT(ne02 == 1);
+                                            GGML_ASSERT(ne12 == 1);
+
+                                            nth0 = 4;
+                                            nth1 = 16;
+                                            [encoder setComputePipelineState:ctx->pipeline_mul_mat_q2_k_f32];
+                                        } break;
+                                    case GGML_TYPE_Q3_K:
+                                        {
+                                            GGML_ASSERT(ne02 == 1);
+                                            GGML_ASSERT(ne12 == 1);
+
+                                            nth0 = 4;
+                                            nth1 = 16;
+                                            [encoder setComputePipelineState:ctx->pipeline_mul_mat_q3_k_f32];
+                                        } break;
+                                    case GGML_TYPE_Q4_K:
+                                        {
+                                            GGML_ASSERT(ne02 == 1);
+                                            GGML_ASSERT(ne12 == 1);
+
+                                            nth0 = 4;
+                                            nth1 = 16;
+                                            [encoder setComputePipelineState:ctx->pipeline_mul_mat_q4_k_f32];
+                                        } break;
+                                    case GGML_TYPE_Q5_K:
+                                        {
+                                            GGML_ASSERT(ne02 == 1);
+                                            GGML_ASSERT(ne12 == 1);
+
+                                            nth0 = 4;
+                                            nth1 = 16;
+                                            [encoder setComputePipelineState:ctx->pipeline_mul_mat_q5_k_f32];
+                                        } break;
+                                    case GGML_TYPE_Q6_K:
+                                        {
+                                            GGML_ASSERT(ne02 == 1);
+                                            GGML_ASSERT(ne12 == 1);
+
+                                            nth0 = 4;
+                                            nth1 = 16;
+                                            [encoder setComputePipelineState:ctx->pipeline_mul_mat_q6_k_f32];
+                                        } break;
+                                    default:
+                                        {
+                                            fprintf(stderr, "Asserting on type %d\n",(int)src0t);
+                                            GGML_ASSERT(false && "not implemented");
+                                        }
+                                };
+
+                                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
+                                [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
+                                [encoder setBuffer:id_dst offset:offs_dst atIndex:2];
+                                [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];
+                                [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:4];
+                                [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:5];
+                                [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:6];
+                                [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:7];
+                                [encoder setBytes:&ne10 length:sizeof(ne10) atIndex:8];
+                                [encoder setBytes:&ne11 length:sizeof(ne11) atIndex:9];
+                                [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:10];
+                                [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:11];
+                                [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:12];
+                                [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:13];
+                                [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:14];
+
+                                if (src0t == GGML_TYPE_Q4_0 || src0t == GGML_TYPE_Q4_1) {
+                                    [encoder setThreadgroupMemoryLength:nth0*nth1*sizeof(float) atIndex:0];
+                                    [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne11, 1) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
+                                }
+                                else if (src0t == GGML_TYPE_Q2_K ||
+                                         src0t == GGML_TYPE_Q3_K ||
+                                         src0t == GGML_TYPE_Q4_K ||
+                                         src0t == GGML_TYPE_Q5_K ||
+                                         src0t == GGML_TYPE_Q6_K) {
+                                    [encoder setThreadgroupMemoryLength:nth0*nth1*sizeof(float) atIndex:0];
+                                    [encoder dispatchThreadgroups:MTLSizeMake(ne01, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
+                                } else {
+                                    [encoder setThreadgroupMemoryLength:nth0*sizeof(float) atIndex:0];
+                                    [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
+                                }
+                            }
+                        } break;
+                    case GGML_OP_GET_ROWS:
+                        {
+                            if (encoder == nil) {
+                                encoder = [command_buffer computeCommandEncoder];
+                            }
+
+                            switch (src0->type) {
+                                case GGML_TYPE_F16: [encoder setComputePipelineState:ctx->pipeline_get_rows_f16]; break;
+                                case GGML_TYPE_Q4_0: [encoder setComputePipelineState:ctx->pipeline_get_rows_q4_0]; break;
+                                case GGML_TYPE_Q4_1: [encoder setComputePipelineState:ctx->pipeline_get_rows_q4_1]; break;
+                                case GGML_TYPE_Q2_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q2_k]; break;
+                                case GGML_TYPE_Q3_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q3_k]; break;
+                                case GGML_TYPE_Q4_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q4_k]; break;
+                                case GGML_TYPE_Q5_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q5_k]; break;
+                                case GGML_TYPE_Q6_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q6_k]; break;
+                                default: GGML_ASSERT(false && "not implemented");
+                            }
+
+                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
+                            [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
+                            [encoder setBuffer:id_dst offset:offs_dst atIndex:2];
+                            [encoder setBytes:&(src0->ne[0]) length:sizeof( int64_t) atIndex:3];
+                            [encoder setBytes:&(src0->nb[1]) length:sizeof(uint64_t) atIndex:4];
+                            [encoder setBytes:&(dst->nb[1]) length:sizeof(uint64_t) atIndex:5];
+
+                            const int64_t n = ggml_nelements(src1);
+
+                            [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
+                        } break;
+                    case GGML_OP_RMS_NORM:
+                        {
+                            if (encoder == nil) {
+                                encoder = [command_buffer computeCommandEncoder];
+                            }
+
+                            const float eps = 1e-6f;
+
+                            const int nth = 256;
+
+                            [encoder setComputePipelineState:ctx->pipeline_rms_norm];
+                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
+                            [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
+                            [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
+                            [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:3];
+                            [encoder setBytes:&eps length:sizeof( float) atIndex:4];
+                            [encoder setThreadgroupMemoryLength:nth*sizeof(float) atIndex:0];
+
+                            const int64_t nrows = ggml_nrows(src0);
+
+                            [encoder dispatchThreadgroups:MTLSizeMake(nrows, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
+                        } break;
+                    case GGML_OP_NORM:
+                        {
+                            if (encoder == nil) {
+                                encoder = [command_buffer computeCommandEncoder];
+                            }
+
+                            const float eps = 1e-5f;
+
+                            const int nth = 256;
+
+                            [encoder setComputePipelineState:ctx->pipeline_norm];
+                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
+                            [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
+                            [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
+                            [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:3];
+                            [encoder setBytes:&eps length:sizeof( float) atIndex:4];
+                            [encoder setThreadgroupMemoryLength:nth*sizeof(float) atIndex:0];
+
+                            const int64_t nrows = ggml_nrows(src0);
+
+                            [encoder dispatchThreadgroups:MTLSizeMake(nrows, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
+                        } break;
+                    case GGML_OP_ALIBI:
+                        {
+                            if (encoder == nil) {
+                                encoder = [command_buffer computeCommandEncoder];
+                            }
+
+                            GGML_ASSERT((src0t == GGML_TYPE_F32));
+
+                            const int n_past = ((int32_t *) src1->data)[0]; UNUSED(n_past);
+                            const int n_head = ((int32_t *) src1->data)[1];
+                            const float max_bias = ((float *) src1->data)[2];
+
+                            if (__builtin_popcount(n_head) != 1) {
+                                GGML_ASSERT(false && "only power-of-two n_head implemented");
+                            }
+
+                            const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));
+                            const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
+
+                            [encoder setComputePipelineState:ctx->pipeline_alibi_f32];
+                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
+                            [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
+                            [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
+                            [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:3];
+                            [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:4];
+                            [encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:5];
+                            [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:6];
+                            [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:7];
+                            [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:8];
+                            [encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:9];
+                            [encoder setBytes:&ne0 length:sizeof( int64_t) atIndex:10];
+                            [encoder setBytes:&ne1 length:sizeof( int64_t) atIndex:11];
+                            [encoder setBytes:&ne2 length:sizeof( int64_t) atIndex:12];
+                            [encoder setBytes:&ne3 length:sizeof( int64_t) atIndex:13];
+                            [encoder setBytes:&nb0 length:sizeof(uint64_t) atIndex:14];
+                            [encoder setBytes:&nb1 length:sizeof(uint64_t) atIndex:15];
+                            [encoder setBytes:&nb2 length:sizeof(uint64_t) atIndex:16];
+                            [encoder setBytes:&nb3 length:sizeof(uint64_t) atIndex:17];
+                            [encoder setBytes:&m0 length:sizeof( float) atIndex:18];
+                            const int nth = 32;
+                            [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
+                        } break;
+                    case GGML_OP_ROPE:
+                        {
+                            if (encoder == nil) {
+                                encoder = [command_buffer computeCommandEncoder];
+                            }
+
+                            const int n_dims = ((int32_t *) src1->data)[1];
+                            const int mode   = ((int32_t *) src1->data)[2];
+
+                            const int n_past = ((int32_t *)(src1->data))[0];
+
+                            [encoder setComputePipelineState:ctx->pipeline_rope];
+                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
+                            [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
+                            [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
+                            [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:3];
+                            [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:4];
+                            [encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:5];
+                            [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:6];
+                            [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:7];
+                            [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:8];
+                            [encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:9];
+                            [encoder setBytes:&ne0 length:sizeof( int64_t) atIndex:10];
+                            [encoder setBytes:&ne1 length:sizeof( int64_t) atIndex:11];
+                            [encoder setBytes:&ne2 length:sizeof( int64_t) atIndex:12];
+                            [encoder setBytes:&ne3 length:sizeof( int64_t) atIndex:13];
+                            [encoder setBytes:&nb0 length:sizeof(uint64_t) atIndex:14];
+                            [encoder setBytes:&nb1 length:sizeof(uint64_t) atIndex:15];
+                            [encoder setBytes:&nb2 length:sizeof(uint64_t) atIndex:16];
+                            [encoder setBytes:&nb3 length:sizeof(uint64_t) atIndex:17];
+                            [encoder setBytes:&n_past length:sizeof( int) atIndex:18];
+                            [encoder setBytes:&n_dims length:sizeof( int) atIndex:19];
+                            [encoder setBytes:&mode length:sizeof( int) atIndex:20];
+
+                            [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
+                        } break;
+                    case GGML_OP_CPY:
+                        {
+                            if (encoder == nil) {
+                                encoder = [command_buffer computeCommandEncoder];
+                            }
+
+                            const int nth = 32;
+
+                            switch (src0t) {
+                                case GGML_TYPE_F32:
+                                    {
+                                        switch (dstt) {
+                                            case GGML_TYPE_F16: [encoder setComputePipelineState:ctx->pipeline_cpy_f32_f16]; break;
+                                            case GGML_TYPE_F32: [encoder setComputePipelineState:ctx->pipeline_cpy_f32_f32]; break;
+                                            default: GGML_ASSERT(false && "not implemented");
+                                        };
+                                    } break;
+                                case GGML_TYPE_F16:
+                                    {
+                                        switch (dstt) {
+                                            case GGML_TYPE_F16: [encoder setComputePipelineState:ctx->pipeline_cpy_f16_f16]; break;
+                                            case GGML_TYPE_F32: GGML_ASSERT(false && "cpy_f16_f32 not implemented"); break;
+                                            default: GGML_ASSERT(false && "not implemented");
+                                        };
+                                    } break;
+                                default: GGML_ASSERT(false && "not implemented");
+                            }
+
+                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
+                            [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
+                            [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
+                            [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:3];
+                            [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:4];
+                            [encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:5];
+                            [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:6];
+                            [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:7];
+                            [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:8];
+                            [encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:9];
+                            [encoder setBytes:&ne0 length:sizeof( int64_t) atIndex:10];
+                            [encoder setBytes:&ne1 length:sizeof( int64_t) atIndex:11];
+                            [encoder setBytes:&ne2 length:sizeof( int64_t) atIndex:12];
+                            [encoder setBytes:&ne3 length:sizeof( int64_t) atIndex:13];
+                            [encoder setBytes:&nb0 length:sizeof(uint64_t) atIndex:14];
+                            [encoder setBytes:&nb1 length:sizeof(uint64_t) atIndex:15];
+                            [encoder setBytes:&nb2 length:sizeof(uint64_t) atIndex:16];
+                            [encoder setBytes:&nb3 length:sizeof(uint64_t) atIndex:17];
+
+                            [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
+                        } break;
+                    default:
+                        fprintf(stderr, "%s: node %3d, op = %8s not implemented\n", __func__, i, ggml_op_name(dst->op));
+                        GGML_ASSERT(false);
+                }
+            }
+
+            if (encoder != nil) {
+                [encoder endEncoding];
+                encoder = nil;
+            }
+
+            [command_buffer commit];
+        });
+    }
+
+    // wait for all threads to finish
+    dispatch_barrier_sync(queue, ^{});
+
+    [command_buffers[n_cb - 1] waitUntilCompleted];
+
+    // check status of command buffers
+    // needed to detect if the device ran out-of-memory for example (#1881)
+    for (int i = 0; i < n_cb; i++) {
+        MTLCommandBufferStatus status = (MTLCommandBufferStatus) [command_buffers[i] status];
+        if (status != MTLCommandBufferStatusCompleted) {
+            fprintf(stderr, "%s: command buffer %d failed with status %lu\n", __func__, i, status);
+            GGML_ASSERT(false);
+        }
+    }
+}
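The else branch of ggml_metal_add_buffer above is the subtle part: a host buffer larger than the device's maxBufferLength is exposed as several overlapping MTLBuffer views, and an overlap of roughly max_size plus two pages guarantees that every tensor falls entirely inside at least one view, which is what ggml_metal_get_buffer searches for. Below is a standalone sketch of the same arithmetic in plain C, with made-up constants standing in for getpagesize() and ctx->device.maxBufferLength; the real code also page-aligns size, which these example sizes already are.

#include <stdio.h>

int main(void) {
    const size_t size_page = 4096;               // stand-in for getpagesize()
    const size_t max_buf   = 1024ull*1024*1024;  // stand-in for device.maxBufferLength (1 GiB)
    const size_t size      = 2560ull*1024*1024;  // host buffer: 2.5 GiB, too big for one view
    const size_t max_size  = 64ull*1024*1024;    // largest single tensor: 64 MiB

    // same formulas as the code above: round max_size up to a whole page,
    // add one extra page of slack, and advance by (view size - overlap)
    const size_t size_ovlp = ((max_size + size_page - 1) / size_page + 1) * size_page;
    const size_t size_step = max_buf - size_ovlp;
    const size_t size_view = max_buf;

    for (size_t i = 0; i < size; i += size_step) {
        const size_t len = (i + size_view <= size) ? size_view : (size - i);
        // consecutive views overlap by size_ovlp bytes, so any tensor of up
        // to max_size bytes lies fully inside at least one of them
        printf("view: offset = %zu, length = %zu\n", i, len);
    }
    return 0;
}

With these numbers the 2.5 GiB buffer becomes three views: two full 1 GiB views starting roughly 960 MiB apart, plus a final shorter view covering the tail.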
ggml-metal.metal
ADDED
|
@@ -0,0 +1,1585 @@
#include <metal_stdlib>

using namespace metal;

#define MAX(x, y) ((x) > (y) ? (x) : (y))

#define QK4_0 32
#define QR4_0 2
typedef struct {
    half    d;              // delta
    uint8_t qs[QK4_0 / 2];  // nibbles / quants
} block_q4_0;

#define QK4_1 32
typedef struct {
    half d;                 // delta
    half m;                 // min
    uint8_t qs[QK4_1 / 2];  // nibbles / quants
} block_q4_1;

static void dequantize_row_q4_0(device const block_q4_0 * x, device float * y, int k) {
    const int qk = QK4_0;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        const half d = x[i].d;

        for (int j = 0; j < qk/2; ++j) {
            const int x0 = (x[i].qs[j] & 0x0F) - 8;
            const int x1 = (x[i].qs[j] >>   4) - 8;

            y[i*qk + j + 0   ] = x0*d;
            y[i*qk + j + qk/2] = x1*d;
        }
    }
}

static void dequantize_row_q4_1(device const block_q4_1 * x, device float * y, int k) {
    const int qk = QK4_1;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        const half d = x[i].d;
        const half m = x[i].m;

        for (int j = 0; j < qk/2; ++j) {
            const int x0 = (x[i].qs[j] & 0x0F);
            const int x1 = (x[i].qs[j] >>   4);

            y[i*qk + j + 0   ] = x0*d + m;
            y[i*qk + j + qk/2] = x1*d + m;
        }
    }
}
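// ---------------------------------------------------------------------------
// Illustrative sketch (editor addition, not part of the synced file): how the
// two weights packed into one q4_0 byte decode, mirroring the loop above. The
// helper name is hypothetical and is not used by any kernel in this file.
// ---------------------------------------------------------------------------
// static inline float2 dequant_q4_0_pair(half d, uint8_t q) {
//     // low nibble -> first half of the block, high nibble -> second half;
//     // both nibbles are stored biased by +8, hence the subtraction
//     return float2(((q & 0x0F) - 8) * (float) d, ((q >> 4) - 8) * (float) d);
// }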
kernel void kernel_add(
        device const float * src0,
        device const float * src1,
        device       float * dst,
        uint tpig[[thread_position_in_grid]]) {
    dst[tpig] = src0[tpig] + src1[tpig];
}

kernel void kernel_mul(
        device const float * src0,
        device const float * src1,
        device       float * dst,
        uint tpig[[thread_position_in_grid]]) {
    dst[tpig] = src0[tpig] * src1[tpig];
}

// assumption: src1 is a row
// broadcast src1 into src0
kernel void kernel_mul_row(
        device const float * src0,
        device const float * src1,
        device       float * dst,
        constant   int64_t & ne00,
        uint tpig[[thread_position_in_grid]]) {
    dst[tpig] = src0[tpig] * src1[tpig % ne00];
}
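// [illustrative note, not part of the synced file] worked example for
// kernel_mul_row: with ne00 = 4, thread tpig = 6 computes
// dst[6] = src0[6] * src1[6 % 4] = src0[6] * src1[2], i.e. the ne00-wide
// row src1 is tiled across every row of src0.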
kernel void kernel_scale(
        device const float * src0,
        device       float * dst,
        constant     float & scale,
        uint tpig[[thread_position_in_grid]]) {
    dst[tpig] = src0[tpig] * scale;
}

kernel void kernel_silu(
        device const float * src0,
        device       float * dst,
        uint tpig[[thread_position_in_grid]]) {
    float x = src0[tpig];
    dst[tpig] = x / (1.0f + exp(-x));
}

kernel void kernel_relu(
        device const float * src0,
        device       float * dst,
        uint tpig[[thread_position_in_grid]]) {
    dst[tpig] = max(0.0f, src0[tpig]);
}

constant float GELU_COEF_A    = 0.044715f;
constant float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f;

kernel void kernel_gelu(
        device const float * src0,
        device       float * dst,
        uint tpig[[thread_position_in_grid]]) {
    float x = src0[tpig];
    dst[tpig] = 0.5f*x*(1.0f + tanh(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x)));
}
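// [illustrative note, not part of the synced file] kernel_gelu uses the common
// tanh approximation gelu(x) ~= 0.5*x*(1 + tanh(sqrt(2/pi)*(x + 0.044715*x^3))),
// which is what GELU_COEF_A and SQRT_2_OVER_PI encode.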
kernel void kernel_soft_max(
        device const float * src0,
        device       float * dst,
        constant   int64_t & ne00,
        constant   int64_t & ne01,
        constant   int64_t & ne02,
        threadgroup float  * buf [[threadgroup(0)]],
        uint3 tgpig[[threadgroup_position_in_grid]],
        uint3 tpitg[[thread_position_in_threadgroup]],
        uint3   ntg[[threads_per_threadgroup]]) {
    const int64_t i03 = tgpig[2];
    const int64_t i02 = tgpig[1];
    const int64_t i01 = tgpig[0];

    device const float * psrc0 = src0 + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00;
    device       float * pdst  = dst  + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00;

    // parallel max
    buf[tpitg[0]] = -INFINITY;
    for (int i00 = tpitg[0]; i00 < ne00; i00 += ntg[0]) {
        buf[tpitg[0]] = MAX(buf[tpitg[0]], psrc0[i00]);
    }

    // reduce
    threadgroup_barrier(mem_flags::mem_threadgroup);
    for (uint i = ntg[0]/2; i > 0; i /= 2) {
        if (tpitg[0] < i) {
            buf[tpitg[0]] = MAX(buf[tpitg[0]], buf[tpitg[0] + i]);
        }
        threadgroup_barrier(mem_flags::mem_threadgroup);
    }

    // broadcast
    if (tpitg[0] == 0) {
        buf[0] = buf[0];
    }

    threadgroup_barrier(mem_flags::mem_threadgroup);

    const float max = buf[0];

    // parallel sum
    buf[tpitg[0]] = 0.0f;
    for (int i00 = tpitg[0]; i00 < ne00; i00 += ntg[0]) {
        buf[tpitg[0]] += exp(psrc0[i00] - max);
    }

    // reduce
    threadgroup_barrier(mem_flags::mem_threadgroup);
    for (uint i = ntg[0]/2; i > 0; i /= 2) {
        if (tpitg[0] < i) {
            buf[tpitg[0]] += buf[tpitg[0] + i];
        }
        threadgroup_barrier(mem_flags::mem_threadgroup);
    }

    // broadcast
    if (tpitg[0] == 0) {
        buf[0] = buf[0];
    }

    threadgroup_barrier(mem_flags::mem_threadgroup);

    const float sum = buf[0];

    for (int i00 = tpitg[0]; i00 < ne00; i00 += ntg[0]) {
        pdst[i00] = exp(psrc0[i00] - max) / sum;
    }
}
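// [illustrative note, not part of the synced file] kernel_soft_max computes
// softmax(x)_i = exp(x_i - max(x)) / sum_j exp(x_j - max(x)); subtracting the
// row maximum first keeps exp() in range, which is why the kernel performs two
// tree reductions (max, then sum) before the final normalization pass.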
kernel void kernel_diag_mask_inf(
        device const float * src0,
        device       float * dst,
        constant   int64_t & ne00,
        constant   int64_t & ne01,
        constant       int & n_past,
        uint3 tpig[[thread_position_in_grid]]) {
    const int64_t i02 = tpig[2];
    const int64_t i01 = tpig[1];
    const int64_t i00 = tpig[0];

    if (i00 > n_past + i01) {
        dst[i02*ne01*ne00 + i01*ne00 + i00] = -INFINITY;
    } else {
        dst[i02*ne01*ne00 + i01*ne00 + i00] = src0[i02*ne01*ne00 + i01*ne00 + i00];
    }
}
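// [illustrative note, not part of the synced file] with n_past = 0 this writes
// -INFINITY wherever i00 > i01, i.e. it masks the strict upper triangle of each
// ne01 x ne00 matrix -- the usual causal attention mask.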
kernel void kernel_get_rows_f16(
        device const  void * src0,
        device const   int * src1,
        device       float * dst,
        constant   int64_t & ne00,
        constant  uint64_t & nb01,
        constant  uint64_t & nb1,
        uint tpig[[thread_position_in_grid]]) {
    const int i = tpig;
    const int r = ((device int32_t *) src1)[i];

    for (int j = 0; j < ne00; j++) {
        dst[i*nb1 + j] = ((device half *) ((device char *) src0 + r*nb01))[j];
    }
}

kernel void kernel_get_rows_q4_0(
        device const  void * src0,
        device const   int * src1,
        device       float * dst,
        constant   int64_t & ne00,
        constant  uint64_t & nb01,
        constant  uint64_t & nb1,
        uint tpig[[thread_position_in_grid]]) {
    const int i = tpig;
    const int r = ((device int32_t *) src1)[i];

    dequantize_row_q4_0(
            (device const block_q4_0 *) ((device char *) src0 + r*nb01),
                       (device float *) ((device char *)  dst + i*nb1), ne00);
}

kernel void kernel_get_rows_q4_1(
        device const  void * src0,
        device const   int * src1,
        device       float * dst,
        constant   int64_t & ne00,
        constant  uint64_t & nb01,
        constant  uint64_t & nb1,
        uint tpig[[thread_position_in_grid]]) {
    const int i = tpig;
    const int r = ((device int32_t *) src1)[i];

    dequantize_row_q4_1(
            (device const block_q4_1 *) ((device char *) src0 + r*nb01),
                       (device float *) ((device char *)  dst + i*nb1), ne00);
}

kernel void kernel_norm(
        device const  void * src0,
        device       float * dst,
        constant   int64_t & ne00,
        constant  uint64_t & nb01,
        constant     float & eps,
        threadgroup float  * sum [[threadgroup(0)]],
        uint tgpig[[threadgroup_position_in_grid]],
        uint tpitg[[thread_position_in_threadgroup]],
        uint   ntg[[threads_per_threadgroup]]) {
    device const float * x = (device const float *) ((device const char *) src0 + tgpig*nb01);
    // MEAN
    // parallel sum
    sum[tpitg] = 0.0f;
    for (int i00 = tpitg; i00 < ne00; i00 += ntg) {
        sum[tpitg] += x[i00];
    }
    // reduce
    threadgroup_barrier(mem_flags::mem_threadgroup);
    for (uint i = ntg/2; i > 0; i /= 2) {
        if (tpitg < i) {
            sum[tpitg] += sum[tpitg + i];
        }
        threadgroup_barrier(mem_flags::mem_threadgroup);
    }
    // broadcast
    if (tpitg == 0) {
        sum[0] /= ne00;
    }
    threadgroup_barrier(mem_flags::mem_threadgroup);
    const float mean = sum[0];

    // recenter
    device float * y = dst + tgpig*ne00;
    for (int i00 = tpitg; i00 < ne00; i00 += ntg) {
        y[i00] = x[i00] - mean;
    }

    // VARIANCE
    // parallel sum
    sum[tpitg] = 0.0f;
    for (int i00 = tpitg; i00 < ne00; i00 += ntg) {
        sum[tpitg] += y[i00] * y[i00];
    }
    // reduce
    threadgroup_barrier(mem_flags::mem_threadgroup);
    for (uint i = ntg/2; i > 0; i /= 2) {
        if (tpitg < i) {
            sum[tpitg] += sum[tpitg + i];
        }
        threadgroup_barrier(mem_flags::mem_threadgroup);
    }
    // broadcast
    if (tpitg == 0) {
        sum[0] /= ne00;
    }
    threadgroup_barrier(mem_flags::mem_threadgroup);
    const float variance = sum[0];

    const float scale = 1.0f/sqrt(variance + eps);
    for (int i00 = tpitg; i00 < ne00; i00 += ntg) {
        y[i00] = y[i00] * scale;
    }
}
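// [illustrative note, not part of the synced file] kernel_norm above is
// standard layer normalization, y = (x - mean(x)) / sqrt(var(x) + eps), with
// mean and variance each obtained by a strided partial sum followed by a tree
// reduction in threadgroup memory.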
kernel void kernel_rms_norm(
        device const  void * src0,
        device       float * dst,
        constant   int64_t & ne00,
        constant  uint64_t & nb01,
        constant     float & eps,
        threadgroup float  * sum [[threadgroup(0)]],
        uint tgpig[[threadgroup_position_in_grid]],
        uint tpitg[[thread_position_in_threadgroup]],
        uint   ntg[[threads_per_threadgroup]]) {
    device const float * x = (device const float *) ((device const char *) src0 + tgpig*nb01);

    // parallel sum
    sum[tpitg] = 0.0f;
    for (int i00 = tpitg; i00 < ne00; i00 += ntg) {
        sum[tpitg] += x[i00] * x[i00];
    }

    // reduce
    threadgroup_barrier(mem_flags::mem_threadgroup);
    for (uint i = ntg/2; i > 0; i /= 2) {
        if (tpitg < i) {
            sum[tpitg] += sum[tpitg + i];
        }
        threadgroup_barrier(mem_flags::mem_threadgroup);
    }

    // broadcast
    if (tpitg == 0) {
        sum[0] /= ne00;
    }

    threadgroup_barrier(mem_flags::mem_threadgroup);

    const float mean  = sum[0];
    const float scale = 1.0f/sqrt(mean + eps);

    device float * y = dst + tgpig*ne00;
    for (int i00 = tpitg; i00 < ne00; i00 += ntg) {
        y[i00] = x[i00] * scale;
    }
}
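// [illustrative note, not part of the synced file] kernel_rms_norm skips the
// mean subtraction of kernel_norm and computes y = x / sqrt(mean(x*x) + eps),
// the RMSNorm variant used by LLaMA-style models.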
kernel void kernel_mul_mat_q4_0_f32(
        device const  void * src0,
        device const float * src1,
        device       float * dst,
        constant   int64_t & ne00,
        constant   int64_t & ne10,
        constant   int64_t & ne0,
        threadgroup float  * sum [[threadgroup(0)]],
        uint2 tgpig[[threadgroup_position_in_grid]],
        uint2 tpitg[[thread_position_in_threadgroup]],
        uint2  tptg[[threads_per_threadgroup]]) {
    const int nb = ne00/QK4_0;

    const int64_t r0 = tgpig.x;
    const int64_t r1 = tgpig.y;

    device const block_q4_0 * x = (device const block_q4_0 *) src0 + r0*nb;
    device const float      * y = (device const float      *) src1 + r1*ne10;

    const int nth = tptg.x*tptg.y;
    const int ith = tptg.y*tpitg.x + tpitg.y;

    const int ix = tpitg.y/4;       // 0 or 1
    const int iy = tpitg.y - 4*ix;  // 0...3

    const int first = 4 * iy;

    float sumf = 0;

    for (int i = 2*tpitg.x + ix; i < nb; i += 2*tptg.x) {

        const float d = (float)x[i].d;

        device const uint8_t * xl = x[i].qs + first;
        device const float   * yl = y + i * QK4_0 + first;

        float2 acc = {0.0f, 0.0f};

        for (int j = 0; j < 4; ++j) {

            acc[0] += yl[j] * (xl[j] & 0xF) + yl[j+16] * (xl[j] >> 4);
            acc[1] += yl[j] + yl[j+16];

        }

        sumf += d * (acc[0] - 8.f*acc[1]);
    }

    sum[ith] = sumf;

    //
    // Accumulate the sum from all threads in the threadgroup
    //
    threadgroup_barrier(mem_flags::mem_threadgroup);
    if (ith%4 == 0) {
        sum[ith] += sum[ith+1] + sum[ith+2] + sum[ith+3];
    }
    threadgroup_barrier(mem_flags::mem_threadgroup);
    if (ith%16 == 0) {
        sum[ith] += sum[ith+4] + sum[ith+8] + sum[ith+12];
    }
    threadgroup_barrier(mem_flags::mem_threadgroup);
    if (ith == 0) {
        for (uint i = 16; i < nth; i += 16) sum[0] += sum[i];
        dst[r1*ne0 + r0] = sum[0];
    }
}
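// [illustrative note, not part of the synced file] in kernel_mul_mat_q4_0_f32
// each threadgroup owns one output element dst[r1*ne0 + r0]: tpitg.y splits
// into ix = tpitg.y/4 and iy = tpitg.y%4 so that every thread scans a 4-value
// slice (offset 4*iy) of alternating blocks, and the per-thread partial sums
// in sum[] are folded 4-at-a-time, then 16-at-a-time, before thread 0 writes
// the result.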
kernel void kernel_mul_mat_q4_1_f32(
        device const  void * src0,
        device const float * src1,
        device       float * dst,
        constant   int64_t & ne00,
        constant   int64_t & ne10,
        constant   int64_t & ne0,
        threadgroup float  * sum [[threadgroup(0)]],
        uint2 tgpig[[threadgroup_position_in_grid]],
        uint2 tpitg[[thread_position_in_threadgroup]],
        uint2  tptg[[threads_per_threadgroup]]) {
    const int nb = ne00/QK4_1;

    const int64_t r0 = tgpig.x;
    const int64_t r1 = tgpig.y;

    device const block_q4_1 * x = (device const block_q4_1 *) src0 + r0*nb;
    device const float      * y = (device const float      *) src1 + r1*ne10;

    const uint nth = tptg.x*tptg.y;
    const uint ith = tptg.y*tpitg.x + tpitg.y;

    const int ix = tpitg.y/4;       // 0 or 1
    const int iy = tpitg.y - 4*ix;  // 0...3

    const int first = 4 * iy;

    float sumf = 0;

    for (int i = 2*tpitg.x + ix; i < nb; i += 2*tptg.x) {

        const float d = (float)x[i].d;
        const float m = (float)x[i].m;

        device const uint8_t * xl = x[i].qs + first;
        device const float   * yl = y + i * QK4_1 + first;

        float2 acc = {0.0f, 0.0f};

        for (int j = 0; j < 4; ++j) {

            acc[0] += yl[j+ 0] * (d * (xl[j] & 0xF) + m);
            acc[1] += yl[j+16] * (d * (xl[j] >> 4) + m);

        }

        sumf += acc[0] + acc[1];
    }

    sum[ith] = sumf;

    //
    // Accumulate the sum from all threads in the threadgroup
    //
    threadgroup_barrier(mem_flags::mem_threadgroup);
    if (ith%4 == 0) {
        sum[ith] += sum[ith+1] + sum[ith+2] + sum[ith+3];
    }
    threadgroup_barrier(mem_flags::mem_threadgroup);
    if (ith%16 == 0) {
        sum[ith] += sum[ith+4] + sum[ith+8] + sum[ith+12];
    }
    threadgroup_barrier(mem_flags::mem_threadgroup);
    if (ith == 0) {
        for (int i = 16; i < nth; i += 16) sum[0] += sum[i];
        dst[r1*ne0 + r0] = sum[0];
    }
}

kernel void kernel_mul_mat_f16_f32(
        device const  char * src0,
        device const  char * src1,
        device       float * dst,
        constant   int64_t & ne00,
        constant   int64_t & ne01,
        constant  uint64_t & nb00,
        constant  uint64_t & nb01,
        constant  uint64_t & nb02,
        constant   int64_t & ne10,
        constant   int64_t & ne11,
        constant  uint64_t & nb10,
        constant  uint64_t & nb11,
        constant  uint64_t & nb12,
        constant   int64_t & ne0,
        constant   int64_t & ne1,
        threadgroup float  * sum [[threadgroup(0)]],
        uint3 tgpig[[threadgroup_position_in_grid]],
        uint3  tpig[[thread_position_in_grid]],
        uint3 tpitg[[thread_position_in_threadgroup]],
        uint3  tptg[[threads_per_threadgroup]]) {

    const int64_t r0 = tgpig.x;
    const int64_t r1 = tgpig.y;
    const int64_t im = tgpig.z;

    device const half  * x = (device const half  *) (src0 + r0*nb01 + im*nb02);
    device const float * y = (device const float *) (src1 + r1*nb11 + im*nb12);

    sum[tpitg.x] = 0.0f;

    for (int i = tpitg.x; i < ne00; i += tptg.x) {
        sum[tpitg.x] += (float) x[i] * (float) y[i];
    }

    // accumulate the sum from all threads in the threadgroup
    threadgroup_barrier(mem_flags::mem_threadgroup);
    for (uint i = tptg.x/2; i > 0; i /= 2) {
        if (tpitg.x < i) {
            sum[tpitg.x] += sum[tpitg.x + i];
        }
        threadgroup_barrier(mem_flags::mem_threadgroup);
    }

    if (tpitg.x == 0) {
        dst[im*ne1*ne0 + r1*ne0 + r0] = sum[0];
    }
}

kernel void kernel_alibi_f32(
        device const float * src0,
        device       float * dst,
        constant   int64_t & ne00,
        constant   int64_t & ne01,
        constant   int64_t & ne02,
        constant   int64_t & ne03,
        constant  uint64_t & nb00,
        constant  uint64_t & nb01,
        constant  uint64_t & nb02,
        constant  uint64_t & nb03,
        constant   int64_t & ne0,
        constant   int64_t & ne1,
        constant   int64_t & ne2,
        constant   int64_t & ne3,
        constant  uint64_t & nb0,
        constant  uint64_t & nb1,
        constant  uint64_t & nb2,
        constant  uint64_t & nb3,
        constant     float & m0,
        uint3 tgpig[[threadgroup_position_in_grid]],
        uint3 tpitg[[thread_position_in_threadgroup]],
        uint3   ntg[[threads_per_threadgroup]]) {
    const int64_t i03 = tgpig[2];
    const int64_t i02 = tgpig[1];
    const int64_t i01 = tgpig[0];

    const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00;

    const int64_t i3 = n / (ne2*ne1*ne0);
    const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0);
    const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0;
    const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0);

    device float * dst_data = (device float *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
    float m_k = pow(m0, i2 + 1);
    for (int64_t i00 = tpitg.x; i00 < ne00; i00 += ntg.x) {
        device const float * src = (device float *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00);
        dst_data[i00] = src[0] + m_k * (i00 - ne00 + 1);
    }
}
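// [illustrative note, not part of the synced file] kernel_alibi_f32 implements
// the ALiBi attention bias (Press et al.): each value gets m_k * (i00 - ne00 + 1)
// added, where the per-head slope m_k = pow(m0, i2 + 1) decays geometrically, so
// more distant positions are penalized linearly with distance.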
kernel void kernel_rope(
        device const  void * src0,
        device       float * dst,
        constant   int64_t & ne00,
        constant   int64_t & ne01,
        constant   int64_t & ne02,
        constant   int64_t & ne03,
        constant  uint64_t & nb00,
        constant  uint64_t & nb01,
        constant  uint64_t & nb02,
        constant  uint64_t & nb03,
        constant   int64_t & ne0,
        constant   int64_t & ne1,
        constant   int64_t & ne2,
        constant   int64_t & ne3,
        constant  uint64_t & nb0,
        constant  uint64_t & nb1,
        constant  uint64_t & nb2,
        constant  uint64_t & nb3,
        constant       int & n_past,
        constant       int & n_dims,
        constant       int & mode,
        uint3 tpig[[thread_position_in_grid]]) {
    const int64_t i3 = tpig[2];
    const int64_t i2 = tpig[1];
    const int64_t i1 = tpig[0];

    const bool is_neox = mode & 2;
    const float theta_scale = pow(10000.0, -2.0f/n_dims);

    const int64_t p = ((mode & 1) == 0 ? n_past + i2 : i2);

    float theta = (float)p;

    if (!is_neox) {
        for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
            const float cos_theta = cos(theta);
            const float sin_theta = sin(theta);

            theta *= theta_scale;

            device const float * const src = (device float *)((device char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
            device       float * dst_data  = (device float *)((device char *)  dst + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

            const float x0 = src[0];
            const float x1 = src[1];

            dst_data[0] = x0*cos_theta - x1*sin_theta;
            dst_data[1] = x0*sin_theta + x1*cos_theta;
        }
    } else {
        // TODO: implement
    }
}
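// [illustrative note, not part of the synced file] kernel_rope applies rotary
// position embedding: consecutive pairs (x0, x1) are rotated,
// (x0, x1) -> (x0*cos - x1*sin, x0*sin + x1*cos), by an angle theta that starts
// at the token position p and shrinks by theta_scale = 10000^(-2/n_dims) per
// pair. The GPT-NeoX layout (is_neox) is still a TODO above.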
kernel void kernel_cpy_f16_f16(
        device const half * src0,
        device       half * dst,
        constant  int64_t & ne00,
        constant  int64_t & ne01,
        constant  int64_t & ne02,
        constant  int64_t & ne03,
        constant uint64_t & nb00,
        constant uint64_t & nb01,
        constant uint64_t & nb02,
        constant uint64_t & nb03,
        constant  int64_t & ne0,
        constant  int64_t & ne1,
        constant  int64_t & ne2,
        constant  int64_t & ne3,
        constant uint64_t & nb0,
        constant uint64_t & nb1,
        constant uint64_t & nb2,
        constant uint64_t & nb3,
        uint3 tgpig[[threadgroup_position_in_grid]],
        uint3 tpitg[[thread_position_in_threadgroup]],
        uint3   ntg[[threads_per_threadgroup]]) {
    const int64_t i03 = tgpig[2];
    const int64_t i02 = tgpig[1];
    const int64_t i01 = tgpig[0];

    const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00;

    const int64_t i3 = n / (ne2*ne1*ne0);
    const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0);
    const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0;
    const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0);

    device half * dst_data = (device half *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);

    for (int64_t i00 = tpitg.x; i00 < ne00; i00 += ntg.x) {
        device const half * src = (device half *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00);
        dst_data[i00] = src[0];
    }
}

kernel void kernel_cpy_f32_f16(
        device const float * src0,
        device        half * dst,
        constant   int64_t & ne00,
        constant   int64_t & ne01,
        constant   int64_t & ne02,
        constant   int64_t & ne03,
        constant  uint64_t & nb00,
        constant  uint64_t & nb01,
        constant  uint64_t & nb02,
        constant  uint64_t & nb03,
        constant   int64_t & ne0,
        constant   int64_t & ne1,
        constant   int64_t & ne2,
        constant   int64_t & ne3,
        constant  uint64_t & nb0,
        constant  uint64_t & nb1,
        constant  uint64_t & nb2,
        constant  uint64_t & nb3,
        uint3 tgpig[[threadgroup_position_in_grid]],
        uint3 tpitg[[thread_position_in_threadgroup]],
        uint3   ntg[[threads_per_threadgroup]]) {
    const int64_t i03 = tgpig[2];
    const int64_t i02 = tgpig[1];
    const int64_t i01 = tgpig[0];

    const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00;

    const int64_t i3 = n / (ne2*ne1*ne0);
    const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0);
    const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0;
    const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0);

    device half * dst_data = (device half *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);

    for (int64_t i00 = tpitg.x; i00 < ne00; i00 += ntg.x) {
        device const float * src = (device float *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00);

        dst_data[i00] = src[0];
    }
}

kernel void kernel_cpy_f32_f32(
        device const float * src0,
        device       float * dst,
        constant   int64_t & ne00,
        constant   int64_t & ne01,
        constant   int64_t & ne02,
        constant   int64_t & ne03,
        constant  uint64_t & nb00,
        constant  uint64_t & nb01,
        constant  uint64_t & nb02,
        constant  uint64_t & nb03,
        constant   int64_t & ne0,
        constant   int64_t & ne1,
        constant   int64_t & ne2,
        constant   int64_t & ne3,
        constant  uint64_t & nb0,
        constant  uint64_t & nb1,
        constant  uint64_t & nb2,
        constant  uint64_t & nb3,
        uint3 tgpig[[threadgroup_position_in_grid]],
        uint3 tpitg[[thread_position_in_threadgroup]],
        uint3   ntg[[threads_per_threadgroup]]) {
    const int64_t i03 = tgpig[2];
    const int64_t i02 = tgpig[1];
    const int64_t i01 = tgpig[0];

    const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00;

    const int64_t i3 = n / (ne2*ne1*ne0);
    const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0);
    const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0;
    const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0);

    device float * dst_data = (device float *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);

    for (int64_t i00 = tpitg.x; i00 < ne00; i00 += ntg.x) {
        device const float * src = (device float *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00);

        dst_data[i00] = src[0];
    }
}

//============================================ k-quants ======================================================

#define QK_K 256

typedef struct {
    uint8_t scales[QK_K/16]; // scales and mins, quantized with 4 bits
    uint8_t qs[QK_K/4];      // quants
    half d;                  // super-block scale for quantized scales
    half dmin;               // super-block scale for quantized mins
} block_q2_k;
// 84 bytes / block

typedef struct {
    uint8_t hmask[QK_K/8];     // quants - high bit
    uint8_t qs[QK_K/4];        // quants - low 2 bits
    uint8_t scales[3*QK_K/64]; // scales, quantized with 6 bits
    half d;                    // super-block scale
} block_q3_k;
// 110 bytes / block

typedef struct {
    half d;                    // super-block scale for quantized scales
    half dmin;                 // super-block scale for quantized mins
    uint8_t scales[3*QK_K/64]; // scales and mins, quantized with 6 bits
    uint8_t qs[QK_K/2];        // 4-bit quants
} block_q4_k;
// 144 bytes / block

typedef struct {
    half d;                    // super-block scale for quantized scales
    half dmin;                 // super-block scale for quantized mins
    uint8_t scales[3*QK_K/64]; // scales and mins, quantized with 6 bits
    uint8_t qh[QK_K/8];        // quants, high bit
    uint8_t qs[QK_K/2];        // quants, low 4 bits
} block_q5_k;
// 176 bytes / block

typedef struct {
    uint8_t ql[QK_K/2];     // quants, lower 4 bits
    uint8_t qh[QK_K/4];     // quants, upper 2 bits
    int8_t  scales[QK_K/16]; // scales, quantized with 8 bits
    half d;                 // super-block scale
} block_q6_k;
// 210 bytes / block
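// [illustrative note, not part of the synced file] the k-quant formats above
// share one super-block layout: QK_K = 256 weights per block, subdivided into
// groups of 16 or 32 values whose per-group scales (and mins, where present)
// are themselves quantized and rescaled by the fp16 super-block factors d and
// dmin -- hence the 84/110/144/176/210 bytes-per-block figures noted next to
// each struct.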
+
static inline uchar4 get_scale_min_k4(int j, device const uint8_t * q) {
|
| 822 |
+
uchar4 r;
|
| 823 |
+
if (j < 4) {
|
| 824 |
+
r[0] = q[j+0] & 63;
|
| 825 |
+
r[2] = q[j+1] & 63;
|
| 826 |
+
r[1] = q[j+4] & 63;
|
| 827 |
+
r[3] = q[j+5] & 63;
|
| 828 |
+
} else {
|
| 829 |
+
r[0] = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4);
|
| 830 |
+
r[2] = (q[j+5] & 0xF) | ((q[j-3] >> 6) << 4);
|
| 831 |
+
r[1] = (q[j+4] >> 4) | ((q[j-0] >> 6) << 4);
|
| 832 |
+
r[3] = (q[j+5] >> 4) | ((q[j+1] >> 6) << 4);
|
| 833 |
+
}
|
| 834 |
+
return r;
|
| 835 |
+
}
|
| 836 |
+
|
| 837 |
+
//========================================== dequantization =============================
|
| 838 |
+
|
| 839 |
+
static void dequantize_row_q2_k(device const block_q2_k * x, device float * y, int k) {
|
| 840 |
+
assert(k % QK_K == 0);
|
| 841 |
+
const int nb = k / QK_K;
|
| 842 |
+
|
| 843 |
+
for (int i = 0; i < nb; i++) {
|
| 844 |
+
|
| 845 |
+
const float d = x[i].d;
|
| 846 |
+
const float min = x[i].dmin;
|
| 847 |
+
|
| 848 |
+
device const uint8_t * q = x[i].qs;
|
| 849 |
+
|
| 850 |
+
int is = 0;
|
| 851 |
+
float dl, ml;
|
| 852 |
+
for (int n = 0; n < QK_K; n += 128) {
|
| 853 |
+
int shift = 0;
|
| 854 |
+
for (int j = 0; j < 4; ++j) {
|
| 855 |
+
|
| 856 |
+
uint8_t sc = x[i].scales[is++];
|
| 857 |
+
dl = d * (sc & 0xF); ml = min * (sc >> 4);
|
| 858 |
+
for (int l = 0; l < 16; ++l) *y++ = dl * ((int8_t)((q[l] >> shift) & 3)) - ml;
|
| 859 |
+
|
| 860 |
+
sc = x[i].scales[is++];
|
| 861 |
+
dl = d * (sc & 0xF); ml = min * (sc >> 4);
|
| 862 |
+
for (int l = 0; l < 16; ++l) *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3)) - ml;
|
| 863 |
+
|
| 864 |
+
shift += 2;
|
| 865 |
+
}
|
| 866 |
+
q += 32;
|
| 867 |
+
}
|
| 868 |
+
|
| 869 |
+
}
|
| 870 |
+
}
|
| 871 |
+
|
| 872 |
+
static void dequantize_row_q3_k(device const block_q3_k * x, device float * y, int k) {
|
| 873 |
+
assert(k % QK_K == 0);
|
| 874 |
+
const int nb = k / QK_K;
|
| 875 |
+
|
| 876 |
+
const uint16_t kmask1 = 0x0303;
|
| 877 |
+
const uint16_t kmask2 = 0x0f0f;
|
| 878 |
+
|
| 879 |
+
uint16_t aux[8];
|
| 880 |
+
thread const int8_t * scales = (thread const int8_t*)aux;
|
| 881 |
+
|
| 882 |
+
for (int i = 0; i < nb; i++) {
|
| 883 |
+
|
| 884 |
+
const float d_all = (float)(x[i].d);
|
| 885 |
+
|
| 886 |
+
device const uint8_t * q = x[i].qs;
|
| 887 |
+
device const uint8_t * h = x[i].hmask;
|
| 888 |
+
uint8_t m = 1;
|
| 889 |
+
|
| 890 |
+
device const uint16_t * a = (device const uint16_t *)x[i].scales;
|
| 891 |
+
aux[0] = (a[0] & kmask2) | (((a[4] >> 0) & kmask1) << 4);
|
| 892 |
+
aux[1] = (a[1] & kmask2) | (((a[5] >> 0) & kmask1) << 4);
|
| 893 |
+
aux[2] = (a[2] & kmask2) | (((a[4] >> 2) & kmask1) << 4);
|
| 894 |
+
aux[3] = (a[3] & kmask2) | (((a[5] >> 2) & kmask1) << 4);
|
| 895 |
+
aux[4] = ((a[0] >> 4) & kmask2) | (((a[4] >> 4) & kmask1) << 4);
|
| 896 |
+
aux[5] = ((a[1] >> 4) & kmask2) | (((a[5] >> 4) & kmask1) << 4);
|
| 897 |
+
aux[6] = ((a[2] >> 4) & kmask2) | (((a[4] >> 6) & kmask1) << 4);
|
| 898 |
+
aux[7] = ((a[3] >> 4) & kmask2) | (((a[5] >> 6) & kmask1) << 4);
|
| 899 |
+
|
| 900 |
+
int is = 0;
|
| 901 |
+
float dl;
|
| 902 |
+
for (int n = 0; n < QK_K; n += 128) {
|
| 903 |
+
int shift = 0;
|
| 904 |
+
for (int j = 0; j < 4; ++j) {
|
| 905 |
+
|
| 906 |
+
dl = d_all * (scales[is++] - 32);
|
| 907 |
+
for (int l = 0; l < 16; ++l) {
|
| 908 |
+
*y++ = dl * ((int8_t)((q[l+ 0] >> shift) & 3) - ((h[l+ 0] & m) ? 0 : 4));
|
| 909 |
+
}
|
| 910 |
+
|
| 911 |
+
dl = d_all * (scales[is++] - 32);
|
| 912 |
+
for (int l = 0; l < 16; ++l) {
|
| 913 |
+
*y++ = dl * ((int8_t)((q[l+16] >> shift) & 3) - ((h[l+16] & m) ? 0 : 4));
|
| 914 |
+
}
|
| 915 |
+
|
| 916 |
+
shift += 2;
|
| 917 |
+
m <<= 1;
|
| 918 |
+
}
|
| 919 |
+
q += 32;
|
| 920 |
+
}
|
| 921 |
+
|
| 922 |
+
}
|
| 923 |
+
|
| 924 |
+
}
|
| 925 |
+
|
| 926 |
+
static void dequantize_row_q4_k(device const block_q4_k * x, device float * y, int k) {
|
| 927 |
+
assert(k % QK_K == 0);
|
| 928 |
+
const int nb = k / QK_K;
|
| 929 |
+
|
| 930 |
+
|
| 931 |
+
for (int i = 0; i < nb; i++) {
|
| 932 |
+
|
| 933 |
+
const float d = x[i].d;
|
| 934 |
+
const float min = x[i].dmin;
|
| 935 |
+
|
| 936 |
+
device const uint8_t * q = x[i].qs;
|
| 937 |
+
device const uint8_t * scales = x[i].scales;
|
| 938 |
+
|
| 939 |
+
int is = 0;
|
| 940 |
+
for (int j = 0; j < QK_K; j += 64) {
|
| 941 |
+
const uchar4 sc = get_scale_min_k4(is, scales);
|
| 942 |
+
const float d1 = d * sc[0]; const float m1 = min * sc[1];
|
| 943 |
+
const float d2 = d * sc[2]; const float m2 = min * sc[3];
|
| 944 |
+
for (int l = 0; l < 32; ++l) *y++ = d1 * (q[l] & 0xF) - m1;
|
| 945 |
+
for (int l = 0; l < 32; ++l) *y++ = d2 * (q[l] >> 4) - m2;
|
| 946 |
+
q += 32; is += 2;
|
| 947 |
+
}
|
| 948 |
+
|
| 949 |
+
}
|
| 950 |
+
}
|
| 951 |
+
|
| 952 |
+
static void dequantize_row_q5_k(device const block_q5_k * x, device float * y, int k) {
|
| 953 |
+
assert(k % QK_K == 0);
|
| 954 |
+
const int nb = k / QK_K;
|
| 955 |
+
|
| 956 |
+
for (int i = 0; i < nb; i++) {
|
| 957 |
+
|
| 958 |
+
const float d = (float)(x[i].d);
|
| 959 |
+
const float min = (float)(x[i].dmin);
|
| 960 |
+
|
| 961 |
+
device const uint8_t * ql = x[i].qs;
|
| 962 |
+
device const uint8_t * qh = x[i].qh;
|
| 963 |
+
|
| 964 |
+
int is = 0;
|
| 965 |
+
uint8_t u1 = 1, u2 = 2;
|
| 966 |
+
for (int j = 0; j < QK_K; j += 64) {
|
| 967 |
+
const uchar4 sc = get_scale_min_k4(is, x[i].scales);
|
| 968 |
+
const float d1 = d * sc[0]; const float m1 = min * sc[1];
|
| 969 |
+
const float d2 = d * sc[2]; const float m2 = min * sc[3];
|
| 970 |
+
for (int l = 0; l < 32; ++l) *y++ = d1 * ((ql[l] & 0xF) + (qh[l] & u1 ? 16 : 0)) - m1;
|
| 971 |
+
for (int l = 0; l < 32; ++l) *y++ = d2 * ((ql[l] >> 4) + (qh[l] & u2 ? 16 : 0)) - m2;
|
| 972 |
+
ql += 32; is += 2;
|
| 973 |
+
u1 <<= 2; u2 <<= 2;
|
| 974 |
+
}
|
| 975 |
+
}
|
| 976 |
+
|
| 977 |
+
}
|
| 978 |
+
|
| 979 |
+
static void dequantize_row_q6_k(device const block_q6_k * x, device float * y, int k) {
|
| 980 |
+
assert(k % QK_K == 0);
|
| 981 |
+
const int nb = k / QK_K;
|
| 982 |
+
|
| 983 |
+
for (int i = 0; i < nb; i++) {
|
| 984 |
+
|
| 985 |
+
device const uint8_t * ql = x[i].ql;
|
| 986 |
+
device const uint8_t * qh = x[i].qh;
|
| 987 |
+
device const int8_t * sc = x[i].scales;
|
| 988 |
+
|
| 989 |
+
const float d = x[i].d;
|
| 990 |
+
|
| 991 |
+
for (int n = 0; n < QK_K; n += 128) {
|
| 992 |
+
for (int l = 0; l < 32; ++l) {
|
| 993 |
+
int is = l/16;
|
| 994 |
+
const int8_t q1 = (int8_t)((ql[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
|
| 995 |
+
const int8_t q2 = (int8_t)((ql[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
|
| 996 |
+
const int8_t q3 = (int8_t)((ql[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
|
| 997 |
+
const int8_t q4 = (int8_t)((ql[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
|
| 998 |
+
y[l + 0] = d * sc[is + 0] * q1;
|
| 999 |
+
y[l + 32] = d * sc[is + 2] * q2;
|
| 1000 |
+
y[l + 64] = d * sc[is + 4] * q3;
|
| 1001 |
+
y[l + 96] = d * sc[is + 6] * q4;
|
| 1002 |
+
}
|
| 1003 |
+
y += 128;
|
| 1004 |
+
ql += 64;
|
| 1005 |
+
qh += 32;
|
| 1006 |
+
sc += 8;
|
| 1007 |
+
}
|
| 1008 |
+
}
|
| 1009 |
+
}
|
| 1010 |
+
|
kernel void kernel_get_rows_q2_k(
        device const  void * src0,
        device const   int * src1,
        device       float * dst,
        constant   int64_t & ne00,
        constant  uint64_t & nb01,
        constant  uint64_t & nb1,
        uint tpig[[thread_position_in_grid]]) {
    const int i = tpig;
    const int r = ((device int32_t *) src1)[i];

    dequantize_row_q2_k(
            (device const block_q2_k *) ((device char *) src0 + r*nb01),
                       (device float *) ((device char *)  dst + i*nb1), ne00);
}

kernel void kernel_get_rows_q3_k(
        device const  void * src0,
        device const   int * src1,
        device       float * dst,
        constant   int64_t & ne00,
        constant  uint64_t & nb01,
        constant  uint64_t & nb1,
        uint tpig[[thread_position_in_grid]]) {
    const int i = tpig;
    const int r = ((device int32_t *) src1)[i];

    dequantize_row_q3_k(
            (device const block_q3_k *) ((device char *) src0 + r*nb01),
                       (device float *) ((device char *)  dst + i*nb1), ne00);
}

kernel void kernel_get_rows_q4_k(
        device const  void * src0,
        device const   int * src1,
        device       float * dst,
        constant   int64_t & ne00,
        constant  uint64_t & nb01,
        constant  uint64_t & nb1,
        uint tpig[[thread_position_in_grid]]) {
    const int i = tpig;
    const int r = ((device int32_t *) src1)[i];

    dequantize_row_q4_k(
            (device const block_q4_k *) ((device char *) src0 + r*nb01),
                       (device float *) ((device char *)  dst + i*nb1), ne00);
}

kernel void kernel_get_rows_q5_k(
        device const  void * src0,
        device const   int * src1,
        device       float * dst,
        constant   int64_t & ne00,
        constant  uint64_t & nb01,
        constant  uint64_t & nb1,
        uint tpig[[thread_position_in_grid]]) {
    const int i = tpig;
    const int r = ((device int32_t *) src1)[i];

    dequantize_row_q5_k(
            (device const block_q5_k *) ((device char *) src0 + r*nb01),
                       (device float *) ((device char *)  dst + i*nb1), ne00);
}

kernel void kernel_get_rows_q6_k(
        device const  void * src0,
        device const   int * src1,
        device       float * dst,
        constant   int64_t & ne00,
        constant  uint64_t & nb01,
        constant  uint64_t & nb1,
        uint tpig[[thread_position_in_grid]]) {
    const int i = tpig;
    const int r = ((device int32_t *) src1)[i];

    dequantize_row_q6_k(
            (device const block_q6_k *) ((device char *) src0 + r*nb01),
                       (device float *) ((device char *)  dst + i*nb1), ne00);
}

//====================================== dot products =========================

kernel void kernel_mul_mat_q2_k_f32(
        device const  void * src0,
        device const float * src1,
        device       float * dst,
        constant   int64_t & ne00,
        constant   int64_t & ne10,
        constant   int64_t & ne0,
        threadgroup float  * sum [[threadgroup(0)]],
        uint2 tgpig[[threadgroup_position_in_grid]],
        uint2 tpitg[[thread_position_in_threadgroup]],
        uint2  tptg[[threads_per_threadgroup]]) {

    const int nb = ne00/QK_K;

    const int64_t r0 = tgpig.x;
    const int64_t r1 = tgpig.y;

    device const block_q2_k * x = (device const block_q2_k *) src0 + r0*nb;
    device const float     * yy = (device const float      *) src1 + r1*ne10;

    const int nth = tptg.x*tptg.y;
    const int ith = tptg.y*tpitg.x + tpitg.y;

    const int tid = tpitg.y;    // 0...16
    const int il  = tid/4;      // 0...3
    const int ir  = tid%4;      // 0...3
    const int ip  = il/2;       // 0 or 1
    const int shift1 = 4*(il%2);// 0 or 4
    const int shift2 = shift1+2;// 2 or 6
    const int n   = 8;
    const int is  = 4*il + (n*ir)/16;

    const int y_offset = 64*il + n*ir;
    const int q_offset = 32*ip + n*ir;

    sum[ith] = 0.0f;

    float sumf = 0;
    for (int i = tpitg.x; i < nb; i += tptg.x) {

        device const uint8_t * q = x[i].qs + q_offset;
        device const uint8_t * scales = x[i].scales + is;

        uint8_t d1 = scales[0] & 0xF;
        uint8_t d2 = scales[2] & 0xF;
        uint8_t m1 = scales[0] >> 4;
        uint8_t m2 = scales[2] >> 4;

        device const float * y = yy + i*QK_K + y_offset;

        //float4 s = {0.f, 0.f, 0.f, 0.f};
        float2 s = {0.f, 0.f};
        float smin = 0;
        for (int l = 0; l < n; ++l) {
            s[0] += y[l+ 0] * ((q[l] >> shift1) & 3);
            s[1] += y[l+32] * ((q[l] >> shift2) & 3);
            smin += y[l+ 0] * m1 + y[l+32] * m2;
        }

        const float dall = (float)x[i].d;
        const float dmin = (float)x[i].dmin;

        sumf += dall * (s[0] * d1 + s[1] * d2) - dmin * smin;

    }
    sum[ith] = sumf;

    //int mask1 = (ith%4 == 0);
    //int mask2 = (ith%16 == 0);

    //threadgroup_barrier(mem_flags::mem_threadgroup);
    //for (int i = 1; i < 4; ++i) sum[ith] += mask1 * sum[ith + i];
    //threadgroup_barrier(mem_flags::mem_threadgroup);
    //for (int i = 4; i < 16; i += 4) sum[ith] += mask2 * sum[ith + i];
    //threadgroup_barrier(mem_flags::mem_threadgroup);
    //if (ith == 0) {
    //    for (int i = 16; i < nth; i += 16) sum[0] += sum[i];
    //    dst[r1*ne0 + r0] = sum[0];
    //}

    //
    // Accumulate the sum from all threads in the threadgroup
    // This version is slightly faster than the commented out one below,
    // which I copy-pasted from ggerganov's q4_0 dot product for metal.
    //
    threadgroup_barrier(mem_flags::mem_threadgroup);
    if (ith%4 == 0) {
        for (int i = 1; i < 4; ++i) sum[ith] += sum[ith + i];
    }
    threadgroup_barrier(mem_flags::mem_threadgroup);
    if (ith%16 == 0) {
        for (int i = 4; i < 16; i += 4) sum[ith] += sum[ith + i];
    }
    threadgroup_barrier(mem_flags::mem_threadgroup);
    if (ith == 0) {
        for (int i = 16; i < nth; i += 16) sum[0] += sum[i];
        dst[r1*ne0 + r0] = sum[0];
    }
}
| 1193 |
+
kernel void kernel_mul_mat_q3_k_f32(
|
| 1194 |
+
device const void * src0,
|
| 1195 |
+
device const float * src1,
|
| 1196 |
+
device float * dst,
|
| 1197 |
+
constant int64_t & ne00,
|
| 1198 |
+
constant int64_t & ne10,
|
| 1199 |
+
constant int64_t & ne0,
|
| 1200 |
+
constant int64_t & ne1,
|
| 1201 |
+
threadgroup float * sum [[threadgroup(0)]],
|
| 1202 |
+
uint2 tgpig[[threadgroup_position_in_grid]],
|
| 1203 |
+
uint2 tpitg[[thread_position_in_threadgroup]],
|
| 1204 |
+
uint2 tptg[[threads_per_threadgroup]]) {
|
| 1205 |
+
|
| 1206 |
+
const uint16_t kmask1 = 0x0303;
|
| 1207 |
+
const uint16_t kmask2 = 0x0f0f;
|
| 1208 |
+
|
| 1209 |
+
const uint8_t m3 = 3;
|
| 1210 |
+
const int8_t m4 = 4;
|
| 1211 |
+
|
| 1212 |
+
const int nb = ne00/QK_K;
|
| 1213 |
+
|
| 1214 |
+
const int64_t r0 = tgpig.x;
|
| 1215 |
+
const int64_t r1 = tgpig.y;
|
| 1216 |
+
|
| 1217 |
+
device const block_q3_k * x = (device const block_q3_k *) src0 + r0*nb;
|
| 1218 |
+
device const float * yy = (device const float *) src1 + r1*ne10;
|
| 1219 |
+
|
| 1220 |
+
const int nth = tptg.x*tptg.y;
|
| 1221 |
+
const int ith = tptg.y*tpitg.x + tpitg.y;
|
| 1222 |
+
|
| 1223 |
+
const int tid = tpitg.y; // expecting 16
|
| 1224 |
+
const int ip = tid/8; // 0 or 1
|
| 1225 |
+
const int il = tid/2 - 4*ip; // 0...3
|
| 1226 |
+
const int ir = tid%2;
|
| 1227 |
+
const int n = 8;
|
| 1228 |
+
const int l0 = n*ir;
|
| 1229 |
+
|
| 1230 |
+
const uint8_t m = 1 << (4*ip + il);
|
| 1231 |
+
|
| 1232 |
+
const int shift = 2*il;
|
| 1233 |
+
|
| 1234 |
+
const uint16_t s_shift1 = 4*ip;
|
| 1235 |
+
const uint16_t s_shift2 = s_shift1 + 2*(il/2);
|
| 1236 |
+
const int ik = 4 + (il%2);
|
| 1237 |
+
|
| 1238 |
+
const int q_offset = 32*ip + l0;
|
| 1239 |
+
const int y_offset = 128*ip + 32*il + l0;
|
| 1240 |
+
|
| 1241 |
+
//float sumf = 0;
|
| 1242 |
+
float sumf1 = 0, sumf2 = 0;
|
| 1243 |
+
for (int i = tpitg.x; i < nb; i += tptg.x) {
|
| 1244 |
+
|
| 1245 |
+
const float d_all = (float)(x[i].d);
|
| 1246 |
+
|
| 1247 |
+
device const uint8_t * q = x[i].qs + q_offset;
|
| 1248 |
+
device const uint8_t * h = x[i].hmask + l0;
|
| 1249 |
+
device const float * y = yy + i * QK_K + y_offset;
|
| 1250 |
+
|
| 1251 |
+
device const uint16_t * a = (device const uint16_t *)x[i].scales;
|
| 1252 |
+
const char2 scales = as_type<char2>((uint16_t)(((a[il] >> s_shift1) & kmask2) | (((a[ik] >> s_shift2) & kmask1) << 4)));
|
| 1253 |
+
|
| 1254 |
+
float s = 0;
|
| 1255 |
+
for (int l = 0; l < n; ++l) {
|
| 1256 |
+
s += y[l+ 0] * ((int8_t)((q[l+ 0] >> shift) & m3) - ((h[l+ 0] & m) ? 0 : m4));
|
| 1257 |
+
}
|
| 1258 |
+
float d = d_all * s;
|
| 1259 |
+
sumf1 += d * scales[0];
|
| 1260 |
+
sumf2 += d;
|
| 1261 |
+
//sumf += d_all * s * (scales[0] - 32);
|
| 1262 |
+
|
| 1263 |
+
s = 0;
|
| 1264 |
+
for (int l = 0; l < n; ++l) {
|
| 1265 |
+
s += y[l+16] * ((int8_t)((q[l+16] >> shift) & m3) - ((h[l+16] & m) ? 0 : m4));
|
| 1266 |
+
}
|
| 1267 |
+
d = d_all * s;
|
| 1268 |
+
sumf1 += d * scales[1];
|
| 1269 |
+
sumf2 += d;
|
| 1270 |
+
//sumf += d_all * s * (scales[1] - 32);
|
| 1271 |
+
|
| 1272 |
+
}
|
| 1273 |
+
|
| 1274 |
+
//sum[ith] = sumf;
|
| 1275 |
+
sum[ith] = sumf1 - 32.f*sumf2;
|
| 1276 |
+
|
| 1277 |
+
//
|
| 1278 |
+
// Accumulate the sum from all threads in the threadgroup
|
| 1279 |
+
//
|
| 1280 |
+
threadgroup_barrier(mem_flags::mem_threadgroup);
|
| 1281 |
+
if (ith%4 == 0) {
|
| 1282 |
+
for (int i = 1; i < 4; ++i) sum[ith] += sum[ith + i];
|
| 1283 |
+
}
|
| 1284 |
+
threadgroup_barrier(mem_flags::mem_threadgroup);
|
| 1285 |
+
if (ith%16 == 0) {
|
| 1286 |
+
for (int i = 4; i < 16; i += 4) sum[ith] += sum[ith + i];
|
| 1287 |
+
}
|
| 1288 |
+
threadgroup_barrier(mem_flags::mem_threadgroup);
|
| 1289 |
+
if (ith == 0) {
|
| 1290 |
+
for (int i = 16; i < nth; i += 16) sum[0] += sum[i];
|
| 1291 |
+
dst[r1*ne0 + r0] = sum[0];
|
| 1292 |
+
}
|
| 1293 |
+
|
| 1294 |
+
}
|
| 1295 |
+
|
kernel void kernel_mul_mat_q4_k_f32(
        device const  void * src0,
        device const float * src1,
        device       float * dst,
        constant   int64_t & ne00,
        constant   int64_t & ne10,
        constant   int64_t & ne0,
        threadgroup float  * sum [[threadgroup(0)]],
        uint2 tgpig[[threadgroup_position_in_grid]],
        uint2 tpitg[[thread_position_in_threadgroup]],
        uint2  tptg[[threads_per_threadgroup]]) {

    const uint16_t kmask1 = 0x3f3f;
    const uint16_t kmask2 = 0x0f0f;
    const uint16_t kmask3 = 0xc0c0;

    const int nb = ne00/QK_K;

    const int64_t r0 = tgpig.x;
    const int64_t r1 = tgpig.y;

    device const block_q4_k * x = (device const block_q4_k *) src0 + r0*nb;
    device const float     * yy = (device const float      *) src1 + r1*ne10;

    const int nth = tptg.x*tptg.y;
    const int ith = tptg.y*tpitg.x + tpitg.y;

    const int tid = tpitg.y;   // 0...15
    const int il  = tid/4;     // 0...3
    const int ir  = tid - 4*il;// 0...3
    const int n   = 4;

    const int im = il/2;  // 0 or 1. 0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224
    const int in = il%2;

    const int l0 = n*(2*ir + in);
    const int q_offset = 32*im + l0;
    const int y_offset = 64*im + l0;

    sum[ith] = 0.0f;

    uchar2 sc1, sc2, sc3, sc4;

    float sumf = 0;
    for (int i = tpitg.x; i < nb; i += tptg.x) {

        device const uint8_t * q1 = (x + i)->qs + q_offset;
        device const uint8_t * q2 = q1 + 64;
        device const float   * y1 = yy + i*QK_K + y_offset;
        device const float   * y2 = y1 + 128;

        const float dall = (float)((x + i)->d);
        const float dmin = (float)((x + i)->dmin);

        device const uint16_t * a = (device const uint16_t *)(x + i)->scales;
        sc1 = as_type<uchar2>((uint16_t)(a[im+0] & kmask1));
        sc2 = as_type<uchar2>((uint16_t)(a[im+2] & kmask1));
        sc3 = as_type<uchar2>((uint16_t)(((a[im+4] >> 0) & kmask2) | ((a[im+0] & kmask3) >> 2)));
        sc4 = as_type<uchar2>((uint16_t)(((a[im+4] >> 4) & kmask2) | ((a[im+2] & kmask3) >> 2)));

        float4 s = {0.f, 0.f, 0.f, 0.f};
        float smin = 0;
        for (int l = 0; l < n; ++l) {

            s[0] += y1[l] * (q1[l] & 0xF); s[1] += y1[l+32] * (q1[l] >> 4);
            s[2] += y2[l] * (q2[l] & 0xF); s[3] += y2[l+32] * (q2[l] >> 4);
            smin += y1[l] * sc2[0] + y1[l+32] * sc2[1] + y2[l] * sc4[0] + y2[l+32] * sc4[1];

        }
        sumf += dall * (s[0] * sc1[0] + s[1] * sc1[1] + s[2] * sc3[0] + s[3] * sc3[1]) - dmin * smin;

    }

    sum[ith] = sumf;

    //
    // Accumulate the sum from all threads in the threadgroup
    // This version is slightly faster than the commented out one below,
    // which I copy-pasted from ggerganov's q4_0 dot product for metal.
    //
    threadgroup_barrier(mem_flags::mem_threadgroup);
    if (ith%4 == 0) {
        for (int i = 1; i < 4; ++i) sum[ith] += sum[ith + i];
    }
    threadgroup_barrier(mem_flags::mem_threadgroup);
    if (ith%16 == 0) {
        for (int i = 4; i < 16; i += 4) sum[ith] += sum[ith + i];
    }
    threadgroup_barrier(mem_flags::mem_threadgroup);
    if (ith == 0) {
        for (int i = 16; i < nth; i += 16) sum[0] += sum[i];
        dst[r1*ne0 + r0] = sum[0];
    }

    //// accumulate the sum from all threads in the threadgroup
    //threadgroup_barrier(mem_flags::mem_threadgroup);
    //for (uint i = nth/2; i > 0; i /= 2) {
    //    if (ith < i) {
    //        sum[ith] += sum[ith + i];
    //    }
    //    threadgroup_barrier(mem_flags::mem_threadgroup);
    //}

    //if (ith == 0) {
    //    dst[r1*ne0 + r0] = sum[0];
    //}
}

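Note on the four `as_type<uchar2>` lines: they pull two sub-block scales (`sc1`, `sc3`) and two mins (`sc2`, `sc4`) at a time out of the 12 packed `scales` bytes, with `kmask1`/`kmask2`/`kmask3` selecting the 6-bit fields. A scalar C sketch of the same packing, one sub-block at a time (hypothetical helper following the reference k-quants layout; `q` is assumed to point at the 12 scale bytes of one super-block):

#include <stdint.h>

// sub-blocks 0..3: scale/min live in the low 6 bits of bytes 0..3 / 4..7;
// sub-blocks 4..7: nibbles of bytes 8..11 plus the spare top 2 bits of bytes 0..7
static void get_scale_min(int j, const uint8_t * q, uint8_t * sc, uint8_t * mn) {
    if (j < 4) {
        *sc = q[j]     & 63;
        *mn = q[j + 4] & 63;
    } else {
        *sc = (q[j + 4] & 0x0F) | ((q[j - 4] >> 6) << 4);
        *mn = (q[j + 4] >>   4) | ((q[j    ] >> 6) << 4);
    }
}

The kernel's uint16 view does the `j` and `j+1` cases of this sketch in a single masked load, which is why the masks are byte-pair patterns such as 0x3f3f.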
kernel void kernel_mul_mat_q5_k_f32(
        device const  void * src0,
        device const float * src1,
        device       float * dst,
        constant   int64_t & ne00,
        constant   int64_t & ne10,
        constant   int64_t & ne0,
        threadgroup float  * sum [[threadgroup(0)]],
        uint2 tgpig[[threadgroup_position_in_grid]],
        uint2 tpitg[[thread_position_in_threadgroup]],
        uint2  tptg[[threads_per_threadgroup]]) {

    const uint16_t kmask1 = 0x3f3f;
    const uint16_t kmask2 = 0x0f0f;
    const uint16_t kmask3 = 0xc0c0;

    const int nb = ne00/QK_K;

    const int64_t r0 = tgpig.x;
    const int64_t r1 = tgpig.y;

    device const block_q5_k * x = (device const block_q5_k *) src0 + r0*nb;
    device const float     * yy = (device const float      *) src1 + r1*ne10;

    const int nth = tptg.x*tptg.y;
    const int ith = tptg.y*tpitg.x + tpitg.y;

    const int tid = tpitg.y;   // 0...15
    const int il  = tid/4;     // 0...3
    const int ir  = tid - 4*il;// 0...3
    const int n   = 4;

    const int im = il/2;  // 0 or 1. 0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224
    const int in = il%2;

    const int l0 = n*(2*ir + in);
    const int q_offset = 32*im + l0;
    const int y_offset = 64*im + l0;

    const uint8_t hm1 = 1u << (2*im);
    const uint8_t hm2 = hm1 << 1;
    const uint8_t hm3 = hm1 << 4;
    const uint8_t hm4 = hm2 << 4;

    uchar2 sc1, sc2, sc3, sc4;

    float sumf = 0;
    for (int i = tpitg.x; i < nb; i += tptg.x) {

        device const uint8_t * q1 = (x + i)->qs + q_offset;
        device const uint8_t * q2 = q1 + 64;
        device const uint8_t * qh = (x + i)->qh + l0;
        device const float   * y1 = yy + i*QK_K + y_offset;
        device const float   * y2 = y1 + 128;

        const float dall = (float)((x + i)->d);
        const float dmin = (float)((x + i)->dmin);

        device const uint16_t * a = (device const uint16_t *)(x + i)->scales;
        sc1 = as_type<uchar2>((uint16_t)(a[im+0] & kmask1));
        sc2 = as_type<uchar2>((uint16_t)(a[im+2] & kmask1));
        sc3 = as_type<uchar2>((uint16_t)(((a[im+4] >> 0) & kmask2) | ((a[im+0] & kmask3) >> 2)));
        sc4 = as_type<uchar2>((uint16_t)(((a[im+4] >> 4) & kmask2) | ((a[im+2] & kmask3) >> 2)));

        float4 s = {0.f, 0.f, 0.f, 0.f};
        float smin = 0;
        for (int l = 0; l < n; ++l) {

            s[0] += y1[l+ 0] * ((q1[l] & 0xF) + (qh[l] & hm1 ? 16 : 0));
            s[1] += y1[l+32] * ((q1[l] >>  4) + (qh[l] & hm2 ? 16 : 0));
            s[2] += y2[l+ 0] * ((q2[l] & 0xF) + (qh[l] & hm3 ? 16 : 0));
            s[3] += y2[l+32] * ((q2[l] >>  4) + (qh[l] & hm4 ? 16 : 0));
            smin += y1[l] * sc2[0] + y1[l+32] * sc2[1] + y2[l] * sc4[0] + y2[l+32] * sc4[1];

        }
        sumf += dall * (s[0] * sc1[0] + s[1] * sc1[1] + s[2] * sc3[0] + s[3] * sc3[1]) - dmin * smin;

    }
    sum[ith] = sumf;

    //
    // Accumulate the sum from all threads in the threadgroup
    //
    threadgroup_barrier(mem_flags::mem_threadgroup);
    if (ith%4 == 0) {
        sum[ith] += sum[ith+1] + sum[ith+2] + sum[ith+3];
    }
    threadgroup_barrier(mem_flags::mem_threadgroup);
    if (ith%16 == 0) {
        sum[ith] += sum[ith+4] + sum[ith+8] + sum[ith+12];
    }
    threadgroup_barrier(mem_flags::mem_threadgroup);
    if (ith == 0) {
        for (int i = 16; i < nth; i += 16) sum[0] += sum[i];
        dst[r1*ne0 + r0] = sum[0];
    }

}

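Note: kernel_mul_mat_q5_k_f32 is the q4_k kernel plus one extra bit per weight. Each byte of `qh` carries the fifth bit of eight different weights, and `hm1`...`hm4` select the bit belonging to the four sub-block positions a thread reads. A scalar C sketch of the per-weight decode (hypothetical helper, not part of this file):

#include <stdint.h>

// decode one q5_K weight to 0..31: a 4-bit nibble from qs plus a fifth bit
// from qh, selected by the per-position mask hm (the kernel's hm1..hm4)
static inline int q5_weight(uint8_t qs_byte, int high_nibble, uint8_t qh_byte, uint8_t hm) {
    const int nib = high_nibble ? (qs_byte >> 4) : (qs_byte & 0xF);
    return nib + ((qh_byte & hm) ? 16 : 0);
}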
kernel void kernel_mul_mat_q6_k_f32(
        device const  void * src0,
        device const float * src1,
        device       float * dst,
        constant   int64_t & ne00,
        constant   int64_t & ne10,
        constant   int64_t & ne0,
        threadgroup float  * sum [[threadgroup(0)]],
        uint2 tgpig[[threadgroup_position_in_grid]],
        uint2 tpitg[[thread_position_in_threadgroup]],
        uint2  tptg[[threads_per_threadgroup]]) {

    const uint8_t kmask1 = 0x03;
    const uint8_t kmask2 = 0x0C;
    const uint8_t kmask3 = 0x30;
    const uint8_t kmask4 = 0xC0;

    const int nb = ne00/QK_K;

    const int64_t r0 = tgpig.x;
    const int64_t r1 = tgpig.y;

    device const block_q6_k * x = (device const block_q6_k *) src0 + r0*nb;
    device const float     * yy = (device const float      *) src1 + r1*ne10;

    const int nth = tptg.x*tptg.y;
    const int ith = tptg.y*tpitg.x + tpitg.y;

    // Note: we absolutely assume that tptg.y = 16 and QK_K = 256!
    const int iqs  = 16 * tpitg.y;
    const int ip   = iqs / 128;         // 0 or 1
    const int il   = (iqs - 128*ip)/16; // 0...7
    const int n    = 4;
    const int l0   = n*il;
    const int is   = 8*ip + l0/16;

    const int y_offset   = 128*ip + l0;
    const int q_offset_l =  64*ip + l0;
    const int q_offset_h =  32*ip + l0;

    float sumf = 0;
    for (int i = tpitg.x; i < nb; i += tptg.x) {

        device const uint8_t * ql = x[i].ql + q_offset_l;
        device const uint8_t * qh = x[i].qh + q_offset_h;
        device const int8_t  * sc = x[i].scales + is;

        device const float * y = yy + i * QK_K + y_offset;

        const float dall = x[i].d;

        float4 sums = {0.f, 0.f, 0.f, 0.f};
        for (int l = 0; l < n; ++l) {
            sums[0] += y[l+ 0] * ((int8_t)((ql[l+ 0] & 0xF) | ((qh[l] & kmask1) << 4)) - 32);
            sums[1] += y[l+32] * ((int8_t)((ql[l+32] & 0xF) | ((qh[l] & kmask2) << 2)) - 32);
            sums[2] += y[l+64] * ((int8_t)((ql[l+ 0] >>  4) | ((qh[l] & kmask3) << 0)) - 32);
            sums[3] += y[l+96] * ((int8_t)((ql[l+32] >>  4) | ((qh[l] & kmask4) >> 2)) - 32);
        }

        sumf += dall * (sums[0] * sc[0] + sums[1] * sc[2] + sums[2] * sc[4] + sums[3] * sc[6]);

    }

    sum[ith] = sumf;

    //
    // Accumulate the sum from all threads in the threadgroup
    //
    threadgroup_barrier(mem_flags::mem_threadgroup);
    if (ith%4 == 0) {
        for (int i = 1; i < 4; ++i) sum[ith] += sum[ith + i];
    }
    threadgroup_barrier(mem_flags::mem_threadgroup);
    if (ith%16 == 0) {
        for (int i = 4; i < 16; i += 4) sum[ith] += sum[ith + i];
    }
    threadgroup_barrier(mem_flags::mem_threadgroup);
    if (ith == 0) {
        for (int i = 16; i < nth; i += 16) sum[0] += sum[i];
        dst[r1*ne0 + r0] = sum[0];
    }

}
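Note: in q6_k every weight is rebuilt from a 4-bit nibble in `ql` and a 2-bit field in `qh`, then recentred by 32 so it becomes a signed value in [-32, 31]; `kmask1`...`kmask4` pick the 2-bit pair belonging to positions `l`, `l+32`, `l+64` and `l+96` of the sub-block. A scalar C sketch of the same decode (hypothetical helper names):

#include <stdint.h>

// decode one q6_K weight: low 4 bits from ql, 2 high bits from qh, minus 32;
// 'pair' in 0..3 matches the kernel's kmask1/kmask2/kmask3/kmask4 positions
static inline int q6_weight(uint8_t ql_byte, int high_nibble, uint8_t qh_byte, int pair) {
    const int lo = high_nibble ? (ql_byte >> 4) : (ql_byte & 0xF);
    const int hi = (qh_byte >> (2*pair)) & 3;
    return (lo | (hi << 4)) - 32;
}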