ollama/patches/00-fix-vulkan-building.patch

From 7c5f98c4cbfaf472a0d05baa3cc61afdcaeee7de Mon Sep 17 00:00:00 2001
From: dream <dreamoftime0@gmail.com>
Date: Thu, 13 Feb 2025 18:58:59 +0800
Subject: [PATCH 2/2] fix: fix vulkan building

1. Add a CMake preset for Vulkan (build note below).
2. Add the ggml-vulkan backend.
3. Add some log messages.
---
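
Build note: with the "Vulkan" configure and build presets added below, a Vulkan build can
be driven through CMake presets, roughly as follows (assuming a CMake new enough for build
presets, 3.20 or later):

    cmake --preset Vulkan
    cmake --build --preset Vulkan

The build preset only compiles the ggml-vulkan target, matching the "targets" entry added
to CMakePresets.json.
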
CMakePresets.json | 13 +-
discover/gpu.go | 7 +-
.../ggml/ggml/src/ggml-vulkan/CMakeLists.txt | 92 +
.../ggml/ggml/src/ggml-vulkan/ggml-vulkan.cpp | 8745 +++++++++++++++++
.../ggml-vulkan/vulkan-shaders/CMakeLists.txt | 9 +
.../src/ggml-vulkan/vulkan-shaders/acc.comp | 29 +
.../src/ggml-vulkan/vulkan-shaders/add.comp | 29 +
.../ggml-vulkan/vulkan-shaders/argsort.comp | 69 +
.../src/ggml-vulkan/vulkan-shaders/clamp.comp | 17 +
.../ggml-vulkan/vulkan-shaders/concat.comp | 41 +
.../vulkan-shaders/contig_copy.comp | 42 +
.../src/ggml-vulkan/vulkan-shaders/copy.comp | 20 +
.../src/ggml-vulkan/vulkan-shaders/cos.comp | 17 +
.../vulkan-shaders/dequant_f32.comp | 20 +
.../vulkan-shaders/dequant_funcs.comp | 118 +
.../vulkan-shaders/dequant_funcs_cm2.comp | 325 +
.../vulkan-shaders/dequant_head.comp | 13 +
.../vulkan-shaders/dequant_iq4_nl.comp | 32 +
.../vulkan-shaders/dequant_q2_k.comp | 34 +
.../vulkan-shaders/dequant_q3_k.comp | 42 +
.../vulkan-shaders/dequant_q4_0.comp | 30 +
.../vulkan-shaders/dequant_q4_1.comp | 32 +
.../vulkan-shaders/dequant_q4_k.comp | 68 +
.../vulkan-shaders/dequant_q5_0.comp | 34 +
.../vulkan-shaders/dequant_q5_1.comp | 35 +
.../vulkan-shaders/dequant_q5_k.comp | 70 +
.../vulkan-shaders/dequant_q6_k.comp | 33 +
.../vulkan-shaders/dequant_q8_0.comp | 31 +
.../vulkan-shaders/diag_mask_inf.comp | 34 +
.../src/ggml-vulkan/vulkan-shaders/div.comp | 27 +
.../vulkan-shaders/flash_attn_cm2.comp | 289 +
.../src/ggml-vulkan/vulkan-shaders/gelu.comp | 25 +
.../vulkan-shaders/gelu_quick.comp | 23 +
.../vulkan-shaders/generic_binary_head.comp | 64 +
.../vulkan-shaders/generic_head.comp | 9 +
.../vulkan-shaders/generic_unary_head.comp | 56 +
.../ggml-vulkan/vulkan-shaders/get_rows.comp | 28 +
.../vulkan-shaders/get_rows_quant.comp | 39 +
.../vulkan-shaders/group_norm.comp | 66 +
.../ggml-vulkan/vulkan-shaders/im2col.comp | 87 +
.../vulkan-shaders/leaky_relu.comp | 22 +
.../src/ggml-vulkan/vulkan-shaders/mul.comp | 27 +
.../mul_mat_split_k_reduce.comp | 48 +
.../vulkan-shaders/mul_mat_vec.comp | 152 +
.../vulkan-shaders/mul_mat_vec_base.comp | 118 +
.../vulkan-shaders/mul_mat_vec_nc.comp | 71 +
.../vulkan-shaders/mul_mat_vec_p021.comp | 73 +
.../vulkan-shaders/mul_mat_vec_q2_k.comp | 115 +
.../vulkan-shaders/mul_mat_vec_q3_k.comp | 103 +
.../vulkan-shaders/mul_mat_vec_q4_k.comp | 133 +
.../vulkan-shaders/mul_mat_vec_q5_k.comp | 162 +
.../vulkan-shaders/mul_mat_vec_q6_k.comp | 112 +
.../ggml-vulkan/vulkan-shaders/mul_mm.comp | 631 ++
.../vulkan-shaders/mul_mm_cm2.comp | 328 +
.../src/ggml-vulkan/vulkan-shaders/norm.comp | 44 +
.../src/ggml-vulkan/vulkan-shaders/pad.comp | 28 +
.../ggml-vulkan/vulkan-shaders/pool2d.comp | 74 +
.../src/ggml-vulkan/vulkan-shaders/relu.comp | 21 +
.../ggml-vulkan/vulkan-shaders/repeat.comp | 26 +
.../ggml-vulkan/vulkan-shaders/rms_norm.comp | 42 +
.../ggml-vulkan/vulkan-shaders/rope_head.comp | 49 +
.../ggml-vulkan/vulkan-shaders/rope_neox.comp | 37 +
.../ggml-vulkan/vulkan-shaders/rope_norm.comp | 37 +
.../src/ggml-vulkan/vulkan-shaders/scale.comp | 24 +
.../src/ggml-vulkan/vulkan-shaders/silu.comp | 22 +
.../src/ggml-vulkan/vulkan-shaders/sin.comp | 17 +
.../ggml-vulkan/vulkan-shaders/soft_max.comp | 174 +
.../ggml-vulkan/vulkan-shaders/square.comp | 17 +
.../ggml-vulkan/vulkan-shaders/sum_rows.comp | 37 +
.../src/ggml-vulkan/vulkan-shaders/tanh.comp | 20 +
.../vulkan-shaders/test_coopmat2_support.comp | 7 +
.../vulkan-shaders/timestep_embedding.comp | 41 +
.../src/ggml-vulkan/vulkan-shaders/types.comp | 323 +
.../ggml-vulkan/vulkan-shaders/upscale.comp | 36 +
.../vulkan-shaders/vulkan-shaders-gen.cpp | 594 ++
.../src/ggml-vulkan/vulkan-shaders/wkv6.comp | 87 +
76 files changed, 14642 insertions(+), 4 deletions(-)
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/CMakeLists.txt
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/ggml-vulkan.cpp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/acc.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/add.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/argsort.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/clamp.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/concat.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/contig_copy.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/copy.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/cos.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_f32.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_head.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_nl.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q2_k.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q3_k.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_0.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_1.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_k.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_0.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_1.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_k.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q6_k.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q8_0.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/diag_mask_inf.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/div.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/gelu.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/gelu_quick.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/generic_binary_head.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/generic_head.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/generic_unary_head.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/get_rows.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/get_rows_quant.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/group_norm.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/leaky_relu.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_split_k_reduce.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_base.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_nc.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_p021.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q4_k.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q5_k.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/norm.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/pool2d.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/relu.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/repeat.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/rope_head.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/rope_neox.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/rope_norm.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/scale.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/silu.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/sin.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/soft_max.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/square.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/sum_rows.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/tanh.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/test_coopmat2_support.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/timestep_embedding.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/types.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/upscale.comp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp
create mode 100644 ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/wkv6.comp
diff --git a/CMakePresets.json b/CMakePresets.json
index 3ecb0a8f..a77f15ba 100644
--- a/CMakePresets.json
+++ b/CMakePresets.json
@@ -58,7 +58,11 @@
"cacheVariables": {
"AMDGPU_TARGETS": "gfx803;gfx900;gfx940;gfx941;gfx942;gfx1010;gfx1012;gfx1030;gfx1100;gfx1101;gfx1102;gfx906:xnack-;gfx908:xnack-;gfx90a:xnack+;gfx90a:xnack-"
}
- }
+ },
+ {
+ "name": "Vulkan",
+ "inherits": [ "Default" ]
+ }
],
"buildPresets": [
{
@@ -105,6 +109,11 @@
"name": "ROCm 6",
"inherits": [ "ROCm" ],
"configurePreset": "ROCm 6"
- }
+ },
+ {
+ "name": "Vulkan",
+ "targets": [ "ggml-vulkan" ],
+ "configurePreset": "Vulkan"
+ }
]
}
diff --git a/discover/gpu.go b/discover/gpu.go
index ec96f5d4..8079be99 100644
--- a/discover/gpu.go
+++ b/discover/gpu.go
@@ -197,7 +197,10 @@ func initVulkanHandles() *vulkanHandles {
libcapPaths := FindLibCapLibs()
if len(vulkanPaths) > 0 && len(libcapPaths) > 0 {
+ slog.Info("vulkan: load libvulkan and libcap ok")
vHandles.deviceCount, vHandles.vulkan, vulkanLibPath, libcapLibPath = LoadVulkanMgmt(vulkanPaths, libcapPaths)
+ } else {
+ slog.Info("vulkan: failed to load libvulkan or libcap")
}
return vHandles
@@ -426,7 +429,7 @@ func GetGPUInfo() GpuInfoList {
gpuInfo.ID = C.GoString(&memInfo.gpu_id[0])
gpuInfo.Compute = fmt.Sprintf("%d.%d", memInfo.major, memInfo.minor)
gpuInfo.MinimumMemory = 0
- gpuInfo.DependencyPath = depPaths
+ gpuInfo.DependencyPath = []string{LibOllamaPath}
gpuInfo.Name = C.GoString(&memInfo.gpu_name[0])
gpuInfo.DriverMajor = int(memInfo.major)
gpuInfo.DriverMinor = int(memInfo.minor)
@@ -768,7 +771,7 @@ func LoadVulkanMgmt(vulkanLibPaths []string, capLibPaths []string) (int, *C.vk_h
C.vk_init(vkLib, capLib, &resp)
if resp.err != nil {
- slog.Debug("Unable to load vulkan", "library", vkLibPath, capLibPath, "error", C.GoString(resp.err))
+ slog.Error("Unable to load vulkan", "library", vkLibPath, capLibPath, "error", C.GoString(resp.err))
C.free(unsafe.Pointer(resp.err))
} else {
return int(resp.num_devices), &resp.ch, vkLibPath, capLibPath
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/CMakeLists.txt b/ml/backend/ggml/ggml/src/ggml-vulkan/CMakeLists.txt
new file mode 100644
index 00000000..9501de73
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/CMakeLists.txt
@@ -0,0 +1,92 @@
+find_package(Vulkan COMPONENTS glslc REQUIRED)
+
+if (Vulkan_FOUND)
+ message(STATUS "Vulkan found")
+
+ ggml_add_backend_library(ggml-vulkan
+ ggml-vulkan.cpp
+ ../../include/ggml-vulkan.h
+ )
+
+ # Compile a test shader to determine whether GL_NV_cooperative_matrix2 is supported.
+ # If it's not, there will be an error to stderr.
+ # If it's supported, set a define to indicate that we should compile those shaders
+ execute_process(COMMAND ${Vulkan_GLSLC_EXECUTABLE} -o - -fshader-stage=compute --target-env=vulkan1.3 "${CMAKE_CURRENT_SOURCE_DIR}/vulkan-shaders/test_coopmat2_support.comp"
+ OUTPUT_VARIABLE glslc_output
+ ERROR_VARIABLE glslc_error)
+
+ if (${glslc_error} MATCHES ".*extension not supported: GL_NV_cooperative_matrix2.*")
+ message(STATUS "GL_NV_cooperative_matrix2 not supported by glslc")
+ else()
+ message(STATUS "GL_NV_cooperative_matrix2 supported by glslc")
+ add_compile_definitions(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
+ endif()
+
+ target_link_libraries(ggml-vulkan PRIVATE Vulkan::Vulkan)
+ target_include_directories(ggml-vulkan PRIVATE ${CMAKE_CURRENT_BINARY_DIR})
+
+ # Workaround to the "can't dereference invalidated vector iterator" bug in clang-cl debug build
+ # Possibly relevant: https://stackoverflow.com/questions/74748276/visual-studio-no-displays-the-correct-length-of-stdvector
+ if (MSVC AND CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
+ add_compile_definitions(_ITERATOR_DEBUG_LEVEL=0)
+ endif()
+
+ if (GGML_VULKAN_CHECK_RESULTS)
+ add_compile_definitions(GGML_VULKAN_CHECK_RESULTS)
+ endif()
+
+ if (GGML_VULKAN_DEBUG)
+ add_compile_definitions(GGML_VULKAN_DEBUG)
+ endif()
+
+ if (GGML_VULKAN_MEMORY_DEBUG)
+ add_compile_definitions(GGML_VULKAN_MEMORY_DEBUG)
+ endif()
+
+ if (GGML_VULKAN_SHADER_DEBUG_INFO)
+ add_compile_definitions(GGML_VULKAN_SHADER_DEBUG_INFO)
+ endif()
+
+ if (GGML_VULKAN_PERF)
+ add_compile_definitions(GGML_VULKAN_PERF)
+ endif()
+
+ if (GGML_VULKAN_VALIDATE)
+ add_compile_definitions(GGML_VULKAN_VALIDATE)
+ endif()
+
+ if (GGML_VULKAN_RUN_TESTS)
+ add_compile_definitions(GGML_VULKAN_RUN_TESTS)
+ endif()
+
+ add_subdirectory(vulkan-shaders)
+
+ set (_ggml_vk_genshaders_cmd vulkan-shaders-gen)
+ set (_ggml_vk_header ${CMAKE_CURRENT_BINARY_DIR}/ggml-vulkan-shaders.hpp)
+ set (_ggml_vk_source ${CMAKE_CURRENT_BINARY_DIR}/ggml-vulkan-shaders.cpp)
+ set (_ggml_vk_input_dir ${CMAKE_CURRENT_SOURCE_DIR}/vulkan-shaders)
+ set (_ggml_vk_output_dir ${CMAKE_CURRENT_BINARY_DIR}/vulkan-shaders.spv)
+
+ file(GLOB _ggml_vk_shader_deps "${_ggml_vk_input_dir}/*.comp")
+
+ add_custom_command(
+ OUTPUT ${_ggml_vk_header}
+ ${_ggml_vk_source}
+
+ COMMAND "$<TARGET_FILE_DIR:vulkan-shaders-gen>/${_ggml_vk_genshaders_cmd}"
+ --glslc ${Vulkan_GLSLC_EXECUTABLE}
+ --input-dir ${_ggml_vk_input_dir}
+ --output-dir ${_ggml_vk_output_dir}
+ --target-hpp ${_ggml_vk_header}
+ --target-cpp ${_ggml_vk_source}
+ --no-clean
+
+ DEPENDS ${_ggml_vk_shader_deps} ${_ggml_vk_genshaders_cmd}
+ COMMENT "Generate vulkan shaders"
+ )
+
+ target_sources(ggml-vulkan PRIVATE ${_ggml_vk_source} ${_ggml_vk_header})
+
+else()
+ message(WARNING "Vulkan not found")
+endif()
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ml/backend/ggml/ggml/src/ggml-vulkan/ggml-vulkan.cpp
new file mode 100644
index 00000000..d75cd6d6
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/ggml-vulkan.cpp
@@ -0,0 +1,8745 @@
+#include "ggml-vulkan.h"
+#include <vulkan/vulkan_core.h>
+#if defined(GGML_VULKAN_RUN_TESTS) || defined(GGML_VULKAN_PERF) || defined(GGML_VULKAN_CHECK_RESULTS)
+#include <chrono>
+#include "ggml-cpu.h"
+#endif
+
+#include <vulkan/vulkan.hpp>
+
+#include <algorithm>
+#include <cmath>
+#include <iomanip>
+#include <iostream>
+#include <tuple>
+#include <vector>
+#include <sstream>
+#include <utility>
+#include <memory>
+#include <limits>
+#include <map>
+#include <unordered_map>
+#include <mutex>
+#include <future>
+#include <thread>
+
+#include "ggml-impl.h"
+#include "ggml-backend-impl.h"
+
+#include "ggml-vulkan-shaders.hpp"
+
+#define VK_API_VERSION VK_API_VERSION_1_2
+
+#define CEIL_DIV(M, N) (((M) + (N)-1) / (N))
+
+#define VK_VENDOR_ID_AMD 0x1002
+#define VK_VENDOR_ID_APPLE 0x106b
+#define VK_VENDOR_ID_INTEL 0x8086
+#define VK_VENDOR_ID_NVIDIA 0x10de
+
+#define VK_DEVICE_DESCRIPTOR_POOL_SIZE 32
+
+#define GGML_VK_MAX_NODES 8192
+
+#define MAX_VK_BUFFERS 256
+
+#define VK_CHECK(err, msg) \
+ do { \
+ vk::Result err_ = (err); \
+ if (err_ != vk::Result::eSuccess) { \
+ fprintf(stderr, "ggml_vulkan: %s error %s at %s:%d\n", \
+ #err, to_string(err_).c_str(), __FILE__, __LINE__); \
+ exit(1); \
+ } \
+ } while (0)
+
+#ifdef GGML_VULKAN_DEBUG
+#define VK_LOG_DEBUG(msg) std::cerr << msg << std::endl
+#else
+#define VK_LOG_DEBUG(msg) ((void) 0)
+#endif // GGML_VULKAN_DEBUG
+
+struct ggml_backend_vk_context;
+
+struct vk_queue {
+ uint32_t queue_family_index;
+ vk::Queue queue;
+ vk::CommandPool pool;
+ uint32_t cmd_buffer_idx;
+ std::vector<vk::CommandBuffer> cmd_buffers;
+
+ vk::PipelineStageFlags stage_flags;
+
+ bool transfer_only;
+};
+
+struct vk_pipeline_struct {
+ std::string name;
+ vk::ShaderModule shader_module;
+ vk::DescriptorSetLayout dsl;
+ std::vector<vk::DescriptorPool> descriptor_pools;
+ std::vector<vk::DescriptorSet> descriptor_sets;
+ uint32_t descriptor_set_idx;
+ vk::PipelineLayout layout;
+ vk::Pipeline pipeline;
+ uint32_t push_constant_size;
+ uint32_t parameter_count;
+ std::array<uint32_t, 3> wg_denoms;
+ uint32_t align;
+};
+
+typedef std::shared_ptr<vk_pipeline_struct> vk_pipeline;
+typedef std::weak_ptr<vk_pipeline_struct> vk_pipeline_ref;
+
+static void ggml_vk_destroy_pipeline(vk::Device& device, vk_pipeline& pipeline);
+
+struct vk_matmul_pipeline_struct {
+ vk_pipeline l, m, s;
+ vk_pipeline a_l, a_m, a_s;
+};
+
+typedef std::shared_ptr<vk_matmul_pipeline_struct> vk_matmul_pipeline;
+
+struct vk_matmul_pipeline2 {
+ vk_matmul_pipeline2() {
+ f16acc = std::make_shared<vk_matmul_pipeline_struct>();
+ f32acc = std::make_shared<vk_matmul_pipeline_struct>();
+ }
+ vk_matmul_pipeline f32acc;
+ vk_matmul_pipeline f16acc;
+};
+
+struct vk_device_struct;
+typedef std::shared_ptr<vk_device_struct> vk_device;
+typedef std::weak_ptr<vk_device_struct> vk_device_ref;
+
+struct vk_buffer_struct;
+typedef std::shared_ptr<vk_buffer_struct> vk_buffer;
+typedef std::weak_ptr<vk_buffer_struct> vk_buffer_ref;
+
+struct ggml_backend_vk_buffer_type_context {
+ std::string name;
+ vk_device device;
+};
+
+static const char * ggml_backend_vk_buffer_type_name(ggml_backend_buffer_type_t buft);
+static ggml_backend_buffer_t ggml_backend_vk_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size);
+static size_t ggml_backend_vk_buffer_type_get_alignment(ggml_backend_buffer_type_t buft);
+static size_t ggml_backend_vk_buffer_type_get_max_size(ggml_backend_buffer_type_t buft);
+static size_t ggml_backend_vk_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor);
+static ggml_backend_buffer_type_i ggml_backend_vk_buffer_type_interface = {
+ /* .get_name = */ ggml_backend_vk_buffer_type_name,
+ /* .alloc_buffer = */ ggml_backend_vk_buffer_type_alloc_buffer,
+ /* .get_alignment = */ ggml_backend_vk_buffer_type_get_alignment,
+ /* .get_max_size = */ ggml_backend_vk_buffer_type_get_max_size,
+ /* .get_alloc_size = */ ggml_backend_vk_buffer_type_get_alloc_size,
+ /* .is_host = */ NULL,
+};
+
+#ifdef GGML_VULKAN_MEMORY_DEBUG
+class vk_memory_logger;
+#endif
+#ifdef GGML_VULKAN_PERF
+class vk_perf_logger;
+#endif
+static void ggml_vk_destroy_buffer(vk_buffer& buf);
+
+static constexpr uint32_t mul_mat_vec_max_cols = 8;
+
+struct vk_device_struct {
+ std::mutex mutex;
+
+ vk::PhysicalDevice physical_device;
+ vk::PhysicalDeviceProperties properties;
+ std::string name;
+ uint64_t max_memory_allocation_size;
+ bool fp16;
+ bool pipeline_robustness;
+ vk::Device device;
+ uint32_t vendor_id;
+ vk_queue compute_queue;
+ vk_queue transfer_queue;
+ bool single_queue;
+ uint32_t subgroup_size;
+ uint32_t shader_core_count;
+ bool uma;
+ bool float_controls_rte_fp16;
+
+ bool subgroup_size_control;
+ uint32_t subgroup_min_size;
+ uint32_t subgroup_max_size;
+ bool subgroup_require_full_support;
+
+ bool coopmat_support;
+ bool coopmat_acc_f32_support;
+ bool coopmat_acc_f16_support;
+ uint32_t coopmat_m;
+ uint32_t coopmat_n;
+ uint32_t coopmat_k;
+ bool coopmat2;
+
+ size_t idx;
+
+ bool mul_mat_l;
+ bool mul_mat_m;
+ bool mul_mat_s;
+ bool mul_mat_id_l;
+ bool mul_mat_id_m;
+ bool mul_mat_id_s;
+
+ vk_matmul_pipeline pipeline_matmul_f32;
+ vk_matmul_pipeline pipeline_matmul_f32_f16;
+ vk_matmul_pipeline2 pipeline_matmul_f16;
+ vk_matmul_pipeline2 pipeline_matmul_f16_f32;
+ vk_pipeline pipeline_matmul_split_k_reduce;
+
+ vk_matmul_pipeline2 pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_COUNT];
+ vk_matmul_pipeline2 pipeline_dequant_mul_mat_mat[GGML_TYPE_COUNT];
+
+ vk_matmul_pipeline pipeline_matmul_id_f32;
+ vk_matmul_pipeline2 pipeline_matmul_id_f16;
+ vk_matmul_pipeline2 pipeline_matmul_id_f16_f32;
+
+ vk_matmul_pipeline2 pipeline_dequant_mul_mat_mat_id[GGML_TYPE_COUNT];
+
+ vk_pipeline pipeline_dequant[GGML_TYPE_COUNT];
+ vk_pipeline pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_COUNT][mul_mat_vec_max_cols];
+ vk_pipeline pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_COUNT][mul_mat_vec_max_cols];
+ vk_pipeline pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_COUNT];
+
+ vk_pipeline pipeline_mul_mat_vec_p021_f16_f32;
+ vk_pipeline pipeline_mul_mat_vec_nc_f16_f32;
+ vk_pipeline pipeline_get_rows[GGML_TYPE_COUNT];
+ vk_pipeline pipeline_get_rows_f32[GGML_TYPE_COUNT];
+ vk_pipeline pipeline_acc_f32;
+ vk_pipeline pipeline_add_f32, pipeline_add_f32_norepeat;
+ vk_pipeline pipeline_add_f16_f32_f16, pipeline_add_f16_f32_f16_norepeat;
+ vk_pipeline pipeline_mul_f32, pipeline_mul_f32_norepeat;
+ vk_pipeline pipeline_div_f32, pipeline_div_f32_norepeat;
+ vk_pipeline pipeline_concat_f32, pipeline_concat_f16, pipeline_concat_i32;
+ vk_pipeline pipeline_upscale_f32;
+ vk_pipeline pipeline_scale_f32;
+ vk_pipeline pipeline_sqr_f32;
+ vk_pipeline pipeline_sin_f32;
+ vk_pipeline pipeline_cos_f32;
+ vk_pipeline pipeline_clamp_f32;
+ vk_pipeline pipeline_pad_f32;
+ vk_pipeline pipeline_repeat_f32;
+ vk_pipeline pipeline_cpy_f32_f32, pipeline_cpy_f32_f16, pipeline_cpy_f16_f16;
+ vk_pipeline pipeline_contig_cpy_f32_f32, pipeline_contig_cpy_f32_f16, pipeline_contig_cpy_f16_f16;
+ vk_pipeline pipeline_norm_f32;
+ vk_pipeline pipeline_group_norm_f32;
+ vk_pipeline pipeline_rms_norm_f32;
+ vk_pipeline pipeline_gelu_f32;
+ vk_pipeline pipeline_gelu_quick_f32;
+ vk_pipeline pipeline_silu_f32;
+ vk_pipeline pipeline_relu_f32;
+ vk_pipeline pipeline_leaky_relu_f32;
+ vk_pipeline pipeline_tanh_f32;
+ vk_pipeline pipeline_diag_mask_inf_f32;
+ vk_pipeline pipeline_soft_max_f32, pipeline_soft_max_f32_f16;
+ vk_pipeline pipeline_soft_max_f32_wg512, pipeline_soft_max_f32_f16_wg512;
+ vk_pipeline pipeline_rope_norm_f32, pipeline_rope_norm_f16;
+ vk_pipeline pipeline_rope_neox_f32, pipeline_rope_neox_f16;
+ vk_pipeline pipeline_argsort_f32;
+ vk_pipeline pipeline_sum_rows_f32;
+ vk_pipeline pipeline_im2col_f32, pipeline_im2col_f32_f16;
+ vk_pipeline pipeline_timestep_embedding_f32;
+ vk_pipeline pipeline_pool2d_f32;
+ vk_pipeline pipeline_rwkv_wkv6_f32;
+
+ // [2][2][2] is for {f16acc,f32acc}x{large,small_rows}x{unaligned, aligned}
+ vk_pipeline pipeline_flash_attn_f32_f16_D64[GGML_TYPE_COUNT][2][2][2];
+ vk_pipeline pipeline_flash_attn_f32_f16_D80[GGML_TYPE_COUNT][2][2][2];
+ vk_pipeline pipeline_flash_attn_f32_f16_D96[GGML_TYPE_COUNT][2][2][2];
+ vk_pipeline pipeline_flash_attn_f32_f16_D112[GGML_TYPE_COUNT][2][2][2];
+ vk_pipeline pipeline_flash_attn_f32_f16_D128[GGML_TYPE_COUNT][2][2][2];
+ vk_pipeline pipeline_flash_attn_f32_f16_D256[GGML_TYPE_COUNT][2][2][2];
+
+ std::unordered_map<std::string, vk_pipeline_ref> pipelines;
+ std::unordered_map<std::string, uint64_t> pipeline_descriptor_set_requirements;
+
+ std::vector<std::tuple<void*, size_t, vk_buffer>> pinned_memory;
+
+ vk::Fence fence;
+ vk_buffer sync_staging;
+
+ ggml_backend_buffer_type buffer_type;
+
+#ifdef GGML_VULKAN_MEMORY_DEBUG
+ std::unique_ptr<vk_memory_logger> memory_logger;
+#endif
+#ifdef GGML_VULKAN_PERF
+ std::unique_ptr<vk_perf_logger> perf_logger;
+#endif
+
+ ~vk_device_struct() {
+ VK_LOG_DEBUG("destroy device " << name);
+
+ device.destroyFence(fence);
+
+ ggml_vk_destroy_buffer(sync_staging);
+
+ device.destroyCommandPool(compute_queue.pool);
+ if (!single_queue) {
+ device.destroyCommandPool(transfer_queue.pool);
+ }
+
+ for (auto& pipeline : pipelines) {
+ if (pipeline.second.expired()) {
+ continue;
+ }
+
+ vk_pipeline pl = pipeline.second.lock();
+ ggml_vk_destroy_pipeline(device, pl);
+ }
+ pipelines.clear();
+
+ device.destroy();
+ }
+};
+
+struct vk_buffer_struct {
+ vk::Buffer buffer = VK_NULL_HANDLE;
+ vk::DeviceMemory device_memory = VK_NULL_HANDLE;
+ vk::MemoryPropertyFlags memory_property_flags;
+ void * ptr;
+ size_t size = 0;
+
+ vk_device device;
+
+ ~vk_buffer_struct() {
+ if (size == 0) {
+ return;
+ }
+ VK_LOG_DEBUG("~vk_buffer_struct(" << buffer << ", " << size << ")");
+
+ device->device.freeMemory(device_memory);
+ device->device.destroyBuffer(buffer);
+ }
+};
+
+struct vk_subbuffer {
+ vk_buffer buffer;
+ uint64_t offset;
+ uint64_t size;
+
+ operator vk::DescriptorBufferInfo() const {
+ return { buffer->buffer, offset, size };
+ }
+};
+
+struct vk_semaphore {
+ vk::Semaphore s;
+ uint64_t value;
+};
+
+struct vk_submission {
+ vk::CommandBuffer buffer;
+ std::vector<vk_semaphore> wait_semaphores;
+ std::vector<vk_semaphore> signal_semaphores;
+};
+
+typedef std::vector<vk_submission> vk_sequence;
+
+struct vk_mat_mat_push_constants {
+ uint32_t M; uint32_t N; uint32_t K;
+ uint32_t stride_a; uint32_t stride_b; uint32_t stride_d;
+ uint32_t batch_stride_a; uint32_t batch_stride_b; uint32_t batch_stride_d;
+ uint32_t k_split;
+ uint32_t ne02; uint32_t ne12; uint32_t broadcast2; uint32_t broadcast3;
+};
+struct vk_mat_vec_push_constants {
+ uint32_t ncols; uint32_t stride_a; uint32_t stride_b; uint32_t stride_d;
+ uint32_t batch_stride_a; uint32_t batch_stride_b; uint32_t batch_stride_d;
+ uint32_t ne02; uint32_t ne12; uint32_t broadcast2; uint32_t broadcast3;
+};
+
+struct vk_mat_mat_id_push_constants {
+ uint32_t M; uint32_t N; uint32_t K;
+ uint32_t stride_a; uint32_t stride_b; uint32_t stride_d;
+ uint32_t batch_stride_a; uint32_t batch_stride_b; uint32_t batch_stride_d;
+ uint32_t nei0; uint32_t nei1; uint32_t nbi1; uint32_t ne11;
+};
+struct vk_mat_vec_id_push_constants {
+ uint32_t ncols; uint32_t stride_a; uint32_t stride_b; uint32_t stride_d;
+ uint32_t batch_stride_a; uint32_t batch_stride_b; uint32_t batch_stride_d;
+ uint32_t nei0; uint32_t ne11;
+};
+
+struct vk_flash_attn_push_constants {
+ uint32_t N;
+ uint32_t KV;
+
+ uint32_t ne1;
+ uint32_t ne2;
+ uint32_t ne3;
+
+ uint32_t neq2;
+ uint32_t neq3;
+ uint32_t nek2;
+ uint32_t nek3;
+ uint32_t nev2;
+ uint32_t nev3;
+ uint32_t nem1;
+
+ uint32_t nb02;
+ uint32_t nb03;
+ uint32_t nb12;
+ uint32_t nb13;
+ uint32_t nb22;
+ uint32_t nb23;
+ uint32_t nb31;
+
+ float scale;
+ float max_bias;
+ float logit_softcap;
+
+ uint32_t mask;
+ uint32_t n_head_log2;
+ float m0;
+ float m1;
+};
+
+struct vk_op_push_constants {
+ uint32_t KX;
+ uint32_t KY;
+ float param1;
+ float param2;
+};
+
+struct vk_op_unary_push_constants {
+ uint32_t ne;
+ uint32_t ne00; uint32_t ne01; uint32_t ne02; uint32_t ne03; uint32_t nb00; uint32_t nb01; uint32_t nb02; uint32_t nb03;
+ uint32_t ne10; uint32_t ne11; uint32_t ne12; uint32_t ne13; uint32_t nb10; uint32_t nb11; uint32_t nb12; uint32_t nb13;
+ uint32_t misalign_offsets;
+ float param1; float param2;
+ uint32_t ne0_012mp; uint32_t ne0_012L;
+ uint32_t ne0_01mp; uint32_t ne0_01L;
+ uint32_t ne0_0mp; uint32_t ne0_0L;
+ uint32_t ne1_012mp; uint32_t ne1_012L;
+ uint32_t ne1_01mp; uint32_t ne1_01L;
+ uint32_t ne1_0mp; uint32_t ne1_0L;
+};
+static_assert(sizeof(vk_op_unary_push_constants) <= 128, "sizeof(vk_op_unary_push_constants) must be <= 128");
+
+// See https://gmplib.org/~tege/divcnst-pldi94.pdf figure 4.1.
+// Precompute mp (m' in the paper) and L such that division
+// can be computed using a multiply (high 32b of 64b result)
+// and a shift:
+//
+// n/d = (mulhi(n, mp) + n) >> L;
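+//
+// Worked example: for d = 3, L = ceil(log2(3)) = 2 and
+// mp = floor(2^32 * (2^L - d) / d) + 1 = 0x55555556; then n = 7 gives
+// (mulhi(7, mp) + 7) >> 2 = (2 + 7) >> 2 = 2 == 7/3.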
+static void init_fastdiv_values(uint32_t d, uint32_t &mp, uint32_t &L)
+{
+ // compute L = ceil(log2(d));
+ L = 0;
+ while (L < 32 && (uint32_t{1} << L) < d) {
+ L++;
+ }
+
+ mp = (uint32_t)((uint64_t{1} << 32) * ((uint64_t{1} << L) - d) / d + 1);
+}
+
+template <typename T> void init_pushconst_fastdiv(T &p) {
+ GGML_UNUSED(p);
+ static_assert(!std::is_const<T>::value, "unexpected type");
+}
+
+template <> void init_pushconst_fastdiv(vk_op_unary_push_constants &p) {
+ // Compute magic values to divide by these six numbers.
+ init_fastdiv_values(p.ne02*p.ne01*p.ne00, p.ne0_012mp, p.ne0_012L);
+ init_fastdiv_values(p.ne01*p.ne00, p.ne0_01mp, p.ne0_01L);
+ init_fastdiv_values(p.ne00, p.ne0_0mp, p.ne0_0L);
+ init_fastdiv_values(p.ne12*p.ne11*p.ne10, p.ne1_012mp, p.ne1_012L);
+ init_fastdiv_values(p.ne11*p.ne10, p.ne1_01mp, p.ne1_01L);
+ init_fastdiv_values(p.ne10, p.ne1_0mp, p.ne1_0L);
+}
+
+struct vk_op_binary_push_constants {
+ uint32_t ne;
+ uint32_t ne00; uint32_t ne01; uint32_t ne02; uint32_t ne03; uint32_t nb00; uint32_t nb01; uint32_t nb02; uint32_t nb03;
+ uint32_t ne10; uint32_t ne11; uint32_t ne12; uint32_t ne13; uint32_t nb10; uint32_t nb11; uint32_t nb12; uint32_t nb13;
+ uint32_t ne20; uint32_t ne21; uint32_t ne22; uint32_t ne23; uint32_t nb20; uint32_t nb21; uint32_t nb22; uint32_t nb23;
+ uint32_t misalign_offsets;
+ float param1; float param2; int32_t param3;
+};
+
+struct vk_op_diag_mask_push_constants {
+ uint32_t ncols;
+ uint32_t rows_per_channel;
+ int32_t n_past;
+};
+
+struct vk_op_rope_push_constants {
+ uint32_t ncols;
+ uint32_t n_dims;
+ float freq_scale;
+ uint32_t p_delta_rows;
+ float freq_base;
+ float ext_factor;
+ float attn_factor;
+ float corr_dims[2];
+ float theta_scale;
+ uint32_t has_ff;
+};
+
+struct vk_op_soft_max_push_constants {
+ uint32_t KX;
+ uint32_t KY;
+ float scale;
+ float max_bias;
+ float m0;
+ float m1;
+ uint32_t n_head_log2;
+ uint32_t nrows_x;
+};
+
+struct vk_op_argsort_push_constants {
+ uint32_t ncols;
+ uint32_t ncols_pad;
+ int32_t order;
+};
+
+struct vk_op_im2col_push_constants {
+ uint32_t batch_offset; uint32_t offset_delta;
+ uint32_t IC;
+ uint32_t IW; uint32_t IH;
+ uint32_t OW; uint32_t OH;
+ uint32_t KW; uint32_t KH;
+ uint32_t pelements;
+ uint32_t CHW;
+ int32_t s0; int32_t s1;
+ int32_t p0; int32_t p1;
+ int32_t d0; int32_t d1;
+};
+
+struct vk_op_timestep_embedding_push_constants {
+ uint32_t nb1;
+ uint32_t dim;
+ uint32_t max_period;
+};
+
+struct vk_op_pool2d_push_constants {
+ uint32_t IW; uint32_t IH;
+ uint32_t OW; uint32_t OH;
+ uint32_t OC;
+ uint32_t pelements;
+ uint32_t op;
+ int32_t k0; int32_t k1;
+ int32_t s0; int32_t s1;
+ int32_t p0; int32_t p1;
+};
+
+struct vk_op_rwkv_wkv6_push_constants {
+ uint32_t B;
+ uint32_t T;
+ uint32_t C;
+ uint32_t H;
+};
+
+// Allow pre-recording command buffers
+struct vk_staging_memcpy {
+ vk_staging_memcpy(void * _dst, const void * _src, size_t _n) : dst(_dst), src(_src), n(_n) {}
+
+ void * dst;
+ const void * src;
+ size_t n;
+};
+
+struct vk_op_upscale_push_constants {
+ uint32_t ne; uint32_t a_offset; uint32_t d_offset;
+ uint32_t nb00; uint32_t nb01; uint32_t nb02; uint32_t nb03;
+ uint32_t ne10; uint32_t ne11; uint32_t ne12; uint32_t ne13;
+ float sf0; float sf1; float sf2; float sf3;
+};
+
+struct vk_context_struct {
+ vk_submission * s;
+ std::vector<vk_sequence> seqs;
+
+ int exit_tensor_idx;
+
+ std::vector<vk_staging_memcpy> in_memcpys;
+ std::vector<vk_staging_memcpy> out_memcpys;
+
+ vk_queue * q;
+};
+typedef std::shared_ptr<vk_context_struct> vk_context;
+typedef std::weak_ptr<vk_context_struct> vk_context_ref;
+
+struct ggml_vk_garbage_collector {
+ std::vector<vk_semaphore> tl_semaphores;
+ std::vector<vk_semaphore> semaphores;
+ std::vector<vk::Event> events;
+ std::vector<vk_buffer> temp_buffers;
+ std::vector<vk_context> contexts;
+};
+
+#if defined(GGML_VULKAN_MEMORY_DEBUG) || defined(GGML_VULKAN_DEBUG)
+#define VK_LOG_MEMORY(msg) std::cerr << "ggml_vulkan memory: " << msg << std::endl
+
+static std::string format_size(size_t size) {
+ const size_t kib = 1024;
+ const size_t mib = kib * 1024;
+ const size_t gib = mib * 1024;
+
+ std::ostringstream oss;
+ oss << std::fixed << std::setprecision(2);
+
+ if (size >= gib) {
+ oss << static_cast<double>(size) / gib << " GiB";
+ } else if (size >= mib) {
+ oss << static_cast<double>(size) / mib << " MiB";
+ } else if (size >= kib) {
+ oss << static_cast<double>(size) / kib << " KiB";
+ } else {
+ oss << size << " B";
+ }
+
+ return oss.str();
+}
+
+static std::mutex log_mutex;
+
+class vk_memory_logger {
+public:
+ vk_memory_logger(): total_device(0), total_host(0) {}
+ void log_allocation(vk_buffer_ref buf_ref, size_t size);
+ void log_deallocation(vk_buffer_ref buf_ref);
+
+private:
+ std::map<vk::Buffer, size_t> allocations; // Track allocations
+ size_t total_device;
+ size_t total_host;
+};
+#else
+#define VK_LOG_MEMORY(msg) ((void) 0)
+#endif // GGML_VULKAN_MEMORY_DEBUG
+
+#if defined(GGML_VULKAN_PERF)
+
+class vk_perf_logger {
+public:
+ void print_timings() {
+ std::cerr << "----------------\nVulkan Timings:" << std::endl;
+ for (const auto& t : timings) {
+ uint64_t total = 0;
+ for (const auto& time : t.second) {
+ total += time;
+ }
+ std::cerr << t.first << ": " << t.second.size() << " x " << (total / t.second.size() / 1000.0) << " ms" << std::endl;
+ }
+
+ timings.clear();
+ }
+
+ void log_timing(const ggml_tensor * node, uint64_t time) {
+ if (node->op == GGML_OP_UNARY) {
+ timings[ggml_unary_op_name(ggml_get_unary_op(node))].push_back(time);
+ return;
+ }
+ if (node->op == GGML_OP_MUL_MAT || node->op == GGML_OP_MUL_MAT_ID) {
+ const uint64_t m = node->src[0]->ne[1];
+ const uint64_t n = node->src[1]->ne[1];
+ const uint64_t k = node->src[1]->ne[0];
+ std::string name = ggml_op_name(node->op);
+ if (n == 1) {
+ name += "_VEC m=" + std::to_string(m) + " k=" + std::to_string(k);
+ } else {
+ name += " m=" + std::to_string(m) + " n=" + std::to_string(n) + " k=" + std::to_string(k);
+ }
+ timings[name].push_back(time);
+ return;
+ }
+ timings[ggml_op_name(node->op)].push_back(time);
+ }
+private:
+ std::map<std::string, std::vector<uint64_t>> timings;
+};
+#endif // GGML_VULKAN_PERF
+
+struct ggml_backend_vk_context {
+ std::string name;
+
+ vk_device device;
+
+ size_t semaphore_idx, event_idx;
+ ggml_vk_garbage_collector gc;
+ size_t prealloc_size_x, prealloc_size_y, prealloc_size_split_k;
+ vk_buffer prealloc_x, prealloc_y, prealloc_split_k;
+ vk::Fence fence;
+
+ vk_buffer buffer_pool[MAX_VK_BUFFERS];
+
+ vk_context_ref compute_ctx;
+ vk_context_ref transfer_ctx;
+
+ std::vector<vk_context_ref> tensor_ctxs;
+};
+
+static void * const vk_ptr_base = (void *)(uintptr_t) 0x1000; // NOLINT
+
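+// Tensors placed in a Vulkan buffer carry vk_ptr_base plus their byte offset in ->data rather
+// than a real host pointer; vk_tensor_offset() subtracts the base (using view_src for views)
+// to recover the offset into the backing vk_buffer.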
+static uint64_t vk_tensor_offset(const ggml_tensor * tensor) {
+ if (tensor->view_src) {
+ return (uint8_t *) tensor->view_src->data - (uint8_t *) vk_ptr_base;
+ }
+ return (uint8_t *) tensor->data - (uint8_t *) vk_ptr_base;
+}
+
+struct ggml_backend_vk_buffer_context {
+ vk_device_ref device;
+ vk_buffer dev_buffer;
+ std::string name;
+
+ ggml_backend_vk_buffer_context(vk_device_ref device, vk_buffer&& dev_buffer, std::string& name) :
+ device(device),
+ dev_buffer(dev_buffer),
+ name(name) {
+ }
+
+ ~ggml_backend_vk_buffer_context() {
+ ggml_vk_destroy_buffer(dev_buffer);
+ }
+};
+
+#ifdef GGML_VULKAN_MEMORY_DEBUG
+void vk_memory_logger::log_allocation(vk_buffer_ref buf_ref, size_t size) {
+ std::lock_guard<std::mutex> guard(log_mutex);
+ vk_buffer buf = buf_ref.lock();
+ const bool device = bool(buf->memory_property_flags & vk::MemoryPropertyFlagBits::eDeviceLocal);
+ const std::string type = device ? "device" : "host";
+ allocations[buf->buffer] = size;
+ total_device += device ? size : 0;
+ total_host += device ? 0 : size;
+ VK_LOG_MEMORY(buf->device->name << ": +" << format_size(size) << " " << type << " at " << buf->buffer << ". Total device: " << format_size(total_device) << ", total host: " << format_size(total_host));
+}
+
+void vk_memory_logger::log_deallocation(vk_buffer_ref buf_ref) {
+ if (buf_ref.expired() || buf_ref.lock()->size == 0) {
+ return;
+ }
+
+ std::lock_guard<std::mutex> guard(log_mutex);
+ vk_buffer buf = buf_ref.lock();
+ const bool device = bool(buf->memory_property_flags & vk::MemoryPropertyFlagBits::eDeviceLocal);
+ std::string type = device ? "device" : "host";
+ auto it = allocations.find(buf->buffer);
+ if (it != allocations.end()) {
+ total_device -= device ? it->second : 0;
+ total_host -= device ? 0 : it->second;
+ VK_LOG_MEMORY(buf->device->name << ": -" << format_size(it->second) << " " << type << " at " << buf->buffer << ". Total device: " << format_size(total_device) << ", total host: " << format_size(total_host));
+ allocations.erase(it);
+ } else {
+ VK_LOG_MEMORY("ERROR " << buf->device->name << ": Attempted to deallocate unknown " << type << " memory at " << buf->buffer);
+ }
+}
+#endif // GGML_VULKAN_MEMORY_DEBUG
+
+struct vk_instance_t {
+ vk::Instance instance;
+
+ std::vector<size_t> device_indices;
+ vk_device devices[GGML_VK_MAX_DEVICES];
+};
+
+static bool vk_instance_initialized = false;
+static vk_instance_t vk_instance;
+
+#ifdef GGML_VULKAN_CHECK_RESULTS
+static size_t vk_skip_checks;
+static size_t vk_output_tensor;
+
+static void ggml_vk_print_tensor(const ggml_tensor * tensor, const char * name);
+static void ggml_vk_check_results_0(ggml_tensor * tensor);
+static void ggml_vk_check_results_1(ggml_tensor * tensor);
+#endif
+
+typedef void (*ggml_vk_func_t)(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst);
+
+static void ggml_backend_vk_free(ggml_backend_t backend);
+
+// variables to track number of compiles in progress
+static uint32_t compile_count = 0;
+static std::mutex compile_count_mutex;
+static std::condition_variable compile_count_cond;
+
+static void ggml_vk_create_pipeline_func(vk_device& device, vk_pipeline& pipeline, const std::string name, size_t spv_size, const void* spv_data, const std::string entrypoint,
+ uint32_t parameter_count, uint32_t push_constant_size, std::array<uint32_t, 3> wg_denoms, std::vector<uint32_t> specialization_constants,
+ uint32_t align, bool disable_robustness, bool require_full_subgroups, uint32_t required_subgroup_size) {
+ VK_LOG_DEBUG("ggml_vk_create_pipeline(" << device->name << ", " << name << ", " << entrypoint << ", " << parameter_count << ", " << push_constant_size <<
+ ", (" << wg_denoms[0] << "," << wg_denoms[1] << "," << wg_denoms[2] << "), specialization_constants, " << align <<
+ ", " << disable_robustness << ", " << require_full_subgroups << ", " << required_subgroup_size << ")");
+ GGML_ASSERT(parameter_count > 0);
+ GGML_ASSERT(wg_denoms[0] > 0 && wg_denoms[1] > 0 && wg_denoms[2] > 0); // NOLINT
+
+ pipeline = std::make_shared<vk_pipeline_struct>();
+ pipeline->name = name;
+ pipeline->parameter_count = parameter_count;
+ pipeline->push_constant_size = push_constant_size;
+ pipeline->wg_denoms = wg_denoms;
+ pipeline->align = align;
+
+ vk::ShaderModuleCreateInfo shader_module_create_info({}, spv_size, reinterpret_cast<const uint32_t *>(spv_data));
+ pipeline->shader_module = device->device.createShaderModule(shader_module_create_info);
+
+ std::vector<vk::DescriptorSetLayoutBinding> dsl_binding;
+ std::vector<vk::DescriptorBindingFlags> dsl_binding_flags;
+ for (uint32_t i = 0; i < parameter_count; i++) {
+ dsl_binding.push_back({i, vk::DescriptorType::eStorageBuffer, 1, vk::ShaderStageFlagBits::eCompute});
+ dsl_binding_flags.push_back({});
+ }
+
+ vk::DescriptorSetLayoutBindingFlagsCreateInfo dslbfci = { dsl_binding_flags };
+
+ vk::PushConstantRange pcr(
+ vk::ShaderStageFlagBits::eCompute,
+ 0,
+ pipeline->push_constant_size
+ );
+
+ vk::DescriptorSetLayoutCreateInfo descriptor_set_layout_create_info(
+ {},
+ dsl_binding);
+ descriptor_set_layout_create_info.setPNext(&dslbfci);
+ pipeline->dsl = device->device.createDescriptorSetLayout(descriptor_set_layout_create_info);
+
+ vk::DescriptorPoolSize descriptor_pool_size(vk::DescriptorType::eStorageBuffer, pipeline->parameter_count * VK_DEVICE_DESCRIPTOR_POOL_SIZE);
+ vk::DescriptorPoolCreateInfo descriptor_pool_create_info({}, VK_DEVICE_DESCRIPTOR_POOL_SIZE, descriptor_pool_size);
+ pipeline->descriptor_pools.push_back(device->device.createDescriptorPool(descriptor_pool_create_info));
+
+ pipeline->descriptor_set_idx = 0;
+
+ vk::PipelineLayoutCreateInfo pipeline_layout_create_info(vk::PipelineLayoutCreateFlags(), pipeline->dsl, pcr);
+ pipeline->layout = device->device.createPipelineLayout(pipeline_layout_create_info);
+
+ std::vector<vk::SpecializationMapEntry> specialization_entries(specialization_constants.size());
+
+ for (size_t i = 0; i < specialization_constants.size(); i++) {
+ specialization_entries[i].constantID = i;
+ specialization_entries[i].offset = i * sizeof(uint32_t);
+ specialization_entries[i].size = sizeof(uint32_t);
+ }
+
+ vk::SpecializationInfo specialization_info(
+ specialization_entries.size(),
+ specialization_entries.data(),
+ specialization_constants.size() * sizeof(uint32_t),
+ specialization_constants.data()
+ );
+
+ vk::PipelineShaderStageCreateFlags pipeline_shader_stage_create_flags{};
+
+ if (device->subgroup_require_full_support && require_full_subgroups) {
+ pipeline_shader_stage_create_flags |= vk::PipelineShaderStageCreateFlagBits::eRequireFullSubgroupsEXT;
+ }
+
+ vk::PipelineShaderStageCreateInfo pipeline_shader_create_info(
+ pipeline_shader_stage_create_flags,
+ vk::ShaderStageFlagBits::eCompute,
+ pipeline->shader_module,
+ entrypoint.c_str(),
+ &specialization_info);
+
+ vk::PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT pipeline_shader_stage_required_subgroup_size_create_info;
+ pipeline_shader_stage_required_subgroup_size_create_info.requiredSubgroupSize = required_subgroup_size;
+ if (device->subgroup_size_control && required_subgroup_size > 0) {
+ GGML_ASSERT(device->subgroup_min_size <= required_subgroup_size && required_subgroup_size <= device->subgroup_max_size);
+ pipeline_shader_create_info.setPNext(&pipeline_shader_stage_required_subgroup_size_create_info);
+ }
+
+ vk::ComputePipelineCreateInfo compute_pipeline_create_info(
+ vk::PipelineCreateFlags{},
+ pipeline_shader_create_info,
+ pipeline->layout);
+
+ vk::PipelineRobustnessCreateInfoEXT rci;
+
+ if (device->pipeline_robustness && disable_robustness) {
+ rci.storageBuffers = vk::PipelineRobustnessBufferBehaviorEXT::eDisabled;
+ rci.uniformBuffers = vk::PipelineRobustnessBufferBehaviorEXT::eDisabled;
+ compute_pipeline_create_info.setPNext(&rci);
+ }
+
+ pipeline->pipeline = device->device.createComputePipeline(VK_NULL_HANDLE, compute_pipeline_create_info).value;
+
+ {
+ std::lock_guard<std::mutex> guard(device->mutex);
+ device->pipelines.insert({ pipeline->name, pipeline });
+ }
+
+ {
+ std::lock_guard<std::mutex> guard(compile_count_mutex);
+ assert(compile_count > 0);
+ compile_count--;
+
+ // "Progress bar" for shader compiles
+ static uint32_t total_compile_count = 0;
+ if ((total_compile_count++ % 10) == 0) {
+ std::cerr << ".";
+ }
+ }
+ compile_count_cond.notify_all();
+}
+
+static void ggml_vk_destroy_pipeline(vk::Device& device, vk_pipeline& pipeline) {
+ VK_LOG_DEBUG("ggml_pipeline_destroy_pipeline(" << pipeline->name << ")");
+ for (auto& pool : pipeline->descriptor_pools) {
+ device.destroyDescriptorPool(pool);
+ }
+ pipeline->descriptor_pools.clear();
+ pipeline->descriptor_sets.clear();
+ pipeline->descriptor_set_idx = 0;
+
+ device.destroyDescriptorSetLayout(pipeline->dsl);
+
+ device.destroyPipelineLayout(pipeline->layout);
+
+ device.destroyShaderModule(pipeline->shader_module);
+
+ device.destroyPipeline(pipeline->pipeline);
+}
+
+static void ggml_pipeline_request_descriptor_sets(vk_device& device, vk_pipeline& pipeline, uint32_t n) {
+ VK_LOG_DEBUG("ggml_pipeline_request_descriptor_sets(" << pipeline->name << ", " << n << ")");
+ device->pipeline_descriptor_set_requirements[pipeline->name] += n;
+}
+
+static void ggml_pipeline_allocate_descriptor_sets(vk_device& device) {
+ std::lock_guard<std::mutex> guard(device->mutex);
+
+ for (auto& pair : device->pipeline_descriptor_set_requirements) {
+ vk_pipeline pipeline = device->pipelines.at(pair.first).lock();
+ const uint64_t n = pair.second;
+
+ VK_LOG_DEBUG("ggml_pipeline_allocate_descriptor_sets(" << pipeline->name << ", " << n << ")");
+
+ if (pipeline->descriptor_sets.size() >= pipeline->descriptor_set_idx + n) {
+ // Enough descriptors are available
+ continue;
+ }
+
+ uint32_t to_alloc = pipeline->descriptor_set_idx + n - pipeline->descriptor_sets.size();
+ uint32_t pool_remaining = VK_DEVICE_DESCRIPTOR_POOL_SIZE - pipeline->descriptor_sets.size() % VK_DEVICE_DESCRIPTOR_POOL_SIZE;
+ uint32_t pool_idx = pipeline->descriptor_sets.size() / VK_DEVICE_DESCRIPTOR_POOL_SIZE;
+
+ while (to_alloc > 0) {
+ const uint32_t alloc_count = std::min(pool_remaining, to_alloc);
+ to_alloc -= alloc_count;
+ pool_remaining = VK_DEVICE_DESCRIPTOR_POOL_SIZE;
+
+ if (pool_idx >= pipeline->descriptor_pools.size()) {
+ vk::DescriptorPoolSize descriptor_pool_size(vk::DescriptorType::eStorageBuffer, pipeline->parameter_count * VK_DEVICE_DESCRIPTOR_POOL_SIZE);
+ vk::DescriptorPoolCreateInfo descriptor_pool_create_info({}, VK_DEVICE_DESCRIPTOR_POOL_SIZE, descriptor_pool_size);
+ pipeline->descriptor_pools.push_back(device->device.createDescriptorPool(descriptor_pool_create_info));
+ }
+
+ std::vector<vk::DescriptorSetLayout> layouts(alloc_count);
+ for (uint32_t i = 0; i < alloc_count; i++) {
+ layouts[i] = pipeline->dsl;
+ }
+ vk::DescriptorSetAllocateInfo descriptor_set_alloc_info(pipeline->descriptor_pools[pool_idx], alloc_count, layouts.data());
+ std::vector<vk::DescriptorSet> sets = device->device.allocateDescriptorSets(descriptor_set_alloc_info);
+ pipeline->descriptor_sets.insert(pipeline->descriptor_sets.end(), sets.begin(), sets.end());
+
+ pool_idx++;
+ }
+ }
+}
+
+static void ggml_pipeline_cleanup(vk_pipeline& pipeline) {
+ VK_LOG_DEBUG("ggml_pipeline_cleanup(" << pipeline->name << ")");
+ pipeline->descriptor_set_idx = 0;
+}
+
+static vk::CommandBuffer ggml_vk_create_cmd_buffer(vk_device& device, vk_queue& q) {
+ VK_LOG_DEBUG("ggml_vk_create_cmd_buffer()");
+ std::lock_guard<std::mutex> guard(device->mutex);
+
+ if (q.cmd_buffers.size() > q.cmd_buffer_idx) {
+ // Reuse command buffer
+ return q.cmd_buffers[q.cmd_buffer_idx++];
+ }
+
+ vk::CommandBufferAllocateInfo command_buffer_alloc_info(
+ q.pool,
+ vk::CommandBufferLevel::ePrimary,
+ 1);
+ const std::vector<vk::CommandBuffer> cmd_buffers = device->device.allocateCommandBuffers(command_buffer_alloc_info);
+ auto buf = cmd_buffers.front();
+
+ q.cmd_buffers.push_back(buf);
+ q.cmd_buffer_idx++;
+
+ return buf;
+}
+
+static vk_submission ggml_vk_create_submission(vk_device& device, vk_queue& q, std::vector<vk_semaphore> wait_semaphores, std::vector<vk_semaphore> signal_semaphores) {
+ VK_LOG_DEBUG("ggml_vk_create_submission()");
+ vk_submission s;
+ s.buffer = ggml_vk_create_cmd_buffer(device, q);
+ s.wait_semaphores = std::move(wait_semaphores);
+ s.signal_semaphores = std::move(signal_semaphores);
+ return s;
+}
+
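+// Batch all recorded sequences in ctx into a single queue.submit() call. Timeline semaphore
+// wait/signal values are attached to each vk::SubmitInfo through a TimelineSemaphoreSubmitInfo
+// chained via pNext; an empty context only submits the fence (if any).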
+static void ggml_vk_submit(vk_context& ctx, vk::Fence fence) {
+ if (ctx->seqs.empty()) {
+ if (fence) {
+ ctx->q->queue.submit({}, fence);
+ }
+ return;
+ }
+ VK_LOG_DEBUG("ggml_vk_submit(" << ctx << ", " << fence << ")");
+
+ std::vector<std::vector<uint64_t>> tl_wait_vals;
+ std::vector<std::vector<uint64_t>> tl_signal_vals;
+ std::vector<std::vector<vk::Semaphore>> tl_wait_semaphores;
+ std::vector<std::vector<vk::Semaphore>> tl_signal_semaphores;
+ std::vector<vk::TimelineSemaphoreSubmitInfo> tl_submit_infos;
+ std::vector<vk::SubmitInfo> submit_infos;
+ int idx = -1;
+ std::vector<std::vector<vk::PipelineStageFlags>> stage_flags;
+
+ size_t reserve = 0;
+
+ for (const auto& sequence : ctx->seqs) {
+ reserve += sequence.size();
+ }
+
+ // Pre-reserve vectors to prevent reallocation, which invalidates pointers
+ tl_wait_semaphores.reserve(reserve);
+ tl_wait_vals.reserve(reserve);
+ tl_signal_semaphores.reserve(reserve);
+ tl_signal_vals.reserve(reserve);
+ tl_submit_infos.reserve(reserve);
+ submit_infos.reserve(reserve);
+ stage_flags.reserve(reserve);
+
+ for (const auto& sequence : ctx->seqs) {
+ for (const auto& submission : sequence) {
+ stage_flags.push_back({});
+ idx++;
+ tl_wait_vals.push_back({});
+ tl_wait_semaphores.push_back({});
+ tl_signal_vals.push_back({});
+ tl_signal_semaphores.push_back({});
+ for (size_t i = 0; i < submission.wait_semaphores.size(); i++) {
+ stage_flags[idx].push_back(ctx->q->stage_flags);
+ tl_wait_vals[idx].push_back(submission.wait_semaphores[i].value);
+ tl_wait_semaphores[idx].push_back(submission.wait_semaphores[i].s);
+ }
+ for (size_t i = 0; i < submission.signal_semaphores.size(); i++) {
+ tl_signal_vals[idx].push_back(submission.signal_semaphores[i].value);
+ tl_signal_semaphores[idx].push_back(submission.signal_semaphores[i].s);
+ }
+ tl_submit_infos.push_back({
+ (uint32_t) submission.wait_semaphores.size(),
+ tl_wait_vals[idx].data(),
+ (uint32_t) submission.signal_semaphores.size(),
+ tl_signal_vals[idx].data(),
+ });
+ tl_submit_infos[idx].sType = vk::StructureType::eTimelineSemaphoreSubmitInfo;
+ tl_submit_infos[idx].pNext = nullptr;
+ vk::SubmitInfo si{
+ (uint32_t) submission.wait_semaphores.size(),
+ tl_wait_semaphores[idx].data(),
+ stage_flags[idx].data(),
+ 1,
+ &submission.buffer,
+ (uint32_t) submission.signal_semaphores.size(),
+ tl_signal_semaphores[idx].data(),
+ };
+ si.setPNext(&tl_submit_infos[idx]);
+ submit_infos.push_back(si);
+ }
+ }
+
+ ctx->q->queue.submit(submit_infos, fence);
+
+ ctx->seqs.clear();
+}
+
+static uint32_t ggml_vk_find_queue_family_index(std::vector<vk::QueueFamilyProperties>& queue_family_props, const vk::QueueFlags& required, const vk::QueueFlags& avoid, int32_t compute_index, uint32_t min_num_queues) {
+ VK_LOG_DEBUG("ggml_vk_find_queue_family_index()");
+ const uint32_t qfsize = queue_family_props.size();
+
+ // Try with avoid preferences first
+ for (uint32_t i = 0; i < qfsize; i++) {
+ if (queue_family_props[i].queueCount >= min_num_queues && (compute_index < 0 || i != (uint32_t) compute_index) && queue_family_props[i].queueFlags & required && !(queue_family_props[i].queueFlags & avoid)) {
+ return i;
+ }
+ }
+
+ // Fall back to only required
+ for (size_t i = 0; i < qfsize; i++) {
+ if (queue_family_props[i].queueCount >= min_num_queues && (compute_index < 0 || i != (uint32_t) compute_index) && queue_family_props[i].queueFlags & required) {
+ return i;
+ }
+ }
+
+ // Fall back to reusing compute queue
+ for (size_t i = 0; i < qfsize; i++) {
+ if (queue_family_props[i].queueCount >= min_num_queues && queue_family_props[i].queueFlags & required) {
+ return i;
+ }
+ }
+
+ // Fall back to ignoring min_num_queues
+ for (size_t i = 0; i < qfsize; i++) {
+ if (queue_family_props[i].queueFlags & required) {
+ return i;
+ }
+ }
+
+ // All commands that are allowed on a queue that supports transfer operations are also allowed on a queue that supports either graphics or compute operations.
+ // Thus, if the capabilities of a queue family include VK_QUEUE_GRAPHICS_BIT or VK_QUEUE_COMPUTE_BIT, then reporting the VK_QUEUE_TRANSFER_BIT capability separately for that queue family is optional.
+ if (compute_index >= 0) {
+ return compute_index;
+ }
+
+ std::cerr << "ggml_vulkan: No suitable queue family index found." << std::endl;
+
+ for(auto &q_family : queue_family_props) {
+ std::cerr << "Queue number: " + std::to_string(q_family.queueCount) << " flags: " + to_string(q_family.queueFlags) << std::endl;
+ }
+ abort();
+}
+
+static void ggml_vk_create_queue(vk_device& device, vk_queue& q, uint32_t queue_family_index, uint32_t queue_index, vk::PipelineStageFlags&& stage_flags, bool transfer_only) {
+ VK_LOG_DEBUG("ggml_vk_create_queue()");
+ std::lock_guard<std::mutex> guard(device->mutex);
+
+ q.queue_family_index = queue_family_index;
+ q.transfer_only = transfer_only;
+
+ vk::CommandPoolCreateInfo command_pool_create_info_compute(vk::CommandPoolCreateFlags(VK_COMMAND_POOL_CREATE_TRANSIENT_BIT), queue_family_index);
+ q.pool = device->device.createCommandPool(command_pool_create_info_compute);
+
+ q.cmd_buffer_idx = 0;
+
+ q.queue = device->device.getQueue(queue_family_index, queue_index);
+
+ q.stage_flags = stage_flags;
+}
+
+static vk_context ggml_vk_create_context(ggml_backend_vk_context * ctx, vk_queue& q) {
+ vk_context result = std::make_shared<vk_context_struct>();
+ VK_LOG_DEBUG("ggml_vk_create_context(" << result << ")");
+ ctx->gc.contexts.emplace_back(result);
+ result->q = &q;
+ return result;
+}
+
+static vk_context ggml_vk_create_temporary_context(vk_queue& q) {
+ vk_context result = std::make_shared<vk_context_struct>();
+ VK_LOG_DEBUG("ggml_vk_create_temporary_context(" << result << ")");
+ result->q = &q;
+ return result;
+}
+
+static vk_semaphore * ggml_vk_create_binary_semaphore(ggml_backend_vk_context * ctx) {
+ VK_LOG_DEBUG("ggml_vk_create_timeline_semaphore()");
+ vk::SemaphoreTypeCreateInfo tci{ vk::SemaphoreType::eBinary, 0 };
+ vk::SemaphoreCreateInfo ci{};
+ ci.setPNext(&tci);
+ vk::Semaphore semaphore = ctx->device->device.createSemaphore(ci);
+ ctx->gc.semaphores.push_back({ semaphore, 0 });
+ return &ctx->gc.semaphores[ctx->gc.semaphores.size() - 1];
+}
+
+static vk_semaphore * ggml_vk_create_timeline_semaphore(ggml_backend_vk_context * ctx) {
+ VK_LOG_DEBUG("ggml_vk_create_timeline_semaphore()");
+ if (ctx->semaphore_idx >= ctx->gc.tl_semaphores.size()) {
+ vk::SemaphoreTypeCreateInfo tci{ vk::SemaphoreType::eTimeline, 0 };
+ vk::SemaphoreCreateInfo ci{};
+ ci.setPNext(&tci);
+ vk::Semaphore semaphore = ctx->device->device.createSemaphore(ci);
+ ctx->gc.tl_semaphores.push_back({ semaphore, 0 });
+ }
+ return &ctx->gc.tl_semaphores[ctx->semaphore_idx++];
+}
+
+static vk::Event ggml_vk_create_event(ggml_backend_vk_context * ctx) {
+ if (ctx->event_idx >= ctx->gc.events.size()) {
+ ctx->gc.events.push_back(ctx->device->device.createEvent({}));
+ }
+ return ctx->gc.events[ctx->event_idx++];
+}
+
+static void ggml_vk_queue_cleanup(vk_device& device, vk_queue& q) {
+ VK_LOG_DEBUG("ggml_vk_queue_cleanup()");
+ std::lock_guard<std::mutex> guard(device->mutex);
+
+ // Requires command buffers to be done
+ device->device.resetCommandPool(q.pool);
+ q.cmd_buffer_idx = 0;
+}
+
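+// Select a memory type index that is permitted by mem_req->memoryTypeBits, has all of the
+// requested property flags, and whose heap is large enough for the allocation.
+// Returns UINT32_MAX if no memory type qualifies.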
+static uint32_t find_properties(const vk::PhysicalDeviceMemoryProperties* mem_props, vk::MemoryRequirements* mem_req, vk::MemoryPropertyFlags flags) {
+ for (uint32_t i = 0; i < mem_props->memoryTypeCount; ++i) {
+ vk::MemoryType memory_type = mem_props->memoryTypes[i];
+ if ((mem_req->memoryTypeBits & ((uint64_t)1 << i)) &&
+ (flags & memory_type.propertyFlags) == flags &&
+ mem_props->memoryHeaps[memory_type.heapIndex].size >= mem_req->size) {
+ return i;
+ }
+ }
+ return UINT32_MAX;
+}
+
+static vk_buffer ggml_vk_create_buffer(vk_device& device, size_t size, vk::MemoryPropertyFlags req_flags, vk::MemoryPropertyFlags fallback_flags = vk::MemoryPropertyFlags(0)) {
+ VK_LOG_DEBUG("ggml_vk_create_buffer(" << device->name << ", " << size << ", " << to_string(req_flags) << ", " << to_string(fallback_flags) << ")");
+ if (size > device->max_memory_allocation_size) {
+ throw vk::OutOfDeviceMemoryError("Requested buffer size exceeds device memory allocation limit");
+ }
+
+ std::lock_guard<std::mutex> guard(device->mutex);
+
+ vk_buffer buf = std::make_shared<vk_buffer_struct>();
+
+ if (size == 0) {
+ buf->size = 0;
+ return buf;
+ }
+
+ vk::BufferCreateInfo buffer_create_info{
+ vk::BufferCreateFlags(),
+ size,
+ vk::BufferUsageFlagBits::eStorageBuffer | vk::BufferUsageFlagBits::eTransferSrc | vk::BufferUsageFlagBits::eTransferDst,
+ vk::SharingMode::eExclusive,
+ 0,
+ nullptr,
+ };
+
+ buf->buffer = device->device.createBuffer(buffer_create_info);
+
+ vk::MemoryRequirements mem_req = device->device.getBufferMemoryRequirements(buf->buffer);
+
+ vk::PhysicalDeviceMemoryProperties mem_props = device->physical_device.getMemoryProperties();
+
+ uint32_t memory_type_index = UINT32_MAX;
+
+ memory_type_index = find_properties(&mem_props, &mem_req, req_flags);
+ buf->memory_property_flags = req_flags;
+
+ if (memory_type_index == UINT32_MAX && fallback_flags) {
+ memory_type_index = find_properties(&mem_props, &mem_req, fallback_flags);
+ buf->memory_property_flags = fallback_flags;
+ }
+
+ if (memory_type_index == UINT32_MAX) {
+ device->device.destroyBuffer(buf->buffer);
+ throw vk::OutOfDeviceMemoryError("No suitable memory type found");
+ }
+
+ try {
+ buf->device_memory = device->device.allocateMemory({ mem_req.size, memory_type_index });
+ } catch (const vk::SystemError& e) {
+ if (buf->memory_property_flags != fallback_flags) {
+ // Try again with fallback flags
+ memory_type_index = find_properties(&mem_props, &mem_req, fallback_flags);
+ buf->memory_property_flags = fallback_flags;
+
+ try {
+ buf->device_memory = device->device.allocateMemory({ mem_req.size, memory_type_index });
+ }
+ catch (const vk::SystemError& e) {
+ device->device.destroyBuffer(buf->buffer);
+ throw e;
+ }
+ } else {
+ // Out of Host/Device memory, clean up buffer
+ device->device.destroyBuffer(buf->buffer);
+ throw e;
+ }
+ }
+ buf->ptr = nullptr;
+
+ if (buf->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible) {
+ buf->ptr = device->device.mapMemory(buf->device_memory, 0, VK_WHOLE_SIZE);
+ }
+
+ device->device.bindBufferMemory(buf->buffer, buf->device_memory, 0);
+
+ buf->device = device;
+ buf->size = size;
+
+#ifdef GGML_VULKAN_MEMORY_DEBUG
+ device->memory_logger->log_allocation(buf, size);
+#endif
+
+ return buf;
+}
+
+static vk_buffer ggml_vk_create_buffer_check(vk_device& device, size_t size, vk::MemoryPropertyFlags req_flags, vk::MemoryPropertyFlags fallback_flags = vk::MemoryPropertyFlags(0)) {
+ try {
+ return ggml_vk_create_buffer(device, size, req_flags, fallback_flags);
+ } catch (const vk::SystemError& e) {
+ std::cerr << "ggml_vulkan: Memory allocation of size " << size << " failed." << std::endl;
+ std::cerr << "ggml_vulkan: " << e.what() << std::endl;
+ throw e;
+ }
+}
+
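+// Allocate a device buffer: on UMA devices request device-local memory with host-visible/coherent
+// memory as the fallback; otherwise try host-visible device-local memory (ReBAR) first and fall
+// back to plain device-local memory.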
+static vk_buffer ggml_vk_create_buffer_device(vk_device& device, size_t size) {
+ vk_buffer buf;
+ try {
+ if (device->uma) {
+ // Fall back to host memory type
+ buf = ggml_vk_create_buffer(device, size, vk::MemoryPropertyFlagBits::eDeviceLocal, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent);
+ } else {
+            // use ReBAR if available, otherwise fall back to device-local-only memory
+ buf = ggml_vk_create_buffer(device, size, vk::MemoryPropertyFlagBits::eDeviceLocal | vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent, vk::MemoryPropertyFlagBits::eDeviceLocal);
+ }
+ } catch (const vk::SystemError& e) {
+ std::cerr << "ggml_vulkan: Device memory allocation of size " << size << " failed." << std::endl;
+ std::cerr << "ggml_vulkan: " << e.what() << std::endl;
+ throw e;
+ }
+
+ return buf;
+}
+
+static void ggml_vk_destroy_buffer(vk_buffer& buf) {
+ if (buf == nullptr) {
+ return;
+ }
+
+#ifdef GGML_VULKAN_MEMORY_DEBUG
+ if (buf->device != nullptr) {
+ buf->device->memory_logger->log_deallocation(buf);
+ }
+#endif
+
+ buf.reset();
+}
+
+static vk_subbuffer ggml_vk_subbuffer(vk_buffer& buf) {
+ return { buf, 0, VK_WHOLE_SIZE };
+}
+
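+// Record a full memory barrier on the context's queue: compute queues synchronize shader and
+// transfer access, transfer-only queues synchronize transfer access only.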
+static void ggml_vk_sync_buffers(vk_context& ctx) {
+ VK_LOG_DEBUG("ggml_vk_sync_buffers()");
+
+ const bool transfer_queue = ctx->q->transfer_only;
+
+ ctx->s->buffer.pipelineBarrier(
+ ctx->q->stage_flags,
+ ctx->q->stage_flags,
+ {},
+ { {
+ { !transfer_queue ? (vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eShaderWrite | vk::AccessFlagBits::eTransferRead | vk::AccessFlagBits::eTransferWrite) : (vk::AccessFlagBits::eTransferRead | vk::AccessFlagBits::eTransferWrite) },
+ { !transfer_queue ? (vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eShaderWrite | vk::AccessFlagBits::eTransferRead | vk::AccessFlagBits::eTransferWrite) : (vk::AccessFlagBits::eTransferRead | vk::AccessFlagBits::eTransferWrite) }
+ } },
+ {},
+ {}
+ );
+}
+
+static void ggml_vk_wait_events(vk_context& ctx, std::vector<vk::Event>&& events) {
+ VK_LOG_DEBUG("ggml_vk_wait_events()");
+ if (events.empty()) {
+ return;
+ }
+
+ ctx->s->buffer.waitEvents(
+ events,
+ ctx->q->stage_flags,
+ ctx->q->stage_flags,
+ {},
+ {},
+ {}
+ );
+}
+
+// number of rows/cols for flash attention shader
+static constexpr uint32_t flash_attention_num_small_rows = 32;
+static std::array<uint32_t, 2> fa_rows_cols(uint32_t D, uint32_t clamp, ggml_type type, bool small_rows) {
+ GGML_UNUSED(clamp);
+
+ // small rows, large cols
+ if (small_rows) {
+ return {flash_attention_num_small_rows, 128};
+ }
+ // small cols to reduce register count
+ if (ggml_is_quantized(type) || D == 256) {
+ return {64, 32};
+ }
+ return {64, 64};
+}
+
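+// Estimate the shared memory a matmul warptile needs (tile load buffers, optional mul_mat_id
+// row ids, optional coopmat staging) and check it against the device limit.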
+static bool ggml_vk_matmul_shmem_support(const vk_device& device, const std::vector<uint32_t>& warptile, bool mul_mat_id) {
+ // Needs to be kept up to date on shader changes
+ const uint32_t bank_conflict_offset = device->coopmat_support ? 8 : 1;
+ const uint32_t type_size = device->fp16 ? sizeof(ggml_fp16_t) : sizeof(float);
+ const uint32_t warps = warptile[0] / warptile[10];
+
+ const uint32_t load_bufs = (warptile[1] + warptile[2]) * (warptile[3] + bank_conflict_offset) * type_size;
+ const uint32_t mmid_row_ids = mul_mat_id ? 3072 * sizeof(uint32_t) : 0;
+ const uint32_t coopmat_stage = device->coopmat_support ? warptile[7] * warptile[8] / warps * sizeof(float) : 0;
+
+ return (load_bufs + mmid_row_ids + coopmat_stage) <= device->properties.limits.maxComputeSharedMemorySize;
+}
+
+static void ggml_vk_load_shaders(vk_device& device) {
+ VK_LOG_DEBUG("ggml_vk_load_shaders(" << device->name << ")");
+
+ std::cerr << "ggml_vulkan: Compiling shaders";
+
+ // some shaders have a minimum subgroup size
+ const uint32_t subgroup_size_16 = std::max(device->subgroup_size, 16u);
+ const uint32_t subgroup_size_32 = std::max(device->subgroup_size, 32u);
+
+ // mulmat
+ std::vector<uint32_t> l_warptile, m_warptile, s_warptile,
+ l_warptile_mmq, m_warptile_mmq, s_warptile_mmq,
+ l_warptile_mmq_k, m_warptile_mmq_k, s_warptile_mmq_k,
+ l_warptile_mmqid, m_warptile_mmqid, s_warptile_mmqid;
+ std::array<uint32_t, 3> l_wg_denoms, m_wg_denoms, s_wg_denoms,
+ l_mmq_wg_denoms, m_mmq_wg_denoms, s_mmq_wg_denoms,
+ l_mmq_wg_denoms_k, m_mmq_wg_denoms_k, s_mmq_wg_denoms_k,
+ l_mmqid_wg_denoms, m_mmqid_wg_denoms, s_mmqid_wg_denoms;
+
+ uint32_t l_align, m_align, s_align;
+ if (device->coopmat2) {
+ // spec constants and tile sizes for non-quant matmul/matmul_id
+ l_warptile = { 256, 128, 256, 64 };
+ m_warptile = { 256, 128, 128, 64 };
+ s_warptile = { 128, 64, 64, 64 };
+ l_wg_denoms = {128, 256, 1 };
+ m_wg_denoms = {128, 128, 1 };
+ s_wg_denoms = { 64, 64, 1 };
+
+ // spec constants and tile sizes for quant matmul (non-Qi_K)
+ l_warptile_mmq = { 256, 128, 256, 64 };
+ m_warptile_mmq = { 256, 128, 128, 64 };
+ s_warptile_mmq = { 256, 128, 128, 64 };
+ l_mmq_wg_denoms = { 128, 256, 1 };
+ m_mmq_wg_denoms = { 128, 128, 1 };
+ s_mmq_wg_denoms = { 128, 128, 1 };
+
+ // spec constants and tile sizes for quant matmul (Qi_K)
+ l_warptile_mmq_k = { 256, 128, 512, 16 };
+ m_warptile_mmq_k = { 256, 128, 256, 16 };
+ s_warptile_mmq_k = { 256, 32, 128, 64 };
+ l_mmq_wg_denoms_k = { 128, 512, 1 };
+ m_mmq_wg_denoms_k = { 128, 256, 1 };
+ s_mmq_wg_denoms_k = { 32, 128, 1 };
+
+ // spec constants and tile sizes for quant matmul_id
+ l_warptile_mmqid = { 256, 128, 128, 16 };
+ m_warptile_mmqid = { 256, 128, 64, 16 };
+ s_warptile_mmqid = { 256, 64, 64, 16 };
+ l_mmqid_wg_denoms = { 128, 128, 1 };
+ m_mmqid_wg_denoms = { 128, 64, 1 };
+ s_mmqid_wg_denoms = { 64, 64, 1 };
+
+ l_align = 128;
+ m_align = 64;
+ s_align = 32;
+ } else {
+ // Matrix cores require different warp group sizes
+ const uint32_t tm_l = device->coopmat_support ? device->coopmat_m : 4;
+ const uint32_t tm_m = device->coopmat_support ? device->coopmat_m : 4;
+ const uint32_t tm_s = device->coopmat_support ? device->coopmat_m : 2;
+ const uint32_t tn_l = device->coopmat_support ? device->coopmat_n : 4;
+ const uint32_t tn_m = device->coopmat_support ? device->coopmat_n : 2;
+ const uint32_t tn_s = device->coopmat_support ? device->coopmat_n : 2;
+ const uint32_t tk_l = device->coopmat_support ? device->coopmat_k : 1;
+ const uint32_t tk_m = device->coopmat_support ? device->coopmat_k : 1;
+ const uint32_t tk_s = device->coopmat_support ? device->coopmat_k : 1;
+
+ l_warptile = { 128, 128, 128, 16, device->subgroup_size * 2, 64, 2, tm_l, tn_l, tk_l, device->subgroup_size };
+ m_warptile = { 128, 64, 64, 16, device->subgroup_size, 32, 2, tm_m, tn_m, tk_m, device->subgroup_size };
+ s_warptile = { subgroup_size_16, 32, 32, 16, 32, 32, 2, tm_s, tn_s, tk_s, device->subgroup_size };
+
+ l_warptile_mmq = { 128, 128, 128, 32, device->subgroup_size * 2, 64, 2, tm_l, tn_l, tk_l, device->subgroup_size };
+ m_warptile_mmq = { 128, 64, 64, 32, device->subgroup_size, 32, 2, tm_m, tn_m, tk_m, device->subgroup_size };
+ s_warptile_mmq = { subgroup_size_32, 32, 32, 32, 32, 32, 2, tm_s, tn_s, tk_s, device->subgroup_size };
+
+ l_mmq_wg_denoms = l_wg_denoms = {128, 128, 1 };
+ m_mmq_wg_denoms = m_wg_denoms = { 64, 64, 1 };
+ s_mmq_wg_denoms = s_wg_denoms = { 32, 32, 1 };
+ l_align = 128;
+ m_align = 64;
+ s_align = 32;
+
+ // Fallback to smaller sizes if there's not enough shared memory. Given the current shaders
+ // and tile sizes, this should handle 16KB, 32KB, and 48KB+.
+ // This logic doesn't explicitly account for the 12KB row_ids in the mul_mat_mat_id shaders.
+    // But the numbers happen to work out for a 32KB shared memory size: when using the medium
+    // size there's enough room for everything, and we assert this.
+ uint32_t shmem_needed = (l_warptile[1] + l_warptile[2]) * (l_warptile[3] + 1) * sizeof(float);
+ if (shmem_needed > device->properties.limits.maxComputeSharedMemorySize) {
+ l_warptile = m_warptile;
+ l_wg_denoms = m_wg_denoms;
+ shmem_needed = (l_warptile[1] + l_warptile[2]) * (l_warptile[3] + 1) * sizeof(float);
+ GGML_ASSERT(shmem_needed <= device->properties.limits.maxComputeSharedMemorySize);
+ }
+ if (device->properties.limits.maxComputeSharedMemorySize >= 32768) {
+ // assert mul_mat_mat_id shaders will fit.
+ GGML_ASSERT(shmem_needed + 3072*4 <= device->properties.limits.maxComputeSharedMemorySize);
+ }
+
+ shmem_needed = (l_warptile_mmq[1] + l_warptile_mmq[2]) * (l_warptile_mmq[3] + 1) * sizeof(float);
+ if (shmem_needed > device->properties.limits.maxComputeSharedMemorySize) {
+ if (device->properties.limits.maxComputeSharedMemorySize == 32768) {
+ l_warptile_mmq = m_warptile_mmq;
+ l_mmq_wg_denoms = m_mmq_wg_denoms;
+ } else {
+ l_warptile_mmq = s_warptile_mmq;
+ l_mmq_wg_denoms = s_mmq_wg_denoms;
+ }
+ shmem_needed = (l_warptile_mmq[1] + l_warptile_mmq[2]) * (l_warptile_mmq[3] + 1) * sizeof(float);
+ GGML_ASSERT(shmem_needed <= device->properties.limits.maxComputeSharedMemorySize);
+ }
+ if (device->properties.limits.maxComputeSharedMemorySize >= 32768) {
+ // assert mul_mat_mat_id shaders will fit.
+ GGML_ASSERT(shmem_needed + 3072*4 <= device->properties.limits.maxComputeSharedMemorySize);
+ }
+ // Disable medium and large matrix multiplication if not enough shared memory is available
+ // Check mmq warptiles as the largest configuration
+    // Throw an error if there isn't enough shared memory even for the smallest configuration
+ if (!ggml_vk_matmul_shmem_support(device, s_warptile_mmq, false)) {
+ std::cerr << "ggml_vulkan: Error: Shared memory size too small for matrix multiplication." << std::endl;
+ throw std::runtime_error("Shared memory size too small for matrix multiplication.");
+ } else if (!ggml_vk_matmul_shmem_support(device, m_warptile_mmq, false)) {
+ device->mul_mat_m = false;
+ device->mul_mat_l = false;
+ } else if (!ggml_vk_matmul_shmem_support(device, l_warptile_mmq, false)) {
+ device->mul_mat_l = false;
+ }
+
+ // Disable mul_mat_id if not enough shared memory is available
+ if (!ggml_vk_matmul_shmem_support(device, s_warptile_mmq, true)) {
+ device->mul_mat_id_s = false;
+ device->mul_mat_id_m = false;
+ device->mul_mat_id_l = false;
+ } else if (!ggml_vk_matmul_shmem_support(device, m_warptile_mmq, true)) {
+ device->mul_mat_id_m = false;
+ device->mul_mat_id_l = false;
+ } else if (!ggml_vk_matmul_shmem_support(device, l_warptile_mmq, true)) {
+ device->mul_mat_id_l = false;
+ }
+ }
+
+ device->pipeline_matmul_f32 = std::make_shared<vk_matmul_pipeline_struct>();
+ device->pipeline_matmul_f32_f16 = std::make_shared<vk_matmul_pipeline_struct>();
+
+ device->pipeline_matmul_id_f32 = std::make_shared<vk_matmul_pipeline_struct>();
+
+ std::vector<std::future<void>> compiles;
+ auto const &ggml_vk_create_pipeline = [&](vk_device& device, vk_pipeline& pipeline, const std::string &name, size_t spv_size, const void* spv_data, const std::string &entrypoint,
+ uint32_t parameter_count, uint32_t push_constant_size, std::array<uint32_t, 3> wg_denoms, const std::vector<uint32_t>& specialization_constants,
+ uint32_t align, bool disable_robustness = false, bool require_full_subgroups = false, uint32_t required_subgroup_size = 0) {
+ {
+ // wait until fewer than N compiles are in progress
+ uint32_t N = std::max(1u, std::thread::hardware_concurrency());
+ std::unique_lock<std::mutex> guard(compile_count_mutex);
+ while (compile_count >= N) {
+ compile_count_cond.wait(guard);
+ }
+ compile_count++;
+ }
+ compiles.push_back(std::async(ggml_vk_create_pipeline_func, std::ref(device), std::ref(pipeline), name, spv_size, spv_data, entrypoint,
+ parameter_count, push_constant_size, wg_denoms, specialization_constants, align, disable_robustness, require_full_subgroups, required_subgroup_size));
+ };
+
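+    // Pipelines are compiled asynchronously; the wrapper above caps the number of in-flight
+    // compiles at the hardware thread count.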
+#if defined(VK_NV_cooperative_matrix2) && defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
+ if (device->coopmat2) {
+
+ auto const &fa_wg_denoms = [&](uint32_t D, uint32_t clamp, ggml_type type, bool small_rows) -> std::array<uint32_t, 3> {
+ return {fa_rows_cols(D, clamp, type, small_rows)[0], 1, 1};
+ };
+
+ auto const &fa_spec_constants = [&](uint32_t D, uint32_t clamp, ggml_type type, bool small_rows) -> std::vector<uint32_t> {
+            // For a large number of rows, 128 invocations seems to work best.
+            // For a small number of rows (e.g. N==1), 256 works better. But the matrix granularity for 256 is 32,
+            // so we can't use 256 for D==80.
+ uint32_t wg_size = (small_rows && (D % 32) == 0) ? 256 : 128;
+ auto rows_cols = fa_rows_cols(D, clamp, type, small_rows);
+ return {wg_size, rows_cols[0], rows_cols[1], (D), clamp};
+ };
+
+#define CREATE_FA2(TYPE, NAMELC, D) \
+ ggml_vk_create_pipeline(device, device->pipeline_flash_attn_f32_f16_D ## D[TYPE][0][0][0], "flash_attn_f32_f16_D" #D "_f16acc" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## _f16acc_cm2_len, flash_attn_f32_f16_ ## NAMELC ## _f16acc_cm2_data, "main", 5, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(D,1,TYPE,false), fa_spec_constants(D,1,TYPE,false), 1); \
+ ggml_vk_create_pipeline(device, device->pipeline_flash_attn_f32_f16_D ## D[TYPE][0][0][1], "flash_attn_f32_f16_D" #D "_aligned_f16acc" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## _f16acc_cm2_len, flash_attn_f32_f16_ ## NAMELC ## _f16acc_cm2_data, "main", 5, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(D,0,TYPE,false), fa_spec_constants(D,0,TYPE,false), fa_rows_cols(D,0,TYPE,false)[1]); \
+ ggml_vk_create_pipeline(device, device->pipeline_flash_attn_f32_f16_D ## D[TYPE][1][0][0], "flash_attn_f32_f16_D" #D "_f32acc" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## _cm2_len, flash_attn_f32_f16_ ## NAMELC ## _cm2_data, "main", 5, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(D,1,TYPE,false), fa_spec_constants(D,1,TYPE,false), 1); \
+ ggml_vk_create_pipeline(device, device->pipeline_flash_attn_f32_f16_D ## D[TYPE][1][0][1], "flash_attn_f32_f16_D" #D "_aligned_f32acc" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## _cm2_len, flash_attn_f32_f16_ ## NAMELC ## _cm2_data, "main", 5, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(D,0,TYPE,false), fa_spec_constants(D,0,TYPE,false), fa_rows_cols(D,0,TYPE,false)[1]); \
+ ggml_vk_create_pipeline(device, device->pipeline_flash_attn_f32_f16_D ## D[TYPE][0][1][0], "flash_attn_f32_f16_D" #D "_f16acc_smallrows" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## _f16acc_cm2_len, flash_attn_f32_f16_ ## NAMELC ## _f16acc_cm2_data, "main", 5, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(D,1,TYPE,true), fa_spec_constants(D,1,TYPE,true), 1); \
+ ggml_vk_create_pipeline(device, device->pipeline_flash_attn_f32_f16_D ## D[TYPE][0][1][1], "flash_attn_f32_f16_D" #D "_aligned_f16acc_smallrows" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## _f16acc_cm2_len, flash_attn_f32_f16_ ## NAMELC ## _f16acc_cm2_data, "main", 5, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(D,0,TYPE,true), fa_spec_constants(D,0,TYPE,true), fa_rows_cols(D,0,TYPE,true)[1]); \
+ ggml_vk_create_pipeline(device, device->pipeline_flash_attn_f32_f16_D ## D[TYPE][1][1][0], "flash_attn_f32_f16_D" #D "_f32acc_smallrows" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## _cm2_len, flash_attn_f32_f16_ ## NAMELC ## _cm2_data, "main", 5, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(D,1,TYPE,true), fa_spec_constants(D,1,TYPE,true), 1); \
+ ggml_vk_create_pipeline(device, device->pipeline_flash_attn_f32_f16_D ## D[TYPE][1][1][1], "flash_attn_f32_f16_D" #D "_aligned_f32acc_smallrows" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## _cm2_len, flash_attn_f32_f16_ ## NAMELC ## _cm2_data, "main", 5, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(D,0,TYPE,true), fa_spec_constants(D,0,TYPE,true), fa_rows_cols(D,0,TYPE,true)[1]); \
+
+#define CREATE_FA(TYPE, NAMELC) \
+ CREATE_FA2(TYPE, NAMELC, 64) \
+ CREATE_FA2(TYPE, NAMELC, 80) \
+ CREATE_FA2(TYPE, NAMELC, 96) \
+ CREATE_FA2(TYPE, NAMELC, 112) \
+ CREATE_FA2(TYPE, NAMELC, 128) \
+ CREATE_FA2(TYPE, NAMELC, 256)
+
+ CREATE_FA(GGML_TYPE_F16, f16)
+ CREATE_FA(GGML_TYPE_Q4_0, q4_0)
+ CREATE_FA(GGML_TYPE_Q4_1, q4_1)
+ CREATE_FA(GGML_TYPE_Q5_0, q5_0)
+ CREATE_FA(GGML_TYPE_Q5_1, q5_1)
+ CREATE_FA(GGML_TYPE_Q8_0, q8_0)
+ // K dequants currently disabled because D dimension is rounded up to 256 and runs inefficiently
+ //CREATE_FA(GGML_TYPE_Q2_K, q2_k)
+ //CREATE_FA(GGML_TYPE_Q3_K, q3_k)
+ //CREATE_FA(GGML_TYPE_Q4_K, q4_k)
+ //CREATE_FA(GGML_TYPE_Q5_K, q5_k)
+ //CREATE_FA(GGML_TYPE_Q6_K, q6_k)
+ CREATE_FA(GGML_TYPE_IQ4_NL, iq4_nl)
+#undef CREATE_FA
+
+ // Create 6 variants, {s,m,l}x{unaligned,aligned}
+#define CREATE_MM(PIPELINE_NAME, NAMELC, F16ACC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT) \
+ ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->l, #NAMELC #F16ACC "_l", NAMELC ## F16ACC ## _cm2_len, NAMELC ## F16ACC ## _cm2_data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, 1); \
+ ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->m, #NAMELC #F16ACC "_m", NAMELC ## F16ACC ## _cm2_len, NAMELC ## F16ACC ## _cm2_data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, 1); \
+ ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->s, #NAMELC #F16ACC "_s", NAMELC ## F16ACC ## _cm2_len, NAMELC ## F16ACC ## _cm2_data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, 1); \
+ ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_l, #NAMELC #F16ACC "_aligned_l", NAMELC ## _aligned ## F16ACC ## _cm2_len, NAMELC ## _aligned ## F16ACC ## _cm2_data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, l_align); \
+ ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_m, #NAMELC #F16ACC "_aligned_m", NAMELC ## _aligned ## F16ACC ## _cm2_len, NAMELC ## _aligned ## F16ACC ## _cm2_data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, m_align); \
+ ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_s, #NAMELC #F16ACC "_aligned_s", NAMELC ## _aligned ## F16ACC ## _cm2_len, NAMELC ## _aligned ## F16ACC ## _cm2_data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, s_align); \
+
+ // Create 2 variants, {f16,f32} accumulator
+#define CREATE_MM2(PIPELINE_NAME, NAMELC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT) \
+ CREATE_MM(PIPELINE_NAME . f16acc, NAMELC, _f16acc, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT) \
+ CREATE_MM(PIPELINE_NAME . f32acc, NAMELC, , WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT) \
+
+ CREATE_MM(pipeline_matmul_f32, matmul_f32_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 3)
+ CREATE_MM(pipeline_matmul_f32_f16, matmul_f32_f16, , wg_denoms, warptile, vk_mat_mat_push_constants, 3)
+
+ CREATE_MM2(pipeline_matmul_f16, matmul_f16, wg_denoms, warptile, vk_mat_mat_push_constants, 3)
+ CREATE_MM2(pipeline_matmul_f16_f32, matmul_f16_f32, wg_denoms, warptile, vk_mat_mat_push_constants, 3)
+ CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q4_0].f16acc, matmul_q4_0_f16, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
+ CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q4_1].f16acc, matmul_q4_1_f16, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
+ CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q5_0].f16acc, matmul_q5_0_f16, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
+ CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q5_1].f16acc, matmul_q5_1_f16, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
+ CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q8_0].f16acc, matmul_q8_0_f16, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
+ CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q2_K].f16acc, matmul_q2_k_f16, _f16acc, mmq_wg_denoms_k, warptile_mmq_k, vk_mat_mat_push_constants, 3)
+ CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q3_K].f16acc, matmul_q3_k_f16, _f16acc, mmq_wg_denoms_k, warptile_mmq_k, vk_mat_mat_push_constants, 3)
+ CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q4_K].f16acc, matmul_q4_k_f16, _f16acc, mmq_wg_denoms_k, warptile_mmq_k, vk_mat_mat_push_constants, 3)
+ CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q5_K].f16acc, matmul_q5_k_f16, _f16acc, mmq_wg_denoms_k, warptile_mmq_k, vk_mat_mat_push_constants, 3)
+ CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q6_K].f16acc, matmul_q6_k_f16, _f16acc, mmq_wg_denoms_k, warptile_mmq_k, vk_mat_mat_push_constants, 3)
+ CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_IQ4_NL].f16acc, matmul_iq4_nl_f16, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
+
+ CREATE_MM(pipeline_matmul_id_f32, matmul_id_f32_f32, , wg_denoms, warptile, vk_mat_mat_id_push_constants, 4)
+ CREATE_MM2(pipeline_matmul_id_f16, matmul_id_f16, wg_denoms, warptile, vk_mat_mat_id_push_constants, 4)
+ CREATE_MM2(pipeline_matmul_id_f16_f32, matmul_id_f16_f32, wg_denoms, warptile, vk_mat_mat_id_push_constants, 4)
+
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0].f16acc, matmul_id_q4_0_f32, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1].f16acc, matmul_id_q4_1_f32, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0].f16acc, matmul_id_q5_0_f32, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1].f16acc, matmul_id_q5_1_f32, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0].f16acc, matmul_id_q8_0_f32, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K].f16acc, matmul_id_q2_k_f32, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K].f16acc, matmul_id_q3_k_f32, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K].f16acc, matmul_id_q4_k_f32, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K].f16acc, matmul_id_q5_k_f32, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K].f16acc, matmul_id_q6_k_f32, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL].f16acc, matmul_id_iq4_nl_f32, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
+#undef CREATE_MM
+#undef CREATE_MM2
+ } else
+#endif // defined(VK_NV_cooperative_matrix2) && defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
+ if (device->coopmat_support) {
+ // Create 6 variants, {s,m,l}x{unaligned,aligned}
+#define CREATE_MM(PIPELINE_NAME, NAMELC, F16ACC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID) \
+ if (device->mul_mat ## ID ## _l) \
+ ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->l, #NAMELC #F16ACC "_l", NAMELC ## F16ACC ## _coopmat_len, NAMELC ## F16ACC ## _coopmat_data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, 1, false, true); \
+ if (device->mul_mat ## ID ## _m) \
+ ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->m, #NAMELC #F16ACC "_m", NAMELC ## F16ACC ## _coopmat_len, NAMELC ## F16ACC ## _coopmat_data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, 1, false, true); \
+ if (device->mul_mat ## ID ## _s) \
+ ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->s, #NAMELC #F16ACC "_s", NAMELC ## F16ACC ## _coopmat_len, NAMELC ## F16ACC ## _coopmat_data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, 1, false, true); \
+ if (device->mul_mat ## ID ## _l) \
+ ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_l, #NAMELC #F16ACC "_aligned_l", NAMELC ## _aligned ## F16ACC ## _coopmat_len, NAMELC ## _aligned ## F16ACC ## _coopmat_data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, l_align, false, true); \
+ if (device->mul_mat ## ID ## _m) \
+ ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_m, #NAMELC #F16ACC "_aligned_m", NAMELC ## _aligned ## F16ACC ## _coopmat_len, NAMELC ## _aligned ## F16ACC ## _coopmat_data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, m_align, false, true); \
+ if (device->mul_mat ## ID ## _s) \
+ ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_s, #NAMELC #F16ACC "_aligned_s", NAMELC ## _aligned ## F16ACC ## _coopmat_len, NAMELC ## _aligned ## F16ACC ## _coopmat_data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, s_align, false, true); \
+
+ // Create 2 variants, {f16,f32} accumulator
+#define CREATE_MM2(PIPELINE_NAME, NAMELC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID) \
+ if (device->coopmat_acc_f16_support) { \
+ CREATE_MM(PIPELINE_NAME . f16acc, NAMELC, _f16acc, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID) \
+ } \
+ if (device->coopmat_acc_f32_support) { \
+ CREATE_MM(PIPELINE_NAME . f32acc, NAMELC, , WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID) \
+ } \
+
+ CREATE_MM(pipeline_matmul_f32, matmul_f32_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_matmul_f32_f16, matmul_f32_f16, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, );
+ CREATE_MM2(pipeline_matmul_f16, matmul_f16, wg_denoms, warptile, vk_mat_mat_push_constants, 3, );
+ CREATE_MM2(pipeline_matmul_f16_f32, matmul_f16_f32, wg_denoms, warptile, vk_mat_mat_push_constants, 3, );
+
+ if (device->coopmat_acc_f16_support) {
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0].f16acc, matmul_q4_0_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1].f16acc, matmul_q4_1_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0].f16acc, matmul_q5_0_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1].f16acc, matmul_q5_1_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0].f16acc, matmul_q8_0_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K].f16acc, matmul_q2_k_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K].f16acc, matmul_q3_k_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K].f16acc, matmul_q4_k_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K].f16acc, matmul_q5_k_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K].f16acc, matmul_q6_k_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL].f16acc, matmul_iq4_nl_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ } else {
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0].f16acc, matmul_q4_0_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1].f16acc, matmul_q4_1_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0].f16acc, matmul_q5_0_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1].f16acc, matmul_q5_1_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0].f16acc, matmul_q8_0_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K].f16acc, matmul_q2_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K].f16acc, matmul_q3_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K].f16acc, matmul_q4_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K].f16acc, matmul_q5_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K].f16acc, matmul_q6_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL].f16acc, matmul_iq4_nl_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ }
+
+ // If there's not enough shared memory for row_ids and the result tile, don't create these pipelines.
+ if (device->mul_mat_id_s || device->mul_mat_id_m || device->mul_mat_id_l) {
+ CREATE_MM(pipeline_matmul_id_f32, matmul_id_f32_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 4, _id);
+ CREATE_MM2(pipeline_matmul_id_f16, matmul_id_f16, wg_denoms, warptile, vk_mat_mat_push_constants, 4, _id);
+ CREATE_MM2(pipeline_matmul_id_f16_f32, matmul_id_f16_f32, wg_denoms, warptile, vk_mat_mat_push_constants, 4, _id);
+
+ if (device->coopmat_acc_f16_support) {
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0].f16acc, matmul_id_q4_0_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1].f16acc, matmul_id_q4_1_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0].f16acc, matmul_id_q5_0_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1].f16acc, matmul_id_q5_1_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0].f16acc, matmul_id_q8_0_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K].f16acc, matmul_id_q2_k_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K].f16acc, matmul_id_q3_k_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K].f16acc, matmul_id_q4_k_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K].f16acc, matmul_id_q5_k_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K].f16acc, matmul_id_q6_k_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL].f16acc, matmul_id_iq4_nl_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ } else {
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0].f16acc, matmul_id_q4_0_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1].f16acc, matmul_id_q4_1_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0].f16acc, matmul_id_q5_0_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1].f16acc, matmul_id_q5_1_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0].f16acc, matmul_id_q8_0_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K].f16acc, matmul_id_q2_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K].f16acc, matmul_id_q3_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K].f16acc, matmul_id_q4_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K].f16acc, matmul_id_q5_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K].f16acc, matmul_id_q6_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL].f16acc, matmul_id_iq4_nl_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ }
+ }
+#undef CREATE_MM2
+#undef CREATE_MM
+ } else if (device->fp16) {
+ // Create 6 variants, {s,m,l}x{unaligned,aligned}
+#define CREATE_MM(PIPELINE_NAME, NAMELC, F16ACC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID) \
+ if (device->mul_mat ## ID ## _l) \
+ ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->l, #NAMELC #F16ACC "_l", NAMELC ## F16ACC ## _len, NAMELC ## F16ACC ## _data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, 1); \
+ if (device->mul_mat ## ID ## _m) \
+ ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->m, #NAMELC #F16ACC "_m", NAMELC ## F16ACC ## _len, NAMELC ## F16ACC ## _data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, 1); \
+ if (device->mul_mat ## ID ## _s) \
+ ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->s, #NAMELC #F16ACC "_s", NAMELC ## F16ACC ## _len, NAMELC ## F16ACC ## _data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, 1); \
+ if (device->mul_mat ## ID ## _l) \
+ ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_l, #NAMELC #F16ACC "_aligned_l", NAMELC ## _aligned ## F16ACC ## _len, NAMELC ## _aligned ## F16ACC ## _data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, l_align); \
+ if (device->mul_mat ## ID ## _m) \
+ ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_m, #NAMELC #F16ACC "_aligned_m", NAMELC ## _aligned ## F16ACC ## _len, NAMELC ## _aligned ## F16ACC ## _data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, m_align); \
+ if (device->mul_mat ## ID ## _s) \
+ ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_s, #NAMELC #F16ACC "_aligned_s", NAMELC ## _aligned ## F16ACC ## _len, NAMELC ## _aligned ## F16ACC ## _data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, s_align); \
+
+ // Create 2 variants, {f16,f32} accumulator
+#define CREATE_MM2(PIPELINE_NAME, NAMELC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID) \
+ CREATE_MM(PIPELINE_NAME . f16acc, NAMELC, _f16acc, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID) \
+ CREATE_MM(PIPELINE_NAME . f32acc, NAMELC, , WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID) \
+
+ CREATE_MM(pipeline_matmul_f32, matmul_f32_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_matmul_f32_f16, matmul_f32_f16, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, );
+ CREATE_MM2(pipeline_matmul_f16, matmul_f16, wg_denoms, warptile, vk_mat_mat_push_constants, 3, );
+ CREATE_MM2(pipeline_matmul_f16_f32, matmul_f16_f32, wg_denoms, warptile, vk_mat_mat_push_constants, 3, );
+
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0].f16acc, matmul_q4_0_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1].f16acc, matmul_q4_1_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0].f16acc, matmul_q5_0_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1].f16acc, matmul_q5_1_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0].f16acc, matmul_q8_0_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K].f16acc, matmul_q2_k_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K].f16acc, matmul_q3_k_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K].f16acc, matmul_q4_k_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K].f16acc, matmul_q5_k_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K].f16acc, matmul_q6_k_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL].f16acc, matmul_iq4_nl_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+
+ // If there's not enough shared memory for row_ids and the result tile, don't create these pipelines.
+ if (device->mul_mat_id_s || device->mul_mat_id_m || device->mul_mat_id_l) {
+ CREATE_MM(pipeline_matmul_id_f32, matmul_id_f32_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 4, _id);
+ CREATE_MM2(pipeline_matmul_id_f16, matmul_id_f16, wg_denoms, warptile, vk_mat_mat_push_constants, 4, _id);
+ CREATE_MM2(pipeline_matmul_id_f16_f32, matmul_id_f16_f32, wg_denoms, warptile, vk_mat_mat_push_constants, 4, _id);
+
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0].f16acc, matmul_id_q4_0_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1].f16acc, matmul_id_q4_1_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0].f16acc, matmul_id_q5_0_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1].f16acc, matmul_id_q5_1_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0].f16acc, matmul_id_q8_0_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K].f16acc, matmul_id_q2_k_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K].f16acc, matmul_id_q3_k_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K].f16acc, matmul_id_q4_k_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K].f16acc, matmul_id_q5_k_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K].f16acc, matmul_id_q6_k_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL].f16acc, matmul_id_iq4_nl_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ }
+#undef CREATE_MM2
+#undef CREATE_MM
+ } else {
+ // Create 6 variants, {s,m,l}x{unaligned,aligned}
+#define CREATE_MM(PIPELINE_NAME, NAMELC, F16ACC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID) \
+ if (device->mul_mat ## ID ## _l) \
+ ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->l, #NAMELC #F16ACC "_l", NAMELC ## F16ACC ## _fp32_len, NAMELC ## F16ACC ## _fp32_data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, 1); \
+ if (device->mul_mat ## ID ## _m) \
+ ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->m, #NAMELC #F16ACC "_m", NAMELC ## F16ACC ## _fp32_len, NAMELC ## F16ACC ## _fp32_data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, 1); \
+ if (device->mul_mat ## ID ## _s) \
+ ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->s, #NAMELC #F16ACC "_s", NAMELC ## F16ACC ## _fp32_len, NAMELC ## F16ACC ## _fp32_data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, 1); \
+ if (device->mul_mat ## ID ## _l) \
+ ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_l, #NAMELC #F16ACC "_aligned_l", NAMELC ## _aligned ## F16ACC ## _fp32_len, NAMELC ## _aligned ## F16ACC ## _fp32_data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, l_align); \
+ if (device->mul_mat ## ID ## _m) \
+ ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_m, #NAMELC #F16ACC "_aligned_m", NAMELC ## _aligned ## F16ACC ## _fp32_len, NAMELC ## _aligned ## F16ACC ## _fp32_data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, m_align); \
+ if (device->mul_mat ## ID ## _s) \
+ ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_s, #NAMELC #F16ACC "_aligned_s", NAMELC ## _aligned ## F16ACC ## _fp32_len, NAMELC ## _aligned ## F16ACC ## _fp32_data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, s_align); \
+
+ CREATE_MM(pipeline_matmul_f32, matmul_f32_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_matmul_f32_f16, matmul_f32_f16, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_matmul_f16.f32acc, matmul_f16, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_matmul_f16_f32.f32acc, matmul_f16_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, );
+
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0].f32acc, matmul_q4_0_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1].f32acc, matmul_q4_1_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0].f32acc, matmul_q5_0_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1].f32acc, matmul_q5_1_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0].f32acc, matmul_q8_0_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K].f32acc, matmul_q2_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K].f32acc, matmul_q3_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K].f32acc, matmul_q4_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K].f32acc, matmul_q5_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K].f32acc, matmul_q6_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+ CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL].f32acc, matmul_iq4_nl_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+
+ // If there's not enough shared memory for row_ids and the result tile, don't create these pipelines.
+ if (device->mul_mat_id_s || device->mul_mat_id_m || device->mul_mat_id_l) {
+ CREATE_MM(pipeline_matmul_id_f32, matmul_id_f32_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 4, _id);
+ CREATE_MM(pipeline_matmul_id_f16.f32acc, matmul_id_f16, , wg_denoms, warptile, vk_mat_mat_push_constants, 4, _id);
+ CREATE_MM(pipeline_matmul_id_f16_f32.f32acc, matmul_id_f16_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 4, _id);
+
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0].f32acc, matmul_id_q4_0_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1].f32acc, matmul_id_q4_1_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0].f32acc, matmul_id_q5_0_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1].f32acc, matmul_id_q5_1_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0].f32acc, matmul_id_q8_0_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K].f32acc, matmul_id_q2_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K].f32acc, matmul_id_q3_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K].f32acc, matmul_id_q4_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K].f32acc, matmul_id_q5_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K].f32acc, matmul_id_q6_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL].f32acc, matmul_id_iq4_nl_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+ }
+#undef CREATE_MM
+ }
+
+ // mul mat vec
+
+ // the number of rows computed per shader depends on GPU model and quant
+ uint32_t rm_stdq = 1;
+ uint32_t rm_kq = 2;
+ if (device->vendor_id == VK_VENDOR_ID_AMD) {
+ if (device->subgroup_min_size == 64 && device->subgroup_max_size == 64) { // GCN
+ rm_stdq = 2;
+ rm_kq = 4;
+ }
+ } else if (device->vendor_id == VK_VENDOR_ID_INTEL)
+ rm_stdq = 2;
+
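+    // One mul_mat_vec pipeline is created per source type and per destination column count
+    // (1..mul_mat_vec_max_cols); the subgroup size and rows-per-workgroup are passed as
+    // specialization constants.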
+ for (uint32_t i = 0; i < mul_mat_vec_max_cols; ++i) {
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_F32 ][i], "mul_mat_vec_f32_f32_f32_"+std::to_string(i+1), mul_mat_vec_f32_f32_f32_len, mul_mat_vec_f32_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2, i+1}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_F16 ][i], "mul_mat_vec_f16_f32_f32_"+std::to_string(i+1), mul_mat_vec_f16_f32_f32_len, mul_mat_vec_f16_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2, i+1}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_0][i], "mul_mat_vec_q4_0_f32_f32_"+std::to_string(i+1), mul_mat_vec_q4_0_f32_f32_len, mul_mat_vec_q4_0_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq, i+1}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_1][i], "mul_mat_vec_q4_1_f32_f32_"+std::to_string(i+1), mul_mat_vec_q4_1_f32_f32_len, mul_mat_vec_q4_1_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq, i+1}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_0][i], "mul_mat_vec_q5_0_f32_f32_"+std::to_string(i+1), mul_mat_vec_q5_0_f32_f32_len, mul_mat_vec_q5_0_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq, i+1}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_1][i], "mul_mat_vec_q5_1_f32_f32_"+std::to_string(i+1), mul_mat_vec_q5_1_f32_f32_len, mul_mat_vec_q5_1_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq, i+1}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q8_0][i], "mul_mat_vec_q8_0_f32_f32_"+std::to_string(i+1), mul_mat_vec_q8_0_f32_f32_len, mul_mat_vec_q8_0_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1*rm_stdq, 1, 1}, {device->subgroup_size, 1*rm_stdq, i+1}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q2_K][i], "mul_mat_vec_q2_k_f32_f32_"+std::to_string(i+1), mul_mat_vec_q2_k_f32_f32_len, mul_mat_vec_q2_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q3_K][i], "mul_mat_vec_q3_k_f32_f32_"+std::to_string(i+1), mul_mat_vec_q3_k_f32_f32_len, mul_mat_vec_q3_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_K][i], "mul_mat_vec_q4_k_f32_f32_"+std::to_string(i+1), mul_mat_vec_q4_k_f32_f32_len, mul_mat_vec_q4_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_K][i], "mul_mat_vec_q5_k_f32_f32_"+std::to_string(i+1), mul_mat_vec_q5_k_f32_f32_len, mul_mat_vec_q5_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q6_K][i], "mul_mat_vec_q6_k_f32_f32_"+std::to_string(i+1), mul_mat_vec_q6_k_f32_f32_len, mul_mat_vec_q6_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_IQ4_NL][i], "mul_mat_vec_iq4_nl_f32_f32_"+std::to_string(i+1), mul_mat_vec_iq4_nl_f32_f32_len, mul_mat_vec_iq4_nl_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {subgroup_size_16, 2*rm_stdq, i+1}, 1, true);
+
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_F32 ][i], "mul_mat_vec_f32_f16_f32_"+std::to_string(i+1), mul_mat_vec_f32_f16_f32_len, mul_mat_vec_f32_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2, i+1}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_F16 ][i], "mul_mat_vec_f16_f16_f32_"+std::to_string(i+1), mul_mat_vec_f16_f16_f32_len, mul_mat_vec_f16_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2, i+1}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_0][i], "mul_mat_vec_q4_0_f16_f32_"+std::to_string(i+1), mul_mat_vec_q4_0_f16_f32_len, mul_mat_vec_q4_0_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq, i+1}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_1][i], "mul_mat_vec_q4_1_f16_f32_"+std::to_string(i+1), mul_mat_vec_q4_1_f16_f32_len, mul_mat_vec_q4_1_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq, i+1}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_0][i], "mul_mat_vec_q5_0_f16_f32_"+std::to_string(i+1), mul_mat_vec_q5_0_f16_f32_len, mul_mat_vec_q5_0_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq, i+1}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_1][i], "mul_mat_vec_q5_1_f16_f32_"+std::to_string(i+1), mul_mat_vec_q5_1_f16_f32_len, mul_mat_vec_q5_1_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq, i+1}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q8_0][i], "mul_mat_vec_q8_0_f16_f32_"+std::to_string(i+1), mul_mat_vec_q8_0_f16_f32_len, mul_mat_vec_q8_0_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1*rm_stdq, 1, 1}, {device->subgroup_size, 1*rm_stdq, i+1}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q2_K][i], "mul_mat_vec_q2_k_f16_f32_"+std::to_string(i+1), mul_mat_vec_q2_k_f16_f32_len, mul_mat_vec_q2_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q3_K][i], "mul_mat_vec_q3_k_f16_f32_"+std::to_string(i+1), mul_mat_vec_q3_k_f16_f32_len, mul_mat_vec_q3_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_K][i], "mul_mat_vec_q4_k_f16_f32_"+std::to_string(i+1), mul_mat_vec_q4_k_f16_f32_len, mul_mat_vec_q4_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_K][i], "mul_mat_vec_q5_k_f16_f32_"+std::to_string(i+1), mul_mat_vec_q5_k_f16_f32_len, mul_mat_vec_q5_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q6_K][i], "mul_mat_vec_q6_k_f16_f32_"+std::to_string(i+1), mul_mat_vec_q6_k_f16_f32_len, mul_mat_vec_q6_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_IQ4_NL][i], "mul_mat_vec_iq4_nl_f16_f32_"+std::to_string(i+1), mul_mat_vec_iq4_nl_f16_f32_len, mul_mat_vec_iq4_nl_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {subgroup_size_16, 2*rm_stdq, i+1}, 1, true);
+ }
+
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_F32 ], "mul_mat_vec_id_f32_f32", mul_mat_vec_id_f32_f32_len, mul_mat_vec_id_f32_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_F16 ], "mul_mat_vec_id_f16_f32", mul_mat_vec_id_f16_f32_len, mul_mat_vec_id_f16_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_0], "mul_mat_vec_id_q4_0_f32", mul_mat_vec_id_q4_0_f32_len, mul_mat_vec_id_q4_0_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_1], "mul_mat_vec_id_q4_1_f32", mul_mat_vec_id_q4_1_f32_len, mul_mat_vec_id_q4_1_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_0], "mul_mat_vec_id_q5_0_f32", mul_mat_vec_id_q5_0_f32_len, mul_mat_vec_id_q5_0_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_1], "mul_mat_vec_id_q5_1_f32", mul_mat_vec_id_q5_1_f32_len, mul_mat_vec_id_q5_1_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q8_0], "mul_mat_vec_id_q8_0_f32", mul_mat_vec_id_q8_0_f32_len, mul_mat_vec_id_q8_0_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1*rm_stdq, 1, 1}, {device->subgroup_size, 1*rm_stdq}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q2_K], "mul_mat_vec_id_q2_k_f32", mul_mat_vec_id_q2_k_f32_len, mul_mat_vec_id_q2_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q3_K], "mul_mat_vec_id_q3_k_f32", mul_mat_vec_id_q3_k_f32_len, mul_mat_vec_id_q3_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_K], "mul_mat_vec_id_q4_k_f32", mul_mat_vec_id_q4_k_f32_len, mul_mat_vec_id_q4_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_K], "mul_mat_vec_id_q5_k_f32", mul_mat_vec_id_q5_k_f32_len, mul_mat_vec_id_q5_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q6_K], "mul_mat_vec_id_q6_k_f32", mul_mat_vec_id_q6_k_f32_len, mul_mat_vec_id_q6_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ4_NL], "mul_mat_vec_id_iq4_nl_f32", mul_mat_vec_id_iq4_nl_f32_len, mul_mat_vec_id_iq4_nl_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2*rm_stdq, 1, 1}, {subgroup_size_16, 2*rm_stdq}, 1, true);
+
+ // dequant shaders
+ ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_F32 ], "f32_to_f16", dequant_f32_len, dequant_f32_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q4_0], "dequant_q4_0", dequant_q4_0_len, dequant_q4_0_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q4_1], "dequant_q4_1", dequant_q4_1_len, dequant_q4_1_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q5_0], "dequant_q5_0", dequant_q5_0_len, dequant_q5_0_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q5_1], "dequant_q5_1", dequant_q5_1_len, dequant_q5_1_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q8_0], "dequant_q8_0", dequant_q8_0_len, dequant_q8_0_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q2_K], "dequant_q2_k", dequant_q2_k_len, dequant_q2_k_data, "main", 2, 5 * sizeof(uint32_t), {256 * 64, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q3_K], "dequant_q3_k", dequant_q3_k_len, dequant_q3_k_data, "main", 2, 5 * sizeof(uint32_t), {256 * 64, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q4_K], "dequant_q4_k", dequant_q4_k_len, dequant_q4_k_data, "main", 2, 5 * sizeof(uint32_t), {256 * 32, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q5_K], "dequant_q5_k", dequant_q5_k_len, dequant_q5_k_data, "main", 2, 5 * sizeof(uint32_t), {256 * 64, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q6_K], "dequant_q6_k", dequant_q6_k_len, dequant_q6_k_data, "main", 2, 5 * sizeof(uint32_t), {256 * 64, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_IQ4_NL], "dequant_iq4_nl", dequant_iq4_nl_len, dequant_iq4_nl_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
+
+ // get_rows
+ ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_F32 ], "get_rows_f32", get_rows_f32_len, get_rows_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), { 512, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_F16 ], "get_rows_f16", get_rows_f16_len, get_rows_f16_data, "main", 3, sizeof(vk_op_binary_push_constants), { 512, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q4_0], "get_rows_q4_0", get_rows_q4_0_len, get_rows_q4_0_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q4_1], "get_rows_q4_1", get_rows_q4_1_len, get_rows_q4_1_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q5_0], "get_rows_q5_0", get_rows_q5_0_len, get_rows_q5_0_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q5_1], "get_rows_q5_1", get_rows_q5_1_len, get_rows_q5_1_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q8_0], "get_rows_q8_0", get_rows_q8_0_len, get_rows_q8_0_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_IQ4_NL], "get_rows_iq4_nl", get_rows_iq4_nl_len, get_rows_iq4_nl_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
+
+ ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_F32 ], "get_rows_f32_f32", get_rows_f32_f32_len, get_rows_f32_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), { 512, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_F16 ], "get_rows_f16_f32", get_rows_f16_f32_len, get_rows_f16_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), { 512, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q4_0], "get_rows_q4_0_f32", get_rows_q4_0_f32_len, get_rows_q4_0_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q4_1], "get_rows_q4_1_f32", get_rows_q4_1_f32_len, get_rows_q4_1_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q5_0], "get_rows_q5_0_f32", get_rows_q5_0_f32_len, get_rows_q5_0_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q5_1], "get_rows_q5_1_f32", get_rows_q5_1_f32_len, get_rows_q5_1_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q8_0], "get_rows_q8_0_f32", get_rows_q8_0_f32_len, get_rows_q8_0_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_IQ4_NL], "get_rows_iq4_nl_f32", get_rows_iq4_nl_f32_len, get_rows_iq4_nl_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
+
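+    // split_k reduction: sums the partial results produced when a matmul is split along the K dimension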
+ ggml_vk_create_pipeline(device, device->pipeline_matmul_split_k_reduce, "split_k_reduce", split_k_reduce_len, split_k_reduce_data, "main", 2, 2 * sizeof(uint32_t), {256 * 4, 1, 1}, {}, 1);
+
+ ggml_vk_create_pipeline(device, device->pipeline_mul_mat_vec_p021_f16_f32, "mul_mat_vec_p021_f16_f32", mul_mat_vec_p021_f16_f32_len, mul_mat_vec_p021_f16_f32_data, "main", 3, 6 * sizeof(uint32_t), {1, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_mul_mat_vec_nc_f16_f32, "mul_mat_vec_nc_f16_f32", mul_mat_vec_nc_f16_f32_len, mul_mat_vec_nc_f16_f32_data, "main", 3, 7 * sizeof(uint32_t), {1, 1, 1}, {}, 1);
+
+ ggml_vk_create_pipeline(device, device->pipeline_norm_f32, "norm_f32", norm_f32_len, norm_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_group_norm_f32, "group_norm_f32", group_norm_f32_len, group_norm_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_rms_norm_f32, "rms_norm_f32", rms_norm_f32_len, rms_norm_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1);
+
+ ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_f32, "cpy_f32_f32", cpy_f32_f32_len, cpy_f32_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_f16, "cpy_f32_f16", cpy_f32_f16_len, cpy_f32_f16_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_cpy_f16_f16, "cpy_f16_f16", cpy_f16_f16_len, cpy_f16_f16_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
+
+ ggml_vk_create_pipeline(device, device->pipeline_contig_cpy_f32_f32, "contig_cpy_f32_f32", contig_cpy_f32_f32_len, contig_cpy_f32_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_contig_cpy_f32_f16, "contig_cpy_f32_f16", contig_cpy_f32_f16_len, contig_cpy_f32_f16_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_contig_cpy_f16_f16, "contig_cpy_f16_f16", contig_cpy_f16_f16_len, contig_cpy_f16_f16_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
+
+ ggml_vk_create_pipeline(device, device->pipeline_add_f32, "add_f32", add_f32_len, add_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {0}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_add_f32_norepeat, "add_f32_norepeat", add_f32_len, add_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {1}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_add_f16_f32_f16, "add_f16_f32_f16", add_f16_f32_f16_len, add_f16_f32_f16_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {0}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_add_f16_f32_f16_norepeat, "add_f16_f32_f16_norepeat", add_f16_f32_f16_len, add_f16_f32_f16_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {1}, 1);
+
+ ggml_vk_create_pipeline(device, device->pipeline_acc_f32, "acc_f32", acc_f32_len, acc_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {}, 1);
+
+ ggml_vk_create_pipeline(device, device->pipeline_mul_f32, "mul_f32", mul_f32_len, mul_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {0}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_mul_f32_norepeat, "mul_f32_norepeat", mul_f32_len, mul_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {1}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_div_f32, "div_f32", div_f32_len, div_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {0}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_div_f32_norepeat, "div_f32_norepeat", div_f32_len, div_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {1}, 1);
+
+ ggml_vk_create_pipeline(device, device->pipeline_concat_f32, "concat_f32", concat_f32_len, concat_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_concat_f16, "concat_f16", concat_f16_len, concat_f16_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_concat_i32, "concat_i32", concat_i32_len, concat_i32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {}, 1);
+
+ ggml_vk_create_pipeline(device, device->pipeline_upscale_f32, "upscale_f32", upscale_f32_len, upscale_f32_data, "main", 2, sizeof(vk_op_upscale_push_constants), {512, 1, 1}, {}, 1);
+
+ ggml_vk_create_pipeline(device, device->pipeline_scale_f32, "scale_f32", scale_f32_len, scale_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
+
+ ggml_vk_create_pipeline(device, device->pipeline_sqr_f32, "sqr_f32", sqr_f32_len, sqr_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_sin_f32, "sin_f32", sin_f32_len, sin_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_cos_f32, "cos_f32", cos_f32_len, cos_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
+
+ ggml_vk_create_pipeline(device, device->pipeline_clamp_f32, "clamp_f32", clamp_f32_len, clamp_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
+
+ ggml_vk_create_pipeline(device, device->pipeline_pad_f32, "pad_f32", pad_f32_len, pad_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
+
+ ggml_vk_create_pipeline(device, device->pipeline_repeat_f32, "repeat_f32", repeat_f32_len, repeat_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
+
+ ggml_vk_create_pipeline(device, device->pipeline_gelu_f32, "gelu_f32", gelu_f32_len, gelu_f32_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_gelu_quick_f32, "gelu_quick_f32", gelu_quick_f32_len, gelu_quick_f32_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_silu_f32, "silu_f32", silu_f32_len, silu_f32_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_relu_f32, "relu_f32", relu_f32_len, relu_f32_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_leaky_relu_f32, "leaky_relu_f32", leaky_relu_f32_len, leaky_relu_f32_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_tanh_f32, "tanh_f32", tanh_f32_len, tanh_f32_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
+
+ ggml_vk_create_pipeline(device, device->pipeline_diag_mask_inf_f32, "diag_mask_inf_f32", diag_mask_inf_f32_len, diag_mask_inf_f32_data, "main", 2, sizeof(vk_op_diag_mask_push_constants), {512, 1, 1}, {}, 1);
+
+ ggml_vk_create_pipeline(device, device->pipeline_soft_max_f32, "soft_max_f32", soft_max_f32_len, soft_max_f32_data, "main", 3, sizeof(vk_op_soft_max_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_soft_max_f32_wg512, "soft_max_f32_wg512", soft_max_f32_len, soft_max_f32_data, "main", 3, sizeof(vk_op_soft_max_push_constants), {1, 1, 1}, { 512 }, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_soft_max_f32_f16, "soft_max_f32_f16", soft_max_f32_f16_len, soft_max_f32_f16_data, "main", 3, sizeof(vk_op_soft_max_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_soft_max_f32_f16_wg512, "soft_max_f32_f16_wg512", soft_max_f32_f16_len, soft_max_f32_f16_data, "main", 3, sizeof(vk_op_soft_max_push_constants), {1, 1, 1}, { 512 }, 1);
+
+ ggml_vk_create_pipeline(device, device->pipeline_rope_norm_f32, "rope_norm_f32", rope_norm_f32_len, rope_norm_f32_data, "main", 4, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_rope_neox_f32, "rope_neox_f32", rope_neox_f32_len, rope_neox_f32_data, "main", 4, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
+
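+    // Use the _rte (round-to-nearest-even) fp16 rope shader variants when the device reports RTE rounding for fp16 (shaderRoundingModeRTEFloat16)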
+ if (device->float_controls_rte_fp16) {
+ ggml_vk_create_pipeline(device, device->pipeline_rope_norm_f16, "rope_norm_f16", rope_norm_f16_rte_len, rope_norm_f16_rte_data, "main", 4, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_rope_neox_f16, "rope_neox_f16", rope_neox_f16_rte_len, rope_neox_f16_rte_data, "main", 4, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
+ } else {
+ ggml_vk_create_pipeline(device, device->pipeline_rope_norm_f16, "rope_norm_f16", rope_norm_f16_len, rope_norm_f16_data, "main", 4, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_rope_neox_f16, "rope_neox_f16", rope_neox_f16_len, rope_neox_f16_data, "main", 4, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
+ }
+
+ ggml_vk_create_pipeline(device, device->pipeline_argsort_f32, "argsort_f32", argsort_f32_len, argsort_f32_data, "main", 2, sizeof(vk_op_argsort_push_constants), {1024, 1, 1}, {}, 1);
+
+ ggml_vk_create_pipeline(device, device->pipeline_sum_rows_f32, "sum_rows_f32", sum_rows_f32_len, sum_rows_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
+
+ ggml_vk_create_pipeline(device, device->pipeline_im2col_f32, "im2col_f32", im2col_f32_len, im2col_f32_data, "main", 2, sizeof(vk_op_im2col_push_constants), {512, 1, 1}, { device->subgroup_size }, 1, true);
+ if (device->float_controls_rte_fp16) {
+ ggml_vk_create_pipeline(device, device->pipeline_im2col_f32_f16, "im2col_f32_f16", im2col_f32_f16_rte_len, im2col_f32_f16_rte_data, "main", 2, sizeof(vk_op_im2col_push_constants), {512, 1, 1}, { device->subgroup_size }, 1, true);
+ } else {
+ ggml_vk_create_pipeline(device, device->pipeline_im2col_f32_f16, "im2col_f32_f16", im2col_f32_f16_len, im2col_f32_f16_data, "main", 2, sizeof(vk_op_im2col_push_constants), {512, 1, 1}, { device->subgroup_size }, 1, true);
+ }
+
+ ggml_vk_create_pipeline(device, device->pipeline_timestep_embedding_f32, "timestep_embedding_f32", timestep_embedding_f32_len, timestep_embedding_f32_data, "main", 2, sizeof(vk_op_timestep_embedding_push_constants), {256, 1, 1}, {}, 1);
+
+ ggml_vk_create_pipeline(device, device->pipeline_pool2d_f32, "pool2d_f32", pool2d_f32_len, pool2d_f32_data, "main", 2, sizeof(vk_op_pool2d_push_constants), {512, 1, 1}, {}, 1);
+
+ ggml_vk_create_pipeline(device, device->pipeline_rwkv_wkv6_f32, "rwkv_wkv6_f32", rwkv_wkv6_f32_len, rwkv_wkv6_f32_data, "main", 7, sizeof(vk_op_rwkv_wkv6_push_constants), {1, 1, 1}, {device->subgroup_size}, 1);
+
+ for (auto &c : compiles) {
+ c.wait();
+ }
+ std::cerr << "Done!" << std::endl;
+}
+
+static bool ggml_vk_khr_cooperative_matrix_support(const vk::PhysicalDeviceProperties& props, const vk::PhysicalDeviceDriverProperties& driver_props);
+
+static vk_device ggml_vk_get_device(size_t idx) {
+ VK_LOG_DEBUG("ggml_vk_get_device(" << idx << ")");
+
+ if (vk_instance.devices[idx] == nullptr) {
+ VK_LOG_DEBUG("Initializing new vk_device");
+ vk_device device = std::make_shared<vk_device_struct>();
+ vk_instance.devices[idx] = device;
+
+#ifdef GGML_VULKAN_MEMORY_DEBUG
+ device->memory_logger = std::unique_ptr<vk_memory_logger>(new vk_memory_logger());
+#endif
+#ifdef GGML_VULKAN_PERF
+ device->perf_logger = std::unique_ptr<vk_perf_logger>(new vk_perf_logger());
+#endif
+
+ size_t dev_num = vk_instance.device_indices[idx];
+
+ std::vector<vk::PhysicalDevice> physical_devices = vk_instance.instance.enumeratePhysicalDevices();
+
+ if (dev_num >= physical_devices.size()) {
+ std::cerr << "ggml_vulkan: Device with index " << dev_num << " does not exist." << std::endl;
+ throw std::runtime_error("Device not found");
+ }
+
+ device->physical_device = physical_devices[dev_num];
+ const std::vector<vk::ExtensionProperties> ext_props = device->physical_device.enumerateDeviceExtensionProperties();
+
+ bool fp16_storage = false;
+ bool fp16_compute = false;
+ bool maintenance4_support = false;
+ bool sm_builtins = false;
+ bool amd_shader_core_properties2 = false;
+ bool pipeline_robustness = false;
+ bool coopmat2_support = false;
+ device->coopmat_support = false;
+
+        // Check which optional device extensions are available
+ for (const auto& properties : ext_props) {
+ if (strcmp("VK_KHR_maintenance4", properties.extensionName) == 0) {
+ maintenance4_support = true;
+ } else if (strcmp("VK_KHR_16bit_storage", properties.extensionName) == 0) {
+ fp16_storage = true;
+ } else if (strcmp("VK_KHR_shader_float16_int8", properties.extensionName) == 0) {
+ fp16_compute = true;
+ } else if (strcmp("VK_NV_shader_sm_builtins", properties.extensionName) == 0) {
+ sm_builtins = true;
+ } else if (strcmp("VK_AMD_shader_core_properties2", properties.extensionName) == 0) {
+ amd_shader_core_properties2 = true;
+ } else if (strcmp("VK_EXT_pipeline_robustness", properties.extensionName) == 0) {
+ pipeline_robustness = true;
+ } else if (strcmp("VK_EXT_subgroup_size_control", properties.extensionName) == 0) {
+ device->subgroup_size_control = true;
+ } else if (strcmp("VK_KHR_cooperative_matrix", properties.extensionName) == 0 &&
+ !getenv("GGML_VK_DISABLE_COOPMAT")) {
+ device->coopmat_support = true;
+ device->coopmat_m = 0;
+ device->coopmat_n = 0;
+ device->coopmat_k = 0;
+ } else if (strcmp("VK_NV_cooperative_matrix2", properties.extensionName) == 0 &&
+ !getenv("GGML_VK_DISABLE_COOPMAT2")) {
+ coopmat2_support = true;
+ }
+ }
+
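+        // Query extended device properties through a pNext chain; optional property structs are appended only when the matching extension was found above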
+ vk::PhysicalDeviceProperties2 props2;
+ vk::PhysicalDeviceMaintenance3Properties props3;
+ vk::PhysicalDeviceMaintenance4Properties props4;
+ vk::PhysicalDeviceSubgroupProperties subgroup_props;
+ vk::PhysicalDeviceDriverProperties driver_props;
+ vk::PhysicalDeviceShaderSMBuiltinsPropertiesNV sm_props;
+ vk::PhysicalDeviceShaderCoreProperties2AMD amd_shader_core_properties2_props;
+ vk::PhysicalDeviceVulkan12Properties vk12_props;
+ vk::PhysicalDeviceSubgroupSizeControlPropertiesEXT subgroup_size_control_props;
+
+ props2.pNext = &props3;
+ props3.pNext = &subgroup_props;
+ subgroup_props.pNext = &driver_props;
+ driver_props.pNext = &vk12_props;
+
+ VkBaseOutStructure * last_struct = (VkBaseOutStructure *)&vk12_props;
+
+ if (maintenance4_support) {
+ last_struct->pNext = (VkBaseOutStructure *)&props4;
+ last_struct = (VkBaseOutStructure *)&props4;
+ }
+ if (sm_builtins) {
+ last_struct->pNext = (VkBaseOutStructure *)&sm_props;
+ last_struct = (VkBaseOutStructure *)&sm_props;
+ }
+ if (amd_shader_core_properties2) {
+ last_struct->pNext = (VkBaseOutStructure *)&amd_shader_core_properties2_props;
+ last_struct = (VkBaseOutStructure *)&amd_shader_core_properties2_props;
+ }
+ if (device->subgroup_size_control) {
+ last_struct->pNext = (VkBaseOutStructure *)&subgroup_size_control_props;
+ last_struct = (VkBaseOutStructure *)&subgroup_size_control_props;
+ }
+
+#if defined(VK_NV_cooperative_matrix2)
+ vk::PhysicalDeviceCooperativeMatrix2PropertiesNV coopmat2_props;
+ if (coopmat2_support) {
+ last_struct->pNext = (VkBaseOutStructure *)&coopmat2_props;
+ last_struct = (VkBaseOutStructure *)&coopmat2_props;
+ }
+#endif
+
+ device->physical_device.getProperties2(&props2);
+ device->properties = props2.properties;
+
+ const char* GGML_VK_FORCE_MAX_ALLOCATION_SIZE = getenv("GGML_VK_FORCE_MAX_ALLOCATION_SIZE");
+
+ if (GGML_VK_FORCE_MAX_ALLOCATION_SIZE != nullptr) {
+ device->max_memory_allocation_size = std::stoul(GGML_VK_FORCE_MAX_ALLOCATION_SIZE);
+ } else if (maintenance4_support) {
+ device->max_memory_allocation_size = std::min(props3.maxMemoryAllocationSize, props4.maxBufferSize);
+ } else {
+ device->max_memory_allocation_size = props3.maxMemoryAllocationSize;
+ }
+
+ device->vendor_id = device->properties.vendorID;
+ device->subgroup_size = subgroup_props.subgroupSize;
+ device->uma = device->properties.deviceType == vk::PhysicalDeviceType::eIntegratedGpu;
+ if (sm_builtins) {
+ device->shader_core_count = sm_props.shaderSMCount;
+ } else if (amd_shader_core_properties2) {
+ device->shader_core_count = amd_shader_core_properties2_props.activeComputeUnitCount;
+ } else {
+ device->shader_core_count = 0;
+ }
+ device->float_controls_rte_fp16 = vk12_props.shaderRoundingModeRTEFloat16;
+
+ const bool force_disable_f16 = getenv("GGML_VK_DISABLE_F16") != nullptr;
+
+ device->fp16 = !force_disable_f16 && fp16_storage && fp16_compute;
+
+ if (!ggml_vk_khr_cooperative_matrix_support(device->properties, driver_props)) {
+ device->coopmat_support = false;
+ }
+
+ std::vector<vk::QueueFamilyProperties> queue_family_props = device->physical_device.getQueueFamilyProperties();
+
+ // Try to find a non-graphics compute queue and transfer-focused queues
+ const uint32_t compute_queue_family_index = ggml_vk_find_queue_family_index(queue_family_props, vk::QueueFlagBits::eCompute, vk::QueueFlagBits::eGraphics, -1, 1);
+ const uint32_t transfer_queue_family_index = ggml_vk_find_queue_family_index(queue_family_props, vk::QueueFlagBits::eTransfer, vk::QueueFlagBits::eCompute | vk::QueueFlagBits::eGraphics, compute_queue_family_index, 1);
+
+ const float priorities[] = { 1.0f, 1.0f };
+ device->single_queue = compute_queue_family_index == transfer_queue_family_index && queue_family_props[compute_queue_family_index].queueCount == 1;
+
+ std::vector<vk::DeviceQueueCreateInfo> device_queue_create_infos;
+ if (compute_queue_family_index != transfer_queue_family_index) {
+ device_queue_create_infos.push_back({vk::DeviceQueueCreateFlags(), compute_queue_family_index, 1, priorities});
+ device_queue_create_infos.push_back({vk::DeviceQueueCreateFlags(), transfer_queue_family_index, 1, priorities + 1});
+ } else if(!device->single_queue) {
+ device_queue_create_infos.push_back({vk::DeviceQueueCreateFlags(), compute_queue_family_index, 2, priorities});
+ } else {
+ device_queue_create_infos.push_back({vk::DeviceQueueCreateFlags(), compute_queue_family_index, 1, priorities});
+ }
+ vk::DeviceCreateInfo device_create_info;
+ std::vector<const char *> device_extensions;
+ vk::PhysicalDeviceFeatures device_features = device->physical_device.getFeatures();
+
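+        // Build a VkPhysicalDeviceFeatures2 pNext chain so the optional features (Vulkan 1.1/1.2, pipeline robustness, subgroup size control, cooperative matrix) can be queried with a single vkGetPhysicalDeviceFeatures2 call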
+ VkPhysicalDeviceFeatures2 device_features2;
+ device_features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
+ device_features2.pNext = nullptr;
+ device_features2.features = (VkPhysicalDeviceFeatures)device_features;
+
+ VkPhysicalDeviceVulkan11Features vk11_features;
+ vk11_features.pNext = nullptr;
+ vk11_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES;
+ device_features2.pNext = &vk11_features;
+
+ VkPhysicalDeviceVulkan12Features vk12_features;
+ vk12_features.pNext = nullptr;
+ vk12_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES;
+ vk11_features.pNext = &vk12_features;
+
+ last_struct = (VkBaseOutStructure *)&vk12_features;
+
+ VkPhysicalDevicePipelineRobustnessFeaturesEXT pl_robustness_features;
+ pl_robustness_features.pNext = nullptr;
+ pl_robustness_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_FEATURES_EXT;
+ pl_robustness_features.pipelineRobustness = VK_FALSE;
+
+ if (pipeline_robustness) {
+ last_struct->pNext = (VkBaseOutStructure *)&pl_robustness_features;
+ last_struct = (VkBaseOutStructure *)&pl_robustness_features;
+ device_extensions.push_back("VK_EXT_pipeline_robustness");
+ }
+
+ VkPhysicalDeviceSubgroupSizeControlFeaturesEXT subgroup_size_control_features;
+ subgroup_size_control_features.pNext = nullptr;
+ subgroup_size_control_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT;
+ subgroup_size_control_features.computeFullSubgroups = false;
+ subgroup_size_control_features.subgroupSizeControl = false;
+
+ if (device->subgroup_size_control) {
+ last_struct->pNext = (VkBaseOutStructure *)&subgroup_size_control_features;
+ last_struct = (VkBaseOutStructure *)&subgroup_size_control_features;
+ }
+
+ VkPhysicalDeviceCooperativeMatrixFeaturesKHR coopmat_features;
+ coopmat_features.pNext = nullptr;
+ coopmat_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_KHR;
+ coopmat_features.cooperativeMatrix = VK_FALSE;
+
+ if (device->coopmat_support) {
+ last_struct->pNext = (VkBaseOutStructure *)&coopmat_features;
+ last_struct = (VkBaseOutStructure *)&coopmat_features;
+ }
+
+#if defined(VK_NV_cooperative_matrix2)
+ VkPhysicalDeviceCooperativeMatrix2FeaturesNV coopmat2_features {};
+ coopmat2_features.pNext = nullptr;
+ coopmat2_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_2_FEATURES_NV;
+ if (coopmat2_support) {
+ last_struct->pNext = (VkBaseOutStructure *)&coopmat2_features;
+ last_struct = (VkBaseOutStructure *)&coopmat2_features;
+ device_extensions.push_back("VK_NV_cooperative_matrix2");
+ }
+#endif
+
+ vkGetPhysicalDeviceFeatures2(device->physical_device, &device_features2);
+
+ device->fp16 = device->fp16 && vk12_features.shaderFloat16;
+
+ device->pipeline_robustness = pl_robustness_features.pipelineRobustness;
+
+ if (device->subgroup_size_control) {
+ device->subgroup_min_size = subgroup_size_control_props.minSubgroupSize;
+ device->subgroup_max_size = subgroup_size_control_props.maxSubgroupSize;
+ }
+
+ device->subgroup_size_control = device->subgroup_size_control &&
+ (subgroup_size_control_props.requiredSubgroupSizeStages & vk::ShaderStageFlagBits::eCompute) &&
+ subgroup_size_control_features.subgroupSizeControl;
+
+ if (device->subgroup_size_control) {
+ device->subgroup_require_full_support = subgroup_size_control_features.computeFullSubgroups;
+ device_extensions.push_back("VK_EXT_subgroup_size_control");
+ }
+
+ device->coopmat_support = device->coopmat_support && coopmat_features.cooperativeMatrix;
+
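+        // coopmat2 is only enabled when every required NV_cooperative_matrix2 sub-feature is present and the device exposes fp16 flexible-dimension shapes (fp16 or fp32 accumulator) for workgroup sizes 128 and 256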
+ if (coopmat2_support) {
+#if defined(VK_NV_cooperative_matrix2) && defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
+ if (coopmat2_features.cooperativeMatrixWorkgroupScope &&
+ coopmat2_features.cooperativeMatrixFlexibleDimensions &&
+ coopmat2_features.cooperativeMatrixReductions &&
+ coopmat2_features.cooperativeMatrixConversions &&
+ coopmat2_features.cooperativeMatrixPerElementOperations &&
+ coopmat2_features.cooperativeMatrixTensorAddressing &&
+ coopmat2_features.cooperativeMatrixBlockLoads &&
+ vk12_features.bufferDeviceAddress) {
+
+ std::vector<VkCooperativeMatrixFlexibleDimensionsPropertiesNV> flexible_dimensions;
+ uint32_t count = 0;
+
+ PFN_vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV
+ _vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV =
+ (PFN_vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV)
+ vk_instance.instance.getProcAddr("vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV");
+
+ _vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV(device->physical_device, &count, nullptr);
+
+ VkCooperativeMatrixFlexibleDimensionsPropertiesNV empty_prop {};
+ empty_prop.sType = VK_STRUCTURE_TYPE_COOPERATIVE_MATRIX_FLEXIBLE_DIMENSIONS_PROPERTIES_NV;
+ flexible_dimensions.resize(count, empty_prop);
+
+ _vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV(device->physical_device, &count, flexible_dimensions.data());
+
+ bool found_fp16_128 = false,
+ found_fp16_256 = false,
+ found_fp32_128 = false,
+ found_fp32_256 = false;
+            // Need fp16*fp16 with both fp16 and fp32 accumulators: workgroup size 128 with 32x16x16
+            // granularity and workgroup size 256 with 32x32x16 granularity.
+ for (auto &prop : flexible_dimensions) {
+ if (prop.saturatingAccumulation == VK_FALSE &&
+ prop.scope == VK_SCOPE_WORKGROUP_KHR &&
+ prop.AType == VK_COMPONENT_TYPE_FLOAT16_KHR &&
+ prop.BType == VK_COMPONENT_TYPE_FLOAT16_KHR) {
+
+ if (prop.workgroupInvocations == 128 &&
+ prop.MGranularity <= 32 &&
+ prop.NGranularity <= 16 &&
+ prop.KGranularity <= 16) {
+ if (prop.CType == VK_COMPONENT_TYPE_FLOAT16_KHR &&
+ prop.ResultType == VK_COMPONENT_TYPE_FLOAT16_KHR) {
+ found_fp16_128 = true;
+ }
+ if (prop.CType == VK_COMPONENT_TYPE_FLOAT32_KHR &&
+ prop.ResultType == VK_COMPONENT_TYPE_FLOAT32_KHR) {
+ found_fp32_128 = true;
+ }
+ }
+ if (prop.workgroupInvocations == 256 &&
+ prop.MGranularity <= 32 &&
+ prop.NGranularity <= 32 &&
+ prop.KGranularity <= 16) {
+ if (prop.CType == VK_COMPONENT_TYPE_FLOAT16_KHR &&
+ prop.ResultType == VK_COMPONENT_TYPE_FLOAT16_KHR) {
+ found_fp16_256 = true;
+ }
+ if (prop.CType == VK_COMPONENT_TYPE_FLOAT32_KHR &&
+ prop.ResultType == VK_COMPONENT_TYPE_FLOAT32_KHR) {
+ found_fp32_256 = true;
+ }
+ }
+ }
+ }
+ if (found_fp16_128 && found_fp16_256 &&
+ found_fp32_128 && found_fp32_256 &&
+ coopmat2_props.cooperativeMatrixFlexibleDimensionsMaxDimension >= 512) {
+ device->coopmat2 = true;
+ }
+ }
+#endif
+ }
+
+ if (!vk11_features.storageBuffer16BitAccess) {
+ std::cerr << "ggml_vulkan: device " << GGML_VK_NAME << idx << " does not support 16-bit storage." << std::endl;
+ throw std::runtime_error("Unsupported device");
+ }
+
+ device_extensions.push_back("VK_KHR_16bit_storage");
+
+#ifdef GGML_VULKAN_VALIDATE
+ device_extensions.push_back("VK_KHR_shader_non_semantic_info");
+#endif
+
+ if (device->fp16) {
+ device_extensions.push_back("VK_KHR_shader_float16_int8");
+ }
+
+ if (device->coopmat_support) {
+ // Query supported shapes
+ std::vector<VkCooperativeMatrixPropertiesKHR> cm_props;
+
+ PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR pfn_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR =
+ (PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR)vkGetInstanceProcAddr(vk_instance.instance, "vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR");
+
+ uint32_t cm_props_num;
+
+ pfn_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR(device->physical_device, &cm_props_num, nullptr);
+
+ cm_props.resize(cm_props_num);
+
+ for (auto& prop : cm_props) {
+ prop.sType = VK_STRUCTURE_TYPE_COOPERATIVE_MATRIX_PROPERTIES_KHR;
+ }
+
+ pfn_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR(device->physical_device, &cm_props_num, cm_props.data());
+
+ VK_LOG_DEBUG("ggml_vulkan: Cooperative Matrix Shapes: " << cm_props.size());
+
+ for (auto& prop : cm_props) {
+ VK_LOG_DEBUG("ggml_vulkan: M: " << prop.MSize << " N: " << prop.NSize << " K: " << prop.KSize << " A: " << vk::to_string((vk::ComponentTypeKHR)prop.AType) << " B: " << vk::to_string((vk::ComponentTypeKHR)prop.BType) << " C: " << vk::to_string((vk::ComponentTypeKHR)prop.CType) << " Result: " << vk::to_string((vk::ComponentTypeKHR)prop.ResultType) << " saturatingAccumulation: " << prop.saturatingAccumulation << " scope: " << vk::to_string((vk::ScopeKHR)prop.scope));
+
+ if ((vk::ComponentTypeKHR)prop.AType == vk::ComponentTypeKHR::eFloat16 &&
+ (vk::ComponentTypeKHR)prop.BType == vk::ComponentTypeKHR::eFloat16 &&
+ (vk::ScopeKHR)prop.scope == vk::ScopeKHR::eSubgroup
+ ) {
+ if ((vk::ComponentTypeKHR)prop.CType == vk::ComponentTypeKHR::eFloat32 &&
+ (vk::ComponentTypeKHR)prop.ResultType == vk::ComponentTypeKHR::eFloat32) {
+ // coopmat sizes not set yet
+ if (device->coopmat_m == 0) {
+ device->coopmat_acc_f32_support = true;
+ device->coopmat_m = prop.MSize;
+ device->coopmat_n = prop.NSize;
+ device->coopmat_k = prop.KSize;
+ } else if (device->coopmat_m == prop.MSize && device->coopmat_n == prop.NSize && device->coopmat_k == prop.KSize) {
+ // Only enable if shape is identical
+ device->coopmat_acc_f32_support = true;
+ }
+ } else if ((vk::ComponentTypeKHR)prop.CType == vk::ComponentTypeKHR::eFloat16 &&
+ (vk::ComponentTypeKHR)prop.ResultType == vk::ComponentTypeKHR::eFloat16) {
+ // coopmat sizes not set yet
+ if (device->coopmat_m == 0) {
+ device->coopmat_acc_f16_support = true;
+ device->coopmat_m = prop.MSize;
+ device->coopmat_n = prop.NSize;
+ device->coopmat_k = prop.KSize;
+ } else if (device->coopmat_m == prop.MSize && device->coopmat_n == prop.NSize && device->coopmat_k == prop.KSize) {
+ // Only enable if shape is identical
+ device->coopmat_acc_f16_support = true;
+ }
+ }
+ }
+ }
+
+ if (device->coopmat_m == 0 || !device->coopmat_acc_f32_support) {
+ // No suitable matmul mode found
+ GGML_LOG_DEBUG("ggml_vulkan: WARNING: No suitable matrix core mode found. Disabling matrix cores.\n");
+ device->coopmat_support = false;
+ }
+ }
+
+ if (device->coopmat_support) {
+ device_extensions.push_back("VK_KHR_cooperative_matrix");
+ }
+
+ device->name = GGML_VK_NAME + std::to_string(idx);
+
+ device_create_info = {
+ vk::DeviceCreateFlags(),
+ device_queue_create_infos,
+ {},
+ device_extensions
+ };
+ device_create_info.setPNext(&device_features2);
+ device->device = device->physical_device.createDevice(device_create_info);
+
+ // Queues
+ ggml_vk_create_queue(device, device->compute_queue, compute_queue_family_index, 0, { vk::PipelineStageFlagBits::eComputeShader | vk::PipelineStageFlagBits::eTransfer }, false);
+
+ // Shaders
+        // Disable matmul tile sizes early if performance is low or the size is not supported
+ switch (device->vendor_id) {
+#ifndef GGML_VULKAN_RUN_TESTS
+ case VK_VENDOR_ID_AMD:
+ case VK_VENDOR_ID_INTEL:
+ device->mul_mat_l = false;
+ device->mul_mat_m = true;
+ device->mul_mat_s = true;
+ device->mul_mat_id_l = false;
+ device->mul_mat_id_m = true;
+ device->mul_mat_id_s = true;
+ break;
+ case VK_VENDOR_ID_APPLE:
+ device->mul_mat_l = false;
+ device->mul_mat_m = true;
+ device->mul_mat_s = false;
+ device->mul_mat_id_l = false;
+ device->mul_mat_id_m = true;
+ device->mul_mat_id_s = false;
+ break;
+#endif
+ default:
+ device->mul_mat_l = true;
+ device->mul_mat_m = true;
+ device->mul_mat_s = true;
+ device->mul_mat_id_l = true;
+ device->mul_mat_id_m = true;
+ device->mul_mat_id_s = true;
+ break;
+ }
+
+ ggml_vk_load_shaders(device);
+
+ if (!device->single_queue) {
+ const uint32_t transfer_queue_index = compute_queue_family_index == transfer_queue_family_index ? 1 : 0;
+ ggml_vk_create_queue(device, device->transfer_queue, transfer_queue_family_index, transfer_queue_index, { vk::PipelineStageFlagBits::eTransfer }, true);
+ } else {
+ // TODO: Use pointer or reference to avoid copy
+ device->transfer_queue = device->compute_queue;
+ }
+
+ device->buffer_type = {
+ /* .iface = */ ggml_backend_vk_buffer_type_interface,
+ /* .device = */ ggml_backend_reg_dev_get(ggml_backend_vk_reg(), idx),
+ /* .context = */ new ggml_backend_vk_buffer_type_context{ device->name, device },
+ };
+
+ device->fence = device->device.createFence({});
+
+ device->idx = idx;
+
+ return device;
+ }
+
+ return vk_instance.devices[idx];
+}
+
+static void ggml_vk_print_gpu_info(size_t idx) {
+ GGML_ASSERT(idx < vk_instance.device_indices.size());
+ size_t dev_num = vk_instance.device_indices[idx];
+ VK_LOG_DEBUG("ggml_vk_print_gpu_info(" << dev_num << ")");
+ GGML_ASSERT(vk_instance_initialized);
+
+ std::vector<vk::PhysicalDevice> devices = vk_instance.instance.enumeratePhysicalDevices();
+
+ if (dev_num >= devices.size()) {
+ std::cerr << "ggml_vulkan: Device with index " << dev_num << " does not exist." << std::endl;
+ throw std::runtime_error("Device not found");
+ }
+
+ vk::PhysicalDevice physical_device = devices[dev_num];
+ std::vector<vk::ExtensionProperties> ext_props = physical_device.enumerateDeviceExtensionProperties();
+
+ vk::PhysicalDeviceProperties2 props2;
+ vk::PhysicalDeviceMaintenance3Properties props3;
+ vk::PhysicalDeviceSubgroupProperties subgroup_props;
+ vk::PhysicalDeviceDriverProperties driver_props;
+ props2.pNext = &props3;
+ props3.pNext = &subgroup_props;
+ subgroup_props.pNext = &driver_props;
+ physical_device.getProperties2(&props2);
+
+ const size_t subgroup_size = subgroup_props.subgroupSize;
+ const bool uma = props2.properties.deviceType == vk::PhysicalDeviceType::eIntegratedGpu;
+
+ bool fp16_storage = false;
+ bool fp16_compute = false;
+ bool coopmat_support = false;
+ bool coopmat2_support = false;
+
+    for (const auto& properties : ext_props) {
+ if (strcmp("VK_KHR_16bit_storage", properties.extensionName) == 0) {
+ fp16_storage = true;
+ } else if (strcmp("VK_KHR_shader_float16_int8", properties.extensionName) == 0) {
+ fp16_compute = true;
+ } else if (strcmp("VK_KHR_cooperative_matrix", properties.extensionName) == 0 &&
+ !getenv("GGML_VK_DISABLE_COOPMAT")) {
+ coopmat_support = true;
+#if defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
+ } else if (strcmp("VK_NV_cooperative_matrix2", properties.extensionName) == 0 &&
+ !getenv("GGML_VK_DISABLE_COOPMAT2")) {
+ coopmat2_support = true;
+#endif
+ }
+ }
+
+ if (!ggml_vk_khr_cooperative_matrix_support(props2.properties, driver_props)) {
+ coopmat_support = false;
+ }
+
+ const char* GGML_VK_DISABLE_F16 = getenv("GGML_VK_DISABLE_F16");
+ bool force_disable_f16 = GGML_VK_DISABLE_F16 != nullptr;
+
+ bool fp16 = !force_disable_f16 && fp16_storage && fp16_compute;
+
+ vk::PhysicalDeviceFeatures device_features = physical_device.getFeatures();
+
+ VkPhysicalDeviceFeatures2 device_features2;
+ device_features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
+ device_features2.pNext = nullptr;
+ device_features2.features = (VkPhysicalDeviceFeatures)device_features;
+
+ VkPhysicalDeviceVulkan11Features vk11_features;
+ vk11_features.pNext = nullptr;
+ vk11_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES;
+ device_features2.pNext = &vk11_features;
+
+ VkPhysicalDeviceVulkan12Features vk12_features;
+ vk12_features.pNext = nullptr;
+ vk12_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES;
+ vk11_features.pNext = &vk12_features;
+
+ // Pointer to the last chain element
+ VkBaseOutStructure * last_struct = (VkBaseOutStructure *)&vk12_features;
+
+ VkPhysicalDeviceCooperativeMatrixFeaturesKHR coopmat_features;
+ coopmat_features.pNext = nullptr;
+ coopmat_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_KHR;
+ coopmat_features.cooperativeMatrix = VK_FALSE;
+
+ if (coopmat_support) {
+ last_struct->pNext = (VkBaseOutStructure *)&coopmat_features;
+ last_struct = (VkBaseOutStructure *)&coopmat_features;
+ }
+
+ vkGetPhysicalDeviceFeatures2(physical_device, &device_features2);
+
+ fp16 = fp16 && vk12_features.shaderFloat16;
+
+ coopmat_support = coopmat_support && coopmat_features.cooperativeMatrix;
+
+ std::string matrix_cores = coopmat2_support ? "NV_coopmat2" : coopmat_support ? "KHR_coopmat" : "none";
+
+ std::string device_name = props2.properties.deviceName.data();
+ GGML_LOG_DEBUG("ggml_vulkan: %zu = %s (%s) | uma: %d | fp16: %d | warp size: %zu | matrix cores: %s\n",
+ idx, device_name.c_str(), driver_props.driverName.data(), uma, fp16, subgroup_size, matrix_cores.c_str());
+
+ if (props2.properties.deviceType == vk::PhysicalDeviceType::eCpu) {
+ GGML_LOG_DEBUG("ggml_vulkan: Warning: Device type is CPU. This is probably not the device you want.\n");
+ }
+}
+
+static bool ggml_vk_instance_validation_ext_available(const std::vector<vk::ExtensionProperties>& instance_extensions);
+static bool ggml_vk_instance_portability_enumeration_ext_available(const std::vector<vk::ExtensionProperties>& instance_extensions);
+
+void ggml_vk_instance_init() {
+ if (vk_instance_initialized) {
+ return;
+ }
+ VK_LOG_DEBUG("ggml_vk_instance_init()");
+
+ vk_instance_initialized = true;
+
+ vk::ApplicationInfo app_info{ "ggml-vulkan", 1, nullptr, 0, VK_API_VERSION };
+
+ const std::vector<vk::ExtensionProperties> instance_extensions = vk::enumerateInstanceExtensionProperties();
+ const bool validation_ext = ggml_vk_instance_validation_ext_available(instance_extensions);
+#ifdef __APPLE__
+ const bool portability_enumeration_ext = ggml_vk_instance_portability_enumeration_ext_available(instance_extensions);
+#endif
+
+ std::vector<const char*> layers;
+
+ if (validation_ext) {
+ layers.push_back("VK_LAYER_KHRONOS_validation");
+ }
+ std::vector<const char*> extensions;
+ if (validation_ext) {
+ extensions.push_back("VK_EXT_validation_features");
+ }
+#ifdef __APPLE__
+ if (portability_enumeration_ext) {
+ extensions.push_back("VK_KHR_portability_enumeration");
+ }
+#endif
+ vk::InstanceCreateInfo instance_create_info(vk::InstanceCreateFlags{}, &app_info, layers, extensions);
+#ifdef __APPLE__
+ if (portability_enumeration_ext) {
+ instance_create_info.flags |= vk::InstanceCreateFlagBits::eEnumeratePortabilityKHR;
+ }
+#endif
+
+ std::vector<vk::ValidationFeatureEnableEXT> features_enable;
+ vk::ValidationFeaturesEXT validation_features;
+
+ if (validation_ext) {
+ features_enable = { vk::ValidationFeatureEnableEXT::eBestPractices };
+ validation_features = {
+ features_enable,
+ {},
+ };
+ validation_features.setPNext(nullptr);
+ instance_create_info.setPNext(&validation_features);
+ GGML_LOG_DEBUG("ggml_vulkan: Validation layers enabled\n");
+ }
+ vk_instance.instance = vk::createInstance(instance_create_info);
+
+ size_t num_available_devices = vk_instance.instance.enumeratePhysicalDevices().size();
+
+ // Emulate behavior of CUDA_VISIBLE_DEVICES for Vulkan
+ char * devices_env = getenv("GGML_VK_VISIBLE_DEVICES");
+ if (devices_env != nullptr) {
+ std::string devices(devices_env);
+ std::replace(devices.begin(), devices.end(), ',', ' ');
+
+ std::stringstream ss(devices);
+ size_t tmp;
+ while (ss >> tmp) {
+ if(tmp >= num_available_devices) {
+ std::cerr << "ggml_vulkan: Invalid device index " << tmp << " in GGML_VK_VISIBLE_DEVICES." << std::endl;
+ throw std::runtime_error("Invalid Vulkan device index");
+ }
+ vk_instance.device_indices.push_back(tmp);
+ }
+ } else {
+ std::vector<vk::PhysicalDevice> devices = vk_instance.instance.enumeratePhysicalDevices();
+
+ // Make sure at least one device exists
+ if (devices.empty()) {
+ std::cerr << "ggml_vulkan: Error: No devices found." << std::endl;
+ GGML_ABORT("fatal error");
+ }
+
+ // Default to using all dedicated GPUs
+ for (size_t i = 0; i < devices.size(); i++) {
+ vk::PhysicalDeviceProperties2 new_props;
+ vk::PhysicalDeviceDriverProperties new_driver;
+ vk::PhysicalDeviceIDProperties new_id;
+ new_props.pNext = &new_driver;
+ new_driver.pNext = &new_id;
+ devices[i].getProperties2(&new_props);
+
+ if (new_props.properties.deviceType == vk::PhysicalDeviceType::eDiscreteGpu) {
+ // Check if there are two physical devices corresponding to the same GPU
+ auto old_device = std::find_if(
+ vk_instance.device_indices.begin(),
+ vk_instance.device_indices.end(),
+ [&devices, &new_id](const size_t k){
+ vk::PhysicalDeviceProperties2 old_props;
+ vk::PhysicalDeviceIDProperties old_id;
+ old_props.pNext = &old_id;
+ devices[k].getProperties2(&old_props);
+ return std::equal(std::begin(old_id.deviceUUID), std::end(old_id.deviceUUID), std::begin(new_id.deviceUUID));
+ }
+ );
+ if (old_device == vk_instance.device_indices.end()) {
+ vk_instance.device_indices.push_back(i);
+ } else {
+                    // There can be two physical devices corresponding to the same GPU if two different drivers are installed
+                    // This can cause errors when splitting layers across the devices, so keep only one of them
+ VK_LOG_DEBUG("Device " << i << " and device " << *old_device << " have the same deviceUUID");
+
+ vk::PhysicalDeviceProperties2 old_props;
+ vk::PhysicalDeviceDriverProperties old_driver;
+ old_props.pNext = &old_driver;
+ devices[*old_device].getProperties2(&old_props);
+
+ std::map<vk::DriverId, int> driver_priorities {};
+ int old_priority = std::numeric_limits<int>::max();
+ int new_priority = std::numeric_limits<int>::max();
+
+                    // Check https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VkDriverId.html for the list of driver ids
+ // Smaller number -> higher priority
+ switch (old_props.properties.vendorID) {
+ case VK_VENDOR_ID_AMD:
+ driver_priorities[vk::DriverId::eMesaRadv] = 1;
+ driver_priorities[vk::DriverId::eAmdOpenSource] = 2;
+ driver_priorities[vk::DriverId::eAmdProprietary] = 3;
+ break;
+ case VK_VENDOR_ID_INTEL:
+ driver_priorities[vk::DriverId::eIntelOpenSourceMESA] = 1;
+ driver_priorities[vk::DriverId::eIntelProprietaryWindows] = 2;
+ break;
+ case VK_VENDOR_ID_NVIDIA:
+ driver_priorities[vk::DriverId::eNvidiaProprietary] = 1;
+#if defined(VK_API_VERSION_1_3) && VK_HEADER_VERSION >= 235
+ driver_priorities[vk::DriverId::eMesaNvk] = 2;
+#endif
+ break;
+ }
+
+ if (driver_priorities.count(old_driver.driverID)) {
+ old_priority = driver_priorities[old_driver.driverID];
+ }
+ if (driver_priorities.count(new_driver.driverID)) {
+ new_priority = driver_priorities[new_driver.driverID];
+ }
+
+ if (new_priority < old_priority) {
+ auto r = std::remove(vk_instance.device_indices.begin(), vk_instance.device_indices.end(), *old_device);
+ vk_instance.device_indices.erase(r, vk_instance.device_indices.end());
+ vk_instance.device_indices.push_back(i);
+
+ VK_LOG_DEBUG("Prioritize device " << i << " driver " << new_driver.driverName << " over device " << *old_device << " driver " << old_driver.driverName);
+                    } else {
+                        VK_LOG_DEBUG("Prioritize device " << *old_device << " driver " << old_driver.driverName << " over device " << i << " driver " << new_driver.driverName);
+                    }
+ }
+ }
+ }
+
+ // If no dedicated GPUs found, fall back to GPU 0
+ if (vk_instance.device_indices.empty()) {
+ vk_instance.device_indices.push_back(0);
+ }
+ }
+ GGML_LOG_DEBUG("ggml_vulkan: Found %zu Vulkan devices:\n", vk_instance.device_indices.size());
+
+ for (size_t i = 0; i < vk_instance.device_indices.size(); i++) {
+ ggml_vk_print_gpu_info(i);
+ }
+}
+
+static void ggml_vk_init(ggml_backend_vk_context * ctx, size_t idx) {
+ VK_LOG_DEBUG("ggml_vk_init(" << ctx->name << ", " << idx << ")");
+ ggml_vk_instance_init();
+ GGML_ASSERT(idx < vk_instance.device_indices.size());
+
+ ctx->name = GGML_VK_NAME + std::to_string(idx);
+
+ ctx->device = ggml_vk_get_device(idx);
+
+ ctx->semaphore_idx = 0;
+ ctx->event_idx = 0;
+
+ ctx->prealloc_size_x = 0;
+ ctx->prealloc_size_y = 0;
+ ctx->prealloc_size_split_k = 0;
+
+ ctx->fence = ctx->device->device.createFence({});
+
+#ifdef GGML_VULKAN_CHECK_RESULTS
+ const char* skip_checks = getenv("GGML_VULKAN_SKIP_CHECKS");
+ vk_skip_checks = (skip_checks == NULL ? 0 : atoi(skip_checks));
+ const char* output_tensor = getenv("GGML_VULKAN_OUTPUT_TENSOR");
+ vk_output_tensor = (output_tensor == NULL ? 0 : atoi(output_tensor));
+#endif
+}
+
+static vk_pipeline ggml_vk_get_to_fp16(ggml_backend_vk_context * ctx, ggml_type type) {
+ VK_LOG_DEBUG("ggml_vk_get_to_fp16()");
+ switch (type) {
+ case GGML_TYPE_F32:
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
+ case GGML_TYPE_Q5_0:
+ case GGML_TYPE_Q5_1:
+ case GGML_TYPE_Q8_0:
+ case GGML_TYPE_Q2_K:
+ case GGML_TYPE_Q3_K:
+ case GGML_TYPE_Q4_K:
+ case GGML_TYPE_Q5_K:
+ case GGML_TYPE_Q6_K:
+ case GGML_TYPE_IQ4_NL:
+ break;
+ default:
+ return nullptr;
+ }
+
+ return ctx->device->pipeline_dequant[type];
+}
+
+static vk_matmul_pipeline ggml_vk_get_mul_mat_mat_pipeline(ggml_backend_vk_context * ctx, ggml_type src0_type, ggml_type src1_type, ggml_prec prec) {
+ VK_LOG_DEBUG("ggml_vk_get_mul_mat_mat_pipeline(" << ggml_type_name(src0_type) << ", " << ggml_type_name(src1_type) << ")");
+ if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F32) {
+ return ctx->device->pipeline_matmul_f32;
+ }
+ if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F16) {
+ return ctx->device->pipeline_matmul_f32_f16;
+ }
+ if (prec == GGML_PREC_DEFAULT && ctx->device->fp16 && !(ctx->device->coopmat_support && !ctx->device->coopmat_acc_f16_support)) {
+ if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F32) {
+ return ctx->device->pipeline_matmul_f16_f32.f16acc;
+ }
+ if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) {
+ return ctx->device->pipeline_matmul_f16.f16acc;
+ }
+ } else {
+ if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F32) {
+ return ctx->device->pipeline_matmul_f16_f32.f32acc;
+ }
+ if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) {
+ return ctx->device->pipeline_matmul_f16.f32acc;
+ }
+ }
+
+ if (src1_type != GGML_TYPE_F32 && !ctx->device->coopmat2) {
+ return nullptr;
+ }
+
+ switch (src0_type) {
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
+ case GGML_TYPE_Q5_0:
+ case GGML_TYPE_Q5_1:
+ case GGML_TYPE_Q8_0:
+ case GGML_TYPE_Q2_K:
+ case GGML_TYPE_Q3_K:
+ case GGML_TYPE_Q4_K:
+ case GGML_TYPE_Q5_K:
+ case GGML_TYPE_Q6_K:
+ case GGML_TYPE_IQ4_NL:
+ break;
+ default:
+ return nullptr;
+ }
+
+ if (ctx->device->coopmat2) {
+ assert(src1_type == GGML_TYPE_F16);
+ return ctx->device->pipeline_dequant_mul_mat_mat_f16[src0_type].f16acc;
+ }
+ return ctx->device->fp16 ? ctx->device->pipeline_dequant_mul_mat_mat[src0_type].f16acc : ctx->device->pipeline_dequant_mul_mat_mat[src0_type].f32acc;
+}
+
+static vk_pipeline ggml_vk_get_dequantize_mul_mat_vec(ggml_backend_vk_context * ctx, ggml_type a_type, ggml_type b_type, uint32_t num_cols) {
+ VK_LOG_DEBUG("ggml_vk_get_dequantize_mul_mat_vec()");
+ GGML_ASSERT(b_type == GGML_TYPE_F32 || b_type == GGML_TYPE_F16);
+ GGML_ASSERT(num_cols >= 1 && num_cols <= mul_mat_vec_max_cols);
+
+ switch (a_type) {
+ case GGML_TYPE_F32:
+ case GGML_TYPE_F16:
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
+ case GGML_TYPE_Q5_0:
+ case GGML_TYPE_Q5_1:
+ case GGML_TYPE_Q8_0:
+ case GGML_TYPE_Q2_K:
+ case GGML_TYPE_Q3_K:
+ case GGML_TYPE_Q4_K:
+ case GGML_TYPE_Q5_K:
+ case GGML_TYPE_Q6_K:
+ case GGML_TYPE_IQ4_NL:
+ break;
+ default:
+ return nullptr;
+ }
+
+ return b_type == GGML_TYPE_F32 ? ctx->device->pipeline_dequant_mul_mat_vec_f32_f32[a_type][num_cols-1] : ctx->device->pipeline_dequant_mul_mat_vec_f16_f32[a_type][num_cols-1];
+}
+
+static vk_matmul_pipeline ggml_vk_get_mul_mat_mat_id_pipeline(ggml_backend_vk_context * ctx, ggml_type src0_type, ggml_type src1_type, ggml_prec prec) {
+ VK_LOG_DEBUG("ggml_vk_get_mul_mat_mat_id_pipeline()");
+ if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F32) {
+ return ctx->device->pipeline_matmul_id_f32;
+ }
+ if (prec == GGML_PREC_DEFAULT && ctx->device->fp16 && !(ctx->device->coopmat_support && !ctx->device->coopmat_acc_f16_support)) {
+ if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F32) {
+ return ctx->device->pipeline_matmul_id_f16_f32.f16acc;
+ }
+ if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) {
+ return ctx->device->pipeline_matmul_id_f16.f16acc;
+ }
+ } else {
+ if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F32) {
+ return ctx->device->pipeline_matmul_id_f16_f32.f32acc;
+ }
+ if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) {
+ return ctx->device->pipeline_matmul_id_f16.f32acc;
+ }
+ }
+
+ GGML_ASSERT(src1_type == GGML_TYPE_F32);
+
+ switch (src0_type) {
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
+ case GGML_TYPE_Q5_0:
+ case GGML_TYPE_Q5_1:
+ case GGML_TYPE_Q8_0:
+ case GGML_TYPE_Q2_K:
+ case GGML_TYPE_Q3_K:
+ case GGML_TYPE_Q4_K:
+ case GGML_TYPE_Q5_K:
+ case GGML_TYPE_Q6_K:
+ case GGML_TYPE_IQ4_NL:
+ break;
+ default:
+ return nullptr;
+ }
+
+ return ctx->device->fp16 ? ctx->device->pipeline_dequant_mul_mat_mat_id[src0_type].f16acc : ctx->device->pipeline_dequant_mul_mat_mat_id[src0_type].f32acc;
+}
+
+static vk_pipeline ggml_vk_get_dequantize_mul_mat_vec_id(ggml_backend_vk_context * ctx, ggml_type a_type, ggml_type b_type) {
+    VK_LOG_DEBUG("ggml_vk_get_dequantize_mul_mat_vec_id()");
+ GGML_ASSERT(b_type == GGML_TYPE_F32);
+
+ switch (a_type) {
+ case GGML_TYPE_F32:
+ case GGML_TYPE_F16:
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
+ case GGML_TYPE_Q5_0:
+ case GGML_TYPE_Q5_1:
+ case GGML_TYPE_Q8_0:
+ case GGML_TYPE_Q2_K:
+ case GGML_TYPE_Q3_K:
+ case GGML_TYPE_Q4_K:
+ case GGML_TYPE_Q5_K:
+ case GGML_TYPE_Q6_K:
+ case GGML_TYPE_IQ4_NL:
+ break;
+ default:
+ return nullptr;
+ }
+
+ return ctx->device->pipeline_dequant_mul_mat_vec_id_f32[a_type];
+}
+
+static vk_buffer ggml_vk_pool_malloc(ggml_backend_vk_context * ctx, size_t size) {
+ VK_LOG_DEBUG("ggml_vk_pool_malloc(" << size << ")");
+ VK_LOG_MEMORY("ggml_vk_pool_malloc");
+
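+    // Reuse the smallest pooled buffer that is large enough; if none fits, destroy the largest pooled buffer and allocate a fresh one of the requested size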
+ int best_i = -1;
+    size_t best_size = std::numeric_limits<size_t>::max(); // smallest unused buffer that fits our needs
+    int worst_i = -1;
+    size_t worst_size = 0; // largest unused buffer seen so far
+ for (int i = 0; i < MAX_VK_BUFFERS; ++i) {
+ vk_buffer &b = ctx->buffer_pool[i];
+ if (b != nullptr && b->size >= size && b->size < best_size) {
+ best_i = i;
+ best_size = b->size;
+ }
+ if (b != nullptr && b->size > worst_size) {
+ worst_i = i;
+ worst_size = b->size;
+ }
+ }
+ if(best_i != -1) {
+ //found the smallest buffer that fits our needs
+ vk_buffer b = ctx->buffer_pool[best_i];
+ ctx->buffer_pool[best_i].reset();
+ return b;
+ }
+ if(worst_i != -1) {
+ // no unused buffer fits; free the largest one to make room before allocating a new device buffer
+ vk_buffer& b = ctx->buffer_pool[worst_i];
+ ggml_vk_destroy_buffer(b);
+ }
+
+ return ggml_vk_create_buffer_device(ctx->device, size);
+}
+
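+// Example for ggml_vk_pool_malloc above (illustrative, with assumed sizes): if the pool holds
+// unused buffers of 1 MiB, 4 MiB and 16 MiB, a request for 3 MiB hands out the 4 MiB buffer
+// (the smallest one that fits). A request for 32 MiB fits none, so the largest (16 MiB) buffer
+// is destroyed to free VRAM and a fresh 32 MiB device buffer is allocated and returned.
+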
+static void ggml_vk_pool_free(ggml_backend_vk_context * ctx, vk_buffer& buffer) {
+ VK_LOG_DEBUG("ggml_vk_pool_free(" << buffer->size << ")");
+ for (int i = 0; i < MAX_VK_BUFFERS; ++i) {
+ vk_buffer& b = ctx->buffer_pool[i];
+ if (b == nullptr) {
+ b = buffer;
+ return;
+ }
+ }
+ std::cerr << "ggml_vulkan: WARNING: vk buffer pool full, increase MAX_VK_BUFFERS" << std::endl;
+ ggml_vk_destroy_buffer(buffer);
+}
+
+// Returns a buffer that is only valid for temporary use; it is kept in ctx->gc.temp_buffers and may be handed out again by a later call
+static vk_buffer ggml_vk_create_buffer_temp(ggml_backend_vk_context * ctx, size_t size) {
+ // Try to find existing temp buffer with enough capacity
+ for (auto& buffer : ctx->gc.temp_buffers) {
+ if (buffer->size >= size) {
+ return buffer;
+ }
+ }
+
+ VK_LOG_MEMORY("ggml_vk_create_buffer_temp(" << size << ")");
+
+ // Otherwise create new buffer
+ vk_buffer buf = ggml_vk_pool_malloc(ctx, size);
+ ctx->gc.temp_buffers.push_back(buf);
+
+ return buf;
+}
+
+static void * ggml_vk_host_malloc(vk_device& device, size_t size) {
+ VK_LOG_MEMORY("ggml_vk_host_malloc(" << size << ")");
+ vk_buffer buf = ggml_vk_create_buffer(device, size,
+ vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostCached,
+ vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent);
+
+ if(!(buf->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible)) {
+ fprintf(stderr, "WARNING: failed to allocate %.2f MB of pinned memory\n",
+ size/1024.0/1024.0);
+ device->device.freeMemory(buf->device_memory);
+ device->device.destroyBuffer(buf->buffer);
+ return nullptr;
+ }
+
+ device->pinned_memory.push_back(std::make_tuple(buf->ptr, size, buf));
+
+ return buf->ptr;
+}
+
+static void ggml_vk_host_free(vk_device& device, void* ptr) {
+ if (ptr == nullptr) {
+ return;
+ }
+ VK_LOG_MEMORY("ggml_vk_host_free(" << ptr << ")");
+ vk_buffer buf;
+ size_t index;
+ for (size_t i = 0; i < device->pinned_memory.size(); i++) {
+ const uint8_t* addr = (const uint8_t*) std::get<0>(device->pinned_memory[i]);
+ const uint8_t* endr = addr + std::get<1>(device->pinned_memory[i]);
+ if (ptr >= addr && ptr < endr) {
+ buf = std::get<2>(device->pinned_memory[i]);
+ index = i;
+ break;
+ }
+ }
+ if (buf == nullptr) {
+ fprintf(stderr, "WARNING: failed to free pinned memory: memory not in map\n");
+ return;
+ }
+
+ ggml_vk_destroy_buffer(buf);
+
+ device->pinned_memory.erase(device->pinned_memory.begin() + index);
+}
+
+static void ggml_vk_host_get(vk_device& device, const void * ptr, vk_buffer& buf, size_t& buf_offset) {
+ buf = nullptr;
+ buf_offset = 0;
+ for (size_t i = 0; i < device->pinned_memory.size(); i++) {
+ const uint8_t* addr = (const uint8_t*) std::get<0>(device->pinned_memory[i]);
+ const uint8_t* endr = addr + std::get<1>(device->pinned_memory[i]);
+ if (ptr >= addr && ptr < endr) {
+ buf = std::get<2>(device->pinned_memory[i]);
+ buf_offset = ((const uint8_t *)ptr) - addr;
+ break;
+ }
+ }
+}
+
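+// Example for ggml_vk_host_get above (illustrative, with assumed addresses): if 64 MiB of pinned
+// memory was registered at host address 0x1000 by ggml_vk_host_malloc, a pointer to 0x1400
+// resolves to that pinned vk_buffer with buf_offset = 0x400; a pointer outside every registered
+// [addr, addr + size) range leaves buf set to nullptr, which callers treat as "not pinned".
+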
+static vk_submission ggml_vk_begin_submission(vk_device& device, vk_queue& q, bool one_time = true) {
+ vk_submission s;
+ s.buffer = ggml_vk_create_cmd_buffer(device, q);
+ if (one_time) {
+ s.buffer.begin({ vk::CommandBufferUsageFlagBits::eOneTimeSubmit });
+ } else {
+ s.buffer.begin({ vk::CommandBufferUsageFlags{} });
+ }
+
+ return s;
+}
+
+
+static void ggml_vk_dispatch_pipeline(ggml_backend_vk_context* ctx, vk_context& subctx, vk_pipeline& pipeline, std::initializer_list<vk::DescriptorBufferInfo> const& descriptor_buffer_infos, size_t push_constant_size, const void* push_constants, std::array<uint32_t, 3> elements) {
+ const uint32_t wg0 = CEIL_DIV(elements[0], pipeline->wg_denoms[0]);
+ const uint32_t wg1 = CEIL_DIV(elements[1], pipeline->wg_denoms[1]);
+ const uint32_t wg2 = CEIL_DIV(elements[2], pipeline->wg_denoms[2]);
+ VK_LOG_DEBUG("ggml_vk_dispatch_pipeline(" << pipeline->name << ", {";
+ for (auto& buffer : descriptor_buffer_infos) {
+ std::cerr << "(" << buffer.buffer << ", " << buffer.offset << ", " << buffer.range << "), ";
+ }
+ std::cerr << "}, (" << wg0 << "," << wg1 << "," << wg2 << "))");
+ GGML_ASSERT(pipeline->descriptor_set_idx < pipeline->descriptor_sets.size());
+ GGML_ASSERT(descriptor_buffer_infos.size() == pipeline->parameter_count);
+
+ vk::DescriptorSet& descriptor_set = pipeline->descriptor_sets[pipeline->descriptor_set_idx++];
+ vk::WriteDescriptorSet write_descriptor_set{ descriptor_set, 0, 0, pipeline->parameter_count, vk::DescriptorType::eStorageBuffer, nullptr, descriptor_buffer_infos.begin() };
+ ctx->device->device.updateDescriptorSets({ write_descriptor_set }, {});
+
+ subctx->s->buffer.pushConstants(pipeline->layout, vk::ShaderStageFlagBits::eCompute, 0, push_constant_size, push_constants);
+ subctx->s->buffer.bindPipeline(vk::PipelineBindPoint::eCompute, pipeline->pipeline);
+ subctx->s->buffer.bindDescriptorSets(vk::PipelineBindPoint::eCompute,
+ pipeline->layout,
+ 0,
+ { descriptor_set },
+ {});
+ subctx->s->buffer.dispatch(wg0, wg1, wg2);
+}
+
+static void ggml_vk_end_submission(vk_submission& s, std::vector<vk_semaphore> wait_semaphores, std::vector<vk_semaphore> signal_semaphores) {
+ s.buffer.end();
+
+ s.wait_semaphores = std::move(wait_semaphores);
+ s.signal_semaphores = std::move(signal_semaphores);
+}
+
+static void ggml_vk_ctx_end(vk_context& ctx) {
+ VK_LOG_DEBUG("ggml_vk_ctx_end(" << ctx << ", " << ctx->seqs.size() << ")");
+ if (ctx->s == nullptr) {
+ return;
+ }
+
+ ctx->s->buffer.end();
+ ctx->s = nullptr;
+}
+
+static void ggml_vk_ctx_begin(vk_device& device, vk_context& subctx) {
+ VK_LOG_DEBUG("ggml_vk_ctx_begin(" << device->name << ")");
+ if (subctx->s != nullptr) {
+ ggml_vk_ctx_end(subctx);
+ }
+
+ subctx->seqs.push_back({ ggml_vk_begin_submission(device, *subctx->q) });
+ subctx->s = subctx->seqs[subctx->seqs.size() - 1].data();
+}
+
+static size_t ggml_vk_align_size(size_t width, size_t align) {
+ VK_LOG_DEBUG("ggml_vk_align_size(" << width << ", " << align << ")");
+ return CEIL_DIV(width, align) * align;
+}
+
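+// Worked example for ggml_vk_align_size above (illustrative): ggml_vk_align_size(1000, 256)
+// = CEIL_DIV(1000, 256) * 256 = 4 * 256 = 1024, i.e. sizes are rounded up to the next multiple
+// of the alignment (typically the device's minStorageBufferOffsetAlignment).
+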
+static void deferred_memcpy(void * dst, const void * src, size_t size, std::vector<vk_staging_memcpy>* memcpys = nullptr) {
+ if (memcpys == nullptr) {
+ memcpy(dst, src, size);
+ } else {
+ memcpys->emplace_back(dst, src, size);
+ }
+}
+
+static void ggml_vk_ensure_sync_staging_buffer(vk_device& device, size_t size) {
+ if (device->sync_staging == nullptr || device->sync_staging->size < size) {
+ VK_LOG_MEMORY("ggml_vk_ensure_sync_staging_buffer(" << size << ")");
+ ggml_vk_destroy_buffer(device->sync_staging);
+ device->sync_staging = ggml_vk_create_buffer_check(device, size,
+ vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostCached,
+ vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent);
+ }
+}
+
+static void ggml_vk_buffer_write_nc_async(ggml_backend_vk_context * ctx, vk_context& subctx, vk_buffer& dst, size_t offset, const ggml_tensor * tensor, bool sync_staging = false) {
+ VK_LOG_DEBUG("ggml_vk_buffer_write_nc_async(" << tensor << ")");
+ GGML_ASSERT(!ggml_is_contiguous(tensor));
+ // Buffer is already mapped
+ if(dst->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible) {
+ std::cerr << "ggml_vulkan: buffer_write_nc_async dst buffer is host_visible. Use synchronous write." << std::endl;
+ GGML_ABORT("fatal error");
+ }
+ // Check if src is pinned memory
+ vk_buffer buf = nullptr;
+ size_t buf_offset = 0;
+ ggml_vk_host_get(ctx->device, tensor->data, buf, buf_offset);
+
+ const uint64_t ne0 = tensor->ne[0];
+ const uint64_t ne1 = tensor->ne[1];
+ const uint64_t ne2 = tensor->ne[2];
+ const uint64_t ne3 = tensor->ne[3];
+ const uint64_t nb0 = tensor->nb[0];
+ const uint64_t nb1 = tensor->nb[1];
+ const uint64_t nb2 = tensor->nb[2];
+ const uint64_t nb3 = tensor->nb[3];
+ const ggml_type type = tensor->type;
+ const uint64_t ts = ggml_type_size(type);
+ const uint64_t bs = ggml_blck_size(type);
+
+ const uint64_t dstnb0 = ts;
+ const uint64_t dstnb1 = dstnb0*(ne0/bs);
+ const uint64_t dstnb2 = dstnb1*ne1;
+ const uint64_t dstnb3 = dstnb2*ne2;
+
+ const uint64_t ne = ggml_nelements(tensor);
+
+ if (buf != nullptr) {
+ // Memory is pinned, use as staging buffer
+ std::vector<vk::BufferCopy> slices;
+
+ for (uint64_t i3 = 0; i3 < ne3; i3++) {
+ for (uint64_t i2 = 0; i2 < ne2; i2++) {
+ // Find longest contiguous slice
+ if (ne1*nb1 == dstnb2) {
+ slices.push_back({ buf_offset + i3*nb3 + i2*nb2, offset + i3*dstnb3 + i2*dstnb2, dstnb2 });
+ } else {
+ for (uint64_t i1 = 0; i1 < ne1; i1++) {
+ if (ne0*nb0/bs == dstnb1) {
+ slices.push_back({ buf_offset + i3*nb3 + i2*nb2 + i1*nb1, offset + i3*dstnb3 + i2*dstnb2 + i1*dstnb1, dstnb1 });
+ } else {
+ const uint64_t s_off = buf_offset + i3*nb3 + i2*nb2 + i1*nb1;
+ const uint64_t d_off = offset + i3*dstnb3 + i2*dstnb2 + i1*dstnb1;
+ for (uint64_t i0 = 0; i0 < ne0; i0++) {
+ slices.push_back({ s_off + i0*nb0, d_off + i0*dstnb0, dstnb0 });
+ }
+ }
+ }
+ }
+ }
+ }
+
+ ggml_vk_sync_buffers(subctx);
+ subctx->s->buffer.copyBuffer(buf->buffer, dst->buffer, slices);
+ return;
+ }
+
+ if (!sync_staging) {
+ GGML_ABORT("Asynchronous write to non-pinned memory not supported");
+ }
+
+ // Staging buffer required
+ vk_buffer& staging = ctx->device->sync_staging;
+ const uint64_t copy_size = ts*ne/bs;
+ ggml_vk_ensure_sync_staging_buffer(ctx->device, copy_size);
+ VkBufferCopy buf_copy{ 0, offset, copy_size };
+
+ ggml_vk_sync_buffers(subctx);
+ vkCmdCopyBuffer(subctx->s->buffer, (VkBuffer)staging->buffer, (VkBuffer)dst->buffer, 1, &buf_copy);
+
+ for (uint64_t i3 = 0; i3 < ne3; i3++) {
+ for (uint64_t i2 = 0; i2 < ne2; i2++) {
+ // Find longest contiguous slice
+ if (ne1*nb1 == dstnb2) {
+ deferred_memcpy((uint8_t *)staging->ptr + i3*dstnb3 + i2*dstnb2, (const uint8_t *) tensor->data + buf_offset + i3*nb3 + i2*nb2, dstnb2, &subctx->in_memcpys);
+ } else {
+ for (uint64_t i1 = 0; i1 < ne1; i1++) {
+ if (ne0*nb0/bs == dstnb1) {
+ deferred_memcpy((uint8_t *)staging->ptr + i3*dstnb3 + i2*dstnb2 + i1*dstnb1, (const uint8_t *) tensor->data + buf_offset + i3*nb3 + i2*nb2 + i1*nb1, dstnb1, &subctx->in_memcpys);
+ } else {
+ const uint64_t s_off = buf_offset + i3*nb3 + i2*nb2 + i1*nb1;
+ const uint64_t d_off = i3*dstnb3 + i2*dstnb2 + i1*dstnb1;
+ for (uint64_t i0 = 0; i0 < ne0; i0++) {
+ deferred_memcpy((uint8_t *)staging->ptr + d_off + i0*dstnb0, (const uint8_t *) tensor->data + s_off + i0*nb0, dstnb0, &subctx->in_memcpys);
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+static void ggml_vk_buffer_write_2d_async(vk_context subctx, vk_buffer& dst, size_t offset, const void * src, size_t spitch, size_t width, size_t height, bool sync_staging = false) {
+ VK_LOG_DEBUG("ggml_vk_buffer_write_2d_async(" << width << ", " << height << ")");
+ // Buffer is already mapped
+ if(dst->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible) {
+ std::cerr << "ggml_vulkan: buffer_write_async dst buffer is host_visible. Use synchronous write." << std::endl;
+ GGML_ABORT("fatal error");
+ }
+ // Check if src is pinned memory
+ vk_buffer buf = nullptr;
+ size_t buf_offset = 0;
+ ggml_vk_host_get(dst->device, src, buf, buf_offset);
+
+ if (buf != nullptr) {
+ // Memory is pinned, use as staging buffer
+ std::vector<vk::BufferCopy> slices(1);
+ if (width == spitch) {
+ // Only do single write if stride is equal
+ slices[0].srcOffset = buf_offset;
+ slices[0].dstOffset = offset;
+ slices[0].size = width * height;
+ } else {
+ slices.resize(height);
+ for (size_t i = 0; i < height; i++) {
+ slices[i].srcOffset = buf_offset + i * spitch;
+ slices[i].dstOffset = offset + i * width;
+ slices[i].size = width;
+ }
+ }
+
+ ggml_vk_sync_buffers(subctx);
+ subctx->s->buffer.copyBuffer(buf->buffer, dst->buffer, slices);
+ return;
+ }
+ VK_LOG_DEBUG("STAGING");
+
+ if (!sync_staging) {
+ GGML_ABORT("Asynchronous write to non-pinned memory not supported");
+ }
+
+ // Staging buffer required
+ const size_t copy_size = width*height;
+ ggml_vk_ensure_sync_staging_buffer(dst->device, copy_size);
+
+ vk_buffer& staging_buffer = dst->device->sync_staging;
+
+ VkBufferCopy buf_copy = {
+ 0,
+ offset,
+ copy_size};
+
+ ggml_vk_sync_buffers(subctx);
+ vkCmdCopyBuffer(subctx->s->buffer, (VkBuffer)staging_buffer->buffer, (VkBuffer)dst->buffer, 1, &buf_copy);
+
+ if (width == spitch) {
+ deferred_memcpy((uint8_t *)staging_buffer->ptr, src, width * height, &subctx->in_memcpys);
+ } else {
+ for (size_t i = 0; i < height; i++) {
+ deferred_memcpy((uint8_t *)staging_buffer->ptr + i * width, (const uint8_t *) src + i * spitch, width, &subctx->in_memcpys);
+ }
+ }
+}
+
+static void ggml_vk_buffer_write_async(vk_context subctx, vk_buffer& dst, size_t offset, const void * src, size_t size, bool sync_staging = false) {
+ VK_LOG_DEBUG("ggml_vk_buffer_write_async(" << size << ")");
+ return ggml_vk_buffer_write_2d_async(subctx, dst, offset, src, size, size, 1, sync_staging);
+}
+
+static void ggml_vk_buffer_write_2d(vk_buffer& dst, size_t offset, const void * src, size_t spitch, size_t width, size_t height) {
+ VK_LOG_DEBUG("ggml_vk_buffer_write_2d(" << width << ", " << height << ")");
+ // Buffer is already mapped
+ if(dst->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible) {
+ GGML_ASSERT(dst->memory_property_flags & vk::MemoryPropertyFlagBits::eHostCoherent);
+
+ for (size_t i = 0; i < height; i++) {
+ memcpy((uint8_t *)dst->ptr + offset + i * width, (const uint8_t *) src + i * spitch, width);
+ }
+ } else {
+ vk_context subctx = ggml_vk_create_temporary_context(dst->device->transfer_queue);
+ ggml_vk_ctx_begin(dst->device, subctx);
+ ggml_vk_buffer_write_2d_async(subctx, dst, offset, src, spitch, width, height, true);
+ ggml_vk_ctx_end(subctx);
+
+ for (auto& cpy : subctx->in_memcpys) {
+ memcpy(cpy.dst, cpy.src, cpy.n);
+ }
+
+ ggml_vk_submit(subctx, dst->device->fence);
+ VK_CHECK(dst->device->device.waitForFences({ dst->device->fence }, true, UINT64_MAX), "vk_buffer_write_2d waitForFences");
+ dst->device->device.resetFences({ dst->device->fence });
+ }
+}
+
+static void ggml_vk_buffer_write(vk_buffer& dst, size_t offset, const void * src, size_t size) {
+ VK_LOG_DEBUG("ggml_vk_buffer_write(" << size << ")");
+ ggml_vk_buffer_write_2d(dst, offset, src, 0, size, 1);
+}
+
+static void ggml_vk_buffer_read_2d_async(vk_context subctx, vk_buffer& src, size_t offset, void * dst, size_t spitch, size_t dpitch, size_t width, size_t height, bool sync_staging = false) {
+ VK_LOG_DEBUG("ggml_vk_buffer_read_2d_async(offset=" << offset << ", width=" << width << ", height=" << height << ")");
+ GGML_ASSERT(width > 0);
+ GGML_ASSERT(height > 0);
+ GGML_ASSERT(src != nullptr);
+
+ // TODO: staging_offset is not used
+
+ // Check if dst is pinned memory
+ vk_buffer buf = nullptr;
+ size_t buf_offset = 0;
+ ggml_vk_host_get(src->device, dst, buf, buf_offset);
+
+ std::vector<vk::BufferCopy> slices(1);
+ if (width == spitch && width == dpitch) {
+ // Only do single write if stride is equal
+ slices[0].srcOffset = offset;
+ slices[0].dstOffset = buf_offset;
+ slices[0].size = width * height;
+ } else {
+ slices.resize(height);
+ for (size_t i = 0; i < height; i++) {
+ slices[i].srcOffset = offset + i * spitch;
+ slices[i].dstOffset = buf_offset + i * dpitch;
+ slices[i].size = width;
+ }
+ }
+
+ if (buf != nullptr) {
+ // Memory is pinned, use as staging buffer
+ ggml_vk_sync_buffers(subctx);
+ subctx->s->buffer.copyBuffer(src->buffer, buf->buffer, slices);
+
+ return;
+ }
+ VK_LOG_DEBUG("STAGING");
+
+ if (!sync_staging) {
+ GGML_ABORT("Asynchronous read from non-pinned memory not supported");
+ }
+
+ // Fall back to staging buffer
+ const size_t copy_size = dpitch * height;
+ ggml_vk_ensure_sync_staging_buffer(src->device, copy_size);
+
+ vk_buffer& staging_buffer = src->device->sync_staging;
+
+ ggml_vk_sync_buffers(subctx);
+ subctx->s->buffer.copyBuffer(src->buffer, staging_buffer->buffer, slices);
+
+ deferred_memcpy(dst, staging_buffer->ptr, copy_size, &subctx->out_memcpys);
+}
+
+static void ggml_vk_buffer_read_async(vk_context subctx, vk_buffer& src, size_t offset, void * dst, size_t size, bool sync_staging = false) {
+ return ggml_vk_buffer_read_2d_async(subctx, src, offset, dst, size, size, size, 1, sync_staging);
+}
+
+static void ggml_vk_buffer_read(vk_buffer& src, size_t offset, void * dst, size_t size) {
+ VK_LOG_DEBUG("ggml_vk_buffer_read(" << src->buffer << ", " << offset << ", " << size << ")");
+
+ // If the device is not a UMA device, the memory may still be host-accessible through ReBAR. While
+ // writing through PCIe is sufficiently fast, reading data back over PCIe is slower than going
+ // through the device's device-to-host copy path.
+ if(src->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible && src->device->uma) {
+ GGML_ASSERT(src->memory_property_flags & vk::MemoryPropertyFlagBits::eHostCoherent);
+
+ memcpy(dst, (uint8_t *) src->ptr + offset, size);
+ } else {
+ vk_context subctx = ggml_vk_create_temporary_context(src->device->transfer_queue);
+ ggml_vk_ctx_begin(src->device, subctx);
+ ggml_vk_buffer_read_async(subctx, src, offset, dst, size, true);
+ ggml_vk_ctx_end(subctx);
+
+ ggml_vk_submit(subctx, src->device->fence);
+ VK_CHECK(src->device->device.waitForFences({ src->device->fence }, true, UINT64_MAX), "vk_buffer_read waitForFences");
+ src->device->device.resetFences({ src->device->fence });
+
+ for (auto& cpy : subctx->out_memcpys) {
+ memcpy(cpy.dst, cpy.src, cpy.n);
+ }
+ }
+}
+
+static void ggml_vk_buffer_copy_async(vk_context& ctx, vk_buffer& dst, size_t dst_offset, vk_buffer& src, size_t src_offset, size_t size) {
+ VK_LOG_DEBUG("ggml_vk_buffer_copy_async(" << size << ")");
+ // Make sure both buffers are on same device
+ GGML_ASSERT(src->device == dst->device);
+
+ VkBufferCopy bc{ src_offset, dst_offset, size };
+
+ vkCmdCopyBuffer(ctx->s->buffer, (VkBuffer)src->buffer, (VkBuffer)dst->buffer, 1, &bc);
+}
+
+static void ggml_vk_buffer_copy(vk_buffer& dst, size_t dst_offset, vk_buffer& src, size_t src_offset, size_t size) {
+ if (src->device == dst->device) {
+ VK_LOG_DEBUG("ggml_vk_buffer_copy(SINGLE_DEVICE, " << size << ")");
+ // Copy within the device
+ vk_context subctx = ggml_vk_create_temporary_context(src->device->transfer_queue);
+ ggml_vk_ctx_begin(src->device, subctx);
+ ggml_vk_buffer_copy_async(subctx, dst, dst_offset, src, src_offset, size);
+ ggml_vk_ctx_end(subctx);
+ ggml_vk_submit(subctx, src->device->fence);
+ VK_CHECK(src->device->device.waitForFences({ src->device->fence }, true, UINT64_MAX), "vk_buffer_copy waitForFences");
+ src->device->device.resetFences({ src->device->fence });
+ } else {
+ VK_LOG_DEBUG("ggml_vk_buffer_copy(MULTI_DEVICE, " << size << ")");
+ // Copy device to device
+ ggml_vk_ensure_sync_staging_buffer(src->device, size);
+ ggml_vk_ensure_sync_staging_buffer(dst->device, size);
+
+ // Copy to src staging buffer
+ ggml_vk_buffer_copy(src->device->sync_staging, 0, src, src_offset, size);
+ // memcpy to dst staging buffer
+ memcpy(dst->device->sync_staging->ptr, src->device->sync_staging->ptr, size);
+ // Copy to dst buffer
+ ggml_vk_buffer_copy(dst, dst_offset, dst->device->sync_staging, 0, size);
+ }
+}
+
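+// Note on the multi-device case above: there is no direct GPU-to-GPU copy here; data is staged
+// through host memory instead: src device -> src sync_staging -> memcpy into dst sync_staging
+// -> dst device, with both staging buffers grown to the copy size first.
+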
+static void ggml_vk_buffer_memset(vk_buffer& dst, size_t offset, uint32_t c, size_t size) {
+ VK_LOG_DEBUG("ggml_vk_buffer_memset(" << offset << ", " << c << ", " << size << ")");
+
+ vk_context subctx = ggml_vk_create_temporary_context(dst->device->transfer_queue);
+ ggml_vk_ctx_begin(dst->device, subctx);
+ subctx->s->buffer.fillBuffer(dst->buffer, offset, size, c);
+ ggml_vk_ctx_end(subctx);
+
+ ggml_vk_submit(subctx, dst->device->fence);
+ VK_CHECK(dst->device->device.waitForFences({ dst->device->fence }, true, UINT64_MAX), "vk_memset waitForFences");
+ dst->device->device.resetFences({ dst->device->fence });
+}
+
+static uint32_t ggml_vk_guess_split_k(ggml_backend_vk_context * ctx, int m, int n, int k, const vk_pipeline& pipeline) {
+ VK_LOG_DEBUG("ggml_vk_guess_split_k(" << m << ", " << n << ", " << k << ")");
+
+ uint32_t split_k = 1;
+ if (ctx->device->shader_core_count != 0 && m >= (int)pipeline->wg_denoms[0] && n >= (int)pipeline->wg_denoms[1]) {
+ // If k is 'large' and the SMs will fill less than halfway, use split_k.
+ uint32_t m_tiles = CEIL_DIV(m, pipeline->wg_denoms[0]);
+ uint32_t n_tiles = CEIL_DIV(n, pipeline->wg_denoms[1]);
+ if (k >= 2048 && m_tiles * n_tiles < ctx->device->shader_core_count / 2) {
+ split_k = ctx->device->shader_core_count / (m_tiles * n_tiles);
+ // Clamp to 2 or 4
+ split_k = std::min(split_k, 4u);
+ if (split_k == 3) {
+ split_k = 2;
+ }
+ }
+ }
+
+ return split_k;
+}
+
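+// Worked example for the split_k heuristic above (illustrative, with assumed device values):
+// with 64 shader cores, a 64x64 workgroup tile, m = n = 128 (2x2 = 4 tiles < 64/2) and
+// k = 4096 >= 2048, split_k = 64 / 4 = 16 and is clamped to 4; a computed value of 3 is rounded
+// down to 2, so the effective split factor is always 1, 2 or 4.
+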
+static vk_pipeline ggml_vk_guess_matmul_pipeline(ggml_backend_vk_context * ctx, vk_matmul_pipeline& mmp, int m, int n, bool aligned) {
+ VK_LOG_DEBUG("ggml_vk_guess_matmul_pipeline(" << m << ", " << n << ", " << aligned << ")");
+
+ if (ctx->device->coopmat2) {
+ if ((ctx->device->mul_mat_l && (m % mmp->l->wg_denoms[0]) == 0 && (n % mmp->l->wg_denoms[1]) == 0) || (!ctx->device->mul_mat_m && !ctx->device->mul_mat_s)) {
+ return aligned ? mmp->a_l : mmp->l;
+ }
+ if ((ctx->device->mul_mat_m && (m % mmp->m->wg_denoms[0]) == 0 && (n % mmp->m->wg_denoms[1]) == 0) || !ctx->device->mul_mat_s) {
+ return aligned ? mmp->a_m : mmp->m;
+ }
+ return aligned ? mmp->a_s : mmp->s;
+ }
+
+ if ((ctx->device->mul_mat_s && (m <= 32 || n <= 32)) || (!ctx->device->mul_mat_m && !ctx->device->mul_mat_l)) {
+ return aligned ? mmp->a_s : mmp->s;
+ }
+ if ((ctx->device->mul_mat_m && (m <= 64 || n <= 64)) || !ctx->device->mul_mat_l) {
+ return aligned ? mmp->a_m : mmp->m;
+ }
+ return aligned ? mmp->a_l : mmp->l;
+}
+
+static uint32_t ggml_vk_guess_matmul_pipeline_align(ggml_backend_vk_context * ctx, vk_matmul_pipeline& mmp, int m, int n) {
+ VK_LOG_DEBUG("ggml_vk_guess_matmul_pipeline_align(" << m << ", " << n << ")");
+ return ggml_vk_guess_matmul_pipeline(ctx, mmp, m, n, true)->align;
+}
+
+static void ggml_vk_matmul(
+ ggml_backend_vk_context * ctx, vk_context& subctx, vk_pipeline& pipeline,
+ vk_subbuffer&& a, vk_subbuffer&& b, vk_subbuffer&& d, vk_subbuffer&& split_k_buffer,
+ uint32_t m, uint32_t n, uint32_t k, uint32_t stride_a, uint32_t stride_b, uint32_t stride_d,
+ uint32_t batch_stride_a, uint32_t batch_stride_b, uint32_t batch_stride_d,
+ uint32_t split_k, uint32_t batch, uint32_t ne02, uint32_t ne12, uint32_t broadcast2, uint32_t broadcast3) {
+ VK_LOG_DEBUG("ggml_vk_matmul(a: (" << a.buffer->buffer << ", " << a.offset << ", " << a.size << "), b: (" << b.buffer->buffer << ", " << b.offset << ", " << b.size << "), d: (" << d.buffer->buffer << ", " << d.offset << ", " << d.size << "), split_k: (" << (split_k_buffer.buffer != nullptr ? split_k_buffer.buffer->buffer : VK_NULL_HANDLE) << ", " << split_k_buffer.offset << ", " << split_k_buffer.size << "), m: " << m << ", n: " << n << ", k: " << k << ", stride_a: " << stride_a << ", stride_b: " << stride_b << ", stride_d: " << stride_d << ", batch_stride_a: " << batch_stride_a << ", batch_stride_b: " << batch_stride_b << ", batch_stride_d: " << batch_stride_d << ", split_k: " << split_k << ", batch: " << batch << ", ne02: " << ne02 << ", ne12: " << ne12 << ", broadcast2: " << broadcast2 << ", broadcast3: " << broadcast3 << ")");
+ ggml_vk_sync_buffers(subctx);
+ if (split_k == 1) {
+ const vk_mat_mat_push_constants pc = { m, n, k, stride_a, stride_b, stride_d, batch_stride_a, batch_stride_b, batch_stride_d, k, ne02, ne12, broadcast2, broadcast3 };
+ ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { a, b, d }, sizeof(vk_mat_mat_push_constants), &pc, { m, n, batch });
+ return;
+ }
+
+ GGML_ASSERT(batch_stride_d == m * n);
+
+ const vk_mat_mat_push_constants pc1 = { m, n, k, stride_a, stride_b, stride_d, batch_stride_a, batch_stride_b, batch_stride_d, CEIL_DIV(k, split_k), ne02, ne12, broadcast2, broadcast3 };
+ // Make sure enough workgroups get assigned for split k to work
+ ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { a, b, split_k_buffer }, sizeof(vk_mat_mat_push_constants), &pc1, { (CEIL_DIV(m, pipeline->wg_denoms[0]) * pipeline->wg_denoms[0]) * split_k, n, batch });
+ ggml_vk_sync_buffers(subctx);
+ const std::array<uint32_t, 2> pc2 = { (uint32_t)(m * n * batch), split_k };
+ ggml_vk_dispatch_pipeline(ctx, subctx, ctx->device->pipeline_matmul_split_k_reduce, { split_k_buffer, d }, pc2.size() * sizeof(uint32_t), pc2.data(), { m * n * batch, 1, 1 });
+}
+
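+// Note: when split_k > 1 the matmul above runs in two passes. The first dispatch writes split_k
+// partial results, each covering a k-slice of CEIL_DIV(k, split_k) columns, into split_k_buffer;
+// pipeline_matmul_split_k_reduce then sums the split_k partials for each of the m * n * batch
+// output elements into d.
+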
+static vk_pipeline ggml_vk_guess_matmul_id_pipeline(ggml_backend_vk_context * ctx, vk_matmul_pipeline& mmp, int m, int n, bool aligned) {
+ VK_LOG_DEBUG("ggml_vk_guess_matmul_pipeline(" << m << ", " << n << ", " << aligned << ")");
+
+ if (ctx->device->coopmat2) {
+ if ((ctx->device->mul_mat_id_l && (m % mmp->l->wg_denoms[0]) == 0 && (n % mmp->l->wg_denoms[1]) == 0) || (!ctx->device->mul_mat_id_m && !ctx->device->mul_mat_id_s)) {
+ return aligned ? mmp->a_l : mmp->l;
+ }
+ if ((ctx->device->mul_mat_id_m && (m % mmp->m->wg_denoms[0]) == 0 && (n % mmp->m->wg_denoms[1]) == 0) || !ctx->device->mul_mat_id_s) {
+ return aligned ? mmp->a_m : mmp->m;
+ }
+ return aligned ? mmp->a_s : mmp->s;
+ }
+
+ if ((ctx->device->mul_mat_id_s && (m <= 32 || n <= 32)) || (!ctx->device->mul_mat_id_m && !ctx->device->mul_mat_id_l)) {
+ return aligned ? mmp->a_s : mmp->s;
+ }
+ if ((ctx->device->mul_mat_id_m && (m <= 64 || n <= 64)) || !ctx->device->mul_mat_id_l) {
+ return aligned ? mmp->a_m : mmp->m;
+ }
+ return aligned ? mmp->a_l : mmp->l;
+}
+
+static uint32_t ggml_vk_guess_matmul_id_pipeline_align(ggml_backend_vk_context * ctx, vk_matmul_pipeline& mmp, int m, int n) {
+ VK_LOG_DEBUG("ggml_vk_guess_matmul_pipeline_align(" << m << ", " << n << ")");
+ return ggml_vk_guess_matmul_id_pipeline(ctx, mmp, m, n, true)->align;
+}
+
+static void ggml_vk_matmul_id(
+ ggml_backend_vk_context * ctx, vk_context& subctx, vk_pipeline& pipeline,
+ vk_subbuffer&& a, vk_subbuffer&& b, vk_subbuffer&& d, vk_subbuffer&& ids,
+ uint32_t m, uint32_t n, uint32_t k, uint32_t stride_a, uint32_t stride_b, uint32_t stride_d,
+ uint32_t batch_stride_a, uint32_t batch_stride_b, uint32_t batch_stride_d,
+ uint32_t n_as, uint32_t nei0, uint32_t nei1, uint32_t nbi1, uint32_t ne11) {
+ VK_LOG_DEBUG("ggml_vk_matmul_id(a: (" << a.buffer->buffer << ", " << a.offset << ", " << a.size << "), b: (" << b.buffer->buffer << ", " << b.offset << ", " << b.size << "), d: (" << d.buffer->buffer << ", " << d.offset << ", " << d.size << "), ids: (" << ids.buffer->buffer << ", " << ids.offset << ", " << ids.size << "), " <<
+ "m: " << m << ", n: " << n << ", k: " << k << ", stride_a: " << stride_a << ", stride_b: " << stride_b << ", stride_d: " << stride_d << ", " <<
+ "batch_stride_a: " << batch_stride_a << ", batch_stride_b: " << batch_stride_b << ", batch_stride_d: " << batch_stride_d << ", " <<
+ "n_as: " << n_as << ", nei0: " << nei0 << ", nei1: " << nei1 << ", nbi1: " << nbi1 << ", ne11: " << ne11 << ")");
+ ggml_vk_sync_buffers(subctx);
+ const vk_mat_mat_id_push_constants pc = { m, n, k, stride_a, stride_b, stride_d, batch_stride_a, batch_stride_b, batch_stride_d,
+ nei0, nei1, nbi1, ne11 };
+ ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { a, b, d, ids }, sizeof(vk_mat_mat_id_push_constants), &pc, { m, nei1, n_as });
+}
+
+static bool ggml_vk_dim01_contiguous(const ggml_tensor * tensor) {
+ return
+ tensor->nb[0] == ggml_type_size(tensor->type) &&
+ tensor->nb[1] == (tensor->nb[0]*tensor->ne[0])/ggml_blck_size(tensor->type) &&
+ tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
+}
+
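+// Worked example for ggml_vk_dim01_contiguous above (illustrative): an f32 tensor with
+// ne = {8, 4, 2, 1} passes the check when nb = {4, 32, 128, 256} (dims 0, 1 and 3 tightly
+// packed); nb[2] is deliberately left unchecked, so dim 2 may carry padding or a permutation.
+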
+static vk_pipeline ggml_vk_get_cpy_pipeline(ggml_backend_vk_context * ctx, const ggml_tensor * src, const ggml_tensor * dst, ggml_type to) {
+
+ // Choose "contiguous copy" shader if src/dst are contiguous
+ bool contig = ggml_is_contiguous(src) && (!dst || ggml_is_contiguous(dst));
+
+ if (src->type == GGML_TYPE_F32 && to == GGML_TYPE_F32) {
+ if (contig) {
+ return ctx->device->pipeline_contig_cpy_f32_f32;
+ } else {
+ return ctx->device->pipeline_cpy_f32_f32;
+ }
+ }
+ if (src->type == GGML_TYPE_F32 && to == GGML_TYPE_F16) {
+ if (contig) {
+ return ctx->device->pipeline_contig_cpy_f32_f16;
+ } else {
+ return ctx->device->pipeline_cpy_f32_f16;
+ }
+ }
+ if (src->type == GGML_TYPE_F16 && to == GGML_TYPE_F16) {
+ if (contig) {
+ return ctx->device->pipeline_contig_cpy_f16_f16;
+ } else {
+ return ctx->device->pipeline_cpy_f16_f16;
+ }
+ }
+
+ std::cerr << "Missing CPY op for types: " << ggml_type_name(src->type) << " " << ggml_type_name(to) << std::endl;
+ GGML_ABORT("fatal error");
+}
+
+static void ggml_vk_cpy_to_contiguous(ggml_backend_vk_context * ctx, vk_context& subctx, vk_pipeline pipeline, const ggml_tensor * tensor, vk_subbuffer&& in, vk_subbuffer&& out) {
+ VK_LOG_DEBUG("ggml_vk_cpy_to_contiguous((" << tensor << ", type=" << tensor->type << ", ne0=" << tensor->ne[0] << ", ne1=" << tensor->ne[1] << ", ne2=" << tensor->ne[2] << ", ne3=" << tensor->ne[3] << ", nb0=" << tensor->nb[0] << ", nb1=" << tensor->nb[1] << ", nb2=" << tensor->nb[2] << ", nb3=" << tensor->nb[3] << "), ";
+ std::cerr << "buffer in size=" << in.buffer->size << ", buffer out size=" << out.buffer->size << ")");
+ const int tensor_type_size = ggml_type_size(tensor->type);
+
+ const uint32_t ne = ggml_nelements(tensor);
+ std::array<uint32_t, 3> elements;
+
+ if (ne > 262144) {
+ elements = { 512, 512, CEIL_DIV(ne, 262144) };
+ } else if (ne > 512) {
+ elements = { 512, CEIL_DIV(ne, 512), 1 };
+ } else {
+ elements = { ne, 1, 1 };
+ }
+
+ vk_op_unary_push_constants pc = {
+ (uint32_t)ne,
+ (uint32_t)tensor->ne[0], (uint32_t)tensor->ne[1], (uint32_t)tensor->ne[2], (uint32_t)tensor->ne[3], (uint32_t)tensor->nb[0] / tensor_type_size, (uint32_t)tensor->nb[1] / tensor_type_size, (uint32_t)tensor->nb[2] / tensor_type_size, (uint32_t)tensor->nb[3] / tensor_type_size,
+ (uint32_t)tensor->ne[0], (uint32_t)tensor->ne[1], (uint32_t)tensor->ne[2], (uint32_t)tensor->ne[3], 1 , (uint32_t)tensor->ne[0] , (uint32_t)(tensor->ne[0] * tensor->ne[1]) , (uint32_t)(tensor->ne[0] * tensor->ne[1] * tensor->ne[2]),
+ 0,
+ 0.0f, 0.0f,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ };
+ init_pushconst_fastdiv(pc);
+ ggml_vk_sync_buffers(subctx);
+ ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { in, out }, sizeof(vk_op_unary_push_constants), &pc, elements);
+}
+
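+// Note on the dispatch shape above (illustrative numbers): the element count is folded into at
+// most three grid dimensions of up to 512 each, e.g. ne = 1000000 becomes
+// { 512, 512, CEIL_DIV(1000000, 262144) } = { 512, 512, 4 }, keeping each dimension well below
+// typical maxComputeWorkGroupCount limits.
+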
+static void ggml_vk_mul_mat_q_f16(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
+ VK_LOG_DEBUG("ggml_vk_mul_mat_q_f16((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
+ std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
+ std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3];
+ std::cerr << "), " << (dryrun ? "dryrun" : "") << ")");
+ GGML_ASSERT(ggml_vk_dim01_contiguous(src0) || src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); // NOLINT
+ GGML_ASSERT(ggml_vk_dim01_contiguous(src1) || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16); // NOLINT
+
+ const uint64_t ne00 = src0->ne[0];
+ const uint64_t ne01 = src0->ne[1];
+ const uint64_t ne02 = src0->ne[2];
+ const uint64_t ne03 = src0->ne[3];
+
+ const uint64_t ne10 = src1->ne[0];
+ const uint64_t ne11 = src1->ne[1];
+ const uint64_t ne12 = src1->ne[2];
+ const uint64_t ne13 = src1->ne[3];
+
+ const uint64_t ne20 = dst->ne[0];
+ const uint64_t ne21 = dst->ne[1];
+
+ const uint64_t r2 = ne12 / ne02;
+ const uint64_t r3 = ne13 / ne03;
+
+ ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
+ ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
+ ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context;
+
+ vk_buffer d_Qx = nullptr;
+ size_t qx_buf_offset = 0;
+ vk_buffer d_Qy = nullptr;
+ size_t qy_buf_offset = 0;
+
+ bool src0_uma = false;
+ bool src1_uma = false;
+
+ if (ctx->device->uma) {
+ ggml_vk_host_get(ctx->device, src0->data, d_Qx, qx_buf_offset);
+ ggml_vk_host_get(ctx->device, src1->data, d_Qy, qy_buf_offset);
+ src0_uma = d_Qx != nullptr;
+ src1_uma = d_Qy != nullptr;
+ }
+
+ const bool x_non_contig = !ggml_vk_dim01_contiguous(src0);
+ // Reformat and convert to fp16 if src1 is non-contiguous, or for coopmat2 for better perf
+ const bool y_non_contig = (ctx->device->coopmat2 && src1->type == GGML_TYPE_F32) ||
+ !ggml_vk_dim01_contiguous(src1);
+
+ const bool y_f32_kernel = src1->type == GGML_TYPE_F32 && !y_non_contig;
+
+ vk_matmul_pipeline mmp = ggml_vk_get_mul_mat_mat_pipeline(ctx, src0->type, y_non_contig ? GGML_TYPE_F16 : src1->type, (ggml_prec)dst->op_params[0]);
+
+ const bool qx_needs_dequant = mmp == nullptr || x_non_contig;
+ const bool qy_needs_dequant = (src1->type != GGML_TYPE_F16 && !y_f32_kernel) || y_non_contig;
+
+ if (qx_needs_dequant) {
+ // Fall back to dequant + f16 mulmat
+ mmp = ggml_vk_get_mul_mat_mat_pipeline(ctx, GGML_TYPE_F16, y_f32_kernel ? GGML_TYPE_F32 : GGML_TYPE_F16, (ggml_prec)dst->op_params[0]);
+ }
+
+ // Not implemented
+ GGML_ASSERT(y_non_contig || !qy_needs_dequant); // NOLINT
+
+ const int x_ne = ne01 * ne00;
+ const int y_ne = ne11 * ne10;
+ const int d_ne = ne11 * ne01;
+
+ const uint32_t kpad = ggml_vk_align_size(ne10, ggml_vk_guess_matmul_pipeline_align(ctx, mmp, ne01, ne11));
+ const bool aligned = ne10 == kpad && ne01 > 8 && ne11 > 8;
+
+ vk_pipeline pipeline = ggml_vk_guess_matmul_pipeline(ctx, mmp, ne01, ne11, aligned);
+
+ const uint32_t split_k = ggml_vk_guess_split_k(ctx, ne01, ne11, ne10, pipeline);
+
+ const uint64_t qx_sz = ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type);
+ const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type);
+ const uint64_t x_sz = !qx_needs_dequant ? qx_sz : sizeof(ggml_fp16_t) * x_ne;
+ const uint64_t y_sz = y_f32_kernel ? sizeof(float) * y_ne : sizeof(ggml_fp16_t) * y_ne;
+ const uint64_t d_sz = sizeof(float) * d_ne;
+
+ vk_pipeline to_fp16_vk_0 = nullptr;
+ vk_pipeline to_fp16_vk_1 = nullptr;
+
+ if (x_non_contig) {
+ to_fp16_vk_0 = ggml_vk_get_cpy_pipeline(ctx, src0, nullptr, GGML_TYPE_F16);
+ } else {
+ to_fp16_vk_0 = ggml_vk_get_to_fp16(ctx, src0->type);
+ }
+ if (y_non_contig) {
+ to_fp16_vk_1 = ggml_vk_get_cpy_pipeline(ctx, src1, nullptr, GGML_TYPE_F16);
+ } else {
+ to_fp16_vk_1 = ggml_vk_get_to_fp16(ctx, src1->type);
+ }
+ GGML_ASSERT(!qx_needs_dequant || to_fp16_vk_0 != nullptr); // NOLINT
+ GGML_ASSERT(!qy_needs_dequant || to_fp16_vk_1 != nullptr); // NOLINT
+
+ if (dryrun) {
+ const uint64_t x_sz_upd = x_sz * ne02 * ne03;
+ const uint64_t y_sz_upd = y_sz * ne12 * ne13;
+ const uint64_t split_k_size = split_k > 1 ? d_sz * ne12 * ne13 * split_k : 0;
+ if (
+ (qx_needs_dequant && x_sz_upd > ctx->device->max_memory_allocation_size) ||
+ (qy_needs_dequant && y_sz_upd > ctx->device->max_memory_allocation_size) ||
+ (split_k > 1 && split_k_size > ctx->device->max_memory_allocation_size)) {
+ GGML_ABORT("Requested preallocation size is too large");
+ }
+ if (qx_needs_dequant && ctx->prealloc_size_x < x_sz_upd) {
+ ctx->prealloc_size_x = x_sz_upd;
+ }
+ if (qy_needs_dequant && ctx->prealloc_size_y < y_sz_upd) {
+ ctx->prealloc_size_y = y_sz_upd;
+ }
+ if (split_k > 1 && ctx->prealloc_size_split_k < split_k_size) {
+ ctx->prealloc_size_split_k = split_k_size;
+ }
+
+ // Request descriptor sets
+ ggml_pipeline_request_descriptor_sets(ctx->device, pipeline, 1);
+ if (qx_needs_dequant) {
+ ggml_pipeline_request_descriptor_sets(ctx->device, to_fp16_vk_0, 1);
+ }
+ if (qy_needs_dequant) {
+ ggml_pipeline_request_descriptor_sets(ctx->device, to_fp16_vk_1, 1);
+ }
+ if (split_k > 1) {
+ ggml_pipeline_request_descriptor_sets(ctx->device, ctx->device->pipeline_matmul_split_k_reduce, 1);
+ }
+ return;
+ }
+
+ vk_buffer d_D = dst_buf_ctx->dev_buffer;
+ const uint64_t d_buf_offset = vk_tensor_offset(dst) + dst->view_offs;
+ GGML_ASSERT(d_D != nullptr);
+ GGML_ASSERT(d_D->size >= d_buf_offset + d_sz * ne02 * ne03);
+ vk_buffer d_X;
+ uint64_t x_buf_offset = 0;
+ vk_buffer d_Y;
+ uint64_t y_buf_offset = 0;
+ if (!src0_uma) {
+ d_Qx = src0_buf_ctx->dev_buffer;
+ qx_buf_offset = vk_tensor_offset(src0) + src0->view_offs;
+ GGML_ASSERT(d_Qx != nullptr);
+ }
+ if (!src1_uma) {
+ d_Qy = src1_buf_ctx->dev_buffer;
+ qy_buf_offset = vk_tensor_offset(src1) + src1->view_offs;
+ GGML_ASSERT(d_Qy != nullptr);
+ }
+ if (qx_needs_dequant) {
+ d_X = ctx->prealloc_x;
+ GGML_ASSERT(d_X->size >= x_sz * ne02 * ne03);
+ } else {
+ d_X = d_Qx;
+ x_buf_offset = qx_buf_offset;
+ GGML_ASSERT(qx_sz == x_sz);
+ }
+ if (qy_needs_dequant) {
+ d_Y = ctx->prealloc_y;
+ GGML_ASSERT(d_Y->size >= y_sz * ne02 * ne03);
+ } else {
+ d_Y = d_Qy;
+ y_buf_offset = qy_buf_offset;
+ GGML_ASSERT(qy_sz == y_sz);
+ }
+
+ if (x_non_contig) {
+ ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_0, src0, { d_Qx, qx_buf_offset, VK_WHOLE_SIZE }, { d_X, 0, VK_WHOLE_SIZE });
+ } else if (qx_needs_dequant) {
+ const std::vector<uint32_t> pc = { (uint32_t)ne01, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)(ggml_nelements(src0)) };
+ ggml_vk_sync_buffers(subctx);
+ ggml_vk_dispatch_pipeline(ctx, subctx, to_fp16_vk_0, { vk_subbuffer{ d_Qx, qx_buf_offset, qx_sz * ne02 * ne03 }, vk_subbuffer{ d_X, 0, x_sz * ne02 * ne03 } }, pc.size() * sizeof(uint32_t), pc.data(), { (uint32_t)(x_ne * ne02 * ne03), 1, 1});
+ }
+ if (y_non_contig) {
+ ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_1, src1, { d_Qy, qy_buf_offset, VK_WHOLE_SIZE }, { d_Y, 0, VK_WHOLE_SIZE });
+ }
+
+ uint32_t stride_batch_x = ne00*ne01;
+ uint32_t stride_batch_y = ne10*ne11;
+
+ if (!ggml_vk_dim01_contiguous(src0) && !qx_needs_dequant) {
+ stride_batch_x = src0->nb[0] / ggml_type_size(src0->type);
+ }
+
+ if (!ggml_vk_dim01_contiguous(src1) && !qy_needs_dequant) {
+ stride_batch_y = src1->nb[0] / ggml_type_size(src1->type);
+ }
+
+ // compute
+ ggml_vk_matmul(
+ ctx, subctx, pipeline,
+ { d_X, x_buf_offset, x_sz * ne02 * ne03 }, { d_Y, y_buf_offset, y_sz * ne12 * ne13 },
+ { d_D, d_buf_offset, d_sz * ne12 * ne13 }, { ctx->prealloc_split_k, 0, d_sz * ne12 * ne13 * split_k },
+ ne01, ne11, ne10,
+ ne10, ne10, ne01, stride_batch_x, stride_batch_y, ne20*ne21,
+ split_k, ne12*ne13, ne02, ne12, r2, r3
+ ); // NOLINT
+}
+
+static void ggml_vk_mul_mat_vec_q_f16(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
+ VK_LOG_DEBUG("ggml_vk_mul_mat_vec_q_f16((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
+ std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
+ std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3];
+ std::cerr << "), " << (dryrun ? "dryrun" : "") << "),)");
+ GGML_ASSERT(ggml_vk_dim01_contiguous(src0) || src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); // NOLINT
+ GGML_ASSERT(ggml_vk_dim01_contiguous(src1) || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16); // NOLINT
+
+ const uint64_t ne00 = src0->ne[0];
+ const uint64_t ne01 = src0->ne[1];
+ const uint64_t ne02 = src0->ne[2];
+ const uint64_t ne03 = src0->ne[3];
+
+ const uint64_t ne10 = src1->ne[0];
+ const uint64_t ne11 = src1->ne[1];
+ const uint64_t ne12 = src1->ne[2];
+ const uint64_t ne13 = src1->ne[3];
+
+ const uint64_t ne20 = dst->ne[0];
+ const uint64_t ne21 = dst->ne[1];
+ const uint64_t ne22 = dst->ne[2];
+ const uint64_t ne23 = dst->ne[3];
+
+ const uint64_t r2 = ne12 / ne02;
+ const uint64_t r3 = ne13 / ne03;
+
+ // batch_n indicates that we need to compute a few vector results, and this assumes
+ // ne12 and ne13 are 1. It overloads the batch_strides to hold the row strides.
+ GGML_ASSERT(ne11 == 1 || ne12 * ne13 == 1);
+ bool batch_n = ne11 > 1;
+
+ ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
+ ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
+ ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context;
+
+ vk_buffer d_Qx = nullptr;
+ size_t qx_buf_offset = 0;
+ vk_buffer d_Qy = nullptr;
+ size_t qy_buf_offset = 0;
+
+ bool src0_uma = false;
+ bool src1_uma = false;
+
+ if (ctx->device->uma) {
+ ggml_vk_host_get(ctx->device, src0->data, d_Qx, qx_buf_offset);
+ ggml_vk_host_get(ctx->device, src1->data, d_Qy, qy_buf_offset);
+ src0_uma = d_Qx != nullptr;
+ src1_uma = d_Qy != nullptr;
+ }
+
+ const bool x_non_contig = !ggml_vk_dim01_contiguous(src0);
+ const bool y_non_contig = !ggml_vk_dim01_contiguous(src1);
+
+ const bool f16_f32_kernel = src1->type == GGML_TYPE_F32;
+
+ const bool qx_needs_dequant = x_non_contig;
+ const bool qy_needs_dequant = (src1->type != GGML_TYPE_F16 && !f16_f32_kernel) || y_non_contig;
+
+ // Not implemented
+ GGML_ASSERT(y_non_contig || !qy_needs_dequant); // NOLINT
+
+ const uint64_t x_ne = ne01 * ne00;
+ const uint64_t y_ne = ne11 * ne10;
+ const uint64_t d_ne = ne11 * ne01;
+
+ const uint64_t qx_sz = ggml_vk_align_size(ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type), ctx->device->properties.limits.minStorageBufferOffsetAlignment);
+ const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type);
+ const uint64_t x_sz = x_non_contig ? ggml_vk_align_size(ggml_type_size(src0->type) * x_ne, ctx->device->properties.limits.minStorageBufferOffsetAlignment) : qx_sz;
+ const uint64_t y_sz = f16_f32_kernel ? sizeof(float) * y_ne : sizeof(ggml_fp16_t) * y_ne;
+ const uint64_t d_sz = sizeof(float) * d_ne;
+
+ vk_pipeline to_fp16_vk_0 = nullptr;
+ vk_pipeline to_fp16_vk_1 = nullptr;
+ if (x_non_contig) {
+ to_fp16_vk_0 = ggml_vk_get_cpy_pipeline(ctx, src0, nullptr, src0->type);
+ }
+ if (y_non_contig) {
+ to_fp16_vk_1 = ggml_vk_get_cpy_pipeline(ctx, src1, nullptr, src1->type);
+ } else {
+ to_fp16_vk_1 = ggml_vk_get_to_fp16(ctx, src1->type);
+ }
+ vk_pipeline dmmv = ggml_vk_get_dequantize_mul_mat_vec(ctx, src0->type, src1->type, ne11);
+ GGML_ASSERT(!qx_needs_dequant || to_fp16_vk_0 != nullptr); // NOLINT
+ GGML_ASSERT(!qy_needs_dequant || to_fp16_vk_1 != nullptr); // NOLINT
+ GGML_ASSERT(dmmv != nullptr);
+
+ if (dryrun) {
+ const uint64_t x_sz_upd = x_sz * ne02 * ne03;
+ const uint64_t y_sz_upd = y_sz * ne12 * ne13;
+ if (
+ (qx_needs_dequant && x_sz_upd > ctx->device->max_memory_allocation_size) ||
+ (qy_needs_dequant && y_sz_upd > ctx->device->max_memory_allocation_size)) {
+ GGML_ABORT("Requested preallocation size is too large");
+ }
+ if (qx_needs_dequant && ctx->prealloc_size_x < x_sz_upd) {
+ ctx->prealloc_size_x = x_sz_upd;
+ }
+ if (qy_needs_dequant && ctx->prealloc_size_y < y_sz_upd) {
+ ctx->prealloc_size_y = y_sz_upd;
+ }
+
+ // Request descriptor sets
+ if (qx_needs_dequant) {
+ ggml_pipeline_request_descriptor_sets(ctx->device, to_fp16_vk_0, 1);
+ }
+ if (qy_needs_dequant) {
+ ggml_pipeline_request_descriptor_sets(ctx->device, to_fp16_vk_1, 1);
+ }
+ ggml_pipeline_request_descriptor_sets(ctx->device, dmmv, 1);
+ return;
+ }
+
+ vk_buffer d_D = dst_buf_ctx->dev_buffer;
+ const uint64_t d_buf_offset = vk_tensor_offset(dst) + dst->view_offs;
+ GGML_ASSERT(d_D != nullptr);
+ vk_buffer d_X;
+ uint64_t x_buf_offset = 0;
+ vk_buffer d_Y;
+ uint64_t y_buf_offset = 0;
+ if(!src0_uma) {
+ d_Qx = src0_buf_ctx->dev_buffer;
+ qx_buf_offset = vk_tensor_offset(src0) + src0->view_offs;
+ GGML_ASSERT(d_Qx != nullptr);
+ }
+ if(!src1_uma) {
+ d_Qy = src1_buf_ctx->dev_buffer;
+ qy_buf_offset = vk_tensor_offset(src1) + src1->view_offs;
+ GGML_ASSERT(d_Qy != nullptr);
+ }
+ if (qx_needs_dequant) {
+ d_X = ctx->prealloc_x;
+ } else {
+ d_X = d_Qx;
+ x_buf_offset = qx_buf_offset;
+ GGML_ASSERT(qx_sz == x_sz);
+ }
+ if (qy_needs_dequant) {
+ d_Y = ctx->prealloc_y;
+ } else {
+ d_Y = d_Qy;
+ y_buf_offset = qy_buf_offset;
+ GGML_ASSERT(qy_sz == y_sz);
+ }
+
+ if (x_non_contig) {
+ GGML_ASSERT(x_sz == ggml_vk_align_size(ggml_type_size(src0->type) * x_ne, ctx->device->properties.limits.minStorageBufferOffsetAlignment));
+ ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_0, src0, { d_Qx, qx_buf_offset, VK_WHOLE_SIZE }, { d_X, 0, VK_WHOLE_SIZE });
+ }
+ if (y_non_contig) {
+ GGML_ASSERT(y_sz == ggml_type_size(src1->type) * y_ne);
+ ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_1, src1, { d_Qy, qy_buf_offset, VK_WHOLE_SIZE }, { d_Y, 0, VK_WHOLE_SIZE });
+ }
+
+ // For batch_n, the A matrix is the same for each batch, and B/D use the row stride as the batch stride
+ uint32_t stride_batch_x = batch_n ? 0 : ne00*ne01;
+ uint32_t stride_batch_y = batch_n ? ne10 : (ne10*ne11);
+ uint32_t stride_batch_d = batch_n ? ne20 : (ne20*ne21);
+
+ if (!ggml_vk_dim01_contiguous(src0) && !qx_needs_dequant) {
+ stride_batch_x = src0->nb[0] / ggml_type_size(src0->type);
+ }
+
+ if (!ggml_vk_dim01_contiguous(src1) && !qy_needs_dequant) {
+ stride_batch_y = src1->nb[0] / ggml_type_size(src1->type);
+ }
+
+ const uint32_t max_groups_x = ctx->device->properties.limits.maxComputeWorkGroupCount[0];
+
+ uint32_t groups_x = ne01;
+ uint32_t groups_z = 1;
+
+ if (ne01 > max_groups_x) {
+ groups_z = 64;
+ groups_x = CEIL_DIV(groups_x, groups_z);
+ }
+
+ // compute
+ const vk_mat_vec_push_constants pc = {
+ (uint32_t)ne00, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)ne01,
+ stride_batch_x, stride_batch_y, stride_batch_d,
+ (uint32_t)ne02, (uint32_t)ne12, (uint32_t)r2, (uint32_t)r3,
+ };
+ ggml_vk_sync_buffers(subctx);
+ ggml_vk_dispatch_pipeline(ctx, subctx, dmmv,
+ { vk_subbuffer{ d_X, x_buf_offset, x_sz * ne02 * ne03 }, vk_subbuffer{ d_Y, y_buf_offset, y_sz * ne12 * ne13 }, vk_subbuffer{ d_D, d_buf_offset, d_sz * ne22 * ne23} },
+ sizeof(vk_mat_vec_push_constants), &pc, { groups_x, (uint32_t)(ne12 * ne13), groups_z });
+}
+
+static void ggml_vk_mul_mat_vec_p021_f16_f32(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
+ VK_LOG_DEBUG("ggml_vk_mul_mat_p021_f16_f32(" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
+ std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
+ std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3];
+ std::cerr << "), " << (dryrun ? "dryrun" : "") << ")");
+ GGML_ASSERT(ggml_is_permuted(src0) && ggml_is_permuted(src1));
+ GGML_ASSERT(src0->nb[0] <= src0->nb[1] && src0->nb[2] <= src0->nb[3]); // NOLINT
+ GGML_ASSERT(src1->nb[0] <= src1->nb[1] && src1->nb[2] <= src1->nb[3]); // NOLINT
+ GGML_ASSERT(src0->type == GGML_TYPE_F16);
+ GGML_ASSERT(src1->type == GGML_TYPE_F32);
+
+ const uint64_t ne00 = src0->ne[0];
+ const uint64_t ne01 = src0->ne[1];
+ const uint64_t ne02 = src0->ne[2];
+ // const uint64_t ne03 = src0->ne[3];
+
+ const uint64_t ne10 = src1->ne[0];
+ const uint64_t ne11 = src1->ne[1];
+ const uint64_t ne12 = src1->ne[2];
+ // const uint64_t ne13 = src1->ne[3];
+
+ GGML_ASSERT(ne11 == 1);
+
+ ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
+ ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
+ ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context;
+
+ vk_buffer d_Qy = nullptr;
+ size_t qy_buf_offset = 0;
+
+ bool src1_uma = false;
+
+ if (ctx->device->uma) {
+ ggml_vk_host_get(ctx->device, src1->data, d_Qy, qy_buf_offset);
+ src1_uma = d_Qy != nullptr;
+ }
+
+ const uint64_t x_ne = ne00 * ne01 * ne02;
+ const uint64_t y_ne = ne10 * ne11 * ne12;
+ const uint64_t d_ne = ne01 * ne11 * ne12;
+
+ const uint64_t qx_sz = ggml_vk_align_size(ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type), ctx->device->properties.limits.minStorageBufferOffsetAlignment);
+ const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type);
+ const uint64_t d_sz = sizeof(float) * d_ne;
+
+ if (dryrun) {
+ // Request descriptor sets
+ ggml_pipeline_request_descriptor_sets(ctx->device, ctx->device->pipeline_mul_mat_vec_p021_f16_f32, 1);
+ return;
+ }
+
+ vk_buffer d_D = dst_buf_ctx->dev_buffer;
+ const uint64_t d_buf_offset = vk_tensor_offset(dst) + dst->view_offs;
+ GGML_ASSERT(d_D != nullptr);
+ vk_buffer d_Qx = src0_buf_ctx->dev_buffer;
+ const uint64_t qx_buf_offset = vk_tensor_offset(src0) + src0->view_offs;
+ GGML_ASSERT(d_Qx != nullptr);
+ if (!src1_uma) {
+ d_Qy = src1_buf_ctx->dev_buffer;
+ qy_buf_offset = vk_tensor_offset(src1) + src1->view_offs;
+ GGML_ASSERT(d_Qy != nullptr);
+ }
+
+ const uint64_t qy_buffer_offset = (qy_buf_offset / ctx->device->properties.limits.minStorageBufferOffsetAlignment) * ctx->device->properties.limits.minStorageBufferOffsetAlignment;
+ const uint64_t qy_shader_offset = qy_buf_offset - qy_buffer_offset;
+
+ const uint64_t d_buffer_offset = (d_buf_offset / ctx->device->properties.limits.minStorageBufferOffsetAlignment) * ctx->device->properties.limits.minStorageBufferOffsetAlignment;
+ const uint64_t d_shader_offset = d_buf_offset - d_buffer_offset;
+
+ // compute
+ const std::array<uint32_t, 6> pc = { (uint32_t)ne00, (uint32_t)ne01, (uint32_t)ne02, (uint32_t)ne12, (uint32_t)(qy_shader_offset / ggml_type_size(src1->type)), (uint32_t)(d_shader_offset / ggml_type_size(dst->type)) };
+ ggml_vk_sync_buffers(subctx);
+ ggml_vk_dispatch_pipeline(ctx, subctx, ctx->device->pipeline_mul_mat_vec_p021_f16_f32, { vk_subbuffer{ d_Qx, qx_buf_offset, qx_sz }, vk_subbuffer{ d_Qy, qy_buffer_offset, qy_sz + qy_shader_offset }, vk_subbuffer{ d_D, d_buffer_offset, d_sz + d_shader_offset } }, 6 * sizeof(uint32_t), &pc, { 1, (uint32_t)ne01, (uint32_t)ne12 });
+}
+
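+// Note: qy_buf_offset / d_buf_offset above may not satisfy minStorageBufferOffsetAlignment, so
+// each is split into an aligned buffer offset (rounded down to the alignment) that goes into the
+// descriptor plus a small leftover passed to the shader as an element offset in the push
+// constants. Illustrative example with a 256-byte alignment: a byte offset of 1040 becomes
+// buffer_offset = 1024 and shader_offset = 16.
+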
+static void ggml_vk_mul_mat_vec_nc_f16_f32(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
+ VK_LOG_DEBUG("ggml_vk_mul_mat_nc_f16_f32((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
+ std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
+ std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3];
+ std::cerr << "), " << (dryrun ? "dryrun" : "") << ")");
+ GGML_ASSERT(!ggml_is_transposed(src0));
+ GGML_ASSERT(!ggml_is_transposed(src1));
+ GGML_ASSERT(!ggml_is_permuted(src0));
+ GGML_ASSERT(src0->type == GGML_TYPE_F16);
+ GGML_ASSERT(src1->type == GGML_TYPE_F32);
+
+ const uint64_t ne00 = src0->ne[0];
+ const uint64_t ne01 = src0->ne[1];
+ const uint64_t ne02 = src0->ne[2];
+ // const uint64_t ne03 = src0->ne[3];
+
+ const uint64_t nb01 = src0->nb[1];
+ const uint64_t nb02 = src0->nb[2];
+
+ // const uint64_t ne10 = src1->ne[0];
+ const uint64_t ne11 = src1->ne[1];
+ const uint64_t ne12 = src1->ne[2];
+ // const uint64_t ne13 = src1->ne[3];
+
+ GGML_ASSERT(ne11 == 1);
+
+ ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
+ ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
+ ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context;
+
+ vk_buffer d_Qy = nullptr;
+ size_t qy_buf_offset = 0;
+
+ bool src1_uma = false;
+
+ if (ctx->device->uma) {
+ ggml_vk_host_get(ctx->device, src1->data, d_Qy, qy_buf_offset);
+ src1_uma = d_Qy != nullptr;
+ }
+
+ const uint64_t d_ne = ne01 * ne11 * ne12;
+
+ const uint32_t row_stride_x = nb01 / sizeof(ggml_fp16_t);
+ const uint32_t channel_stride_x = nb02 / sizeof(ggml_fp16_t);
+
+ const uint64_t qx_sz = ggml_nbytes(src0);
+ const uint64_t qy_sz = ggml_nbytes(src1);
+ const uint64_t d_sz = sizeof(float) * d_ne;
+
+ if (dryrun) {
+ // Request descriptor sets
+ ggml_pipeline_request_descriptor_sets(ctx->device, ctx->device->pipeline_mul_mat_vec_nc_f16_f32, 1);
+ return;
+ }
+
+ vk_buffer d_D = dst_buf_ctx->dev_buffer;
+ const uint64_t d_buf_offset = vk_tensor_offset(dst) + dst->view_offs;
+ GGML_ASSERT(d_D != nullptr);
+ vk_buffer d_Qx = src0_buf_ctx->dev_buffer;
+ const uint64_t qx_buf_offset = vk_tensor_offset(src0) + src0->view_offs;
+ GGML_ASSERT(d_Qx != nullptr);
+ if (!src1_uma) {
+ d_Qy = src1_buf_ctx->dev_buffer;
+ qy_buf_offset = vk_tensor_offset(src1) + src1->view_offs;
+ GGML_ASSERT(d_Qy != nullptr);
+ }
+
+ const uint64_t qy_buffer_offset = (qy_buf_offset / ctx->device->properties.limits.minStorageBufferOffsetAlignment) * ctx->device->properties.limits.minStorageBufferOffsetAlignment;
+ const uint64_t qy_shader_offset = qy_buf_offset - qy_buffer_offset;
+
+ const uint64_t d_buffer_offset = (d_buf_offset / ctx->device->properties.limits.minStorageBufferOffsetAlignment) * ctx->device->properties.limits.minStorageBufferOffsetAlignment;
+ const uint64_t d_shader_offset = d_buf_offset - d_buffer_offset;
+
+ // compute
+ const std::array<uint32_t, 7> pc = { (uint32_t)ne00, (uint32_t)ne01, row_stride_x, channel_stride_x, (uint32_t)(ne12 / ne02), (uint32_t)(qy_shader_offset / ggml_type_size(src1->type)), (uint32_t)(d_shader_offset / ggml_type_size(dst->type)) };
+ ggml_vk_sync_buffers(subctx);
+ ggml_vk_dispatch_pipeline(ctx, subctx, ctx->device->pipeline_mul_mat_vec_nc_f16_f32,
+ { vk_subbuffer{ d_Qx, qx_buf_offset, qx_sz }, vk_subbuffer{ d_Qy, qy_buffer_offset, qy_sz + qy_shader_offset }, vk_subbuffer{ d_D, d_buffer_offset, d_sz + d_shader_offset } }, 7 * sizeof(uint32_t), &pc, { 1, (uint32_t)ne01, (uint32_t)ne12 });
+}
+
+static void ggml_vk_mul_mat(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
+ VK_LOG_DEBUG("ggml_vk_mul_mat(" << src0 << ", " << src1 << ", " << dst << ")");
+ if (src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && dst->ne[1] == 1 &&
+ // detect 0213 permutation, and batch size of 1
+ src0->nb[0] <= src0->nb[2] &&
+ src0->nb[2] <= src0->nb[1] &&
+ src0->nb[1] <= src0->nb[3] &&
+ src1->nb[0] <= src1->nb[2] &&
+ src1->nb[2] <= src1->nb[1] &&
+ src1->nb[1] <= src1->nb[3] &&
+ src0->ne[3] == 1 &&
+ src1->ne[3] == 1) {
+ ggml_vk_mul_mat_vec_p021_f16_f32(ctx, subctx, src0, src1, dst, dryrun);
+ } else if (src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && dst->ne[1] == 1 &&
+ !ggml_is_permuted(src0) && !ggml_is_permuted(src1)) {
+ ggml_vk_mul_mat_vec_nc_f16_f32(ctx, subctx, src0, src1, dst, dryrun);
+ // mul_mat_vec supports batching ne12*ne13 when ne11==1, or treating ne11 as the batch size (up to four)
+ // when ne12 and ne13 are one.
+ } else if ((dst->ne[1] == 1 || (dst->ne[1] <= mul_mat_vec_max_cols && src1->ne[2] * src1->ne[3] == 1)) &&
+ (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type))) {
+ ggml_vk_mul_mat_vec_q_f16(ctx, subctx, src0, src1, dst, dryrun);
+ } else {
+ ggml_vk_mul_mat_q_f16(ctx, subctx, src0, src1, dst, dryrun);
+ }
+}
+
+static void ggml_vk_mul_mat_id_q_f16(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * ids, ggml_tensor * dst, bool dryrun = false) {
+ VK_LOG_DEBUG("ggml_vk_mul_mat_id_q_f16((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
+ std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
+ std::cerr << "), (" << ids << ", name=" << ids->name << ", type=" << ids->type << ", ne0=" << ids->ne[0] << ", ne1=" << ids->ne[1] << ", ne2=" << ids->ne[2] << ", ne3=" << ids->ne[3] << ", nb0=" << ids->nb[0] << ", nb1=" << ids->nb[1] << ", nb2=" << ids->nb[2] << ", nb3=" << ids->nb[3];
+ std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3] << "),)");
+ GGML_ASSERT(ggml_vk_dim01_contiguous(src1) || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16); // NOLINT
+ GGML_ASSERT(ids->type == GGML_TYPE_I32);
+
+ const uint64_t ne00 = src0->ne[0];
+ const uint64_t ne01 = src0->ne[1];
+ const uint64_t ne02 = src0->ne[2];
+ const uint64_t ne03 = src0->ne[3];
+
+ const uint64_t ne10 = src1->ne[0];
+ const uint64_t ne11 = src1->ne[1];
+ const uint64_t ne12 = src1->ne[2];
+ const uint64_t ne13 = src1->ne[3];
+
+ const uint64_t nei0 = ids->ne[0];
+ const uint64_t nei1 = ids->ne[1];
+ GGML_ASSERT(nei0 * nei1 <= 3072);
+
+ const uint32_t nbi1 = ids->nb[1];
+ const uint32_t nbi2 = ids->nb[2];
+
+ const uint64_t ne20 = dst->ne[0];
+ const uint64_t ne21 = dst->ne[1];
+ const uint64_t ne22 = dst->ne[2];
+ const uint64_t ne23 = dst->ne[3];
+
+ const uint64_t n_as = ne02;
+
+ ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
+ ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
+ ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context;
+ ggml_backend_vk_buffer_context * ids_buf_ctx = (ggml_backend_vk_buffer_context *)ids->buffer->context;
+
+ vk_buffer d_Qx = nullptr;
+ size_t qx_buf_offset = 0;
+ vk_buffer d_Qy = nullptr;
+ size_t qy_buf_offset = 0;
+ vk_buffer d_ids = nullptr;
+ size_t ids_buf_offset = 0;
+
+ bool src0_uma = false;
+ bool src1_uma = false;
+ bool ids_uma = false;
+
+ if (ctx->device->uma) {
+ ggml_vk_host_get(ctx->device, src0->data, d_Qx, qx_buf_offset);
+ ggml_vk_host_get(ctx->device, src1->data, d_Qy, qy_buf_offset);
+ ggml_vk_host_get(ctx->device, ids->data, d_ids, ids_buf_offset);
+ src0_uma = d_Qx != nullptr;
+ src1_uma = d_Qy != nullptr;
+ ids_uma = d_ids != nullptr;
+ }
+
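+    // Work out whether the operands are usable as-is: a non-contiguous or unsupported src0 is not
+    // handled by this path (it aborts below), while src1 may first need to be converted to
+    // contiguous f16.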
+ const bool x_non_contig = !ggml_vk_dim01_contiguous(src0);
+ const bool y_non_contig = !ggml_vk_dim01_contiguous(src1);
+
+ const bool y_f32_kernel = src1->type == GGML_TYPE_F32 && !y_non_contig;
+
+ vk_matmul_pipeline mmp = ggml_vk_get_mul_mat_mat_id_pipeline(ctx, src0->type, y_non_contig ? GGML_TYPE_F16 : src1->type, (ggml_prec)dst->op_params[0]);
+
+ const bool qx_needs_dequant = mmp == nullptr || x_non_contig;
+ const bool qy_needs_dequant = (src1->type != GGML_TYPE_F16 && !y_f32_kernel) || y_non_contig;
+
+ if (qx_needs_dequant) {
+ GGML_ABORT("fatal error");
+ }
+
+ // Not implemented
+ GGML_ASSERT(y_non_contig || !qy_needs_dequant); // NOLINT
+
+ const uint64_t x_ne = ne01 * ne00;
+ const uint64_t y_ne = ne11 * ne10;
+ const uint64_t d_ne = ne21 * ne20;
+
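+    // Use the "aligned" matmul-id variant only when ne10 is already a multiple of the pipeline
+    // alignment and the workload is non-trivial (ne01 > 8, nei1 > 8).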
+ const uint32_t kpad = ggml_vk_align_size(ne10, ggml_vk_guess_matmul_id_pipeline_align(ctx, mmp, ne01, nei1));
+ const bool aligned = ne10 == kpad && ne01 > 8 && nei1 > 8;
+
+ vk_pipeline pipeline = ggml_vk_guess_matmul_id_pipeline(ctx, mmp, ne01, nei1, aligned);
+
+ const uint64_t qx_sz = ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type);
+ const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type);
+ const uint64_t x_sz = !qx_needs_dequant ? qx_sz : sizeof(ggml_fp16_t) * x_ne;
+ const uint64_t y_sz = y_f32_kernel ? sizeof(float) * y_ne : sizeof(ggml_fp16_t) * y_ne;
+ const uint64_t ids_sz = nbi2;
+ const uint64_t d_sz = sizeof(float) * d_ne;
+
+ vk_pipeline to_fp16_vk_0 = nullptr;
+ vk_pipeline to_fp16_vk_1 = nullptr;
+
+ if (x_non_contig) {
+ to_fp16_vk_0 = ggml_vk_get_cpy_pipeline(ctx, src0, nullptr, GGML_TYPE_F16);
+ } else {
+ to_fp16_vk_0 = ggml_vk_get_to_fp16(ctx, src0->type);
+ }
+ if (y_non_contig) {
+ to_fp16_vk_1 = ggml_vk_get_cpy_pipeline(ctx, src1, nullptr, GGML_TYPE_F16);
+ } else {
+ to_fp16_vk_1 = ggml_vk_get_to_fp16(ctx, src1->type);
+ }
+ GGML_ASSERT(!qx_needs_dequant || to_fp16_vk_0 != nullptr); // NOLINT
+ GGML_ASSERT(!qy_needs_dequant || to_fp16_vk_1 != nullptr); // NOLINT
+
+ if (dryrun) {
+ const uint64_t x_sz_upd = x_sz * ne02 * ne03;
+ const uint64_t y_sz_upd = y_sz * ne12 * ne13;
+ if (
+ (qx_needs_dequant && x_sz_upd > ctx->device->max_memory_allocation_size) ||
+ (qy_needs_dequant && y_sz_upd > ctx->device->max_memory_allocation_size)) {
+ GGML_ABORT("Requested preallocation size is too large");
+ }
+ if (qx_needs_dequant && ctx->prealloc_size_x < x_sz_upd) {
+ ctx->prealloc_size_x = x_sz_upd;
+ }
+ if (qy_needs_dequant && ctx->prealloc_size_y < y_sz_upd) {
+ ctx->prealloc_size_y = y_sz_upd;
+ }
+
+ // Request descriptor sets
+ ggml_pipeline_request_descriptor_sets(ctx->device, pipeline, 1);
+ if (qx_needs_dequant) {
+ ggml_pipeline_request_descriptor_sets(ctx->device, to_fp16_vk_0, 1);
+ }
+ if (qy_needs_dequant) {
+ ggml_pipeline_request_descriptor_sets(ctx->device, to_fp16_vk_1, 1);
+ }
+ return;
+ }
+
+ vk_buffer d_D = dst_buf_ctx->dev_buffer;
+ const uint64_t d_buf_offset = vk_tensor_offset(dst) + dst->view_offs;
+ GGML_ASSERT(d_D != nullptr);
+ vk_buffer d_X;
+ uint64_t x_buf_offset = 0;
+ vk_buffer d_Y;
+ uint64_t y_buf_offset = 0;
+ if (!src0_uma) {
+ d_Qx = src0_buf_ctx->dev_buffer;
+ qx_buf_offset = vk_tensor_offset(src0) + src0->view_offs;
+ GGML_ASSERT(d_Qx != nullptr);
+ }
+ if (!src1_uma) {
+ d_Qy = src1_buf_ctx->dev_buffer;
+ qy_buf_offset = vk_tensor_offset(src1) + src1->view_offs;
+ GGML_ASSERT(d_Qy != nullptr);
+ }
+ if (!ids_uma) {
+ d_ids = ids_buf_ctx->dev_buffer;
+ ids_buf_offset = vk_tensor_offset(ids) + ids->view_offs;
+ GGML_ASSERT(d_ids != nullptr);
+ }
+ if (qx_needs_dequant) {
+ d_X = ctx->prealloc_x;
+ GGML_ASSERT(d_X->size >= x_sz * ne02 * ne03);
+ } else {
+ d_X = d_Qx;
+ x_buf_offset = qx_buf_offset;
+ GGML_ASSERT(qx_sz == x_sz);
+ }
+ if (qy_needs_dequant) {
+ d_Y = ctx->prealloc_y;
+ GGML_ASSERT(d_Y->size >= y_sz * ne02 * ne03);
+ } else {
+ d_Y = d_Qy;
+ y_buf_offset = qy_buf_offset;
+ GGML_ASSERT(qy_sz == y_sz);
+ }
+
+ if (x_non_contig) {
+ ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_0, src0, { d_Qx, qx_buf_offset, VK_WHOLE_SIZE }, { d_X, 0, VK_WHOLE_SIZE });
+ } else if (qx_needs_dequant) {
+ const std::vector<uint32_t> pc = { (uint32_t)ne01, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)(ggml_nelements(src0)) };
+ ggml_vk_sync_buffers(subctx);
+ ggml_vk_dispatch_pipeline(ctx, subctx, to_fp16_vk_0,
+ { vk_subbuffer{ d_Qx, qx_buf_offset, qx_sz * ne02 * ne03 }, vk_subbuffer{ d_X, 0, x_sz * ne02 * ne03 } }, pc.size() * sizeof(uint32_t), pc.data(), { (uint32_t)(x_ne * ne02 * ne03), 1, 1});
+ }
+ if (y_non_contig) {
+ ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_1, src1, { d_Qy, qy_buf_offset, VK_WHOLE_SIZE }, { d_Y, 0, VK_WHOLE_SIZE });
+ }
+
+ uint32_t stride_batch_x = ne00*ne01;
+ uint32_t stride_batch_y = ne10*ne11;
+
+ if (!ggml_vk_dim01_contiguous(src0) && !qx_needs_dequant) {
+ stride_batch_x = src0->nb[0] / ggml_type_size(src0->type);
+ }
+
+ if (!ggml_vk_dim01_contiguous(src1) && !qy_needs_dequant) {
+ stride_batch_y = src1->nb[0] / ggml_type_size(src1->type);
+ }
+
+ // compute
+ ggml_vk_matmul_id(
+ ctx, subctx, pipeline,
+ { d_X, x_buf_offset, x_sz * ne02 * ne03 }, { d_Y, y_buf_offset, y_sz * ne12 * ne13 },
+ { d_D, d_buf_offset, d_sz * ne22 * ne23 }, { d_ids, ids_buf_offset, ids_sz },
+ ne01, ne21, ne10, ne10, ne10, ne01,
+ stride_batch_x, stride_batch_y, ne20*ne21,
+ n_as, nei0, nei1, nbi1 / ggml_type_size(ids->type), ne11
+ ); // NOLINT
+}
+
+static void ggml_vk_mul_mat_vec_id_q_f16(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * ids, ggml_tensor * dst, bool dryrun = false) {
+ VK_LOG_DEBUG("ggml_vk_mul_mat_vec_id_q_f16((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
+ std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
+ std::cerr << "), (" << ids << ", name=" << ids->name << ", type=" << ids->type << ", ne0=" << ids->ne[0] << ", ne1=" << ids->ne[1] << ", ne2=" << ids->ne[2] << ", ne3=" << ids->ne[3] << ", nb0=" << ids->nb[0] << ", nb1=" << ids->nb[1] << ", nb2=" << ids->nb[2] << ", nb3=" << ids->nb[3];
+ std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3];
+ std::cerr << "), " << (dryrun ? "dryrun" : "") << ")");
+ GGML_ASSERT(ggml_vk_dim01_contiguous(src0) || src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); // NOLINT
+ GGML_ASSERT(ggml_vk_dim01_contiguous(src1) || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16); // NOLINT
+ GGML_ASSERT(ids->type == GGML_TYPE_I32);
+
+ const uint64_t ne00 = src0->ne[0];
+ const uint64_t ne01 = src0->ne[1];
+ const uint64_t ne02 = src0->ne[2];
+ const uint64_t ne03 = src0->ne[3];
+
+ const uint64_t ne10 = src1->ne[0];
+ const uint64_t ne11 = src1->ne[1];
+ const uint64_t ne12 = src1->ne[2];
+ const uint64_t ne13 = src1->ne[3];
+
+ const uint64_t nei0 = ids->ne[0];
+ const uint64_t nei1 = ids->ne[1];
+
+ const uint64_t nbi2 = ids->nb[2];
+
+ GGML_ASSERT(nei1 == 1);
+
+ const uint64_t ne20 = dst->ne[0];
+ const uint64_t ne21 = dst->ne[1];
+ const uint64_t ne22 = dst->ne[2];
+ const uint64_t ne23 = dst->ne[3];
+
+ ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
+ ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
+ ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context;
+ ggml_backend_vk_buffer_context * ids_buf_ctx = (ggml_backend_vk_buffer_context *)ids->buffer->context;
+
+ vk_buffer d_Qx = nullptr;
+ size_t qx_buf_offset = 0;
+ vk_buffer d_Qy = nullptr;
+ size_t qy_buf_offset = 0;
+ vk_buffer d_ids = nullptr;
+ size_t ids_buf_offset = 0;
+
+ bool src0_uma = false;
+ bool src1_uma = false;
+ bool ids_uma = false;
+
+ if (ctx->device->uma) {
+ ggml_vk_host_get(ctx->device, src0->data, d_Qx, qx_buf_offset);
+ ggml_vk_host_get(ctx->device, src1->data, d_Qy, qy_buf_offset);
+ ggml_vk_host_get(ctx->device, ids->data, d_ids, ids_buf_offset);
+ src0_uma = d_Qx != nullptr;
+ src1_uma = d_Qy != nullptr;
+ ids_uma = d_ids != nullptr;
+ }
+
+ const bool x_non_contig = !ggml_vk_dim01_contiguous(src0);
+ const bool y_non_contig = !ggml_vk_dim01_contiguous(src1);
+
+ const bool f16_f32_kernel = src1->type == GGML_TYPE_F32;
+
+ const bool qx_needs_dequant = x_non_contig;
+ const bool qy_needs_dequant = (src1->type != GGML_TYPE_F16 && !f16_f32_kernel) || y_non_contig;
+
+ // Not implemented
+ GGML_ASSERT(y_non_contig || !qy_needs_dequant); // NOLINT
+
+ const uint64_t x_ne = ne01 * ne00;
+ const uint64_t y_ne = ne11 * ne10;
+ const uint64_t d_ne = ne21 * ne20;
+
+ const uint64_t qx_sz = ggml_vk_align_size(ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type), ctx->device->properties.limits.minStorageBufferOffsetAlignment);
+ const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type);
+ const uint64_t x_sz = x_non_contig ? ggml_vk_align_size(ggml_type_size(src0->type) * x_ne, ctx->device->properties.limits.minStorageBufferOffsetAlignment) : qx_sz;
+ const uint64_t y_sz = f16_f32_kernel ? sizeof(float) * y_ne : sizeof(ggml_fp16_t) * y_ne;
+ const uint64_t ids_sz = nbi2;
+ const uint64_t d_sz = sizeof(float) * d_ne;
+
+ vk_pipeline to_fp16_vk_0 = nullptr;
+ vk_pipeline to_fp16_vk_1 = nullptr;
+ if (x_non_contig) {
+ to_fp16_vk_0 = ggml_vk_get_cpy_pipeline(ctx, src0, nullptr, src0->type);
+ }
+ if (y_non_contig) {
+ to_fp16_vk_1 = ggml_vk_get_cpy_pipeline(ctx, src1, nullptr, src1->type);
+ } else {
+ to_fp16_vk_1 = ggml_vk_get_to_fp16(ctx, src1->type);
+ }
+ vk_pipeline dmmv = ggml_vk_get_dequantize_mul_mat_vec_id(ctx, src0->type, src1->type);
+ GGML_ASSERT(!qx_needs_dequant || to_fp16_vk_0 != nullptr); // NOLINT
+ GGML_ASSERT(!qy_needs_dequant || to_fp16_vk_1 != nullptr); // NOLINT
+ GGML_ASSERT(dmmv != nullptr);
+
+ if (dryrun) {
+ const uint64_t x_sz_upd = x_sz * ne02 * ne03;
+ const uint64_t y_sz_upd = y_sz * ne12 * ne13;
+ if (
+ (qx_needs_dequant && x_sz_upd > ctx->device->max_memory_allocation_size) ||
+ (qy_needs_dequant && y_sz_upd > ctx->device->max_memory_allocation_size)) {
+ GGML_ABORT("Requested preallocation size is too large");
+ }
+ if (qx_needs_dequant && ctx->prealloc_size_x < x_sz_upd) {
+ ctx->prealloc_size_x = x_sz_upd;
+ }
+ if (qy_needs_dequant && ctx->prealloc_size_y < y_sz_upd) {
+ ctx->prealloc_size_y = y_sz_upd;
+ }
+
+ // Request descriptor sets
+ if (qx_needs_dequant) {
+ ggml_pipeline_request_descriptor_sets(ctx->device, to_fp16_vk_0, 1);
+ }
+ if (qy_needs_dequant) {
+ ggml_pipeline_request_descriptor_sets(ctx->device, to_fp16_vk_1, 1);
+ }
+ ggml_pipeline_request_descriptor_sets(ctx->device, dmmv, 1);
+ return;
+ }
+
+ vk_buffer d_D = dst_buf_ctx->dev_buffer;
+ const uint64_t d_buf_offset = vk_tensor_offset(dst) + dst->view_offs;
+ GGML_ASSERT(d_D != nullptr);
+ vk_buffer d_X;
+ uint64_t x_buf_offset = 0;
+ vk_buffer d_Y;
+ uint64_t y_buf_offset = 0;
+    if (!src0_uma) {
+ d_Qx = src0_buf_ctx->dev_buffer;
+ qx_buf_offset = vk_tensor_offset(src0) + src0->view_offs;
+ GGML_ASSERT(d_Qx != nullptr);
+ }
+    if (!src1_uma) {
+ d_Qy = src1_buf_ctx->dev_buffer;
+ qy_buf_offset = vk_tensor_offset(src1) + src1->view_offs;
+ GGML_ASSERT(d_Qy != nullptr);
+ }
+    if (!ids_uma) {
+ d_ids = ids_buf_ctx->dev_buffer;
+ ids_buf_offset = vk_tensor_offset(ids) + ids->view_offs;
+ GGML_ASSERT(d_ids != nullptr);
+ }
+ if (qx_needs_dequant) {
+ d_X = ctx->prealloc_x;
+ } else {
+ d_X = d_Qx;
+ x_buf_offset = qx_buf_offset;
+ GGML_ASSERT(qx_sz == x_sz);
+ }
+ if (qy_needs_dequant) {
+ d_Y = ctx->prealloc_y;
+ } else {
+ d_Y = d_Qy;
+ y_buf_offset = qy_buf_offset;
+ GGML_ASSERT(qy_sz == y_sz);
+ }
+
+ if (x_non_contig) {
+ GGML_ASSERT(x_sz == ggml_vk_align_size(ggml_type_size(src0->type) * x_ne, ctx->device->properties.limits.minStorageBufferOffsetAlignment));
+ ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_0, src0, { d_Qx, qx_buf_offset, VK_WHOLE_SIZE }, { d_X, 0, VK_WHOLE_SIZE });
+ }
+ if (y_non_contig) {
+ GGML_ASSERT(y_sz == ggml_type_size(src1->type) * y_ne);
+ ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_1, src1, { d_Qy, qy_buf_offset, VK_WHOLE_SIZE }, { d_Y, 0, VK_WHOLE_SIZE });
+ }
+
+ uint32_t stride_batch_y = ne10*ne11;
+
+ if (!ggml_vk_dim01_contiguous(src1) && !qy_needs_dequant) {
+ stride_batch_y = src1->nb[0] / ggml_type_size(src1->type);
+ }
+
+ const uint32_t max_groups_x = ctx->device->properties.limits.maxComputeWorkGroupCount[0];
+
+ uint32_t groups_x = ne01;
+ uint32_t groups_z = 1;
+
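+    // Vulkan limits the number of workgroups per dispatch dimension; if the row count exceeds the
+    // X limit, spread the rows over 64 slices in Z and shrink X accordingly.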
+ if (ne01 > max_groups_x) {
+ groups_z = 64;
+ groups_x = CEIL_DIV(groups_x, groups_z);
+ }
+
+ // compute
+ const vk_mat_vec_id_push_constants pc = {
+ (uint32_t)ne00, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)ne01,
+ (uint32_t)x_ne, stride_batch_y, (uint32_t)(ne20*ne21),
+ (uint32_t)nei0, (uint32_t)ne11,
+ };
+ ggml_vk_sync_buffers(subctx);
+ ggml_vk_dispatch_pipeline(ctx, subctx, dmmv,
+ { vk_subbuffer{ d_X, x_buf_offset, x_sz * ne02 * ne03 },
+ vk_subbuffer{ d_Y, y_buf_offset, y_sz * ne12 * ne13 }, vk_subbuffer{ d_D, d_buf_offset, d_sz * ne22 * ne23}, vk_subbuffer{ d_ids, ids_buf_offset, ids_sz } },
+ sizeof(vk_mat_vec_id_push_constants), &pc, { groups_x, (uint32_t)nei0, groups_z });
+}
+
+static void ggml_vk_mul_mat_id(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst, bool dryrun = false) {
+ VK_LOG_DEBUG("ggml_vk_mul_mat_id(" << src0 << ", " << src1 << ", " << src2 << ", " << dst << ")");
+ if (src2->ne[1] == 1 && (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type))) {
+ ggml_vk_mul_mat_vec_id_q_f16(ctx, subctx, src0, src1, src2, dst, dryrun);
+ } else {
+ ggml_vk_mul_mat_id_q_f16(ctx, subctx, src0, src1, src2, dst, dryrun);
+ }
+}
+
+static void ggml_vk_flash_attn(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * q, const ggml_tensor * k, const ggml_tensor * v, const ggml_tensor * mask, ggml_tensor * dst, bool dryrun = false) {
+ VK_LOG_DEBUG("ggml_vk_flash_attn((" << q << ", name=" << q->name << ", type=" << q->type << ", ne0=" << q->ne[0] << ", ne1=" << q->ne[1] << ", ne2=" << q->ne[2] << ", ne3=" << q->ne[3] << ", nb0=" << q->nb[0] << ", nb1=" << q->nb[1] << ", nb2=" << q->nb[2] << ", nb3=" << q->nb[3];
+ std::cerr << "), (" << k << ", name=" << k->name << ", type=" << k->type << ", ne0=" << k->ne[0] << ", ne1=" << k->ne[1] << ", ne2=" << k->ne[2] << ", ne3=" << k->ne[3] << ", nb0=" << k->nb[0] << ", nb1=" << k->nb[1] << ", nb2=" << k->nb[2] << ", nb3=" << k->nb[3];
+ std::cerr << "), (" << v << ", name=" << v->name << ", type=" << v->type << ", ne0=" << v->ne[0] << ", ne1=" << v->ne[1] << ", ne2=" << v->ne[2] << ", ne3=" << v->ne[3] << ", nb0=" << v->nb[0] << ", nb1=" << v->nb[1] << ", nb2=" << v->nb[2] << ", nb3=" << v->nb[3];
+ std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3];
+ std::cerr << "), " << (dryrun ? "dryrun" : "") << ")");
+
+ GGML_TENSOR_LOCALS(int64_t, neq, q, ne)
+ GGML_TENSOR_LOCALS(size_t, nbq, q, nb)
+ GGML_TENSOR_LOCALS(int64_t, nek, k, ne)
+ GGML_TENSOR_LOCALS(size_t, nbk, k, nb)
+ GGML_TENSOR_LOCALS(int64_t, nev, v, ne)
+ GGML_TENSOR_LOCALS(size_t, nbv, v, nb)
+ GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
+ GGML_TENSOR_LOCALS(size_t, nb, dst, nb)
+
+ const uint32_t nem1 = mask ? mask->ne[1] : 0;
+ const uint32_t nbm1 = mask ? mask->nb[1] : 0;
+
+ const uint32_t D = neq0;
+ const uint32_t N = neq1;
+ const uint32_t KV = nek1;
+
+ GGML_ASSERT(ne0 == D);
+ GGML_ASSERT(ne2 == N);
+
+ // input tensor rows must be contiguous
+ GGML_ASSERT(nbq0 == ggml_type_size(q->type));
+ GGML_ASSERT(nbk0 == ggml_type_size(k->type));
+ GGML_ASSERT(nbv0 == ggml_type_size(v->type));
+
+ GGML_ASSERT(neq0 == D);
+ GGML_ASSERT(nek0 == D);
+ GGML_ASSERT(nev0 == D);
+
+ GGML_ASSERT(neq1 == N);
+ GGML_ASSERT(nev0 == D);
+
+ GGML_ASSERT(nev1 == nek1);
+
+ // dst cannot be transposed or permuted
+ GGML_ASSERT(nb0 == sizeof(float));
+ GGML_ASSERT(nb0 <= nb1);
+ GGML_ASSERT(nb1 <= nb2);
+ GGML_ASSERT(nb2 <= nb3);
+
+ assert(dst->type == GGML_TYPE_F32);
+ assert(q->type == GGML_TYPE_F32);
+ assert(k->type == v->type);
+
+ vk_pipeline *pipelines;
+ // XXX TODO other backends may be changing accumulator precision to default to f32 soon
+ bool f32acc = dst->op_params[3] == GGML_PREC_F32;
+ bool small_rows = N <= flash_attention_num_small_rows;
+ switch (D) {
+ case 64: pipelines = &ctx->device->pipeline_flash_attn_f32_f16_D64[k->type][f32acc][small_rows][0]; break;
+ case 80: pipelines = &ctx->device->pipeline_flash_attn_f32_f16_D80[k->type][f32acc][small_rows][0]; break;
+ case 96: pipelines = &ctx->device->pipeline_flash_attn_f32_f16_D96[k->type][f32acc][small_rows][0]; break;
+ case 112: pipelines = &ctx->device->pipeline_flash_attn_f32_f16_D112[k->type][f32acc][small_rows][0]; break;
+ case 128: pipelines = &ctx->device->pipeline_flash_attn_f32_f16_D128[k->type][f32acc][small_rows][0]; break;
+ case 256: pipelines = &ctx->device->pipeline_flash_attn_f32_f16_D256[k->type][f32acc][small_rows][0]; break;
+ default:
+ assert(!"unsupported D value");
+ return;
+ }
+ assert(pipelines);
+
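+    // pipelines[0] is the unaligned variant, pipelines[1] the aligned one, which is only usable
+    // when KV is a multiple of its alignment.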
+ bool aligned = (KV % pipelines[1]->align) == 0;
+ vk_pipeline pipeline = pipelines[aligned];
+ assert(pipeline);
+
+ if (dryrun) {
+ // Request descriptor sets
+ ggml_pipeline_request_descriptor_sets(ctx->device, pipeline, 1);
+ return;
+ }
+
+ float scale = 1.0f;
+ float max_bias = 0.0f;
+ float logit_softcap = 0.0f;
+
+ memcpy(&scale, (const float *) dst->op_params + 0, sizeof(float));
+ memcpy(&max_bias, (const float *) dst->op_params + 1, sizeof(float));
+ memcpy(&logit_softcap, (const float *) dst->op_params + 2, sizeof(float));
+
+ if (logit_softcap != 0) {
+ scale /= logit_softcap;
+ }
+
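+    // ALiBi slope parameters: m0 and m1 follow the standard ALiBi formulation based on max_bias
+    // and the head count.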
+ const uint32_t n_head_kv = neq2;
+ const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head_kv));
+ const float m0 = powf(2.0f, -(max_bias ) / n_head_log2);
+ const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
+
+ ggml_vk_sync_buffers(subctx);
+
+ vk_buffer d_Q = nullptr, d_K = nullptr, d_V = nullptr, d_D = nullptr, d_M = nullptr;
+ size_t q_buf_offset = 0, k_buf_offset = 0, v_buf_offset = 0, d_buf_offset = 0, m_buf_offset = 0;
+
+ bool Q_uma = false, K_uma = false, V_uma = false, D_uma = false, M_uma = false;
+
+ if (ctx->device->uma) {
+ ggml_vk_host_get(ctx->device, q->data, d_Q, q_buf_offset);
+        ggml_vk_host_get(ctx->device, k->data, d_K, k_buf_offset);
+        ggml_vk_host_get(ctx->device, v->data, d_V, v_buf_offset);
+        ggml_vk_host_get(ctx->device, dst->data, d_D, d_buf_offset);
+ Q_uma = d_Q != nullptr;
+ K_uma = d_K != nullptr;
+ V_uma = d_V != nullptr;
+ D_uma = d_D != nullptr;
+ if (mask) {
+            ggml_vk_host_get(ctx->device, mask->data, d_M, m_buf_offset);
+ M_uma = d_M != nullptr;
+ }
+ }
+
+ ggml_backend_vk_buffer_context * d_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
+ ggml_backend_vk_buffer_context * q_buf_ctx = (ggml_backend_vk_buffer_context *)q->buffer->context;
+ ggml_backend_vk_buffer_context * k_buf_ctx = (ggml_backend_vk_buffer_context *)k->buffer->context;
+ ggml_backend_vk_buffer_context * v_buf_ctx = (ggml_backend_vk_buffer_context *)v->buffer->context;
+
+ if (!Q_uma) {
+ d_Q = q_buf_ctx->dev_buffer;
+ q_buf_offset = vk_tensor_offset(q) + q->view_offs;
+ }
+ if (!K_uma) {
+ d_K = k_buf_ctx->dev_buffer;
+ k_buf_offset = vk_tensor_offset(k) + k->view_offs;
+ }
+ if (!V_uma) {
+ d_V = v_buf_ctx->dev_buffer;
+ v_buf_offset = vk_tensor_offset(v) + v->view_offs;
+ }
+ if (!D_uma) {
+ d_D = d_buf_ctx->dev_buffer;
+ d_buf_offset = vk_tensor_offset(dst) + dst->view_offs;
+ }
+
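+    // The pipeline always expects a mask binding; when no mask tensor is given, the Q buffer is
+    // bound in the mask slot as a placeholder.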
+ if (!M_uma) {
+ d_M = d_Q;
+ m_buf_offset = q_buf_offset;
+ if (mask) {
+ ggml_backend_vk_buffer_context * m_buf_ctx = (ggml_backend_vk_buffer_context*)mask->buffer->context;
+ d_M = m_buf_ctx->dev_buffer;
+ m_buf_offset = vk_tensor_offset(mask) + mask->view_offs;
+ }
+ }
+
+ const vk_flash_attn_push_constants pc = { N, KV, (uint32_t)ne1, (uint32_t)ne2, (uint32_t)ne3, (uint32_t)neq2, (uint32_t)neq3, (uint32_t)nek2, (uint32_t)nek3, (uint32_t)nev2, (uint32_t)nev3, nem1, (uint32_t)nbq2, (uint32_t)nbq3, (uint32_t)nbk2, (uint32_t)nbk3, (uint32_t)nbv2, (uint32_t)nbv3, nbm1, scale, max_bias, logit_softcap, mask != nullptr, n_head_log2, m0, m1 };
+ ggml_vk_dispatch_pipeline(ctx, subctx, pipeline,
+ {
+ vk_subbuffer{d_Q, q_buf_offset, VK_WHOLE_SIZE},
+ vk_subbuffer{d_K, k_buf_offset, VK_WHOLE_SIZE},
+ vk_subbuffer{d_V, v_buf_offset, VK_WHOLE_SIZE},
+ vk_subbuffer{d_M, m_buf_offset, VK_WHOLE_SIZE},
+ vk_subbuffer{d_D, d_buf_offset, VK_WHOLE_SIZE},
+ },
+ sizeof(vk_flash_attn_push_constants), &pc, { (uint32_t)neq1, (uint32_t)neq2, (uint32_t)neq3 });
+}
+
+static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst, ggml_op op) {
+ switch (op) {
+ case GGML_OP_GET_ROWS:
+ GGML_ASSERT(src1->type == GGML_TYPE_I32);
+ if (dst->type == GGML_TYPE_F16) {
+ return ctx->device->pipeline_get_rows[src0->type];
+ }
+ if (dst->type == GGML_TYPE_F32) {
+ return ctx->device->pipeline_get_rows_f32[src0->type];
+ }
+ return nullptr;
+ case GGML_OP_ACC:
+ if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+ return ctx->device->pipeline_acc_f32;
+ }
+ return nullptr;
+ case GGML_OP_ADD:
+ if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+ return ggml_are_same_shape(src0, src1) ? ctx->device->pipeline_add_f32_norepeat : ctx->device->pipeline_add_f32;
+ }
+ if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F16) {
+ return ggml_are_same_shape(src0, src1) ? ctx->device->pipeline_add_f16_f32_f16_norepeat : ctx->device->pipeline_add_f16_f32_f16;
+ }
+ return nullptr;
+ case GGML_OP_MUL:
+ if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+ return ggml_are_same_shape(src0, src1) ? ctx->device->pipeline_mul_f32_norepeat : ctx->device->pipeline_mul_f32;
+ }
+ return nullptr;
+ case GGML_OP_DIV:
+ if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+ return ggml_are_same_shape(src0, src1) ? ctx->device->pipeline_div_f32_norepeat : ctx->device->pipeline_div_f32;
+ }
+ return nullptr;
+ case GGML_OP_CONCAT:
+ if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+ return ctx->device->pipeline_concat_f32;
+ }
+ if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) {
+ return ctx->device->pipeline_concat_f16;
+ }
+ if (src0->type == GGML_TYPE_I32 && src1->type == GGML_TYPE_I32 && dst->type == GGML_TYPE_I32) {
+ return ctx->device->pipeline_concat_i32;
+ }
+ return nullptr;
+ case GGML_OP_UPSCALE:
+ if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+ return ctx->device->pipeline_upscale_f32;
+ }
+ return nullptr;
+ case GGML_OP_SCALE:
+ if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+ return ctx->device->pipeline_scale_f32;
+ }
+ return nullptr;
+ case GGML_OP_SQR:
+ if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+ return ctx->device->pipeline_sqr_f32;
+ }
+ return nullptr;
+ case GGML_OP_SIN:
+ if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+ return ctx->device->pipeline_sin_f32;
+ }
+ return nullptr;
+ case GGML_OP_COS:
+ if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+ return ctx->device->pipeline_cos_f32;
+ }
+ return nullptr;
+ case GGML_OP_CLAMP:
+ if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+ return ctx->device->pipeline_clamp_f32;
+ }
+ return nullptr;
+ case GGML_OP_PAD:
+ if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+ return ctx->device->pipeline_pad_f32;
+ }
+ return nullptr;
+ case GGML_OP_REPEAT:
+ if (ggml_type_size(src0->type) == sizeof(float) && ggml_type_size(dst->type) == sizeof(float)) {
+ return ctx->device->pipeline_repeat_f32;
+ }
+ return nullptr;
+ case GGML_OP_CPY:
+ case GGML_OP_CONT:
+ case GGML_OP_DUP:
+ return ggml_vk_get_cpy_pipeline(ctx, src0, dst, dst->type);
+ case GGML_OP_NORM:
+ if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+ return ctx->device->pipeline_norm_f32;
+ }
+ return nullptr;
+ case GGML_OP_GROUP_NORM:
+ if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+ return ctx->device->pipeline_group_norm_f32;
+ }
+ return nullptr;
+ case GGML_OP_RMS_NORM:
+ if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+ return ctx->device->pipeline_rms_norm_f32;
+ }
+ return nullptr;
+ case GGML_OP_UNARY:
+ switch (ggml_get_unary_op(dst)) {
+ case GGML_UNARY_OP_SILU:
+ if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+ return ctx->device->pipeline_silu_f32;
+ }
+ break;
+ case GGML_UNARY_OP_GELU:
+ if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+ return ctx->device->pipeline_gelu_f32;
+ }
+ break;
+ case GGML_UNARY_OP_GELU_QUICK:
+ if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+ return ctx->device->pipeline_gelu_quick_f32;
+ }
+ break;
+ case GGML_UNARY_OP_RELU:
+ if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+ return ctx->device->pipeline_relu_f32;
+ }
+ break;
+ case GGML_UNARY_OP_TANH:
+ if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+ return ctx->device->pipeline_tanh_f32;
+ }
+ break;
+ default:
+ break;
+ }
+ return nullptr;
+ case GGML_OP_DIAG_MASK_INF:
+ if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+ return ctx->device->pipeline_diag_mask_inf_f32;
+ }
+ return nullptr;
+ case GGML_OP_SOFT_MAX:
+ GGML_ASSERT(!src1 || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16);
+
+ if (src0->type == GGML_TYPE_F32 && (src1 == nullptr || src1->type == GGML_TYPE_F32) && dst->type == GGML_TYPE_F32) {
+ return src0->ne[0] > 1024 ? ctx->device->pipeline_soft_max_f32_wg512 : ctx->device->pipeline_soft_max_f32;
+ }
+ if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F32) {
+ return src0->ne[0] > 1024 ? ctx->device->pipeline_soft_max_f32_f16_wg512 : ctx->device->pipeline_soft_max_f32_f16;
+ }
+ return nullptr;
+ case GGML_OP_ROPE:
+ {
+ const int mode = ((const int32_t *) dst->op_params)[2];
+ const bool is_neox = mode & GGML_ROPE_TYPE_NEOX;
+
+ if (is_neox) {
+ if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+ return ctx->device->pipeline_rope_neox_f32;
+ }
+ if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) {
+ return ctx->device->pipeline_rope_neox_f16;
+ }
+ } else {
+ if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+ return ctx->device->pipeline_rope_norm_f32;
+ }
+ if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) {
+ return ctx->device->pipeline_rope_norm_f16;
+ }
+ }
+ return nullptr;
+ }
+ case GGML_OP_ARGSORT:
+ if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_I32) {
+ return ctx->device->pipeline_argsort_f32;
+ }
+ return nullptr;
+ case GGML_OP_SUM_ROWS:
+ if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+ return ctx->device->pipeline_sum_rows_f32;
+ }
+ return nullptr;
+ case GGML_OP_IM2COL:
+ if (src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+ return ctx->device->pipeline_im2col_f32;
+ }
+ if (src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F16) {
+ return ctx->device->pipeline_im2col_f32_f16;
+ }
+ return nullptr;
+ case GGML_OP_TIMESTEP_EMBEDDING:
+ if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+ return ctx->device->pipeline_timestep_embedding_f32;
+ }
+ return nullptr;
+ case GGML_OP_POOL_2D:
+ if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+ return ctx->device->pipeline_pool2d_f32;
+ }
+ return nullptr;
+ case GGML_OP_RWKV_WKV6:
+ if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+ return ctx->device->pipeline_rwkv_wkv6_f32;
+ }
+ return nullptr;
+ case GGML_OP_LEAKY_RELU:
+ if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+ return ctx->device->pipeline_leaky_relu_f32;
+ }
+ return nullptr;
+ default:
+ return nullptr;
+ }
+
+ GGML_UNUSED(src2);
+}
+
+static bool ggml_vk_op_supports_incontiguous(ggml_op op) {
+ switch (op) {
+ case GGML_OP_CPY:
+ case GGML_OP_GET_ROWS:
+ case GGML_OP_ADD:
+ case GGML_OP_MUL:
+ case GGML_OP_DIV:
+ case GGML_OP_CONCAT:
+ case GGML_OP_UPSCALE:
+ case GGML_OP_SQR:
+ case GGML_OP_SIN:
+ case GGML_OP_COS:
+ case GGML_OP_CLAMP:
+ case GGML_OP_PAD:
+ case GGML_OP_REPEAT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static uint32_t get_misalign_bytes(ggml_backend_vk_context * ctx, const ggml_tensor * t)
+{
+    return ((vk_tensor_offset(t) + t->view_offs) & (ctx->device->properties.limits.minStorageBufferOffsetAlignment - 1));
+}
+
+template <typename T> void init_pushconst_tensor_offsets(ggml_backend_vk_context * ctx, T &p, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst) {
+ GGML_UNUSED(p);
+ GGML_UNUSED(src0);
+ GGML_UNUSED(src1);
+ GGML_UNUSED(src2);
+ GGML_UNUSED(dst);
+ static_assert(!std::is_const<T>::value, "unexpected type");
+ GGML_ASSERT(!src0 || get_misalign_bytes(ctx, src0) == 0);
+ GGML_ASSERT(!src1 || get_misalign_bytes(ctx, src1) == 0);
+ GGML_ASSERT(!src2 || get_misalign_bytes(ctx, src2) == 0);
+ GGML_ASSERT(!dst || get_misalign_bytes(ctx, dst) == 0);
+}
+
+template <> void init_pushconst_tensor_offsets(ggml_backend_vk_context * ctx, vk_op_unary_push_constants &p, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst) {
+ const uint32_t a_offset = get_misalign_bytes(ctx, src0) / ggml_type_size(src0->type);
+ const uint32_t d_offset = get_misalign_bytes(ctx, dst) / ggml_type_size(dst->type);
+
+ p.misalign_offsets = (a_offset << 16) | d_offset;
+
+ GGML_UNUSED(src1);
+ GGML_UNUSED(src2);
+}
+
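+// For binary ops the three element misalignments are packed into one 32-bit word: src0 in
+// bits 16..31, src1 in bits 8..15 and dst in bits 0..7.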
+template <> void init_pushconst_tensor_offsets(ggml_backend_vk_context * ctx, vk_op_binary_push_constants &p, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst) {
+ const uint32_t a_offset = get_misalign_bytes(ctx, src0) / ggml_type_size(src0->type);
+ const uint32_t b_offset = get_misalign_bytes(ctx, src1) / ggml_type_size(src1->type);
+ const uint32_t d_offset = get_misalign_bytes(ctx, dst) / ggml_type_size(dst->type);
+
+ GGML_ASSERT(dst->op != GGML_OP_GET_ROWS || (a_offset == 0 && b_offset == 0 && d_offset == 0));
+
+ p.misalign_offsets = (a_offset << 16) | (b_offset << 8) | d_offset;
+
+ GGML_UNUSED(src2);
+}
+
+template <> void init_pushconst_tensor_offsets(ggml_backend_vk_context * ctx, vk_op_upscale_push_constants &p, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst) {
+ const uint32_t a_offset = get_misalign_bytes(ctx, src0) / ggml_type_size(src0->type);
+ const uint32_t d_offset = get_misalign_bytes(ctx, dst) / ggml_type_size(dst->type);
+
+ p.a_offset = a_offset;
+ p.d_offset = d_offset;
+
+ GGML_UNUSED(src1);
+ GGML_UNUSED(src2);
+}
+
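+// Generic launcher for single-pipeline ops: resolves source/destination buffers (UMA or device),
+// fills in the misalignment push constants, picks a per-op dispatch grid and submits a single
+// dispatch with PC as the push-constant block.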
+template<typename PC>
+static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst, ggml_op op, PC&& pc, bool dryrun = false) {
+ VK_LOG_DEBUG("ggml_vk_op_f32((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
+ if (src1 != nullptr) {
+ std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
+ }
+ if (src2 != nullptr) {
+ std::cerr << "), (" << src2 << ", name=" << src2->name << ", type=" << src2->type << ", ne0=" << src2->ne[0] << ", ne1=" << src2->ne[1] << ", ne2=" << src2->ne[2] << ", ne3=" << src2->ne[3] << ", nb0=" << src2->nb[0] << ", nb1=" << src2->nb[1] << ", nb2=" << src2->nb[2] << ", nb3=" << src2->nb[3];
+ }
+ std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3];
+ std::cerr << "), " << ggml_op_name(op) << ", " << (dryrun ? "dryrun" : "") << ")");
+ GGML_ASSERT(op == GGML_OP_GET_ROWS || (!ggml_is_quantized(src0->type) && (src1 == nullptr || !ggml_is_quantized(src1->type)))); // NOLINT
+ GGML_ASSERT(ggml_vk_op_supports_incontiguous(op) || ggml_vk_dim01_contiguous(src0)); // NOLINT
+ GGML_ASSERT(dst->buffer != nullptr);
+ const uint64_t ne00 = src0->ne[0];
+ const uint64_t ne01 = src0->ne[1];
+ const uint64_t ne02 = src0->ne[2];
+ const uint64_t ne03 = src0->ne[3];
+ const uint64_t ne0 = ne00 * ne01;
+
+ const bool use_src1 = src1 != nullptr;
+ const uint64_t ne10 = use_src1 ? src1->ne[0] : 0;
+ const uint64_t ne11 = use_src1 ? src1->ne[1] : 0;
+ const uint64_t ne12 = use_src1 ? src1->ne[2] : 0;
+ const uint64_t ne13 = use_src1 ? src1->ne[3] : 0;
+ const uint64_t ne1 = ne10 * ne11;
+ // const uint64_t nb10 = use_src1 ? src1->nb[0] : 0;
+
+ const bool use_src2 = src2 != nullptr;
+ const uint64_t ne20 = use_src2 ? src2->ne[0] : 0;
+ const uint64_t ne21 = use_src2 ? src2->ne[1] : 0;
+ const uint64_t ne22 = use_src2 ? src2->ne[2] : 0;
+ const uint64_t ne23 = use_src2 ? src2->ne[3] : 0;
+ const uint64_t ne2 = ne20 * ne21;
+
+ const uint64_t ned0 = dst->ne[0];
+ const uint64_t ned1 = dst->ne[1];
+ const uint64_t ned2 = dst->ne[2];
+ const uint64_t ned3 = dst->ne[3];
+ const uint64_t ned = ned0 * ned1;
+
+ init_pushconst_fastdiv(pc);
+
+ vk_pipeline pipeline = ggml_vk_op_get_pipeline(ctx, src0, src1, src2, dst, op);
+
+ if (pipeline == nullptr) {
+ std::cerr << "ggml_vulkan: Error: Missing op: " << ggml_op_name(op) << " for " << ggml_type_name(src0->type);
+ if (src1 != nullptr) {
+ std::cerr << " and " << ggml_type_name(src1->type);
+ }
+ std::cerr << " to " << ggml_type_name(dst->type) << std::endl;
+ GGML_ABORT("fatal error");
+ }
+
+ if (dryrun) {
+ ggml_pipeline_request_descriptor_sets(ctx->device, pipeline, 1);
+ return;
+ }
+
+ const bool op_supports_incontiguous = ggml_vk_op_supports_incontiguous(op);
+
+ ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
+ ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
+ ggml_backend_vk_buffer_context * src1_buf_ctx = use_src1 ? (ggml_backend_vk_buffer_context *)src1->buffer->context : nullptr;
+ ggml_backend_vk_buffer_context * src2_buf_ctx = use_src2 ? (ggml_backend_vk_buffer_context *)src2->buffer->context : nullptr;
+
+ vk_buffer d_X = nullptr;
+ size_t x_buf_offset = 0;
+ vk_buffer d_Y = nullptr;
+ size_t y_buf_offset = 0;
+ vk_buffer d_Z = nullptr;
+ size_t z_buf_offset = 0;
+
+ bool src0_uma = false;
+ bool src1_uma = false;
+ bool src2_uma = false;
+
+ if (ctx->device->uma) {
+ ggml_vk_host_get(ctx->device, src0->data, d_X, x_buf_offset);
+ src0_uma = d_X != nullptr;
+ if (use_src1) {
+ ggml_vk_host_get(ctx->device, src1->data, d_Y, y_buf_offset);
+ src1_uma = d_Y != nullptr;
+ }
+ if (use_src2) {
+ ggml_vk_host_get(ctx->device, src2->data, d_Z, z_buf_offset);
+ src2_uma = d_Z != nullptr;
+ }
+ }
+
+ uint64_t x_sz = ggml_type_size(src0->type)/ggml_blck_size(src0->type) * ne0;
+ uint64_t y_sz = use_src1 ? ggml_type_size(src1->type) * ne1 : 0;
+ uint64_t z_sz = use_src2 ? ggml_type_size(src2->type) * ne2 : 0;
+ uint64_t d_sz = ggml_type_size(dst->type) * ned;
+
+ vk_buffer d_D = dst_buf_ctx->dev_buffer;
+
+ // Workaround for tiny tensor inputs on ROPE
+ if (op == GGML_OP_ROPE && use_src1 && y_sz > d_D->size) {
+ y_sz = VK_WHOLE_SIZE;
+ }
+
+ GGML_ASSERT(d_D != nullptr);
+ uint64_t d_buf_offset = vk_tensor_offset(dst) + dst->view_offs;
+    if (!src0_uma) {
+ d_X = src0_buf_ctx->dev_buffer;
+ x_buf_offset = vk_tensor_offset(src0) + src0->view_offs;
+ GGML_ASSERT(d_X != nullptr);
+ }
+ if (use_src1 && !src1_uma) {
+ d_Y = src1_buf_ctx->dev_buffer;
+ y_buf_offset = vk_tensor_offset(src1) + src1->view_offs;
+ GGML_ASSERT(d_Y != nullptr);
+ }
+ if (use_src2 && !src2_uma) {
+ d_Z = src2_buf_ctx->dev_buffer;
+ z_buf_offset = vk_tensor_offset(src2) + src2->view_offs;
+ GGML_ASSERT(d_Z != nullptr);
+ }
+    // Compute misalignment offset for descriptors and store it in the push constants, then align the descriptor offsets.
+ init_pushconst_tensor_offsets(ctx, pc, src0, src1, src2, dst);
+ x_buf_offset &= ~(ctx->device->properties.limits.minStorageBufferOffsetAlignment - 1);
+ y_buf_offset &= ~(ctx->device->properties.limits.minStorageBufferOffsetAlignment - 1);
+ z_buf_offset &= ~(ctx->device->properties.limits.minStorageBufferOffsetAlignment - 1);
+ d_buf_offset &= ~(ctx->device->properties.limits.minStorageBufferOffsetAlignment - 1);
+
+ if (op_supports_incontiguous) {
+ x_sz = ggml_nbytes(src0);
+ y_sz = use_src1 ? ggml_nbytes(src1) : 0;
+ z_sz = use_src2 ? ggml_nbytes(src2) : 0;
+ d_sz = ggml_nbytes(dst);
+
+ if (x_buf_offset + x_sz >= d_X->size) {
+ x_sz = VK_WHOLE_SIZE;
+ }
+ if (use_src1 && y_buf_offset + y_sz >= d_Y->size) {
+ y_sz = VK_WHOLE_SIZE;
+ }
+ if (use_src2 && z_buf_offset + z_sz >= d_Z->size) {
+ z_sz = VK_WHOLE_SIZE;
+ }
+ if (d_buf_offset + d_sz >= d_D->size) {
+ d_sz = VK_WHOLE_SIZE;
+ }
+ }
+
+ std::array<uint32_t, 3> elements;
+
+ // Single call if dimension 2 is contiguous
+ GGML_ASSERT(op_supports_incontiguous || (ggml_is_contiguous(src0) && (src1 == nullptr || ggml_is_contiguous(src1))));
+
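+    // Per-op dispatch grid: row-wise ops spread rows across up to 512x512 workgroups in X/Y with
+    // the remainder in Z, element-wise ops spread elements the same way, and the shape-specific
+    // ops below compute their own extents.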
+ switch (op) {
+ case GGML_OP_NORM:
+ case GGML_OP_RMS_NORM:
+ case GGML_OP_SOFT_MAX:
+ case GGML_OP_SUM_ROWS:
+ {
+ const uint32_t nr = ggml_nrows(src0);
+ if (nr > 262144) {
+ elements = { 512, 512, CEIL_DIV(nr, 262144) };
+ } else if (nr > 512) {
+ elements = { 512, CEIL_DIV(nr, 512), 1 };
+ } else {
+ elements = { nr, 1, 1 };
+ }
+ } break;
+ case GGML_OP_GROUP_NORM:
+ {
+ const uint32_t num_groups = dst->op_params[0];
+ elements = { num_groups * (uint32_t)src0->ne[3], 1, 1 };
+ } break;
+ case GGML_OP_DIAG_MASK_INF:
+ case GGML_OP_ROPE:
+ elements = { (uint32_t)ggml_nrows(src0), (uint32_t)ne00, 1 };
+ break;
+ case GGML_OP_GET_ROWS:
+ elements = { (uint32_t)ne00, (uint32_t)ne10, (uint32_t)(ne11 * ne12) };
+ break;
+ case GGML_OP_ARGSORT:
+ elements = { (uint32_t)ne00, (uint32_t)ggml_nrows(src0), 1 };
+ break;
+ case GGML_OP_IM2COL:
+ {
+ const bool is_2D = dst->op_params[6] == 1;
+
+ const uint32_t IC = src1->ne[is_2D ? 2 : 1];
+
+ const uint32_t KH = is_2D ? src0->ne[1] : 1;
+ const uint32_t KW = src0->ne[0];
+
+ const uint32_t OH = is_2D ? dst->ne[2] : 1;
+ const uint32_t OW = dst->ne[1];
+
+ const uint32_t batch = src1->ne[is_2D ? 3 : 2];
+
+ elements = { OW * KW * KH, OH, batch * IC };
+ } break;
+ case GGML_OP_TIMESTEP_EMBEDDING:
+ {
+ const uint32_t dim = dst->op_params[0];
+ uint32_t half_ceil = (dim + 1) / 2;
+ elements = { half_ceil, (uint32_t)src0->ne[0], 1 };
+ } break;
+ case GGML_OP_POOL_2D:
+ {
+ const uint32_t N = dst->ne[3];
+ const uint32_t OC = dst->ne[2];
+ const uint32_t OH = dst->ne[1];
+ const uint32_t OW = dst->ne[0];
+ elements = { N * OC * OH * OW, 1, 1};
+ } break;
+ case GGML_OP_ADD:
+ case GGML_OP_DIV:
+ case GGML_OP_MUL:
+ case GGML_OP_SCALE:
+ case GGML_OP_SQR:
+ case GGML_OP_SIN:
+ case GGML_OP_COS:
+ case GGML_OP_CLAMP:
+ case GGML_OP_PAD:
+ case GGML_OP_REPEAT:
+ case GGML_OP_CPY:
+ case GGML_OP_CONCAT:
+ case GGML_OP_UPSCALE:
+ case GGML_OP_UNARY:
+ {
+ const uint32_t ne = ggml_nelements(dst);
+ if (ne > 262144) {
+ elements = { 512, 512, CEIL_DIV(ne, 262144) };
+ } else if (ne > 512) {
+ elements = { 512, CEIL_DIV(ne, 512), 1 };
+ } else {
+ elements = { ne, 1, 1 };
+ }
+ } break;
+ default:
+ elements = { (uint32_t)ggml_nelements(src0), 1, 1 };
+ break;
+ }
+
+ if (!op_supports_incontiguous) {
+ if (x_sz != VK_WHOLE_SIZE) {
+ x_sz *= ne02 * ne03;
+ }
+ if (use_src1 && y_sz != VK_WHOLE_SIZE) {
+ y_sz *= ne12 * ne13;
+ }
+ if (use_src2 && z_sz != VK_WHOLE_SIZE) {
+ z_sz *= ne22 * ne23;
+ }
+ if (d_sz != VK_WHOLE_SIZE) {
+ d_sz *= ned2 * ned3;
+ }
+ }
+
+ if (op == GGML_OP_SOFT_MAX) {
+ // Empty src1 is possible in soft_max, but the shader needs a buffer
+ vk_subbuffer subbuf_y;
+ if (use_src1) {
+ subbuf_y = { d_Y, y_buf_offset, y_sz };
+ } else {
+ subbuf_y = { d_X, 0, x_sz };
+ }
+
+ ggml_vk_sync_buffers(subctx);
+ ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_X, x_buf_offset, x_sz }, subbuf_y, vk_subbuffer{ d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
+ } else if (op == GGML_OP_ROPE) {
+ // Empty src2 is possible in rope, but the shader needs a buffer
+ vk_subbuffer subbuf_z;
+ if (use_src2) {
+ subbuf_z = { d_Z, z_buf_offset, z_sz };
+ } else {
+ subbuf_z = { d_X, 0, x_sz };
+ }
+
+ ggml_vk_sync_buffers(subctx);
+ ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_X, x_buf_offset, x_sz }, vk_subbuffer{ d_Y, y_buf_offset, y_sz }, subbuf_z, vk_subbuffer{ d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
+ } else if (op == GGML_OP_IM2COL) {
+ // im2col uses only src1 and dst buffers
+ ggml_vk_sync_buffers(subctx);
+ ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_Y, y_buf_offset, y_sz }, vk_subbuffer{ d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
+ } else if (use_src2) {
+ ggml_vk_sync_buffers(subctx);
+ ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_X, x_buf_offset, x_sz }, vk_subbuffer{ d_Y, y_buf_offset, y_sz }, vk_subbuffer{ d_Z, z_buf_offset, z_sz }, vk_subbuffer{ d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
+ } else if (use_src1) {
+ ggml_vk_sync_buffers(subctx);
+ ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_X, x_buf_offset, x_sz }, vk_subbuffer{ d_Y, y_buf_offset, y_sz }, vk_subbuffer{ d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
+ } else {
+ ggml_vk_sync_buffers(subctx);
+ ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_X, x_buf_offset, x_sz }, vk_subbuffer{ d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
+ }
+}
+
+static void ggml_vk_get_rows(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
+ const uint32_t src0_type_size = ggml_type_size(src0->type);
+ const uint32_t src1_type_size = ggml_type_size(src1->type);
+ const uint32_t dst_type_size = ggml_type_size(dst->type);
+
+ ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_GET_ROWS, {
+ (uint32_t)ggml_nelements(src0),
+ (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2],(uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
+ (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2],(uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
+ (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2],(uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
+ 0,
+ 0.0f, 0.0f, 0,
+ }, dryrun);
+}
+
+static void ggml_vk_acc(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
+ const uint32_t src0_type_size = ggml_type_size(src0->type);
+ const uint32_t src1_type_size = ggml_type_size(src1->type);
+ const uint32_t dst_type_size = ggml_type_size(dst->type);
+
+ int nb1 = dst->op_params[0] / 4; // 4 bytes of float32
+ int nb2 = dst->op_params[1] / 4; // 4 bytes of float32
+ // int nb3 = dst->op_params[2] / 4; // 4 bytes of float32 - unused
+    int offset = dst->op_params[3] / 4; // offset in bytes, converted to a float element index
+
+ ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_ACC, {
+ (uint32_t)ggml_nelements(src0),
+ (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2],(uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)nb1, (uint32_t)nb2, (uint32_t)src0->nb[3] / src0_type_size,
+ (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2],(uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
+ (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2],(uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t)nb1, (uint32_t)nb2, (uint32_t) dst->nb[3] / dst_type_size,
+ 0,
+ 0.0f, 0.0f, offset,
+ }, dryrun);
+}
+
+static void ggml_vk_add(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
+ const uint32_t src0_type_size = ggml_type_size(src0->type);
+ const uint32_t src1_type_size = ggml_type_size(src1->type);
+ const uint32_t dst_type_size = ggml_type_size(dst->type);
+
+ ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_ADD, {
+ (uint32_t)ggml_nelements(src0),
+ (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2],(uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
+ (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2],(uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
+ (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2],(uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
+ 0,
+ 0.0f, 0.0f, 0,
+ }, dryrun);
+}
+
+static void ggml_vk_mul(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
+ const uint32_t src0_type_size = ggml_type_size(src0->type);
+ const uint32_t src1_type_size = ggml_type_size(src1->type);
+ const uint32_t dst_type_size = ggml_type_size(dst->type);
+
+ ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_MUL, {
+ (uint32_t)ggml_nelements(src0),
+ (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2],(uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
+ (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2],(uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
+ (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2],(uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
+ 0,
+ 0.0f, 0.0f, 0,
+ }, dryrun);
+}
+
+static void ggml_vk_div(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
+ const uint32_t src0_type_size = ggml_type_size(src0->type);
+ const uint32_t src1_type_size = ggml_type_size(src1->type);
+ const uint32_t dst_type_size = ggml_type_size(dst->type);
+
+ ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_DIV, {
+ (uint32_t)ggml_nelements(src0),
+ (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2],(uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
+ (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2],(uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
+ (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2],(uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
+ 0,
+ 0.0f, 0.0f, 0,
+ }, dryrun);
+}
+
+static void ggml_vk_op_f32_rwkv6(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_tensor * dst, const vk_op_rwkv_wkv6_push_constants&& pc, bool dryrun = false) {
+ const ggml_tensor * k = dst->src[0];
+ const ggml_tensor * v = dst->src[1];
+ const ggml_tensor * r = dst->src[2];
+ const ggml_tensor * tf = dst->src[3];
+ const ggml_tensor * td = dst->src[4];
+ const ggml_tensor * state = dst->src[5];
+
+ GGML_ASSERT(!ggml_is_quantized(k->type));
+ GGML_ASSERT(!ggml_is_quantized(v->type));
+ GGML_ASSERT(!ggml_is_quantized(r->type));
+ GGML_ASSERT(!ggml_is_quantized(tf->type));
+ GGML_ASSERT(!ggml_is_quantized(td->type));
+ GGML_ASSERT(!ggml_is_quantized(state->type));
+ GGML_ASSERT(dst->buffer != nullptr);
+
+ vk_pipeline pipeline = ggml_vk_op_get_pipeline(ctx, k, v, r, dst, GGML_OP_RWKV_WKV6);
+ GGML_ASSERT(pipeline != nullptr);
+
+ if (dryrun) {
+ ggml_pipeline_request_descriptor_sets(ctx->device, pipeline, 1);
+ return;
+ }
+
+ ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
+ ggml_backend_vk_buffer_context * k_buf_ctx = (ggml_backend_vk_buffer_context *)k->buffer->context;
+ ggml_backend_vk_buffer_context * v_buf_ctx = (ggml_backend_vk_buffer_context *)v->buffer->context;
+ ggml_backend_vk_buffer_context * r_buf_ctx = (ggml_backend_vk_buffer_context *)r->buffer->context;
+ ggml_backend_vk_buffer_context * tf_buf_ctx = (ggml_backend_vk_buffer_context *)tf->buffer->context;
+ ggml_backend_vk_buffer_context * td_buf_ctx = (ggml_backend_vk_buffer_context *)td->buffer->context;
+ ggml_backend_vk_buffer_context * state_buf_ctx = (ggml_backend_vk_buffer_context *)state->buffer->context;
+
+ ggml_vk_sync_buffers(subctx);
+
+ vk_buffer d_D = nullptr, d_K = nullptr, d_V = nullptr, d_R = nullptr, d_TF = nullptr, d_TD = nullptr, d_State = nullptr;
+ size_t k_offset = 0, v_offset = 0, r_offset = 0, tf_offset = 0, td_offset = 0, state_offset = 0, dst_offset = 0;
+ bool K_uma = false, V_uma = false, R_uma = false, TF_uma = false, TD_uma = false, STATE_uma = false, DST_uma = false;
+
+ if (ctx->device->uma) {
+ ggml_vk_host_get(ctx->device, k->data, d_K, k_offset);
+ ggml_vk_host_get(ctx->device, v->data, d_V, v_offset);
+ ggml_vk_host_get(ctx->device, r->data, d_R, r_offset);
+ ggml_vk_host_get(ctx->device, tf->data, d_TF, tf_offset);
+ ggml_vk_host_get(ctx->device, td->data, d_TD, td_offset);
+ ggml_vk_host_get(ctx->device, state->data, d_State, state_offset);
+ ggml_vk_host_get(ctx->device, dst->data, d_D, dst_offset);
+
+ K_uma = d_K != nullptr;
+ V_uma = d_V != nullptr;
+ R_uma = d_R != nullptr;
+ TF_uma = d_TF != nullptr;
+ TD_uma = d_TD != nullptr;
+ STATE_uma = d_State != nullptr;
+ DST_uma = d_D != nullptr;
+ }
+
+ if (!K_uma) {
+ d_K = k_buf_ctx->dev_buffer;
+ k_offset = vk_tensor_offset(k) + k->view_offs;
+ }
+ if (!V_uma) {
+ d_V = v_buf_ctx->dev_buffer;
+ v_offset = vk_tensor_offset(v) + v->view_offs;
+ }
+ if (!R_uma) {
+ d_R = r_buf_ctx->dev_buffer;
+ r_offset = vk_tensor_offset(r) + r->view_offs;
+ }
+ if (!TF_uma) {
+ d_TF = tf_buf_ctx->dev_buffer;
+ tf_offset = vk_tensor_offset(tf) + tf->view_offs;
+ }
+ if (!TD_uma) {
+ d_TD = td_buf_ctx->dev_buffer;
+ td_offset = vk_tensor_offset(td) + td->view_offs;
+ }
+ if (!STATE_uma) {
+ d_State = state_buf_ctx->dev_buffer;
+ state_offset = vk_tensor_offset(state) + state->view_offs;
+ }
+ if (!DST_uma) {
+ d_D = dst_buf_ctx->dev_buffer;
+ dst_offset = vk_tensor_offset(dst) + dst->view_offs;
+ }
+
+ const uint64_t k_size = ggml_nbytes(k);
+ const uint64_t v_size = ggml_nbytes(v);
+ const uint64_t r_size = ggml_nbytes(r);
+ const uint64_t tf_size = ggml_nbytes(tf);
+ const uint64_t td_size = ggml_nbytes(td);
+ const uint64_t state_size = ggml_nbytes(state);
+ const uint64_t dst_size = ggml_nbytes(dst);
+
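+    // One workgroup per (batch, head) pair.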
+ std::array<uint32_t, 3> elements = {
+ (uint32_t)(pc.B * pc.H),
+ 1,
+ 1
+ };
+
+ ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, {
+ vk_subbuffer{ d_K, k_offset, k_size },
+ vk_subbuffer{ d_V, v_offset, v_size },
+ vk_subbuffer{ d_R, r_offset, r_size },
+ vk_subbuffer{ d_TF, tf_offset, tf_size },
+ vk_subbuffer{ d_TD, td_offset, td_size },
+ vk_subbuffer{ d_State, state_offset, state_size },
+ vk_subbuffer{ d_D, dst_offset, dst_size }
+ }, sizeof(vk_op_rwkv_wkv6_push_constants), &pc, elements);
+}
+
+static void ggml_vk_rwkv_wkv6(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_tensor * dst, bool dryrun = false) {
+ const size_t seq_length = dst->src[0]->ne[3];
+ const size_t n_embed = dst->ne[0];
+ const size_t n_heads = dst->src[0]->ne[2];
+ const size_t n_seqs = dst->src[5]->ne[1];
+
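+    // The push constants carry (n_seqs, seq_length, n_embed, n_heads); B*H sets the dispatch size in ggml_vk_op_f32_rwkv6.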
+ ggml_vk_op_f32_rwkv6(
+ ctx, subctx, dst,
+ {
+ (uint32_t)n_seqs,
+ (uint32_t)seq_length,
+ (uint32_t)n_embed,
+ (uint32_t)n_heads,
+ },
+ dryrun
+ );
+}
+
+static void ggml_vk_concat(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
+ int * op_params = (int *)dst->op_params;
+
+ const uint32_t src0_type_size = ggml_type_size(src0->type);
+ const uint32_t src1_type_size = ggml_type_size(src1->type);
+ const uint32_t dst_type_size = ggml_type_size(dst->type);
+
+ ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_CONCAT, {
+ (uint32_t)ggml_nelements(dst),
+ (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2],(uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
+ (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2],(uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
+ (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2],(uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
+ 0,
+ 0.0f, 0.0f, op_params[0],
+ }, dryrun);
+}
+
+static void ggml_vk_upscale(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
+ const uint32_t src0_type_size = ggml_type_size(src0->type);
+
+ const float sf0 = (float)dst->ne[0] / src0->ne[0];
+ const float sf1 = (float)dst->ne[1] / src0->ne[1];
+ const float sf2 = (float)dst->ne[2] / src0->ne[2];
+ const float sf3 = (float)dst->ne[3] / src0->ne[3];
+
+ ggml_vk_op_f32<vk_op_upscale_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_UPSCALE, {
+ (uint32_t)ggml_nelements(dst), 0, 0,
+ (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
+ (uint32_t)dst->ne[0], (uint32_t)dst->ne[1], (uint32_t)dst->ne[2],(uint32_t)dst->ne[3],
+ sf0, sf1, sf2, sf3,
+ }, dryrun);
+}
+
+static void ggml_vk_scale(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
+ float * op_params = (float *)dst->op_params;
+ const uint32_t src0_type_size = ggml_type_size(src0->type);
+ const uint32_t dst_type_size = ggml_type_size(dst->type);
+
+ ggml_vk_op_f32<vk_op_unary_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_SCALE, {
+ (uint32_t)ggml_nelements(src0),
+ (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
+ (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
+ 0,
+ op_params[0], 0.0f,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ }, dryrun);
+}
+
+static void ggml_vk_sqr(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
+ const uint32_t src0_type_size = ggml_type_size(src0->type);
+ const uint32_t dst_type_size = ggml_type_size(dst->type);
+
+ ggml_vk_op_f32<vk_op_unary_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_SQR, {
+ (uint32_t)ggml_nelements(src0),
+ (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
+ (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
+ 0,
+ 0.0f, 0.0f,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ }, dryrun);
+}
+
+static void ggml_vk_sin(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
+ const uint32_t src0_type_size = ggml_type_size(src0->type);
+ const uint32_t dst_type_size = ggml_type_size(dst->type);
+
+ ggml_vk_op_f32<vk_op_unary_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_SIN, {
+ (uint32_t)ggml_nelements(src0),
+ (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
+ (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
+ 0,
+ 0.0f, 0.0f,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ }, dryrun);
+}
+
+static void ggml_vk_cos(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
+ const uint32_t src0_type_size = ggml_type_size(src0->type);
+ const uint32_t dst_type_size = ggml_type_size(dst->type);
+
+ ggml_vk_op_f32<vk_op_unary_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_COS, {
+ (uint32_t)ggml_nelements(src0),
+ (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
+ (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
+ 0,
+ 0.0f, 0.0f,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ }, dryrun);
+}
+
+static void ggml_vk_clamp(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
+ float * op_params = (float *)dst->op_params;
+ const uint32_t src0_type_size = ggml_type_size(src0->type);
+ const uint32_t dst_type_size = ggml_type_size(dst->type);
+
+ ggml_vk_op_f32<vk_op_unary_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_CLAMP, {
+ (uint32_t)ggml_nelements(src0),
+ (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
+ (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
+ 0,
+ op_params[0], op_params[1],
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ }, dryrun);
+}
+
+static void ggml_vk_pad(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
+ const uint32_t src0_type_size = ggml_type_size(src0->type);
+ const uint32_t dst_type_size = ggml_type_size(dst->type);
+
+ ggml_vk_op_f32<vk_op_unary_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_PAD, {
+ (uint32_t)ggml_nelements(dst),
+ (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
+ (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
+ 0,
+ 0.0f, 0.0f,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ }, dryrun);
+}
+
+static void ggml_vk_repeat(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
+ const uint32_t src0_type_size = ggml_type_size(src0->type);
+ const uint32_t dst_type_size = ggml_type_size(dst->type);
+
+ ggml_vk_op_f32<vk_op_unary_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_REPEAT, {
+ (uint32_t)ggml_nelements(dst),
+ (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
+ (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
+ 0,
+ 0.0f, 0.0f,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ }, dryrun);
+}
+
+static void ggml_vk_cpy(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
+ const uint32_t src0_type_size = ggml_type_size(src0->type);
+ const uint32_t dst_type_size = ggml_type_size(dst->type);
+
+ ggml_vk_op_f32<vk_op_unary_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_CPY, {
+ (uint32_t)ggml_nelements(src0),
+ (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
+ (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
+ 0,
+ 0.0f, 0.0f,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ }, dryrun);
+}
+
+static void ggml_vk_norm(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
+ float * op_params = (float *)dst->op_params;
+
+ ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_NORM, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], op_params[0], 0.0f }, dryrun);
+}
+
+static void ggml_vk_group_norm(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
+ const int * int_op_params = (const int *)dst->op_params;
+ const float * float_op_params = (const float *)dst->op_params;
+
+ const uint32_t num_groups = int_op_params[0];
+ const float eps = float_op_params[1];
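+    // Elements per group: one ne0*ne1 plane times ceil(ne2 / num_groups) channels.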
+ const uint32_t group_size = src0->ne[0] * src0->ne[1] * ((src0->ne[2] + num_groups - 1) / num_groups);
+
+ ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_GROUP_NORM, { group_size, 0, eps, 0.0f }, dryrun);
+}
+
+static void ggml_vk_rms_norm(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
+ float * op_params = (float *)dst->op_params;
+ ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_RMS_NORM, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], op_params[0], 0.0f }, dryrun);
+}
+
+static void ggml_vk_unary(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
+ ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_UNARY, { (uint32_t)ggml_nelements(src0), 0, 0.0f, 0.0f }, dryrun);
+}
+
+static void ggml_vk_diag_mask_inf(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
+ int32_t * op_params = (int32_t *)dst->op_params;
+ ggml_vk_op_f32<vk_op_diag_mask_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_DIAG_MASK_INF, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], op_params[0] }, dryrun);
+}
+
+static void ggml_vk_soft_max(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
+ float * op_params = (float *)dst->op_params;
+
+ float scale = op_params[0];
+ float max_bias = op_params[1];
+
+ const uint32_t ncols = (uint32_t)src0->ne[0];
+ const uint32_t nrows_x = (uint32_t)ggml_nrows(src0);
+ const uint32_t nrows_y = (uint32_t)src0->ne[1];
+
+ const uint32_t n_head_kv = nrows_x/nrows_y;
+ const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head_kv));
+
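+    // Slope bases for the ALiBi positional bias, derived from max_bias.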
+ const float m0 = powf(2.0f, -(max_bias ) / n_head_log2);
+ const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
+
+ ggml_vk_op_f32<vk_op_soft_max_push_constants>(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_SOFT_MAX, {
+ ncols,
+ src1 != nullptr ? nrows_y : (uint32_t)0,
+ scale, max_bias,
+ m0, m1,
+ n_head_log2,
+ nrows_x,
+ }, dryrun);
+}
+
+static void ggml_vk_rope(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst, bool dryrun = false) {
+ const int n_dims = ((int32_t *) dst->op_params)[1];
+ // const int mode = ((int32_t *) dst->op_params)[2];
+ // const int n_ctx = ((int32_t *) dst->op_params)[3];
+ const int n_ctx_orig = ((int32_t *) dst->op_params)[4];
+ const float freq_base = ((float *) dst->op_params)[5];
+ const float freq_scale = ((float *) dst->op_params)[6];
+ const float ext_factor = ((float *) dst->op_params)[7];
+ const float attn_factor = ((float *) dst->op_params)[8];
+ const float beta_fast = ((float *) dst->op_params)[9];
+ const float beta_slow = ((float *) dst->op_params)[10];
+
+ float corr_dims[2];
+ ggml_rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow, corr_dims);
+
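+    // Base frequency ratio between consecutive rotary dimension pairs, passed to the RoPE shaders.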
+ const float theta_scale = powf(freq_base, -2.0f/n_dims);
+
+ ggml_vk_op_f32<vk_op_rope_push_constants>(ctx, subctx, src0, src1, src2, dst, GGML_OP_ROPE, {
+ (uint32_t)src0->ne[0], (uint32_t)n_dims, freq_scale, (uint32_t)src0->ne[1],
+ freq_base, ext_factor, attn_factor, {corr_dims[0], corr_dims[1]}, theta_scale,
+ src2 != nullptr,
+ }, dryrun);
+}
+
+static void ggml_vk_argsort(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
+ int32_t * op_params = (int32_t *)dst->op_params;
+
+ uint32_t ncols = src0->ne[0];
+
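+    // Pad the row length to the next power of two for the sort shader, which handles at most 1024 padded columns (see the assert below).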
+ uint32_t ncols_pad = 1;
+ while (ncols_pad < ncols) {
+ ncols_pad *= 2;
+ }
+
+ GGML_ASSERT(ncols_pad <= 1024);
+
+ ggml_vk_op_f32<vk_op_argsort_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_ARGSORT, {
+ ncols,
+ ncols_pad,
+ op_params[0],
+ }, dryrun);
+}
+
+static void ggml_vk_sum_rows(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
+ ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_SUM_ROWS, { (uint32_t)src0->ne[0], 0, 0.0f, 0.0f }, dryrun);
+}
+
+static void ggml_vk_im2col(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
+ const int32_t s0 = dst->op_params[0];
+ const int32_t s1 = dst->op_params[1];
+ const int32_t p0 = dst->op_params[2];
+ const int32_t p1 = dst->op_params[3];
+ const int32_t d0 = dst->op_params[4];
+ const int32_t d1 = dst->op_params[5];
+
+ const bool is_2D = dst->op_params[6] == 1;
+
+ const uint32_t IC = src1->ne[is_2D ? 2 : 1];
+ const uint32_t IH = is_2D ? src1->ne[1] : 1;
+ const uint32_t IW = src1->ne[0];
+
+ const uint32_t KH = is_2D ? src0->ne[1] : 1;
+ const uint32_t KW = src0->ne[0];
+
+ const uint32_t OH = is_2D ? dst->ne[2] : 1;
+ const uint32_t OW = dst->ne[1];
+
+ const uint32_t offset_delta = src1->nb[is_2D ? 2 : 1] / 4; // nb is byte offset, src is type float32
+ const uint32_t batch_offset = src1->nb[is_2D ? 3 : 2] / 4; // nb is byte offset, src is type float32
+
+ const uint32_t pelements = OW * KW * KH;
+
+ ggml_vk_op_f32<vk_op_im2col_push_constants>(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_IM2COL, {
+ batch_offset, offset_delta,
+ IC, IW, IH, OW, OH, KW, KH,
+ pelements,
+ IC * KH * KW,
+ s0, s1, p0, p1, d0, d1,
+ }, dryrun);
+}
+
+static void ggml_vk_timestep_embedding(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
+ const uint32_t dim = dst->op_params[0];
+ const uint32_t max_period = dst->op_params[1];
+ const uint32_t nb1 = dst->nb[1] / ggml_type_size(dst->type);
+
+ ggml_vk_op_f32<vk_op_timestep_embedding_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_TIMESTEP_EMBEDDING, {
+ nb1, dim, max_period,
+ }, dryrun);
+}
+
+static void ggml_vk_pool_2d(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
+ uint32_t op = static_cast<uint32_t>(dst->op_params[0]);
+ const int32_t k1 = dst->op_params[1];
+ const int32_t k0 = dst->op_params[2];
+ const int32_t s1 = dst->op_params[3];
+ const int32_t s0 = dst->op_params[4];
+ const int32_t p1 = dst->op_params[5];
+ const int32_t p0 = dst->op_params[6];
+
+ const uint32_t IH = src0->ne[1];
+ const uint32_t IW = src0->ne[0];
+
+ const uint32_t N = dst->ne[3];
+
+ const uint32_t OC = dst->ne[2];
+ const uint32_t OH = dst->ne[1];
+ const uint32_t OW = dst->ne[0];
+
+ const uint32_t parallel_elements = N * OC * OH * OW;
+
+ ggml_vk_op_f32<vk_op_pool2d_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_POOL_2D, {
+ IW, IH, OW, OH, OC,
+ parallel_elements,
+ op,
+ k0, k1, s0, s1, p0, p1,
+ }, dryrun);
+}
+
+static void ggml_vk_leaky_relu(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
+ const float * op_params = (const float *)dst->op_params;
+ ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_LEAKY_RELU, { (uint32_t)ggml_nelements(src0), 0, op_params[0], 0.0f }, dryrun);
+}
+
+#ifdef GGML_VULKAN_RUN_TESTS
+static void ggml_vk_print_matrix_area(const void * data, ggml_type type, int ne0, int ne1, int i0, int i1, int i2) {
+ if (type != GGML_TYPE_F32 && type != GGML_TYPE_F16) {
+ return;
+ }
+ i0 = std::max(i0, 5);
+ i1 = std::max(i1, 5);
+ i2 = std::max(i2, 0);
+ fprintf(stderr, " ");
+ for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
+ fprintf(stderr, "%7d ", idx1);
+ }
+ fprintf(stderr, "\n");
+ for (int idx0 = i0 - 5; idx0 < i0 + 5; idx0++) {
+ fprintf(stderr, "%7d: ", idx0);
+ for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
+ if (idx0 >= 0 && idx0 < ne0 && idx1 >= 0 && idx1 < ne1) {
+ float val;
+ if (type == GGML_TYPE_F32) {
+ val = *((const float *) data + i2*ne1*ne0 + idx1*ne0 + idx0);
+ } else if (type == GGML_TYPE_F16) {
+ val = ggml_fp16_to_fp32(*((const ggml_fp16_t *) data + i2*ne1*ne0 + idx1*ne0 + idx0));
+ } else {
+ GGML_ABORT("fatal error");
+ }
+ fprintf(stderr, "% 7.2f ", val);
+ } else {
+ fprintf(stderr, " ");
+ }
+ }
+ fprintf(stderr, "\n");
+ }
+}
+
+template <typename X_TYPE, typename Y_TYPE>
+static void ggml_vk_test_matmul(ggml_backend_vk_context * ctx, size_t m, size_t n, size_t k, size_t batch, size_t num_it, int split_k, int shader_size) {
+ VK_LOG_DEBUG("ggml_vk_test_matmul(" << m << ", " << n << ", " << k << ", " << batch << ", " << num_it << ", " << split_k << ", " << shader_size << ")");
+ const size_t x_ne = m * k * batch;
+ const size_t y_ne = k * n * batch;
+ const size_t d_ne = m * n * batch;
+
+ vk_pipeline p;
+ std::string shname;
+ if (shader_size == 0) {
+ if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
+ p = ctx->device->pipeline_matmul_f32->a_s;
+ shname = "F32_ALIGNED_S";
+ } else if (std::is_same<float, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
+ p = ctx->device->pipeline_matmul_f32_f16->a_s;
+ shname = "F32_F16_ALIGNED_S";
+ } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
+ p = ctx->device->pipeline_matmul_f16_f32.f32acc->a_s;
+ shname = "F16_F32_ALIGNED_S";
+ } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
+ p = ctx->device->pipeline_matmul_f16.f32acc->a_s;
+ shname = "F16_ALIGNED_S";
+ } else {
+ GGML_ABORT("fatal error");
+ }
+ } else if (shader_size == 1) {
+ if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
+ p = ctx->device->pipeline_matmul_f32->a_m;
+ shname = "F32_ALIGNED_M";
+ } else if (std::is_same<float, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
+ p = ctx->device->pipeline_matmul_f32_f16->a_m;
+ shname = "F32_F16_ALIGNED_M";
+ } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
+ p = ctx->device->pipeline_matmul_f16_f32.f32acc->a_m;
+ shname = "F16_F32_ALIGNED_M";
+ } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
+ p = ctx->device->pipeline_matmul_f16.f32acc->a_m;
+ shname = "F16_ALIGNED_M";
+ } else {
+ GGML_ABORT("fatal error");
+ }
+ } else if (shader_size == 2) {
+ if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
+ p = ctx->device->pipeline_matmul_f32->a_l;
+ shname = "F32_ALIGNED_L";
+ } else if (std::is_same<float, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
+ p = ctx->device->pipeline_matmul_f32_f16->a_l;
+ shname = "F32_F16_ALIGNED_L";
+ } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
+ p = ctx->device->pipeline_matmul_f16_f32.f32acc->a_l;
+ shname = "F16_F32_ALIGNED_L";
+ } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
+ p = ctx->device->pipeline_matmul_f16.f32acc->a_l;
+ shname = "F16_ALIGNED_L";
+ } else {
+ GGML_ABORT("fatal error");
+ }
+ } else {
+ GGML_ASSERT(0);
+ }
+
+ const size_t kpad = ggml_vk_align_size(k, p->align);
+
+ if (k != kpad) {
+ if (shader_size == 0) {
+ if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
+ p = ctx->device->pipeline_matmul_f32->s;
+ shname = "F32_S";
+ } else if (std::is_same<float, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
+ p = ctx->device->pipeline_matmul_f32_f16->s;
+ shname = "F32_F16_S";
+ } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
+ p = ctx->device->pipeline_matmul_f16_f32.f32acc->s;
+ shname = "F16_F32_S";
+ } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
+ p = ctx->device->pipeline_matmul_f16.f32acc->s;
+ shname = "F16_S";
+ }
+ } else if (shader_size == 1) {
+ if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
+ p = ctx->device->pipeline_matmul_f32->m;
+ shname = "F32_M";
+ } else if (std::is_same<float, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
+ p = ctx->device->pipeline_matmul_f32_f16->m;
+ shname = "F32_F16_M";
+ } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
+ p = ctx->device->pipeline_matmul_f16_f32.f32acc->m;
+ shname = "F16_F32_M";
+ } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
+ p = ctx->device->pipeline_matmul_f16.f32acc->m;
+ shname = "F16_M";
+ }
+ } else if (shader_size == 2) {
+ if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
+ p = ctx->device->pipeline_matmul_f32->l;
+ shname = "F32_L";
+ } else if (std::is_same<float, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
+ p = ctx->device->pipeline_matmul_f32_f16->l;
+ shname = "F32_F16_L";
+ } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
+ p = ctx->device->pipeline_matmul_f16_f32.f32acc->l;
+ shname = "F16_F32_L";
+ } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
+ p = ctx->device->pipeline_matmul_f16.f32acc->l;
+ shname = "F16_L";
+ }
+ }
+ }
+
+ ggml_pipeline_request_descriptor_sets(ctx->device, p, num_it);
+ if (split_k > 1) {
+ ggml_pipeline_request_descriptor_sets(ctx->device, ctx->device->pipeline_matmul_split_k_reduce, num_it);
+
+ if (ctx->prealloc_split_k == nullptr || ctx->prealloc_split_k->size < sizeof(float) * d_ne * split_k) {
+ // Resize buffer
+ if (ctx->prealloc_split_k != nullptr) {
+ ggml_vk_destroy_buffer(ctx->prealloc_split_k);
+ }
+ ctx->prealloc_split_k = ggml_vk_create_buffer_check(ctx->device, sizeof(float) * d_ne * split_k, vk::MemoryPropertyFlagBits::eDeviceLocal);
+ }
+ }
+
+ ggml_pipeline_allocate_descriptor_sets(ctx->device);
+
+ vk_buffer d_X = ggml_vk_create_buffer_check(ctx->device, sizeof(X_TYPE) * x_ne, vk::MemoryPropertyFlagBits::eDeviceLocal);
+ vk_buffer d_Y = ggml_vk_create_buffer_check(ctx->device, sizeof(Y_TYPE) * y_ne, vk::MemoryPropertyFlagBits::eDeviceLocal);
+ vk_buffer d_D = ggml_vk_create_buffer_check(ctx->device, sizeof(float) * d_ne, vk::MemoryPropertyFlagBits::eDeviceLocal);
+
+ X_TYPE* x = (X_TYPE *) malloc(sizeof(X_TYPE) * x_ne);
+ Y_TYPE* y = (Y_TYPE *) malloc(sizeof(Y_TYPE) * y_ne);
+ float* d = (float *) malloc(sizeof(float) * d_ne);
+
+ for (size_t i = 0; i < x_ne; i++) {
+ if (std::is_same<float, X_TYPE>()) {
+ x[i] = (rand() / (float)RAND_MAX) * 2.0f - 1.0f;
+ // x[i] = 1.0f;
+ // x[i] = i + 1;
+ // x[i] = (i % k == i / k) ? 1.0f : 0.0f;
+ } else if (std::is_same<ggml_fp16_t, X_TYPE>()) {
+ x[i] = ggml_fp32_to_fp16((rand() / (float)RAND_MAX) * 2.0f - 1.0f);
+ // x[i] = ggml_fp32_to_fp16(1.0f);
+ // x[i] = ggml_fp32_to_fp16(i + 1);
+ // x[i] = ggml_fp32_to_fp16((i % k == i / k) ? 1.0f : 0.0f);
+ } else {
+ GGML_ABORT("fatal error");
+ }
+ }
+ for (size_t i = 0; i < y_ne; i++) {
+ if (std::is_same<float, Y_TYPE>()) {
+ y[i] = (rand() / (float)RAND_MAX) * 2.0f - 1.0f;
+ // y[i] = (i % k == i / k) ? 1.0f : 0.0f;
+ // y[i] = i + 1;
+ } else if (std::is_same<ggml_fp16_t, Y_TYPE>()) {
+ y[i] = ggml_fp32_to_fp16((rand() / (float)RAND_MAX) * 2.0f - 1.0f);
+ // y[i] = ggml_fp32_to_fp16((i % k == i / k) ? 1.0f : 0.0f);
+ // y[i] = ggml_fp32_to_fp16(i + 1);
+ } else {
+ GGML_ABORT("fatal error");
+ }
+ }
+
+ ggml_vk_buffer_write(d_X, 0, x, sizeof(X_TYPE) * k * m * batch);
+ ggml_vk_buffer_write(d_Y, 0, y, sizeof(Y_TYPE) * k * n * batch);
+
+ vk_context subctx = ggml_vk_create_context(ctx, ctx->device->compute_queue);
+ ggml_vk_ctx_begin(ctx->device, subctx);
+ for (size_t i = 0; i < num_it; i++) {
+ ggml_vk_matmul(
+ ctx, subctx, p, ggml_vk_subbuffer(d_X), ggml_vk_subbuffer(d_Y), ggml_vk_subbuffer(d_D), ggml_vk_subbuffer(ctx->prealloc_split_k),
+ m, n, k,
+ k, k, m, k*m, k*n, m*n,
+ split_k, batch, batch, batch, 1, 1
+ );
+ }
+ ggml_vk_ctx_end(subctx);
+
+ auto begin = std::chrono::high_resolution_clock::now();
+ ggml_vk_submit(subctx, ctx->fence);
+ VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_test_matmul waitForFences");
+ ctx->device->device.resetFences({ ctx->fence });
+
+ auto end = std::chrono::high_resolution_clock::now();
+ double time = std::chrono::duration_cast<std::chrono::microseconds>(end-begin).count() / 1000.0;
+
+ // copy dst to host
+ ggml_vk_buffer_read(d_D, 0, d, sizeof(float) * d_ne);
+
+ float * d_chk = (float *) malloc(sizeof(float) * d_ne);
+
+ ggml_init_params iparams = {
+ /*.mem_size =*/ 1024*1024*1024,
+ /*.mem_buffer =*/ NULL,
+ /*.no_alloc =*/ true,
+ };
+
+ ggml_context * ggml_ctx = ggml_init(iparams);
+
+ ggml_type src0_type;
+ ggml_type src1_type;
+
+ if (std::is_same<float, X_TYPE>()) {
+ src0_type = GGML_TYPE_F32;
+ } else if (std::is_same<ggml_fp16_t, X_TYPE>()) {
+ src0_type = GGML_TYPE_F16;
+ } else {
+ GGML_ABORT("fatal error");
+ }
+ if (std::is_same<float, Y_TYPE>()) {
+ src1_type = GGML_TYPE_F32;
+ } else if (std::is_same<ggml_fp16_t, Y_TYPE>()) {
+ src1_type = GGML_TYPE_F16;
+ } else {
+ GGML_ABORT("fatal error");
+ }
+
+ ggml_tensor * src0_ggml = ggml_new_tensor_3d(ggml_ctx, src0_type, k, m, batch);
+ ggml_tensor * src1_ggml = ggml_new_tensor_3d(ggml_ctx, src1_type, k, n, batch);
+ ggml_tensor * tensor_ggml = ggml_mul_mat(ggml_ctx, src0_ggml, src1_ggml);
+
+ src0_ggml->data = x;
+ src1_ggml->data = y;
+ tensor_ggml->data = d_chk;
+
+ ggml_cgraph * cgraph = ggml_new_graph(ggml_ctx);
+ ggml_build_forward_expand(cgraph, tensor_ggml);
+
+ ggml_graph_compute_with_ctx(ggml_ctx, cgraph, 1);
+
+ ggml_free(ggml_ctx);
+
+ double avg_err = 0.0;
+ int first_err_n = -1;
+ int first_err_m = -1;
+ int first_err_b = -1;
+
+ for (size_t i = 0; i < m*n*batch; i++) {
+ double err = std::fabs(d[i] - d_chk[i]);
+ avg_err += err;
+
+ if ((err > 0.05f || std::isnan(err)) && first_err_n == -1) {
+ first_err_b = i / (m * n);
+ first_err_n = (i % (m * n)) / m;
+ first_err_m = (i % (m * n)) % m;
+ }
+ }
+
+    avg_err /= m * n * batch;
+
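+    // 2*m*n*k floating-point operations per matmul, accumulated over batch and num_it, reported in TFLOPS.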
+ double tflops = 2.0*m*n*k*batch*num_it / (time / 1000.0) / (1000.0*1000.0*1000.0*1000.0);
+
+ std::cerr << "TEST " << shname << " m=" << m << " n=" << n << " k=" << k << " batch=" << batch << " split_k=" << split_k << " matmul " << time / num_it << "ms " << tflops << " TFLOPS avg_err=" << avg_err << std::endl;
+
+ if (avg_err > 0.1 || std::isnan(avg_err)) {
+ std::cerr << "m = " << first_err_m << " n = " << first_err_n << " b = " << first_err_b << std::endl;
+ std::cerr << "Actual result: " << std::endl << std::endl;
+ ggml_vk_print_matrix_area(d, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);
+ std::cerr << "Expected result: " << std::endl << std::endl;
+ ggml_vk_print_matrix_area(d_chk, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);
+
+ if (split_k > 1) {
+ float * split_k_buf = (float *) malloc(sizeof(float) * d_ne * split_k);
+ ggml_vk_buffer_read(ctx->prealloc_split_k, 0, split_k_buf, sizeof(float) * d_ne * split_k);
+
+ std::cerr << "d_buf0: " << std::endl << std::endl;
+ ggml_vk_print_matrix_area(split_k_buf, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);
+
+ std::cerr << "d_buf1: " << std::endl << std::endl;
+ ggml_vk_print_matrix_area(split_k_buf + d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);
+
+ std::cerr << "d_buf2: " << std::endl << std::endl;
+ ggml_vk_print_matrix_area(split_k_buf + 2 * d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);
+
+ std::cerr << "d_buf3: " << std::endl << std::endl;
+ ggml_vk_print_matrix_area(split_k_buf + 3 * d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);
+
+ free(split_k_buf);
+ }
+ }
+
+ free(d_chk);
+
+ ggml_vk_queue_cleanup(ctx->device, ctx->device->transfer_queue);
+ ggml_vk_queue_cleanup(ctx->device, ctx->device->compute_queue);
+
+ ggml_vk_destroy_buffer(d_X);
+ ggml_vk_destroy_buffer(d_Y);
+ ggml_vk_destroy_buffer(d_D);
+
+ ggml_pipeline_cleanup(p);
+ ggml_pipeline_cleanup(ctx->device->pipeline_matmul_split_k_reduce);
+
+ free(x);
+ free(y);
+ free(d);
+}
+
+static void ggml_vk_print_tensor_area(const ggml_tensor * tensor, int i0, int i1, int i2, int i3) {
+ if (tensor->type != GGML_TYPE_F32 && tensor->type != GGML_TYPE_F16) {
+ return;
+ }
+ i0 = std::max(i0, 5);
+ i1 = std::max(i1, 5);
+ i2 = std::max(i2, 0);
+ i3 = std::max(i3, 0);
+ fprintf(stderr, " ");
+ for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
+ fprintf(stderr, "%7d ", idx1);
+ }
+ fprintf(stderr, "\n");
+ for (int idx0 = i0 - 5; idx0 < i0 + 5; idx0++) {
+ fprintf(stderr, "%7d: ", idx0);
+ for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
+ if (idx0 >= 0 && idx0 < tensor->ne[0] && idx1 >= 0 && idx1 < tensor->ne[1] && i2 >= 0 && i2 < tensor->ne[2] && i3 >= 0 && i3 < tensor->ne[3]) {
+ float val;
+ if (tensor->type == GGML_TYPE_F32) {
+ val = *(float *) ((char *) tensor->data + i3*tensor->nb[3] + i2*tensor->nb[2] + idx1*tensor->nb[1] + idx0*tensor->nb[0]);
+ } else if (tensor->type == GGML_TYPE_F16) {
+ val = ggml_fp16_to_fp32(*(ggml_fp16_t *) ((char *) tensor->data + i3*tensor->nb[3] + i2*tensor->nb[2] + idx1*tensor->nb[1] + idx0*tensor->nb[0]));
+ } else {
+ GGML_ABORT("fatal error");
+ }
+ fprintf(stderr, "% 7.2f ", val);
+ } else {
+ fprintf(stderr, " ");
+ }
+ }
+ fprintf(stderr, "\n");
+ }
+}
+
+static void ggml_vk_quantize_data(const float * from, void * to, size_t ne, ggml_type quant) {
+ ggml_quantize_chunk(quant, from, to, 0, 1, ne, nullptr);
+}
+
+static void ggml_vk_dequantize_data(const void * from, float * to, size_t ne, ggml_type quant) {
+ if (quant == GGML_TYPE_F32) {
+ memcpy(to, from, sizeof(float) * ne);
+ return;
+ }
+
+ const auto * tt = ggml_get_type_traits(quant);
+
+ ggml_to_float_t dequant_fn = tt->to_float;
+
+ dequant_fn(from, to, ne);
+}
+
+static void ggml_vk_test_dequant(ggml_backend_vk_context * ctx, size_t ne, ggml_type quant) {
+ VK_LOG_DEBUG("ggml_vk_test_dequant(" << ne << ")");
+ const size_t x_sz = sizeof(float) * ne;
+ const size_t x_sz_f16 = sizeof(ggml_fp16_t) * ne;
+ const size_t qx_sz = ne * ggml_type_size(quant)/ggml_blck_size(quant);
+ float * x = (float *) malloc(x_sz);
+ void * qx = malloc(qx_sz);
+ vk_buffer qx_buf = ggml_vk_create_buffer_check(ctx->device, qx_sz, vk::MemoryPropertyFlagBits::eDeviceLocal);
+ vk_buffer x_buf = ggml_vk_create_buffer_check(ctx->device, x_sz_f16, vk::MemoryPropertyFlagBits::eDeviceLocal);
+ float * x_ref = (float *) malloc(x_sz);
+ ggml_fp16_t * x_chk = (ggml_fp16_t *) malloc(x_sz_f16);
+
+ for (size_t i = 0; i < ne; i++) {
+ x[i] = rand() / (float)RAND_MAX;
+ }
+
+ vk_pipeline p = ggml_vk_get_to_fp16(ctx, quant);
+
+ ggml_vk_quantize_data(x, qx, ne, quant);
+ ggml_vk_dequantize_data(qx, x_ref, ne, quant);
+
+ ggml_pipeline_request_descriptor_sets(ctx->device, p, 1);
+
+ ggml_pipeline_allocate_descriptor_sets(ctx->device);
+
+ ggml_vk_buffer_write(qx_buf, 0, qx, qx_sz);
+
+ vk_context subctx = ggml_vk_create_context(ctx, ctx->device->compute_queue);
+ ggml_vk_ctx_begin(ctx->device, subctx);
+ const std::vector<uint32_t> pc = { 1, (uint32_t)ne, (uint32_t)ne, (uint32_t)ne, (uint32_t)ne };
+ ggml_vk_dispatch_pipeline(ctx, subctx, p, { vk_subbuffer{ qx_buf, 0, qx_sz }, vk_subbuffer{ x_buf, 0, x_sz_f16 } }, pc.size() * sizeof(int), pc.data(), { (uint32_t)ne, 1, 1});
+ ggml_vk_ctx_end(subctx);
+
+ auto begin = std::chrono::high_resolution_clock::now();
+
+ ggml_vk_submit(subctx, ctx->fence);
+ VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_test_dequant waitForFences");
+ ctx->device->device.resetFences({ ctx->fence });
+
+ auto end = std::chrono::high_resolution_clock::now();
+
+ double ms_dequant = std::chrono::duration_cast<std::chrono::microseconds>(end-begin).count() / 1000.0;
+ ggml_vk_buffer_read(x_buf, 0, x_chk, x_sz_f16);
+
+ int first_err = -1;
+
+ double avg_err = 0.0;
+ for (size_t i = 0; i < ne; i++) {
+ double error = std::fabs(x_ref[i] - ggml_fp16_to_fp32(x_chk[i]));
+ avg_err += error;
+
+ if (first_err < 0 && error > 0.05) {
+ first_err = i;
+ }
+ }
+
+ avg_err /= ne;
+
+ std::cerr << "TEST DEQUANT " << ggml_type_name(quant) << " time=" << ms_dequant << "ms avg_err=" << avg_err << std::endl;
+
+ if (avg_err > 0.1) {
+ std::cerr << "first_error = " << first_err << std::endl;
+ std::cerr << "Actual result: " << std::endl << std::endl;
+ for (int i = std::max(0, first_err - 5); i < std::min((int)ne, first_err + 5); i++) {
+ std::cerr << ggml_fp16_to_fp32(x_chk[i]) << ", ";
+ }
+ std::cerr << std::endl << "Expected result: " << std::endl << std::endl;
+ for (int i = std::max(0, first_err - 5); i < std::min((int)ne, first_err + 5); i++) {
+ std::cerr << x_ref[i] << ", ";
+ }
+ std::cerr << std::endl;
+ }
+
+ ggml_vk_destroy_buffer(x_buf);
+ ggml_vk_destroy_buffer(qx_buf);
+
+ free(x);
+ free(qx);
+ free(x_ref);
+ free(x_chk);
+}
+
+static void ggml_vk_test_dequant_matmul(ggml_backend_vk_context * ctx, size_t m, size_t n, size_t k, size_t batch, size_t num_it, size_t split_k, size_t shader_size, ggml_type quant) {
+ VK_LOG_DEBUG("ggml_vk_test_dequant_matmul(" << m << ", " << n << ", " << k << ", " << batch << ", " << num_it << ", " << split_k << ", " << ggml_type_name(quant) << ")");
+ const size_t x_ne = m * k * batch;
+ const size_t y_ne = k * n * batch;
+ const size_t d_ne = m * n * batch;
+
+ vk_pipeline p;
+ std::string shname;
+ if (shader_size == 0) {
+ p = ctx->device->fp16 ? ctx->device->pipeline_dequant_mul_mat_mat[quant].f16acc->a_s : ctx->device->pipeline_dequant_mul_mat_mat[quant].f32acc->a_s;
+ shname = std::string(ggml_type_name(quant)) + "_ALIGNED_S";
+ } else if (shader_size == 1) {
+ p = ctx->device->fp16 ? ctx->device->pipeline_dequant_mul_mat_mat[quant].f16acc->a_m : ctx->device->pipeline_dequant_mul_mat_mat[quant].f32acc->a_m;
+ shname = std::string(ggml_type_name(quant)) + "_ALIGNED_M";
+ } else if (shader_size == 2) {
+ p = ctx->device->fp16 ? ctx->device->pipeline_dequant_mul_mat_mat[quant].f16acc->a_l : ctx->device->pipeline_dequant_mul_mat_mat[quant].f32acc->a_l;
+ shname = std::string(ggml_type_name(quant)) + "_ALIGNED_L";
+ } else {
+ GGML_ASSERT(0);
+ }
+
+ const size_t kpad = ggml_vk_align_size(k, p->align);
+
+ if (k != kpad) {
+ if (shader_size == 0) {
+ p = ctx->device->fp16 ? ctx->device->pipeline_dequant_mul_mat_mat[quant].f16acc->s : ctx->device->pipeline_dequant_mul_mat_mat[quant].f32acc->s;
+ shname = std::string(ggml_type_name(quant)) + "_S";
+ } else if (shader_size == 1) {
+ p = ctx->device->fp16 ? ctx->device->pipeline_dequant_mul_mat_mat[quant].f16acc->m : ctx->device->pipeline_dequant_mul_mat_mat[quant].f32acc->m;
+ shname = std::string(ggml_type_name(quant)) + "_M";
+ } else if (shader_size == 2) {
+ p = ctx->device->fp16 ? ctx->device->pipeline_dequant_mul_mat_mat[quant].f16acc->l : ctx->device->pipeline_dequant_mul_mat_mat[quant].f32acc->l;
+ shname = std::string(ggml_type_name(quant)) + "_L";
+ } else {
+ GGML_ASSERT(0);
+ }
+ }
+
+ const size_t x_sz = sizeof(float) * x_ne;
+ const size_t y_sz = sizeof(float) * y_ne;
+ const size_t qx_sz = x_ne * ggml_type_size(quant)/ggml_blck_size(quant);
+ const size_t d_sz = sizeof(float) * d_ne;
+ float * x = (float *) malloc(x_sz);
+ float * y = (float *) malloc(y_sz);
+ void * qx = malloc(qx_sz);
+ vk_buffer qx_buf = ggml_vk_create_buffer_check(ctx->device, qx_sz, vk::MemoryPropertyFlagBits::eDeviceLocal);
+ vk_buffer y_buf = ggml_vk_create_buffer_check(ctx->device, y_sz, vk::MemoryPropertyFlagBits::eDeviceLocal);
+ vk_buffer d_buf = ggml_vk_create_buffer_check(ctx->device, d_sz, vk::MemoryPropertyFlagBits::eDeviceLocal);
+ float * d = (float *) malloc(d_sz);
+ float * d_chk = (float *) malloc(d_sz);
+
+ for (size_t i = 0; i < x_ne; i++) {
+ x[i] = (rand() / (float)RAND_MAX) * 2.0f - 1.0f;
+ }
+
+ ggml_vk_quantize_data(x, qx, x_ne, quant);
+
+ for (size_t i = 0; i < y_ne; i++) {
+ // y[i] = rand() / (float)RAND_MAX;
+ y[i] = (i % k == i / k) ? 1.0f : 0.0f;
+ }
+
+ ggml_pipeline_request_descriptor_sets(ctx->device, p, num_it);
+ if (split_k > 1) {
+ ggml_pipeline_request_descriptor_sets(ctx->device, ctx->device->pipeline_matmul_split_k_reduce, num_it);
+
+ if (ctx->prealloc_split_k == nullptr || ctx->prealloc_split_k->size < sizeof(float) * d_ne * split_k) {
+ // Resize buffer
+ if (ctx->prealloc_split_k != nullptr) {
+ ggml_vk_destroy_buffer(ctx->prealloc_split_k);
+ }
+ ctx->prealloc_split_k = ggml_vk_create_buffer_check(ctx->device, sizeof(float) * d_ne * split_k, vk::MemoryPropertyFlagBits::eDeviceLocal);
+ }
+ }
+
+ ggml_pipeline_allocate_descriptor_sets(ctx->device);
+
+ ggml_vk_buffer_write(qx_buf, 0, qx, qx_sz);
+ ggml_vk_buffer_write(y_buf, 0, y, y_sz);
+
+ vk_context subctx = ggml_vk_create_context(ctx, ctx->device->compute_queue);
+ ggml_vk_ctx_begin(ctx->device, subctx);
+ for (size_t i = 0; i < num_it; i++) {
+ ggml_vk_matmul(
+ ctx, subctx, p, ggml_vk_subbuffer(qx_buf), ggml_vk_subbuffer(y_buf), ggml_vk_subbuffer(d_buf), ggml_vk_subbuffer(ctx->prealloc_split_k),
+ m, n, k,
+ k, k, m, k*m, k*n, m*n,
+ split_k, batch, batch, batch, 1, 1
+ );
+ }
+ ggml_vk_ctx_end(subctx);
+
+ auto begin = std::chrono::high_resolution_clock::now();
+
+ ggml_vk_submit(subctx, ctx->fence);
+ VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_test_dequant waitForFences");
+ ctx->device->device.resetFences({ ctx->fence });
+
+ auto end = std::chrono::high_resolution_clock::now();
+
+ double time_ms = std::chrono::duration_cast<std::chrono::microseconds>(end-begin).count() / 1000.0;
+ ggml_vk_buffer_read(d_buf, 0, d, d_sz);
+
+ ggml_init_params iparams = {
+ /*.mem_size =*/ 1024*1024*1024,
+ /*.mem_buffer =*/ NULL,
+ /*.no_alloc =*/ true,
+ };
+
+ ggml_context * ggml_ctx = ggml_init(iparams);
+
+ ggml_tensor * src0_ggml = ggml_new_tensor_3d(ggml_ctx, quant, k, m, batch);
+ ggml_tensor * src1_ggml = ggml_new_tensor_3d(ggml_ctx, GGML_TYPE_F32, k, n, batch);
+ ggml_tensor * tensor_ggml = ggml_mul_mat(ggml_ctx, src0_ggml, src1_ggml);
+
+ src0_ggml->data = qx;
+ src1_ggml->data = y;
+ tensor_ggml->data = d_chk;
+
+ ggml_cgraph * cgraph = ggml_new_graph(ggml_ctx);
+ ggml_build_forward_expand(cgraph, tensor_ggml);
+
+ ggml_graph_compute_with_ctx(ggml_ctx, cgraph, 1);
+
+ ggml_free(ggml_ctx);
+
+ double avg_err = 0.0;
+ int first_err_n = -1;
+ int first_err_m = -1;
+ int first_err_b = -1;
+
+ for (size_t i = 0; i < m*n*batch; i++) {
+ double err = std::fabs(d[i] - d_chk[i]);
+ avg_err += err;
+
+ if ((err > 0.05f || std::isnan(err)) && first_err_n == -1) {
+ first_err_b = i / (m * n);
+ first_err_n = (i % (m * n)) / m;
+ first_err_m = (i % (m * n)) % m;
+ }
+ }
+
+    avg_err /= m * n * batch;
+
+ double tflops = 2.0*m*n*k*batch*num_it / (time_ms / 1000.0) / (1000.0*1000.0*1000.0*1000.0);
+
+ std::cerr << "TEST MMQ " << shname << " m=" << m << " n=" << n << " k=" << k << " batch=" << batch << " split_k=" << split_k << " matmul " << time_ms / num_it << "ms " << tflops << " TFLOPS avg_err=" << avg_err << std::endl;
+
+ if (avg_err > 0.01 || std::isnan(avg_err)) {
+ std::cerr << "m = " << first_err_m << " n = " << first_err_n << " b = " << first_err_b << std::endl;
+ std::cerr << "Actual result: " << std::endl << std::endl;
+ ggml_vk_print_matrix_area(d, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);
+ std::cerr << std::endl;
+ std::cerr << "Expected result: " << std::endl << std::endl;
+ ggml_vk_print_matrix_area(d_chk, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);
+
+ if (split_k > 1) {
+ float * split_k_buf = (float *) malloc(sizeof(float) * d_ne * split_k);
+ ggml_vk_buffer_read(ctx->prealloc_split_k, 0, split_k_buf, sizeof(float) * d_ne * split_k);
+
+ std::cerr << "d_buf0: " << std::endl << std::endl;
+ ggml_vk_print_matrix_area(split_k_buf, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);
+
+ std::cerr << "d_buf1: " << std::endl << std::endl;
+ ggml_vk_print_matrix_area(split_k_buf + d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);
+
+ std::cerr << "d_buf2: " << std::endl << std::endl;
+ ggml_vk_print_matrix_area(split_k_buf + 2 * d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);
+
+ std::cerr << "d_buf3: " << std::endl << std::endl;
+ ggml_vk_print_matrix_area(split_k_buf + 3 * d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);
+
+ free(split_k_buf);
+ }
+ }
+
+ ggml_vk_destroy_buffer(qx_buf);
+ ggml_vk_destroy_buffer(y_buf);
+ ggml_vk_destroy_buffer(d_buf);
+
+ free(x);
+ free(qx);
+ free(y);
+ free(d);
+ free(d_chk);
+}
+#endif
+
+static void ggml_vk_preallocate_buffers(ggml_backend_vk_context * ctx) {
+#if defined(GGML_VULKAN_RUN_TESTS)
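+    // Benchmark shapes, consumed as (m, n, k) triples by the loop below.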
+ const std::vector<size_t> vals {
+ 512, 512, 128,
+ 128, 512, 512,
+ 4096, 512, 4096,
+ 11008, 512, 4096,
+ 4096, 512, 11008,
+ 32000, 512, 4096,
+ 8, 8, 8,
+ 100, 46, 576,
+ 623, 111, 128,
+ 100, 46, 558,
+ 512, 1, 256,
+ 128, 110, 622,
+ 511, 511, 127,
+ 511, 511, 7,
+ 511, 511, 17,
+ 49, 49, 128,
+ 128, 49, 49,
+ 4096, 49, 4096,
+ };
+ const size_t num_it = 100;
+
+ for (size_t i = 0; i < vals.size(); i += 3) {
+ ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 0);
+ ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 1);
+ ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 2);
+ std::cerr << '\n';
+ ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 2, 0);
+ ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 2, 1);
+ ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 2, 2);
+ std::cerr << '\n';
+ ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 0);
+ ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 1);
+ ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 2);
+ std::cerr << '\n' << std::endl;
+
+ if (vals[i + 2] % 32 == 0) {
+ ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 0, GGML_TYPE_Q4_0);
+ ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 1, GGML_TYPE_Q4_0);
+ ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 2, GGML_TYPE_Q4_0);
+ std::cerr << '\n';
+ ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 2, 0, GGML_TYPE_Q4_0);
+ ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 2, 1, GGML_TYPE_Q4_0);
+ ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 2, 2, GGML_TYPE_Q4_0);
+ std::cerr << '\n';
+ ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 0, GGML_TYPE_Q4_0);
+ ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 1, GGML_TYPE_Q4_0);
+ ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 2, GGML_TYPE_Q4_0);
+ std::cerr << '\n' << std::endl;
+ }
+
+ if (vals[i + 2] % 256 == 0) {
+ ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 0, GGML_TYPE_Q4_K);
+ ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 1, GGML_TYPE_Q4_K);
+ ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 2, GGML_TYPE_Q4_K);
+ std::cerr << '\n';
+ ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 2, 0, GGML_TYPE_Q4_K);
+ ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 2, 1, GGML_TYPE_Q4_K);
+ ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 2, 2, GGML_TYPE_Q4_K);
+ std::cerr << '\n';
+ ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 0, GGML_TYPE_Q4_K);
+ ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 1, GGML_TYPE_Q4_K);
+ ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 2, GGML_TYPE_Q4_K);
+ std::cerr << '\n' << std::endl;
+ }
+ }
+
+ GGML_ABORT("fatal error");
+#endif
+
+ if (ctx->prealloc_x == nullptr || (ctx->prealloc_size_x > 0 && ctx->prealloc_x->size < ctx->prealloc_size_x)) {
+ VK_LOG_MEMORY("ggml_vk_preallocate_buffers(x_size: " << ctx->prealloc_size_x << ")");
+ // Resize buffer
+ if (ctx->prealloc_x != nullptr) {
+ ggml_vk_destroy_buffer(ctx->prealloc_x);
+ }
+ ctx->prealloc_x = ggml_vk_create_buffer_device(ctx->device, ctx->prealloc_size_x);
+ }
+ if (ctx->prealloc_y == nullptr || (ctx->prealloc_size_y > 0 && ctx->prealloc_y->size < ctx->prealloc_size_y)) {
+ VK_LOG_MEMORY("ggml_vk_preallocate_buffers(y_size: " << ctx->prealloc_size_y << ")");
+ // Resize buffer
+ if (ctx->prealloc_y != nullptr) {
+ ggml_vk_destroy_buffer(ctx->prealloc_y);
+ }
+ ctx->prealloc_y = ggml_vk_create_buffer_device(ctx->device, ctx->prealloc_size_y);
+ }
+ if (ctx->prealloc_split_k == nullptr || (ctx->prealloc_size_split_k > 0 && ctx->prealloc_split_k->size < ctx->prealloc_size_split_k)) {
+ VK_LOG_MEMORY("ggml_vk_preallocate_buffers(split_k_size: " << ctx->prealloc_size_split_k << ")");
+ // Resize buffer
+ if (ctx->prealloc_split_k != nullptr) {
+ ggml_vk_destroy_buffer(ctx->prealloc_split_k);
+ }
+ ctx->prealloc_split_k = ggml_vk_create_buffer_device(ctx->device, ctx->prealloc_size_split_k);
+ }
+}
+
+static bool ggml_vk_compute_forward(ggml_backend_vk_context* ctx, ggml_tensor* tensor, int tensor_idx, bool use_fence);
+
+// Returns true if the node has enqueued work into the queue, false otherwise.
+// If submit is true, all operations queued so far are submitted to Vulkan, overlapping command list creation with GPU execution.
+static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_tensor * node, int node_idx, ggml_tensor *node_begin, int node_idx_begin, bool dryrun, bool last_node, bool submit){
+ if (ggml_is_empty(node) || !node->buffer) {
+ return false;
+ }
+
+ VK_LOG_DEBUG("ggml_vk_build_graph(" << node << ", " << ggml_op_name(node->op) << ")");
+ ctx->semaphore_idx = 0;
+
+ const ggml_tensor * src0 = node->src[0];
+ const ggml_tensor * src1 = node->src[1];
+ const ggml_tensor * src2 = node->src[2];
+ const ggml_tensor * src3 = node->src[3];
+
+ switch (node->op) {
+ // Return on empty ops to avoid generating a compute_ctx and setting exit_tensor
+ case GGML_OP_RESHAPE:
+ case GGML_OP_VIEW:
+ case GGML_OP_PERMUTE:
+ case GGML_OP_TRANSPOSE:
+ case GGML_OP_NONE:
+ return false;
+ case GGML_OP_UNARY:
+ switch (ggml_get_unary_op(node)) {
+ case GGML_UNARY_OP_SILU:
+ case GGML_UNARY_OP_GELU:
+ case GGML_UNARY_OP_GELU_QUICK:
+ case GGML_UNARY_OP_RELU:
+ case GGML_UNARY_OP_TANH:
+ break;
+ default:
+ return false;
+ }
+ break;
+ case GGML_OP_REPEAT:
+ case GGML_OP_GET_ROWS:
+ case GGML_OP_ADD:
+ case GGML_OP_ACC:
+ case GGML_OP_MUL:
+ case GGML_OP_DIV:
+ case GGML_OP_CONCAT:
+ case GGML_OP_UPSCALE:
+ case GGML_OP_SCALE:
+ case GGML_OP_SQR:
+ case GGML_OP_SIN:
+ case GGML_OP_COS:
+ case GGML_OP_CLAMP:
+ case GGML_OP_PAD:
+ case GGML_OP_CPY:
+ case GGML_OP_CONT:
+ case GGML_OP_DUP:
+ case GGML_OP_NORM:
+ case GGML_OP_GROUP_NORM:
+ case GGML_OP_RMS_NORM:
+ case GGML_OP_DIAG_MASK_INF:
+ case GGML_OP_SOFT_MAX:
+ case GGML_OP_ROPE:
+ case GGML_OP_MUL_MAT:
+ case GGML_OP_MUL_MAT_ID:
+ case GGML_OP_ARGSORT:
+ case GGML_OP_SUM_ROWS:
+ case GGML_OP_IM2COL:
+ case GGML_OP_TIMESTEP_EMBEDDING:
+ case GGML_OP_POOL_2D:
+ case GGML_OP_RWKV_WKV6:
+ case GGML_OP_LEAKY_RELU:
+ case GGML_OP_FLASH_ATTN_EXT:
+ break;
+ default:
+ std::cerr << "ggml_vulkan: Error: Missing op: " << ggml_op_name(node->op) << std::endl;
+ GGML_ABORT("fatal error");
+ return false;
+ }
+
+ vk_context compute_ctx;
+
+ if (!dryrun) {
+ if (ctx->compute_ctx.expired()) {
+ compute_ctx = ggml_vk_create_context(ctx, ctx->device->compute_queue);
+ ctx->compute_ctx = compute_ctx;
+ ggml_vk_ctx_begin(ctx->device, compute_ctx);
+ } else {
+ compute_ctx = ctx->compute_ctx.lock();
+ }
+ } else {
+ switch (node->op) {
+ case GGML_OP_REPEAT:
+ case GGML_OP_ACC:
+ case GGML_OP_GET_ROWS:
+ case GGML_OP_ADD:
+ case GGML_OP_MUL:
+ case GGML_OP_DIV:
+ case GGML_OP_CONCAT:
+ case GGML_OP_UPSCALE:
+ case GGML_OP_SCALE:
+ case GGML_OP_SQR:
+ case GGML_OP_SIN:
+ case GGML_OP_COS:
+ case GGML_OP_CLAMP:
+ case GGML_OP_PAD:
+ case GGML_OP_CPY:
+ case GGML_OP_CONT:
+ case GGML_OP_DUP:
+ case GGML_OP_NORM:
+ case GGML_OP_GROUP_NORM:
+ case GGML_OP_RMS_NORM:
+ case GGML_OP_UNARY:
+ case GGML_OP_DIAG_MASK_INF:
+ case GGML_OP_SOFT_MAX:
+ case GGML_OP_ROPE:
+ case GGML_OP_ARGSORT:
+ case GGML_OP_SUM_ROWS:
+ case GGML_OP_IM2COL:
+ case GGML_OP_TIMESTEP_EMBEDDING:
+ case GGML_OP_POOL_2D:
+ case GGML_OP_LEAKY_RELU:
+ {
+ // These operations all go through ggml_vk_op_f32, so short-circuit and
+ // do the only thing needed for the dryrun.
+ vk_pipeline pipeline = ggml_vk_op_get_pipeline(ctx, src0, src1, src2, node, node->op);
+ ggml_pipeline_request_descriptor_sets(ctx->device, pipeline, 1);
+ return false;
+ }
+ default:
+ break;
+ }
+ }
+
+ switch (node->op) {
+ case GGML_OP_REPEAT:
+ ggml_vk_repeat(ctx, compute_ctx, src0, node, dryrun);
+
+ break;
+ case GGML_OP_ACC:
+ ggml_vk_acc(ctx, compute_ctx, src0, src1, node, dryrun);
+
+ break;
+ case GGML_OP_GET_ROWS:
+ ggml_vk_get_rows(ctx, compute_ctx, src0, src1, node, dryrun);
+
+ break;
+ case GGML_OP_ADD:
+ ggml_vk_add(ctx, compute_ctx, src0, src1, node, dryrun);
+
+ break;
+ case GGML_OP_MUL:
+ ggml_vk_mul(ctx, compute_ctx, src0, src1, node, dryrun);
+
+ break;
+ case GGML_OP_DIV:
+ ggml_vk_div(ctx, compute_ctx, src0, src1, node, dryrun);
+
+ break;
+ case GGML_OP_CONCAT:
+ ggml_vk_concat(ctx, compute_ctx, src0, src1, node, dryrun);
+
+ break;
+ case GGML_OP_UPSCALE:
+ ggml_vk_upscale(ctx, compute_ctx, src0, node, dryrun);
+
+ break;
+ case GGML_OP_SCALE:
+ ggml_vk_scale(ctx, compute_ctx, src0, node, dryrun);
+
+ break;
+ case GGML_OP_SQR:
+ ggml_vk_sqr(ctx, compute_ctx, src0, node, dryrun);
+
+ break;
+ case GGML_OP_SIN:
+ ggml_vk_sin(ctx, compute_ctx, src0, node, dryrun);
+
+ break;
+ case GGML_OP_COS:
+ ggml_vk_cos(ctx, compute_ctx, src0, node, dryrun);
+
+ break;
+ case GGML_OP_CLAMP:
+ ggml_vk_clamp(ctx, compute_ctx, src0, node, dryrun);
+
+ break;
+ case GGML_OP_PAD:
+ ggml_vk_pad(ctx, compute_ctx, src0, node, dryrun);
+
+ break;
+ case GGML_OP_CPY:
+ case GGML_OP_CONT:
+ case GGML_OP_DUP:
+ ggml_vk_cpy(ctx, compute_ctx, src0, node, dryrun);
+
+ break;
+ case GGML_OP_NORM:
+ ggml_vk_norm(ctx, compute_ctx, src0, node, dryrun);
+
+ break;
+ case GGML_OP_GROUP_NORM:
+ ggml_vk_group_norm(ctx, compute_ctx, src0, node, dryrun);
+
+ break;
+ case GGML_OP_RMS_NORM:
+ ggml_vk_rms_norm(ctx, compute_ctx, src0, node, dryrun);
+
+ break;
+ case GGML_OP_UNARY:
+ switch (ggml_get_unary_op(node)) {
+ case GGML_UNARY_OP_SILU:
+ case GGML_UNARY_OP_GELU:
+ case GGML_UNARY_OP_GELU_QUICK:
+ case GGML_UNARY_OP_RELU:
+ case GGML_UNARY_OP_TANH:
+ ggml_vk_unary(ctx, compute_ctx, src0, node, dryrun);
+ break;
+ default:
+ return false;
+ }
+ break;
+ case GGML_OP_DIAG_MASK_INF:
+ ggml_vk_diag_mask_inf(ctx, compute_ctx, src0, node, dryrun);
+
+ break;
+ case GGML_OP_SOFT_MAX:
+ ggml_vk_soft_max(ctx, compute_ctx, src0, src1, node, dryrun);
+
+ break;
+ case GGML_OP_ROPE:
+ ggml_vk_rope(ctx, compute_ctx, src0, src1, src2, node, dryrun);
+
+ break;
+ case GGML_OP_ARGSORT:
+ ggml_vk_argsort(ctx, compute_ctx, src0, node, dryrun);
+
+ break;
+ case GGML_OP_SUM_ROWS:
+ ggml_vk_sum_rows(ctx, compute_ctx, src0, node, dryrun);
+
+ break;
+ case GGML_OP_IM2COL:
+ ggml_vk_im2col(ctx, compute_ctx, src0, src1, node, dryrun);
+
+ break;
+ case GGML_OP_TIMESTEP_EMBEDDING:
+ ggml_vk_timestep_embedding(ctx, compute_ctx, src0, node, dryrun);
+
+ break;
+ case GGML_OP_POOL_2D:
+ ggml_vk_pool_2d(ctx, compute_ctx, src0, node, dryrun);
+
+ break;
+ case GGML_OP_LEAKY_RELU:
+ ggml_vk_leaky_relu(ctx, compute_ctx, src0, node, dryrun);
+
+ break;
+ case GGML_OP_MUL_MAT:
+ ggml_vk_mul_mat(ctx, compute_ctx, src0, src1, node, dryrun);
+
+ break;
+ case GGML_OP_MUL_MAT_ID:
+ ggml_vk_mul_mat_id(ctx, compute_ctx, src0, src1, src2, node, dryrun);
+
+ break;
+
+ case GGML_OP_FLASH_ATTN_EXT:
+ ggml_vk_flash_attn(ctx, compute_ctx, src0, src1, src2, src3, node, dryrun);
+
+ break;
+
+ case GGML_OP_RWKV_WKV6:
+ ggml_vk_rwkv_wkv6(ctx, compute_ctx, node, dryrun);
+
+ break;
+ default:
+ return false;
+ }
+
+ if (dryrun) {
+ return false;
+ }
+
+ ctx->tensor_ctxs[node_idx] = compute_ctx;
+
+#if defined(GGML_VULKAN_CHECK_RESULTS) || defined(GGML_VULKAN_PERF)
+ // Force context reset on each node so that each tensor ends up in its own context
+ // and can be run and compared to its CPU equivalent separately
+ last_node = true;
+#endif
+
+ if (submit || last_node) {
+ ggml_vk_ctx_end(compute_ctx);
+
+        // TODO probably it'd be better to pass an exit_node flag to ggml_vk_compute_forward
+ if (last_node) {
+ compute_ctx->exit_tensor_idx = node_idx_begin;
+ }
+ else {
+ compute_ctx->exit_tensor_idx = -1;
+ }
+
+ ctx->compute_ctx.reset();
+
+ bool ok = ggml_vk_compute_forward(ctx, node_begin, node_idx_begin, false);
+ if (!ok) {
+ if (node->op == GGML_OP_UNARY) {
+ std::cerr << __func__ << ": error: op not supported UNARY " << node->name << " (" << ggml_unary_op_name(static_cast<ggml_unary_op>(node->op_params[0])) << ")" << std::endl;
+ }
+ else {
+ std::cerr << __func__ << ": error: op not supported " << node->name << " (" << ggml_op_name(node->op) << ")" << std::endl;
+ }
+ }
+
+ }
+ return true;
+}
+
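+// Run the command buffers recorded for this tensor's context: perform the pending
+// staging-buffer copies, submit the recorded work and, if requested, wait on the
+// fence. Returns false for ops that are not handled here.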
+static bool ggml_vk_compute_forward(ggml_backend_vk_context * ctx, ggml_tensor * tensor, int tensor_idx, bool use_fence = true){
+ ggml_backend_buffer * buf = nullptr;
+
+ switch (tensor->op) {
+ case GGML_OP_ADD:
+ case GGML_OP_ACC:
+ case GGML_OP_GET_ROWS:
+ case GGML_OP_MUL:
+ case GGML_OP_DIV:
+ case GGML_OP_CONCAT:
+ case GGML_OP_UPSCALE:
+ case GGML_OP_SCALE:
+ case GGML_OP_SQR:
+ case GGML_OP_SIN:
+ case GGML_OP_COS:
+ case GGML_OP_CLAMP:
+ case GGML_OP_PAD:
+ case GGML_OP_CPY:
+ case GGML_OP_CONT:
+ case GGML_OP_DUP:
+ case GGML_OP_NORM:
+ case GGML_OP_GROUP_NORM:
+ case GGML_OP_RMS_NORM:
+ case GGML_OP_DIAG_MASK_INF:
+ case GGML_OP_SOFT_MAX:
+ case GGML_OP_ROPE:
+ case GGML_OP_RESHAPE:
+ case GGML_OP_VIEW:
+ case GGML_OP_PERMUTE:
+ case GGML_OP_TRANSPOSE:
+ case GGML_OP_NONE:
+ case GGML_OP_ARGSORT:
+ case GGML_OP_SUM_ROWS:
+ case GGML_OP_IM2COL:
+ case GGML_OP_TIMESTEP_EMBEDDING:
+ case GGML_OP_POOL_2D:
+ case GGML_OP_RWKV_WKV6:
+ case GGML_OP_LEAKY_RELU:
+ case GGML_OP_REPEAT:
+ buf = tensor->buffer;
+
+ break;
+ case GGML_OP_UNARY:
+ switch (ggml_get_unary_op(tensor)) {
+ case GGML_UNARY_OP_SILU:
+ case GGML_UNARY_OP_GELU:
+ case GGML_UNARY_OP_GELU_QUICK:
+ case GGML_UNARY_OP_RELU:
+ case GGML_UNARY_OP_TANH:
+ buf = tensor->buffer;
+ break;
+ default:
+ return false;
+ }
+ break;
+ case GGML_OP_MUL_MAT:
+ case GGML_OP_MUL_MAT_ID:
+ case GGML_OP_FLASH_ATTN_EXT:
+ buf = tensor->buffer;
+
+ break;
+ default:
+ return false;
+ }
+
+ if (buf == nullptr) {
+ return false;
+ }
+
+ VK_LOG_DEBUG("ggml_vk_compute_forward(" << tensor << ", name=" << tensor->name << ", op=" << ggml_op_name(tensor->op) << ", type=" << tensor->type << ", ne0=" << tensor->ne[0] << ", ne1=" << tensor->ne[1] << ", ne2=" << tensor->ne[2] << ", ne3=" << tensor->ne[3] << ", nb0=" << tensor->nb[0] << ", nb1=" << tensor->nb[1] << ", nb2=" << tensor->nb[2] << ", nb3=" << tensor->nb[3] << ", view_src=" << tensor->view_src << ", view_offs=" << tensor->view_offs << ")");
+
+ vk_context subctx = ctx->tensor_ctxs[tensor_idx].lock();
+
+ // always wait for the GPU work to be done for the last submit
+ if (tensor_idx == subctx->exit_tensor_idx) {
+ use_fence = true;
+ }
+
+ // Only run if ctx hasn't been submitted yet
+ if (!subctx->seqs.empty()) {
+#ifdef GGML_VULKAN_CHECK_RESULTS
+ ggml_vk_check_results_0(tensor);
+ use_fence = true;
+#endif
+
+ // Do staging buffer copies
+ for (auto& cpy : subctx->in_memcpys) {
+ memcpy(cpy.dst, cpy.src, cpy.n);
+ }
+
+ ggml_vk_submit(subctx, use_fence ? ctx->fence : vk::Fence{});
+
+ if (use_fence) {
+ VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_compute_forward waitForFences");
+
+ ctx->device->device.resetFences({ ctx->fence });
+ }
+#ifdef GGML_VULKAN_CHECK_RESULTS
+ ggml_vk_check_results_1(tensor);
+#endif
+ }
+
+ if (tensor_idx == subctx->exit_tensor_idx) {
+ // Do staging buffer copies
+ for (auto& cpy : subctx->out_memcpys) {
+ memcpy(cpy.dst, cpy.src, cpy.n);
+ }
+ subctx->in_memcpys.clear();
+ subctx->out_memcpys.clear();
+ }
+
+ return true;
+}
+
+// Clean up after graph processing is done
+static void ggml_vk_graph_cleanup(ggml_backend_vk_context * ctx) {
+ VK_LOG_DEBUG("ggml_vk_graph_cleanup()");
+ for (auto& buffer : ctx->gc.temp_buffers) {
+ ggml_vk_pool_free(ctx, buffer);
+ }
+ ctx->gc.temp_buffers.clear();
+
+ for (auto& dsr : ctx->device->pipeline_descriptor_set_requirements) {
+ vk_pipeline_ref plr = ctx->device->pipelines[dsr.first];
+
+ if (plr.expired()) {
+ continue;
+ }
+
+ vk_pipeline pl = plr.lock();
+ ggml_pipeline_cleanup(pl);
+ }
+
+ ggml_vk_queue_cleanup(ctx->device, ctx->device->compute_queue);
+ ggml_vk_queue_cleanup(ctx->device, ctx->device->transfer_queue);
+
+ for (size_t i = 0; i < ctx->gc.semaphores.size(); i++) {
+ ctx->device->device.destroySemaphore({ ctx->gc.semaphores[i].s });
+ }
+ ctx->gc.semaphores.clear();
+
+ for (size_t i = 0; i < ctx->gc.tl_semaphores.size(); i++) {
+ ctx->device->device.destroySemaphore({ ctx->gc.tl_semaphores[i].s });
+ }
+ ctx->gc.tl_semaphores.clear();
+ ctx->semaphore_idx = 0;
+
+ ctx->event_idx = 0;
+
+ for (auto& event : ctx->gc.events) {
+ ctx->device->device.resetEvent(event);
+ }
+
+ ctx->tensor_ctxs.clear();
+ ctx->gc.contexts.clear();
+ ctx->device->pipeline_descriptor_set_requirements.clear();
+}
+
+// Clean up on backend free
+static void ggml_vk_cleanup(ggml_backend_vk_context * ctx) {
+ VK_LOG_DEBUG("ggml_vk_cleanup(" << ctx->name << ")");
+ ggml_vk_graph_cleanup(ctx);
+
+ ggml_vk_destroy_buffer(ctx->prealloc_x);
+ ggml_vk_destroy_buffer(ctx->prealloc_y);
+ ggml_vk_destroy_buffer(ctx->prealloc_split_k);
+
+ for (auto& buffer : ctx->buffer_pool) {
+ ggml_vk_destroy_buffer(buffer);
+ }
+
+ ctx->prealloc_size_x = 0;
+ ctx->prealloc_size_y = 0;
+ ctx->prealloc_size_split_k = 0;
+
+ for (auto& event : ctx->gc.events) {
+ ctx->device->device.destroyEvent(event);
+ }
+ ctx->gc.events.clear();
+
+ ctx->device->device.destroyFence(ctx->fence);
+}
+
+static int ggml_vk_get_device_count() {
+ ggml_vk_instance_init();
+
+ return vk_instance.device_indices.size();
+}
+
+static void ggml_vk_get_device_description(int device, char * description, size_t description_size) {
+ ggml_vk_instance_init();
+
+ std::vector<vk::PhysicalDevice> devices = vk_instance.instance.enumeratePhysicalDevices();
+
+ vk::PhysicalDeviceProperties props;
+ devices[device].getProperties(&props);
+
+ snprintf(description, description_size, "%s", props.deviceName.data());
+}
+
+// backend interface
+
+#define UNUSED GGML_UNUSED
+
+// device backend
+
+static bool ggml_backend_buffer_is_vk(ggml_backend_buffer_t buffer) {
+ return buffer->buft->iface.get_name == ggml_backend_vk_buffer_type_name;
+}
+
+static void ggml_backend_vk_buffer_free_buffer(ggml_backend_buffer_t buffer) {
+ VK_LOG_MEMORY("ggml_backend_vk_buffer_free_buffer()");
+ ggml_backend_vk_buffer_context * ctx = (ggml_backend_vk_buffer_context *)buffer->context;
+ ggml_vk_destroy_buffer(ctx->dev_buffer);
+ delete ctx;
+}
+
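+// Device buffers are not host-mapped, so a fixed dummy base pointer is returned and
+// tensor->data only encodes the offset into the Vulkan buffer.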
+static void * ggml_backend_vk_buffer_get_base(ggml_backend_buffer_t buffer) {
+ return vk_ptr_base;
+
+ UNUSED(buffer);
+}
+
+static void ggml_backend_vk_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) {
+ VK_LOG_DEBUG("ggml_backend_vk_buffer_init_tensor(" << buffer << " (" << buffer->context << "), " << tensor << ")");
+ if (tensor->view_src != nullptr) {
+ GGML_ASSERT(tensor->view_src->buffer->buft == buffer->buft);
+ }
+}
+
+static void ggml_backend_vk_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
+ VK_LOG_DEBUG("ggml_backend_vk_buffer_set_tensor(" << buffer << ", " << tensor << ", " << data << ", " << offset << ", " << size << ")");
+ ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)buffer->context;
+ vk_buffer buf = buf_ctx->dev_buffer;
+
+ ggml_vk_buffer_write(buf, vk_tensor_offset(tensor) + tensor->view_offs + offset, data, size);
+}
+
+static void ggml_backend_vk_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
+ VK_LOG_DEBUG("ggml_backend_vk_buffer_get_tensor(" << buffer << ", " << tensor << ", " << data << ", " << offset << ", " << size << ")");
+ ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)buffer->context;
+
+ vk_buffer buf = buf_ctx->dev_buffer;
+
+ ggml_vk_buffer_read(buf, vk_tensor_offset(tensor) + tensor->view_offs + offset, data, size);
+}
+
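+// Copy directly on the device when the source tensor also lives in a Vulkan buffer;
+// otherwise return false so the generic host-side copy path is used.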
+static bool ggml_backend_vk_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * src, ggml_tensor * dst) {
+ if (ggml_backend_buffer_is_vk(src->buffer)) {
+ ggml_backend_vk_buffer_context * src_buf_ctx = (ggml_backend_vk_buffer_context *)src->buffer->context;
+ ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
+
+ vk_buffer src_buf = src_buf_ctx->dev_buffer;
+ vk_buffer dst_buf = dst_buf_ctx->dev_buffer;
+
+ ggml_vk_buffer_copy(dst_buf, vk_tensor_offset(dst) + dst->view_offs, src_buf, vk_tensor_offset(src) + src->view_offs, ggml_nbytes(src));
+
+ return true;
+ }
+ return false;
+
+ UNUSED(buffer);
+}
+
+static void ggml_backend_vk_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
+ ggml_backend_vk_buffer_context * ctx = (ggml_backend_vk_buffer_context *)buffer->context;
+
+ ggml_vk_buffer_memset(ctx->dev_buffer, 0, value, buffer->size);
+}
+
+static ggml_backend_buffer_i ggml_backend_vk_buffer_interface = {
+ /* .free_buffer = */ ggml_backend_vk_buffer_free_buffer,
+ /* .get_base = */ ggml_backend_vk_buffer_get_base,
+ /* .init_tensor = */ ggml_backend_vk_buffer_init_tensor,
+ /* .memset_tensor = */ NULL,
+ /* .set_tensor = */ ggml_backend_vk_buffer_set_tensor,
+ /* .get_tensor = */ ggml_backend_vk_buffer_get_tensor,
+ /* .cpy_tensor = */ ggml_backend_vk_buffer_cpy_tensor,
+ /* .clear = */ ggml_backend_vk_buffer_clear,
+ /* .reset = */ NULL,
+};
+
+// vk buffer type
+static const char * ggml_backend_vk_buffer_type_name(ggml_backend_buffer_type_t buft) {
+ ggml_backend_vk_buffer_type_context * ctx = (ggml_backend_vk_buffer_type_context *)buft->context;
+
+ return ctx->name.c_str();
+}
+
+static ggml_backend_buffer_t ggml_backend_vk_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
+ VK_LOG_MEMORY("ggml_backend_vk_buffer_type_alloc_buffer(" << size << ")");
+ ggml_backend_vk_buffer_type_context * ctx = (ggml_backend_vk_buffer_type_context *) buft->context;
+
+ vk_buffer dev_buffer = nullptr;
+ try {
+ dev_buffer = ggml_vk_create_buffer_device(ctx->device, size);
+ } catch (const vk::SystemError& e) {
+ return nullptr;
+ }
+
+ ggml_backend_vk_buffer_context * bufctx = new ggml_backend_vk_buffer_context(ctx->device, std::move(dev_buffer), ctx->name);
+
+ return ggml_backend_buffer_init(buft, ggml_backend_vk_buffer_interface, bufctx, size);
+}
+
+static size_t ggml_backend_vk_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
+ ggml_backend_vk_buffer_type_context * ctx = (ggml_backend_vk_buffer_type_context *) buft->context;
+ return ctx->device->properties.limits.minStorageBufferOffsetAlignment;
+}
+
+static size_t ggml_backend_vk_buffer_type_get_max_size(ggml_backend_buffer_type_t buft) {
+ ggml_backend_vk_buffer_type_context * ctx = (ggml_backend_vk_buffer_type_context *) buft->context;
+ return ctx->device->max_memory_allocation_size;
+}
+
+static size_t ggml_backend_vk_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
+ return ggml_nbytes(tensor);
+
+ UNUSED(buft);
+}
+
+ggml_backend_buffer_type_t ggml_backend_vk_buffer_type(size_t dev_num) {
+ ggml_vk_instance_init();
+
+ VK_LOG_DEBUG("ggml_backend_vk_buffer_type(" << dev_num << ")");
+
+ vk_device dev = ggml_vk_get_device(dev_num);
+
+ return &dev->buffer_type;
+}
+
+// host buffer type
+
+static const char * ggml_backend_vk_host_buffer_type_name(ggml_backend_buffer_type_t buft) {
+ return GGML_VK_NAME "_Host";
+
+ UNUSED(buft);
+}
+
+static const char * ggml_backend_vk_host_buffer_name(ggml_backend_buffer_t buffer) {
+ return GGML_VK_NAME "_Host";
+
+ UNUSED(buffer);
+}
+
+static void ggml_backend_vk_host_buffer_free_buffer(ggml_backend_buffer_t buffer) {
+ VK_LOG_MEMORY("ggml_backend_vk_host_buffer_free_buffer()");
+ ggml_vk_host_free(vk_instance.devices[0], buffer->context);
+}
+
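+// Allocate pinned host memory through Vulkan so host<->device transfers can avoid an
+// extra staging copy; fall back to a regular CPU buffer if the allocation fails.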
+static ggml_backend_buffer_t ggml_backend_vk_host_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
+ VK_LOG_MEMORY("ggml_backend_vk_host_buffer_type_alloc_buffer(" << size << ")");
+
+ size += 32; // Behave like the CPU buffer type
+ void * ptr = nullptr;
+ try {
+ ptr = ggml_vk_host_malloc(vk_instance.devices[0], size);
+ } catch (vk::SystemError& e) {
+ std::cerr << "ggml_vulkan: Failed to allocate pinned memory." << std::endl;
+ std::cerr << "ggml_vulkan: " << e.what() << std::endl;
+ // fallback to cpu buffer
+ return ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size);
+ }
+
+ ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(ptr, size);
+ buffer->buft = buft;
+ buffer->iface.free_buffer = ggml_backend_vk_host_buffer_free_buffer;
+
+ return buffer;
+
+ UNUSED(buft);
+}
+
+static size_t ggml_backend_vk_host_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
+ return vk_instance.devices[0]->properties.limits.minMemoryMapAlignment;
+
+ UNUSED(buft);
+}
+
+// Should be changed to return a device-specific host buffer type
+// but that probably requires changes in llama.cpp
+ggml_backend_buffer_type_t ggml_backend_vk_host_buffer_type() {
+ static struct ggml_backend_buffer_type ggml_backend_vk_buffer_type_host = {
+ /* .iface = */ {
+ /* .get_name = */ ggml_backend_vk_host_buffer_type_name,
+ /* .alloc_buffer = */ ggml_backend_vk_host_buffer_type_alloc_buffer,
+ /* .get_alignment = */ ggml_backend_vk_host_buffer_type_get_alignment,
+ /* .get_max_size = */ NULL, // defaults to SIZE_MAX
+ /* .get_alloc_size = */ ggml_backend_cpu_buffer_type()->iface.get_alloc_size,
+ /* .is_host = */ ggml_backend_cpu_buffer_type()->iface.is_host,
+ },
+ /* .device = */ ggml_backend_reg_dev_get(ggml_backend_vk_reg(), 0),
+ /* .context = */ nullptr,
+ };
+
+ // Make sure device 0 is initialized
+ ggml_vk_instance_init();
+ ggml_vk_get_device(0);
+
+ return &ggml_backend_vk_buffer_type_host;
+}
+
+
+// backend
+
+static const char * ggml_backend_vk_name(ggml_backend_t backend) {
+ ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
+
+ return ctx->name.c_str();
+}
+
+static void ggml_backend_vk_free(ggml_backend_t backend) {
+ ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
+ VK_LOG_DEBUG("ggml_backend_vk_free(" << ctx->name << ")");
+
+ ggml_vk_cleanup(ctx);
+
+ delete ctx;
+ delete backend;
+}
+
+static ggml_backend_buffer_type_t ggml_backend_vk_get_default_buffer_type(ggml_backend_t backend) {
+ ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
+
+ return &ctx->device->buffer_type;
+}
+
+static void ggml_backend_vk_set_tensor_async(ggml_backend_t backend, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
+ VK_LOG_DEBUG("ggml_backend_vk_set_tensor_async(" << size << ")");
+ ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
+ GGML_ASSERT((tensor->buffer->buft == ggml_backend_vk_get_default_buffer_type(backend) || tensor->buffer->buft == ggml_backend_vk_host_buffer_type()) && "unsupported buffer type");
+
+ ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)tensor->buffer->context;
+
+ vk_context transfer_ctx;
+
+ if (ctx->transfer_ctx.expired()) {
+ // Initialize new transfer context
+ transfer_ctx = ggml_vk_create_context(ctx, ctx->device->transfer_queue);
+ ctx->transfer_ctx = transfer_ctx;
+ ggml_vk_ctx_begin(ctx->device, transfer_ctx);
+ } else {
+ transfer_ctx = ctx->transfer_ctx.lock();
+ }
+
+ vk_buffer buf = buf_ctx->dev_buffer;
+
+ ggml_vk_buffer_write_async(transfer_ctx, buf, vk_tensor_offset(tensor) + tensor->view_offs + offset, data, size);
+}
+
+static void ggml_backend_vk_get_tensor_async(ggml_backend_t backend, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
+ VK_LOG_DEBUG("ggml_backend_vk_get_tensor_async(" << size << ")");
+ ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
+ GGML_ASSERT((tensor->buffer->buft == ggml_backend_vk_get_default_buffer_type(backend) || tensor->buffer->buft == ggml_backend_vk_host_buffer_type()) && "unsupported buffer type");
+
+ ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)tensor->buffer->context;
+
+ vk_context transfer_ctx;
+
+ if (ctx->transfer_ctx.expired()) {
+ // Initialize new transfer context
+ transfer_ctx = ggml_vk_create_context(ctx, ctx->device->transfer_queue);
+ ctx->transfer_ctx = transfer_ctx;
+ ggml_vk_ctx_begin(ctx->device, transfer_ctx);
+ } else {
+ transfer_ctx = ctx->transfer_ctx.lock();
+ }
+
+ vk_buffer buf = buf_ctx->dev_buffer;
+
+ ggml_vk_buffer_read_async(transfer_ctx, buf, vk_tensor_offset(tensor) + tensor->view_offs + offset, data, size);
+}
+
+static bool ggml_backend_vk_cpy_tensor_async(ggml_backend_t backend, const ggml_tensor * src, ggml_tensor * dst) {
+ VK_LOG_DEBUG("ggml_backend_vk_cpy_tensor_async()");
+ ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
+ if ((dst->buffer->buft == ggml_backend_vk_get_default_buffer_type(backend) || dst->buffer->buft == ggml_backend_vk_host_buffer_type()) && ggml_backend_buffer_is_vk(src->buffer)) {
+ ggml_backend_vk_buffer_context * src_buf_ctx = (ggml_backend_vk_buffer_context *)src->buffer->context;
+ ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
+
+ vk_context transfer_ctx;
+
+ if (ctx->transfer_ctx.expired()) {
+ // Initialize new transfer context
+ transfer_ctx = ggml_vk_create_context(ctx, ctx->device->transfer_queue);
+ ctx->transfer_ctx = transfer_ctx;
+ ggml_vk_ctx_begin(ctx->device, transfer_ctx);
+ } else {
+ transfer_ctx = ctx->transfer_ctx.lock();
+ }
+
+ vk_buffer src_buf = src_buf_ctx->dev_buffer;
+ vk_buffer dst_buf = dst_buf_ctx->dev_buffer;
+
+ ggml_vk_buffer_copy_async(transfer_ctx, dst_buf, vk_tensor_offset(dst) + dst->view_offs, src_buf, vk_tensor_offset(src) + src->view_offs, ggml_nbytes(src));
+ return true;
+ }
+
+ return false;
+}
+
+static void ggml_backend_vk_synchronize(ggml_backend_t backend) {
+ VK_LOG_DEBUG("ggml_backend_vk_synchronize()");
+ ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
+ if(ctx->transfer_ctx.expired()) {
+ return;
+ }
+
+ vk_context transfer_ctx = ctx->transfer_ctx.lock();
+
+ ggml_vk_ctx_end(transfer_ctx);
+
+ for (auto& cpy : transfer_ctx->in_memcpys) {
+ memcpy(cpy.dst, cpy.src, cpy.n);
+ }
+
+ ggml_vk_submit(transfer_ctx, ctx->fence);
+ VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_backend_vk_synchronize waitForFences");
+ ctx->device->device.resetFences({ ctx->fence });
+
+ for (auto& cpy : transfer_ctx->out_memcpys) {
+ memcpy(cpy.dst, cpy.src, cpy.n);
+ }
+
+ ctx->transfer_ctx.reset();
+}
+
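+// A node is "empty" if it produces no GPU work: an empty tensor or a pure layout op
+// (none, reshape, transpose, view, permute).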
+static bool ggml_vk_is_empty(ggml_tensor * node) {
+ return ggml_is_empty(node) || node->op == GGML_OP_NONE || node->op == GGML_OP_RESHAPE || node->op == GGML_OP_TRANSPOSE || node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE;
+}
+
+static ggml_status ggml_backend_vk_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) {
+ VK_LOG_DEBUG("ggml_backend_vk_graph_compute(" << cgraph->n_nodes << " nodes)");
+ ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
+
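+    // First pass (dryrun): walk the graph to register the required pipelines and
+    // descriptor sets and to size the preallocated buffers before any work is recorded.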
+ for (int i = 0; i < cgraph->n_nodes; i++) {
+ ggml_vk_build_graph(ctx, cgraph->nodes[i], i, nullptr, 0, true, false, false);
+ }
+ ggml_vk_preallocate_buffers(ctx);
+ ggml_pipeline_allocate_descriptor_sets(ctx->device);
+
+ int last_node = cgraph->n_nodes - 1;
+
+    // If the last op in the cgraph isn't run on the GPU backend, the command buffer doesn't get closed properly
+ while (last_node > 0 && ggml_vk_is_empty(cgraph->nodes[last_node])) {
+ last_node -= 1;
+ }
+
+ // Reserve tensor context space for all nodes
+ ctx->tensor_ctxs.resize(cgraph->n_nodes);
+
+ bool first_node_in_batch = true; // true if next node will be first node in a batch
+ int submit_node_idx = 0; // index to first node in a batch
+
+ // Submit work every nodes_per_submit nodes to overlap CPU cmdbuffer generation with GPU execution.
+ // Start with a smaller count to get work submitted right away, and increase it after each submit.
+ int nodes_per_submit = 20;
+ int submitted_nodes = 0;
+ int submit_count = 0;
+ for (int i = 0; i < cgraph->n_nodes; i++) {
+ if (first_node_in_batch) {
+ submit_node_idx = i;
+ }
+
+ bool submit = (submitted_nodes >= nodes_per_submit) || (i == last_node);
+
+ bool enqueued = ggml_vk_build_graph(ctx, cgraph->nodes[i], i, cgraph->nodes[submit_node_idx], submit_node_idx, false, i == last_node, submit);
+
+ if (enqueued) {
+ ++submitted_nodes;
+
+#ifndef GGML_VULKAN_CHECK_RESULTS
+ if (first_node_in_batch) {
+ first_node_in_batch = false;
+ }
+#endif
+ }
+
+ if (submit) {
+ first_node_in_batch = true;
+ submitted_nodes = 0;
+ switch (submit_count) {
+ case 0:
+ nodes_per_submit = 50;
+ break;
+ default:
+ nodes_per_submit = 100;
+ break;
+ }
+ submit_count++;
+ }
+ }
+
+#ifdef GGML_VULKAN_PERF
+ ctx->device->perf_logger->print_timings();
+#endif
+
+ ggml_vk_graph_cleanup(ctx);
+
+ return GGML_STATUS_SUCCESS;
+
+ UNUSED(backend);
+}
+
+// TODO: enable async and synchronize
+static ggml_backend_i ggml_backend_vk_interface = {
+ /* .get_name = */ ggml_backend_vk_name,
+ /* .free = */ ggml_backend_vk_free,
+ /* .set_tensor_async = */ NULL, // ggml_backend_vk_set_tensor_async,
+ /* .get_tensor_async = */ NULL, // ggml_backend_vk_get_tensor_async,
+ /* .cpy_tensor_async = */ NULL, // ggml_backend_vk_cpy_tensor_async,
+ /* .synchronize = */ NULL, // ggml_backend_vk_synchronize,
+ /* .graph_plan_create = */ NULL,
+ /* .graph_plan_free = */ NULL,
+ /* .graph_plan_update = */ NULL,
+ /* .graph_plan_compute = */ NULL,
+ /* .graph_compute = */ ggml_backend_vk_graph_compute,
+ /* .event_record = */ NULL,
+ /* .event_wait = */ NULL,
+};
+
+static ggml_guid_t ggml_backend_vk_guid() {
+ static ggml_guid guid = { 0xb8, 0xf7, 0x4f, 0x86, 0x40, 0x3c, 0xe1, 0x02, 0x91, 0xc8, 0xdd, 0xe9, 0x02, 0x3f, 0xc0, 0x2b };
+ return &guid;
+}
+
+ggml_backend_t ggml_backend_vk_init(size_t dev_num) {
+ VK_LOG_DEBUG("ggml_backend_vk_init(" << dev_num << ")");
+
+ ggml_backend_vk_context * ctx = new ggml_backend_vk_context;
+ ggml_vk_init(ctx, dev_num);
+
+ ggml_backend_t vk_backend = new ggml_backend {
+ /* .guid = */ ggml_backend_vk_guid(),
+ /* .interface = */ ggml_backend_vk_interface,
+ /* .device = */ ggml_backend_reg_dev_get(ggml_backend_vk_reg(), dev_num),
+ /* .context = */ ctx,
+ };
+
+ return vk_backend;
+}
+
+bool ggml_backend_is_vk(ggml_backend_t backend) {
+ return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_vk_guid());
+}
+
+int ggml_backend_vk_get_device_count() {
+ return ggml_vk_get_device_count();
+}
+
+void ggml_backend_vk_get_device_description(int device, char * description, size_t description_size) {
+ GGML_ASSERT(device < (int) vk_instance.device_indices.size());
+ int dev_idx = vk_instance.device_indices[device];
+ ggml_vk_get_device_description(dev_idx, description, description_size);
+}
+
+void ggml_backend_vk_get_device_memory(int device, size_t * free, size_t * total) {
+ GGML_ASSERT(device < (int) vk_instance.device_indices.size());
+
+ vk::PhysicalDevice vkdev = vk_instance.instance.enumeratePhysicalDevices()[vk_instance.device_indices[device]];
+
+ vk::PhysicalDeviceMemoryProperties memprops = vkdev.getMemoryProperties();
+
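+    // Report the first device-local heap. Vulkan has no portable query for free memory,
+    // so the heap size is returned for both total and free.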
+ for (const vk::MemoryHeap& heap : memprops.memoryHeaps) {
+ if (heap.flags & vk::MemoryHeapFlagBits::eDeviceLocal) {
+ *total = heap.size;
+ *free = heap.size;
+ break;
+ }
+ }
+}
+
+//////////////////////////
+
+struct ggml_backend_vk_device_context {
+ size_t device;
+ std::string name;
+ std::string description;
+};
+
+static const char * ggml_backend_vk_device_get_name(ggml_backend_dev_t dev) {
+ ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
+ return ctx->name.c_str();
+}
+
+static const char * ggml_backend_vk_device_get_description(ggml_backend_dev_t dev) {
+ ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
+ return ctx->description.c_str();
+}
+
+static void ggml_backend_vk_device_get_memory(ggml_backend_dev_t device, size_t * free, size_t * total) {
+ ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)device->context;
+ ggml_backend_vk_get_device_memory(ctx->device, free, total);
+}
+
+static ggml_backend_buffer_type_t ggml_backend_vk_device_get_buffer_type(ggml_backend_dev_t dev) {
+ ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
+ return ggml_backend_vk_buffer_type(ctx->device);
+}
+
+static ggml_backend_buffer_type_t ggml_backend_vk_device_get_host_buffer_type(ggml_backend_dev_t dev) {
+ UNUSED(dev);
+ return ggml_backend_vk_host_buffer_type();
+}
+
+static enum ggml_backend_dev_type ggml_backend_vk_device_get_type(ggml_backend_dev_t dev) {
+ UNUSED(dev);
+ return GGML_BACKEND_DEVICE_TYPE_GPU;
+}
+
+static void ggml_backend_vk_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) {
+ props->name = ggml_backend_vk_device_get_name(dev);
+ props->description = ggml_backend_vk_device_get_description(dev);
+ props->type = ggml_backend_vk_device_get_type(dev);
+ ggml_backend_vk_device_get_memory(dev, &props->memory_free, &props->memory_total);
+ props->caps = {
+ /* .async = */ false,
+ /* .host_buffer = */ true,
+ /* .buffer_from_host_ptr = */ false,
+ /* .events = */ false,
+ };
+}
+
+static ggml_backend_t ggml_backend_vk_device_init(ggml_backend_dev_t dev, const char * params) {
+ UNUSED(params);
+ ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
+ return ggml_backend_vk_init(ctx->device);
+}
+
+static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggml_tensor * op) {
+ switch (op->op) {
+ case GGML_OP_UNARY:
+ switch (ggml_get_unary_op(op)) {
+ case GGML_UNARY_OP_GELU:
+ case GGML_UNARY_OP_GELU_QUICK:
+ case GGML_UNARY_OP_SILU:
+ case GGML_UNARY_OP_RELU:
+ case GGML_UNARY_OP_TANH:
+ return ggml_is_contiguous(op->src[0]);
+ default:
+ return false;
+ }
+ break;
+ case GGML_OP_MUL_MAT:
+ case GGML_OP_MUL_MAT_ID:
+ {
+ ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
+ const vk_device& device = ggml_vk_get_device(ctx->device);
+ if (op->op == GGML_OP_MUL_MAT_ID && !device->mul_mat_id_s && !device->mul_mat_id_m && !device->mul_mat_id_l) {
+ // If there's not enough shared memory for row_ids and the result tile, fallback to CPU
+ return false;
+ }
+ switch (op->src[0]->type) {
+ case GGML_TYPE_F32:
+ case GGML_TYPE_F16:
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
+ case GGML_TYPE_Q5_0:
+ case GGML_TYPE_Q5_1:
+ case GGML_TYPE_Q8_0:
+ case GGML_TYPE_Q2_K:
+ case GGML_TYPE_Q3_K:
+ case GGML_TYPE_Q4_K:
+ case GGML_TYPE_Q5_K:
+ case GGML_TYPE_Q6_K:
+ case GGML_TYPE_IQ4_NL:
+ break;
+ default:
+ return false;
+ }
+ struct ggml_tensor * a;
+ struct ggml_tensor * b;
+ if (op->op == GGML_OP_MUL_MAT) {
+ a = op->src[0];
+ b = op->src[1];
+ } else {
+ a = op->src[2];
+ b = op->src[1];
+ }
+ if (a->ne[3] != b->ne[3]) {
+ return false;
+ }
+ if (!(ggml_vk_dim01_contiguous(op->src[0]) || op->src[0]->type == GGML_TYPE_F32 || op->src[0]->type == GGML_TYPE_F16) ||
+ !(ggml_vk_dim01_contiguous(op->src[1]) || op->src[1]->type == GGML_TYPE_F32 || op->src[1]->type == GGML_TYPE_F16)) {
+ return false;
+ }
+
+ return true;
+ } break;
+ case GGML_OP_FLASH_ATTN_EXT:
+ {
+ ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
+ if (!ggml_vk_get_device(ctx->device)->coopmat2) {
+ return false;
+ }
+ switch (op->src[0]->ne[0]) {
+ case 64:
+ case 80:
+ case 96:
+ case 112:
+ case 128:
+ case 256:
+ break;
+ default:
+ return false;
+ }
+ if (op->src[0]->type != GGML_TYPE_F32) {
+ return false;
+ }
+ if (op->type != GGML_TYPE_F32) {
+ return false;
+ }
+ if (op->src[3] && op->src[3]->type != GGML_TYPE_F16) {
+ return false;
+ }
+ // It's straightforward to support different K/V dequant, but would
+ // significantly increase the number of pipelines
+ if (op->src[1]->type != op->src[2]->type) {
+ return false;
+ }
+ switch (op->src[1]->type) {
+ case GGML_TYPE_F16:
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
+ case GGML_TYPE_Q5_0:
+ case GGML_TYPE_Q5_1:
+ case GGML_TYPE_Q8_0:
+ // K dequants currently disabled because D dimension is rounded up to 256 and runs inefficiently
+ //case GGML_TYPE_Q2_K:
+ //case GGML_TYPE_Q3_K:
+ //case GGML_TYPE_Q4_K:
+ //case GGML_TYPE_Q5_K:
+ //case GGML_TYPE_Q6_K:
+ case GGML_TYPE_IQ4_NL:
+ break;
+ default:
+ return false;
+ }
+ return true;
+ }
+ case GGML_OP_GET_ROWS:
+ {
+ switch (op->src[0]->type) {
+ case GGML_TYPE_F32:
+ case GGML_TYPE_F16:
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
+ case GGML_TYPE_Q5_0:
+ case GGML_TYPE_Q5_1:
+ case GGML_TYPE_Q8_0:
+ case GGML_TYPE_IQ4_NL:
+ return true;
+ default:
+ return false;
+ }
+ } break;
+ case GGML_OP_CONT:
+ case GGML_OP_CPY:
+ case GGML_OP_DUP:
+ {
+ ggml_type src0_type = op->src[0]->type;
+ ggml_type src1_type = op->src[1] != nullptr ? op->src[1]->type : src0_type;
+ if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F32) {
+ return true;
+ }
+ if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F16) {
+ return true;
+ }
+ if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) {
+ return true;
+ }
+ return false;
+ } break;
+ case GGML_OP_REPEAT:
+ return ggml_type_size(op->type) == sizeof(float) && ggml_type_size(op->src[0]->type) == sizeof(float);
+ case GGML_OP_ROPE:
+ {
+ const int mode = ((const int32_t *) op->op_params)[2];
+ if (mode & GGML_ROPE_TYPE_MROPE) {
+ return false;
+ }
+ if (mode & GGML_ROPE_TYPE_VISION) {
+ return false;
+ }
+ return ggml_is_contiguous(op->src[0]);
+ }
+ case GGML_OP_NONE:
+ case GGML_OP_RESHAPE:
+ case GGML_OP_VIEW:
+ case GGML_OP_PERMUTE:
+ case GGML_OP_TRANSPOSE:
+ case GGML_OP_NORM:
+ case GGML_OP_GROUP_NORM:
+ case GGML_OP_RMS_NORM:
+ case GGML_OP_ADD:
+ case GGML_OP_ACC:
+ case GGML_OP_MUL:
+ case GGML_OP_DIV:
+ case GGML_OP_CONCAT:
+ case GGML_OP_UPSCALE:
+ case GGML_OP_SCALE:
+ case GGML_OP_SQR:
+ case GGML_OP_SIN:
+ case GGML_OP_COS:
+ case GGML_OP_CLAMP:
+ case GGML_OP_PAD:
+ case GGML_OP_DIAG_MASK_INF:
+ case GGML_OP_SOFT_MAX:
+ case GGML_OP_ARGSORT:
+ case GGML_OP_SUM_ROWS:
+ case GGML_OP_IM2COL:
+ case GGML_OP_TIMESTEP_EMBEDDING:
+ case GGML_OP_POOL_2D:
+ case GGML_OP_RWKV_WKV6:
+ case GGML_OP_LEAKY_RELU:
+ return true;
+ default:
+ return false;
+ }
+
+ UNUSED(dev);
+}
+
+static bool ggml_backend_vk_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) {
+ if (buft->iface.get_name != ggml_backend_vk_buffer_type_name) {
+ return false;
+ }
+
+ ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
+ ggml_backend_vk_buffer_type_context * buft_ctx = (ggml_backend_vk_buffer_type_context *)buft->context;
+
+ return buft_ctx->device->idx == ctx->device;
+}
+
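+// Only offload ops that span a sufficiently large batch; smaller workloads are not
+// worth moving to the GPU.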
+static bool ggml_backend_vk_device_offload_op(ggml_backend_dev_t dev, const ggml_tensor * op) {
+ const int min_batch_size = 32;
+
+ return (op->ne[1] >= min_batch_size && op->op != GGML_OP_GET_ROWS) ||
+ (op->ne[2] >= min_batch_size && op->op == GGML_OP_MUL_MAT_ID);
+
+ UNUSED(dev);
+}
+
+static const struct ggml_backend_device_i ggml_backend_vk_device_i = {
+ /* .get_name = */ ggml_backend_vk_device_get_name,
+ /* .get_description = */ ggml_backend_vk_device_get_description,
+ /* .get_memory = */ ggml_backend_vk_device_get_memory,
+ /* .get_type = */ ggml_backend_vk_device_get_type,
+ /* .get_props = */ ggml_backend_vk_device_get_props,
+ /* .init_backend = */ ggml_backend_vk_device_init,
+ /* .get_buffer_type = */ ggml_backend_vk_device_get_buffer_type,
+ /* .get_host_buffer_type = */ ggml_backend_vk_device_get_host_buffer_type,
+ /* .buffer_from_host_ptr = */ NULL,
+ /* .supports_op = */ ggml_backend_vk_device_supports_op,
+ /* .supports_buft = */ ggml_backend_vk_device_supports_buft,
+ /* .offload_op = */ ggml_backend_vk_device_offload_op,
+ /* .event_new = */ NULL,
+ /* .event_free = */ NULL,
+ /* .event_synchronize = */ NULL,
+};
+
+static const char * ggml_backend_vk_reg_get_name(ggml_backend_reg_t reg) {
+ UNUSED(reg);
+ return GGML_VK_NAME;
+}
+
+static size_t ggml_backend_vk_reg_get_device_count(ggml_backend_reg_t reg) {
+ UNUSED(reg);
+ return ggml_backend_vk_get_device_count();
+}
+
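+// The device list is built lazily on first use and guarded by a mutex so that
+// concurrent registry queries are safe.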
+static ggml_backend_dev_t ggml_backend_vk_reg_get_device(ggml_backend_reg_t reg, size_t device) {
+ static std::vector<ggml_backend_dev_t> devices;
+
+ static bool initialized = false;
+
+ {
+ static std::mutex mutex;
+ std::lock_guard<std::mutex> lock(mutex);
+ if (!initialized) {
+ for (int i = 0; i < ggml_backend_vk_get_device_count(); i++) {
+ ggml_backend_vk_device_context * ctx = new ggml_backend_vk_device_context;
+ char desc[256];
+ ggml_backend_vk_get_device_description(i, desc, sizeof(desc));
+ ctx->device = i;
+ ctx->name = GGML_VK_NAME + std::to_string(i);
+ ctx->description = desc;
+ devices.push_back(new ggml_backend_device {
+ /* .iface = */ ggml_backend_vk_device_i,
+ /* .reg = */ reg,
+ /* .context = */ ctx,
+ });
+ }
+ initialized = true;
+ }
+ }
+
+ GGML_ASSERT(device < devices.size());
+ return devices[device];
+}
+
+static const struct ggml_backend_reg_i ggml_backend_vk_reg_i = {
+ /* .get_name = */ ggml_backend_vk_reg_get_name,
+ /* .get_device_count = */ ggml_backend_vk_reg_get_device_count,
+ /* .get_device = */ ggml_backend_vk_reg_get_device,
+ /* .get_proc_address = */ NULL,
+};
+
+ggml_backend_reg_t ggml_backend_vk_reg() {
+ static ggml_backend_reg reg = {
+ /* .api_version = */ GGML_BACKEND_API_VERSION,
+ /* .iface = */ ggml_backend_vk_reg_i,
+ /* .context = */ nullptr,
+ };
+
+ return &reg;
+}
+
+// Extension availability
+static bool ggml_vk_instance_validation_ext_available(const std::vector<vk::ExtensionProperties>& instance_extensions) {
+#ifdef GGML_VULKAN_VALIDATE
+ bool portability_enumeration_ext = false;
+ // Check for portability enumeration extension for MoltenVK support
+ for (const auto& properties : instance_extensions) {
+ if (strcmp("VK_KHR_portability_enumeration", properties.extensionName) == 0) {
+ return true;
+ }
+ }
+ if (!portability_enumeration_ext) {
+ std::cerr << "ggml_vulkan: WARNING: Instance extension VK_KHR_portability_enumeration not found." << std::endl;
+ }
+#endif
+ return false;
+
+ UNUSED(instance_extensions);
+}
+static bool ggml_vk_instance_portability_enumeration_ext_available(const std::vector<vk::ExtensionProperties>& instance_extensions) {
+#ifdef __APPLE__
+ bool portability_enumeration_ext = false;
+ // Check for portability enumeration extension for MoltenVK support
+ for (const auto& properties : instance_extensions) {
+ if (strcmp("VK_KHR_portability_enumeration", properties.extensionName) == 0) {
+ return true;
+ }
+ }
+ if (!portability_enumeration_ext) {
+ std::cerr << "ggml_vulkan: WARNING: Instance extension VK_KHR_portability_enumeration not found." << std::endl;
+ }
+#endif
+ return false;
+
+ UNUSED(instance_extensions);
+}
+
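+// Decide whether the cooperative matrix extension should actually be used on this
+// device, working around drivers that advertise it without supporting it well.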
+static bool ggml_vk_khr_cooperative_matrix_support(const vk::PhysicalDeviceProperties& props, const vk::PhysicalDeviceDriverProperties& driver_props) {
+ switch (props.vendorID) {
+ case VK_VENDOR_ID_INTEL:
+ // Intel drivers don't support coopmat properly yet
+ return false;
+ case VK_VENDOR_ID_AMD:
+ if (driver_props.driverID == vk::DriverId::eAmdProprietary || driver_props.driverID == vk::DriverId::eAmdOpenSource) {
+ // Workaround for AMD proprietary driver reporting support on all GPUs
+ const std::string name = props.deviceName;
+ return name.rfind("AMD Radeon RX 7", 0) == 0 || name.rfind("AMD Radeon(TM) RX 7", 0) == 0 || // RDNA 3 consumer GPUs
+ name.rfind("AMD Radeon PRO W7", 0) == 0 || name.rfind("AMD Radeon(TM) PRO W7", 0) == 0 || // RDNA 3 workstation GPUs
+ name.rfind("AMD Radeon 7", 0) == 0 || name.rfind("AMD Radeon(TM) 7", 0) == 0; // RDNA 3 APUs
+ }
+ return true;
+ default:
+ return true;
+ }
+}
+
+// checks
+
+#ifdef GGML_VULKAN_CHECK_RESULTS
+static void ggml_vk_print_graph_origin(const ggml_tensor * tensor, std::vector<const ggml_tensor *>& done, int level = 0) {
+ if (std::find(done.begin(), done.end(), tensor) != done.end() || level > 10) {
+ return;
+ }
+ for (int j = 0; j < level; j++) {
+ std::cerr << " ";
+ }
+ std::cerr << ggml_op_name(tensor->op) << " gpu=" << (tensor->extra != nullptr) << std::endl;
+
+ done.push_back(tensor);
+
+ for (int i = 0; i < GGML_MAX_SRC; i++) {
+ if (tensor->src[i] != nullptr) {
+ ggml_vk_print_graph_origin(tensor->src[i], done, level + 1);
+ }
+ }
+}
+
+static void ggml_vk_print_tensor_area(const ggml_tensor * tensor, const void * data, int i0, int i1, int i2, int i3) {
+ if (tensor->type != GGML_TYPE_F32 && tensor->type != GGML_TYPE_F16 && tensor->type != GGML_TYPE_I32) {
+ return;
+ }
+ i0 = std::max(i0, 5);
+ i1 = std::max(i1, 5);
+ i2 = std::max(i2, 0);
+ i3 = std::max(i3, 0);
+ fprintf(stderr, " ");
+ for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
+ fprintf(stderr, "%7d ", idx1);
+ }
+ fprintf(stderr, "\n");
+ for (int idx0 = i0 - 5; idx0 < i0 + 5; idx0++) {
+ fprintf(stderr, "%7d: ", idx0);
+ for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
+ if (idx0 >= 0 && idx0 < tensor->ne[0] && idx1 >= 0 && idx1 < tensor->ne[1] && i2 >= 0 && i2 < tensor->ne[2] && i3 >= 0 && i3 < tensor->ne[3]) {
+ float val;
+ if (tensor->type == GGML_TYPE_F32) {
+ val = *(const float *) ((const char *) data + i3*tensor->nb[3] + i2*tensor->nb[2] + idx1*tensor->nb[1] + idx0*tensor->nb[0]);
+ } else if (tensor->type == GGML_TYPE_F16) {
+ val = ggml_fp16_to_fp32(*(const ggml_fp16_t *) ((const char *) data + i3*tensor->nb[3] + i2*tensor->nb[2] + idx1*tensor->nb[1] + idx0*tensor->nb[0]));
+ } else if (tensor->type == GGML_TYPE_I32) {
+ val = *(const int32_t *) ((const char *) data + i3*tensor->nb[3] + i2*tensor->nb[2] + idx1*tensor->nb[1] + idx0*tensor->nb[0]);
+ } else {
+ GGML_ABORT("fatal error");
+ }
+ fprintf(stderr, "% 7.2f ", val);
+ } else {
+ fprintf(stderr, " ");
+ }
+ }
+ fprintf(stderr, "\n");
+ }
+}
+
+static void ggml_vk_print_tensor(const ggml_tensor * tensor, const char * name) {
+ void * tensor_data = tensor->data;
+
+ const bool is_gpu = tensor->buffer != nullptr && ggml_backend_buffer_is_vk(tensor->buffer);
+
+ if (is_gpu) {
+ const size_t tensor_size = ggml_nbytes(tensor);
+ tensor_data = malloc(tensor_size);
+
+ ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)tensor->buffer->context;
+
+ vk_buffer buffer_gpu = buf_ctx->dev_buffer;
+ ggml_vk_buffer_read(buffer_gpu, vk_tensor_offset(tensor) + tensor->view_offs, tensor_data, tensor_size);
+ }
+
+ std::cerr << "TENSOR CHECK " << name << " (" << tensor->name << "): " << ggml_op_name(tensor->op) << std::endl;
+ std::cerr << "tensor=" << tensor << " tensor->type: " << ggml_type_name(tensor->type) << " ne0=" << tensor->ne[0] << " nb0=" << tensor->nb[0] << " ne1=" << tensor->ne[1] << " nb1=" << tensor->nb[1] << " ne2=" << tensor->ne[2] << " nb2=" << tensor->nb[2] << " ne3=" << tensor->ne[3] << " nb3=" << tensor->nb[3] << std::endl;
+ if (tensor->src[0] != nullptr) {
+ std::cerr << "tensor->src[0]=" << tensor->src[0] << " name=" << tensor->src[0]->name << " op=" << ggml_op_name(tensor->src[0]->op) << " type=" << ggml_type_name(tensor->src[0]->type) << " ne0=" << tensor->src[0]->ne[0] << " nb0=" << tensor->src[0]->nb[0] << " ne1=" << tensor->src[0]->ne[1] << " nb1=" << tensor->src[0]->nb[1] << " ne2=" << tensor->src[0]->ne[2] << " nb2=" << tensor->src[0]->nb[2] << " ne3=" << tensor->src[0]->ne[3] << " nb3=" << tensor->src[0]->nb[3] << std::endl;
+ }
+ if (tensor->src[1] != nullptr) {
+ std::cerr << "tensor->src[1]=" << tensor->src[1] << " name=" << tensor->src[1]->name << " op=" << ggml_op_name(tensor->src[1]->op) << " type=" << ggml_type_name(tensor->src[1]->type) << " ne0=" << tensor->src[1]->ne[0] << " nb0=" << tensor->src[1]->nb[0] << " ne1=" << tensor->src[1]->ne[1] << " nb1=" << tensor->src[1]->nb[1] << " ne2=" << tensor->src[1]->ne[2] << " nb2=" << tensor->src[1]->nb[2] << " ne3=" << tensor->src[1]->ne[3] << " nb3=" << tensor->src[1]->nb[3] << std::endl;
+ }
+ std::cerr << std::endl << "Result:" << std::endl;
+ ggml_vk_print_tensor_area(tensor, tensor_data, 5, 5, 0, 0);
+ std::cerr << std::endl;
+ std::vector<const ggml_tensor *> done;
+ ggml_vk_print_graph_origin(tensor, done);
+
+ if (is_gpu) {
+ free(tensor_data);
+ }
+}
+
+void * comp_result;
+size_t comp_size;
+size_t comp_nb[GGML_MAX_DIMS];
+size_t check_counter = 0;
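+// Recompute the op on the CPU with ggml and keep the result, so that
+// ggml_vk_check_results_1 can compare it against the Vulkan output element by element.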
+static void ggml_vk_check_results_0(ggml_tensor * tensor) {
+ if (tensor->op == GGML_OP_TRANSPOSE) {
+ return;
+ }
+
+ check_counter++;
+ if (!(vk_output_tensor > 0 && vk_output_tensor == check_counter) && check_counter <= vk_skip_checks) {
+ return;
+ }
+
+ VK_LOG_DEBUG("ggml_vk_check_results_0(" << tensor->name << ")");
+
+ ggml_tensor * src0 = tensor->src[0];
+ ggml_tensor * src1 = tensor->src[1];
+ ggml_tensor * src2 = tensor->src[2];
+ ggml_tensor * src3 = tensor->src[3];
+
+ struct ggml_init_params iparams = {
+ /*.mem_size =*/ 2ul*1024ul*1024ul*1024ul,
+ /*.mem_buffer =*/ NULL,
+ /*.no_alloc =*/ false,
+ };
+
+ struct ggml_context * ggml_ctx = ggml_init(iparams);
+
+ struct ggml_tensor * src0_clone = nullptr;
+ struct ggml_tensor * src1_clone = nullptr;
+ struct ggml_tensor * src2_clone = nullptr;
+ struct ggml_tensor * src3_clone = nullptr;
+ struct ggml_tensor * tensor_clone = nullptr;
+
+ size_t src0_size;
+ size_t src1_size;
+ size_t src2_size;
+ size_t src3_size;
+
+ void * src0_buffer = nullptr;
+ void * src1_buffer = nullptr;
+ void * src2_buffer = nullptr;
+ void * src3_buffer = nullptr;
+
+ if (src0 != nullptr) {
+ src0_clone = ggml_dup_tensor(ggml_ctx, src0);
+
+ src0_size = ggml_nbytes(src0);
+
+ src0_buffer = malloc(src0_size);
+ src0_clone->data = src0_buffer;
+ if (ggml_backend_buffer_is_host(src0->buffer)) {
+ memcpy(src0_clone->data, src0->data, src0_size);
+ memcpy(src0_clone->nb, src0->nb, sizeof(size_t) * GGML_MAX_DIMS);
+ } else if (ggml_backend_buffer_is_vk(src0->buffer)) {
+ ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
+ vk_buffer& buffer_gpu = buf_ctx->dev_buffer;
+ uint64_t offset = vk_tensor_offset(src0) + src0->view_offs;
+ if (!ggml_is_contiguous(src0) && ggml_vk_dim01_contiguous(src0)) {
+ for (int i3 = 0; i3 < src0->ne[3]; i3++) {
+ for (int i2 = 0; i2 < src0->ne[2]; i2++) {
+ const int idx = i3*src0->ne[2] + i2;
+ ggml_vk_buffer_read(buffer_gpu, offset + idx * src0->nb[2], ((char *)src0_clone->data + idx * src0_clone->nb[2]), src0->ne[1] * src0->nb[1]);
+ }
+ }
+
+ src0_clone->nb[0] = src0->nb[0];
+ src0_clone->nb[1] = src0->nb[1];
+ for (int i = 2; i < GGML_MAX_DIMS; i++) {
+ src0_clone->nb[i] = src0_clone->nb[i - 1]*src0_clone->ne[i - 1];
+ }
+ } else {
+ if (offset + src0_size >= buffer_gpu->size) {
+ src0_size = buffer_gpu->size - offset;
+ }
+ ggml_vk_buffer_read(buffer_gpu, offset, src0_clone->data, src0_size);
+ memcpy(src0_clone->nb, src0->nb, sizeof(size_t) * GGML_MAX_DIMS);
+ }
+ } else {
+ GGML_ABORT("fatal error");
+ }
+
+ if (vk_output_tensor > 0 && vk_output_tensor == check_counter) {
+ ggml_vk_print_tensor(src0, "src0");
+ }
+ }
+ if (src1 != nullptr) {
+ src1_clone = ggml_dup_tensor(ggml_ctx, src1);
+
+ src1_size = ggml_nbytes(src1);
+
+ src1_buffer = malloc(src1_size);
+ src1_clone->data = src1_buffer;
+ if (ggml_backend_buffer_is_host(src1->buffer)) {
+ memcpy(src1_clone->data, src1->data, src1_size);
+ memcpy(src1_clone->nb, src1->nb, sizeof(size_t) * GGML_MAX_DIMS);
+ } else if (ggml_backend_buffer_is_vk(src1->buffer)) {
+ ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context;
+ vk_buffer& buffer_gpu = buf_ctx->dev_buffer;
+ uint64_t offset = vk_tensor_offset(src1) + src1->view_offs;
+ if (!ggml_is_contiguous(src1) && ggml_vk_dim01_contiguous(src1)) {
+ for (int i3 = 0; i3 < src1->ne[3]; i3++) {
+ for (int i2 = 0; i2 < src1->ne[2]; i2++) {
+ const int idx = i3*src1->ne[2] + i2;
+ ggml_vk_buffer_read(buffer_gpu, offset + idx * src1->nb[2], ((char *)src1_clone->data + idx * src1_clone->nb[2]), src1->ne[1] * src1->nb[1]);
+ }
+ }
+
+ src1_clone->nb[0] = src1->nb[0];
+ src1_clone->nb[1] = src1->nb[1];
+ for (int i = 2; i < GGML_MAX_DIMS; i++) {
+ src1_clone->nb[i] = src1_clone->nb[i - 1]*src1_clone->ne[i - 1];
+ }
+ } else {
+ if (offset + src1_size >= buffer_gpu->size) {
+ src1_size = buffer_gpu->size - offset;
+ }
+ ggml_vk_buffer_read(buffer_gpu, offset, src1_clone->data, src1_size);
+ memcpy(src1_clone->nb, src1->nb, sizeof(size_t) * GGML_MAX_DIMS);
+ }
+ } else {
+ GGML_ABORT("fatal error");
+ }
+
+ if (vk_output_tensor > 0 && vk_output_tensor == check_counter) {
+ ggml_vk_print_tensor(src1, "src1");
+ }
+ }
+ if (src2 != nullptr) {
+ src2_clone = ggml_dup_tensor(ggml_ctx, src2);
+
+ src2_size = ggml_nbytes(src2);
+
+ src2_buffer = malloc(src2_size);
+ src2_clone->data = src2_buffer;
+ if (ggml_backend_buffer_is_host(src2->buffer)) {
+ memcpy(src2_clone->data, src2->data, src2_size);
+ memcpy(src2_clone->nb, src2->nb, sizeof(size_t) * GGML_MAX_DIMS);
+ } else if (ggml_backend_buffer_is_vk(src2->buffer)) {
+ ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)src2->buffer->context;
+ vk_buffer& buffer_gpu = buf_ctx->dev_buffer;
+ uint64_t offset = vk_tensor_offset(src2) + src2->view_offs;
+ if (!ggml_is_contiguous(src2) && ggml_vk_dim01_contiguous(src2)) {
+ for (int i3 = 0; i3 < src2->ne[3]; i3++) {
+ for (int i2 = 0; i2 < src2->ne[2]; i2++) {
+ const int idx = i3*src2->ne[2] + i2;
+ ggml_vk_buffer_read(buffer_gpu, offset + idx * src2->nb[2], ((char *)src2_clone->data + idx * src2_clone->nb[2]), src2->ne[1] * src2->nb[1]);
+ }
+ }
+
+ src2_clone->nb[0] = src2->nb[0];
+ src2_clone->nb[1] = src2->nb[1];
+ for (int i = 2; i < GGML_MAX_DIMS; i++) {
+ src2_clone->nb[i] = src2_clone->nb[i - 1]*src2_clone->ne[i - 1];
+ }
+ } else {
+ if (offset + src2_size >= buffer_gpu->size) {
+ src2_size = buffer_gpu->size - offset;
+ }
+ ggml_vk_buffer_read(buffer_gpu, offset, src2_clone->data, src2_size);
+ memcpy(src2_clone->nb, src2->nb, sizeof(size_t) * GGML_MAX_DIMS);
+ }
+ } else {
+ GGML_ABORT("fatal error");
+ }
+
+ if (vk_output_tensor > 0 && vk_output_tensor == check_counter) {
+ ggml_vk_print_tensor(src2, "src2");
+ }
+ }
+ if (src3 != nullptr) {
+ src3_clone = ggml_dup_tensor(ggml_ctx, src3);
+
+ src3_size = ggml_nbytes(src3);
+
+ src3_buffer = malloc(src3_size);
+ src3_clone->data = src3_buffer;
+ if (ggml_backend_buffer_is_host(src3->buffer)) {
+ memcpy(src3_clone->data, src3->data, src3_size);
+ memcpy(src3_clone->nb, src3->nb, sizeof(size_t) * GGML_MAX_DIMS);
+ } else if (ggml_backend_buffer_is_vk(src3->buffer)) {
+ ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)src3->buffer->context;
+ vk_buffer& buffer_gpu = buf_ctx->dev_buffer;
+ uint64_t offset = vk_tensor_offset(src3) + src3->view_offs;
+ if (!ggml_is_contiguous(src3) && ggml_vk_dim01_contiguous(src3)) {
+ for (int i3 = 0; i3 < src3->ne[3]; i3++) {
+ for (int i2 = 0; i2 < src3->ne[2]; i2++) {
+ const int idx = i3*src3->ne[2] + i2;
+ ggml_vk_buffer_read(buffer_gpu, offset + idx * src3->nb[2], ((char *)src3_clone->data + idx * src3_clone->nb[2]), src3->ne[1] * src3->nb[1]);
+ }
+ }
+
+ src3_clone->nb[0] = src3->nb[0];
+ src3_clone->nb[1] = src3->nb[1];
+ for (int i = 2; i < GGML_MAX_DIMS; i++) {
+ src3_clone->nb[i] = src3_clone->nb[i - 1]*src3_clone->ne[i - 1];
+ }
+ } else {
+ if (offset + src3_size >= buffer_gpu->size) {
+ src3_size = buffer_gpu->size - offset;
+ }
+ ggml_vk_buffer_read(buffer_gpu, offset, src3_clone->data, src3_size);
+ memcpy(src3_clone->nb, src3->nb, sizeof(size_t) * GGML_MAX_DIMS);
+ }
+ } else {
+ GGML_ABORT("fatal error");
+ }
+
+ if (vk_output_tensor > 0 && vk_output_tensor == check_counter) {
+ ggml_vk_print_tensor(src3, "src3");
+ }
+ }
+
+ if (tensor->op == GGML_OP_FLASH_ATTN_EXT) {
+ const float *params = (const float *)tensor->op_params;
+ tensor_clone = ggml_flash_attn_ext(ggml_ctx, src0_clone, src1_clone, src2_clone, src3_clone, params[0], params[1], params[2]);
+ } else if (tensor->op == GGML_OP_MUL_MAT) {
+ tensor_clone = ggml_mul_mat(ggml_ctx, src0_clone, src1_clone);
+ } else if (tensor->op == GGML_OP_MUL_MAT_ID) {
+ tensor_clone = ggml_mul_mat_id(ggml_ctx, src0_clone, src1_clone, src2_clone);
+ } else if (tensor->op == GGML_OP_MUL) {
+ tensor_clone = ggml_mul(ggml_ctx, src0_clone, src1_clone);
+ } else if (tensor->op == GGML_OP_DIV) {
+ tensor_clone = ggml_div(ggml_ctx, src0_clone, src1_clone);
+ } else if (tensor->op == GGML_OP_CONCAT) {
+ tensor_clone = ggml_concat(ggml_ctx, src0_clone, src1_clone, *(int *)tensor->op_params);
+ } else if (tensor->op == GGML_OP_UPSCALE) {
+ tensor_clone = ggml_upscale_ext(ggml_ctx, src0_clone, tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3]);
+ } else if (tensor->op == GGML_OP_SCALE) {
+ tensor_clone = ggml_scale(ggml_ctx, src0_clone, ((float *)tensor->op_params)[0]);
+ } else if (tensor->op == GGML_OP_SQR) {
+ tensor_clone = ggml_sqr(ggml_ctx, src0_clone);
+ } else if (tensor->op == GGML_OP_SIN) {
+ tensor_clone = ggml_sin(ggml_ctx, src0_clone);
+ } else if (tensor->op == GGML_OP_COS) {
+ tensor_clone = ggml_cos(ggml_ctx, src0_clone);
+ } else if (tensor->op == GGML_OP_CLAMP) {
+ tensor_clone = ggml_clamp(ggml_ctx, src0_clone, ((float *)tensor->op_params)[0], ((float *)tensor->op_params)[1]);
+ } else if (tensor->op == GGML_OP_PAD) {
+ tensor_clone = ggml_pad(ggml_ctx, src0_clone, tensor->ne[0] - src0_clone->ne[0], tensor->ne[1] - src0_clone->ne[1], tensor->ne[2] - src0_clone->ne[2], tensor->ne[3] - src0_clone->ne[3]);
+ } else if (tensor->op == GGML_OP_REPEAT) {
+ tensor_clone = ggml_repeat(ggml_ctx, src0_clone, tensor);
+ } else if (tensor->op == GGML_OP_ADD) {
+ tensor_clone = ggml_add(ggml_ctx, src0_clone, src1_clone);
+ } else if (tensor->op == GGML_OP_ACC) {
+ tensor_clone = ggml_acc(ggml_ctx, src0_clone, src1_clone, tensor->op_params[0], tensor->op_params[1], tensor->op_params[2], tensor->op_params[3]);
+ } else if (tensor->op == GGML_OP_NORM) {
+ tensor_clone = ggml_norm(ggml_ctx, src0_clone, *(float *)tensor->op_params);
+ } else if (tensor->op == GGML_OP_GROUP_NORM) {
+ tensor_clone = ggml_group_norm(ggml_ctx, src0_clone, *(int *)tensor->op_params, ((float *)tensor->op_params)[1]);
+ } else if (tensor->op == GGML_OP_RMS_NORM) {
+ tensor_clone = ggml_rms_norm(ggml_ctx, src0_clone, *(float *)tensor->op_params);
+ } else if (tensor->op == GGML_OP_SOFT_MAX) {
+ if (src1 != nullptr) {
+ tensor_clone = ggml_soft_max_ext(ggml_ctx, src0_clone, src1_clone, ((float *)tensor->op_params)[0], ((float *)tensor->op_params)[1]);
+ } else {
+ tensor_clone = ggml_soft_max(ggml_ctx, src0_clone);
+ }
+ } else if (tensor->op == GGML_OP_DIAG_MASK_INF) {
+ tensor_clone = ggml_diag_mask_inf(ggml_ctx, src0_clone, *(int *)tensor->op_params);
+ } else if (tensor->op == GGML_OP_ROPE) {
+ const int n_dims = ((int32_t *) tensor->op_params)[1];
+ const int mode = ((int32_t *) tensor->op_params)[2];
+ //const int n_ctx_ggml = ((int32_t *) tensor->op_params)[3];
+ const int n_ctx_orig_ggml = ((int32_t *) tensor->op_params)[4];
+ const float freq_base = ((float *) tensor->op_params)[5];
+ const float freq_scale = ((float *) tensor->op_params)[6];
+ const float ext_factor = ((float *) tensor->op_params)[7];
+ const float attn_factor = ((float *) tensor->op_params)[8];
+ const float beta_fast = ((float *) tensor->op_params)[9];
+ const float beta_slow = ((float *) tensor->op_params)[10];
+ tensor_clone = ggml_rope_ext(ggml_ctx, src0_clone, src1_clone, src2_clone, n_dims, mode, n_ctx_orig_ggml, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow);
+ } else if (tensor->op == GGML_OP_UNARY) {
+ switch (ggml_get_unary_op(tensor)) {
+ case GGML_UNARY_OP_SILU:
+ tensor_clone = ggml_silu(ggml_ctx, src0_clone);
+ break;
+ case GGML_UNARY_OP_GELU:
+ tensor_clone = ggml_gelu(ggml_ctx, src0_clone);
+ break;
+ case GGML_UNARY_OP_GELU_QUICK:
+ tensor_clone = ggml_gelu_quick(ggml_ctx, src0_clone);
+ break;
+ case GGML_UNARY_OP_RELU:
+ tensor_clone = ggml_relu(ggml_ctx, src0_clone);
+ break;
+ case GGML_UNARY_OP_TANH:
+ tensor_clone = ggml_tanh(ggml_ctx, src0_clone);
+ break;
+ default:
+ std::cerr << "Missing vk_check_results OP: " << ggml_op_name(tensor->op) << std::endl;
+ GGML_ABORT("fatal error");
+ }
+ } else if (tensor->op == GGML_OP_CPY || tensor->op == GGML_OP_DUP) {
+ if (src1 == nullptr) {
+ tensor_clone = ggml_dup(ggml_ctx, src0_clone);
+ tensor_clone->type = tensor->type;
+ } else {
+ tensor_clone = ggml_cpy(ggml_ctx, src0_clone, src1_clone);
+ }
+ } else if (tensor->op == GGML_OP_CONT) {
+ tensor_clone = ggml_cont_4d(ggml_ctx, src0_clone, tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3]);
+ } else if (tensor->op == GGML_OP_RESHAPE) {
+ tensor_clone = ggml_reshape_4d(ggml_ctx, src0_clone, tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3]);
+ } else if (tensor->op == GGML_OP_VIEW) {
+ tensor_clone = ggml_view_4d(ggml_ctx, src0_clone, tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3], tensor->nb[1], tensor->nb[2], tensor->nb[3], ((int32_t *) tensor->op_params)[0]);
+ } else if (tensor->op == GGML_OP_PERMUTE) {
+ int32_t * params = (int32_t *)tensor->op_params;
+ tensor_clone = ggml_permute(ggml_ctx, src0_clone, params[0], params[1], params[2], params[3]);
+ } else if (tensor->op == GGML_OP_TRANSPOSE) {
+ tensor_clone = ggml_transpose(ggml_ctx, src0_clone);
+ } else if (tensor->op == GGML_OP_GET_ROWS) {
+ tensor_clone = ggml_get_rows(ggml_ctx, src0_clone, src1_clone);
+ } else if (tensor->op == GGML_OP_ARGSORT) {
+ tensor_clone = ggml_argsort(ggml_ctx, src0_clone, (ggml_sort_order) *(int *)tensor->op_params);
+ } else if (tensor->op == GGML_OP_SUM_ROWS) {
+ tensor_clone = ggml_sum_rows(ggml_ctx, src0_clone);
+ } else if (tensor->op == GGML_OP_IM2COL) {
+ const int32_t s0 = tensor->op_params[0];
+ const int32_t s1 = tensor->op_params[1];
+ const int32_t p0 = tensor->op_params[2];
+ const int32_t p1 = tensor->op_params[3];
+ const int32_t d0 = tensor->op_params[4];
+ const int32_t d1 = tensor->op_params[5];
+
+ const bool is_2D = tensor->op_params[6] == 1;
+ tensor_clone = ggml_im2col(ggml_ctx, src0_clone, src1_clone, s0, s1, p0, p1, d0, d1, is_2D, tensor->type);
+ } else if (tensor->op == GGML_OP_TIMESTEP_EMBEDDING) {
+ const int32_t dim = tensor->op_params[0];
+ const int32_t max_period = tensor->op_params[1];
+ tensor_clone = ggml_timestep_embedding(ggml_ctx, src0_clone, dim, max_period);
+ } else if (tensor->op == GGML_OP_POOL_2D) {
+ enum ggml_op_pool op = static_cast<ggml_op_pool>(tensor->op_params[0]);
+ const int32_t k0 = tensor->op_params[1];
+ const int32_t k1 = tensor->op_params[2];
+ const int32_t s0 = tensor->op_params[3];
+ const int32_t s1 = tensor->op_params[4];
+ const int32_t p0 = tensor->op_params[5];
+ const int32_t p1 = tensor->op_params[6];
+
+ tensor_clone = ggml_pool_2d(ggml_ctx, src0_clone, op, k0, k1, s0, s1, p0, p1);
+ } else if (tensor->op == GGML_OP_LEAKY_RELU) {
+ const float * op_params = (const float *)tensor->op_params;
+ tensor_clone = ggml_leaky_relu(ggml_ctx, src0_clone, op_params[0], false);
+ } else if (tensor->op == GGML_OP_RWKV_WKV6) {
+ tensor_clone = ggml_rwkv_wkv6(ggml_ctx, tensor->src[0], tensor->src[1], tensor->src[2], tensor->src[3],
+ tensor->src[4], tensor->src[5]);
+ }
+ else {
+ std::cerr << "Missing vk_check_results OP: " << ggml_op_name(tensor->op) << std::endl;
+ GGML_ABORT("fatal error");
+ }
+
+ ggml_cgraph * cgraph = ggml_new_graph(ggml_ctx);
+ ggml_build_forward_expand(cgraph, tensor_clone);
+
+ ggml_graph_compute_with_ctx(ggml_ctx, cgraph, 8);
+
+ if (vk_output_tensor > 0 && vk_output_tensor == check_counter) {
+ ggml_vk_print_tensor(tensor_clone, "tensor_clone");
+ }
+
+ comp_size = ggml_nbytes(tensor_clone);
+
+ comp_result = malloc(comp_size);
+ memcpy(comp_result, tensor_clone->data, comp_size);
+ memcpy(comp_nb, tensor_clone->nb, sizeof(size_t) * GGML_MAX_DIMS);
+
+    if (src0 != nullptr) {
+        free(src0_buffer);
+    }
+    if (src1 != nullptr) {
+        free(src1_buffer);
+    }
+    if (src2 != nullptr) {
+        free(src2_buffer);
+    }
+    if (src3 != nullptr) {
+        free(src3_buffer);
+    }
+
+ ggml_free(ggml_ctx);
+
+ VK_LOG_DEBUG("END ggml_vk_check_results_0(" << tensor->name << ")");
+}
+
+static void ggml_vk_check_results_1(ggml_tensor * tensor) {
+ if (tensor->op == GGML_OP_TRANSPOSE) {
+ return;
+ }
+ if (!(vk_output_tensor > 0 && vk_output_tensor == check_counter) && check_counter <= vk_skip_checks) {
+ return;
+ }
+
+ VK_LOG_DEBUG("ggml_vk_check_results_1(" << tensor->name << ")");
+
+ ggml_tensor * src0 = tensor->src[0];
+ ggml_tensor * src1 = tensor->src[1];
+ ggml_tensor * src2 = tensor->src[2];
+
+ void * tensor_data = tensor->data;
+
+ if (ggml_backend_buffer_is_vk(tensor->buffer)) {
+ size_t tensor_size = ggml_nbytes(tensor);
+ tensor_data = malloc(tensor_size);
+
+ ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)tensor->buffer->context;
+
+ vk_buffer& buffer_gpu = buf_ctx->dev_buffer;
+ uint64_t offset = vk_tensor_offset(tensor) + tensor->view_offs;
+ if (offset + tensor_size >= buffer_gpu->size) {
+ tensor_size = buffer_gpu->size - offset;
+ }
+
+ ggml_vk_buffer_read(buffer_gpu, offset, tensor_data, tensor_size);
+ }
+
+ float first_error_result = -1.0f;
+ float first_error_correct = -1.0f;
+ std::array<int, 4> first_error = { -1, -1, -1, -1 };
+ double avg_err = 0.0;
+ size_t counter = 0;
+
+ for (int i3 = 0; i3 < tensor->ne[3]; i3++) {
+ for (int i2 = 0; i2 < tensor->ne[2]; i2++) {
+ for (int i1 = 0; i1 < tensor->ne[1]; i1++) {
+ for (int i0 = 0; i0 < tensor->ne[0]; i0++) {
+ const bool buffer_size_fit = i3*comp_nb[3] + i2*comp_nb[2] + i1*comp_nb[1] + i0*comp_nb[0] < comp_size;
+ float correct = 0.0f;
+ float result = 0.0f;
+
+ if (buffer_size_fit) {
+ if (tensor->type == GGML_TYPE_F32) {
+ correct = *(float *) ((char *) comp_result + i3*comp_nb[3] + i2*comp_nb[2] + i1*comp_nb[1] + i0*comp_nb[0]);
+ result = *(float *) ((char *) tensor_data + i3*tensor->nb[3] + i2*tensor->nb[2] + i1*tensor->nb[1] + i0*tensor->nb[0]);
+ } else if (tensor->type == GGML_TYPE_F16) {
+ correct = ggml_fp16_to_fp32(*(ggml_fp16_t *) ((char *) comp_result + i3*comp_nb[3] + i2*comp_nb[2] + i1*comp_nb[1] + i0*comp_nb[0]));
+ result = ggml_fp16_to_fp32(*(ggml_fp16_t *) ((char *) tensor_data + i3*tensor->nb[3] + i2*tensor->nb[2] + i1*tensor->nb[1] + i0*tensor->nb[0]));
+ } else if (tensor->type == GGML_TYPE_I32) {
+ correct = *(int32_t *) ((char *) comp_result + i3*comp_nb[3] + i2*comp_nb[2] + i1*comp_nb[1] + i0*comp_nb[0]);
+ result = *(int32_t *) ((char *) tensor_data + i3*tensor->nb[3] + i2*tensor->nb[2] + i1*tensor->nb[1] + i0*tensor->nb[0]);
+ } else {
+ std::cerr << "Results check not implemented for type " << ggml_type_name(tensor->type) << std::endl;
+ }
+ } else {
+ std::cerr << "Missing debug code for type " << ggml_type_name(tensor->type) << std::endl;
+ GGML_ABORT("fatal error");
+ }
+
+ if ((std::isnan(correct) != std::isnan(result)) || (std::isinf(correct) != std::isinf(result)) || !buffer_size_fit) {
+ std::cerr << "ERROR: Invalid value in " << ggml_op_name(tensor->op) << " i3=" << i3 << " i2=" << i2 << " i1=" << i1 << " i0=" << i0 << " result=" << result << " correct=" << correct << " avg_err=" << (avg_err / counter) << std::endl;
+ std::cerr << "tensor=" << tensor << " tensor->name=" << tensor->name << " tensor->type: " << ggml_type_name(tensor->type) << " ne0=" << tensor->ne[0] << " nb0=" << tensor->nb[0] << " ne1=" << tensor->ne[1] << " nb1=" << tensor->nb[1] << " ne2=" << tensor->ne[2] << " nb2=" << tensor->nb[2] << " ne3=" << tensor->ne[3] << " nb3=" << tensor->nb[3] << " offset=" << tensor->view_offs << std::endl;
+ if (src0 != nullptr) {
+ std::cerr << "src0=" << src0 << " src0->name=" << src0->name << " op=" << ggml_op_name(src0->op) << " type=" << ggml_type_name(src0->type) << " ne0=" << src0->ne[0] << " nb0=" << src0->nb[0] << " ne1=" << src0->ne[1] << " nb1=" << src0->nb[1] << " ne2=" << src0->ne[2] << " nb2=" << src0->nb[2] << " ne3=" << src0->ne[3] << " nb3=" << src0->nb[3] << " offset=" << src0->view_offs << std::endl;
+ }
+ if (src1 != nullptr) {
+ std::cerr << "src1=" << src1 << " src1->name=" << src1->name << " op=" << ggml_op_name(src1->op) << " type=" << ggml_type_name(src1->type) << " ne0=" << src1->ne[0] << " nb0=" << src1->nb[0] << " ne1=" << src1->ne[1] << " nb1=" << src1->nb[1] << " ne2=" << src1->ne[2] << " nb2=" << src1->nb[2] << " ne3=" << src1->ne[3] << " nb3=" << src1->nb[3] << " offset=" << src1->view_offs << std::endl;
+ }
+ if (src2 != nullptr) {
+ std::cerr << "src2=" << src2 << " src2->name=" << src2->name << " op=" << ggml_op_name(src2->op) << " type=" << ggml_type_name(src2->type) << " ne0=" << src2->ne[0] << " nb0=" << src2->nb[0] << " ne1=" << src2->ne[1] << " nb1=" << src2->nb[1] << " ne2=" << src2->ne[2] << " nb2=" << src2->nb[2] << " ne3=" << src2->ne[3] << " nb3=" << src2->nb[3] << " offset=" << src2->view_offs << std::endl;
+ }
+ std::cerr << "First error: result=" << first_error_result << " correct=" << first_error_correct << " i3=" << first_error[3] << " i2=" << first_error[2] << " i1=" << first_error[1] << " i0=" << first_error[0] << std::endl;
+ std::cerr << std::endl << "Result:" << std::endl;
+ ggml_vk_print_tensor_area(tensor, tensor_data, i0, i1, i2, i3);
+ std::cerr << std::endl << "Correct:" << std::endl;
+ ggml_vk_print_tensor_area(tensor, comp_result, i0, i1, i2, i3);
+ std::cerr << std::endl;
+ std::vector<const ggml_tensor *> done;
+ ggml_vk_print_graph_origin(tensor, done);
+ GGML_ABORT("fatal error");
+ }
+ if (first_error[0] == -1 && std::fabs(correct - result) > 0.1f) {
+ first_error[0] = i0;
+ first_error[1] = i1;
+ first_error[2] = i2;
+ first_error[3] = i3;
+ first_error_result = result;
+ first_error_correct = correct;
+ }
+
+ // Special case, value is infinite, avoid NaN result in avg_err
+ // NaN also appears in results, if both are nan error is 0
+ if (!std::isinf(correct) && !std::isinf(result) && !std::isnan(correct) && !std::isnan(result)) {
+ avg_err += std::fabs(correct - result);
+ }
+ counter++;
+ }
+ }
+ }
+ }
+
+ avg_err /= counter;
+
+ if (vk_output_tensor > 0 && vk_output_tensor == check_counter) {
+ std::cerr << "TENSOR CHECK: avg_err=" << avg_err << " in " << ggml_op_name(tensor->op) << " (check " << check_counter << ")" << std::endl;
+ std::cerr << "tensor=" << tensor << " tensor->name=" << tensor->name << " tensor->type: " << ggml_type_name(tensor->type) << " ne0=" << tensor->ne[0] << " nb0=" << tensor->nb[0] << " ne1=" << tensor->ne[1] << " nb1=" << tensor->nb[1] << " ne2=" << tensor->ne[2] << " nb2=" << tensor->nb[2] << " ne3=" << tensor->ne[3] << " nb3=" << tensor->nb[3] << " offset=" << tensor->view_offs << std::endl;
+ if (src0 != nullptr) {
+ std::cerr << "src0=" << src0 << " op=" << ggml_op_name(src0->op) << " type=" << ggml_type_name(src0->type) << " ne0=" << src0->ne[0] << " nb0=" << src0->nb[0] << " ne1=" << src0->ne[1] << " nb1=" << src0->nb[1] << " ne2=" << src0->ne[2] << " nb2=" << src0->nb[2] << " ne3=" << src0->ne[3] << " nb3=" << src0->nb[3] << " offset=" << src0->view_offs << std::endl;
+ }
+ if (src1 != nullptr) {
+ std::cerr << "src1=" << src1 << " op=" << ggml_op_name(src1->op) << " type=" << ggml_type_name(src1->type) << " ne0=" << src1->ne[0] << " nb0=" << src1->nb[0] << " ne1=" << src1->ne[1] << " nb1=" << src1->nb[1] << " ne2=" << src1->ne[2] << " nb2=" << src1->nb[2] << " ne3=" << src1->ne[3] << " nb3=" << src1->nb[3] << " offset=" << src1->view_offs << std::endl;
+ }
+ if (src2 != nullptr) {
+ std::cerr << "src2=" << src2 << " op=" << ggml_op_name(src2->op) << " type=" << ggml_type_name(src2->type) << " ne0=" << src2->ne[0] << " nb0=" << src2->nb[0] << " ne1=" << src2->ne[1] << " nb1=" << src2->nb[1] << " ne2=" << src2->ne[2] << " nb2=" << src2->nb[2] << " ne3=" << src2->ne[3] << " nb3=" << src2->nb[3] << " offset=" << src2->view_offs << std::endl;
+ }
+ std::cerr << "First error: result=" << first_error_result << " correct=" << first_error_correct << " i3=" << first_error[3] << " i2=" << first_error[2] << " i1=" << first_error[1] << " i0=" << first_error[0] << std::endl;
+ std::cerr << std::endl << "Result:" << std::endl;
+ ggml_vk_print_tensor_area(tensor, tensor_data, 5, 5, 0, 0);
+ std::cerr << std::endl << "Correct:" << std::endl;
+ ggml_vk_print_tensor_area(tensor, comp_result, 5, 5, 0, 0);
+ std::cerr << std::endl;
+ std::vector<const ggml_tensor *> done;
+ ggml_vk_print_graph_origin(tensor, done);
+ }
+
+ if (avg_err > 0.05 || std::isnan(avg_err)) {
+ std::cerr << "ERROR: avg_err=" << avg_err << " in " << ggml_op_name(tensor->op) << " (check " << check_counter << ")" << std::endl;
+ std::cerr << "tensor=" << tensor << " tensor->name=" << tensor->name << " tensor->type: " << ggml_type_name(tensor->type) << " ne0=" << tensor->ne[0] << " nb0=" << tensor->nb[0] << " ne1=" << tensor->ne[1] << " nb1=" << tensor->nb[1] << " ne2=" << tensor->ne[2] << " nb2=" << tensor->nb[2] << " ne3=" << tensor->ne[3] << " nb3=" << tensor->nb[3] << " offset=" << tensor->view_offs << std::endl;
+ if (src0 != nullptr) {
+ std::cerr << "src0=" << src0 << " op=" << ggml_op_name(src0->op) << " type=" << ggml_type_name(src0->type) << " ne0=" << src0->ne[0] << " nb0=" << src0->nb[0] << " ne1=" << src0->ne[1] << " nb1=" << src0->nb[1] << " ne2=" << src0->ne[2] << " nb2=" << src0->nb[2] << " ne3=" << src0->ne[3] << " nb3=" << src0->nb[3] << " offset=" << src0->view_offs << std::endl;
+ }
+ if (src1 != nullptr) {
+ std::cerr << "src1=" << src1 << " op=" << ggml_op_name(src1->op) << " type=" << ggml_type_name(src1->type) << " ne0=" << src1->ne[0] << " nb0=" << src1->nb[0] << " ne1=" << src1->ne[1] << " nb1=" << src1->nb[1] << " ne2=" << src1->ne[2] << " nb2=" << src1->nb[2] << " ne3=" << src1->ne[3] << " nb3=" << src1->nb[3] << " offset=" << src1->view_offs << std::endl;
+ }
+ if (src2 != nullptr) {
+ std::cerr << "src2=" << src2 << " op=" << ggml_op_name(src2->op) << " type=" << ggml_type_name(src2->type) << " ne0=" << src2->ne[0] << " nb0=" << src2->nb[0] << " ne1=" << src2->ne[1] << " nb1=" << src2->nb[1] << " ne2=" << src2->ne[2] << " nb2=" << src2->nb[2] << " ne3=" << src2->ne[3] << " nb3=" << src2->nb[3] << " offset=" << src2->view_offs << std::endl;
+ }
+ std::cerr << "First error: result=" << first_error_result << " correct=" << first_error_correct << " i3=" << first_error[3] << " i2=" << first_error[2] << " i1=" << first_error[1] << " i0=" << first_error[0] << std::endl;
+ std::cerr << std::endl << "Result:" << std::endl;
+ ggml_vk_print_tensor_area(tensor, tensor_data, first_error[0], first_error[1], first_error[2], first_error[3]);
+ std::cerr << std::endl << "Correct:" << std::endl;
+ ggml_vk_print_tensor_area(tensor, comp_result, first_error[0], first_error[1], first_error[2], first_error[3]);
+ std::cerr << std::endl;
+ std::vector<const ggml_tensor *> done;
+ ggml_vk_print_graph_origin(tensor, done);
+ GGML_ABORT("fatal error");
+ } else {
+ std::cerr << check_counter << " " << tensor->name << " op=" << ggml_op_name(tensor->op) << " avg_err=" << avg_err << std::endl;
+ }
+
+ free(comp_result);
+ comp_result = nullptr;
+ comp_size = 0;
+
+ if (ggml_backend_buffer_is_vk(tensor->buffer)) {
+ free(tensor_data);
+ }
+
+ VK_LOG_DEBUG("END ggml_vk_check_results_1(" << tensor->name << ")");
+}
+#endif
+
+GGML_BACKEND_DL_IMPL(ggml_backend_vk_reg)
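
For reference, the validation loop in ggml_vk_check_results_1 above accumulates the absolute error only over pairs where both values are finite, divides by the total element count, and fails either on a NaN/Inf classification mismatch or when the average error exceeds 0.05. The same acceptance test as a standalone C++ sketch (illustrative only, not part of the patch; the real code additionally dumps tensor metadata and the graph origin before aborting):

    #include <cmath>
    #include <cstddef>

    // Sketch of the debug comparison: average |correct - result| over finite
    // pairs (dividing by the total count, as the check above does) and treat
    // a NaN/Inf classification mismatch as an immediate failure.
    static bool vk_results_acceptable(const float * result, const float * correct,
                                      size_t n, double max_avg_err = 0.05) {
        double err = 0.0;
        for (size_t i = 0; i < n; ++i) {
            if (std::isnan(correct[i]) != std::isnan(result[i]) ||
                std::isinf(correct[i]) != std::isinf(result[i])) {
                return false; // mirrors the "Invalid value" GGML_ABORT branch
            }
            if (!std::isnan(correct[i]) && !std::isinf(correct[i])) {
                err += std::fabs((double) correct[i] - (double) result[i]);
            }
        }
        return n > 0 && (err / n) <= max_avg_err;
    }
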
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt
new file mode 100644
index 00000000..bd0c74cb
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt
@@ -0,0 +1,9 @@
+find_package (Threads REQUIRED)
+find_package(Vulkan COMPONENTS glslc REQUIRED)
+
+set(TARGET vulkan-shaders-gen)
+add_executable(${TARGET} vulkan-shaders-gen.cpp)
+install(TARGETS ${TARGET} RUNTIME)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
+target_link_libraries(vulkan-shaders-gen PUBLIC Threads::Threads)
+target_link_libraries(vulkan-shaders-gen PRIVATE Vulkan::Vulkan)
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/acc.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/acc.comp
new file mode 100644
index 00000000..d896f1ef
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/acc.comp
@@ -0,0 +1,29 @@
+#version 450
+
+#include "types.comp"
+#include "generic_binary_head.comp"
+
+layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
+
+void main() {
+ const uint idx = gl_GlobalInvocationID.x;
+ if (idx >= p.ne) {
+ return;
+ }
+
+ const uint offset = p.param3;
+ const uint src1_i = idx - offset;
+ const uint oz = src1_i / p.nb02;
+ const uint oy = (src1_i - (oz * p.nb02)) / p.nb01;
+ const uint ox = src1_i % p.nb01;
+
+ uint i00, i01, i02, i03;
+ get_indices(idx, i00, i01, i02, i03);
+
+ if (ox < p.ne10 && oy < p.ne11 && oz < p.ne12) {
+ data_d[get_doffset() + dst_idx(i00, i01, i02, i03)] = D_TYPE(FLOAT_TYPE(data_a[get_aoffset() + src0_idx(i00, i01, i02, i03)]) + FLOAT_TYPE(data_b[get_boffset() + ox + oy * p.ne10 + oz * p.ne10 * p.ne11]));
+ } else {
+ data_d[get_doffset() + dst_idx(i00, i01, i02, i03)] = D_TYPE(FLOAT_TYPE(data_a[get_aoffset() + src0_idx(i00, i01, i02, i03)]));
+ }
+}
+
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/add.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/add.comp
new file mode 100644
index 00000000..2b4085c4
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/add.comp
@@ -0,0 +1,29 @@
+#version 450
+
+#extension GL_EXT_shader_16bit_storage : require
+
+#include "types.comp"
+#include "generic_binary_head.comp"
+
+const uint num_threads = 256;
+
+layout(local_size_x = num_threads, local_size_y = 1, local_size_z = 1) in;
+
+void main() {
+ uint idx = get_idx();
+
+ // num_threads * num_iter must equal 512, to match the wg_denoms and get_idx calculation
+ const uint num_iter = 2;
+
+ [[unroll]] for (uint i = 0; i < num_iter; ++i) {
+ if (idx >= p.ne) {
+ continue;
+ }
+ uint i00, i01, i02, i03;
+ get_indices(idx, i00, i01, i02, i03);
+
+ data_d[get_doffset() + dst_idx(i00, i01, i02, i03)] = D_TYPE(FLOAT_TYPE(data_a[get_aoffset() + src0_idx(i00, i01, i02, i03)]) + FLOAT_TYPE(data_b[get_boffset() + src1_idx(i00, i01, i02, i03)]));
+
+ idx += num_threads;
+ }
+}
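
add.comp, div.comp and the other element-wise binary shaders work on a flattened element index: each of the 256 threads handles two indices so one workgroup covers 512 elements, and get_indices (from generic_binary_head.comp) turns the flat index back into (i00, i01, i02, i03) before the per-operand broadcast strides are applied. A CPU-side sketch of that decomposition, using the same divide-and-subtract pattern that concat.comp writes out inline (illustrative only, not part of the patch):

    #include <cstdint>

    // Decompose a flattened element index into 4D coordinates (i0 fastest),
    // given the logical extents ne0..ne2 of the faster-moving dimensions.
    struct Idx4 { uint32_t i0, i1, i2, i3; };

    static Idx4 unflatten(uint32_t idx, uint32_t ne0, uint32_t ne1, uint32_t ne2) {
        Idx4 r;
        r.i3 = idx / (ne2 * ne1 * ne0);
        uint32_t rem = idx - r.i3 * (ne2 * ne1 * ne0);
        r.i2 = rem / (ne1 * ne0);
        rem -= r.i2 * (ne1 * ne0);
        r.i1 = rem / ne0;
        r.i0 = rem - r.i1 * ne0;
        return r;
    }
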
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/argsort.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/argsort.comp
new file mode 100644
index 00000000..d4fa45b1
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/argsort.comp
@@ -0,0 +1,69 @@
+#version 450
+
+#include "types.comp"
+
+#define BLOCK_SIZE 1024
+#define ASC 0
+
+layout(local_size_x = BLOCK_SIZE, local_size_y = 1, local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
+layout (binding = 1) buffer D {int data_d[];};
+
+layout (push_constant) uniform parameter {
+ uint ncols;
+ uint ncols_pad;
+ uint order;
+} p;
+
+shared int dst_row[BLOCK_SIZE];
+
+void swap(uint idx0, uint idx1) {
+ int tmp = dst_row[idx0];
+ dst_row[idx0] = dst_row[idx1];
+ dst_row[idx1] = tmp;
+}
+
+void main() {
+ // bitonic sort
+ const int col = int(gl_LocalInvocationID.x);
+ const uint row = gl_WorkGroupID.y;
+
+ const uint row_offset = row * p.ncols;
+
+ // initialize indices
+ if (col < p.ncols_pad) {
+ dst_row[col] = col;
+ }
+ barrier();
+
+ for (uint k = 2; k <= p.ncols_pad; k *= 2) {
+ for (uint j = k / 2; j > 0; j /= 2) {
+ const uint ixj = col ^ j;
+ if (col < p.ncols_pad && ixj > col) {
+ if ((col & k) == 0) {
+ if (dst_row[col] >= p.ncols ||
+ (dst_row[ixj] < p.ncols && (p.order == ASC ?
+ data_a[row_offset + dst_row[col]] > data_a[row_offset + dst_row[ixj]] :
+ data_a[row_offset + dst_row[col]] < data_a[row_offset + dst_row[ixj]]))
+ ) {
+ swap(col, ixj);
+ }
+ } else {
+ if (dst_row[ixj] >= p.ncols ||
+ (dst_row[col] < p.ncols && (p.order == ASC ?
+ data_a[row_offset + dst_row[col]] < data_a[row_offset + dst_row[ixj]] :
+ data_a[row_offset + dst_row[col]] > data_a[row_offset + dst_row[ixj]]))
+ ) {
+ swap(col, ixj);
+ }
+ }
+ }
+ barrier();
+ }
+ }
+
+ if (col < p.ncols) {
+ data_d[row_offset + col] = dst_row[col];
+ }
+}
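
argsort.comp sorts the indices of one row with a bitonic compare-exchange network held in shared memory; the column count is padded to the next power of two and padding indices are treated as larger than every real value so they sink to the end. A sequential C++ sketch of the same network for ascending order (illustrative only, not part of the patch; the shader additionally supports descending order via p.order):

    #include <cstdint>
    #include <utility>
    #include <vector>

    // Sequential bitonic argsort: same compare-exchange schedule as the
    // shader, executed one pair at a time instead of one thread per column.
    static std::vector<int> bitonic_argsort_asc(const std::vector<float> & row) {
        const uint32_t n = (uint32_t) row.size();
        uint32_t n_pad = 1;
        while (n_pad < n) n_pad *= 2;

        std::vector<int> idx(n_pad);
        for (uint32_t i = 0; i < n_pad; ++i) idx[i] = (int) i;

        // Does index a sort after index b in ascending order? Padding
        // indices (>= n) compare as larger than everything.
        auto after = [&](int a, int b) {
            if ((uint32_t) a >= n) return true;
            if ((uint32_t) b >= n) return false;
            return row[a] > row[b];
        };

        for (uint32_t k = 2; k <= n_pad; k *= 2) {
            for (uint32_t j = k / 2; j > 0; j /= 2) {
                for (uint32_t col = 0; col < n_pad; ++col) {
                    const uint32_t ixj = col ^ j;
                    if (ixj <= col) continue;
                    const bool ascending = (col & k) == 0;
                    if (ascending ? after(idx[col], idx[ixj])
                                  : after(idx[ixj], idx[col])) {
                        std::swap(idx[col], idx[ixj]);
                    }
                }
            }
        }
        idx.resize(n);
        return idx;
    }
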
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/clamp.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/clamp.comp
new file mode 100644
index 00000000..1e5cb8da
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/clamp.comp
@@ -0,0 +1,17 @@
+#version 450
+
+#include "types.comp"
+#include "generic_unary_head.comp"
+
+layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
+
+void main() {
+ const uint idx = get_idx();
+
+ if (idx >= p.ne) {
+ return;
+ }
+
+ const FLOAT_TYPE val = FLOAT_TYPE(data_a[get_aoffset() + src0_idx(idx)]);
+ data_d[get_doffset() + dst_idx(idx)] = D_TYPE(val < p.param1 ? p.param1 : (val > p.param2 ? p.param2 : val));
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/concat.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/concat.comp
new file mode 100644
index 00000000..9ee2f1fa
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/concat.comp
@@ -0,0 +1,41 @@
+#version 450
+
+#include "types.comp"
+#include "generic_binary_head.comp"
+
+layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
+
+void main() {
+ const uint idx = gl_GlobalInvocationID.z * 262144 + gl_GlobalInvocationID.y * 512 + gl_GlobalInvocationID.x;
+ const int dim = p.param3;
+
+ if (idx >= p.ne) {
+ return;
+ }
+
+ const uint i3 = idx / (p.ne22*p.ne21*p.ne20);
+ const uint i3_offset = i3 * p.ne22*p.ne21*p.ne20;
+ const uint i2 = (idx - i3_offset) / (p.ne21*p.ne20);
+ const uint i2_offset = i2*p.ne21*p.ne20;
+ const uint i1 = (idx - i3_offset - i2_offset) / p.ne20;
+ const uint i0 = idx - i3_offset - i2_offset - i1*p.ne20;
+
+ uint o[4] = {0, 0, 0, 0};
+ o[dim] = dim == 0 ? p.ne00 : (dim == 1 ? p.ne01 : (dim == 2 ? p.ne02 : p.ne03));
+
+ const uint src0_idx = i3*p.nb03 + i2*p.nb02 + i1*p.nb01 + i0*p.nb00;
+ const uint src1_idx = (i3 - o[3])*p.nb13 + (i2 - o[2])*p.nb12 + (i1 - o[1])*p.nb11 + (i0 - o[0])*p.nb10;
+ const uint dst_idx = i3*p.nb23 + i2*p.nb22 + i1*p.nb21 + i0*p.nb20;
+
+ const bool is_src0 = i0 < p.ne00 && i1 < p.ne01 && i2 < p.ne02 && i3 < p.ne03;
+
+#ifndef OPTIMIZATION_ERROR_WORKAROUND
+ data_d[get_doffset() + dst_idx] = D_TYPE(is_src0 ? data_a[get_aoffset() + src0_idx] : data_b[get_boffset() + src1_idx]);
+#else
+ if (is_src0) {
+ data_d[get_doffset() + dst_idx] = data_a[get_aoffset() + src0_idx];
+ } else {
+ data_d[get_doffset() + dst_idx] = data_b[get_boffset() + src1_idx];
+ }
+#endif
+}
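
concat.comp first decomposes the flat output index into four coordinates and then picks the source: a coordinate inside src0's extents reads from src0, anything else reads from src1 with the coordinate along the concatenation dimension shifted back by src0's size. A CPU-side sketch of that decision (illustrative only, not part of the patch):

    #include <cstdint>

    // Which input does an output coordinate of a concat along `dim` come
    // from, and at what coordinate? For a valid concat only the `dim`
    // coordinate can fall outside src0's extents.
    struct ConcatSrc { int which; uint32_t i[4]; };

    static ConcatSrc concat_source(const uint32_t i[4], const uint32_t ne0[4], int dim) {
        ConcatSrc r;
        bool in_src0 = true;
        for (int d = 0; d < 4; ++d) {
            r.i[d] = i[d];
            in_src0 = in_src0 && i[d] < ne0[d];
        }
        r.which = in_src0 ? 0 : 1;
        if (!in_src0) {
            r.i[dim] -= ne0[dim];   // shift back into src1's coordinate space
        }
        return r;
    }
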
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/contig_copy.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/contig_copy.comp
new file mode 100644
index 00000000..dd828c23
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/contig_copy.comp
@@ -0,0 +1,42 @@
+#version 450
+
+#include "types.comp"
+#include "generic_unary_head.comp"
+
+#extension GL_EXT_control_flow_attributes : require
+
+const uint num_threads = 128;
+
+layout(local_size_x = num_threads, local_size_y = 1, local_size_z = 1) in;
+
+void main() {
+ uint idx = get_idx();
+
+ // num_threads * num_iter must equal 512, to match the wg_denoms and get_idx calculation
+ const uint num_iter = 4;
+
+ // fast path for when all four iterations are in-bounds
+ if (idx + (num_iter-1)*num_threads < p.ne) {
+ [[unroll]] for (uint i = 0; i < num_iter; ++i) {
+#ifndef OPTIMIZATION_ERROR_WORKAROUND
+ data_d[get_doffset() + idx] = D_TYPE(data_a[get_aoffset() + idx]);
+#else
+ data_d[get_doffset() + idx] = data_a[get_aoffset() + idx];
+#endif
+ idx += num_threads;
+ }
+ } else {
+ [[unroll]] for (uint i = 0; i < num_iter; ++i) {
+ if (idx >= p.ne) {
+ continue;
+ }
+
+#ifndef OPTIMIZATION_ERROR_WORKAROUND
+ data_d[get_doffset() + idx] = D_TYPE(data_a[get_aoffset() + idx]);
+#else
+ data_d[get_doffset() + idx] = data_a[get_aoffset() + idx];
+#endif
+ idx += num_threads;
+ }
+ }
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/copy.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/copy.comp
new file mode 100644
index 00000000..29c90649
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/copy.comp
@@ -0,0 +1,20 @@
+#version 450
+
+#include "types.comp"
+#include "generic_unary_head.comp"
+
+layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
+
+void main() {
+ const uint idx = get_idx();
+
+ if (idx >= p.ne) {
+ return;
+ }
+
+#ifndef OPTIMIZATION_ERROR_WORKAROUND
+ data_d[get_doffset() + dst_idx(idx)] = D_TYPE(data_a[get_aoffset() + src0_idx(idx)]);
+#else
+ data_d[get_doffset() + dst_idx(idx)] = data_a[get_aoffset() + src0_idx(idx)];
+#endif
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/cos.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/cos.comp
new file mode 100644
index 00000000..0b8d02f5
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/cos.comp
@@ -0,0 +1,17 @@
+#version 450
+
+#include "types.comp"
+#include "generic_unary_head.comp"
+
+layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
+
+void main() {
+ const uint idx = get_idx();
+
+ if (idx >= p.ne) {
+ return;
+ }
+
+ const FLOAT_TYPE val = FLOAT_TYPE(data_a[get_aoffset() + src0_idx(idx)]);
+ data_d[get_doffset() + dst_idx(idx)] = D_TYPE(cos(val));
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_f32.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_f32.comp
new file mode 100644
index 00000000..a4d3fca5
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_f32.comp
@@ -0,0 +1,20 @@
+#version 450
+
+#include "dequant_head.comp"
+
+layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer A {float data_a[];};
+layout (binding = 1) writeonly buffer D {D_TYPE data_b[];};
+
+void main() {
+ const uint i = gl_GlobalInvocationID.x * 16;
+
+ if (i >= p.nel) {
+ return;
+ }
+
+ [[unroll]] for (uint l = 0; l < 16; l++) {
+ data_b[i + l] = D_TYPE(data_a[i + l]);
+ }
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs.comp
new file mode 100644
index 00000000..91bb8f8d
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs.comp
@@ -0,0 +1,118 @@
+#if !defined(DATA_A_F32) && !defined(DATA_A_F16)
+#extension GL_EXT_shader_explicit_arithmetic_types_int8 : require
+#endif
+
+#include "types.comp"
+
+#if defined(A_TYPE_PACKED16)
+layout (binding = 0) readonly buffer A_PACKED16 {A_TYPE_PACKED16 data_a_packed16[];};
+#endif
+#if defined(A_TYPE_PACKED32)
+layout (binding = 0) readonly buffer A_PACKED32 {A_TYPE_PACKED32 data_a_packed32[];};
+#endif
+
+#if defined(DATA_A_F32)
+vec2 dequantize(uint ib, uint iqs, uint a_offset) {
+ return vec2(data_a[a_offset + ib], data_a[a_offset + ib + 1]);
+}
+#endif
+
+#if defined(DATA_A_F16)
+vec2 dequantize(uint ib, uint iqs, uint a_offset) {
+ return vec2(data_a[a_offset + ib], data_a[a_offset + ib + 1]);
+}
+#endif
+
+#if defined(DATA_A_Q4_0)
+vec2 dequantize(uint ib, uint iqs, uint a_offset) {
+ const uint vui = uint(data_a[a_offset + ib].qs[iqs]);
+ return (vec2(vui & 0xF, vui >> 4) - 8.0f);
+}
+vec4 dequantize4(uint ib, uint iqs, uint a_offset) {
+ const uint vui = uint(data_a_packed16[a_offset + ib].qs[iqs/2]);
+ return (vec4(vui & 0xF, (vui >> 4) & 0xF, (vui >> 8) & 0xF, vui >> 12) - 8.0f);
+}
+#endif
+
+#if defined(DATA_A_Q4_1)
+vec2 dequantize(uint ib, uint iqs, uint a_offset) {
+ const uint vui = uint(data_a[a_offset + ib].qs[iqs]);
+ return vec2(vui & 0xF, vui >> 4);
+}
+vec4 dequantize4(uint ib, uint iqs, uint a_offset) {
+ const uint vui = uint(data_a_packed16[a_offset + ib].qs[iqs/2]);
+ return vec4(vui & 0xF, (vui >> 4) & 0xF, (vui >> 8) & 0xF, vui >> 12);
+}
+#endif
+
+#if defined(DATA_A_Q5_0)
+vec2 dequantize(uint ib, uint iqs, uint a_offset) {
+ const uint uint_qh = uint(data_a[a_offset + ib].qh[1]) << 16 | data_a[a_offset + ib].qh[0];
+ const ivec2 qh = ivec2(((uint_qh >> iqs) << 4) & 0x10, (uint_qh >> (iqs + 12)) & 0x10);
+ const uint vui = uint(data_a[a_offset + ib].qs[iqs]);
+ return (vec2((vui & 0xF) | qh.x, (vui >> 4) | qh.y) - 16.0f);
+}
+vec4 dequantize4(uint ib, uint iqs, uint a_offset) {
+ const uint uint_qh = uint(data_a_packed16[a_offset + ib].qh[1]) << 16 | data_a_packed16[a_offset + ib].qh[0];
+ const ivec2 qh0 = ivec2(((uint_qh >> iqs) << 4) & 0x10, (uint_qh >> (iqs + 12)) & 0x10);
+ const ivec2 qh1 = ivec2(((uint_qh >> (iqs + 1)) << 4) & 0x10, (uint_qh >> (iqs + 13)) & 0x10);
+ const uint vui = uint(data_a_packed16[a_offset + ib].qs[iqs/2]);
+ return (vec4((vui & 0xF) | qh0.x, ((vui >> 4) & 0xF) | qh0.y, ((vui >> 8) & 0xF) | qh1.x, (vui >> 12) | qh1.y) - 16.0f);
+}
+#endif
+
+#if defined(DATA_A_Q5_1)
+vec2 dequantize(uint ib, uint iqs, uint a_offset) {
+ const uint uint_qh = data_a[a_offset + ib].qh;
+ const ivec2 qh = ivec2(((uint_qh >> iqs) << 4) & 0x10, (uint_qh >> (iqs + 12)) & 0x10);
+ const uint vui = uint(data_a[a_offset + ib].qs[iqs]);
+ return vec2((vui & 0xF) | qh.x, (vui >> 4) | qh.y);
+}
+vec4 dequantize4(uint ib, uint iqs, uint a_offset) {
+ const uint uint_qh = data_a_packed16[a_offset + ib].qh;
+ const ivec2 qh0 = ivec2(((uint_qh >> iqs) << 4) & 0x10, (uint_qh >> (iqs + 12)) & 0x10);
+ const ivec2 qh1 = ivec2(((uint_qh >> (iqs + 1)) << 4) & 0x10, (uint_qh >> (iqs + 13)) & 0x10);
+ const uint vui = uint(data_a_packed16[a_offset + ib].qs[iqs/2]);
+ return vec4((vui & 0xF) | qh0.x, ((vui >> 4) & 0xF) | qh0.y, ((vui >> 8) & 0xF) | qh1.x, (vui >> 12) | qh1.y);
+}
+#endif
+
+#if defined(DATA_A_Q8_0)
+vec2 dequantize(uint ib, uint iqs, uint a_offset) {
+ return vec2(int(data_a[a_offset + ib].qs[iqs]), int(data_a[a_offset + ib].qs[iqs + 1]));
+}
+vec4 dequantize4(uint ib, uint iqs, uint a_offset) {
+ uint32_t v0 = data_a_packed16[a_offset + ib].qs[iqs/2];
+ uint32_t v1 = data_a_packed16[a_offset + ib].qs[iqs/2 + 1];
+ return vec4(int8_t(v0 & 0xFF), int8_t(v0 >> 8), int8_t(v1 & 0xFF), int8_t(v1 >> 8));
+}
+#endif
+
+#if defined(DATA_A_IQ4_NL)
+vec2 dequantize(uint ib, uint iqs, uint a_offset) {
+ const uint vui = uint(data_a[a_offset + ib].qs[iqs]);
+ return vec2(kvalues_iq4nl[vui & 0xF], kvalues_iq4nl[vui >> 4]);
+}
+vec4 dequantize4(uint ib, uint iqs, uint a_offset) {
+ const uint vui = uint(data_a_packed16[a_offset + ib].qs[iqs/2]);
+ return vec4(kvalues_iq4nl[vui & 0xF], kvalues_iq4nl[(vui >> 4) & 0xF], kvalues_iq4nl[(vui >> 8) & 0xF], kvalues_iq4nl[vui >> 12]);
+}
+#endif
+
+#if defined(DATA_A_F32) || defined(DATA_A_F16)
+vec2 get_dm(uint ib, uint a_offset) {
+ return vec2(0, 0);
+}
+#endif
+
+#if defined(DATA_A_Q4_0) || defined(DATA_A_Q5_0) || defined(DATA_A_Q8_0) || defined(DATA_A_IQ4_NL)
+vec2 get_dm(uint ib, uint a_offset) {
+ return vec2(float(data_a[a_offset + ib].d), 0);
+}
+#endif
+
+#if defined(DATA_A_Q4_1) || defined(DATA_A_Q5_1)
+vec2 get_dm(uint ib, uint a_offset) {
+ return vec2(float(data_a[a_offset + ib].d), float(data_a[a_offset + ib].m));
+}
+#endif
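
The dequantize/dequantize4 helpers above all follow the same shape: pull one or two packed values out of a block, widen them, and let the caller apply the per-block scale (and offset, for the *_1 formats) returned by get_dm. For Q4_0 the equivalent CPU-side math for a whole block looks like this (a sketch, not part of the patch; d is the block's fp16 scale already converted to float):

    #include <cstdint>

    // Q4_0: 32 values per block, stored as 16 bytes of packed nibbles.
    // Each value is (nibble - 8) * d; low nibbles fill the first half of
    // the block, high nibbles the second half.
    static void dequant_q4_0_block(const uint8_t * qs /* 16 bytes */, float d,
                                   float * out /* 32 floats */) {
        for (int j = 0; j < 16; ++j) {
            const int x0 = (qs[j] & 0x0F) - 8;
            const int x1 = (qs[j] >>   4) - 8;
            out[j]      = x0 * d;
            out[j + 16] = x1 * d;
        }
    }
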
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp
new file mode 100644
index 00000000..94b78598
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp
@@ -0,0 +1,325 @@
+
+#include "types.comp"
+
+layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufQ4_0 {
+ block_q4_0_packed16 block;
+};
+
+float16_t dequantFuncQ4_0(const in decodeBufQ4_0 bl, const in uint blockCoords[2], const in uint coordInBlock[2])
+{
+ const float16_t d = bl.block.d;
+ const uint idx = coordInBlock[1];
+ const uint shift = (idx & 0x10) >> 2;
+ uint32_t qs = uint32_t(bl.block.qs[(idx & 0xE) >> 1]);
+ qs >>= shift;
+ qs &= 0x0F0F;
+ qs = unpack8(qs)[idx & 1];
+ float16_t ret = (float16_t(qs) - float16_t(8)) * d;
+ return ret;
+}
+
+layout(buffer_reference, std430, buffer_reference_align = 4) buffer decodeBufQ4_1 {
+ block_q4_1 block;
+};
+
+float16_t dequantFuncQ4_1(const in decodeBufQ4_1 bl, const in uint blockCoords[2], const in uint coordInBlock[2])
+{
+ const float16_t d = bl.block.d;
+ const float16_t m = bl.block.m;
+ const uint idx = coordInBlock[1];
+ const uint iqs = idx & 0xF;
+ const uint shift = (idx & 0x10) >> 2;
+ uint32_t qs = bl.block.qs[iqs];
+ qs >>= shift;
+ qs &= 0xF;
+ float16_t ret = float16_t(qs) * d + m;
+ return ret;
+}
+
+layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufQ5_0 {
+ block_q5_0 block;
+};
+
+float16_t dequantFuncQ5_0(const in decodeBufQ5_0 bl, const in uint blockCoords[2], const in uint coordInBlock[2])
+{
+ const float16_t d = bl.block.d;
+ const uint idx = coordInBlock[1];
+ const uint iqs = idx & 0xF;
+
+ const uint uint_qh = uint(bl.block.qh[1]) << 16 | bl.block.qh[0];
+ const uint qh = ((uint_qh >> idx) << 4) & 0x10;
+
+ const uint shift = (idx & 0x10) >> 2;
+ uint32_t qs = bl.block.qs[iqs];
+ qs >>= shift;
+ qs &= 0xF;
+
+ float16_t ret = (float16_t(qs | qh) - float16_t(16)) * d;
+ return ret;
+}
+
+layout(buffer_reference, std430, buffer_reference_align = 8) buffer decodeBufQ5_1 {
+ block_q5_1 block;
+};
+
+float16_t dequantFuncQ5_1(const in decodeBufQ5_1 bl, const in uint blockCoords[2], const in uint coordInBlock[2])
+{
+ const float16_t d = bl.block.d;
+ const float16_t m = bl.block.m;
+ const uint idx = coordInBlock[1];
+ const uint iqs = idx & 0xF;
+
+ const uint uint_qh = bl.block.qh;
+ const uint qh = ((uint_qh >> idx) << 4) & 0x10;
+
+ const uint shift = (idx & 0x10) >> 2;
+ uint32_t qs = bl.block.qs[iqs];
+ qs >>= shift;
+ qs &= 0xF;
+
+ float16_t ret = float16_t(qs | qh) * d + m;
+ return ret;
+}
+
+layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufQ8_0 {
+ block_q8_0_packed16 block;
+};
+
+float16_t dequantFuncQ8_0(const in decodeBufQ8_0 bl, const in uint blockCoords[2], const in uint coordInBlock[2])
+{
+ const float16_t d = bl.block.d;
+ const uint idx = coordInBlock[1];
+ const uint iqs = idx;
+
+ // Load 16b and select the byte for this element
+ int32_t qs = unpack8(int32_t(bl.block.qs[(iqs & 0x1E) >> 1]))[iqs & 1];
+ float16_t ret = float16_t(qs) * d;
+ return ret;
+}
+
+layout(buffer_reference, std430, buffer_reference_align = 4) buffer decodeBufQ2_K {
+ block_q2_K block;
+};
+
+float16_t dequantFuncQ2_K(const in decodeBufQ2_K bl, const in uint blockCoords[2], const in uint coordInBlock[2])
+{
+ const f16vec2 d = bl.block.d;
+ const uint idx = coordInBlock[1];
+ const uint iqs = idx;
+
+ const uint qsi = (iqs / 128) * 32 + (iqs % 32); // 0..31
+ const uint scalesi = iqs / 16; // 0..15
+ const uint qsshift = ((iqs % 128) / 32) * 2; // 0,2,4,6
+
+ uint32_t qs = bl.block.qs[qsi];
+ const uint scales = bl.block.scales[scalesi];
+ float16_t ret = d.x * float16_t(scales & 0xF) * float16_t((qs >> qsshift) & 3) - d.y * float16_t(scales >> 4);
+ return ret;
+}
+
+layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufQ3_K {
+ block_q3_K block;
+};
+
+float16_t dequantFuncQ3_K(const in decodeBufQ3_K bl, const in uint blockCoords[2], const in uint coordInBlock[2])
+{
+ const uint idx = coordInBlock[1];
+ const uint iqs = idx;
+
+ const uint n = iqs / 128; // 0,1
+ const uint qsi = n * 32 + (iqs % 32); // 0..63
+ const uint hmi = (iqs % 32); // 0..31
+ const uint j = (iqs % 128) / 8; // 0..15
+ const uint is = iqs / 16; // 0..15
+ const uint halfsplit = ((iqs % 128) / 32); // 0,1,2,3
+ const uint qsshift = halfsplit * 2; // 0,2,4,6
+ const uint m = 1 << (4 * n + halfsplit); // 1,2,4,8,16,32,64,128
+
+ uint32_t scaleidx0 = (is < 8) ? is : (is-8);
+ uint32_t scaleidx0shift = (is < 8) ? 0 : 4;
+ uint32_t scaleidx1 = is + 8 - (is/4)*4;
+ uint32_t scaleidx1shift = (is/4)*2;
+
+ const int8_t us = int8_t(((bl.block.scales[scaleidx0] >> scaleidx0shift) & 0xF) | (((bl.block.scales[scaleidx1] >> scaleidx1shift) & 3) << 4));
+
+ const float16_t dl = bl.block.d * float16_t(us - 32);
+
+ float16_t ret = dl * float16_t(int8_t((bl.block.qs[qsi ] >> qsshift) & 3) - (((bl.block.hmask[hmi ] & m) != 0) ? 0 : 4));
+
+ return ret;
+}
+
+layout(buffer_reference, std430, buffer_reference_align = 16) buffer decodeBufQ4_K {
+ block_q4_K block;
+};
+
+layout(buffer_reference, std430, buffer_reference_align = 16) buffer decodeBufQ4_K_packed16 {
+ block_q4_K_packed16 block;
+};
+
+float16_t dequantFuncQ4_K(const in decodeBufQ4_K bl, const in uint blockCoords[2], const in uint coordInBlock[2])
+{
+ decodeBufQ4_K_packed16 bl16 = decodeBufQ4_K_packed16(bl);
+ const uint idx = coordInBlock[1];
+
+ const uint b = (idx & 0x20) >> 5; // 0,1
+ const uint is = (idx & 0xE0) >> 5; // 0..7
+
+ const f16vec2 loadd = bl.block.d;
+
+ uint32_t sc;
+ uint32_t mbyte;
+
+ uint32_t scidx0 = (is < 4) ? is : (is + 4);
+ uint32_t scidx1 = (is < 4) ? is : (is - 4);
+ uint32_t scidxmask1 = (is < 4) ? 0x30 : 0xC0;
+ uint32_t scidxshift1 = (is < 4) ? 0 : 2;
+ uint32_t mbidx0 = is + 4;
+ uint32_t mbidx1 = (is < 4) ? is + 4 : is;
+ uint32_t mbidxmask0 = (is < 4) ? 0xF : 0xF0;
+ uint32_t mbidxshift0 = (is < 4) ? 0 : 4;
+ uint32_t mbidxmask1 = (is < 4) ? 0x30 : 0xC0;
+ uint32_t mbidxshift1 = (is < 4) ? 0 : 2;
+
+ sc = uint8_t((bl.block.scales[scidx0] & 0xF) | ((bl.block.scales[scidx1] & scidxmask1) >> scidxshift1));
+ mbyte = uint8_t(((bl.block.scales[mbidx0] & mbidxmask0) >> mbidxshift0) | ((bl.block.scales[mbidx1] & mbidxmask1) >> mbidxshift1));
+
+ const float16_t d = loadd.x * float16_t(sc);
+ const float16_t m = loadd.y * float16_t(mbyte);
+
+ uint qs = uint32_t(bl16.block.qs[((idx & 0xC0) >> 2) + ((idx & 0x1E) >> 1)]);
+ qs = (qs >> (b * 4)) & 0x0F0F;
+ qs = unpack8(qs)[idx & 1];
+
+ float16_t ret = d * float16_t(qs) - m;
+
+ return ret;
+}
+
+layout(buffer_reference, std430, buffer_reference_align = 16) buffer decodeBufQ5_K {
+ block_q5_K block;
+};
+
+layout(buffer_reference, std430, buffer_reference_align = 16) buffer decodeBufQ5_K_packed16 {
+ block_q5_K_packed16 block;
+};
+
+float16_t dequantFuncQ5_K(const in decodeBufQ5_K bl, const in uint blockCoords[2], const in uint coordInBlock[2])
+{
+ decodeBufQ5_K_packed16 bl16 = decodeBufQ5_K_packed16(bl);
+ const uint idx = coordInBlock[1];
+
+ const uint b = (idx & 0x20) >> 5; // 0,1
+ const uint is = (idx & 0xE0) >> 5; // 0..7
+
+ const uint32_t hm = 0x0101 << is;
+
+ const f16vec2 loadd = bl.block.d;
+
+ uint32_t sc;
+ uint32_t mbyte;
+
+ uint32_t scidx0 = (is < 4) ? is : (is + 4);
+ uint32_t scidx1 = (is < 4) ? is : (is - 4);
+ uint32_t scidxmask1 = (is < 4) ? 0x30 : 0xC0;
+ uint32_t scidxshift1 = (is < 4) ? 0 : 2;
+ uint32_t mbidx0 = is + 4;
+ uint32_t mbidx1 = (is < 4) ? is + 4 : is;
+ uint32_t mbidxmask0 = (is < 4) ? 0xF : 0xF0;
+ uint32_t mbidxshift0 = (is < 4) ? 0 : 4;
+ uint32_t mbidxmask1 = (is < 4) ? 0x30 : 0xC0;
+ uint32_t mbidxshift1 = (is < 4) ? 0 : 2;
+
+ sc = uint8_t((bl.block.scales[scidx0] & 0xF) | ((bl.block.scales[scidx1] & scidxmask1) >> scidxshift1));
+ mbyte = uint8_t(((bl.block.scales[mbidx0] & mbidxmask0) >> mbidxshift0) | ((bl.block.scales[mbidx1] & mbidxmask1) >> mbidxshift1));
+
+ const float16_t d = loadd.x * float16_t(sc);
+ const float16_t m = loadd.y * float16_t(mbyte);
+
+ uint qh = uint32_t(bl16.block.qh[(idx & 0x1E) >> 1]);
+ qh = qh & hm;
+ qh = unpack8(qh)[idx & 1];
+
+ uint qs = uint32_t(bl16.block.qs[((idx & 0xC0) >> 2) + ((idx & 0x1E) >> 1)]);
+ qs = (qs >> (b * 4)) & 0x0F0F;
+ qs = unpack8(qs)[idx & 1];
+
+ float16_t ret = d * (float16_t(qs) + (qh != 0 ? float16_t(16) : float16_t(0))) - m;
+
+ return ret;
+}
+
+layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufQ6_K {
+ block_q6_K block;
+};
+
+layout(buffer_reference, std430, buffer_reference_align = 16) buffer decodeBufQ6_K_packed16 {
+ block_q6_K_packed16 block;
+};
+
+float16_t dequantFuncQ6_K(const in decodeBufQ6_K bl, const in uint blockCoords[2], const in uint coordInBlock[2])
+{
+ decodeBufQ6_K_packed16 bl16 = decodeBufQ6_K_packed16(bl);
+ const uint idx = coordInBlock[1];
+
+ const uint b = (idx & 0x40) >> 6; // 0,1
+ const uint qhshift = (idx & 0x60) >> 4; // 0,2,4,6
+ const uint is = (idx & 0xF0) >> 4; // 0..15
+
+ const float16_t dscale = bl.block.d * float16_t(bl.block.scales[is]);
+
+ uint ql = uint32_t(bl16.block.ql[((idx & 0x80) >> 2) + ((idx & 0x3E) >> 1)]);
+ ql = (ql >> (b * 4)) & 0x0F0F;
+
+ uint qh = uint32_t(bl16.block.qh[((idx & 0x80) >> 3) + ((idx & 0x1E) >> 1)]);
+ qh = ((qh >> qhshift) & 0x0303) << 4;
+
+ int q = unpack8(ql | qh)[idx & 1];
+
+ float16_t ret = dscale * float16_t(q - 32);
+
+ return ret;
+}
+
+#if defined(DATA_A_IQ4_NL)
+layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ4_NL {
+ block_iq4_nl block;
+};
+
+float16_t dequantFuncIQ4_NL(const in decodeBufIQ4_NL bl, const in uint blockCoords[2], const in uint coordInBlock[2])
+{
+ const float16_t d = bl.block.d;
+ const uint idx = coordInBlock[1];
+ const uint iqs = idx & 0xF;
+ const uint shift = (idx & 0x10) >> 2;
+ uint32_t qs = bl.block.qs[iqs];
+ qs >>= shift;
+ qs &= 0xF;
+ float16_t ret = float16_t(kvalues_iq4nl[qs]) * d;
+ return ret;
+}
+#endif
+
+#if defined(DATA_A_Q4_0)
+#define dequantFuncA dequantFuncQ4_0
+#elif defined(DATA_A_Q4_1)
+#define dequantFuncA dequantFuncQ4_1
+#elif defined(DATA_A_Q5_0)
+#define dequantFuncA dequantFuncQ5_0
+#elif defined(DATA_A_Q5_1)
+#define dequantFuncA dequantFuncQ5_1
+#elif defined(DATA_A_Q8_0)
+#define dequantFuncA dequantFuncQ8_0
+#elif defined(DATA_A_Q2_K)
+#define dequantFuncA dequantFuncQ2_K
+#elif defined(DATA_A_Q3_K)
+#define dequantFuncA dequantFuncQ3_K
+#elif defined(DATA_A_Q4_K)
+#define dequantFuncA dequantFuncQ4_K
+#elif defined(DATA_A_Q5_K)
+#define dequantFuncA dequantFuncQ5_K
+#elif defined(DATA_A_Q6_K)
+#define dequantFuncA dequantFuncQ6_K
+#elif defined(DATA_A_IQ4_NL)
+#define dequantFuncA dequantFuncIQ4_NL
+#endif
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_head.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_head.comp
new file mode 100644
index 00000000..8d806435
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_head.comp
@@ -0,0 +1,13 @@
+#extension GL_EXT_control_flow_attributes : require
+#extension GL_EXT_shader_16bit_storage : require
+
+layout (push_constant) uniform parameter
+{
+ uint M;
+ uint K;
+ uint stride_a;
+ uint stride_b;
+ uint nel;
+} p;
+
+#include "types.comp"
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_nl.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_nl.comp
new file mode 100644
index 00000000..8de14fc0
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_nl.comp
@@ -0,0 +1,32 @@
+#version 450
+
+#include "dequant_head.comp"
+
+layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer A {block_iq4_nl data_a[];};
+layout (binding = 1) writeonly buffer D {D_TYPE data_b[];};
+
+void main() {
+ const uint i = gl_WorkGroupID.x * 4 + gl_LocalInvocationID.x / 64;
+
+ init_iq4nl_shmem();
+
+ const uint tid = gl_LocalInvocationID.x % 64;
+ const uint il = tid/32;
+ const uint ir = tid%32;
+ const uint ib = 32*i + ir;
+ if (ib >= p.nel / 32) {
+ return;
+ }
+
+ const uint q_idx = 8*il;
+ const uint b_idx = 1024*i + 32*ir + q_idx;
+
+ const float d = float(data_a[ib].d);
+
+ [[unroll]] for (uint l = 0; l < 8; ++l) {
+ data_b[b_idx + l + 0] = D_TYPE(d * kvalues_iq4nl[data_a[ib].qs[q_idx + l] & 0xF]);
+ data_b[b_idx + l + 16] = D_TYPE(d * kvalues_iq4nl[data_a[ib].qs[q_idx + l] >> 4]);
+ }
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q2_k.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q2_k.comp
new file mode 100644
index 00000000..157154af
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q2_k.comp
@@ -0,0 +1,34 @@
+#version 450
+
+#include "dequant_head.comp"
+
+layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
+layout (binding = 1) writeonly buffer D {D_TYPE data_b[];};
+
+void main() {
+ [[unroll]] for (uint wgy = 0; wgy < 256; wgy++) {
+ const uint i = gl_WorkGroupID.x * 256 + wgy;
+ if (i >= p.M * p.K / QUANT_K) {
+ return;
+ }
+
+ const uint tid = gl_LocalInvocationID.x;
+ const uint ip = tid / 32;
+ const uint il = tid - 32 * ip;
+ const uint is = 8 * ip + il / 16;
+
+ const uint y_idx = i * QUANT_K + 128 * ip + il;
+
+ const uint ql_idx = 32 * ip + il;
+ const uint8_t qs = data_a[i].qs[32 * ip + il];
+
+ FLOAT_TYPE dall = FLOAT_TYPE(data_a[i].d.x);
+ FLOAT_TYPE dmin = FLOAT_TYPE(data_a[i].d.y);
+ data_b[y_idx + 0] = D_TYPE(dall * FLOAT_TYPE((data_a[i].scales[is+0] & 0xF) * ((qs >> 0) & 3)) - dmin * FLOAT_TYPE(data_a[i].scales[is+0] >> 4));
+ data_b[y_idx + 32] = D_TYPE(dall * FLOAT_TYPE((data_a[i].scales[is+2] & 0xF) * ((qs >> 2) & 3)) - dmin * FLOAT_TYPE(data_a[i].scales[is+2] >> 4));
+ data_b[y_idx + 64] = D_TYPE(dall * FLOAT_TYPE((data_a[i].scales[is+4] & 0xF) * ((qs >> 4) & 3)) - dmin * FLOAT_TYPE(data_a[i].scales[is+4] >> 4));
+ data_b[y_idx + 96] = D_TYPE(dall * FLOAT_TYPE((data_a[i].scales[is+6] & 0xF) * ((qs >> 6) & 3)) - dmin * FLOAT_TYPE(data_a[i].scales[is+6] >> 4));
+ }
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q3_k.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q3_k.comp
new file mode 100644
index 00000000..c17dd0d9
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q3_k.comp
@@ -0,0 +1,42 @@
+#version 450
+
+#include "dequant_head.comp"
+
+layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
+layout (binding = 1) writeonly buffer D {D_TYPE data_b[];};
+
+void main() {
+ [[unroll]] for (uint wgy = 0; wgy < 256; wgy++) {
+ const uint i = uint(gl_WorkGroupID.x * 256 + wgy);
+ if (i >= p.M * p.K / QUANT_K) {
+ return;
+ }
+
+ const uint r = gl_LocalInvocationID.x / 4;
+ const uint tid = r / 2;
+ const uint is0 = r % 2;
+ const uint l0 = 16 * is0 + 4 * (gl_LocalInvocationID.x % 4);
+ const uint n = tid / 4;
+ const uint j = tid - 4*n;
+
+ const uint8_t m = uint8_t(1 << (4*n + j));
+ const uint is = 8*n + 2*j + is0;
+ const uint shift = 2*j;
+
+ const int8_t us = int8_t(is < 4 ? (data_a[i].scales[is-0] & 0xF) | (((data_a[i].scales[is+8] >> 0) & 3) << 4) :
+ is < 8 ? (data_a[i].scales[is-0] & 0xF) | (((data_a[i].scales[is+4] >> 2) & 3) << 4) :
+ is < 12 ? (data_a[i].scales[is-8] >> 4) | (((data_a[i].scales[is+0] >> 4) & 3) << 4) :
+ (data_a[i].scales[is-8] >> 4) | (((data_a[i].scales[is-4] >> 6) & 3) << 4));
+ const FLOAT_TYPE d_all = FLOAT_TYPE(data_a[i].d);
+ const FLOAT_TYPE dl = d_all * FLOAT_TYPE(us - 32);
+
+ const uint y_idx = i * QUANT_K + 128 * n + 32 * j;
+ const uint qs_idx = 32*n;
+
+ for (uint l = l0; l < l0 + 4; ++l) {
+ data_b[y_idx + l] = D_TYPE(dl * FLOAT_TYPE(int8_t((data_a[i].qs[qs_idx + l] >> shift) & 3) - (((data_a[i].hmask[l] & m) != 0) ? 0 : 4)));
+ }
+ }
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_0.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_0.comp
new file mode 100644
index 00000000..40818532
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_0.comp
@@ -0,0 +1,30 @@
+#version 450
+
+#include "dequant_head.comp"
+
+layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer A {block_q4_0 data_a[];};
+layout (binding = 1) writeonly buffer D {D_TYPE data_b[];};
+
+void main() {
+ const uint i = gl_WorkGroupID.x * 4 + gl_LocalInvocationID.x / 64;
+
+ const uint tid = gl_LocalInvocationID.x % 64;
+ const uint il = tid/32;
+ const uint ir = tid%32;
+ const uint ib = 32*i + ir;
+ if (ib >= p.nel / 32) {
+ return;
+ }
+
+ const uint q_idx = 8*il;
+ const uint b_idx = 1024*i + 32*ir + q_idx;
+
+ const float d = float(data_a[ib].d);
+
+ [[unroll]] for (uint l = 0; l < 8; ++l) {
+ data_b[b_idx + l + 0] = D_TYPE(d * ((data_a[ib].qs[q_idx + l] & 0xF) - 8.0f));
+ data_b[b_idx + l + 16] = D_TYPE(d * ((data_a[ib].qs[q_idx + l] >> 4) - 8.0f));
+ }
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_1.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_1.comp
new file mode 100644
index 00000000..2f27eee6
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_1.comp
@@ -0,0 +1,32 @@
+#version 450
+
+#include "dequant_head.comp"
+
+layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer A {block_q4_1 data_a[];};
+layout (binding = 1) writeonly buffer D {D_TYPE data_b[];};
+
+void main() {
+ const uint i = gl_WorkGroupID.x * 4 + gl_LocalInvocationID.x / 64;
+
+ const uint tid = gl_LocalInvocationID.x % 64;
+ const uint il = tid/32;
+ const uint ir = tid%32;
+ const uint ib = 32*i + ir;
+ if (ib >= p.nel / 32) {
+ return;
+ }
+
+ const uint b_idx = 1024*i + 32*ir + 8*il;
+
+ const float d = float(data_a[ib].d);
+ const float m = float(data_a[ib].m);
+
+ const uint q_idx = 8*il;
+
+ [[unroll]] for (uint l = 0; l < 8; ++l) {
+ data_b[b_idx + l + 0] = D_TYPE(d * (data_a[ib].qs[q_idx + l] & 0xF) + m);
+ data_b[b_idx + l + 16] = D_TYPE(d * (data_a[ib].qs[q_idx + l] >> 4) + m);
+ }
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_k.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_k.comp
new file mode 100644
index 00000000..987f113a
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_k.comp
@@ -0,0 +1,68 @@
+#version 450
+
+#include "dequant_head.comp"
+
+layout(local_size_x = 32, local_size_y = 1, local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
+layout (binding = 1) writeonly buffer D {D_TYPE data_b[];};
+
+void main() {
+ [[unroll]] for (uint wgy = 0; wgy < 256; wgy++) {
+ const uint ib = gl_WorkGroupID.x * 256 + wgy;
+ if (ib >= p.M * p.K / QUANT_K) {
+ return;
+ }
+
+ const uint tid = gl_LocalInvocationID.x;
+ const uint il = tid / 8;
+ const uint ir = tid % 8;
+ const uint is = 2 * il;
+ const uint n = 4;
+
+ const FLOAT_TYPE dall = FLOAT_TYPE(data_a[ib].d.x);
+ const FLOAT_TYPE dmin = FLOAT_TYPE(data_a[ib].d.y);
+
+ const uint y_idx = ib * QUANT_K + 64 * il + n * ir;
+ const uint qs_idx = 32*il + n * ir;
+
+ uint scidx0 = (is < 4) ? is : (is + 4);
+ uint scidx1 = (is < 4) ? is : (is - 4);
+ uint scidxmask1 = (is < 4) ? 0x30 : 0xC0;
+ uint scidxshift1 = (is < 4) ? 0 : 2;
+ uint mbidx0 = is + 4;
+ uint mbidx1 = (is < 4) ? is + 4 : is;
+ uint mbidxmask0 = (is < 4) ? 0xF : 0xF0;
+ uint mbidxshift0 = (is < 4) ? 0 : 4;
+ uint mbidxmask1 = (is < 4) ? 0x30 : 0xC0;
+ uint mbidxshift1 = (is < 4) ? 0 : 2;
+
+ uint8_t sc = uint8_t((data_a[ib].scales[scidx0] & 0xF) | ((data_a[ib].scales[scidx1] & scidxmask1) >> scidxshift1));
+ uint8_t mbyte = uint8_t((data_a[ib].scales[mbidx0] & mbidxmask0) >> mbidxshift0 | ((data_a[ib].scales[mbidx1] & mbidxmask1) >> mbidxshift1));
+
+ const FLOAT_TYPE d1 = dall * sc;
+ const FLOAT_TYPE m1 = dmin * mbyte;
+
+ scidx0 = (is < 4) ? is + 1 : (is + 5);
+ scidx1 = (is < 4) ? is + 1 : (is - 3);
+ scidxmask1 = (is < 4) ? 0x30 : 0xC0;
+ scidxshift1 = (is < 4) ? 0 : 2;
+ mbidx0 = is + 5;
+ mbidx1 = (is < 4) ? is + 5 : is + 1;
+ mbidxmask0 = (is < 4) ? 0xF : 0xF0;
+ mbidxshift0 = (is < 4) ? 0 : 4;
+ mbidxmask1 = (is < 4) ? 0x30 : 0xC0;
+ mbidxshift1 = (is < 4) ? 0 : 2;
+
+ sc = uint8_t((data_a[ib].scales[scidx0] & 0xF) | ((data_a[ib].scales[scidx1] & scidxmask1) >> scidxshift1));
+ mbyte = uint8_t((data_a[ib].scales[mbidx0] & mbidxmask0) >> mbidxshift0 | ((data_a[ib].scales[mbidx1] & mbidxmask1) >> mbidxshift1));
+
+ const FLOAT_TYPE d2 = dall * sc;
+ const FLOAT_TYPE m2 = dmin * mbyte;
+
+ [[unroll]] for (uint l = 0; l < n; ++l) {
+ data_b[y_idx + l ] = D_TYPE(d1 * FLOAT_TYPE(data_a[ib].qs[qs_idx + l] & 0xF) - m1);
+ data_b[y_idx + l + 32] = D_TYPE(d2 * FLOAT_TYPE(data_a[ib].qs[qs_idx + l] >> 4) - m2);
+ }
+ }
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_0.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_0.comp
new file mode 100644
index 00000000..b20b8052
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_0.comp
@@ -0,0 +1,34 @@
+#version 450
+
+#include "dequant_head.comp"
+
+layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer A {block_q5_0 data_a[];};
+layout (binding = 1) writeonly buffer D {D_TYPE data_b[];};
+
+void main() {
+ const uint i = gl_WorkGroupID.x * 4 + gl_LocalInvocationID.x / 64;
+
+ const uint tid = gl_LocalInvocationID.x % 64;
+ const uint il = tid/32;
+ const uint ir = tid%32;
+ const uint ib = 32*i + ir;
+ if (ib >= p.nel / 32) {
+ return;
+ }
+
+ const uint b_idx = 1024*i + 32*ir + 8*il;
+
+ const float d = float(data_a[ib].d);
+ const uint qh = uint(data_a[ib].qh[1]) << 16 | data_a[ib].qh[0];
+
+ const uint q_idx = 8*il;
+
+ [[unroll]] for (uint l = 0; l < 8; ++l) {
+ const uint iqs = q_idx + l;
+ const uint vui = uint(data_a[ib].qs[iqs]);
+ data_b[b_idx + l + 0] = D_TYPE(d * (((vui & 0xF) | (((qh >> iqs) << 4) & 0x10)) - 16.0f));
+ data_b[b_idx + l + 16] = D_TYPE(d * (((vui >> 4) | ((qh >> (iqs + 12)) & 0x10)) - 16.0f));
+ }
+}
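
Q5_0 differs from Q4_0 only in that the fifth bit of every value is stored separately in the 32-bit qh word; the shader above reassembles it with the same shifts used here. A CPU-side sketch of one block (illustrative only, not part of the patch; d is the block's fp16 scale converted to float):

    #include <cstdint>

    // Q5_0: 32 values per block. The low 4 bits come from the packed
    // nibbles in qs, the 5th bit from qh, and the result is centered by
    // subtracting 16 before scaling.
    static void dequant_q5_0_block(const uint8_t * qs /* 16 bytes */, uint32_t qh,
                                   float d, float * out /* 32 floats */) {
        for (int j = 0; j < 16; ++j) {
            const uint8_t xh_0 = ((qh >> j)        << 4) & 0x10;  // bit j      -> first half
            const uint8_t xh_1 =  (qh >> (j + 12))       & 0x10;  // bit j + 16 -> second half
            const int x0 = ((qs[j] & 0x0F) | xh_0) - 16;
            const int x1 = ((qs[j] >>   4) | xh_1) - 16;
            out[j]      = x0 * d;
            out[j + 16] = x1 * d;
        }
    }
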
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_1.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_1.comp
new file mode 100644
index 00000000..dc59fe3b
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_1.comp
@@ -0,0 +1,35 @@
+#version 450
+
+#include "dequant_head.comp"
+
+layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer A {block_q5_1 data_a[];};
+layout (binding = 1) writeonly buffer D {D_TYPE data_b[];};
+
+void main() {
+ const uint i = gl_WorkGroupID.x * 4 + gl_LocalInvocationID.x / 64;
+
+ const uint tid = gl_LocalInvocationID.x % 64;
+ const uint il = tid/32;
+ const uint ir = tid%32;
+ const uint ib = 32*i + ir;
+ if (ib >= p.nel / 32) {
+ return;
+ }
+
+ const uint b_idx = 1024*i + 32*ir + 8*il;
+
+ const float d = float(data_a[ib].d);
+ const float m = float(data_a[ib].m);
+ const uint qh = data_a[ib].qh;
+
+ const uint q_idx = 8*il;
+
+ [[unroll]] for (uint l = 0; l < 8; ++l) {
+ const uint iqs = q_idx + l;
+ const uint vui = uint(data_a[ib].qs[iqs]);
+ data_b[b_idx + l + 0] = D_TYPE(d * (((vui & 0xF) | (((qh >> iqs) << 4) & 0x10))) + m);
+ data_b[b_idx + l + 16] = D_TYPE(d * (((vui >> 4) | ((qh >> (iqs + 12)) & 0x10))) + m);
+ }
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_k.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_k.comp
new file mode 100644
index 00000000..6db5403b
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_k.comp
@@ -0,0 +1,70 @@
+#version 450
+
+#include "dequant_head.comp"
+
+layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
+layout (binding = 1) writeonly buffer D {D_TYPE data_b[];};
+
+void main() {
+ [[unroll]] for (uint wgy = 0; wgy < 256; wgy++) {
+ const uint ib = gl_WorkGroupID.x * 256 + wgy;
+ if (ib >= p.M * p.K / QUANT_K) {
+ return;
+ }
+
+ const uint tid = gl_LocalInvocationID.x;
+ const uint il = tid / 16;
+ const uint ir = tid % 16;
+ const uint is = 2 * il;
+
+ const FLOAT_TYPE dall = FLOAT_TYPE(data_a[ib].d.x);
+ const FLOAT_TYPE dmin = FLOAT_TYPE(data_a[ib].d.y);
+
+ const uint y_idx = ib * QUANT_K + 64 * il + 2 * ir;
+ const uint qs_idx = 32*il + 2 * ir;
+ const uint qh_idx = 2 * ir;
+
+ uint scidx0 = (is < 4) ? is : (is + 4);
+ uint scidx1 = (is < 4) ? is : (is - 4);
+ uint scidxmask1 = (is < 4) ? 0x30 : 0xC0;
+ uint scidxshift1 = (is < 4) ? 0 : 2;
+ uint mbidx0 = is + 4;
+ uint mbidx1 = (is < 4) ? is + 4 : is;
+ uint mbidxmask0 = (is < 4) ? 0xF : 0xF0;
+ uint mbidxshift0 = (is < 4) ? 0 : 4;
+ uint mbidxmask1 = (is < 4) ? 0x30 : 0xC0;
+ uint mbidxshift1 = (is < 4) ? 0 : 2;
+
+ uint8_t sc = uint8_t((data_a[ib].scales[scidx0] & 0xF) | ((data_a[ib].scales[scidx1] & scidxmask1) >> scidxshift1));
+ uint8_t mbyte = uint8_t((data_a[ib].scales[mbidx0] & mbidxmask0) >> mbidxshift0 | ((data_a[ib].scales[mbidx1] & mbidxmask1) >> mbidxshift1));
+
+ const FLOAT_TYPE d1 = dall * sc;
+ const FLOAT_TYPE m1 = dmin * mbyte;
+
+ scidx0 = (is < 4) ? is + 1 : (is + 5);
+ scidx1 = (is < 4) ? is + 1 : (is - 3);
+ scidxmask1 = (is < 4) ? 0x30 : 0xC0;
+ scidxshift1 = (is < 4) ? 0 : 2;
+ mbidx0 = is + 5;
+ mbidx1 = (is < 4) ? is + 5 : is + 1;
+ mbidxmask0 = (is < 4) ? 0xF : 0xF0;
+ mbidxshift0 = (is < 4) ? 0 : 4;
+ mbidxmask1 = (is < 4) ? 0x30 : 0xC0;
+ mbidxshift1 = (is < 4) ? 0 : 2;
+
+ sc = uint8_t((data_a[ib].scales[scidx0] & 0xF) | ((data_a[ib].scales[scidx1] & scidxmask1) >> scidxshift1));
+ mbyte = uint8_t((data_a[ib].scales[mbidx0] & mbidxmask0) >> mbidxshift0 | ((data_a[ib].scales[mbidx1] & mbidxmask1) >> mbidxshift1));
+
+ const FLOAT_TYPE d2 = dall * sc;
+ const FLOAT_TYPE m2 = dmin * mbyte;
+
+ const uint8_t hm1 = uint8_t(1 << (2 * il ));
+ const uint8_t hm2 = uint8_t(1 << (2 * il + 1));
+ data_b[y_idx ] = D_TYPE(d1 * FLOAT_TYPE((data_a[ib].qs[qs_idx ] & 0xF) + (((data_a[ib].qh[qh_idx ] & hm1) != 0) ? 16 : 0)) - m1);
+ data_b[y_idx + 1] = D_TYPE(d1 * FLOAT_TYPE((data_a[ib].qs[qs_idx + 1] & 0xF) + (((data_a[ib].qh[qh_idx + 1] & hm1) != 0) ? 16 : 0)) - m1);
+ data_b[y_idx + 32] = D_TYPE(d2 * FLOAT_TYPE((data_a[ib].qs[qs_idx ] >> 4) + (((data_a[ib].qh[qh_idx ] & hm2) != 0) ? 16 : 0)) - m2);
+ data_b[y_idx + 33] = D_TYPE(d2 * FLOAT_TYPE((data_a[ib].qs[qs_idx + 1] >> 4) + (((data_a[ib].qh[qh_idx + 1] & hm2) != 0) ? 16 : 0)) - m2);
+ }
+}
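
The scidx/mbidx index-and-mask juggling in the Q4_K and Q5_K shaders reassembles the 6-bit scales and mins that a K-quant super-block packs into its 12-byte scales array; the dequantized value is then d*sc*q - dmin*mn. The same extraction written as a CPU-side helper in the style of ggml's get_scale_min_k4 (a sketch, not part of the patch):

    #include <cstdint>

    // Unpack the j-th (scale, min) pair, j in [0, 8), from the 12 packed
    // bytes of a Q4_K / Q5_K super-block. Pairs 0..3 are stored as whole
    // 6-bit fields; pairs 4..7 are split across low nibbles and the top
    // two bits of the earlier bytes.
    static void get_scale_min_k4(int j, const uint8_t * scales, uint8_t * sc, uint8_t * mn) {
        if (j < 4) {
            *sc = scales[j]     & 63;
            *mn = scales[j + 4] & 63;
        } else {
            *sc = (scales[j + 4] & 0x0F) | ((scales[j - 4] >> 6) << 4);
            *mn = (scales[j + 4] >>   4) | ((scales[j    ] >> 6) << 4);
        }
    }
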
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q6_k.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q6_k.comp
new file mode 100644
index 00000000..0b913175
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q6_k.comp
@@ -0,0 +1,33 @@
+#version 450
+
+#include "dequant_head.comp"
+
+layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
+layout (binding = 1) writeonly buffer D {D_TYPE data_b[];};
+
+void main() {
+ [[unroll]] for (uint wgy = 0; wgy < 256; wgy++) {
+ const uint i = gl_WorkGroupID.x * 256 + wgy;
+ if (i >= p.M * p.K / QUANT_K) {
+ return;
+ }
+ const uint tid = gl_LocalInvocationID.x;
+ const uint ip = tid / 32;
+ const uint il = tid - 32 * ip;
+ const uint is = 8 * ip + il / 16;
+
+ const uint y_idx = i * QUANT_K + 128 * ip + il;
+
+ const uint ql_idx = 64 * ip + il;
+ const uint8_t qh = data_a[i].qh[32 * ip + il];
+
+ const FLOAT_TYPE d = FLOAT_TYPE(data_a[i].d);
+
+ data_b[y_idx + 0] = D_TYPE(d * FLOAT_TYPE(data_a[i].scales[is + 0] * (int8_t((data_a[i].ql[ql_idx + 0] & 0xF) | (((qh >> 0) & 3) << 4)) - 32)));
+ data_b[y_idx + 32] = D_TYPE(d * FLOAT_TYPE(data_a[i].scales[is + 2] * (int8_t((data_a[i].ql[ql_idx + 32] & 0xF) | (((qh >> 2) & 3) << 4)) - 32)));
+ data_b[y_idx + 64] = D_TYPE(d * FLOAT_TYPE(data_a[i].scales[is + 4] * (int8_t((data_a[i].ql[ql_idx + 0] >> 4) | (((qh >> 4) & 3) << 4)) - 32)));
+ data_b[y_idx + 96] = D_TYPE(d * FLOAT_TYPE(data_a[i].scales[is + 6] * (int8_t((data_a[i].ql[ql_idx + 32] >> 4) | (((qh >> 6) & 3) << 4)) - 32)));
+ }
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q8_0.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q8_0.comp
new file mode 100644
index 00000000..bd1344a8
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q8_0.comp
@@ -0,0 +1,31 @@
+#version 450
+
+#include "dequant_head.comp"
+
+layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer A {block_q8_0 data_a[];};
+layout (binding = 1) writeonly buffer D {D_TYPE data_b[];};
+
+void main() {
+ const uint i = gl_WorkGroupID.x * 4 + gl_LocalInvocationID.x / 64;
+
+ const uint tid = gl_LocalInvocationID.x % 64;
+ const uint il = tid/32;
+ const uint ir = tid%32;
+ const uint ib = 32*i + ir;
+ if (ib >= p.nel / 32) {
+ return;
+ }
+
+ const uint b_idx = 1024*i + 32*ir + 16*il;
+
+ const float d = float(data_a[ib].d);
+
+ const uint q_idx = 16*il;
+
+ [[unroll]] for (uint l = 0; l < 16; l += 2) {
+ data_b[b_idx + l ] = D_TYPE(d * data_a[ib].qs[q_idx + l ]);
+ data_b[b_idx + l + 1] = D_TYPE(d * data_a[ib].qs[q_idx + l + 1]);
+ }
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/diag_mask_inf.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/diag_mask_inf.comp
new file mode 100644
index 00000000..4e68742b
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/diag_mask_inf.comp
@@ -0,0 +1,34 @@
+#version 450
+
+#extension GL_EXT_shader_16bit_storage : require
+#extension GL_EXT_control_flow_attributes : enable
+
+layout (push_constant) uniform parameter
+{
+ uint ncols;
+ uint rows_per_channel;
+ uint n_past;
+} p;
+
+#include "types.comp"
+
+layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer X {A_TYPE data_a[];};
+layout (binding = 1) writeonly buffer D {D_TYPE data_d[];};
+
+void main() {
+ const uint col = gl_GlobalInvocationID.y;
+ const uint row = gl_GlobalInvocationID.x;
+
+ if (col >= p.ncols) {
+ return;
+ }
+
+ const uint i = row*p.ncols + col;
+ if (col > p.n_past + row % p.rows_per_channel) {
+ data_d[i] = D_TYPE(uintBitsToFloat(0xFF800000));
+ } else {
+ data_d[i] = D_TYPE(data_a[i]);
+ }
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/div.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/div.comp
new file mode 100644
index 00000000..9fb69c6c
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/div.comp
@@ -0,0 +1,27 @@
+#version 450
+
+#include "types.comp"
+#include "generic_binary_head.comp"
+
+const uint num_threads = 256;
+
+layout(local_size_x = num_threads, local_size_y = 1, local_size_z = 1) in;
+
+void main() {
+ uint idx = get_idx();
+
+ // num_threads * num_iter must equal 512, to match the wg_denoms and get_idx calculation
+ const uint num_iter = 2;
+
+ [[unroll]] for (uint i = 0; i < num_iter; ++i) {
+ if (idx >= p.ne) {
+ continue;
+ }
+ uint i00, i01, i02, i03;
+ get_indices(idx, i00, i01, i02, i03);
+
+ data_d[get_doffset() + dst_idx(i00, i01, i02, i03)] = D_TYPE(FLOAT_TYPE(data_a[get_aoffset() + src0_idx(i00, i01, i02, i03)]) / FLOAT_TYPE(data_b[get_boffset() + src1_idx(i00, i01, i02, i03)]));
+
+ idx += num_threads;
+ }
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp
new file mode 100644
index 00000000..c5be8131
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp
@@ -0,0 +1,289 @@
+#version 450
+
+#extension GL_EXT_control_flow_attributes : enable
+#extension GL_EXT_shader_16bit_storage : require
+
+#extension GL_EXT_shader_explicit_arithmetic_types_float16 : require
+#extension GL_EXT_shader_explicit_arithmetic_types_int8 : require
+#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require
+#extension GL_EXT_shader_explicit_arithmetic_types_int16 : require
+
+#extension GL_KHR_memory_scope_semantics : enable
+#extension GL_KHR_cooperative_matrix : enable
+#extension GL_NV_cooperative_matrix2 : enable
+#extension GL_EXT_buffer_reference : enable
+#extension GL_KHR_shader_subgroup_ballot : enable
+#extension GL_KHR_shader_subgroup_vote : enable
+#extension GL_EXT_null_initializer : enable
+
+#include "types.comp"
+#include "dequant_funcs_cm2.comp"
+
+layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
+
+layout (constant_id = 1) const uint32_t Br = 32;
+layout (constant_id = 2) const uint32_t Bc = 32;
+layout (constant_id = 3) const uint32_t D = 32;
+layout (constant_id = 4) const uint32_t Clamp = gl_CooperativeMatrixClampModeConstantNV;
+
+layout (push_constant) uniform parameter {
+ uint32_t N;
+ uint32_t KV;
+
+ uint32_t ne1;
+ uint32_t ne2;
+ uint32_t ne3;
+
+ uint32_t neq2;
+ uint32_t neq3;
+ uint32_t nek2;
+ uint32_t nek3;
+ uint32_t nev2;
+ uint32_t nev3;
+ uint32_t nem1;
+
+ uint32_t nb02;
+ uint32_t nb03;
+ uint32_t nb12;
+ uint32_t nb13;
+ uint32_t nb22;
+ uint32_t nb23;
+ uint32_t nb31;
+
+ float scale;
+ float max_bias;
+ float logit_softcap;
+
+ uint32_t mask;
+ uint32_t n_head_log2;
+ float m0;
+ float m1;
+} p;
+
+layout (binding = 0) readonly buffer Q {uint8_t data_q[];};
+layout (binding = 1) readonly buffer K {uint8_t data_k[];};
+layout (binding = 2) readonly buffer V {uint8_t data_v[];};
+layout (binding = 3) readonly buffer M {uint8_t data_m[];};
+layout (binding = 4) writeonly buffer O {D_TYPE data_o[];};
+
+#define CEIL_DIV(a, b) (((a) + (b) - 1) / (b))
+
+ACC_TYPE maxReduce(const in ACC_TYPE x, const in ACC_TYPE y) {
+ return max(x, y);
+}
+
+ACC_TYPE smearReduce(const in ACC_TYPE x, const in ACC_TYPE y) {
+ return x;
+}
+
+// Replace matrix elements whose row >= numRows or column >= numCols with 'replace'
+ACC_TYPE replacePadding(const in uint32_t row, const in uint32_t col, const in ACC_TYPE elem, const in ACC_TYPE replace, const in uint32_t numRows, const in uint32_t numCols) {
+ if (row >= numRows || col >= numCols) {
+ return replace;
+ }
+ return elem;
+}
+
+ACC_TYPE Exp(const in uint32_t row, const in uint32_t col, const in ACC_TYPE elem)
+{
+ return exp(elem);
+}
+
+ACC_TYPE Max(const in uint32_t row, const in uint32_t col, const in ACC_TYPE elem0, const in ACC_TYPE elem1)
+{
+ return max(elem0, elem1);
+}
+
+#if defined(BLOCK_SIZE)
+#define DECODEFUNC , DEQUANTFUNC
+#else
+#define DECODEFUNC
+#endif
+
+void main() {
+#if defined(DATA_A_IQ4_NL)
+ init_iq4nl_shmem();
+#endif
+
+ const uint32_t N = p.N;
+ const uint32_t KV = p.KV;
+
+ const uint32_t Tr = CEIL_DIV(N, Br);
+ const uint32_t Tc = CEIL_DIV(KV, Bc);
+
+ const uint32_t i = gl_WorkGroupID.x;
+
+ const uint32_t iq2 = gl_WorkGroupID.y;
+ const uint32_t iq3 = gl_WorkGroupID.z;
+
+ // broadcast factors
+ const uint32_t rk2 = p.neq2/p.nek2;
+ const uint32_t rk3 = p.neq3/p.nek3;
+
+ const uint32_t rv2 = p.neq2/p.nev2;
+ const uint32_t rv3 = p.neq3/p.nev3;
+
+ // k indices
+ const uint32_t ik3 = iq3 / rk3;
+ const uint32_t ik2 = iq2 / rk2;
+
+ // v indices
+ const uint32_t iv3 = iq3 / rv3;
+ const uint32_t iv2 = iq2 / rv2;
+
+ tensorLayoutNV<2, gl_CooperativeMatrixClampModeConstantNV> tensorLayoutQ = createTensorLayoutNV(2, gl_CooperativeMatrixClampModeConstantNV);
+ tensorLayoutNV<2, Clamp> tensorLayoutK = createTensorLayoutNV(2, Clamp);
+ tensorLayoutNV<2, Clamp> tensorLayoutV = createTensorLayoutNV(2, Clamp);
+
+ tensorViewNV<2, false, 1, 0> tensorViewTranspose = createTensorViewNV(2, false, 1, 0);
+
+#if defined(BLOCK_SIZE)
+ tensorLayoutK = setTensorLayoutBlockSizeNV(tensorLayoutK, 1, BLOCK_SIZE);
+ tensorLayoutV = setTensorLayoutBlockSizeNV(tensorLayoutV, 1, BLOCK_SIZE);
+#endif
+
+ tensorLayoutQ = setTensorLayoutDimensionNV(tensorLayoutQ, N, D);
+ tensorLayoutK = setTensorLayoutDimensionNV(tensorLayoutK, KV, D);
+ tensorLayoutV = setTensorLayoutDimensionNV(tensorLayoutV, KV, D);
+
+ coopmat<Q_TYPE, gl_ScopeWorkgroup, Br, D, gl_MatrixUseA> Q;
+ coopmat<float16_t, gl_ScopeWorkgroup, Br, D, gl_MatrixUseA> Qf16;
+
+ uint32_t q_offset = iq2*p.nb02+iq3*p.nb03;
+ coopMatLoadTensorNV(Q, data_q, q_offset, sliceTensorLayoutNV(tensorLayoutQ, i * Br, Br, 0, D));
+
+ Qf16 = coopmat<float16_t, gl_ScopeWorkgroup, Br, D, gl_MatrixUseA>(Q);
+ Qf16 *= float16_t(p.scale);
+
+ coopmat<ACC_TYPE, gl_ScopeWorkgroup, Br, D, gl_MatrixUseAccumulator> O = coopmat<ACC_TYPE, gl_ScopeWorkgroup, Br, D, gl_MatrixUseAccumulator>(0);
+
+ coopmat<ACC_TYPE, gl_ScopeWorkgroup, Br, Bc, gl_MatrixUseAccumulator> L, M;
+
+ L = coopmat<ACC_TYPE, gl_ScopeWorkgroup, Br, Bc, gl_MatrixUseAccumulator>(0);
+ M = coopmat<ACC_TYPE, gl_ScopeWorkgroup, Br, Bc, gl_MatrixUseAccumulator>(-1.0/0.0);
+
+ ACC_TYPE slope = ACC_TYPE(1.0);
+
+ // ALiBi
+ if (p.max_bias > 0.0f) {
+ const uint32_t h = iq2;
+
+ const ACC_TYPE base = ACC_TYPE(h < p.n_head_log2 ? p.m0 : p.m1);
+ const int exph = int(h < p.n_head_log2 ? h + 1 : 2*(h - p.n_head_log2) + 1);
+
+ slope = pow(base, ACC_TYPE(exph));
+ }
+
+ [[dont_unroll]]
+ for (uint32_t j = 0; j < Tc; ++j) {
+
+ coopmat<ACC_TYPE, gl_ScopeWorkgroup, Br, Bc, gl_MatrixUseAccumulator> S = coopmat<ACC_TYPE, gl_ScopeWorkgroup, Br, Bc, gl_MatrixUseAccumulator>(0);
+
+ coopmat<float16_t, gl_ScopeWorkgroup, D, Bc, gl_MatrixUseB> K_T;
+
+ uint32_t k_offset = ik2*p.nb12 + ik3*p.nb13;
+ coopMatLoadTensorNV(K_T, data_k, k_offset, sliceTensorLayoutNV(tensorLayoutK, j * Bc, Bc, 0, D), tensorViewTranspose DECODEFUNC);
+ S = coopMatMulAdd(Qf16, K_T, S);
+
+ if (p.logit_softcap != 0.0f) {
+ [[unroll]]
+ for (int k = 0; k < S.length(); ++k) {
+ S[k] = ACC_TYPE(p.logit_softcap)*tanh(S[k]);
+ }
+ }
+
+ if (p.mask != 0) {
+ tensorLayoutNV<2, gl_CooperativeMatrixClampModeConstantNV> tensorLayoutM = createTensorLayoutNV(2, gl_CooperativeMatrixClampModeConstantNV);
+ tensorLayoutM = setTensorLayoutDimensionNV(tensorLayoutM, p.nem1, KV);
+
+ coopmat<float16_t, gl_ScopeWorkgroup, Br, Bc, gl_MatrixUseAccumulator> mv;
+
+ coopMatLoadTensorNV(mv, data_m, 0, sliceTensorLayoutNV(tensorLayoutM, i * Br, Br, j * Bc, Bc));
+
+ S += slope*coopmat<ACC_TYPE, gl_ScopeWorkgroup, Br, Bc, gl_MatrixUseAccumulator>(mv);
+ }
+
+ // Clear padding elements to -inf, so they don't contribute to rowmax
+ if (Clamp != 0 &&
+ ((j + 1) * Bc > KV ||
+ (i + 1) * Br > N)) {
+
+ uint R = ((i + 1) * Br > N) ? (N % Br) : Br;
+ uint C = ((j + 1) * Bc > KV) ? (KV % Bc) : Bc;
+
+ coopMatPerElementNV(S, S, replacePadding, ACC_TYPE(-1.0/0.0), R, C);
+ }
+
+ coopmat<ACC_TYPE, gl_ScopeWorkgroup, Br, Bc, gl_MatrixUseAccumulator> rowmax, P, rowsum, eM;
+
+ coopMatReduceNV(rowmax, S, gl_CooperativeMatrixReduceRowNV, maxReduce);
+
+ coopmat<ACC_TYPE, gl_ScopeWorkgroup, Br, Bc, gl_MatrixUseAccumulator> Mold = M;
+
+ // M = max(rowmax, Mold)
+ // P = e^(S - M)
+ // eM = e^(Mold - M)
+ coopMatPerElementNV(M, rowmax, Max, Mold);
+ coopMatPerElementNV(P, S - M, Exp);
+ coopMatPerElementNV(eM, Mold - M, Exp);
+
+ // Clear padding elements to 0, so they don't contribute to rowsum
+ if (Clamp != 0 &&
+ ((j + 1) * Bc > KV ||
+ (i + 1) * Br > N)) {
+
+ uint R = ((i + 1) * Br > N) ? (N % Br) : Br;
+ uint C = ((j + 1) * Bc > KV) ? (KV % Bc) : Bc;
+
+ coopMatPerElementNV(P, P, replacePadding, ACC_TYPE(0.0), R, C);
+ }
+
+ coopmat<float16_t, gl_ScopeWorkgroup, Br, Bc, gl_MatrixUseA> P_A = coopmat<float16_t, gl_ScopeWorkgroup, Br, Bc, gl_MatrixUseA>(P);
+
+ // compute rowsum by multiplying by matrix of all ones.
+ coopmat<float16_t, gl_ScopeWorkgroup, Bc, Bc, gl_MatrixUseB> One = coopmat<float16_t, gl_ScopeWorkgroup, Bc, Bc, gl_MatrixUseB>(1.0);
+
+ rowsum = coopmat<ACC_TYPE, gl_ScopeWorkgroup, Br, Bc, gl_MatrixUseAccumulator>(0.0);
+ rowsum = coopMatMulAdd(P_A, One, rowsum);
+
+ coopmat<float16_t, gl_ScopeWorkgroup, Bc, D, gl_MatrixUseB> V;
+ uint32_t v_offset = iv2*p.nb22 + iv3*p.nb23;
+ coopMatLoadTensorNV(V, data_v, v_offset, sliceTensorLayoutNV(tensorLayoutV, j * Bc, Bc, 0, D) DECODEFUNC);
+
+ L = eM*L + rowsum;
+
+ // This is the "diagonal" matrix in the paper, but since we do componentwise
+ // multiply rather than matrix multiply it has the diagonal element smeared
+ // across the row
+ coopmat<ACC_TYPE, gl_ScopeWorkgroup, Br, D, gl_MatrixUseAccumulator> eMdiag;
+
+ // resize eM by using smear/reduce
+ coopMatReduceNV(eMdiag, eM, gl_CooperativeMatrixReduceRowNV, smearReduce);
+
+ O = eMdiag * O;
+
+ O = coopMatMulAdd(P_A, V, O);
+ }
+
+ coopmat<ACC_TYPE, gl_ScopeWorkgroup, Br, D, gl_MatrixUseAccumulator> Ldiag;
+
+ // resize L by using smear/reduce
+ coopMatReduceNV(Ldiag, L, gl_CooperativeMatrixReduceRowNV, smearReduce);
+
+ [[unroll]]
+ for (int k = 0; k < Ldiag.length(); ++k) {
+ Ldiag[k] = ACC_TYPE(1.0) / Ldiag[k];
+ }
+
+ O = Ldiag*O;
+
+ tensorLayoutNV<3, gl_CooperativeMatrixClampModeConstantNV> tensorLayoutD = createTensorLayoutNV(3, gl_CooperativeMatrixClampModeConstantNV);
+ tensorLayoutD = setTensorLayoutDimensionNV(tensorLayoutD, p.ne2, p.ne1, D);
+
+ // permute dimensions
+ tensorViewNV<3, false, 1, 0, 2> tensorViewPermute = createTensorViewNV(3, false, 1, 0, 2);
+ uint32_t o_offset = iq3*p.ne2*p.ne1;
+
+ coopmat<D_TYPE, gl_ScopeWorkgroup, Br, D, gl_MatrixUseAccumulator> O_D = coopmat<D_TYPE, gl_ScopeWorkgroup, Br, D, gl_MatrixUseAccumulator>(O);
+ coopMatStoreTensorNV(O_D, data_o, o_offset, sliceTensorLayoutNV(tensorLayoutD, i * Br, Br, iq2, 1, 0, D), tensorViewPermute);
+}
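The M, L and eM accumulators in the loop above implement the streaming (online) softmax used by flash attention: whenever a new tile of scores raises the running row maximum, the previous accumulators are rescaled by exp(Mold - M) before the new tile's contributions are added, and the division by L happens only once at the end. A scalar C++ sketch of the same recurrence, for illustration only (the function name online_softmax_weighted_sum is invented here):

    #include <algorithm>
    #include <cmath>
    #include <cstdio>
    #include <limits>
    #include <vector>

    // Scalar version of the per-tile recurrence in the shader's main loop:
    // M = max(M, s), P = exp(s - M), eM = exp(Mold - M),
    // L = eM*L + sum(P), O = eM*O + P*V, and finally O / L.
    double online_softmax_weighted_sum(const std::vector<double>& scores,
                                       const std::vector<double>& values) {
        double m = -std::numeric_limits<double>::infinity();
        double l = 0.0;  // softmax denominator (the shader's L accumulator)
        double o = 0.0;  // running weighted sum (the shader's O accumulator)
        for (size_t j = 0; j < scores.size(); ++j) {
            const double m_new = std::max(m, scores[j]);
            const double eM    = std::exp(m - m_new);      // rescale old state
            const double p     = std::exp(scores[j] - m_new);
            l = eM * l + p;
            o = eM * o + p * values[j];
            m = m_new;
        }
        return o / l;  // matches the final O = Ldiag * O step
    }

    int main() {
        std::printf("%f\n", online_softmax_weighted_sum({0.1, 2.0, -1.0, 3.5},
                                                        {1.0, 2.0, 3.0, 4.0}));
    }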
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/gelu.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/gelu.comp
new file mode 100644
index 00000000..4cc7a68c
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/gelu.comp
@@ -0,0 +1,25 @@
+#version 450
+
+#include "generic_head.comp"
+#include "types.comp"
+
+#extension GL_EXT_control_flow_attributes : enable
+
+layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer X {A_TYPE data_a[];};
+layout (binding = 1) writeonly buffer D {D_TYPE data_d[];};
+
+void main() {
+ const float GELU_COEF_A = 0.044715f;
+ const float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f;
+ const uint i = gl_GlobalInvocationID.z * 262144 + gl_GlobalInvocationID.y * 512 + gl_GlobalInvocationID.x;
+
+ if (i >= p.KX) {
+ return;
+ }
+
+ const float xi = float(data_a[i]);
+ const float val = SQRT_2_OVER_PI*xi*(1.0f + GELU_COEF_A*xi*xi);
+ data_d[i] = D_TYPE(0.5f*xi*(2.0f - 2.0f / (exp(2 * val) + 1)));
+}
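gelu.comp evaluates the usual tanh-approximated GELU without calling tanh, relying on the identity tanh(v) = 1 - 2/(exp(2v) + 1), so 0.5*x*(1 + tanh(v)) becomes 0.5*x*(2 - 2/(exp(2*v) + 1)). A small C++ check of that equivalence, for illustration only (both function names are invented here):

    #include <cmath>
    #include <cstdio>

    // Both functions compute the same tanh-approximated GELU; the second uses
    // the shader's rewrite of tanh via tanh(v) = 1 - 2 / (exp(2v) + 1).
    double gelu_tanh(double x) {
        const double v = 0.79788456080286535588 * x * (1.0 + 0.044715 * x * x);
        return 0.5 * x * (1.0 + std::tanh(v));
    }

    double gelu_as_in_shader(double x) {
        const double v = 0.79788456080286535588 * x * (1.0 + 0.044715 * x * x);
        return 0.5 * x * (2.0 - 2.0 / (std::exp(2.0 * v) + 1.0));
    }

    int main() {
        const double xs[] = {-2.0, -0.5, 0.0, 0.5, 2.0};
        for (double x : xs) {
            std::printf("x=% .1f  %f  %f\n", x, gelu_tanh(x), gelu_as_in_shader(x));
        }
    }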
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/gelu_quick.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/gelu_quick.comp
new file mode 100644
index 00000000..e6e6fcfd
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/gelu_quick.comp
@@ -0,0 +1,23 @@
+#version 450
+
+#include "generic_head.comp"
+#include "types.comp"
+
+#extension GL_EXT_control_flow_attributes : enable
+
+layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer X {A_TYPE data_a[];};
+layout (binding = 1) writeonly buffer D {D_TYPE data_d[];};
+
+void main() {
+ const float GELU_QUICK_COEF = -1.702f;
+ const uint i = gl_GlobalInvocationID.z * 262144 + gl_GlobalInvocationID.y * 512 + gl_GlobalInvocationID.x;
+
+ if (i >= p.KX) {
+ return;
+ }
+
+ const float x = float(data_a[i]);
+ data_d[i] = D_TYPE(x * (1.0f / (1.0f + exp(GELU_QUICK_COEF * x))));
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/generic_binary_head.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/generic_binary_head.comp
new file mode 100644
index 00000000..062e2a4c
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/generic_binary_head.comp
@@ -0,0 +1,64 @@
+#extension GL_EXT_shader_16bit_storage : require
+#extension GL_EXT_control_flow_attributes : require
+
+layout (push_constant) uniform parameter
+{
+ uint ne;
+ uint ne00; uint ne01; uint ne02; uint ne03; uint nb00; uint nb01; uint nb02; uint nb03;
+ uint ne10; uint ne11; uint ne12; uint ne13; uint nb10; uint nb11; uint nb12; uint nb13;
+ uint ne20; uint ne21; uint ne22; uint ne23; uint nb20; uint nb21; uint nb22; uint nb23;
+ uint misalign_offsets;
+ float param1; float param2; int param3;
+} p;
+
+layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
+layout (binding = 1) readonly buffer B {B_TYPE data_b[];};
+layout (binding = 2) writeonly buffer D {D_TYPE data_d[];};
+
+// true if src0/src1 are the same shape and the indices can be reused without additional modulus
+layout(constant_id = 0) const bool norepeat = false;
+
+uint get_idx() {
+ return gl_GlobalInvocationID.z * 262144 + gl_GlobalInvocationID.y * 512 + gl_GlobalInvocationID.x;
+}
+
+uint get_aoffset() { return p.misalign_offsets >> 16; }
+uint get_boffset() { return (p.misalign_offsets >> 8) & 0xFF; }
+uint get_doffset() { return p.misalign_offsets & 0xFF; }
+
+// mod and div are expensive and coordinates/dimensions are often power of 2 or equal to 1
+uint fastmod(uint a, uint b) {
+ if ((b & (b-1)) == 0) {
+ return a & (b-1);
+ }
+ return a % b;
+}
+
+uint fastdiv(uint a, uint b) {
+ return (a < b) ? 0 : (a / b);
+}
+
+void get_indices(uint idx, out uint i00, out uint i01, out uint i02, out uint i03) {
+ i03 = fastdiv(idx, (p.ne02*p.ne01*p.ne00));
+ const uint i03_offset = i03 * p.ne02*p.ne01*p.ne00;
+ i02 = fastdiv((idx - i03_offset), (p.ne01*p.ne00));
+ const uint i02_offset = i02*p.ne01*p.ne00;
+ i01 = (idx - i03_offset - i02_offset) / p.ne00;
+ i00 = idx - i03_offset - i02_offset - i01*p.ne00;
+}
+
+uint src0_idx(uint i00, uint i01, uint i02, uint i03) {
+ return i03*p.nb03 + i02*p.nb02 + i01*p.nb01 + i00*p.nb00;
+}
+
+uint src1_idx(uint i00, uint i01, uint i02, uint i03) {
+ if (norepeat) {
+ return i03*p.nb13 + i02*p.nb12 + i01*p.nb11 + i00*p.nb10;
+ } else {
+ return fastmod(i03, p.ne13)*p.nb13 + fastmod(i02, p.ne12)*p.nb12 + fastmod(i01, p.ne11)*p.nb11 + fastmod(i00, p.ne10)*p.nb10;
+ }
+}
+
+uint dst_idx(uint i00, uint i01, uint i02, uint i03) {
+ return i03*p.nb23 + i02*p.nb22 + i01*p.nb21 + i00*p.nb20;
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/generic_head.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/generic_head.comp
new file mode 100644
index 00000000..66e46ae6
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/generic_head.comp
@@ -0,0 +1,9 @@
+#extension GL_EXT_shader_16bit_storage : require
+
+layout (push_constant) uniform parameter
+{
+ uint KX;
+ uint KY;
+ float param1;
+ float param2;
+} p;
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/generic_unary_head.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/generic_unary_head.comp
new file mode 100644
index 00000000..68d1bc9f
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/generic_unary_head.comp
@@ -0,0 +1,56 @@
+#extension GL_EXT_shader_16bit_storage : require
+#extension GL_EXT_control_flow_attributes : require
+
+layout (push_constant) uniform parameter
+{
+ uint ne;
+ uint ne00; uint ne01; uint ne02; uint ne03; uint nb00; uint nb01; uint nb02; uint nb03;
+ uint ne10; uint ne11; uint ne12; uint ne13; uint nb10; uint nb11; uint nb12; uint nb13;
+ uint misalign_offsets;
+ float param1; float param2;
+
+ uint ne0_012mp; uint ne0_012L;
+ uint ne0_01mp; uint ne0_01L;
+ uint ne0_0mp; uint ne0_0L;
+ uint ne1_012mp; uint ne1_012L;
+ uint ne1_01mp; uint ne1_01L;
+ uint ne1_0mp; uint ne1_0L;
+} p;
+
+layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
+layout (binding = 1) writeonly buffer D {D_TYPE data_d[];};
+
+uint get_idx() {
+ return gl_GlobalInvocationID.z * 262144 + gl_GlobalInvocationID.y * 512 + gl_GlobalInvocationID.x;
+}
+
+uint get_aoffset() { return p.misalign_offsets >> 16; }
+uint get_doffset() { return p.misalign_offsets & 0xFFFF; }
+
+// see init_fastdiv_values in ggml-vulkan.cpp
+uint fastdiv(uint n, uint mp, uint L) {
+ uint msbs, lsbs;
+ // msbs = mulhi(n, mp)
+ umulExtended(n, mp, msbs, lsbs);
+ return (msbs + n) >> L;
+}
+
+uint src0_idx(uint idx) {
+ const uint i03 = fastdiv(idx, p.ne0_012mp, p.ne0_012L);
+ const uint i03_offset = i03 * p.ne02*p.ne01*p.ne00;
+ const uint i02 = fastdiv(idx - i03_offset, p.ne0_01mp, p.ne0_01L);
+ const uint i02_offset = i02*p.ne01*p.ne00;
+ const uint i01 = fastdiv(idx - i03_offset - i02_offset, p.ne0_0mp, p.ne0_0L);
+ const uint i00 = idx - i03_offset - i02_offset - i01*p.ne00;
+ return i03*p.nb03 + i02*p.nb02 + i01*p.nb01 + i00*p.nb00;
+}
+
+uint dst_idx(uint idx) {
+ const uint i13 = fastdiv(idx, p.ne1_012mp, p.ne1_012L);
+ const uint i13_offset = i13 * p.ne12*p.ne11*p.ne10;
+ const uint i12 = fastdiv(idx - i13_offset, p.ne1_01mp, p.ne1_01L);
+ const uint i12_offset = i12*p.ne11*p.ne10;
+ const uint i11 = fastdiv(idx - i13_offset - i12_offset, p.ne1_0mp, p.ne1_0L);
+ const uint i10 = idx - i13_offset - i12_offset - i11*p.ne10;
+ return i13*p.nb13 + i12*p.nb12 + i11*p.nb11 + i10*p.nb10;
+}
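The fastdiv above consumes host-precomputed constants mp and L (see the comment pointing at init_fastdiv_values in ggml-vulkan.cpp) so the shader can avoid a per-element integer division. As a hedged sketch of how such constants are commonly derived, using the standard round-up magic-number method rather than necessarily the exact code in ggml-vulkan.cpp, in C++ (make_fastdiv and fastdiv_host are invented names):

    #include <cstdint>
    #include <cstdio>

    // Round-up magic-number division (assumes d >= 1): pick the smallest L
    // with 2^L >= d, then mp = floor(2^32 * (2^L - d) / d) + 1. floor(n / d)
    // is then recovered as (mulhi(n, mp) + n) >> L, which is exactly the
    // shader's (msbs + n) >> L.
    void make_fastdiv(uint32_t d, uint32_t &mp, uint32_t &L) {
        L = 0;
        while (L < 32 && (uint64_t(1) << L) < d) {
            L++;
        }
        mp = uint32_t(((uint64_t(1) << 32) * ((uint64_t(1) << L) - d)) / d + 1);
    }

    uint32_t fastdiv_host(uint32_t n, uint32_t mp, uint32_t L) {
        const uint32_t msbs = uint32_t((uint64_t(n) * mp) >> 32); // umulExtended high half
        return (msbs + n) >> L;
    }

    int main() {
        uint32_t mp, L;
        make_fastdiv(7, mp, L);
        std::printf("%u\n", fastdiv_host(100, mp, L)); // prints 14
    }

With the divisor known on the host, the GPU side only needs the multiply-high, add and shift shown in the shader.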
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/get_rows.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/get_rows.comp
new file mode 100644
index 00000000..e877ed77
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/get_rows.comp
@@ -0,0 +1,28 @@
+#version 450
+
+#include "types.comp"
+#include "generic_binary_head.comp"
+
+layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
+
+void main() {
+ const uint i00 = gl_GlobalInvocationID.x;
+ const uint i10 = gl_GlobalInvocationID.y;
+ const uint i11 = (gl_GlobalInvocationID.z)/p.ne12;
+ const uint i12 = (gl_GlobalInvocationID.z)%p.ne12;
+
+ if (i00 >= p.ne00) {
+ return;
+ }
+
+ const uint i01 = data_b[get_boffset() + i10*p.nb10 + i11*p.nb11 + i12*p.nb12];
+
+ const uint a_offset = get_aoffset() + i01*p.nb01 + i11*p.nb02 + i12*p.nb03;
+ const uint d_offset = get_doffset() + i10*p.nb21 + i11*p.nb22 + i12*p.nb23;
+
+#ifndef OPTIMIZATION_ERROR_WORKAROUND
+ data_d[d_offset + i00] = D_TYPE(data_a[a_offset + i00]);
+#else
+ data_d[d_offset + i00] = data_a[a_offset + i00];
+#endif
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/get_rows_quant.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/get_rows_quant.comp
new file mode 100644
index 00000000..1426fde6
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/get_rows_quant.comp
@@ -0,0 +1,39 @@
+#version 450
+
+#include "types.comp"
+#include "generic_binary_head.comp"
+#include "dequant_funcs.comp"
+
+layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
+
+void main() {
+ const uint i00 = (gl_GlobalInvocationID.x)*2;
+ const uint i10 = gl_GlobalInvocationID.y;
+ const uint i11 = (gl_GlobalInvocationID.z)/p.ne12;
+ const uint i12 = (gl_GlobalInvocationID.z)%p.ne12;
+
+#if defined(DATA_A_IQ4_NL)
+ init_iq4nl_shmem();
+#endif
+
+ if (i00 >= p.ne00) {
+ return;
+ }
+
+ const uint i01 = data_b[i10*p.nb10 + i11*p.nb11 + i12*p.nb12];
+
+ const uint a_offset = i01*p.nb01 + i11*p.nb02 + i12*p.nb03;
+ const uint d_offset = i10*p.nb21 + i11*p.nb22 + i12*p.nb23;
+
+ const uint ib = a_offset + i00/QUANT_K; // block index
+ const uint iqs = (i00%QUANT_K)/QUANT_R; // quant index
+ const uint iybs = i00 - i00%QUANT_K; // dst block start index
+ const uint y_offset = QUANT_R == 1 ? 1 : QUANT_K/2;
+
+ vec2 v = dequantize(ib, iqs, 0);
+ const vec2 dm = get_dm(ib, 0);
+ v = v * dm.x + dm.y;
+
+ data_d[d_offset + iybs + iqs ] = D_TYPE(v.x);
+ data_d[d_offset + iybs + iqs + y_offset] = D_TYPE(v.y);
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/group_norm.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/group_norm.comp
new file mode 100644
index 00000000..b6a0d564
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/group_norm.comp
@@ -0,0 +1,66 @@
+#version 450
+
+#include "generic_head.comp"
+#include "types.comp"
+
+#extension GL_EXT_control_flow_attributes : enable
+#define BLOCK_SIZE 512
+
+layout(local_size_x = BLOCK_SIZE, local_size_y = 1, local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer X {A_TYPE data_a[];};
+layout (binding = 1) writeonly buffer D {D_TYPE data_d[];};
+
+shared float tmp[BLOCK_SIZE];
+
+void main() {
+ const uint group_size = p.KX;
+ const float eps = p.param1;
+
+ const uint tid = gl_LocalInvocationID.x;
+ const uint start = gl_WorkGroupID.x * group_size + tid;
+ const uint end = (gl_WorkGroupID.x + 1) * group_size;
+
+ tmp[tid] = 0.0f;
+
+ // Calculate mean
+ [[unroll]] for (uint col = start; col < end; col += BLOCK_SIZE) {
+ tmp[tid] += float(data_a[col]);
+ }
+
+    // sum up partial sums and write back result
+ barrier();
+ [[unroll]] for (int s = BLOCK_SIZE / 2; s > 0; s >>= 1) {
+ if (tid < s) {
+ tmp[tid] += tmp[tid + s];
+ }
+ barrier();
+ }
+
+ const float mean = tmp[0] / group_size;
+ barrier();
+ tmp[tid] = 0.0f;
+
+ // Calculate variance
+ [[unroll]] for (uint col = start; col < end; col += BLOCK_SIZE) {
+ const float xi = float(data_a[col]) - mean;
+ data_d[col] = D_TYPE(xi);
+ tmp[tid] += xi * xi;
+ }
+
+ // sum up partial sums and write back result
+ barrier();
+ [[unroll]] for (int s = BLOCK_SIZE / 2; s > 0; s >>= 1) {
+ if (tid < s) {
+ tmp[tid] += tmp[tid + s];
+ }
+ barrier();
+ }
+
+ const float variance = tmp[0] / group_size;
+ const float scale = inversesqrt(variance + eps);
+
+ [[unroll]] for (uint col = start; col < end; col += BLOCK_SIZE) {
+ data_d[col] *= D_TYPE(scale);
+ }
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp
new file mode 100644
index 00000000..122b1e93
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp
@@ -0,0 +1,87 @@
+#version 450
+
+#extension GL_EXT_shader_16bit_storage : require
+#extension GL_EXT_spirv_intrinsics: enable
+#extension GL_EXT_control_flow_attributes : require
+
+#if RTE16
+spirv_execution_mode(capabilities = [4467], 4462, 16); // RoundingModeRTE, 16 bits
+#endif
+
+layout (push_constant) uniform parameter
+{
+ uint batch_offset; uint offset_delta;
+ uint IC;
+ uint IW; uint IH;
+ uint OW; uint OH;
+ uint KW; uint KH;
+ uint pelements;
+ uint CHW;
+ int s0; int s1;
+ int p0; int p1;
+ int d0; int d1;
+} p;
+
+#include "types.comp"
+
+layout(constant_id = 0) const uint BLOCK_SIZE = 32;
+
+const uint NUM_ITER = 512 / BLOCK_SIZE;
+
+layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer X {A_TYPE data_a[];};
+layout (binding = 1) writeonly buffer D {D_TYPE data_d[];};
+
+void main() {
+ const uint gidx = gl_GlobalInvocationID.x;
+
+ const uint oh = gl_GlobalInvocationID.y;
+ const uint batch = gl_GlobalInvocationID.z / p.IC;
+ const uint ic = gl_GlobalInvocationID.z % p.IC;
+
+ A_TYPE values[NUM_ITER];
+ uint offset_dst[NUM_ITER];
+ [[unroll]] for (uint idx = 0; idx < NUM_ITER; ++idx) {
+ values[idx] = A_TYPE(0);
+ }
+
+ [[unroll]] for (uint idx = 0; idx < NUM_ITER; ++idx) {
+
+ const uint i = gidx * NUM_ITER + idx;
+
+ const uint ksize = p.OW * (p.KH > 1 ? p.KW : 1);
+ const uint kx = i / ksize;
+ const uint kd = kx * ksize;
+ const uint ky = (i - kd) / p.OW;
+ const uint ix = i % p.OW;
+
+ const uint iiw = ix * p.s0 + kx * p.d0 - p.p0;
+ const uint iih = oh * p.s1 + ky * p.d1 - p.p1;
+
+ offset_dst[idx] =
+ ((batch * p.OH + oh) * p.OW + ix) * p.CHW +
+ (ic * (p.KW * p.KH) + ky * p.KW + kx);
+
+ if (i >= p.pelements) {
+ continue;
+ }
+
+ if (iih < p.IH && iiw < p.IW) {
+ const uint offset_src = ic * p.offset_delta + batch * p.batch_offset;
+ values[idx] = data_a[offset_src + iih * p.IW + iiw];
+ }
+ }
+
+ [[unroll]] for (uint idx = 0; idx < NUM_ITER; ++idx) {
+
+ const uint i = gidx * NUM_ITER + idx;
+
+ if (i >= p.pelements) {
+ continue;
+ }
+
+ data_d[offset_dst[idx]] = D_TYPE(values[idx]);
+ }
+
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/leaky_relu.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/leaky_relu.comp
new file mode 100644
index 00000000..d90a99ae
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/leaky_relu.comp
@@ -0,0 +1,22 @@
+#version 450
+
+#include "generic_head.comp"
+#include "types.comp"
+
+#extension GL_EXT_control_flow_attributes : enable
+
+layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer X {A_TYPE data_a[];};
+layout (binding = 1) writeonly buffer D {D_TYPE data_d[];};
+
+void main() {
+ const uint i = gl_GlobalInvocationID.z * 262144 + gl_GlobalInvocationID.y * 512 + gl_GlobalInvocationID.x;
+
+ if (i >= p.KX) {
+ return;
+ }
+
+ const float val = float(data_a[i]);
+ data_d[i] = D_TYPE(max(val, 0.0f) + min(val, 0.0f) * p.param1);
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul.comp
new file mode 100644
index 00000000..43de19df
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul.comp
@@ -0,0 +1,27 @@
+#version 450
+
+#include "types.comp"
+#include "generic_binary_head.comp"
+
+const uint num_threads = 256;
+
+layout(local_size_x = num_threads, local_size_y = 1, local_size_z = 1) in;
+
+void main() {
+ uint idx = get_idx();
+
+ // num_threads * num_iter must equal 512, to match the wg_denoms and get_idx calculation
+ const uint num_iter = 2;
+
+ [[unroll]] for (uint i = 0; i < num_iter; ++i) {
+ if (idx >= p.ne) {
+ continue;
+ }
+ uint i00, i01, i02, i03;
+ get_indices(idx, i00, i01, i02, i03);
+
+ data_d[get_doffset() + dst_idx(i00, i01, i02, i03)] = D_TYPE(FLOAT_TYPE(data_a[get_aoffset() + src0_idx(i00, i01, i02, i03)]) * FLOAT_TYPE(data_b[get_boffset() + src1_idx(i00, i01, i02, i03)]));
+
+ idx += num_threads;
+ }
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_split_k_reduce.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_split_k_reduce.comp
new file mode 100644
index 00000000..4c64fd47
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_split_k_reduce.comp
@@ -0,0 +1,48 @@
+#version 450
+
+#extension GL_EXT_control_flow_attributes : enable
+
+layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer A {float data_a[];};
+layout (binding = 0) readonly buffer A4 {vec4 data_a4[];};
+layout (binding = 1) writeonly buffer D {float data_d[];};
+layout (binding = 1) writeonly buffer D4 {vec4 data_d4[];};
+
+layout (push_constant) uniform parameter {
+ uint ne;
+ uint k_num;
+} p;
+
+void main() {
+ // Each invocation handles four consecutive components
+ const uint idx = gl_GlobalInvocationID.x * 4;
+
+ if (idx >= p.ne) {
+ return;
+ }
+
+ // Check if all four components are in bounds and aligned,
+ // then use vector loads
+ if (idx + 3 < p.ne && (p.ne % 4) == 0) {
+ vec4 result = vec4(0.0f);
+
+ [[unroll]] for (uint i = 0; i < p.k_num; i++) {
+ result += data_a4[(i * p.ne + idx) / 4];
+ }
+
+ data_d4[idx / 4] = result;
+ } else {
+ [[unroll]] for (uint j = 0; j < 4; ++j) {
+ if (idx + j < p.ne) {
+ float result = 0.0f;
+
+ [[unroll]] for (uint i = 0; i < p.k_num; i++) {
+ result += data_a[i * p.ne + idx + j];
+ }
+
+ data_d[idx + j] = result;
+ }
+ }
+ }
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp
new file mode 100644
index 00000000..24875cdc
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp
@@ -0,0 +1,152 @@
+#version 450
+
+#ifdef FLOAT16
+#extension GL_EXT_shader_explicit_arithmetic_types_float16 : require
+#endif
+#extension GL_EXT_shader_explicit_arithmetic_types : require
+
+#include "mul_mat_vec_base.comp"
+
+layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
+
+#if !defined(DATA_A_F32) && !defined(DATA_A_F16)
+#define K_PER_ITER 8
+#else
+#define K_PER_ITER 2
+#endif
+
+
+uint a_offset, b_offset, d_offset, y_offset;
+
+void iter(inout FLOAT_TYPE temp[NUM_COLS][NUM_ROWS], const uint first_row, const uint num_rows, const uint tid, const uint i, bool lastiter)
+{
+ [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
+ const uint col = i*BLOCK_SIZE + K_PER_ITER*tid;
+ const uint iqs = (col%QUANT_K)/QUANT_R; // quant index
+ const uint iybs = col - col%QUANT_K; // y block start index
+
+#if K_PER_ITER == 8
+#if QUANT_R == 2
+ const B_TYPE_VEC4 bv02 = data_b_v4[(j*p.batch_stride_b + b_offset + iybs + iqs) / 4];
+ const B_TYPE_VEC4 bv13 = data_b_v4[(j*p.batch_stride_b + b_offset + iybs + iqs + y_offset) / 4];
+ const vec4 bv0 = vec4(bv02.x, bv13.x, bv02.y, bv13.y);
+ const vec4 bv1 = vec4(bv02.z, bv13.z, bv02.w, bv13.w);
+#else
+ const vec4 bv0 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + iybs + iqs) / 4]);
+ const vec4 bv1 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + iybs + iqs) / 4 + 1]);
+#endif
+#else
+ // Check if the second of the pair of elements is OOB, and don't fetch B or
+ // accumulate it. We still fetch a pair of elements for A, which is fine for
+ // quantized formats since they'll be within the same block. We should
+ // probably skip fetching the second element for F16/F32, but as of now we
+ // still do.
+ const bool OOB = lastiter && (iybs + iqs + y_offset >= p.ncols);
+
+ FLOAT_TYPE b0 = 0, b1 = 0;
+ b0 = FLOAT_TYPE(data_b[j*p.batch_stride_b + b_offset + iybs + iqs]);
+ if (!OOB) {
+ b1 = FLOAT_TYPE(data_b[j*p.batch_stride_b + b_offset + iybs + iqs + y_offset]);
+ }
+#endif
+ uint ibi = first_row*p.ncols;
+ [[unroll]] for (uint n = 0; n < num_rows; ++n) {
+ const uint ib = (ibi + col)/QUANT_K; // block index
+ ibi += p.ncols;
+
+#if K_PER_ITER == 8
+ vec4 v = dequantize4(ib, iqs, a_offset);
+ vec4 v2 = dequantize4(ib, iqs+(4/QUANT_R), a_offset);
+
+ const vec2 dm = get_dm(ib, a_offset);
+ if (dm.y != 0) { // quant has min component
+ v = v * dm.x + dm.y;
+ v2 = v2 * dm.x + dm.y;
+ }
+
+ // matrix multiplication
+ FLOAT_TYPE rowtmp = dot(bv0, v);
+ rowtmp += dot(bv1, v2);
+
+ if (dm.y == 0)
+ rowtmp *= dm.x;
+
+ temp[j][n] += rowtmp;
+#else
+ const vec2 v = dequantize(ib, iqs, a_offset);
+
+ // matrix multiplication
+ temp[j][n] = fma(FLOAT_TYPE(v.x), b0, temp[j][n]);
+ if (!OOB) {
+ temp[j][n] = fma(FLOAT_TYPE(v.y), b1, temp[j][n]);
+ }
+#endif
+ }
+ }
+}
+
+void compute_outputs(const uint32_t first_row, const uint32_t num_rows) {
+ const uint tid = gl_LocalInvocationID.x;
+
+ get_offsets(a_offset, b_offset, d_offset);
+ a_offset /= QUANT_K;
+
+ y_offset = QUANT_R == 1 ? 1 : QUANT_K/2;
+
+ FLOAT_TYPE temp[NUM_COLS][NUM_ROWS];
+
+ [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
+ [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) {
+ temp[j][i] = FLOAT_TYPE(0);
+ }
+ }
+
+ uint num_iters = p.ncols / (K_PER_ITER * BLOCK_SIZE);
+ if (num_iters * K_PER_ITER * BLOCK_SIZE + K_PER_ITER*tid < p.ncols) {
+ num_iters++;
+ }
+ int unroll_count = 4;
+ uint unrolled_iters = num_iters & ~(unroll_count - 1);
+
+ uint i = 0;
+ while (i < unrolled_iters) {
+ // Manually partially unroll the loop
+ [[unroll]] for (uint k = 0; k < unroll_count; ++k) {
+ iter(temp, first_row, num_rows, tid, i*K_PER_ITER, false);
+ i++;
+ }
+ }
+ unroll_count = 2;
+ unrolled_iters = num_iters & ~(unroll_count - 1);
+ while (i < unrolled_iters) {
+ // Manually partially unroll the loop
+ [[unroll]] for (uint k = 0; k < unroll_count; ++k) {
+ iter(temp, first_row, num_rows, tid, i*K_PER_ITER, false);
+ i++;
+ }
+ }
+ while (i < num_iters) {
+ iter(temp, first_row, num_rows, tid, i*K_PER_ITER, true);
+ i++;
+ }
+
+ reduce_result(temp, d_offset, first_row, num_rows, tid);
+}
+
+void main() {
+ const uint first_row = NUM_ROWS * (gl_WorkGroupID.x + gl_NumWorkGroups.x * gl_WorkGroupID.z);
+
+#if defined(DATA_A_IQ4_NL)
+ init_iq4nl_shmem();
+#endif
+
+ // do NUM_ROWS at a time, unless there aren't enough remaining rows
+ if (first_row + NUM_ROWS <= p.stride_d) {
+ compute_outputs(first_row, NUM_ROWS);
+ } else {
+ if (first_row >= p.stride_d) {
+ return;
+ }
+ compute_outputs(first_row, p.stride_d - first_row);
+ }
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_base.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_base.comp
new file mode 100644
index 00000000..903753c7
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_base.comp
@@ -0,0 +1,118 @@
+#extension GL_EXT_control_flow_attributes : enable
+#extension GL_EXT_shader_16bit_storage : require
+#extension GL_EXT_shader_8bit_storage : require
+
+#ifdef MUL_MAT_ID
+#define EXPERT_COUNT 8
+#endif
+
+#include "types.comp"
+
+layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
+layout (binding = 1) readonly buffer B {B_TYPE data_b[];};
+layout (binding = 1) readonly buffer BV2 {B_TYPE_VEC2 data_b_v2[];};
+layout (binding = 1) readonly buffer BV4 {B_TYPE_VEC4 data_b_v4[];};
+
+layout (binding = 2) writeonly buffer D {D_TYPE data_d[];};
+#ifdef MUL_MAT_ID
+layout (binding = 3) readonly buffer IDS {int data_ids[];};
+#endif
+
+#include "dequant_funcs.comp"
+
+layout (push_constant) uniform parameter
+{
+ uint ncols;
+ uint stride_a;
+ uint stride_b;
+ uint stride_d;
+
+ uint batch_stride_a;
+ uint batch_stride_b;
+ uint batch_stride_d;
+
+#ifdef MUL_MAT_ID
+ uint nei0;
+ uint ne11;
+#else
+ uint ne02;
+ uint ne12;
+ uint broadcast2;
+ uint broadcast3;
+#endif
+} p;
+
+void get_offsets(out uint a_offset, out uint b_offset, out uint d_offset) {
+#ifdef MUL_MAT_ID
+ const uint expert_idx = gl_GlobalInvocationID.y;
+#else
+ const uint batch_idx = gl_GlobalInvocationID.y;
+#endif
+
+#ifndef MUL_MAT_ID
+ uint batch_idx_a = 0;
+ if (batch_idx != 0) {
+ const uint i13 = batch_idx / p.ne12;
+ const uint i12 = batch_idx % p.ne12;
+
+ const uint i03 = i13 / p.broadcast3;
+ const uint i02 = i12 / p.broadcast2;
+
+ batch_idx_a = i03 * p.ne02 + i02;
+ }
+#else
+ const uint expert_id = data_ids[expert_idx];
+#endif
+
+ a_offset =
+#ifdef MUL_MAT_ID
+ expert_id * p.batch_stride_a;
+#else
+ batch_idx_a * p.batch_stride_a;
+#endif
+ b_offset =
+#ifdef MUL_MAT_ID
+ (expert_idx % p.ne11) * p.stride_b;
+#else
+ batch_idx * p.batch_stride_b;
+#endif
+ d_offset =
+#ifdef MUL_MAT_ID
+ expert_idx * p.stride_d;
+#else
+ batch_idx * p.batch_stride_d;
+#endif
+}
+
+layout (constant_id = 0) const uint BLOCK_SIZE = 32;
+layout (constant_id = 1) const uint NUM_ROWS = 1;
+layout (constant_id = 2) const uint NUM_COLS = 1;
+
+shared FLOAT_TYPE tmpsh[NUM_COLS][NUM_ROWS][BLOCK_SIZE];
+
+void reduce_result(const in FLOAT_TYPE temp[NUM_COLS][NUM_ROWS], const in uint32_t d_offset, const in uint32_t first_row, const in uint32_t num_rows, const in uint32_t tid) {
+ // sum up partial sums and write back result
+ [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
+ [[unroll]] for (uint n = 0; n < num_rows; ++n) {
+ tmpsh[j][n][tid] = temp[j][n];
+ }
+ }
+ barrier();
+ [[unroll]] for (uint s = BLOCK_SIZE/2; s > 0; s >>= 1) {
+ if (tid < s) {
+ [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
+ [[unroll]] for (uint n = 0; n < num_rows; ++n) {
+ tmpsh[j][n][tid] += tmpsh[j][n][tid + s];
+ }
+ }
+ }
+ barrier();
+ }
+ if (tid == 0) {
+ [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
+ [[unroll]] for (uint n = 0; n < num_rows; ++n) {
+ data_d[j*p.batch_stride_d + d_offset + first_row + n] = D_TYPE(tmpsh[j][n][0]);
+ }
+ }
+ }
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_nc.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_nc.comp
new file mode 100644
index 00000000..1cc4996d
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_nc.comp
@@ -0,0 +1,71 @@
+#version 450
+
+#extension GL_EXT_control_flow_attributes : enable
+#extension GL_EXT_shader_16bit_storage : require
+
+#define BLOCK_SIZE 32
+#define FLOAT_TYPE float
+
+layout(local_size_x = BLOCK_SIZE, local_size_y = 1, local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
+layout (binding = 1) readonly buffer B {B_TYPE data_b[];};
+layout (binding = 2) writeonly buffer D {D_TYPE dst[];};
+
+layout (push_constant) uniform parameter
+{
+ uint ncols_x;
+ uint nrows_x;
+ uint row_stride_x;
+ uint channel_stride_x;
+ uint channel_x_divisor;
+ uint b_offset;
+ uint d_offset;
+} p;
+
+shared FLOAT_TYPE tmp[BLOCK_SIZE];
+
+void main() {
+ const uint tid = gl_LocalInvocationID.x;
+ const uint row_x = gl_GlobalInvocationID.y;
+ const uint channel = gl_GlobalInvocationID.z;
+ const uint channel_x = channel / p.channel_x_divisor;
+
+ const uint nrows_y = p.ncols_x;
+ const uint nrows_dst = p.nrows_x;
+ const uint row_dst = row_x;
+
+ const uint idst = channel*nrows_dst + row_dst;
+
+ tmp[tid] = 0.0f;
+
+ for (uint col_x0 = 0; col_x0 < p.ncols_x; col_x0 += BLOCK_SIZE) {
+ const uint col_x = col_x0 + tid;
+
+ if (col_x >= p.ncols_x) {
+ break;
+ }
+
+ const uint row_y = col_x;
+
+ const uint ix = channel_x*p.channel_stride_x + row_x*p.row_stride_x + col_x;
+ const uint iy = channel*nrows_y + row_y;
+
+ const FLOAT_TYPE xi = FLOAT_TYPE(data_a[ix]);
+
+ tmp[tid] = fma(xi, FLOAT_TYPE(data_b[iy]), tmp[tid]);
+ }
+
+ // sum up partial sums and write back result
+ barrier();
+ [[unroll]] for (int s = BLOCK_SIZE / 2; s > 0; s >>= 1) {
+ if (tid < s) {
+ tmp[tid] += tmp[tid + s];
+ }
+ barrier();
+ }
+
+ if (tid == 0) {
+ dst[idst] = tmp[0];
+ }
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_p021.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_p021.comp
new file mode 100644
index 00000000..9b443807
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_p021.comp
@@ -0,0 +1,73 @@
+#version 450
+
+#extension GL_EXT_control_flow_attributes : enable
+#extension GL_EXT_shader_16bit_storage : require
+
+#define BLOCK_SIZE 32
+#define FLOAT_TYPE float
+
+layout(local_size_x = BLOCK_SIZE, local_size_y = 1, local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
+layout (binding = 1) readonly buffer B {B_TYPE data_b[];};
+layout (binding = 2) writeonly buffer D {D_TYPE dst[];};
+
+layout (push_constant) uniform parameter
+{
+ uint ncols_x;
+ uint nrows_x;
+ uint nchannels_x;
+ uint nchannels_y;
+ uint b_offset;
+ uint d_offset;
+} p;
+
+shared FLOAT_TYPE tmp[BLOCK_SIZE];
+
+void main() {
+ const uint tid = gl_LocalInvocationID.x;
+ const uint row_x = gl_GlobalInvocationID.y;
+ const uint channel = gl_GlobalInvocationID.z;
+ const uint channel_x = channel / (p.nchannels_y / p.nchannels_x);
+
+ const uint nrows_y = p.ncols_x;
+ const uint nrows_dst = p.nrows_x;
+ const uint row_dst = row_x;
+
+ tmp[tid] = FLOAT_TYPE(0.0f);
+
+ for (uint col_x0 = 0; col_x0 < p.ncols_x; col_x0 += BLOCK_SIZE) {
+ const uint col_x = col_x0 + tid;
+
+ if (col_x >= p.ncols_x) {
+ break;
+ }
+
+ // x is transposed and permuted
+ const uint ix = row_x*p.nchannels_x*p.ncols_x + channel_x*p.ncols_x + col_x;
+ const FLOAT_TYPE xi = FLOAT_TYPE(data_a[ix]);
+
+ const uint row_y = col_x;
+
+ // y is not transposed but permuted
+ const uint iy = channel*nrows_y + row_y;
+
+ tmp[tid] = fma(xi, FLOAT_TYPE(data_b[iy]), tmp[tid]);
+ }
+
+ // dst is not transposed and not permuted
+ const uint idst = channel*nrows_dst + row_dst;
+
+ // sum up partial sums and write back result
+ barrier();
+ [[unroll]] for (int s = BLOCK_SIZE / 2; s > 0; s >>= 1) {
+ if (tid < s) {
+ tmp[tid] += tmp[tid + s];
+ }
+ barrier();
+ }
+
+ if (tid == 0) {
+ dst[idst] = tmp[0];
+ }
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp
new file mode 100644
index 00000000..93421344
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp
@@ -0,0 +1,115 @@
+#version 450
+#extension GL_EXT_shader_explicit_arithmetic_types : require
+
+#include "mul_mat_vec_base.comp"
+
+layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
+
+void compute_outputs(const uint32_t first_row, const uint32_t num_rows) {
+ uint a_offset, b_offset, d_offset;
+ get_offsets(a_offset, b_offset, d_offset);
+
+ const uint num_blocks_per_row = p.ncols / QUANT_K;
+
+ // 16 threads are used to process each block
+ const uint it_size = gl_WorkGroupSize.x/16;
+ const uint tid = gl_LocalInvocationID.x;
+    const uint itid = tid%16;  // 0...15
+ const uint ix = tid/16;
+
+ const uint step = 8;
+
+ const uint v_im = itid/step; // 0 or 1. 0 computes 0..., 1 computes 128...
+ const uint v_in = itid - step*v_im; // 0...15 or 0...7
+
+ const uint l0 = 2*v_in; // 0...15
+ const uint q_offset = 32*v_im + l0;
+ const uint s_offset = 8*v_im;
+ const uint y_offset = 128*v_im + l0;
+
+ FLOAT_TYPE temp[NUM_COLS][NUM_ROWS];
+
+ [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
+ [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) {
+ temp[j][i] = FLOAT_TYPE(0);
+ }
+ }
+
+ [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += it_size) {
+ const uint y_idx = i * QUANT_K + y_offset;
+
+ [[unroll]] for (uint n = 0; n < num_rows; ++n) {
+ const uint ib0 = a_offset / QUANT_K + (first_row+n)*num_blocks_per_row;
+ f16vec2 d = data_a[ib0 + i].d;
+ const FLOAT_TYPE dall = d.x;
+ const FLOAT_TYPE dmin = d.y;
+
+ uint32_t s0_u32 = data_a_packed32[ib0 + i].scales[s_offset / 4 + 0];
+ uint32_t s4_u32 = data_a_packed32[ib0 + i].scales[s_offset / 4 + 1];
+
+ uint32_t s0_lo4_u32 = s0_u32 & 0x0F0F0F0F;
+ uint32_t s0_hi4_u32 = (s0_u32 >> 4) & 0x0F0F0F0F;
+ uint32_t s4_lo4_u32 = s4_u32 & 0x0F0F0F0F;
+ uint32_t s4_hi4_u32 = (s4_u32 >> 4) & 0x0F0F0F0F;
+
+ uvec4 s0_lo4 = uvec4(unpack8(s0_lo4_u32));
+ uvec4 s4_lo4 = uvec4(unpack8(s4_lo4_u32));
+ uvec4 s0_hi4 = uvec4(unpack8(s0_hi4_u32));
+ uvec4 s4_hi4 = uvec4(unpack8(s4_hi4_u32));
+
+ uint16_t qs0_u16 = data_a_packed16[ib0 + i].qs[q_offset / 2 + 0];
+ uint16_t qs16_u16 = data_a_packed16[ib0 + i].qs[q_offset / 2 + 8];
+ uvec2 qs0 = uvec2(unpack8(qs0_u16));
+ uvec2 qs16 = uvec2(unpack8(qs16_u16));
+
+ [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
+ B_TYPE_VEC2 b0 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 0];
+ B_TYPE_VEC2 b16 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 8];
+ B_TYPE_VEC2 b32 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 16];
+ B_TYPE_VEC2 b48 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 24];
+ B_TYPE_VEC2 b64 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 32];
+ B_TYPE_VEC2 b80 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 40];
+ B_TYPE_VEC2 b96 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 48];
+ B_TYPE_VEC2 b112 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 56];
+
+ FLOAT_TYPE sum1 = FLOAT_TYPE(0.0);
+ FLOAT_TYPE sum2 = FLOAT_TYPE(0.0);
+ [[unroll]] for (int l = 0; l < 2; ++l) {
+ sum1 = fma(FLOAT_TYPE(b0[l]), FLOAT_TYPE(s0_lo4[0]) * FLOAT_TYPE((qs0[l] >> 0) & 3),
+ fma(FLOAT_TYPE(b16[l]), FLOAT_TYPE(s0_lo4[1]) * FLOAT_TYPE((qs16[l] >> 0) & 3),
+ fma(FLOAT_TYPE(b32[l]), FLOAT_TYPE(s0_lo4[2]) * FLOAT_TYPE((qs0[l] >> 2) & 3),
+ fma(FLOAT_TYPE(b48[l]), FLOAT_TYPE(s0_lo4[3]) * FLOAT_TYPE((qs16[l] >> 2) & 3),
+ fma(FLOAT_TYPE(b64[l]), FLOAT_TYPE(s4_lo4[0]) * FLOAT_TYPE((qs0[l] >> 4) & 3),
+ fma(FLOAT_TYPE(b80[l]), FLOAT_TYPE(s4_lo4[1]) * FLOAT_TYPE((qs16[l] >> 4) & 3),
+ fma(FLOAT_TYPE(b96[l]), FLOAT_TYPE(s4_lo4[2]) * FLOAT_TYPE((qs0[l] >> 6) & 3),
+ fma(FLOAT_TYPE(b112[l]), FLOAT_TYPE(s4_lo4[3]) * FLOAT_TYPE((qs16[l] >> 6) & 3), sum1))))))));
+ sum2 = fma(FLOAT_TYPE(b0[l]), FLOAT_TYPE(s0_hi4[0]),
+ fma(FLOAT_TYPE(b16[l]), FLOAT_TYPE(s0_hi4[1]),
+ fma(FLOAT_TYPE(b32[l]), FLOAT_TYPE(s0_hi4[2]),
+ fma(FLOAT_TYPE(b48[l]), FLOAT_TYPE(s0_hi4[3]),
+ fma(FLOAT_TYPE(b64[l]), FLOAT_TYPE(s4_hi4[0]),
+ fma(FLOAT_TYPE(b80[l]), FLOAT_TYPE(s4_hi4[1]),
+ fma(FLOAT_TYPE(b96[l]), FLOAT_TYPE(s4_hi4[2]),
+ fma(FLOAT_TYPE(b112[l]), FLOAT_TYPE(s4_hi4[3]), sum2))))))));
+ }
+ temp[j][n] = fma(dall, sum1, fma(-dmin, sum2, temp[j][n]));
+ }
+ }
+ }
+
+ reduce_result(temp, d_offset, first_row, num_rows, tid);
+}
+
+void main() {
+ const uint first_row = NUM_ROWS * (gl_WorkGroupID.x + gl_NumWorkGroups.x * gl_WorkGroupID.z);
+
+ // do NUM_ROWS at a time, unless there aren't enough remaining rows
+ if (first_row + NUM_ROWS <= p.stride_d) {
+ compute_outputs(first_row, NUM_ROWS);
+ } else {
+ if (first_row >= p.stride_d) {
+ return;
+ }
+ compute_outputs(first_row, p.stride_d - first_row);
+ }
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp
new file mode 100644
index 00000000..86b0159d
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp
@@ -0,0 +1,103 @@
+#version 450
+#extension GL_EXT_shader_explicit_arithmetic_types : require
+
+#include "mul_mat_vec_base.comp"
+
+layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
+
+void compute_outputs(const uint32_t first_row, const uint32_t num_rows) {
+ uint a_offset, b_offset, d_offset;
+ get_offsets(a_offset, b_offset, d_offset);
+
+ const uint num_blocks_per_row = p.ncols / QUANT_K;
+
+ // 16 threads are used to process each block
+ const uint it_size = gl_WorkGroupSize.x/16;
+ const uint tid = gl_LocalInvocationID.x;
+    const uint itid = tid%16;  // 0...15
+ const uint ix = tid/16;
+
+ const uint step = 8;
+
+ const uint v_im = itid/step; // 0 or 1. 0 computes 0..., 1 computes 128...
+ const uint v_in = itid - step*v_im; // 0...15 or 0...7
+
+ const uint8_t m = uint8_t(1 << (4 * v_im));
+
+ const uint l0 = 2*v_in; // 0...15
+ const uint q_offset = 32*v_im + l0;
+ const uint y_offset = 128*v_im + l0;
+
+ FLOAT_TYPE temp[NUM_COLS][NUM_ROWS];
+
+ [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
+ [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) {
+ temp[j][i] = FLOAT_TYPE(0);
+ }
+ }
+
+ const uint s_shift = 4 * v_im;
+
+ [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += it_size) {
+ const uint y_idx = i * QUANT_K + y_offset;
+
+ [[unroll]] for (uint n = 0; n < num_rows; ++n) {
+ const uint ib0 = a_offset / QUANT_K + (first_row+n)*num_blocks_per_row;
+ const FLOAT_TYPE d = FLOAT_TYPE(data_a[ib0 + i].d);
+
+ uint16_t s0_16 = data_a_packed16[ib0 + i].scales[0];
+ uint16_t s2_16 = data_a_packed16[ib0 + i].scales[1];
+ uint16_t s4_16 = data_a_packed16[ib0 + i].scales[2];
+ uint16_t s6_16 = data_a_packed16[ib0 + i].scales[3];
+ uint16_t s8_16 = data_a_packed16[ib0 + i].scales[4];
+ uint16_t s10_16 = data_a_packed16[ib0 + i].scales[5];
+ u8vec2 s0 = unpack8(s0_16);
+ u8vec2 s2 = unpack8(s2_16);
+ u8vec2 s4 = unpack8(s4_16);
+ u8vec2 s6 = unpack8(s6_16);
+ u8vec2 s8 = unpack8(s8_16);
+ u8vec2 s10 = unpack8(s10_16);
+
+ [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
+
+ B_TYPE_VEC2 b0 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 0];
+ B_TYPE_VEC2 b16 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 8];
+ B_TYPE_VEC2 b32 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 16];
+ B_TYPE_VEC2 b48 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 24];
+ B_TYPE_VEC2 b64 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 32];
+ B_TYPE_VEC2 b80 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 40];
+ B_TYPE_VEC2 b96 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 48];
+ B_TYPE_VEC2 b112 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 56];
+
+ FLOAT_TYPE sum = FLOAT_TYPE(0.0);
+ [[unroll]] for (int l = 0; l < 2; ++l) {
+ sum = fma(FLOAT_TYPE(b0[l]) * FLOAT_TYPE(int8_t(((s0[0] >> s_shift) & 0xF) | ((s8[0] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l ] ) & 3) - (((data_a[ib0 + i].hmask[l0 + l ] & (m << 0)) != 0) ? 0 : 4)),
+ fma(FLOAT_TYPE(b32[l]) * FLOAT_TYPE(int8_t(((s2[0] >> s_shift) & 0xF) | ((s10[0] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l ] >> 2) & 3) - (((data_a[ib0 + i].hmask[l0 + l ] & (m << 1)) != 0) ? 0 : 4)),
+ fma(FLOAT_TYPE(b64[l]) * FLOAT_TYPE(int8_t(((s4[0] >> s_shift) & 0xF) | ((s8[0] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l ] >> 4) & 3) - (((data_a[ib0 + i].hmask[l0 + l ] & (m << 2)) != 0) ? 0 : 4)),
+ fma(FLOAT_TYPE(b96[l]) * FLOAT_TYPE(int8_t(((s6[0] >> s_shift) & 0xF) | ((s10[0] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l ] >> 6) & 3) - (((data_a[ib0 + i].hmask[l0 + l ] & (m << 3)) != 0) ? 0 : 4)),
+ fma(FLOAT_TYPE(b16[l]) * FLOAT_TYPE(int8_t(((s0[1] >> s_shift) & 0xF) | ((s8[1] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] ) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 0)) != 0) ? 0 : 4)),
+ fma(FLOAT_TYPE(b48[l]) * FLOAT_TYPE(int8_t(((s2[1] >> s_shift) & 0xF) | ((s10[1] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] >> 2) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 1)) != 0) ? 0 : 4)),
+ fma(FLOAT_TYPE(b80[l]) * FLOAT_TYPE(int8_t(((s4[1] >> s_shift) & 0xF) | ((s8[1] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] >> 4) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 2)) != 0) ? 0 : 4)),
+ fma(FLOAT_TYPE(b112[l]) * FLOAT_TYPE(int8_t(((s6[1] >> s_shift) & 0xF) | ((s10[1] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] >> 6) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 3)) != 0) ? 0 : 4)), sum))))))));
+ }
+ temp[j][n] = fma(d, sum, temp[j][n]);
+ }
+ }
+ }
+
+ reduce_result(temp, d_offset, first_row, num_rows, tid);
+}
+
+void main() {
+ const uint first_row = NUM_ROWS * (gl_WorkGroupID.x + gl_NumWorkGroups.x * gl_WorkGroupID.z);
+
+ // do NUM_ROWS at a time, unless there aren't enough remaining rows
+ if (first_row + NUM_ROWS <= p.stride_d) {
+ compute_outputs(first_row, NUM_ROWS);
+ } else {
+ if (first_row >= p.stride_d) {
+ return;
+ }
+ compute_outputs(first_row, p.stride_d - first_row);
+ }
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q4_k.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q4_k.comp
new file mode 100644
index 00000000..cd1dd8e8
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q4_k.comp
@@ -0,0 +1,133 @@
+#version 450
+
+#extension GL_EXT_shader_explicit_arithmetic_types : require
+
+#include "mul_mat_vec_base.comp"
+
+layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
+
+void compute_outputs(const uint32_t first_row, const uint32_t num_rows) {
+ uint a_offset, b_offset, d_offset;
+ get_offsets(a_offset, b_offset, d_offset);
+
+ const uint num_blocks_per_row = p.ncols / QUANT_K;
+
+ // 16 threads are used to process each block
+ const uint it_size = gl_WorkGroupSize.x/16;
+ const uint tid = gl_LocalInvocationID.x;
+    const uint itid = tid%16;  // 0...15
+ const uint ix = tid/16;
+
+ const uint step = 4;
+
+ const uint il = itid/step; // 0...3
+ const uint ir = itid - step*il; // 0...7 or 0...3
+ const uint n = 4;
+
+ const uint v_im = il / 2; // 0 or 1. 0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224
+ const uint v_in = il % 2;
+
+ const uint l0 = n * (2 * ir + v_in); // 0...15
+ const uint q_offset = 32*v_im + l0;
+ const uint y_offset = 64*v_im + l0;
+
+ FLOAT_TYPE temp[NUM_COLS][NUM_ROWS];
+
+ [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
+ [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) {
+ temp[j][i] = FLOAT_TYPE(0);
+ }
+ }
+
+ [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += it_size) {
+ const uint y1_idx = i * QUANT_K + y_offset;
+ const uint y2_idx = y1_idx + 128;
+
+ [[unroll]] for (uint n = 0; n < num_rows; ++n) {
+ const uint ib0 = a_offset / QUANT_K + (first_row+n)*num_blocks_per_row;
+ f16vec2 d = data_a[ib0 + i].d;
+ const FLOAT_TYPE dall = FLOAT_TYPE(d.x);
+ const FLOAT_TYPE dmin = FLOAT_TYPE(d.y);
+
+ uint32_t scale0_u32 = data_a_packed16[ib0 + i].scales[v_im ];
+ uint32_t scale4_u32 = data_a_packed16[ib0 + i].scales[v_im + 2];
+ uint32_t scale8_u32 = data_a_packed16[ib0 + i].scales[v_im + 4];
+ uvec4 scale0 = uvec4(unpack8(scale0_u32));
+ uvec4 scale4 = uvec4(unpack8(scale4_u32));
+ uvec4 scale8 = uvec4(unpack8(scale8_u32));
+
+ const uint32_t sc0 = ( scale0.x & 0x3f);
+ const uint32_t sc1 = ( scale0.y & 0x3f);
+ const uint32_t sc2 = ( scale4.x & 0x3f);
+ const uint32_t sc3 = ( scale4.y & 0x3f);
+ const uint32_t sc4 = (( scale8.x & 0x0f) | ((scale0.x & 0xc0) >> 2));
+ const uint32_t sc5 = (( scale8.y & 0x0f) | ((scale0.y & 0xc0) >> 2));
+ const uint32_t sc6 = (((scale8.x >> 4) & 0x0f) | ((scale4.x & 0xc0) >> 2));
+ const uint32_t sc7 = (((scale8.y >> 4) & 0x0f) | ((scale4.y & 0xc0) >> 2));
+
+ uint32_t qs0_u32 = data_a_packed32[ib0 + i].qs[q_offset / 4];
+ uint32_t qs64_u32 = data_a_packed32[ib0 + i].qs[q_offset / 4 + 16];
+
+ uint32_t qs0_u32_lo4 = qs0_u32 & 0x0F0F0F0F;
+ uint32_t qs0_u32_hi4 = (qs0_u32 >> 4) & 0x0F0F0F0F;
+ uint32_t qs64_u32_lo4 = qs64_u32 & 0x0F0F0F0F;
+ uint32_t qs64_u32_hi4 = (qs64_u32 >> 4) & 0x0F0F0F0F;
+
+ uvec4 qs0_lo4 = uvec4(unpack8(qs0_u32_lo4));
+ uvec4 qs64_lo4 = uvec4(unpack8(qs64_u32_lo4));
+ uvec4 qs0_hi4 = uvec4(unpack8(qs0_u32_hi4));
+ uvec4 qs64_hi4 = uvec4(unpack8(qs64_u32_hi4));
+
+ const uint32_t q4_0 = qs0_lo4.x;
+ const uint32_t q4_1 = qs0_lo4.y;
+ const uint32_t q4_2 = qs0_lo4.z;
+ const uint32_t q4_3 = qs0_lo4.w;
+ const uint32_t q4_4 = qs0_hi4.x;
+ const uint32_t q4_5 = qs0_hi4.y;
+ const uint32_t q4_6 = qs0_hi4.z;
+ const uint32_t q4_7 = qs0_hi4.w;
+ const uint32_t q4_8 = qs64_lo4.x;
+ const uint32_t q4_9 = qs64_lo4.y;
+ const uint32_t q4_10 = qs64_lo4.z;
+ const uint32_t q4_11 = qs64_lo4.w;
+ const uint32_t q4_12 = qs64_hi4.x;
+ const uint32_t q4_13 = qs64_hi4.y;
+ const uint32_t q4_14 = qs64_hi4.z;
+ const uint32_t q4_15 = qs64_hi4.w;
+
+ [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
+ B_TYPE_VEC4 by10 = data_b_v4[(j*p.batch_stride_b + b_offset + y1_idx) / 4];
+ B_TYPE_VEC4 by132 = data_b_v4[(j*p.batch_stride_b + b_offset + y1_idx) / 4 + 8];
+ B_TYPE_VEC4 by20 = data_b_v4[(j*p.batch_stride_b + b_offset + y2_idx) / 4];
+ B_TYPE_VEC4 by232 = data_b_v4[(j*p.batch_stride_b + b_offset + y2_idx) / 4 + 8];
+
+ const FLOAT_TYPE sx = fma(FLOAT_TYPE(by10.x), q4_0, fma(FLOAT_TYPE(by10.y), q4_1, fma(FLOAT_TYPE(by10.z), q4_2, FLOAT_TYPE(by10.w) * q4_3)));
+ const FLOAT_TYPE sy = fma(FLOAT_TYPE(by132.x), q4_4, fma(FLOAT_TYPE(by132.y), q4_5, fma(FLOAT_TYPE(by132.z), q4_6, FLOAT_TYPE(by132.w) * q4_7)));
+ const FLOAT_TYPE sz = fma(FLOAT_TYPE(by20.x), q4_8, fma(FLOAT_TYPE(by20.y), q4_9, fma(FLOAT_TYPE(by20.z), q4_10, FLOAT_TYPE(by20.w) * q4_11)));
+ const FLOAT_TYPE sw = fma(FLOAT_TYPE(by232.x), q4_12, fma(FLOAT_TYPE(by232.y), q4_13, fma(FLOAT_TYPE(by232.z), q4_14, FLOAT_TYPE(by232.w) * q4_15)));
+ const FLOAT_TYPE smin =
+ fma(FLOAT_TYPE(by10.x), sc2, fma(FLOAT_TYPE(by132.x), sc3, fma(FLOAT_TYPE(by20.x), sc6, fma(FLOAT_TYPE(by232.x), sc7,
+ fma(FLOAT_TYPE(by10.y), sc2, fma(FLOAT_TYPE(by132.y), sc3, fma(FLOAT_TYPE(by20.y), sc6, fma(FLOAT_TYPE(by232.y), sc7,
+ fma(FLOAT_TYPE(by10.z), sc2, fma(FLOAT_TYPE(by132.z), sc3, fma(FLOAT_TYPE(by20.z), sc6, fma(FLOAT_TYPE(by232.z), sc7,
+ fma(FLOAT_TYPE(by10.w), sc2, fma(FLOAT_TYPE(by132.w), sc3, fma(FLOAT_TYPE(by20.w), sc6, FLOAT_TYPE(by232.w) * sc7)))))))))))))));
+ temp[j][n] = fma(dall, fma(sx, sc0, fma(sy, sc1, fma(sz, sc4, sw * sc5))), fma(-dmin, smin, temp[j][n]));
+ }
+ }
+ }
+
+ reduce_result(temp, d_offset, first_row, num_rows, tid);
+}
+
+void main() {
+ const uint first_row = NUM_ROWS * (gl_WorkGroupID.x + gl_NumWorkGroups.x * gl_WorkGroupID.z);
+
+ // do NUM_ROWS at a time, unless there aren't enough remaining rows
+ if (first_row + NUM_ROWS <= p.stride_d) {
+ compute_outputs(first_row, NUM_ROWS);
+ } else {
+ if (first_row >= p.stride_d) {
+ return;
+ }
+ compute_outputs(first_row, p.stride_d - first_row);
+ }
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q5_k.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q5_k.comp
new file mode 100644
index 00000000..0a68891c
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q5_k.comp
@@ -0,0 +1,162 @@
+#version 450
+
+#extension GL_EXT_shader_explicit_arithmetic_types : require
+
+#include "mul_mat_vec_base.comp"
+
+layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
+
+void compute_outputs(const uint32_t first_row, const uint32_t num_rows) {
+ uint a_offset, b_offset, d_offset;
+ get_offsets(a_offset, b_offset, d_offset);
+
+ const uint num_blocks_per_row = p.ncols / QUANT_K;
+
+ // 16 threads are used to process each block
+ const uint it_size = gl_WorkGroupSize.x/16;
+ const uint tid = gl_LocalInvocationID.x;
+    const uint itid = tid%16;  // 0...15
+ const uint ix = tid/16;
+
+ const uint il = itid/4; // 0...3
+    const uint ir = itid - 4*il; // 0...3
+
+ const uint v_im = il / 2; // 0 or 1. 0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224
+ const uint v_in = il % 2;
+
+ const uint l0 = 4*ir + 2*v_in; // 0...15
+ const uint q_offset = 32*v_im + l0;
+ const uint y_offset = 64*v_im + l0;
+
+ FLOAT_TYPE temp[NUM_COLS][NUM_ROWS];
+
+ [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
+ [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) {
+ temp[j][i] = FLOAT_TYPE(0);
+ }
+ }
+
+ [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += it_size) {
+ const uint y1_idx = i * QUANT_K + y_offset;
+ const uint y2_idx = y1_idx + 128;
+
+ [[unroll]] for (uint n = 0; n < num_rows; ++n) {
+ const uint ib0 = a_offset / QUANT_K + (first_row+n)*num_blocks_per_row;
+ f16vec2 d = data_a[ib0 + i].d;
+ const FLOAT_TYPE dall = FLOAT_TYPE(d.x);
+ const FLOAT_TYPE dmin = FLOAT_TYPE(d.y);
+
+ uint32_t scale0_u32 = data_a_packed16[ib0 + i].scales[v_im ];
+ uint32_t scale4_u32 = data_a_packed16[ib0 + i].scales[v_im + 2];
+ uint32_t scale8_u32 = data_a_packed16[ib0 + i].scales[v_im + 4];
+ uvec4 scale0 = uvec4(unpack8(scale0_u32));
+ uvec4 scale4 = uvec4(unpack8(scale4_u32));
+ uvec4 scale8 = uvec4(unpack8(scale8_u32));
+
+ const uint32_t sc0 = ( scale0.x & 0x3f);
+ const uint32_t sc1 = ( scale0.y & 0x3f);
+ const uint32_t sc2 = ( scale4.x & 0x3f);
+ const uint32_t sc3 = ( scale4.y & 0x3f);
+ const uint32_t sc4 = (( scale8.x & 0x0f) | ((scale0.x & 0xc0) >> 2));
+ const uint32_t sc5 = (( scale8.y & 0x0f) | ((scale0.y & 0xc0) >> 2));
+ const uint32_t sc6 = (((scale8.x >> 4) & 0x0f) | ((scale4.x & 0xc0) >> 2));
+ const uint32_t sc7 = (((scale8.y >> 4) & 0x0f) | ((scale4.y & 0xc0) >> 2));
+
+ uint32_t qs0_16_u32 = uint32_t(data_a_packed16[ib0 + i].qs[q_offset / 2]) | (uint32_t(data_a_packed16[ib0 + i].qs[q_offset / 2 + 8]) << 16);
+ uint32_t qs64_80_u32 = uint32_t(data_a_packed16[ib0 + i].qs[q_offset / 2 + 32]) | (uint32_t(data_a_packed16[ib0 + i].qs[q_offset / 2 + 40]) << 16);
+
+ uint32_t qs0_16_u32_lo4 = qs0_16_u32 & 0x0F0F0F0F;
+ uint32_t qs0_16_u32_hi4 = (qs0_16_u32 >> 4) & 0x0F0F0F0F;
+ uint32_t qs64_80_u32_lo4 = qs64_80_u32 & 0x0F0F0F0F;
+ uint32_t qs64_80_u32_hi4 = (qs64_80_u32 >> 4) & 0x0F0F0F0F;
+
+ uint32_t qh = pack32(u16vec2(data_a_packed16[ib0 + i].qh[l0 / 2], data_a_packed16[ib0 + i].qh[l0 / 2 + 8]));
+
+ uint32_t qs0_16_lo4_offset16 = ((qh >> (2*v_im)) & 0x01010101) << 4;
+ uint32_t qs0_16_hi4_offset16 = ((qh >> (2*v_im)) & 0x02020202) << 3;
+ uint32_t qs64_80_lo4_offset16 = ((qh >> (2*v_im)) & 0x10101010) << 0;
+ uint32_t qs64_80_hi4_offset16 = ((qh >> (2*v_im)) & 0x20202020) >> 1;
+
+ qs0_16_u32_lo4 += qs0_16_lo4_offset16;
+ qs0_16_u32_hi4 += qs0_16_hi4_offset16;
+ qs64_80_u32_lo4 += qs64_80_lo4_offset16;
+ qs64_80_u32_hi4 += qs64_80_hi4_offset16;
+
+ uvec4 qs0_16_lo4 = uvec4(unpack8(qs0_16_u32_lo4));
+ uvec4 qs64_80_lo4 = uvec4(unpack8(qs64_80_u32_lo4));
+ uvec4 qs0_16_hi4 = uvec4(unpack8(qs0_16_u32_hi4));
+ uvec4 qs64_80_hi4 = uvec4(unpack8(qs64_80_u32_hi4));
+
+ const uint32_t q4_0 = qs0_16_lo4.x;
+ const uint32_t q4_1 = qs0_16_lo4.y;
+ const uint32_t q4_2 = qs0_16_lo4.z;
+ const uint32_t q4_3 = qs0_16_lo4.w;
+ const uint32_t q4_4 = qs0_16_hi4.x;
+ const uint32_t q4_5 = qs0_16_hi4.y;
+ const uint32_t q4_6 = qs0_16_hi4.z;
+ const uint32_t q4_7 = qs0_16_hi4.w;
+ const uint32_t q4_8 = qs64_80_lo4.x;
+ const uint32_t q4_9 = qs64_80_lo4.y;
+ const uint32_t q4_10 = qs64_80_lo4.z;
+ const uint32_t q4_11 = qs64_80_lo4.w;
+ const uint32_t q4_12 = qs64_80_hi4.x;
+ const uint32_t q4_13 = qs64_80_hi4.y;
+ const uint32_t q4_14 = qs64_80_hi4.z;
+ const uint32_t q4_15 = qs64_80_hi4.w;
+
+ [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
+ B_TYPE_VEC2 by10 = data_b_v2[(j*p.batch_stride_b + b_offset + y1_idx) / 2];
+ B_TYPE_VEC2 by116 = data_b_v2[(j*p.batch_stride_b + b_offset + y1_idx) / 2 + 8];
+ B_TYPE_VEC2 by132 = data_b_v2[(j*p.batch_stride_b + b_offset + y1_idx) / 2 + 16];
+ B_TYPE_VEC2 by148 = data_b_v2[(j*p.batch_stride_b + b_offset + y1_idx) / 2 + 24];
+ B_TYPE_VEC2 by20 = data_b_v2[(j*p.batch_stride_b + b_offset + y2_idx) / 2];
+ B_TYPE_VEC2 by216 = data_b_v2[(j*p.batch_stride_b + b_offset + y2_idx) / 2 + 8];
+ B_TYPE_VEC2 by232 = data_b_v2[(j*p.batch_stride_b + b_offset + y2_idx) / 2 + 16];
+ B_TYPE_VEC2 by248 = data_b_v2[(j*p.batch_stride_b + b_offset + y2_idx) / 2 + 24];
+
+ const FLOAT_TYPE sx =
+ fma(FLOAT_TYPE(by10.x), q4_0,
+ fma(FLOAT_TYPE(by10.y), q4_1,
+ fma(FLOAT_TYPE(by116.x), q4_2,
+ FLOAT_TYPE(by116.y) * q4_3)));
+ const FLOAT_TYPE sy =
+ fma(FLOAT_TYPE(by132.x), q4_4,
+ fma(FLOAT_TYPE(by132.y), q4_5,
+ fma(FLOAT_TYPE(by148.x), q4_6,
+ FLOAT_TYPE(by148.y) * q4_7)));
+ const FLOAT_TYPE sz =
+ fma(FLOAT_TYPE(by20.x), q4_8,
+ fma(FLOAT_TYPE(by20.y), q4_9,
+ fma(FLOAT_TYPE(by216.x), q4_10,
+ FLOAT_TYPE(by216.y) * q4_11)));
+ const FLOAT_TYPE sw =
+ fma(FLOAT_TYPE(by232.x), q4_12,
+ fma(FLOAT_TYPE(by232.y), q4_13,
+ fma(FLOAT_TYPE(by248.x), q4_14,
+ FLOAT_TYPE(by248.y) * q4_15)));
+ const FLOAT_TYPE smin =
+ fma(FLOAT_TYPE(by10.x) + FLOAT_TYPE(by10.y) + FLOAT_TYPE(by116.x) + FLOAT_TYPE(by116.y), sc2,
+ fma(FLOAT_TYPE(by132.x) + FLOAT_TYPE(by132.y) + FLOAT_TYPE(by148.x) + FLOAT_TYPE(by148.y), sc3,
+ fma(FLOAT_TYPE(by20.x) + FLOAT_TYPE(by20.y) + FLOAT_TYPE(by216.x) + FLOAT_TYPE(by216.y), sc6,
+ (FLOAT_TYPE(by232.x) + FLOAT_TYPE(by232.y) + FLOAT_TYPE(by248.x) + FLOAT_TYPE(by248.y)) * sc7)));
+ temp[j][n] = fma(dall, fma(sx, sc0, fma(sy, sc1, fma(sz, sc4, sw * sc5))), fma(-dmin, smin, temp[j][n]));
+ }
+ }
+ }
+
+ reduce_result(temp, d_offset, first_row, num_rows, tid);
+}
+
+void main() {
+ const uint first_row = NUM_ROWS * (gl_WorkGroupID.x + gl_NumWorkGroups.x * gl_WorkGroupID.z);
+
+ // do NUM_ROWS at a time, unless there aren't enough remaining rows
+ if (first_row + NUM_ROWS <= p.stride_d) {
+ compute_outputs(first_row, NUM_ROWS);
+ } else {
+ if (first_row >= p.stride_d) {
+ return;
+ }
+ compute_outputs(first_row, p.stride_d - first_row);
+ }
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp
new file mode 100644
index 00000000..70e13a56
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp
@@ -0,0 +1,112 @@
+#version 450
+
+#extension GL_EXT_shader_explicit_arithmetic_types : require
+
+#include "mul_mat_vec_base.comp"
+
+layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
+
+void compute_outputs(const uint32_t first_row, const uint32_t num_rows) {
+ uint a_offset, b_offset, d_offset;
+ get_offsets(a_offset, b_offset, d_offset);
+
+ const uint num_blocks_per_row = p.ncols / QUANT_K;
+
+ // 16 threads are used to process each block
+ const uint it_size = gl_WorkGroupSize.x/16;
+ const uint tid = gl_LocalInvocationID.x;
+    const uint itid = tid%16;  // 0...15
+ const uint ix = tid/16;
+
+ const uint step = 8;
+
+ const uint v_im = itid/step; // 0 or 1. 0 computes 0..., 1 computes 128...
+    const uint v_in = itid - step*v_im; // 0...7
+
+ const uint l0 = 4 * v_in; // 0, 4, 8, ..., 28
+ const uint is = v_in / 4;
+
+ const uint ql_offset = 64*v_im + l0;
+ const uint qh_offset = 32*v_im + l0;
+ const uint s_offset = 8*v_im + is;
+ const uint y_offset = 128*v_im + l0;
+
+ FLOAT_TYPE temp[NUM_COLS][NUM_ROWS];
+
+ [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
+ [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) {
+ temp[j][i] = FLOAT_TYPE(0);
+ }
+ }
+
+ [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += it_size) {
+ const uint y_idx = i * QUANT_K + y_offset;
+
+ [[unroll]] for (uint n = 0; n < num_rows; ++n) {
+ const uint ib0 = a_offset / QUANT_K + (first_row+n)*num_blocks_per_row;
+ const FLOAT_TYPE d = FLOAT_TYPE(data_a[ib0 + i].d);
+
+ FLOAT_TYPE scales[4];
+ scales[0] = FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 0]);
+ scales[1] = FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 2]);
+ scales[2] = FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 4]);
+ scales[3] = FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 6]);
+
+ uint32_t ql0_u32 = uint32_t(data_a_packed16[ib0 + i].ql[ql_offset / 2]) | (uint32_t(data_a_packed16[ib0 + i].ql[ql_offset / 2 + 1]) << 16);
+ uint32_t ql32_u32 = uint32_t(data_a_packed16[ib0 + i].ql[ql_offset / 2 + 16]) | (uint32_t(data_a_packed16[ib0 + i].ql[ql_offset / 2 + 17]) << 16);
+
+ uint32_t ql0_u32_lo4 = ql0_u32 & 0x0F0F0F0F;
+ uint32_t ql0_u32_hi4 = (ql0_u32 >> 4) & 0x0F0F0F0F;
+ uint32_t ql32_u32_lo4 = ql32_u32 & 0x0F0F0F0F;
+ uint32_t ql32_u32_hi4 = (ql32_u32 >> 4) & 0x0F0F0F0F;
+
+ uint32_t qh_u32 = uint32_t(data_a_packed16[ib0 + i].qh[qh_offset / 2]) | (uint32_t(data_a_packed16[ib0 + i].qh[qh_offset / 2 + 1]) << 16);
+ uint32_t qh0_u32 = (qh_u32 & 0x03030303) << 4;
+ uint32_t qh2_u32 = (qh_u32 & 0x0C0C0C0C) << 2;
+ uint32_t qh4_u32 = (qh_u32 & 0x30303030) << 0;
+ uint32_t qh6_u32 = (qh_u32 & 0xC0C0C0C0) >> 2;
+
+ uint32_t q0_u32 = ql0_u32_lo4 | qh0_u32;
+ uint32_t q1_u32 = ql32_u32_lo4 | qh2_u32;
+ uint32_t q2_u32 = ql0_u32_hi4 | qh4_u32;
+ uint32_t q3_u32 = ql32_u32_hi4 | qh6_u32;
+
+ uvec4 q0 = uvec4(unpack8(q0_u32));
+ uvec4 q1 = uvec4(unpack8(q1_u32));
+ uvec4 q2 = uvec4(unpack8(q2_u32));
+ uvec4 q3 = uvec4(unpack8(q3_u32));
+
+ [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
+ B_TYPE_VEC4 by0 = data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4];
+ B_TYPE_VEC4 by32 = data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 + 8];
+ B_TYPE_VEC4 by64 = data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 + 16];
+ B_TYPE_VEC4 by96 = data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 + 24];
+
+ FLOAT_TYPE sum = FLOAT_TYPE(0.0);
+ [[unroll]] for (int l = 0; l < 4; ++l) {
+ sum = fma(FLOAT_TYPE(by0[l]) * scales[0], FLOAT_TYPE(int8_t(q0[l]) - 32),
+ fma(FLOAT_TYPE(by32[l]) * scales[1], FLOAT_TYPE(int8_t(q1[l]) - 32),
+ fma(FLOAT_TYPE(by64[l]) * scales[2], FLOAT_TYPE(int8_t(q2[l]) - 32),
+ fma(FLOAT_TYPE(by96[l]) * scales[3], FLOAT_TYPE(int8_t(q3[l]) - 32), sum))));
+ }
+ temp[j][n] += sum * d;
+ }
+ }
+ }
+
+ reduce_result(temp, d_offset, first_row, num_rows, tid);
+}
+
+void main() {
+ const uint first_row = NUM_ROWS * (gl_WorkGroupID.x + gl_NumWorkGroups.x * gl_WorkGroupID.z);
+
+ // do NUM_ROWS at a time, unless there aren't enough remaining rows
+ if (first_row + NUM_ROWS <= p.stride_d) {
+ compute_outputs(first_row, NUM_ROWS);
+ } else {
+ if (first_row >= p.stride_d) {
+ return;
+ }
+ compute_outputs(first_row, p.stride_d - first_row);
+ }
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp
new file mode 100644
index 00000000..48122cbe
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp
@@ -0,0 +1,631 @@
+#version 450
+
+#extension GL_EXT_control_flow_attributes : enable
+#extension GL_EXT_shader_16bit_storage : require
+
+#ifdef FLOAT16
+#extension GL_EXT_shader_explicit_arithmetic_types_float16 : require
+#endif
+
+#ifdef COOPMAT
+#extension GL_KHR_cooperative_matrix : enable
+#extension GL_KHR_memory_scope_semantics : enable
+#extension GL_KHR_shader_subgroup_basic : enable
+#endif
+
+#ifdef MUL_MAT_ID
+#extension GL_EXT_shader_explicit_arithmetic_types_int16 : require
+#endif
+
+#include "types.comp"
+
+#ifndef LOAD_VEC_A
+#define LOAD_VEC_A 1
+#endif
+#ifndef LOAD_VEC_B
+#define LOAD_VEC_B 1
+#endif
+
+layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
+layout (binding = 1) readonly buffer B {B_TYPE data_b[];};
+layout (binding = 2) writeonly buffer D {D_TYPE data_d[];};
+
+#ifdef MUL_MAT_ID
+layout (binding = 3) readonly buffer IDS {int data_ids[];};
+#endif
+
+layout (push_constant) uniform parameter
+{
+ uint M;
+ uint N;
+ uint K;
+ uint stride_a;
+ uint stride_b;
+ uint stride_d;
+
+ uint batch_stride_a;
+ uint batch_stride_b;
+ uint batch_stride_d;
+
+#ifdef MUL_MAT_ID
+ uint nei0;
+ uint nei1;
+ uint nbi1;
+ uint ne11;
+#else
+ uint k_split;
+ uint ne02;
+ uint ne12;
+ uint broadcast2;
+ uint broadcast3;
+#endif
+} p;
+
+layout (constant_id = 0) const uint BLOCK_SIZE = 64;
+layout (constant_id = 1) const uint BM = 64;
+layout (constant_id = 2) const uint BN = 64;
+layout (constant_id = 3) const uint BK = 16; // Assumed to be 32 if working with a quant
+layout (constant_id = 4) const uint WM = 32;
+layout (constant_id = 5) const uint WN = 32;
+layout (constant_id = 6) const uint WMITER = 2;
+layout (constant_id = 7) const uint TM = 4;
+layout (constant_id = 8) const uint TN = 2;
+layout (constant_id = 9) const uint TK = 1; // Only needed for coopmat
+layout (constant_id = 10) const uint WARP = 32;
+
+#ifdef COOPMAT
+#define SHMEM_STRIDE (BK + 8)
+#else
+#define SHMEM_STRIDE (BK + 1)
+#endif
+
+shared FLOAT_TYPE buf_a[BM * SHMEM_STRIDE];
+shared FLOAT_TYPE buf_b[BN * SHMEM_STRIDE];
+
+#ifdef MUL_MAT_ID
+shared u16vec2 row_ids[3072];
+#endif // MUL_MAT_ID
+
+#define NUM_WARPS (BLOCK_SIZE / WARP)
+
+#ifdef COOPMAT
+shared ACC_TYPE coopmat_stage[TM * TN * NUM_WARPS];
+#endif
+
+void main() {
+#if defined(DATA_A_IQ4_NL)
+ init_iq4nl_shmem();
+#endif
+
+#ifdef MUL_MAT_ID
+ const uint expert_idx = gl_GlobalInvocationID.z;
+#else
+ const uint batch_idx = gl_GlobalInvocationID.z;
+
+ const uint i13 = batch_idx / p.ne12;
+ const uint i12 = batch_idx % p.ne12;
+
+ const uint i03 = i13 / p.broadcast3;
+ const uint i02 = i12 / p.broadcast2;
+
+ const uint batch_idx_a = i03 * p.ne02 + i02;
+#endif
+
+ const uint blocks_m = (p.M + BM - 1) / BM;
+ const uint ir = gl_WorkGroupID.x % blocks_m;
+ const uint ik = gl_WorkGroupID.x / blocks_m;
+ const uint ic = gl_WorkGroupID.y;
+
+ const uint WNITER = (WM * WN) / (WARP * TM * TN * WMITER);
+ const uint WSUBM = WM / WMITER;
+ const uint WSUBN = WN / WNITER;
+
+#ifdef COOPMAT
+ const uint warp_i = gl_SubgroupID;
+
+ const uint tiw = gl_SubgroupInvocationID;
+
+ const uint cms_per_row = WM / TM;
+ const uint cms_per_col = WN / TN;
+
+ const uint storestride = WARP / TM;
+ const uint store_r = tiw % TM;
+ const uint store_c = tiw / TM;
+#else
+ const uint warp_i = gl_LocalInvocationID.x / WARP;
+
+ const uint tiw = gl_LocalInvocationID.x % WARP;
+
+ const uint tiwr = tiw % (WSUBM / TM);
+ const uint tiwc = tiw / (WSUBM / TM);
+#endif
+
+ const uint warp_r = warp_i % (BM / WM);
+ const uint warp_c = warp_i / (BM / WM);
+
+ const uint loadr_a = gl_LocalInvocationID.x % (BK / LOAD_VEC_A);
+ const uint loadc_a = gl_LocalInvocationID.x / (BK / LOAD_VEC_A);
+ const uint loadr_b = gl_LocalInvocationID.x % (BK / LOAD_VEC_B);
+ const uint loadc_b = gl_LocalInvocationID.x / (BK / LOAD_VEC_B);
+
+ const uint loadstride_a = gl_WorkGroupSize.x * LOAD_VEC_A / BK;
+ const uint loadstride_b = gl_WorkGroupSize.x * LOAD_VEC_B / BK;
+
+#ifdef MUL_MAT_ID
+ uint _ne1 = 0;
+ for (uint ii1 = 0; ii1 < p.nei1; ii1++) {
+ for (uint ii0 = 0; ii0 < p.nei0; ii0++) {
+ if (data_ids[ii1*p.nbi1 + ii0] == expert_idx) {
+ row_ids[_ne1] = u16vec2(ii0, ii1);
+ _ne1++;
+ }
+ }
+ }
+
+ barrier();
+
+ // Workgroup has no work
+ if (ic * BN >= _ne1) return;
+#endif
+
+#ifdef MUL_MAT_ID
+ const uint start_k = 0;
+ const uint end_k = p.K;
+#else
+ const uint start_k = ik * p.k_split;
+ const uint end_k = min(p.K, (ik + 1) * p.k_split);
+#endif
+
+ uint pos_a = (
+#ifdef MUL_MAT_ID
+ expert_idx * p.batch_stride_a +
+#else
+ batch_idx_a * p.batch_stride_a +
+#endif
+ ir * BM * p.stride_a + start_k) / LOAD_VEC_A;
+#ifdef MUL_MAT_ID
+ uint pos_b = 0;
+#else
+ uint pos_b = (batch_idx * p.batch_stride_b + ic * BN * p.stride_b + start_k) / LOAD_VEC_B;
+#endif
+
+#ifdef COOPMAT
+ coopmat<float16_t, gl_ScopeSubgroup, TM, TK, gl_MatrixUseA> cache_a;
+ coopmat<float16_t, gl_ScopeSubgroup, TK, TN, gl_MatrixUseB> cache_b;
+ coopmat<ACC_TYPE, gl_ScopeSubgroup, TM, TN, gl_MatrixUseAccumulator> sums[cms_per_row * cms_per_col];
+
+ [[unroll]] for (uint i = 0; i < cms_per_row * cms_per_col; i++) {
+ sums[i] = coopmat<ACC_TYPE, gl_ScopeSubgroup, TM, TN, gl_MatrixUseAccumulator>(0.0f);
+ }
+#else
+ ACC_TYPE sums[WMITER * TM * WNITER * TN];
+ FLOAT_TYPE cache_a[WMITER * TM];
+ FLOAT_TYPE cache_b[WNITER * TN];
+
+ [[unroll]] for (uint i = 0; i < WMITER*TM*WNITER*TN; i++) {
+ sums[i] = ACC_TYPE(0.0f);
+ }
+#endif
+
+ for (uint block = start_k; block < end_k; block += BK) {
+ [[unroll]] for (uint l = 0; l < BM; l += loadstride_a) {
+
+#if defined(DATA_A_F32) || defined(DATA_A_F16)
+#if LOAD_VEC_A == 8
+ const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
+ const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A;
+ buf_a[buf_idx ] = FLOAT_TYPE(data_a[idx][0].x);
+ buf_a[buf_idx + 1] = FLOAT_TYPE(data_a[idx][0].y);
+ buf_a[buf_idx + 2] = FLOAT_TYPE(data_a[idx][0].z);
+ buf_a[buf_idx + 3] = FLOAT_TYPE(data_a[idx][0].w);
+ buf_a[buf_idx + 4] = FLOAT_TYPE(data_a[idx][1].x);
+ buf_a[buf_idx + 5] = FLOAT_TYPE(data_a[idx][1].y);
+ buf_a[buf_idx + 6] = FLOAT_TYPE(data_a[idx][1].z);
+ buf_a[buf_idx + 7] = FLOAT_TYPE(data_a[idx][1].w);
+#elif LOAD_VEC_A == 4
+ const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
+ const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A;
+ buf_a[buf_idx ] = FLOAT_TYPE(data_a[idx].x);
+ buf_a[buf_idx + 1] = FLOAT_TYPE(data_a[idx].y);
+ buf_a[buf_idx + 2] = FLOAT_TYPE(data_a[idx].z);
+ buf_a[buf_idx + 3] = FLOAT_TYPE(data_a[idx].w);
+#else
+ if (ir * BM + loadc_a + l < p.M && block + loadr_a < end_k) {
+ buf_a[(loadc_a + l) * SHMEM_STRIDE + loadr_a] = FLOAT_TYPE(data_a[pos_a + (loadc_a + l) * p.stride_a + loadr_a]);
+ } else {
+ buf_a[(loadc_a + l) * SHMEM_STRIDE + loadr_a] = FLOAT_TYPE(0.0f);
+ }
+#endif
+#elif defined(DATA_A_Q4_0)
+ const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
+ const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a;
+
+ const uint ib = idx / 16;
+ const uint iqs = idx & 0xF;
+
+ const float d = float(data_a[ib].d);
+ const uint vui = uint(data_a[ib].qs[iqs]);
+ const vec2 v = (vec2(vui & 0xF, vui >> 4) - 8.0f) * d;
+
+ buf_a[buf_idx ] = FLOAT_TYPE(v.x);
+ buf_a[buf_idx + 16] = FLOAT_TYPE(v.y);
+#elif defined(DATA_A_Q4_1)
+ const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
+ const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a;
+
+ const uint ib = idx / 16;
+ const uint iqs = idx & 0xF;
+
+ const float d = float(data_a[ib].d);
+ const float m = float(data_a[ib].m);
+ const uint vui = uint(data_a[ib].qs[iqs]);
+ const vec2 v = vec2(vui & 0xF, vui >> 4) * d + m;
+
+ buf_a[buf_idx ] = FLOAT_TYPE(v.x);
+ buf_a[buf_idx + 16] = FLOAT_TYPE(v.y);
+#elif defined(DATA_A_Q5_0)
+ const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
+ const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a;
+
+ const uint ib = idx / 16;
+ const uint iqs = idx & 0xF;
+
+ const float d = float(data_a[ib].d);
+ const uint uint_qh = uint(data_a[ib].qh[1]) << 16 | data_a[ib].qh[0];
+ const ivec2 qh = ivec2(((uint_qh >> iqs) << 4) & 0x10, (uint_qh >> (iqs + 12)) & 0x10);
+ const uint vui = uint(data_a[ib].qs[iqs]);
+ const vec2 v = (vec2((vui & 0xF) | qh.x, (vui >> 4) | qh.y) - 16.0f) * d;
+
+ buf_a[buf_idx ] = FLOAT_TYPE(v.x);
+ buf_a[buf_idx + 16] = FLOAT_TYPE(v.y);
+#elif defined(DATA_A_Q5_1)
+ const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
+ const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a;
+
+ const uint ib = idx / 16;
+ const uint iqs = idx & 0xF;
+
+ const float d = float(data_a[ib].d);
+ const float m = float(data_a[ib].m);
+ const uint uint_qh = data_a[ib].qh;
+ const ivec2 qh = ivec2(((uint_qh >> iqs) << 4) & 0x10, (uint_qh >> (iqs + 12)) & 0x10);
+ const uint vui = uint(data_a[ib].qs[iqs]);
+ const vec2 v = vec2((vui & 0xF) | qh.x, (vui >> 4) | qh.y) * d + m;
+
+ buf_a[buf_idx ] = FLOAT_TYPE(v.x);
+ buf_a[buf_idx + 16] = FLOAT_TYPE(v.y);
+#elif defined(DATA_A_Q8_0)
+ const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
+ const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A;
+
+ const uint ib = idx / 16;
+ const uint iqs = (idx & 0xF) * 2;
+
+ const float d = float(data_a[ib].d);
+ const vec2 v = vec2(int(data_a[ib].qs[iqs]), int(data_a[ib].qs[iqs + 1])) * d;
+
+ buf_a[buf_idx ] = FLOAT_TYPE(v.x);
+ buf_a[buf_idx + 1] = FLOAT_TYPE(v.y);
+#elif defined(DATA_A_Q2_K)
+ const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
+ const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A;
+
+ const uint ib = idx / 128; // 2 values per idx
+ const uint iqs = idx % 128; // 0..127
+
+            const uint qsi = (iqs / 64) * 32 + (iqs % 16) * 2; // 0,2,4..30 or 32,34..62
+ const uint scalesi = iqs / 8; // 0..15
+ const uint qsshift = ((iqs % 64) / 16) * 2; // 0,2,4,6
+
+ const uvec2 qs = uvec2(data_a[ib].qs[qsi], data_a[ib].qs[qsi + 1]);
+ const uint scales = data_a[ib].scales[scalesi];
+ const vec2 d = vec2(data_a[ib].d);
+
+ const vec2 v = d.x * float(scales & 0xF) * vec2((qs >> qsshift) & 3) - d.y * float(scales >> 4);
+
+ buf_a[buf_idx ] = FLOAT_TYPE(v.x);
+ buf_a[buf_idx + 1] = FLOAT_TYPE(v.y);
+#elif defined(DATA_A_Q3_K)
+ const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
+ const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A;
+
+ const uint ib = idx / 128; // 2 values per idx
+ const uint iqs = idx % 128; // 0..127
+
+ const uint n = iqs / 64; // 0,1
+ const uint qsi = n * 32 + (iqs % 16) * 2; // 0,2,4..62
+ const uint hmi = (iqs % 16) * 2; // 0,2,4..30
+            const uint j = (iqs % 64) / 4;              // 0..15
+ const uint is = iqs / 8; // 0..15
+ const uint halfsplit = ((iqs % 64) / 16); // 0,1,2,3
+ const uint qsshift = halfsplit * 2; // 0,2,4,6
+ const uint m = 1 << (4 * n + halfsplit); // 1,2,4,8,16,32,64,128
+
+ const int8_t us = int8_t(is < 4 ? (data_a[ib].scales[is-0] & 0xF) | (((data_a[ib].scales[is+8] >> 0) & 3) << 4) :
+ is < 8 ? (data_a[ib].scales[is-0] & 0xF) | (((data_a[ib].scales[is+4] >> 2) & 3) << 4) :
+ is < 12 ? (data_a[ib].scales[is-8] >> 4) | (((data_a[ib].scales[is+0] >> 4) & 3) << 4) :
+ (data_a[ib].scales[is-8] >> 4) | (((data_a[ib].scales[is-4] >> 6) & 3) << 4));
+ const float dl = float(data_a[ib].d) * float(us - 32);
+
+ buf_a[buf_idx ] = FLOAT_TYPE(dl * float(int8_t((data_a[ib].qs[qsi ] >> qsshift) & 3) - (((data_a[ib].hmask[hmi ] & m) != 0) ? 0 : 4)));
+ buf_a[buf_idx + 1] = FLOAT_TYPE(dl * float(int8_t((data_a[ib].qs[qsi + 1] >> qsshift) & 3) - (((data_a[ib].hmask[hmi + 1] & m) != 0) ? 0 : 4)));
+#elif defined(DATA_A_Q4_K)
+ const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
+ const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A;
+
+ const uint ib = idx / 128; // 2 values per idx
+ const uint iqs = idx % 128; // 0..127
+
+ const uint n = iqs / 32; // 0,1,2,3
+ const uint b = (iqs % 32) / 16; // 0,1
+ const uint is = 2 * n + b; // 0..7
+ const uint qsi = n * 32 + (iqs % 16) * 2; // 0,2,4..126
+
+ const vec2 loadd = vec2(data_a[ib].d);
+
+ const uint scidx0 = (is < 4) ? is : (is + 4);
+ const uint scidx1 = (is < 4) ? is : (is - 4);
+ const uint scidxmask1 = (is < 4) ? 0x30 : 0xC0;
+ const uint scidxshift1 = (is < 4) ? 0 : 2;
+ const uint mbidx0 = is + 4;
+ const uint mbidx1 = (is < 4) ? is + 4 : is;
+ const uint mbidxmask0 = (is < 4) ? 0xF : 0xF0;
+ const uint mbidxshift0 = (is < 4) ? 0 : 4;
+ const uint mbidxmask1 = (is < 4) ? 0x30 : 0xC0;
+ const uint mbidxshift1 = (is < 4) ? 0 : 2;
+
+ const uint8_t sc = uint8_t((data_a[ib].scales[scidx0] & 0xF) | ((data_a[ib].scales[scidx1] & scidxmask1) >> scidxshift1));
+ const uint8_t mbyte = uint8_t((data_a[ib].scales[mbidx0] & mbidxmask0) >> mbidxshift0 | ((data_a[ib].scales[mbidx1] & mbidxmask1) >> mbidxshift1));
+
+ const float d = loadd.x * sc;
+ const float m = -loadd.y * mbyte;
+
+ buf_a[buf_idx ] = FLOAT_TYPE(fma(d, float((data_a[ib].qs[qsi ] >> (b * 4)) & 0xF), m));
+ buf_a[buf_idx + 1] = FLOAT_TYPE(fma(d, float((data_a[ib].qs[qsi + 1] >> (b * 4)) & 0xF), m));
+#elif defined(DATA_A_Q5_K)
+ const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
+ const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A;
+
+ const uint ib = idx / 128; // 2 values per idx
+ const uint iqs = idx % 128; // 0..127
+
+ const uint n = iqs / 32; // 0,1,2,3
+ const uint b = (iqs % 32) / 16; // 0,1
+ const uint is = 2 * n + b; // 0..7
+ const uint qsi = n * 32 + (iqs % 16) * 2; // 0,2,4..126
+ const uint qhi = (iqs % 16) * 2; // 0,2,4..30
+
+ const uint8_t hm = uint8_t(1 << (iqs / 16));
+
+ const vec2 loadd = vec2(data_a[ib].d);
+
+ const uint scidx0 = (is < 4) ? is : (is + 4);
+ const uint scidx1 = (is < 4) ? is : (is - 4);
+ const uint scidxmask1 = (is < 4) ? 0x30 : 0xC0;
+ const uint scidxshift1 = (is < 4) ? 0 : 2;
+ const uint mbidx0 = is + 4;
+ const uint mbidx1 = (is < 4) ? is + 4 : is;
+ const uint mbidxmask0 = (is < 4) ? 0xF : 0xF0;
+ const uint mbidxshift0 = (is < 4) ? 0 : 4;
+ const uint mbidxmask1 = (is < 4) ? 0x30 : 0xC0;
+ const uint mbidxshift1 = (is < 4) ? 0 : 2;
+
+ const uint8_t sc = uint8_t((data_a[ib].scales[scidx0] & 0xF) | ((data_a[ib].scales[scidx1] & scidxmask1) >> scidxshift1));
+ const uint8_t mbyte = uint8_t(((data_a[ib].scales[mbidx0] & mbidxmask0) >> mbidxshift0) | ((data_a[ib].scales[mbidx1] & mbidxmask1) >> mbidxshift1));
+
+ const float d = loadd.x * sc;
+ const float m = -loadd.y * mbyte;
+
+ buf_a[buf_idx ] = FLOAT_TYPE(fma(d, float((data_a[ib].qs[qsi ] >> (b * 4)) & 0xF) + float((data_a[ib].qh[qhi ] & hm) != 0 ? 16 : 0), m));
+ buf_a[buf_idx + 1] = FLOAT_TYPE(fma(d, float((data_a[ib].qs[qsi + 1] >> (b * 4)) & 0xF) + float((data_a[ib].qh[qhi + 1] & hm) != 0 ? 16 : 0), m));
+#elif defined(DATA_A_Q6_K)
+ const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
+ const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A;
+
+ const uint ib = idx / 128; // 2 values per idx
+ const uint iqs = idx % 128; // 0..127
+
+ const uint n = iqs / 64; // 0,1
+ const uint b = (iqs % 64) / 32; // 0,1
+ const uint is_b = (iqs % 16) / 8; // 0,1
+ const uint qhshift = ((iqs % 64) / 16) * 2; // 0,2,4,6
+ const uint is = 8 * n + qhshift + is_b; // 0..15
+ const uint qsi = n * 64 + (iqs % 32) * 2; // 0,2,4..126
+ const uint qhi = n * 32 + (iqs % 16) * 2; // 0,2,4..62
+
+ const float dscale = float(data_a[ib].d) * float(data_a[ib].scales[is]);
+
+ buf_a[buf_idx ] = FLOAT_TYPE(dscale * float(int8_t(((data_a[ib].ql[qsi ] >> (b * 4)) & 0xF) | (((data_a[ib].qh[qhi ] >> qhshift) & 3) << 4)) - 32));
+ buf_a[buf_idx + 1] = FLOAT_TYPE(dscale * float(int8_t(((data_a[ib].ql[qsi + 1] >> (b * 4)) & 0xF) | (((data_a[ib].qh[qhi + 1] >> qhshift) & 3) << 4)) - 32));
+#elif defined(DATA_A_IQ4_NL)
+ const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
+ const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a;
+
+ const uint ib = idx / 16;
+ const uint iqs = idx & 0xF;
+
+ const float d = float(data_a[ib].d);
+ const uint vui = uint(data_a[ib].qs[iqs]);
+ const vec2 v = vec2(kvalues_iq4nl[vui & 0xF], kvalues_iq4nl[vui >> 4]) * d;
+
+ buf_a[buf_idx ] = FLOAT_TYPE(v.x);
+ buf_a[buf_idx + 16] = FLOAT_TYPE(v.y);
+#endif
+ }
+ [[unroll]] for (uint l = 0; l < BN; l += loadstride_b) {
+#if LOAD_VEC_B == 8
+#ifdef MUL_MAT_ID
+ const u16vec2 row_idx = row_ids[ic * BN + loadc_b + l];
+ const uint idx = pos_b + row_idx.y * p.batch_stride_b / LOAD_VEC_B + (row_idx.x % p.ne11) * p.stride_b / LOAD_VEC_B + loadr_b;
+#else
+ const uint idx = pos_b + (loadc_b + l) * p.stride_b / LOAD_VEC_B + loadr_b;
+#endif
+ const uint buf_idx = (loadc_b + l) * SHMEM_STRIDE + loadr_b * LOAD_VEC_B;
+ buf_b[buf_idx + 0] = FLOAT_TYPE(data_b[idx][0].x);
+ buf_b[buf_idx + 1] = FLOAT_TYPE(data_b[idx][0].y);
+ buf_b[buf_idx + 2] = FLOAT_TYPE(data_b[idx][0].z);
+ buf_b[buf_idx + 3] = FLOAT_TYPE(data_b[idx][0].w);
+ buf_b[buf_idx + 4] = FLOAT_TYPE(data_b[idx][1].x);
+ buf_b[buf_idx + 5] = FLOAT_TYPE(data_b[idx][1].y);
+ buf_b[buf_idx + 6] = FLOAT_TYPE(data_b[idx][1].z);
+ buf_b[buf_idx + 7] = FLOAT_TYPE(data_b[idx][1].w);
+#elif LOAD_VEC_B == 4
+#ifdef MUL_MAT_ID
+ const u16vec2 row_idx = row_ids[ic * BN + loadc_b + l];
+ const uint idx = pos_b + row_idx.y * p.batch_stride_b / LOAD_VEC_B + (row_idx.x % p.ne11) * p.stride_b / LOAD_VEC_B + loadr_b;
+#else
+ const uint idx = pos_b + (loadc_b + l) * p.stride_b / LOAD_VEC_B + loadr_b;
+#endif
+ const uint buf_idx = (loadc_b + l) * SHMEM_STRIDE + loadr_b * LOAD_VEC_B;
+ buf_b[buf_idx + 0] = FLOAT_TYPE(data_b[idx].x);
+ buf_b[buf_idx + 1] = FLOAT_TYPE(data_b[idx].y);
+ buf_b[buf_idx + 2] = FLOAT_TYPE(data_b[idx].z);
+ buf_b[buf_idx + 3] = FLOAT_TYPE(data_b[idx].w);
+#elif !MUL_MAT_ID
+ if (ic * BN + loadc_b + l < p.N && block + loadr_b < end_k) {
+ buf_b[(loadc_b + l) * SHMEM_STRIDE + loadr_b] = FLOAT_TYPE(data_b[pos_b + (loadc_b + l) * p.stride_b + loadr_b]);
+ } else {
+ buf_b[(loadc_b + l) * SHMEM_STRIDE + loadr_b] = FLOAT_TYPE(0.0f);
+ }
+#else
+ const uint row_i = ic * BN + loadc_b + l;
+ if (row_i < _ne1) {
+ const u16vec2 row_idx = row_ids[row_i];
+ buf_b[(loadc_b + l) * SHMEM_STRIDE + loadr_b] = FLOAT_TYPE(data_b[pos_b + row_idx.y * p.batch_stride_b + (row_idx.x % p.ne11) * p.stride_b + loadr_b]);
+ } else {
+ buf_b[(loadc_b + l) * SHMEM_STRIDE + loadr_b] = FLOAT_TYPE(0.0f);
+ }
+#endif
+ }
+
+ barrier();
+
+ pos_a += BK / LOAD_VEC_A;
+ pos_b += BK / LOAD_VEC_B;
+
+#ifdef COOPMAT
+ [[unroll]] for (uint i = 0; i < BK; i += TK) {
+ [[unroll]] for (uint cm_row = 0; cm_row < cms_per_row; cm_row++) {
+ // Load from shared into cache
+ coopMatLoad(cache_a, buf_a, (warp_r * WM + cm_row * TM) * SHMEM_STRIDE + i, SHMEM_STRIDE, gl_CooperativeMatrixLayoutRowMajor);
+
+ [[unroll]] for (uint cm_col = 0; cm_col < cms_per_col; cm_col++) {
+ coopMatLoad(cache_b, buf_b, (warp_c * WN + cm_col * TN) * SHMEM_STRIDE + i, SHMEM_STRIDE, gl_CooperativeMatrixLayoutColumnMajor);
+
+ sums[cm_col * cms_per_row + cm_row] = coopMatMulAdd(cache_a, cache_b, sums[cm_col * cms_per_row + cm_row]);
+ }
+ }
+ }
+#else
+ [[unroll]] for (uint i = 0; i < BK; i++) {
+ // Load from shared into cache
+ [[unroll]] for (uint wsir = 0; wsir < WMITER; wsir++) {
+ [[unroll]] for (uint j = 0; j < TM; j++) {
+ cache_a[wsir * TM + j] = buf_a[(warp_r * WM + wsir * WSUBM + tiwr * TM + j) * SHMEM_STRIDE + i];
+ }
+ }
+ [[unroll]] for (uint wsic = 0; wsic < WNITER; wsic++) {
+ [[unroll]] for (uint j = 0; j < TN; j++) {
+ cache_b[wsic * TN + j] = buf_b[(warp_c * WN + wsic * WSUBN + tiwc * TN + j) * SHMEM_STRIDE + i];
+ }
+ }
+
+ [[unroll]] for (uint wsic = 0; wsic < WNITER; wsic++) {
+ [[unroll]] for (uint wsir = 0; wsir < WMITER; wsir++) {
+ [[unroll]] for (uint cc = 0; cc < TN; cc++) {
+ [[unroll]] for (uint cr = 0; cr < TM; cr++) {
+ const uint sums_idx = (wsic * TN + cc) * (WMITER * TM) + wsir * TM + cr;
+ sums[sums_idx] = fma(ACC_TYPE(cache_a[wsir * TM + cr]), ACC_TYPE(cache_b[wsic * TN + cc]), sums[sums_idx]);
+ }
+ }
+ }
+ }
+ }
+#endif
+
+ barrier();
+ }
+
+ const uint dr = ir * BM + warp_r * WM;
+ const uint dc = ic * BN + warp_c * WN;
+
+#ifndef MUL_MAT_ID
+ const uint offsets = batch_idx * p.batch_stride_d + ik * p.batch_stride_d * gl_NumWorkGroups.z;
+#endif
+
+#ifdef COOPMAT
+#ifdef MUL_MAT_ID
+ [[unroll]] for (uint cm_row = 0; cm_row < cms_per_row; cm_row++) {
+ [[unroll]] for (uint cm_col = 0; cm_col < cms_per_col; cm_col++) {
+ coopMatStore(sums[cm_col * cms_per_row + cm_row], coopmat_stage, warp_i * TM * TN, TM, gl_CooperativeMatrixLayoutColumnMajor);
+
+ [[unroll]] for (uint col = 0; col < BN; col += storestride) {
+ const uint row_i = dc + cm_col * TN + col + store_c;
+ if (row_i >= _ne1) break;
+
+ const u16vec2 row_idx = row_ids[row_i];
+
+ data_d[row_idx.y * p.batch_stride_d + row_idx.x * p.stride_d + dr + cm_row * TM + store_r] = D_TYPE(coopmat_stage[warp_i * TM * TN + (col + store_c) * TM + store_r]);
+ }
+ }
+ }
+#else
+ const bool is_aligned = p.stride_d % 4 == 0; // Assumption: D_TYPE == float
+
+ [[unroll]] for (uint cm_row = 0; cm_row < cms_per_row; cm_row++) {
+ [[unroll]] for (uint cm_col = 0; cm_col < cms_per_col; cm_col++) {
+ const bool is_in_bounds = dr + (cm_row + 1) * TM <= p.M && dc + (cm_col + 1) * TN <= p.N;
+
+ if (is_aligned && is_in_bounds) {
+ // Full coopMat is within bounds and stride_d is aligned with 16B
+ coopmat<D_TYPE, gl_ScopeSubgroup, TM, TN, gl_MatrixUseAccumulator> cm_dtype = coopmat<D_TYPE, gl_ScopeSubgroup, TM, TN, gl_MatrixUseAccumulator>(sums[cm_col * cms_per_row + cm_row]);
+ coopMatStore(cm_dtype, data_d, offsets + (dc + cm_col * TN) * p.stride_d + dr + cm_row * TM, p.stride_d, gl_CooperativeMatrixLayoutColumnMajor);
+ } else if (is_in_bounds) {
+ // Full coopMat is within bounds, but stride_d is not aligned
+ coopMatStore(sums[cm_col * cms_per_row + cm_row], coopmat_stage, warp_i * TM * TN, TM, gl_CooperativeMatrixLayoutColumnMajor);
+
+ [[unroll]] for (uint col = 0; col < TN; col += storestride) {
+ data_d[offsets + (dc + cm_col * TN + col + store_c) * p.stride_d + dr + cm_row * TM + store_r] = D_TYPE(coopmat_stage[warp_i * TM * TN + (col + store_c) * TM + store_r]);
+ }
+ } else if (dr + cm_row * TM < p.M && dc + cm_col * TN < p.N) {
+ // Partial coopMat is within bounds
+ coopMatStore(sums[cm_col * cms_per_row + cm_row], coopmat_stage, warp_i * TM * TN, TM, gl_CooperativeMatrixLayoutColumnMajor);
+
+ [[unroll]] for (uint col = 0; col < TN; col += storestride) {
+ if (dr + cm_row * TM + store_r < p.M && dc + cm_col * TN + col + store_c < p.N) {
+ data_d[offsets + (dc + cm_col * TN + col + store_c) * p.stride_d + dr + cm_row * TM + store_r] = D_TYPE(coopmat_stage[warp_i * TM * TN + (col + store_c) * TM + store_r]);
+ }
+ }
+ }
+ }
+ }
+#endif // MUL_MAT_ID
+#else
+ [[unroll]] for (uint wsic = 0; wsic < WNITER; wsic++) {
+ [[unroll]] for (uint wsir = 0; wsir < WMITER; wsir++) {
+
+ const uint dr_warp = dr + wsir * WSUBM + tiwr * TM;
+ const uint dc_warp = dc + wsic * WSUBN + tiwc * TN;
+ [[unroll]] for (uint cc = 0; cc < TN; cc++) {
+#ifdef MUL_MAT_ID
+ const uint row_i = dc_warp + cc;
+ if (row_i >= _ne1) break;
+
+ const u16vec2 row_idx = row_ids[row_i];
+#endif // MUL_MAT_ID
+ [[unroll]] for (uint cr = 0; cr < TM; cr++) {
+#ifdef MUL_MAT_ID
+ data_d[row_idx.y * p.batch_stride_d + row_idx.x * p.stride_d + dr_warp + cr] = D_TYPE(sums[(wsic * TN + cc) * (WMITER * TM) + wsir * TM + cr]);
+#else
+ if (dr_warp + cr < p.M && dc_warp + cc < p.N) {
+ data_d[offsets + (dc_warp + cc) * p.stride_d + dr_warp + cr] = D_TYPE(sums[(wsic * TN + cc) * (WMITER * TM) + wsir * TM + cr]);
+ }
+#endif // MUL_MAT_ID
+ }
+ }
+ }
+ }
+#endif // COOPMAT
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp
new file mode 100644
index 00000000..cbfa5dce
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp
@@ -0,0 +1,328 @@
+#version 450
+
+#extension GL_EXT_control_flow_attributes : enable
+#extension GL_EXT_shader_16bit_storage : require
+
+#extension GL_EXT_shader_explicit_arithmetic_types_float16 : require
+#extension GL_EXT_shader_explicit_arithmetic_types_int8 : require
+#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require
+#extension GL_EXT_shader_explicit_arithmetic_types_int16 : require
+
+#extension GL_KHR_memory_scope_semantics : enable
+#extension GL_KHR_cooperative_matrix : enable
+#extension GL_NV_cooperative_matrix2 : enable
+#extension GL_EXT_buffer_reference : enable
+#extension GL_KHR_shader_subgroup_ballot : enable
+#extension GL_KHR_shader_subgroup_vote : enable
+
+#include "types.comp"
+
+layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
+
+layout (constant_id = 1) const uint BM = 64;
+layout (constant_id = 2) const uint BN = 64;
+layout (constant_id = 3) const uint BK = 16; // Assumed to be 32 if working with a quant
+
+layout (push_constant) uniform parameter
+{
+ uint M;
+ uint N;
+ uint K;
+ uint stride_a;
+ uint stride_b;
+ uint stride_d;
+
+ uint batch_stride_a;
+ uint batch_stride_b;
+ uint batch_stride_d;
+
+#ifdef MUL_MAT_ID
+ uint nei0;
+ uint nei1;
+ uint nbi1;
+ uint ne11;
+#else
+ uint k_split;
+ uint ne02;
+ uint ne12;
+ uint broadcast2;
+ uint broadcast3;
+#endif
+} p;
+
+
+layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
+layout (binding = 1) readonly buffer B {B_TYPE data_b[];};
+layout (binding = 2) writeonly buffer D {D_TYPE data_d[];};
+
+#if QUANT_K > 1
+#define DECODEFUNCA , dequantFuncA
+#define MAT_A_TYPE float16_t
+
+#include "dequant_funcs_cm2.comp"
+
+#else
+#define DECODEFUNCA
+#define MAT_A_TYPE A_TYPE
+#endif
+
+#define MAT_B_TYPE B_TYPE
+
+#ifdef MUL_MAT_ID
+layout (binding = 3) readonly buffer IDS {int data_ids[];};
+
+shared u16vec4 row_ids[3072];
+
+layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufB {
+ B_TYPE b[];
+};
+
+uint _ne1;
+shared uint _ne1_sh;
+
+B_TYPE decodeFuncB(const in decodeBufB bl, const in uint blockCoords[2], const in uint coordInBlock[2])
+{
+ const uint row_i = blockCoords[0];
+
+ if (row_i >= _ne1) {
+ return B_TYPE(0.0);
+ }
+
+ const u16vec4 row_idx = row_ids[row_i];
+ B_TYPE ret = data_b[row_idx.y * p.batch_stride_b + row_idx.x * p.stride_b + blockCoords[1]];
+
+ return ret;
+}
+
+D_TYPE perElemOpD(const in uint32_t r, const in uint32_t c, const in D_TYPE elem, const in uint32_t ir, const in uint32_t ic)
+{
+ uint dr = ir * BM + r;
+ uint dc = ic * BN + c;
+
+ if (dr < p.M && dc < _ne1) {
+ uint row_i = dc;
+ const u16vec4 row_idx = row_ids[row_i];
+ data_d[row_idx.y * p.batch_stride_d + row_idx.z * p.stride_d + dr] = elem;
+ }
+ return elem;
+}
+
+#endif
+
+void main() {
+#if defined(DATA_A_IQ4_NL)
+ init_iq4nl_shmem();
+#endif
+
+#ifdef MUL_MAT_ID
+ const uint expert_idx = gl_GlobalInvocationID.z;
+#else
+ const uint batch_idx = gl_GlobalInvocationID.z;
+
+ const uint i13 = batch_idx / p.ne12;
+ const uint i12 = batch_idx % p.ne12;
+
+ const uint i03 = i13 / p.broadcast3;
+ const uint i02 = i12 / p.broadcast2;
+
+ const uint batch_idx_a = i03 * p.ne02 + i02;
+#endif
+
+ const uint blocks_m = (p.M + BM - 1) / BM;
+ const uint ir = gl_WorkGroupID.x % blocks_m;
+ const uint ik = gl_WorkGroupID.x / blocks_m;
+ const uint ic = gl_WorkGroupID.y;
+
+#ifdef MUL_MAT_ID
+ // Spread the search across all elements in the first subgroup
+ if (gl_SubgroupID == 0) {
+ _ne1 = 0;
+ uint num_elements = p.nei1 * p.nei0;
+
+ for (uint i = gl_SubgroupInvocationID; subgroupAny(i < num_elements); i += gl_SubgroupSize) {
+ bool in_range = i < num_elements;
+ uint ii0 = i % p.nei0;
+ uint ii1 = i / p.nei0;
+ uint id = in_range ? data_ids[ii1*p.nbi1 + ii0] : 0;
+ uvec4 ballot = subgroupBallot(in_range && id == expert_idx);
+ uint idx = subgroupBallotExclusiveBitCount(ballot);
+ if (in_range && id == expert_idx) {
+ row_ids[_ne1 + idx] = u16vec4(ii0 % p.ne11, ii1, ii0, 0);
+ }
+ _ne1 += subgroupBallotBitCount(ballot);
+ }
+ _ne1_sh = _ne1;
+ }
+
+ barrier();
+
+ _ne1 = _ne1_sh;
+
+ // Workgroup has no work
+ if (ic * BN >= _ne1) return;
+#endif
+
+#ifdef MUL_MAT_ID
+ uint start_k = 0;
+ const uint end_k = p.K;
+#else
+ uint start_k = ik * p.k_split;
+ const uint end_k = min(p.K, (ik + 1) * p.k_split);
+#endif
+
+ coopmat<ACC_TYPE, gl_ScopeWorkgroup, BM, BN, gl_MatrixUseAccumulator> sum;
+ sum = coopmat<ACC_TYPE, gl_ScopeWorkgroup, BM, BN, gl_MatrixUseAccumulator>(0.0);
+
+#ifdef MUL_MAT_ID
+ uint pos_a = (expert_idx * p.batch_stride_a) / QUANT_K;
+ uint pos_b = 0;
+#else
+ uint pos_a = (batch_idx_a * p.batch_stride_a) / QUANT_K;
+ uint pos_b = batch_idx * p.batch_stride_b;
+#endif
+
+ uint stride_a = p.stride_a / QUANT_K;
+ uint stride_b = p.stride_b;
+
+ // Hint to the compiler that values are aligned (want 16B alignment).
+ // Quants are always block-aligned, no alignment needed.
+#if ALIGNED
+#if QUANT_K == 1
+ stride_a &= ~7;
+#endif
+ stride_b &= ~7;
+#endif
+
+ // Create layouts for both clamped and unclamped accesses
+ tensorLayoutNV<2> tensorLayoutA = createTensorLayoutNV(2);
+ tensorLayoutNV<2, gl_CooperativeMatrixClampModeConstantNV> tensorLayoutAClamp = createTensorLayoutNV(2, gl_CooperativeMatrixClampModeConstantNV);
+ tensorLayoutNV<2> tensorLayoutB = createTensorLayoutNV(2);
+ tensorLayoutNV<2, gl_CooperativeMatrixClampModeConstantNV> tensorLayoutBClamp = createTensorLayoutNV(2, gl_CooperativeMatrixClampModeConstantNV);
+ tensorLayoutNV<2, gl_CooperativeMatrixClampModeConstantNV> tensorLayoutD = createTensorLayoutNV(2, gl_CooperativeMatrixClampModeConstantNV);
+
+#if QUANT_K > 1
+ tensorLayoutA = setTensorLayoutBlockSizeNV(tensorLayoutA, 1, QUANT_K);
+ tensorLayoutAClamp = setTensorLayoutBlockSizeNV(tensorLayoutAClamp, 1, QUANT_K);
+#endif
+
+ // Use end_k rather than p.K as the dimension because that's what
+ // we need to bound check against when using split_k
+ tensorLayoutA = setTensorLayoutDimensionNV(tensorLayoutA, p.M, end_k);
+ tensorLayoutB = setTensorLayoutDimensionNV(tensorLayoutB, p.N, end_k);
+ tensorLayoutD = setTensorLayoutDimensionNV(tensorLayoutD, p.N, p.M);
+ tensorLayoutAClamp = setTensorLayoutDimensionNV(tensorLayoutAClamp, p.M, end_k);
+ tensorLayoutBClamp = setTensorLayoutDimensionNV(tensorLayoutBClamp, p.N, end_k);
+
+ tensorViewNV<2, false, 1, 0> tensorViewTranspose = createTensorViewNV(2, false, 1, 0);
+
+#if !defined(MUL_MAT_ID)
+ // Detect a fast path where all loads are entirely in bounds and no clamping is required
+ if ((ir + 1) * BM <= p.M && (ic + 1) * BN <= p.N && (start_k % BK) == 0 && (end_k % BK) == 0 &&
+#if QUANT_K == 1
+ (stride_a % 8) == 0 &&
+#endif
+ (stride_b % 8) == 0 && (start_k % 8) == 0) {
+ // Hint to the compiler that values are aligned (want 16B alignment)
+ start_k &= ~7;
+ stride_b &= ~7;
+#if QUANT_K == 1
+ stride_a &= ~7;
+#endif
+
+ tensorLayoutA = setTensorLayoutStrideNV(tensorLayoutA, stride_a, 1);
+ tensorLayoutB = setTensorLayoutStrideNV(tensorLayoutB, stride_b, 1);
+
+ uint k_iters = (end_k - start_k + BK - 1) / BK;
+
+ for (uint block_k = start_k, i = 0; i < k_iters; block_k += BK, ++i) {
+
+ coopmat<MAT_A_TYPE, gl_ScopeWorkgroup, BM, BK, gl_MatrixUseA> mat_a;
+ coopmat<MAT_B_TYPE, gl_ScopeWorkgroup, BK, BN, gl_MatrixUseB> mat_b;
+
+ coopMatLoadTensorNV(mat_a, data_a, pos_a, sliceTensorLayoutNV(tensorLayoutA, ir * BM, BM, block_k, BK) DECODEFUNCA);
+ coopmat<FLOAT_TYPE, gl_ScopeWorkgroup, BM, BK, gl_MatrixUseA> mat_a_ft = coopmat<FLOAT_TYPE, gl_ScopeWorkgroup, BM, BK, gl_MatrixUseA>(mat_a);
+
+ coopMatLoadTensorNV(mat_b, data_b, pos_b, sliceTensorLayoutNV(tensorLayoutB, ic * BN, BN, block_k, BK), tensorViewTranspose);
+ coopmat<FLOAT_TYPE, gl_ScopeWorkgroup, BK, BN, gl_MatrixUseB> mat_b_ft = coopmat<FLOAT_TYPE, gl_ScopeWorkgroup, BK, BN, gl_MatrixUseB>(mat_b);
+
+ sum = coopMatMulAdd(mat_a_ft, mat_b_ft, sum);
+ }
+ } else
+#endif // !defined(MUL_MAT_ID)
+ {
+ tensorLayoutA = setTensorLayoutStrideNV(tensorLayoutA, stride_a, 1);
+
+ tensorLayoutAClamp = setTensorLayoutStrideNV(tensorLayoutAClamp, stride_a, 1);
+
+ tensorLayoutB = setTensorLayoutStrideNV(tensorLayoutB, stride_b, 1);
+
+ tensorLayoutBClamp = setTensorLayoutStrideNV(tensorLayoutBClamp, stride_b, 1);
+
+ [[dont_unroll]]
+ for (uint block_k = start_k; block_k < end_k; block_k += BK) {
+
+ coopmat<MAT_A_TYPE, gl_ScopeWorkgroup, BM, BK, gl_MatrixUseA> mat_a;
+ coopmat<MAT_B_TYPE, gl_ScopeWorkgroup, BK, BN, gl_MatrixUseB> mat_b;
+ coopmat<FLOAT_TYPE, gl_ScopeWorkgroup, BM, BK, gl_MatrixUseA> mat_a_ft;
+ coopmat<FLOAT_TYPE, gl_ScopeWorkgroup, BK, BN, gl_MatrixUseB> mat_b_ft;
+
+ // Clamping is expensive, so detect different code paths for each combination
+ // of A and B needing clamping.
+ bool unclampedA = (ir + 1) * BM <= p.M && block_k + BK <= end_k && (block_k % 8) == 0;
+#ifdef MUL_MAT_ID
+ bool unclampedB = true;
+#else
+ bool unclampedB = (ic + 1) * BN <= p.N && block_k + BK <= end_k && (block_k % 8) == 0;
+#endif
+ if (unclampedA && unclampedB) {
+ coopMatLoadTensorNV(mat_a, data_a, pos_a, sliceTensorLayoutNV(tensorLayoutA, ir * BM, BM, (block_k & ~7), BK) DECODEFUNCA);
+#ifdef MUL_MAT_ID
+ coopMatLoadTensorNV(mat_b, data_b, pos_b, sliceTensorLayoutNV(tensorLayoutB, ic * BN, BN, block_k, BK), tensorViewTranspose, decodeFuncB);
+#else
+ coopMatLoadTensorNV(mat_b, data_b, pos_b, sliceTensorLayoutNV(tensorLayoutB, ic * BN, BN, (block_k & ~7), BK), tensorViewTranspose);
+#endif
+ mat_a_ft = coopmat<FLOAT_TYPE, gl_ScopeWorkgroup, BM, BK, gl_MatrixUseA>(mat_a);
+ mat_b_ft = coopmat<FLOAT_TYPE, gl_ScopeWorkgroup, BK, BN, gl_MatrixUseB>(mat_b);
+ sum = coopMatMulAdd(mat_a_ft, mat_b_ft, sum);
+ } else if (unclampedA && !unclampedB) {
+ coopMatLoadTensorNV(mat_a, data_a, pos_a, sliceTensorLayoutNV(tensorLayoutA, ir * BM, BM, (block_k & ~7), BK) DECODEFUNCA);
+ coopMatLoadTensorNV(mat_b, data_b, pos_b, sliceTensorLayoutNV(tensorLayoutBClamp, ic * BN, BN, block_k, BK), tensorViewTranspose);
+
+ mat_a_ft = coopmat<FLOAT_TYPE, gl_ScopeWorkgroup, BM, BK, gl_MatrixUseA>(mat_a);
+ mat_b_ft = coopmat<FLOAT_TYPE, gl_ScopeWorkgroup, BK, BN, gl_MatrixUseB>(mat_b);
+ sum = coopMatMulAdd(mat_a_ft, mat_b_ft, sum);
+ } else if (!unclampedA && unclampedB) {
+ coopMatLoadTensorNV(mat_a, data_a, pos_a, sliceTensorLayoutNV(tensorLayoutAClamp, ir * BM, BM, block_k, BK) DECODEFUNCA);
+#ifdef MUL_MAT_ID
+ coopMatLoadTensorNV(mat_b, data_b, pos_b, sliceTensorLayoutNV(tensorLayoutB, ic * BN, BN, block_k, BK), tensorViewTranspose, decodeFuncB);
+#else
+ coopMatLoadTensorNV(mat_b, data_b, pos_b, sliceTensorLayoutNV(tensorLayoutB, ic * BN, BN, (block_k & ~7), BK), tensorViewTranspose);
+#endif
+ mat_a_ft = coopmat<FLOAT_TYPE, gl_ScopeWorkgroup, BM, BK, gl_MatrixUseA>(mat_a);
+ mat_b_ft = coopmat<FLOAT_TYPE, gl_ScopeWorkgroup, BK, BN, gl_MatrixUseB>(mat_b);
+ sum = coopMatMulAdd(mat_a_ft, mat_b_ft, sum);
+ } else if (!unclampedA && !unclampedB) {
+ coopMatLoadTensorNV(mat_a, data_a, pos_a, sliceTensorLayoutNV(tensorLayoutAClamp, ir * BM, BM, block_k, BK) DECODEFUNCA);
+ coopMatLoadTensorNV(mat_b, data_b, pos_b, sliceTensorLayoutNV(tensorLayoutBClamp, ic * BN, BN, block_k, BK), tensorViewTranspose);
+
+ mat_a_ft = coopmat<FLOAT_TYPE, gl_ScopeWorkgroup, BM, BK, gl_MatrixUseA>(mat_a);
+ mat_b_ft = coopmat<FLOAT_TYPE, gl_ScopeWorkgroup, BK, BN, gl_MatrixUseB>(mat_b);
+ sum = coopMatMulAdd(mat_a_ft, mat_b_ft, sum);
+ }
+ }
+ }
+
+ // Convert from ACC_TYPE to D_TYPE
+ coopmat<D_TYPE, gl_ScopeWorkgroup, BM, BN, gl_MatrixUseAccumulator> mat_d;
+ mat_d = coopmat<D_TYPE, gl_ScopeWorkgroup, BM, BN, gl_MatrixUseAccumulator>(sum);
+
+#ifdef MUL_MAT_ID
+ // Call callback to store each element, remapping row through shared memory
+ coopMatPerElementNV(mat_d, mat_d, perElemOpD, ir, ic);
+#else
+ tensorLayoutD = setTensorLayoutStrideNV(tensorLayoutD, p.stride_d, 1);
+
+ uint pos_d = batch_idx * p.batch_stride_d + ik * p.batch_stride_d * gl_NumWorkGroups.z;
+ coopMatStoreTensorNV(mat_d, data_d, pos_d, sliceTensorLayoutNV(tensorLayoutD, ic * BN, BN, ir * BM, BM), tensorViewTranspose);
+#endif
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/norm.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/norm.comp
new file mode 100644
index 00000000..6627a50b
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/norm.comp
@@ -0,0 +1,44 @@
+#version 450
+
+#include "generic_head.comp"
+#include "types.comp"
+
+#extension GL_EXT_control_flow_attributes : enable
+#define BLOCK_SIZE 512
+
+layout(local_size_x = BLOCK_SIZE, local_size_y = 1, local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer X {A_TYPE data_a[];};
+layout (binding = 1) writeonly buffer D {D_TYPE data_d[];};
+
+shared vec2 sum[BLOCK_SIZE];
+
+void main() {
+ const uint row = gl_WorkGroupID.z * 262144 + gl_WorkGroupID.y * 512 + gl_WorkGroupID.x;
+ const uint tid = gl_LocalInvocationID.x;
+
+ sum[tid] = vec2(0.0f, 0.0f);
+
+ [[unroll]] for (uint col = tid; col < p.KX; col += BLOCK_SIZE) {
+ const float xi = float(data_a[row*p.KX + col]);
+ sum[tid].x += xi;
+ sum[tid].y += xi * xi;
+ }
+
+ // sum up partial sums and write back result
+ barrier();
+ [[unroll]] for (int s = BLOCK_SIZE / 2; s > 0; s >>= 1) {
+ if (tid < s) {
+ sum[tid] += sum[tid + s];
+ }
+ barrier();
+ }
+
+ const float mean = sum[0].x / p.KX;
+ const float var = sum[0].y / p.KX - mean * mean;
+ const float inv_std = inversesqrt(var + p.param1);
+
+ [[unroll]] for (uint col = tid; col < p.KX; col += BLOCK_SIZE) {
+ data_d[row*p.KX + col] = D_TYPE((float(data_a[row*p.KX + col]) - mean) * inv_std);
+ }
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp
new file mode 100644
index 00000000..450b67fc
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp
@@ -0,0 +1,28 @@
+#version 450
+
+#include "types.comp"
+#include "generic_unary_head.comp"
+
+layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
+
+void main() {
+ const uint idx = gl_GlobalInvocationID.z * 262144 + gl_GlobalInvocationID.y * 512 + gl_GlobalInvocationID.x;
+
+ if (idx >= p.ne) {
+ return;
+ }
+
+ const uint i3 = idx / (p.ne12*p.ne11*p.ne10);
+ const uint i3_offset = i3 * p.ne12*p.ne11*p.ne10;
+ const uint i2 = (idx - i3_offset) / (p.ne11*p.ne10);
+ const uint i2_offset = i2*p.ne11*p.ne10;
+ const uint i1 = (idx - i3_offset - i2_offset) / p.ne10;
+ const uint i0 = idx - i3_offset - i2_offset - i1*p.ne10;
+
+ const uint src0_idx = i3*p.nb03 + i2*p.nb02 + i1*p.nb01 + i0*p.nb00;
+ const uint dst_idx = i3*p.nb13 + i2*p.nb12 + i1*p.nb11 + i0*p.nb10;
+
+ const bool is_src0 = i0 < p.ne00 && i1 < p.ne01 && i2 < p.ne02 && i3 < p.ne03;
+
+ data_d[get_doffset() + dst_idx] = D_TYPE(is_src0 ? data_a[get_aoffset() + src0_idx] : 0.0f);
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/pool2d.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/pool2d.comp
new file mode 100644
index 00000000..b6124411
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/pool2d.comp
@@ -0,0 +1,74 @@
+#version 450
+
+#include "types.comp"
+
+#extension GL_EXT_shader_16bit_storage : require
+
+layout(push_constant) uniform parameter {
+ uint IW; uint IH;
+ uint OW; uint OH;
+ uint OC;
+ uint pelements;
+ uint op;
+ int k0; int k1;
+ int s0; int s1;
+ int p0; int p1;
+} p;
+
+#define BLOCK_SIZE 512
+#define FLT_MAX 3.402823466e+38F
+#define OP_POOL_MAX 0u
+#define OP_POOL_AVG 1u
+
+layout (local_size_x = BLOCK_SIZE, local_size_y = 1, local_size_z = 1) in;
+
+layout(binding = 0) readonly buffer X {A_TYPE data_a[];};
+layout(binding = 1) writeonly buffer D {D_TYPE data_d[];};
+
+void main() {
+ const uint idx = gl_GlobalInvocationID.x;
+ if (idx >= p.pelements) {
+ return;
+ }
+
+ const uint O_HW = p.OW * p.OH;
+
+ const uint nc = idx / O_HW;
+ const uint cur_oh = (idx % O_HW) / p.OW;
+ const uint cur_ow = (idx % O_HW) % p.OW;
+
+ const int start_h = int(cur_oh) * p.s0 - p.p0;
+ const uint bh = max(start_h, 0);
+ const uint eh = min(start_h + p.k0, p.IH);
+
+ const int start_w = int(cur_ow) * p.s1 - p.p1;
+ const uint bw = max(start_w, 0);
+ const uint ew = min(start_w + p.k1, p.IW);
+
+ const float scale = 1.0 / float(p.k0 * p.k1);
+ float res;
+
+ if (p.op == OP_POOL_AVG) {
+ res = 0.0;
+ } else if (p.op == OP_POOL_MAX) {
+ res = -FLT_MAX;
+ } else {
+ return;
+ }
+
+ #pragma unroll
+ for (uint i = bh; i < eh; i++) {
+ #pragma unroll
+ for (uint j = bw; j < ew; j++) {
+ const float cur = D_TYPE(data_a[nc * p.IH * p.IW + i * p.IW + j]);
+
+ if (p.op == OP_POOL_AVG) {
+ res += cur * scale;
+ } else if (p.op == OP_POOL_MAX) {
+ res = max(res, cur);
+ }
+ }
+ }
+
+ data_d[nc * O_HW + cur_oh * p.OW + cur_ow] = res;
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/relu.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/relu.comp
new file mode 100644
index 00000000..52a19b62
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/relu.comp
@@ -0,0 +1,21 @@
+#version 450
+
+#include "generic_head.comp"
+#include "types.comp"
+
+#extension GL_EXT_control_flow_attributes : enable
+
+layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer X {A_TYPE data_a[];};
+layout (binding = 1) writeonly buffer D {D_TYPE data_d[];};
+
+void main() {
+ const uint i = gl_GlobalInvocationID.z * 262144 + gl_GlobalInvocationID.y * 512 + gl_GlobalInvocationID.x;
+
+ if (i >= p.KX) {
+ return;
+ }
+
+ data_d[i] = max(float(data_a[i]), 0);
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/repeat.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/repeat.comp
new file mode 100644
index 00000000..1568b141
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/repeat.comp
@@ -0,0 +1,26 @@
+#version 450
+
+#include "types.comp"
+#include "generic_unary_head.comp"
+
+layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
+
+uint src0_idx_mod(uint idx) {
+ const uint i13 = idx / (p.ne12*p.ne11*p.ne10);
+ const uint i13_offset = i13 * p.ne12*p.ne11*p.ne10;
+ const uint i12 = (idx - i13_offset) / (p.ne11*p.ne10);
+ const uint i12_offset = i12*p.ne11*p.ne10;
+ const uint i11 = (idx - i13_offset - i12_offset) / p.ne10;
+ const uint i10 = idx - i13_offset - i12_offset - i11*p.ne10;
+ return (i13 % p.ne03)*p.nb03 + (i12 % p.ne02)*p.nb02 + (i11 % p.ne01)*p.nb01 + (i10 % p.ne00)*p.nb00;
+}
+
+void main() {
+ const uint idx = get_idx();
+
+ if (idx >= p.ne) {
+ return;
+ }
+
+ data_d[get_doffset() + dst_idx(idx)] = D_TYPE(data_a[get_aoffset() + src0_idx_mod(idx)]);
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm.comp
new file mode 100644
index 00000000..b554400b
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm.comp
@@ -0,0 +1,42 @@
+#version 450
+
+#include "generic_head.comp"
+#include "types.comp"
+
+#extension GL_EXT_control_flow_attributes : enable
+#define BLOCK_SIZE 512
+
+layout(local_size_x = BLOCK_SIZE, local_size_y = 1, local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer X {A_TYPE data_a[];};
+layout (binding = 1) writeonly buffer D {D_TYPE data_d[];};
+
+shared FLOAT_TYPE sum[BLOCK_SIZE];
+
+void main() {
+ const uint row = gl_WorkGroupID.z * 262144 + gl_WorkGroupID.y * 512 + gl_WorkGroupID.x;
+ const uint tid = gl_LocalInvocationID.x;
+
+ sum[tid] = FLOAT_TYPE(0.0f); // partial sum for thread in warp
+
+ [[unroll]] for (uint col = tid; col < p.KX; col += BLOCK_SIZE) {
+ const FLOAT_TYPE xi = FLOAT_TYPE(data_a[row*p.KX + col]);
+ sum[tid] += xi * xi;
+ }
+
+ // sum up partial sums and write back result
+ barrier();
+ [[unroll]] for (int s = BLOCK_SIZE / 2; s > 0; s >>= 1) {
+ if (tid < s) {
+ sum[tid] += sum[tid + s];
+ }
+ barrier();
+ }
+
+ const FLOAT_TYPE mean = sum[0] / FLOAT_TYPE(p.KX);
+ const FLOAT_TYPE scale = inversesqrt(mean + FLOAT_TYPE(p.param1));
+
+ [[unroll]] for (uint col = tid; col < p.KX; col += BLOCK_SIZE) {
+ data_d[row*p.KX + col] = D_TYPE(scale * FLOAT_TYPE(data_a[row*p.KX + col]));
+ }
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/rope_head.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/rope_head.comp
new file mode 100644
index 00000000..574b51ca
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/rope_head.comp
@@ -0,0 +1,49 @@
+#include "types.comp"
+
+#extension GL_EXT_shader_16bit_storage : require
+#extension GL_EXT_spirv_intrinsics: enable
+
+#if RTE16
+spirv_execution_mode(capabilities = [4467], 4462, 16); // RoundingModeRTE, 16 bits
+#endif
+
+layout(local_size_x = 1, local_size_y = 256, local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer X {A_TYPE data_a[];};
+layout (binding = 1) readonly buffer Y {int data_pos[];};
+layout (binding = 2) readonly buffer Z {float data_ff[];};
+layout (binding = 3) writeonly buffer D {D_TYPE data_d[];};
+
+layout (push_constant) uniform parameter {
+ uint ncols;
+ uint n_dims;
+ float freq_scale;
+ uint p_delta_rows;
+ float freq_base;
+ float ext_factor;
+ float attn_factor;
+ float corr_dims[2];
+ float theta_scale;
+ uint has_ff;
+} p;
+
+float rope_yarn_ramp(const float low, const float high, const uint i0) {
+ const float y = (i0 / 2 - low) / max(0.001f, high - low);
+ return 1.0f - min(1.0f, max(0.0f, y));
+}
+
+void rope_yarn(const float theta_extrap, const uint i0, out float cos_theta, out float sin_theta) {
+ float mscale = p.attn_factor;
+ // Get n-d rotational scaling corrected for extrapolation
+ float theta_interp = p.freq_scale * theta_extrap;
+ float theta = theta_interp;
+ if (p.ext_factor != 0.0f) {
+ float ramp_mix = rope_yarn_ramp(p.corr_dims[0], p.corr_dims[1], i0) * p.ext_factor;
+ theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix;
+
+ // Get n-d magnitude scaling corrected for interpolation
+ mscale *= 1.0f + 0.1f * log(1.0f / p.freq_scale);
+ }
+ cos_theta = cos(theta) * mscale;
+ sin_theta = sin(theta) * mscale;
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/rope_neox.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/rope_neox.comp
new file mode 100644
index 00000000..83b46b69
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/rope_neox.comp
@@ -0,0 +1,37 @@
+#version 450
+
+#include "rope_head.comp"
+
+void main() {
+ const uint col = gl_GlobalInvocationID.y * 2;
+ const uint row = gl_GlobalInvocationID.x;
+
+ if (col >= p.ncols) {
+ return;
+ }
+
+ if (col >= p.n_dims) {
+ const uint i = row*p.ncols + col;
+
+ data_d[i + 0] = data_a[i + 0];
+ data_d[i + 1] = data_a[i + 1];
+
+ return;
+ }
+
+ const uint i = row*p.ncols + col/2;
+ const uint i2 = row/p.p_delta_rows;
+
+ const float theta_base = data_pos[i2] * pow(p.theta_scale, col/2.0f);
+
+ const float freq_factor = p.has_ff != 0 ? data_ff[col/2] : 1.0f;
+
+ float cos_theta, sin_theta;
+ rope_yarn(theta_base / freq_factor, col, cos_theta, sin_theta);
+
+ const float x0 = float(data_a[i + 0]);
+ const float x1 = float(data_a[i + p.n_dims/2]);
+
+ data_d[i + 0] = D_TYPE(x0*cos_theta - x1*sin_theta);
+ data_d[i + p.n_dims/2] = D_TYPE(x0*sin_theta + x1*cos_theta);
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/rope_norm.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/rope_norm.comp
new file mode 100644
index 00000000..e416ad93
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/rope_norm.comp
@@ -0,0 +1,37 @@
+#version 450
+
+#include "rope_head.comp"
+
+void main() {
+ const uint col = gl_GlobalInvocationID.y * 2;
+ const uint row = gl_GlobalInvocationID.x;
+
+ if (col >= p.ncols) {
+ return;
+ }
+
+ if (col >= p.n_dims) {
+ const uint i = row*p.ncols + col;
+
+ data_d[i + 0] = data_a[i + 0];
+ data_d[i + 1] = data_a[i + 1];
+
+ return;
+ }
+
+ const uint i = row*p.ncols + col;
+ const uint i2 = row/p.p_delta_rows;
+
+ const float theta_base = data_pos[i2] * pow(p.theta_scale, col/2.0f);
+
+ const float freq_factor = p.has_ff != 0 ? data_ff[col/2] : 1.0f;
+
+ float cos_theta, sin_theta;
+ rope_yarn(theta_base / freq_factor, col, cos_theta, sin_theta);
+
+ const float x0 = float(data_a[i + 0]);
+ const float x1 = float(data_a[i + 1]);
+
+ data_d[i + 0] = D_TYPE(x0*cos_theta - x1*sin_theta);
+ data_d[i + 1] = D_TYPE(x0*sin_theta + x1*cos_theta);
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/scale.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/scale.comp
new file mode 100644
index 00000000..4663428d
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/scale.comp
@@ -0,0 +1,24 @@
+#version 450
+
+#include "types.comp"
+#include "generic_unary_head.comp"
+
+const uint num_threads = 128;
+
+layout(local_size_x = num_threads, local_size_y = 1, local_size_z = 1) in;
+
+void main() {
+ uint idx = get_idx();
+
+ // num_threads * num_iter must equal 512, to match the wg_denoms and get_idx calculation
+ const uint num_iter = 4;
+
+ [[unroll]] for (uint i = 0; i < num_iter; ++i) {
+ if (idx >= p.ne) {
+ continue;
+ }
+
+ data_d[get_doffset() + idx] = D_TYPE(FLOAT_TYPE(data_a[get_aoffset() + idx]) * FLOAT_TYPE(p.param1));
+ idx += num_threads;
+ }
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/silu.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/silu.comp
new file mode 100644
index 00000000..4d36f88e
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/silu.comp
@@ -0,0 +1,22 @@
+#version 450
+
+#include "generic_head.comp"
+#include "types.comp"
+
+#extension GL_EXT_control_flow_attributes : enable
+
+layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer X {A_TYPE data_a[];};
+layout (binding = 1) writeonly buffer D {D_TYPE data_d[];};
+
+void main() {
+ const uint i = gl_GlobalInvocationID.z * 262144 + gl_GlobalInvocationID.y * 512 + gl_GlobalInvocationID.x;
+
+ if (i >= p.KX) {
+ return;
+ }
+
+ const float xi = float(data_a[i]);
+ data_d[i] = D_TYPE(xi / (1.0f + exp(-xi)));
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/sin.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/sin.comp
new file mode 100644
index 00000000..d7c15a16
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/sin.comp
@@ -0,0 +1,17 @@
+#version 450
+
+#include "types.comp"
+#include "generic_unary_head.comp"
+
+layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
+
+void main() {
+ const uint idx = get_idx();
+
+ if (idx >= p.ne) {
+ return;
+ }
+
+ const FLOAT_TYPE val = FLOAT_TYPE(data_a[get_aoffset() + src0_idx(idx)]);
+ data_d[get_doffset() + dst_idx(idx)] = D_TYPE(sin(val));
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/soft_max.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/soft_max.comp
new file mode 100644
index 00000000..a25808e1
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/soft_max.comp
@@ -0,0 +1,174 @@
+#version 450
+
+#extension GL_EXT_shader_explicit_arithmetic_types_float16 : require
+#extension GL_EXT_control_flow_attributes : enable
+
+layout (push_constant) uniform parameter
+{
+ uint KX;
+ uint KY;
+ float scale;
+ float max_bias;
+ float m0;
+ float m1;
+ uint n_head_log2;
+ uint nrows_x;
+} p;
+
+#include "types.comp"
+
+layout(constant_id = 0) const uint BLOCK_SIZE = 32;
+layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer X {A_TYPE data_a[];};
+layout (binding = 1) readonly buffer Y {B_TYPE data_b[];};
+layout (binding = 2) buffer D {D_TYPE data_d[];};
+
+shared FLOAT_TYPE vals[BLOCK_SIZE];
+
+// num_iters is the number of BLOCK_SIZE loop iterations needed to iterate
+// over all the columns. The main function tries to pass a constant here,
+// as if it were a template function, to allow unrolling.
+void soft_max(uint num_iters) {
+ const uint tid = gl_LocalInvocationID.x;
+ const uint rowx = gl_WorkGroupID.z * 262144 + gl_WorkGroupID.y * 512 + gl_WorkGroupID.x;
+ const uint rowy = (p.KY > 0) ? (rowx % p.KY) : 0;
+
+ if (rowx >= p.nrows_x) {
+ return;
+ }
+
+ float slope = 1.0f;
+
+ // ALiBi
+ if (p.max_bias > 0.0f) {
+ const uint h = rowx/p.KY; // head index
+
+ const float base = h < p.n_head_log2 ? p.m0 : p.m1;
+ const uint exp = h < p.n_head_log2 ? h + 1 : 2*(h - p.n_head_log2) + 1;
+
+ slope = pow(base, exp);
+ }
+
+ // Find max
+ FLOAT_TYPE max_val = uintBitsToFloat(0xFF800000);
+
+ // Cache values while we compute the max, so we don't need to read them
+ // again when we're ready to compute exp(x-max).
+ const uint DATA_CACHE_SIZE = 16;
+ FLOAT_TYPE data_cache[DATA_CACHE_SIZE];
+
+ [[unroll]] for (uint col0 = 0, idx = 0; idx < num_iters; col0 += BLOCK_SIZE, ++idx) {
+ const uint col = col0 + tid;
+
+ FLOAT_TYPE a = FLOAT_TYPE(0);
+ if (col < p.KX) {
+ a = data_a[rowx * p.KX + col];
+ }
+
+ FLOAT_TYPE b = FLOAT_TYPE(0);
+ if (p.KY > 0 && col < p.KX) {
+ b = data_b[rowy * p.KX + col];
+ }
+
+ FLOAT_TYPE v = a * p.scale + slope * b;
+
+ if (col < p.KX) {
+ max_val = max(max_val, v);
+ }
+
+ if (idx < DATA_CACHE_SIZE) {
+ data_cache[idx] = v;
+ }
+ }
+
+ // reduce across the workgroup
+ vals[tid] = max_val;
+ barrier();
+ [[unroll]] for (uint s = BLOCK_SIZE / 2; s > 0; s >>= 1) {
+ if (tid < s) {
+ vals[tid] = max(vals[tid], vals[tid + s]);
+ }
+ barrier();
+ }
+
+ max_val = vals[0];
+ barrier();
+
+ FLOAT_TYPE sum = FLOAT_TYPE(0.0f);
+
+ // Compute sum{exp(x - max)}
+ [[unroll]] for (uint col0 = 0, idx = 0; idx < num_iters; col0 += BLOCK_SIZE, ++idx) {
+ const uint col = col0 + tid;
+
+ if (col >= p.KX) {
+ break;
+ }
+
+ // compute exp(a*scale+b*slope), add it to sum, and cache the new value
+ // in data_cache if possible.
+ const uint i = rowx * p.KX + col;
+ FLOAT_TYPE val;
+ if (idx < DATA_CACHE_SIZE) {
+ val = exp(data_cache[idx] - max_val);
+ } else {
+ val = exp(FLOAT_TYPE(data_a[i]) * p.scale + (p.KY > 0 ? slope * FLOAT_TYPE(data_b[rowy * p.KX + col]) : FLOAT_TYPE(0.0f)) - max_val);
+ }
+ sum += val;
+ if (idx < DATA_CACHE_SIZE) {
+ data_cache[idx] = val;
+ } else {
+ data_d[i] = D_TYPE(val);
+ }
+ }
+
+ // reduce across the workgroup
+ vals[tid] = sum;
+ barrier();
+ [[unroll]] for (uint s = BLOCK_SIZE / 2; s > 0; s >>= 1) {
+ if (tid < s) {
+ vals[tid] += vals[tid + s];
+ }
+ barrier();
+ }
+ sum = vals[0];
+
+ FLOAT_TYPE rcpdivisor = 1.0/sum;
+
+ [[unroll]] for (uint col0 = 0, idx = 0; idx < num_iters; col0 += BLOCK_SIZE, ++idx) {
+ const uint col = col0 + tid;
+
+ if (col >= p.KX) {
+ continue;
+ }
+
+ if (idx < DATA_CACHE_SIZE) {
+ data_d[rowx*p.KX + col] = D_TYPE(data_cache[idx] * rcpdivisor);
+ } else {
+ data_d[rowx*p.KX + col] *= D_TYPE(rcpdivisor);
+ }
+ }
+}
+
+void main() {
+ // instantiate the soft_max function for several different
+ // dimensions, to allow loop unrolling
+ uint num_blocks = (p.KX + BLOCK_SIZE - 1) / BLOCK_SIZE;
+ if (num_blocks > 32) {
+ soft_max(num_blocks);
+ } else if (num_blocks > 16) {
+ soft_max(32);
+ } else if (num_blocks > 8) {
+ soft_max(16);
+ } else if (num_blocks > 4) {
+ soft_max(8);
+ } else if (num_blocks == 4) {
+ soft_max(4);
+ } else if (num_blocks == 3) {
+ soft_max(3);
+ } else if (num_blocks == 2) {
+ soft_max(2);
+ } else if (num_blocks == 1) {
+ soft_max(1);
+ }
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/square.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/square.comp
new file mode 100644
index 00000000..ef43598b
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/square.comp
@@ -0,0 +1,17 @@
+#version 450
+
+#include "types.comp"
+#include "generic_unary_head.comp"
+
+layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
+
+void main() {
+ const uint idx = get_idx();
+
+ if (idx >= p.ne) {
+ return;
+ }
+
+ const FLOAT_TYPE val = FLOAT_TYPE(data_a[get_aoffset() + src0_idx(idx)]);
+ data_d[get_doffset() + dst_idx(idx)] = D_TYPE(val * val);
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/sum_rows.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/sum_rows.comp
new file mode 100644
index 00000000..961e5ffa
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/sum_rows.comp
@@ -0,0 +1,37 @@
+#version 450
+
+#include "generic_head.comp"
+#include "types.comp"
+
+#extension GL_EXT_control_flow_attributes : enable
+layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
+layout (binding = 1) writeonly buffer D {D_TYPE data_d[];};
+
+layout (constant_id = 0) const uint BLOCK_SIZE = 32;
+
+shared FLOAT_TYPE tmp[BLOCK_SIZE];
+
+void main() {
+ const uint row = gl_WorkGroupID.z * 262144 + gl_WorkGroupID.y * 512 + gl_WorkGroupID.x;
+ const uint col = gl_LocalInvocationID.x;
+
+ tmp[col] = FLOAT_TYPE(0.0f);
+
+ for (uint i = col; i < p.KX; i += BLOCK_SIZE) {
+ tmp[col] += FLOAT_TYPE(data_a[row*p.KX + i]);
+ }
+
+ barrier();
+ [[unroll]] for (int s = int(BLOCK_SIZE) / 2; s > 0; s >>= 1) {
+ if (col < s) {
+ tmp[col] += tmp[col + s];
+ }
+ barrier();
+ }
+
+ if (col == 0) {
+ data_d[row] = D_TYPE(tmp[0]);
+ }
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/tanh.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/tanh.comp
new file mode 100644
index 00000000..495f966b
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/tanh.comp
@@ -0,0 +1,20 @@
+#version 450
+
+#include "generic_head.comp"
+#include "types.comp"
+
+#extension GL_EXT_control_flow_attributes : enable
+
+layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer X {A_TYPE data_a[];};
+layout (binding = 1) writeonly buffer D {D_TYPE data_d[];};
+
+void main() {
+ const uint i = gl_GlobalInvocationID.z * 262144 + gl_GlobalInvocationID.y * 512 + gl_GlobalInvocationID.x;
+
+ if (i >= p.KX) {
+ return;
+ }
+ data_d[i] = D_TYPE(1. - 2. / (exp(2.*data_a[i]) + 1.));
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/test_coopmat2_support.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/test_coopmat2_support.comp
new file mode 100644
index 00000000..28eb24e1
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/test_coopmat2_support.comp
@@ -0,0 +1,7 @@
+#version 460
+
+#extension GL_NV_cooperative_matrix2 : require
+
+void main()
+{
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/timestep_embedding.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/timestep_embedding.comp
new file mode 100644
index 00000000..79e065a9
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/timestep_embedding.comp
@@ -0,0 +1,41 @@
+#version 450
+
+#extension GL_EXT_shader_16bit_storage : require
+
+layout (push_constant) uniform parameter
+{
+ uint nb1;
+ uint dim;
+ uint max_period;
+} p;
+
+#include "types.comp"
+
+#extension GL_EXT_control_flow_attributes : enable
+#define BLOCK_SIZE 256
+
+layout(local_size_x = BLOCK_SIZE, local_size_y = 1, local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer X {A_TYPE data_a[];};
+layout (binding = 1) writeonly buffer D {D_TYPE data_d[];};
+
+void main() {
+ const uint i = gl_WorkGroupID.y;
+ const uint j = gl_GlobalInvocationID.x;
+ const uint d_offset = i * p.nb1;
+
+ if (p.dim % 2 != 0 && j == ((p.dim + 1) / 2)) {
+ data_d[d_offset + p.dim] = 0.f;
+ }
+
+ const uint half_dim = p.dim / 2;
+ if (j >= half_dim) {
+ return;
+ }
+
+ const float timestep = float(data_a[i]);
+ const float freq = float(exp(-log(p.max_period) * j / half_dim));
+ const float arg = timestep * freq;
+ data_d[d_offset + j] = D_TYPE(cos(arg));
+ data_d[d_offset + j + half_dim] = D_TYPE(sin(arg));
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/types.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/types.comp
new file mode 100644
index 00000000..eecc47f3
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/types.comp
@@ -0,0 +1,323 @@
+
+#if !defined(GGML_TYPES_COMP)
+#define GGML_TYPES_COMP
+
+#extension GL_EXT_shader_explicit_arithmetic_types : require
+
+#if defined(DATA_A_F32)
+#define QUANT_K 1
+#define QUANT_R 1
+
+#if !defined(LOAD_VEC_A) || LOAD_VEC_A == 1
+#define A_TYPE float
+#elif LOAD_VEC_A == 4
+#define A_TYPE vec4
+#elif LOAD_VEC_A == 8
+#define A_TYPE mat2x4
+#endif
+#endif
+
+#if defined(DATA_A_F16)
+#define QUANT_K 1
+#define QUANT_R 1
+
+#if !defined(LOAD_VEC_A) || LOAD_VEC_A == 1
+#define A_TYPE float16_t
+#elif LOAD_VEC_A == 4
+#define A_TYPE f16vec4
+#elif LOAD_VEC_A == 8
+#define A_TYPE f16mat2x4
+#endif
+#endif
+
+#define QUANT_K_Q4_0 32
+#define QUANT_R_Q4_0 2
+
+struct block_q4_0
+{
+ float16_t d;
+ uint8_t qs[16];
+};
+struct block_q4_0_packed16
+{
+ float16_t d;
+ uint16_t qs[16/2];
+};
+
+#if defined(DATA_A_Q4_0)
+#define QUANT_K QUANT_K_Q4_0
+#define QUANT_R QUANT_R_Q4_0
+#define A_TYPE block_q4_0
+#define A_TYPE_PACKED16 block_q4_0_packed16
+#endif
+
+#define QUANT_K_Q4_1 32
+#define QUANT_R_Q4_1 2
+
+struct block_q4_1
+{
+ float16_t d;
+ float16_t m;
+ uint8_t qs[16];
+};
+
+struct block_q4_1_packed16
+{
+ float16_t d;
+ float16_t m;
+ uint16_t qs[16/2];
+};
+
+#if defined(DATA_A_Q4_1)
+#define QUANT_K QUANT_K_Q4_1
+#define QUANT_R QUANT_R_Q4_1
+#define A_TYPE block_q4_1
+#define A_TYPE_PACKED16 block_q4_1_packed16
+#endif
+
+#define QUANT_K_Q5_0 32
+#define QUANT_R_Q5_0 2
+
+struct block_q5_0
+{
+ float16_t d;
+ uint16_t qh[2];
+ uint8_t qs[16];
+};
+
+struct block_q5_0_packed16
+{
+ float16_t d;
+ uint16_t qh[2];
+ uint16_t qs[16/2];
+};
+
+#if defined(DATA_A_Q5_0)
+#define QUANT_K QUANT_K_Q5_0
+#define QUANT_R QUANT_R_Q5_0
+#define A_TYPE block_q5_0
+#define A_TYPE_PACKED16 block_q5_0_packed16
+#endif
+
+#define QUANT_K_Q5_1 32
+#define QUANT_R_Q5_1 2
+
+struct block_q5_1
+{
+ float16_t d;
+ float16_t m;
+ uint qh;
+ uint8_t qs[16];
+};
+
+struct block_q5_1_packed16
+{
+ float16_t d;
+ float16_t m;
+ uint qh;
+ uint16_t qs[16/2];
+};
+
+#if defined(DATA_A_Q5_1)
+#define QUANT_K QUANT_K_Q5_1
+#define QUANT_R QUANT_R_Q5_1
+#define A_TYPE block_q5_1
+#define A_TYPE_PACKED16 block_q5_1_packed16
+#endif
+
+#define QUANT_K_Q8_0 32
+#define QUANT_R_Q8_0 1
+
+struct block_q8_0
+{
+ float16_t d;
+ int8_t qs[32];
+};
+struct block_q8_0_packed16
+{
+ float16_t d;
+ uint16_t qs[32/2];
+};
+
+#if defined(DATA_A_Q8_0)
+#define QUANT_K QUANT_K_Q8_0
+#define QUANT_R QUANT_R_Q8_0
+#define A_TYPE block_q8_0
+#define A_TYPE_PACKED16 block_q8_0_packed16
+#endif
+
+// K-quants
+#define QUANT_K_Q2_K 256
+
+struct block_q2_K
+{
+ uint8_t scales[QUANT_K_Q2_K/16];
+ uint8_t qs[QUANT_K_Q2_K/4];
+ f16vec2 d;
+};
+
+struct block_q2_K_packed16
+{
+ uint16_t scales[QUANT_K_Q2_K/16/2];
+ uint16_t qs[QUANT_K_Q2_K/4/2];
+ f16vec2 d;
+};
+
+struct block_q2_K_packed32
+{
+ uint32_t scales[QUANT_K_Q2_K/16/4];
+ uint32_t qs[QUANT_K_Q2_K/4/4];
+ f16vec2 d;
+};
+
+#if defined(DATA_A_Q2_K)
+#define QUANT_K QUANT_K_Q2_K
+#define A_TYPE block_q2_K
+#define A_TYPE_PACKED16 block_q2_K_packed16
+#define A_TYPE_PACKED32 block_q2_K_packed32
+#endif
+
+#define QUANT_K_Q3_K 256
+
+struct block_q3_K
+{
+ uint8_t hmask[QUANT_K_Q3_K/8];
+ uint8_t qs[QUANT_K_Q3_K/4];
+ uint8_t scales[12];
+ float16_t d;
+};
+
+struct block_q3_K_packed16
+{
+ uint16_t hmask[QUANT_K_Q3_K/8/2];
+ uint16_t qs[QUANT_K_Q3_K/4/2];
+ uint16_t scales[12/2];
+ float16_t d;
+};
+
+#if defined(DATA_A_Q3_K)
+#define QUANT_K QUANT_K_Q3_K
+#define A_TYPE block_q3_K
+#define A_TYPE_PACKED16 block_q3_K_packed16
+#endif
+
+#define QUANT_K_Q4_K 256
+
+struct block_q4_K
+{
+ f16vec2 d;
+ uint8_t scales[3*QUANT_K_Q4_K/64];
+ uint8_t qs[QUANT_K_Q4_K/2];
+};
+
+struct block_q4_K_packed16
+{
+ f16vec2 d;
+ uint16_t scales[3*QUANT_K_Q4_K/64/2];
+ uint16_t qs[QUANT_K_Q4_K/2/2];
+};
+
+struct block_q4_K_packed32
+{
+ f16vec2 d;
+ uint32_t scales[3*QUANT_K_Q4_K/64/4];
+ uint32_t qs[QUANT_K_Q4_K/2/4];
+};
+
+#if defined(DATA_A_Q4_K)
+#define QUANT_K QUANT_K_Q4_K
+#define A_TYPE block_q4_K
+#define A_TYPE_PACKED16 block_q4_K_packed16
+#define A_TYPE_PACKED32 block_q4_K_packed32
+#endif
+
+#define QUANT_K_Q5_K 256
+
+struct block_q5_K
+{
+ f16vec2 d;
+ uint8_t scales[12];
+ uint8_t qh[QUANT_K_Q5_K/8];
+ uint8_t qs[QUANT_K_Q5_K/2];
+};
+
+struct block_q5_K_packed16
+{
+ f16vec2 d;
+ uint16_t scales[12/2];
+ uint16_t qh[QUANT_K_Q5_K/8/2];
+ uint16_t qs[QUANT_K_Q5_K/2/2];
+};
+
+#if defined(DATA_A_Q5_K)
+#define QUANT_K QUANT_K_Q5_K
+#define A_TYPE block_q5_K
+#define A_TYPE_PACKED16 block_q5_K_packed16
+#endif
+
+#define QUANT_K_Q6_K 256
+
+struct block_q6_K
+{
+ uint8_t ql[QUANT_K_Q6_K/2];
+ uint8_t qh[QUANT_K_Q6_K/4];
+ int8_t scales[QUANT_K_Q6_K/16];
+ float16_t d;
+};
+
+struct block_q6_K_packed16
+{
+ uint16_t ql[QUANT_K_Q6_K/2/2];
+ uint16_t qh[QUANT_K_Q6_K/4/2];
+ int8_t scales[QUANT_K_Q6_K/16];
+ float16_t d;
+};
+
+#if defined(DATA_A_Q6_K)
+#define QUANT_K QUANT_K_Q6_K
+#define A_TYPE block_q6_K
+#define A_TYPE_PACKED16 block_q6_K_packed16
+#endif
+
+// IQuants
+
+#define QUANT_K_IQ4_NL 32
+#define QUANT_R_IQ4_NL 2
+
+struct block_iq4_nl
+{
+ float16_t d;
+ uint8_t qs[QUANT_K_IQ4_NL/2];
+};
+
+struct block_iq4_nl_packed16
+{
+ float16_t d;
+ uint16_t qs[QUANT_K_IQ4_NL/2/2];
+};
+
+#if defined(DATA_A_IQ4_NL)
+
+const int8_t kvalues_iq4nl_const[16] = {
+ int8_t(-127), int8_t(-104), int8_t(-83), int8_t(-65), int8_t(-49), int8_t(-35), int8_t(-22), int8_t(-10),
+ int8_t(1), int8_t(13), int8_t(25), int8_t(38), int8_t(53), int8_t(69), int8_t(89), int8_t(113)
+};
+
+shared FLOAT_TYPE kvalues_iq4nl[16];
+
+void init_iq4nl_shmem()
+{
+ // copy the table into shared memory and sync
+ if (gl_LocalInvocationIndex.x < 16) {
+ kvalues_iq4nl[gl_LocalInvocationIndex.x] = FLOAT_TYPE(kvalues_iq4nl_const[gl_LocalInvocationIndex.x]);
+ }
+ barrier();
+}
+
+#define QUANT_K QUANT_K_IQ4_NL
+#define QUANT_R QUANT_R_IQ4_NL
+#define A_TYPE block_iq4_nl
+#define A_TYPE_PACKED16 block_iq4_nl_packed16
+#endif
+
+#endif // !defined(GGML_TYPES_COMP)
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/upscale.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/upscale.comp
new file mode 100644
index 00000000..6f607380
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/upscale.comp
@@ -0,0 +1,36 @@
+#version 450
+
+layout (push_constant) uniform parameter
+{
+ uint ne; uint a_offset; uint d_offset;
+ uint nb00; uint nb01; uint nb02; uint nb03;
+ uint ne10; uint ne11; uint ne12; uint ne13;
+ float sf0; float sf1; float sf2; float sf3;
+} p;
+
+#include "types.comp"
+
+layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
+layout (binding = 1) writeonly buffer D {D_TYPE data_d[];};
+
+void main() {
+ const uint idx = gl_GlobalInvocationID.z * 262144 + gl_GlobalInvocationID.y * 512 + gl_GlobalInvocationID.x;
+
+ if (idx >= p.ne) {
+ return;
+ }
+
+ const uint i10 = idx % p.ne10;
+ const uint i11 = (idx / p.ne10) % p.ne11;
+ const uint i12 = (idx / (p.ne10 * p.ne11)) % p.ne12;
+ const uint i13 = (idx / (p.ne10 * p.ne11 * p.ne12)) % p.ne13;
+
+ const uint i00 = uint(i10 / p.sf0);
+ const uint i01 = uint(i11 / p.sf1);
+ const uint i02 = uint(i12 / p.sf2);
+ const uint i03 = uint(i13 / p.sf3);
+
+ data_d[p.d_offset + idx] = D_TYPE(data_a[p.a_offset + i03 * p.nb03 + i02 * p.nb02 + i01 * p.nb01 + i00 * p.nb00]);
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp
new file mode 100644
index 00000000..8111c063
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp
@@ -0,0 +1,594 @@
+
+
+#include <iostream>
+#include <fstream>
+#include <sstream>
+#include <string>
+#include <stdexcept>
+#include <array>
+#include <vector>
+#include <map>
+#include <thread>
+#include <mutex>
+#include <future>
+#include <queue>
+#include <condition_variable>
+#include <cstdio>
+#include <cstring>
+#include <cstdlib>
+#include <cassert>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#ifdef _WIN32
+ #include <windows.h>
+ #include <direct.h> // For _mkdir on Windows
+ #include <algorithm> // For std::replace on w64devkit
+#else
+ #include <unistd.h>
+ #include <sys/wait.h>
+ #include <fcntl.h>
+#endif
+
+#include <vulkan/vulkan_core.h>
+
+#define ASYNCIO_CONCURRENCY 64
+
+std::mutex lock;
+std::vector<std::pair<std::string, std::string>> shader_fnames;
+
+std::string GLSLC = "glslc";
+std::string input_dir = "vulkan-shaders";
+std::string output_dir = "/tmp";
+std::string target_hpp = "ggml-vulkan-shaders.hpp";
+std::string target_cpp = "ggml-vulkan-shaders.cpp";
+bool no_clean = false;
+
+const std::vector<std::string> type_names = {
+ "f32",
+ "f16",
+ "q4_0",
+ "q4_1",
+ "q5_0",
+ "q5_1",
+ "q8_0",
+ "q2_k",
+ "q3_k",
+ "q4_k",
+ "q5_k",
+ "q6_k",
+ "iq4_nl"
+};
+
+namespace {
+void execute_command(const std::string& command, std::string& stdout_str, std::string& stderr_str) {
+#ifdef _WIN32
+ HANDLE stdout_read, stdout_write;
+ HANDLE stderr_read, stderr_write;
+ SECURITY_ATTRIBUTES sa = { sizeof(SECURITY_ATTRIBUTES), NULL, TRUE };
+
+ if (!CreatePipe(&stdout_read, &stdout_write, &sa, 0) ||
+ !SetHandleInformation(stdout_read, HANDLE_FLAG_INHERIT, 0)) {
+ throw std::runtime_error("Failed to create stdout pipe");
+ }
+
+ if (!CreatePipe(&stderr_read, &stderr_write, &sa, 0) ||
+ !SetHandleInformation(stderr_read, HANDLE_FLAG_INHERIT, 0)) {
+ throw std::runtime_error("Failed to create stderr pipe");
+ }
+
+ PROCESS_INFORMATION pi;
+ STARTUPINFOA si = {};
+ si.cb = sizeof(STARTUPINFOA);
+ si.dwFlags = STARTF_USESTDHANDLES;
+ si.hStdOutput = stdout_write;
+ si.hStdError = stderr_write;
+
+ std::vector<char> cmd(command.begin(), command.end());
+ cmd.push_back('\0');
+
+ if (!CreateProcessA(NULL, cmd.data(), NULL, NULL, TRUE, 0, NULL, NULL, &si, &pi)) {
+ throw std::runtime_error("Failed to create process");
+ }
+
+ CloseHandle(stdout_write);
+ CloseHandle(stderr_write);
+
+ std::array<char, 128> buffer;
+ DWORD bytes_read;
+
+ while (ReadFile(stdout_read, buffer.data(), (DWORD)buffer.size(), &bytes_read, NULL) && bytes_read > 0) {
+ stdout_str.append(buffer.data(), bytes_read);
+ }
+
+ while (ReadFile(stderr_read, buffer.data(), (DWORD)buffer.size(), &bytes_read, NULL) && bytes_read > 0) {
+ stderr_str.append(buffer.data(), bytes_read);
+ }
+
+ CloseHandle(stdout_read);
+ CloseHandle(stderr_read);
+ WaitForSingleObject(pi.hProcess, INFINITE);
+ CloseHandle(pi.hProcess);
+ CloseHandle(pi.hThread);
+#else
+    int stdout_pipe[2];
+ int stderr_pipe[2];
+
+ if (pipe(stdout_pipe) != 0 || pipe(stderr_pipe) != 0) {
+ throw std::runtime_error("Failed to create pipes");
+ }
+
+ pid_t pid = fork();
+ if (pid < 0) {
+ throw std::runtime_error("Failed to fork process");
+ }
+
+ if (pid == 0) {
+ close(stdout_pipe[0]);
+ close(stderr_pipe[0]);
+ dup2(stdout_pipe[1], STDOUT_FILENO);
+ dup2(stderr_pipe[1], STDERR_FILENO);
+ close(stdout_pipe[1]);
+ close(stderr_pipe[1]);
+ execl("/bin/sh", "sh", "-c", command.c_str(), (char*) nullptr);
+ _exit(EXIT_FAILURE);
+ } else {
+ close(stdout_pipe[1]);
+ close(stderr_pipe[1]);
+
+ std::array<char, 128> buffer;
+ ssize_t bytes_read;
+
+ while ((bytes_read = read(stdout_pipe[0], buffer.data(), buffer.size())) > 0) {
+ stdout_str.append(buffer.data(), bytes_read);
+ }
+
+ while ((bytes_read = read(stderr_pipe[0], buffer.data(), buffer.size())) > 0) {
+ stderr_str.append(buffer.data(), bytes_read);
+ }
+
+ close(stdout_pipe[0]);
+ close(stderr_pipe[0]);
+ waitpid(pid, nullptr, 0);
+ }
+#endif
+}
+
+bool directory_exists(const std::string& path) {
+ struct stat info;
+ if (stat(path.c_str(), &info) != 0) {
+ return false; // Path doesn't exist or can't be accessed
+ }
+ return (info.st_mode & S_IFDIR) != 0; // Check if it is a directory
+}
+
+bool create_directory(const std::string& path) {
+#ifdef _WIN32
+ return _mkdir(path.c_str()) == 0 || errno == EEXIST; // EEXIST means the directory already exists
+#else
+ return mkdir(path.c_str(), 0755) == 0 || errno == EEXIST; // 0755 is the directory permissions
+#endif
+}
+
+std::string to_uppercase(const std::string& input) {
+ std::string result = input;
+ for (char& c : result) {
+ c = std::toupper(c);
+ }
+ return result;
+}
+
+bool string_ends_with(const std::string& str, const std::string& suffix) {
+ if (suffix.size() > str.size()) {
+ return false;
+ }
+ return std::equal(suffix.rbegin(), suffix.rend(), str.rbegin());
+}
+
+static const char path_separator = '/';
+
+std::string join_paths(const std::string& path1, const std::string& path2) {
+ return path1 + path_separator + path2;
+}
+
+std::string basename(const std::string &path) {
+ return path.substr(path.find_last_of("/\\") + 1);
+}
+
+// variables to track the number of compiles in progress
+static uint32_t compile_count = 0;
+static std::mutex compile_count_mutex;
+static std::condition_variable compile_count_cond;
+
+void string_to_spv_func(const std::string& _name, const std::string& in_fname, const std::map<std::string, std::string>& defines, bool fp16 = true, bool coopmat = false, bool coopmat2 = false, bool f16acc = false) {
+ std::string name = _name + (f16acc ? "_f16acc" : "") + (coopmat ? "_coopmat" : "") + (coopmat2 ? "_cm2" : (fp16 ? "" : "_fp32"));
+ std::string out_fname = join_paths(output_dir, name + ".spv");
+ std::string in_path = join_paths(input_dir, in_fname);
+
+ std::string target_env = (name.find("_cm2") != std::string::npos) ? "--target-env=vulkan1.3" : "--target-env=vulkan1.2";
+
+    // disable spirv-opt for coopmat shaders due to https://github.com/ggerganov/llama.cpp/issues/10734
+ std::string opt_level = coopmat ? "" : "-O";
+
+ #ifdef _WIN32
+ std::vector<std::string> cmd = {GLSLC, "-fshader-stage=compute", target_env, opt_level, "\"" + in_path + "\"", "-o", "\"" + out_fname + "\""};
+ #else
+ std::vector<std::string> cmd = {GLSLC, "-fshader-stage=compute", target_env, opt_level, in_path, "-o", out_fname};
+ #endif
+
+ #ifdef GGML_VULKAN_SHADER_DEBUG_INFO
+ cmd.push_back("-g");
+ #endif
+
+ for (const auto& define : defines) {
+ cmd.push_back("-D" + define.first + "=" + define.second);
+ }
+
+ std::string command;
+ for (const auto& part : cmd) {
+ command += part + " ";
+ }
+
+ std::string stdout_str, stderr_str;
+ try {
+ // std::cout << "Executing command: ";
+ // for (const auto& part : cmd) {
+ // std::cout << part << " ";
+ // }
+ // std::cout << std::endl;
+
+ execute_command(command, stdout_str, stderr_str);
+ if (!stderr_str.empty()) {
+ std::cerr << "cannot compile " << name << "\n\n" << command << "\n\n" << stderr_str << std::endl;
+ return;
+ }
+
+ std::lock_guard<std::mutex> guard(lock);
+ shader_fnames.push_back(std::make_pair(name, out_fname));
+ } catch (const std::exception& e) {
+ std::cerr << "Error executing command for " << name << ": " << e.what() << std::endl;
+ }
+ {
+ std::lock_guard<std::mutex> guard(compile_count_mutex);
+ assert(compile_count > 0);
+ compile_count--;
+ }
+ compile_count_cond.notify_all();
+}
+
+std::map<std::string, std::string> merge_maps(const std::map<std::string, std::string>& a, const std::map<std::string, std::string>& b) {
+ std::map<std::string, std::string> result = a;
+ result.insert(b.begin(), b.end());
+ return result;
+}
+
+static std::vector<std::future<void>> compiles;
+void string_to_spv(const std::string& _name, const std::string& in_fname, const std::map<std::string, std::string>& defines, bool fp16 = true, bool coopmat = false, bool coopmat2 = false, bool f16acc = false) {
+ {
+ // wait until fewer than N compiles are in progress.
+        // 16 is an arbitrary limit; the goal is to avoid "failed to create pipe" errors.
+ uint32_t N = 16;
+ std::unique_lock<std::mutex> guard(compile_count_mutex);
+ while (compile_count >= N) {
+ compile_count_cond.wait(guard);
+ }
+ compile_count++;
+ }
+ compiles.push_back(std::async(string_to_spv_func, _name, in_fname, defines, fp16, coopmat, coopmat2, f16acc));
+}
+
+void matmul_shaders(bool fp16, bool matmul_id, bool coopmat, bool coopmat2, bool f16acc) {
+ std::string load_vec = coopmat2 ? "1" : fp16 ? "8" : "4";
+ std::string aligned_b_type_f32 = coopmat2 ? "float" : fp16 ? "mat2x4" : "vec4";
+ std::string aligned_b_type_f16 = coopmat2 ? "float16_t" : fp16 ? "f16mat2x4" : "f16vec4";
+
+ std::map<std::string, std::string> base_dict = {{"FLOAT_TYPE", (coopmat2 || fp16) ? "float16_t" : "float"}};
+ std::string shader_name = "matmul";
+
+ if (matmul_id) {
+ base_dict["MUL_MAT_ID"] = "1";
+ shader_name = "matmul_id";
+ }
+
+ if (fp16) {
+ base_dict["FLOAT16"] = "1";
+ }
+
+ base_dict["ACC_TYPE"] = f16acc ? "float16_t" : "float";
+
+ if (coopmat) {
+ base_dict["COOPMAT"] = "1";
+ }
+
+ base_dict["ACC_TYPE"] = f16acc ? "float16_t" : "float";
+
+ std::string source_name = coopmat2 ? "mul_mm_cm2.comp" : "mul_mm.comp";
+
+ // Shaders with f16 B_TYPE
+ string_to_spv(shader_name + "_f32_f16", source_name, merge_maps(base_dict, {{"DATA_A_F32", "1"}, {"B_TYPE", "float16_t"}, {"D_TYPE", "float"}, }), fp16, coopmat, coopmat2, f16acc);
+ string_to_spv(shader_name + "_f32_f16_aligned", source_name, merge_maps(base_dict, {{"DATA_A_F32", "1"}, {"LOAD_VEC_A", load_vec}, {"LOAD_VEC_B", load_vec}, {"B_TYPE", aligned_b_type_f16}, {"D_TYPE", "float"}, {"ALIGNED", "1"}}), fp16, coopmat, coopmat2, f16acc);
+
+ string_to_spv(shader_name + "_f16_aligned", source_name, merge_maps(base_dict, {{"DATA_A_F16", "1"}, {"LOAD_VEC_A", load_vec}, {"LOAD_VEC_B", load_vec}, {"B_TYPE", aligned_b_type_f16}, {"D_TYPE", "float"}, {"ALIGNED", "1"}}), fp16, coopmat, coopmat2, f16acc);
+ string_to_spv(shader_name + "_f16", source_name, merge_maps(base_dict, {{"DATA_A_F16", "1"}, {"B_TYPE", "float16_t"}, {"D_TYPE", "float"}}), fp16, coopmat, coopmat2, f16acc);
+
+ for (const auto& tname : type_names) {
+ std::string data_a_key = "DATA_A_" + to_uppercase(tname);
+ // For unaligned, load one at a time for f32/f16, or two at a time for quants
+ std::string load_vec_a_unaligned = (coopmat2 || tname == "f32" || tname == "f16") ? "1" : "2";
+ // For aligned matmul loads
+ std::string load_vec_a = (coopmat2 || tname == "f32" || tname == "f16") ? load_vec : "2";
+
+ string_to_spv(shader_name + "_" + tname + "_f32", source_name, merge_maps(base_dict, {{data_a_key, "1"}, {"LOAD_VEC_A", load_vec_a_unaligned}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"B_IS_FLOAT", "1"}}), fp16, coopmat, coopmat2, f16acc);
+ string_to_spv(shader_name + "_" + tname + "_f32_aligned", source_name, merge_maps(base_dict, {{data_a_key, "1"}, {"LOAD_VEC_A", load_vec_a}, {"LOAD_VEC_B", load_vec}, {"B_TYPE", aligned_b_type_f32}, {"D_TYPE", "float"}, {"B_IS_FLOAT", "1"}, {"ALIGNED", "1"}}), fp16, coopmat, coopmat2, f16acc);
+
+ if (tname != "f16" && tname != "f32") {
+ string_to_spv(shader_name + "_" + tname + "_f16", source_name, merge_maps(base_dict, {{data_a_key, "1"}, {"LOAD_VEC_A", load_vec_a_unaligned}, {"B_TYPE", "float16_t"}, {"D_TYPE", "float"}, {"B_IS_FLOAT", "1"}}), fp16, coopmat, coopmat2, f16acc);
+ string_to_spv(shader_name + "_" + tname + "_f16_aligned", source_name, merge_maps(base_dict, {{data_a_key, "1"}, {"LOAD_VEC_A", load_vec_a}, {"LOAD_VEC_B", load_vec}, {"B_TYPE", aligned_b_type_f16}, {"D_TYPE", "float"}, {"B_IS_FLOAT", "1"}, {"ALIGNED", "1"}}), fp16, coopmat, coopmat2, f16acc);
+ }
+ }
+}
+
+void process_shaders() {
+ std::cout << "ggml_vulkan: Generating and compiling shaders to SPIR-V" << std::endl;
+ std::map<std::string, std::string> base_dict = {{"FLOAT_TYPE", "float"}};
+
+ // matmul
+ for (const auto& matmul_id : {false, true}) {
+ // No coopmats
+ // fp32
+ matmul_shaders(false, matmul_id, false, false, false);
+
+ // fp16, fp32acc and fp16acc
+ matmul_shaders(true, matmul_id, false, false, false);
+ matmul_shaders(true, matmul_id, false, false, true);
+
+ // Coopmat, fp32acc and fp16acc
+ matmul_shaders(true, matmul_id, true, false, false);
+ matmul_shaders(true, matmul_id, true, false, true);
+
+#if defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
+ // Coopmat2, fp32acc and fp16acc
+ matmul_shaders(true, matmul_id, false, true, false);
+ matmul_shaders(true, matmul_id, false, true, true);
+#endif
+ }
+
+#if defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
+ // flash attention
+ for (const auto& f16acc : {false, true}) {
+ std::string acctype = f16acc ? "float16_t" : "float";
+
+ for (const auto& tname : type_names) {
+ if (tname == "f32") {
+ continue;
+ }
+
+ if (tname == "f16") {
+ string_to_spv("flash_attn_f32_f16_" + tname, "flash_attn_cm2.comp",
+ merge_maps(base_dict, {{"Q_TYPE", "float"}, {"D_TYPE", "float"}, {"ACC_TYPE", acctype}}), true, false, true, f16acc);
+ } else {
+ std::string data_a_key = "DATA_A_" + to_uppercase(tname);
+ string_to_spv("flash_attn_f32_f16_" + tname, "flash_attn_cm2.comp",
+ merge_maps(base_dict, {{data_a_key, "1"}, {"Q_TYPE", "float"}, {"D_TYPE", "float"}, {"ACC_TYPE", acctype}, {"DEQUANTFUNC", "dequantFunc"+to_uppercase(tname) }, {"BLOCK_SIZE", "QUANT_K_"+to_uppercase(tname) }}), true, false, true, f16acc);
+ }
+ }
+ }
+#endif
+
+ for (const auto& tname : type_names) {
+ // mul mat vec
+ std::string data_a_key = "DATA_A_" + to_uppercase(tname);
+ std::string shader = (string_ends_with(tname, "_k")) ? "mul_mat_vec_" + tname + ".comp" : "mul_mat_vec.comp";
+
+ string_to_spv("mul_mat_vec_" + tname + "_f32_f32", shader, merge_maps(base_dict, {{data_a_key, "1"}, {"B_TYPE", "float"}, {"B_TYPE_VEC2", "vec2"}, {"B_TYPE_VEC4", "vec4"}, {"D_TYPE", "float"}}));
+ string_to_spv("mul_mat_vec_" + tname + "_f16_f32", shader, merge_maps(base_dict, {{data_a_key, "1"}, {"B_TYPE", "float16_t"}, {"B_TYPE_VEC2", "f16vec2"}, {"B_TYPE_VEC4", "f16vec4"}, {"D_TYPE", "float"}}));
+
+ string_to_spv("mul_mat_vec_id_" + tname + "_f32", shader, merge_maps(base_dict, {{"MUL_MAT_ID", "1"}, {data_a_key, "1"}, {"B_TYPE", "float"}, {"B_TYPE_VEC2", "vec2"}, {"B_TYPE_VEC4", "vec4"}, {"D_TYPE", "float"}}));
+
+ // Dequant shaders
+ if (tname != "f16") {
+ string_to_spv("dequant_" + tname, "dequant_" + tname + ".comp", merge_maps(base_dict, {{data_a_key, "1"}, {"D_TYPE", "float16_t"}}));
+ }
+
+ if (!string_ends_with(tname, "_k")) {
+ shader = (tname == "f32" || tname == "f16") ? "get_rows.comp" : "get_rows_quant.comp";
+
+ if (tname == "f16") {
+ string_to_spv("get_rows_" + tname, shader, merge_maps(base_dict, {{data_a_key, "1"}, {"B_TYPE", "int"}, {"D_TYPE", "float16_t"}, {"OPTIMIZATION_ERROR_WORKAROUND", "1"}}));
+ } else {
+ string_to_spv("get_rows_" + tname, shader, merge_maps(base_dict, {{data_a_key, "1"}, {"B_TYPE", "int"}, {"D_TYPE", "float16_t"}}));
+ }
+ string_to_spv("get_rows_" + tname + "_f32", shader, merge_maps(base_dict, {{data_a_key, "1"}, {"B_TYPE", "int"}, {"D_TYPE", "float"}}));
+ }
+ }
+
+ string_to_spv("mul_mat_vec_p021_f16_f32", "mul_mat_vec_p021.comp", {{"A_TYPE", "float16_t"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}});
+ string_to_spv("mul_mat_vec_nc_f16_f32", "mul_mat_vec_nc.comp", {{"A_TYPE", "float16_t"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}});
+
+ // Norms
+ string_to_spv("norm_f32", "norm.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float"}}));
+ string_to_spv("group_norm_f32", "group_norm.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float"}}));
+ string_to_spv("rms_norm_f32", "rms_norm.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float"}}));
+
+ string_to_spv("cpy_f32_f32", "copy.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
+ string_to_spv("cpy_f32_f16", "copy.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float16_t"}});
+ string_to_spv("cpy_f16_f16", "copy.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}, {"OPTIMIZATION_ERROR_WORKAROUND", "1"}});
+ string_to_spv("contig_cpy_f32_f32", "contig_copy.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
+ string_to_spv("contig_cpy_f32_f16", "contig_copy.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float16_t"}});
+ string_to_spv("contig_cpy_f16_f16", "contig_copy.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}, {"OPTIMIZATION_ERROR_WORKAROUND", "1"}});
+
+ string_to_spv("add_f32", "add.comp", {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
+ string_to_spv("add_f16_f32_f16", "add.comp", {{"A_TYPE", "float16_t"}, {"B_TYPE", "float"}, {"D_TYPE", "float16_t"}, {"FLOAT_TYPE", "float"}});
+
+ string_to_spv("acc_f32", "acc.comp", {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
+
+ string_to_spv("split_k_reduce", "mul_mat_split_k_reduce.comp", {});
+
+ string_to_spv("mul_f32", "mul.comp", {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
+
+ string_to_spv("div_f32", "div.comp", {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
+
+ string_to_spv("repeat_f32", "repeat.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
+
+ string_to_spv("scale_f32", "scale.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
+
+ string_to_spv("sqr_f32", "square.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
+
+ string_to_spv("sin_f32", "sin.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
+
+ string_to_spv("cos_f32", "cos.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
+
+ string_to_spv("clamp_f32", "clamp.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
+
+ string_to_spv("pad_f32", "pad.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
+
+ string_to_spv("concat_f32", "concat.comp", {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}});
+ string_to_spv("concat_f16", "concat.comp", {{"A_TYPE", "float16_t"}, {"B_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}, {"OPTIMIZATION_ERROR_WORKAROUND", "1"}});
+ string_to_spv("concat_i32", "concat.comp", {{"A_TYPE", "int"}, {"B_TYPE", "int"}, {"D_TYPE", "int"}});
+
+ string_to_spv("upscale_f32", "upscale.comp", {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}});
+
+ string_to_spv("gelu_f32", "gelu.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
+ string_to_spv("gelu_quick_f32", "gelu_quick.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
+ string_to_spv("silu_f32", "silu.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
+ string_to_spv("relu_f32", "relu.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
+ string_to_spv("leaky_relu_f32", "leaky_relu.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
+ string_to_spv("tanh_f32", "tanh.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
+
+ string_to_spv("diag_mask_inf_f32", "diag_mask_inf.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
+
+ string_to_spv("soft_max_f32", "soft_max.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}}));
+ string_to_spv("soft_max_f32_f16", "soft_max.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"B_TYPE", "float16_t"}, {"D_TYPE", "float"}}));
+
+ string_to_spv("rope_norm_f32", "rope_norm.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
+ string_to_spv("rope_norm_f16", "rope_norm.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}});
+ string_to_spv("rope_norm_f16_rte", "rope_norm.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}, {"RTE16", "1"}});
+
+ string_to_spv("rope_neox_f32", "rope_neox.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
+ string_to_spv("rope_neox_f16", "rope_neox.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}});
+ string_to_spv("rope_neox_f16_rte", "rope_neox.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}, {"RTE16", "1"}});
+
+ string_to_spv("argsort_f32", "argsort.comp", {{"A_TYPE", "float"}});
+
+ string_to_spv("sum_rows_f32", "sum_rows.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float"}}));
+
+ string_to_spv("im2col_f32", "im2col.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float"}}));
+ string_to_spv("im2col_f32_f16", "im2col.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float16_t"}}));
+ string_to_spv("im2col_f32_f16_rte", "im2col.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float16_t"}, {"RTE16", "1"}}));
+
+ string_to_spv("timestep_embedding_f32", "timestep_embedding.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float"}}));
+
+ string_to_spv("pool2d_f32", "pool2d.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float"}}));
+
+ string_to_spv("rwkv_wkv6_f32", "wkv6.comp", merge_maps(base_dict, {{"A_TYPE", "float"}}));
+
+ for (auto &c : compiles) {
+ c.wait();
+ }
+}
+
+void write_output_files() {
+ FILE* hdr = fopen(target_hpp.c_str(), "w");
+ FILE* src = fopen(target_cpp.c_str(), "w");
+
+ fprintf(hdr, "#include <cstdint>\n\n");
+ fprintf(src, "#include \"%s\"\n\n", basename(target_hpp).c_str());
+
+ for (const auto& pair : shader_fnames) {
+ const std::string& name = pair.first;
+ #ifdef _WIN32
+ std::string path = pair.second;
+ std::replace(path.begin(), path.end(), '/', '\\' );
+ #else
+ const std::string& path = pair.second;
+ #endif
+
+ FILE* spv = fopen(path.c_str(), "rb");
+ if (!spv) {
+ std::cerr << "Error opening SPIR-V file: " << path << " (" << strerror(errno) << ")\n";
+ continue;
+ }
+
+ fseek(spv, 0, SEEK_END);
+ size_t size = ftell(spv);
+ fseek(spv, 0, SEEK_SET);
+
+ std::vector<unsigned char> data(size);
+ size_t read_size = fread(data.data(), 1, size, spv);
+ fclose(spv);
+ if (read_size != size) {
+ std::cerr << "Error reading SPIR-V file: " << path << " (" << strerror(errno) << ")\n";
+ continue;
+ }
+
+ fprintf(hdr, "extern unsigned char %s_data[%zu];\n", name.c_str(), size);
+ fprintf(hdr, "const uint64_t %s_len = %zu;\n\n", name.c_str(), size);
+
+ fprintf(src, "unsigned char %s_data[%zu] = {\n", name.c_str(), size);
+ for (size_t i = 0; i < size; ++i) {
+ fprintf(src, "0x%02x,", data[i]);
+ if ((i + 1) % 12 == 0) fprintf(src, "\n");
+ }
+ fprintf(src, "\n};\n\n");
+
+ if (!no_clean) {
+ std::remove(path.c_str());
+ }
+ }
+
+ fclose(hdr);
+ fclose(src);
+}
+}
+
+int main(int argc, char** argv) {
+ std::map<std::string, std::string> args;
+ for (int i = 1; i < argc; ++i) {
+ std::string arg = argv[i];
+ if (arg.rfind("--", 0) == 0) {
+ if (i + 1 < argc && argv[i + 1][0] != '-') {
+ args[arg] = argv[i + 1];
+ ++i;
+ } else {
+ args[arg] = "";
+ }
+ }
+ }
+
+ if (args.find("--glslc") != args.end()) {
+ GLSLC = args["--glslc"]; // Path to glslc
+ }
+ if (args.find("--input-dir") != args.end()) {
+ input_dir = args["--input-dir"]; // Directory containing shader sources
+ }
+ if (args.find("--output-dir") != args.end()) {
+        output_dir = args["--output-dir"]; // Directory to write the SPIR-V output to
+ }
+ if (args.find("--target-hpp") != args.end()) {
+ target_hpp = args["--target-hpp"]; // Path to generated header file
+ }
+ if (args.find("--target-cpp") != args.end()) {
+ target_cpp = args["--target-cpp"]; // Path to generated cpp file
+ }
+ if (args.find("--no-clean") != args.end()) {
+ no_clean = true; // Keep temporary SPIR-V files in output-dir after build
+ }
+
+ if (!directory_exists(input_dir)) {
+ std::cerr << "\"" << input_dir << "\" must be a valid directory containing shader sources" << std::endl;
+ return EXIT_FAILURE;
+ }
+
+ if (!directory_exists(output_dir)) {
+ if (!create_directory(output_dir)) {
+ std::cerr << "Error creating output directory: " << output_dir << "\n";
+ return EXIT_FAILURE;
+ }
+ }
+
+ process_shaders();
+
+ write_output_files();
+
+ return EXIT_SUCCESS;
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/wkv6.comp b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/wkv6.comp
new file mode 100644
index 00000000..35cc6c45
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-vulkan/vulkan-shaders/wkv6.comp
@@ -0,0 +1,87 @@
+#version 450
+
+#extension GL_EXT_control_flow_attributes : require
+
+#define BLOCK_SIZE 64
+layout(local_size_x = BLOCK_SIZE, local_size_y = 1, local_size_z = 1) in;
+
+layout(push_constant) uniform Parameters {
+ uint B;
+ uint T;
+ uint C;
+ uint H;
+};
+
+layout(binding = 0) readonly buffer KBuf { A_TYPE k[]; };
+layout(binding = 1) readonly buffer VBuf { A_TYPE v[]; };
+layout(binding = 2) readonly buffer RBuf { A_TYPE r[]; };
+layout(binding = 3) readonly buffer TimeFBuf { A_TYPE tf[]; };
+layout(binding = 4) readonly buffer TimeDBuf { A_TYPE td[]; };
+layout(binding = 5) readonly buffer StateBuf { A_TYPE state_in[]; };
+layout(binding = 6) buffer DstBuf { A_TYPE dst[]; };
+
+shared A_TYPE _k[BLOCK_SIZE], _r[BLOCK_SIZE], _tf[BLOCK_SIZE], _td[BLOCK_SIZE];
+
+void main() {
+ const uint head_size = BLOCK_SIZE;
+ const uint batch_id = gl_WorkGroupID.x / H;
+ const uint head_id = gl_WorkGroupID.x % H;
+ const uint tid = gl_LocalInvocationID.x;
+
+ const uint state_size = C * head_size;
+ const uint n_seq_tokens = T / B;
+
+ if (batch_id >= B || head_id >= H) {
+ return;
+ }
+
+ A_TYPE state[BLOCK_SIZE];
+ [[unroll]] for (uint i = 0; i < head_size; i++) {
+ state[i] = state_in[batch_id * state_size + head_id * head_size * head_size
+ + i * head_size + tid];
+ }
+
+ barrier();
+ _tf[tid] = tf[head_id * head_size + tid];
+ barrier();
+
+ const uint start_t = batch_id * n_seq_tokens * C + head_id * head_size + tid;
+ const uint end_t = (batch_id + 1) * n_seq_tokens * C + head_id * head_size + tid;
+
+ for (uint t = start_t; t < end_t; t += C) {
+ barrier();
+ _k[tid] = k[t];
+ _r[tid] = r[t];
+ _td[tid] = td[t];
+ barrier();
+
+ const A_TYPE v_val = v[t];
+ A_TYPE y = 0.0;
+
+ [[unroll]] for (uint j = 0; j < head_size; j += 4) {
+ vec4 k_vec = vec4(_k[j], _k[j+1], _k[j+2], _k[j+3]);
+ vec4 r_vec = vec4(_r[j], _r[j+1], _r[j+2], _r[j+3]);
+ vec4 tf_vec = vec4(_tf[j], _tf[j+1], _tf[j+2], _tf[j+3]);
+ vec4 td_vec = vec4(_td[j], _td[j+1], _td[j+2], _td[j+3]);
+ vec4 s_vec = vec4(state[j], state[j+1], state[j+2], state[j+3]);
+
+ vec4 kv = k_vec * v_val;
+
+ vec4 temp = tf_vec * kv + s_vec;
+ y += dot(r_vec, temp);
+
+ s_vec = s_vec * td_vec + kv;
+ state[j] = s_vec.x;
+ state[j+1] = s_vec.y;
+ state[j+2] = s_vec.z;
+ state[j+3] = s_vec.w;
+ }
+
+ dst[t] = y;
+ }
+
+ [[unroll]] for (uint i = 0; i < head_size; i++) {
+ dst[T * C + batch_id * state_size + head_id * head_size * head_size
+ + i * head_size + tid] = state[i];
+ }
+}
--
2.43.0