* feat: Bump llama.cpp to df1b612
* fix(mtmd): Correctly encode text chunks during mtmd tokenization
  There can be text chunks that appear interspersed with the image embeddings
  that contain template delimiter tokens for some models. These need to be
  correctly translated to text tokens.
* tests: Use MtmdChunk in image_test
* style: Fix unnecessary conversion linting
* fix(ggml): Revert changes to ggml_hip.cpp
  These changes were done largely by our code assistant and are likely wrong.
* fix: Revert changes in mem_nvml.cpp
* feat: Update sync point to 1deee0
  This brings in several more optimization commits and model support for
  EmbeddingGemma.
* feat: Update patches for 1deee0
* feat: sync for bump to 1deee0
* fix: Bad patch updates with errant `+`
* feat: Bump llama.cpp/ggml to 7049736
* fix: format-patches after latest bump

All commits on branch LlamaCPPBump-GraniteDocling.

Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>
#include "llama-context.h"

#include "llama-impl.h"
#include "llama-batch.h"
#include "llama-io.h"
#include "llama-memory.h"
#include "llama-mmap.h"
#include "llama-model.h"

#include <cinttypes>
#include <cstring>
#include <limits>
#include <stdexcept>

//
// llama_context
//

llama_context::llama_context(
        const llama_model & model,
              llama_context_params params) :
    model(model),
    balloc(std::make_unique<llama_batch_allocr>(model.hparams.n_pos_per_embd())) {
    LLAMA_LOG_INFO("%s: constructing llama_context\n", __func__);

    t_start_us = model.t_start_us;
    t_load_us  = model.t_load_us;

    const auto & hparams = model.hparams;

    cparams.n_seq_max = std::max(1u, params.n_seq_max);
    if (cparams.n_seq_max > LLAMA_MAX_SEQ) {
        throw std::runtime_error("n_seq_max must be <= " + std::to_string(LLAMA_MAX_SEQ));
    }

    cparams.n_threads        = params.n_threads;
    cparams.n_threads_batch  = params.n_threads_batch;
    cparams.yarn_ext_factor  = params.yarn_ext_factor  >= 0.0f ? params.yarn_ext_factor  : hparams.yarn_ext_factor;
    cparams.yarn_attn_factor = params.yarn_attn_factor >= 0.0f ? params.yarn_attn_factor : hparams.yarn_attn_factor;
    cparams.yarn_beta_fast   = params.yarn_beta_fast   >= 0.0f ? params.yarn_beta_fast   : hparams.yarn_beta_fast;
    cparams.yarn_beta_slow   = params.yarn_beta_slow   >= 0.0f ? params.yarn_beta_slow   : hparams.yarn_beta_slow;
    cparams.embeddings       = params.embeddings;
    cparams.offload_kqv      = params.offload_kqv;
    cparams.no_perf          = params.no_perf;
    cparams.pooling_type     = params.pooling_type;
    cparams.warmup           = false;

    cparams.n_ctx           = params.n_ctx           == 0    ? hparams.n_ctx_train           : params.n_ctx;
    cparams.rope_freq_base  = params.rope_freq_base  == 0.0f ? hparams.rope_freq_base_train  : params.rope_freq_base;
    cparams.rope_freq_scale = params.rope_freq_scale == 0.0f ? hparams.rope_freq_scale_train : params.rope_freq_scale;

    cparams.n_ctx_orig_yarn = params.yarn_orig_ctx    != 0 ? params.yarn_orig_ctx    :
                              hparams.n_ctx_orig_yarn != 0 ? hparams.n_ctx_orig_yarn :
                                                             hparams.n_ctx_train;

    cparams.cb_eval           = params.cb_eval;
    cparams.cb_eval_user_data = params.cb_eval_user_data;

    auto rope_scaling_type = params.rope_scaling_type;
    if (rope_scaling_type == LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED) {
        rope_scaling_type = hparams.rope_scaling_type_train;
    }

    if (rope_scaling_type == LLAMA_ROPE_SCALING_TYPE_NONE) {
        cparams.rope_freq_scale = 1.0f; // never scale if scaling type is none
    }

    if (cparams.yarn_ext_factor < 0.0f) { // negative indicates 'not set'
        cparams.yarn_ext_factor = rope_scaling_type == LLAMA_ROPE_SCALING_TYPE_YARN ? 1.0f : 0.0f;
    }

    cparams.yarn_attn_factor *= hparams.rope_attn_factor;

    if (cparams.pooling_type == LLAMA_POOLING_TYPE_UNSPECIFIED) {
        if (hparams.pooling_type == LLAMA_POOLING_TYPE_UNSPECIFIED) {
            cparams.pooling_type = LLAMA_POOLING_TYPE_NONE;
        } else {
            cparams.pooling_type = hparams.pooling_type;
        }
    }

    if (params.attention_type == LLAMA_ATTENTION_TYPE_UNSPECIFIED) {
        cparams.causal_attn = hparams.causal_attn;
    } else {
        cparams.causal_attn = params.attention_type == LLAMA_ATTENTION_TYPE_CAUSAL;
    }

    cparams.flash_attn = params.flash_attn_type != LLAMA_FLASH_ATTN_TYPE_DISABLED;

    // with causal attention, the batch size is limited by the context size
    cparams.n_batch = cparams.causal_attn ? std::min(cparams.n_ctx, params.n_batch) : params.n_batch;

    // the batch has to be at least GGML_KQ_MASK_PAD because we will be padding the KQ_mask
    // this is required by GPU kernels in order to avoid out-of-bounds accesses (e.g. ggml_flash_attn_ext)
    // ref: https://github.com/ggerganov/llama.cpp/pull/5021
    // TODO: this padding is not needed for the cache-less context so we should probably move it to llama_memory
    if (cparams.n_batch < GGML_KQ_MASK_PAD) {
        LLAMA_LOG_WARN("%s: n_batch is less than GGML_KQ_MASK_PAD - increasing to %d\n", __func__, GGML_KQ_MASK_PAD);
        cparams.n_batch = GGML_KQ_MASK_PAD;
    }
    cparams.n_ubatch = std::min(cparams.n_batch, params.n_ubatch == 0 ? params.n_batch : params.n_ubatch);

    cparams.op_offload = params.op_offload;
    cparams.kv_unified = params.kv_unified;

    {
        const char * LLAMA_GRAPH_REUSE_DISABLE = getenv("LLAMA_GRAPH_REUSE_DISABLE");
        graph_reuse_disable = LLAMA_GRAPH_REUSE_DISABLE ? (atoi(LLAMA_GRAPH_REUSE_DISABLE) != 0) : graph_reuse_disable;

        if (graph_reuse_disable) {
            LLAMA_LOG_WARN("%s: graph reuse disabled\n", __func__);
        }
    }

    const uint32_t n_ctx_per_seq = cparams.n_ctx / cparams.n_seq_max;

    LLAMA_LOG_INFO("%s: n_seq_max     = %u\n",   __func__, cparams.n_seq_max);
    LLAMA_LOG_INFO("%s: n_ctx         = %u\n",   __func__, cparams.n_ctx);
    LLAMA_LOG_INFO("%s: n_ctx_per_seq = %u\n",   __func__, n_ctx_per_seq);
    LLAMA_LOG_INFO("%s: n_batch       = %u\n",   __func__, cparams.n_batch);
    LLAMA_LOG_INFO("%s: n_ubatch      = %u\n",   __func__, cparams.n_ubatch);
    LLAMA_LOG_INFO("%s: causal_attn   = %d\n",   __func__, cparams.causal_attn);
    LLAMA_LOG_INFO("%s: flash_attn    = %s\n",   __func__, llama_flash_attn_type_name(params.flash_attn_type));
    LLAMA_LOG_INFO("%s: kv_unified    = %s\n",   __func__, cparams.kv_unified ? "true" : "false");
    LLAMA_LOG_INFO("%s: freq_base     = %.1f\n", __func__, cparams.rope_freq_base);
    LLAMA_LOG_INFO("%s: freq_scale    = %g\n",   __func__, cparams.rope_freq_scale);

    if (n_ctx_per_seq < hparams.n_ctx_train) {
        LLAMA_LOG_WARN("%s: n_ctx_per_seq (%u) < n_ctx_train (%u) -- the full capacity of the model will not be utilized\n",
                __func__, n_ctx_per_seq, hparams.n_ctx_train);
    }

    if (n_ctx_per_seq > hparams.n_ctx_train) {
        LLAMA_LOG_WARN("%s: n_ctx_per_seq (%u) > n_ctx_train (%u) -- possible training context overflow\n",
                __func__, n_ctx_per_seq, hparams.n_ctx_train);
    }

    if (!hparams.vocab_only) {
        // GPU backends
        for (auto * dev : model.devices) {
            ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr);
            if (backend == nullptr) {
                throw std::runtime_error(format("failed to initialize %s backend", ggml_backend_dev_name(dev)));
            }
            backends.emplace_back(backend);
        }

        // add ACCEL backends (such as BLAS)
        for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
            ggml_backend_dev_t dev = ggml_backend_dev_get(i);
            if (ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_ACCEL) {
                ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr);
                if (backend == nullptr) {
                    throw std::runtime_error(format("failed to initialize %s backend", ggml_backend_dev_name(dev)));
                }
                backends.emplace_back(backend);
            }
        }

        // add CPU backend
        backend_cpu = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr);
        if (backend_cpu == nullptr) {
            throw std::runtime_error("failed to initialize CPU backend");
        }
        backends.emplace_back(backend_cpu);

        // create a list of the set_n_threads functions in the backends
        for (auto & backend : backends) {
            ggml_backend_dev_t dev = ggml_backend_get_device(backend.get());
            ggml_backend_reg_t reg = dev ? ggml_backend_dev_backend_reg(dev) : nullptr;
            if (reg) {
                auto ggml_backend_set_n_threads_fn = (ggml_backend_set_n_threads_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_n_threads");
                if (ggml_backend_set_n_threads_fn) {
                    set_n_threads_fns.emplace_back(backend.get(), ggml_backend_set_n_threads_fn);
                }
            }
        }

        llama_set_abort_callback(this, params.abort_callback, params.abort_callback_data);

        // graph outputs buffer
        {
            // resized during inference when a batch uses more outputs
            if (output_reserve(params.n_seq_max) < params.n_seq_max) {
                throw std::runtime_error("failed to reserve initial output buffer");
            }

            LLAMA_LOG_INFO("%s: %10s output buffer size = %8.2f MiB\n", __func__,
                    ggml_backend_buffer_name    (buf_output.get()),
                    ggml_backend_buffer_get_size(buf_output.get()) / 1024.0 / 1024.0);
        }
    }

    // init the memory module
    if (!hparams.vocab_only) {
        llama_memory_params params_mem = {
            /*.type_k   =*/ params.type_k,
            /*.type_v   =*/ params.type_v,
            /*.swa_full =*/ params.swa_full,
        };

        memory.reset(model.create_memory(params_mem, cparams));
    }

    // init backends
    if (!hparams.vocab_only) {
        LLAMA_LOG_DEBUG("%s: enumerating backends\n", __func__);

        backend_buft.clear();
        backend_ptrs.clear();

        for (auto & backend : backends) {
            auto * buft = ggml_backend_get_default_buffer_type(backend.get());
            auto backend_type = ggml_backend_dev_type(ggml_backend_get_device(backend.get()));

            if (backend_type == GGML_BACKEND_DEVICE_TYPE_CPU && !model.devices.empty()) {
                // use the host buffer of the first device CPU for faster transfer of the intermediate state
                auto * dev = model.devices[0];
                auto * host_buft = ggml_backend_dev_host_buffer_type(dev);
                if (host_buft) {
                    buft = host_buft;
                }
            }

            backend_buft.push_back(buft);
            backend_ptrs.push_back(backend.get());
        }

        LLAMA_LOG_DEBUG("%s: backend_ptrs.size() = %zu\n", __func__, backend_ptrs.size());

        const size_t max_nodes = this->graph_max_nodes();

        LLAMA_LOG_DEBUG("%s: max_nodes = %zu\n", __func__, max_nodes);

        gf_res_prev.reset(new llm_graph_result(max_nodes));
        gf_res_reserve.reset(new llm_graph_result(max_nodes));

        // TODO: move these checks to ggml_backend_sched
        // enabling pipeline parallelism in the scheduler increases memory usage, so it is only done when necessary
        bool pipeline_parallel =
            model.n_devices() > 1 &&
            model.params.n_gpu_layers > (int) model.hparams.n_layer &&
            model.params.split_mode == LLAMA_SPLIT_MODE_LAYER &&
            cparams.offload_kqv &&
            !model.has_tensor_overrides();

        // pipeline parallelism requires support for async compute and events in all devices
        if (pipeline_parallel) {
            for (auto & backend : backends) {
                auto dev_type = ggml_backend_dev_type(ggml_backend_get_device(backend.get()));
                if (dev_type == GGML_BACKEND_DEVICE_TYPE_CPU) {
                    // ignore CPU backend
                    continue;
                }
                auto * dev = ggml_backend_get_device(backend.get());
                ggml_backend_dev_props props;
                ggml_backend_dev_get_props(dev, &props);
                if (!props.caps.async || !props.caps.events) {
                    // device does not support async compute or events
                    pipeline_parallel = false;
                    break;
                }
            }
        }

        sched.reset(ggml_backend_sched_new(backend_ptrs.data(), backend_buft.data(), backend_ptrs.size(), max_nodes, pipeline_parallel, cparams.op_offload));

        if (pipeline_parallel) {
            LLAMA_LOG_INFO("%s: pipeline parallelism enabled (n_copies=%d)\n", __func__, ggml_backend_sched_get_n_copies(sched.get()));
        }
    }

    if (!hparams.vocab_only) {
        llama_memory_context_ptr mctx;
        if (memory) {
            LLAMA_LOG_DEBUG("%s: reserving full memory module\n", __func__);
            mctx = memory->init_full();
            if (!mctx) {
                throw std::runtime_error("failed to initialize memory module");
            }
        }

        cross.v_embd.clear();

        const uint32_t n_seqs   = cparams.kv_unified ? 1 : cparams.n_seq_max;
        const uint32_t n_tokens = std::min(cparams.n_ctx, cparams.n_ubatch);

        // avoid reserving graphs with zero outputs - assume one output per sequence
        n_outputs = n_seqs;

        LLAMA_LOG_DEBUG("%s: worst-case: n_tokens = %d, n_seqs = %d, n_outputs = %d\n", __func__, n_tokens, n_seqs, n_outputs);

        // resolve automatic Flash Attention use
        if (params.flash_attn_type == LLAMA_FLASH_ATTN_TYPE_AUTO) {
            auto * gf = graph_reserve(1, n_seqs, n_outputs, mctx.get(), true);
            if (!gf) {
                throw std::runtime_error("failed to split graph for Flash Attention check");
            }

            const size_t prefix_len = strlen(LLAMA_TENSOR_NAME_FATTN) + 1;
            bool fa_device_mismatch = false;
            for (int i = 0; i < ggml_graph_n_nodes(gf); i++) {
                ggml_tensor * n = ggml_graph_node(gf, i);
                if (n->op != GGML_OP_FLASH_ATTN_EXT) {
                    continue;
                }
                ggml_backend_dev_t device_fa = ggml_backend_get_device(
                    ggml_backend_sched_get_tensor_backend(sched.get(), n));

                // TODO: instead of the tensor names, use a map to keep track of which (FA) tensors belong to which layer
                GGML_ASSERT(strncmp(n->name, LLAMA_TENSOR_NAME_FATTN "-", prefix_len) == 0);
                const int il = std::stoi(n->name + prefix_len);
                ggml_backend_dev_t device_kv = model.dev_layer(il);
                if (device_fa != device_kv) {
                    LLAMA_LOG_WARN("%s: layer %d is assigned to device %s but the Flash Attention tensor "
                        "is assigned to device %s (usually due to missing support)\n",
                        __func__, il, ggml_backend_dev_name(device_kv), ggml_backend_dev_name(device_fa));
                    // FIXME: fa_device_mismatch logic is wrong for --no-kv-offload, but this is broken anyways
                    fa_device_mismatch = true;
                    break;
                }
            }
            if (fa_device_mismatch) {
                cparams.flash_attn = false;
                LLAMA_LOG_WARN("%s: Flash Attention was auto, set to disabled\n", __func__);
                if (ggml_is_quantized(params.type_v)) {
                    throw std::runtime_error("quantized V cache was requested, but this requires Flash Attention");
                }
            } else {
                cparams.flash_attn = true;
                LLAMA_LOG_INFO("%s: Flash Attention was auto, set to enabled\n", __func__);
            }
        }

        // reserve worst-case graph
        int n_splits_pp = -1;
        int n_nodes_pp  = -1;

        int n_splits_tg = -1;
        int n_nodes_tg  = -1;

        // reserve pp (prompt processing) graph first so that buffers are only allocated once
        {
            auto * gf = graph_reserve(n_tokens, n_seqs, n_tokens, mctx.get());
            if (!gf) {
                throw std::runtime_error("failed to allocate compute pp buffers");
            }

            n_splits_pp = ggml_backend_sched_get_n_splits(sched.get());
            n_nodes_pp  = ggml_graph_n_nodes(gf);
        }

        // reserve with tg (token generation) graph to get the number of splits and nodes
        {
            auto * gf = graph_reserve(n_seqs, n_seqs, n_seqs, mctx.get());
            if (!gf) {
                throw std::runtime_error("failed to allocate compute tg buffers");
            }

            n_splits_tg = ggml_backend_sched_get_n_splits(sched.get());
            n_nodes_tg  = ggml_graph_n_nodes(gf);
        }

        // reserve again with pp graph to avoid ggml-alloc reallocations during inference
        {
            // TODO: not sure if the following graph would be the worst case for multi-stream KV caches:
            //
            //   auto * gf = graph_reserve(n_tokens, 1, n_tokens, mctx.get());
            //
            auto * gf = graph_reserve(n_tokens, n_seqs, n_tokens, mctx.get());
            if (!gf) {
                throw std::runtime_error("failed to allocate compute pp buffers");
            }
        }

        for (size_t i = 0; i < backend_ptrs.size(); ++i) {
            ggml_backend_t             backend = backend_ptrs[i];
            ggml_backend_buffer_type_t buft    = backend_buft[i];
            size_t size = ggml_backend_sched_get_buffer_size(sched.get(), backend);
            if (size > 1) {
                LLAMA_LOG_INFO("%s: %10s compute buffer size = %8.2f MiB\n", __func__,
                        ggml_backend_buft_name(buft),
                        size / 1024.0 / 1024.0);
            }
        }

        if (n_nodes_pp == n_nodes_tg) {
            LLAMA_LOG_INFO("%s: graph nodes  = %d\n", __func__, n_nodes_pp);
        } else {
            LLAMA_LOG_INFO("%s: graph nodes  = %d (with bs=%d), %d (with bs=1)\n", __func__, n_nodes_pp, n_tokens, n_nodes_tg);
        }

        if (n_splits_pp == n_splits_tg) {
            LLAMA_LOG_INFO("%s: graph splits = %d\n", __func__, n_splits_pp);
        } else {
            LLAMA_LOG_INFO("%s: graph splits = %d (with bs=%d), %d (with bs=1)\n", __func__, n_splits_pp, n_tokens, n_splits_tg);
        }
    }
}
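// Illustrative sketch (not part of this file): the defaults resolved above are
// driven entirely by llama_context_params. A minimal caller through the public
// API, assuming the usual llama.h entry points, looks like:
//
//     llama_context_params cparams = llama_context_default_params();
//     cparams.n_ctx     = 8192; // 0 would fall back to hparams.n_ctx_train
//     cparams.n_seq_max = 4;    // the context is split evenly across sequences
//     llama_context * ctx = llama_init_from_model(model, cparams);
//
// Setting LLAMA_GRAPH_REUSE_DISABLE=1 in the environment disables graph reuse
// for debugging, as handled in the constructor above.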

llama_context::~llama_context() {
    ggml_opt_free(opt_ctx);
}

void llama_context::synchronize() {
    ggml_backend_sched_synchronize(sched.get());

    // FIXME: if multiple single tokens are evaluated without a synchronization,
    //        the stats will be added to the prompt evaluation stats
    //        this should only happen when using batch size 1 to evaluate a batch

    // add the evaluation to the stats
    if (n_queued_tokens == 1) {
        if (!cparams.no_perf) {
            t_eval_us += ggml_time_us() - t_compute_start_us;
        }
        n_eval++;
    } else if (n_queued_tokens > 1) {
        if (!cparams.no_perf) {
            t_p_eval_us += ggml_time_us() - t_compute_start_us;
        }
        n_p_eval += n_queued_tokens;
    }

    // get a more accurate load time, upon first eval
    if (n_queued_tokens > 0 && !has_evaluated_once) {
        t_load_us = ggml_time_us() - t_start_us;
        has_evaluated_once = true;
    }

    n_queued_tokens = 0;
    t_compute_start_us = 0;
}
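// Note (illustrative): the counters updated above back the public perf API;
// assuming the llama.h perf helpers, a caller can read them with:
//
//     llama_perf_context_print(ctx); // reports prompt eval (n_p_eval) and eval (n_eval) timings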

const llama_model & llama_context::get_model() const {
    return model;
}

const llama_cparams & llama_context::get_cparams() const {
    return cparams;
}

ggml_backend_sched_t llama_context::get_sched() const {
    return sched.get();
}

uint32_t llama_context::n_ctx() const {
    return cparams.n_ctx;
}

uint32_t llama_context::n_ctx_per_seq() const {
    return cparams.n_ctx / cparams.n_seq_max;
}

uint32_t llama_context::n_batch() const {
    return cparams.n_batch;
}

uint32_t llama_context::n_ubatch() const {
    return cparams.n_ubatch;
}

uint32_t llama_context::n_seq_max() const {
    return cparams.n_seq_max;
}

uint32_t llama_context::n_threads() const {
    return cparams.n_threads;
}

uint32_t llama_context::n_threads_batch() const {
    return cparams.n_threads_batch;
}

llama_memory_t llama_context::get_memory() const {
    return memory.get();
}

bool llama_context::memory_update(bool optimize) {
    if (!memory) {
        return false;
    }

    {
        const auto mctx = memory->init_update(this, optimize);
        switch (mctx->get_status()) {
            case LLAMA_MEMORY_STATUS_SUCCESS:
                {
                    // noop
                } break;
            case LLAMA_MEMORY_STATUS_NO_UPDATE:
                {
                    // no updates need to be performed
                    return false;
                }
            case LLAMA_MEMORY_STATUS_FAILED_PREPARE:
            case LLAMA_MEMORY_STATUS_FAILED_COMPUTE:
                {
                    LLAMA_LOG_ERROR("%s: failed to prepare memory update\n", __func__);
                    return false;
                }
        }

        // reset the previous graph result to make sure that it won't be reused
        // TODO: change the mctx->apply() to return information if a graph reserve is needed
        //       reset the graph result only if the memory module did reset the scheduler
        gf_res_prev->reset();

        if (!mctx->apply()) {
            LLAMA_LOG_ERROR("%s: failed to apply memory update\n", __func__);
        }
    }

    // if the memory module did any computation, we have to reserve a new worst-case graph
    {
        const auto mctx = memory->init_full();
        if (!mctx) {
            throw std::runtime_error("failed to initialize memory context");
        }

        const uint32_t n_seqs   = cparams.kv_unified ? 1 : cparams.n_seq_max;
        const uint32_t n_tokens = std::min(cparams.n_ctx, cparams.n_ubatch);

        auto * gf = graph_reserve(n_tokens, n_seqs, n_tokens, mctx.get());
        if (!gf) {
            LLAMA_LOG_ERROR("%s: failed to reserve graph after the memory update\n", __func__);
        }
    }

    return true;
}

enum llama_pooling_type llama_context::pooling_type() const {
    return cparams.pooling_type;
}

float * llama_context::get_logits() {
    output_reorder();

    return logits;
}

float * llama_context::get_logits_ith(int32_t i) {
    int64_t j = -1;

    output_reorder();

    try {
        if (logits == nullptr) {
            throw std::runtime_error("no logits");
        }

        if (i < 0) {
            j = n_outputs + i;
            if (j < 0) {
                throw std::runtime_error(format("negative index out of range [0, %d)", n_outputs));
            }
        } else if ((size_t) i >= output_ids.size()) {
            throw std::runtime_error(format("out of range [0, %zu)", output_ids.size()));
        } else {
            j = output_ids[i];
        }

        if (j < 0) {
            throw std::runtime_error(format("batch.logits[%d] != true", i));
        }
        if (j >= n_outputs) {
            // this should not happen
            throw std::runtime_error(format("corrupt output buffer (j=%" PRId64 ", n_outputs=%d)", j, n_outputs));
        }

        return logits + j*model.vocab.n_tokens();
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: invalid logits id %d, reason: %s\n", __func__, i, err.what());
#ifndef NDEBUG
        GGML_ABORT("fatal error");
#else
        return nullptr;
#endif
    }
}
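// Illustrative use (public API): negative indices count back from the end of
// the outputs, so the logits of the last output in the batch are:
//
//     const float * last = llama_get_logits_ith(ctx, -1);
//
// On an invalid index this returns nullptr in release builds and aborts in
// debug builds, per the handler above.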

float * llama_context::get_embeddings() {
    output_reorder();

    return embd;
}

float * llama_context::get_embeddings_ith(int32_t i) {
    int64_t j = -1;

    output_reorder();

    try {
        if (embd == nullptr) {
            throw std::runtime_error("no embeddings");
        }

        if (i < 0) {
            j = n_outputs + i;
            if (j < 0) {
                throw std::runtime_error(format("negative index out of range [0, %d)", n_outputs));
            }
        } else if ((size_t) i >= output_ids.size()) {
            throw std::runtime_error(format("out of range [0, %zu)", output_ids.size()));
        } else {
            j = output_ids[i];
        }

        if (j < 0) {
            throw std::runtime_error(format("batch.logits[%d] != true", i));
        }
        if (j >= n_outputs) {
            // this should not happen
            throw std::runtime_error(format("corrupt output buffer (j=%" PRId64 ", n_outputs=%d)", j, n_outputs));
        }

        return embd + j*model.hparams.n_embd;
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: invalid embeddings id %d, reason: %s\n", __func__, i, err.what());
#ifndef NDEBUG
        GGML_ABORT("fatal error");
#else
        return nullptr;
#endif
    }
}

float * llama_context::get_embeddings_seq(llama_seq_id seq_id) {
    auto it = embd_seq.find(seq_id);
    if (it == embd_seq.end()) {
        return nullptr;
    }

    return it->second.data();
}
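// Illustrative use (public API): with a pooling type other than NONE,
// per-sequence embeddings are fetched by sequence id rather than by output index:
//
//     const float * e = llama_get_embeddings_seq(ctx, 0); // nullptr if seq 0 produced none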

void llama_context::attach_threadpool(
        ggml_threadpool_t threadpool,
        ggml_threadpool_t threadpool_batch) {
    LLAMA_LOG_DEBUG("%s: call\n", __func__);

    this->threadpool       = threadpool;
    this->threadpool_batch = threadpool_batch ? threadpool_batch : threadpool;
}

void llama_context::detach_threadpool() {
    LLAMA_LOG_DEBUG("%s: call\n", __func__);

    this->threadpool       = nullptr;
    this->threadpool_batch = nullptr;
}

void llama_context::set_n_threads(int32_t n_threads, int32_t n_threads_batch) {
    LLAMA_LOG_DEBUG("%s: n_threads = %d, n_threads_batch = %d\n", __func__, n_threads, n_threads_batch);

    cparams.n_threads       = n_threads;
    cparams.n_threads_batch = n_threads_batch;
}

void llama_context::set_abort_callback(bool (*abort_callback)(void * data), void * abort_callback_data) {
    LLAMA_LOG_DEBUG("%s: call\n", __func__);

    this->abort_callback      = abort_callback;
    this->abort_callback_data = abort_callback_data;

    for (auto & backend : backends) {
        auto * reg = ggml_backend_dev_backend_reg(ggml_backend_get_device(backend.get()));
        auto * set_abort_callback_fn = (ggml_backend_set_abort_callback_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_abort_callback");
        if (set_abort_callback_fn) {
            set_abort_callback_fn(backend.get(), this->abort_callback, this->abort_callback_data);
        }
    }
}
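// Illustrative use (public API): the callback is polled during graph compute,
// so long evaluations can be cancelled cooperatively:
//
//     static bool should_abort(void * /*data*/) { return g_interrupted; }
//     llama_set_abort_callback(ctx, should_abort, nullptr);
//
// (g_interrupted is a hypothetical flag set by e.g. a SIGINT handler.)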

void llama_context::set_embeddings(bool value) {
    LLAMA_LOG_DEBUG("%s: value = %d\n", __func__, value);

    cparams.embeddings = value;
}

void llama_context::set_causal_attn(bool value) {
    LLAMA_LOG_DEBUG("%s: value = %d\n", __func__, value);

    cparams.causal_attn = value;
}

void llama_context::set_warmup(bool value) {
    LLAMA_LOG_DEBUG("%s: value = %d\n", __func__, value);

    cparams.warmup = value;
}

void llama_context::set_adapter_lora(
        llama_adapter_lora * adapter,
        float scale) {
    LLAMA_LOG_DEBUG("%s: adapter = %p, scale = %f\n", __func__, (void *) adapter, scale);

    loras[adapter] = scale;
}

bool llama_context::rm_adapter_lora(
        llama_adapter_lora * adapter) {
    LLAMA_LOG_DEBUG("%s: adapter = %p\n", __func__, (void *) adapter);

    auto pos = loras.find(adapter);
    if (pos != loras.end()) {
        loras.erase(pos);
        return true;
    }

    return false;
}

void llama_context::clear_adapter_lora() {
    LLAMA_LOG_DEBUG("%s: call\n", __func__);

    loras.clear();
}

bool llama_context::apply_adapter_cvec(
        const float * data,
             size_t   len,
            int32_t   n_embd,
            int32_t   il_start,
            int32_t   il_end) {
    LLAMA_LOG_DEBUG("%s: il_start = %d, il_end = %d\n", __func__, il_start, il_end);

    return cvec.apply(model, data, len, n_embd, il_start, il_end);
}

llm_graph_result * llama_context::process_ubatch(const llama_ubatch & ubatch, llm_graph_type gtype, llama_memory_context_i * mctx, ggml_status & ret) {
    if (mctx && !mctx->apply()) {
        LLAMA_LOG_ERROR("%s: failed to apply memory context\n", __func__);
        ret = GGML_STATUS_FAILED;
        return nullptr;
    }

    auto * res = gf_res_prev.get();
    auto * gf  = res->get_gf();

    // the new graph parameters
    // in order to correctly reuse a graph, its full topology has to be uniquely determined by these parameters
    const auto gparams = graph_params(res, ubatch, mctx, gtype);

    if (!graph_reuse_disable && res->can_reuse(gparams)) {
        //LLAMA_LOG_DEBUG("%s: reusing previous graph\n", __func__);

        n_reused++;
    } else {
        res->reset();

        ggml_backend_sched_reset(sched.get());
        ggml_backend_sched_set_eval_callback(sched.get(), cparams.cb_eval, cparams.cb_eval_user_data);

        //const auto t_start_us = ggml_time_us();

        gf = model.build_graph(gparams);

        //LLAMA_LOG_INFO("graph build time: %.3f ms\n", (ggml_time_us() - t_start_us)/1000.0);

        if (!gf) {
            LLAMA_LOG_ERROR("%s: failed to initialize graph\n", __func__);
            ret = GGML_STATUS_FAILED;
            return nullptr;
        }

        if (!ggml_backend_sched_alloc_graph(sched.get(), gf)) {
            LLAMA_LOG_ERROR("%s: failed to allocate graph\n", __func__);
            ret = GGML_STATUS_ALLOC_FAILED;
            return nullptr;
        }
    }

    // set the input data for the input tensors
    {
        //const auto t_start_us = ggml_time_us();

        res->set_inputs(&ubatch);

        //LLAMA_LOG_INFO("graph set inputs time: %.3f ms\n", (ggml_time_us() - t_start_us)/1000.0);
    }

    const auto status = graph_compute(res->get_gf(), ubatch.n_tokens > 1);
    if (status != GGML_STATUS_SUCCESS) {
        LLAMA_LOG_ERROR("%s: failed to compute graph, compute status: %d\n", __func__, status);
        ret = status;
        return nullptr;
    }

    ret = GGML_STATUS_SUCCESS;

    return res;
}
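// Note: reuse requires that the graph topology is fully determined by gparams
// (see can_reuse above). Token-generation loops hit this path repeatedly, so a
// decode of N single-token ubatches typically builds the graph once and reuses
// it for the rest, with n_reused counting the hits.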

int llama_context::encode(const llama_batch & batch_inp) {
    GGML_ASSERT((!batch_inp.token && batch_inp.embd) || (batch_inp.token && !batch_inp.embd)); // NOLINT

    if (batch_inp.n_tokens == 0) {
        LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__);
        return -1;
    }

    const auto & hparams = model.hparams;

    const int64_t n_embd  = hparams.n_embd;
    const int64_t n_vocab = model.vocab.n_tokens();

    // note: during encode, we always pass the full sequence starting from pos = 0
    if (!balloc->init(batch_inp, model.vocab, nullptr, n_embd, cparams.kv_unified ? LLAMA_MAX_SEQ : cparams.n_seq_max, true)) {
        LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__);
        return -1;
    }

    const uint32_t n_tokens = balloc->get_n_tokens();

    // [TAG_NO_CACHE_PAD]
    // TODO: add new split mode where we pad the input sequences so that ubatch.equal_seqs == true
    const llama_ubatch ubatch = balloc->split_simple(n_tokens);

    // micro-batching is not possible for non-causal encoding, so we process the batch in a single shot
    GGML_ASSERT(cparams.n_ubatch >= n_tokens && "encoder requires n_ubatch >= n_tokens");

    if (t_compute_start_us == 0) {
        t_compute_start_us = ggml_time_us();
    }

    // TODO: this clear of the buffer can easily be forgotten - need something better
    embd_seq.clear();

    n_queued_tokens += n_tokens;

    // reserve output buffer
    if (output_reserve(n_tokens) < n_tokens) {
        LLAMA_LOG_ERROR("%s: could not reserve space for batch with %u outputs\n", __func__, n_tokens);
        return -2;
    };

    for (uint32_t i = 0; i < n_tokens; ++i) {
        output_ids[i] = i;
    }

    n_outputs = n_tokens;

    const auto causal_attn_org = cparams.causal_attn;

    // always use non-causal attention for encoder graphs
    // TODO: this is a tmp solution until we have a proper way to support enc-dec models
    //       ref: https://github.com/ggml-org/llama.cpp/pull/12181#issuecomment-2730451223
    cparams.causal_attn = false;

    ggml_status status;
    const auto * res = process_ubatch(ubatch, LLM_GRAPH_TYPE_ENCODER, nullptr, status);

    cparams.causal_attn = causal_attn_org;

    if (!res) {
        switch (status) {
            case GGML_STATUS_ABORTED:      return  2;
            case GGML_STATUS_ALLOC_FAILED: return -2;
            case GGML_STATUS_FAILED:       return -3;
            case GGML_STATUS_SUCCESS:      GGML_ABORT("should not happen");
        }
    }

    auto * t_logits = res->get_logits();
    auto * t_embd   = res->get_embd_pooled() ? res->get_embd_pooled() : res->get_embd();

    // extract logits
    if (logits && t_logits) {
        ggml_backend_t backend_res = ggml_backend_sched_get_tensor_backend(sched.get(), t_logits);
        GGML_ASSERT(backend_res != nullptr);
        GGML_ASSERT(logits != nullptr);

        ggml_backend_tensor_get_async(backend_res, t_logits, logits, 0, n_tokens*n_vocab*sizeof(float));
    }

    // extract embeddings
    if (embd && t_embd) {
        ggml_backend_t backend_embd = ggml_backend_sched_get_tensor_backend(sched.get(), t_embd);
        GGML_ASSERT(backend_embd != nullptr);

        switch (cparams.pooling_type) {
            case LLAMA_POOLING_TYPE_NONE:
                {
                    // extract token embeddings
                    GGML_ASSERT(embd != nullptr);

                    GGML_ASSERT(n_tokens*n_embd <= (int64_t) embd_size);
                    ggml_backend_tensor_get_async(backend_embd, t_embd, embd, 0, n_tokens*n_embd*sizeof(float));
                } break;
            case LLAMA_POOLING_TYPE_MEAN:
            case LLAMA_POOLING_TYPE_CLS:
            case LLAMA_POOLING_TYPE_LAST:
                {
                    // extract sequence embeddings
                    auto & embd_seq_out = embd_seq;

                    for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) {
                        const llama_seq_id seq_id  = ubatch.seq_id_unq[s];
                        const int32_t      seq_idx = ubatch.seq_idx[seq_id];

                        embd_seq_out[seq_id].resize(n_embd);
                        ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_embd*seq_idx)*sizeof(float), n_embd*sizeof(float));
                    }
                } break;
            case LLAMA_POOLING_TYPE_RANK:
                {
                    // extract the rerank score - n_cls_out floats per sequence
                    auto & embd_seq_out = embd_seq;

                    const uint32_t n_cls_out = hparams.n_cls_out;

                    for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) {
                        const llama_seq_id seq_id  = ubatch.seq_id_unq[s];
                        const int32_t      seq_idx = ubatch.seq_idx[seq_id];

                        embd_seq_out[seq_id].resize(n_cls_out);
                        ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_cls_out*seq_idx)*sizeof(float), n_cls_out*sizeof(float));
                    }
                } break;
            case LLAMA_POOLING_TYPE_UNSPECIFIED:
                {
                    GGML_ABORT("unknown pooling type");
                }
        }
    }

    // TODO: hacky solution
    if (model.arch == LLM_ARCH_T5 && t_embd) {
        //cross.t_embd = t_embd;

        synchronize();

        cross.n_embd = t_embd->ne[0];
        cross.n_enc  = t_embd->ne[1];
        cross.v_embd.resize(cross.n_embd*cross.n_enc);
        memcpy(cross.v_embd.data(), embd, ggml_nbytes(t_embd));

        const auto & batch = balloc->get_batch();

        // remember the sequence ids used during the encoding - needed for cross attention later
        cross.seq_ids_enc.resize(n_tokens);
        for (uint32_t i = 0; i < n_tokens; i++) {
            cross.seq_ids_enc[i].clear();

            for (int s = 0; s < batch.n_seq_id[i]; s++) {
                const llama_seq_id seq_id = batch.seq_id[i][s];

                cross.seq_ids_enc[i].insert(seq_id);
            }
        }
    }

    return 0;
}
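// Illustrative use (public API): encoder-only and enc-dec models go through
// llama_encode; a sketch assuming a tokenized prompt in `tokens`:
//
//     llama_batch batch = llama_batch_get_one(tokens.data(), (int32_t) tokens.size());
//     if (llama_encode(ctx, batch) != 0) {
//         // negative: hard error (see the return codes above)
//     }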

int llama_context::decode(const llama_batch & batch_inp) {
    GGML_ASSERT((!batch_inp.token && batch_inp.embd) || (batch_inp.token && !batch_inp.embd)); // NOLINT

    if (!memory) {
        LLAMA_LOG_DEBUG("%s: cannot decode batches with this context (calling encode() instead)\n", __func__);
        return encode(batch_inp);
    }

    if (batch_inp.n_tokens == 0) {
        LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__);
        return -1;
    }

    const auto & vocab   = model.vocab;
    const auto & hparams = model.hparams;

    const int64_t n_vocab = vocab.n_tokens();
    const int64_t n_embd  = hparams.n_embd;

    const bool output_all = false;

    if (!balloc->init(batch_inp, vocab, memory.get(), n_embd, cparams.kv_unified ? LLAMA_MAX_SEQ : cparams.n_seq_max, output_all)) {
        LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__);
        return -1;
    }

    const uint32_t n_tokens_all  = balloc->get_n_tokens();
    const uint32_t n_outputs_all = balloc->get_n_outputs();

    if (output_all) {
        // require that all tokens are output
        if (n_outputs_all != n_tokens_all) {
            LLAMA_LOG_ERROR("%s: pooled embedding requires that all tokens are output (n_outputs_all = %d, n_tokens_all = %d)\n",
                    __func__, n_outputs_all, n_tokens_all);
            return -1;
        }
    }

    GGML_ASSERT(n_tokens_all <= cparams.n_batch);

    GGML_ASSERT((cparams.causal_attn || cparams.n_ubatch >= n_tokens_all) && "non-causal attention requires n_ubatch >= n_tokens");

    if (t_compute_start_us == 0) {
        t_compute_start_us = ggml_time_us();
    }
    n_queued_tokens += n_tokens_all;

    // TODO: this clear of the buffer can easily be forgotten - need something better
    embd_seq.clear();
    output_swaps.clear();

    bool did_optimize = false;

    // handle any pending shifts/copies
    memory_update(false);

    llama_memory_context_ptr mctx;

    while (true) {
        mctx = memory->init_batch(*balloc, cparams.n_ubatch, output_all);
        if (!mctx) {
            return -2;
        }

        switch (mctx->get_status()) {
            case LLAMA_MEMORY_STATUS_SUCCESS:
                {
                } break;
            case LLAMA_MEMORY_STATUS_NO_UPDATE:
                {
                    LLAMA_LOG_ERROR("%s: unexpected memory context status: %d\n", __func__, mctx->get_status());

                    return -2;
                }
            case LLAMA_MEMORY_STATUS_FAILED_PREPARE:
                {
                    if (!did_optimize) {
                        did_optimize = true;

                        if (memory_update(true)) {
                            LLAMA_LOG_DEBUG("%s: retrying batch size %d after cache optimization\n", __func__, balloc->get_n_tokens());

                            continue;
                        }
                    }

                    LLAMA_LOG_WARN("%s: failed to find a memory slot for batch of size %d\n", __func__, balloc->get_n_tokens());

                    return 1;
                }
            case LLAMA_MEMORY_STATUS_FAILED_COMPUTE:
                {
                    LLAMA_LOG_ERROR("%s: compute failed while preparing batch of size %d\n", __func__, balloc->get_n_tokens());

                    return -2;
                }
        }

        break;
    }

    // reserve output buffer
    if (output_reserve(n_outputs_all) < n_outputs_all) {
        LLAMA_LOG_ERROR("%s: could not reserve space for batch with %d outputs\n", __func__, n_outputs_all);
        return -2;
    };

    int64_t n_outputs_prev = 0;

    do {
        const auto & ubatch = mctx->get_ubatch();

        // count the outputs in this ubatch
        {
            int32_t n_outputs_new = 0;

            if (n_outputs_all == n_tokens_all) {
                n_outputs_new = ubatch.n_tokens;
            } else {
                for (uint32_t i = 0; i < ubatch.n_tokens; i++) {
                    n_outputs_new += (int32_t) (ubatch.output[i] != 0);
                }
            }

            // needs to happen before the graph is built
            n_outputs = n_outputs_new;
        }

        ggml_status status;
        const auto * res = process_ubatch(ubatch, LLM_GRAPH_TYPE_DECODER, mctx.get(), status);

        if (!res) {
            // the last ubatch failed or was aborted -> remove all positions of that ubatch from the memory module
            llama_pos pos_min[LLAMA_MAX_SEQ];
            for (int s = 0; s < LLAMA_MAX_SEQ; ++s) {
                pos_min[s] = std::numeric_limits<llama_pos>::max();
            }

            for (uint32_t i = 0; i < ubatch.n_tokens; ++i) {
                const auto & seq_id = ubatch.seq_id[i][0];

                pos_min[seq_id] = std::min(pos_min[seq_id], ubatch.pos[i]);
            }

            for (int s = 0; s < LLAMA_MAX_SEQ; ++s) {
                if (pos_min[s] == std::numeric_limits<llama_pos>::max()) {
                    continue;
                }

                LLAMA_LOG_WARN("%s: removing memory module entries for seq_id = %d, pos = [%d, +inf)\n", __func__, s, pos_min[s]);

                memory->seq_rm(s, pos_min[s], -1);
            }

            switch (status) {
                case GGML_STATUS_ABORTED:      return  2;
                case GGML_STATUS_ALLOC_FAILED: return -2;
                case GGML_STATUS_FAILED:       return -3;
                case GGML_STATUS_SUCCESS:      GGML_ABORT("should not happen");
            }
        }

        // plot the computation graph in dot format (for debugging purposes)
        //if (n_past%100 == 0) {
        //    ggml_graph_dump_dot(gf, NULL, "llama.dot");
        //}

        auto * t_logits = res->get_logits();
        auto * t_embd   = cparams.embeddings ? res->get_embd() : nullptr;

        if (t_embd && res->get_embd_pooled()) {
            t_embd = res->get_embd_pooled();
        }

        // extract logits
        if (t_logits && n_outputs > 0) {
            ggml_backend_t backend_res = ggml_backend_sched_get_tensor_backend(sched.get(), t_logits);
            GGML_ASSERT(backend_res != nullptr);
            GGML_ASSERT(logits != nullptr);

            float * logits_out = logits + n_outputs_prev*n_vocab;

            if (n_outputs) {
                GGML_ASSERT( n_outputs_prev + n_outputs <= n_outputs_all);
                GGML_ASSERT((n_outputs_prev + n_outputs)*n_vocab <= (int64_t) logits_size);
                ggml_backend_tensor_get_async(backend_res, t_logits, logits_out, 0, n_outputs*n_vocab*sizeof(float));
            }
        }

        // extract embeddings
        if (t_embd && n_outputs > 0) {
            ggml_backend_t backend_embd = ggml_backend_sched_get_tensor_backend(sched.get(), t_embd);
            GGML_ASSERT(backend_embd != nullptr);

            switch (cparams.pooling_type) {
                case LLAMA_POOLING_TYPE_NONE:
                    {
                        // extract token embeddings
                        GGML_ASSERT(embd != nullptr);
                        float * embd_out = embd + n_outputs_prev*n_embd;

                        if (n_outputs) {
                            GGML_ASSERT( n_outputs_prev + n_outputs <= n_outputs_all);
                            GGML_ASSERT((n_outputs_prev + n_outputs)*n_embd <= (int64_t) embd_size);
                            ggml_backend_tensor_get_async(backend_embd, t_embd, embd_out, 0, n_outputs*n_embd*sizeof(float));
                        }
                    } break;
                case LLAMA_POOLING_TYPE_MEAN:
                case LLAMA_POOLING_TYPE_CLS:
                case LLAMA_POOLING_TYPE_LAST:
                    {
                        // extract sequence embeddings (cleared before processing each batch)
                        auto & embd_seq_out = embd_seq;

                        for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) {
                            const llama_seq_id seq_id  = ubatch.seq_id_unq[s];
                            const int32_t      seq_idx = ubatch.seq_idx[seq_id];

                            embd_seq_out[seq_id].resize(n_embd);
                            ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_embd*seq_idx)*sizeof(float), n_embd*sizeof(float));
                        }
                    } break;
                case LLAMA_POOLING_TYPE_RANK:
                    {
                        // extract the rerank score - n_cls_out floats per sequence
                        auto & embd_seq_out = embd_seq;

                        const uint32_t n_cls_out = hparams.n_cls_out;

                        for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) {
                            const llama_seq_id seq_id  = ubatch.seq_id_unq[s];
                            const int32_t      seq_idx = ubatch.seq_idx[seq_id];

                            embd_seq_out[seq_id].resize(n_cls_out);
                            ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_cls_out*seq_idx)*sizeof(float), n_cls_out*sizeof(float));
                        }
                    } break;
                case LLAMA_POOLING_TYPE_UNSPECIFIED:
                    {
                        GGML_ABORT("unknown pooling type");
                    }
            }
        }

        n_outputs_prev += n_outputs;
    } while (mctx->next());

    // set to total number of outputs in the batch, for use in llama_get_logits_ith
    n_outputs = n_outputs_all;

    // set output mappings
    if (n_outputs > 0) {
        bool sorted_output = true;

        auto & out_ids = balloc->get_out_ids();

        GGML_ASSERT(out_ids.size() == (size_t) n_outputs);

        for (int64_t i = 0; i < n_outputs; ++i) {
            int64_t out_id = out_ids[i];
            output_ids[out_id] = i;
            if (out_id != i) {
                sorted_output = false;
            }
        }

        // make the outputs have the same order they had in the user-provided batch
        // note: this is mostly relevant for recurrent models atm
        if (!sorted_output) {
            GGML_ASSERT((size_t) n_outputs == out_ids.size());

            // TODO: is there something more efficient which also minimizes swaps?
            // selection sort, to minimize swaps (from https://en.wikipedia.org/wiki/Selection_sort)
            for (uint32_t i = 0; i < n_outputs - 1; ++i) {
                uint32_t j_min = i;
                for (uint32_t j = i + 1; j < n_outputs; ++j) {
                    if (out_ids[j] < out_ids[j_min]) {
                        j_min = j;
                    }
                }
                if (j_min == i) {
                    continue;
                }
                std::swap(out_ids[i], out_ids[j_min]);

                // remember the swaps and apply them lazily upon logits/embeddings access
                output_swaps.push_back({ i, j_min });
            }

            std::fill(output_ids.begin(), output_ids.end(), -1);

            for (uint32_t i = 0; i < n_outputs; ++i) {
                output_ids[out_ids[i]] = i;
            }
        }
    }

    // wait for the computation to finish (automatically done when obtaining the model output)
    //synchronize();

    return 0;
}
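// Illustrative decode loop (public API), keyed to the return codes above:
// 0 = success, 1 = no memory slot found (retryable), 2 = aborted,
// negative = fatal. A sketch:
//
//     llama_batch batch = llama_batch_get_one(&new_token, 1);
//     const int ret = llama_decode(ctx, batch);
//     if (ret == 0) {
//         const float * logits = llama_get_logits_ith(ctx, -1);
//         // ... sample the next token from logits ...
//     } else if (ret == 1) {
//         // shrink the batch or free sequences from memory, then retry
//     }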

//
// output
//

uint32_t llama_context::output_reserve(int32_t n_outputs) {
    const auto & hparams = model.hparams;
    const auto & vocab   = model.vocab;

    const int64_t n_outputs_max = std::max<int64_t>(n_outputs, n_seq_max());

    const auto n_batch = cparams.n_batch;
    const auto n_vocab = vocab.n_tokens();
    const auto n_embd  = hparams.n_embd;

    bool has_logits = true;
    bool has_embd   = cparams.embeddings;

    // TODO: hacky enc-dec support
    if (model.arch == LLM_ARCH_T5) {
        has_logits = true;
        has_embd   = true;
    }

    logits_size = has_logits ? n_vocab*n_outputs_max : 0;
    embd_size   = has_embd   ?  n_embd*n_outputs_max : 0;

    if (output_ids.empty()) {
        // init, never resized afterwards
        output_ids.resize(n_batch);
    }

    const size_t prev_size = buf_output ? ggml_backend_buffer_get_size(buf_output.get()) : 0;
    const size_t new_size  = (logits_size + embd_size) * sizeof(float);

    // alloc only when more than the current capacity is required
    // TODO: also consider shrinking the buffer
    if (!buf_output || prev_size < new_size) {
        if (buf_output) {
#ifndef NDEBUG
            // This doesn't happen often, but may be annoying in some cases (like the HellaSwag benchmark)
            LLAMA_LOG_INFO("%s: reallocating output buffer from size %.02f MiB to %.02f MiB\n", __func__, prev_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0);
#endif
            buf_output = nullptr;
            logits = nullptr;
            embd = nullptr;
        }

        auto * buft = ggml_backend_cpu_buffer_type();
        // try to use the host buffer of the device where the output tensor is allocated for faster transfer to system memory
        auto * output_dev = model.dev_output();
        auto * output_dev_host_buft = output_dev ? ggml_backend_dev_host_buffer_type(output_dev) : nullptr;
        if (output_dev_host_buft) {
            buft = output_dev_host_buft;
        }
        buf_output.reset(ggml_backend_buft_alloc_buffer(buft, new_size));
        if (buf_output == nullptr) {
            LLAMA_LOG_ERROR("%s: failed to allocate output buffer of size %.2f MiB\n", __func__, new_size / (1024.0 * 1024.0));
            return 0;
        }
    }

    float * output_base = (float *) ggml_backend_buffer_get_base(buf_output.get());

    logits = has_logits ? output_base               : nullptr;
    embd   = has_embd   ? output_base + logits_size : nullptr;

    // set all ids as invalid (negative)
    std::fill(output_ids.begin(), output_ids.end(), -1);

    this->n_outputs = 0;

    return n_outputs_max;
}
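// Sizing note: the buffer holds n_outputs_max rows of n_vocab logits plus
// (optionally) n_embd embeddings, stored as floats. For example, n_vocab = 32000
// and n_outputs_max = 8 with embeddings disabled gives
// 32000 * 8 * sizeof(float) = 1024000 bytes (~0.98 MiB).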

void llama_context::output_reorder() {
    const uint64_t n_vocab = model.vocab.n_tokens();
    const uint64_t n_embd  = model.hparams.n_embd;

    for (size_t s = 0; s < output_swaps.size(); ++s) {
        const uint64_t i0 = output_swaps[s].i0;
        const uint64_t i1 = output_swaps[s].i1;

        if (logits_size > 0) {
            for (uint64_t k = 0; k < n_vocab; k++) {
                std::swap(logits[i0*n_vocab + k], logits[i1*n_vocab + k]);
            }
        }

        if (embd_size > 0) {
            for (uint64_t k = 0; k < n_embd; k++) {
                std::swap(embd[i0*n_embd + k], embd[i1*n_embd + k]);
            }
        }
    }

    output_swaps.clear();
}

//
// graph
//

uint32_t llama_context::graph_max_nodes() const {
    return std::max<uint32_t>(1024u, 8u*model.n_tensors());
}

llm_graph_result * llama_context::get_gf_res_reserve() const {
    return static_cast<llm_graph_result *>(gf_res_reserve.get());
}

ggml_cgraph * llama_context::graph_reserve(uint32_t n_tokens, uint32_t n_seqs, uint32_t n_outputs, const llama_memory_context_i * mctx, bool split_only) {
    LLAMA_LOG_DEBUG("%s: reserving a graph for ubatch with n_tokens = %4u, n_seqs = %2u, n_outputs = %4u\n", __func__, n_tokens, n_seqs, n_outputs);
    GGML_ASSERT(n_outputs >= 1);

    if (n_tokens % n_seqs != 0) {
        n_tokens  = ((n_tokens + (n_seqs - 1)) / n_seqs) * n_seqs; // round to next multiple of n_seqs
        n_outputs = std::min(n_outputs, n_tokens);

        LLAMA_LOG_DEBUG("%s: making n_tokens a multiple of n_seqs - n_tokens = %u, n_seqs = %u, n_outputs = %u\n", __func__, n_tokens, n_seqs, n_outputs);
    }

    ggml_backend_sched_reset(sched.get());

    // when the scheduler is reset, we cannot reuse the old graph, so we reset the previous graph result to prevent that
    gf_res_prev->reset();

    // store the n_outputs as it is, and restore it afterwards
    // TODO: not sure if needed, might simplify in the future by removing this
    const auto save_n_outputs = this->n_outputs;

    this->n_outputs = n_outputs;

    llama_batch_allocr balloc(model.hparams.n_pos_per_embd());
    llama_ubatch ubatch = balloc.ubatch_reserve(n_tokens/n_seqs, n_seqs);

    auto * res = gf_res_reserve.get();

    const auto gparams = graph_params(res, ubatch, mctx, LLM_GRAPH_TYPE_DEFAULT);

    res->reset();

    auto * gf = model.build_graph(gparams);

    this->n_outputs = save_n_outputs;

    // initialize scheduler with the specified graph
    if (split_only) {
        ggml_backend_sched_split_graph(sched.get(), gf);
    } else if (!ggml_backend_sched_reserve(sched.get(), gf)) {
        LLAMA_LOG_ERROR("%s: failed to allocate compute buffers\n", __func__);
        return nullptr;
    }

    return gf;
}

llm_graph_params llama_context::graph_params(
                      llm_graph_result * res,
                    const llama_ubatch & ubatch,
          const llama_memory_context_i * mctx,
                        llm_graph_type   gtype) const {
    return {
        /*.arch        =*/ model.arch,
        /*.hparams     =*/ model.hparams,
        /*.cparams     =*/ cparams,
        /*.ubatch      =*/ ubatch,
        /*.gtype       =*/ gtype,
        /*.sched       =*/ sched.get(),
        /*.backend_cpu =*/ backend_cpu,
        /*.cvec        =*/ &cvec,
        /*.loras       =*/ &loras,
        /*.mctx        =*/ mctx,
        /*.cross       =*/ &cross,
        /*.n_outputs   =*/ n_outputs,
        /*.cb          =*/ graph_get_cb(),
        /*.res         =*/ res,
    };
}

ggml_status llama_context::graph_compute(
            ggml_cgraph * gf,
                   bool   batched) {
    int n_threads        = batched ? cparams.n_threads_batch : cparams.n_threads;
    ggml_threadpool_t tp = batched ? threadpool_batch        : threadpool;

    if (backend_cpu != nullptr) {
        auto * reg = ggml_backend_dev_backend_reg(ggml_backend_get_device(backend_cpu));
        auto * set_threadpool_fn = (decltype(ggml_backend_cpu_set_threadpool) *) ggml_backend_reg_get_proc_address(reg, "ggml_backend_cpu_set_threadpool");
        if (set_threadpool_fn) {
            set_threadpool_fn(backend_cpu, tp);
        }
    }

    // set the number of threads for all the backends
    for (const auto & set_n_threads_fn : set_n_threads_fns) {
        set_n_threads_fn.second(set_n_threads_fn.first, n_threads);
    }

    auto status = ggml_backend_sched_graph_compute_async(sched.get(), gf);
    if (status != GGML_STATUS_SUCCESS) {
        LLAMA_LOG_ERROR("%s: ggml_backend_sched_graph_compute_async failed with error %d\n", __func__, status);
    }

    // fprintf(stderr, "splits: %d\n", ggml_backend_sched_get_n_splits(sched));

    return status;
}

llm_graph_cb llama_context::graph_get_cb() const {
    return [&](const llama_ubatch & ubatch, ggml_tensor * cur, const char * name, int il) {
        if (il >= 0) {
            ggml_format_name(cur, "%s-%d", name, il);
        } else {
            ggml_set_name(cur, name);
        }

        if (!cparams.offload_kqv) {
            if (strcmp(name, "kqv_merged_cont") == 0) {
                // all nodes between the KV store and the attention output are run on the CPU
                ggml_backend_sched_set_tensor_backend(sched.get(), cur, backend_cpu);
            }
        }

        // norm may be automatically assigned to the backend of the previous layer, increasing data transfer between backends
        // FIXME: fix in ggml_backend_sched
        const bool full_offload = model.params.n_gpu_layers > (int) model.hparams.n_layer;
        if (ubatch.n_tokens < 32 || full_offload) {
            if (il != -1 && strcmp(name, "norm") == 0) {
                const auto & dev_layer = model.dev_layer(il);
                for (const auto & backend : backends) {
                    if (ggml_backend_get_device(backend.get()) == dev_layer) {
                        if (ggml_backend_supports_op(backend.get(), cur)) {
                            ggml_backend_sched_set_tensor_backend(sched.get(), cur, backend.get());
                        }
                    }
                }
            }
        }
    };
}

//
// state save/load
//

class llama_io_write_dummy : public llama_io_write_i {
public:
    llama_io_write_dummy() = default;

    void write(const void * /* src */, size_t size) override {
        size_written += size;
    }

    void write_tensor(const ggml_tensor * /* tensor */, size_t /* offset */, size_t size) override {
        size_written += size;
    }

    size_t n_bytes() override {
        return size_written;
    }

private:
    size_t size_written = 0;
};

class llama_io_write_buffer : public llama_io_write_i {
public:
    llama_io_write_buffer(uint8_t * p, size_t len) : ptr(p), buf_size(len) {}

    void write(const void * src, size_t size) override {
        if (size > buf_size) {
            throw std::runtime_error("unexpectedly reached end of buffer");
        }
        memcpy(ptr, src, size);
        ptr += size;
        size_written += size;
        buf_size -= size;
    }

    void write_tensor(const ggml_tensor * tensor, size_t offset, size_t size) override {
        if (size > buf_size) {
            throw std::runtime_error("unexpectedly reached end of buffer");
        }
        ggml_backend_tensor_get(tensor, ptr, offset, size);
        ptr += size;
        size_written += size;
        buf_size -= size;
    }

    size_t n_bytes() override {
        return size_written;
    }

private:
    uint8_t * ptr;
    size_t buf_size = 0;
    size_t size_written = 0;
};

class llama_io_read_buffer : public llama_io_read_i {
public:
    llama_io_read_buffer(const uint8_t * p, size_t len) : ptr(p), buf_size(len) {}

    const uint8_t * read(size_t size) override {
        const uint8_t * base_ptr = ptr;
        if (size > buf_size) {
            throw std::runtime_error("unexpectedly reached end of buffer");
        }
        ptr += size;
        size_read += size;
        buf_size -= size;
        return base_ptr;
    }

    void read_to(void * dst, size_t size) override {
        memcpy(dst, read(size), size);
    }

    size_t n_bytes() override {
        return size_read;
    }

private:
    const uint8_t * ptr;
    size_t buf_size = 0;
    size_t size_read = 0;
};

class llama_io_write_file : public llama_io_write_i {
public:
    llama_io_write_file(llama_file * f) : file(f) {}

    void write(const void * src, size_t size) override {
        file->write_raw(src, size);
        size_written += size;
    }

    void write_tensor(const ggml_tensor * tensor, size_t offset, size_t size) override {
        temp_buffer.resize(size);
        ggml_backend_tensor_get(tensor, temp_buffer.data(), offset, size);
        write(temp_buffer.data(), temp_buffer.size());
    }

    size_t n_bytes() override {
        return size_written;
    }

private:
    llama_file * file;
    size_t size_written = 0;
    std::vector<uint8_t> temp_buffer;
};

class llama_io_read_file : public llama_io_read_i {
public:
    llama_io_read_file(llama_file * f) : file(f) {}

    void read_to(void * dst, size_t size) override {
        file->read_raw(dst, size);
        size_read += size;
    }

    const uint8_t * read(size_t size) override {
        temp_buffer.resize(size);
        read_to(temp_buffer.data(), size);
        return temp_buffer.data();
    }

    size_t n_bytes() override {
        return size_read;
    }

private:
    llama_file * file;
    size_t size_read = 0;
    std::vector<uint8_t> temp_buffer;
};

size_t llama_context::state_get_size() {
    llama_io_write_dummy io;
    try {
        return state_write_data(io);
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: error getting state size: %s\n", __func__, err.what());
        return 0;
    }
}

size_t llama_context::state_get_data(uint8_t * dst, size_t size) {
    llama_io_write_buffer io(dst, size);
    try {
        return state_write_data(io);
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: error saving state: %s\n", __func__, err.what());
        return 0;
    }
}

size_t llama_context::state_set_data(const uint8_t * src, size_t size) {
    llama_io_read_buffer io(src, size);
    try {
        return state_read_data(io);
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: error loading state: %s\n", __func__, err.what());
        return 0;
    }
}

size_t llama_context::state_seq_get_size(llama_seq_id seq_id, llama_state_seq_flags flags) {
    llama_io_write_dummy io;
    try {
        return state_seq_write_data(io, seq_id, flags);
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: error getting state size: %s\n", __func__, err.what());
        return 0;
    }
}

size_t llama_context::state_seq_get_data(llama_seq_id seq_id, uint8_t * dst, size_t size, llama_state_seq_flags flags) {
    llama_io_write_buffer io(dst, size);
    try {
        return state_seq_write_data(io, seq_id, flags);
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: error saving state: %s\n", __func__, err.what());
        return 0;
    }
}

size_t llama_context::state_seq_set_data(llama_seq_id seq_id, const uint8_t * src, size_t size, llama_state_seq_flags flags) {
    llama_io_read_buffer io(src, size);
    try {
        return state_seq_read_data(io, seq_id, flags);
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: error loading state: %s\n", __func__, err.what());
        return 0;
    }
}
|
|
|
|
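
// on-disk session file layout, as produced by state_save_file() below and
// consumed by state_load_file():
//
//     uint32_t    magic        (LLAMA_SESSION_MAGIC)
//     uint32_t    version      (LLAMA_SESSION_VERSION)
//     uint32_t    n_token_count
//     llama_token tokens[n_token_count]
//     <state blob written by state_write_data()>
//
// the per-sequence files below use the same shape with LLAMA_STATE_SEQ_MAGIC /
// LLAMA_STATE_SEQ_VERSION and a state blob written by state_seq_write_data().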

bool llama_context::state_load_file(const char * filepath, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
    llama_file file(filepath, "rb");

    // sanity checks
    {
        const uint32_t magic   = file.read_u32();
        const uint32_t version = file.read_u32();

        if (magic != LLAMA_SESSION_MAGIC || version != LLAMA_SESSION_VERSION) {
            LLAMA_LOG_ERROR("%s: unknown (magic, version) for session file: %08x, %08x\n", __func__, magic, version);
            return false;
        }
    }

    // load the prompt
    {
        const uint32_t n_token_count = file.read_u32();

        if (n_token_count > n_token_capacity) {
            LLAMA_LOG_ERROR("%s: token count in session file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity);
            return false;
        }

        file.read_raw(tokens_out, sizeof(llama_token) * n_token_count);
        *n_token_count_out = n_token_count;
    }

    // restore the context state
    {
        const size_t n_state_size_cur = file.size() - file.tell();

        llama_io_read_file io(&file);
        const size_t n_read = state_read_data(io);

        if (n_read != n_state_size_cur) {
            LLAMA_LOG_ERROR("%s: did not read all of the session file data! size %zu, got %zu\n", __func__, n_state_size_cur, n_read);
            return false;
        }
    }

    return true;
}

bool llama_context::state_save_file(const char * filepath, const llama_token * tokens, size_t n_token_count) {
    llama_file file(filepath, "wb");

    file.write_u32(LLAMA_SESSION_MAGIC);
    file.write_u32(LLAMA_SESSION_VERSION);

    // save the prompt
    file.write_u32((uint32_t) n_token_count);
    file.write_raw(tokens, sizeof(llama_token) * n_token_count);

    // save the context state using stream saving
    llama_io_write_file io(&file);
    state_write_data(io);

    return true;
}

size_t llama_context::state_seq_load_file(llama_seq_id seq_id, const char * filepath, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
    llama_file file(filepath, "rb");

    // version checks
    {
        const uint32_t magic   = file.read_u32();
        const uint32_t version = file.read_u32();

        if (magic != LLAMA_STATE_SEQ_MAGIC || version != LLAMA_STATE_SEQ_VERSION) {
            LLAMA_LOG_ERROR("%s: unknown (magic, version) for sequence state file: %08x, %08x\n", __func__, magic, version);
            return 0;
        }
    }

    // load the prompt
    {
        const uint32_t n_token_count = file.read_u32();

        if (n_token_count > n_token_capacity) {
            LLAMA_LOG_ERROR("%s: token count in sequence state file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity);
            return 0;
        }

        file.read_raw(tokens_out, sizeof(llama_token) * n_token_count);
        *n_token_count_out = n_token_count;
    }

    // restore the context state
    {
        const size_t state_size = file.size() - file.tell();
        llama_io_read_file io(&file);
        const size_t nread = state_seq_read_data(io, seq_id, 0);
        if (!nread) {
            LLAMA_LOG_ERROR("%s: failed to restore sequence state\n", __func__);
            return 0;
        }
        GGML_ASSERT(nread <= state_size);
        GGML_ASSERT(nread + sizeof(uint32_t) * 3 + sizeof(llama_token) * *n_token_count_out == file.tell());
    }

    return file.tell();
}

size_t llama_context::state_seq_save_file(llama_seq_id seq_id, const char * filepath, const llama_token * tokens, size_t n_token_count) {
    llama_file file(filepath, "wb");

    file.write_u32(LLAMA_STATE_SEQ_MAGIC);
    file.write_u32(LLAMA_STATE_SEQ_VERSION);

    // save the prompt
    file.write_u32((uint32_t) n_token_count);
    file.write_raw(tokens, sizeof(llama_token) * n_token_count);

    // save the context state using stream saving
    llama_io_write_file io(&file);
    state_seq_write_data(io, seq_id, 0);

    const size_t res = file.tell();
    GGML_ASSERT(res == sizeof(uint32_t) * 3 + sizeof(llama_token) * n_token_count + io.n_bytes());

    return res;
}
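
// full-context state serialization order (mirrored by state_read_data() below):
// model arch string, output ids, logits, embeddings, and finally the memory
// module (e.g. the KV cache), when present.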

size_t llama_context::state_write_data(llama_io_write_i & io) {
    LLAMA_LOG_DEBUG("%s: writing state\n", __func__);

    // write model info
    {
        LLAMA_LOG_DEBUG("%s: - writing model info\n", __func__);

        const std::string arch_str = llm_arch_name(model.arch);
        io.write_string(arch_str);
        // TODO: add more model-specific info which should prevent loading the session file if not identical
    }

    // write output ids
    {
        LLAMA_LOG_DEBUG("%s: - writing output ids\n", __func__);

        const auto n_outputs = this->n_outputs;
        const auto & output_ids = this->output_ids;

        std::vector<int32_t> w_output_pos;

        w_output_pos.resize(n_outputs);

        // build a more compact representation of the output ids
        for (size_t i = 0; i < n_batch(); ++i) {
            // map an output id to a position in the batch
            int64_t pos = output_ids[i];
            if (pos >= 0) {
                GGML_ASSERT(pos < n_outputs);
                w_output_pos[pos] = i;
            }
        }

        io.write(&n_outputs, sizeof(n_outputs));

        if (n_outputs) {
            io.write(w_output_pos.data(), n_outputs * sizeof(int32_t));
        }
    }

    // write logits
    {
        LLAMA_LOG_DEBUG("%s: - writing logits\n", __func__);

        const uint64_t logits_size = std::min((uint64_t) this->logits_size, (uint64_t) n_outputs * model.vocab.n_tokens());

        io.write(&logits_size, sizeof(logits_size));

        if (logits_size) {
            io.write(logits, logits_size * sizeof(float));
        }
    }

    // write embeddings
    {
        LLAMA_LOG_DEBUG("%s: - writing embeddings\n", __func__);

        const uint64_t embd_size = std::min((uint64_t) this->embd_size, (uint64_t) n_outputs * model.hparams.n_embd);

        io.write(&embd_size, sizeof(embd_size));

        if (embd_size) {
            io.write(embd, embd_size * sizeof(float));
        }
    }

    if (memory != nullptr) {
        LLAMA_LOG_DEBUG("%s: - writing memory module\n", __func__);
        memory->state_write(io);
    }

    return io.n_bytes();
}

size_t llama_context::state_read_data(llama_io_read_i & io) {
    LLAMA_LOG_DEBUG("%s: reading state\n", __func__);

    // read model info
    {
        LLAMA_LOG_DEBUG("%s: - reading model info\n", __func__);

        const std::string cur_arch_str = llm_arch_name(model.arch);

        std::string arch_str;
        io.read_string(arch_str);
        if (cur_arch_str != arch_str) {
            throw std::runtime_error(format("wrong model arch: '%s' instead of '%s'", arch_str.c_str(), cur_arch_str.c_str()));
        }
        // TODO: add more info which needs to be identical but which is not verified otherwise
    }

    // read output ids
    {
        LLAMA_LOG_DEBUG("%s: - reading output ids\n", __func__);

        auto n_outputs = this->n_outputs;
        io.read_to(&n_outputs, sizeof(n_outputs));

        if (n_outputs > output_reserve(n_outputs)) {
            throw std::runtime_error("could not reserve outputs");
        }

        std::vector<int32_t> output_pos;

        if (n_outputs) {
            output_pos.resize(n_outputs);
            io.read_to(output_pos.data(), n_outputs * sizeof(int32_t));

            for (int32_t i = 0; i < (int32_t) output_pos.size(); ++i) {
                int32_t id = output_pos[i];
                if ((uint32_t) id >= n_batch()) {
                    throw std::runtime_error(format("invalid output id, %d does not fit in batch size of %u", id, n_batch()));
                }
                this->output_ids[id] = i;
            }

            this->n_outputs = n_outputs;
        }
    }

    // read logits
    {
        LLAMA_LOG_DEBUG("%s: - reading logits\n", __func__);

        uint64_t logits_size;
        io.read_to(&logits_size, sizeof(logits_size));

        if (this->logits_size < logits_size) {
            throw std::runtime_error("logits buffer too small");
        }

        if (logits_size) {
            io.read_to(this->logits, logits_size * sizeof(float));
        }
    }

    // read embeddings
    {
        LLAMA_LOG_DEBUG("%s: - reading embeddings\n", __func__);

        uint64_t embd_size;
        io.read_to(&embd_size, sizeof(embd_size));

        if (this->embd_size < embd_size) {
            throw std::runtime_error("embeddings buffer too small");
        }

        if (embd_size) {
            io.read_to(this->embd, embd_size * sizeof(float));
        }
    }

    if (memory) {
        LLAMA_LOG_DEBUG("%s: - reading memory module\n", __func__);

        memory->state_read(io);
    }

    return io.n_bytes();
}

size_t llama_context::state_seq_write_data(llama_io_write_i & io, llama_seq_id seq_id, llama_state_seq_flags flags) {
    if (memory) {
        memory->state_write(io, seq_id, flags);
    }

    return io.n_bytes();
}

size_t llama_context::state_seq_read_data(llama_io_read_i & io, llama_seq_id seq_id, llama_state_seq_flags flags) {
    if (memory) {
        memory->state_read(io, seq_id, flags);
    }

    return io.n_bytes();
}

//
// perf
//

llama_perf_context_data llama_context::perf_get_data() const {
    llama_perf_context_data data = {};

    data.t_start_ms  = 1e-3 * t_start_us;
    data.t_load_ms   = 1e-3 * t_load_us;
    data.t_p_eval_ms = 1e-3 * t_p_eval_us;
    data.t_eval_ms   = 1e-3 * t_eval_us;
    data.n_p_eval    = std::max(1, n_p_eval);
    data.n_eval      = std::max(1, n_eval);
    data.n_reused    = std::max(0, n_reused);

    return data;
}

void llama_context::perf_reset() {
    t_start_us  = ggml_time_us();
    t_eval_us   = n_eval = 0;
    t_p_eval_us = n_p_eval = 0;
    n_reused    = 0;
}

std::map<ggml_backend_buffer_type_t, llama_memory_breakdown_data> llama_context::memory_breakdown() const {
    std::map<ggml_backend_buffer_type_t, llama_memory_breakdown_data> ret;
    for (const auto & buft_size : model.memory_breakdown()) {
        ret[buft_size.first].model += buft_size.second;
    }
    for (const auto & buft_size : memory->memory_breakdown()) {
        ret[buft_size.first].context += buft_size.second;
    }
    for (const auto & backend_ptr : backends) {
        ggml_backend_t backend = backend_ptr.get();
        ret[ggml_backend_sched_get_buffer_type(sched.get(), backend)].compute += ggml_backend_sched_get_buffer_size(sched.get(), backend);
    }
    return ret;
}

//
// training
//

static void llama_set_param(struct ggml_tensor * tensor, llama_opt_param_filter param_filter, void * userdata) {
    if (!tensor || tensor->type != GGML_TYPE_F32) {
        return;
    }
    if (!param_filter(tensor, userdata)) {
        return;
    }
    if (strcmp(tensor->name, "token_embd.weight") == 0) {
        return; // FIXME
    }
    if (strcmp(tensor->name, "rope_freqs.weight") == 0) {
        return; // FIXME
    }
    ggml_set_param(tensor);
}

void llama_context::opt_init(struct llama_model * model, struct llama_opt_params lopt_params) {
    GGML_ASSERT(!opt_ctx);
    model->hparams.n_ctx_train = lopt_params.n_ctx_train > 0 ? lopt_params.n_ctx_train : n_ctx();
    const uint32_t n_batch  = std::min(this->n_batch(), model->hparams.n_ctx_train);
    const uint32_t n_ubatch = std::min(this->n_ubatch(), n_batch);
    GGML_ASSERT(model->hparams.n_ctx_train % n_batch == 0);
    GGML_ASSERT(n_batch % n_ubatch == 0);

    ggml_opt_params opt_params = ggml_opt_default_params(sched.get(), GGML_OPT_LOSS_TYPE_CROSS_ENTROPY);
    opt_params.opt_period      = n_batch / n_ubatch;
    opt_params.get_opt_pars    = lopt_params.get_opt_pars;
    opt_params.get_opt_pars_ud = lopt_params.get_opt_pars_ud;
    opt_params.optimizer       = lopt_params.optimizer_type;
    opt_ctx = ggml_opt_init(opt_params);

    llama_opt_param_filter param_filter = lopt_params.param_filter;
    void * param_filter_ud = lopt_params.param_filter_ud;

    //llama_set_param(model->tok_embd, param_filter, param_filter_ud); // FIXME
    llama_set_param(model->type_embd, param_filter, param_filter_ud);
    llama_set_param(model->pos_embd, param_filter, param_filter_ud);
    llama_set_param(model->tok_norm, param_filter, param_filter_ud);
    llama_set_param(model->tok_norm_b, param_filter, param_filter_ud);
    llama_set_param(model->output_norm, param_filter, param_filter_ud);
    llama_set_param(model->output_norm_b, param_filter, param_filter_ud);
    llama_set_param(model->output, param_filter, param_filter_ud);
    llama_set_param(model->output_b, param_filter, param_filter_ud);
    llama_set_param(model->output_norm_enc, param_filter, param_filter_ud);
    llama_set_param(model->cls, param_filter, param_filter_ud);
    llama_set_param(model->cls_b, param_filter, param_filter_ud);
    llama_set_param(model->cls_out, param_filter, param_filter_ud);
    llama_set_param(model->cls_out_b, param_filter, param_filter_ud);

    for (struct llama_layer & layer : model->layers) {
        for (size_t i = 0; i < sizeof(layer)/sizeof(struct ggml_tensor *); ++i) {
            llama_set_param(reinterpret_cast<struct ggml_tensor **>(&layer)[i], param_filter, param_filter_ud);
        }
    }
}

void llama_context::opt_epoch_iter(
        ggml_opt_dataset_t               dataset,
        ggml_opt_result_t                result,
        const std::vector<llama_token> & tokens,
        const std::vector<llama_token> & labels_sparse,
        llama_batch                    & batch,
        ggml_opt_epoch_callback          callback,
        bool                             train,
        int64_t                          idata_in_loop,
        int64_t                          ndata_in_loop,
        int64_t                          t_loop_start) {
    GGML_ASSERT(opt_ctx);
    const uint32_t n_ctx    = llama_model_n_ctx_train(&model);
    const uint32_t n_batch  = std::min(this->n_batch(), n_ctx);
    const uint32_t n_ubatch = std::min(this->n_ubatch(), n_batch);

    memory->clear(true);

    for (uint32_t pos_ctx = 0; pos_ctx < n_ctx; pos_ctx += n_batch) {
        batch.n_tokens = n_batch;
        for (uint32_t pos_batch = 0; pos_batch < n_batch; ++pos_batch) {
            batch.token   [pos_batch]    = tokens[pos_ctx + pos_batch];
            batch.pos     [pos_batch]    = pos_ctx + pos_batch;
            batch.n_seq_id[pos_batch]    = 1;
            batch.seq_id  [pos_batch][0] = 0;
            batch.logits  [pos_batch]    = true;
        }

        if (!balloc->init(batch, model.vocab, nullptr, model.hparams.n_embd, cparams.kv_unified ? LLAMA_MAX_SEQ : cparams.n_seq_max, true)) {
            LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__);
            return;
        }

        const uint32_t n_tokens_all = balloc->get_n_tokens();

        n_queued_tokens += n_tokens_all;

        embd_seq.clear();

        uint32_t n_outputs_all = n_tokens_all;

        auto mctx = memory->init_batch(*balloc, cparams.n_ubatch, true);
        if (!mctx || mctx->get_status() != LLAMA_MEMORY_STATUS_SUCCESS) {
            LLAMA_LOG_ERROR("%s: could not initialize batch\n", __func__);
            break;
        }

        // reserve output buffer
        if (output_reserve(n_outputs_all) < n_outputs_all) {
            LLAMA_LOG_ERROR("%s: could not reserve space for batch with %d outputs\n", __func__, n_outputs_all);
            GGML_ABORT("TODO: handle this error");
        }

        uint32_t pos_batch = 0;
        do {
            const auto & ubatch = mctx->get_ubatch();

            n_outputs = ubatch.n_tokens;

            if (!mctx->apply()) {
                LLAMA_LOG_ERROR("%s: failed to update the memory context\n", __func__);
                break;
            }

            auto * res = gf_res_prev.get();

            const auto gparams = graph_params(res, ubatch, mctx.get(), LLM_GRAPH_TYPE_DEFAULT);

            res->reset();

            auto * gf = model.build_graph(gparams);

            struct ggml_context * ctx_compute_opt;
            {
                const size_t size_gf = ggml_graph_size(gf);
                const size_t size_meta = 4*size_gf*ggml_tensor_overhead() + 2*ggml_graph_overhead_custom(size_gf, /*grads = */ true);
                struct ggml_init_params params = {
                    /*.mem_size   =*/ size_meta,
                    /*.mem_buffer =*/ nullptr,
                    /*.no_alloc   =*/ true,
                };
                ctx_compute_opt = ggml_init(params);
            }
            ggml_opt_prepare_alloc(opt_ctx, ctx_compute_opt, gf, res->get_tokens(), res->get_logits());
            ggml_opt_alloc(opt_ctx, train);

            res->set_inputs(&ubatch);
            {
                struct ggml_tensor * labels = ggml_opt_labels(opt_ctx);
                GGML_ASSERT(labels->ne[1] == n_ubatch);
                ggml_set_zero(labels);
                const float onef = 1.0f;
                for (uint32_t pos_ubatch = 0; pos_ubatch < n_ubatch; ++pos_ubatch) {
                    const uint32_t ilabel = pos_ctx + pos_batch + pos_ubatch;
                    GGML_ASSERT(labels_sparse[ilabel] < labels->ne[0]);
                    ggml_backend_tensor_set(labels, &onef, (pos_ubatch*labels->ne[0] + labels_sparse[ilabel])*sizeof(float), sizeof(float));
                }
            }
            ggml_opt_eval(opt_ctx, result);
            if (callback) {
                callback(train, opt_ctx, dataset, result, idata_in_loop + (pos_ctx + pos_batch)/n_ubatch + 1, ndata_in_loop, t_loop_start);
            }
            ggml_free(ctx_compute_opt);

            pos_batch += ubatch.n_tokens;
        } while (mctx->next());
    }
}

void llama_context::opt_epoch(
        ggml_opt_dataset_t      dataset,
        ggml_opt_result_t       result_train,
        ggml_opt_result_t       result_eval,
        int64_t                 idata_split,
        ggml_opt_epoch_callback callback_train,
        ggml_opt_epoch_callback callback_eval) {
    const uint32_t n_ctx    = this->n_ctx();
    const uint32_t n_batch  = std::min(cparams.n_batch, n_ctx);
    const uint32_t n_ubatch = std::min(cparams.n_ubatch, n_batch);
    const int64_t  ndata    = ggml_opt_dataset_ndata(dataset);

    GGML_ASSERT(idata_split >= 0);
    GGML_ASSERT(idata_split <= ndata);

    const uint32_t ubatch_per_ctx = n_ctx / n_ubatch;

    struct llama_batch batch = llama_batch_init(n_batch, 0, 1);
    std::vector<llama_token> tokens(n_ctx);
    std::vector<llama_token> labels_sparse(n_ctx);

    int64_t idata = 0;

    int64_t t_loop_start  = ggml_time_us();
    int64_t ndata_in_loop = idata_split*ubatch_per_ctx;
    for (; idata < idata_split; ++idata) {
        constexpr bool train = true;
        const int64_t idata_in_loop = idata*ubatch_per_ctx;

        ggml_opt_dataset_get_batch_host(dataset, tokens.data(), n_ctx*sizeof(llama_token), labels_sparse.data(), idata);
        opt_epoch_iter(dataset, result_train, tokens, labels_sparse, batch,
            callback_train, train, idata_in_loop, ndata_in_loop, t_loop_start);
    }

    t_loop_start  = ggml_time_us();
    ndata_in_loop = (ndata - idata_split)*ubatch_per_ctx;
    for (; idata < ndata; ++idata) {
        constexpr bool train = false;
        const int64_t idata_in_loop = (idata - idata_split)*ubatch_per_ctx;

        ggml_opt_dataset_get_batch_host(dataset, tokens.data(), n_ctx*sizeof(llama_token), labels_sparse.data(), idata);
        opt_epoch_iter(dataset, result_eval, tokens, labels_sparse, batch,
            callback_eval, train, idata_in_loop, ndata_in_loop, t_loop_start);
    }

    llama_batch_free(batch);
}

//
// interface implementation
//

llama_context_params llama_context_default_params() {
    llama_context_params result = {
        /*.n_ctx                    =*/ 512,
        /*.n_batch                  =*/ 2048,
        /*.n_ubatch                 =*/ 512,
        /*.n_seq_max                =*/ 1,
        /*.n_threads                =*/ GGML_DEFAULT_N_THREADS, // TODO: better default
        /*.n_threads_batch          =*/ GGML_DEFAULT_N_THREADS,
        /*.rope_scaling_type        =*/ LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED,
        /*.pooling_type             =*/ LLAMA_POOLING_TYPE_UNSPECIFIED,
        /*.attention_type           =*/ LLAMA_ATTENTION_TYPE_UNSPECIFIED,
        /*.flash_attn_type          =*/ LLAMA_FLASH_ATTN_TYPE_AUTO,
        /*.rope_freq_base           =*/ 0.0f,
        /*.rope_freq_scale          =*/ 0.0f,
        /*.yarn_ext_factor          =*/ -1.0f,
        /*.yarn_attn_factor         =*/ -1.0f,
        /*.yarn_beta_fast           =*/ -1.0f,
        /*.yarn_beta_slow           =*/ -1.0f,
        /*.yarn_orig_ctx            =*/ 0,
        /*.defrag_thold             =*/ -1.0f,
        /*.cb_eval                  =*/ nullptr,
        /*.cb_eval_user_data        =*/ nullptr,
        /*.type_k                   =*/ GGML_TYPE_F16,
        /*.type_v                   =*/ GGML_TYPE_F16,
        /*.abort_callback           =*/ nullptr,
        /*.abort_callback_data      =*/ nullptr,
        /*.embeddings               =*/ false,
        /*.offload_kqv              =*/ true,
        /*.no_perf                  =*/ true,
        /*.op_offload               =*/ true,
        /*.swa_full                 =*/ true,
        /*.kv_unified               =*/ false,
    };

    return result;
}
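
// usage sketch (illustrative): a typical context setup, assuming a model already
// loaded elsewhere (e.g. via llama_model_load_from_file()):
//
//     llama_context_params cparams = llama_context_default_params();
//     cparams.n_ctx   = 4096;
//     cparams.n_batch = 512;
//     llama_context * lctx = llama_init_from_model(model, cparams);
//     if (!lctx) { /* handle error */ }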

llama_context * llama_init_from_model(
        llama_model * model,
        llama_context_params params) {
    if (!model) {
        LLAMA_LOG_ERROR("%s: model cannot be NULL\n", __func__);
        return nullptr;
    }

    if (params.n_batch == 0 && params.n_ubatch == 0) {
        LLAMA_LOG_ERROR("%s: n_batch and n_ubatch cannot both be zero\n", __func__);
        return nullptr;
    }

    if (params.n_ctx == 0 && model->hparams.n_ctx_train == 0) {
        LLAMA_LOG_ERROR("%s: n_ctx and model->hparams.n_ctx_train cannot both be zero\n", __func__);
        return nullptr;
    }

    if (params.flash_attn_type != LLAMA_FLASH_ATTN_TYPE_DISABLED && model->arch == LLM_ARCH_GROK) {
        LLAMA_LOG_WARN("%s: flash_attn is not compatible with Grok - forcing off\n", __func__);
        params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_DISABLED;
    }

    if (params.flash_attn_type == LLAMA_FLASH_ATTN_TYPE_AUTO && ggml_is_quantized(params.type_k)) {
        const uint32_t blck_size = ggml_blck_size(params.type_k);
        if (model->hparams.n_embd_head_k % blck_size != 0) {
            LLAMA_LOG_ERROR("%s: K cache type %s with block size %u does not divide n_embd_head_k=%u\n",
                __func__, ggml_type_name(params.type_k), blck_size, model->hparams.n_embd_head_k);
            return nullptr;
        }
    }

    if (params.flash_attn_type == LLAMA_FLASH_ATTN_TYPE_AUTO && ggml_is_quantized(params.type_v)) {
        const uint32_t blck_size = ggml_blck_size(params.type_v);
        if (model->hparams.n_embd_head_v % blck_size != 0) {
            LLAMA_LOG_ERROR("%s: V cache type %s with block size %u does not divide n_embd_head_v=%u\n",
                __func__, ggml_type_name(params.type_v), blck_size, model->hparams.n_embd_head_v);
            return nullptr;
        }
    }

    if (ggml_is_quantized(params.type_v) && params.flash_attn_type == LLAMA_FLASH_ATTN_TYPE_DISABLED) {
        LLAMA_LOG_ERROR("%s: V cache quantization requires flash_attn\n", __func__);
        return nullptr;
    }

    if (params.pooling_type != model->hparams.pooling_type) {
        // user-specified pooling type differs from the model default
        LLAMA_LOG_WARN("%s: model default pooling_type is [%d], but [%d] was specified\n", __func__,
            model->hparams.pooling_type, params.pooling_type);
    }

    try {
        auto * ctx = new llama_context(*model, params);
        return ctx;
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: failed to initialize the context: %s\n", __func__, err.what());
    }

    return nullptr;
}

// deprecated
llama_context * llama_new_context_with_model(
        llama_model * model,
        llama_context_params params) {
    return llama_init_from_model(model, params);
}

void llama_free(llama_context * ctx) {
    delete ctx;
}

uint32_t llama_n_ctx(const llama_context * ctx) {
    return ctx->n_ctx();
}

uint32_t llama_n_batch(const llama_context * ctx) {
    return ctx->n_batch();
}

uint32_t llama_n_ubatch(const llama_context * ctx) {
    return ctx->n_ubatch();
}

uint32_t llama_n_seq_max(const llama_context * ctx) {
    return ctx->n_seq_max();
}

const llama_model * llama_get_model(const llama_context * ctx) {
    return &ctx->get_model();
}

enum llama_pooling_type llama_pooling_type(const llama_context * ctx) {
    return ctx->pooling_type();
}

void llama_attach_threadpool(
        llama_context * ctx,
        ggml_threadpool_t threadpool,
        ggml_threadpool_t threadpool_batch) {
    ctx->attach_threadpool(threadpool, threadpool_batch);
}

void llama_detach_threadpool(llama_context * ctx) {
    ctx->detach_threadpool();
}

void llama_set_n_threads(llama_context * ctx, int32_t n_threads, int32_t n_threads_batch) {
    ctx->set_n_threads(n_threads, n_threads_batch);
}

int32_t llama_n_threads(llama_context * ctx) {
    return ctx->n_threads();
}

int32_t llama_n_threads_batch(llama_context * ctx) {
    return ctx->n_threads_batch();
}

void llama_set_abort_callback(llama_context * ctx, bool (*abort_callback)(void * data), void * abort_callback_data) {
    ctx->set_abort_callback(abort_callback, abort_callback_data);
}

void llama_set_embeddings(llama_context * ctx, bool embeddings) {
    ctx->set_embeddings(embeddings);
}

void llama_set_causal_attn(llama_context * ctx, bool causal_attn) {
    ctx->set_causal_attn(causal_attn);
}

void llama_set_warmup(llama_context * ctx, bool warmup) {
    ctx->set_warmup(warmup);
}

void llama_synchronize(llama_context * ctx) {
    ctx->synchronize();
}

float * llama_get_logits(llama_context * ctx) {
    ctx->synchronize();

    return ctx->get_logits();
}

float * llama_get_logits_ith(llama_context * ctx, int32_t i) {
    ctx->synchronize();

    return ctx->get_logits_ith(i);
}

float * llama_get_embeddings(llama_context * ctx) {
    ctx->synchronize();

    return ctx->get_embeddings();
}

float * llama_get_embeddings_ith(llama_context * ctx, int32_t i) {
    ctx->synchronize();

    return ctx->get_embeddings_ith(i);
}

float * llama_get_embeddings_seq(llama_context * ctx, llama_seq_id seq_id) {
    ctx->synchronize();

    return ctx->get_embeddings_seq(seq_id);
}

// llama adapter API

int32_t llama_set_adapter_lora(
        llama_context * ctx,
        llama_adapter_lora * adapter,
        float scale) {
    ctx->set_adapter_lora(adapter, scale);

    return 0;
}

int32_t llama_rm_adapter_lora(
        llama_context * ctx,
        llama_adapter_lora * adapter) {
    bool res = ctx->rm_adapter_lora(adapter);

    return res ? 0 : -1;
}

void llama_clear_adapter_lora(llama_context * ctx) {
    ctx->clear_adapter_lora();
}

int32_t llama_apply_adapter_cvec(
        llama_context * ctx,
        const float * data,
        size_t len,
        int32_t n_embd,
        int32_t il_start,
        int32_t il_end) {
    bool res = ctx->apply_adapter_cvec(data, len, n_embd, il_start, il_end);

    return res ? 0 : -1;
}

//
// memory
//

llama_memory_t llama_get_memory(const struct llama_context * ctx) {
    return ctx->get_memory();
}

void llama_memory_clear(llama_memory_t mem, bool data) {
    if (!mem) {
        return;
    }

    mem->clear(data);
}

bool llama_memory_seq_rm(
        llama_memory_t mem,
        llama_seq_id seq_id,
        llama_pos p0,
        llama_pos p1) {
    if (!mem) {
        return true;
    }

    return mem->seq_rm(seq_id, p0, p1);
}

void llama_memory_seq_cp(
        llama_memory_t mem,
        llama_seq_id seq_id_src,
        llama_seq_id seq_id_dst,
        llama_pos p0,
        llama_pos p1) {
    if (!mem) {
        return;
    }

    mem->seq_cp(seq_id_src, seq_id_dst, p0, p1);
}

void llama_memory_seq_keep(
        llama_memory_t mem,
        llama_seq_id seq_id) {
    if (!mem) {
        return;
    }

    mem->seq_keep(seq_id);
}

void llama_memory_seq_add(
        llama_memory_t mem,
        llama_seq_id seq_id,
        llama_pos p0,
        llama_pos p1,
        llama_pos delta) {
    if (!mem) {
        return;
    }

    mem->seq_add(seq_id, p0, p1, delta);
}

void llama_memory_seq_div(
        llama_memory_t mem,
        llama_seq_id seq_id,
        llama_pos p0,
        llama_pos p1,
        int d) {
    if (!mem) {
        return;
    }

    mem->seq_div(seq_id, p0, p1, d);
}

llama_pos llama_memory_seq_pos_min(
        llama_memory_t mem,
        llama_seq_id seq_id) {
    if (!mem) {
        return -1;
    }

    return mem->seq_pos_min(seq_id);
}

llama_pos llama_memory_seq_pos_max(
        llama_memory_t mem,
        llama_seq_id seq_id) {
    if (!mem) {
        return -1;
    }

    return mem->seq_pos_max(seq_id);
}

bool llama_memory_can_shift(llama_memory_t mem) {
    if (!mem) {
        return false;
    }

    return mem->get_can_shift();
}
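
// usage sketch (illustrative): forking a conversation by copying the memory cells
// of sequence 0 into sequence 1, then trimming sequence 1 past position n_keep
// (n_keep is a caller-chosen cut-off, not a library symbol; p1 = -1 means "to
// the end of the sequence"):
//
//     llama_memory_t mem = llama_get_memory(ctx);
//     llama_memory_seq_cp(mem, /*src*/ 0, /*dst*/ 1, /*p0*/ 0, /*p1*/ -1);
//     llama_memory_seq_rm(mem, /*seq*/ 1, /*p0*/ n_keep, /*p1*/ -1);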

// llama state API

// deprecated
size_t llama_get_state_size(llama_context * ctx) {
    return llama_state_get_size(ctx);
}

// deprecated
size_t llama_copy_state_data(llama_context * ctx, uint8_t * dst) {
    return llama_state_get_data(ctx, dst, -1);
}

// deprecated
size_t llama_set_state_data(llama_context * ctx, const uint8_t * src) {
    return llama_state_set_data(ctx, src, -1);
}

// deprecated
bool llama_load_session_file(llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
    return llama_state_load_file(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out);
}

// deprecated
bool llama_save_session_file(llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) {
    return llama_state_save_file(ctx, path_session, tokens, n_token_count);
}

// Returns the *actual* size of the state.
// Intended to be used when saving the state to a buffer.
size_t llama_state_get_size(llama_context * ctx) {
    return ctx->state_get_size();
}

size_t llama_state_get_data(llama_context * ctx, uint8_t * dst, size_t size) {
    ctx->synchronize();

    return ctx->state_get_data(dst, size);
}

// Sets the state reading from the specified source address
size_t llama_state_set_data(llama_context * ctx, const uint8_t * src, size_t size) {
    ctx->synchronize();

    return ctx->state_set_data(src, size);
}

bool llama_state_load_file(llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
    ctx->synchronize();

    try {
        return ctx->state_load_file(path_session, tokens_out, n_token_capacity, n_token_count_out);
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: error loading session file: %s\n", __func__, err.what());
        return false;
    }
}

bool llama_state_save_file(llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) {
    ctx->synchronize();

    try {
        return ctx->state_save_file(path_session, tokens, n_token_count);
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: error saving session file: %s\n", __func__, err.what());
        return false;
    }
}

size_t llama_state_seq_get_size(llama_context * ctx, llama_seq_id seq_id) {
    return llama_state_seq_get_size_ext(ctx, seq_id, 0);
}

size_t llama_state_seq_get_data(llama_context * ctx, uint8_t * dst, size_t size, llama_seq_id seq_id) {
    return llama_state_seq_get_data_ext(ctx, dst, size, seq_id, 0);
}

size_t llama_state_seq_set_data(llama_context * ctx, const uint8_t * src, size_t size, llama_seq_id seq_id) {
    return llama_state_seq_set_data_ext(ctx, src, size, seq_id, 0);
}

size_t llama_state_seq_get_size_ext(llama_context * ctx, llama_seq_id seq_id, llama_state_seq_flags flags) {
    return ctx->state_seq_get_size(seq_id, flags);
}

size_t llama_state_seq_get_data_ext(llama_context * ctx, uint8_t * dst, size_t size, llama_seq_id seq_id, llama_state_seq_flags flags) {
    ctx->synchronize();

    return ctx->state_seq_get_data(seq_id, dst, size, flags);
}

size_t llama_state_seq_set_data_ext(llama_context * ctx, const uint8_t * src, size_t size, llama_seq_id seq_id, llama_state_seq_flags flags) {
    ctx->synchronize();

    return ctx->state_seq_set_data(seq_id, src, size, flags);
}

size_t llama_state_seq_save_file(llama_context * ctx, const char * filepath, llama_seq_id seq_id, const llama_token * tokens, size_t n_token_count) {
    ctx->synchronize();

    try {
        return ctx->state_seq_save_file(seq_id, filepath, tokens, n_token_count);
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: error saving sequence state file: %s\n", __func__, err.what());
        return 0;
    }
}

size_t llama_state_seq_load_file(llama_context * ctx, const char * filepath, llama_seq_id dest_seq_id, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
    ctx->synchronize();

    try {
        return ctx->state_seq_load_file(dest_seq_id, filepath, tokens_out, n_token_capacity, n_token_count_out);
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: error loading sequence state file: %s\n", __func__, err.what());
        return 0;
    }
}
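
// usage sketch (illustrative): persisting a single sequence and restoring it into
// another context; prompt_tokens/n_prompt are caller-provided, not library symbols:
//
//     llama_state_seq_save_file(ctx, "seq0.bin", /*seq_id*/ 0, prompt_tokens, n_prompt);
//     // ... later, possibly in another process using the same model:
//     llama_token tokens[512];
//     size_t n_tokens = 0;
//     llama_state_seq_load_file(ctx2, "seq0.bin", /*dest_seq_id*/ 0, tokens, 512, &n_tokens);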

///

int32_t llama_encode(
        llama_context * ctx,
        llama_batch batch) {
    const int ret = ctx->encode(batch);
    if (ret != 0) {
        LLAMA_LOG_ERROR("%s: failed to encode, ret = %d\n", __func__, ret);
    }

    return ret;
}

int32_t llama_decode(
        llama_context * ctx,
        llama_batch batch) {
    const int ret = ctx->decode(batch);
    if (ret != 0 && ret != 1) {
        LLAMA_LOG_ERROR("%s: failed to decode, ret = %d\n", __func__, ret);
    }

    return ret;
}
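
// usage sketch (illustrative): decoding one token at a time with a single-sequence
// batch; new_token is assumed to come from the caller's sampler:
//
//     llama_batch batch = llama_batch_get_one(&new_token, 1);
//     const int32_t ret = llama_decode(ctx, batch);
//     if (ret < 0)  { /* hard error */ }
//     if (ret == 1) { /* no memory slot for the batch - retry with a smaller batch */ }
//     float * logits = llama_get_logits_ith(ctx, 0);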

//
// perf
//

llama_perf_context_data llama_perf_context(const llama_context * ctx) {
    llama_perf_context_data data = {};

    if (ctx == nullptr) {
        return data;
    }

    data = ctx->perf_get_data();

    return data;
}

void llama_perf_context_print(const llama_context * ctx) {
    const auto data = llama_perf_context(ctx);

    const double t_end_ms = 1e-3 * ggml_time_us();

    LLAMA_LOG_INFO("%s:        load time = %10.2f ms\n", __func__, data.t_load_ms);
    LLAMA_LOG_INFO("%s: prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
            __func__, data.t_p_eval_ms, data.n_p_eval, data.t_p_eval_ms / data.n_p_eval, 1e3 / data.t_p_eval_ms * data.n_p_eval);
    LLAMA_LOG_INFO("%s:        eval time = %10.2f ms / %5d runs   (%8.2f ms per token, %8.2f tokens per second)\n",
            __func__, data.t_eval_ms, data.n_eval, data.t_eval_ms / data.n_eval, 1e3 / data.t_eval_ms * data.n_eval);
    LLAMA_LOG_INFO("%s:       total time = %10.2f ms / %5d tokens\n", __func__, (t_end_ms - data.t_start_ms), (data.n_p_eval + data.n_eval));
    LLAMA_LOG_INFO("%s:    graphs reused = %10d\n", __func__, data.n_reused);
}

void llama_perf_context_reset(llama_context * ctx) {
    ctx->perf_reset();
}

void llama_memory_breakdown_print(const struct llama_context * ctx) {
    const std::vector<ggml_backend_dev_t> & devices = ctx->get_model().devices;

    std::map<ggml_backend_buffer_type_t, llama_memory_breakdown_data> memory_breakdown = ctx->memory_breakdown();

    std::vector<std::array<std::string, 9>> table_data;
    table_data.reserve(devices.size());
    const std::string template_header = "%s: | %s | %s %s %s %s %s %s %s |\n";
    const std::string template_gpu    = "%s: | %s | %s = %s + (%s = %s + %s + %s) + %s |\n";
    const std::string template_other  = "%s: | %s | %s %s %s = %s + %s + %s %s |\n";

    table_data.push_back({template_header, "memory breakdown [MiB]", "total", "free", "self", "model", "context", "compute", "unaccounted"});

    constexpr size_t MiB = 1024 * 1024;
    const std::vector<std::string> desc_prefixes_strip = {"NVIDIA ", "GeForce ", "Tesla ", "AMD ", "Radeon ", "Instinct "};

    // track seen buffer types to avoid double counting:
    std::set<ggml_backend_buffer_type_t> seen_buffer_types;

    // accumulative memory breakdown for each device and for host:
    std::vector<llama_memory_breakdown_data> mb_dev(devices.size());
    llama_memory_breakdown_data mb_host;

    for (const auto & buft_mb : memory_breakdown) {
        ggml_backend_buffer_type_t buft = buft_mb.first;
        const llama_memory_breakdown_data & mb = buft_mb.second;
        if (ggml_backend_buft_is_host(buft)) {
            mb_host.model   += mb.model;
            mb_host.context += mb.context;
            mb_host.compute += mb.compute;
            seen_buffer_types.insert(buft);
            continue;
        }
        ggml_backend_dev_t dev = ggml_backend_buft_get_device(buft);
        if (dev) {
            int i_dev = -1;
            for (size_t i = 0; i < devices.size(); i++) {
                if (devices[i] == dev) {
                    i_dev = i;
                    break;
                }
            }
            if (i_dev != -1) {
                mb_dev[i_dev].model   += mb.model;
                mb_dev[i_dev].context += mb.context;
                mb_dev[i_dev].compute += mb.compute;
                seen_buffer_types.insert(buft);
                continue;
            }
        }
    }

    // print memory breakdown for each device:
    for (size_t i = 0; i < devices.size(); i++) {
        ggml_backend_dev_t dev = devices[i];
        llama_memory_breakdown_data mb = mb_dev[i];

        const std::string name = ggml_backend_dev_name(dev);
        std::string desc = ggml_backend_dev_description(dev);
        for (const std::string & prefix : desc_prefixes_strip) {
            if (desc.length() >= prefix.length() && desc.substr(0, prefix.length()) == prefix) {
                desc = desc.substr(prefix.length());
            }
        }

        size_t free, total;
        ggml_backend_dev_memory(dev, &free, &total);

        const size_t self = mb.model + mb.context + mb.compute;
        const size_t unaccounted = total - self - free;

        table_data.push_back({
            template_gpu,
            " - " + name + " (" + desc + ")",
            std::to_string(total / MiB),
            std::to_string(free / MiB),
            std::to_string(self / MiB),
            std::to_string(mb.model / MiB),
            std::to_string(mb.context / MiB),
            std::to_string(mb.compute / MiB),
            std::to_string(unaccounted / MiB)});
    }

    // print memory breakdown for host:
    {
        const size_t self = mb_host.model + mb_host.context + mb_host.compute;
        table_data.push_back({
            template_other,
            " - Host",
            "", // total
            "", // free
            std::to_string(self / MiB),
            std::to_string(mb_host.model / MiB),
            std::to_string(mb_host.context / MiB),
            std::to_string(mb_host.compute / MiB),
            ""}); // unaccounted
    }

    // print memory breakdown for all remaining buffer types:
    for (const auto & buft_mb : memory_breakdown) {
        ggml_backend_buffer_type_t buft = buft_mb.first;
        const llama_memory_breakdown_data & mb = buft_mb.second;
        if (seen_buffer_types.count(buft) == 1) {
            continue;
        }
        const std::string name = ggml_backend_buft_name(buft);
        const size_t self = mb.model + mb.context + mb.compute;
        table_data.push_back({
            template_other,
            " - " + name,
            "", // total
            "", // free
            std::to_string(self / MiB),
            std::to_string(mb.model / MiB),
            std::to_string(mb.context / MiB),
            std::to_string(mb.compute / MiB),
            ""}); // unaccounted
        seen_buffer_types.insert(buft);
    }

    for (size_t j = 1; j < table_data[0].size(); j++) {
        size_t max_len = 0;
        for (const auto & td : table_data) {
            max_len = std::max(max_len, td[j].length());
        }
        for (auto & td : table_data) {
            td[j].insert(j == 1 ? td[j].length() : 0, max_len - td[j].length(), ' ');
        }
    }
    for (const auto & td : table_data) {
        LLAMA_LOG_INFO(td[0].c_str(),
            __func__, td[1].c_str(), td[2].c_str(), td[3].c_str(), td[4].c_str(), td[5].c_str(),
            td[6].c_str(), td[7].c_str(), td[8].c_str());
    }
}

//
// training
//

bool llama_opt_param_filter_all(const struct ggml_tensor * tensor, void * userdata) {
    GGML_UNUSED(tensor);
    GGML_UNUSED(userdata);
    return true;
}

void llama_opt_init(struct llama_context * ctx, struct llama_model * model, struct llama_opt_params lopt_params) {
    ctx->opt_init(model, lopt_params);
}

void llama_opt_epoch(
        struct llama_context * ctx,
        ggml_opt_dataset_t dataset,
        ggml_opt_result_t result_train,
        ggml_opt_result_t result_eval,
        int64_t idata_split,
        ggml_opt_epoch_callback callback_train,
        ggml_opt_epoch_callback callback_eval) {
    ctx->opt_epoch(
        dataset,
        result_train,
        result_eval,
        idata_split,
        callback_train,
        callback_eval);
}
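
// usage sketch (illustrative): a minimal finetuning loop over a pre-built dataset;
// dataset construction and optimizer parameter choice are assumed to happen
// elsewhere via the ggml_opt_* API, and the 95/5 train/eval split is arbitrary:
//
//     llama_opt_params oparams = { /* n_ctx_train, param_filter, get_opt_pars, ... */ };
//     llama_opt_init(ctx, model, oparams);
//     ggml_opt_result_t result_train = ggml_opt_result_init();
//     ggml_opt_result_t result_eval  = ggml_opt_result_init();
//     const int64_t idata_split = ggml_opt_dataset_ndata(dataset) * 95 / 100;
//     llama_opt_epoch(ctx, dataset, result_train, result_eval, idata_split,
//                     ggml_opt_epoch_callback_progress_bar, ggml_opt_epoch_callback_progress_bar);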