Compare commits

6 Commits: v0.3.7 ... paligemma-

| Author | SHA1 | Date | |
|---|---|---|---|
| | a6d30ecefe | | |
| | 80eef7c7b1 | | |
| | a33e56cddb | | |
| | e6802df906 | | |
| | c631633bce | | |
| | 7de230f005 | | |
@@ -111,10 +111,7 @@ On Windows, Ollama inherits your user and system environment variables.

## How do I use Ollama behind a proxy?

Ollama pulls models from the Internet and may require a proxy server to access the models. Use `HTTPS_PROXY` to redirect outbound requests through the proxy. Ensure the proxy certificate is installed as a system certificate. Refer to the section above for how to use environment variables on your platform.

> [!NOTE]
> Avoid setting `HTTP_PROXY`. Ollama does not use HTTP for model pulls, only HTTPS. Setting `HTTP_PROXY` may interrupt client connections to the server.

Ollama is compatible with proxy servers if `HTTP_PROXY` or `HTTPS_PROXY` are configured. When using either variable, ensure it is set where `ollama serve` can access the value. When using `HTTPS_PROXY`, ensure the proxy certificate is installed as a system certificate. Refer to the section above for how to use environment variables on your platform.
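The key point in both versions of this answer is that the proxy variable must be visible to the `ollama serve` process itself, not just to the client shell. As a minimal sketch (not part of this diff; the proxy URL is a placeholder), launching the server with the proxy set in its environment from Go could look like this:

```go
package main

import (
	"log"
	"os"
	"os/exec"
)

func main() {
	cmd := exec.Command("ollama", "serve")
	// Inherit the current environment and add the proxy setting so the
	// server process (which performs the model pulls) can see it.
	cmd.Env = append(os.Environ(), "HTTPS_PROXY=https://proxy.example.com:3128")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		log.Fatal(err)
	}
}
```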
### How do I use Ollama behind a proxy in Docker?
@@ -279,4 +276,4 @@ Note: Windows with Radeon GPUs currently default to 1 model maximum due to limit

## How does Ollama load models on multiple GPUs?

Installing multiple GPUs of the same brand can be a great way to increase your available VRAM to load larger models. When you load a new model, Ollama evaluates the required VRAM for the model against what is currently available. If the model will entirely fit on any single GPU, Ollama will load the model on that GPU. This typically provides the best performance as it reduces the amount of data transferring across the PCI bus during inference. If the model does not fit entirely on one GPU, then it will be spread across all the available GPUs.
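A rough Go sketch of that placement rule (hypothetical helper and field names, not Ollama's actual scheduler code):

```go
package main

import "fmt"

// GpuInfo is a minimal stand-in for the fields this decision needs.
type GpuInfo struct {
	ID         string
	FreeMemory uint64
}

// pickGPUs prefers a single GPU that can hold the whole model, since that
// avoids shuttling activations across the PCI bus during inference;
// otherwise the model is spread across every available GPU.
func pickGPUs(gpus []GpuInfo, requiredVRAM uint64) []GpuInfo {
	for _, g := range gpus {
		if g.FreeMemory >= requiredVRAM {
			return []GpuInfo{g}
		}
	}
	return gpus
}

func main() {
	gpus := []GpuInfo{{"0", 8 << 30}, {"1", 24 << 30}}
	fmt.Println(pickGPUs(gpus, 13<<30)) // fits on GPU 1 alone
	fmt.Println(pickGPUs(gpus, 30<<30)) // spread across both GPUs
}
```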
@@ -32,29 +32,4 @@ func TestCPUMemInfo(t *testing.T) {
	}
}

func TestByLibrary(t *testing.T) {
	type testCase struct {
		input  []GpuInfo
		expect int
	}

	testCases := map[string]*testCase{
		"empty":                    {input: []GpuInfo{}, expect: 0},
		"cpu":                      {input: []GpuInfo{{Library: "cpu"}}, expect: 1},
		"cpu + GPU":                {input: []GpuInfo{{Library: "cpu"}, {Library: "cuda"}}, expect: 2},
		"cpu + 2 GPU no variant":   {input: []GpuInfo{{Library: "cpu"}, {Library: "cuda"}, {Library: "cuda"}}, expect: 2},
		"cpu + 2 GPU same variant": {input: []GpuInfo{{Library: "cpu"}, {Library: "cuda", Variant: "v11"}, {Library: "cuda", Variant: "v11"}}, expect: 2},
		"cpu + 2 GPU diff variant": {input: []GpuInfo{{Library: "cpu"}, {Library: "cuda", Variant: "v11"}, {Library: "cuda", Variant: "v12"}}, expect: 3},
	}

	for k, v := range testCases {
		t.Run(k, func(t *testing.T) {
			resp := (GpuInfoList)(v.input).ByLibrary()
			if len(resp) != v.expect {
				t.Fatalf("expected length %d, got %d => %+v", v.expect, len(resp), resp)
			}
		})
	}
}

// TODO - add some logic to figure out card type through other means and actually verify we got back what we expected
@@ -94,7 +94,7 @@ func (l GpuInfoList) ByLibrary() []GpuInfoList {
			}
		}
		if !found {
			libs = append(libs, requested)
			libs = append(libs, info.Library)
			resp = append(resp, []GpuInfo{info})
		}
	}
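For context, a hedged sketch of the grouping idea behind `ByLibrary`, keyed the way the removed test cases above expect (library plus variant); note that the one-line change in this hunk appears to switch the key to `info.Library` alone. The types here are minimal stand-ins, not the real `gpu` package:

```go
package main

import "fmt"

type GpuInfo struct {
	Library string
	Variant string
}

type GpuInfoList []GpuInfo

// ByLibrary groups GPUs by library plus variant; each distinct key gets its
// own group, so the CPU entry always forms a group of its own.
func (l GpuInfoList) ByLibrary() []GpuInfoList {
	var keys []string
	var resp []GpuInfoList
	for _, info := range l {
		key := info.Library
		if info.Variant != "" {
			key += "_" + info.Variant
		}
		found := false
		for i, k := range keys {
			if k == key {
				resp[i] = append(resp[i], info)
				found = true
				break
			}
		}
		if !found {
			keys = append(keys, key)
			resp = append(resp, GpuInfoList{info})
		}
	}
	return resp
}

func main() {
	l := GpuInfoList{{Library: "cpu"}, {Library: "cuda", Variant: "v11"}, {Library: "cuda", Variant: "v12"}}
	fmt.Println(len(l.ByLibrary())) // 3, matching the "diff variant" test case
}
```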
73  llm/ext_server/server.cpp  vendored
@@ -1271,8 +1271,61 @@ struct llama_server_context
        }
    }

    // for multiple images processing
    bool ingest_images(server_slot &slot, int n_batch)
    // 1 image only
    bool prepare_pali(server_slot &slot, int n_batch)
    {
        int n_past = 0;
        int image_idx = 0;
        slot_image &img = slot.images[image_idx];
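        // The constants below appear to be PaliGemma-specific: 2048 is presumably the Gemma
        // text-model embedding width and 256 the number of SigLIP image-patch embeddings;
        // scaling by 1/sqrt(hidden_size) matches how PaliGemma normalizes image features
        // before they enter the language model.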
        // rescale image embeddings
        float *data = img.image_embedding;
        for (int i = 0; i < 2048 * 256; i++)
        {
            data[i] = data[i] / sqrt(2048);
        }
        set_image_embeds(ctx, data);

        // generate user_prompt -> this should contain image tokens prepended and a new line appended:
        // batch.n_tokens += (int)slot.images.size() * llama_n_embd(model);
        std::vector<llama_token> tokens;
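        // 257152 is presumably the <image> placeholder token id and 2 the BOS token id in
        // the Gemma/PaliGemma tokenizer, so the sequence becomes
        // [<image> x 256 per image][BOS][prefix tokens].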
        for (int i = 0; i < (int)slot.images.size() * 256; i++)
        {
            tokens.push_back(257152);
        }

        tokens.push_back(2);

        // move prefix prompt behind image tokens
        for (int i = 0; i < batch.n_tokens; i++)
        {
            tokens.push_back(batch.token[i]);
        }

        llama_batch_clear(batch);
        for (int i = 0; i < (int)tokens.size(); ++i)
        {
            llama_batch_add(batch, tokens[i], system_tokens.size() + slot.n_past, {slot.id}, true);
            slot.n_past += 1;
        }

        // append prefix of next image
        const auto json_prompt = slot.params.input_suffix;

        std::vector<llama_token> append_tokens = tokenize(json_prompt, false); // has next image
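        // Token 108 is presumably "\n" in the Gemma tokenizer; PaliGemma expects the text
        // prefix to be terminated by a newline.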
        append_tokens.push_back(108);

        for (int i = 0; i < (int)append_tokens.size(); ++i)
        {
            llama_batch_add(batch, append_tokens[i], system_tokens.size() + slot.n_past, {slot.id}, true);
            slot.n_past += 1;
        }
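        // Attention is made non-causal here because PaliGemma attends bidirectionally over
        // the image and prefix tokens; it is switched back to causal after llama_decode
        // further down in this file.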
        llama_set_causal_attn(ctx, false);
        return true;
    }

    bool process_llava(server_slot &slot, int n_batch)
    {
        int image_idx = 0;
@@ -1349,6 +1402,21 @@ struct llama_server_context
        return true;
    }

    // for multiple images processing based on model architecture
    bool ingest_images(server_slot &slot, int n_batch)
    {
        switch (llama_get_architecture(model))
        {
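        // The hard-coded 0 and 25 index into llama.cpp's internal llm_arch enum in this
        // vendored copy: 0 is the default LLaMA-style path handled by the existing LLaVA
        // code, and 25 is presumably Gemma, which PaliGemma builds on.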
        case 0:
            return process_llava(slot, n_batch);
        case 25:
            return prepare_pali(slot, n_batch);
        default:
            LOG_TEE("%s : failed to retrieve model architecture\n", __func__);
            return false;
        }
    }

    void request_cancel(int task_id)
    {
        task_server task;
@@ -1916,6 +1984,7 @@ struct llama_server_context
            };

            const int ret = llama_decode(ctx, batch_view);
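            // Re-enable causal attention after the decode so that later batches are not
            // affected by the non-causal PaliGemma pass above.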
            llama_set_causal_attn(ctx, true);

            if (ret != 0)
            {
@@ -7,7 +7,7 @@ index 1fe2b9f7..a43312a7 100644
// TODO: use a per-batch flag for logits presence instead
- const bool has_logits = !cparams.embeddings;
+ const bool has_logits = cparams.causal_attn;
+ const bool has_logits = cparams.causal_attn || lctx.image_embeds;
const bool has_embd = lctx.is_encoding || (cparams.embeddings && (cparams.pooling_type == LLAMA_POOLING_TYPE_NONE));

const size_t logits_size = has_logits ? n_vocab*n_outputs_max : 0;
@@ -36,7 +36,7 @@ index 1fe2b9f7..a43312a7 100644
GGML_ASSERT(strcmp(res->name, "result_output") == 0 && "missing result_output tensor");
}
+
+ if (!cparams.causal_attn) {
+ if (!cparams.causal_attn && !has_image_embeds) {
+ res = nullptr; // do not extract logits when not needed
+ }
+
113  llm/patches/12-paligemma.diff  Normal file
@@ -0,0 +1,113 @@
diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp
index 9c0d351e..019a147c 100644
--- a/examples/llava/clip.cpp
+++ b/examples/llava/clip.cpp
@@ -718,10 +718,12 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);

- embeddings = ggml_gelu(ctx0, embeddings);
- embeddings = ggml_mul_mat(ctx0, model.mm_2_w, embeddings);
- embeddings = ggml_add(ctx0, embeddings, model.mm_2_b);
-
+ if (model.mm_2_w)
+ {
+ embeddings = ggml_gelu(ctx0, embeddings);
+ embeddings = ggml_mul_mat(ctx0, model.mm_2_w, embeddings);
+ embeddings = ggml_add(ctx0, embeddings, model.mm_2_b);
+ }
} else if (ctx->proj_type == PROJECTOR_TYPE_MLP_NORM) {
embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);
@@ -2102,6 +2104,10 @@ int clip_n_mmproj_embd(const struct clip_ctx * ctx) {
return ctx->vision_model.mm_model_peg_0_b->ne[0];
}
if (ctx->proj_type == PROJECTOR_TYPE_MLP) {
+ if (ctx->vision_model.mm_2_b == nullptr)
+ {
+ return ctx->vision_model.mm_0_b->ne[0];
+ }
return ctx->vision_model.mm_2_b->ne[0];
}
if (ctx->proj_type == PROJECTOR_TYPE_MLP_NORM) {
diff --git a/include/llama.h b/include/llama.h
index 6072e76e..4c572a74 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -444,6 +444,12 @@ extern "C" {
// Frees all allocated memory
LLAMA_API void llama_free(struct llama_context * ctx);

+ // Sets image embeddings
+ LLAMA_API void set_image_embeds(struct llama_context *ctx, float *data);
+
+ // Get architecture
+ LLAMA_API int llama_get_architecture(struct llama_model *model);
+
LLAMA_API int64_t llama_time_us(void);

LLAMA_API size_t llama_max_devices(void);
diff --git a/src/llama.cpp b/src/llama.cpp
index d883ed19..322b4b59 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -2710,6 +2710,8 @@ struct llama_context {

bool logits_all = false;

+ float *image_embeds = nullptr;
+
// embeddings output (2-dimensional array: [n_outputs][n_embd])
// populated only when pooling_type == LLAMA_POOLING_TYPE_NONE
size_t embd_size = 0; // capacity (of floats) for embeddings
@@ -11591,6 +11593,15 @@ struct llm_build_context {

inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);

+ if (lctx.image_embeds)
+ {
+ struct ggml_tensor *image_embeds = ggml_dup_tensor(ctx0, inpL);
+ image_embeds->data = lctx.image_embeds;
+ image_embeds->ne[1] = 256;
+ inpL = ggml_set_2d_inplace(ctx0, inpL, image_embeds, inpL->nb[1], 0);
+ lctx.image_embeds = NULL;
+ }
+
inpL = ggml_scale(ctx0, inpL, sqrtf(n_embd));
cb(inpL, "inp_scaled", -1);
@@ -14468,6 +14479,7 @@ static int llama_decode_internal(

const int64_t n_embd = hparams.n_embd;
const int64_t n_vocab = hparams.n_vocab;
+ const bool has_image_embeds = lctx.image_embeds;

uint32_t n_outputs = 0;
uint32_t n_outputs_prev = 0;
@@ -14581,7 +14593,8 @@
}

// non-causal masks do not use the KV cache
- if (hparams.causal_attn) {
+ if (hparams.causal_attn || lctx.image_embeds)
+ {
llama_kv_cache_update(&lctx);

// if we have enough unused cells before the current head ->
@@ -16455,6 +16468,16 @@ void llama_free_model(struct llama_model * model) {
delete model;
}

+void set_image_embeds(llama_context *ctx, float *data)
+{
+ ctx->image_embeds = data;
+}
+
+int llama_get_architecture(llama_model *model)
+{
+ return model->arch;
+}
+
struct llama_context * llama_new_context_with_model(
struct llama_model * model,
struct llama_context_params params) {
@@ -258,7 +258,7 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
		params = append(params, "--mlock")
	}

	if gpu.IsNUMA() && gpus[0].Library == "cpu" {
	if gpu.IsNUMA() {
		numaMode := "distribute"
		if runtime.GOOS == "linux" {
			if _, err := exec.LookPath("numactl"); err == nil {