Compare commits
22 Commits
mxyng/envi
...
jmorganca/
| Author | SHA1 | Date |
|---|---|---|
| | 32c6d43e1b | |
| | f8ba24577b | |
| | 861a521b19 | |
| | 793248c280 | |
| | 8b1b89a984 | |
| | 47e272c35a | |
| | 417a81fda3 | |
| | dba62ff3a5 | |
| | d70e935526 | |
| | 5c1063df7f | |
| | cb485b2019 | |
| | b2af50960f | |
| | eac5b8bfbd | |
| | 604e43b28d | |
| | 53985b3c4d | |
| | b6e02cbbd2 | |
| | 91935631ac | |
| | 8de30b568a | |
| | 485da9fd35 | |
| | 0796d79d19 | |
| | 92981ae3f2 | |
| | 8ed1adf3db | |
2 .gitattributes (vendored)
@@ -15,6 +15,8 @@ ml/backend/**/*.cu linguist-vendored
ml/backend/**/*.cuh linguist-vendored
ml/backend/**/*.m linguist-vendored
ml/backend/**/*.metal linguist-vendored
ml/backend/**/*.comp linguist-vendored
ml/backend/**/*.glsl linguist-vendored
ml/backend/**/CMakeLists.txt linguist-vendored

llama/build-info.cpp linguist-generated
@@ -397,8 +397,8 @@ func checkUserLoggedIn(uiServerPort int) bool {
// handleConnectURLScheme fetches the connect URL and opens it in the browser
func handleConnectURLScheme() {
    if checkUserLoggedIn(uiServerPort) {
        slog.Info("user is already logged in, opening settings instead")
        sendUIRequestMessage("/")
        slog.Info("user is already logged in, opening app instead")
        showWindow(wv.webview.Window())
        return
    }

@@ -466,6 +466,8 @@ func handleURLSchemeInCurrentInstance(urlSchemeRequest string) {
    if isConnect {
        handleConnectURLScheme()
    } else {
        sendUIRequestMessage("/")
        if wv.webview != nil {
            showWindow(wv.webview.Window())
        }
    }
}
@@ -24,27 +24,14 @@ bool firstTimeRun,startHidden; // Set in run before initialization
for (NSURL *url in urls) {
    if ([url.scheme isEqualToString:@"ollama"]) {
        NSString *path = url.path;
        if (!path || [path isEqualToString:@""]) {
            // For URLs like ollama://settings (without triple slash),
            // the "settings" part is parsed as the host, not the path.
            // We need to convert it to a path by prepending "/"
            if (url.host && ![url.host isEqualToString:@""]) {
                path = [@"/" stringByAppendingString:url.host];
            } else {
                path = @"/";
            }
        }

        if ([path isEqualToString:@"/connect"] || [url.host isEqualToString:@"connect"]) {

        if (path && ([path isEqualToString:@"/connect"] || [url.host isEqualToString:@"connect"])) {
            // Special case: handle connect by opening browser instead of app
            handleConnectURL();
        } else {
            // Set app to be active and visible
            [NSApp setActivationPolicy:NSApplicationActivationPolicyRegular];
            [NSApp activateIgnoringOtherApps:YES];

            // Open the path with the UI
            [self uiRequest:path];
        }

        break;
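The Objective-C comment above documents a URL-parsing quirk that is easy to verify outside the app. A minimal Go sketch (illustration only, not part of this change) shows the same host-versus-path split for the two URL forms:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// "ollama://settings" parses "settings" as the host with an empty path;
	// "ollama:///settings" (triple slash) parses it as the path.
	for _, s := range []string{"ollama://settings", "ollama:///settings"} {
		u, err := url.Parse(s)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-22s host=%q path=%q\n", s, u.Host, u.Path)
	}
}
```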
@@ -260,7 +247,7 @@ bool firstTimeRun,startHidden; // Set in run before initialization
}

- (void)openHelp:(id)sender {
    NSURL *url = [NSURL URLWithString:@"https://github.com/ollama/ollama/tree/main/docs"];
    NSURL *url = [NSURL URLWithString:@"https://docs.ollama.com/"];
    [[NSWorkspace sharedWorkspace] openURL:url];
}
@@ -147,7 +147,9 @@ func handleURLSchemeRequest(urlScheme string) {
    if isConnect {
        handleConnectURLScheme()
    } else {
        sendUIRequestMessage("/")
        if wv.webview != nil {
            showWindow(wv.webview.Window())
        }
    }
}
190 cmd/cmd.go
@@ -5,7 +5,6 @@ import (
    "context"
    "crypto/ed25519"
    "crypto/rand"
    _ "embed"
    "encoding/json"
    "encoding/pem"
    "errors"

@@ -48,9 +47,6 @@ import (
    "github.com/ollama/ollama/version"
)

//go:embed usage.gotmpl
var usageTemplate string

const ConnectInstructions = "To sign in, navigate to:\n %s\n\n"

// ensureThinkingSupport emits a warning if the model does not advertise thinking support

@@ -1668,6 +1664,21 @@ func versionHandler(cmd *cobra.Command, _ []string) {
    }
}

func appendEnvDocs(cmd *cobra.Command, envs []envconfig.EnvVar) {
    if len(envs) == 0 {
        return
    }

    envUsage := `
Environment Variables:
`
    for _, e := range envs {
        envUsage += fmt.Sprintf(" %-24s %s\n", e.Name, e.Description)
    }

    cmd.SetUsageTemplate(cmd.UsageTemplate() + envUsage)
}

func NewCLI() *cobra.Command {
    log.SetFlags(log.LstdFlags | log.Lshortfile)
    cobra.EnableCommandSorting = false
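For reference, a minimal sketch of how the new appendEnvDocs helper is meant to be used; the demo command here is hypothetical, and the rendered help text is approximate:

```go
// Hypothetical command, for illustration only.
demoCmd := &cobra.Command{Use: "demo", Short: "A demo command"}

envVars := envconfig.AsMap()
appendEnvDocs(demoCmd, []envconfig.EnvVar{envVars["OLLAMA_HOST"]})

// demoCmd's --help output now ends with roughly:
//
// Environment Variables:
//  OLLAMA_HOST              IP Address for the ollama server (default 127.0.0.1:11434)
```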
@@ -1697,24 +1708,22 @@ func NewCLI() *cobra.Command {
    rootCmd.Flags().BoolP("version", "v", false, "Show version information")

    createCmd := &cobra.Command{
        Use: "create MODEL",
        Short: "Create a model",
        Args: cobra.ExactArgs(1),
        PreRunE: checkServerHeartbeat,
        RunE: CreateHandler,
        Annotations: envconfig.Usage("OLLAMA_HOST"),
        Use: "create MODEL",
        Short: "Create a model",
        Args: cobra.ExactArgs(1),
        PreRunE: checkServerHeartbeat,
        RunE: CreateHandler,
    }

    createCmd.Flags().StringP("file", "f", "", "Name of the Modelfile (default \"Modelfile\")")
    createCmd.Flags().StringP("quantize", "q", "", "Quantize model to this level (e.g. q4_K_M)")

    showCmd := &cobra.Command{
        Use: "show MODEL",
        Short: "Show information for a model",
        Args: cobra.ExactArgs(1),
        PreRunE: checkServerHeartbeat,
        RunE: ShowHandler,
        Annotations: envconfig.Usage("OLLAMA_HOST"),
        Use: "show MODEL",
        Short: "Show information for a model",
        Args: cobra.ExactArgs(1),
        PreRunE: checkServerHeartbeat,
        RunE: ShowHandler,
    }

    showCmd.Flags().Bool("license", false, "Show license of a model")

@@ -1725,12 +1734,11 @@ func NewCLI() *cobra.Command {
    showCmd.Flags().BoolP("verbose", "v", false, "Show detailed model information")

    runCmd := &cobra.Command{
        Use: "run MODEL [PROMPT]",
        Short: "Run a model",
        Args: cobra.MinimumNArgs(1),
        PreRunE: checkServerHeartbeat,
        RunE: RunHandler,
        Annotations: envconfig.Usage("OLLAMA_HOST", "OLLAMA_NOHISTORY"),
        Use: "run MODEL [PROMPT]",
        Short: "Run a model",
        Args: cobra.MinimumNArgs(1),
        PreRunE: checkServerHeartbeat,
        RunE: RunHandler,
    }

    runCmd.Flags().String("keepalive", "", "Duration to keep a model loaded (e.g. 5m)")

@@ -1745,12 +1753,11 @@ func NewCLI() *cobra.Command {
    runCmd.Flags().Int("dimensions", 0, "Truncate output embeddings to specified dimension (embedding models only)")

    stopCmd := &cobra.Command{
        Use: "stop MODEL",
        Short: "Stop a running model",
        Args: cobra.ExactArgs(1),
        PreRunE: checkServerHeartbeat,
        RunE: StopHandler,
        Annotations: envconfig.Usage("OLLAMA_HOST"),
        Use: "stop MODEL",
        Short: "Stop a running model",
        Args: cobra.ExactArgs(1),
        PreRunE: checkServerHeartbeat,
        RunE: StopHandler,
    }

    serveCmd := &cobra.Command{
@@ -1759,44 +1766,24 @@ func NewCLI() *cobra.Command {
        Short: "Start ollama",
        Args: cobra.ExactArgs(0),
        RunE: RunServer,
        Annotations: envconfig.Usage(
            "OLLAMA_DEBUG",
            "OLLAMA_HOST",
            "OLLAMA_CONTEXT_LENGTH",
            "OLLAMA_KEEP_ALIVE",
            "OLLAMA_MAX_LOADED_MODELS",
            "OLLAMA_MAX_QUEUE",
            "OLLAMA_MODELS",
            "OLLAMA_NUM_PARALLEL",
            "OLLAMA_NOPRUNE",
            "OLLAMA_ORIGINS",
            "OLLAMA_SCHED_SPREAD",
            "OLLAMA_FLASH_ATTENTION",
            "OLLAMA_KV_CACHE_TYPE",
            "OLLAMA_LLM_LIBRARY",
            "OLLAMA_GPU_OVERHEAD",
            "OLLAMA_LOAD_TIMEOUT",
        ),
    }

    pullCmd := &cobra.Command{
        Use: "pull MODEL",
        Short: "Pull a model from a registry",
        Args: cobra.ExactArgs(1),
        PreRunE: checkServerHeartbeat,
        RunE: PullHandler,
        Annotations: envconfig.Usage("OLLAMA_HOST"),
        Use: "pull MODEL",
        Short: "Pull a model from a registry",
        Args: cobra.ExactArgs(1),
        PreRunE: checkServerHeartbeat,
        RunE: PullHandler,
    }

    pullCmd.Flags().Bool("insecure", false, "Use an insecure registry")

    pushCmd := &cobra.Command{
        Use: "push MODEL",
        Short: "Push a model to a registry",
        Args: cobra.ExactArgs(1),
        PreRunE: checkServerHeartbeat,
        RunE: PushHandler,
        Annotations: envconfig.Usage("OLLAMA_HOST"),
        Use: "push MODEL",
        Short: "Push a model to a registry",
        Args: cobra.ExactArgs(1),
        PreRunE: checkServerHeartbeat,
        RunE: PushHandler,
    }

    pushCmd.Flags().Bool("insecure", false, "Use an insecure registry")

@@ -1818,37 +1805,33 @@ func NewCLI() *cobra.Command {
    }

    listCmd := &cobra.Command{
        Use: "list",
        Aliases: []string{"ls"},
        Short: "List models",
        PreRunE: checkServerHeartbeat,
        RunE: ListHandler,
        Annotations: envconfig.Usage("OLLAMA_HOST"),
        Use: "list",
        Aliases: []string{"ls"},
        Short: "List models",
        PreRunE: checkServerHeartbeat,
        RunE: ListHandler,
    }

    psCmd := &cobra.Command{
        Use: "ps",
        Short: "List running models",
        PreRunE: checkServerHeartbeat,
        RunE: ListRunningHandler,
        Annotations: envconfig.Usage("OLLAMA_HOST"),
        Use: "ps",
        Short: "List running models",
        PreRunE: checkServerHeartbeat,
        RunE: ListRunningHandler,
    }
    copyCmd := &cobra.Command{
        Use: "cp SOURCE DESTINATION",
        Short: "Copy a model",
        Args: cobra.ExactArgs(2),
        PreRunE: checkServerHeartbeat,
        RunE: CopyHandler,
        Annotations: envconfig.Usage("OLLAMA_HOST"),
        Use: "cp SOURCE DESTINATION",
        Short: "Copy a model",
        Args: cobra.ExactArgs(2),
        PreRunE: checkServerHeartbeat,
        RunE: CopyHandler,
    }

    deleteCmd := &cobra.Command{
        Use: "rm MODEL [MODEL...]",
        Short: "Remove a model",
        Args: cobra.MinimumNArgs(1),
        PreRunE: checkServerHeartbeat,
        RunE: DeleteHandler,
        Annotations: envconfig.Usage("OLLAMA_HOST"),
        Use: "rm MODEL [MODEL...]",
        Short: "Remove a model",
        Args: cobra.MinimumNArgs(1),
        PreRunE: checkServerHeartbeat,
        RunE: DeleteHandler,
    }

    runnerCmd := &cobra.Command{

@@ -1863,6 +1846,50 @@ func NewCLI() *cobra.Command {
        _ = runner.Execute(args[1:])
    })

    envVars := envconfig.AsMap()

    envs := []envconfig.EnvVar{envVars["OLLAMA_HOST"]}

    for _, cmd := range []*cobra.Command{
        createCmd,
        showCmd,
        runCmd,
        stopCmd,
        pullCmd,
        pushCmd,
        listCmd,
        psCmd,
        copyCmd,
        deleteCmd,
        serveCmd,
    } {
        switch cmd {
        case runCmd:
            appendEnvDocs(cmd, []envconfig.EnvVar{envVars["OLLAMA_HOST"], envVars["OLLAMA_NOHISTORY"]})
        case serveCmd:
            appendEnvDocs(cmd, []envconfig.EnvVar{
                envVars["OLLAMA_DEBUG"],
                envVars["OLLAMA_HOST"],
                envVars["OLLAMA_CONTEXT_LENGTH"],
                envVars["OLLAMA_KEEP_ALIVE"],
                envVars["OLLAMA_MAX_LOADED_MODELS"],
                envVars["OLLAMA_MAX_QUEUE"],
                envVars["OLLAMA_MODELS"],
                envVars["OLLAMA_NUM_PARALLEL"],
                envVars["OLLAMA_NOPRUNE"],
                envVars["OLLAMA_ORIGINS"],
                envVars["OLLAMA_SCHED_SPREAD"],
                envVars["OLLAMA_FLASH_ATTENTION"],
                envVars["OLLAMA_KV_CACHE_TYPE"],
                envVars["OLLAMA_LLM_LIBRARY"],
                envVars["OLLAMA_GPU_OVERHEAD"],
                envVars["OLLAMA_LOAD_TIMEOUT"],
            })
        default:
            appendEnvDocs(cmd, envs)
        }
    }

    rootCmd.AddCommand(
        serveCmd,
        createCmd,

@@ -1880,7 +1907,6 @@ func NewCLI() *cobra.Command {
        runnerCmd,
    )

    rootCmd.SetUsageTemplate(usageTemplate)
    return rootCmd
}
@@ -1,88 +0,0 @@
|
||||
Usage:
|
||||
{{- if .Runnable }} {{ .UseLine }}
|
||||
{{- end }}
|
||||
{{- if .HasAvailableSubCommands }} {{ .CommandPath }} [command]
|
||||
{{- end }}
|
||||
|
||||
{{- if gt (len .Aliases) 0}}
|
||||
|
||||
Aliases:
|
||||
{{ .NameAndAliases }}
|
||||
{{- end }}
|
||||
|
||||
{{- if .HasExample }}
|
||||
|
||||
Examples:
|
||||
{{ .Example }}
|
||||
{{- end }}
|
||||
|
||||
{{- if .HasAvailableSubCommands }}
|
||||
{{- if eq (len .Groups) 0}}
|
||||
|
||||
Available Commands:
|
||||
{{- range .Commands }}
|
||||
{{- if or .IsAvailableCommand (eq .Name "help") }}
|
||||
{{ rpad .Name .NamePadding }} {{ .Short }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- else }}
|
||||
|
||||
{{- range .Groups }}
|
||||
|
||||
{{ .Title }}
|
||||
|
||||
{{- range $.Commands }}
|
||||
{{- if and (eq .GroupID .ID) (or .IsAvailableCommand (eq .Name "help")) }}
|
||||
{{ rpad .Name .NamePadding }} {{ .Short }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- if not .AllChildCommandsHaveGroup }}
|
||||
|
||||
Additional Commands:
|
||||
{{- range $.Commands }}
|
||||
{{- if and (eq .GroupID "") (or .IsAvailableCommand (eq .Name "help")) }}
|
||||
{{ rpad .Name .NamePadding }} {{ .Short }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- if .HasAvailableLocalFlags }}
|
||||
|
||||
Flags:
|
||||
{{ .LocalFlags.FlagUsages | trimTrailingWhitespaces }}
|
||||
{{- end }}
|
||||
|
||||
{{- if .HasAvailableInheritedFlags }}
|
||||
|
||||
Global Flags:
|
||||
{{ .InheritedFlags.FlagUsages | trimTrailingWhitespaces }}
|
||||
{{- end }}
|
||||
|
||||
{{- /* Hijack .Annotations for Environment Variables */ -}}
|
||||
{{- if .Annotations }}
|
||||
|
||||
Environment Variables:
|
||||
{{- range $key, $value := .Annotations }}
|
||||
{{ rpad $key 24 }} {{ $value | trimTrailingWhitespaces }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- if .HasHelpSubCommands }}
|
||||
|
||||
Additional help topics:
|
||||
{{- range .Commands }}
|
||||
{{- if .IsAdditionalHelpTopicCommand }}
|
||||
{{ rpad .CommandPath .CommandPathPadding }} {{ .Short }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- if .HasAvailableSubCommands }}
|
||||
|
||||
Use "{{ .CommandPath }} [command] --help" for more information about a command.
|
||||
{{- end }}
|
||||
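This deleted template rendered environment variables by hijacking cobra's Annotations map. A brief sketch of how the two halves fit together under the old approach (illustration only):

```go
// envconfig.Usage returns a map[string]string of variable name to usage
// string; storing it in a command's Annotations let the template's
// "Environment Variables" block above range over it.
pullCmd := &cobra.Command{
	Use:         "pull MODEL",
	Annotations: envconfig.Usage("OLLAMA_HOST"), // e.g. {"OLLAMA_HOST": "IP Address for the ollama server (default: 127.0.0.1:11434)"}
}
```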
@@ -206,6 +206,8 @@ func ConvertModel(fsys fs.FS, f *os.File) error {
        conv = &commandrModel{}
    case "GptOssForCausalLM":
        conv = &gptossModel{}
    case "DeepseekOCRForCausalLM":
        conv = &deepseekocr{}
    default:
        return fmt.Errorf("unsupported architecture %q", p.Architectures[0])
    }
136 convert/convert_deepseekocr.go (new file)
@@ -0,0 +1,136 @@
package convert

import (
    "fmt"

    "github.com/ollama/ollama/fs/ggml"
)

type deepseekocr struct {
    ModelParameters
    LanguageConfig struct {
        MaxPositionEmbeddings uint32 `json:"max_position_embeddings"`
        HiddenSize uint32 `json:"hidden_size"`
        HiddenLayers uint32 `json:"num_hidden_layers"`
        IntermediateSize uint32 `json:"intermediate_size"`
        NumAttentionHeads uint32 `json:"num_attention_heads"`
        NumKeyValueHeads uint32 `json:"num_key_value_heads"`
        NumRoutedExperts uint32 `json:"n_routed_experts"`
        NumSharedExperts uint32 `json:"n_shared_experts"`
        NumExpertsPerToken uint32 `json:"num_experts_per_tok"`
        FirstKDenseReplace uint32 `json:"first_k_dense_replace"`
    } `json:"language_config"`

    VisionConfig struct {
        ImageSize uint32 `json:"image_size"`
        Width struct {
            Vision struct {
                Heads uint32 `json:"heads"`
                ImageSize uint32 `json:"image_size"`
                Layers uint32 `json:"layers"`
                PatchSize uint32 `json:"patch_size"`
                Width uint32 `json:"width"`
            } `json:"clip-l-14-224"`
            Sam struct {
                GlobalAttentionIndexes []int32 `json:"global_attn_indexes"`
                Heads uint32 `json:"heads"`
                Layers uint32 `json:"layers"`
                Width uint32 `json:"width"`
            } `json:"sam_vit_b"`
        }
    } `json:"vision_config"`
}

func (m *deepseekocr) KV(t *Tokenizer) ggml.KV {
    kv := m.ModelParameters.KV(t)
    kv["general.architecture"] = "deepseekocr"
    kv["block_count"] = m.LanguageConfig.HiddenLayers
    kv["context_length"] = m.LanguageConfig.MaxPositionEmbeddings
    kv["embedding_length"] = m.LanguageConfig.HiddenSize
    kv["feed_forward_length"] = m.LanguageConfig.IntermediateSize
    kv["attention.head_count"] = m.LanguageConfig.NumAttentionHeads
    kv["attention.head_count_kv"] = m.LanguageConfig.NumKeyValueHeads
    kv["expert_count"] = m.LanguageConfig.NumRoutedExperts
    kv["expert_used_count"] = m.LanguageConfig.NumExpertsPerToken
    kv["leading_dense_block_count"] = m.LanguageConfig.FirstKDenseReplace

    kv["vision.block_count"] = m.VisionConfig.Width.Vision.Layers
    kv["vision.embedding_length"] = m.VisionConfig.Width.Vision.Width
    kv["vision.head_count"] = m.VisionConfig.Width.Vision.Heads
    kv["vision.image_size"] = m.VisionConfig.Width.Vision.ImageSize
    kv["vision.patch_size"] = m.VisionConfig.Width.Vision.PatchSize

    kv["sam.block_count"] = m.VisionConfig.Width.Sam.Layers
    kv["sam.embedding_length"] = m.VisionConfig.Width.Sam.Width
    kv["sam.head_count"] = m.VisionConfig.Width.Sam.Heads
    kv["sam.global_attention_indexes"] = m.VisionConfig.Width.Sam.GlobalAttentionIndexes
    return kv
}

func (m *deepseekocr) Tensors(s []Tensor) (out []*ggml.Tensor) {
    merges := make([]merge, m.LanguageConfig.HiddenLayers*3)
    for i := range m.LanguageConfig.HiddenLayers {
        merges[i*3+0] = merge{
            fmt.Sprintf("blk.%d.mlp.experts.*.gate_proj.weight", i),
            fmt.Sprintf("blk.%d.ffn_gate_exps.weight", i),
        }
        merges[i*3+1] = merge{
            fmt.Sprintf("blk.%d.mlp.experts.*.up_proj.weight", i),
            fmt.Sprintf("blk.%d.ffn_up_exps.weight", i),
        }
        merges[i*3+2] = merge{
            fmt.Sprintf("blk.%d.mlp.experts.*.down_proj.weight", i),
            fmt.Sprintf("blk.%d.ffn_down_exps.weight", i),
        }
    }

    out, s = mergeTensors(s, merges...)
    for _, t := range s {
        out = append(out, &ggml.Tensor{
            Name: t.Name(),
            Kind: t.Kind(),
            Shape: t.Shape(),
            WriterTo: t,
        })
    }
    return out
}
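Each merge above pairs a per-expert tensor glob with the single fused tensor name written to the GGUF, so per layer the mapping is (sketch, layer 0 shown):

```
blk.0.mlp.experts.*.gate_proj.weight -> blk.0.ffn_gate_exps.weight
blk.0.mlp.experts.*.up_proj.weight   -> blk.0.ffn_up_exps.weight
blk.0.mlp.experts.*.down_proj.weight -> blk.0.ffn_down_exps.weight
```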

func (m *deepseekocr) Replacements() []string {
    return []string{
        "model.embed_tokens", "token_embd",
        "model.layers", "blk",
        "input_layernorm", "attn_norm",
        "self_attn.q_proj", "attn_q",
        "self_attn.k_proj", "attn_k",
        "self_attn.v_proj", "attn_v",
        "self_attn.o_proj", "attn_output",
        "post_attention_layernorm", "ffn_norm",
        "mlp.gate_proj", "ffn_gate",
        "mlp.up_proj", "ffn_up",
        "mlp.down_proj", "ffn_down",
        "mlp.gate", "ffn_gate_inp",
        "mlp.shared_experts.gate_proj", "ffn_gate_shexp",
        "mlp.shared_experts.up_proj", "ffn_up_shexp",
        "mlp.shared_experts.down_proj", "ffn_down_shexp",
        "model.norm", "output_norm",
        "lm_head", "output",

        "model.vision_model", "v",
        "embeddings.patch_embedding", "patch_embd",
        "embeddings.class_embedding", "class_embd",
        "embeddings.position_embedding", "position_embd",
        "transformer.layers", "blk",

        "model.projector", "mm",
        "model.image_newline", "mm.image_newline",
        //nolint:misspell // this misspelling is upstream. fixing it breaks the model
        "model.view_seperator", "mm.view_seperator",

        "model.sam_model.patch_embed.proj", "s.patch_embd",
        "model.sam_model.pos_embed", "s.position_embd",
        "model.sam_model.blocks", "s.blk",
        "model.sam_model.neck", "s.neck",
        "model.sam_model.net_", "s.net_",
    }
}
@@ -44,7 +44,10 @@ func (t tensorBase) Kind() uint32 {
        t.name == "v.positional_embedding_vlm" ||
        t.name == "v.tile_position_embd.weight" ||
        t.name == "v.pre_tile_position_embd.weight" ||
        t.name == "v.post_tile_position_embd.weight" {
        t.name == "v.post_tile_position_embd.weight" ||
        t.name == "s.position_embd" ||
        strings.HasSuffix(t.name, "rel_pos_h") ||
        strings.HasSuffix(t.name, "rel_pos_w") {
        // these tensors are always F32
        return tensorKindFP32
    }
@@ -96,7 +96,10 @@ type safetensor struct {

func (st safetensor) Kind() uint32 {
    kind := st.tensorBase.Kind()
    if !strings.HasPrefix(st.name, "v.") && st.dtype == "BF16" && kind != tensorKindFP32 {
    if st.dtype == "BF16" &&
        !strings.HasPrefix(st.name, "v.") &&
        !strings.HasPrefix(st.name, "s.") &&
        kind != tensorKindFP32 {
        kind = tensorKindBF16
    }
@@ -67,14 +67,7 @@ func GPUDevices(ctx context.Context, runners []ml.FilteredRunnerDiscovery) []ml.
    slog.Info("discovering available GPUs...")

    // Warn if any user-overrides are set which could lead to incorrect GPU discovery
    overrideWarning(
        "CUDA_VISIBLE_DEVICES",
        "HIP_VISIBLE_DEVICES",
        "ROCR_VISIBLE_DEVICES",
        "GGML_VK_VISIBLE_DEVICES",
        "GPU_DEVICE_ORDINAL",
        "HSA_OVERRIDE_GFX_VERSION",
    )
    overrideWarnings()

    requested := envconfig.LLMLibrary()
    jetpack := cudaJetpack()

@@ -132,10 +125,20 @@ func GPUDevices(ctx context.Context, runners []ml.FilteredRunnerDiscovery) []ml.
    supportedMu := sync.Mutex{}
    supported := make(map[string]map[string]map[string]int) // [Library][libDir][ID] = pre-deletion devices index
    for i := range devices {
        libDir := devices[i].LibraryPath[len(devices[i].LibraryPath)-1]
        if !devices[i].NeedsInitValidation() {
            // No need to validate, add to the supported map
            supportedMu.Lock()
            if _, ok := supported[devices[i].Library]; !ok {
                supported[devices[i].Library] = make(map[string]map[string]int)
            }
            if _, ok := supported[devices[i].Library][libDir]; !ok {
                supported[devices[i].Library][libDir] = make(map[string]int)
            }
            supported[devices[i].Library][libDir][devices[i].ID] = i
            supportedMu.Unlock()
            continue
        }
        libDir := devices[i].LibraryPath[len(devices[i].LibraryPath)-1]
        slog.Debug("verifying if device is supported", "library", libDir, "description", devices[i].Description, "compute", devices[i].Compute(), "id", devices[i].ID, "pci_id", devices[i].PCIID)
        wg.Add(1)
        go func(i int) {

@@ -461,20 +464,23 @@ func bootstrapDevices(ctx context.Context, ollamaLibDirs []string, extraEnvs map
    return devices
}

func overrideWarning(s ...string) {
    attrs := make([]slog.Attr, 0, len(s))
    for _, i := range envconfig.Lookup(s...) {
        if !i.IsZero() {
            attrs = append(attrs, i.LogValue().Group()...)
func overrideWarnings() {
    anyFound := false
    m := envconfig.AsMap()
    for _, k := range []string{
        "CUDA_VISIBLE_DEVICES",
        "HIP_VISIBLE_DEVICES",
        "ROCR_VISIBLE_DEVICES",
        "GGML_VK_VISIBLE_DEVICES",
        "GPU_DEVICE_ORDINAL",
        "HSA_OVERRIDE_GFX_VERSION",
    } {
        if e, found := m[k]; found && e.Value != "" {
            anyFound = true
            slog.Warn("user overrode visible devices", k, e.Value)
        }
    }

    if len(attrs) > 0 {
        slog.LogAttrs(
            context.TODO(),
            slog.LevelWarn,
            "user overrode visible devices; if GPUs are not correctly discovered, unset and try again",
            attrs...,
        )
    if anyFound {
        slog.Warn("if GPUs are not correctly discovered, unset and try again")
    }
}
@@ -1,34 +1,34 @@
|
||||
---
|
||||
title: VS Code
|
||||
title: VS Code
|
||||
---
|
||||
|
||||
## Install
|
||||
|
||||
Install [VS Code](https://code.visualstudio.com/download).
|
||||
Install [VS Code](https://code.visualstudio.com/download).
|
||||
|
||||
## Usage with Ollama
|
||||
## Usage with Ollama
|
||||
|
||||
1. Open Copilot side bar found in top right window
|
||||
<div style={{ display: 'flex', justifyContent: 'center' }}>
|
||||
<img
|
||||
src="/images/vscode-sidebar.png"
|
||||
alt="VS Code chat Sidebar"
|
||||
width="75%"
|
||||
/>
|
||||
</div>
|
||||
2. Select the model drowpdown > **Manage models**
|
||||
<div style={{ display: 'flex', justifyContent: 'center' }}>
|
||||
<img
|
||||
src="/images/vscode-models.png"
|
||||
alt="VS Code model picker"
|
||||
width="75%"
|
||||
/>
|
||||
</div>
|
||||
<div style={{ display: "flex", justifyContent: "center" }}>
|
||||
<img
|
||||
src="/images/vscode-sidebar.png"
|
||||
alt="VS Code chat Sidebar"
|
||||
width="75%"
|
||||
/>
|
||||
</div>
|
||||
2. Select the model dropdown > **Manage models**
|
||||
<div style={{ display: "flex", justifyContent: "center" }}>
|
||||
<img
|
||||
src="/images/vscode-models.png"
|
||||
alt="VS Code model picker"
|
||||
width="75%"
|
||||
/>
|
||||
</div>
|
||||
3. Enter **Ollama** under **Provider Dropdown** and select desired models (e.g `qwen3, qwen3-coder:480b-cloud`)
|
||||
<div style={{ display: 'flex', justifyContent: 'center' }}>
|
||||
<img
|
||||
src="/images/vscode-model-options.png"
|
||||
alt="VS Code model options dropdown"
|
||||
width="75%"
|
||||
/>
|
||||
</div>
|
||||
<div style={{ display: "flex", justifyContent: "center" }}>
|
||||
<img
|
||||
src="/images/vscode-model-options.png"
|
||||
alt="VS Code model options dropdown"
|
||||
width="75%"
|
||||
/>
|
||||
</div>
|
||||
|
||||
@@ -149,9 +149,6 @@ PARAMETER <parameter> <parametervalue>

| Parameter | Description | Value Type | Example Usage |
| --- | --- | --- | --- |
| mirostat | Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) | int | mirostat 0 |
| mirostat_eta | Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1) | float | mirostat_eta 0.1 |
| mirostat_tau | Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) | float | mirostat_tau 5.0 |
| num_ctx | Sets the size of the context window used to generate the next token. (Default: 2048) | int | num_ctx 4096 |
| repeat_last_n | Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx) | int | repeat_last_n 64 |
| repeat_penalty | Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) | float | repeat_penalty 1.1 |
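For context, a short Modelfile that uses values straight from the table's Example Usage column might look like this (the FROM line is a placeholder model):

```
FROM llama3.2
PARAMETER num_ctx 4096
PARAMETER repeat_last_n 64
PARAMETER repeat_penalty 1.1
```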
@@ -8,9 +8,7 @@ import (
    "net/url"
    "os"
    "path/filepath"
    "reflect"
    "runtime"
    "slices"
    "strconv"
    "strings"
    "time"
@@ -267,97 +265,67 @@ func Uint64(key string, defaultValue uint64) func() uint64 {
// Set aside VRAM per GPU
var GpuOverhead = Uint64("OLLAMA_GPU_OVERHEAD", 0)

type item struct {
    enable bool
    name, usage string
    value, defaultValue any
type EnvVar struct {
    Name string
    Value any
    Description string
}

func (i item) IsZero() bool {
    return (i.value == i.defaultValue) || (i.defaultValue == nil && reflect.ValueOf(i.value).IsZero())
}
func AsMap() map[string]EnvVar {
    ret := map[string]EnvVar{
        "OLLAMA_DEBUG": {"OLLAMA_DEBUG", LogLevel(), "Show additional debug information (e.g. OLLAMA_DEBUG=1)"},
        "OLLAMA_FLASH_ATTENTION": {"OLLAMA_FLASH_ATTENTION", FlashAttention(false), "Enabled flash attention"},
        "OLLAMA_KV_CACHE_TYPE": {"OLLAMA_KV_CACHE_TYPE", KvCacheType(), "Quantization type for the K/V cache (default: f16)"},
        "OLLAMA_GPU_OVERHEAD": {"OLLAMA_GPU_OVERHEAD", GpuOverhead(), "Reserve a portion of VRAM per GPU (bytes)"},
        "OLLAMA_HOST": {"OLLAMA_HOST", Host(), "IP Address for the ollama server (default 127.0.0.1:11434)"},
        "OLLAMA_KEEP_ALIVE": {"OLLAMA_KEEP_ALIVE", KeepAlive(), "The duration that models stay loaded in memory (default \"5m\")"},
        "OLLAMA_LLM_LIBRARY": {"OLLAMA_LLM_LIBRARY", LLMLibrary(), "Set LLM library to bypass autodetection"},
        "OLLAMA_LOAD_TIMEOUT": {"OLLAMA_LOAD_TIMEOUT", LoadTimeout(), "How long to allow model loads to stall before giving up (default \"5m\")"},
        "OLLAMA_MAX_LOADED_MODELS": {"OLLAMA_MAX_LOADED_MODELS", MaxRunners(), "Maximum number of loaded models per GPU"},
        "OLLAMA_MAX_QUEUE": {"OLLAMA_MAX_QUEUE", MaxQueue(), "Maximum number of queued requests"},
        "OLLAMA_MODELS": {"OLLAMA_MODELS", Models(), "The path to the models directory"},
        "OLLAMA_NOHISTORY": {"OLLAMA_NOHISTORY", NoHistory(), "Do not preserve readline history"},
        "OLLAMA_NOPRUNE": {"OLLAMA_NOPRUNE", NoPrune(), "Do not prune model blobs on startup"},
        "OLLAMA_NUM_PARALLEL": {"OLLAMA_NUM_PARALLEL", NumParallel(), "Maximum number of parallel requests"},
        "OLLAMA_ORIGINS": {"OLLAMA_ORIGINS", AllowedOrigins(), "A comma separated list of allowed origins"},
        "OLLAMA_SCHED_SPREAD": {"OLLAMA_SCHED_SPREAD", SchedSpread(), "Always schedule model across all GPUs"},
        "OLLAMA_MULTIUSER_CACHE": {"OLLAMA_MULTIUSER_CACHE", MultiUserCache(), "Optimize prompt caching for multi-user scenarios"},
        "OLLAMA_CONTEXT_LENGTH": {"OLLAMA_CONTEXT_LENGTH", ContextLength(), "Context length to use unless otherwise specified (default: 4096)"},
        "OLLAMA_NEW_ENGINE": {"OLLAMA_NEW_ENGINE", NewEngine(), "Enable the new Ollama engine"},
        "OLLAMA_REMOTES": {"OLLAMA_REMOTES", Remotes(), "Allowed hosts for remote models (default \"ollama.com\")"},

func (i item) LogValue() slog.Value {
    return slog.GroupValue(slog.Any(i.name, i.value))
}

type slice []item

func (s slice) LogValue() slog.Value {
    attrs := make([]slog.Attr, 0, 2*len(s))
    for _, e := range s {
        attrs = append(attrs, e.LogValue().Group()...)
        // Informational
        "HTTP_PROXY": {"HTTP_PROXY", String("HTTP_PROXY")(), "HTTP proxy"},
        "HTTPS_PROXY": {"HTTPS_PROXY", String("HTTPS_PROXY")(), "HTTPS proxy"},
        "NO_PROXY": {"NO_PROXY", String("NO_PROXY")(), "No proxy"},
    }
    return slog.GroupValue(attrs...)
}

var all = slice{
    {true, "OLLAMA_DEBUG", "Show additional debug information (e.g. OLLAMA_DEBUG=1). Verbosity increase with value", LogLevel(), nil},
    {true, "OLLAMA_FLASH_ATTENTION", "Enable flash attention", FlashAttention(false), nil},
    {true, "OLLAMA_KV_CACHE_TYPE", "Quantization type for the K/V cache", KvCacheType(), nil},
    {true, "OLLAMA_GPU_OVERHEAD", "Reserve a portion of VRAM per GPU (bytes)", GpuOverhead(), 0},
    {true, "OLLAMA_HOST", "IP Address for the ollama server", Host(), "127.0.0.1:11434"},
    {true, "OLLAMA_KEEP_ALIVE", "The duration that models stay loaded in memory", KeepAlive(), 5 * time.Minute},
    {true, "OLLAMA_LLM_LIBRARY", "Set LLM library to bypass autodetection", LLMLibrary(), nil},
    {true, "OLLAMA_LOAD_TIMEOUT", "How long to allow model loads to stall before giving up", LoadTimeout(), 5 * time.Minute},
    {true, "OLLAMA_MAX_LOADED_MODELS", "Maximum number of loaded models per GPU", MaxRunners(), 0},
    {true, "OLLAMA_MAX_QUEUE", "Maximum number of queued requests", MaxQueue(), 512},
    {true, "OLLAMA_MODELS", "The path to the models directory", Models(), filepath.Join(os.Getenv("HOME"), ".ollama", "models")},
    {true, "OLLAMA_NOHISTORY", "Do not preserve readline history", NoHistory(), false},
    {true, "OLLAMA_NOPRUNE", "Do not prune model blobs on startup", NoPrune(), false},
    {true, "OLLAMA_NUM_PARALLEL", "Maximum number of parallel requests", NumParallel(), 1},
    {true, "OLLAMA_ORIGINS", "A comma separated list of allowed origins", AllowedOrigins(), nil},
    {true, "OLLAMA_SCHED_SPREAD", "Always schedule model across all GPUs", SchedSpread(), false},
    {true, "OLLAMA_MULTIUSER_CACHE", "Optimize prompt caching for multi-user scenarios", MultiUserCache(), false},
    {true, "OLLAMA_CONTEXT_LENGTH", "Context length to use unless otherwise specified", ContextLength(), 4096},
    {true, "OLLAMA_NEW_ENGINE", "Enable the new Ollama engine", NewEngine(), false},
    {true, "OLLAMA_REMOTES", "Allowed hosts for remote models", Remotes(), []string{"ollama.com"}},
    {runtime.GOOS != "windows", "HTTP_PROXY", "HTTP proxy", String("http_proxy")(), nil},
    {runtime.GOOS != "windows", "HTTPS_PROXY", "HTTPS proxy", String("https_proxy")(), nil},
    {runtime.GOOS != "windows", "NO_PROXY", "No proxy", String("no_proxy")(), nil},
    {runtime.GOOS != "darwin", "CUDA_VISIBLE_DEVICES", "Set which NVIDIA devices are visible", CudaVisibleDevices(), nil},
    {runtime.GOOS != "darwin", "HIP_VISIBLE_DEVICES", "Set which AMD devices are visible by numeric ID", HipVisibleDevices(), nil},
    {runtime.GOOS != "darwin", "ROCR_VISIBLE_DEVICES", "Set which AMD devices are visible by UUID or numeric ID", RocrVisibleDevices(), nil},
    {runtime.GOOS != "darwin", "GGML_VK_VISIBLE_DEVICES", "Set which Vulkan devices are visible by numeric ID", VkVisibleDevices(), nil},
    {runtime.GOOS != "darwin", "GPU_DEVICE_ORDINAL", "Set which AMD devices are visible by numeric ID", GpuDeviceOrdinal(), nil},
    {runtime.GOOS != "darwin", "HSA_OVERRIDE_GFX_VERSION", "Override the gfx used for all detected AMD GPUs", HsaOverrideGfxVersion(), nil},
}

func Enabled() slice {
    enabled := make(slice, 0, len(all))
    for _, i := range all {
        if i.enable {
            enabled = append(enabled, i)
        }
    if runtime.GOOS != "windows" {
        // Windows environment variables are case-insensitive so there's no need to duplicate them
        ret["http_proxy"] = EnvVar{"http_proxy", String("http_proxy")(), "HTTP proxy"}
        ret["https_proxy"] = EnvVar{"https_proxy", String("https_proxy")(), "HTTPS proxy"}
        ret["no_proxy"] = EnvVar{"no_proxy", String("no_proxy")(), "No proxy"}
    }
    return enabled

    if runtime.GOOS != "darwin" {
        ret["CUDA_VISIBLE_DEVICES"] = EnvVar{"CUDA_VISIBLE_DEVICES", CudaVisibleDevices(), "Set which NVIDIA devices are visible"}
        ret["HIP_VISIBLE_DEVICES"] = EnvVar{"HIP_VISIBLE_DEVICES", HipVisibleDevices(), "Set which AMD devices are visible by numeric ID"}
        ret["ROCR_VISIBLE_DEVICES"] = EnvVar{"ROCR_VISIBLE_DEVICES", RocrVisibleDevices(), "Set which AMD devices are visible by UUID or numeric ID"}
        ret["GGML_VK_VISIBLE_DEVICES"] = EnvVar{"GGML_VK_VISIBLE_DEVICES", VkVisibleDevices(), "Set which Vulkan devices are visible by numeric ID"}
        ret["GPU_DEVICE_ORDINAL"] = EnvVar{"GPU_DEVICE_ORDINAL", GpuDeviceOrdinal(), "Set which AMD devices are visible by numeric ID"}
        ret["HSA_OVERRIDE_GFX_VERSION"] = EnvVar{"HSA_OVERRIDE_GFX_VERSION", HsaOverrideGfxVersion(), "Override the gfx used for all detected AMD GPUs"}
        ret["OLLAMA_VULKAN"] = EnvVar{"OLLAMA_VULKAN", EnableVulkan(), "Enable experimental Vulkan support"}
    }

    return ret
}

func Lookup(s ...string) []item {
    enabled := Enabled()
    filtered := make([]item, 0, len(s))
    for _, k := range s {
        if i := slices.IndexFunc(enabled, func(i item) bool { return i.name == k }); i != -1 {
            filtered = append(filtered, enabled[i])
        }
func Values() map[string]string {
    vals := make(map[string]string)
    for k, v := range AsMap() {
        vals[k] = fmt.Sprintf("%v", v.Value)
    }
    return filtered
}

// Usage returns enabled environment variables and their usage descriptions.
// If a variable has a default value, it is included in the description.
func Usage(s ...string) map[string]string {
    enabled := Enabled()
    m := make(map[string]string, len(s))
    for _, k := range s {
        if i := slices.IndexFunc(enabled, func(i item) bool { return i.name == k }); i != -1 {
            m[k] = enabled[i].usage
            if enabled[i].defaultValue != nil {
                m[k] += fmt.Sprintf(" (default: %v)", enabled[i].defaultValue)
            }
        }
    }
    return m
    return vals
}

// Var returns an environment variable stripped of leading and trailing quotes or spaces
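A hypothetical consumer of the new EnvVar API (not part of this diff), dumping every known variable with its current value and description:

```go
for name, v := range envconfig.AsMap() {
	fmt.Printf("%-28s %v\t%s\n", name, v.Value, v.Description)
}

// Values() flattens the same map to strings, which is convenient for logging:
vals := envconfig.Values() // map[string]string
```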
@@ -249,6 +249,9 @@ func (kv KV) OllamaEngineRequired() bool {
        "qwen25vl",
        "qwen3", "qwen3moe",
        "qwen3vl", "qwen3vlmoe",
        "deepseekocr",
        "deepseek2",
        "nomic-bert",
    }, kv.Architecture())
}
@@ -388,9 +388,9 @@ func NewFunctionNameMap() *FunctionNameMap {
    }
}

// Init initializes the handler with tools and optional last message
// Init initializes the handler with tools, optional last message, and think value
// Implements the Parser interface
func (h *HarmonyMessageHandler) Init(tools []api.Tool, lastMessage *api.Message) []api.Tool {
func (h *HarmonyMessageHandler) Init(tools []api.Tool, lastMessage *api.Message, thinkValue *api.ThinkValue) []api.Tool {
    // Initialize the harmony parser
    if h.HarmonyParser == nil {
        h.HarmonyParser = &HarmonyParser{
@@ -3,7 +3,6 @@ package kvcache
import (
    "errors"
    "fmt"
    "log/slog"
    "math"
    "slices"
@@ -40,18 +39,18 @@ type Causal struct {

    // ** current forward pass **

    // the active layer for Get and Put
    curLayer int

    // starting location for data storage for this batch
    curLoc int

    // size of the current batch
    curBatchSize int

    // locations for data storage for this batch
    curLoc ml.Tensor

    // mask of the cache as used by this batch
    curMask ml.Tensor

    // the active layer for Get and Put
    curLayer int

    // locations in the cache that are needed for this batch
    curCellRange cellRange
@@ -206,45 +205,47 @@ func (c *Causal) StartForward(ctx ml.Context, batch input.Batch, reserve bool) e
    c.curPositions = batch.Positions
    c.opts.Except = nil

    var locs []int32
    if !reserve {
        c.updateSlidingWindow()

        var err error
        c.curLoc, err = c.findStartLoc()
        if errors.Is(err, ErrKvCacheFull) {
            c.defrag()
            c.curLoc, err = c.findStartLoc()
        }
        locs, err = c.findLocs()
        if err != nil {
            return err
        }

        for i, pos := range batch.Positions {
            seq := batch.Sequences[i]
            loc := int(locs[i])

            c.cells[c.curLoc+i] = cacheCell{pos: pos, sequences: []int{seq}}
            c.cells[loc] = cacheCell{pos: pos, sequences: []int{seq}}

            seqRange, ok := c.cellRanges[seq]
            if !ok {
                seqRange = newRange()
            }

            seqRange.min = min(seqRange.min, c.curLoc+i)
            c.curCellRange.min = min(c.curCellRange.min, c.curLoc+i)
            seqRange.min = min(seqRange.min, loc)
            c.curCellRange.min = min(c.curCellRange.min, loc)

            seqRange.max = max(seqRange.max, c.curLoc+i)
            c.curCellRange.max = max(c.curCellRange.max, c.curLoc+i)
            seqRange.max = max(seqRange.max, loc)
            c.curCellRange.max = max(c.curCellRange.max, loc)

            c.cellRanges[seq] = seqRange
        }
    } else {
        // If we are reserving memory, don't update any of the cache metadata but set the size
        // to the worst case.
        c.curLoc = 0
        locs = make([]int32, c.curBatchSize)
        for i := range locs {
            locs[i] = int32(i)
        }
        c.curCellRange.min = 0
        c.curCellRange.max = len(c.cells) - 1
    }

    c.curLoc = ctx.Input().FromInts(locs, len(locs))
    c.curMask = c.buildMask(ctx)

    return nil
@@ -257,22 +258,20 @@ func newRange() cellRange {
    }
}

// Find the first contiguous block of at least curBatchSize
func (c *Causal) findStartLoc() (int, error) {
    var start, count int
// Returns a slice of locations where each token in the batch should be stored
func (c *Causal) findLocs() ([]int32, error) {
    loc := make([]int32, 0, c.curBatchSize)

    for i := range c.cells {
        if len(c.cells[i].sequences) == 0 {
            count++
            if count >= c.curBatchSize {
                return start, nil
            loc = append(loc, int32(i))
            if len(loc) >= c.curBatchSize {
                return loc, nil
            }
        } else {
            start = i + 1
            count = 0
        }
    }

    return 0, fmt.Errorf("%w (cache: %v batch: %v)", ErrKvCacheFull, len(c.cells), c.curBatchSize)
    return nil, fmt.Errorf("%w (cache: %v batch: %v)", ErrKvCacheFull, len(c.cells), c.curBatchSize)
}

func (c *Causal) updateSlidingWindow() {
@@ -402,145 +401,6 @@ func (c *Causal) buildMask(ctx ml.Context) ml.Tensor {
    return maskTensor
}

func (c *Causal) moveCells(ctx ml.Context, src, dst, length int) {
    for i, key := range c.keys {
        if key == nil {
            continue
        }

        kHeadDim := key.Dim(0)
        numKVHeads := key.Dim(1)
        rowSize := key.Stride(2)

        kSrcView := key.View(ctx, rowSize*src, kHeadDim*numKVHeads*length)
        kDstView := key.View(ctx, rowSize*dst, kHeadDim*numKVHeads*length)

        value := c.values[i]
        var vSrcView, vDstView ml.Tensor
        if c.config.PermutedV {
            vHeadDim := value.Dim(1)
            elemSize := value.Stride(0)

            vSrcView = value.View(ctx, elemSize*src, length, len(c.cells)*elemSize, vHeadDim*numKVHeads)
            vDstView = value.View(ctx, elemSize*dst, length, len(c.cells)*elemSize, vHeadDim*numKVHeads)
        } else {
            vHeadDim := value.Dim(0)
            rowSize := value.Stride(2)

            vSrcView = value.View(ctx, rowSize*src, vHeadDim*numKVHeads*length)
            vDstView = value.View(ctx, rowSize*dst, vHeadDim*numKVHeads*length)
        }

        ctx.Forward(
            kSrcView.Copy(ctx, kDstView),
            vSrcView.Copy(ctx, vDstView),
        )
    }
}

func (c *Causal) defrag() {
    slog.Debug("defragmenting kv cache")

    // Defrag strategy:
    // - Search for empty holes at the beginning of the cache,
    //   filling them with active data starting at the end
    // - If there are contiguous elements that need to be moved,
    //   combine them into a single operation by holding new moves
    //   until we see that the next one is non-contiguous
    // - Fill up the context with the maximum number of operations it
    //   can hold then compute that and continue with a new context
    //
    // We could try to optimize placement by grouping blocks from
    // the same sequences together but most likely the next forward
    // pass will disrupt this anyways, so the real world benefit
    // seems limited as this time.

    ctx := c.backend.NewContext()

    // For every move, 6 tensors are required per layer (2 views and a
    // copy for each of k and v). We also need to refer to the original
    // k and v cache tensors - once per layer, not per move.
    layers := 0
    for _, key := range c.keys {
        if key == nil {
            continue
        }
        layers++
    }

    maxMoves := (ctx.MaxGraphNodes() - 2*layers) / (6 * layers)
    moves := 0

    var pendingSrc, pendingDst, pendingLen int
    src := len(c.cells) - 1

    for dst := 0; dst < src; dst++ {
        if len(c.cells[dst].sequences) == 0 {
            for ; src > dst; src-- {
                if len(c.cells[src].sequences) != 0 {
                    c.cells[dst] = c.cells[src]
                    c.cells[src] = cacheCell{}

                    if pendingLen > 0 {
                        if src == pendingSrc-pendingLen && dst == pendingDst+pendingLen {
                            pendingSrc = src
                            pendingLen++
                            break
                        } else {
                            c.moveCells(ctx, pendingSrc, pendingDst, pendingLen)
                            moves++
                        }
                    }

                    pendingSrc = src
                    pendingDst = dst
                    pendingLen = 1

                    break
                }
            }
        }

        if moves >= maxMoves {
            ctx.Compute()
            ctx.Close()
            ctx = c.backend.NewContext()

            moves = 0
        }
    }

    if pendingLen > 0 {
        c.moveCells(ctx, pendingSrc, pendingDst, pendingLen)
        moves++
    }

    if moves > 0 {
        ctx.Compute()
    }
    ctx.Close()

    // Reset range metadata
    for seq := range c.cellRanges {
        seqRange := newRange()

        for i, cell := range c.cells {
            if slices.Contains(cell.sequences, seq) {
                if i < seqRange.min {
                    seqRange.min = i
                }
                if i > seqRange.max {
                    seqRange.max = i
                }
            }
        }

        c.cellRanges[seq] = seqRange
    }

    c.updateSlidingWindow()
}

func (c *Causal) SetLayer(layer int) {
    c.curLayer = layer
}
@@ -625,18 +485,25 @@ func (c *Causal) Put(ctx ml.Context, key, value ml.Tensor) {
    }
}

    rowSize := c.keys[c.curLayer].Stride(2)
    ctx.Forward(key.Copy(ctx, c.keys[c.curLayer].View(ctx, rowSize*c.curLoc, kHeadDim*numKVHeads*batchSize)))
    key = key.Reshape(ctx, kHeadDim*numKVHeads, batchSize)
    keyCache := c.keys[c.curLayer]
    keyCache = keyCache.Reshape(ctx, kHeadDim*numKVHeads, len(c.cells))
    ctx.Forward(keyCache.SetRows(ctx, key, c.curLoc))

    if c.config.PermutedV {
        elemSize := c.values[c.curLayer].Stride(0)
        value = value.Reshape(ctx, vHeadDim*numKVHeads, 1, batchSize)
        value = value.Permute(ctx, 2, 0, 1, 3)

        value = value.Permute(ctx, 1, 2, 0, 3)
        ctx.Forward(value.Copy(ctx, c.values[c.curLayer].View(ctx, elemSize*c.curLoc, batchSize, len(c.cells)*elemSize, vHeadDim*numKVHeads)))
        valueCache := c.values[c.curLayer]
        valueCache = valueCache.Reshape(ctx, 1, len(c.cells), vHeadDim*numKVHeads)

        ctx.Forward(valueCache.SetRows(ctx, value, c.curLoc))
    } else {
        rowSize := c.values[c.curLayer].Stride(2)
        value = value.Reshape(ctx, vHeadDim*numKVHeads, batchSize)
        valueCache := c.values[c.curLayer]
        valueCache = valueCache.Reshape(ctx, vHeadDim*numKVHeads, len(c.cells))

        ctx.Forward(value.Copy(ctx, c.values[c.curLayer].View(ctx, rowSize*c.curLoc, vHeadDim*numKVHeads*batchSize)))
        ctx.Forward(valueCache.SetRows(ctx, value, c.curLoc))
    }
}
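The switch from View-and-Copy to SetRows is what allows c.curLoc to be a tensor of arbitrary cell indices rather than the start of one contiguous block. A sketch of the scatter semantics SetRows relies on, written with plain slices for illustration:

```go
// setRows copies row i of src into dst at row idxs[i]. This is the shape of
// operation that lets Put scatter a batch into non-contiguous cache cells.
func setRows(dst, src [][]float32, idxs []int32) {
	for i, row := range src {
		copy(dst[idxs[i]], row)
	}
}
```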
File diff suppressed because it is too large
@@ -38,7 +38,7 @@ index 44ae76d66..639d551a2 100644
#ifdef __cplusplus
}
diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp
index d2c278a35..221e29509 100644
index ca02ea079..c12b069e5 100644
--- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp
+++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp
@@ -73,6 +73,7 @@ DispatchLoaderDynamic & ggml_vk_default_dispatcher();

@@ -11,7 +11,7 @@ vidmem optimization.
1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp
index 221e29509..18b7cbccf 100644
index c12b069e5..76c78c2ea 100644
--- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp
+++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp
@@ -5654,14 +5654,11 @@ static void ggml_vk_buffer_copy(vk_buffer& dst, size_t dst_offset, vk_buffer& sr

@@ -50,7 +50,7 @@ Subject: [PATCH] Vulkan MMQ Integer Dot Refactor and K-Quant support (#16536)
create mode 100644 ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq_shmem_types.glsl

diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp
index 18b7cbccf..53b57c179 100644
index 76c78c2ea..7669ed206 100644
--- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp
+++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp
@@ -488,6 +488,7 @@ struct vk_device_struct {

@@ -58,7 +58,7 @@ index 639d551a2..e5c446d1d 100644
GGML_API size_t gguf_type_size(enum gguf_type type);
GGML_API struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_params params);
diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp
index 53b57c179..b2855b078 100644
index 7669ed206..63a762ec2 100644
--- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp
+++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp
@@ -387,12 +387,76 @@ static constexpr uint32_t num_argsort_pipelines = 11;

@@ -31,7 +31,7 @@ Add new backend tests.
6 files changed, 371 insertions(+), 117 deletions(-)

diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp
index b2855b078..aaf4334b5 100644
index 63a762ec2..db92a7901 100644
--- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp
+++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp
@@ -458,6 +458,11 @@ static topk_moe_mode ggml_vk_num_additional_ops_to_topk_moe_mode(uint32_t num) {

@@ -9,7 +9,7 @@ Subject: [PATCH] vulkan: Handle argsort with a large number of rows (#16851)
2 files changed, 16 insertions(+), 4 deletions(-)

diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp
index aaf4334b5..3604ceb04 100644
index db92a7901..e959674d1 100644
--- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp
+++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp
@@ -1084,6 +1084,7 @@ struct vk_op_soft_max_push_constants {

@@ -20,7 +20,7 @@ Subject: [PATCH] vulkan: Fix crash when FP16 mul_mat accumulation is not
1 file changed, 13 insertions(+), 7 deletions(-)

diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp
index 3604ceb04..80185d9f0 100644
index e959674d1..903050b0b 100644
--- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp
+++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp
@@ -146,8 +146,13 @@ static void ggml_vk_destroy_pipeline(vk::Device& device, vk_pipeline& pipeline);
25 llama/patches/0036-ggml-cuda-skip-large-batches.patch (new file)
@@ -0,0 +1,25 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Michael Yang <git@mxy.ng>
Date: Tue, 18 Nov 2025 11:13:04 -0800
Subject: [PATCH] ggml-cuda: skip large batches

cuda panics on batches larger than 1024 so mark it as unsupported to
fallback to cpu
---
 ggml/src/ggml-cuda/ggml-cuda.cu | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu
index f1a20e7fe..1a71e07c9 100644
--- a/ggml/src/ggml-cuda/ggml-cuda.cu
+++ b/ggml/src/ggml-cuda/ggml-cuda.cu
@@ -3677,6 +3677,9 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g
     if (b->type == GGML_TYPE_F16 && a->type != GGML_TYPE_F16) {
         return false;
     }
+    if (op->op == GGML_OP_MUL_MAT && b->ne[2] * b->ne[3] > 1024) {
+        return false;
+    }
 #ifdef GGML_USE_MUSA
     const int cc = ggml_cuda_info().devices[dev_ctx->device].cc;
     if (b->ne[2]*b->ne[3] > 1 && !ggml_is_transposed(a) && !ggml_is_transposed(b)) {
28 llama/patches/0036-win-exit-instead-of-abort.patch (new file)
@@ -0,0 +1,28 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Daniel Hiltgen <daniel@ollama.com>
Date: Tue, 18 Nov 2025 09:58:23 -0800
Subject: [PATCH] win: exit instead of abort

---
 ggml/src/ggml.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c
index 9be35c1be..923c33d05 100644
--- a/ggml/src/ggml.c
+++ b/ggml/src/ggml.c
@@ -229,8 +229,13 @@ void ggml_abort(const char * file, int line, const char * fmt, ...) {
         fprintf(stderr, "%s\n", message);
         ggml_print_backtrace();
     }
-
+#if defined(_WIN32)
+    fflush(stderr);
+    fflush(stdout);
+    exit(1);
+#else
     abort();
+#endif
 }

 // ggml_print_backtrace is registered with std::set_terminate by ggml.cpp
@@ -173,6 +173,7 @@ type Tensor interface {
    Cos(ctx Context) Tensor
    Tanh(ctx Context) Tensor
    GELU(ctx Context, up ...Tensor) Tensor
    QuickGELU(ctx Context, up ...Tensor) Tensor
    SILU(ctx Context, up ...Tensor) Tensor
    RELU(ctx Context, up ...Tensor) Tensor
    Sigmoid(ctx Context) Tensor

@@ -193,6 +194,7 @@ type Tensor interface {
    Repeat(ctx Context, dim, n int) Tensor
    Concat(ctx Context, t2 Tensor, dim int) Tensor
    Rows(ctx Context, t2 Tensor) Tensor
    SetRows(ctx Context, src Tensor, idxs Tensor) Tensor
    Copy(ctx Context, t2 Tensor) Tensor
    Duplicate(ctx Context) Tensor

@@ -207,6 +209,8 @@ type Tensor interface {
    Stddev(ctx Context) Tensor
    Sqr(ctx Context) Tensor
    Sqrt(ctx Context) Tensor

    Interpolate(ctx Context, dims [4]int, samplingMode SamplingMode) Tensor
}

// ScaledDotProductAttention implements a fused attention

@@ -372,3 +376,10 @@ const (
    DTypeI32
    DTypeMXFP4
)

type SamplingMode int

const (
    SamplingModeNearest SamplingMode = iota
    SamplingModeBilinear
)
@@ -314,7 +314,7 @@ func New(modelPath string, params ml.BackendParams) (ml.Backend, error) {
        "altup_proj", "altup_unembd_proj",
        "per_layer_token_embd", "per_layer_model_proj", "per_layer_proj_norm"):
        createTensor(tensor{source: t}, output.bts, blocks)
    case strings.HasPrefix(t.Name, "v.") || strings.HasPrefix(t.Name, "mm."):
    case strings.HasPrefix(t.Name, "v.") || strings.HasPrefix(t.Name, "mm.") || strings.HasPrefix(t.Name, "s."):
        // TODO: assign vision tensors to the gpu if possible
        createTensor(tensor{source: t}, output.bts, blocks)
    case contains(t.Name, "rope_freqs", "rope_factors_long", "rope_factors_short"):
@@ -1338,6 +1338,13 @@ func (t *Tensor) Rows(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
    }
}

func (t *Tensor) SetRows(ctx ml.Context, src ml.Tensor, idxs ml.Tensor) ml.Tensor {
    return &Tensor{
        b: t.b,
        t: C.ggml_set_rows(ctx.(*Context).ctx, t.t, src.(*Tensor).t, idxs.(*Tensor).t),
    }
}

func (t *Tensor) Copy(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
    return &Tensor{
        b: t.b,

@@ -1378,6 +1385,10 @@ func inferShape(t *Tensor, shape []int) {
}

func (t *Tensor) Reshape(ctx ml.Context, shape ...int) ml.Tensor {
    if !C.ggml_is_contiguous(t.t) {
        return t.Contiguous(ctx, shape...)
    }

    if slices.Contains(shape, -1) {
        inferShape(t, shape)
    }
@@ -1567,6 +1578,16 @@ func (t *Tensor) GELU(ctx ml.Context, t2 ...ml.Tensor) ml.Tensor {
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Tensor) QuickGELU(ctx ml.Context, t2 ...ml.Tensor) ml.Tensor {
|
||||
var tt *C.struct_ggml_tensor
|
||||
if len(t2) > 0 {
|
||||
tt = C.ggml_geglu_quick_split(ctx.(*Context).ctx, t.t, t2[0].(*Tensor).t)
|
||||
} else {
|
||||
tt = C.ggml_gelu_quick_inplace(ctx.(*Context).ctx, t.t)
|
||||
}
|
||||
return &Tensor{b: t.b, t: tt}
|
||||
}
|
||||
|
||||
func (t *Tensor) SILU(ctx ml.Context, t2 ...ml.Tensor) ml.Tensor {
|
||||
if len(t2) > 0 {
|
||||
return &Tensor{
|
||||
@@ -1724,6 +1745,23 @@ func (t *Tensor) Sqrt(ctx ml.Context) ml.Tensor {
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Tensor) Interpolate(ctx ml.Context, dims [4]int, samplingMode ml.SamplingMode) ml.Tensor {
|
||||
var mode C.uint32_t
|
||||
switch samplingMode {
|
||||
case ml.SamplingModeNearest:
|
||||
mode = C.GGML_SCALE_MODE_NEAREST
|
||||
case ml.SamplingModeBilinear:
|
||||
mode = C.GGML_SCALE_MODE_BILINEAR
|
||||
default:
|
||||
panic("unsupported interpolate mode")
|
||||
}
|
||||
|
||||
return &Tensor{
|
||||
b: t.b,
|
||||
t: C.ggml_interpolate(ctx.(*Context).ctx, t.t, C.int64_t(dims[0]), C.int64_t(dims[1]), C.int64_t(dims[2]), C.int64_t(dims[3]), mode),
|
||||
}
|
||||
}
|
||||
|
||||
// Slice returns a view of the tensor sliced along dim from low to high in step steps.
|
||||
// Slice panics if the dimension is invalid or the slice parameters are out of range.
|
||||
// If dim=0 and step>1, the tensor is a copy rather than a view to ensure proper shape.
|
||||
|
||||
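QuickGELU above doubles as a gated activation: with a second tensor it lowers to ggml's fused geglu_quick_split, otherwise to the in-place quick GELU. A hedged sketch of both call shapes, assuming gate/up projection outputs from an MLP (hypothetical names):

func geglu(ctx ml.Context, gate, up ml.Tensor) ml.Tensor {
	// fused quick-GELU(gate) * up
	return gate.QuickGELU(ctx, up)
}

func act(ctx ml.Context, x ml.Tensor) ml.Tensor {
	// plain element-wise quick GELU
	return x.QuickGELU(ctx)
}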
@@ -3677,6 +3677,9 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g
            if (b->type == GGML_TYPE_F16 && a->type != GGML_TYPE_F16) {
                return false;
            }
            if (op->op == GGML_OP_MUL_MAT && b->ne[2] * b->ne[3] > 1024) {
                return false;
            }
#ifdef GGML_USE_MUSA
            const int cc = ggml_cuda_info().devices[dev_ctx->device].cc;
            if (b->ne[2]*b->ne[3] > 1 && !ggml_is_transposed(a) && !ggml_is_transposed(b)) {
7
ml/backend/ggml/ggml/src/ggml.c
vendored
@@ -229,8 +229,13 @@ void ggml_abort(const char * file, int line, const char * fmt, ...) {
        fprintf(stderr, "%s\n", message);
        ggml_print_backtrace();
    }
#if defined(_WIN32)
    fflush(stderr);
    fflush(stdout);
    exit(1);
#else
    abort();
#endif
}

// ggml_print_backtrace is registered with std::set_terminate by ggml.cpp
@@ -25,12 +25,15 @@ const (

// Composite returns an image with the alpha channel removed by drawing over a white background.
func Composite(img image.Image) image.Image {
	dst := image.NewRGBA(img.Bounds())

	white := color.RGBA{255, 255, 255, 255}
	draw.Draw(dst, dst.Bounds(), &image.Uniform{white}, image.Point{}, draw.Src)
	draw.Draw(dst, dst.Bounds(), img, img.Bounds().Min, draw.Over)
	return CompositeColor(img, white)
}

// CompositeColor returns an image with the alpha channel removed by drawing over the given background color.
func CompositeColor(img image.Image, color color.Color) image.Image {
	dst := image.NewRGBA(img.Bounds())
	draw.Draw(dst, dst.Bounds(), &image.Uniform{color}, image.Point{}, draw.Src)
	draw.Draw(dst, dst.Bounds(), img, img.Bounds().Min, draw.Over)
	return dst
}
@@ -55,6 +58,31 @@ func Resize(img image.Image, newSize image.Point, method int) image.Image {
	return dst
}

// Pad returns an image which has been resized to fit within a new size, preserving aspect ratio, and padded with a color.
func Pad(img image.Image, newSize image.Point, color color.Color, kernel draw.Interpolator) image.Image {
	dst := image.NewRGBA(image.Rect(0, 0, newSize.X, newSize.Y))
	draw.Draw(dst, dst.Bounds(), &image.Uniform{color}, image.Point{}, draw.Src)

	var minPoint, maxPoint image.Point
	if img.Bounds().Dx() > img.Bounds().Dy() {
		// landscape
		height := newSize.X * img.Bounds().Dy() / img.Bounds().Dx()
		minPoint = image.Point{0, (newSize.Y - height) / 2}
		maxPoint = image.Point{newSize.X, height + minPoint.Y}
	} else {
		// portrait
		width := newSize.Y * img.Bounds().Dx() / img.Bounds().Dy()
		minPoint = image.Point{(newSize.X - width) / 2, 0}
		maxPoint = image.Point{minPoint.X + width, newSize.Y}
	}

	kernel.Scale(dst, image.Rectangle{
		Min: minPoint,
		Max: maxPoint,
	}, img, img.Bounds(), draw.Over, nil)
	return dst
}

// Normalize returns a slice of float32 containing the r, g, b values for an image, normalized using the given mean and standard deviation.
func Normalize(img image.Image, mean, std [3]float32, rescale bool, channelFirst bool) []float32 {
	var pixelVals []float32
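As a quick illustration (not part of the diff), Pad letterboxes an already-decoded image.Image onto a fixed square canvas, which is how the DeepSeek-OCR processor below prepares its 1024px global view; the helper name is hypothetical and the imports match the file above:

func letterbox(img image.Image) image.Image {
	// fit within 1024x1024, keep aspect ratio, fill the margins with mid-gray
	return imageproc.Pad(img, image.Point{X: 1024, Y: 1024}, color.Gray{127}, draw.BiLinear)
}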
@@ -156,6 +156,7 @@ func New(c fs.Config) (model.Model, error) {
				)),
			},
		},
		true,
	)
	default:
		return nil, model.ErrUnsupportedTokenizer
@@ -254,6 +254,30 @@ func New(c fs.Config) (model.Model, error) {
	keyLength := int(cmp.Or(c.Uint("attention.key_length_mla"), c.Uint("attention.key_length")))
	valueLength := int(cmp.Or(c.Uint("attention.value_length_mla"), c.Uint("attention.value_length")))

	var pre []string
	switch c.String("tokenizer.ggml.pre") {
	case "deepseek-v3":
		pre = []string{
			// Split regex into multiple parts (according to DeepSeek3's regex)
			"\\p{N}{1,3}",
			`[一-龥぀-ゟ゠-ヿ]+`,
			"[!\"#$%&'()*+,\\-./:;<=>?@\\[\\\\\\]^_`{|}~][A-Za-z]+|[^\r\n\\p{L}\\p{P}\\p{S}]?[\\p{L}\\p{M}]+| ?[\\p{P}\\p{S}]+[\r\n]*|\\s*[\r\n]+|\\s+(?!\\S)|\\s+",
		}
	case "deepseek-llm":
		// TODO: these models haven't been vetted so skip for now
		// pre = []string{
		// 	"[\r\n]",
		// 	"\\s?[A-Za-zµÀ-ÖØ-öø-ƺƼ-ƿDŽ-ʓʕ-ʯͰ-ͳͶͷͻ-ͽͿΆΈ-ΊΌΎ-ΡΣ-ϵϷ-ҁҊ-ԯԱ-ՖႠ-ჅᎠ-Ᏽᏸ-ᏽᲐ-ᲺᲽ-Ჿᴀ-ᴫᵫ-ᵷᵹ-ᶚḀ-ἕἘ-Ἕἠ-ὅὈ-Ὅὐ-ὗὙὛὝὟ-ώᾀ-ᾴᾶ-ᾼιῂ-ῄῆ-ῌῐ-ΐῖ-Ίῠ-Ῥῲ-ῴῶ-ῼℂℇℊ-ℓℕℙ-ℝℤΩℨK-ℭℯ-ℴℹℼ-ℿⅅ-ⅉⅎↃↄⰀ-ⱻⱾ-ⳤⳫ-ⳮⳲⳳꙀ-ꙭꚀ-ꚛꜢ-ꝯꝱ-ꞇꞋ-ꞎꭰ-ꮿff-stﬓ-ﬗA-Za-z𐐀-𐑏𐒰-𐓓𐓘-𐓻𐲀-𐲲𐳀-𐳲𑢠-𑣟𞤀-𞥃]+",
		// 	"\\s?[!-/:-~!-/:-~‘-‟ -。]+",
		// 	"\\s+$",
		// 	"[一-龥ࠀ-一가-]+",
		// 	"[0-9]",
		// }
		fallthrough
	default:
		return nil, model.ErrUnsupportedTokenizer
	}

	m := Model{
		BytePairEncoding: model.NewBytePairEncoding(
			&model.Vocabulary{
@@ -268,10 +292,7 @@ func New(c fs.Config) (model.Model, error) {
					c.Ints("tokenizer.ggml.eos_token_ids")...,
				),
			},
			// Split regex into multiple parts (according to DeepSeek3's regex)
			"\\p{N}{1,3}",
			`[一-龥぀-ゟ゠-ヿ]+`,
			"[!\"#$%&'()*+,\\-./:;<=>?@\\[\\\\\\]^_`{|}~][A-Za-z]+|[^\r\n\\p{L}\\p{P}\\p{S}]?[\\p{L}\\p{M}]+| ?[\\p{P}\\p{S}]+[\r\n]*|\\s*[\r\n]+|\\s+(?!\\S)|\\s+",
			pre...,
		),
		Layers: layers,
		Options: &Options{
83
model/models/deepseekocr/imageprocessor.go
Normal file
@@ -0,0 +1,83 @@
package deepseekocr

import (
	"bytes"
	"image"
	"image/color"
	"math"
	"slices"

	"golang.org/x/image/draw"

	"github.com/ollama/ollama/ml"
	"github.com/ollama/ollama/model/imageproc"
)

type ratio struct {
	x, y int
}

func ProcessImage(ctx ml.Context, bts []byte) (ml.Tensor, ml.Tensor, []int, error) {
	img, _, err := image.Decode(bytes.NewReader(bts))
	if err != nil {
		return nil, nil, nil, err
	}

	minNum, maxNum, imageSize, baseSize := 2, 9, 640, 1024
	var targetRatios []ratio
	for n := minNum; n <= maxNum; n++ {
		for i := 1; i <= n; i++ {
			for j := 1; j <= n; j++ {
				if i*j <= maxNum && i*j >= minNum && !slices.Contains(targetRatios, ratio{i, j}) {
					targetRatios = append(targetRatios, ratio{i, j})
				}
			}
		}
	}

	targetRatio := findBestAspectRatio(targetRatios, img.Bounds().Dx(), img.Bounds().Dy(), imageSize)
	targetWidth, targetHeight := imageSize*targetRatio.x, imageSize*targetRatio.y
	blocks := targetRatio.x * targetRatio.y

	mean := imageproc.ImageNetStandardMean
	std := imageproc.ImageNetStandardSTD

	var patches []float32
	resized := imageproc.Resize(img, image.Point{X: targetWidth, Y: targetHeight}, imageproc.ResizeBilinear)
	for i := range blocks {
		patch := image.NewRGBA(image.Rect(0, 0, imageSize, imageSize))
		draw.Draw(patch, patch.Bounds(), resized, image.Point{
			X: i % (targetWidth / imageSize) * imageSize,
			Y: i / (targetWidth / imageSize) * imageSize,
		}, draw.Over)

		patches = append(patches, imageproc.Normalize(patch, mean, std, true, true)...)
	}

	img = imageproc.CompositeColor(img, color.Gray{})
	img = imageproc.Pad(img, image.Point{X: baseSize, Y: baseSize}, color.Gray{127}, draw.BiLinear)

	return ctx.Input().FromFloats(patches, imageSize, imageSize, 3, blocks),
		ctx.Input().FromFloats(imageproc.Normalize(img, mean, std, true, true), baseSize, baseSize, 3),
		[]int{targetRatio.x, targetRatio.y},
		nil
}

func findBestAspectRatio(targetRatios []ratio, width, height, imageSize int) ratio {
	bestDiff := math.MaxFloat64
	best := ratio{1, 1}
	realRatio := float64(width) / float64(height)
	for _, target := range targetRatios {
		targetRatio := float64(target.x) / float64(target.y)
		diff := math.Abs(realRatio - targetRatio)
		if diff < bestDiff {
			bestDiff = diff
			best = target
		} else if diff == bestDiff {
			if float64(width*height) > 0.5*float64(imageSize*imageSize*best.x*best.y) {
				best = target
			}
		}
	}
	return best
}
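To make the tile-selection arithmetic concrete, here is a standalone rerun of the ratio search (not part of the diff) for a hypothetical 1600x1200 input: every i x j grid of 2 to 9 tiles is scored by aspect-ratio distance, 3x2 wins (1.5 against the image's 1.33), so the image is resized to 1920x1280 and cut into six 640px tiles.

package main

import (
	"fmt"
	"math"
)

func main() {
	width, height, imageSize := 1600, 1200, 640
	ar := float64(width) / float64(height) // 1.33
	bestX, bestY, bestDiff := 1, 1, math.MaxFloat64
	for i := 1; i <= 9; i++ {
		for j := 1; j <= 9; j++ {
			if n := i * j; n < 2 || n > 9 {
				continue // same bounds as minNum/maxNum above
			}
			if d := math.Abs(ar - float64(i)/float64(j)); d < bestDiff {
				bestX, bestY, bestDiff = i, j, d
			}
		}
	}
	fmt.Printf("grid %dx%d -> resize to %dx%d, %d tiles\n",
		bestX, bestY, imageSize*bestX, imageSize*bestY, bestX*bestY)
}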
192
model/models/deepseekocr/model.go
Normal file
@@ -0,0 +1,192 @@
package deepseekocr

import (
	"math"
	"slices"

	"github.com/ollama/ollama/fs"
	"github.com/ollama/ollama/kvcache"
	"github.com/ollama/ollama/ml"
	"github.com/ollama/ollama/ml/nn"
	"github.com/ollama/ollama/model"
	"github.com/ollama/ollama/model/input"
)

type Model struct {
	model.Base
	model.TextProcessor

	Sam    *samModel    `gguf:"s"`
	Vision *visionModel `gguf:"v"`
	Text   *textModel

	ImageNewline ml.Tensor `gguf:"mm.image_newline"`
	//nolint:misspell // this misspelling is upstream. fixing it breaks the model
	ViewSeperator ml.Tensor `gguf:"mm.view_seperator"`

	Projector *nn.Linear `gguf:"mm.layers"`
}

func (m *Model) EncodeMultimodal(ctx ml.Context, bts []byte) ([]input.Multimodal, error) {
	patches, original, crop, err := ProcessImage(ctx, bts)
	if err != nil {
		return nil, err
	}

	var outputs []ml.Tensor
	if true { // TODO: local features if sum(patches) != 0
		samOutputs := m.Sam.Forward(ctx, patches)
		visionOutputs := m.Vision.Forward(ctx, patches, samOutputs)

		samOutputs = samOutputs.Reshape(ctx, -1, samOutputs.Dim(2), samOutputs.Dim(3)).Permute(ctx, 1, 0, 2, 3)
		visionOutputs = visionOutputs.Slice(ctx, 1, 1, visionOutputs.Dim(1), 1)
		localOutputs := visionOutputs.Concat(ctx, samOutputs, 0)
		localOutputs = m.Projector.Forward(ctx, localOutputs)

		hw := int(math.Sqrt(float64(localOutputs.Dim(1))))
		localOutputs = localOutputs.Reshape(ctx, -1, hw, crop[0], crop[1])
		localOutputs = localOutputs.Permute(ctx, 0, 2, 1, 3)
		localOutputs = localOutputs.Contiguous(ctx, -1, crop[0]*hw, crop[1]*hw)
		localOutputs = localOutputs.Concat(ctx, m.ImageNewline.Repeat(ctx, 2, localOutputs.Dim(2)), 1)
		localOutputs = localOutputs.Reshape(ctx, localOutputs.Dim(0), -1)

		outputs = append(outputs, localOutputs)
	}

	samOutputs := m.Sam.Forward(ctx, original)
	visionOutputs := m.Vision.Forward(ctx, original, samOutputs)

	samOutputs = samOutputs.Reshape(ctx, -1, samOutputs.Dim(2), samOutputs.Dim(3)).Permute(ctx, 1, 0, 2, 3)
	visionOutputs = visionOutputs.Slice(ctx, 1, 1, visionOutputs.Dim(1), 1)
	globalOutputs := visionOutputs.Concat(ctx, samOutputs, 0)
	globalOutputs = m.Projector.Forward(ctx, globalOutputs)

	hw := int(math.Sqrt(float64(globalOutputs.Dim(1))))
	globalOutputs = globalOutputs.Reshape(ctx, -1, hw, hw)
	globalOutputs = globalOutputs.Concat(ctx, m.ImageNewline.Repeat(ctx, 2, globalOutputs.Dim(2)), 1)
	globalOutputs = globalOutputs.Reshape(ctx, globalOutputs.Dim(0), -1)

	outputs = append(outputs, globalOutputs, m.ViewSeperator)
	return []input.Multimodal{
		{Tensor: outputs[0].Stack(ctx, 1, outputs[1:]...)},
	}, nil
}

func (m *Model) PostTokenize(inputs []*input.Input) ([]*input.Input, error) {
	outputs := make([]*input.Input, 0, len(inputs))
	for i := range inputs {
		if inputs[i].Multimodal == nil {
			outputs = append(outputs, inputs[i])
			continue
		}

		t := inputs[i].Multimodal[0].Tensor
		outputs = append(outputs, &input.Input{
			Token:          128815,
			Multimodal:     inputs[i].Multimodal,
			MultimodalHash: inputs[i].MultimodalHash,
			SameBatch:      t.Dim(1) - 1,
		})

		outputs = slices.Grow(outputs, t.Dim(1)-1)
		outputs = append(outputs, slices.Repeat([]*input.Input{{Token: 128815}}, t.Dim(1)-1)...)
	}
	return outputs, nil
}

func (m *Model) Forward(ctx ml.Context, batch input.Batch) (ml.Tensor, error) {
	inputsEmbeds := m.Text.TokenEmbedding.Forward(ctx, batch.Inputs).Duplicate(ctx)
	positions := ctx.Input().FromInts(batch.Positions, len(batch.Positions))

	for _, mm := range batch.Multimodal {
		t := mm.Multimodal[0].Tensor
		ctx.Forward(t.Copy(ctx, inputsEmbeds.View(ctx, mm.Index*inputsEmbeds.Stride(1), t.Dim(0)*t.Dim(1))))
	}

	hiddenStates := inputsEmbeds
	for i, block := range m.Text.Blocks {
		if m.Cache != nil {
			m.Cache.SetLayer(i)
		}

		var outputs ml.Tensor
		if i == len(m.Text.Blocks)-1 {
			outputs = batch.Outputs
		}

		hiddenStates = block.Forward(ctx, hiddenStates, positions, outputs, m.Cache, m.Text.Options)
	}

	hiddenStates = m.Text.OutputNorm.Forward(ctx, hiddenStates, m.Text.Options.eps)
	return m.Text.Output.Forward(ctx, hiddenStates), nil
}

func init() {
	model.Register("deepseekocr", func(c fs.Config) (model.Model, error) {
		textBlocks := make([]textBlock, c.Uint("block_count"))
		leadingDenseBlockCount := int(c.Uint("leading_dense_block_count", 1))
		for i := range textBlocks {
			if i >= leadingDenseBlockCount {
				textBlocks[i].FeedForward = &textMoe{}
			} else {
				textBlocks[i].FeedForward = &textMLP{}
			}
		}

		m := Model{
			TextProcessor: model.NewBytePairEncoding(
				&model.Vocabulary{
					Values: c.Strings("tokenizer.ggml.tokens"),
					Types:  c.Ints("tokenizer.ggml.token_type"),
					Merges: c.Strings("tokenizer.ggml.merges"),
					AddBOS: c.Bool("tokenizer.ggml.add_bos_token", true),
					BOS:    []int32{int32(c.Uint("tokenizer.ggml.bos_token_id"))},
					AddEOS: c.Bool("tokenizer.ggml.add_eos_token", false),
					EOS: append(
						[]int32{int32(c.Uint("tokenizer.ggml.eos_token_id"))},
						c.Ints("tokenizer.ggml.eos_token_ids")...,
					),
				},
				// Split regex into multiple parts (according to DeepSeek3's regex)
				"\\p{N}{1,3}",
				`[一-龥぀-ゟ゠-ヿ]+`,
				"[!\"#$%&'()*+,\\-./:;<=>?@\\[\\\\\\]^_`{|}~][A-Za-z]+|[^\r\n\\p{L}\\p{P}\\p{S}]?[\\p{L}\\p{M}]+| ?[\\p{P}\\p{S}]+[\r\n]*|\\s*[\r\n]+|\\s+(?!\\S)|\\s+",
			),
			Text: &textModel{
				Blocks: textBlocks,
				Options: textOptions{
					hiddenSize:     int(c.Uint("embedding_length")),
					numHeads:       int(c.Uint("attention.head_count")),
					numKVHeads:     int(c.Uint("attention.head_count_kv")),
					numExperts:     int(c.Uint("expert_count")),
					numExpertsUsed: int(c.Uint("expert_used_count")),
					ropeBase:       c.Float("rope.freq_base", 10_000),
					ropeScale:      c.Float("rope.scaling.factor", 1.0),
					eps:            c.Float("attention.layer_norm_rms_epsilon", 1e-6),
				},
			},
			Vision: &visionModel{
				Blocks: make([]visionBlock, c.Uint("vision.block_count")),
				Options: visionOptions{
					hiddenSize: int(c.Uint("vision.embedding_length")),
					numHeads:   int(c.Uint("vision.head_count")),
					imageSize:  int(c.Uint("vision.image_size", 224)),
					patchSize:  int(c.Uint("vision.patch_size", 14)),
					eps:        c.Float("vision.attention.layer_norm_epsilon", 1e-5),
				},
			},
			Sam: &samModel{
				Blocks: make([]samBlock, c.Uint("sam.block_count")),
				Options: samOptions{
					hiddenSize:            int(c.Uint("sam.embedding_length")),
					numHeads:              int(c.Uint("sam.head_count")),
					eps:                   c.Float("sam.attention.layer_norm_epsilon", 1e-6),
					globalAttentionLayers: c.Ints("sam.global_attention_indexes"),
				},
			},
		}

		m.Cache = kvcache.NewCausalCache(m.Text.Shift)
		return &m, nil
	})
}
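PostTokenize above turns each image into a run of identical placeholder tokens sized to the embedding tensor. A toy illustration (hypothetical sizes, plain Go, not part of the diff) of that bookkeeping for an image whose projected tensor has 25 columns:

package main

import "fmt"

func main() {
	const imageToken = 128815
	n := 25                     // hypothetical t.Dim(1): columns in the projected image tensor
	tokens := []int{imageToken} // carrier: holds the tensor and SameBatch = n-1
	for i := 1; i < n; i++ {
		tokens = append(tokens, imageToken) // bare padding placeholders
	}
	fmt.Println(len(tokens)) // 25: one input per embedding column
}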
225
model/models/deepseekocr/model_sam.go
Normal file
@@ -0,0 +1,225 @@
package deepseekocr

import (
	"math"
	"slices"

	"github.com/ollama/ollama/ml"
	"github.com/ollama/ollama/ml/nn"
)

type samModel struct {
	PatchEmbedding    *nn.Conv2D `gguf:"patch_embd"`
	PositionEmbedding ml.Tensor  `gguf:"position_embd"`

	Blocks []samBlock `gguf:"blk"`

	Neck *samNeck   `gguf:"neck"`
	Net2 *nn.Conv2D `gguf:"net_2"`
	Net3 *nn.Conv2D `gguf:"net_3"`

	Options samOptions
}

func (m *samModel) absolutePositionEmbedding(ctx ml.Context, hiddenStates ml.Tensor) ml.Tensor {
	source := m.PositionEmbedding.Dim(1)
	target := hiddenStates.Dim(2)
	if source != target {
		positionEmbed := m.PositionEmbedding.Permute(ctx, 2, 0, 1, 3)
		positionEmbed = positionEmbed.Interpolate(ctx, [4]int{target, target, hiddenStates.Dim(0), 1}, ml.SamplingModeBilinear)
		return positionEmbed.Permute(ctx, 1, 2, 0, 3).Contiguous(ctx)
	}

	return m.PositionEmbedding
}

func (m *samModel) Forward(ctx ml.Context, t ml.Tensor) ml.Tensor {
	hiddenStates := m.PatchEmbedding.Forward(ctx, t, 16, 16, 0, 0, 1, 1)
	hiddenStates = hiddenStates.Permute(ctx, 1, 2, 0, 3).Contiguous(ctx)

	if m.PositionEmbedding != nil {
		hiddenStates = hiddenStates.Add(ctx, m.absolutePositionEmbedding(ctx, hiddenStates))
	}

	for i, block := range m.Blocks {
		var windowSize int
		if !slices.Contains(m.Options.globalAttentionLayers, int32(i)) {
			windowSize = 14
		}

		hiddenStates = block.Forward(ctx, hiddenStates, windowSize, m.Options)
	}

	hiddenStates = hiddenStates.Permute(ctx, 2, 0, 1, 3).Contiguous(ctx)
	hiddenStates = m.Neck.Forward(ctx, hiddenStates, m.Options)
	hiddenStates = m.Net2.Forward(ctx, hiddenStates, 2, 2, 1, 1, 1, 1)
	hiddenStates = m.Net3.Forward(ctx, hiddenStates, 2, 2, 1, 1, 1, 1)
	return hiddenStates
}

type samOptions struct {
	hiddenSize,
	numHeads int
	eps                   float32
	globalAttentionLayers []int32
}

func (o samOptions) headDim() int {
	return o.hiddenSize / o.numHeads
}

type samBlock struct {
	Norm1       *nn.LayerNorm `gguf:"norm1"`
	Attention   *samAttention `gguf:"attn"`
	Norm2       *nn.LayerNorm `gguf:"norm2"`
	FeedForward *samMLP       `gguf:"mlp"`
}

func (m *samBlock) Forward(ctx ml.Context, hiddenStates ml.Tensor, windowSize int, opts samOptions) ml.Tensor {
	c, w, h := hiddenStates.Dim(0), hiddenStates.Dim(1), hiddenStates.Dim(2)

	residual := hiddenStates
	hiddenStates = m.Norm1.Forward(ctx, hiddenStates, opts.eps)

	var pw, ph int
	if windowSize > 0 {
		pw = (windowSize - hiddenStates.Dim(1)%windowSize) % windowSize
		ph = (windowSize - hiddenStates.Dim(2)%windowSize) % windowSize
		if pw > 0 || ph > 0 {
			hiddenStates = hiddenStates.Pad(ctx, 0, pw, ph, 0)
		}

		hiddenStates = hiddenStates.Reshape(ctx, c*windowSize, (w+pw)/windowSize, windowSize, -1)
		hiddenStates = hiddenStates.Permute(ctx, 0, 2, 1, 3).Contiguous(ctx, c, windowSize, windowSize, -1)
	}

	hiddenStates = m.Attention.Forward(ctx, hiddenStates, opts)

	if windowSize > 0 {
		hiddenStates = hiddenStates.Reshape(ctx, c*windowSize, windowSize, (w+pw)/windowSize, -1)
		hiddenStates = hiddenStates.Permute(ctx, 0, 2, 1, 3)
		hiddenStates = hiddenStates.Contiguous(ctx, c, w+pw, h+ph, -1)
		hiddenStates = hiddenStates.Pad(ctx, 0, -pw, -ph, 0)
	}

	hiddenStates = hiddenStates.Add(ctx, residual)

	residual = hiddenStates
	hiddenStates = m.Norm2.Forward(ctx, hiddenStates, opts.eps)
	hiddenStates = m.FeedForward.Forward(ctx, hiddenStates, opts)
	return hiddenStates.Add(ctx, residual)
}

type samAttention struct {
	QKV    *nn.Linear `gguf:"qkv"`
	Output *nn.Linear `gguf:"proj"`

	RelativePosition *struct {
		Height ml.Tensor `gguf:"h"`
		Width  ml.Tensor `gguf:"w"`
	} `gguf:",pre:rel_pos_"`
}

func relativeCoordinates(ctx ml.Context, qn, kn int) ml.Tensor {
	s := make([]int32, qn*kn)
	for i := range qn {
		for j := range kn {
			q := i * max(kn/qn, 1)
			k := j * max(qn/kn, 1)
			s[i*kn+j] = int32(q - k + (kn-1)*max(qn/kn, 1))
		}
	}
	return ctx.Input().FromInts(s, qn*kn)
}

func relativePositions(ctx ml.Context, positions ml.Tensor, qn, kn int) ml.Tensor {
	maxRelativeDistance := 2*max(qn, kn) - 1
	if positions.Dim(1) != maxRelativeDistance {
		// linear interpolation kernel is not available, so approximate with bilinear interpolation
		positions = positions.Interpolate(ctx, [4]int{positions.Dim(0), maxRelativeDistance, 1, 1}, ml.SamplingModeBilinear)
	}

	rc := relativeCoordinates(ctx, qn, kn)
	return positions.Rows(ctx, rc).Reshape(ctx, positions.Dim(0), kn, qn)
}

func (m *samAttention) decomposedRelativePositions(ctx ml.Context, query ml.Tensor, qn, kn []int) (ml.Tensor, ml.Tensor) {
	qh, qw := qn[0], qn[1]
	kh, kw := kn[0], kn[1]

	rh := relativePositions(ctx, m.RelativePosition.Height, qh, kh)
	rw := relativePositions(ctx, m.RelativePosition.Width, qw, kw)

	query = query.Contiguous(ctx, query.Dim(0), qw, qh, -1)
	rh = rh.Mulmat(ctx, query).Reshape(ctx, 1, kh, qh*qw, -1)
	rw = rw.Mulmat(ctx, query.Permute(ctx, 0, 2, 1, 3)).Permute(ctx, 0, 2, 1, 3).Contiguous(ctx, kw, 1, qh*qw, -1)
	return rh, rw
}

func (m *samAttention) Forward(ctx ml.Context, hiddenStates ml.Tensor, opts samOptions) ml.Tensor {
	w, h, b := hiddenStates.Dim(1), hiddenStates.Dim(2), hiddenStates.Dim(3)

	qkv := m.QKV.Forward(ctx, hiddenStates)
	qkv = qkv.Reshape(ctx, opts.headDim(), -1, w*h, b)
	chunks := qkv.Chunk(ctx, 1, opts.numHeads)
	query, key, value := chunks[0], chunks[1], chunks[2]

	ctx.Forward(query, key, value)

	query = query.Permute(ctx, 0, 2, 1, 3)
	rh, rw := m.decomposedRelativePositions(ctx, query, []int{h, w}, []int{h, w})
	mask := rh.Repeat(ctx, 0, rw.Dim(0)).Add(ctx, rw)
	mask = mask.Reshape(ctx, h*w, -1, opts.numHeads, b)

	key = key.Permute(ctx, 0, 2, 1, 3)
	scores := key.MulmatFullPrec(ctx, query)
	scores = scores.Scale(ctx, 1/math.Sqrt(float64(opts.headDim())))

	scores = scores.Add(ctx, mask)
	scores = scores.Softmax(ctx)

	value = value.Permute(ctx, 1, 2, 0, 3).Contiguous(ctx)
	attention := value.Mulmat(ctx, scores)
	attention = attention.Permute(ctx, 0, 2, 1, 3)
	attention = attention.Contiguous(ctx, -1, w, h, b)
	return m.Output.Forward(ctx, attention)
}

type samMLP struct {
	Lin1 *nn.Linear `gguf:"lin1"`
	Lin2 *nn.Linear `gguf:"lin2"`
}

func (m *samMLP) Forward(ctx ml.Context, hiddenStates ml.Tensor, opts samOptions) ml.Tensor {
	return m.Lin2.Forward(ctx, m.Lin1.Forward(ctx, hiddenStates).GELU(ctx))
}

type LayerNorm2D struct {
	Weight ml.Tensor `gguf:"weight"`
	Bias   ml.Tensor `gguf:"bias"`
}

func (ln *LayerNorm2D) Forward(ctx ml.Context, x ml.Tensor, eps float32) ml.Tensor {
	x = x.Permute(ctx, 1, 2, 0, 3).Contiguous(ctx)
	u := x.Mean(ctx)
	d := x.Sub(ctx, u)
	s := d.Sqr(ctx).Mean(ctx)
	x = d.Div(ctx, s.Add(ctx, ctx.Input().FromFloats([]float32{eps}, 1)).Sqrt(ctx))
	x = x.Mul(ctx, ln.Weight).Add(ctx, ln.Bias)
	return x.Permute(ctx, 2, 0, 1, 3).Contiguous(ctx)
}

type samNeck struct {
	C1  *nn.Conv2D   `gguf:"0"`
	LN1 *LayerNorm2D `gguf:"1"`
	C2  *nn.Conv2D   `gguf:"2"`
	LN2 *LayerNorm2D `gguf:"3"`
}

func (m *samNeck) Forward(ctx ml.Context, hiddenStates ml.Tensor, opts samOptions) ml.Tensor {
	hiddenStates = m.C1.Forward(ctx, hiddenStates, 1, 1, 0, 0, 1, 1)
	hiddenStates = m.LN1.Forward(ctx, hiddenStates, opts.eps)
	hiddenStates = m.C2.Forward(ctx, hiddenStates, 1, 1, 1, 1, 1, 1)
	hiddenStates = m.LN2.Forward(ctx, hiddenStates, opts.eps)
	return hiddenStates
}
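A standalone check of the window-partition arithmetic in samBlock.Forward above, using hypothetical sizes (a 40x40 patch grid and the local window size of 14): the grid is padded up to a multiple of the window, split into windows for attention, then un-padded afterwards.

package main

import "fmt"

func main() {
	w, h, window := 40, 40, 14
	pw := (window - w%window) % window // 2: pad width 40 -> 42
	ph := (window - h%window) % window // 2: pad height 40 -> 42
	fmt.Println("windows:", ((w+pw)/window)*((h+ph)/window)) // 3*3 = 9
}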
140
model/models/deepseekocr/model_text.go
Normal file
@@ -0,0 +1,140 @@
package deepseekocr

import (
	"math"

	"github.com/ollama/ollama/kvcache"
	"github.com/ollama/ollama/ml"
	"github.com/ollama/ollama/ml/nn"
	"github.com/ollama/ollama/ml/nn/fast"
	"github.com/ollama/ollama/ml/nn/rope"
)

type textModel struct {
	TokenEmbedding *nn.Embedding `gguf:"token_embd"`
	Blocks         []textBlock   `gguf:"blk"`
	OutputNorm     *nn.RMSNorm   `gguf:"output_norm"`
	Output         *nn.Linear    `gguf:"output"`

	Options textOptions
}

func (m *textModel) Shift(ctx ml.Context, layer int, key, shift ml.Tensor) (ml.Tensor, error) {
	return m.Options.applyRotaryPositionalEmbedding(ctx, key, shift), nil
}

type textOptions struct {
	hiddenSize,
	numHeads,
	numKVHeads,
	numExperts,
	numExpertsUsed int
	ropeBase,
	ropeScale,
	eps float32
}

func (o textOptions) headDim() int {
	return o.hiddenSize / o.numHeads
}

func (o textOptions) applyRotaryPositionalEmbedding(ctx ml.Context, t, p ml.Tensor) ml.Tensor {
	return fast.RoPE(ctx, t, p, o.headDim(), o.ropeBase, 1/o.ropeScale, rope.WithTypeNeoX())
}

type textBlock struct {
	AttentionNorm *nn.RMSNorm `gguf:"attn_norm"`
	Attention     *textAttention
	MLPNNorm      *nn.RMSNorm `gguf:"ffn_norm"`
	FeedForward   textFeedForward
}

func (m *textBlock) Forward(ctx ml.Context, hiddenStates, positions, outputs ml.Tensor, cache kvcache.Cache, opts textOptions) ml.Tensor {
	residual := hiddenStates
	hiddenStates = m.AttentionNorm.Forward(ctx, hiddenStates, opts.eps)
	hiddenStates = m.Attention.Forward(ctx, hiddenStates, positions, cache, opts)
	if outputs != nil {
		hiddenStates = hiddenStates.Rows(ctx, outputs)
		residual = residual.Rows(ctx, outputs)
	}

	hiddenStates = hiddenStates.Add(ctx, residual)

	residual = hiddenStates
	hiddenStates = m.MLPNNorm.Forward(ctx, hiddenStates, opts.eps)
	hiddenStates = m.FeedForward.Forward(ctx, hiddenStates, opts)
	return hiddenStates.Add(ctx, residual)
}

type textAttention struct {
	Query  *nn.Linear `gguf:"attn_q"`
	Key    *nn.Linear `gguf:"attn_k"`
	Value  *nn.Linear `gguf:"attn_v"`
	Output *nn.Linear `gguf:"attn_output"`
}

func (m *textAttention) Forward(ctx ml.Context, hiddenStates, positions ml.Tensor, cache kvcache.Cache, opts textOptions) ml.Tensor {
	query := m.Query.Forward(ctx, hiddenStates)
	query = query.Reshape(ctx, opts.headDim(), opts.numHeads, -1)

	key := m.Key.Forward(ctx, hiddenStates)
	key = key.Reshape(ctx, opts.headDim(), opts.numKVHeads, -1)

	value := m.Value.Forward(ctx, hiddenStates)
	value = value.Reshape(ctx, opts.headDim(), opts.numKVHeads, -1)

	query = opts.applyRotaryPositionalEmbedding(ctx, query, positions)
	key = opts.applyRotaryPositionalEmbedding(ctx, key, positions)

	attention := nn.Attention(ctx, query, key, value, 1./math.Sqrt(float64(opts.headDim())), cache)
	attention = attention.Reshape(ctx, -1, attention.Dim(2))
	return m.Output.Forward(ctx, attention)
}

type textFeedForward interface {
	Forward(ml.Context, ml.Tensor, textOptions) ml.Tensor
}

type textMoe struct {
	Router        *nn.Linear      `gguf:"ffn_gate_inp"`
	Gate          *nn.LinearBatch `gguf:"ffn_gate_exps"`
	Up            *nn.LinearBatch `gguf:"ffn_up_exps"`
	Down          *nn.LinearBatch `gguf:"ffn_down_exps"`
	SharedExperts *textMLP        `gguf:",suf:_shexp"`
}

func (m *textMoe) Forward(ctx ml.Context, hiddenStates ml.Tensor, opts textOptions) ml.Tensor {
	scores := m.Router.Forward(ctx, hiddenStates).Softmax(ctx)
	indices := scores.TopK(ctx, opts.numExpertsUsed)
	weights := scores.Reshape(ctx, 1, opts.numExperts, hiddenStates.Dim(1)).Rows(ctx, indices)

	experts := hiddenStates.Reshape(ctx, hiddenStates.Dim(0), 1, hiddenStates.Dim(1))
	experts = m.Gate.Forward(ctx, experts, indices).SILU(ctx, m.Up.Forward(ctx, experts, indices))
	experts = m.Down.Forward(ctx, experts, indices)
	experts = experts.Mul(ctx, weights)

	expert := func(i int) ml.Tensor {
		return experts.View(
			ctx, i*experts.Stride(1), experts.Dim(0), experts.Stride(2), experts.Dim(2),
		)
	}

	routedStates := expert(0)
	for i := 1; i < opts.numExpertsUsed; i++ {
		routedStates = routedStates.Add(ctx, expert(i))
	}

	sharedStates := m.SharedExperts.Forward(ctx, hiddenStates, opts)
	return routedStates.Add(ctx, sharedStates)
}

type textMLP struct {
	Gate *nn.Linear `gguf:"ffn_gate"`
	Up   *nn.Linear `gguf:"ffn_up"`
	Down *nn.Linear `gguf:"ffn_down"`
}

func (m *textMLP) Forward(ctx ml.Context, hiddenStates ml.Tensor, _ textOptions) ml.Tensor {
	hiddenStates = m.Gate.Forward(ctx, hiddenStates).SILU(ctx, m.Up.Forward(ctx, hiddenStates))
	return m.Down.Forward(ctx, hiddenStates)
}
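As a toy illustration of the routing in textMoe.Forward above (plain Go, not ml.Tensor): the router's softmax scores are ranked, the top numExpertsUsed experts are kept, and their outputs are combined using the routing weights before the shared-expert path is added back in. The scores and k here are hypothetical:

package main

import (
	"fmt"
	"sort"
)

func main() {
	scores := []float64{0.05, 0.60, 0.30, 0.05} // router softmax over 4 experts
	k := 2                                      // numExpertsUsed
	idx := []int{0, 1, 2, 3}
	sort.Slice(idx, func(a, b int) bool { return scores[idx[a]] > scores[idx[b]] })
	for _, e := range idx[:k] {
		fmt.Printf("route to expert %d with weight %.2f\n", e, scores[e])
	}
}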
117
model/models/deepseekocr/model_vision.go
Normal file
@@ -0,0 +1,117 @@
package deepseekocr

import (
	"math"

	"github.com/ollama/ollama/ml"
	"github.com/ollama/ollama/ml/nn"
)

type visionModel struct {
	PatchEmbedding    *nn.Conv2D    `gguf:"patch_embd"`
	ClassEmbedding    ml.Tensor     `gguf:"class_embd"`
	PositionEmbedding *nn.Embedding `gguf:"position_embd"`

	PreLayerNorm *nn.LayerNorm `gguf:"pre_layrnorm"`
	Blocks       []visionBlock `gguf:"blk"`

	Options visionOptions
}

func (m *visionModel) absolutePositionEmbedding(ctx ml.Context, embeds ml.Tensor) ml.Tensor {
	numPatches := m.Options.imageSize / m.Options.patchSize * m.Options.imageSize / m.Options.patchSize
	positions := ctx.Arange(0, float32(numPatches+1), 1, ml.DTypeI32)
	positionEmbeds := m.PositionEmbedding.Forward(ctx, positions)

	source := int(math.Sqrt(float64(positionEmbeds.Dim(1) - 1)))
	target := int(math.Sqrt(float64(embeds.Dim(1) - 1)))
	if source != target {
		newPositionEmbeds := positionEmbeds.Slice(ctx, 1, 1, positionEmbeds.Dim(1), 1)
		newPositionEmbeds = newPositionEmbeds.Reshape(ctx, -1, source, source)
		newPositionEmbeds = newPositionEmbeds.Permute(ctx, 2, 0, 1, 3).Contiguous(ctx)
		newPositionEmbeds = newPositionEmbeds.Interpolate(ctx, [4]int{target, target, embeds.Dim(0), 1}, ml.SamplingModeBilinear)
		newPositionEmbeds = newPositionEmbeds.Permute(ctx, 1, 2, 0, 3)
		newPositionEmbeds = newPositionEmbeds.Contiguous(ctx, -1, target*target)

		positionEmbeds = positionEmbeds.Slice(ctx, 1, 0, 1, 1).Concat(ctx, newPositionEmbeds, 1)
	}

	return positionEmbeds
}

func (m *visionModel) Forward(ctx ml.Context, pixelValues, patchEmbeds ml.Tensor) ml.Tensor {
	if patchEmbeds == nil {
		patchEmbeds = m.PatchEmbedding.Forward(ctx, pixelValues, m.Options.patchSize, m.Options.patchSize, 0, 0, 1, 1)
	}

	patchEmbeds = patchEmbeds.Reshape(ctx, -1, patchEmbeds.Dim(2), patchEmbeds.Dim(3))
	patchEmbeds = patchEmbeds.Permute(ctx, 1, 0, 2, 3).Contiguous(ctx)

	classEmbeds := m.ClassEmbedding.Repeat(ctx, 2, patchEmbeds.Dim(2))
	embeds := classEmbeds.Concat(ctx, patchEmbeds, 1)
	embeds = embeds.Add(ctx, m.absolutePositionEmbedding(ctx, embeds))

	hiddenStates := m.PreLayerNorm.Forward(ctx, embeds, m.Options.eps)
	for _, block := range m.Blocks {
		hiddenStates = block.Forward(ctx, hiddenStates, m.Options)
	}

	return hiddenStates
}

type visionOptions struct {
	hiddenSize,
	numHeads int
	eps float32

	imageSize, patchSize int
}

func (o visionOptions) headDim() int {
	return o.hiddenSize / o.numHeads
}

type visionBlock struct {
	Norm1       *nn.LayerNorm    `gguf:"layer_norm1"`
	Attention   *visionAttention `gguf:"self_attn"`
	Norm2       *nn.LayerNorm    `gguf:"layer_norm2"`
	FeedForward *visionMLP       `gguf:"mlp"`
}

func (m *visionBlock) Forward(ctx ml.Context, hiddenStates ml.Tensor, opts visionOptions) ml.Tensor {
	residual := hiddenStates
	hiddenStates = m.Norm1.Forward(ctx, hiddenStates, opts.eps)
	hiddenStates = m.Attention.Forward(ctx, hiddenStates, opts)
	hiddenStates = hiddenStates.Add(ctx, residual)

	residual = hiddenStates
	hiddenStates = m.Norm2.Forward(ctx, hiddenStates, opts.eps)
	hiddenStates = m.FeedForward.Forward(ctx, hiddenStates)
	hiddenStates = hiddenStates.Add(ctx, residual)
	return hiddenStates
}

type visionAttention struct {
	QKV    *nn.Linear `gguf:"qkv_proj"`
	Output *nn.Linear `gguf:"out_proj"`
}

func (m *visionAttention) Forward(ctx ml.Context, t ml.Tensor, opts visionOptions) ml.Tensor {
	qkv := m.QKV.Forward(ctx, t)
	qkv = qkv.Reshape(ctx, opts.headDim(), -1, qkv.Dim(1), qkv.Dim(2))
	chunks := qkv.Chunk(ctx, 1, opts.numHeads)
	query, key, value := chunks[0], chunks[1], chunks[2]

	attention := nn.Attention(ctx, query, key, value, 1/math.Sqrt(float64(opts.headDim())), nil)
	attention = attention.Reshape(ctx, -1, attention.Dim(2), attention.Dim(3))
	return m.Output.Forward(ctx, attention)
}

type visionMLP struct {
	FC1 *nn.Linear `gguf:"fc1"`
	FC2 *nn.Linear `gguf:"fc2"`
}

func (m *visionMLP) Forward(ctx ml.Context, t ml.Tensor) ml.Tensor {
	return m.FC2.Forward(ctx, m.FC1.Forward(ctx, t).QuickGELU(ctx))
}
@@ -3,6 +3,7 @@ package models

import (
	_ "github.com/ollama/ollama/model/models/bert"
	_ "github.com/ollama/ollama/model/models/deepseek2"
	_ "github.com/ollama/ollama/model/models/deepseekocr"
	_ "github.com/ollama/ollama/model/models/gemma2"
	_ "github.com/ollama/ollama/model/models/gemma3"
	_ "github.com/ollama/ollama/model/models/gemma3n"
@@ -11,6 +12,7 @@ import (
	_ "github.com/ollama/ollama/model/models/llama4"
	_ "github.com/ollama/ollama/model/models/mistral3"
	_ "github.com/ollama/ollama/model/models/mllama"
	_ "github.com/ollama/ollama/model/models/nomicbert"
	_ "github.com/ollama/ollama/model/models/qwen2"
	_ "github.com/ollama/ollama/model/models/qwen25vl"
	_ "github.com/ollama/ollama/model/models/qwen3"
170
model/models/nomicbert/model.go
Normal file
@@ -0,0 +1,170 @@
package nomicbert

import (
	"cmp"
	"math"

	"github.com/ollama/ollama/fs"
	"github.com/ollama/ollama/ml"
	"github.com/ollama/ollama/ml/nn"
	"github.com/ollama/ollama/ml/nn/fast"
	"github.com/ollama/ollama/ml/nn/pooling"
	"github.com/ollama/ollama/ml/nn/rope"
	"github.com/ollama/ollama/model"
	"github.com/ollama/ollama/model/input"
)

type Model struct {
	model.Base
	model.TextProcessor

	TokenEmbedding     *nn.Embedding `gguf:"token_embd"`
	TypeEmbedding      *nn.Embedding `gguf:"token_types"`
	TokenEmbeddingNorm *nn.LayerNorm `gguf:"token_embd_norm"`

	Layers []EncoderLayer `gguf:"blk"`

	Options
}

type Options struct {
	hiddenSize   int
	numHeads     int
	headDim      int
	eps          float32
	poolingType  pooling.Type
	normalize    bool
	ropeFreqBase float32
}

// Single Encoder Layer
type EncoderLayer struct {
	*Attention

	AttentionNorm *nn.LayerNorm `gguf:"attn_output_norm"`

	*MLP

	MLPNorm *nn.LayerNorm `gguf:"layer_output_norm"`
}

type Attention struct {
	QKV    *nn.Linear `gguf:"attn_qkv"`
	Output *nn.Linear `gguf:"attn_output"`
}

type MLP struct {
	Gate *nn.Linear `gguf:"ffn_gate"`
	Up   *nn.Linear `gguf:"ffn_up"`
	Down *nn.Linear `gguf:"ffn_down"`
}

func (m *Model) Forward(ctx ml.Context, batch input.Batch) (ml.Tensor, error) {
	hiddenStates := m.TokenEmbedding.Forward(ctx, batch.Inputs)

	typeEmbed := m.TypeEmbedding.Weight.Slice(ctx, 1, 0, 1, 1)
	hiddenStates = hiddenStates.Add(ctx, typeEmbed)

	hiddenStates = m.TokenEmbeddingNorm.Forward(ctx, hiddenStates, m.eps)

	positions := ctx.Input().FromInts(batch.Positions, len(batch.Positions))

	for _, layer := range m.Layers {
		hiddenStates = layer.Forward(ctx, hiddenStates, positions, &m.Options)
	}

	hiddenStates = m.poolingType.Forward(ctx, hiddenStates)

	if m.normalize {
		hiddenStates = hiddenStates.L2Norm(ctx, 1e-12)
	}

	return hiddenStates, nil
}

func (e *EncoderLayer) Forward(ctx ml.Context, hiddenStates ml.Tensor, positions ml.Tensor, opts *Options) ml.Tensor {
	residual := hiddenStates
	hiddenStates = e.Attention.Forward(ctx, hiddenStates, positions, opts)
	hiddenStates = hiddenStates.Add(ctx, residual)
	hiddenStates = e.AttentionNorm.Forward(ctx, hiddenStates, opts.eps)

	residual = hiddenStates
	hiddenStates = e.MLP.Forward(ctx, hiddenStates)
	hiddenStates = hiddenStates.Add(ctx, residual)
	hiddenStates = e.MLPNorm.Forward(ctx, hiddenStates, opts.eps)

	return hiddenStates
}

func (a *Attention) Forward(ctx ml.Context, hiddenStates ml.Tensor, positions ml.Tensor, opts *Options) ml.Tensor {
	batchSize := hiddenStates.Dim(1)

	qkv := a.QKV.Forward(ctx, hiddenStates)

	qkv = qkv.Reshape(ctx, opts.headDim, opts.numHeads*3, batchSize)
	chunks := qkv.Chunk(ctx, 1, opts.numHeads)
	query, key, value := chunks[0], chunks[1], chunks[2]

	query = fast.RoPE(ctx, query, positions, opts.headDim, opts.ropeFreqBase, 1.0, rope.WithTypeNeoX())
	key = fast.RoPE(ctx, key, positions, opts.headDim, opts.ropeFreqBase, 1.0, rope.WithTypeNeoX())

	attention := nn.Attention(ctx, query, key, value, 1.0/math.Sqrt(float64(opts.headDim)), nil)

	attention = attention.Reshape(ctx, opts.hiddenSize, batchSize)

	return a.Output.Forward(ctx, attention)
}

func (m *MLP) Forward(ctx ml.Context, hiddenStates ml.Tensor) ml.Tensor {
	hidden := m.Gate.Forward(ctx, hiddenStates).SILU(ctx, m.Up.Forward(ctx, hiddenStates))

	return m.Down.Forward(ctx, hidden)
}

func New(c fs.Config) (model.Model, error) {
	hiddenSize := int(c.Uint("embedding_length"))
	numHeads := int(c.Uint("attention.head_count"))
	headDim := hiddenSize / numHeads

	processor := model.NewWordPiece(
		&model.Vocabulary{
			Values: c.Strings("tokenizer.ggml.tokens"),
			Scores: c.Floats("tokenizer.ggml.scores"),
			Types:  c.Ints("tokenizer.ggml.token_type"),
			AddBOS: c.Bool("tokenizer.ggml.add_bos_token", true),
			BOS: []int32{
				int32(cmp.Or(
					c.Uint("tokenizer.ggml.cls_token_id"),
					c.Uint("tokenizer.ggml.bos_token_id"),
				)),
			},
			AddEOS: c.Bool("tokenizer.ggml.add_eos_token", true),
			EOS: []int32{
				int32(cmp.Or(
					c.Uint("tokenizer.ggml.separator_token_id"),
					c.Uint("tokenizer.ggml.eos_token_id"),
				)),
			},
		},
		false,
	)

	return &Model{
		TextProcessor: processor,
		Layers:        make([]EncoderLayer, c.Uint("block_count")),
		Options: Options{
			hiddenSize:   hiddenSize,
			numHeads:     numHeads,
			headDim:      headDim,
			eps:          c.Float("attention.layer_norm_epsilon"),
			poolingType:  pooling.Type(c.Uint("pooling_type")),
			normalize:    c.Bool("normalize_embeddings", false),
			ropeFreqBase: c.Float("rope.freq_base", 1000.0),
		},
	}, nil
}

func init() {
	model.Register("nomic-bert", New)
	model.Register("nomic-bert_embed", New)
}
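A quick shape walkthrough (hypothetical sizes, not part of the diff) for the fused QKV split in Attention.Forward above: attn_qkv projects to 3*hiddenSize, which is reshaped to [headDim, 3*numHeads, tokens] and chunked along dim 1 into query, key, and value of [headDim, numHeads, tokens] each.

package main

import "fmt"

func main() {
	hiddenSize, numHeads, tokens := 768, 12, 5 // hypothetical
	headDim := hiddenSize / numHeads           // 64
	fmt.Println("reshape to:", headDim, 3*numHeads, tokens) // 64 36 5
	fmt.Println("per chunk: ", headDim, numHeads, tokens)   // 64 12 5
}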
319
model/parsers/cogito.go
Normal file
@@ -0,0 +1,319 @@
package parsers

import (
	"encoding/json"
	"errors"
	"log/slog"
	"strings"
	"unicode"

	"github.com/ollama/ollama/api"
)

type CogitoParserState int

const (
	CogitoCollectingThinking CogitoParserState = iota
	CogitoCollectingContent
	CogitoCollectingToolCalls
	CogitoCollectingToolOutput
)

const (
	cogitoThinkingCloseTag    = "</think>"
	cogitoToolCallsBeginTag   = "<|tool▁calls▁begin|>"
	cogitoToolCallsEndTag     = "<|tool▁calls▁end|>"
	cogitoToolCallBeginTag    = "<|tool▁call▁begin|>"
	cogitoToolCallEndTag      = "<|tool▁call▁end|>"
	cogitoToolSepTag          = "<|tool▁sep|>"
	cogitoToolOutputBeginTag  = "<|tool▁output▁begin|>"
	cogitoToolOutputEndTag    = "<|tool▁output▁end|>"
	cogitoToolOutputsBeginTag = "<|tool▁outputs▁begin|>"
	cogitoToolOutputsEndTag   = "<|tool▁outputs▁end|>"
)

type CogitoParser struct {
	state  CogitoParserState
	buffer strings.Builder
}

func (p *CogitoParser) HasToolSupport() bool {
	return true
}

func (p *CogitoParser) HasThinkingSupport() bool {
	return true
}

func (p *CogitoParser) setInitialState(lastMessage *api.Message, tools []api.Tool, thinkValue *api.ThinkValue) {
	prefill := lastMessage != nil && lastMessage.Role == "assistant"

	// Check both model capability AND request preference
	thinkingEnabled := thinkValue != nil && thinkValue.Bool()
	// thinkingEnabled should be set to false for tools

	if !thinkingEnabled {
		p.state = CogitoCollectingContent
		return
	}

	if prefill && lastMessage.Content != "" {
		p.state = CogitoCollectingContent
		return
	}

	// Note: for cogito, if there are tools, then we don't want to be thinking
	if len(tools) > 0 {
		p.state = CogitoCollectingContent
		return
	}

	p.state = CogitoCollectingThinking
}

func (p *CogitoParser) Init(tools []api.Tool, lastMessage *api.Message, thinkValue *api.ThinkValue) []api.Tool {
	p.setInitialState(lastMessage, tools, thinkValue)
	return tools
}

type cogitoEvent interface {
	isCogitoEvent()
}

type cogitoEventThinkingContent struct {
	content string
}

type cogitoEventContent struct {
	content string
}

type cogitoEventToolCall struct {
	toolCall api.ToolCall
}

func (cogitoEventThinkingContent) isCogitoEvent() {}
func (cogitoEventContent) isCogitoEvent()         {}
func (cogitoEventToolCall) isCogitoEvent()        {}

func (p *CogitoParser) Add(s string, done bool) (content string, thinking string, calls []api.ToolCall, err error) {
	p.buffer.WriteString(s)
	events := p.parseEvents()

	var toolCalls []api.ToolCall
	var contentSb strings.Builder
	var thinkingSb strings.Builder
	for _, event := range events {
		switch event := event.(type) {
		case cogitoEventToolCall:
			toolCalls = append(toolCalls, event.toolCall)
		case cogitoEventThinkingContent:
			thinkingSb.WriteString(event.content)
		case cogitoEventContent:
			contentSb.WriteString(event.content)
		}
	}

	return contentSb.String(), thinkingSb.String(), toolCalls, nil
}

func (p *CogitoParser) parseEvents() []cogitoEvent {
	var all []cogitoEvent

	keepLooping := true
	for keepLooping {
		var events []cogitoEvent
		events, keepLooping = p.eat()
		if len(events) > 0 {
			all = append(all, events...)
		}
	}

	return all
}

func (p *CogitoParser) eat() ([]cogitoEvent, bool) {
	var events []cogitoEvent
	bufStr := p.buffer.String()
	if bufStr == "" {
		return events, false
	}

	switch p.state {
	case CogitoCollectingThinking:
		if strings.Contains(bufStr, cogitoThinkingCloseTag) { // thinking[</think>] -> content
			split := strings.SplitN(bufStr, cogitoThinkingCloseTag, 2)
			thinking := split[0]
			thinking = strings.TrimRightFunc(thinking, unicode.IsSpace)

			remaining := split[1]
			remaining = strings.TrimLeftFunc(remaining, unicode.IsSpace)

			p.buffer.Reset()
			p.buffer.WriteString(remaining)
			p.state = CogitoCollectingContent

			if len(thinking) > 0 {
				events = append(events, cogitoEventThinkingContent{content: thinking})
			}
			return events, true
		} else if overlapLen := overlap(bufStr, cogitoThinkingCloseTag); overlapLen > 0 { // partial </think>
			beforePartialTag := bufStr[:len(bufStr)-overlapLen]
			trailingLen := trailingWhitespaceLen(beforePartialTag)
			ambiguousStart := len(beforePartialTag) - trailingLen

			unambiguous := bufStr[:ambiguousStart]
			ambiguous := bufStr[ambiguousStart:]
			p.buffer.Reset()
			p.buffer.WriteString(ambiguous)
			if len(unambiguous) > 0 {
				events = append(events, cogitoEventThinkingContent{content: unambiguous})
			}
			return events, false
		} else { // otherwise it's thinking content
			whitespaceLen := trailingWhitespaceLen(bufStr)
			ambiguousStart := len(bufStr) - whitespaceLen

			unambiguous := bufStr[:ambiguousStart]
			ambiguous := bufStr[ambiguousStart:]
			p.buffer.Reset()
			p.buffer.WriteString(ambiguous)
			if len(unambiguous) > 0 {
				events = append(events, cogitoEventThinkingContent{content: unambiguous})
			}
			return events, false
		}

	case CogitoCollectingContent:
		switch {
		case strings.Contains(bufStr, cogitoToolCallsBeginTag): // content[<|tool▁calls▁begin|>] -> tool calls
			split := strings.SplitN(bufStr, cogitoToolCallsBeginTag, 2)
			contentBefore := strings.TrimRightFunc(split[0], unicode.IsSpace)
			remaining := split[1]

			p.buffer.Reset()
			p.buffer.WriteString(remaining)
			p.state = CogitoCollectingToolCalls

			if len(contentBefore) > 0 {
				events = append(events, cogitoEventContent{content: contentBefore})
			}
			return events, true
		case strings.Contains(bufStr, cogitoToolOutputsBeginTag): // content[<|tool▁outputs▁begin|>] -> tool outputs
			split := strings.SplitN(bufStr, cogitoToolOutputsBeginTag, 2)
			contentBefore := strings.TrimRightFunc(split[0], unicode.IsSpace)
			remaining := split[1]

			p.buffer.Reset()
			p.buffer.WriteString(remaining)
			p.state = CogitoCollectingToolOutput

			if len(contentBefore) > 0 {
				events = append(events, cogitoEventContent{content: contentBefore})
			}
			return events, true
		default: // otherwise it's content
			p.buffer.Reset()
			if len(bufStr) > 0 {
				events = append(events, cogitoEventContent{content: bufStr})
			}
			return events, false
		}
	case CogitoCollectingToolCalls:
		if idx := strings.Index(bufStr, cogitoToolCallBeginTag); idx != -1 {
			startIdx := idx + len(cogitoToolCallBeginTag)
			if endIdx := strings.Index(bufStr[startIdx:], cogitoToolCallEndTag); endIdx != -1 {
				toolCallContent := bufStr[startIdx : startIdx+endIdx]

				if toolCall, err := p.parseToolCallContent(toolCallContent); err == nil {
					remaining := bufStr[startIdx+endIdx+len(cogitoToolCallEndTag):]
					remaining = strings.TrimLeftFunc(remaining, unicode.IsSpace)

					p.buffer.Reset()
					p.buffer.WriteString(remaining)

					events = append(events, cogitoEventToolCall{toolCall: toolCall})
					return events, true
				} else {
					slog.Warn("cogito tool call parsing failed", "error", err)
				}
			}
		}

		if idx := strings.Index(bufStr, cogitoToolCallsEndTag); idx != -1 {
			remaining := bufStr[idx+len(cogitoToolCallsEndTag):]
			remaining = strings.TrimLeftFunc(remaining, unicode.IsSpace)

			p.buffer.Reset()
			p.buffer.WriteString(remaining)
			p.state = CogitoCollectingContent

			return events, true
		}

		return events, false

	case CogitoCollectingToolOutput:
		if idx := strings.Index(bufStr, cogitoToolOutputBeginTag); idx != -1 {
			startIdx := idx + len(cogitoToolOutputBeginTag)
			if endIdx := strings.Index(bufStr[startIdx:], cogitoToolOutputEndTag); endIdx != -1 {
				remaining := bufStr[startIdx+endIdx+len(cogitoToolOutputEndTag):]
				remaining = strings.TrimLeftFunc(remaining, unicode.IsSpace)

				p.buffer.Reset()
				p.buffer.WriteString(remaining)

				return events, true
			}
		}

		if idx := strings.Index(bufStr, cogitoToolOutputsEndTag); idx != -1 {
			remaining := bufStr[idx+len(cogitoToolOutputsEndTag):]
			remaining = strings.TrimLeftFunc(remaining, unicode.IsSpace)

			p.buffer.Reset()
			p.buffer.WriteString(remaining)
			p.state = CogitoCollectingContent

			return events, true
		}

		return events, false
	}

	return events, false
}

func (p *CogitoParser) parseToolCallContent(content string) (api.ToolCall, error) {
	// Expected format: function<|tool▁sep|>tool_name\n```json\n{args}\n```
	parts := strings.SplitN(content, cogitoToolSepTag, 2)
	if len(parts) < 2 {
		return api.ToolCall{}, errors.New("invalid format")
	}
	nameAndArgs := parts[1]

	jsonStart := strings.Index(nameAndArgs, "\n```json\n")
	if jsonStart == -1 {
		return api.ToolCall{}, errors.New("invalid format")
	}
	toolName := strings.TrimSpace(nameAndArgs[:jsonStart])
	jsonContent := nameAndArgs[jsonStart+len("\n```json\n"):]

	jsonEnd := strings.Index(jsonContent, "\n```")
	if jsonEnd == -1 {
		return api.ToolCall{}, errors.New("invalid format")
	}
	argsJSON := jsonContent[:jsonEnd]

	var args api.ToolCallFunctionArguments
	if err := json.Unmarshal([]byte(argsJSON), &args); err != nil {
		return api.ToolCall{}, err
	}

	return api.ToolCall{
		Function: api.ToolCallFunction{
			Name:      toolName,
			Arguments: args,
		},
	}, nil
}
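A minimal driver (not part of the diff) showing how a caller might stream model output through CogitoParser.Add; it assumes api.ThinkValue carries a boolean Value, and the chunk strings are hypothetical:

package main

import (
	"fmt"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/model/parsers"
)

func main() {
	p := &parsers.CogitoParser{}
	// no tools and no prefill, thinking requested -> parser starts in thinking state
	p.Init(nil, nil, &api.ThinkValue{Value: true})

	for _, chunk := range []string{"Let me think", "...</think>", "The answer is 4."} {
		content, thinking, _, _ := p.Add(chunk, false)
		fmt.Printf("content=%q thinking=%q\n", content, thinking)
	}
}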
565
model/parsers/cogito_test.go
Normal file
@@ -0,0 +1,565 @@
package parsers

import (
	"strings"
	"testing"

	"github.com/google/go-cmp/cmp"

	"github.com/ollama/ollama/api"
)

func TestCogitoParser(t *testing.T) {
	tests := []struct {
		name              string
		input             string
		expectedContent   string
		expectedThinking  string
		expectedToolCalls []api.ToolCall
		tools             []api.Tool
		lastMessage       *api.Message
	}{
		{
			name:             "simple_content",
			input:            "This is a simple response.",
			expectedContent:  "This is a simple response.",
			expectedThinking: "",
		},
		{
			name:             "thinking_only",
			input:            "This is thinking content.</think>This is response content.",
			expectedContent:  "This is response content.",
			expectedThinking: "This is thinking content.",
		},
		{
			name: "tool_call_simple",
			input: `<|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>get_weather
` + "```json\n" + `{"location":"Paris"}
` + "```" + `<|tool▁call▁end|><|tool▁calls▁end|>`,
			expectedToolCalls: []api.ToolCall{
				{
					Function: api.ToolCallFunction{
						Name: "get_weather",
						Arguments: api.ToolCallFunctionArguments{
							"location": "Paris",
						},
					},
				},
			},
			tools: []api.Tool{
				{
					Type: "function",
					Function: api.ToolFunction{
						Name: "get_weather",
						Parameters: api.ToolFunctionParameters{
							Properties: map[string]api.ToolProperty{
								"location": {Type: api.PropertyType{"string"}},
							},
						},
					},
				},
			},
		},
		{
			name: "thinking_with_tool_call",
			input: `I need to check the weather.</think><|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>get_weather
` + "```json\n" + `{"location":"Paris"}
` + "```" + `<|tool▁call▁end|><|tool▁calls▁end|>`,
			expectedContent:  "I need to check the weather.</think>",
			expectedThinking: "", // No thinking when tools are present (Cogito-specific behavior)
			expectedToolCalls: []api.ToolCall{
				{
					Function: api.ToolCallFunction{
						Name: "get_weather",
						Arguments: api.ToolCallFunctionArguments{
							"location": "Paris",
						},
					},
				},
			},
			tools: []api.Tool{
				{
					Type: "function",
					Function: api.ToolFunction{
						Name: "get_weather",
						Parameters: api.ToolFunctionParameters{
							Properties: map[string]api.ToolProperty{
								"location": {Type: api.PropertyType{"string"}},
							},
						},
					},
				},
			},
		},
		{
			name: "multiple_tool_calls",
			input: `<|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>get_weather
` + "```json\n" + `{"location":"Paris"}
` + "```" + `<|tool▁call▁end|>
<|tool▁call▁begin|>function<|tool▁sep|>get_weather
` + "```json\n" + `{"location":"London"}
` + "```" + `<|tool▁call▁end|><|tool▁calls▁end|>`,
			expectedToolCalls: []api.ToolCall{
				{
					Function: api.ToolCallFunction{
						Name: "get_weather",
						Arguments: api.ToolCallFunctionArguments{
							"location": "Paris",
						},
					},
				},
				{
					Function: api.ToolCallFunction{
						Name: "get_weather",
						Arguments: api.ToolCallFunctionArguments{
							"location": "London",
						},
					},
				},
			},
			tools: []api.Tool{
				{
					Type: "function",
					Function: api.ToolFunction{
						Name: "get_weather",
						Parameters: api.ToolFunctionParameters{
							Properties: map[string]api.ToolProperty{
								"location": {Type: api.PropertyType{"string"}},
							},
						},
					},
				},
			},
		},
		{
			name: "complex_tool_arguments",
			input: `<|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>process_data
` + "```json\n" + `{"items":["item1","item2"],"config":{"enabled":true,"threshold":0.95},"count":42}
` + "```" + `<|tool▁call▁end|><|tool▁calls▁end|>`,
			expectedToolCalls: []api.ToolCall{
				{
					Function: api.ToolCallFunction{
						Name: "process_data",
						Arguments: api.ToolCallFunctionArguments{
							"items":  []any{"item1", "item2"},
							"config": map[string]any{"enabled": true, "threshold": 0.95},
							"count":  42.0,
						},
					},
				},
			},
		},
		{
			name:             "tool_output_parsing",
			input:            `<|tool▁outputs▁begin|><|tool▁output▁begin|>{"temperature": 22, "condition": "sunny"}<|tool▁output▁end|><|tool▁outputs▁end|>`,
			expectedContent:  "",
			expectedThinking: "",
		},
		{
			name: "thinking_with_multiline_content",
			input: `This is line 1
This is line 2
This is line 3</think>Final response here.`,
			expectedContent:  "Final response here.",
			expectedThinking: "This is line 1\nThis is line 2\nThis is line 3",
		},
		{
			name:             "no_thinking_simple",
			input:            "This is content.",
			expectedContent:  "This is content.",
			expectedThinking: "",
		},
		{
			name:            "prefill_content_only",
			input:           "Continuing from previous content.",
			expectedContent: "Continuing from previous content.",
			lastMessage: &api.Message{
				Role:    "assistant",
				Content: "Previous content",
			},
		},
		{
			name:             "prefill_with_thinking",
			input:            "Continuing thinking</think>Continuing content.",
			expectedContent:  "Continuing content.",
			expectedThinking: "Continuing thinking",
			lastMessage: &api.Message{
				Role: "assistant",
			},
		},
		// Edge cases
		{
			name:             "nested_think_tags_in_thinking",
			input:            "I'm thinking <think>nested</think> more thinking</think>Final content.",
			expectedContent:  "more thinking</think>Final content.",
			expectedThinking: "I'm thinking <think>nested",
|
||||
{
|
||||
name: "multiple_think_close_tags",
|
||||
input: "First thinking</think>Content</think>More content.",
|
||||
expectedContent: "Content</think>More content.",
|
||||
expectedThinking: "First thinking",
|
||||
},
|
||||
{
|
||||
name: "empty_thinking_content",
|
||||
input: "</think>Just content here.",
|
||||
expectedContent: "</think>Just content here.",
|
||||
expectedThinking: "",
|
||||
},
|
||||
{
|
||||
name: "thinking_disabled_with_think_tags",
|
||||
input: "Content with </think> tags should be treated as content.",
|
||||
expectedContent: "Content with </think> tags should be treated as content.",
|
||||
expectedThinking: "",
|
||||
lastMessage: &api.Message{
|
||||
Role: "assistant",
|
||||
Content: "existing", // Forces non-thinking mode
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Use thinking-enabled parser for tests that expect thinking
|
||||
hasThinking := tt.expectedThinking != ""
|
||||
parser := &CogitoParser{} // it has thinking support
|
||||
parser.Init(tt.tools, tt.lastMessage, &api.ThinkValue{Value: hasThinking}) // but we should set it with the request that the user wants
|
||||
|
||||
content, thinking, toolCalls, err := parser.Add(tt.input, true)
|
||||
if err != nil {
|
||||
t.Fatalf("Add() error = %v", err)
|
||||
}
|
||||
|
||||
if diff := cmp.Diff(tt.expectedContent, content); diff != "" {
|
||||
t.Errorf("content mismatch (-want +got):\n%s", diff)
|
||||
}
|
||||
|
||||
if diff := cmp.Diff(tt.expectedThinking, thinking); diff != "" {
|
||||
t.Errorf("thinking mismatch (-want +got):\n%s", diff)
|
||||
}
|
||||
|
||||
if diff := cmp.Diff(tt.expectedToolCalls, toolCalls); diff != "" {
|
||||
t.Errorf("tool calls mismatch (-want +got):\n%s", diff)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCogitoParser_Streaming(t *testing.T) {
|
||||
parser := &CogitoParser{}
|
||||
parser.Init(nil, nil, &api.ThinkValue{Value: true})
|
||||
|
||||
chunks := []string{
|
||||
"This is ",
|
||||
"thinking content",
|
||||
".</think>This is ",
|
||||
"content.<|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>test_tool\n```json\n{\"arg\":\"value\"}\n```<|tool▁call▁end|><|tool▁calls▁end|>",
|
||||
}
|
||||
|
||||
var finalContent, finalThinking strings.Builder
|
||||
var finalToolCalls []api.ToolCall
|
||||
|
||||
for i, chunk := range chunks {
|
||||
done := i == len(chunks)-1
|
||||
content, thinking, toolCalls, err := parser.Add(chunk, done)
|
||||
if err != nil {
|
||||
t.Fatalf("Add() error on chunk %d: %v", i, err)
|
||||
}
|
||||
|
||||
finalContent.WriteString(content)
|
||||
finalThinking.WriteString(thinking)
|
||||
finalToolCalls = append(finalToolCalls, toolCalls...)
|
||||
}
|
||||
|
||||
expectedContent := "This is content."
|
||||
expectedThinking := "This is thinking content."
|
||||
expectedToolCalls := []api.ToolCall{
|
||||
{
|
||||
Function: api.ToolCallFunction{
|
||||
Name: "test_tool",
|
||||
Arguments: api.ToolCallFunctionArguments{
|
||||
"arg": "value",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if finalContent.String() != expectedContent {
|
||||
t.Errorf("expected content %q, got %q", expectedContent, finalContent.String())
|
||||
}
|
||||
|
||||
if finalThinking.String() != expectedThinking {
|
||||
t.Errorf("expected thinking %q, got %q", expectedThinking, finalThinking.String())
|
||||
}
|
||||
|
||||
if diff := cmp.Diff(expectedToolCalls, finalToolCalls); diff != "" {
|
||||
t.Errorf("tool calls mismatch (-want +got):\n%s", diff)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCogitoParser_StreamingEdgeCases(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
chunks []string
|
||||
expectedContent string
|
||||
expectedThinking string
|
||||
expectedToolCalls []api.ToolCall
|
||||
hasThinkingSupport bool
|
||||
}{
|
||||
{
|
||||
name: "split_thinking_tag",
|
||||
chunks: []string{
|
||||
"This is thinking content</thi",
|
||||
"nk>This is content.",
|
||||
},
|
||||
expectedContent: "This is content.",
|
||||
expectedThinking: "This is thinking content",
|
||||
hasThinkingSupport: true,
|
||||
},
|
||||
{
|
||||
name: "split_tool_calls_begin_tag_conservative_parsing",
|
||||
chunks: []string{
|
||||
"Content before<|tool▁calls▁beg",
|
||||
"in|><|tool▁call▁begin|>function<|tool▁sep|>test\n```json\n{}\n```<|tool▁call▁end|><|tool▁calls▁end|>",
|
||||
},
|
||||
// Parser is conservative - treats incomplete tags as content
|
||||
expectedContent: "Content before<|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>test\n```json\n{}\n```<|tool▁call▁end|><|tool▁calls▁end|>",
|
||||
expectedToolCalls: nil,
|
||||
hasThinkingSupport: false,
|
||||
},
|
||||
{
|
||||
name: "thinking_disabled_with_split_tags",
|
||||
chunks: []string{
|
||||
"Content with </thi",
|
||||
"nk> should be treated as content.",
|
||||
},
|
||||
expectedContent: "Content with </think> should be treated as content.",
|
||||
expectedThinking: "",
|
||||
hasThinkingSupport: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
parser := &CogitoParser{}
|
||||
parser.Init(nil, nil, &api.ThinkValue{Value: tt.hasThinkingSupport})
|
||||
|
||||
var finalContent, finalThinking strings.Builder
|
||||
var finalToolCalls []api.ToolCall
|
||||
|
||||
for i, chunk := range tt.chunks {
|
||||
done := i == len(tt.chunks)-1
|
||||
content, thinking, toolCalls, err := parser.Add(chunk, done)
|
||||
if err != nil {
|
||||
t.Fatalf("Add() error on chunk %d: %v", i, err)
|
||||
}
|
||||
|
||||
finalContent.WriteString(content)
|
||||
finalThinking.WriteString(thinking)
|
||||
finalToolCalls = append(finalToolCalls, toolCalls...)
|
||||
}
|
||||
|
||||
if finalContent.String() != tt.expectedContent {
|
||||
t.Errorf("expected content %q, got %q", tt.expectedContent, finalContent.String())
|
||||
}
|
||||
|
||||
if finalThinking.String() != tt.expectedThinking {
|
||||
t.Errorf("expected thinking %q, got %q", tt.expectedThinking, finalThinking.String())
|
||||
}
|
||||
|
||||
if diff := cmp.Diff(tt.expectedToolCalls, finalToolCalls); diff != "" {
|
||||
t.Errorf("tool calls mismatch (-want +got):\n%s", diff)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCogitoParser_HasToolSupport(t *testing.T) {
|
||||
parser := &CogitoParser{}
|
||||
if !parser.HasToolSupport() {
|
||||
t.Error("CogitoParser should support tools")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCogitoParser_Init(t *testing.T) {
|
||||
parser := &CogitoParser{}
|
||||
|
||||
tools := []api.Tool{
|
||||
{Function: api.ToolFunction{Name: "test_tool"}},
|
||||
}
|
||||
|
||||
lastMessage := &api.Message{Role: "assistant", Content: "previous"}
|
||||
|
||||
returnedTools := parser.Init(tools, lastMessage, nil)
|
||||
|
||||
if len(returnedTools) != len(tools) {
|
||||
t.Errorf("expected %d tools returned, got %d", len(tools), len(returnedTools))
|
||||
}
|
||||
}
|
||||
|
||||
func TestCogitoParser_parseToolCallContent(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
content string
|
||||
expected api.ToolCall
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "valid_tool_call_standard_format",
|
||||
content: `function<|tool▁sep|>get_weather
|
||||
` + "```json\n" + `{"location":"Paris"}
|
||||
` + "```",
|
||||
expected: api.ToolCall{
|
||||
Function: api.ToolCallFunction{
|
||||
Name: "get_weather",
|
||||
Arguments: api.ToolCallFunctionArguments{
|
||||
"location": "Paris",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "valid_tool_call_complex_args",
|
||||
content: `function<|tool▁sep|>process_data
|
||||
` + "```json\n" + `{"items":["item1","item2"],"config":{"enabled":true},"count":42}
|
||||
` + "```",
|
||||
expected: api.ToolCall{
|
||||
Function: api.ToolCallFunction{
|
||||
Name: "process_data",
|
||||
Arguments: api.ToolCallFunctionArguments{
|
||||
"items": []any{"item1", "item2"},
|
||||
"config": map[string]any{"enabled": true},
|
||||
"count": 42.0,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "valid_tool_call_empty_args",
|
||||
content: `function<|tool▁sep|>no_args_tool
|
||||
` + "```json\n" + `{}
|
||||
` + "```",
|
||||
expected: api.ToolCall{
|
||||
Function: api.ToolCallFunction{
|
||||
Name: "no_args_tool",
|
||||
Arguments: api.ToolCallFunctionArguments{},
|
||||
},
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "missing_separator",
|
||||
content: `functionget_weather` + "```json\n" + `{"location":"Paris"}` + "\n```",
|
||||
expected: api.ToolCall{},
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "invalid_function_type",
|
||||
content: `not_function<|tool▁sep|>get_weather` + "```json\n" + `{"location":"Paris"}` + "\n```",
|
||||
expected: api.ToolCall{},
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "missing_json_block_start",
|
||||
content: `function<|tool▁sep|>get_weather{"location":"Paris"}` + "```",
|
||||
expected: api.ToolCall{},
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "missing_json_block_end",
|
||||
content: `function<|tool▁sep|>get_weather` + "```json\n" + `{"location":"Paris"}`,
|
||||
expected: api.ToolCall{},
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "invalid_json",
|
||||
content: `function<|tool▁sep|>get_weather` + "```json\n" + `{location:Paris}` + "\n```",
|
||||
expected: api.ToolCall{},
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "empty_function_type",
|
||||
content: `<|tool▁sep|>get_weather` + "```json\n" + `{"location":"Paris"}` + "\n```",
|
||||
expected: api.ToolCall{},
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "tool_with_spaces_in_name",
|
||||
content: `function<|tool▁sep|> get_weather
|
||||
` + "```json\n" + `{"location":"Paris"}
|
||||
` + "```",
|
||||
expected: api.ToolCall{
|
||||
Function: api.ToolCallFunction{
|
||||
Name: "get_weather",
|
||||
Arguments: api.ToolCallFunctionArguments{
|
||||
"location": "Paris",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "tool_with_multiline_json",
|
||||
content: `function<|tool▁sep|>get_weather
|
||||
` + "```json\n" + `{
|
||||
"location": "Paris",
|
||||
"units": "metric"
|
||||
}
|
||||
` + "```",
|
||||
expected: api.ToolCall{
|
||||
Function: api.ToolCallFunction{
|
||||
Name: "get_weather",
|
||||
Arguments: api.ToolCallFunctionArguments{
|
||||
"location": "Paris",
|
||||
"units": "metric",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "tool_with_nested_objects",
|
||||
content: `function<|tool▁sep|>complex_tool
|
||||
` + "```json\n" + `{"nested":{"deep":{"value":123}}}
|
||||
` + "```",
|
||||
expected: api.ToolCall{
|
||||
Function: api.ToolCallFunction{
|
||||
Name: "complex_tool",
|
||||
Arguments: api.ToolCallFunctionArguments{
|
||||
"nested": map[string]any{
|
||||
"deep": map[string]any{
|
||||
"value": 123.0,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
parser := &CogitoParser{}
|
||||
|
||||
result, err := parser.parseToolCallContent(tt.content)
|
||||
|
||||
if tt.expectError {
|
||||
if err == nil {
|
||||
t.Errorf("expected error but got none")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if diff := cmp.Diff(tt.expected, result); diff != "" {
|
||||
t.Errorf("tool call mismatch (-want +got):\n%s", diff)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
44
model/parsers/intellect3.go
Normal file
44
model/parsers/intellect3.go
Normal file
@@ -0,0 +1,44 @@
|
||||
package parsers
|
||||
|
||||
import (
|
||||
"github.com/ollama/ollama/api"
|
||||
"github.com/ollama/ollama/thinking"
|
||||
)
|
||||
|
||||
// Intellect3Parser combines thinking support using
|
||||
// the built-in thinking parser, with tool call support
|
||||
// via qwen3-coder's parser.
|
||||
type Intellect3Parser struct {
|
||||
thinkingParser thinking.Parser
|
||||
toolParser Qwen3CoderParser
|
||||
}
|
||||
|
||||
func (p *Intellect3Parser) HasToolSupport() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (p *Intellect3Parser) HasThinkingSupport() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (p *Intellect3Parser) Init(tools []api.Tool, lastMessage *api.Message, thinkValue *api.ThinkValue) []api.Tool {
|
||||
p.thinkingParser = thinking.Parser{
|
||||
OpeningTag: "<think>",
|
||||
ClosingTag: "</think>",
|
||||
}
|
||||
p.toolParser = Qwen3CoderParser{}
|
||||
return p.toolParser.Init(tools, lastMessage, thinkValue)
|
||||
}
|
||||
|
||||
func (p *Intellect3Parser) Add(s string, done bool) (content string, thinking string, calls []api.ToolCall, err error) {
|
||||
// First extract thinking content
|
||||
thinkingContent, remainingContent := p.thinkingParser.AddContent(s)
|
||||
|
||||
// Then process the remaining content for tool calls
|
||||
toolContent, _, toolCalls, err := p.toolParser.Add(remainingContent, done)
|
||||
if err != nil {
|
||||
return "", thinkingContent, nil, err
|
||||
}
|
||||
|
||||
return toolContent, thinkingContent, toolCalls, nil
|
||||
}
|
||||
542
model/parsers/intellect3_test.go
Normal file
542
model/parsers/intellect3_test.go
Normal file
@@ -0,0 +1,542 @@
|
||||
package parsers
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/ollama/ollama/api"
|
||||
)
|
||||
|
||||
func TestIntellect3ParserThinkingOnly(t *testing.T) {
|
||||
cases := []struct {
|
||||
desc string
|
||||
chunks []string
|
||||
wantText string
|
||||
wantThink string
|
||||
}{
|
||||
{
|
||||
desc: "simple thinking content",
|
||||
chunks: []string{"<think>I need to analyze this</think>Here is my response"},
|
||||
wantText: "Here is my response",
|
||||
wantThink: "I need to analyze this",
|
||||
},
|
||||
{
|
||||
desc: "thinking with whitespace",
|
||||
chunks: []string{"<think>\n Some thoughts \n</think>\n\nContent"},
|
||||
wantText: "Content",
|
||||
wantThink: "Some thoughts \n", // Thinking parser preserves internal whitespace
|
||||
},
|
||||
{
|
||||
desc: "thinking only",
|
||||
chunks: []string{"<think>Just thinking</think>"},
|
||||
wantText: "",
|
||||
wantThink: "Just thinking",
|
||||
},
|
||||
{
|
||||
desc: "no thinking tags",
|
||||
chunks: []string{"Just regular content"},
|
||||
wantText: "Just regular content",
|
||||
wantThink: "",
|
||||
},
|
||||
{
|
||||
desc: "streaming thinking content",
|
||||
chunks: []string{"<think>Fir", "st part", " second part</think>Content"},
|
||||
wantText: "Content",
|
||||
wantThink: "First part second part",
|
||||
},
|
||||
{
|
||||
desc: "partial opening tag",
|
||||
chunks: []string{"<thi", "nk>Thinking</think>Content"},
|
||||
wantText: "Content",
|
||||
wantThink: "Thinking",
|
||||
},
|
||||
{
|
||||
desc: "partial closing tag",
|
||||
chunks: []string{"<think>Thinking</thi", "nk>Content"},
|
||||
wantText: "Content",
|
||||
wantThink: "Thinking",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
parser := Intellect3Parser{}
|
||||
parser.Init(nil, nil, nil)
|
||||
|
||||
var gotText, gotThink string
|
||||
for i, chunk := range tc.chunks {
|
||||
isLast := i == len(tc.chunks)-1
|
||||
text, think, calls, err := parser.Add(chunk, isLast)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
gotText += text
|
||||
gotThink += think
|
||||
if len(calls) > 0 {
|
||||
t.Fatalf("expected no tool calls, got %v", calls)
|
||||
}
|
||||
}
|
||||
|
||||
if gotText != tc.wantText {
|
||||
t.Errorf("content: got %q, want %q", gotText, tc.wantText)
|
||||
}
|
||||
if gotThink != tc.wantThink {
|
||||
t.Errorf("thinking: got %q, want %q", gotThink, tc.wantThink)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntellect3ParserToolCallsOnly(t *testing.T) {
|
||||
tools := []api.Tool{
|
||||
tool("get_weather", map[string]api.ToolProperty{
|
||||
"location": {Type: api.PropertyType{"string"}},
|
||||
"unit": {Type: api.PropertyType{"string"}},
|
||||
}),
|
||||
}
|
||||
|
||||
cases := []struct {
|
||||
desc string
|
||||
chunks []string
|
||||
wantText string
|
||||
wantCalls []api.ToolCall
|
||||
}{
|
||||
{
|
||||
desc: "simple tool call",
|
||||
chunks: []string{
|
||||
"Let me check the weather<tool_call><function=get_weather>\n<parameter=location>\nSan Francisco\n</parameter>\n<parameter=unit>\ncelsius\n</parameter>\n</function></tool_call>",
|
||||
},
|
||||
wantText: "Let me check the weather",
|
||||
wantCalls: []api.ToolCall{
|
||||
{
|
||||
Function: api.ToolCallFunction{
|
||||
Name: "get_weather",
|
||||
Arguments: map[string]any{
|
||||
"location": "San Francisco",
|
||||
"unit": "celsius",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "tool call streaming",
|
||||
chunks: []string{
|
||||
"Checking<tool_call><function=get_wea",
|
||||
"ther>\n<parameter=location>\nNew York\n</param", //nolint:all
|
||||
"eter>\n<parameter=unit>\nfahrenheit\n</parameter>\n</function></tool_call>Done",
|
||||
},
|
||||
wantText: "CheckingDone",
|
||||
wantCalls: []api.ToolCall{
|
||||
{
|
||||
Function: api.ToolCallFunction{
|
||||
Name: "get_weather",
|
||||
Arguments: map[string]any{
|
||||
"location": "New York",
|
||||
"unit": "fahrenheit",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "multiple tool calls",
|
||||
chunks: []string{
|
||||
"<tool_call><function=get_weather>\n<parameter=location>\nBoston\n</parameter>\n<parameter=unit>\ncelsius\n</parameter>\n</function></tool_call>",
|
||||
"<tool_call><function=get_weather>\n<parameter=location>\nSeattle\n</parameter>\n<parameter=unit>\nfahrenheit\n</parameter>\n</function></tool_call>",
|
||||
},
|
||||
wantText: "",
|
||||
wantCalls: []api.ToolCall{
|
||||
{
|
||||
Function: api.ToolCallFunction{
|
||||
Name: "get_weather",
|
||||
Arguments: map[string]any{
|
||||
"location": "Boston",
|
||||
"unit": "celsius",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Function: api.ToolCallFunction{
|
||||
Name: "get_weather",
|
||||
Arguments: map[string]any{
|
||||
"location": "Seattle",
|
||||
"unit": "fahrenheit",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "no tool calls",
|
||||
chunks: []string{"Just regular content"},
|
||||
wantText: "Just regular content",
|
||||
wantCalls: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
parser := Intellect3Parser{}
|
||||
parser.Init(tools, nil, nil)
|
||||
|
||||
var gotText string
|
||||
var gotCalls []api.ToolCall
|
||||
for i, chunk := range tc.chunks {
|
||||
isLast := i == len(tc.chunks)-1
|
||||
text, think, calls, err := parser.Add(chunk, isLast)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
gotText += text
|
||||
gotCalls = append(gotCalls, calls...)
|
||||
if think != "" {
|
||||
t.Fatalf("expected no thinking, got %q", think)
|
||||
}
|
||||
}
|
||||
|
||||
if gotText != tc.wantText {
|
||||
t.Errorf("content: got %q, want %q", gotText, tc.wantText)
|
||||
}
|
||||
if !reflect.DeepEqual(gotCalls, tc.wantCalls) {
|
||||
t.Errorf("tool calls: got %#v, want %#v", gotCalls, tc.wantCalls)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntellect3ParserCombined(t *testing.T) {
|
||||
tools := []api.Tool{
|
||||
tool("get_weather", map[string]api.ToolProperty{
|
||||
"location": {Type: api.PropertyType{"string"}},
|
||||
"unit": {Type: api.PropertyType{"string"}},
|
||||
}),
|
||||
}
|
||||
|
||||
cases := []struct {
|
||||
desc string
|
||||
chunks []string
|
||||
wantText string
|
||||
wantThink string
|
||||
wantCalls []api.ToolCall
|
||||
}{
|
||||
{
|
||||
desc: "thinking then tool call",
|
||||
chunks: []string{
|
||||
"<think>Need to get weather data</think>Let me check<tool_call><function=get_weather>\n<parameter=location>\nParis\n</parameter>\n<parameter=unit>\ncelsius\n</parameter>\n</function></tool_call>",
|
||||
},
|
||||
wantText: "Let me check",
|
||||
wantThink: "Need to get weather data",
|
||||
wantCalls: []api.ToolCall{
|
||||
{
|
||||
Function: api.ToolCallFunction{
|
||||
Name: "get_weather",
|
||||
Arguments: map[string]any{
|
||||
"location": "Paris",
|
||||
"unit": "celsius",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "thinking, tool call, and final content",
|
||||
chunks: []string{
|
||||
"<think>User wants weather info</think>Checking weather<tool_call><function=get_weather>\n<parameter=location>\nTokyo\n</parameter>\n<parameter=unit>\ncelsius\n</parameter>\n</function></tool_call>Done!",
|
||||
},
|
||||
wantText: "Checking weatherDone!",
|
||||
wantThink: "User wants weather info",
|
||||
wantCalls: []api.ToolCall{
|
||||
{
|
||||
Function: api.ToolCallFunction{
|
||||
Name: "get_weather",
|
||||
Arguments: map[string]any{
|
||||
"location": "Tokyo",
|
||||
"unit": "celsius",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "streaming combined content",
|
||||
chunks: []string{
|
||||
"<think>Analyzing",
|
||||
" the request</think>",
|
||||
"Let me help<tool_call>",
|
||||
"<function=get_weather>\n<parameter=location>\nLondon",
|
||||
"\n</parameter>\n<parameter=unit>\ncelsius\n</parameter>\n</function>",
|
||||
"</tool_call>There you go!",
|
||||
},
|
||||
wantText: "Let me helpThere you go!",
|
||||
wantThink: "Analyzing the request",
|
||||
wantCalls: []api.ToolCall{
|
||||
{
|
||||
Function: api.ToolCallFunction{
|
||||
Name: "get_weather",
|
||||
Arguments: map[string]any{
|
||||
"location": "London",
|
||||
"unit": "celsius",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "multiple tool calls with thinking",
|
||||
chunks: []string{
|
||||
"<think>Need multiple locations</think>",
|
||||
"<tool_call><function=get_weather>\n<parameter=location>\nBoston\n</parameter>\n<parameter=unit>\ncelsius\n</parameter>\n</function></tool_call>",
|
||||
"and<tool_call><function=get_weather>\n<parameter=location>\nBerlin\n</parameter>\n<parameter=unit>\ncelsius\n</parameter>\n</function></tool_call>",
|
||||
},
|
||||
wantText: "and",
|
||||
wantThink: "Need multiple locations",
|
||||
wantCalls: []api.ToolCall{
|
||||
{
|
||||
Function: api.ToolCallFunction{
|
||||
Name: "get_weather",
|
||||
Arguments: map[string]any{
|
||||
"location": "Boston",
|
||||
"unit": "celsius",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Function: api.ToolCallFunction{
|
||||
Name: "get_weather",
|
||||
Arguments: map[string]any{
|
||||
"location": "Berlin",
|
||||
"unit": "celsius",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
parser := Intellect3Parser{}
|
||||
parser.Init(tools, nil, nil)
|
||||
|
||||
var gotText, gotThink string
|
||||
var gotCalls []api.ToolCall
|
||||
for i, chunk := range tc.chunks {
|
||||
isLast := i == len(tc.chunks)-1
|
||||
text, think, calls, err := parser.Add(chunk, isLast)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
gotText += text
|
||||
gotThink += think
|
||||
gotCalls = append(gotCalls, calls...)
|
||||
}
|
||||
|
||||
if gotText != tc.wantText {
|
||||
t.Errorf("content: got %q, want %q", gotText, tc.wantText)
|
||||
}
|
||||
if gotThink != tc.wantThink {
|
||||
t.Errorf("thinking: got %q, want %q", gotThink, tc.wantThink)
|
||||
}
|
||||
if !reflect.DeepEqual(gotCalls, tc.wantCalls) {
|
||||
t.Errorf("tool calls: got %#v, want %#v", gotCalls, tc.wantCalls)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntellect3ParserEdgeCases(t *testing.T) {
|
||||
tools := []api.Tool{
|
||||
tool("test_func", map[string]api.ToolProperty{
|
||||
"param": {Type: api.PropertyType{"string"}},
|
||||
}),
|
||||
}
|
||||
|
||||
cases := []struct {
|
||||
desc string
|
||||
chunks []string
|
||||
wantText string
|
||||
wantThink string
|
||||
wantCalls int
|
||||
}{
|
||||
{
|
||||
desc: "empty input",
|
||||
chunks: []string{""},
|
||||
wantText: "",
|
||||
wantThink: "",
|
||||
wantCalls: 0,
|
||||
},
|
||||
{
|
||||
desc: "only whitespace",
|
||||
chunks: []string{" \n \t "},
|
||||
wantText: "",
|
||||
wantThink: "",
|
||||
wantCalls: 0,
|
||||
},
|
||||
{
|
||||
desc: "unclosed thinking tag",
|
||||
chunks: []string{"<think>Never closes"},
|
||||
wantText: "",
|
||||
wantThink: "Never closes",
|
||||
wantCalls: 0,
|
||||
},
|
||||
{
|
||||
desc: "unclosed tool call tag",
|
||||
chunks: []string{"<tool_call><function=test_func>\n<parameter=param>\nvalue\n</parameter>\n</function>"},
|
||||
wantText: "", // Qwen3CoderParser waits for closing tag, doesn't emit partial tool calls
|
||||
wantThink: "",
|
||||
wantCalls: 0, // Won't be parsed until </tool_call> is seen
|
||||
},
|
||||
{
|
||||
desc: "unicode in thinking",
|
||||
chunks: []string{"<think>思考中 🤔</think>答案是 42"},
|
||||
wantText: "答案是 42",
|
||||
wantThink: "思考中 🤔",
|
||||
wantCalls: 0,
|
||||
},
|
||||
{
|
||||
desc: "fake thinking tag",
|
||||
chunks: []string{"<thinking>This is not the right tag</thinking>Content"},
|
||||
wantText: "<thinking>This is not the right tag</thinking>Content",
|
||||
wantThink: "",
|
||||
wantCalls: 0,
|
||||
},
|
||||
{
|
||||
desc: "fake tool call tag",
|
||||
chunks: []string{"<tool>Not a tool call</tool>"},
|
||||
wantText: "<tool>Not a tool call</tool>",
|
||||
wantThink: "",
|
||||
wantCalls: 0,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
parser := Intellect3Parser{}
|
||||
parser.Init(tools, nil, nil)
|
||||
|
||||
var gotText, gotThink string
|
||||
var gotCalls []api.ToolCall
|
||||
for i, chunk := range tc.chunks {
|
||||
isLast := i == len(tc.chunks)-1
|
||||
text, think, calls, err := parser.Add(chunk, isLast)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
gotText += text
|
||||
gotThink += think
|
||||
gotCalls = append(gotCalls, calls...)
|
||||
}
|
||||
|
||||
if gotText != tc.wantText {
|
||||
t.Errorf("content: got %q, want %q", gotText, tc.wantText)
|
||||
}
|
||||
if gotThink != tc.wantThink {
|
||||
t.Errorf("thinking: got %q, want %q", gotThink, tc.wantThink)
|
||||
}
|
||||
if len(gotCalls) != tc.wantCalls {
|
||||
t.Errorf("tool calls count: got %d, want %d", len(gotCalls), tc.wantCalls)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntellect3ParserCapabilities(t *testing.T) {
|
||||
parser := Intellect3Parser{}
|
||||
|
||||
if !parser.HasToolSupport() {
|
||||
t.Error("Intellect3Parser should have tool support")
|
||||
}
|
||||
|
||||
if !parser.HasThinkingSupport() {
|
||||
t.Error("Intellect3Parser should have thinking support")
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntellect3ParserInit(t *testing.T) {
|
||||
parser := Intellect3Parser{}
|
||||
|
||||
tools := []api.Tool{
|
||||
tool("test", map[string]api.ToolProperty{
|
||||
"param": {Type: api.PropertyType{"string"}},
|
||||
}),
|
||||
}
|
||||
|
||||
returnedTools := parser.Init(tools, nil, nil)
|
||||
|
||||
// Should return tools unchanged (delegated to Qwen3CoderParser)
|
||||
if !reflect.DeepEqual(returnedTools, tools) {
|
||||
t.Errorf("Init should return tools unchanged")
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntellect3ParserWhitespaceHandling(t *testing.T) {
|
||||
tools := []api.Tool{
|
||||
tool("test", map[string]api.ToolProperty{
|
||||
"param": {Type: api.PropertyType{"string"}},
|
||||
}),
|
||||
}
|
||||
|
||||
cases := []struct {
|
||||
desc string
|
||||
chunks []string
|
||||
wantText string
|
||||
wantThink string
|
||||
}{
|
||||
{
|
||||
desc: "whitespace between thinking and content",
|
||||
chunks: []string{"<think>Thinking</think>\n\n\nContent"},
|
||||
wantText: "Content",
|
||||
wantThink: "Thinking",
|
||||
},
|
||||
{
|
||||
desc: "whitespace inside thinking tags",
|
||||
chunks: []string{"<think> \n Thinking \n </think>Content"},
|
||||
wantText: "Content",
|
||||
wantThink: "Thinking \n ", // Thinking parser preserves internal whitespace
|
||||
},
|
||||
{
|
||||
desc: "leading whitespace before thinking",
|
||||
chunks: []string{" <think>Thinking</think>Content"},
|
||||
wantText: "Content",
|
||||
wantThink: "Thinking",
|
||||
},
|
||||
{
|
||||
desc: "whitespace before tool call",
|
||||
chunks: []string{"Text <tool_call><function=test>\n<parameter=param>\nvalue\n</parameter>\n</function></tool_call>"},
|
||||
wantText: "Text",
|
||||
wantThink: "",
|
||||
},
|
||||
{
|
||||
desc: "whitespace after tool call",
|
||||
chunks: []string{"<tool_call><function=test>\n<parameter=param>\nvalue\n</parameter>\n</function></tool_call> Text"},
|
||||
wantText: "Text",
|
||||
wantThink: "",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
parser := Intellect3Parser{}
|
||||
parser.Init(tools, nil, nil)
|
||||
|
||||
var gotText, gotThink string
|
||||
for i, chunk := range tc.chunks {
|
||||
isLast := i == len(tc.chunks)-1
|
||||
text, think, _, err := parser.Add(chunk, isLast)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
gotText += text
|
||||
gotThink += think
|
||||
}
|
||||
|
||||
if gotText != tc.wantText {
|
||||
t.Errorf("content: got %q, want %q", gotText, tc.wantText)
|
||||
}
|
||||
if gotThink != tc.wantThink {
|
||||
t.Errorf("thinking: got %q, want %q", gotThink, tc.wantThink)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -6,9 +6,9 @@ import (
|
||||
)
|
||||
|
||||
type Parser interface {
|
||||
// Init initializes the parser with tools and optional last message for chat prefill
|
||||
// Init initializes the parser with tools, optional last message for chat prefill, and think value
|
||||
// Returns processed tools if the parser needs to modify them (e.g., harmony renames them)
|
||||
Init(tools []api.Tool, lastMessage *api.Message) []api.Tool
|
||||
Init(tools []api.Tool, lastMessage *api.Message, thinkValue *api.ThinkValue) []api.Tool
|
||||
// Add processes streamed content and returns parsed content, thinking, and tool calls
|
||||
// The done flag indicates if this is the last chunk (used for draining accumulators)
|
||||
Add(s string, done bool) (content string, thinking string, calls []api.ToolCall, err error)
|
||||
@@ -52,6 +52,10 @@ func ParserForName(name string) Parser {
|
||||
return &PassthroughParser{}
|
||||
case "harmony":
|
||||
return harmony.NewHarmonyMessageHandler()
|
||||
case "cogito":
|
||||
return &CogitoParser{}
|
||||
case "intellect-3":
|
||||
return &Intellect3Parser{}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
@@ -59,7 +63,7 @@ func ParserForName(name string) Parser {
|
||||
|
||||
type PassthroughParser struct{}
|
||||
|
||||
func (p *PassthroughParser) Init(tools []api.Tool, lastMessage *api.Message) []api.Tool {
|
||||
func (p *PassthroughParser) Init(tools []api.Tool, lastMessage *api.Message, thinkValue *api.ThinkValue) []api.Tool {
|
||||
return tools // passthrough doesn't modify tools
|
||||
}
|
||||
|
||||
|
||||
@@ -10,7 +10,7 @@ type mockParser struct {
|
||||
name string
|
||||
}
|
||||
|
||||
func (m *mockParser) Init(tools []api.Tool, lastMessage *api.Message) []api.Tool {
|
||||
func (m *mockParser) Init(tools []api.Tool, lastMessage *api.Message, thinkValue *api.ThinkValue) []api.Tool {
|
||||
return tools
|
||||
}
|
||||
|
||||
|
||||
@@ -43,7 +43,7 @@ func (p *Qwen3CoderParser) HasThinkingSupport() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (p *Qwen3CoderParser) Init(tools []api.Tool, lastMessage *api.Message) []api.Tool {
|
||||
func (p *Qwen3CoderParser) Init(tools []api.Tool, lastMessage *api.Message, thinkValue *api.ThinkValue) []api.Tool {
|
||||
p.tools = tools
|
||||
return tools // Qwen doesn't modify tools
|
||||
}
|
||||
@@ -432,7 +432,7 @@ func transformToXML(raw string) string {
|
||||
groups := qwenTagRegex.FindStringSubmatch(match)
|
||||
tag := groups[1]
|
||||
var escapedValue strings.Builder
|
||||
xml.EscapeText(&escapedValue, []byte(groups[2]))
|
||||
_ = xml.EscapeText(&escapedValue, []byte(groups[2])) // error is always nil for strings.Builder
|
||||
return fmt.Sprintf(`<%s name="%s">`, tag, escapedValue.String())
|
||||
})
|
||||
|
||||
|
||||
@@ -54,7 +54,7 @@ func (p *Qwen3VLParser) setInitialState(lastMessage *api.Message) {
|
||||
p.state = CollectingThinkingContent
|
||||
}
|
||||
|
||||
func (p *Qwen3VLParser) Init(tools []api.Tool, lastMessage *api.Message) []api.Tool {
|
||||
func (p *Qwen3VLParser) Init(tools []api.Tool, lastMessage *api.Message, thinkValue *api.ThinkValue) []api.Tool {
|
||||
p.tools = tools
|
||||
p.setInitialState(lastMessage)
|
||||
return tools
|
||||
|
||||
@@ -198,7 +198,7 @@ func TestQwen3VLNonThinkingParserStreaming(t *testing.T) {
|
||||
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
parser := Qwen3VLParser{hasThinkingSupport: false}
|
||||
parser.Init([]api.Tool{}, nil)
|
||||
parser.Init([]api.Tool{}, nil, nil)
|
||||
|
||||
for i, step := range tc.steps {
|
||||
parser.buffer.WriteString(step.input)
|
||||
@@ -515,7 +515,7 @@ func TestQwenOldParserStreaming(t *testing.T) {
|
||||
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
parser := Qwen3VLParser{hasThinkingSupport: false}
|
||||
parser.Init([]api.Tool{}, nil)
|
||||
parser.Init([]api.Tool{}, nil, nil)
|
||||
|
||||
for i, step := range tc.steps {
|
||||
parser.buffer.WriteString(step.input)
|
||||
@@ -822,7 +822,7 @@ func TestQwen3VLNonThinkingToolCallWhitespaceHandling(t *testing.T) {
|
||||
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
parser := Qwen3VLParser{hasThinkingSupport: false}
|
||||
parser.Init([]api.Tool{}, nil)
|
||||
parser.Init([]api.Tool{}, nil, nil)
|
||||
|
||||
for i, step := range tc.steps {
|
||||
parser.buffer.WriteString(step.input)
|
||||
|
||||
@@ -205,7 +205,7 @@ func TestQwen3VLThinkingParserStreaming(t *testing.T) {
|
||||
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
parser := Qwen3VLParser{hasThinkingSupport: true}
|
||||
parser.Init([]api.Tool{}, nil)
|
||||
parser.Init([]api.Tool{}, nil, nil)
|
||||
// parser.state = CollectingThinkingContent
|
||||
|
||||
for i, step := range tc.steps {
|
||||
@@ -386,7 +386,7 @@ func TestQwen3VLParserState(t *testing.T) {
|
||||
|
||||
for _, tc := range cases {
|
||||
parser := Qwen3VLParser{hasThinkingSupport: tc.hasThinking}
|
||||
parser.Init(nil, tc.last)
|
||||
parser.Init(nil, tc.last, nil)
|
||||
if parser.state != tc.wantState {
|
||||
t.Errorf("%s: got state %v, want %v", tc.desc, parser.state, tc.wantState)
|
||||
}
|
||||
@@ -437,7 +437,7 @@ func TestQwen3VLThinkingParserWithThinkingPrefill(t *testing.T) {
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
parser := Qwen3VLParser{hasThinkingSupport: true}
|
||||
parser.Init([]api.Tool{}, last)
|
||||
parser.Init([]api.Tool{}, last, nil)
|
||||
|
||||
for i, step := range tc.steps {
|
||||
parser.buffer.WriteString(step.input)
|
||||
@@ -500,7 +500,7 @@ func TestQwen3VLThinkingParserWithNonThinkingPrefill(t *testing.T) {
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
parser := Qwen3VLParser{hasThinkingSupport: true}
|
||||
parser.Init([]api.Tool{}, last)
|
||||
parser.Init([]api.Tool{}, last, nil)
|
||||
|
||||
for i, step := range tc.steps {
|
||||
parser.buffer.WriteString(step.input)
|
||||
@@ -523,7 +523,7 @@ func TestQwen3VLThinkingParserStreamingAssistantPrefillContent(t *testing.T) {
|
||||
// last message is assistant with content ⇒ start in CollectingContent
|
||||
last := &api.Message{Role: "assistant", Content: "has content"}
|
||||
parser := Qwen3VLParser{hasThinkingSupport: true}
|
||||
parser.Init([]api.Tool{}, last)
|
||||
parser.Init([]api.Tool{}, last, nil)
|
||||
|
||||
type step struct {
|
||||
input string
|
||||
@@ -750,7 +750,7 @@ func TestQwen3VLThinkingWhitespaceHandling(t *testing.T) {
|
||||
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
parser := Qwen3VLParser{hasThinkingSupport: true}
|
||||
parser.Init([]api.Tool{}, nil)
|
||||
parser.Init([]api.Tool{}, nil, nil)
|
||||
|
||||
for i, step := range tc.steps {
|
||||
parser.buffer.WriteString(step.input)
|
||||
@@ -859,7 +859,7 @@ func TestQwen3VLToolCallWhitespaceHandling(t *testing.T) {
|
||||
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
parser := Qwen3VLParser{hasThinkingSupport: true}
|
||||
parser.Init([]api.Tool{}, tc.prefillMsg)
|
||||
parser.Init([]api.Tool{}, tc.prefillMsg, nil)
|
||||
|
||||
for i, step := range tc.steps {
|
||||
parser.buffer.WriteString(step.input)
|
||||
|
||||
129
model/renderers/cogito.go
Normal file
129
model/renderers/cogito.go
Normal file
@@ -0,0 +1,129 @@
|
||||
package renderers
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strings"
|
||||
|
||||
"github.com/ollama/ollama/api"
|
||||
)
|
||||
|
||||
type CogitoRenderer struct {
|
||||
isThinking bool
|
||||
}
|
||||
|
||||
func (r *CogitoRenderer) Render(messages []api.Message, tools []api.Tool, thinkValue *api.ThinkValue) (string, error) {
|
||||
var sb strings.Builder
|
||||
|
||||
defaultPrompt := "You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco."
|
||||
|
||||
// thinking is enabled: model must support it AND user must request it (true)
|
||||
enableThinking := r.isThinking && (thinkValue != nil && thinkValue.Bool())
|
||||
|
||||
var systemPrompt string
|
||||
var conversationMessages []api.Message
|
||||
|
||||
if len(messages) > 0 && messages[0].Role == "system" {
|
||||
systemPrompt = messages[0].Content
|
||||
conversationMessages = messages[1:]
|
||||
} else {
|
||||
conversationMessages = messages
|
||||
}
|
||||
|
||||
var finalSystemPrompt string
|
||||
if enableThinking {
|
||||
finalSystemPrompt = "Enable deep thinking subroutine.\n\n" + defaultPrompt
|
||||
if systemPrompt != "" {
|
||||
finalSystemPrompt += "\n\n" + systemPrompt + "\n\n"
|
||||
}
|
||||
} else {
|
||||
finalSystemPrompt = defaultPrompt
|
||||
if systemPrompt != "" {
|
||||
finalSystemPrompt += "\n\n" + systemPrompt
|
||||
}
|
||||
}
|
||||
|
||||
if len(tools) > 0 {
|
||||
if finalSystemPrompt != "" {
|
||||
finalSystemPrompt += "\nYou have the following functions available:\n"
|
||||
} else {
|
||||
finalSystemPrompt = "You have the following functions available:\n"
|
||||
}
|
||||
|
||||
for _, tool := range tools {
|
||||
toolJSON, _ := json.MarshalIndent(tool, "", " ") // TODO(gguo): double check json format
|
||||
finalSystemPrompt += "```json\n" + string(toolJSON) + "\n```\n"
|
||||
}
|
||||
}
|
||||
|
||||
sb.WriteString("<|begin▁of▁sentence|>" + finalSystemPrompt)
|
||||
|
||||
outputsOpen := false
|
||||
isLastUser := false
|
||||
|
||||
for i, message := range conversationMessages {
|
||||
switch message.Role {
|
||||
case "user":
|
||||
isLastUser = true
|
||||
sb.WriteString("<|User|>" + message.Content + "<|Assistant|>")
|
||||
|
||||
case "assistant":
|
||||
isLastUser = false
|
||||
|
||||
if len(message.ToolCalls) > 0 {
|
||||
if message.Content != "" {
|
||||
sb.WriteString(message.Content)
|
||||
}
|
||||
|
||||
sb.WriteString("<|tool▁calls▁begin|>")
|
||||
|
||||
for j, toolCall := range message.ToolCalls {
|
||||
sb.WriteString("<|tool▁call▁begin|>function<|tool▁sep|>" + toolCall.Function.Name)
|
||||
|
||||
argsJSON, _ := json.Marshal(toolCall.Function.Arguments)
|
||||
sb.WriteString("\n```json\n" + string(argsJSON) + "\n```")
|
||||
sb.WriteString("<|tool▁call▁end|>")
|
||||
|
||||
if j < len(message.ToolCalls)-1 {
|
||||
sb.WriteString("\n")
|
||||
}
|
||||
}
|
||||
|
||||
sb.WriteString("<|tool▁calls▁end|><|end▁of▁sentence|>")
|
||||
} else {
|
||||
sb.WriteString(message.Content + "<|end▁of▁sentence|>")
|
||||
}
|
||||
|
||||
case "tool":
|
||||
isLastUser = false
|
||||
|
||||
if !outputsOpen {
|
||||
sb.WriteString("<|tool▁outputs▁begin|>")
|
||||
outputsOpen = true
|
||||
}
|
||||
|
||||
sb.WriteString("<|tool▁output▁begin|>" + message.Content + "<|tool▁output▁end|>")
|
||||
|
||||
hasNextTool := i+1 < len(conversationMessages) && conversationMessages[i+1].Role == "tool"
|
||||
if hasNextTool {
|
||||
sb.WriteString("\n")
|
||||
} else {
|
||||
sb.WriteString("<|tool▁outputs▁end|>")
|
||||
outputsOpen = false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if outputsOpen {
|
||||
sb.WriteString("<|tool▁outputs▁end|>")
|
||||
}
|
||||
|
||||
if !isLastUser {
|
||||
sb.WriteString("<|Assistant|>")
|
||||
}
|
||||
|
||||
if enableThinking {
|
||||
sb.WriteString("<think>\n")
|
||||
}
|
||||
|
||||
return sb.String(), nil
|
||||
}
|
||||
491
model/renderers/cogito_test.go
Normal file
491
model/renderers/cogito_test.go
Normal file
@@ -0,0 +1,491 @@
|
||||
package renderers
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
|
||||
"github.com/ollama/ollama/api"
|
||||
)
|
||||
|
||||
func TestCogitoRenderer(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
messages []api.Message
|
||||
tools []api.Tool
|
||||
thinkValue *api.ThinkValue
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "basic user message",
|
||||
messages: []api.Message{
|
||||
{Role: "user", Content: "Hello, how are you?"},
|
||||
},
|
||||
thinkValue: &api.ThinkValue{Value: false},
|
||||
expected: `<|begin▁of▁sentence|>You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.<|User|>Hello, how are you?<|Assistant|>`,
|
||||
},
|
||||
{
|
||||
name: "basic with system message",
|
||||
messages: []api.Message{
|
||||
{Role: "system", Content: "You are a helpful assistant."},
|
||||
{Role: "user", Content: "Hello, how are you?"},
|
||||
},
|
||||
thinkValue: &api.ThinkValue{Value: false},
|
||||
expected: `<|begin▁of▁sentence|>You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.
|
||||
|
||||
You are a helpful assistant.<|User|>Hello, how are you?<|Assistant|>`,
|
||||
},
|
||||
{
|
||||
name: "conversation with assistant response",
|
||||
messages: []api.Message{
|
||||
{Role: "user", Content: "What is the capital of France?"},
|
||||
{Role: "assistant", Content: "The capital of France is Paris."},
|
||||
{Role: "user", Content: "Fantastic!"},
|
||||
},
|
||||
thinkValue: &api.ThinkValue{Value: false},
|
||||
expected: `<|begin▁of▁sentence|>You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.<|User|>What is the capital of France?<|Assistant|>The capital of France is Paris.<|end▁of▁sentence|><|User|>Fantastic!<|Assistant|>`,
|
||||
},
|
||||
{
|
||||
name: "thinking enabled without system",
|
||||
messages: []api.Message{
|
||||
{Role: "user", Content: "Hello, how are you?"},
|
||||
},
|
||||
thinkValue: &api.ThinkValue{Value: true},
|
||||
expected: `<|begin▁of▁sentence|>Enable deep thinking subroutine.
|
||||
|
||||
You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.<|User|>Hello, how are you?<|Assistant|><think>
|
||||
`,
|
||||
},
|
||||
{
|
||||
name: "thinking enabled with system",
|
||||
messages: []api.Message{
|
||||
{Role: "system", Content: "You are a helpful assistant."},
|
||||
{Role: "user", Content: "Hello, how are you?"},
|
||||
},
|
||||
thinkValue: &api.ThinkValue{Value: true},
|
||||
expected: `<|begin▁of▁sentence|>Enable deep thinking subroutine.
|
||||
|
||||
You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.
|
||||
|
||||
You are a helpful assistant.
|
||||
|
||||
<|User|>Hello, how are you?<|Assistant|><think>
|
||||
`,
|
||||
},
|
||||
{
|
||||
name: "thinking disabled",
|
||||
messages: []api.Message{
|
||||
{Role: "user", Content: "Hello, how are you?"},
|
||||
},
|
||||
thinkValue: &api.ThinkValue{Value: false},
|
||||
expected: `<|begin▁of▁sentence|>You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.<|User|>Hello, how are you?<|Assistant|>`,
|
||||
},
|
||||
{
|
||||
name: "with tools",
|
||||
messages: []api.Message{
|
||||
{Role: "user", Content: "What's the weather like?"},
|
||||
},
|
||||
thinkValue: &api.ThinkValue{Value: false},
|
||||
tools: []api.Tool{
|
||||
{
|
||||
Type: "function",
|
||||
Function: api.ToolFunction{
|
||||
Name: "get_weather",
|
||||
Description: "Get current weather",
|
||||
Parameters: api.ToolFunctionParameters{
|
||||
Type: "object",
|
||||
Properties: map[string]api.ToolProperty{
|
||||
"location": {
|
||||
Type: api.PropertyType{"string"},
|
||||
Description: "City name",
|
||||
},
|
||||
},
|
||||
Required: []string{"location"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: `<|begin▁of▁sentence|>You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.
|
||||
You have the following functions available:
|
||||
` + "```json\n" + `{
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "get_weather",
|
||||
"description": "Get current weather",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"location"
|
||||
],
|
||||
"properties": {
|
||||
"location": {
|
||||
"type": "string",
|
||||
"description": "City name"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
` + "```\n" + `<|User|>What's the weather like?<|Assistant|>`,
|
||||
},
|
||||
{
|
||||
name: "assistant with tool calls",
|
||||
messages: []api.Message{
|
||||
{Role: "user", Content: "What's the weather in Paris?"},
|
||||
{
|
||||
Role: "assistant",
|
||||
Content: "I'll check the weather in Paris for you.",
|
||||
ToolCalls: []api.ToolCall{
|
||||
{
|
||||
Function: api.ToolCallFunction{
|
||||
Name: "get_weather",
|
||||
Arguments: api.ToolCallFunctionArguments{
|
||||
"location": "Paris",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
thinkValue: &api.ThinkValue{Value: false},
|
||||
expected: `<|begin▁of▁sentence|>You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.<|User|>What's the weather in Paris?<|Assistant|>I'll check the weather in Paris for you.<|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>get_weather
|
||||
` + "```json\n" + `{"location":"Paris"}
|
||||
` + "```" + `<|tool▁call▁end|><|tool▁calls▁end|><|end▁of▁sentence|><|Assistant|>`,
|
||||
},
|
||||
{
|
||||
name: "tool response",
|
||||
messages: []api.Message{
|
||||
{Role: "user", Content: "What's the weather in Paris?"},
|
||||
{
|
||||
Role: "assistant",
|
||||
ToolCalls: []api.ToolCall{
|
||||
{
|
||||
Function: api.ToolCallFunction{
|
||||
Name: "get_weather",
|
||||
Arguments: api.ToolCallFunctionArguments{
|
||||
"location": "Paris",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{Role: "tool", Content: "Temperature: 22°C, Sunny"},
|
||||
},
|
||||
thinkValue: &api.ThinkValue{Value: false},
|
||||
expected: `<|begin▁of▁sentence|>You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.<|User|>What's the weather in Paris?<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>get_weather
|
||||
` + "```json\n" + `{"location":"Paris"}
|
||||
` + "```" + `<|tool▁call▁end|><|tool▁calls▁end|><|end▁of▁sentence|><|tool▁outputs▁begin|><|tool▁output▁begin|>Temperature: 22°C, Sunny<|tool▁output▁end|><|tool▁outputs▁end|><|Assistant|>`,
|
||||
},
|
||||
{
|
||||
name: "multiple tool responses",
|
||||
messages: []api.Message{
|
||||
{Role: "user", Content: "Get weather for Paris and London"},
|
||||
{
|
||||
Role: "assistant",
|
||||
ToolCalls: []api.ToolCall{
|
||||
{
|
||||
Function: api.ToolCallFunction{
|
||||
Name: "get_weather",
|
||||
Arguments: api.ToolCallFunctionArguments{
|
||||
"location": "Paris",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Function: api.ToolCallFunction{
|
||||
Name: "get_weather",
|
||||
Arguments: api.ToolCallFunctionArguments{
|
||||
"location": "London",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{Role: "tool", Content: "Paris: 22°C, Sunny"},
|
||||
{Role: "tool", Content: "London: 18°C, Cloudy"},
|
||||
},
|
||||
thinkValue: &api.ThinkValue{Value: false},
|
||||
expected: `<|begin▁of▁sentence|>You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.<|User|>Get weather for Paris and London<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>get_weather
|
||||
` + "```json\n" + `{"location":"Paris"}
|
||||
` + "```" + `<|tool▁call▁end|>
|
||||
<|tool▁call▁begin|>function<|tool▁sep|>get_weather
|
||||
` + "```json\n" + `{"location":"London"}
|
||||
` + "```" + `<|tool▁call▁end|><|tool▁calls▁end|><|end▁of▁sentence|><|tool▁outputs▁begin|><|tool▁output▁begin|>Paris: 22°C, Sunny<|tool▁output▁end|>
|
||||
<|tool▁output▁begin|>London: 18°C, Cloudy<|tool▁output▁end|><|tool▁outputs▁end|><|Assistant|>`,
|
||||
},
|
||||
{
|
||||
name: "thinking with tools",
|
||||
messages: []api.Message{
|
||||
{Role: "user", Content: "What's the weather like?"},
|
||||
},
|
||||
tools: []api.Tool{
|
||||
{
|
||||
Type: "function",
|
||||
Function: api.ToolFunction{
|
||||
Name: "get_weather",
|
||||
Description: "Get current weather",
|
||||
Parameters: api.ToolFunctionParameters{
|
||||
Type: "object",
|
||||
Properties: map[string]api.ToolProperty{
|
||||
"location": {
|
||||
Type: api.PropertyType{"string"},
|
||||
Description: "City name",
|
||||
},
|
||||
},
|
||||
Required: []string{"location"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
thinkValue: &api.ThinkValue{Value: true},
|
||||
expected: `<|begin▁of▁sentence|>Enable deep thinking subroutine.
|
||||
|
||||
You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.
You have the following functions available:
` + "```json\n" + `{
  "type": "function",
  "function": {
    "name": "get_weather",
    "description": "Get current weather",
    "parameters": {
      "type": "object",
      "required": [
        "location"
      ],
      "properties": {
        "location": {
          "type": "string",
          "description": "City name"
        }
      }
    }
  }
}
` + "```\n" + `<|User|>What's the weather like?<|Assistant|><think>
`,
		},
		// test cases based on cogito
		{
			name: "single_turn_thinking_false",
			messages: []api.Message{
				{Role: "user", Content: "Hello"},
			},
			thinkValue: &api.ThinkValue{Value: false},
			expected:   `<|begin▁of▁sentence|>You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.<|User|>Hello<|Assistant|>`,
		},
		{
			name: "single_turn_thinking_true",
			messages: []api.Message{
				{Role: "user", Content: "Hello"},
			},
			thinkValue: &api.ThinkValue{Value: true},
			expected: `<|begin▁of▁sentence|>Enable deep thinking subroutine.

You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.<|User|>Hello<|Assistant|><think>
`,
		},
		{
			name: "multi_turn_thinking_false",
			messages: []api.Message{
				{Role: "user", Content: "Hello"},
				{Role: "assistant", Content: "Hi there!"},
				{Role: "user", Content: "How are you?"},
			},
			thinkValue: &api.ThinkValue{Value: false},
			expected:   `<|begin▁of▁sentence|>You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.<|User|>Hello<|Assistant|>Hi there!<|end▁of▁sentence|><|User|>How are you?<|Assistant|>`,
		},
		{
			name: "multi_turn_thinking_true",
			messages: []api.Message{
				{Role: "user", Content: "Hello"},
				{Role: "assistant", Content: "Hi there!"},
				{Role: "user", Content: "How are you?"},
			},
			thinkValue: &api.ThinkValue{Value: true},
			expected: `<|begin▁of▁sentence|>Enable deep thinking subroutine.

You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.<|User|>Hello<|Assistant|>Hi there!<|end▁of▁sentence|><|User|>How are you?<|Assistant|><think>
`,
		},
		{
			name: "multi_with_system_thinking_false",
			messages: []api.Message{
				{Role: "system", Content: "You are a helpful assistant"},
				{Role: "user", Content: "Start"},
				{Role: "assistant", Content: "Okay"},
			},
			thinkValue: &api.ThinkValue{Value: false},
			expected: `<|begin▁of▁sentence|>You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.

You are a helpful assistant<|User|>Start<|Assistant|>Okay<|end▁of▁sentence|><|Assistant|>`,
		},
		{
			name: "multi_with_system_thinking_true",
			messages: []api.Message{
				{Role: "system", Content: "You are a helpful assistant"},
				{Role: "user", Content: "Start"},
				{Role: "assistant", Content: "Okay"},
			},
			thinkValue: &api.ThinkValue{Value: true},
			expected: `<|begin▁of▁sentence|>Enable deep thinking subroutine.

You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.

You are a helpful assistant

<|User|>Start<|Assistant|>Okay<|end▁of▁sentence|><|Assistant|><think>
`,
		},
		{
			name: "multi_with_system2_thinking_false",
			messages: []api.Message{
				{Role: "system", Content: "You are a pirate chatbot who always responds in pirate speak!"},
				{Role: "user", Content: "Give me a short introduction to LLMs."},
				{Role: "assistant", Content: "Arrr! I'm a pirate"},
				{Role: "user", Content: "Tell me more about LLMs."},
			},
			thinkValue: &api.ThinkValue{Value: false},
			expected: `<|begin▁of▁sentence|>You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.

You are a pirate chatbot who always responds in pirate speak!<|User|>Give me a short introduction to LLMs.<|Assistant|>Arrr! I'm a pirate<|end▁of▁sentence|><|User|>Tell me more about LLMs.<|Assistant|>`,
		},
		{
			name: "multi_with_system2_thinking_true",
			messages: []api.Message{
				{Role: "system", Content: "You are a pirate chatbot who always responds in pirate speak!"},
				{Role: "user", Content: "Give me a short introduction to LLMs."},
				{Role: "assistant", Content: "Arrr! I'm a pirate"},
				{Role: "user", Content: "Tell me more about LLMs."},
			},
			thinkValue: &api.ThinkValue{Value: true},
			expected: `<|begin▁of▁sentence|>Enable deep thinking subroutine.

You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.

You are a pirate chatbot who always responds in pirate speak!

<|User|>Give me a short introduction to LLMs.<|Assistant|>Arrr! I'm a pirate<|end▁of▁sentence|><|User|>Tell me more about LLMs.<|Assistant|><think>
`,
		},
		// tools
		{
			name: "tool_calls_only_no_content",
			messages: []api.Message{
				{Role: "user", Content: "Get weather for Paris"},
				{
					Role: "assistant",
					ToolCalls: []api.ToolCall{
						{
							Function: api.ToolCallFunction{
								Name: "get_weather",
								Arguments: api.ToolCallFunctionArguments{
									"location": "Paris",
								},
							},
						},
					},
				},
			},
			thinkValue: &api.ThinkValue{Value: false},
			expected: `<|begin▁of▁sentence|>You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.<|User|>Get weather for Paris<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>get_weather
` + "```json\n" + `{"location":"Paris"}
` + "```" + `<|tool▁call▁end|><|tool▁calls▁end|><|end▁of▁sentence|><|Assistant|>`,
		},
		{
			name: "complex_tool_arguments",
			messages: []api.Message{
				{Role: "user", Content: "Process complex data"},
				{
					Role: "assistant",
					ToolCalls: []api.ToolCall{
						{
							Function: api.ToolCallFunction{
								Name: "process_data",
								Arguments: api.ToolCallFunctionArguments{
									"items": []any{"item1", "item2", "item3"},
									"config": map[string]any{
										"enabled":   true,
										"threshold": 0.95,
										"tags":      []string{"important", "urgent"},
									},
								},
							},
						},
					},
				},
			},
			thinkValue: &api.ThinkValue{Value: false},
			expected: `<|begin▁of▁sentence|>You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.<|User|>Process complex data<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>process_data
` + "```json\n" + `{"config":{"enabled":true,"tags":["important","urgent"],"threshold":0.95},"items":["item1","item2","item3"]}
` + "```" + `<|tool▁call▁end|><|tool▁calls▁end|><|end▁of▁sentence|><|Assistant|>`,
		},
		{
			name: "empty_messages",
			messages: []api.Message{
				{Role: "system", Content: ""},
				{Role: "user", Content: "Hello"},
				{Role: "assistant", Content: ""},
			},
			thinkValue: &api.ThinkValue{Value: false},
			expected:   `<|begin▁of▁sentence|>You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.<|User|>Hello<|Assistant|><|end▁of▁sentence|><|Assistant|>`,
		},
		{
			name: "thinking_with_empty_assistant_content",
			messages: []api.Message{
				{Role: "user", Content: "Think about this"},
				{Role: "assistant", Content: ""},
			},
			thinkValue: &api.ThinkValue{Value: true},
			expected: `<|begin▁of▁sentence|>Enable deep thinking subroutine.

You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.<|User|>Think about this<|Assistant|><|end▁of▁sentence|><|Assistant|><think>
`,
		},
		{
			name: "multiple_system_messages",
			messages: []api.Message{
				{Role: "system", Content: "First instruction"},
				{Role: "system", Content: "Second instruction"},
				{Role: "user", Content: "Hello"},
			},
			thinkValue: &api.ThinkValue{Value: false},
			expected: `<|begin▁of▁sentence|>You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.

First instruction<|User|>Hello<|Assistant|>`,
		},
		{
			name: "special_characters_in_content",
			messages: []api.Message{
				{Role: "user", Content: "What about <|special|> tokens and \"quotes\"?"},
				{Role: "assistant", Content: "They're handled normally in content."},
			},
			thinkValue: &api.ThinkValue{Value: false},
			expected:   `<|begin▁of▁sentence|>You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.<|User|>What about <|special|> tokens and "quotes"?<|Assistant|>They're handled normally in content.<|end▁of▁sentence|><|Assistant|>`,
		},
		{
			name: "long_conversation_multiple_rounds",
			messages: []api.Message{
				{Role: "user", Content: "Hi"},
				{Role: "assistant", Content: "Hello!"},
				{Role: "user", Content: "How are you?"},
				{Role: "assistant", Content: "Good, thanks!"},
				{Role: "user", Content: "What's the weather?"},
			},
			thinkValue: &api.ThinkValue{Value: false},
			expected:   `<|begin▁of▁sentence|>You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.<|User|>Hi<|Assistant|>Hello!<|end▁of▁sentence|><|User|>How are you?<|Assistant|>Good, thanks!<|end▁of▁sentence|><|User|>What's the weather?<|Assistant|>`,
		},
	}

	renderer := &CogitoRenderer{isThinking: true}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			rendered, err := renderer.Render(tt.messages, tt.tools, tt.thinkValue)
			if err != nil {
				t.Fatalf("Render() error = %v", err)
			}
			if diff := cmp.Diff(tt.expected, rendered); diff != "" {
				t.Errorf("Render() mismatch (-want +got):\n%s", diff)
			}
		})
	}
}
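For reference, a minimal sketch of driving this renderer directly, using only the types exercised by the table above (written as if inside package renderers, since isThinking is an unexported field); the output matches the single_turn_thinking_true case:

renderer := &CogitoRenderer{isThinking: true}
prompt, err := renderer.Render([]api.Message{
	{Role: "user", Content: "Hello"},
}, nil, &api.ThinkValue{Value: true})
if err != nil {
	// rendering failed; surface the error to the caller
	panic(err)
}
// prompt begins with "<|begin▁of▁sentence|>Enable deep thinking subroutine."
// and ends with "<|Assistant|><think>\n", per the expected string above
_ = prompt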
158
model/renderers/intellect3.go
Normal file
@@ -0,0 +1,158 @@
package renderers

import (
	"strings"

	"github.com/ollama/ollama/api"
)

const intellect3DefaultSystemMessage = "You are INTELLECT-3, a helpful assistant developed by Prime Intellect, that can interact with a computer to solve tasks."

type Intellect3Renderer struct{}

func (r *Intellect3Renderer) Render(messages []api.Message, tools []api.Tool, think *api.ThinkValue) (string, error) {
	var sb strings.Builder

	// filter out system messages; the first one (if any) wins
	var systemMessage string
	var filteredMessages []api.Message
	for _, message := range messages {
		if message.Role != "system" {
			filteredMessages = append(filteredMessages, message)
			continue
		}

		if systemMessage == "" {
			systemMessage = message.Content
		}
	}

	if systemMessage != "" || len(tools) > 0 {
		sb.WriteString(imStartTag + "system\n")

		// Use the default system message when tools are present but the user
		// did not supply a system message
		if systemMessage == "" && len(tools) > 0 {
			systemMessage = intellect3DefaultSystemMessage
		}

		sb.WriteString(systemMessage)

		if len(tools) > 0 {
			sb.WriteString("\n\n# Tools\n\nYou have access to the following functions:\n\n")
			sb.WriteString("<tools>")
			for _, tool := range tools {
				sb.WriteString("\n")
				sb.WriteString("<function>\n")
				sb.WriteString("<name>" + tool.Function.Name + "</name>")
				if tool.Function.Description != "" {
					sb.WriteString("\n<description>" + tool.Function.Description + "</description>")
				}
				sb.WriteString("\n<parameters>")

				for name, prop := range tool.Function.Parameters.Properties {
					sb.WriteString("\n<parameter>")
					sb.WriteString("\n<name>" + name + "</name>")

					if len(prop.Type) > 0 {
						sb.WriteString("\n<type>" + formatToolDefinitionType(prop.Type) + "</type>")
					}

					if prop.Description != "" {
						sb.WriteString("\n<description>" + prop.Description + "</description>")
					}

					// Render any additional keys not already handled
					handledKeys := map[string]bool{
						"type":        true,
						"description": true,
					}
					sb.WriteString(renderAdditionalKeys(prop, handledKeys))

					sb.WriteString("\n</parameter>")
				}

				// Render extra keys for parameters (everything except 'type' and 'properties')
				paramHandledKeys := map[string]bool{
					"type":       true,
					"properties": true,
				}
				sb.WriteString(renderAdditionalKeys(tool.Function.Parameters, paramHandledKeys))

				sb.WriteString("\n</parameters>")
				sb.WriteString("\n</function>")
			}
			sb.WriteString("\n</tools>")
			sb.WriteString("\n\nIf you choose to call a function ONLY reply in the following format with NO suffix:\n\n<tool_call>\n<function=example_function_name>\n<parameter=example_parameter_1>\nvalue_1\n</parameter>\n<parameter=example_parameter_2>\nThis is the value for the second parameter\nthat can span\nmultiple lines\n</parameter>\n</function>\n</tool_call>\n\n<IMPORTANT>\nReminder:\n- Function calls MUST follow the specified format: an inner <function=...></function> block must be nested within <tool_call></tool_call> XML tags\n- Required parameters MUST be specified\n- You may provide optional reasoning for your function call in natural language BEFORE the function call, but NOT after\n- If there is no function call available, answer the question like normal with your current knowledge and do not tell the user about function calls\n</IMPORTANT>")
		}

		sb.WriteString(imEndTag + "\n")
	}

	for i, message := range filteredMessages {
		lastMessage := i == len(filteredMessages)-1
		prefill := lastMessage && message.Role == "assistant"
		switch message.Role {
		case "assistant":
			if len(message.ToolCalls) > 0 {
				sb.WriteString(imStartTag + "assistant\n")

				// Add thinking tags if present
				if message.Thinking != "" {
					sb.WriteString("<think>" + strings.TrimSpace(message.Thinking) + "</think>\n")
				}

				if message.Content != "" {
					sb.WriteString(strings.TrimSpace(message.Content) + "\n")
				}

				for _, toolCall := range message.ToolCalls {
					sb.WriteString("\n<tool_call>\n<function=" + toolCall.Function.Name + ">")
					for name, value := range toolCall.Function.Arguments {
						valueStr := formatToolCallArgument(value)
						sb.WriteString("\n<parameter=" + name + ">\n" + valueStr + "\n</parameter>")
					}
					sb.WriteString("\n</function>\n</tool_call>")
				}
				sb.WriteString(imEndTag + "\n")
			} else {
				sb.WriteString(imStartTag + "assistant\n")

				// Add thinking tags if present
				if message.Thinking != "" {
					sb.WriteString("<think>" + strings.TrimSpace(message.Thinking) + "</think>\n")
				}

				// Add content if present
				if message.Content != "" {
					sb.WriteString(message.Content)
				}

				if !prefill {
					sb.WriteString(imEndTag + "\n")
				}
			}
		case "tool":
			if i == 0 || filteredMessages[i-1].Role != "tool" {
				sb.WriteString(imStartTag + "user\n")
			}

			sb.WriteString("<tool_response>\n")
			sb.WriteString(message.Content)
			sb.WriteString("\n</tool_response>\n")

			if i == len(filteredMessages)-1 || filteredMessages[i+1].Role != "tool" {
				sb.WriteString(imEndTag + "\n")
			}
		default:
			sb.WriteString(imStartTag + message.Role + "\n")
			sb.WriteString(message.Content)
			sb.WriteString(imEndTag + "\n")
		}

		if lastMessage && !prefill {
			sb.WriteString(imStartTag + "assistant\n<think>")
		}
	}

	return sb.String(), nil
}
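A quick sketch of the prompt shape this renderer produces for a plain single-turn chat (mirroring the first case in the tests below); note that unless the final message is an assistant prefill, the prompt always closes by opening a <think> block for the next turn:

r := &Intellect3Renderer{}
prompt, err := r.Render([]api.Message{{Role: "user", Content: "Hello!"}}, nil, nil)
if err != nil {
	panic(err)
}
// prompt is:
//   <|im_start|>user
//   Hello!<|im_end|>
//   <|im_start|>assistant
//   <think>
_ = prompt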
218
model/renderers/intellect3_test.go
Normal file
@@ -0,0 +1,218 @@
package renderers

import (
	"testing"

	"github.com/google/go-cmp/cmp"

	"github.com/ollama/ollama/api"
)

func TestIntellect3Renderer(t *testing.T) {
	tests := []struct {
		name     string
		msgs     []api.Message
		tools    []api.Tool
		expected string
	}{
		{
			name: "basic user message",
			msgs: []api.Message{
				{Role: "user", Content: "Hello!"},
			},
			expected: "<|im_start|>user\n" +
				"Hello!<|im_end|>\n" +
				"<|im_start|>assistant\n" +
				"<think>",
		},
		{
			name: "with system message",
			msgs: []api.Message{
				{Role: "system", Content: "You are helpful."},
				{Role: "user", Content: "Hi"},
			},
			expected: "<|im_start|>system\n" +
				"You are helpful.<|im_end|>\n" +
				"<|im_start|>user\n" +
				"Hi<|im_end|>\n" +
				"<|im_start|>assistant\n" +
				"<think>",
		},
		{
			name: "multi-turn conversation",
			msgs: []api.Message{
				{Role: "user", Content: "Hello"},
				{Role: "assistant", Content: "Hi!"},
				{Role: "user", Content: "Bye"},
			},
			expected: "<|im_start|>user\n" +
				"Hello<|im_end|>\n" +
				"<|im_start|>assistant\n" +
				"Hi!<|im_end|>\n" +
				"<|im_start|>user\n" +
				"Bye<|im_end|>\n" +
				"<|im_start|>assistant\n" +
				"<think>",
		},
		{
			name: "with tools no system message",
			msgs: []api.Message{
				{Role: "user", Content: "Weather?"},
			},
			tools: []api.Tool{
				{
					Type: "function",
					Function: api.ToolFunction{
						Name:        "get_weather",
						Description: "Get weather",
						Parameters: api.ToolFunctionParameters{
							Type: "object",
							Properties: map[string]api.ToolProperty{
								"location": {Type: api.PropertyType{"string"}},
							},
						},
					},
				},
			},
			expected: "<|im_start|>system\n" +
				"You are INTELLECT-3, a helpful assistant developed by Prime Intellect, that can interact with a computer to solve tasks.\n\n" +
				"# Tools\n\n" +
				"You have access to the following functions:\n\n" +
				"<tools>\n" +
				"<function>\n" +
				"<name>get_weather</name>\n" +
				"<description>Get weather</description>\n" +
				"<parameters>\n" +
				"<parameter>\n" +
				"<name>location</name>\n" +
				"<type>string</type>\n" +
				"</parameter>\n" +
				"</parameters>\n" +
				"</function>\n" +
				"</tools>\n\n" +
				"If you choose to call a function ONLY reply in the following format with NO suffix:\n\n" +
				"<tool_call>\n" +
				"<function=example_function_name>\n" +
				"<parameter=example_parameter_1>\n" +
				"value_1\n" +
				"</parameter>\n" +
				"<parameter=example_parameter_2>\n" +
				"This is the value for the second parameter\n" +
				"that can span\n" +
				"multiple lines\n" +
				"</parameter>\n" +
				"</function>\n" +
				"</tool_call>\n\n" +
				"<IMPORTANT>\n" +
				"Reminder:\n" +
				"- Function calls MUST follow the specified format: an inner <function=...></function> block must be nested within <tool_call></tool_call> XML tags\n" +
				"- Required parameters MUST be specified\n" +
				"- You may provide optional reasoning for your function call in natural language BEFORE the function call, but NOT after\n" +
				"- If there is no function call available, answer the question like normal with your current knowledge and do not tell the user about function calls\n" +
				"</IMPORTANT><|im_end|>\n" +
				"<|im_start|>user\n" +
				"Weather?<|im_end|>\n" +
				"<|im_start|>assistant\n" +
				"<think>",
		},
		{
			name: "tool call and response",
			msgs: []api.Message{
				{Role: "user", Content: "Weather?"},
				{
					Role:    "assistant",
					Content: "Checking.",
					ToolCalls: []api.ToolCall{
						{
							ID: "1",
							Function: api.ToolCallFunction{
								Name:      "get_weather",
								Arguments: map[string]any{"location": "SF"},
							},
						},
					},
				},
				{Role: "tool", Content: `{"temp": 68}`, ToolCallID: "1"},
			},
			tools: []api.Tool{
				{
					Type: "function",
					Function: api.ToolFunction{
						Name: "get_weather",
						Parameters: api.ToolFunctionParameters{
							Type: "object",
							Properties: map[string]api.ToolProperty{
								"location": {Type: api.PropertyType{"string"}},
							},
						},
					},
				},
			},
			expected: "<|im_start|>system\n" +
				"You are INTELLECT-3, a helpful assistant developed by Prime Intellect, that can interact with a computer to solve tasks.\n\n" +
				"# Tools\n\n" +
				"You have access to the following functions:\n\n" +
				"<tools>\n" +
				"<function>\n" +
				"<name>get_weather</name>\n" +
				"<parameters>\n" +
				"<parameter>\n" +
				"<name>location</name>\n" +
				"<type>string</type>\n" +
				"</parameter>\n" +
				"</parameters>\n" +
				"</function>\n" +
				"</tools>\n\n" +
				"If you choose to call a function ONLY reply in the following format with NO suffix:\n\n" +
				"<tool_call>\n" +
				"<function=example_function_name>\n" +
				"<parameter=example_parameter_1>\n" +
				"value_1\n" +
				"</parameter>\n" +
				"<parameter=example_parameter_2>\n" +
				"This is the value for the second parameter\n" +
				"that can span\n" +
				"multiple lines\n" +
				"</parameter>\n" +
				"</function>\n" +
				"</tool_call>\n\n" +
				"<IMPORTANT>\n" +
				"Reminder:\n" +
				"- Function calls MUST follow the specified format: an inner <function=...></function> block must be nested within <tool_call></tool_call> XML tags\n" +
				"- Required parameters MUST be specified\n" +
				"- You may provide optional reasoning for your function call in natural language BEFORE the function call, but NOT after\n" +
				"- If there is no function call available, answer the question like normal with your current knowledge and do not tell the user about function calls\n" +
				"</IMPORTANT><|im_end|>\n" +
				"<|im_start|>user\n" +
				"Weather?<|im_end|>\n" +
				"<|im_start|>assistant\n" +
				"Checking.\n\n" +
				"<tool_call>\n" +
				"<function=get_weather>\n" +
				"<parameter=location>\n" +
				"SF\n" +
				"</parameter>\n" +
				"</function>\n" +
				"</tool_call><|im_end|>\n" +
				"<|im_start|>user\n" +
				"<tool_response>\n" +
				`{"temp": 68}` + "\n" +
				"</tool_response>\n" +
				"<|im_end|>\n" +
				"<|im_start|>assistant\n" +
				"<think>",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			rendered, err := (&Intellect3Renderer{}).Render(tt.msgs, tt.tools, nil)
			if err != nil {
				t.Fatal(err)
			}
			if diff := cmp.Diff(rendered, tt.expected); diff != "" {
				t.Errorf("mismatch (-got +want):\n%s", diff)
			}
		})
	}
}
@@ -56,6 +56,12 @@ func rendererForName(name string) Renderer {
	case "qwen3-vl-thinking":
		renderer := &Qwen3VLRenderer{isThinking: true, useImgTags: RenderImgTags}
		return renderer
	case "cogito":
		renderer := &CogitoRenderer{isThinking: true}
		return renderer
	case "intellect-3":
		renderer := &Intellect3Renderer{}
		return renderer
	default:
		return nil
	}

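With the new case wired in, renderer lookup stays name-based. A hedged sketch of a call site inside this package (assuming, as above, that Render is the only method a caller needs):

if r := rendererForName("intellect-3"); r != nil {
	prompt, err := r.Render([]api.Message{{Role: "user", Content: "Hi"}}, nil, nil)
	if err != nil {
		return err // hypothetical surrounding function that returns an error
	}
	_ = prompt // feed the rendered prompt to the model
}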
@@ -10,7 +10,8 @@ import (
)

type WordPiece struct {
	vocab *Vocabulary
	vocab     *Vocabulary
	lowercase bool
}

// ggmlPrefix is the prefix used by GGML vocabularies to indicate word boundaries.
@@ -114,8 +115,10 @@ func (wpm WordPiece) Encode(s string, addSpecial bool) ([]int32, error) {
			subword = ggmlPrefix + subword
		}

		// TODO: some models might not want [ToLower]
		piece = wpm.vocab.Encode(strings.ToLower(subword))
		if wpm.lowercase {
			subword = strings.ToLower(subword)
		}
		piece = wpm.vocab.Encode(subword)
		if piece >= 0 {
			break
		}
@@ -160,8 +163,9 @@ func (wpm WordPiece) Vocabulary() *Vocabulary {

var _ TextProcessor = (*WordPiece)(nil)

func NewWordPiece(vocab *Vocabulary) WordPiece {
func NewWordPiece(vocab *Vocabulary, lowercase bool) WordPiece {
	return WordPiece{
		vocab: vocab,
		vocab:     vocab,
		lowercase: lowercase,
	}
}

@@ -15,7 +15,9 @@ func TestWordPiece(t *testing.T) {
		AddEOS: true,
		BOS:    []int32{1},
		EOS:    []int32{2},
	})
	},
		true, // lowercase
	)

	ids, err := wpm.Encode("Hello world!", true)
	if err != nil {

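The tokenizer change makes lowercasing a per-model constructor option instead of the unconditional ToLower it replaces. A sketch of the two call-site flavors (vocabulary construction elided):

// cased model: look subwords up with their original casing
wpm := NewWordPiece(vocab, false)

// uncased, BERT-style model: fold each subword to lowercase before the vocab lookup
wpmLower := NewWordPiece(vocab, true)

ids, err := wpmLower.Encode("Hello world!", true) // subwords are lowercased before lookup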
@@ -340,7 +340,7 @@ func (s *Server) GenerateHandler(c *gin.Context) {
		builtinParser = parsers.ParserForName(m.Config.Parser)
		if builtinParser != nil {
			// no tools or last message for generate endpoint
			builtinParser.Init(nil, nil)
			builtinParser.Init(nil, nil, req.Think)
		}
	}

@@ -1541,7 +1541,7 @@ func (s *Server) GenerateRoutes(rc *ollama.Registry) (http.Handler, error) {

func Serve(ln net.Listener) error {
	slog.SetDefault(logutil.NewLogger(os.Stderr, envconfig.LogLevel()))
	slog.Info("server environment configuration", "", envconfig.Enabled())
	slog.Info("server config", "env", envconfig.Values())

	blobsDir, err := GetBlobsPath("")
	if err != nil {
@@ -2051,7 +2051,7 @@ func (s *Server) ChatHandler(c *gin.Context) {
		lastMessage = &msgs[len(msgs)-1]
	}
	// Initialize parser and get processed tools
	processedTools = builtinParser.Init(req.Tools, lastMessage)
	processedTools = builtinParser.Init(req.Tools, lastMessage, req.Think)
	}
}
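Both handlers now thread the request's think setting into the built-in parser. The Parser interface itself is outside this diff, so the following is only a sketch of what a conforming Init might look like under the new three-argument signature (the type and field names here are hypothetical):

type sketchParser struct {
	think *api.ThinkValue
}

func (p *sketchParser) Init(tools []api.Tool, lastMessage *api.Message, think *api.ThinkValue) []api.Tool {
	p.think = think // retained so later parsing can account for thinking output
	return tools    // this sketch does no tool preprocessing
}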