Compare commits: v0.13.0 ... parth/remo (13 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 7b152860c2 | |
| | 467bbc0dd5 | |
| | 6d9f9323c5 | |
| | 0c2489605d | |
| | 8b1b89a984 | |
| | 47e272c35a | |
| | 417a81fda3 | |
| | dba62ff3a5 | |
| | d70e935526 | |
| | 5c1063df7f | |
| | cb485b2019 | |
| | b2af50960f | |
| | eac5b8bfbd | |
.gitattributes (vendored): 4 changes
@@ -15,8 +15,12 @@ ml/backend/**/*.cu linguist-vendored
ml/backend/**/*.cuh linguist-vendored
ml/backend/**/*.m linguist-vendored
ml/backend/**/*.metal linguist-vendored
ml/backend/**/*.comp linguist-vendored
ml/backend/**/*.glsl linguist-vendored
ml/backend/**/CMakeLists.txt linguist-vendored

app/webview linguist-vendored

llama/build-info.cpp linguist-generated
ml/backend/ggml/ggml/src/ggml-metal/ggml-metal-embed.s linguist-generated
@@ -23,7 +23,6 @@ linters:
- nolintlint
- nosprintfhostport
- perfsprint
- prealloc
- sloglint
- staticcheck
- unconvert
@@ -397,8 +397,8 @@ func checkUserLoggedIn(uiServerPort int) bool {
// handleConnectURLScheme fetches the connect URL and opens it in the browser
func handleConnectURLScheme() {
    if checkUserLoggedIn(uiServerPort) {
        slog.Info("user is already logged in, opening settings instead")
        sendUIRequestMessage("/")
        slog.Info("user is already logged in, opening app instead")
        showWindow(wv.webview.Window())
        return
    }
@@ -466,6 +466,8 @@ func handleURLSchemeInCurrentInstance(urlSchemeRequest string) {
    if isConnect {
        handleConnectURLScheme()
    } else {
        sendUIRequestMessage("/")
        if wv.webview != nil {
            showWindow(wv.webview.Window())
        }
    }
}
@@ -24,27 +24,14 @@ bool firstTimeRun,startHidden; // Set in run before initialization
for (NSURL *url in urls) {
    if ([url.scheme isEqualToString:@"ollama"]) {
        NSString *path = url.path;
        if (!path || [path isEqualToString:@""]) {
            // For URLs like ollama://settings (without triple slash),
            // the "settings" part is parsed as the host, not the path.
            // We need to convert it to a path by prepending "/"
            if (url.host && ![url.host isEqualToString:@""]) {
                path = [@"/" stringByAppendingString:url.host];
            } else {
                path = @"/";
            }
        }

        if ([path isEqualToString:@"/connect"] || [url.host isEqualToString:@"connect"]) {
        if (path && ([path isEqualToString:@"/connect"] || [url.host isEqualToString:@"connect"])) {
            // Special case: handle connect by opening browser instead of app
            handleConnectURL();
        } else {
            // Set app to be active and visible
            [NSApp setActivationPolicy:NSApplicationActivationPolicyRegular];
            [NSApp activateIgnoringOtherApps:YES];

            // Open the path with the UI
            [self uiRequest:path];
        }

        break;
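The comment block above describes how a URL like `ollama://settings` puts "settings" in the URL host rather than the path. A small standalone Go sketch (illustrative only, using the standard `net/url` package rather than the app code) shows the same parsing behavior:

```go
package main

import (
    "fmt"
    "net/url"
)

func main() {
    for _, raw := range []string{"ollama://settings", "ollama:///settings", "ollama://connect"} {
        u, err := url.Parse(raw)
        if err != nil {
            panic(err)
        }
        // ollama://settings  -> Host "settings", empty Path
        // ollama:///settings -> empty Host, Path "/settings"
        fmt.Printf("%-20s host=%q path=%q\n", raw, u.Host, u.Path)
    }
}
```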
@@ -260,7 +247,7 @@ bool firstTimeRun,startHidden; // Set in run before initialization
}

- (void)openHelp:(id)sender {
    NSURL *url = [NSURL URLWithString:@"https://github.com/ollama/ollama/tree/main/docs"];
    NSURL *url = [NSURL URLWithString:@"https://docs.ollama.com/"];
    [[NSWorkspace sharedWorkspace] openURL:url];
}
@@ -147,7 +147,9 @@ func handleURLSchemeRequest(urlScheme string) {
    if isConnect {
        handleConnectURLScheme()
    } else {
        sendUIRequestMessage("/")
        if wv.webview != nil {
            showWindow(wv.webview.Window())
        }
    }
}
@@ -98,6 +98,9 @@ func GPUDevices(ctx context.Context, runners []ml.FilteredRunnerDiscovery) []ml.
            continue
        } else if jetpack != "" && filepath.Base(dir) != "cuda_"+jetpack {
            continue
        } else if jetpack == "" && strings.Contains(filepath.Base(dir), "cuda_jetpack") {
            slog.Debug("jetpack not detected (set JETSON_JETPACK or OLLAMA_LLM_LIBRARY to override), skipping", "libDir", dir)
            continue
        } else if !envconfig.EnableVulkan() && strings.Contains(filepath.Base(dir), "vulkan") {
            slog.Info("experimental Vulkan support disabled. To enable, set OLLAMA_VULKAN=1")
            continue

@@ -125,10 +128,20 @@ func GPUDevices(ctx context.Context, runners []ml.FilteredRunnerDiscovery) []ml.
    supportedMu := sync.Mutex{}
    supported := make(map[string]map[string]map[string]int) // [Library][libDir][ID] = pre-deletion devices index
    for i := range devices {
        libDir := devices[i].LibraryPath[len(devices[i].LibraryPath)-1]
        if !devices[i].NeedsInitValidation() {
            // No need to validate, add to the supported map
            supportedMu.Lock()
            if _, ok := supported[devices[i].Library]; !ok {
                supported[devices[i].Library] = make(map[string]map[string]int)
            }
            if _, ok := supported[devices[i].Library][libDir]; !ok {
                supported[devices[i].Library][libDir] = make(map[string]int)
            }
            supported[devices[i].Library][libDir][devices[i].ID] = i
            supportedMu.Unlock()
            continue
        }
        libDir := devices[i].LibraryPath[len(devices[i].LibraryPath)-1]
        slog.Debug("verifying if device is supported", "library", libDir, "description", devices[i].Description, "compute", devices[i].Compute(), "id", devices[i].ID, "pci_id", devices[i].PCIID)
        wg.Add(1)
        go func(i int) {
docs/faq.mdx: 11 changes
@@ -57,8 +57,13 @@ ollama ps
```

<Info>
**Output**: ``` NAME ID SIZE PROCESSOR UNTIL llama3:70b bcfb190ca3a7 42 GB
100% GPU 4 minutes from now ```

**Output**:

```
NAME ID SIZE PROCESSOR UNTIL
llama3:70b bcfb190ca3a7 42 GB 100% GPU 4 minutes from now
```
</Info>

The `Processor` column will show which memory the model was loaded in to:

@@ -385,4 +390,4 @@ Ollama for Windows and macOS register as a login item during installation. You
- In `Task Manager` go to the `Startup apps` tab, search for `ollama` then click `Disable`

**MacOS**
- Open `Settings` and search for "Login Items", find the `Ollama` entry under "Allow in the Background`, then click the slider to disable.
- Open `Settings` and search for "Login Items", find the `Ollama` entry under "Allow in the Background`, then click the slider to disable.
@@ -149,9 +149,6 @@ PARAMETER <parameter> <parametervalue>

| Parameter | Description | Value Type | Example Usage |
| --- | --- | --- | --- |
| mirostat | Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) | int | mirostat 0 |
| mirostat_eta | Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1) | float | mirostat_eta 0.1 |
| mirostat_tau | Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) | float | mirostat_tau 5.0 |
| num_ctx | Sets the size of the context window used to generate the next token. (Default: 2048) | int | num_ctx 4096 |
| repeat_last_n | Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx) | int | repeat_last_n 64 |
| repeat_penalty | Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) | float | repeat_penalty 1.1 |
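For context, the same parameters can also be overridden per request through the `options` field of the REST API. A minimal Go sketch (standard library only, assuming a local server on the default port 11434 and a pulled `llama3` model):

```go
package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "io"
    "net/http"
)

func main() {
    // Option names mirror the PARAMETER table above.
    body, _ := json.Marshal(map[string]any{
        "model":  "llama3",
        "prompt": "Why is the sky blue?",
        "stream": false,
        "options": map[string]any{
            "num_ctx":        4096,
            "repeat_penalty": 1.1,
        },
    })
    resp, err := http.Post("http://localhost:11434/api/generate", "application/json", bytes.NewReader(body))
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    out, _ := io.ReadAll(resp.Body)
    fmt.Println(string(out))
}
```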
@@ -251,6 +251,7 @@ func (kv KV) OllamaEngineRequired() bool {
        "qwen3vl", "qwen3vlmoe",
        "deepseekocr",
        "deepseek2",
        "nomic-bert",
    }, kv.Architecture())
}
@@ -388,9 +388,9 @@ func NewFunctionNameMap() *FunctionNameMap {
    }
}

// Init initializes the handler with tools and optional last message
// Init initializes the handler with tools, optional last message, and think value
// Implements the Parser interface
func (h *HarmonyMessageHandler) Init(tools []api.Tool, lastMessage *api.Message) []api.Tool {
func (h *HarmonyMessageHandler) Init(tools []api.Tool, lastMessage *api.Message, thinkValue *api.ThinkValue) []api.Tool {
    // Initialize the harmony parser
    if h.HarmonyParser == nil {
        h.HarmonyParser = &HarmonyParser{
File diff suppressed because it is too large.
@@ -236,11 +236,6 @@ type Model struct {
}

func New(c fs.Config) (model.Model, error) {
    if c.Uint("attention.key_length_mla") == 0 {
        // non-MLA models aren't yet supported
        return nil, model.ErrUnsupportedModel
    }

    layers := make([]Layer, c.Uint("block_count"))

    firstDenseLayerIndex := int(c.Uint("leading_dense_block_count"))

@@ -259,6 +254,30 @@ func New(c fs.Config) (model.Model, error) {
    keyLength := int(cmp.Or(c.Uint("attention.key_length_mla"), c.Uint("attention.key_length")))
    valueLength := int(cmp.Or(c.Uint("attention.value_length_mla"), c.Uint("attention.value_length")))

    var pre []string
    switch c.String("tokenizer.ggml.pre") {
    case "deepseek-v3":
        pre = []string{
            // Split regex into multiple parts (according to DeepSeek3's regex)
            "\\p{N}{1,3}",
            `[一-龥-ゟ゠-ヿ]+`,
            "[!\"#$%&'()*+,\\-./:;<=>?@\\[\\\\\\]^_`{|}~][A-Za-z]+|[^\r\n\\p{L}\\p{P}\\p{S}]?[\\p{L}\\p{M}]+| ?[\\p{P}\\p{S}]+[\r\n]*|\\s*[\r\n]+|\\s+(?!\\S)|\\s+",
        }
    case "deepseek-llm":
        // TODO: these models haven't been vetted so skip for now
        // pre = []string{
        // "[\r\n]",
        // "\\s?[A-Za-zµÀ-ÖØ-öø-ƺƼ-ƿDŽ-ʓʕ-ʯͰ-ͳͶͷͻ-ͽͿΆΈ-ΊΌΎ-ΡΣ-ϵϷ-ҁҊ-ԯԱ-ՖႠ-ჅᎠ-Ᏽᏸ-ᏽᲐ-ᲺᲽ-Ჿᴀ-ᴫᵫ-ᵷᵹ-ᶚḀ-ἕἘ-Ἕἠ-ὅὈ-Ὅὐ-ὗὙὛὝὟ-ώᾀ-ᾴᾶ-ᾼιῂ-ῄῆ-ῌῐ-ΐῖ-Ίῠ-Ῥῲ-ῴῶ-ῼℂℇℊ-ℓℕℙ-ℝℤΩℨK-ℭℯ-ℴℹℼ-ℿⅅ-ⅉⅎↃↄⰀ-ⱻⱾ-ⳤⳫ-ⳮⳲⳳꙀ-ꙭꚀ-ꚛꜢ-ꝯꝱ-ꞇꞋ-ꞎꭰ-ꮿff-stﬓ-ﬗA-Za-z𐐀-𐑏𐒰-𐓓𐓘-𐓻𐲀-𐲲𐳀-𐳲𑢠-𑣟𞤀-𞥃]+",
        // "\\s?[!-/:-~!-/:-~‘-‟ -。]+",
        // "\\s+$",
        // "[一-龥ࠀ-一가-]+",
        // "[0-9]",
        // }
        fallthrough
    default:
        return nil, model.ErrUnsupportedTokenizer
    }

    m := Model{
        BytePairEncoding: model.NewBytePairEncoding(
            &model.Vocabulary{

@@ -273,10 +292,7 @@ func New(c fs.Config) (model.Model, error) {
                c.Ints("tokenizer.ggml.eos_token_ids")...,
            ),
        },
        // Split regex into multiple parts (according to DeepSeek3's regex)
        "\\p{N}{1,3}",
        `[一-龥-ゟ゠-ヿ]+`,
        "[!\"#$%&'()*+,\\-./:;<=>?@\\[\\\\\\]^_`{|}~][A-Za-z]+|[^\r\n\\p{L}\\p{P}\\p{S}]?[\\p{L}\\p{M}]+| ?[\\p{P}\\p{S}]+[\r\n]*|\\s*[\r\n]+|\\s+(?!\\S)|\\s+",
        pre...,
    ),
    Layers: layers,
    Options: &Options{
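The first element of the `deepseek-v3` pre-tokenizer list above, `\p{N}{1,3}`, breaks runs of digits into groups of at most three. A small standalone Go sketch (illustrative only, not part of the change) shows the effect:

```go
package main

import (
    "fmt"
    "regexp"
)

func main() {
    // Same digit pattern as the first pre-tokenizer rule above.
    re := regexp.MustCompile(`\p{N}{1,3}`)
    fmt.Println(re.FindAllString("The year 20250 has 12345 days", -1))
    // Output: [202 50 123 45]
}
```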
model/parsers/cogito.go (new file): 319 lines
@@ -0,0 +1,319 @@
package parsers

import (
    "encoding/json"
    "errors"
    "log/slog"
    "strings"
    "unicode"

    "github.com/ollama/ollama/api"
)

type CogitoParserState int

const (
    CogitoCollectingThinking CogitoParserState = iota
    CogitoCollectingContent
    CogitoCollectingToolCalls
    CogitoCollectingToolOutput
)

const (
    cogitoThinkingCloseTag = "</think>"
    cogitoToolCallsBeginTag = "<|tool▁calls▁begin|>"
    cogitoToolCallsEndTag = "<|tool▁calls▁end|>"
    cogitoToolCallBeginTag = "<|tool▁call▁begin|>"
    cogitoToolCallEndTag = "<|tool▁call▁end|>"
    cogitoToolSepTag = "<|tool▁sep|>"
    cogitoToolOutputBeginTag = "<|tool▁output▁begin|>"
    cogitoToolOutputEndTag = "<|tool▁output▁end|>"
    cogitoToolOutputsBeginTag = "<|tool▁outputs▁begin|>"
    cogitoToolOutputsEndTag = "<|tool▁outputs▁end|>"
)

type CogitoParser struct {
    state CogitoParserState
    buffer strings.Builder
}

func (p *CogitoParser) HasToolSupport() bool {
    return true
}

func (p *CogitoParser) HasThinkingSupport() bool {
    return true
}

func (p *CogitoParser) setInitialState(lastMessage *api.Message, tools []api.Tool, thinkValue *api.ThinkValue) {
    prefill := lastMessage != nil && lastMessage.Role == "assistant"

    // Check both model capability AND request preference
    thinkingEnabled := thinkValue != nil && thinkValue.Bool()
    // thinkingEnabled should be set to false for tools

    if !thinkingEnabled {
        p.state = CogitoCollectingContent
        return
    }

    if prefill && lastMessage.Content != "" {
        p.state = CogitoCollectingContent
        return
    }

    // Note: for cogito, if there are tools, then we don't want to be thinking
    if len(tools) > 0 {
        p.state = CogitoCollectingContent
        return
    }

    p.state = CogitoCollectingThinking
}

func (p *CogitoParser) Init(tools []api.Tool, lastMessage *api.Message, thinkValue *api.ThinkValue) []api.Tool {
    p.setInitialState(lastMessage, tools, thinkValue)
    return tools
}

type cogitoEvent interface {
    isCogitoEvent()
}

type cogitoEventThinkingContent struct {
    content string
}

type cogitoEventContent struct {
    content string
}

type cogitoEventToolCall struct {
    toolCall api.ToolCall
}

func (cogitoEventThinkingContent) isCogitoEvent() {}
func (cogitoEventContent) isCogitoEvent() {}
func (cogitoEventToolCall) isCogitoEvent() {}

func (p *CogitoParser) Add(s string, done bool) (content string, thinking string, calls []api.ToolCall, err error) {
    p.buffer.WriteString(s)
    events := p.parseEvents()

    var toolCalls []api.ToolCall
    var contentSb strings.Builder
    var thinkingSb strings.Builder
    for _, event := range events {
        switch event := event.(type) {
        case cogitoEventToolCall:
            toolCalls = append(toolCalls, event.toolCall)
        case cogitoEventThinkingContent:
            thinkingSb.WriteString(event.content)
        case cogitoEventContent:
            contentSb.WriteString(event.content)
        }
    }

    return contentSb.String(), thinkingSb.String(), toolCalls, nil
}

func (p *CogitoParser) parseEvents() []cogitoEvent {
    var all []cogitoEvent

    keepLooping := true
    for keepLooping {
        var events []cogitoEvent
        events, keepLooping = p.eat()
        if len(events) > 0 {
            all = append(all, events...)
        }
    }

    return all
}

func (p *CogitoParser) eat() ([]cogitoEvent, bool) {
    var events []cogitoEvent
    bufStr := p.buffer.String()
    if bufStr == "" {
        return events, false
    }

    switch p.state {
    case CogitoCollectingThinking:
        if strings.Contains(bufStr, cogitoThinkingCloseTag) { // thinking[</think>] -> content
            split := strings.SplitN(bufStr, cogitoThinkingCloseTag, 2)
            thinking := split[0]
            thinking = strings.TrimRightFunc(thinking, unicode.IsSpace)

            remaining := split[1]
            remaining = strings.TrimLeftFunc(remaining, unicode.IsSpace)

            p.buffer.Reset()
            p.buffer.WriteString(remaining)
            p.state = CogitoCollectingContent

            if len(thinking) > 0 {
                events = append(events, cogitoEventThinkingContent{content: thinking})
            }
            return events, true
        } else if overlapLen := overlap(bufStr, cogitoThinkingCloseTag); overlapLen > 0 { // partial </think>
            beforePartialTag := bufStr[:len(bufStr)-overlapLen]
            trailingLen := trailingWhitespaceLen(beforePartialTag)
            ambiguousStart := len(beforePartialTag) - trailingLen

            unambiguous := bufStr[:ambiguousStart]
            ambiguous := bufStr[ambiguousStart:]
            p.buffer.Reset()
            p.buffer.WriteString(ambiguous)
            if len(unambiguous) > 0 {
                events = append(events, cogitoEventThinkingContent{content: unambiguous})
            }
            return events, false
        } else { // otherwise its thinking content
            whitespaceLen := trailingWhitespaceLen(bufStr)
            ambiguousStart := len(bufStr) - whitespaceLen

            unambiguous := bufStr[:ambiguousStart]
            ambiguous := bufStr[ambiguousStart:]
            p.buffer.Reset()
            p.buffer.WriteString(ambiguous)
            if len(unambiguous) > 0 {
                events = append(events, cogitoEventThinkingContent{content: unambiguous})
            }
            return events, false
        }

    case CogitoCollectingContent:
        switch {
        case strings.Contains(bufStr, cogitoToolCallsBeginTag): // content[<|tool▁calls▁begin|>] -> tool calls
            split := strings.SplitN(bufStr, cogitoToolCallsBeginTag, 2)
            contentBefore := strings.TrimRightFunc(split[0], unicode.IsSpace)
            remaining := split[1]

            p.buffer.Reset()
            p.buffer.WriteString(remaining)
            p.state = CogitoCollectingToolCalls

            if len(contentBefore) > 0 {
                events = append(events, cogitoEventContent{content: contentBefore})
            }
            return events, true
        case strings.Contains(bufStr, cogitoToolOutputsBeginTag): // content[<|tool▁outputs▁begin|>] -> tool outputs
            split := strings.SplitN(bufStr, cogitoToolOutputsBeginTag, 2)
            contentBefore := strings.TrimRightFunc(split[0], unicode.IsSpace)
            remaining := split[1]

            p.buffer.Reset()
            p.buffer.WriteString(remaining)
            p.state = CogitoCollectingToolOutput

            if len(contentBefore) > 0 {
                events = append(events, cogitoEventContent{content: contentBefore})
            }
            return events, true
        default: // otherwise its content
            p.buffer.Reset()
            if len(bufStr) > 0 {
                events = append(events, cogitoEventContent{content: bufStr})
            }
            return events, false
        }
    case CogitoCollectingToolCalls:
        if idx := strings.Index(bufStr, cogitoToolCallBeginTag); idx != -1 {
            startIdx := idx + len(cogitoToolCallBeginTag)
            if endIdx := strings.Index(bufStr[startIdx:], cogitoToolCallEndTag); endIdx != -1 {
                toolCallContent := bufStr[startIdx : startIdx+endIdx]

                if toolCall, err := p.parseToolCallContent(toolCallContent); err == nil {
                    remaining := bufStr[startIdx+endIdx+len(cogitoToolCallEndTag):]
                    remaining = strings.TrimLeftFunc(remaining, unicode.IsSpace)

                    p.buffer.Reset()
                    p.buffer.WriteString(remaining)

                    events = append(events, cogitoEventToolCall{toolCall: toolCall})
                    return events, true
                } else {
                    slog.Warn("cogito tool call parsing failed", "error", err)
                }
            }
        }

        if idx := strings.Index(bufStr, cogitoToolCallsEndTag); idx != -1 {
            remaining := bufStr[idx+len(cogitoToolCallsEndTag):]
            remaining = strings.TrimLeftFunc(remaining, unicode.IsSpace)

            p.buffer.Reset()
            p.buffer.WriteString(remaining)
            p.state = CogitoCollectingContent

            return events, true
        }

        return events, false

    case CogitoCollectingToolOutput:
        if idx := strings.Index(bufStr, cogitoToolOutputBeginTag); idx != -1 {
            startIdx := idx + len(cogitoToolOutputBeginTag)
            if endIdx := strings.Index(bufStr[startIdx:], cogitoToolOutputEndTag); endIdx != -1 {
                remaining := bufStr[startIdx+endIdx+len(cogitoToolOutputEndTag):]
                remaining = strings.TrimLeftFunc(remaining, unicode.IsSpace)

                p.buffer.Reset()
                p.buffer.WriteString(remaining)

                return events, true
            }
        }

        if idx := strings.Index(bufStr, cogitoToolOutputsEndTag); idx != -1 {
            remaining := bufStr[idx+len(cogitoToolOutputsEndTag):]
            remaining = strings.TrimLeftFunc(remaining, unicode.IsSpace)

            p.buffer.Reset()
            p.buffer.WriteString(remaining)
            p.state = CogitoCollectingContent

            return events, true
        }

        return events, false
    }

    return events, false
}

func (p *CogitoParser) parseToolCallContent(content string) (api.ToolCall, error) {
    // Expected format: function<|tool▁sep|>tool_name\n```json\n{args}\n```
    parts := strings.SplitN(content, cogitoToolSepTag, 2)
    if len(parts) < 2 {
        return api.ToolCall{}, errors.New("invalid format")
    }
    nameAndArgs := parts[1]

    jsonStart := strings.Index(nameAndArgs, "\n```json\n")
    if jsonStart == -1 {
        return api.ToolCall{}, errors.New("invalid format")
    }
    toolName := strings.TrimSpace(nameAndArgs[:jsonStart])
    jsonContent := nameAndArgs[jsonStart+len("\n```json\n"):]

    jsonEnd := strings.Index(jsonContent, "\n```")
    if jsonEnd == -1 {
        return api.ToolCall{}, errors.New("invalid format")
    }
    argsJSON := jsonContent[:jsonEnd]

    var args api.ToolCallFunctionArguments
    if err := json.Unmarshal([]byte(argsJSON), &args); err != nil {
        return api.ToolCall{}, err
    }

    return api.ToolCall{
        Function: api.ToolCallFunction{
            Name: toolName,
            Arguments: args,
        },
    }, nil
}
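cogito.go calls two small helpers, `overlap` and `trailingWhitespaceLen`, that are not part of this diff and presumably live elsewhere in the parsers package. A rough, hypothetical sketch of what they would need to do, inferred only from the call sites above (not the actual implementations):

```go
package parsers // hypothetical sketch; the real helpers are defined elsewhere in this package

import (
    "strings"
    "unicode"
)

// overlap returns the length of the longest suffix of s that is also a
// prefix of tag, i.e. how many trailing bytes of s could be the start of tag.
func overlap(s, tag string) int {
    n := len(tag) - 1
    if n > len(s) {
        n = len(s)
    }
    for ; n > 0; n-- {
        if strings.HasSuffix(s, tag[:n]) {
            return n
        }
    }
    return 0
}

// trailingWhitespaceLen returns the number of trailing whitespace bytes in s.
func trailingWhitespaceLen(s string) int {
    return len(s) - len(strings.TrimRightFunc(s, unicode.IsSpace))
}
```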
model/parsers/cogito_test.go (new file): 565 lines
@@ -0,0 +1,565 @@
package parsers

import (
    "strings"
    "testing"

    "github.com/google/go-cmp/cmp"

    "github.com/ollama/ollama/api"
)

func TestCogitoParser(t *testing.T) {
    tests := []struct {
        name string
        input string
        expectedContent string
        expectedThinking string
        expectedToolCalls []api.ToolCall
        tools []api.Tool
        lastMessage *api.Message
    }{
        {
            name: "simple_content",
            input: "This is a simple response.",
            expectedContent: "This is a simple response.",
            expectedThinking: "",
        },
        {
            name: "thinking_only",
            input: "This is thinking content.</think>This is response content.",
            expectedContent: "This is response content.",
            expectedThinking: "This is thinking content.",
        },
        {
            name: "tool_call_simple",
            input: `<|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>get_weather
` + "```json\n" + `{"location":"Paris"}
` + "```" + `<|tool▁call▁end|><|tool▁calls▁end|>`,
            expectedToolCalls: []api.ToolCall{
                {
                    Function: api.ToolCallFunction{
                        Name: "get_weather",
                        Arguments: api.ToolCallFunctionArguments{
                            "location": "Paris",
                        },
                    },
                },
            },
            tools: []api.Tool{
                {
                    Type: "function",
                    Function: api.ToolFunction{
                        Name: "get_weather",
                        Parameters: api.ToolFunctionParameters{
                            Properties: map[string]api.ToolProperty{
                                "location": {Type: api.PropertyType{"string"}},
                            },
                        },
                    },
                },
            },
        },
        {
            name: "thinking_with_tool_call",
            input: `I need to check the weather.</think><|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>get_weather
` + "```json\n" + `{"location":"Paris"}
` + "```" + `<|tool▁call▁end|><|tool▁calls▁end|>`,
            expectedContent: "I need to check the weather.</think>",
            expectedThinking: "", // No thinking when tools are present (Cogito-specific behavior)
            expectedToolCalls: []api.ToolCall{
                {
                    Function: api.ToolCallFunction{
                        Name: "get_weather",
                        Arguments: api.ToolCallFunctionArguments{
                            "location": "Paris",
                        },
                    },
                },
            },
            tools: []api.Tool{
                {
                    Type: "function",
                    Function: api.ToolFunction{
                        Name: "get_weather",
                        Parameters: api.ToolFunctionParameters{
                            Properties: map[string]api.ToolProperty{
                                "location": {Type: api.PropertyType{"string"}},
                            },
                        },
                    },
                },
            },
        },
        {
            name: "multiple_tool_calls",
            input: `<|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>get_weather
` + "```json\n" + `{"location":"Paris"}
` + "```" + `<|tool▁call▁end|>
<|tool▁call▁begin|>function<|tool▁sep|>get_weather
` + "```json\n" + `{"location":"London"}
` + "```" + `<|tool▁call▁end|><|tool▁calls▁end|>`,
            expectedToolCalls: []api.ToolCall{
                {
                    Function: api.ToolCallFunction{
                        Name: "get_weather",
                        Arguments: api.ToolCallFunctionArguments{
                            "location": "Paris",
                        },
                    },
                },
                {
                    Function: api.ToolCallFunction{
                        Name: "get_weather",
                        Arguments: api.ToolCallFunctionArguments{
                            "location": "London",
                        },
                    },
                },
            },
            tools: []api.Tool{
                {
                    Type: "function",
                    Function: api.ToolFunction{
                        Name: "get_weather",
                        Parameters: api.ToolFunctionParameters{
                            Properties: map[string]api.ToolProperty{
                                "location": {Type: api.PropertyType{"string"}},
                            },
                        },
                    },
                },
            },
        },
        {
            name: "complex_tool_arguments",
            input: `<|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>process_data
` + "```json\n" + `{"items":["item1","item2"],"config":{"enabled":true,"threshold":0.95},"count":42}
` + "```" + `<|tool▁call▁end|><|tool▁calls▁end|>`,
            expectedToolCalls: []api.ToolCall{
                {
                    Function: api.ToolCallFunction{
                        Name: "process_data",
                        Arguments: api.ToolCallFunctionArguments{
                            "items": []any{"item1", "item2"},
                            "config": map[string]any{"enabled": true, "threshold": 0.95},
                            "count": 42.0,
                        },
                    },
                },
            },
        },
        {
            name: "tool_output_parsing",
            input: `<|tool▁outputs▁begin|><|tool▁output▁begin|>{"temperature": 22, "condition": "sunny"}<|tool▁output▁end|><|tool▁outputs▁end|>`,
            expectedContent: "",
            expectedThinking: "",
        },
        {
            name: "thinking_with_multiline_content",
            input: `This is line 1
This is line 2
This is line 3</think>Final response here.`,
            expectedContent: "Final response here.",
            expectedThinking: "This is line 1\nThis is line 2\nThis is line 3",
        },
        {
            name: "no_thinking_simple",
            input: "This is content.",
            expectedContent: "This is content.",
            expectedThinking: "",
        },
        {
            name: "prefill_content_only",
            input: "Continuing from previous content.",
            expectedContent: "Continuing from previous content.",
            lastMessage: &api.Message{
                Role: "assistant",
                Content: "Previous content",
            },
        },
        {
            name: "prefill_with_thinking",
            input: "Continuing thinking</think>Continuing content.",
            expectedContent: "Continuing content.",
            expectedThinking: "Continuing thinking",
            lastMessage: &api.Message{
                Role: "assistant",
            },
        },
        // Edge cases
        {
            name: "nested_think_tags_in_thinking",
            input: "I'm thinking <think>nested</think> more thinking</think>Final content.",
            expectedContent: "more thinking</think>Final content.",
            expectedThinking: "I'm thinking <think>nested",
        },
        {
            name: "multiple_think_close_tags",
            input: "First thinking</think>Content</think>More content.",
            expectedContent: "Content</think>More content.",
            expectedThinking: "First thinking",
        },
        {
            name: "empty_thinking_content",
            input: "</think>Just content here.",
            expectedContent: "</think>Just content here.",
            expectedThinking: "",
        },
        {
            name: "thinking_disabled_with_think_tags",
            input: "Content with </think> tags should be treated as content.",
            expectedContent: "Content with </think> tags should be treated as content.",
            expectedThinking: "",
            lastMessage: &api.Message{
                Role: "assistant",
                Content: "existing", // Forces non-thinking mode
            },
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            // Use thinking-enabled parser for tests that expect thinking
            hasThinking := tt.expectedThinking != ""
            parser := &CogitoParser{} // it has thinking support
            parser.Init(tt.tools, tt.lastMessage, &api.ThinkValue{Value: hasThinking}) // but we should set it with the request that the user wants

            content, thinking, toolCalls, err := parser.Add(tt.input, true)
            if err != nil {
                t.Fatalf("Add() error = %v", err)
            }

            if diff := cmp.Diff(tt.expectedContent, content); diff != "" {
                t.Errorf("content mismatch (-want +got):\n%s", diff)
            }

            if diff := cmp.Diff(tt.expectedThinking, thinking); diff != "" {
                t.Errorf("thinking mismatch (-want +got):\n%s", diff)
            }

            if diff := cmp.Diff(tt.expectedToolCalls, toolCalls); diff != "" {
                t.Errorf("tool calls mismatch (-want +got):\n%s", diff)
            }
        })
    }
}

func TestCogitoParser_Streaming(t *testing.T) {
    parser := &CogitoParser{}
    parser.Init(nil, nil, &api.ThinkValue{Value: true})

    chunks := []string{
        "This is ",
        "thinking content",
        ".</think>This is ",
        "content.<|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>test_tool\n```json\n{\"arg\":\"value\"}\n```<|tool▁call▁end|><|tool▁calls▁end|>",
    }

    var finalContent, finalThinking strings.Builder
    var finalToolCalls []api.ToolCall

    for i, chunk := range chunks {
        done := i == len(chunks)-1
        content, thinking, toolCalls, err := parser.Add(chunk, done)
        if err != nil {
            t.Fatalf("Add() error on chunk %d: %v", i, err)
        }

        finalContent.WriteString(content)
        finalThinking.WriteString(thinking)
        finalToolCalls = append(finalToolCalls, toolCalls...)
    }

    expectedContent := "This is content."
    expectedThinking := "This is thinking content."
    expectedToolCalls := []api.ToolCall{
        {
            Function: api.ToolCallFunction{
                Name: "test_tool",
                Arguments: api.ToolCallFunctionArguments{
                    "arg": "value",
                },
            },
        },
    }

    if finalContent.String() != expectedContent {
        t.Errorf("expected content %q, got %q", expectedContent, finalContent.String())
    }

    if finalThinking.String() != expectedThinking {
        t.Errorf("expected thinking %q, got %q", expectedThinking, finalThinking.String())
    }

    if diff := cmp.Diff(expectedToolCalls, finalToolCalls); diff != "" {
        t.Errorf("tool calls mismatch (-want +got):\n%s", diff)
    }
}

func TestCogitoParser_StreamingEdgeCases(t *testing.T) {
    tests := []struct {
        name string
        chunks []string
        expectedContent string
        expectedThinking string
        expectedToolCalls []api.ToolCall
        hasThinkingSupport bool
    }{
        {
            name: "split_thinking_tag",
            chunks: []string{
                "This is thinking content</thi",
                "nk>This is content.",
            },
            expectedContent: "This is content.",
            expectedThinking: "This is thinking content",
            hasThinkingSupport: true,
        },
        {
            name: "split_tool_calls_begin_tag_conservative_parsing",
            chunks: []string{
                "Content before<|tool▁calls▁beg",
                "in|><|tool▁call▁begin|>function<|tool▁sep|>test\n```json\n{}\n```<|tool▁call▁end|><|tool▁calls▁end|>",
            },
            // Parser is conservative - treats incomplete tags as content
            expectedContent: "Content before<|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>test\n```json\n{}\n```<|tool▁call▁end|><|tool▁calls▁end|>",
            expectedToolCalls: nil,
            hasThinkingSupport: false,
        },
        {
            name: "thinking_disabled_with_split_tags",
            chunks: []string{
                "Content with </thi",
                "nk> should be treated as content.",
            },
            expectedContent: "Content with </think> should be treated as content.",
            expectedThinking: "",
            hasThinkingSupport: false,
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            parser := &CogitoParser{}
            parser.Init(nil, nil, &api.ThinkValue{Value: tt.hasThinkingSupport})

            var finalContent, finalThinking strings.Builder
            var finalToolCalls []api.ToolCall

            for i, chunk := range tt.chunks {
                done := i == len(tt.chunks)-1
                content, thinking, toolCalls, err := parser.Add(chunk, done)
                if err != nil {
                    t.Fatalf("Add() error on chunk %d: %v", i, err)
                }

                finalContent.WriteString(content)
                finalThinking.WriteString(thinking)
                finalToolCalls = append(finalToolCalls, toolCalls...)
            }

            if finalContent.String() != tt.expectedContent {
                t.Errorf("expected content %q, got %q", tt.expectedContent, finalContent.String())
            }

            if finalThinking.String() != tt.expectedThinking {
                t.Errorf("expected thinking %q, got %q", tt.expectedThinking, finalThinking.String())
            }

            if diff := cmp.Diff(tt.expectedToolCalls, finalToolCalls); diff != "" {
                t.Errorf("tool calls mismatch (-want +got):\n%s", diff)
            }
        })
    }
}

func TestCogitoParser_HasToolSupport(t *testing.T) {
    parser := &CogitoParser{}
    if !parser.HasToolSupport() {
        t.Error("CogitoParser should support tools")
    }
}

func TestCogitoParser_Init(t *testing.T) {
    parser := &CogitoParser{}

    tools := []api.Tool{
        {Function: api.ToolFunction{Name: "test_tool"}},
    }

    lastMessage := &api.Message{Role: "assistant", Content: "previous"}

    returnedTools := parser.Init(tools, lastMessage, nil)

    if len(returnedTools) != len(tools) {
        t.Errorf("expected %d tools returned, got %d", len(tools), len(returnedTools))
    }
}

func TestCogitoParser_parseToolCallContent(t *testing.T) {
    tests := []struct {
        name string
        content string
        expected api.ToolCall
        expectError bool
    }{
        {
            name: "valid_tool_call_standard_format",
            content: `function<|tool▁sep|>get_weather
` + "```json\n" + `{"location":"Paris"}
` + "```",
            expected: api.ToolCall{
                Function: api.ToolCallFunction{
                    Name: "get_weather",
                    Arguments: api.ToolCallFunctionArguments{
                        "location": "Paris",
                    },
                },
            },
            expectError: false,
        },
        {
            name: "valid_tool_call_complex_args",
            content: `function<|tool▁sep|>process_data
` + "```json\n" + `{"items":["item1","item2"],"config":{"enabled":true},"count":42}
` + "```",
            expected: api.ToolCall{
                Function: api.ToolCallFunction{
                    Name: "process_data",
                    Arguments: api.ToolCallFunctionArguments{
                        "items": []any{"item1", "item2"},
                        "config": map[string]any{"enabled": true},
                        "count": 42.0,
                    },
                },
            },
            expectError: false,
        },
        {
            name: "valid_tool_call_empty_args",
            content: `function<|tool▁sep|>no_args_tool
` + "```json\n" + `{}
` + "```",
            expected: api.ToolCall{
                Function: api.ToolCallFunction{
                    Name: "no_args_tool",
                    Arguments: api.ToolCallFunctionArguments{},
                },
            },
            expectError: false,
        },
        {
            name: "missing_separator",
            content: `functionget_weather` + "```json\n" + `{"location":"Paris"}` + "\n```",
            expected: api.ToolCall{},
            expectError: true,
        },
        {
            name: "invalid_function_type",
            content: `not_function<|tool▁sep|>get_weather` + "```json\n" + `{"location":"Paris"}` + "\n```",
            expected: api.ToolCall{},
            expectError: true,
        },
        {
            name: "missing_json_block_start",
            content: `function<|tool▁sep|>get_weather{"location":"Paris"}` + "```",
            expected: api.ToolCall{},
            expectError: true,
        },
        {
            name: "missing_json_block_end",
            content: `function<|tool▁sep|>get_weather` + "```json\n" + `{"location":"Paris"}`,
            expected: api.ToolCall{},
            expectError: true,
        },
        {
            name: "invalid_json",
            content: `function<|tool▁sep|>get_weather` + "```json\n" + `{location:Paris}` + "\n```",
            expected: api.ToolCall{},
            expectError: true,
        },
        {
            name: "empty_function_type",
            content: `<|tool▁sep|>get_weather` + "```json\n" + `{"location":"Paris"}` + "\n```",
            expected: api.ToolCall{},
            expectError: true,
        },
        {
            name: "tool_with_spaces_in_name",
            content: `function<|tool▁sep|> get_weather
` + "```json\n" + `{"location":"Paris"}
` + "```",
            expected: api.ToolCall{
                Function: api.ToolCallFunction{
                    Name: "get_weather",
                    Arguments: api.ToolCallFunctionArguments{
                        "location": "Paris",
                    },
                },
            },
            expectError: false,
        },
        {
            name: "tool_with_multiline_json",
            content: `function<|tool▁sep|>get_weather
` + "```json\n" + `{
"location": "Paris",
"units": "metric"
}
` + "```",
            expected: api.ToolCall{
                Function: api.ToolCallFunction{
                    Name: "get_weather",
                    Arguments: api.ToolCallFunctionArguments{
                        "location": "Paris",
                        "units": "metric",
                    },
                },
            },
            expectError: false,
        },
        {
            name: "tool_with_nested_objects",
            content: `function<|tool▁sep|>complex_tool
` + "```json\n" + `{"nested":{"deep":{"value":123}}}
` + "```",
            expected: api.ToolCall{
                Function: api.ToolCallFunction{
                    Name: "complex_tool",
                    Arguments: api.ToolCallFunctionArguments{
                        "nested": map[string]any{
                            "deep": map[string]any{
                                "value": 123.0,
                            },
                        },
                    },
                },
            },
            expectError: false,
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            parser := &CogitoParser{}

            result, err := parser.parseToolCallContent(tt.content)

            if tt.expectError {
                if err == nil {
                    t.Errorf("expected error but got none")
                }
                return
            }

            if err != nil {
                t.Fatalf("unexpected error: %v", err)
            }

            if diff := cmp.Diff(tt.expected, result); diff != "" {
                t.Errorf("tool call mismatch (-want +got):\n%s", diff)
            }
        })
    }
}
@@ -6,9 +6,9 @@ import (
)

type Parser interface {
    // Init initializes the parser with tools and optional last message for chat prefill
    // Init initializes the parser with tools, optional last message for chat prefill, and think value
    // Returns processed tools if the parser needs to modify them (e.g., harmony renames them)
    Init(tools []api.Tool, lastMessage *api.Message) []api.Tool
    Init(tools []api.Tool, lastMessage *api.Message, thinkValue *api.ThinkValue) []api.Tool
    // Add processes streamed content and returns parsed content, thinking, and tool calls
    // The done flag indicates if this is the last chunk (used for draining accumulators)
    Add(s string, done bool) (content string, thinking string, calls []api.ToolCall, err error)
@@ -52,6 +52,8 @@ func ParserForName(name string) Parser {
        return &PassthroughParser{}
    case "harmony":
        return harmony.NewHarmonyMessageHandler()
    case "cogito":
        return &CogitoParser{}
    default:
        return nil
    }
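For orientation, a rough end-to-end sketch of how a caller drives a built-in parser under the new three-argument Init (illustrative only; it mirrors how the tests above use the parser, with made-up chunk data):

```go
package main

import (
    "fmt"

    "github.com/ollama/ollama/api"
    "github.com/ollama/ollama/model/parsers"
)

func main() {
    // Look up the built-in parser by the name stored in the model config.
    parser := parsers.ParserForName("cogito")

    // Tools, the optional prefill message, and the request's think value
    // now all flow through Init (nil tools/message here for brevity).
    parser.Init(nil, nil, &api.ThinkValue{Value: true})

    // Feed streamed chunks; done=true on the last one drains any buffered text.
    chunks := []string{"Thinking about it.</think>", "Here is the answer."}
    for i, chunk := range chunks {
        content, thinking, calls, err := parser.Add(chunk, i == len(chunks)-1)
        if err != nil {
            panic(err)
        }
        fmt.Printf("content=%q thinking=%q calls=%d\n", content, thinking, len(calls))
    }
}
```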
@@ -59,7 +61,7 @@ func ParserForName(name string) Parser {

type PassthroughParser struct{}

func (p *PassthroughParser) Init(tools []api.Tool, lastMessage *api.Message) []api.Tool {
func (p *PassthroughParser) Init(tools []api.Tool, lastMessage *api.Message, thinkValue *api.ThinkValue) []api.Tool {
    return tools // passthrough doesn't modify tools
}
@@ -10,7 +10,7 @@ type mockParser struct {
    name string
}

func (m *mockParser) Init(tools []api.Tool, lastMessage *api.Message) []api.Tool {
func (m *mockParser) Init(tools []api.Tool, lastMessage *api.Message, thinkValue *api.ThinkValue) []api.Tool {
    return tools
}
@@ -43,7 +43,7 @@ func (p *Qwen3CoderParser) HasThinkingSupport() bool {
    return false
}

func (p *Qwen3CoderParser) Init(tools []api.Tool, lastMessage *api.Message) []api.Tool {
func (p *Qwen3CoderParser) Init(tools []api.Tool, lastMessage *api.Message, thinkValue *api.ThinkValue) []api.Tool {
    p.tools = tools
    return tools // Qwen doesn't modify tools
}

@@ -432,7 +432,7 @@ func transformToXML(raw string) string {
        groups := qwenTagRegex.FindStringSubmatch(match)
        tag := groups[1]
        var escapedValue strings.Builder
        xml.EscapeText(&escapedValue, []byte(groups[2]))
        _ = xml.EscapeText(&escapedValue, []byte(groups[2])) // error is always nil for strings.Builder
        return fmt.Sprintf(`<%s name="%s">`, tag, escapedValue.String())
    })
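The ignored return value is safe because `strings.Builder` never fails its writes, so `xml.EscapeText` cannot return an error when writing to one. A tiny standalone illustration of the escaping behavior (not part of the change):

```go
package main

import (
    "encoding/xml"
    "fmt"
    "strings"
)

func main() {
    var sb strings.Builder
    // strings.Builder's Write never returns an error, so EscapeText's error can be dropped.
    _ = xml.EscapeText(&sb, []byte(`get_weather <"Paris" & London>`))
    fmt.Println(sb.String())
    // Output: get_weather &lt;&#34;Paris&#34; &amp; London&gt;
}
```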
@@ -54,7 +54,7 @@ func (p *Qwen3VLParser) setInitialState(lastMessage *api.Message) {
    p.state = CollectingThinkingContent
}

func (p *Qwen3VLParser) Init(tools []api.Tool, lastMessage *api.Message) []api.Tool {
func (p *Qwen3VLParser) Init(tools []api.Tool, lastMessage *api.Message, thinkValue *api.ThinkValue) []api.Tool {
    p.tools = tools
    p.setInitialState(lastMessage)
    return tools
@@ -198,7 +198,7 @@ func TestQwen3VLNonThinkingParserStreaming(t *testing.T) {

        t.Run(tc.desc, func(t *testing.T) {
            parser := Qwen3VLParser{hasThinkingSupport: false}
            parser.Init([]api.Tool{}, nil)
            parser.Init([]api.Tool{}, nil, nil)

            for i, step := range tc.steps {
                parser.buffer.WriteString(step.input)

@@ -515,7 +515,7 @@ func TestQwenOldParserStreaming(t *testing.T) {

        t.Run(tc.desc, func(t *testing.T) {
            parser := Qwen3VLParser{hasThinkingSupport: false}
            parser.Init([]api.Tool{}, nil)
            parser.Init([]api.Tool{}, nil, nil)

            for i, step := range tc.steps {
                parser.buffer.WriteString(step.input)

@@ -822,7 +822,7 @@ func TestQwen3VLNonThinkingToolCallWhitespaceHandling(t *testing.T) {

        t.Run(tc.desc, func(t *testing.T) {
            parser := Qwen3VLParser{hasThinkingSupport: false}
            parser.Init([]api.Tool{}, nil)
            parser.Init([]api.Tool{}, nil, nil)

            for i, step := range tc.steps {
                parser.buffer.WriteString(step.input)

@@ -205,7 +205,7 @@ func TestQwen3VLThinkingParserStreaming(t *testing.T) {

        t.Run(tc.desc, func(t *testing.T) {
            parser := Qwen3VLParser{hasThinkingSupport: true}
            parser.Init([]api.Tool{}, nil)
            parser.Init([]api.Tool{}, nil, nil)
            // parser.state = CollectingThinkingContent

            for i, step := range tc.steps {

@@ -386,7 +386,7 @@ func TestQwen3VLParserState(t *testing.T) {

    for _, tc := range cases {
        parser := Qwen3VLParser{hasThinkingSupport: tc.hasThinking}
        parser.Init(nil, tc.last)
        parser.Init(nil, tc.last, nil)
        if parser.state != tc.wantState {
            t.Errorf("%s: got state %v, want %v", tc.desc, parser.state, tc.wantState)
        }

@@ -437,7 +437,7 @@ func TestQwen3VLThinkingParserWithThinkingPrefill(t *testing.T) {
    for _, tc := range cases {
        t.Run(tc.desc, func(t *testing.T) {
            parser := Qwen3VLParser{hasThinkingSupport: true}
            parser.Init([]api.Tool{}, last)
            parser.Init([]api.Tool{}, last, nil)

            for i, step := range tc.steps {
                parser.buffer.WriteString(step.input)

@@ -500,7 +500,7 @@ func TestQwen3VLThinkingParserWithNonThinkingPrefill(t *testing.T) {
    for _, tc := range cases {
        t.Run(tc.desc, func(t *testing.T) {
            parser := Qwen3VLParser{hasThinkingSupport: true}
            parser.Init([]api.Tool{}, last)
            parser.Init([]api.Tool{}, last, nil)

            for i, step := range tc.steps {
                parser.buffer.WriteString(step.input)

@@ -523,7 +523,7 @@ func TestQwen3VLThinkingParserStreamingAssistantPrefillContent(t *testing.T) {
    // last message is assistant with content ⇒ start in CollectingContent
    last := &api.Message{Role: "assistant", Content: "has content"}
    parser := Qwen3VLParser{hasThinkingSupport: true}
    parser.Init([]api.Tool{}, last)
    parser.Init([]api.Tool{}, last, nil)

    type step struct {
        input string

@@ -750,7 +750,7 @@ func TestQwen3VLThinkingWhitespaceHandling(t *testing.T) {

        t.Run(tc.desc, func(t *testing.T) {
            parser := Qwen3VLParser{hasThinkingSupport: true}
            parser.Init([]api.Tool{}, nil)
            parser.Init([]api.Tool{}, nil, nil)

            for i, step := range tc.steps {
                parser.buffer.WriteString(step.input)

@@ -859,7 +859,7 @@ func TestQwen3VLToolCallWhitespaceHandling(t *testing.T) {

        t.Run(tc.desc, func(t *testing.T) {
            parser := Qwen3VLParser{hasThinkingSupport: true}
            parser.Init([]api.Tool{}, tc.prefillMsg)
            parser.Init([]api.Tool{}, tc.prefillMsg, nil)

            for i, step := range tc.steps {
                parser.buffer.WriteString(step.input)
@@ -340,7 +340,7 @@ func (s *Server) GenerateHandler(c *gin.Context) {
        builtinParser = parsers.ParserForName(m.Config.Parser)
        if builtinParser != nil {
            // no tools or last message for generate endpoint
            builtinParser.Init(nil, nil)
            builtinParser.Init(nil, nil, req.Think)
        }
    }

@@ -2051,7 +2051,7 @@ func (s *Server) ChatHandler(c *gin.Context) {
            lastMessage = &msgs[len(msgs)-1]
        }
        // Initialize parser and get processed tools
        processedTools = builtinParser.Init(req.Tools, lastMessage)
        processedTools = builtinParser.Init(req.Tools, lastMessage, req.Think)
    }
}