From bb93e5afe7f0581742c995a465f93f1742846180 Mon Sep 17 00:00:00 2001
From: Michael Yang
Date: Tue, 18 Nov 2025 14:47:45 -0800
Subject: [PATCH] errorlint

---
 api/client_test.go                        |  3 ++-
 app/server/server_unix.go                 |  4 ++--
 app/store/database.go                     |  8 +++++---
 app/updater/updater.go                    |  4 ++--
 app/updater/updater_darwin.go             |  2 +-
 cmd/cmd_test.go                           |  5 +++--
 llm/server.go                             | 12 ++++++------
 llm/server_test.go                        |  2 +-
 model/models/qwen25vl/process_image.go    |  2 +-
 model/models/qwen3vl/imageprocessor.go    |  2 +-
 model/sentencepiece.go                    |  2 +-
 parser/parser_test.go                     |  2 +-
 runner/llamarunner/image_test.go          |  3 ++-
 runner/ollamarunner/runner.go             |  2 +-
 server/auth.go                            |  2 +-
 server/create.go                          |  6 +++---
 server/images.go                          |  4 ++--
 server/internal/client/ollama/registry.go |  4 ++--
 server/prompt_test.go                     |  3 ++-
 server/quantization.go                    |  2 +-
 server/sched.go                           |  2 +-
 21 files changed, 41 insertions(+), 35 deletions(-)

diff --git a/api/client_test.go b/api/client_test.go
index f0034e02d..b2a7a3d4f 100644
--- a/api/client_test.go
+++ b/api/client_test.go
@@ -2,6 +2,7 @@ package api
 
 import (
 	"encoding/json"
+	"errors"
 	"fmt"
 	"net/http"
 	"net/http/httptest"
@@ -39,7 +40,7 @@ func TestClientFromEnvironment(t *testing.T) {
 			t.Setenv("OLLAMA_HOST", v.value)
 
 			client, err := ClientFromEnvironment()
-			if err != v.err {
+			if !errors.Is(err, v.err) {
 				t.Fatalf("expected %s, got %s", v.err, err)
 			}
 
diff --git a/app/server/server_unix.go b/app/server/server_unix.go
index 8d365c1e5..9696069b4 100644
--- a/app/server/server_unix.go
+++ b/app/server/server_unix.go
@@ -31,7 +31,7 @@ func terminate(proc *os.Process) error {
 func terminated(pid int) (bool, error) {
 	proc, err := os.FindProcess(pid)
 	if err != nil {
-		return false, fmt.Errorf("failed to find process: %v", err)
+		return false, fmt.Errorf("failed to find process: %w", err)
 	}
 
 	err = proc.Signal(syscall.Signal(0))
@@ -40,7 +40,7 @@ func terminated(pid int) (bool, error) {
 			return true, nil
 		}
 
-		return false, fmt.Errorf("error signaling process: %v", err)
+		return false, fmt.Errorf("error signaling process: %w", err)
 	}
 
 	return false, nil
diff --git a/app/store/database.go b/app/store/database.go
index feb25c092..90d97a292 100644
--- a/app/store/database.go
+++ b/app/store/database.go
@@ -483,7 +483,8 @@ func (db *database) cleanupOrphanedData() error {
 }
 
 func duplicateColumnError(err error) bool {
-	if sqlite3Err, ok := err.(sqlite3.Error); ok {
+	var sqlite3Err sqlite3.Error
+	if errors.As(err, &sqlite3Err) {
 		return sqlite3Err.Code == sqlite3.ErrError &&
 			strings.Contains(sqlite3Err.Error(), "duplicate column name")
 	}
@@ -491,7 +492,8 @@ func duplicateColumnError(err error) bool {
 }
 
 func columnNotExists(err error) bool {
-	if sqlite3Err, ok := err.(sqlite3.Error); ok {
+	var sqlite3Err sqlite3.Error
+	if errors.As(err, &sqlite3Err) {
 		return sqlite3Err.Code == sqlite3.ErrError &&
 			strings.Contains(sqlite3Err.Error(), "no such column")
 	}
@@ -587,7 +589,7 @@ func (db *database) getChatWithOptions(id string, loadAttachmentData bool) (*Cha
 		&browserState,
 	)
 	if err != nil {
-		if err == sql.ErrNoRows {
+		if errors.Is(err, sql.ErrNoRows) {
 			return nil, errors.New("chat not found")
 		}
 		return nil, fmt.Errorf("query chat: %w", err)
diff --git a/app/updater/updater.go b/app/updater/updater.go
index 473ecf466..161428403 100644
--- a/app/updater/updater.go
+++ b/app/updater/updater.go
@@ -198,7 +198,7 @@ func (u *Updater) DownloadNewRelease(ctx context.Context, updateResp UpdateRespo
 	_, err = os.Stat(filepath.Dir(stageFilename))
 	if errors.Is(err, os.ErrNotExist) {
 		if err := os.MkdirAll(filepath.Dir(stageFilename), 0o755); err != nil {
-			return fmt.Errorf("create ollama dir %s: %v", filepath.Dir(stageFilename), err)
+			return fmt.Errorf("create ollama dir %s: %w", filepath.Dir(stageFilename), err)
 		}
 	}
 
@@ -218,7 +218,7 @@ func (u *Updater) DownloadNewRelease(ctx context.Context, updateResp UpdateRespo
 
 	if err := VerifyDownload(); err != nil {
 		_ = os.Remove(stageFilename)
-		return fmt.Errorf("%s - %s", resp.Request.URL.String(), err)
+		return fmt.Errorf("%s - %w", resp.Request.URL.String(), err)
 	}
 	UpdateDownloaded = true
 	return nil
diff --git a/app/updater/updater_darwin.go b/app/updater/updater_darwin.go
index 7f7bd8864..7172dee63 100644
--- a/app/updater/updater_darwin.go
+++ b/app/updater/updater_darwin.go
@@ -338,7 +338,7 @@ func verifyDownload() error {
 	}
 
 	if err := verifyExtractedBundle(filepath.Join(dir, "Ollama.app")); err != nil {
-		return fmt.Errorf("signature verification failed: %s", err)
+		return fmt.Errorf("signature verification failed: %w", err)
 	}
 	return nil
 }
diff --git a/cmd/cmd_test.go b/cmd/cmd_test.go
index 60cd938b6..a63542e69 100644
--- a/cmd/cmd_test.go
+++ b/cmd/cmd_test.go
@@ -3,6 +3,7 @@ package cmd
 import (
 	"bytes"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"net/http"
@@ -761,8 +762,8 @@ func TestGetModelfileName(t *testing.T) {
 				t.Errorf("expected filename: '%s' actual filename: '%s'", expectedFilename, actualFilename)
 			}
 
-			if tt.expectedErr != os.ErrNotExist {
-				if actualErr != tt.expectedErr {
+			if !errors.Is(tt.expectedErr, os.ErrNotExist) {
+				if !errors.Is(actualErr, tt.expectedErr) {
 					t.Errorf("expected err: %v actual err: %v", tt.expectedErr, actualErr)
 				}
 			} else {
diff --git a/llm/server.go b/llm/server.go
index 0d5fc994b..368a60036 100644
--- a/llm/server.go
+++ b/llm/server.go
@@ -250,7 +250,7 @@ func NewLlamaServer(systemInfo ml.SystemInfo, gpus []ml.DeviceInfo, modelPath st
 		if s.status != nil && s.status.LastErrMsg != "" {
 			msg = s.status.LastErrMsg
 		}
-		err := fmt.Errorf("error starting runner: %v %s", err, msg)
+		err := fmt.Errorf("error starting runner: %w %s", err, msg)
 		if llamaModel != nil {
 			llama.FreeModel(llamaModel)
 		}
@@ -1209,7 +1209,7 @@ func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
 
 	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d/health", s.port), nil)
 	if err != nil {
-		return ServerStatusError, fmt.Errorf("error creating GET request: %v", err)
+		return ServerStatusError, fmt.Errorf("error creating GET request: %w", err)
 	}
 	req.Header.Set("Content-Type", "application/json")
 
@@ -1512,13 +1512,13 @@ func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn fu
 	enc.SetEscapeHTML(false)
 
 	if err := enc.Encode(req); err != nil {
-		return fmt.Errorf("failed to marshal data: %v", err)
+		return fmt.Errorf("failed to marshal data: %w", err)
 	}
 
 	endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", s.port)
 	serverReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
 	if err != nil {
-		return fmt.Errorf("error creating POST request: %v", err)
+		return fmt.Errorf("error creating POST request: %w", err)
 	}
 	serverReq.Header.Set("Content-Type", "application/json")
 
@@ -1567,7 +1567,7 @@ func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn fu
 
 			var c CompletionResponse
 			if err := json.Unmarshal(evt, &c); err != nil {
-				return fmt.Errorf("error unmarshalling llm prediction response: %v", err)
+				return fmt.Errorf("error unmarshalling llm prediction response: %w", err)
 			}
 			switch {
 			case strings.TrimSpace(c.Content) == lastToken:
@@ -1609,7 +1609,7 @@ func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn fu
 			return fmt.Errorf("an error was encountered while running the model: %s", msg)
 		}
 
-		return fmt.Errorf("error reading llm response: %v", err)
+		return fmt.Errorf("error reading llm response: %w", err)
 	}
 
 	return nil
diff --git a/llm/server_test.go b/llm/server_test.go
index 1f5d5cda3..75afc79b8 100644
--- a/llm/server_test.go
+++ b/llm/server_test.go
@@ -209,7 +209,7 @@ func TestLLMServerFitGPU(t *testing.T) {
 			}
 
 			gpuLayers, err := s.createLayout(systemInfo, tt.gpus, s.mem, tt.requireFull, 0)
-			if err != tt.expectedErr {
+			if !errors.Is(err, tt.expectedErr) {
 				t.Fatalf("fitGPU returned error: %v", err)
 			}
 			if gpuLayers.Hash() != tt.expected.Hash() {
diff --git a/model/models/qwen25vl/process_image.go b/model/models/qwen25vl/process_image.go
index ce5ded295..e8aab2176 100644
--- a/model/models/qwen25vl/process_image.go
+++ b/model/models/qwen25vl/process_image.go
@@ -107,7 +107,7 @@ func (p *ImageProcessor) ProcessImage(img image.Image) ([]float32, *Grid, error)
 
 	patches, err := p.createPatches(normalizedPixels, resizedHeight, resizedWidth, grid)
 	if err != nil {
-		return nil, nil, fmt.Errorf("failed to create patches: %v", err)
+		return nil, nil, fmt.Errorf("failed to create patches: %w", err)
 	}
 
 	// Return patches and grid dimensions
diff --git a/model/models/qwen3vl/imageprocessor.go b/model/models/qwen3vl/imageprocessor.go
index 2453a87da..4487be251 100644
--- a/model/models/qwen3vl/imageprocessor.go
+++ b/model/models/qwen3vl/imageprocessor.go
@@ -111,7 +111,7 @@ func (p *ImageProcessor) ProcessImage(ctx ml.Context, img image.Image) (ml.Tenso
 
 	patches, err := p.createPatches(normalizedPixels, resizedHeight, resizedWidth, grid)
 	if err != nil {
-		return nil, nil, fmt.Errorf("failed to create patches: %v", err)
+		return nil, nil, fmt.Errorf("failed to create patches: %w", err)
 	}
 
 	patchDim := p.numChannels * p.temporalPatchSize *
diff --git a/model/sentencepiece.go b/model/sentencepiece.go
index 4a1c3125c..e45b4840e 100644
--- a/model/sentencepiece.go
+++ b/model/sentencepiece.go
@@ -231,7 +231,7 @@ func (spm SentencePiece) Decode(ids []int32) (string, error) {
 		if len(data) == 6 && strings.HasPrefix(data, "<0x") && strings.HasSuffix(data, ">") {
 			byteVal, err := strconv.ParseUint(data[1:5], 0, 8)
 			if err != nil {
-				return "", fmt.Errorf("failed to parse hex byte: %v", err)
+				return "", fmt.Errorf("failed to parse hex byte: %w", err)
 			}
 
 			if err := sb.WriteByte(byte(byteVal)); err != nil {
diff --git a/parser/parser_test.go b/parser/parser_test.go
index 478e56aae..67441f663 100644
--- a/parser/parser_test.go
+++ b/parser/parser_test.go
@@ -1083,7 +1083,7 @@ func TestFilesForModel(t *testing.T) {
 			if err == nil {
 				t.Error("Expected error, but got none")
 			}
-			if tt.expectErrType != nil && err != tt.expectErrType {
+			if tt.expectErrType != nil && !errors.Is(err, tt.expectErrType) {
 				t.Errorf("Expected error type %v, got %v", tt.expectErrType, err)
 			}
 			return
diff --git a/runner/llamarunner/image_test.go b/runner/llamarunner/image_test.go
index f7d98a472..73d3e4879 100644
--- a/runner/llamarunner/image_test.go
+++ b/runner/llamarunner/image_test.go
@@ -1,6 +1,7 @@
 package llamarunner
 
 import (
+	"errors"
 	"reflect"
 	"testing"
 
@@ -18,7 +19,7 @@ func TestImageCache(t *testing.T) {
 
 	// Empty cache
 	result, err := cache.findImage(0x5adb61d31933a946)
-	if err != errImageNotFound {
+	if !errors.Is(err, errImageNotFound) {
 		t.Errorf("found result in empty cache: result %v, err %v", result, err)
 	}
 
diff --git a/runner/ollamarunner/runner.go b/runner/ollamarunner/runner.go
index e2a9c5487..42164f3a6 100644
--- a/runner/ollamarunner/runner.go
+++ b/runner/ollamarunner/runner.go
@@ -1239,7 +1239,7 @@ func (s *Server) loadModel() {
 		s.progress = progress
 	})
 	if err != nil {
-		panic(fmt.Errorf("failed to load model: %v", err))
+		panic(fmt.Errorf("failed to load model: %w", err))
 	}
 
 	s.status = llm.ServerStatusReady
diff --git a/server/auth.go b/server/auth.go
index 51d370477..24b0171eb 100644
--- a/server/auth.go
+++ b/server/auth.go
@@ -75,7 +75,7 @@ func getAuthorizationToken(ctx context.Context, challenge registryChallenge) (st
 
 	body, err := io.ReadAll(response.Body)
 	if err != nil {
-		return "", fmt.Errorf("%d: %v", response.StatusCode, err)
+		return "", fmt.Errorf("%d: %w", response.StatusCode, err)
 	}
 
 	if response.StatusCode >= http.StatusBadRequest {
diff --git a/server/create.go b/server/create.go
index b5464e0af..129f61289 100644
--- a/server/create.go
+++ b/server/create.go
@@ -386,7 +386,7 @@ func convertFromSafetensors(files map[string]string, baseLayers []*layerGGML, is
 		}
 		if _, err := root.Stat(fp); err != nil && !errors.Is(err, fs.ErrNotExist) {
 			// Path is likely outside the root
-			return nil, fmt.Errorf("%w: %s: %s", errFilePath, err, fp)
+			return nil, fmt.Errorf("%w: %w: %s", errFilePath, err, fp)
 		}
 
 		blobPath, err := GetBlobsPath(digest)
@@ -678,10 +678,10 @@ func removeLayer(layers []Layer, mediatype string) []Layer {
 func setTemplate(layers []Layer, t string) ([]Layer, error) {
 	layers = removeLayer(layers, "application/vnd.ollama.image.template")
 	if _, err := template.Parse(t); err != nil {
-		return nil, fmt.Errorf("%w: %s", errBadTemplate, err)
+		return nil, fmt.Errorf("%w: %w", errBadTemplate, err)
 	}
 	if _, err := template.Parse(t); err != nil {
-		return nil, fmt.Errorf("%w: %s", errBadTemplate, err)
+		return nil, fmt.Errorf("%w: %w", errBadTemplate, err)
 	}
 
 	blob := strings.NewReader(t)
diff --git a/server/images.go b/server/images.go
index d3bd9ffaf..705293253 100644
--- a/server/images.go
+++ b/server/images.go
@@ -640,7 +640,7 @@ func PullModel(ctx context.Context, name string, regOpts *registryOptions, fn fu
 
 	manifest, err = pullModelManifest(ctx, mp, regOpts)
 	if err != nil {
-		return fmt.Errorf("pull model manifest: %s", err)
+		return fmt.Errorf("pull model manifest: %w", err)
 	}
 
 	var layers []Layer
@@ -786,7 +786,7 @@ func makeRequestWithRetry(ctx context.Context, method string, requestURL *url.UR
 		defer resp.Body.Close()
 		responseBody, err := io.ReadAll(resp.Body)
 		if err != nil {
-			return nil, fmt.Errorf("%d: %s", resp.StatusCode, err)
+			return nil, fmt.Errorf("%d: %w", resp.StatusCode, err)
 		}
 		return nil, fmt.Errorf("%d: %s", resp.StatusCode, responseBody)
 	default:
diff --git a/server/internal/client/ollama/registry.go b/server/internal/client/ollama/registry.go
index eae130bf4..76a8f6457 100644
--- a/server/internal/client/ollama/registry.go
+++ b/server/internal/client/ollama/registry.go
@@ -1184,11 +1184,11 @@ func parseChunk[S ~string | ~[]byte](s S) (blob.Chunk, error) {
 	}
 	start, err := strconv.ParseInt(startPart, 10, 64)
 	if err != nil {
-		return blob.Chunk{}, fmt.Errorf("chunks: invalid start to %q: %v", s, err)
+		return blob.Chunk{}, fmt.Errorf("chunks: invalid start to %q: %w", s, err)
 	}
 	end, err := strconv.ParseInt(endPart, 10, 64)
 	if err != nil {
-		return blob.Chunk{}, fmt.Errorf("chunks: invalid end to %q: %v", s, err)
+		return blob.Chunk{}, fmt.Errorf("chunks: invalid end to %q: %w", s, err)
 	}
 	if start > end {
 		return blob.Chunk{}, fmt.Errorf("chunks: invalid range %q: start > end", s)
diff --git a/server/prompt_test.go b/server/prompt_test.go
index 3bd621152..ca5b94a7b 100644
--- a/server/prompt_test.go
+++ b/server/prompt_test.go
@@ -2,6 +2,7 @@ package server
 
 import (
 	"bytes"
+	"errors"
 	"testing"
 
 	"github.com/google/go-cmp/cmp"
@@ -238,7 +239,7 @@ func TestChatPrompt(t *testing.T) {
 			prompt, images, err := chatPrompt(t.Context(), &model, mockRunner{}.Tokenize, &opts, tt.msgs, nil, &api.ThinkValue{Value: think}, tt.truncate)
 			if tt.error == nil && err != nil {
 				t.Fatal(err)
-			} else if tt.error != nil && err != tt.error {
+			} else if tt.error != nil && !errors.Is(err, tt.error) {
 				t.Fatalf("expected err '%q', got '%q'", tt.error, err)
 			}
 
diff --git a/server/quantization.go b/server/quantization.go
index b15451d7e..df9478731 100644
--- a/server/quantization.go
+++ b/server/quantization.go
@@ -31,7 +31,7 @@ func (q quantizer) WriteTo(w io.Writer) (int64, error) {
 	data, err := io.ReadAll(sr)
 	if err != nil {
 		slog.Warn("file read error", "tensor", q.from.Name, "file", q.Name(), "error", err)
-		return 0, fmt.Errorf("unable to read tensor %s from %s: %s", q.from.Name, q.Name(), err)
+		return 0, fmt.Errorf("unable to read tensor %s from %s: %w", q.from.Name, q.Name(), err)
 	}
 	var f32s []float32
 	newType := fsggml.TensorType(q.to.Kind)
diff --git a/server/sched.go b/server/sched.go
index 68eef162f..778321a70 100644
--- a/server/sched.go
+++ b/server/sched.go
@@ -420,7 +420,7 @@ func (s *Scheduler) load(req *LlmRequest, f *ggml.GGML, systemInfo ml.SystemInfo
 		// show a generalized compatibility error until there is a better way to
 		// check for model compatibility
 		if errors.Is(err, ggml.ErrUnsupportedFormat) || strings.Contains(err.Error(), "failed to load model") {
-			err = fmt.Errorf("%v: this model may be incompatible with your version of Ollama. If you previously pulled this model, try updating it by running `ollama pull %s`", err, req.model.ShortName)
+			err = fmt.Errorf("%w: this model may be incompatible with your version of Ollama. If you previously pulled this model, try updating it by running `ollama pull %s`", err, req.model.ShortName)
 		}
 		slog.Info("NewLlamaServer failed", "model", req.model.ModelPath, "error", err)
 		req.errCh <- err
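
Note (not part of the patch): a minimal, self-contained sketch of the pattern this change enforces. Wrapping with fmt.Errorf("...: %w", err) keeps the underlying error in the chain so callers can match it with errors.Is or errors.As, whereas %v formatting and direct == comparisons lose or miss wrapped errors. The statConfig helper and path below are hypothetical, used only for illustration.

// errorlint_example.go (illustrative sketch only)
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// statConfig wraps any stat failure with %w so the original error stays
// inspectable by callers.
func statConfig(path string) error {
	if _, err := os.Stat(path); err != nil {
		// With %v instead of %w, the caller below could no longer match
		// fs.ErrNotExist or extract *fs.PathError from the returned error.
		return fmt.Errorf("stat config %s: %w", path, err)
	}
	return nil
}

func main() {
	err := statConfig("/nonexistent/config.json")

	// errors.Is walks the wrap chain, so it still matches fs.ErrNotExist
	// even though the error was wrapped with extra context.
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true

	// errors.As extracts the concrete *fs.PathError from the chain,
	// replacing the fragile err.(*fs.PathError) type assertion.
	var pathErr *fs.PathError
	fmt.Println(errors.As(err, &pathErr)) // true

	// A direct comparison like err == fs.ErrNotExist would report false here,
	// which is exactly the class of bug errorlint flags.
}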