errorlint

commit bb93e5afe7
parent 4d24d8a77d
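This commit applies fixes from errorlint, a Go linter that flags error handling that defeats Go 1.13+ error wrapping: fmt.Errorf verbs like %v or %s where the wrapping verb %w belongs, direct comparisons (err == sentinel) instead of errors.Is, and type assertions (err.(T)) instead of errors.As. A minimal, self-contained sketch of the three rewrites applied throughout the hunks below; the names are illustrative, not from this repository:

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

type codeError struct{ code int }

func (e codeError) Error() string { return fmt.Sprintf("code %d", e.code) }

func lookup() error {
	// %w keeps the sentinel matchable after wrapping; %v would
	// flatten it to text and break errors.Is further up the stack.
	return fmt.Errorf("lookup: %w", errNotFound)
}

func main() {
	err := lookup()

	// errors.Is replaces `err == errNotFound`, which fails on wrapped errors.
	fmt.Println(errors.Is(err, errNotFound)) // true

	// errors.As replaces `err.(codeError)`, walking the wrap chain.
	var ce codeError
	fmt.Println(errors.As(fmt.Errorf("outer: %w", codeError{5}), &ce)) // true
}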
@@ -2,6 +2,7 @@ package api
 
 import (
 	"encoding/json"
+	"errors"
 	"fmt"
 	"net/http"
 	"net/http/httptest"
@@ -39,7 +40,7 @@ func TestClientFromEnvironment(t *testing.T) {
 			t.Setenv("OLLAMA_HOST", v.value)
 
 			client, err := ClientFromEnvironment()
-			if err != v.err {
+			if !errors.Is(err, v.err) {
 				t.Fatalf("expected %s, got %s", v.err, err)
 			}
 
@@ -31,7 +31,7 @@ func terminate(proc *os.Process) error {
 func terminated(pid int) (bool, error) {
 	proc, err := os.FindProcess(pid)
 	if err != nil {
-		return false, fmt.Errorf("failed to find process: %v", err)
+		return false, fmt.Errorf("failed to find process: %w", err)
 	}
 
 	err = proc.Signal(syscall.Signal(0))
@@ -40,7 +40,7 @@ func terminated(pid int) (bool, error) {
 			return true, nil
 		}
 
-		return false, fmt.Errorf("error signaling process: %v", err)
+		return false, fmt.Errorf("error signaling process: %w", err)
 	}
 
 	return false, nil
@@ -483,7 +483,8 @@ func (db *database) cleanupOrphanedData() error {
 }
 
 func duplicateColumnError(err error) bool {
-	if sqlite3Err, ok := err.(sqlite3.Error); ok {
+	var sqlite3Err sqlite3.Error
+	if errors.As(err, &sqlite3Err) {
 		return sqlite3Err.Code == sqlite3.ErrError &&
 			strings.Contains(sqlite3Err.Error(), "duplicate column name")
 	}
@@ -491,7 +492,8 @@ func duplicateColumnError(err error) bool {
 }
 
 func columnNotExists(err error) bool {
-	if sqlite3Err, ok := err.(sqlite3.Error); ok {
+	var sqlite3Err sqlite3.Error
+	if errors.As(err, &sqlite3Err) {
 		return sqlite3Err.Code == sqlite3.ErrError &&
 			strings.Contains(sqlite3Err.Error(), "no such column")
 	}
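Worth noting for the two sqlite3 hunks above: a plain type assertion only inspects the outermost error, so it stops matching as soon as any caller wraps the sqlite3.Error, while errors.As walks the Unwrap chain. A small sketch of that difference, with a local type standing in for sqlite3.Error:

package main

import (
	"errors"
	"fmt"
)

type sqlErr struct{ code int }

func (e sqlErr) Error() string { return fmt.Sprintf("sqlite code %d", e.code) }

func main() {
	wrapped := fmt.Errorf("alter table: %w", sqlErr{code: 1})

	_, ok := wrapped.(sqlErr) // false: the outer error is fmt's wrapper type
	fmt.Println("type assertion:", ok)

	var target sqlErr
	fmt.Println("errors.As:", errors.As(wrapped, &target)) // true: As unwraps
}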
@@ -587,7 +589,7 @@ func (db *database) getChatWithOptions(id string, loadAttachmentData bool) (*Cha
 		&browserState,
 	)
 	if err != nil {
-		if err == sql.ErrNoRows {
+		if errors.Is(err, sql.ErrNoRows) {
 			return nil, errors.New("chat not found")
 		}
 		return nil, fmt.Errorf("query chat: %w", err)
@@ -198,7 +198,7 @@ func (u *Updater) DownloadNewRelease(ctx context.Context, updateResp UpdateRespo
 	_, err = os.Stat(filepath.Dir(stageFilename))
 	if errors.Is(err, os.ErrNotExist) {
 		if err := os.MkdirAll(filepath.Dir(stageFilename), 0o755); err != nil {
-			return fmt.Errorf("create ollama dir %s: %v", filepath.Dir(stageFilename), err)
+			return fmt.Errorf("create ollama dir %s: %w", filepath.Dir(stageFilename), err)
 		}
 	}
 
@@ -218,7 +218,7 @@ func (u *Updater) DownloadNewRelease(ctx context.Context, updateResp UpdateRespo
 
 	if err := VerifyDownload(); err != nil {
 		_ = os.Remove(stageFilename)
-		return fmt.Errorf("%s - %s", resp.Request.URL.String(), err)
+		return fmt.Errorf("%s - %w", resp.Request.URL.String(), err)
 	}
 	UpdateDownloaded = true
 	return nil
@@ -338,7 +338,7 @@ func verifyDownload() error {
 	}
 
 	if err := verifyExtractedBundle(filepath.Join(dir, "Ollama.app")); err != nil {
-		return fmt.Errorf("signature verification failed: %s", err)
+		return fmt.Errorf("signature verification failed: %w", err)
 	}
 	return nil
 }
@@ -3,6 +3,7 @@ package cmd
 import (
 	"bytes"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"net/http"
@@ -761,8 +762,8 @@ func TestGetModelfileName(t *testing.T) {
 				t.Errorf("expected filename: '%s' actual filename: '%s'", expectedFilename, actualFilename)
 			}
 
-			if tt.expectedErr != os.ErrNotExist {
-				if actualErr != tt.expectedErr {
+			if !errors.Is(tt.expectedErr, os.ErrNotExist) {
+				if !errors.Is(actualErr, tt.expectedErr) {
 					t.Errorf("expected err: %v actual err: %v", tt.expectedErr, actualErr)
 				}
 			} else {
@@ -250,7 +250,7 @@ func NewLlamaServer(systemInfo ml.SystemInfo, gpus []ml.DeviceInfo, modelPath st
 		if s.status != nil && s.status.LastErrMsg != "" {
 			msg = s.status.LastErrMsg
 		}
-		err := fmt.Errorf("error starting runner: %v %s", err, msg)
+		err := fmt.Errorf("error starting runner: %w %s", err, msg)
 		if llamaModel != nil {
 			llama.FreeModel(llamaModel)
 		}
@@ -1209,7 +1209,7 @@ func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
 
 	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d/health", s.port), nil)
 	if err != nil {
-		return ServerStatusError, fmt.Errorf("error creating GET request: %v", err)
+		return ServerStatusError, fmt.Errorf("error creating GET request: %w", err)
 	}
 	req.Header.Set("Content-Type", "application/json")
 
@@ -1512,13 +1512,13 @@ func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn fu
 	enc.SetEscapeHTML(false)
 
 	if err := enc.Encode(req); err != nil {
-		return fmt.Errorf("failed to marshal data: %v", err)
+		return fmt.Errorf("failed to marshal data: %w", err)
 	}
 
 	endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", s.port)
 	serverReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
 	if err != nil {
-		return fmt.Errorf("error creating POST request: %v", err)
+		return fmt.Errorf("error creating POST request: %w", err)
 	}
 	serverReq.Header.Set("Content-Type", "application/json")
 
@@ -1567,7 +1567,7 @@ func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn fu
 
 			var c CompletionResponse
 			if err := json.Unmarshal(evt, &c); err != nil {
-				return fmt.Errorf("error unmarshalling llm prediction response: %v", err)
+				return fmt.Errorf("error unmarshalling llm prediction response: %w", err)
 			}
 			switch {
 			case strings.TrimSpace(c.Content) == lastToken:
@@ -1609,7 +1609,7 @@ func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn fu
 			return fmt.Errorf("an error was encountered while running the model: %s", msg)
 		}
 
-		return fmt.Errorf("error reading llm response: %v", err)
+		return fmt.Errorf("error reading llm response: %w", err)
 	}
 
 	return nil
@@ -209,7 +209,7 @@ func TestLLMServerFitGPU(t *testing.T) {
 			}
 
 			gpuLayers, err := s.createLayout(systemInfo, tt.gpus, s.mem, tt.requireFull, 0)
-			if err != tt.expectedErr {
+			if !errors.Is(err, tt.expectedErr) {
 				t.Fatalf("fitGPU returned error: %v", err)
 			}
 			if gpuLayers.Hash() != tt.expected.Hash() {
@@ -107,7 +107,7 @@ func (p *ImageProcessor) ProcessImage(img image.Image) ([]float32, *Grid, error)
 
 	patches, err := p.createPatches(normalizedPixels, resizedHeight, resizedWidth, grid)
 	if err != nil {
-		return nil, nil, fmt.Errorf("failed to create patches: %v", err)
+		return nil, nil, fmt.Errorf("failed to create patches: %w", err)
 	}
 
 	// Return patches and grid dimensions
@@ -111,7 +111,7 @@ func (p *ImageProcessor) ProcessImage(ctx ml.Context, img image.Image) (ml.Tenso
 
 	patches, err := p.createPatches(normalizedPixels, resizedHeight, resizedWidth, grid)
 	if err != nil {
-		return nil, nil, fmt.Errorf("failed to create patches: %v", err)
+		return nil, nil, fmt.Errorf("failed to create patches: %w", err)
 	}
 
 	patchDim := p.numChannels * p.temporalPatchSize *
@@ -231,7 +231,7 @@ func (spm SentencePiece) Decode(ids []int32) (string, error) {
 		if len(data) == 6 && strings.HasPrefix(data, "<0x") && strings.HasSuffix(data, ">") {
 			byteVal, err := strconv.ParseUint(data[1:5], 0, 8)
 			if err != nil {
-				return "", fmt.Errorf("failed to parse hex byte: %v", err)
+				return "", fmt.Errorf("failed to parse hex byte: %w", err)
 			}
 
 			if err := sb.WriteByte(byte(byteVal)); err != nil {
@@ -1083,7 +1083,7 @@ func TestFilesForModel(t *testing.T) {
 			if err == nil {
 				t.Error("Expected error, but got none")
 			}
-			if tt.expectErrType != nil && err != tt.expectErrType {
+			if tt.expectErrType != nil && !errors.Is(err, tt.expectErrType) {
 				t.Errorf("Expected error type %v, got %v", tt.expectErrType, err)
 			}
 			return
@@ -1,6 +1,7 @@
 package llamarunner
 
 import (
+	"errors"
 	"reflect"
 	"testing"
 
@@ -18,7 +19,7 @@ func TestImageCache(t *testing.T) {
 
 	// Empty cache
 	result, err := cache.findImage(0x5adb61d31933a946)
-	if err != errImageNotFound {
+	if !errors.Is(err, errImageNotFound) {
 		t.Errorf("found result in empty cache: result %v, err %v", result, err)
 	}
 
@@ -1239,7 +1239,7 @@ func (s *Server) loadModel() {
 		s.progress = progress
 	})
 	if err != nil {
-		panic(fmt.Errorf("failed to load model: %v", err))
+		panic(fmt.Errorf("failed to load model: %w", err))
 	}
 
 	s.status = llm.ServerStatusReady
@@ -75,7 +75,7 @@ func getAuthorizationToken(ctx context.Context, challenge registryChallenge) (st
 
 	body, err := io.ReadAll(response.Body)
 	if err != nil {
-		return "", fmt.Errorf("%d: %v", response.StatusCode, err)
+		return "", fmt.Errorf("%d: %w", response.StatusCode, err)
 	}
 
 	if response.StatusCode >= http.StatusBadRequest {
@@ -386,7 +386,7 @@ func convertFromSafetensors(files map[string]string, baseLayers []*layerGGML, is
 	}
 	if _, err := root.Stat(fp); err != nil && !errors.Is(err, fs.ErrNotExist) {
 		// Path is likely outside the root
-		return nil, fmt.Errorf("%w: %s: %s", errFilePath, err, fp)
+		return nil, fmt.Errorf("%w: %w: %s", errFilePath, err, fp)
 	}
 
 	blobPath, err := GetBlobsPath(digest)
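The `%w: %w` hunk above, and the setTemplate one below, rely on fmt.Errorf accepting multiple %w verbs, supported since Go 1.20; both wrapped errors then remain matchable with errors.Is. A short sketch with illustrative sentinels (errStat and the file name are not from this repo):

package main

import (
	"errors"
	"fmt"
)

var (
	errFilePath = errors.New("invalid file path")
	errStat     = errors.New("stat failed")
)

func main() {
	// Both sentinels stay in the wrap chain, unlike with "%w: %s".
	err := fmt.Errorf("%w: %w: %s", errFilePath, errStat, "weights.safetensors")
	fmt.Println(errors.Is(err, errFilePath)) // true
	fmt.Println(errors.Is(err, errStat))     // true
}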
@@ -678,10 +678,10 @@ func removeLayer(layers []Layer, mediatype string) []Layer {
 func setTemplate(layers []Layer, t string) ([]Layer, error) {
 	layers = removeLayer(layers, "application/vnd.ollama.image.template")
 	if _, err := template.Parse(t); err != nil {
-		return nil, fmt.Errorf("%w: %s", errBadTemplate, err)
+		return nil, fmt.Errorf("%w: %w", errBadTemplate, err)
 	}
 	if _, err := template.Parse(t); err != nil {
-		return nil, fmt.Errorf("%w: %s", errBadTemplate, err)
+		return nil, fmt.Errorf("%w: %w", errBadTemplate, err)
 	}
 
 	blob := strings.NewReader(t)
@@ -640,7 +640,7 @@ func PullModel(ctx context.Context, name string, regOpts *registryOptions, fn fu
 
 	manifest, err = pullModelManifest(ctx, mp, regOpts)
 	if err != nil {
-		return fmt.Errorf("pull model manifest: %s", err)
+		return fmt.Errorf("pull model manifest: %w", err)
 	}
 
 	var layers []Layer
@@ -786,7 +786,7 @@ func makeRequestWithRetry(ctx context.Context, method string, requestURL *url.UR
 		defer resp.Body.Close()
 		responseBody, err := io.ReadAll(resp.Body)
 		if err != nil {
-			return nil, fmt.Errorf("%d: %s", resp.StatusCode, err)
+			return nil, fmt.Errorf("%d: %w", resp.StatusCode, err)
 		}
 		return nil, fmt.Errorf("%d: %s", resp.StatusCode, responseBody)
 	default:
@@ -1184,11 +1184,11 @@ func parseChunk[S ~string | ~[]byte](s S) (blob.Chunk, error) {
 	}
 	start, err := strconv.ParseInt(startPart, 10, 64)
 	if err != nil {
-		return blob.Chunk{}, fmt.Errorf("chunks: invalid start to %q: %v", s, err)
+		return blob.Chunk{}, fmt.Errorf("chunks: invalid start to %q: %w", s, err)
 	}
 	end, err := strconv.ParseInt(endPart, 10, 64)
 	if err != nil {
-		return blob.Chunk{}, fmt.Errorf("chunks: invalid end to %q: %v", s, err)
+		return blob.Chunk{}, fmt.Errorf("chunks: invalid end to %q: %w", s, err)
 	}
 	if start > end {
 		return blob.Chunk{}, fmt.Errorf("chunks: invalid range %q: start > end", s)
@@ -2,6 +2,7 @@ package server
 
 import (
 	"bytes"
+	"errors"
 	"testing"
 
 	"github.com/google/go-cmp/cmp"
@@ -238,7 +239,7 @@ func TestChatPrompt(t *testing.T) {
 			prompt, images, err := chatPrompt(t.Context(), &model, mockRunner{}.Tokenize, &opts, tt.msgs, nil, &api.ThinkValue{Value: think}, tt.truncate)
 			if tt.error == nil && err != nil {
 				t.Fatal(err)
-			} else if tt.error != nil && err != tt.error {
+			} else if tt.error != nil && !errors.Is(err, tt.error) {
 				t.Fatalf("expected err '%q', got '%q'", tt.error, err)
 			}
 
@@ -31,7 +31,7 @@ func (q quantizer) WriteTo(w io.Writer) (int64, error) {
 	data, err := io.ReadAll(sr)
 	if err != nil {
 		slog.Warn("file read error", "tensor", q.from.Name, "file", q.Name(), "error", err)
-		return 0, fmt.Errorf("unable to read tensor %s from %s: %s", q.from.Name, q.Name(), err)
+		return 0, fmt.Errorf("unable to read tensor %s from %s: %w", q.from.Name, q.Name(), err)
 	}
 	var f32s []float32
 	newType := fsggml.TensorType(q.to.Kind)
@@ -420,7 +420,7 @@ func (s *Scheduler) load(req *LlmRequest, f *ggml.GGML, systemInfo ml.SystemInfo
 			// show a generalized compatibility error until there is a better way to
 			// check for model compatibility
 			if errors.Is(err, ggml.ErrUnsupportedFormat) || strings.Contains(err.Error(), "failed to load model") {
-				err = fmt.Errorf("%v: this model may be incompatible with your version of Ollama. If you previously pulled this model, try updating it by running `ollama pull %s`", err, req.model.ShortName)
+				err = fmt.Errorf("%w: this model may be incompatible with your version of Ollama. If you previously pulled this model, try updating it by running `ollama pull %s`", err, req.model.ShortName)
 			}
 			slog.Info("NewLlamaServer failed", "model", req.model.ModelPath, "error", err)
 			req.errCh <- err