better example module, add port

This commit is contained in:
jmorganca
2024-05-25 20:11:57 -07:00
parent ec17359a68
commit d12db0568e
8 changed files with 179 additions and 149 deletions

31
llama/example/README.md Normal file
View File

@@ -0,0 +1,31 @@
# `example`
Demo app for the `llama` package
Pull a model:
```
ollama pull mistral:7b-instruct-v0.3-q4_0
```
Then run it:
```
go run -x . \
-model ~/.ollama/models/blobs/sha256-ff82381e2bea77d91c1b824c7afb83f6fb73e9f7de9dda631bcdbca564aa5435 \
-prompt "[INST] Why is the sky blue? [/INST]"
```
## Vision
```
ollama pull llava:7b-v1.6-mistral-q4_0
```
```
go run -x . \
-model ~/.ollama/models/blobs/sha256-170370233dd5c5415250a2ecd5c71586352850729062ccef1496385647293868 \
-projector ~/.ollama/models/blobs/sha256-72d6f08a42f656d36b356dbe0920675899a99ce21192fd66266fb7d82ed07539 \
-image ./alonso.jpg \
-prompt "[INST] What is in this image? <image> [/INST]"
```

BIN
llama/example/alonso.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 109 KiB

128
llama/example/main.go Normal file
View File

@@ -0,0 +1,128 @@
package main
import (
"flag"
"fmt"
"io"
"log"
"os"
"strings"
"github.com/ollama/ollama/llama"
)
// main is a demo driver for the llama package: it loads a model (and
// optionally a llava projector), evaluates a prompt — splicing in an image
// embedding where the prompt contains "<image>" — and then greedily samples
// and prints tokens until an end-of-generation token or the context limit.
func main() {
	mpath := flag.String("model", "", "Path to model binary file")
	ppath := flag.String("projector", "", "Path to projector binary file")
	image := flag.String("image", "", "Path to image file")
	prompt := flag.String("prompt", "", "Prompt including <image> tag")
	flag.Parse()
	if *mpath == "" {
		panic("model path is required")
	}
	if *prompt == "" {
		panic("prompt is required")
	}
	// load the model
	llama.BackendInit()
	params := llama.NewModelParams()
	model := llama.LoadModelFromFile(*mpath, params)
	ctxParams := llama.NewContextParams()
	// language model context
	lc := llama.NewContextWithModel(model, ctxParams)
	// batch used both for prompt ingestion and the generation loop
	batch := llama.NewBatch(512, 0, 1)
	// nPast tracks the next KV-cache position
	var nPast int
	// clip context (only set on the multi-modal path)
	var clipCtx *llama.ClipContext
	// multi-modal path: only taken when a projector was actually supplied.
	// FIX: the original condition was `*ppath == ""`, which inverted the
	// check and passed an empty path to NewClipContext.
	if *ppath != "" {
		clipCtx = llama.NewClipContext(*ppath)
		// open image file
		file, err := os.Open(*image)
		if err != nil {
			panic(err)
		}
		defer file.Close()
		data, err := io.ReadAll(file)
		if err != nil {
			log.Fatal(err)
		}
		embedding := llama.NewLlavaImageEmbed(clipCtx, data)
		// the image embedding replaces the "<image>" marker in the prompt
		parts := strings.Split(*prompt, "<image>")
		if len(parts) != 2 {
			panic("prompt must contain exactly one <image>")
		}
		beforeTokens, err := lc.Model().Tokenize(parts[0], 2048, true, true)
		if err != nil {
			panic(err)
		}
		for _, t := range beforeTokens {
			batch.Add(t, nPast, []int{0}, true)
			nPast++
		}
		// decode the prompt prefix before splicing in the image embedding
		if err := lc.Decode(batch); err != nil {
			panic(err)
		}
		llama.LlavaEvalImageEmbed(lc, embedding, 512, &nPast)
		afterTokens, err := lc.Model().Tokenize(parts[1], 2048, true, true)
		if err != nil {
			panic(err)
		}
		// FIX: drop the already-decoded prefix tokens before queueing the
		// suffix; otherwise the next Decode re-submits them at stale positions.
		batch.Clear()
		for _, t := range afterTokens {
			batch.Add(t, nPast, []int{0}, true)
			nPast++
		}
	} else {
		// text-only path: tokenize the whole prompt as-is
		tokens, err := lc.Model().Tokenize(*prompt, 2048, true, true)
		if err != nil {
			panic(err)
		}
		for _, t := range tokens {
			batch.Add(t, nPast, []int{0}, true)
			nPast++
		}
	}
	// main loop: decode the pending batch, greedily pick the next token,
	// print it, and feed it back in until EOG or the 4096-position budget
	for n := nPast; n < 4096; n++ {
		if err := lc.Decode(batch); err != nil {
			panic(err)
		}
		// sample from the logits of the last queued position
		logits := lc.GetLogitsIth(batch.NumTokens() - 1)
		token := lc.SampleTokenGreedy(logits)
		// if it's an end of sequence token, break
		if lc.Model().TokenIsEog(token) {
			break
		}
		// print the token
		str := lc.Model().TokenToPiece(token)
		fmt.Print(str)
		// next iteration decodes just this newly sampled token
		batch.Clear()
		batch.Add(token, n, []int{0}, true)
	}
}