Compare commits
435 Commits
jmorganca/
...
jyan/forma
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5dc5a295bf | ||
|
|
e21e6b2a33 | ||
|
|
a240ea3367 | ||
|
|
d4a86102fd | ||
|
|
476fb8e892 | ||
|
|
829ff87bd1 | ||
|
|
f6b622c4b3 | ||
|
|
2e4da8eec2 | ||
|
|
763bb65dbb | ||
|
|
7ca9605f54 | ||
|
|
eb2c443a79 | ||
|
|
278e25ea44 | ||
|
|
a50a87a7b8 | ||
|
|
98085015d5 | ||
|
|
bf54c845e9 | ||
|
|
c365f195a8 | ||
|
|
e91d0ef737 | ||
|
|
22f5c12ced | ||
|
|
298c996e54 | ||
|
|
0fc0cfc6d2 | ||
|
|
914f68f021 | ||
|
|
bd1d119ba9 | ||
|
|
a03be18189 | ||
|
|
96bc232b43 | ||
|
|
bca7b12284 | ||
|
|
32cb1960c1 | ||
|
|
de781b37c8 | ||
|
|
3e21799377 | ||
|
|
26a00a0410 | ||
|
|
646371f56d | ||
|
|
1f5008544b | ||
|
|
45cbfc5aee | ||
|
|
6d423b383b | ||
|
|
ad897080a2 | ||
|
|
b7d316d98d | ||
|
|
d7339fad52 | ||
|
|
92c81e8117 | ||
|
|
9db0996ed4 | ||
|
|
6f43898b17 | ||
|
|
7487229c34 | ||
|
|
8a8e7afa96 | ||
|
|
c79f8c9c39 | ||
|
|
485016bfbb | ||
|
|
0165ba1651 | ||
|
|
c4209d6d21 | ||
|
|
6adca97f37 | ||
|
|
9a3c8003c8 | ||
|
|
d51f15257c | ||
|
|
8f440d579a | ||
|
|
4cc3be3035 | ||
|
|
db2ffa79f1 | ||
|
|
afd2b058b4 | ||
|
|
fd5971be0b | ||
|
|
89bf98bcf2 | ||
|
|
1b2d156094 | ||
|
|
714adb8bd1 | ||
|
|
95b1133d0c | ||
|
|
b37b496a12 | ||
|
|
d6f692ad1a | ||
|
|
f77713bf1f | ||
|
|
38255d2af1 | ||
|
|
73630a7e85 | ||
|
|
955c317cab | ||
|
|
9f18b88a06 | ||
|
|
353f83a9c7 | ||
|
|
3bade04e10 | ||
|
|
a6d0f443eb | ||
|
|
96236b7968 | ||
|
|
4434d7f447 | ||
|
|
171eb040fc | ||
|
|
3591bbe56f | ||
|
|
34d5ef29b3 | ||
|
|
bbbd9f20f3 | ||
|
|
547132e820 | ||
|
|
2d315ba9a9 | ||
|
|
d355d2020f | ||
|
|
c8cf0d94ed | ||
|
|
4730762e5c | ||
|
|
d88582dffd | ||
|
|
2f81b3dce2 | ||
|
|
5cab13739e | ||
|
|
8aadad9c72 | ||
|
|
807d092761 | ||
|
|
f36f1d6be9 | ||
|
|
8800c8a59b | ||
|
|
b4dce13309 | ||
|
|
e15307fdf4 | ||
|
|
3520c0e4d5 | ||
|
|
ccdf0b2a44 | ||
|
|
63a453554d | ||
|
|
105186aa17 | ||
|
|
ba04afc9a4 | ||
|
|
7e1e0086e7 | ||
|
|
02b31c9dc8 | ||
|
|
7f2fbad736 | ||
|
|
5bece94509 | ||
|
|
3d90156e99 | ||
|
|
5e46c5c435 | ||
|
|
583c1f472c | ||
|
|
26bfc1c443 | ||
|
|
799aa9883c | ||
|
|
84ed77cbd8 | ||
|
|
c9e584fb90 | ||
|
|
17b1e81ca1 | ||
|
|
7e9a2da097 | ||
|
|
c48c1d7c46 | ||
|
|
d1692fd3e0 | ||
|
|
5fa36a0833 | ||
|
|
853ae490e1 | ||
|
|
f2cf97d6f1 | ||
|
|
c344da4c5a | ||
|
|
85a57006d1 | ||
|
|
c5e892cb3e | ||
|
|
81fb06f530 | ||
|
|
a385382ff5 | ||
|
|
b8772a353f | ||
|
|
c2714fcbfd | ||
|
|
a2fc933fed | ||
|
|
0e331c7168 | ||
|
|
ac145f75ca | ||
|
|
a4b8d1f89a | ||
|
|
798b107f19 | ||
|
|
6a1b471365 | ||
|
|
ec231a7923 | ||
|
|
7ca71a6b0f | ||
|
|
7607e6e902 | ||
|
|
f1548ef62d | ||
|
|
6845988807 | ||
|
|
9eed4a90ce | ||
|
|
f8464785a6 | ||
|
|
1d359e737e | ||
|
|
50b9056e09 | ||
|
|
91a090a485 | ||
|
|
9c76b30d72 | ||
|
|
93f19910c5 | ||
|
|
4ec7445a6f | ||
|
|
0372c51f82 | ||
|
|
0fec3525ad | ||
|
|
41ba3017fd | ||
|
|
8080fbce35 | ||
|
|
ec14f6ceda | ||
|
|
c60a086635 | ||
|
|
92ca2cca95 | ||
|
|
1e1634daca | ||
|
|
824ee5446f | ||
|
|
879e2caf8c | ||
|
|
c4014e73a2 | ||
|
|
be9efdb981 | ||
|
|
074dc3b9d8 | ||
|
|
86f9b582d5 | ||
|
|
4142c3ef7c | ||
|
|
6602e793c0 | ||
|
|
ea0fdaed28 | ||
|
|
1eb382da5a | ||
|
|
bb6fd02298 | ||
|
|
7e2bceceee | ||
|
|
30a7d7096c | ||
|
|
200a18820e | ||
|
|
e03637176d | ||
|
|
c02db93243 | ||
|
|
ffa4d5134a | ||
|
|
302d7fdbf3 | ||
|
|
cf442cd57e | ||
|
|
0e1ba65855 | ||
|
|
6aad333c63 | ||
|
|
4fcc84e67a | ||
|
|
3ae2f441e0 | ||
|
|
2abb3f6424 | ||
|
|
ce3b212d12 | ||
|
|
83d6d46e29 | ||
|
|
354ad9254e | ||
|
|
58876091f7 | ||
|
|
dc18eee39d | ||
|
|
8727a9c140 | ||
|
|
d0425f26cf | ||
|
|
cfa84b8470 | ||
|
|
1580ed4c06 | ||
|
|
a7ee84fc31 | ||
|
|
84ac7ce139 | ||
|
|
788b092c49 | ||
|
|
5cde17a096 | ||
|
|
c3837eb08c | ||
|
|
8cc0ee2efe | ||
|
|
d5eec16d23 | ||
|
|
daa1a032f7 | ||
|
|
6042e8bc57 | ||
|
|
920a4b0794 | ||
|
|
ee49844d09 | ||
|
|
8a516ac862 | ||
|
|
bee2f4a3b0 | ||
|
|
cef45feaa4 | ||
|
|
2687f02c96 | ||
|
|
b25976aeb8 | ||
|
|
001f167aad | ||
|
|
486a2c1d94 | ||
|
|
88cf154483 | ||
|
|
8cbd3e7510 | ||
|
|
eeb695261f | ||
|
|
dc9b1111e0 | ||
|
|
06ac829e70 | ||
|
|
72700279e2 | ||
|
|
5d3f7fff26 | ||
|
|
d77c1c5f9d | ||
|
|
2a5302a1cf | ||
|
|
ffbd3d173f | ||
|
|
1e0a669f75 | ||
|
|
527e9be058 | ||
|
|
34bea2e272 | ||
|
|
fe44ae3371 | ||
|
|
adeb40eaf2 | ||
|
|
d7d33e5255 | ||
|
|
63bc884e25 | ||
|
|
ef4e095d24 | ||
|
|
4d4f75a8a8 | ||
|
|
3f71ba406a | ||
|
|
88a67127d8 | ||
|
|
f7dc7dcc64 | ||
|
|
04f971c84b | ||
|
|
548a7df014 | ||
|
|
70edb9bc4d | ||
|
|
3f0ed03856 | ||
|
|
4736391bfb | ||
|
|
7c5330413b | ||
|
|
39d9d22ca3 | ||
|
|
af47413dba | ||
|
|
b2f00aa977 | ||
|
|
6694be5e50 | ||
|
|
f5e8b207fb | ||
|
|
d245460362 | ||
|
|
4d0d0fa383 | ||
|
|
7ffe45734d | ||
|
|
01811c176a | ||
|
|
a7248f6ea8 | ||
|
|
9685c34509 | ||
|
|
d091fe3c21 | ||
|
|
ee02f548c8 | ||
|
|
b08870aff3 | ||
|
|
3ecae420ac | ||
|
|
4cbbf0e13b | ||
|
|
380378cc80 | ||
|
|
0963c65027 | ||
|
|
ed740a2504 | ||
|
|
c9f98622b1 | ||
|
|
0a954e5066 | ||
|
|
aa93423fbf | ||
|
|
01c9386267 | ||
|
|
af9eb36f9f | ||
|
|
06093fd396 | ||
|
|
86b7fcac32 | ||
|
|
fb8ddc564e | ||
|
|
242efe6611 | ||
|
|
1b0e6c9c0e | ||
|
|
dfa2f32ca0 | ||
|
|
840424a2c4 | ||
|
|
f56aa20014 | ||
|
|
6707768ebd | ||
|
|
c78bb76a12 | ||
|
|
942c979232 | ||
|
|
06164911dd | ||
|
|
2a21363bb7 | ||
|
|
026869915f | ||
|
|
45d61aaaa3 | ||
|
|
20f6c06569 | ||
|
|
371f5e52aa | ||
|
|
e006480e49 | ||
|
|
aed545872d | ||
|
|
44869c59d6 | ||
|
|
52663284cf | ||
|
|
42fa9d7f0a | ||
|
|
b7a87a22b6 | ||
|
|
e8aaea030e | ||
|
|
b1ad3a43cb | ||
|
|
267e25a750 | ||
|
|
9a32c514cb | ||
|
|
e9ae607ece | ||
|
|
93707fa3f2 | ||
|
|
94c369095f | ||
|
|
9164b0161b | ||
|
|
e592e8fccb | ||
|
|
bf4fc25f7b | ||
|
|
5b806d8d24 | ||
|
|
cb1e072643 | ||
|
|
45b6a12e45 | ||
|
|
68755f1f5e | ||
|
|
997a455039 | ||
|
|
88775e1ff9 | ||
|
|
8867e744ff | ||
|
|
4fd064bea6 | ||
|
|
59fbceedcc | ||
|
|
321d57e1a0 | ||
|
|
ba26c7aa00 | ||
|
|
63c763685f | ||
|
|
34a4a94f13 | ||
|
|
f4a73d57a4 | ||
|
|
948114e3e3 | ||
|
|
a3e60d9058 | ||
|
|
8acb233668 | ||
|
|
119589fcb3 | ||
|
|
5ea844964e | ||
|
|
bd8eed57fc | ||
|
|
9cf0f2e973 | ||
|
|
176ad3aa6e | ||
|
|
4d08363580 | ||
|
|
8907bf51d2 | ||
|
|
abe614c705 | ||
|
|
238715037d | ||
|
|
c0a00f68ae | ||
|
|
f0c454ab57 | ||
|
|
089daaeabc | ||
|
|
b9f74ff3d6 | ||
|
|
fcf4d60eee | ||
|
|
e33d5c2dbc | ||
|
|
18d9a7e1f1 | ||
|
|
8488388cbd | ||
|
|
588901f449 | ||
|
|
0a7fdbe533 | ||
|
|
5950c176ca | ||
|
|
23d23409a0 | ||
|
|
9009bedf13 | ||
|
|
d4ac57e240 | ||
|
|
7b59d1770f | ||
|
|
95ead8ffba | ||
|
|
7aa08a77ca | ||
|
|
7e432cdfac | ||
|
|
586672f490 | ||
|
|
b03408de74 | ||
|
|
1e6a28bf5b | ||
|
|
d6e3b64582 | ||
|
|
114c932a8e | ||
|
|
7f7103de06 | ||
|
|
c631a9c726 | ||
|
|
8fd9e56804 | ||
|
|
8a65717f55 | ||
|
|
6d3152a98a | ||
|
|
b438d485f1 | ||
|
|
204349b17b | ||
|
|
86e67fc4a9 | ||
|
|
2bed62926e | ||
|
|
aad8d128a0 | ||
|
|
ec1acbb867 | ||
|
|
e4859c4563 | ||
|
|
8e30eb26bd | ||
|
|
0b5c589ca2 | ||
|
|
65fadddc85 | ||
|
|
ed5fb088c4 | ||
|
|
f81f308118 | ||
|
|
b1390a7b37 | ||
|
|
11d83386a5 | ||
|
|
bb31def011 | ||
|
|
41e03ede95 | ||
|
|
7fea1ecdf6 | ||
|
|
054894271d | ||
|
|
6fef042f0b | ||
|
|
5c0c2d1d09 | ||
|
|
37f9c8ad99 | ||
|
|
2a80f55e2a | ||
|
|
421c878a2d | ||
|
|
36666c2142 | ||
|
|
85801317d1 | ||
|
|
2ed0d65948 | ||
|
|
d459dc4ad1 | ||
|
|
40bc4622ef | ||
|
|
c0f818a07a | ||
|
|
8671fdeda6 | ||
|
|
2619850fb4 | ||
|
|
8feb97dc0d | ||
|
|
4e1ff6dcbb | ||
|
|
8589d752ac | ||
|
|
de4ded68b0 | ||
|
|
9b5a3c5991 | ||
|
|
00b0699c75 | ||
|
|
993cf8bf55 | ||
|
|
7bb7cb8a60 | ||
|
|
b123be5b71 | ||
|
|
ddf5c09a9b | ||
|
|
5f73c08729 | ||
|
|
f503a848c2 | ||
|
|
36a6daccab | ||
|
|
ceb0e26e5e | ||
|
|
284e02bed0 | ||
|
|
3450a57d4a | ||
|
|
592dae31c8 | ||
|
|
2010cbc5fa | ||
|
|
ac0801eced | ||
|
|
ad66e5b060 | ||
|
|
ade4b55520 | ||
|
|
a6d62e0617 | ||
|
|
6e76348df7 | ||
|
|
0d6687f84c | ||
|
|
74d2a9ef9a | ||
|
|
14476d48cc | ||
|
|
ce8ce82567 | ||
|
|
4dc4f1be34 | ||
|
|
16b52331a4 | ||
|
|
5445aaa94e | ||
|
|
2ac3dd6853 | ||
|
|
d8851cb7a0 | ||
|
|
058f6cd2cc | ||
|
|
790cf34d17 | ||
|
|
928d844896 | ||
|
|
939d6a8606 | ||
|
|
58888a74bc | ||
|
|
cc5a71e0e3 | ||
|
|
e83bcf7f9a | ||
|
|
5690e5ce99 | ||
|
|
f2ea8470e5 | ||
|
|
34b9db5afc | ||
|
|
8711d03df7 | ||
|
|
ee448deaba | ||
|
|
6e8db04716 | ||
|
|
658e60cf73 | ||
|
|
4c78f028f8 | ||
|
|
435cc866a3 | ||
|
|
c7d3a558f6 | ||
|
|
089cdb2877 | ||
|
|
ea1e9aa36b | ||
|
|
d0d28ef90d | ||
|
|
6654186a7c | ||
|
|
aa72281eae | ||
|
|
74bcbf828f | ||
|
|
fe39147e64 | ||
|
|
fad00a85e5 | ||
|
|
9c0db4cc83 | ||
|
|
6f18297b3a | ||
|
|
15016413de | ||
|
|
440b7190ed | ||
|
|
c496967e56 | ||
|
|
3934c15895 | ||
|
|
d524e5ef5e | ||
|
|
52f5370c48 | ||
|
|
1b42b4b59a | ||
|
|
7c000ec3ed | ||
|
|
c942e4a07b | ||
|
|
bd54b08261 | ||
|
|
b99c291f47 |
21
.github/workflows/release.yaml
vendored
21
.github/workflows/release.yaml
vendored
@@ -28,6 +28,7 @@ jobs:
|
|||||||
security unlock-keychain -p password build.keychain
|
security unlock-keychain -p password build.keychain
|
||||||
security import certificate.p12 -k build.keychain -P $MACOS_SIGNING_KEY_PASSWORD -T /usr/bin/codesign
|
security import certificate.p12 -k build.keychain -P $MACOS_SIGNING_KEY_PASSWORD -T /usr/bin/codesign
|
||||||
security set-key-partition-list -S apple-tool:,apple:,codesign: -s -k password build.keychain
|
security set-key-partition-list -S apple-tool:,apple:,codesign: -s -k password build.keychain
|
||||||
|
security set-keychain-settings -lut 3600 build.keychain
|
||||||
- uses: actions/setup-go@v5
|
- uses: actions/setup-go@v5
|
||||||
with:
|
with:
|
||||||
go-version-file: go.mod
|
go-version-file: go.mod
|
||||||
@@ -103,6 +104,7 @@ jobs:
|
|||||||
path: |
|
path: |
|
||||||
llm/build/**/bin/*
|
llm/build/**/bin/*
|
||||||
llm/build/**/*.a
|
llm/build/**/*.a
|
||||||
|
dist/windows-amd64/**
|
||||||
|
|
||||||
# ROCm generation step
|
# ROCm generation step
|
||||||
generate-windows-rocm:
|
generate-windows-rocm:
|
||||||
@@ -173,7 +175,9 @@ jobs:
|
|||||||
- uses: actions/upload-artifact@v4
|
- uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: generate-windows-rocm
|
name: generate-windows-rocm
|
||||||
path: llm/build/**/bin/*
|
path: |
|
||||||
|
llm/build/**/bin/*
|
||||||
|
dist/windows-amd64/**
|
||||||
- uses: actions/upload-artifact@v4
|
- uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: windows-rocm-deps
|
name: windows-rocm-deps
|
||||||
@@ -253,7 +257,9 @@ jobs:
|
|||||||
- uses: actions/upload-artifact@v4
|
- uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: generate-windows-cuda
|
name: generate-windows-cuda
|
||||||
path: llm/build/**/bin/*
|
path: |
|
||||||
|
llm/build/**/bin/*
|
||||||
|
dist/windows-amd64/**
|
||||||
- uses: actions/upload-artifact@v4
|
- uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: windows-cuda-deps
|
name: windows-cuda-deps
|
||||||
@@ -306,23 +312,18 @@ jobs:
|
|||||||
- uses: actions/download-artifact@v4
|
- uses: actions/download-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: generate-windows-cpu
|
name: generate-windows-cpu
|
||||||
path: llm/build
|
|
||||||
- uses: actions/download-artifact@v4
|
- uses: actions/download-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: generate-windows-cuda
|
name: generate-windows-cuda
|
||||||
path: llm/build
|
|
||||||
- uses: actions/download-artifact@v4
|
- uses: actions/download-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: windows-cuda-deps
|
name: windows-cuda-deps
|
||||||
path: dist/deps
|
|
||||||
- uses: actions/download-artifact@v4
|
- uses: actions/download-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: windows-rocm-deps
|
name: windows-rocm-deps
|
||||||
path: dist/deps
|
|
||||||
- uses: actions/download-artifact@v4
|
- uses: actions/download-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: generate-windows-rocm
|
name: generate-windows-rocm
|
||||||
path: llm/build
|
|
||||||
- run: dir llm/build
|
- run: dir llm/build
|
||||||
- run: |
|
- run: |
|
||||||
$gopath=(get-command go).source | split-path -parent
|
$gopath=(get-command go).source | split-path -parent
|
||||||
@@ -331,13 +332,13 @@ jobs:
|
|||||||
$env:CMAKE_SYSTEM_VERSION="10.0.22621.0"
|
$env:CMAKE_SYSTEM_VERSION="10.0.22621.0"
|
||||||
$env:PATH="$gopath;$env:PATH"
|
$env:PATH="$gopath;$env:PATH"
|
||||||
$env:OLLAMA_SKIP_GENERATE="1"
|
$env:OLLAMA_SKIP_GENERATE="1"
|
||||||
$env:NVIDIA_DIR=$(resolve-path ".\dist\deps")
|
|
||||||
$env:HIP_PATH=$(resolve-path ".\dist\deps")
|
|
||||||
& .\scripts\build_windows.ps1
|
& .\scripts\build_windows.ps1
|
||||||
- uses: actions/upload-artifact@v4
|
- uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: dist-windows
|
name: dist-windows
|
||||||
path: dist/*.exe
|
path: |
|
||||||
|
dist/OllamaSetup.exe
|
||||||
|
dist/ollama-windows-*.zip
|
||||||
|
|
||||||
# Linux x86 assets built using the container based build
|
# Linux x86 assets built using the container based build
|
||||||
build-linux-amd64:
|
build-linux-amd64:
|
||||||
|
|||||||
44
.github/workflows/test.yaml
vendored
44
.github/workflows/test.yaml
vendored
@@ -1,5 +1,15 @@
|
|||||||
name: test
|
name: test
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
# For PRs, later CI runs preempt previous ones. e.g. a force push on a PR
|
||||||
|
# cancels running CI jobs and starts all new ones.
|
||||||
|
#
|
||||||
|
# For non-PR pushes, concurrency.group needs to be unique for every distinct
|
||||||
|
# CI run we want to have happen. Use run_id, which in practice means all
|
||||||
|
# non-PR CI runs will be allowed to run without preempting each other.
|
||||||
|
group: ${{ github.workflow }}-$${{ github.pull_request.number || github.run_id }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
on:
|
on:
|
||||||
pull_request:
|
pull_request:
|
||||||
paths:
|
paths:
|
||||||
@@ -21,14 +31,16 @@ jobs:
|
|||||||
- id: changes
|
- id: changes
|
||||||
run: |
|
run: |
|
||||||
changed() {
|
changed() {
|
||||||
git diff-tree -r --no-commit-id --name-only ${{ github.event.pull_request.base.sha }} ${{ github.event.pull_request.head.sha }} \
|
git diff-tree -r --no-commit-id --name-only \
|
||||||
| xargs python3 -c "import sys; print(any([x.startswith('$1') for x in sys.argv[1:]]))"
|
$(git merge-base ${{ github.event.pull_request.base.sha }} ${{ github.event.pull_request.head.sha }}) \
|
||||||
|
${{ github.event.pull_request.head.sha }} \
|
||||||
|
| xargs python3 -c "import sys; from pathlib import Path; print(any(Path(x).match(glob) for x in sys.argv[1:] for glob in '$*'.split(' ')))"
|
||||||
}
|
}
|
||||||
|
|
||||||
{
|
{
|
||||||
echo GENERATE=$(changed llm/)
|
echo GENERATE=$(changed 'llm/llama.cpp' 'llm/patches/**' 'llm/ext_server/**' 'llm/generate/**')
|
||||||
echo GENERATE_CUDA=$(changed llm/)
|
echo GENERATE_CUDA=$(changed 'llm/llama.cpp' 'llm/patches/**' 'llm/ext_server/**' 'llm/generate/**')
|
||||||
echo GENERATE_ROCM=$(changed llm/)
|
echo GENERATE_ROCM=$(changed 'llm/llama.cpp' 'llm/patches/**' 'llm/ext_server/**' 'llm/generate/**')
|
||||||
} >>$GITHUB_OUTPUT
|
} >>$GITHUB_OUTPUT
|
||||||
|
|
||||||
generate:
|
generate:
|
||||||
@@ -103,7 +115,9 @@ jobs:
|
|||||||
- uses: actions/upload-artifact@v4
|
- uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: cuda-${{ matrix.cuda-version }}-libraries
|
name: cuda-${{ matrix.cuda-version }}-libraries
|
||||||
path: llm/build/**/bin/*
|
path: |
|
||||||
|
llm/build/**/bin/*
|
||||||
|
dist/windows-amd64/**
|
||||||
generate-rocm:
|
generate-rocm:
|
||||||
needs: [changes]
|
needs: [changes]
|
||||||
if: ${{ needs.changes.outputs.GENERATE_ROCM == 'True' }}
|
if: ${{ needs.changes.outputs.GENERATE_ROCM == 'True' }}
|
||||||
@@ -134,7 +148,9 @@ jobs:
|
|||||||
- uses: actions/upload-artifact@v4
|
- uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: rocm-${{ matrix.rocm-version }}-libraries
|
name: rocm-${{ matrix.rocm-version }}-libraries
|
||||||
path: llm/build/**/bin/*
|
path: |
|
||||||
|
llm/build/**/bin/*
|
||||||
|
dist/windows-amd64/**
|
||||||
|
|
||||||
# ROCm generation step
|
# ROCm generation step
|
||||||
generate-windows-rocm:
|
generate-windows-rocm:
|
||||||
@@ -253,14 +269,9 @@ jobs:
|
|||||||
mkdir -p llm/build/darwin/$ARCH/stub/bin
|
mkdir -p llm/build/darwin/$ARCH/stub/bin
|
||||||
touch llm/build/darwin/$ARCH/stub/bin/ollama_llama_server
|
touch llm/build/darwin/$ARCH/stub/bin/ollama_llama_server
|
||||||
if: ${{ startsWith(matrix.os, 'macos-') }}
|
if: ${{ startsWith(matrix.os, 'macos-') }}
|
||||||
- run: |
|
|
||||||
mkdir -p llm/build/windows/$ARCH/stub/bin
|
|
||||||
touch llm/build/windows/$ARCH/stub/bin/ollama_llama_server
|
|
||||||
if: ${{ startsWith(matrix.os, 'windows-') }}
|
|
||||||
shell: bash
|
|
||||||
- uses: golangci/golangci-lint-action@v4
|
- uses: golangci/golangci-lint-action@v4
|
||||||
with:
|
with:
|
||||||
args: --timeout 8m0s
|
args: --timeout 8m0s -v
|
||||||
test:
|
test:
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
@@ -276,6 +287,8 @@ jobs:
|
|||||||
GOARCH: ${{ matrix.arch }}
|
GOARCH: ${{ matrix.arch }}
|
||||||
CGO_ENABLED: '1'
|
CGO_ENABLED: '1'
|
||||||
OLLAMA_CPU_TARGET: 'static'
|
OLLAMA_CPU_TARGET: 'static'
|
||||||
|
OLLAMA_SKIP_CPU_GENERATE: '1'
|
||||||
|
OLLAMA_SKIP_METAL_GENERATE: '1'
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
@@ -284,7 +297,6 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
go-version-file: go.mod
|
go-version-file: go.mod
|
||||||
cache: true
|
cache: true
|
||||||
- run: go get
|
|
||||||
- run: |
|
- run: |
|
||||||
case ${{ matrix.arch }} in
|
case ${{ matrix.arch }} in
|
||||||
amd64) echo ARCH=x86_64 ;;
|
amd64) echo ARCH=x86_64 ;;
|
||||||
@@ -299,10 +311,6 @@ jobs:
|
|||||||
mkdir -p llm/build/darwin/$ARCH/stub/bin
|
mkdir -p llm/build/darwin/$ARCH/stub/bin
|
||||||
touch llm/build/darwin/$ARCH/stub/bin/ollama_llama_server
|
touch llm/build/darwin/$ARCH/stub/bin/ollama_llama_server
|
||||||
if: ${{ startsWith(matrix.os, 'macos-') }}
|
if: ${{ startsWith(matrix.os, 'macos-') }}
|
||||||
- run: |
|
|
||||||
mkdir -p llm/build/windows/$ARCH/stub/bin
|
|
||||||
touch llm/build/windows/$ARCH/stub/bin/ollama_llama_server
|
|
||||||
if: ${{ startsWith(matrix.os, 'windows-') }}
|
|
||||||
shell: bash
|
shell: bash
|
||||||
- run: go generate ./...
|
- run: go generate ./...
|
||||||
- run: go build
|
- run: go build
|
||||||
|
|||||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -11,4 +11,5 @@ ggml-metal.metal
|
|||||||
.idea
|
.idea
|
||||||
test_data
|
test_data
|
||||||
*.crt
|
*.crt
|
||||||
llm/build
|
llm/build
|
||||||
|
__debug_bin*
|
||||||
75
README.md
75
README.md
@@ -1,5 +1,5 @@
|
|||||||
<div align="center">
|
<div align="center">
|
||||||
<img alt="ollama" height="200px" src="https://github.com/ollama/ollama/assets/3325447/0d0b44e2-8f4a-4e99-9b52-a5c1c741c8f7">
|
<img alt="ollama" height="200px" src="https://github.com/ollama/ollama/assets/3325447/0d0b44e2-8f4a-4e99-9b52-a5c1c741c8f7">
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
# Ollama
|
# Ollama
|
||||||
@@ -51,19 +51,17 @@ Here are some example models that can be downloaded:
|
|||||||
| ------------------ | ---------- | ----- | ------------------------------ |
|
| ------------------ | ---------- | ----- | ------------------------------ |
|
||||||
| Llama 3 | 8B | 4.7GB | `ollama run llama3` |
|
| Llama 3 | 8B | 4.7GB | `ollama run llama3` |
|
||||||
| Llama 3 | 70B | 40GB | `ollama run llama3:70b` |
|
| Llama 3 | 70B | 40GB | `ollama run llama3:70b` |
|
||||||
|
| Phi 3 Mini | 3.8B | 2.3GB | `ollama run phi3` |
|
||||||
|
| Phi 3 Medium | 14B | 7.9GB | `ollama run phi3:medium` |
|
||||||
|
| Gemma | 2B | 1.4GB | `ollama run gemma:2b` |
|
||||||
|
| Gemma | 7B | 4.8GB | `ollama run gemma:7b` |
|
||||||
| Mistral | 7B | 4.1GB | `ollama run mistral` |
|
| Mistral | 7B | 4.1GB | `ollama run mistral` |
|
||||||
| Dolphin Phi | 2.7B | 1.6GB | `ollama run dolphin-phi` |
|
| Moondream 2 | 1.4B | 829MB | `ollama run moondream` |
|
||||||
| Phi-2 | 2.7B | 1.7GB | `ollama run phi` |
|
|
||||||
| Neural Chat | 7B | 4.1GB | `ollama run neural-chat` |
|
| Neural Chat | 7B | 4.1GB | `ollama run neural-chat` |
|
||||||
| Starling | 7B | 4.1GB | `ollama run starling-lm` |
|
| Starling | 7B | 4.1GB | `ollama run starling-lm` |
|
||||||
| Code Llama | 7B | 3.8GB | `ollama run codellama` |
|
| Code Llama | 7B | 3.8GB | `ollama run codellama` |
|
||||||
| Llama 2 Uncensored | 7B | 3.8GB | `ollama run llama2-uncensored` |
|
| Llama 2 Uncensored | 7B | 3.8GB | `ollama run llama2-uncensored` |
|
||||||
| Llama 2 13B | 13B | 7.3GB | `ollama run llama2:13b` |
|
|
||||||
| Llama 2 70B | 70B | 39GB | `ollama run llama2:70b` |
|
|
||||||
| Orca Mini | 3B | 1.9GB | `ollama run orca-mini` |
|
|
||||||
| LLaVA | 7B | 4.5GB | `ollama run llava` |
|
| LLaVA | 7B | 4.5GB | `ollama run llava` |
|
||||||
| Gemma | 2B | 1.4GB | `ollama run gemma:2b` |
|
|
||||||
| Gemma | 7B | 4.8GB | `ollama run gemma:7b` |
|
|
||||||
| Solar | 10.7B | 6.1GB | `ollama run solar` |
|
| Solar | 10.7B | 6.1GB | `ollama run solar` |
|
||||||
|
|
||||||
> Note: You should have at least 8 GB of RAM available to run the 7B models, 16 GB to run the 13B models, and 32 GB to run the 33B models.
|
> Note: You should have at least 8 GB of RAM available to run the 7B models, 16 GB to run the 13B models, and 32 GB to run the 33B models.
|
||||||
@@ -177,7 +175,7 @@ I'm a basic program that prints the famous "Hello, world!" message to the consol
|
|||||||
The image features a yellow smiley face, which is likely the central focus of the picture.
|
The image features a yellow smiley face, which is likely the central focus of the picture.
|
||||||
```
|
```
|
||||||
|
|
||||||
### Pass in prompt as arguments
|
### Pass the prompt as an argument
|
||||||
|
|
||||||
```
|
```
|
||||||
$ ollama run llama3 "Summarize this file: $(cat README.md)"
|
$ ollama run llama3 "Summarize this file: $(cat README.md)"
|
||||||
@@ -196,25 +194,7 @@ ollama list
|
|||||||
|
|
||||||
## Building
|
## Building
|
||||||
|
|
||||||
Install `cmake` and `go`:
|
See the [developer guide](https://github.com/ollama/ollama/blob/main/docs/development.md)
|
||||||
|
|
||||||
```
|
|
||||||
brew install cmake go
|
|
||||||
```
|
|
||||||
|
|
||||||
Then generate dependencies:
|
|
||||||
|
|
||||||
```
|
|
||||||
go generate ./...
|
|
||||||
```
|
|
||||||
|
|
||||||
Then build the binary:
|
|
||||||
|
|
||||||
```
|
|
||||||
go build .
|
|
||||||
```
|
|
||||||
|
|
||||||
More detailed instructions can be found in the [developer guide](https://github.com/ollama/ollama/blob/main/docs/development.md)
|
|
||||||
|
|
||||||
### Running local builds
|
### Running local builds
|
||||||
|
|
||||||
@@ -260,16 +240,18 @@ See the [API documentation](./docs/api.md) for all endpoints.
|
|||||||
|
|
||||||
### Web & Desktop
|
### Web & Desktop
|
||||||
|
|
||||||
|
- [Open WebUI](https://github.com/open-webui/open-webui)
|
||||||
|
- [Enchanted (macOS native)](https://github.com/AugustDev/enchanted)
|
||||||
|
- [Hollama](https://github.com/fmaclen/hollama)
|
||||||
- [Lollms-Webui](https://github.com/ParisNeo/lollms-webui)
|
- [Lollms-Webui](https://github.com/ParisNeo/lollms-webui)
|
||||||
- [LibreChat](https://github.com/danny-avila/LibreChat)
|
- [LibreChat](https://github.com/danny-avila/LibreChat)
|
||||||
- [Bionic GPT](https://github.com/bionic-gpt/bionic-gpt)
|
- [Bionic GPT](https://github.com/bionic-gpt/bionic-gpt)
|
||||||
- [Enchanted (macOS native)](https://github.com/AugustDev/enchanted)
|
|
||||||
- [HTML UI](https://github.com/rtcfirefly/ollama-ui)
|
- [HTML UI](https://github.com/rtcfirefly/ollama-ui)
|
||||||
- [Saddle](https://github.com/jikkuatwork/saddle)
|
- [Saddle](https://github.com/jikkuatwork/saddle)
|
||||||
- [Chatbot UI](https://github.com/ivanfioravanti/chatbot-ollama)
|
- [Chatbot UI](https://github.com/ivanfioravanti/chatbot-ollama)
|
||||||
|
- [Chatbot UI v2](https://github.com/mckaywrigley/chatbot-ui)
|
||||||
- [Typescript UI](https://github.com/ollama-interface/Ollama-Gui?tab=readme-ov-file)
|
- [Typescript UI](https://github.com/ollama-interface/Ollama-Gui?tab=readme-ov-file)
|
||||||
- [Minimalistic React UI for Ollama Models](https://github.com/richawo/minimal-llm-ui)
|
- [Minimalistic React UI for Ollama Models](https://github.com/richawo/minimal-llm-ui)
|
||||||
- [Open WebUI](https://github.com/open-webui/open-webui)
|
|
||||||
- [Ollamac](https://github.com/kevinhermawan/Ollamac)
|
- [Ollamac](https://github.com/kevinhermawan/Ollamac)
|
||||||
- [big-AGI](https://github.com/enricoros/big-AGI/blob/main/docs/config-local-ollama.md)
|
- [big-AGI](https://github.com/enricoros/big-AGI/blob/main/docs/config-local-ollama.md)
|
||||||
- [Cheshire Cat assistant framework](https://github.com/cheshire-cat-ai/core)
|
- [Cheshire Cat assistant framework](https://github.com/cheshire-cat-ai/core)
|
||||||
@@ -287,13 +269,22 @@ See the [API documentation](./docs/api.md) for all endpoints.
|
|||||||
- [OllamaGUI](https://github.com/enoch1118/ollamaGUI)
|
- [OllamaGUI](https://github.com/enoch1118/ollamaGUI)
|
||||||
- [OpenAOE](https://github.com/InternLM/OpenAOE)
|
- [OpenAOE](https://github.com/InternLM/OpenAOE)
|
||||||
- [Odin Runes](https://github.com/leonid20000/OdinRunes)
|
- [Odin Runes](https://github.com/leonid20000/OdinRunes)
|
||||||
- [LLM-X: Progressive Web App](https://github.com/mrdjohnson/llm-x)
|
- [LLM-X](https://github.com/mrdjohnson/llm-x) (Progressive Web App)
|
||||||
- [AnythingLLM (Docker + MacOs/Windows/Linux native app)](https://github.com/Mintplex-Labs/anything-llm)
|
- [AnythingLLM (Docker + MacOs/Windows/Linux native app)](https://github.com/Mintplex-Labs/anything-llm)
|
||||||
- [Ollama Basic Chat: Uses HyperDiv Reactive UI](https://github.com/rapidarchitect/ollama_basic_chat)
|
- [Ollama Basic Chat: Uses HyperDiv Reactive UI](https://github.com/rapidarchitect/ollama_basic_chat)
|
||||||
- [Ollama-chats RPG](https://github.com/drazdra/ollama-chats)
|
- [Ollama-chats RPG](https://github.com/drazdra/ollama-chats)
|
||||||
- [ChatOllama: Open Source Chatbot based on Ollama with Knowledge Bases](https://github.com/sugarforever/chat-ollama)
|
- [QA-Pilot](https://github.com/reid41/QA-Pilot) (Chat with Code Repository)
|
||||||
- [CRAG Ollama Chat: Simple Web Search with Corrective RAG](https://github.com/Nagi-ovo/CRAG-Ollama-Chat)
|
- [ChatOllama](https://github.com/sugarforever/chat-ollama) (Open Source Chatbot based on Ollama with Knowledge Bases)
|
||||||
- [RAGFlow: Open-source Retrieval-Augmented Generation engine based on deep document understanding](https://github.com/infiniflow/ragflow)
|
- [CRAG Ollama Chat](https://github.com/Nagi-ovo/CRAG-Ollama-Chat) (Simple Web Search with Corrective RAG)
|
||||||
|
- [RAGFlow](https://github.com/infiniflow/ragflow) (Open-source Retrieval-Augmented Generation engine based on deep document understanding)
|
||||||
|
- [StreamDeploy](https://github.com/StreamDeploy-DevRel/streamdeploy-llm-app-scaffold) (LLM Application Scaffold)
|
||||||
|
- [chat](https://github.com/swuecho/chat) (chat web app for teams)
|
||||||
|
- [Lobe Chat](https://github.com/lobehub/lobe-chat) with [Integrating Doc](https://lobehub.com/docs/self-hosting/examples/ollama)
|
||||||
|
- [Ollama RAG Chatbot](https://github.com/datvodinh/rag-chatbot.git) (Local Chat with multiple PDFs using Ollama and RAG)
|
||||||
|
- [BrainSoup](https://www.nurgo-software.com/products/brainsoup) (Flexible native client with RAG & multi-agent automation)
|
||||||
|
- [macai](https://github.com/Renset/macai) (macOS client for Ollama, ChatGPT, and other compatible API back-ends)
|
||||||
|
- [Olpaka](https://github.com/Otacon/olpaka) (User-friendly Flutter Web App for Ollama)
|
||||||
|
- [OllamaSpring](https://github.com/CrazyNeil/OllamaSpring) (Ollama Client for macOS)
|
||||||
|
|
||||||
### Terminal
|
### Terminal
|
||||||
|
|
||||||
@@ -309,11 +300,13 @@ See the [API documentation](./docs/api.md) for all endpoints.
|
|||||||
- [Oatmeal](https://github.com/dustinblackman/oatmeal)
|
- [Oatmeal](https://github.com/dustinblackman/oatmeal)
|
||||||
- [cmdh](https://github.com/pgibler/cmdh)
|
- [cmdh](https://github.com/pgibler/cmdh)
|
||||||
- [ooo](https://github.com/npahlfer/ooo)
|
- [ooo](https://github.com/npahlfer/ooo)
|
||||||
|
- [shell-pilot](https://github.com/reid41/shell-pilot)
|
||||||
- [tenere](https://github.com/pythops/tenere)
|
- [tenere](https://github.com/pythops/tenere)
|
||||||
- [llm-ollama](https://github.com/taketwo/llm-ollama) for [Datasette's LLM CLI](https://llm.datasette.io/en/stable/).
|
- [llm-ollama](https://github.com/taketwo/llm-ollama) for [Datasette's LLM CLI](https://llm.datasette.io/en/stable/).
|
||||||
- [typechat-cli](https://github.com/anaisbetts/typechat-cli)
|
- [typechat-cli](https://github.com/anaisbetts/typechat-cli)
|
||||||
- [ShellOracle](https://github.com/djcopley/ShellOracle)
|
- [ShellOracle](https://github.com/djcopley/ShellOracle)
|
||||||
- [tlm](https://github.com/yusufcanb/tlm)
|
- [tlm](https://github.com/yusufcanb/tlm)
|
||||||
|
- [podman-ollama](https://github.com/ericcurtin/podman-ollama)
|
||||||
|
|
||||||
### Database
|
### Database
|
||||||
|
|
||||||
@@ -324,6 +317,7 @@ See the [API documentation](./docs/api.md) for all endpoints.
|
|||||||
|
|
||||||
- [Pacman](https://archlinux.org/packages/extra/x86_64/ollama/)
|
- [Pacman](https://archlinux.org/packages/extra/x86_64/ollama/)
|
||||||
- [Helm Chart](https://artifacthub.io/packages/helm/ollama-helm/ollama)
|
- [Helm Chart](https://artifacthub.io/packages/helm/ollama-helm/ollama)
|
||||||
|
- [Guix channel](https://codeberg.org/tusharhero/ollama-guix)
|
||||||
|
|
||||||
### Libraries
|
### Libraries
|
||||||
|
|
||||||
@@ -345,10 +339,13 @@ See the [API documentation](./docs/api.md) for all endpoints.
|
|||||||
- [Haystack](https://github.com/deepset-ai/haystack-integrations/blob/main/integrations/ollama.md)
|
- [Haystack](https://github.com/deepset-ai/haystack-integrations/blob/main/integrations/ollama.md)
|
||||||
- [Elixir LangChain](https://github.com/brainlid/langchain)
|
- [Elixir LangChain](https://github.com/brainlid/langchain)
|
||||||
- [Ollama for R - rollama](https://github.com/JBGruber/rollama)
|
- [Ollama for R - rollama](https://github.com/JBGruber/rollama)
|
||||||
|
- [Ollama for R - ollama-r](https://github.com/hauselin/ollama-r)
|
||||||
- [Ollama-ex for Elixir](https://github.com/lebrunel/ollama-ex)
|
- [Ollama-ex for Elixir](https://github.com/lebrunel/ollama-ex)
|
||||||
- [Ollama Connector for SAP ABAP](https://github.com/b-tocs/abap_btocs_ollama)
|
- [Ollama Connector for SAP ABAP](https://github.com/b-tocs/abap_btocs_ollama)
|
||||||
- [Testcontainers](https://testcontainers.com/modules/ollama/)
|
- [Testcontainers](https://testcontainers.com/modules/ollama/)
|
||||||
|
- [Portkey](https://portkey.ai/docs/welcome/integration-guides/ollama)
|
||||||
|
- [PromptingTools.jl](https://github.com/svilupp/PromptingTools.jl) with an [example](https://svilupp.github.io/PromptingTools.jl/dev/examples/working_with_ollama)
|
||||||
|
- [LlamaScript](https://github.com/Project-Llama/llamascript)
|
||||||
### Mobile
|
### Mobile
|
||||||
|
|
||||||
- [Enchanted](https://github.com/AugustDev/enchanted)
|
- [Enchanted](https://github.com/AugustDev/enchanted)
|
||||||
@@ -367,17 +364,21 @@ See the [API documentation](./docs/api.md) for all endpoints.
|
|||||||
- [Ollama Telegram Bot](https://github.com/ruecat/ollama-telegram)
|
- [Ollama Telegram Bot](https://github.com/ruecat/ollama-telegram)
|
||||||
- [Hass Ollama Conversation](https://github.com/ej52/hass-ollama-conversation)
|
- [Hass Ollama Conversation](https://github.com/ej52/hass-ollama-conversation)
|
||||||
- [Rivet plugin](https://github.com/abrenneke/rivet-plugin-ollama)
|
- [Rivet plugin](https://github.com/abrenneke/rivet-plugin-ollama)
|
||||||
- [Llama Coder](https://github.com/ex3ndr/llama-coder) (Copilot alternative using Ollama)
|
|
||||||
- [Obsidian BMO Chatbot plugin](https://github.com/longy2k/obsidian-bmo-chatbot)
|
- [Obsidian BMO Chatbot plugin](https://github.com/longy2k/obsidian-bmo-chatbot)
|
||||||
- [Cliobot](https://github.com/herval/cliobot) (Telegram bot with Ollama support)
|
- [Cliobot](https://github.com/herval/cliobot) (Telegram bot with Ollama support)
|
||||||
- [Copilot for Obsidian plugin](https://github.com/logancyang/obsidian-copilot)
|
- [Copilot for Obsidian plugin](https://github.com/logancyang/obsidian-copilot)
|
||||||
- [Obsidian Local GPT plugin](https://github.com/pfrankov/obsidian-local-gpt)
|
- [Obsidian Local GPT plugin](https://github.com/pfrankov/obsidian-local-gpt)
|
||||||
- [Open Interpreter](https://docs.openinterpreter.com/language-model-setup/local-models/ollama)
|
- [Open Interpreter](https://docs.openinterpreter.com/language-model-setup/local-models/ollama)
|
||||||
|
- [Llama Coder](https://github.com/ex3ndr/llama-coder) (Copilot alternative using Ollama)
|
||||||
|
- [Ollama Copilot](https://github.com/bernardo-bruning/ollama-copilot) (Proxy that allows you to use ollama as a copilot like Github copilot)
|
||||||
- [twinny](https://github.com/rjmacarthy/twinny) (Copilot and Copilot chat alternative using Ollama)
|
- [twinny](https://github.com/rjmacarthy/twinny) (Copilot and Copilot chat alternative using Ollama)
|
||||||
- [Wingman-AI](https://github.com/RussellCanfield/wingman-ai) (Copilot code and chat alternative using Ollama and HuggingFace)
|
- [Wingman-AI](https://github.com/RussellCanfield/wingman-ai) (Copilot code and chat alternative using Ollama and HuggingFace)
|
||||||
- [Page Assist](https://github.com/n4ze3m/page-assist) (Chrome Extension)
|
- [Page Assist](https://github.com/n4ze3m/page-assist) (Chrome Extension)
|
||||||
- [AI Telegram Bot](https://github.com/tusharhero/aitelegrambot) (Telegram bot using Ollama in backend)
|
- [AI Telegram Bot](https://github.com/tusharhero/aitelegrambot) (Telegram bot using Ollama in backend)
|
||||||
- [AI ST Completion](https://github.com/yaroslavyaroslav/OpenAI-sublime-text) (Sublime Text 4 AI assistant plugin with Ollama support)
|
- [AI ST Completion](https://github.com/yaroslavyaroslav/OpenAI-sublime-text) (Sublime Text 4 AI assistant plugin with Ollama support)
|
||||||
|
- [Discord-Ollama Chat Bot](https://github.com/kevinthedang/discord-ollama) (Generalized TypeScript Discord Bot w/ Tuning Documentation)
|
||||||
|
- [Discord AI chat/moderation bot](https://github.com/rapmd73/Companion) Chat/moderation bot written in python. Uses Ollama to create personalities.
|
||||||
|
|
||||||
### Supported backends
|
### Supported backends
|
||||||
- [llama.cpp](https://github.com/ggerganov/llama.cpp) project founded by Georgi Gerganov.
|
- [llama.cpp](https://github.com/ggerganov/llama.cpp) project founded by Georgi Gerganov.
|
||||||
|
|
||||||
|
|||||||
@@ -1,9 +1,16 @@
|
|||||||
// Package api implements the client-side API for code wishing to interact
|
// Package api implements the client-side API for code wishing to interact
|
||||||
// with the ollama service. The methods of the [Client] type correspond to
|
// with the ollama service. The methods of the [Client] type correspond to
|
||||||
// the ollama REST API as described in https://github.com/ollama/ollama/blob/main/docs/api.md
|
// the ollama REST API as described in [the API documentation].
|
||||||
//
|
|
||||||
// The ollama command-line client itself uses this package to interact with
|
// The ollama command-line client itself uses this package to interact with
|
||||||
// the backend service.
|
// the backend service.
|
||||||
|
//
|
||||||
|
// # Examples
|
||||||
|
//
|
||||||
|
// Several examples of using this package are available [in the GitHub
|
||||||
|
// repository].
|
||||||
|
//
|
||||||
|
// [the API documentation]: https://github.com/ollama/ollama/blob/main/docs/api.md
|
||||||
|
// [in the GitHub repository]: https://github.com/ollama/ollama/tree/main/examples
|
||||||
package api
|
package api
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -18,6 +25,7 @@ import (
|
|||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
"runtime"
|
"runtime"
|
||||||
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/ollama/ollama/format"
|
"github.com/ollama/ollama/format"
|
||||||
@@ -57,12 +65,36 @@ func checkError(resp *http.Response, body []byte) error {
|
|||||||
// If the variable is not specified, a default ollama host and port will be
|
// If the variable is not specified, a default ollama host and port will be
|
||||||
// used.
|
// used.
|
||||||
func ClientFromEnvironment() (*Client, error) {
|
func ClientFromEnvironment() (*Client, error) {
|
||||||
|
ollamaHost, err := GetOllamaHost()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Client{
|
||||||
|
base: &url.URL{
|
||||||
|
Scheme: ollamaHost.Scheme,
|
||||||
|
Host: net.JoinHostPort(ollamaHost.Host, ollamaHost.Port),
|
||||||
|
},
|
||||||
|
http: http.DefaultClient,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type OllamaHost struct {
|
||||||
|
Scheme string
|
||||||
|
Host string
|
||||||
|
Port string
|
||||||
|
}
|
||||||
|
|
||||||
|
func GetOllamaHost() (OllamaHost, error) {
|
||||||
defaultPort := "11434"
|
defaultPort := "11434"
|
||||||
|
|
||||||
scheme, hostport, ok := strings.Cut(os.Getenv("OLLAMA_HOST"), "://")
|
hostVar := os.Getenv("OLLAMA_HOST")
|
||||||
|
hostVar = strings.TrimSpace(strings.Trim(strings.TrimSpace(hostVar), "\"'"))
|
||||||
|
|
||||||
|
scheme, hostport, ok := strings.Cut(hostVar, "://")
|
||||||
switch {
|
switch {
|
||||||
case !ok:
|
case !ok:
|
||||||
scheme, hostport = "http", os.Getenv("OLLAMA_HOST")
|
scheme, hostport = "http", hostVar
|
||||||
case scheme == "http":
|
case scheme == "http":
|
||||||
defaultPort = "80"
|
defaultPort = "80"
|
||||||
case scheme == "https":
|
case scheme == "https":
|
||||||
@@ -82,15 +114,24 @@ func ClientFromEnvironment() (*Client, error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return &Client{
|
if portNum, err := strconv.ParseInt(port, 10, 32); err != nil || portNum > 65535 || portNum < 0 {
|
||||||
base: &url.URL{
|
return OllamaHost{}, ErrInvalidHostPort
|
||||||
Scheme: scheme,
|
}
|
||||||
Host: net.JoinHostPort(host, port),
|
|
||||||
},
|
return OllamaHost{
|
||||||
http: http.DefaultClient,
|
Scheme: scheme,
|
||||||
|
Host: host,
|
||||||
|
Port: port,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func NewClient(base *url.URL, http *http.Client) *Client {
|
||||||
|
return &Client{
|
||||||
|
base: base,
|
||||||
|
http: http,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (c *Client) do(ctx context.Context, method, path string, reqData, respData any) error {
|
func (c *Client) do(ctx context.Context, method, path string, reqData, respData any) error {
|
||||||
var reqBody io.Reader
|
var reqBody io.Reader
|
||||||
var data []byte
|
var data []byte
|
||||||
@@ -265,8 +306,14 @@ func (c *Client) Pull(ctx context.Context, req *PullRequest, fn PullProgressFunc
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// PushProgressFunc is a function that [Client.Push] invokes when progress is
|
||||||
|
// made.
|
||||||
|
// It's similar to other progress function types like [PullProgressFunc].
|
||||||
type PushProgressFunc func(ProgressResponse) error
|
type PushProgressFunc func(ProgressResponse) error
|
||||||
|
|
||||||
|
// Push uploads a model to the model library; requires registering for ollama.ai
|
||||||
|
// and adding a public key first. fn is called each time progress is made on
|
||||||
|
// the request and can be used to display a progress bar, etc.
|
||||||
func (c *Client) Push(ctx context.Context, req *PushRequest, fn PushProgressFunc) error {
|
func (c *Client) Push(ctx context.Context, req *PushRequest, fn PushProgressFunc) error {
|
||||||
return c.stream(ctx, http.MethodPost, "/api/push", req, func(bts []byte) error {
|
return c.stream(ctx, http.MethodPost, "/api/push", req, func(bts []byte) error {
|
||||||
var resp ProgressResponse
|
var resp ProgressResponse
|
||||||
@@ -278,8 +325,15 @@ func (c *Client) Push(ctx context.Context, req *PushRequest, fn PushProgressFunc
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CreateProgressFunc is a function that [Client.Create] invokes when progress
|
||||||
|
// is made.
|
||||||
|
// It's similar to other progress function types like [PullProgressFunc].
|
||||||
type CreateProgressFunc func(ProgressResponse) error
|
type CreateProgressFunc func(ProgressResponse) error
|
||||||
|
|
||||||
|
// Create creates a model from a [Modelfile]. fn is a progress function that
|
||||||
|
// behaves similarly to other methods (see [Client.Pull]).
|
||||||
|
//
|
||||||
|
// [Modelfile]: https://github.com/ollama/ollama/blob/main/docs/modelfile.md
|
||||||
func (c *Client) Create(ctx context.Context, req *CreateRequest, fn CreateProgressFunc) error {
|
func (c *Client) Create(ctx context.Context, req *CreateRequest, fn CreateProgressFunc) error {
|
||||||
return c.stream(ctx, http.MethodPost, "/api/create", req, func(bts []byte) error {
|
return c.stream(ctx, http.MethodPost, "/api/create", req, func(bts []byte) error {
|
||||||
var resp ProgressResponse
|
var resp ProgressResponse
|
||||||
@@ -291,6 +345,7 @@ func (c *Client) Create(ctx context.Context, req *CreateRequest, fn CreateProgre
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// List lists models that are available locally.
|
||||||
func (c *Client) List(ctx context.Context) (*ListResponse, error) {
|
func (c *Client) List(ctx context.Context) (*ListResponse, error) {
|
||||||
var lr ListResponse
|
var lr ListResponse
|
||||||
if err := c.do(ctx, http.MethodGet, "/api/tags", nil, &lr); err != nil {
|
if err := c.do(ctx, http.MethodGet, "/api/tags", nil, &lr); err != nil {
|
||||||
@@ -299,6 +354,17 @@ func (c *Client) List(ctx context.Context) (*ListResponse, error) {
|
|||||||
return &lr, nil
|
return &lr, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// List running models.
|
||||||
|
func (c *Client) ListRunning(ctx context.Context) (*ListResponse, error) {
|
||||||
|
var lr ListResponse
|
||||||
|
if err := c.do(ctx, http.MethodGet, "/api/ps", nil, &lr); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &lr, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy copies a model - creating a model with another name from an existing
|
||||||
|
// model.
|
||||||
func (c *Client) Copy(ctx context.Context, req *CopyRequest) error {
|
func (c *Client) Copy(ctx context.Context, req *CopyRequest) error {
|
||||||
if err := c.do(ctx, http.MethodPost, "/api/copy", req, nil); err != nil {
|
if err := c.do(ctx, http.MethodPost, "/api/copy", req, nil); err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -306,6 +372,7 @@ func (c *Client) Copy(ctx context.Context, req *CopyRequest) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Delete deletes a model and its data.
|
||||||
func (c *Client) Delete(ctx context.Context, req *DeleteRequest) error {
|
func (c *Client) Delete(ctx context.Context, req *DeleteRequest) error {
|
||||||
if err := c.do(ctx, http.MethodDelete, "/api/delete", req, nil); err != nil {
|
if err := c.do(ctx, http.MethodDelete, "/api/delete", req, nil); err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -313,6 +380,7 @@ func (c *Client) Delete(ctx context.Context, req *DeleteRequest) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Show obtains model information, including details, modelfile, license etc.
|
||||||
func (c *Client) Show(ctx context.Context, req *ShowRequest) (*ShowResponse, error) {
|
func (c *Client) Show(ctx context.Context, req *ShowRequest) (*ShowResponse, error) {
|
||||||
var resp ShowResponse
|
var resp ShowResponse
|
||||||
if err := c.do(ctx, http.MethodPost, "/api/show", req, &resp); err != nil {
|
if err := c.do(ctx, http.MethodPost, "/api/show", req, &resp); err != nil {
|
||||||
@@ -321,12 +389,16 @@ func (c *Client) Show(ctx context.Context, req *ShowRequest) (*ShowResponse, err
|
|||||||
return &resp, nil
|
return &resp, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Hearbeat checks if the server has started and is responsive; if yes, it
|
||||||
|
// returns nil, otherwise an error.
|
||||||
func (c *Client) Heartbeat(ctx context.Context) error {
|
func (c *Client) Heartbeat(ctx context.Context) error {
|
||||||
if err := c.do(ctx, http.MethodHead, "/", nil, nil); err != nil {
|
if err := c.do(ctx, http.MethodHead, "/", nil, nil); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Embeddings generates embeddings from a model.
|
||||||
func (c *Client) Embeddings(ctx context.Context, req *EmbeddingRequest) (*EmbeddingResponse, error) {
|
func (c *Client) Embeddings(ctx context.Context, req *EmbeddingRequest) (*EmbeddingResponse, error) {
|
||||||
var resp EmbeddingResponse
|
var resp EmbeddingResponse
|
||||||
if err := c.do(ctx, http.MethodPost, "/api/embeddings", req, &resp); err != nil {
|
if err := c.do(ctx, http.MethodPost, "/api/embeddings", req, &resp); err != nil {
|
||||||
@@ -335,10 +407,13 @@ func (c *Client) Embeddings(ctx context.Context, req *EmbeddingRequest) (*Embedd
|
|||||||
return &resp, nil
|
return &resp, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CreateBlob creates a blob from a file on the server. digest is the
|
||||||
|
// expected SHA256 digest of the file, and r represents the file.
|
||||||
func (c *Client) CreateBlob(ctx context.Context, digest string, r io.Reader) error {
|
func (c *Client) CreateBlob(ctx context.Context, digest string, r io.Reader) error {
|
||||||
return c.do(ctx, http.MethodPost, fmt.Sprintf("/api/blobs/%s", digest), r, nil)
|
return c.do(ctx, http.MethodPost, fmt.Sprintf("/api/blobs/%s", digest), r, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Version returns the Ollama server version as a string.
|
||||||
func (c *Client) Version(ctx context.Context) (string, error) {
|
func (c *Client) Version(ctx context.Context) (string, error) {
|
||||||
var version struct {
|
var version struct {
|
||||||
Version string `json:"version"`
|
Version string `json:"version"`
|
||||||
|
|||||||
@@ -1,6 +1,12 @@
|
|||||||
package api
|
package api
|
||||||
|
|
||||||
import "testing"
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
func TestClientFromEnvironment(t *testing.T) {
|
func TestClientFromEnvironment(t *testing.T) {
|
||||||
type testCase struct {
|
type testCase struct {
|
||||||
@@ -40,4 +46,40 @@ func TestClientFromEnvironment(t *testing.T) {
|
|||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
hostTestCases := map[string]*testCase{
|
||||||
|
"empty": {value: "", expect: "127.0.0.1:11434"},
|
||||||
|
"only address": {value: "1.2.3.4", expect: "1.2.3.4:11434"},
|
||||||
|
"only port": {value: ":1234", expect: ":1234"},
|
||||||
|
"address and port": {value: "1.2.3.4:1234", expect: "1.2.3.4:1234"},
|
||||||
|
"hostname": {value: "example.com", expect: "example.com:11434"},
|
||||||
|
"hostname and port": {value: "example.com:1234", expect: "example.com:1234"},
|
||||||
|
"zero port": {value: ":0", expect: ":0"},
|
||||||
|
"too large port": {value: ":66000", err: ErrInvalidHostPort},
|
||||||
|
"too small port": {value: ":-1", err: ErrInvalidHostPort},
|
||||||
|
"ipv6 localhost": {value: "[::1]", expect: "[::1]:11434"},
|
||||||
|
"ipv6 world open": {value: "[::]", expect: "[::]:11434"},
|
||||||
|
"ipv6 no brackets": {value: "::1", expect: "[::1]:11434"},
|
||||||
|
"ipv6 + port": {value: "[::1]:1337", expect: "[::1]:1337"},
|
||||||
|
"extra space": {value: " 1.2.3.4 ", expect: "1.2.3.4:11434"},
|
||||||
|
"extra quotes": {value: "\"1.2.3.4\"", expect: "1.2.3.4:11434"},
|
||||||
|
"extra space+quotes": {value: " \" 1.2.3.4 \" ", expect: "1.2.3.4:11434"},
|
||||||
|
"extra single quotes": {value: "'1.2.3.4'", expect: "1.2.3.4:11434"},
|
||||||
|
}
|
||||||
|
|
||||||
|
for k, v := range hostTestCases {
|
||||||
|
t.Run(k, func(t *testing.T) {
|
||||||
|
t.Setenv("OLLAMA_HOST", v.value)
|
||||||
|
|
||||||
|
oh, err := GetOllamaHost()
|
||||||
|
if err != v.err {
|
||||||
|
t.Fatalf("expected %s, got %s", v.err, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err == nil {
|
||||||
|
host := net.JoinHostPort(oh.Host, oh.Port)
|
||||||
|
assert.Equal(t, v.expect, host, fmt.Sprintf("%s: expected %s, got %s", k, v.expect, host))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
253
api/types.go
253
api/types.go
@@ -4,6 +4,7 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
"math"
|
"math"
|
||||||
"os"
|
"os"
|
||||||
"reflect"
|
"reflect"
|
||||||
@@ -12,6 +13,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// StatusError is an error with and HTTP status code.
|
||||||
type StatusError struct {
|
type StatusError struct {
|
||||||
StatusCode int
|
StatusCode int
|
||||||
Status string
|
Status string
|
||||||
@@ -32,6 +34,7 @@ func (e StatusError) Error() string {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ImageData represents the raw binary data of an image file.
|
||||||
type ImageData []byte
|
type ImageData []byte
|
||||||
|
|
||||||
// GenerateRequest describes a request sent by [Client.Generate]. While you
|
// GenerateRequest describes a request sent by [Client.Generate]. While you
|
||||||
@@ -77,26 +80,44 @@ type GenerateRequest struct {
|
|||||||
Options map[string]interface{} `json:"options"`
|
Options map[string]interface{} `json:"options"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ChatRequest describes a request sent by [Client.Chat].
|
||||||
type ChatRequest struct {
|
type ChatRequest struct {
|
||||||
Model string `json:"model"`
|
// Model is the model name, as in [GenerateRequest].
|
||||||
Messages []Message `json:"messages"`
|
Model string `json:"model"`
|
||||||
Stream *bool `json:"stream,omitempty"`
|
|
||||||
Format string `json:"format"`
|
// Messages is the messages of the chat - can be used to keep a chat memory.
|
||||||
|
Messages []Message `json:"messages"`
|
||||||
|
|
||||||
|
// Stream enable streaming of returned response; true by default.
|
||||||
|
Stream *bool `json:"stream,omitempty"`
|
||||||
|
|
||||||
|
// Format is the format to return the response in (e.g. "json").
|
||||||
|
Format string `json:"format"`
|
||||||
|
|
||||||
|
// KeepAlive controls how long the model will stay loaded into memory
|
||||||
|
// followin the request.
|
||||||
KeepAlive *Duration `json:"keep_alive,omitempty"`
|
KeepAlive *Duration `json:"keep_alive,omitempty"`
|
||||||
|
|
||||||
|
// Options lists model-specific options.
|
||||||
Options map[string]interface{} `json:"options"`
|
Options map[string]interface{} `json:"options"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Message is a single message in a chat sequence. The message contains the
|
||||||
|
// role ("system", "user", or "assistant"), the content and an optional list
|
||||||
|
// of images.
|
||||||
type Message struct {
|
type Message struct {
|
||||||
Role string `json:"role"` // one of ["system", "user", "assistant"]
|
Role string `json:"role"`
|
||||||
Content string `json:"content"`
|
Content string `json:"content"`
|
||||||
Images []ImageData `json:"images,omitempty"`
|
Images []ImageData `json:"images,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ChatResponse is the response returned by [Client.Chat]. Its fields are
|
||||||
|
// similar to [GenerateResponse].
|
||||||
type ChatResponse struct {
|
type ChatResponse struct {
|
||||||
Model string `json:"model"`
|
Model string `json:"model"`
|
||||||
CreatedAt time.Time `json:"created_at"`
|
CreatedAt time.Time `json:"created_at"`
|
||||||
Message Message `json:"message"`
|
Message Message `json:"message"`
|
||||||
|
DoneReason string `json:"done_reason,omitempty"`
|
||||||
|
|
||||||
Done bool `json:"done"`
|
Done bool `json:"done"`
|
||||||
|
|
||||||
@@ -112,7 +133,8 @@ type Metrics struct {
|
|||||||
EvalDuration time.Duration `json:"eval_duration,omitempty"`
|
EvalDuration time.Duration `json:"eval_duration,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Options specified in GenerateRequest, if you add a new option here add it to the API docs also
|
// Options specified in [GenerateRequest], if you add a new option here add it
|
||||||
|
// to the API docs also.
|
||||||
type Options struct {
|
type Options struct {
|
||||||
Runner
|
Runner
|
||||||
|
|
||||||
@@ -141,7 +163,6 @@ type Runner struct {
|
|||||||
UseNUMA bool `json:"numa,omitempty"`
|
UseNUMA bool `json:"numa,omitempty"`
|
||||||
NumCtx int `json:"num_ctx,omitempty"`
|
NumCtx int `json:"num_ctx,omitempty"`
|
||||||
NumBatch int `json:"num_batch,omitempty"`
|
NumBatch int `json:"num_batch,omitempty"`
|
||||||
NumGQA int `json:"num_gqa,omitempty"`
|
|
||||||
NumGPU int `json:"num_gpu,omitempty"`
|
NumGPU int `json:"num_gpu,omitempty"`
|
||||||
MainGPU int `json:"main_gpu,omitempty"`
|
MainGPU int `json:"main_gpu,omitempty"`
|
||||||
LowVRAM bool `json:"low_vram,omitempty"`
|
LowVRAM bool `json:"low_vram,omitempty"`
|
||||||
@@ -151,36 +172,45 @@ type Runner struct {
|
|||||||
UseMMap bool `json:"use_mmap,omitempty"`
|
UseMMap bool `json:"use_mmap,omitempty"`
|
||||||
UseMLock bool `json:"use_mlock,omitempty"`
|
UseMLock bool `json:"use_mlock,omitempty"`
|
||||||
NumThread int `json:"num_thread,omitempty"`
|
NumThread int `json:"num_thread,omitempty"`
|
||||||
|
|
||||||
// Unused: RopeFrequencyBase is ignored. Instead the value in the model will be used
|
|
||||||
RopeFrequencyBase float32 `json:"rope_frequency_base,omitempty"`
|
|
||||||
// Unused: RopeFrequencyScale is ignored. Instead the value in the model will be used
|
|
||||||
RopeFrequencyScale float32 `json:"rope_frequency_scale,omitempty"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// EmbeddingRequest is the request passed to [Client.Embeddings].
|
||||||
type EmbeddingRequest struct {
|
type EmbeddingRequest struct {
|
||||||
Model string `json:"model"`
|
// Model is the model name.
|
||||||
Prompt string `json:"prompt"`
|
Model string `json:"model"`
|
||||||
|
|
||||||
|
// Prompt is the textual prompt to embed.
|
||||||
|
Prompt string `json:"prompt"`
|
||||||
|
|
||||||
|
// KeepAlive controls how long the model will stay loaded in memory following
|
||||||
|
// this request.
|
||||||
KeepAlive *Duration `json:"keep_alive,omitempty"`
|
KeepAlive *Duration `json:"keep_alive,omitempty"`
|
||||||
|
|
||||||
|
// Options lists model-specific options.
|
||||||
Options map[string]interface{} `json:"options"`
|
Options map[string]interface{} `json:"options"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// EmbeddingResponse is the response from [Client.Embeddings].
|
||||||
type EmbeddingResponse struct {
|
type EmbeddingResponse struct {
|
||||||
Embedding []float64 `json:"embedding"`
|
Embedding []float64 `json:"embedding"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CreateRequest is the request passed to [Client.Create].
|
||||||
type CreateRequest struct {
|
type CreateRequest struct {
|
||||||
Model string `json:"model"`
|
Model string `json:"model"`
|
||||||
Path string `json:"path"`
|
Path string `json:"path"`
|
||||||
Modelfile string `json:"modelfile"`
|
Modelfile string `json:"modelfile"`
|
||||||
Stream *bool `json:"stream,omitempty"`
|
Stream *bool `json:"stream,omitempty"`
|
||||||
Quantization string `json:"quantization,omitempty"`
|
Quantize string `json:"quantize,omitempty"`
|
||||||
|
|
||||||
// Name is deprecated, see Model
|
// Name is deprecated, see Model
|
||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
|
|
||||||
|
// Quantization is deprecated, see Quantize
|
||||||
|
Quantization string `json:"quantization,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DeleteRequest is the request passed to [Client.Delete].
|
||||||
type DeleteRequest struct {
|
type DeleteRequest struct {
|
||||||
Model string `json:"model"`
|
Model string `json:"model"`
|
||||||
|
|
||||||
@@ -188,6 +218,7 @@ type DeleteRequest struct {
|
|||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ShowRequest is the request passed to [Client.Show].
|
||||||
type ShowRequest struct {
|
type ShowRequest struct {
|
||||||
Model string `json:"model"`
|
Model string `json:"model"`
|
||||||
System string `json:"system"`
|
System string `json:"system"`
|
||||||
@@ -199,6 +230,7 @@ type ShowRequest struct {
|
|||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ShowResponse is the response returned from [Client.Show].
|
||||||
type ShowResponse struct {
|
type ShowResponse struct {
|
||||||
License string `json:"license,omitempty"`
|
License string `json:"license,omitempty"`
|
||||||
Modelfile string `json:"modelfile,omitempty"`
|
Modelfile string `json:"modelfile,omitempty"`
|
||||||
@@ -209,11 +241,13 @@ type ShowResponse struct {
|
|||||||
Messages []Message `json:"messages,omitempty"`
|
Messages []Message `json:"messages,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CopyRequest is the request passed to [Client.Copy].
|
||||||
type CopyRequest struct {
|
type CopyRequest struct {
|
||||||
Source string `json:"source"`
|
Source string `json:"source"`
|
||||||
Destination string `json:"destination"`
|
Destination string `json:"destination"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// PullRequest is the request passed to [Client.Pull].
|
||||||
type PullRequest struct {
|
type PullRequest struct {
|
||||||
Model string `json:"model"`
|
Model string `json:"model"`
|
||||||
Insecure bool `json:"insecure,omitempty"`
|
Insecure bool `json:"insecure,omitempty"`
|
||||||
@@ -225,6 +259,8 @@ type PullRequest struct {
|
|||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ProgressResponse is the response passed to progress functions like
|
||||||
|
// [PullProgressFunc] and [PushProgressFunc].
|
||||||
type ProgressResponse struct {
|
type ProgressResponse struct {
|
||||||
Status string `json:"status"`
|
Status string `json:"status"`
|
||||||
Digest string `json:"digest,omitempty"`
|
Digest string `json:"digest,omitempty"`
|
||||||
@@ -232,6 +268,7 @@ type ProgressResponse struct {
|
|||||||
Completed int64 `json:"completed,omitempty"`
|
Completed int64 `json:"completed,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// PushRequest is the request passed to [Client.Push].
|
||||||
type PushRequest struct {
|
type PushRequest struct {
|
||||||
Model string `json:"model"`
|
Model string `json:"model"`
|
||||||
Insecure bool `json:"insecure,omitempty"`
|
Insecure bool `json:"insecure,omitempty"`
|
||||||
@@ -243,34 +280,52 @@ type PushRequest struct {
|
|||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ListResponse is the response from [Client.List].
|
||||||
type ListResponse struct {
|
type ListResponse struct {
|
||||||
Models []ModelResponse `json:"models"`
|
Models []ModelResponse `json:"models"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ModelResponse is a single model description in [ListResponse].
|
||||||
type ModelResponse struct {
|
type ModelResponse struct {
|
||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
Model string `json:"model"`
|
Model string `json:"model"`
|
||||||
ModifiedAt time.Time `json:"modified_at"`
|
ModifiedAt time.Time `json:"modified_at,omitempty"`
|
||||||
Size int64 `json:"size"`
|
Size int64 `json:"size"`
|
||||||
Digest string `json:"digest"`
|
Digest string `json:"digest"`
|
||||||
Details ModelDetails `json:"details,omitempty"`
|
Details ModelDetails `json:"details,omitempty"`
|
||||||
|
ExpiresAt time.Time `json:"expires_at,omitempty"`
|
||||||
|
SizeVRAM int64 `json:"size_vram,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type TokenResponse struct {
|
type TokenResponse struct {
|
||||||
Token string `json:"token"`
|
Token string `json:"token"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GenerateResponse is the response passed into [GenerateResponseFunc].
|
||||||
type GenerateResponse struct {
|
type GenerateResponse struct {
|
||||||
Model string `json:"model"`
|
// Model is the model name that generated the response.
|
||||||
CreatedAt time.Time `json:"created_at"`
|
Model string `json:"model"`
|
||||||
Response string `json:"response"`
|
|
||||||
|
|
||||||
Done bool `json:"done"`
|
//CreatedAt is the timestamp of the response.
|
||||||
|
CreatedAt time.Time `json:"created_at"`
|
||||||
|
|
||||||
|
// Response is the textual response itself.
|
||||||
|
Response string `json:"response"`
|
||||||
|
|
||||||
|
// Done specifies if the response is complete.
|
||||||
|
Done bool `json:"done"`
|
||||||
|
|
||||||
|
// DoneReason is the reason the model stopped generating text.
|
||||||
|
DoneReason string `json:"done_reason,omitempty"`
|
||||||
|
|
||||||
|
// Context is an encoding of the conversation used in this response; this
|
||||||
|
// can be sent in the next request to keep a conversational memory.
|
||||||
Context []int `json:"context,omitempty"`
|
Context []int `json:"context,omitempty"`
|
||||||
|
|
||||||
Metrics
|
Metrics
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ModelDetails provides details about a model.
|
||||||
type ModelDetails struct {
|
type ModelDetails struct {
|
||||||
ParentModel string `json:"parent_model"`
|
ParentModel string `json:"parent_model"`
|
||||||
Format string `json:"format"`
|
Format string `json:"format"`
|
||||||
@@ -308,7 +363,7 @@ func (m *Metrics) Summary() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var ErrInvalidOpts = errors.New("invalid options")
|
var ErrInvalidHostPort = errors.New("invalid port specified in OLLAMA_HOST")
|
||||||
|
|
||||||
func (opts *Options) FromMap(m map[string]interface{}) error {
|
func (opts *Options) FromMap(m map[string]interface{}) error {
|
||||||
valueOpts := reflect.ValueOf(opts).Elem() // names of the fields in the options struct
|
valueOpts := reflect.ValueOf(opts).Elem() // names of the fields in the options struct
|
||||||
@@ -323,81 +378,83 @@ func (opts *Options) FromMap(m map[string]interface{}) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
invalidOpts := []string{}
|
|
||||||
for key, val := range m {
|
for key, val := range m {
|
||||||
if opt, ok := jsonOpts[key]; ok {
|
opt, ok := jsonOpts[key]
|
||||||
field := valueOpts.FieldByName(opt.Name)
|
if !ok {
|
||||||
if field.IsValid() && field.CanSet() {
|
slog.Warn("invalid option provided", "option", opt.Name)
|
||||||
if val == nil {
|
continue
|
||||||
continue
|
}
|
||||||
}
|
|
||||||
|
|
||||||
switch field.Kind() {
|
field := valueOpts.FieldByName(opt.Name)
|
||||||
case reflect.Int:
|
if field.IsValid() && field.CanSet() {
|
||||||
switch t := val.(type) {
|
if val == nil {
|
||||||
case int64:
|
continue
|
||||||
field.SetInt(t)
|
}
|
||||||
case float64:
|
|
||||||
// when JSON unmarshals numbers, it uses float64, not int
|
switch field.Kind() {
|
||||||
field.SetInt(int64(t))
|
case reflect.Int:
|
||||||
default:
|
switch t := val.(type) {
|
||||||
return fmt.Errorf("option %q must be of type integer", key)
|
case int64:
|
||||||
}
|
field.SetInt(t)
|
||||||
case reflect.Bool:
|
case float64:
|
||||||
val, ok := val.(bool)
|
// when JSON unmarshals numbers, it uses float64, not int
|
||||||
if !ok {
|
field.SetInt(int64(t))
|
||||||
return fmt.Errorf("option %q must be of type boolean", key)
|
default:
|
||||||
}
|
return fmt.Errorf("option %q must be of type integer", key)
|
||||||
field.SetBool(val)
|
}
|
||||||
case reflect.Float32:
|
case reflect.Bool:
|
||||||
// JSON unmarshals to float64
|
val, ok := val.(bool)
|
||||||
val, ok := val.(float64)
|
if !ok {
|
||||||
if !ok {
|
return fmt.Errorf("option %q must be of type boolean", key)
|
||||||
return fmt.Errorf("option %q must be of type float32", key)
|
}
|
||||||
}
|
field.SetBool(val)
|
||||||
field.SetFloat(val)
|
case reflect.Float32:
|
||||||
case reflect.String:
|
// JSON unmarshals to float64
|
||||||
val, ok := val.(string)
|
val, ok := val.(float64)
|
||||||
if !ok {
|
if !ok {
|
||||||
return fmt.Errorf("option %q must be of type string", key)
|
return fmt.Errorf("option %q must be of type float32", key)
|
||||||
}
|
}
|
||||||
field.SetString(val)
|
field.SetFloat(val)
|
||||||
case reflect.Slice:
|
case reflect.String:
|
||||||
// JSON unmarshals to []interface{}, not []string
|
val, ok := val.(string)
|
||||||
val, ok := val.([]interface{})
|
if !ok {
|
||||||
if !ok {
|
return fmt.Errorf("option %q must be of type string", key)
|
||||||
return fmt.Errorf("option %q must be of type array", key)
|
}
|
||||||
}
|
field.SetString(val)
|
||||||
// convert []interface{} to []string
|
case reflect.Slice:
|
||||||
slice := make([]string, len(val))
|
// JSON unmarshals to []interface{}, not []string
|
||||||
for i, item := range val {
|
val, ok := val.([]interface{})
|
||||||
str, ok := item.(string)
|
if !ok {
|
||||||
if !ok {
|
return fmt.Errorf("option %q must be of type array", key)
|
||||||
return fmt.Errorf("option %q must be of an array of strings", key)
|
}
|
||||||
}
|
// convert []interface{} to []string
|
||||||
slice[i] = str
|
slice := make([]string, len(val))
|
||||||
}
|
for i, item := range val {
|
||||||
field.Set(reflect.ValueOf(slice))
|
str, ok := item.(string)
|
||||||
default:
|
if !ok {
|
||||||
return fmt.Errorf("unknown type loading config params: %v", field.Kind())
|
return fmt.Errorf("option %q must be of an array of strings", key)
|
||||||
}
|
}
|
||||||
|
slice[i] = str
|
||||||
|
}
|
||||||
|
field.Set(reflect.ValueOf(slice))
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("unknown type loading config params: %v", field.Kind())
|
||||||
}
|
}
|
||||||
} else {
|
|
||||||
invalidOpts = append(invalidOpts, key)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(invalidOpts) > 0 {
|
|
||||||
return fmt.Errorf("%w: %v", ErrInvalidOpts, strings.Join(invalidOpts, ", "))
|
|
||||||
}
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DefaultOptions is the default set of options for [GenerateRequest]; these
|
||||||
|
// values are used unless the user specifies other values explicitly.
|
||||||
func DefaultOptions() Options {
|
func DefaultOptions() Options {
|
||||||
return Options{
|
return Options{
|
||||||
// options set on request to runner
|
// options set on request to runner
|
||||||
NumPredict: -1,
|
NumPredict: -1,
|
||||||
NumKeep: 0,
|
|
||||||
|
// set a minimal num_keep to avoid issues on context shifts
|
||||||
|
NumKeep: 4,
|
||||||
Temperature: 0.8,
|
Temperature: 0.8,
|
||||||
TopK: 40,
|
TopK: 40,
|
||||||
TopP: 0.9,
|
TopP: 0.9,
|
||||||
@@ -418,8 +475,7 @@ func DefaultOptions() Options {
|
|||||||
NumCtx: 2048,
|
NumCtx: 2048,
|
||||||
NumBatch: 512,
|
NumBatch: 512,
|
||||||
NumGPU: -1, // -1 here indicates that NumGPU should be set dynamically
|
NumGPU: -1, // -1 here indicates that NumGPU should be set dynamically
|
||||||
NumGQA: 1,
|
NumThread: 0, // let the runtime decide
|
||||||
NumThread: 0, // let the runtime decide
|
|
||||||
LowVRAM: false,
|
LowVRAM: false,
|
||||||
F16KV: true,
|
F16KV: true,
|
||||||
UseMLock: false,
|
UseMLock: false,
|
||||||
@@ -433,6 +489,13 @@ type Duration struct {
|
|||||||
time.Duration
|
time.Duration
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d Duration) MarshalJSON() ([]byte, error) {
|
||||||
|
if d.Duration < 0 {
|
||||||
|
return []byte("-1"), nil
|
||||||
|
}
|
||||||
|
return []byte("\"" + d.Duration.String() + "\""), nil
|
||||||
|
}
|
||||||
|
|
||||||
func (d *Duration) UnmarshalJSON(b []byte) (err error) {
|
func (d *Duration) UnmarshalJSON(b []byte) (err error) {
|
||||||
var v any
|
var v any
|
||||||
if err := json.Unmarshal(b, &v); err != nil {
|
if err := json.Unmarshal(b, &v); err != nil {
|
||||||
@@ -446,7 +509,7 @@ func (d *Duration) UnmarshalJSON(b []byte) (err error) {
|
|||||||
if t < 0 {
|
if t < 0 {
|
||||||
d.Duration = time.Duration(math.MaxInt64)
|
d.Duration = time.Duration(math.MaxInt64)
|
||||||
} else {
|
} else {
|
||||||
d.Duration = time.Duration(t * float64(time.Second))
|
d.Duration = time.Duration(int(t) * int(time.Second))
|
||||||
}
|
}
|
||||||
case string:
|
case string:
|
||||||
d.Duration, err = time.ParseDuration(t)
|
d.Duration, err = time.ParseDuration(t)
|
||||||
@@ -456,6 +519,8 @@ func (d *Duration) UnmarshalJSON(b []byte) (err error) {
|
|||||||
if d.Duration < 0 {
|
if d.Duration < 0 {
|
||||||
d.Duration = time.Duration(math.MaxInt64)
|
d.Duration = time.Duration(math.MaxInt64)
|
||||||
}
|
}
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("Unsupported type: '%s'", reflect.TypeOf(v))
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
|||||||
@@ -21,6 +21,11 @@ func TestKeepAliveParsingFromJSON(t *testing.T) {
|
|||||||
req: `{ "keep_alive": 42 }`,
|
req: `{ "keep_alive": 42 }`,
|
||||||
exp: &Duration{42 * time.Second},
|
exp: &Duration{42 * time.Second},
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
name: "Positive Float",
|
||||||
|
req: `{ "keep_alive": 42.5 }`,
|
||||||
|
exp: &Duration{42 * time.Second},
|
||||||
|
},
|
||||||
{
|
{
|
||||||
name: "Positive Integer String",
|
name: "Positive Integer String",
|
||||||
req: `{ "keep_alive": "42m" }`,
|
req: `{ "keep_alive": "42m" }`,
|
||||||
@@ -31,6 +36,11 @@ func TestKeepAliveParsingFromJSON(t *testing.T) {
|
|||||||
req: `{ "keep_alive": -1 }`,
|
req: `{ "keep_alive": -1 }`,
|
||||||
exp: &Duration{math.MaxInt64},
|
exp: &Duration{math.MaxInt64},
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
name: "Negative Float",
|
||||||
|
req: `{ "keep_alive": -3.14 }`,
|
||||||
|
exp: &Duration{math.MaxInt64},
|
||||||
|
},
|
||||||
{
|
{
|
||||||
name: "Negative Integer String",
|
name: "Negative Integer String",
|
||||||
req: `{ "keep_alive": "-1m" }`,
|
req: `{ "keep_alive": "-1m" }`,
|
||||||
@@ -48,3 +58,50 @@ func TestKeepAliveParsingFromJSON(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestDurationMarshalUnmarshal(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
input time.Duration
|
||||||
|
expected time.Duration
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
"negative duration",
|
||||||
|
time.Duration(-1),
|
||||||
|
time.Duration(math.MaxInt64),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"positive duration",
|
||||||
|
time.Duration(42 * time.Second),
|
||||||
|
time.Duration(42 * time.Second),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"another positive duration",
|
||||||
|
time.Duration(42 * time.Minute),
|
||||||
|
time.Duration(42 * time.Minute),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"zero duration",
|
||||||
|
time.Duration(0),
|
||||||
|
time.Duration(0),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"max duration",
|
||||||
|
time.Duration(math.MaxInt64),
|
||||||
|
time.Duration(math.MaxInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
b, err := json.Marshal(Duration{test.input})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
var d Duration
|
||||||
|
err = json.Unmarshal(b, &d)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
assert.Equal(t, test.expected, d.Duration, "input %v, marshalled %v, got %v", test.input, string(b), d.Duration)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -5,12 +5,14 @@ import (
|
|||||||
"log/slog"
|
"log/slog"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
|
||||||
|
"github.com/ollama/ollama/envconfig"
|
||||||
)
|
)
|
||||||
|
|
||||||
func InitLogging() {
|
func InitLogging() {
|
||||||
level := slog.LevelInfo
|
level := slog.LevelInfo
|
||||||
|
|
||||||
if debug := os.Getenv("OLLAMA_DEBUG"); debug != "" {
|
if envconfig.Debug {
|
||||||
level = slog.LevelDebug
|
level = slog.LevelDebug
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -43,37 +43,36 @@ func getCLIFullPath(command string) string {
|
|||||||
return command
|
return command
|
||||||
}
|
}
|
||||||
|
|
||||||
func SpawnServer(ctx context.Context, command string) (chan int, error) {
|
func start(ctx context.Context, command string) (*exec.Cmd, error) {
|
||||||
done := make(chan int)
|
|
||||||
|
|
||||||
logDir := filepath.Dir(ServerLogFile)
|
|
||||||
_, err := os.Stat(logDir)
|
|
||||||
if errors.Is(err, os.ErrNotExist) {
|
|
||||||
if err := os.MkdirAll(logDir, 0o755); err != nil {
|
|
||||||
return done, fmt.Errorf("create ollama server log dir %s: %v", logDir, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
cmd := getCmd(ctx, getCLIFullPath(command))
|
cmd := getCmd(ctx, getCLIFullPath(command))
|
||||||
// send stdout and stderr to a file
|
|
||||||
stdout, err := cmd.StdoutPipe()
|
stdout, err := cmd.StdoutPipe()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return done, fmt.Errorf("failed to spawn server stdout pipe %s", err)
|
return nil, fmt.Errorf("failed to spawn server stdout pipe: %w", err)
|
||||||
}
|
}
|
||||||
stderr, err := cmd.StderrPipe()
|
stderr, err := cmd.StderrPipe()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return done, fmt.Errorf("failed to spawn server stderr pipe %s", err)
|
return nil, fmt.Errorf("failed to spawn server stderr pipe: %w", err)
|
||||||
}
|
|
||||||
stdin, err := cmd.StdinPipe()
|
|
||||||
if err != nil {
|
|
||||||
return done, fmt.Errorf("failed to spawn server stdin pipe %s", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO - rotation
|
// TODO - rotation
|
||||||
logFile, err := os.OpenFile(ServerLogFile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0755)
|
logFile, err := os.OpenFile(ServerLogFile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0755)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return done, fmt.Errorf("failed to create server log %w", err)
|
return nil, fmt.Errorf("failed to create server log: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
logDir := filepath.Dir(ServerLogFile)
|
||||||
|
_, err = os.Stat(logDir)
|
||||||
|
if err != nil {
|
||||||
|
if !errors.Is(err, os.ErrNotExist) {
|
||||||
|
return nil, fmt.Errorf("stat ollama server log dir %s: %v", logDir, err)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := os.MkdirAll(logDir, 0o755); err != nil {
|
||||||
|
return nil, fmt.Errorf("create ollama server log dir %s: %v", logDir, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
defer logFile.Close()
|
defer logFile.Close()
|
||||||
io.Copy(logFile, stdout) //nolint:errcheck
|
io.Copy(logFile, stdout) //nolint:errcheck
|
||||||
@@ -117,19 +116,33 @@ func SpawnServer(ctx context.Context, command string) (chan int, error) {
|
|||||||
|
|
||||||
// run the command and wait for it to finish
|
// run the command and wait for it to finish
|
||||||
if err := cmd.Start(); err != nil {
|
if err := cmd.Start(); err != nil {
|
||||||
return done, fmt.Errorf("failed to start server %w", err)
|
return nil, fmt.Errorf("failed to start server %w", err)
|
||||||
}
|
}
|
||||||
if cmd.Process != nil {
|
if cmd.Process != nil {
|
||||||
slog.Info(fmt.Sprintf("started ollama server with pid %d", cmd.Process.Pid))
|
slog.Info(fmt.Sprintf("started ollama server with pid %d", cmd.Process.Pid))
|
||||||
}
|
}
|
||||||
slog.Info(fmt.Sprintf("ollama server logs %s", ServerLogFile))
|
slog.Info(fmt.Sprintf("ollama server logs %s", ServerLogFile))
|
||||||
|
|
||||||
|
return cmd, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func SpawnServer(ctx context.Context, command string) (chan int, error) {
|
||||||
|
done := make(chan int)
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
// Keep the server running unless we're shuttind down the app
|
// Keep the server running unless we're shuttind down the app
|
||||||
crashCount := 0
|
crashCount := 0
|
||||||
for {
|
for {
|
||||||
|
slog.Info("starting server...")
|
||||||
|
cmd, err := start(ctx, command)
|
||||||
|
if err != nil {
|
||||||
|
crashCount++
|
||||||
|
slog.Error(fmt.Sprintf("failed to start server %s", err))
|
||||||
|
time.Sleep(500 * time.Millisecond * time.Duration(crashCount))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
cmd.Wait() //nolint:errcheck
|
cmd.Wait() //nolint:errcheck
|
||||||
stdin.Close()
|
|
||||||
var code int
|
var code int
|
||||||
if cmd.ProcessState != nil {
|
if cmd.ProcessState != nil {
|
||||||
code = cmd.ProcessState.ExitCode()
|
code = cmd.ProcessState.ExitCode()
|
||||||
@@ -143,15 +156,12 @@ func SpawnServer(ctx context.Context, command string) (chan int, error) {
|
|||||||
default:
|
default:
|
||||||
crashCount++
|
crashCount++
|
||||||
slog.Warn(fmt.Sprintf("server crash %d - exit code %d - respawning", crashCount, code))
|
slog.Warn(fmt.Sprintf("server crash %d - exit code %d - respawning", crashCount, code))
|
||||||
time.Sleep(500 * time.Millisecond)
|
time.Sleep(500 * time.Millisecond * time.Duration(crashCount))
|
||||||
if err := cmd.Start(); err != nil {
|
break
|
||||||
slog.Error(fmt.Sprintf("failed to restart server %s", err))
|
|
||||||
// Keep trying, but back off if we keep failing
|
|
||||||
time.Sleep(time.Duration(crashCount) * time.Second)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
return done, nil
|
return done, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -31,16 +31,13 @@ func DoUpgrade(cancel context.CancelFunc, done chan int) error {
|
|||||||
"/LOG=" + filepath.Base(UpgradeLogFile), // Only relative seems reliable, so set pwd
|
"/LOG=" + filepath.Base(UpgradeLogFile), // Only relative seems reliable, so set pwd
|
||||||
"/FORCECLOSEAPPLICATIONS", // Force close the tray app - might be needed
|
"/FORCECLOSEAPPLICATIONS", // Force close the tray app - might be needed
|
||||||
}
|
}
|
||||||
// When we're not in debug mode, make the upgrade as quiet as possible (no GUI, no prompts)
|
// make the upgrade as quiet as possible (no GUI, no prompts)
|
||||||
// TODO - temporarily disable since we're pinning in debug mode for the preview
|
|
||||||
// if debug := os.Getenv("OLLAMA_DEBUG"); debug == "" {
|
|
||||||
installArgs = append(installArgs,
|
installArgs = append(installArgs,
|
||||||
"/SP", // Skip the "This will install... Do you wish to continue" prompt
|
"/SP", // Skip the "This will install... Do you wish to continue" prompt
|
||||||
"/SUPPRESSMSGBOXES",
|
"/SUPPRESSMSGBOXES",
|
||||||
"/SILENT",
|
"/SILENT",
|
||||||
"/VERYSILENT",
|
"/VERYSILENT",
|
||||||
)
|
)
|
||||||
// }
|
|
||||||
|
|
||||||
// Safeguard in case we have requests in flight that need to drain...
|
// Safeguard in case we have requests in flight that need to drain...
|
||||||
slog.Info("Waiting for server to shutdown")
|
slog.Info("Waiting for server to shutdown")
|
||||||
|
|||||||
@@ -88,15 +88,12 @@ DialogFontSize=12
|
|||||||
[Files]
|
[Files]
|
||||||
Source: ".\app.exe"; DestDir: "{app}"; DestName: "{#MyAppExeName}" ; Flags: ignoreversion 64bit
|
Source: ".\app.exe"; DestDir: "{app}"; DestName: "{#MyAppExeName}" ; Flags: ignoreversion 64bit
|
||||||
Source: "..\ollama.exe"; DestDir: "{app}"; Flags: ignoreversion 64bit
|
Source: "..\ollama.exe"; DestDir: "{app}"; Flags: ignoreversion 64bit
|
||||||
Source: "..\dist\windeps\*.dll"; DestDir: "{app}"; Flags: ignoreversion 64bit
|
Source: "..\dist\windows-{#ARCH}\*.dll"; DestDir: "{app}"; Flags: ignoreversion 64bit
|
||||||
|
Source: "..\dist\windows-{#ARCH}\ollama_runners\*"; DestDir: "{app}\ollama_runners"; Flags: ignoreversion 64bit recursesubdirs
|
||||||
Source: "..\dist\ollama_welcome.ps1"; DestDir: "{app}"; Flags: ignoreversion
|
Source: "..\dist\ollama_welcome.ps1"; DestDir: "{app}"; Flags: ignoreversion
|
||||||
Source: ".\assets\app.ico"; DestDir: "{app}"; Flags: ignoreversion
|
Source: ".\assets\app.ico"; DestDir: "{app}"; Flags: ignoreversion
|
||||||
; Assumes v5.7, may need adjustments for v6
|
#if DirExists("..\dist\windows-amd64\rocm")
|
||||||
#if GetEnv("HIP_PATH") != ""
|
Source: "..\dist\windows-amd64\rocm\*"; DestDir: "{app}\rocm\"; Flags: ignoreversion recursesubdirs
|
||||||
Source: "{#GetEnv('HIP_PATH')}\bin\hipblas.dll"; DestDir: "{app}\rocm\"; Flags: ignoreversion
|
|
||||||
Source: "{#GetEnv('HIP_PATH')}\bin\rocblas.dll"; DestDir: "{app}\rocm\"; Flags: ignoreversion
|
|
||||||
; amdhip64.dll dependency comes from the driver and must be installed already
|
|
||||||
Source: "{#GetEnv('HIP_PATH')}\bin\rocblas\library\*"; DestDir: "{app}\rocm\rocblas\library\"; Flags: ignoreversion
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
|
||||||
@@ -132,7 +129,7 @@ SetupAppRunningError=Another Ollama installer is running.%n%nPlease cancel or fi
|
|||||||
|
|
||||||
|
|
||||||
;FinishedHeadingLabel=Run your first model
|
;FinishedHeadingLabel=Run your first model
|
||||||
;FinishedLabel=%nRun this command in a PowerShell or cmd terminal.%n%n%n ollama run llama2
|
;FinishedLabel=%nRun this command in a PowerShell or cmd terminal.%n%n%n ollama run llama3
|
||||||
;ClickFinish=%n
|
;ClickFinish=%n
|
||||||
|
|
||||||
[Registry]
|
[Registry]
|
||||||
|
|||||||
@@ -4,5 +4,5 @@ write-host "Welcome to Ollama!"
|
|||||||
write-host ""
|
write-host ""
|
||||||
write-host "Run your first model:"
|
write-host "Run your first model:"
|
||||||
write-host ""
|
write-host ""
|
||||||
write-host "`tollama run llama2"
|
write-host "`tollama run llama3"
|
||||||
write-host ""
|
write-host ""
|
||||||
@@ -1,71 +1,71 @@
|
|||||||
//go:build windows
|
//go:build windows
|
||||||
|
|
||||||
package wintray
|
package wintray
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"log/slog"
|
"log/slog"
|
||||||
"unsafe"
|
"unsafe"
|
||||||
|
|
||||||
"golang.org/x/sys/windows"
|
"golang.org/x/sys/windows"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
updatAvailableMenuID = 1
|
updatAvailableMenuID = 1
|
||||||
updateMenuID = updatAvailableMenuID + 1
|
updateMenuID = updatAvailableMenuID + 1
|
||||||
separatorMenuID = updateMenuID + 1
|
separatorMenuID = updateMenuID + 1
|
||||||
diagLogsMenuID = separatorMenuID + 1
|
diagLogsMenuID = separatorMenuID + 1
|
||||||
diagSeparatorMenuID = diagLogsMenuID + 1
|
diagSeparatorMenuID = diagLogsMenuID + 1
|
||||||
quitMenuID = diagSeparatorMenuID + 1
|
quitMenuID = diagSeparatorMenuID + 1
|
||||||
)
|
)
|
||||||
|
|
||||||
func (t *winTray) initMenus() error {
|
func (t *winTray) initMenus() error {
|
||||||
if err := t.addOrUpdateMenuItem(diagLogsMenuID, 0, diagLogsMenuTitle, false); err != nil {
|
if err := t.addOrUpdateMenuItem(diagLogsMenuID, 0, diagLogsMenuTitle, false); err != nil {
|
||||||
return fmt.Errorf("unable to create menu entries %w\n", err)
|
return fmt.Errorf("unable to create menu entries %w\n", err)
|
||||||
}
|
}
|
||||||
if err := t.addSeparatorMenuItem(diagSeparatorMenuID, 0); err != nil {
|
if err := t.addSeparatorMenuItem(diagSeparatorMenuID, 0); err != nil {
|
||||||
return fmt.Errorf("unable to create menu entries %w", err)
|
return fmt.Errorf("unable to create menu entries %w", err)
|
||||||
}
|
}
|
||||||
if err := t.addOrUpdateMenuItem(quitMenuID, 0, quitMenuTitle, false); err != nil {
|
if err := t.addOrUpdateMenuItem(quitMenuID, 0, quitMenuTitle, false); err != nil {
|
||||||
return fmt.Errorf("unable to create menu entries %w\n", err)
|
return fmt.Errorf("unable to create menu entries %w\n", err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *winTray) UpdateAvailable(ver string) error {
|
func (t *winTray) UpdateAvailable(ver string) error {
|
||||||
if !t.updateNotified {
|
if !t.updateNotified {
|
||||||
slog.Debug("updating menu and sending notification for new update")
|
slog.Debug("updating menu and sending notification for new update")
|
||||||
if err := t.addOrUpdateMenuItem(updatAvailableMenuID, 0, updateAvailableMenuTitle, true); err != nil {
|
if err := t.addOrUpdateMenuItem(updatAvailableMenuID, 0, updateAvailableMenuTitle, true); err != nil {
|
||||||
return fmt.Errorf("unable to create menu entries %w", err)
|
return fmt.Errorf("unable to create menu entries %w", err)
|
||||||
}
|
}
|
||||||
if err := t.addOrUpdateMenuItem(updateMenuID, 0, updateMenutTitle, false); err != nil {
|
if err := t.addOrUpdateMenuItem(updateMenuID, 0, updateMenutTitle, false); err != nil {
|
||||||
return fmt.Errorf("unable to create menu entries %w", err)
|
return fmt.Errorf("unable to create menu entries %w", err)
|
||||||
}
|
}
|
||||||
if err := t.addSeparatorMenuItem(separatorMenuID, 0); err != nil {
|
if err := t.addSeparatorMenuItem(separatorMenuID, 0); err != nil {
|
||||||
return fmt.Errorf("unable to create menu entries %w", err)
|
return fmt.Errorf("unable to create menu entries %w", err)
|
||||||
}
|
}
|
||||||
iconFilePath, err := iconBytesToFilePath(wt.updateIcon)
|
iconFilePath, err := iconBytesToFilePath(wt.updateIcon)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("unable to write icon data to temp file: %w", err)
|
return fmt.Errorf("unable to write icon data to temp file: %w", err)
|
||||||
}
|
}
|
||||||
if err := wt.setIcon(iconFilePath); err != nil {
|
if err := wt.setIcon(iconFilePath); err != nil {
|
||||||
return fmt.Errorf("unable to set icon: %w", err)
|
return fmt.Errorf("unable to set icon: %w", err)
|
||||||
}
|
}
|
||||||
t.updateNotified = true
|
t.updateNotified = true
|
||||||
|
|
||||||
t.pendingUpdate = true
|
t.pendingUpdate = true
|
||||||
// Now pop up the notification
|
// Now pop up the notification
|
||||||
t.muNID.Lock()
|
t.muNID.Lock()
|
||||||
defer t.muNID.Unlock()
|
defer t.muNID.Unlock()
|
||||||
copy(t.nid.InfoTitle[:], windows.StringToUTF16(updateTitle))
|
copy(t.nid.InfoTitle[:], windows.StringToUTF16(updateTitle))
|
||||||
copy(t.nid.Info[:], windows.StringToUTF16(fmt.Sprintf(updateMessage, ver)))
|
copy(t.nid.Info[:], windows.StringToUTF16(fmt.Sprintf(updateMessage, ver)))
|
||||||
t.nid.Flags |= NIF_INFO
|
t.nid.Flags |= NIF_INFO
|
||||||
t.nid.Timeout = 10
|
t.nid.Timeout = 10
|
||||||
t.nid.Size = uint32(unsafe.Sizeof(*wt.nid))
|
t.nid.Size = uint32(unsafe.Sizeof(*wt.nid))
|
||||||
err = t.nid.modify()
|
err = t.nid.modify()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
36
auth/auth.go
36
auth/auth.go
@@ -10,12 +10,44 @@ import (
|
|||||||
"log/slog"
|
"log/slog"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
|
||||||
"golang.org/x/crypto/ssh"
|
"golang.org/x/crypto/ssh"
|
||||||
)
|
)
|
||||||
|
|
||||||
const defaultPrivateKey = "id_ed25519"
|
const defaultPrivateKey = "id_ed25519"
|
||||||
|
|
||||||
|
func keyPath() (string, error) {
|
||||||
|
home, err := os.UserHomeDir()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return filepath.Join(home, ".ollama", defaultPrivateKey), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func GetPublicKey() (string, error) {
|
||||||
|
keyPath, err := keyPath()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
privateKeyFile, err := os.ReadFile(keyPath)
|
||||||
|
if err != nil {
|
||||||
|
slog.Info(fmt.Sprintf("Failed to load private key: %v", err))
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
privateKey, err := ssh.ParsePrivateKey(privateKeyFile)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
publicKey := ssh.MarshalAuthorizedKey(privateKey.PublicKey())
|
||||||
|
|
||||||
|
return strings.TrimSpace(string(publicKey)), nil
|
||||||
|
}
|
||||||
|
|
||||||
func NewNonce(r io.Reader, length int) (string, error) {
|
func NewNonce(r io.Reader, length int) (string, error) {
|
||||||
nonce := make([]byte, length)
|
nonce := make([]byte, length)
|
||||||
if _, err := io.ReadFull(r, nonce); err != nil {
|
if _, err := io.ReadFull(r, nonce); err != nil {
|
||||||
@@ -26,13 +58,11 @@ func NewNonce(r io.Reader, length int) (string, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func Sign(ctx context.Context, bts []byte) (string, error) {
|
func Sign(ctx context.Context, bts []byte) (string, error) {
|
||||||
home, err := os.UserHomeDir()
|
keyPath, err := keyPath()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
keyPath := filepath.Join(home, ".ollama", defaultPrivateKey)
|
|
||||||
|
|
||||||
privateKeyFile, err := os.ReadFile(keyPath)
|
privateKeyFile, err := os.ReadFile(keyPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Info(fmt.Sprintf("Failed to load private key: %v", err))
|
slog.Info(fmt.Sprintf("Failed to load private key: %v", err))
|
||||||
|
|||||||
467
cmd/cmd.go
467
cmd/cmd.go
@@ -12,18 +12,20 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"log"
|
"log"
|
||||||
|
"math"
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"regexp"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strings"
|
"strings"
|
||||||
"syscall"
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/containerd/console"
|
"github.com/containerd/console"
|
||||||
|
"github.com/mattn/go-runewidth"
|
||||||
"github.com/olekukonko/tablewriter"
|
"github.com/olekukonko/tablewriter"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
"golang.org/x/crypto/ssh"
|
"golang.org/x/crypto/ssh"
|
||||||
@@ -31,10 +33,14 @@ import (
|
|||||||
"golang.org/x/term"
|
"golang.org/x/term"
|
||||||
|
|
||||||
"github.com/ollama/ollama/api"
|
"github.com/ollama/ollama/api"
|
||||||
|
"github.com/ollama/ollama/auth"
|
||||||
|
"github.com/ollama/ollama/envconfig"
|
||||||
"github.com/ollama/ollama/format"
|
"github.com/ollama/ollama/format"
|
||||||
"github.com/ollama/ollama/parser"
|
"github.com/ollama/ollama/parser"
|
||||||
"github.com/ollama/ollama/progress"
|
"github.com/ollama/ollama/progress"
|
||||||
"github.com/ollama/ollama/server"
|
"github.com/ollama/ollama/server"
|
||||||
|
"github.com/ollama/ollama/types/errtypes"
|
||||||
|
"github.com/ollama/ollama/types/model"
|
||||||
"github.com/ollama/ollama/version"
|
"github.com/ollama/ollama/version"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -53,14 +59,13 @@ func CreateHandler(cmd *cobra.Command, args []string) error {
|
|||||||
p := progress.NewProgress(os.Stderr)
|
p := progress.NewProgress(os.Stderr)
|
||||||
defer p.Stop()
|
defer p.Stop()
|
||||||
|
|
||||||
bars := make(map[string]*progress.Bar)
|
f, err := os.Open(filename)
|
||||||
|
|
||||||
modelfile, err := os.ReadFile(filename)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
commands, err := parser.Parse(bytes.NewReader(modelfile))
|
modelfile, err := parser.ParseFile(f)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -74,10 +79,10 @@ func CreateHandler(cmd *cobra.Command, args []string) error {
|
|||||||
spinner := progress.NewSpinner(status)
|
spinner := progress.NewSpinner(status)
|
||||||
p.Add(status, spinner)
|
p.Add(status, spinner)
|
||||||
|
|
||||||
for _, c := range commands {
|
for i := range modelfile.Commands {
|
||||||
switch c.Name {
|
switch modelfile.Commands[i].Name {
|
||||||
case "model", "adapter":
|
case "model", "adapter":
|
||||||
path := c.Args
|
path := modelfile.Commands[i].Args
|
||||||
if path == "~" {
|
if path == "~" {
|
||||||
path = home
|
path = home
|
||||||
} else if strings.HasPrefix(path, "~/") {
|
} else if strings.HasPrefix(path, "~/") {
|
||||||
@@ -89,101 +94,22 @@ func CreateHandler(cmd *cobra.Command, args []string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fi, err := os.Stat(path)
|
fi, err := os.Stat(path)
|
||||||
if errors.Is(err, os.ErrNotExist) && c.Name == "model" {
|
if errors.Is(err, os.ErrNotExist) && modelfile.Commands[i].Name == "model" {
|
||||||
continue
|
continue
|
||||||
} else if err != nil {
|
} else if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO make this work w/ adapters
|
|
||||||
if fi.IsDir() {
|
if fi.IsDir() {
|
||||||
tf, err := os.CreateTemp("", "ollama-tf")
|
// this is likely a safetensors or pytorch directory
|
||||||
|
// TODO make this work w/ adapters
|
||||||
|
tempfile, err := tempZipFiles(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer os.RemoveAll(tf.Name())
|
defer os.RemoveAll(tempfile)
|
||||||
|
|
||||||
zf := zip.NewWriter(tf)
|
path = tempfile
|
||||||
|
|
||||||
files := []string{}
|
|
||||||
|
|
||||||
tfiles, err := filepath.Glob(filepath.Join(path, "pytorch_model-*.bin"))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
} else if len(tfiles) == 0 {
|
|
||||||
tfiles, err = filepath.Glob(filepath.Join(path, "model-*.safetensors"))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
files = append(files, tfiles...)
|
|
||||||
|
|
||||||
if len(files) == 0 {
|
|
||||||
return fmt.Errorf("no models were found in '%s'", path)
|
|
||||||
}
|
|
||||||
|
|
||||||
// add the safetensor/torch config file + tokenizer
|
|
||||||
files = append(files, filepath.Join(path, "config.json"))
|
|
||||||
files = append(files, filepath.Join(path, "params.json"))
|
|
||||||
files = append(files, filepath.Join(path, "added_tokens.json"))
|
|
||||||
files = append(files, filepath.Join(path, "tokenizer.model"))
|
|
||||||
|
|
||||||
for _, fn := range files {
|
|
||||||
f, err := os.Open(fn)
|
|
||||||
|
|
||||||
// just skip whatever files aren't there
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
if strings.HasSuffix(fn, "tokenizer.model") {
|
|
||||||
// try the parent dir before giving up
|
|
||||||
parentDir := filepath.Dir(path)
|
|
||||||
newFn := filepath.Join(parentDir, "tokenizer.model")
|
|
||||||
f, err = os.Open(newFn)
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
continue
|
|
||||||
} else if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
} else if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
fi, err := f.Stat()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
h, err := zip.FileInfoHeader(fi)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
h.Name = filepath.Base(fn)
|
|
||||||
h.Method = zip.Store
|
|
||||||
|
|
||||||
w, err := zf.CreateHeader(h)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = io.Copy(w, f)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := zf.Close(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := tf.Close(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
path = tf.Name()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
digest, err := createBlob(cmd, client, path)
|
digest, err := createBlob(cmd, client, path)
|
||||||
@@ -191,10 +117,11 @@ func CreateHandler(cmd *cobra.Command, args []string) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
modelfile = bytes.ReplaceAll(modelfile, []byte(c.Args), []byte("@"+digest))
|
modelfile.Commands[i].Args = "@" + digest
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bars := make(map[string]*progress.Bar)
|
||||||
fn := func(resp api.ProgressResponse) error {
|
fn := func(resp api.ProgressResponse) error {
|
||||||
if resp.Digest != "" {
|
if resp.Digest != "" {
|
||||||
spinner.Stop()
|
spinner.Stop()
|
||||||
@@ -218,9 +145,9 @@ func CreateHandler(cmd *cobra.Command, args []string) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
quantization, _ := cmd.Flags().GetString("quantization")
|
quantize, _ := cmd.Flags().GetString("quantize")
|
||||||
|
|
||||||
request := api.CreateRequest{Name: args[0], Modelfile: string(modelfile), Quantization: quantization}
|
request := api.CreateRequest{Name: args[0], Modelfile: modelfile.String(), Quantize: quantize}
|
||||||
if err := client.Create(cmd.Context(), &request, fn); err != nil {
|
if err := client.Create(cmd.Context(), &request, fn); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -228,6 +155,114 @@ func CreateHandler(cmd *cobra.Command, args []string) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func tempZipFiles(path string) (string, error) {
|
||||||
|
tempfile, err := os.CreateTemp("", "ollama-tf")
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
defer tempfile.Close()
|
||||||
|
|
||||||
|
zipfile := zip.NewWriter(tempfile)
|
||||||
|
defer zipfile.Close()
|
||||||
|
|
||||||
|
detectContentType := func(path string) (string, error) {
|
||||||
|
f, err := os.Open(path)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
var b bytes.Buffer
|
||||||
|
b.Grow(512)
|
||||||
|
|
||||||
|
if _, err := io.CopyN(&b, f, 512); err != nil && !errors.Is(err, io.EOF) {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
contentType, _, _ := strings.Cut(http.DetectContentType(b.Bytes()), ";")
|
||||||
|
return contentType, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
glob := func(pattern, contentType string) ([]string, error) {
|
||||||
|
matches, err := filepath.Glob(pattern)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, safetensor := range matches {
|
||||||
|
if ct, err := detectContentType(safetensor); err != nil {
|
||||||
|
return nil, err
|
||||||
|
} else if ct != contentType {
|
||||||
|
return nil, fmt.Errorf("invalid content type: expected %s for %s", ct, safetensor)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return matches, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var files []string
|
||||||
|
if st, _ := glob(filepath.Join(path, "model*.safetensors"), "application/octet-stream"); len(st) > 0 {
|
||||||
|
// safetensors files might be unresolved git lfs references; skip if they are
|
||||||
|
// covers model-x-of-y.safetensors, model.fp32-x-of-y.safetensors, model.safetensors
|
||||||
|
files = append(files, st...)
|
||||||
|
} else if pt, _ := glob(filepath.Join(path, "pytorch_model*.bin"), "application/zip"); len(pt) > 0 {
|
||||||
|
// pytorch files might also be unresolved git lfs references; skip if they are
|
||||||
|
// covers pytorch_model-x-of-y.bin, pytorch_model.fp32-x-of-y.bin, pytorch_model.bin
|
||||||
|
files = append(files, pt...)
|
||||||
|
} else if pt, _ := glob(filepath.Join(path, "consolidated*.pth"), "application/zip"); len(pt) > 0 {
|
||||||
|
// pytorch files might also be unresolved git lfs references; skip if they are
|
||||||
|
// covers consolidated.x.pth, consolidated.pth
|
||||||
|
files = append(files, pt...)
|
||||||
|
} else {
|
||||||
|
return "", errors.New("no safetensors or torch files found")
|
||||||
|
}
|
||||||
|
|
||||||
|
// add configuration files, json files are detected as text/plain
|
||||||
|
js, err := glob(filepath.Join(path, "*.json"), "text/plain")
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
files = append(files, js...)
|
||||||
|
|
||||||
|
if tks, _ := glob(filepath.Join(path, "tokenizer.model"), "application/octet-stream"); len(tks) > 0 {
|
||||||
|
// add tokenizer.model if it exists, tokenizer.json is automatically picked up by the previous glob
|
||||||
|
// tokenizer.model might be a unresolved git lfs reference; error if it is
|
||||||
|
files = append(files, tks...)
|
||||||
|
} else if tks, _ := glob(filepath.Join(path, "**/tokenizer.model"), "text/plain"); len(tks) > 0 {
|
||||||
|
// some times tokenizer.model is in a subdirectory (e.g. meta-llama/Meta-Llama-3-8B)
|
||||||
|
files = append(files, tks...)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, file := range files {
|
||||||
|
f, err := os.Open(file)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
fi, err := f.Stat()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
zfi, err := zip.FileInfoHeader(fi)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
zf, err := zipfile.CreateHeader(zfi)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := io.Copy(zf, f); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return tempfile.Name(), nil
|
||||||
|
}
|
||||||
|
|
||||||
func createBlob(cmd *cobra.Command, client *api.Client, path string) (string, error) {
|
func createBlob(cmd *cobra.Command, client *api.Client, path string) (string, error) {
|
||||||
bin, err := os.Open(path)
|
bin, err := os.Open(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -292,6 +327,18 @@ func RunHandler(cmd *cobra.Command, args []string) error {
|
|||||||
}
|
}
|
||||||
opts.Format = format
|
opts.Format = format
|
||||||
|
|
||||||
|
keepAlive, err := cmd.Flags().GetString("keepalive")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if keepAlive != "" {
|
||||||
|
d, err := time.ParseDuration(keepAlive)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
opts.KeepAlive = &api.Duration{Duration: d}
|
||||||
|
}
|
||||||
|
|
||||||
prompts := args[1:]
|
prompts := args[1:]
|
||||||
// prepend stdin to the prompt if provided
|
// prepend stdin to the prompt if provided
|
||||||
if !term.IsTerminal(int(os.Stdin.Fd())) {
|
if !term.IsTerminal(int(os.Stdin.Fd())) {
|
||||||
@@ -322,6 +369,47 @@ func RunHandler(cmd *cobra.Command, args []string) error {
|
|||||||
return generateInteractive(cmd, opts)
|
return generateInteractive(cmd, opts)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func errFromUnknownKey(unknownKeyErr error) error {
|
||||||
|
// find SSH public key in the error message
|
||||||
|
sshKeyPattern := `ssh-\w+ [^\s"]+`
|
||||||
|
re := regexp.MustCompile(sshKeyPattern)
|
||||||
|
matches := re.FindStringSubmatch(unknownKeyErr.Error())
|
||||||
|
|
||||||
|
if len(matches) > 0 {
|
||||||
|
serverPubKey := matches[0]
|
||||||
|
|
||||||
|
localPubKey, err := auth.GetPublicKey()
|
||||||
|
if err != nil {
|
||||||
|
return unknownKeyErr
|
||||||
|
}
|
||||||
|
|
||||||
|
if runtime.GOOS == "linux" && serverPubKey != localPubKey {
|
||||||
|
// try the ollama service public key
|
||||||
|
svcPubKey, err := os.ReadFile("/usr/share/ollama/.ollama/id_ed25519.pub")
|
||||||
|
if err != nil {
|
||||||
|
return unknownKeyErr
|
||||||
|
}
|
||||||
|
localPubKey = strings.TrimSpace(string(svcPubKey))
|
||||||
|
}
|
||||||
|
|
||||||
|
// check if the returned public key matches the local public key, this prevents adding a remote key to the user's account
|
||||||
|
if serverPubKey != localPubKey {
|
||||||
|
return unknownKeyErr
|
||||||
|
}
|
||||||
|
|
||||||
|
var msg strings.Builder
|
||||||
|
msg.WriteString(unknownKeyErr.Error())
|
||||||
|
msg.WriteString("\n\nYour ollama key is:\n")
|
||||||
|
msg.WriteString(localPubKey)
|
||||||
|
msg.WriteString("\nAdd your key at:\n")
|
||||||
|
msg.WriteString("https://ollama.com/settings/keys")
|
||||||
|
|
||||||
|
return errors.New(msg.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
return unknownKeyErr
|
||||||
|
}
|
||||||
|
|
||||||
func PushHandler(cmd *cobra.Command, args []string) error {
|
func PushHandler(cmd *cobra.Command, args []string) error {
|
||||||
client, err := api.ClientFromEnvironment()
|
client, err := api.ClientFromEnvironment()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -369,6 +457,20 @@ func PushHandler(cmd *cobra.Command, args []string) error {
|
|||||||
|
|
||||||
request := api.PushRequest{Name: args[0], Insecure: insecure}
|
request := api.PushRequest{Name: args[0], Insecure: insecure}
|
||||||
if err := client.Push(cmd.Context(), &request, fn); err != nil {
|
if err := client.Push(cmd.Context(), &request, fn); err != nil {
|
||||||
|
if spinner != nil {
|
||||||
|
spinner.Stop()
|
||||||
|
}
|
||||||
|
if strings.Contains(err.Error(), "access denied") {
|
||||||
|
return errors.New("you are not authorized to push to this namespace, create the model under a namespace you own")
|
||||||
|
}
|
||||||
|
host := model.ParseName(args[0]).Host
|
||||||
|
isOllamaHost := strings.HasSuffix(host, ".ollama.ai") || strings.HasSuffix(host, ".ollama.com")
|
||||||
|
if strings.Contains(err.Error(), errtypes.UnknownOllamaKeyErrMsg) && isOllamaHost {
|
||||||
|
// the user has not added their ollama key to ollama.com
|
||||||
|
// re-throw an error with a more user-friendly message
|
||||||
|
return errFromUnknownKey(err)
|
||||||
|
}
|
||||||
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -409,6 +511,52 @@ func ListHandler(cmd *cobra.Command, args []string) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func ListRunningHandler(cmd *cobra.Command, args []string) error {
|
||||||
|
client, err := api.ClientFromEnvironment()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
models, err := client.ListRunning(cmd.Context())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
var data [][]string
|
||||||
|
|
||||||
|
for _, m := range models.Models {
|
||||||
|
if len(args) == 0 || strings.HasPrefix(m.Name, args[0]) {
|
||||||
|
var procStr string
|
||||||
|
switch {
|
||||||
|
case m.SizeVRAM == 0:
|
||||||
|
procStr = "100% CPU"
|
||||||
|
case m.SizeVRAM == m.Size:
|
||||||
|
procStr = "100% GPU"
|
||||||
|
case m.SizeVRAM > m.Size || m.Size == 0:
|
||||||
|
procStr = "Unknown"
|
||||||
|
default:
|
||||||
|
sizeCPU := m.Size - m.SizeVRAM
|
||||||
|
cpuPercent := math.Round(float64(sizeCPU) / float64(m.Size) * 100)
|
||||||
|
procStr = fmt.Sprintf("%d%%/%d%% CPU/GPU", int(cpuPercent), int(100-cpuPercent))
|
||||||
|
}
|
||||||
|
data = append(data, []string{m.Name, m.Digest[:12], format.HumanBytes(m.Size), procStr, format.HumanTime(m.ExpiresAt, "Never")})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
table := tablewriter.NewWriter(os.Stdout)
|
||||||
|
table.SetHeader([]string{"NAME", "ID", "SIZE", "PROCESSOR", "UNTIL"})
|
||||||
|
table.SetHeaderAlignment(tablewriter.ALIGN_LEFT)
|
||||||
|
table.SetAlignment(tablewriter.ALIGN_LEFT)
|
||||||
|
table.SetHeaderLine(false)
|
||||||
|
table.SetBorder(false)
|
||||||
|
table.SetNoWhiteSpace(true)
|
||||||
|
table.SetTablePadding("\t")
|
||||||
|
table.AppendBulk(data)
|
||||||
|
table.Render()
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func DeleteHandler(cmd *cobra.Command, args []string) error {
|
func DeleteHandler(cmd *cobra.Command, args []string) error {
|
||||||
client, err := api.ClientFromEnvironment()
|
client, err := api.ClientFromEnvironment()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -585,6 +733,7 @@ type runOptions struct {
|
|||||||
Images []api.ImageData
|
Images []api.ImageData
|
||||||
Options map[string]interface{}
|
Options map[string]interface{}
|
||||||
MultiModal bool
|
MultiModal bool
|
||||||
|
KeepAlive *api.Duration
|
||||||
}
|
}
|
||||||
|
|
||||||
type displayResponseState struct {
|
type displayResponseState struct {
|
||||||
@@ -597,7 +746,8 @@ func displayResponse(content string, wordWrap bool, state *displayResponseState)
|
|||||||
if wordWrap && termWidth >= 10 {
|
if wordWrap && termWidth >= 10 {
|
||||||
for _, ch := range content {
|
for _, ch := range content {
|
||||||
if state.lineLength+1 > termWidth-5 {
|
if state.lineLength+1 > termWidth-5 {
|
||||||
if len(state.wordBuffer) > termWidth-10 {
|
|
||||||
|
if runewidth.StringWidth(state.wordBuffer) > termWidth-10 {
|
||||||
fmt.Printf("%s%c", state.wordBuffer, ch)
|
fmt.Printf("%s%c", state.wordBuffer, ch)
|
||||||
state.wordBuffer = ""
|
state.wordBuffer = ""
|
||||||
state.lineLength = 0
|
state.lineLength = 0
|
||||||
@@ -605,12 +755,22 @@ func displayResponse(content string, wordWrap bool, state *displayResponseState)
|
|||||||
}
|
}
|
||||||
|
|
||||||
// backtrack the length of the last word and clear to the end of the line
|
// backtrack the length of the last word and clear to the end of the line
|
||||||
fmt.Printf("\x1b[%dD\x1b[K\n", len(state.wordBuffer))
|
a := runewidth.StringWidth(state.wordBuffer)
|
||||||
|
if a > 0 {
|
||||||
|
fmt.Printf("\x1b[%dD", a)
|
||||||
|
}
|
||||||
|
fmt.Printf("\x1b[K\n")
|
||||||
fmt.Printf("%s%c", state.wordBuffer, ch)
|
fmt.Printf("%s%c", state.wordBuffer, ch)
|
||||||
state.lineLength = len(state.wordBuffer) + 1
|
chWidth := runewidth.RuneWidth(ch)
|
||||||
|
|
||||||
|
state.lineLength = runewidth.StringWidth(state.wordBuffer) + chWidth
|
||||||
} else {
|
} else {
|
||||||
fmt.Print(string(ch))
|
fmt.Print(string(ch))
|
||||||
state.lineLength += 1
|
state.lineLength += runewidth.RuneWidth(ch)
|
||||||
|
if runewidth.RuneWidth(ch) >= 2 {
|
||||||
|
state.wordBuffer = ""
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
switch ch {
|
switch ch {
|
||||||
case ' ':
|
case ' ':
|
||||||
@@ -679,6 +839,10 @@ func chat(cmd *cobra.Command, opts runOptions) (*api.Message, error) {
|
|||||||
Options: opts.Options,
|
Options: opts.Options,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if opts.KeepAlive != nil {
|
||||||
|
req.KeepAlive = opts.KeepAlive
|
||||||
|
}
|
||||||
|
|
||||||
if err := client.Chat(cancelCtx, req, fn); err != nil {
|
if err := client.Chat(cancelCtx, req, fn); err != nil {
|
||||||
if errors.Is(err, context.Canceled) {
|
if errors.Is(err, context.Canceled) {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
@@ -754,14 +918,15 @@ func generate(cmd *cobra.Command, opts runOptions) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
request := api.GenerateRequest{
|
request := api.GenerateRequest{
|
||||||
Model: opts.Model,
|
Model: opts.Model,
|
||||||
Prompt: opts.Prompt,
|
Prompt: opts.Prompt,
|
||||||
Context: generateContext,
|
Context: generateContext,
|
||||||
Images: opts.Images,
|
Images: opts.Images,
|
||||||
Format: opts.Format,
|
Format: opts.Format,
|
||||||
System: opts.System,
|
System: opts.System,
|
||||||
Template: opts.Template,
|
Template: opts.Template,
|
||||||
Options: opts.Options,
|
Options: opts.Options,
|
||||||
|
KeepAlive: opts.KeepAlive,
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := client.Generate(ctx, &request, fn); err != nil {
|
if err := client.Generate(ctx, &request, fn); err != nil {
|
||||||
@@ -796,24 +961,27 @@ func generate(cmd *cobra.Command, opts runOptions) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func RunServer(cmd *cobra.Command, _ []string) error {
|
func RunServer(cmd *cobra.Command, _ []string) error {
|
||||||
host, port, err := net.SplitHostPort(strings.Trim(os.Getenv("OLLAMA_HOST"), "\"'"))
|
// retrieve the OLLAMA_HOST environment variable
|
||||||
|
ollamaHost, err := api.GetOllamaHost()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
host, port = "127.0.0.1", "11434"
|
return err
|
||||||
if ip := net.ParseIP(strings.Trim(os.Getenv("OLLAMA_HOST"), "[]")); ip != nil {
|
|
||||||
host = ip.String()
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := initializeKeypair(); err != nil {
|
if err := initializeKeypair(); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
ln, err := net.Listen("tcp", net.JoinHostPort(host, port))
|
ln, err := net.Listen("tcp", net.JoinHostPort(ollamaHost.Host, ollamaHost.Port))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
return server.Serve(ln)
|
err = server.Serve(ln)
|
||||||
|
if errors.Is(err, http.ErrServerClosed) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func initializeKeypair() error {
|
func initializeKeypair() error {
|
||||||
@@ -916,12 +1084,19 @@ func versionHandler(cmd *cobra.Command, _ []string) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func appendHostEnvDocs(cmd *cobra.Command) {
|
func appendEnvDocs(cmd *cobra.Command, envs []envconfig.EnvVar) {
|
||||||
const hostEnvDocs = `
|
if len(envs) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
envUsage := `
|
||||||
Environment Variables:
|
Environment Variables:
|
||||||
OLLAMA_HOST The host:port or base URL of the Ollama server (e.g. http://localhost:11434)
|
|
||||||
`
|
`
|
||||||
cmd.SetUsageTemplate(cmd.UsageTemplate() + hostEnvDocs)
|
for _, e := range envs {
|
||||||
|
envUsage += fmt.Sprintf(" %-24s %s\n", e.Name, e.Description)
|
||||||
|
}
|
||||||
|
|
||||||
|
cmd.SetUsageTemplate(cmd.UsageTemplate() + envUsage)
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewCLI() *cobra.Command {
|
func NewCLI() *cobra.Command {
|
||||||
@@ -960,8 +1135,8 @@ func NewCLI() *cobra.Command {
|
|||||||
RunE: CreateHandler,
|
RunE: CreateHandler,
|
||||||
}
|
}
|
||||||
|
|
||||||
createCmd.Flags().StringP("file", "f", "Modelfile", "Name of the Modelfile (default \"Modelfile\")")
|
createCmd.Flags().StringP("file", "f", "Modelfile", "Name of the Modelfile")
|
||||||
createCmd.Flags().StringP("quantization", "q", "", "Quantization level.")
|
createCmd.Flags().StringP("quantize", "q", "", "Quantize model to this level (e.g. q4_0)")
|
||||||
|
|
||||||
showCmd := &cobra.Command{
|
showCmd := &cobra.Command{
|
||||||
Use: "show MODEL",
|
Use: "show MODEL",
|
||||||
@@ -985,6 +1160,7 @@ func NewCLI() *cobra.Command {
|
|||||||
RunE: RunHandler,
|
RunE: RunHandler,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
runCmd.Flags().String("keepalive", "", "Duration to keep a model loaded (e.g. 5m)")
|
||||||
runCmd.Flags().Bool("verbose", false, "Show timings for response")
|
runCmd.Flags().Bool("verbose", false, "Show timings for response")
|
||||||
runCmd.Flags().Bool("insecure", false, "Use an insecure registry")
|
runCmd.Flags().Bool("insecure", false, "Use an insecure registry")
|
||||||
runCmd.Flags().Bool("nowordwrap", false, "Don't wrap words to the next line automatically")
|
runCmd.Flags().Bool("nowordwrap", false, "Don't wrap words to the next line automatically")
|
||||||
@@ -996,15 +1172,6 @@ func NewCLI() *cobra.Command {
|
|||||||
Args: cobra.ExactArgs(0),
|
Args: cobra.ExactArgs(0),
|
||||||
RunE: RunServer,
|
RunE: RunServer,
|
||||||
}
|
}
|
||||||
serveCmd.SetUsageTemplate(serveCmd.UsageTemplate() + `
|
|
||||||
Environment Variables:
|
|
||||||
|
|
||||||
OLLAMA_HOST The host:port to bind to (default "127.0.0.1:11434")
|
|
||||||
OLLAMA_ORIGINS A comma separated list of allowed origins.
|
|
||||||
OLLAMA_MODELS The path to the models directory (default is "~/.ollama/models")
|
|
||||||
OLLAMA_KEEP_ALIVE The duration that models stay loaded in memory (default is "5m")
|
|
||||||
OLLAMA_DEBUG Set to 1 to enable additional debug logging
|
|
||||||
`)
|
|
||||||
|
|
||||||
pullCmd := &cobra.Command{
|
pullCmd := &cobra.Command{
|
||||||
Use: "pull MODEL",
|
Use: "pull MODEL",
|
||||||
@@ -1033,8 +1200,16 @@ Environment Variables:
|
|||||||
PreRunE: checkServerHeartbeat,
|
PreRunE: checkServerHeartbeat,
|
||||||
RunE: ListHandler,
|
RunE: ListHandler,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
psCmd := &cobra.Command{
|
||||||
|
Use: "ps",
|
||||||
|
Short: "List running models",
|
||||||
|
PreRunE: checkServerHeartbeat,
|
||||||
|
RunE: ListRunningHandler,
|
||||||
|
}
|
||||||
|
|
||||||
copyCmd := &cobra.Command{
|
copyCmd := &cobra.Command{
|
||||||
Use: "cp SOURCE TARGET",
|
Use: "cp SOURCE DESTINATION",
|
||||||
Short: "Copy a model",
|
Short: "Copy a model",
|
||||||
Args: cobra.ExactArgs(2),
|
Args: cobra.ExactArgs(2),
|
||||||
PreRunE: checkServerHeartbeat,
|
PreRunE: checkServerHeartbeat,
|
||||||
@@ -1049,6 +1224,10 @@ Environment Variables:
|
|||||||
RunE: DeleteHandler,
|
RunE: DeleteHandler,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
envVars := envconfig.AsMap()
|
||||||
|
|
||||||
|
envs := []envconfig.EnvVar{envVars["OLLAMA_HOST"]}
|
||||||
|
|
||||||
for _, cmd := range []*cobra.Command{
|
for _, cmd := range []*cobra.Command{
|
||||||
createCmd,
|
createCmd,
|
||||||
showCmd,
|
showCmd,
|
||||||
@@ -1056,10 +1235,33 @@ Environment Variables:
|
|||||||
pullCmd,
|
pullCmd,
|
||||||
pushCmd,
|
pushCmd,
|
||||||
listCmd,
|
listCmd,
|
||||||
|
psCmd,
|
||||||
copyCmd,
|
copyCmd,
|
||||||
deleteCmd,
|
deleteCmd,
|
||||||
|
serveCmd,
|
||||||
} {
|
} {
|
||||||
appendHostEnvDocs(cmd)
|
switch cmd {
|
||||||
|
case runCmd:
|
||||||
|
appendEnvDocs(cmd, []envconfig.EnvVar{envVars["OLLAMA_HOST"], envVars["OLLAMA_NOHISTORY"]})
|
||||||
|
case serveCmd:
|
||||||
|
appendEnvDocs(cmd, []envconfig.EnvVar{
|
||||||
|
envVars["OLLAMA_DEBUG"],
|
||||||
|
envVars["OLLAMA_HOST"],
|
||||||
|
envVars["OLLAMA_KEEP_ALIVE"],
|
||||||
|
envVars["OLLAMA_MAX_LOADED_MODELS"],
|
||||||
|
envVars["OLLAMA_MAX_QUEUE"],
|
||||||
|
envVars["OLLAMA_MODELS"],
|
||||||
|
envVars["OLLAMA_NUM_PARALLEL"],
|
||||||
|
envVars["OLLAMA_NOPRUNE"],
|
||||||
|
envVars["OLLAMA_ORIGINS"],
|
||||||
|
envVars["OLLAMA_TMPDIR"],
|
||||||
|
envVars["OLLAMA_FLASH_ATTENTION"],
|
||||||
|
envVars["OLLAMA_LLM_LIBRARY"],
|
||||||
|
envVars["OLLAMA_MAX_VRAM"],
|
||||||
|
})
|
||||||
|
default:
|
||||||
|
appendEnvDocs(cmd, envs)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
rootCmd.AddCommand(
|
rootCmd.AddCommand(
|
||||||
@@ -1070,6 +1272,7 @@ Environment Variables:
|
|||||||
pullCmd,
|
pullCmd,
|
||||||
pushCmd,
|
pushCmd,
|
||||||
listCmd,
|
listCmd,
|
||||||
|
psCmd,
|
||||||
copyCmd,
|
copyCmd,
|
||||||
deleteCmd,
|
deleteCmd,
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -15,8 +15,10 @@ import (
|
|||||||
"golang.org/x/exp/slices"
|
"golang.org/x/exp/slices"
|
||||||
|
|
||||||
"github.com/ollama/ollama/api"
|
"github.com/ollama/ollama/api"
|
||||||
|
"github.com/ollama/ollama/envconfig"
|
||||||
"github.com/ollama/ollama/progress"
|
"github.com/ollama/ollama/progress"
|
||||||
"github.com/ollama/ollama/readline"
|
"github.com/ollama/ollama/readline"
|
||||||
|
"github.com/ollama/ollama/types/errtypes"
|
||||||
)
|
)
|
||||||
|
|
||||||
type MultilineState int
|
type MultilineState int
|
||||||
@@ -56,6 +58,11 @@ func loadModel(cmd *cobra.Command, opts *runOptions) error {
|
|||||||
Model: opts.Model,
|
Model: opts.Model,
|
||||||
Messages: []api.Message{},
|
Messages: []api.Message{},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if opts.KeepAlive != nil {
|
||||||
|
chatReq.KeepAlive = opts.KeepAlive
|
||||||
|
}
|
||||||
|
|
||||||
err = client.Chat(cmd.Context(), chatReq, func(resp api.ChatResponse) error {
|
err = client.Chat(cmd.Context(), chatReq, func(resp api.ChatResponse) error {
|
||||||
p.StopAndClear()
|
p.StopAndClear()
|
||||||
if len(opts.Messages) > 0 {
|
if len(opts.Messages) > 0 {
|
||||||
@@ -94,6 +101,7 @@ func generateInteractive(cmd *cobra.Command, opts runOptions) error {
|
|||||||
fmt.Fprintln(os.Stderr, " /show Show model information")
|
fmt.Fprintln(os.Stderr, " /show Show model information")
|
||||||
fmt.Fprintln(os.Stderr, " /load <model> Load a session or model")
|
fmt.Fprintln(os.Stderr, " /load <model> Load a session or model")
|
||||||
fmt.Fprintln(os.Stderr, " /save <model> Save your current session")
|
fmt.Fprintln(os.Stderr, " /save <model> Save your current session")
|
||||||
|
fmt.Fprintln(os.Stderr, " /clear Clear session context")
|
||||||
fmt.Fprintln(os.Stderr, " /bye Exit")
|
fmt.Fprintln(os.Stderr, " /bye Exit")
|
||||||
fmt.Fprintln(os.Stderr, " /?, /help Help for a command")
|
fmt.Fprintln(os.Stderr, " /?, /help Help for a command")
|
||||||
fmt.Fprintln(os.Stderr, " /? shortcuts Help for keyboard shortcuts")
|
fmt.Fprintln(os.Stderr, " /? shortcuts Help for keyboard shortcuts")
|
||||||
@@ -131,6 +139,7 @@ func generateInteractive(cmd *cobra.Command, opts runOptions) error {
|
|||||||
fmt.Fprintln(os.Stderr, " Alt + f Move forward (right) one word")
|
fmt.Fprintln(os.Stderr, " Alt + f Move forward (right) one word")
|
||||||
fmt.Fprintln(os.Stderr, " Ctrl + k Delete the sentence after the cursor")
|
fmt.Fprintln(os.Stderr, " Ctrl + k Delete the sentence after the cursor")
|
||||||
fmt.Fprintln(os.Stderr, " Ctrl + u Delete the sentence before the cursor")
|
fmt.Fprintln(os.Stderr, " Ctrl + u Delete the sentence before the cursor")
|
||||||
|
fmt.Fprintln(os.Stderr, " Ctrl + w Delete the word before the cursor")
|
||||||
fmt.Fprintln(os.Stderr, "")
|
fmt.Fprintln(os.Stderr, "")
|
||||||
fmt.Fprintln(os.Stderr, " Ctrl + l Clear the screen")
|
fmt.Fprintln(os.Stderr, " Ctrl + l Clear the screen")
|
||||||
fmt.Fprintln(os.Stderr, " Ctrl + c Stop the model from responding")
|
fmt.Fprintln(os.Stderr, " Ctrl + c Stop the model from responding")
|
||||||
@@ -161,7 +170,7 @@ func generateInteractive(cmd *cobra.Command, opts runOptions) error {
|
|||||||
fmt.Fprintln(os.Stderr, " /set parameter repeat_penalty <float> How strongly to penalize repetitions")
|
fmt.Fprintln(os.Stderr, " /set parameter repeat_penalty <float> How strongly to penalize repetitions")
|
||||||
fmt.Fprintln(os.Stderr, " /set parameter repeat_last_n <int> Set how far back to look for repetitions")
|
fmt.Fprintln(os.Stderr, " /set parameter repeat_last_n <int> Set how far back to look for repetitions")
|
||||||
fmt.Fprintln(os.Stderr, " /set parameter num_gpu <int> The number of layers to send to the GPU")
|
fmt.Fprintln(os.Stderr, " /set parameter num_gpu <int> The number of layers to send to the GPU")
|
||||||
fmt.Fprintln(os.Stderr, " /set parameter stop \"<string>\", ... Set the stop parameters")
|
fmt.Fprintln(os.Stderr, " /set parameter stop <string> <string> ... Set the stop parameters")
|
||||||
fmt.Fprintln(os.Stderr, "")
|
fmt.Fprintln(os.Stderr, "")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -175,6 +184,10 @@ func generateInteractive(cmd *cobra.Command, opts runOptions) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if envconfig.NoHistory {
|
||||||
|
scanner.HistoryDisable()
|
||||||
|
}
|
||||||
|
|
||||||
fmt.Print(readline.StartBracketedPaste)
|
fmt.Print(readline.StartBracketedPaste)
|
||||||
defer fmt.Printf(readline.EndBracketedPaste)
|
defer fmt.Printf(readline.EndBracketedPaste)
|
||||||
|
|
||||||
@@ -275,11 +288,22 @@ func generateInteractive(cmd *cobra.Command, opts runOptions) error {
|
|||||||
fn := func(resp api.ProgressResponse) error { return nil }
|
fn := func(resp api.ProgressResponse) error { return nil }
|
||||||
err = client.Create(cmd.Context(), req, fn)
|
err = client.Create(cmd.Context(), req, fn)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println("error: couldn't save model")
|
if strings.Contains(err.Error(), errtypes.InvalidModelNameErrMsg) {
|
||||||
|
fmt.Printf("error: The model name '%s' is invalid\n", args[1])
|
||||||
|
continue
|
||||||
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
fmt.Printf("Created new model '%s'\n", args[1])
|
fmt.Printf("Created new model '%s'\n", args[1])
|
||||||
continue
|
continue
|
||||||
|
case strings.HasPrefix(line, "/clear"):
|
||||||
|
opts.Messages = []api.Message{}
|
||||||
|
if opts.System != "" {
|
||||||
|
newMessage := api.Message{Role: "system", Content: opts.System}
|
||||||
|
opts.Messages = append(opts.Messages, newMessage)
|
||||||
|
}
|
||||||
|
fmt.Println("Cleared session context")
|
||||||
|
continue
|
||||||
case strings.HasPrefix(line, "/set"):
|
case strings.HasPrefix(line, "/set"):
|
||||||
args := strings.Fields(line)
|
args := strings.Fields(line)
|
||||||
if len(args) > 1 {
|
if len(args) > 1 {
|
||||||
|
|||||||
@@ -5,6 +5,7 @@ import (
|
|||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io"
|
||||||
"log/slog"
|
"log/slog"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
@@ -17,20 +18,36 @@ import (
|
|||||||
"github.com/ollama/ollama/llm"
|
"github.com/ollama/ollama/llm"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
_ int32 = iota
|
||||||
|
tokenTypeNormal
|
||||||
|
tokenTypeUnknown
|
||||||
|
tokenTypeControl
|
||||||
|
tokenTypeUserDefined
|
||||||
|
tokenTypeUnused
|
||||||
|
tokenTypeByte
|
||||||
|
)
|
||||||
|
|
||||||
type Params struct {
|
type Params struct {
|
||||||
Architectures []string `json:"architectures"`
|
Architectures []string `json:"architectures"`
|
||||||
VocabSize int `json:"vocab_size"`
|
VocabSize int `json:"vocab_size"`
|
||||||
HiddenSize int `json:"hidden_size"` // n_embd
|
HiddenSize int `json:"hidden_size"` // n_embd
|
||||||
HiddenLayers int `json:"num_hidden_layers"` // n_layer
|
HiddenLayers int `json:"num_hidden_layers"` // n_layer
|
||||||
ContextSize int `json:"max_position_embeddings"`
|
ContextSize int `json:"max_position_embeddings"`
|
||||||
IntermediateSize int `json:"intermediate_size"`
|
IntermediateSize int `json:"intermediate_size"`
|
||||||
AttentionHeads int `json:"num_attention_heads"` // n_head
|
AttentionHeads int `json:"num_attention_heads"` // n_head
|
||||||
KeyValHeads int `json:"num_key_value_heads"`
|
KeyValHeads int `json:"num_key_value_heads"`
|
||||||
NormEPS float64 `json:"rms_norm_eps"`
|
NormEPS float64 `json:"rms_norm_eps"`
|
||||||
BoSTokenID int `json:"bos_token_id"`
|
BoSTokenID int `json:"bos_token_id"`
|
||||||
EoSTokenID int `json:"eos_token_id"`
|
EoSTokenID int `json:"eos_token_id"`
|
||||||
HeadDimension int `json:"head_dim"`
|
HeadDimension int `json:"head_dim"`
|
||||||
PaddingTokenID int `json:"pad_token_id"`
|
PaddingTokenID int `json:"pad_token_id"`
|
||||||
|
RopeFrequencyBase float64 `json:"rope_theta"`
|
||||||
|
|
||||||
|
Experts int `json:"num_local_experts"`
|
||||||
|
ExpertsUsed int `json:"num_experts_per_tok"`
|
||||||
|
|
||||||
|
PreTokenizer string
|
||||||
|
|
||||||
ByteOrder
|
ByteOrder
|
||||||
}
|
}
|
||||||
@@ -43,7 +60,7 @@ type ByteOrder interface {
|
|||||||
type ModelArch interface {
|
type ModelArch interface {
|
||||||
GetTensors() error
|
GetTensors() error
|
||||||
LoadVocab() error
|
LoadVocab() error
|
||||||
WriteGGUF() (string, error)
|
WriteGGUF(io.WriteSeeker) error
|
||||||
}
|
}
|
||||||
|
|
||||||
type ModelFormat interface {
|
type ModelFormat interface {
|
||||||
@@ -69,10 +86,9 @@ func GetModelFormat(dirname string) (ModelFormat, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, fn := range files {
|
for _, fn := range files {
|
||||||
slog.Debug(fmt.Sprintf("file = %s", fn))
|
|
||||||
if strings.HasSuffix(fn, ".safetensors") {
|
if strings.HasSuffix(fn, ".safetensors") {
|
||||||
return &SafetensorFormat{}, nil
|
return &SafetensorFormat{}, nil
|
||||||
} else if strings.HasSuffix(fn, ".bin") {
|
} else if strings.HasSuffix(fn, ".bin") || strings.HasSuffix(fn, ".pth") {
|
||||||
slog.Debug("model is torch")
|
slog.Debug("model is torch")
|
||||||
return &TorchFormat{}, nil
|
return &TorchFormat{}, nil
|
||||||
}
|
}
|
||||||
@@ -87,6 +103,7 @@ type Vocab struct {
|
|||||||
Tokens []string
|
Tokens []string
|
||||||
Scores []float32
|
Scores []float32
|
||||||
Types []int32
|
Types []int32
|
||||||
|
Merges []string
|
||||||
}
|
}
|
||||||
|
|
||||||
func LoadSentencePieceTokens(dirpath string, params *Params) (*Vocab, error) {
|
func LoadSentencePieceTokens(dirpath string, params *Params) (*Vocab, error) {
|
||||||
@@ -165,7 +182,7 @@ func LoadSentencePieceTokens(dirpath string, params *Params) (*Vocab, error) {
|
|||||||
}
|
}
|
||||||
v.Tokens = append(v.Tokens, t.key)
|
v.Tokens = append(v.Tokens, t.key)
|
||||||
v.Scores = append(v.Scores, -1000.0)
|
v.Scores = append(v.Scores, -1000.0)
|
||||||
v.Types = append(v.Types, int32(llm.GGUFTokenUserDefined))
|
v.Types = append(v.Types, tokenTypeUserDefined)
|
||||||
}
|
}
|
||||||
slog.Info(fmt.Sprintf("vocab size w/ extra tokens: %d", len(v.Tokens)))
|
slog.Info(fmt.Sprintf("vocab size w/ extra tokens: %d", len(v.Tokens)))
|
||||||
|
|
||||||
@@ -175,7 +192,7 @@ func LoadSentencePieceTokens(dirpath string, params *Params) (*Vocab, error) {
|
|||||||
for cnt := 0; cnt < missingTokens; cnt++ {
|
for cnt := 0; cnt < missingTokens; cnt++ {
|
||||||
v.Tokens = append(v.Tokens, fmt.Sprintf("<dummy%05d>", cnt+1))
|
v.Tokens = append(v.Tokens, fmt.Sprintf("<dummy%05d>", cnt+1))
|
||||||
v.Scores = append(v.Scores, -1)
|
v.Scores = append(v.Scores, -1)
|
||||||
v.Types = append(v.Types, int32(llm.GGUFTokenUserDefined))
|
v.Types = append(v.Types, tokenTypeUserDefined)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
103
convert/convert_test.go
Normal file
103
convert/convert_test.go
Normal file
@@ -0,0 +1,103 @@
|
|||||||
|
//go:build slow
|
||||||
|
|
||||||
|
package convert
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/ollama/ollama/llm"
|
||||||
|
)
|
||||||
|
|
||||||
|
func convertFull(t *testing.T, p string) (llm.KV, llm.Tensors) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
mf, err := GetModelFormat(p)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
params, err := mf.GetParams(p)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
arch, err := mf.GetModelArch("", p, params)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := arch.LoadVocab(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := arch.GetTensors(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
f, err := os.CreateTemp(t.TempDir(), "f16")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
if err := arch.WriteGGUF(f); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
r, err := os.Open(f.Name())
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
defer r.Close()
|
||||||
|
|
||||||
|
m, _, err := llm.DecodeGGML(r)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return m.KV(), m.Tensors()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestConvertFull(t *testing.T) {
|
||||||
|
cases := []struct {
|
||||||
|
path string
|
||||||
|
arch string
|
||||||
|
tensors int
|
||||||
|
layers int
|
||||||
|
}{
|
||||||
|
{"Meta-Llama-3-8B-Instruct", "llama", 291, 35},
|
||||||
|
{"Mistral-7B-Instruct-v0.2", "llama", 291, 35},
|
||||||
|
{"Mixtral-8x7B-Instruct-v0.1", "llama", 291, 35},
|
||||||
|
{"gemma-2b-it", "gemma", 164, 20},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range cases {
|
||||||
|
t.Run(tt.path, func(t *testing.T) {
|
||||||
|
p := filepath.Join("testdata", tt.path)
|
||||||
|
if _, err := os.Stat(p); err != nil {
|
||||||
|
t.Skipf("%s not found", p)
|
||||||
|
}
|
||||||
|
|
||||||
|
kv, tensors := convertFull(t, p)
|
||||||
|
|
||||||
|
if kv.Architecture() != tt.arch {
|
||||||
|
t.Fatalf("expected llama, got %s", kv.Architecture())
|
||||||
|
}
|
||||||
|
|
||||||
|
if kv.FileType().String() != "F16" {
|
||||||
|
t.Fatalf("expected F16, got %s", kv.FileType())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(tensors) != tt.tensors {
|
||||||
|
t.Fatalf("expected %d tensors, got %d", tt.tensors, len(tensors))
|
||||||
|
}
|
||||||
|
|
||||||
|
layers := tensors.Layers()
|
||||||
|
if len(layers) != tt.layers {
|
||||||
|
t.Fatalf("expected %d layers, got %d", tt.layers, len(layers))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,14 +1,11 @@
|
|||||||
package convert
|
package convert
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/binary"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"log/slog"
|
"log/slog"
|
||||||
"os"
|
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/d4l3k/go-bfloat16"
|
|
||||||
"github.com/pdevine/tensor"
|
"github.com/pdevine/tensor"
|
||||||
"github.com/pdevine/tensor/native"
|
"github.com/pdevine/tensor/native"
|
||||||
|
|
||||||
@@ -19,49 +16,27 @@ type GemmaModel struct {
|
|||||||
ModelData
|
ModelData
|
||||||
}
|
}
|
||||||
|
|
||||||
func gemmaLayerHandler(w io.Writer, r safetensorWriterTo, f *os.File) error {
|
|
||||||
slog.Debug(fmt.Sprintf("converting '%s'", r.t.Name))
|
|
||||||
|
|
||||||
data := make([]byte, r.end-r.start)
|
|
||||||
if err := binary.Read(f, r.bo, data); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
tDataF32 := bfloat16.DecodeFloat32(data)
|
|
||||||
|
|
||||||
var err error
|
|
||||||
tDataF32, err = addOnes(tDataF32, int(r.t.Shape[0]))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := binary.Write(w, r.bo, tDataF32); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func addOnes(data []float32, vectorSize int) ([]float32, error) {
|
func addOnes(data []float32, vectorSize int) ([]float32, error) {
|
||||||
n := tensor.New(tensor.WithShape(vectorSize), tensor.WithBacking(data))
|
n := tensor.New(tensor.WithShape(vectorSize), tensor.WithBacking(data))
|
||||||
ones := tensor.Ones(tensor.Float32, vectorSize)
|
ones := tensor.Ones(tensor.Float32, vectorSize)
|
||||||
|
|
||||||
var err error
|
n, err := n.Add(ones)
|
||||||
n, err = n.Add(ones)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return []float32{}, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
newN, err := native.SelectF32(n, 0)
|
ts, err := native.SelectF32(n, 0)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return []float32{}, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
var fullTensor []float32
|
var f32s []float32
|
||||||
for _, v := range newN {
|
for _, t := range ts {
|
||||||
fullTensor = append(fullTensor, v...)
|
f32s = append(f32s, t...)
|
||||||
}
|
}
|
||||||
|
|
||||||
return fullTensor, nil
|
|
||||||
|
return f32s, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *GemmaModel) GetTensors() error {
|
func (m *GemmaModel) GetTensors() error {
|
||||||
@@ -71,12 +46,10 @@ func (m *GemmaModel) GetTensors() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
slog.Debug(fmt.Sprintf("Total tensors: %d", len(t)))
|
slog.Debug(fmt.Sprintf("Total tensors: %d", len(t)))
|
||||||
|
|
||||||
m.Tensors = []llm.Tensor{}
|
|
||||||
for _, l := range t {
|
for _, l := range t {
|
||||||
if strings.HasSuffix(l.Name, "norm.weight") {
|
if strings.HasSuffix(l.Name, "norm.weight") {
|
||||||
wt := l.WriterTo.(safetensorWriterTo)
|
wt := l.WriterTo.(safetensorWriterTo)
|
||||||
wt.handler = gemmaLayerHandler
|
wt.repacker = m.Repack
|
||||||
l.WriterTo = wt
|
l.WriterTo = wt
|
||||||
}
|
}
|
||||||
m.Tensors = append(m.Tensors, l)
|
m.Tensors = append(m.Tensors, l)
|
||||||
@@ -94,7 +67,11 @@ func (m *GemmaModel) LoadVocab() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *GemmaModel) WriteGGUF() (string, error) {
|
func (m *GemmaModel) Repack(_ string, data []float32, shape []uint64) ([]float32, error) {
|
||||||
|
return addOnes(data, int(shape[0]))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *GemmaModel) WriteGGUF(ws io.WriteSeeker) error {
|
||||||
kv := llm.KV{
|
kv := llm.KV{
|
||||||
"general.architecture": "gemma",
|
"general.architecture": "gemma",
|
||||||
"general.name": m.Name,
|
"general.name": m.Name,
|
||||||
@@ -122,16 +99,5 @@ func (m *GemmaModel) WriteGGUF() (string, error) {
|
|||||||
"tokenizer.ggml.add_eos_token": false,
|
"tokenizer.ggml.add_eos_token": false,
|
||||||
}
|
}
|
||||||
|
|
||||||
f, err := os.CreateTemp("", "ollama-gguf")
|
return llm.NewGGUFV3(m.Params.ByteOrder).Encode(ws, kv, m.Tensors)
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
|
|
||||||
mod := llm.NewGGUFV3(m.Params.ByteOrder)
|
|
||||||
if err := mod.Encode(f, kv, m.Tensors); err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
return f.Name(), nil
|
|
||||||
}
|
}
|
||||||
|
|||||||
188
convert/llama.go
188
convert/llama.go
@@ -1,18 +1,17 @@
|
|||||||
package convert
|
package convert
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/binary"
|
"cmp"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"log/slog"
|
|
||||||
"os"
|
"os"
|
||||||
|
"path/filepath"
|
||||||
"regexp"
|
"regexp"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/nlpodyssey/gopickle/pytorch"
|
|
||||||
"github.com/pdevine/tensor"
|
"github.com/pdevine/tensor"
|
||||||
"github.com/pdevine/tensor/native"
|
"github.com/pdevine/tensor/native"
|
||||||
"github.com/x448/float16"
|
|
||||||
|
|
||||||
"github.com/ollama/ollama/llm"
|
"github.com/ollama/ollama/llm"
|
||||||
)
|
)
|
||||||
@@ -21,81 +20,12 @@ type LlamaModel struct {
|
|||||||
ModelData
|
ModelData
|
||||||
}
|
}
|
||||||
|
|
||||||
func llamaLayerHandler(w io.Writer, r torchWriterTo) error {
|
|
||||||
slog.Debug(fmt.Sprintf("repacking layer '%s'", r.t.Name))
|
|
||||||
|
|
||||||
data := r.storage.(*pytorch.HalfStorage).Data
|
|
||||||
tData := make([]uint16, len(data))
|
|
||||||
for cnt, v := range data {
|
|
||||||
tData[cnt] = uint16(float16.Fromfloat32(v))
|
|
||||||
}
|
|
||||||
|
|
||||||
var err error
|
|
||||||
var heads uint32
|
|
||||||
if strings.Contains(r.t.Name, "attn_q") {
|
|
||||||
heads = uint32(r.params.AttentionHeads)
|
|
||||||
} else if strings.Contains(r.t.Name, "attn_k") {
|
|
||||||
heads = uint32(r.params.KeyValHeads)
|
|
||||||
if heads == 0 {
|
|
||||||
heads = uint32(r.params.AttentionHeads)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return fmt.Errorf("unknown layer type")
|
|
||||||
}
|
|
||||||
|
|
||||||
slog.Debug(fmt.Sprintf("heads = %d", heads))
|
|
||||||
|
|
||||||
tData, err = llamaRepack(tData, int(heads), r.t.Shape)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = binary.Write(w, r.bo, tData); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func llamaRepack(data []uint16, heads int, shape []uint64) ([]uint16, error) {
|
|
||||||
n := tensor.New(tensor.WithShape(int(shape[0]), int(shape[1])), tensor.WithBacking(data))
|
|
||||||
origShape := n.Shape().Clone()
|
|
||||||
|
|
||||||
// reshape the tensor and swap axes 1 and 2 to unpack the layer for gguf
|
|
||||||
if err := n.Reshape(heads, 2, origShape[0]/heads/2, origShape[1]); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := n.T(0, 2, 1, 3); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := n.Reshape(origShape...); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := n.Transpose(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
newN, err := native.SelectU16(n, 1)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var fullTensor []uint16
|
|
||||||
for _, v := range newN {
|
|
||||||
fullTensor = append(fullTensor, v...)
|
|
||||||
}
|
|
||||||
return fullTensor, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *LlamaModel) GetTensors() error {
|
func (m *LlamaModel) GetTensors() error {
|
||||||
t, err := m.Format.GetTensors(m.Path, m.Params)
|
t, err := m.Format.GetTensors(m.Path, m.Params)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
m.Tensors = []llm.Tensor{}
|
|
||||||
|
|
||||||
pattern := `^blk\.[0-9]+\.attn_(?P<layer>q|k)\.weight$`
|
pattern := `^blk\.[0-9]+\.attn_(?P<layer>q|k)\.weight$`
|
||||||
re, err := regexp.Compile(pattern)
|
re, err := regexp.Compile(pattern)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -105,10 +35,16 @@ func (m *LlamaModel) GetTensors() error {
|
|||||||
for _, l := range t {
|
for _, l := range t {
|
||||||
matches := re.FindAllStringSubmatch(l.Name, -1)
|
matches := re.FindAllStringSubmatch(l.Name, -1)
|
||||||
if len(matches) > 0 {
|
if len(matches) > 0 {
|
||||||
slog.Debug(fmt.Sprintf("setting handler for: %s", l.Name))
|
switch m.Format.(type) {
|
||||||
wt := l.WriterTo.(torchWriterTo)
|
case *TorchFormat:
|
||||||
wt.handler = llamaLayerHandler
|
wt := l.WriterTo.(torchWriterTo)
|
||||||
l.WriterTo = wt
|
wt.repacker = m.Repack
|
||||||
|
l.WriterTo = wt
|
||||||
|
case *SafetensorFormat:
|
||||||
|
wt := l.WriterTo.(safetensorWriterTo)
|
||||||
|
wt.repacker = m.Repack
|
||||||
|
l.WriterTo = wt
|
||||||
|
}
|
||||||
}
|
}
|
||||||
m.Tensors = append(m.Tensors, l)
|
m.Tensors = append(m.Tensors, l)
|
||||||
}
|
}
|
||||||
@@ -116,23 +52,26 @@ func (m *LlamaModel) GetTensors() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *LlamaModel) LoadVocab() error {
|
func (m *LlamaModel) LoadVocab() (err error) {
|
||||||
var v *Vocab
|
pre, ts, merges, err := parseTokens(filepath.Join(m.Path, "tokenizer.json"))
|
||||||
var err error
|
if errors.Is(err, os.ErrNotExist) {
|
||||||
|
return nil
|
||||||
slog.Debug("loading vocab")
|
} else if err != nil {
|
||||||
v, err = LoadSentencePieceTokens(m.Path, m.Params)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
slog.Debug("vocab loaded")
|
m.Vocab = &Vocab{}
|
||||||
|
for _, t := range ts {
|
||||||
|
m.Vocab.Tokens = append(m.Vocab.Tokens, t.Content)
|
||||||
|
m.Vocab.Types = append(m.Vocab.Types, t.Type())
|
||||||
|
}
|
||||||
|
|
||||||
m.Vocab = v
|
m.Vocab.Merges = merges
|
||||||
|
m.Params.PreTokenizer = pre
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *LlamaModel) WriteGGUF() (string, error) {
|
func (m *LlamaModel) WriteGGUF(ws io.WriteSeeker) error {
|
||||||
kv := llm.KV{
|
kv := llm.KV{
|
||||||
"general.architecture": "llama",
|
"general.architecture": "llama",
|
||||||
"general.name": m.Name,
|
"general.name": m.Name,
|
||||||
@@ -141,36 +80,79 @@ func (m *LlamaModel) WriteGGUF() (string, error) {
|
|||||||
"llama.embedding_length": uint32(m.Params.HiddenSize),
|
"llama.embedding_length": uint32(m.Params.HiddenSize),
|
||||||
"llama.block_count": uint32(m.Params.HiddenLayers),
|
"llama.block_count": uint32(m.Params.HiddenLayers),
|
||||||
"llama.feed_forward_length": uint32(m.Params.IntermediateSize),
|
"llama.feed_forward_length": uint32(m.Params.IntermediateSize),
|
||||||
|
"llama.rope.freq_base": float32(m.Params.RopeFrequencyBase),
|
||||||
"llama.rope.dimension_count": uint32(m.Params.HiddenSize / m.Params.AttentionHeads),
|
"llama.rope.dimension_count": uint32(m.Params.HiddenSize / m.Params.AttentionHeads),
|
||||||
"llama.attention.head_count": uint32(m.Params.AttentionHeads),
|
"llama.attention.head_count": uint32(m.Params.AttentionHeads),
|
||||||
"llama.attention.head_count_kv": uint32(m.Params.KeyValHeads),
|
"llama.attention.head_count_kv": uint32(m.Params.KeyValHeads),
|
||||||
"llama.attention.layer_norm_rms_epsilon": float32(m.Params.NormEPS),
|
"llama.attention.layer_norm_rms_epsilon": float32(m.Params.NormEPS),
|
||||||
"general.file_type": uint32(1),
|
"general.file_type": uint32(1),
|
||||||
"tokenizer.ggml.model": "llama",
|
"tokenizer.ggml.model": "gpt2",
|
||||||
|
|
||||||
|
"tokenizer.ggml.pre": m.Params.PreTokenizer,
|
||||||
"tokenizer.ggml.tokens": m.Vocab.Tokens,
|
"tokenizer.ggml.tokens": m.Vocab.Tokens,
|
||||||
"tokenizer.ggml.scores": m.Vocab.Scores,
|
|
||||||
"tokenizer.ggml.token_type": m.Vocab.Types,
|
"tokenizer.ggml.token_type": m.Vocab.Types,
|
||||||
|
|
||||||
"tokenizer.ggml.bos_token_id": uint32(m.Params.BoSTokenID),
|
"tokenizer.ggml.bos_token_id": uint32(m.Params.BoSTokenID),
|
||||||
"tokenizer.ggml.eos_token_id": uint32(m.Params.EoSTokenID),
|
"tokenizer.ggml.eos_token_id": uint32(m.Params.EoSTokenID),
|
||||||
"tokenizer.ggml.unknown_token_id": uint32(0),
|
"tokenizer.ggml.unknown_token_id": uint32(0),
|
||||||
"tokenizer.ggml.add_bos_token": true,
|
|
||||||
"tokenizer.ggml.add_eos_token": false,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
f, err := os.CreateTemp("", "ollama-gguf")
|
if len(m.Vocab.Merges) > 0 {
|
||||||
if err != nil {
|
kv["tokenizer.ggml.merges"] = m.Vocab.Merges
|
||||||
return "", err
|
} else {
|
||||||
}
|
kv["tokenizer.ggml.scores"] = m.Vocab.Scores
|
||||||
defer f.Close()
|
|
||||||
|
|
||||||
mod := llm.NewGGUFV3(m.Params.ByteOrder)
|
|
||||||
if err := mod.Encode(f, kv, m.Tensors); err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
slog.Debug(fmt.Sprintf("gguf file = %s", f.Name()))
|
return llm.NewGGUFV3(m.Params.ByteOrder).Encode(ws, kv, m.Tensors)
|
||||||
|
}
|
||||||
return f.Name(), nil
|
|
||||||
|
func (m *LlamaModel) Repack(name string, data []float32, shape []uint64) ([]float32, error) {
|
||||||
|
return llamaRepack(name, m.Params, data, shape)
|
||||||
|
}
|
||||||
|
|
||||||
|
func llamaRepack(name string, params *Params, data []float32, shape []uint64) ([]float32, error) {
|
||||||
|
var dims []int
|
||||||
|
for _, dim := range shape {
|
||||||
|
if dim != 0 {
|
||||||
|
dims = append(dims, int(dim))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var heads int
|
||||||
|
if strings.HasSuffix(name, "attn_q.weight") {
|
||||||
|
heads = params.AttentionHeads
|
||||||
|
} else if strings.HasSuffix(name, "attn_k.weight") {
|
||||||
|
heads = cmp.Or(params.KeyValHeads, params.AttentionHeads)
|
||||||
|
} else {
|
||||||
|
return nil, fmt.Errorf("unknown tensor name: %s", name)
|
||||||
|
}
|
||||||
|
|
||||||
|
n := tensor.New(tensor.WithShape(dims...), tensor.WithBacking(data))
|
||||||
|
if err := n.Reshape(append([]int{heads, 2, dims[0] / heads / 2}, dims[1:]...)...); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := n.T(0, 2, 1, 3); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := n.Reshape(dims...); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := n.Transpose(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
ts, err := native.SelectF32(n, 1)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var f32s []float32
|
||||||
|
for _, t := range ts {
|
||||||
|
f32s = append(f32s, t...)
|
||||||
|
}
|
||||||
|
|
||||||
|
return f32s, nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,17 +1,8 @@
|
|||||||
package convert
|
package convert
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/binary"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
"io"
|
||||||
"os"
|
|
||||||
"regexp"
|
"regexp"
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/d4l3k/go-bfloat16"
|
|
||||||
"github.com/pdevine/tensor"
|
|
||||||
"github.com/pdevine/tensor/native"
|
|
||||||
"github.com/x448/float16"
|
|
||||||
|
|
||||||
"github.com/ollama/ollama/llm"
|
"github.com/ollama/ollama/llm"
|
||||||
)
|
)
|
||||||
@@ -20,90 +11,12 @@ type MistralModel struct {
|
|||||||
ModelData
|
ModelData
|
||||||
}
|
}
|
||||||
|
|
||||||
func mistralLayerHandler(w io.Writer, r safetensorWriterTo, f *os.File) error {
|
|
||||||
layerSize := r.end - r.start
|
|
||||||
|
|
||||||
var err error
|
|
||||||
tData := make([]uint16, layerSize/2)
|
|
||||||
if err = binary.Read(f, r.bo, tData); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
var heads uint32
|
|
||||||
if strings.Contains(r.t.Name, "attn_q") {
|
|
||||||
heads = uint32(r.params.AttentionHeads)
|
|
||||||
} else if strings.Contains(r.t.Name, "attn_k") {
|
|
||||||
heads = uint32(r.params.KeyValHeads)
|
|
||||||
if heads == 0 {
|
|
||||||
heads = uint32(r.params.AttentionHeads)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return fmt.Errorf("unknown layer type")
|
|
||||||
}
|
|
||||||
|
|
||||||
tData, err = repack(tData, int(heads), r.t.Shape)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
var buf []byte
|
|
||||||
for _, n := range tData {
|
|
||||||
buf = r.bo.AppendUint16(buf, n)
|
|
||||||
}
|
|
||||||
|
|
||||||
tempBuf := make([]uint16, len(tData))
|
|
||||||
tDataF32 := bfloat16.DecodeFloat32(buf)
|
|
||||||
for cnt, v := range tDataF32 {
|
|
||||||
tDataF16 := float16.Fromfloat32(v)
|
|
||||||
tempBuf[cnt] = uint16(tDataF16)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = binary.Write(w, r.bo, tempBuf); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func repack(data []uint16, heads int, shape []uint64) ([]uint16, error) {
|
|
||||||
n := tensor.New(tensor.WithShape(int(shape[0]), int(shape[1])), tensor.WithBacking(data))
|
|
||||||
origShape := n.Shape().Clone()
|
|
||||||
|
|
||||||
// reshape the tensor and swap axes 1 and 2 to unpack the layer for gguf
|
|
||||||
if err := n.Reshape(heads, 2, origShape[0]/heads/2, origShape[1]); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := n.T(0, 2, 1, 3); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := n.Reshape(origShape...); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := n.Transpose(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
newN, err := native.SelectU16(n, 1)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var fullTensor []uint16
|
|
||||||
for _, v := range newN {
|
|
||||||
fullTensor = append(fullTensor, v...)
|
|
||||||
}
|
|
||||||
return fullTensor, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *MistralModel) GetTensors() error {
|
func (m *MistralModel) GetTensors() error {
|
||||||
t, err := m.Format.GetTensors(m.Path, m.Params)
|
t, err := m.Format.GetTensors(m.Path, m.Params)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
m.Tensors = []llm.Tensor{}
|
|
||||||
|
|
||||||
pattern := `^blk\.[0-9]+\.attn_(?P<layer>q|k)\.weight$`
|
pattern := `^blk\.[0-9]+\.attn_(?P<layer>q|k)\.weight$`
|
||||||
re, err := regexp.Compile(pattern)
|
re, err := regexp.Compile(pattern)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -114,7 +27,7 @@ func (m *MistralModel) GetTensors() error {
|
|||||||
matches := re.FindAllStringSubmatch(l.Name, -1)
|
matches := re.FindAllStringSubmatch(l.Name, -1)
|
||||||
if len(matches) > 0 {
|
if len(matches) > 0 {
|
||||||
wt := l.WriterTo.(safetensorWriterTo)
|
wt := l.WriterTo.(safetensorWriterTo)
|
||||||
wt.handler = mistralLayerHandler
|
wt.repacker = m.Repack
|
||||||
l.WriterTo = wt
|
l.WriterTo = wt
|
||||||
}
|
}
|
||||||
m.Tensors = append(m.Tensors, l)
|
m.Tensors = append(m.Tensors, l)
|
||||||
@@ -132,7 +45,7 @@ func (m *MistralModel) LoadVocab() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *MistralModel) WriteGGUF() (string, error) {
|
func (m *MistralModel) WriteGGUF(ws io.WriteSeeker) error {
|
||||||
kv := llm.KV{
|
kv := llm.KV{
|
||||||
"general.architecture": "llama",
|
"general.architecture": "llama",
|
||||||
"general.name": m.Name,
|
"general.name": m.Name,
|
||||||
@@ -158,16 +71,9 @@ func (m *MistralModel) WriteGGUF() (string, error) {
|
|||||||
"tokenizer.ggml.unknown_token_id": uint32(0),
|
"tokenizer.ggml.unknown_token_id": uint32(0),
|
||||||
}
|
}
|
||||||
|
|
||||||
f, err := os.CreateTemp("", "ollama-gguf")
|
return llm.NewGGUFV3(m.Params.ByteOrder).Encode(ws, kv, m.Tensors)
|
||||||
if err != nil {
|
}
|
||||||
return "", err
|
|
||||||
}
|
func (m *MistralModel) Repack(name string, data []float32, shape []uint64) ([]float32, error) {
|
||||||
defer f.Close()
|
return llamaRepack(name, m.Params, data, shape)
|
||||||
|
|
||||||
mod := llm.NewGGUFV3(m.Params.ByteOrder)
|
|
||||||
if err := mod.Encode(f, kv, m.Tensors); err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
return f.Name(), nil
|
|
||||||
}
|
}
|
||||||
|
|||||||
87
convert/mixtral.go
Normal file
87
convert/mixtral.go
Normal file
@@ -0,0 +1,87 @@
|
|||||||
|
package convert
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"regexp"
|
||||||
|
|
||||||
|
"github.com/ollama/ollama/llm"
|
||||||
|
)
|
||||||
|
|
||||||
|
type MixtralModel struct {
|
||||||
|
ModelData
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MixtralModel) GetTensors() error {
|
||||||
|
t, err := m.Format.GetTensors(m.Path, m.Params)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
pattern := `^blk\.[0-9]+\.attn_(?P<layer>q|k)\.weight$`
|
||||||
|
re, err := regexp.Compile(pattern)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, l := range t {
|
||||||
|
matches := re.FindAllStringSubmatch(l.Name, -1)
|
||||||
|
if len(matches) > 0 {
|
||||||
|
wt := l.WriterTo.(safetensorWriterTo)
|
||||||
|
wt.repacker = m.Repack
|
||||||
|
l.WriterTo = wt
|
||||||
|
}
|
||||||
|
m.Tensors = append(m.Tensors, l)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MixtralModel) LoadVocab() error {
|
||||||
|
v, err := LoadSentencePieceTokens(m.Path, m.Params)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
m.Vocab = v
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MixtralModel) WriteGGUF(ws io.WriteSeeker) error {
|
||||||
|
kv := llm.KV{
|
||||||
|
"general.architecture": "llama",
|
||||||
|
"general.name": m.Name,
|
||||||
|
"llama.block_count": uint32(m.Params.HiddenLayers),
|
||||||
|
"llama.context_length": uint32(m.Params.ContextSize),
|
||||||
|
"llama.embedding_length": uint32(m.Params.HiddenSize),
|
||||||
|
"llama.feed_forward_length": uint32(m.Params.IntermediateSize),
|
||||||
|
"llama.attention.head_count": uint32(m.Params.AttentionHeads),
|
||||||
|
"llama.attention.head_count_kv": uint32(m.Params.KeyValHeads),
|
||||||
|
|
||||||
|
"llama.rope.freq_base": float32(m.Params.RopeFrequencyBase),
|
||||||
|
"llama.attention.layer_norm_rms_epsilon": float32(m.Params.NormEPS),
|
||||||
|
|
||||||
|
"llama.expert_count": uint32(m.Params.Experts),
|
||||||
|
"llama.expert_used_count": uint32(m.Params.ExpertsUsed),
|
||||||
|
|
||||||
|
"llama.vocab_size": uint32(len(m.Vocab.Tokens)),
|
||||||
|
"llama.rope.dimension_count": uint32(m.Params.HiddenSize / m.Params.AttentionHeads),
|
||||||
|
|
||||||
|
"general.file_type": uint32(1),
|
||||||
|
"tokenizer.ggml.model": "llama",
|
||||||
|
|
||||||
|
"tokenizer.ggml.tokens": m.Vocab.Tokens,
|
||||||
|
"tokenizer.ggml.scores": m.Vocab.Scores,
|
||||||
|
"tokenizer.ggml.token_type": m.Vocab.Types,
|
||||||
|
|
||||||
|
"tokenizer.ggml.bos_token_id": uint32(m.Params.BoSTokenID),
|
||||||
|
"tokenizer.ggml.eos_token_id": uint32(m.Params.EoSTokenID),
|
||||||
|
"tokenizer.ggml.unknown_token_id": uint32(0),
|
||||||
|
"tokenizer.ggml.add_bos_token": true,
|
||||||
|
"tokenizer.ggml.add_eos_token": false,
|
||||||
|
}
|
||||||
|
|
||||||
|
return llm.NewGGUFV3(m.Params.ByteOrder).Encode(ws, kv, m.Tensors)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MixtralModel) Repack(name string, data []float32, shape []uint64) ([]float32, error) {
|
||||||
|
return llamaRepack(name, m.Params, data, shape)
|
||||||
|
}
|
||||||
@@ -6,14 +6,13 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"log/slog"
|
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"regexp"
|
"regexp"
|
||||||
"slices"
|
"slices"
|
||||||
|
"strings"
|
||||||
|
|
||||||
"github.com/d4l3k/go-bfloat16"
|
"github.com/d4l3k/go-bfloat16"
|
||||||
"github.com/mitchellh/mapstructure"
|
|
||||||
"github.com/x448/float16"
|
"github.com/x448/float16"
|
||||||
|
|
||||||
"github.com/ollama/ollama/llm"
|
"github.com/ollama/ollama/llm"
|
||||||
@@ -26,39 +25,38 @@ type safetensorWriterTo struct {
|
|||||||
bo ByteOrder
|
bo ByteOrder
|
||||||
|
|
||||||
filename string
|
filename string
|
||||||
|
dtype string
|
||||||
|
|
||||||
start, end, padding uint64
|
offset, size int64
|
||||||
handler func(w io.Writer, r safetensorWriterTo, f *os.File) error
|
repacker func(string, []float32, []uint64) ([]float32, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
type tensorMetaData struct {
|
type safetensorMetadata struct {
|
||||||
Type string `mapstructure:"dtype"`
|
Type string `json:"dtype"`
|
||||||
Shape []int `mapstructure:"shape"`
|
Shape []uint64 `json:"shape"`
|
||||||
Offsets []int `mapstructure:"data_offsets"`
|
Offsets []int64 `json:"data_offsets"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type SafetensorFormat struct{}
|
type SafetensorFormat struct{}
|
||||||
|
|
||||||
func (m *SafetensorFormat) GetTensors(dirpath string, params *Params) ([]llm.Tensor, error) {
|
func (m *SafetensorFormat) GetTensors(dirpath string, params *Params) ([]llm.Tensor, error) {
|
||||||
slog.Debug("getting tensor data")
|
|
||||||
var tensors []llm.Tensor
|
var tensors []llm.Tensor
|
||||||
files, err := filepath.Glob(filepath.Join(dirpath, "/model-*.safetensors"))
|
matches, err := filepath.Glob(filepath.Join(dirpath, "*.safetensors"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
var offset uint64
|
var offset uint64
|
||||||
for _, f := range files {
|
for _, f := range matches {
|
||||||
var t []llm.Tensor
|
var t []llm.Tensor
|
||||||
var err error
|
var err error
|
||||||
t, offset, err = m.readTensors(f, offset, params)
|
t, offset, err = m.readTensors(f, offset, params)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Error("%v", err)
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
tensors = append(tensors, t...)
|
tensors = append(tensors, t...)
|
||||||
}
|
}
|
||||||
slog.Debug(fmt.Sprintf("all tensors = %d", len(tensors)))
|
|
||||||
return tensors, nil
|
return tensors, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -69,72 +67,57 @@ func (m *SafetensorFormat) readTensors(fn string, offset uint64, params *Params)
|
|||||||
}
|
}
|
||||||
defer f.Close()
|
defer f.Close()
|
||||||
|
|
||||||
var jsonSize uint64
|
var n int64
|
||||||
if err := binary.Read(f, binary.LittleEndian, &jsonSize); err != nil {
|
if err := binary.Read(f, binary.LittleEndian, &n); err != nil {
|
||||||
return nil, 0, err
|
return nil, 0, err
|
||||||
}
|
}
|
||||||
|
|
||||||
buf := make([]byte, jsonSize)
|
b := bytes.NewBuffer(make([]byte, 0, n))
|
||||||
_, err = io.ReadFull(f, buf)
|
if _, err = io.CopyN(b, f, n); err != nil {
|
||||||
if err != nil {
|
|
||||||
return nil, 0, err
|
return nil, 0, err
|
||||||
}
|
}
|
||||||
|
|
||||||
d := json.NewDecoder(bytes.NewBuffer(buf))
|
var headers map[string]safetensorMetadata
|
||||||
d.UseNumber()
|
if err := json.NewDecoder(b).Decode(&headers); err != nil {
|
||||||
var parsed map[string]interface{}
|
|
||||||
if err = d.Decode(&parsed); err != nil {
|
|
||||||
return nil, 0, err
|
return nil, 0, err
|
||||||
}
|
}
|
||||||
|
|
||||||
var keys []string
|
var keys []string
|
||||||
for k := range parsed {
|
for key := range headers {
|
||||||
keys = append(keys, k)
|
if !strings.HasSuffix(key, "self_attn.rotary_embd.inv_freq") {
|
||||||
|
keys = append(keys, key)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
slices.Sort(keys)
|
slices.Sort(keys)
|
||||||
|
|
||||||
slog.Info("converting layers")
|
|
||||||
|
|
||||||
var tensors []llm.Tensor
|
var tensors []llm.Tensor
|
||||||
for _, k := range keys {
|
for _, key := range keys {
|
||||||
vals := parsed[k].(map[string]interface{})
|
value := headers[key]
|
||||||
var data tensorMetaData
|
|
||||||
if err = mapstructure.Decode(vals, &data); err != nil {
|
|
||||||
slog.Error("couldn't decode properly")
|
|
||||||
return nil, 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
slog.Debug(fmt.Sprintf("metadata = %#v", data))
|
|
||||||
var size uint64
|
|
||||||
var kind uint32
|
var kind uint32
|
||||||
switch len(data.Shape) {
|
switch len(value.Shape) {
|
||||||
case 0:
|
case 0:
|
||||||
// metadata
|
// valuedata
|
||||||
continue
|
continue
|
||||||
case 1:
|
|
||||||
// convert to float32
|
|
||||||
kind = 0
|
|
||||||
size = uint64(data.Shape[0] * 4)
|
|
||||||
case 2:
|
case 2:
|
||||||
// convert to float16
|
|
||||||
kind = 1
|
kind = 1
|
||||||
size = uint64(data.Shape[0] * data.Shape[1] * 2)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
ggufName, err := m.GetLayerName(k)
|
name, err := m.GetLayerName(key)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Error("%v", err)
|
|
||||||
return nil, 0, err
|
return nil, 0, err
|
||||||
}
|
}
|
||||||
|
|
||||||
shape := []uint64{0, 0, 0, 0}
|
shape := make([]uint64, len(value.Shape))
|
||||||
for i := range data.Shape {
|
copy(shape, value.Shape)
|
||||||
shape[i] = uint64(data.Shape[i])
|
|
||||||
|
pad := func(s int64) int64 {
|
||||||
|
return 8 + n + s
|
||||||
}
|
}
|
||||||
|
|
||||||
t := llm.Tensor{
|
t := llm.Tensor{
|
||||||
Name: ggufName,
|
Name: name,
|
||||||
Kind: kind,
|
Kind: kind,
|
||||||
Offset: offset,
|
Offset: offset,
|
||||||
Shape: shape[:],
|
Shape: shape[:],
|
||||||
@@ -145,16 +128,15 @@ func (m *SafetensorFormat) readTensors(fn string, offset uint64, params *Params)
|
|||||||
params: params,
|
params: params,
|
||||||
bo: params.ByteOrder,
|
bo: params.ByteOrder,
|
||||||
filename: fn,
|
filename: fn,
|
||||||
start: uint64(data.Offsets[0]),
|
dtype: value.Type,
|
||||||
end: uint64(data.Offsets[1]),
|
offset: pad(value.Offsets[0]),
|
||||||
padding: 8 + jsonSize,
|
size: pad(value.Offsets[1]) - pad(value.Offsets[0]),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
offset += t.Size()
|
||||||
tensors = append(tensors, t)
|
tensors = append(tensors, t)
|
||||||
offset += size
|
|
||||||
}
|
}
|
||||||
slog.Debug(fmt.Sprintf("total tensors for file = %d", len(tensors)))
|
|
||||||
slog.Debug(fmt.Sprintf("offset = %d", offset))
|
|
||||||
return tensors, offset, nil
|
return tensors, offset, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -167,9 +149,7 @@ func (m *SafetensorFormat) GetParams(dirpath string) (*Params, error) {
|
|||||||
|
|
||||||
var params Params
|
var params Params
|
||||||
|
|
||||||
d := json.NewDecoder(f)
|
if err := json.NewDecoder(f).Decode(¶ms); err != nil {
|
||||||
err = d.Decode(¶ms)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -185,15 +165,19 @@ func (m *SafetensorFormat) GetLayerName(n string) (string, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
tMap := map[string]string{
|
tMap := map[string]string{
|
||||||
"model.layers.(\\d+).input_layernorm.weight": "blk.$1.attn_norm.weight",
|
"model.layers.(\\d+).input_layernorm.weight": "blk.$1.attn_norm.weight",
|
||||||
"model.layers.(\\d+).mlp.down_proj.weight": "blk.$1.ffn_down.weight",
|
"model.layers.(\\d+).mlp.down_proj.weight": "blk.$1.ffn_down.weight",
|
||||||
"model.layers.(\\d+).mlp.gate_proj.weight": "blk.$1.ffn_gate.weight",
|
"model.layers.(\\d+).mlp.gate_proj.weight": "blk.$1.ffn_gate.weight",
|
||||||
"model.layers.(\\d+).mlp.up_proj.weight": "blk.$1.ffn_up.weight",
|
"model.layers.(\\d+).mlp.up_proj.weight": "blk.$1.ffn_up.weight",
|
||||||
"model.layers.(\\d+).post_attention_layernorm.weight": "blk.$1.ffn_norm.weight",
|
"model.layers.(\\d+).post_attention_layernorm.weight": "blk.$1.ffn_norm.weight",
|
||||||
"model.layers.(\\d+).self_attn.k_proj.weight": "blk.$1.attn_k.weight",
|
"model.layers.(\\d+).self_attn.k_proj.weight": "blk.$1.attn_k.weight",
|
||||||
"model.layers.(\\d+).self_attn.o_proj.weight": "blk.$1.attn_output.weight",
|
"model.layers.(\\d+).self_attn.o_proj.weight": "blk.$1.attn_output.weight",
|
||||||
"model.layers.(\\d+).self_attn.q_proj.weight": "blk.$1.attn_q.weight",
|
"model.layers.(\\d+).self_attn.q_proj.weight": "blk.$1.attn_q.weight",
|
||||||
"model.layers.(\\d+).self_attn.v_proj.weight": "blk.$1.attn_v.weight",
|
"model.layers.(\\d+).self_attn.v_proj.weight": "blk.$1.attn_v.weight",
|
||||||
|
"model.layers.(\\d+).block_sparse_moe.gate.weight": "blk.$1.ffn_gate_inp.weight",
|
||||||
|
"model.layers.(\\d+).block_sparse_moe.experts.(\\d+).w1.weight": "blk.$1.ffn_gate.$2.weight",
|
||||||
|
"model.layers.(\\d+).block_sparse_moe.experts.(\\d+).w2.weight": "blk.$1.ffn_down.$2.weight",
|
||||||
|
"model.layers.(\\d+).block_sparse_moe.experts.(\\d+).w3.weight": "blk.$1.ffn_up.$2.weight",
|
||||||
}
|
}
|
||||||
|
|
||||||
v, ok := directMap[n]
|
v, ok := directMap[n]
|
||||||
@@ -220,55 +204,58 @@ func (r safetensorWriterTo) WriteTo(w io.Writer) (n int64, err error) {
|
|||||||
}
|
}
|
||||||
defer f.Close()
|
defer f.Close()
|
||||||
|
|
||||||
if _, err = f.Seek(int64(r.padding+r.start), 0); err != nil {
|
if _, err = f.Seek(r.offset, io.SeekStart); err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// use the handler if one is present
|
var f32s []float32
|
||||||
if r.handler != nil {
|
switch r.dtype {
|
||||||
return 0, r.handler(w, r, f)
|
case "F32":
|
||||||
}
|
f32s = make([]float32, r.size/4)
|
||||||
|
if err = binary.Read(f, r.bo, f32s); err != nil {
|
||||||
remaining := r.end - r.start
|
return 0, err
|
||||||
|
}
|
||||||
bufSize := uint64(10240)
|
case "F16":
|
||||||
var finished bool
|
u16s := make([]uint16, r.size/2)
|
||||||
for {
|
if err = binary.Read(f, r.bo, u16s); err != nil {
|
||||||
data := make([]byte, min(bufSize, remaining))
|
|
||||||
|
|
||||||
b, err := io.ReadFull(f, data)
|
|
||||||
remaining -= uint64(b)
|
|
||||||
|
|
||||||
if err == io.EOF || remaining <= 0 {
|
|
||||||
finished = true
|
|
||||||
} else if err != nil {
|
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// convert bfloat16 -> ieee float32
|
for _, b := range u16s {
|
||||||
tDataF32 := bfloat16.DecodeFloat32(data)
|
f32s = append(f32s, float16.Frombits(b).Float32())
|
||||||
|
|
||||||
switch r.t.Kind {
|
|
||||||
case 0:
|
|
||||||
if err := binary.Write(w, r.bo, tDataF32); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
case 1:
|
|
||||||
// convert float32 -> float16
|
|
||||||
tempBuf := make([]uint16, len(data)/2)
|
|
||||||
for cnt, v := range tDataF32 {
|
|
||||||
tDataF16 := float16.Fromfloat32(v)
|
|
||||||
tempBuf[cnt] = uint16(tDataF16)
|
|
||||||
}
|
|
||||||
if err := binary.Write(w, r.bo, tempBuf); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
if finished {
|
|
||||||
break
|
case "BF16":
|
||||||
|
u8s := make([]uint8, r.size)
|
||||||
|
if err = binary.Read(f, r.bo, u8s); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
f32s = bfloat16.DecodeFloat32(u8s)
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("unknown data type: %s", r.dtype)
|
||||||
|
}
|
||||||
|
|
||||||
|
if r.repacker != nil {
|
||||||
|
f32s, err = r.repacker(r.t.Name, f32s, r.t.Shape)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return 0, nil
|
|
||||||
|
switch r.t.Kind {
|
||||||
|
case 0:
|
||||||
|
return 0, binary.Write(w, r.bo, f32s)
|
||||||
|
case 1:
|
||||||
|
f16s := make([]uint16, len(f32s))
|
||||||
|
for i := range f32s {
|
||||||
|
f16s[i] = float16.Fromfloat32(f32s[i]).Bits()
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0, binary.Write(w, r.bo, f16s)
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("unknown storage type: %d", r.t.Kind)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *SafetensorFormat) GetModelArch(name, dirPath string, params *Params) (ModelArch, error) {
|
func (m *SafetensorFormat) GetModelArch(name, dirPath string, params *Params) (ModelArch, error) {
|
||||||
@@ -277,6 +264,15 @@ func (m *SafetensorFormat) GetModelArch(name, dirPath string, params *Params) (M
|
|||||||
return nil, fmt.Errorf("No architecture specified to convert")
|
return nil, fmt.Errorf("No architecture specified to convert")
|
||||||
case 1:
|
case 1:
|
||||||
switch params.Architectures[0] {
|
switch params.Architectures[0] {
|
||||||
|
case "LlamaForCausalLM":
|
||||||
|
return &LlamaModel{
|
||||||
|
ModelData{
|
||||||
|
Name: name,
|
||||||
|
Path: dirPath,
|
||||||
|
Params: params,
|
||||||
|
Format: m,
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
case "MistralForCausalLM":
|
case "MistralForCausalLM":
|
||||||
return &MistralModel{
|
return &MistralModel{
|
||||||
ModelData{
|
ModelData{
|
||||||
@@ -286,6 +282,15 @@ func (m *SafetensorFormat) GetModelArch(name, dirPath string, params *Params) (M
|
|||||||
Format: m,
|
Format: m,
|
||||||
},
|
},
|
||||||
}, nil
|
}, nil
|
||||||
|
case "MixtralForCausalLM":
|
||||||
|
return &MixtralModel{
|
||||||
|
ModelData{
|
||||||
|
Name: name,
|
||||||
|
Path: dirPath,
|
||||||
|
Params: params,
|
||||||
|
Format: m,
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
case "GemmaForCausalLM":
|
case "GemmaForCausalLM":
|
||||||
return &GemmaModel{
|
return &GemmaModel{
|
||||||
ModelData{
|
ModelData{
|
||||||
|
|||||||
109
convert/tokenizer.go
Normal file
109
convert/tokenizer.go
Normal file
@@ -0,0 +1,109 @@
|
|||||||
|
package convert
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cmp"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
|
"os"
|
||||||
|
"slices"
|
||||||
|
|
||||||
|
"golang.org/x/exp/maps"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Tokenizer struct {
|
||||||
|
Version string `json:"version"`
|
||||||
|
AddedTokens []Token `json:"added_tokens"`
|
||||||
|
Model TokenizerModel `json:"model"`
|
||||||
|
|
||||||
|
PreTokenizer struct {
|
||||||
|
PreTokenizers []struct {
|
||||||
|
Type string `json:"type"`
|
||||||
|
Pattern struct {
|
||||||
|
Regex string `json:"Regex"`
|
||||||
|
} `json:"pattern"`
|
||||||
|
} `json:"pretokenizers"`
|
||||||
|
} `json:"pre_tokenizer"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type TokenizerModel struct {
|
||||||
|
Type string `json:"type"`
|
||||||
|
Vocab map[string]int `json:"vocab"`
|
||||||
|
Merges []string `json:"merges"`
|
||||||
|
Tokens []Token
|
||||||
|
}
|
||||||
|
|
||||||
|
type Token struct {
|
||||||
|
ID int `json:"id"`
|
||||||
|
Content string `json:"content"`
|
||||||
|
Special bool `json:"special"`
|
||||||
|
UserDefined bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Token) Type() int32 {
|
||||||
|
switch {
|
||||||
|
case t.Special:
|
||||||
|
return tokenTypeControl
|
||||||
|
case t.UserDefined:
|
||||||
|
return tokenTypeUserDefined
|
||||||
|
default:
|
||||||
|
return tokenTypeNormal
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Tokenizer) maxID() int {
|
||||||
|
return max(
|
||||||
|
slices.Max(maps.Values(t.Model.Vocab)),
|
||||||
|
slices.MaxFunc(t.AddedTokens, func(a, b Token) int {
|
||||||
|
return cmp.Compare(a.ID, b.ID)
|
||||||
|
}).ID,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseTokens(dirpath string) (pre string, tokens []Token, merges []string, err error) {
|
||||||
|
f, err := os.Open(dirpath)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
var t Tokenizer
|
||||||
|
if err := json.NewDecoder(f).Decode(&t); err != nil {
|
||||||
|
return "", nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
tokens = make([]Token, t.maxID()+1)
|
||||||
|
for k, v := range t.Model.Vocab {
|
||||||
|
tokens[v] = Token{ID: v, Content: k, Special: false, UserDefined: false}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, v := range t.AddedTokens {
|
||||||
|
v.UserDefined = true
|
||||||
|
tokens[v.ID] = v
|
||||||
|
}
|
||||||
|
|
||||||
|
sha256sum := sha256.New()
|
||||||
|
for _, pt := range t.PreTokenizer.PreTokenizers {
|
||||||
|
switch pt.Type {
|
||||||
|
case "Split":
|
||||||
|
if pt.Pattern.Regex != "" {
|
||||||
|
sha256sum.Write([]byte(pt.Pattern.Regex))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
switch digest := fmt.Sprintf("%x", sha256sum.Sum(nil)); digest {
|
||||||
|
case "d98f9631be1e9607a9848c26c1f9eac1aa9fc21ac6ba82a2fc0741af9780a48f":
|
||||||
|
pre = "llama-bpe"
|
||||||
|
case "03df5c5863ad70781dcfdef491ead25140f895fe8010964be0daefe27be32b02":
|
||||||
|
pre = "deepseek-llm"
|
||||||
|
case "21cde974d587f0d54dc8d56b183cc1e6239600172035c68fbd6d4b9f8da0576e":
|
||||||
|
pre = "deepseek-coder"
|
||||||
|
default:
|
||||||
|
slog.Warn("unknown pretokenizer, using default", "digest", digest)
|
||||||
|
pre = "default"
|
||||||
|
}
|
||||||
|
|
||||||
|
return pre, tokens, t.Model.Merges, nil
|
||||||
|
}
|
||||||
@@ -24,8 +24,8 @@ type torchWriterTo struct {
|
|||||||
params *Params
|
params *Params
|
||||||
bo ByteOrder
|
bo ByteOrder
|
||||||
|
|
||||||
storage pytorch.StorageInterface
|
storage pytorch.StorageInterface
|
||||||
handler func(w io.Writer, r torchWriterTo) error
|
repacker func(string, []float32, []uint64) ([]float32, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
type TorchFormat struct{}
|
type TorchFormat struct{}
|
||||||
@@ -33,14 +33,14 @@ type TorchFormat struct{}
|
|||||||
func (tf *TorchFormat) GetTensors(dirpath string, params *Params) ([]llm.Tensor, error) {
|
func (tf *TorchFormat) GetTensors(dirpath string, params *Params) ([]llm.Tensor, error) {
|
||||||
slog.Debug("getting torch tensors")
|
slog.Debug("getting torch tensors")
|
||||||
|
|
||||||
files, err := filepath.Glob(filepath.Join(dirpath, "pytorch_model-*.bin"))
|
var files []string
|
||||||
if err != nil {
|
if pt, _ := filepath.Glob(filepath.Join(dirpath, "consolidated*.pth")); len(pt) > 0 {
|
||||||
slog.Error("didn't find any torch files")
|
files = append(files, pt...)
|
||||||
return nil, err
|
} else if pt, _ := filepath.Glob(filepath.Join(dirpath, "pytorch_model*.pth")); len(pt) > 0 {
|
||||||
|
files = append(files, pt...)
|
||||||
}
|
}
|
||||||
|
|
||||||
var offset uint64
|
var offset uint64
|
||||||
|
|
||||||
var tensors []llm.Tensor
|
var tensors []llm.Tensor
|
||||||
for _, fn := range files {
|
for _, fn := range files {
|
||||||
m, err := pytorch.Load(fn)
|
m, err := pytorch.Load(fn)
|
||||||
@@ -74,10 +74,10 @@ func (tf *TorchFormat) GetTensors(dirpath string, params *Params) ([]llm.Tensor,
|
|||||||
|
|
||||||
ggufName, err := tf.GetLayerName(k.(string))
|
ggufName, err := tf.GetLayerName(k.(string))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Error("%v", err)
|
slog.Error(err.Error())
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
slog.Debug(fmt.Sprintf("finding name for '%s' -> '%s'", k.(string), ggufName))
|
slog.Debug(fmt.Sprintf("'%35s': '%30s' %10d [%#v]", k.(string), ggufName, size, tshape))
|
||||||
|
|
||||||
shape := []uint64{0, 0, 0, 0}
|
shape := []uint64{0, 0, 0, 0}
|
||||||
for i := range tshape {
|
for i := range tshape {
|
||||||
@@ -120,7 +120,7 @@ func getAltParams(dirpath string) (*Params, error) {
|
|||||||
AttentionHeads int `json:"n_heads"`
|
AttentionHeads int `json:"n_heads"`
|
||||||
KeyValHeads int `json:"n_kv_heads"`
|
KeyValHeads int `json:"n_kv_heads"`
|
||||||
HiddenLayers int `json:"n_layers"`
|
HiddenLayers int `json:"n_layers"`
|
||||||
RopeTheta int `json:"rope_theta"`
|
RopeTheta float64 `json:"rope_theta"`
|
||||||
NormEPS float64 `json:"norm_eps"`
|
NormEPS float64 `json:"norm_eps"`
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -133,6 +133,7 @@ func getAltParams(dirpath string) (*Params, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
params := &Params{
|
params := &Params{
|
||||||
|
Architectures: []string{"LlamaForCausalLM"},
|
||||||
HiddenSize: tparams.HiddenSize,
|
HiddenSize: tparams.HiddenSize,
|
||||||
AttentionHeads: tparams.AttentionHeads,
|
AttentionHeads: tparams.AttentionHeads,
|
||||||
KeyValHeads: tparams.KeyValHeads,
|
KeyValHeads: tparams.KeyValHeads,
|
||||||
@@ -229,37 +230,38 @@ func (m *TorchFormat) GetLayerName(n string) (string, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (r torchWriterTo) WriteTo(w io.Writer) (n int64, err error) {
|
func (r torchWriterTo) WriteTo(w io.Writer) (n int64, err error) {
|
||||||
// use the handler if one is present
|
var f32s []float32
|
||||||
if r.handler != nil {
|
switch s := r.storage.(type) {
|
||||||
return 0, r.handler(w, r)
|
case *pytorch.FloatStorage:
|
||||||
|
f32s = s.Data
|
||||||
|
case *pytorch.HalfStorage:
|
||||||
|
f32s = s.Data
|
||||||
|
case *pytorch.BFloat16Storage:
|
||||||
|
f32s = s.Data
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("unknown data type: %T", s)
|
||||||
}
|
}
|
||||||
|
|
||||||
switch r.storage.(type) {
|
if r.repacker != nil {
|
||||||
case *pytorch.FloatStorage:
|
f32s, err = r.repacker(r.t.Name, f32s, r.t.Shape)
|
||||||
slog.Warn(fmt.Sprintf("unexpected storage found for layer '%s'; skipping", r.t.Name))
|
if err != nil {
|
||||||
return 0, nil
|
return 0, err
|
||||||
case *pytorch.HalfStorage:
|
|
||||||
switch r.t.Kind {
|
|
||||||
case 0:
|
|
||||||
data := r.storage.(*pytorch.HalfStorage).Data
|
|
||||||
slog.Debug(fmt.Sprintf("%35s F32 (%d)", r.t.Name, len(data)))
|
|
||||||
if err := binary.Write(w, r.bo, data); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
case 1:
|
|
||||||
data := r.storage.(*pytorch.HalfStorage).Data
|
|
||||||
tData := make([]uint16, len(data))
|
|
||||||
for cnt, v := range data {
|
|
||||||
tData[cnt] = uint16(float16.Fromfloat32(v))
|
|
||||||
}
|
|
||||||
slog.Debug(fmt.Sprintf("%35s F16 (%d)", r.t.Name, len(tData)))
|
|
||||||
if err := binary.Write(w, r.bo, tData); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0, nil
|
switch r.t.Kind {
|
||||||
|
case 0:
|
||||||
|
return 0, binary.Write(w, r.bo, f32s)
|
||||||
|
case 1:
|
||||||
|
f16s := make([]uint16, len(f32s))
|
||||||
|
for i := range f32s {
|
||||||
|
f16s[i] = float16.Fromfloat32(f32s[i]).Bits()
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0, binary.Write(w, r.bo, f16s)
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("unknown storage type: %d", r.t.Kind)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *TorchFormat) GetModelArch(name, dirPath string, params *Params) (ModelArch, error) {
|
func (m *TorchFormat) GetModelArch(name, dirPath string, params *Params) (ModelArch, error) {
|
||||||
|
|||||||
@@ -6,7 +6,7 @@
|
|||||||
* [Importing models](./import.md)
|
* [Importing models](./import.md)
|
||||||
* [Linux Documentation](./linux.md)
|
* [Linux Documentation](./linux.md)
|
||||||
* [Windows Documentation](./windows.md)
|
* [Windows Documentation](./windows.md)
|
||||||
* [Docker Documentation](https://hub.docker.com/r/ollama/ollama)
|
* [Docker Documentation](./docker.md)
|
||||||
|
|
||||||
### Reference
|
### Reference
|
||||||
|
|
||||||
|
|||||||
69
docs/api.md
69
docs/api.md
@@ -17,7 +17,7 @@
|
|||||||
|
|
||||||
### Model names
|
### Model names
|
||||||
|
|
||||||
Model names follow a `model:tag` format, where `model` can have an optional namespace such as `example/model`. Some examples are `orca-mini:3b-q4_1` and `llama2:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version.
|
Model names follow a `model:tag` format, where `model` can have an optional namespace such as `example/model`. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version.
|
||||||
|
|
||||||
### Durations
|
### Durations
|
||||||
|
|
||||||
@@ -66,7 +66,7 @@ Enable JSON mode by setting the `format` parameter to `json`. This will structur
|
|||||||
|
|
||||||
```shell
|
```shell
|
||||||
curl http://localhost:11434/api/generate -d '{
|
curl http://localhost:11434/api/generate -d '{
|
||||||
"model": "llama2",
|
"model": "llama3",
|
||||||
"prompt": "Why is the sky blue?"
|
"prompt": "Why is the sky blue?"
|
||||||
}'
|
}'
|
||||||
```
|
```
|
||||||
@@ -77,7 +77,7 @@ A stream of JSON objects is returned:
|
|||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"model": "llama2",
|
"model": "llama3",
|
||||||
"created_at": "2023-08-04T08:52:19.385406455-07:00",
|
"created_at": "2023-08-04T08:52:19.385406455-07:00",
|
||||||
"response": "The",
|
"response": "The",
|
||||||
"done": false
|
"done": false
|
||||||
@@ -95,11 +95,11 @@ The final response in the stream also includes additional data about the generat
|
|||||||
- `context`: an encoding of the conversation used in this response, this can be sent in the next request to keep a conversational memory
|
- `context`: an encoding of the conversation used in this response, this can be sent in the next request to keep a conversational memory
|
||||||
- `response`: empty if the response was streamed, if not streamed, this will contain the full response
|
- `response`: empty if the response was streamed, if not streamed, this will contain the full response
|
||||||
|
|
||||||
To calculate how fast the response is generated in tokens per second (token/s), divide `eval_count` / `eval_duration`.
|
To calculate how fast the response is generated in tokens per second (token/s), divide `eval_count` / `eval_duration` * `10^9`.
|
||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"model": "llama2",
|
"model": "llama3",
|
||||||
"created_at": "2023-08-04T19:22:45.499127Z",
|
"created_at": "2023-08-04T19:22:45.499127Z",
|
||||||
"response": "",
|
"response": "",
|
||||||
"done": true,
|
"done": true,
|
||||||
@@ -121,7 +121,7 @@ A response can be received in one reply when streaming is off.
|
|||||||
|
|
||||||
```shell
|
```shell
|
||||||
curl http://localhost:11434/api/generate -d '{
|
curl http://localhost:11434/api/generate -d '{
|
||||||
"model": "llama2",
|
"model": "llama3",
|
||||||
"prompt": "Why is the sky blue?",
|
"prompt": "Why is the sky blue?",
|
||||||
"stream": false
|
"stream": false
|
||||||
}'
|
}'
|
||||||
@@ -133,7 +133,7 @@ If `stream` is set to `false`, the response will be a single JSON object:
|
|||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"model": "llama2",
|
"model": "llama3",
|
||||||
"created_at": "2023-08-04T19:22:45.499127Z",
|
"created_at": "2023-08-04T19:22:45.499127Z",
|
||||||
"response": "The sky is blue because it is the color of the sky.",
|
"response": "The sky is blue because it is the color of the sky.",
|
||||||
"done": true,
|
"done": true,
|
||||||
@@ -155,7 +155,7 @@ If `stream` is set to `false`, the response will be a single JSON object:
|
|||||||
|
|
||||||
```shell
|
```shell
|
||||||
curl http://localhost:11434/api/generate -d '{
|
curl http://localhost:11434/api/generate -d '{
|
||||||
"model": "llama2",
|
"model": "llama3",
|
||||||
"prompt": "What color is the sky at different times of the day? Respond using JSON",
|
"prompt": "What color is the sky at different times of the day? Respond using JSON",
|
||||||
"format": "json",
|
"format": "json",
|
||||||
"stream": false
|
"stream": false
|
||||||
@@ -166,7 +166,7 @@ curl http://localhost:11434/api/generate -d '{
|
|||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"model": "llama2",
|
"model": "llama3",
|
||||||
"created_at": "2023-11-09T21:07:55.186497Z",
|
"created_at": "2023-11-09T21:07:55.186497Z",
|
||||||
"response": "{\n\"morning\": {\n\"color\": \"blue\"\n},\n\"noon\": {\n\"color\": \"blue-gray\"\n},\n\"afternoon\": {\n\"color\": \"warm gray\"\n},\n\"evening\": {\n\"color\": \"orange\"\n}\n}\n",
|
"response": "{\n\"morning\": {\n\"color\": \"blue\"\n},\n\"noon\": {\n\"color\": \"blue-gray\"\n},\n\"afternoon\": {\n\"color\": \"warm gray\"\n},\n\"evening\": {\n\"color\": \"orange\"\n}\n}\n",
|
||||||
"done": true,
|
"done": true,
|
||||||
@@ -289,7 +289,7 @@ If you want to set custom options for the model at runtime rather than in the Mo
|
|||||||
|
|
||||||
```shell
|
```shell
|
||||||
curl http://localhost:11434/api/generate -d '{
|
curl http://localhost:11434/api/generate -d '{
|
||||||
"model": "llama2",
|
"model": "llama3",
|
||||||
"prompt": "Why is the sky blue?",
|
"prompt": "Why is the sky blue?",
|
||||||
"stream": false,
|
"stream": false,
|
||||||
"options": {
|
"options": {
|
||||||
@@ -313,7 +313,6 @@ curl http://localhost:11434/api/generate -d '{
|
|||||||
"numa": false,
|
"numa": false,
|
||||||
"num_ctx": 1024,
|
"num_ctx": 1024,
|
||||||
"num_batch": 2,
|
"num_batch": 2,
|
||||||
"num_gqa": 1,
|
|
||||||
"num_gpu": 1,
|
"num_gpu": 1,
|
||||||
"main_gpu": 0,
|
"main_gpu": 0,
|
||||||
"low_vram": false,
|
"low_vram": false,
|
||||||
@@ -321,8 +320,6 @@ curl http://localhost:11434/api/generate -d '{
|
|||||||
"vocab_only": false,
|
"vocab_only": false,
|
||||||
"use_mmap": true,
|
"use_mmap": true,
|
||||||
"use_mlock": false,
|
"use_mlock": false,
|
||||||
"rope_frequency_base": 1.1,
|
|
||||||
"rope_frequency_scale": 0.8,
|
|
||||||
"num_thread": 8
|
"num_thread": 8
|
||||||
}
|
}
|
||||||
}'
|
}'
|
||||||
@@ -332,7 +329,7 @@ curl http://localhost:11434/api/generate -d '{
|
|||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"model": "llama2",
|
"model": "llama3",
|
||||||
"created_at": "2023-08-04T19:22:45.499127Z",
|
"created_at": "2023-08-04T19:22:45.499127Z",
|
||||||
"response": "The sky is blue because it is the color of the sky.",
|
"response": "The sky is blue because it is the color of the sky.",
|
||||||
"done": true,
|
"done": true,
|
||||||
@@ -354,7 +351,7 @@ If an empty prompt is provided, the model will be loaded into memory.
|
|||||||
|
|
||||||
```shell
|
```shell
|
||||||
curl http://localhost:11434/api/generate -d '{
|
curl http://localhost:11434/api/generate -d '{
|
||||||
"model": "llama2"
|
"model": "llama3"
|
||||||
}'
|
}'
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -364,7 +361,7 @@ A single JSON object is returned:
|
|||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"model": "llama2",
|
"model": "llama3",
|
||||||
"created_at": "2023-12-18T19:52:07.071755Z",
|
"created_at": "2023-12-18T19:52:07.071755Z",
|
||||||
"response": "",
|
"response": "",
|
||||||
"done": true
|
"done": true
|
||||||
@@ -407,7 +404,7 @@ Send a chat message with a streaming response.
|
|||||||
|
|
||||||
```shell
|
```shell
|
||||||
curl http://localhost:11434/api/chat -d '{
|
curl http://localhost:11434/api/chat -d '{
|
||||||
"model": "llama2",
|
"model": "llama3",
|
||||||
"messages": [
|
"messages": [
|
||||||
{
|
{
|
||||||
"role": "user",
|
"role": "user",
|
||||||
@@ -423,7 +420,7 @@ A stream of JSON objects is returned:
|
|||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"model": "llama2",
|
"model": "llama3",
|
||||||
"created_at": "2023-08-04T08:52:19.385406455-07:00",
|
"created_at": "2023-08-04T08:52:19.385406455-07:00",
|
||||||
"message": {
|
"message": {
|
||||||
"role": "assistant",
|
"role": "assistant",
|
||||||
@@ -438,7 +435,7 @@ Final response:
|
|||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"model": "llama2",
|
"model": "llama3",
|
||||||
"created_at": "2023-08-04T19:22:45.499127Z",
|
"created_at": "2023-08-04T19:22:45.499127Z",
|
||||||
"done": true,
|
"done": true,
|
||||||
"total_duration": 4883583458,
|
"total_duration": 4883583458,
|
||||||
@@ -456,7 +453,7 @@ Final response:
|
|||||||
|
|
||||||
```shell
|
```shell
|
||||||
curl http://localhost:11434/api/chat -d '{
|
curl http://localhost:11434/api/chat -d '{
|
||||||
"model": "llama2",
|
"model": "llama3",
|
||||||
"messages": [
|
"messages": [
|
||||||
{
|
{
|
||||||
"role": "user",
|
"role": "user",
|
||||||
@@ -471,7 +468,7 @@ curl http://localhost:11434/api/chat -d '{
|
|||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"model": "registry.ollama.ai/library/llama2:latest",
|
"model": "registry.ollama.ai/library/llama3:latest",
|
||||||
"created_at": "2023-12-12T14:13:43.416799Z",
|
"created_at": "2023-12-12T14:13:43.416799Z",
|
||||||
"message": {
|
"message": {
|
||||||
"role": "assistant",
|
"role": "assistant",
|
||||||
@@ -495,7 +492,7 @@ Send a chat message with a conversation history. You can use this same approach
|
|||||||
|
|
||||||
```shell
|
```shell
|
||||||
curl http://localhost:11434/api/chat -d '{
|
curl http://localhost:11434/api/chat -d '{
|
||||||
"model": "llama2",
|
"model": "llama3",
|
||||||
"messages": [
|
"messages": [
|
||||||
{
|
{
|
||||||
"role": "user",
|
"role": "user",
|
||||||
@@ -519,7 +516,7 @@ A stream of JSON objects is returned:
|
|||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"model": "llama2",
|
"model": "llama3",
|
||||||
"created_at": "2023-08-04T08:52:19.385406455-07:00",
|
"created_at": "2023-08-04T08:52:19.385406455-07:00",
|
||||||
"message": {
|
"message": {
|
||||||
"role": "assistant",
|
"role": "assistant",
|
||||||
@@ -533,7 +530,7 @@ Final response:
|
|||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"model": "llama2",
|
"model": "llama3",
|
||||||
"created_at": "2023-08-04T19:22:45.499127Z",
|
"created_at": "2023-08-04T19:22:45.499127Z",
|
||||||
"done": true,
|
"done": true,
|
||||||
"total_duration": 8113331500,
|
"total_duration": 8113331500,
|
||||||
@@ -591,7 +588,7 @@ curl http://localhost:11434/api/chat -d '{
|
|||||||
|
|
||||||
```shell
|
```shell
|
||||||
curl http://localhost:11434/api/chat -d '{
|
curl http://localhost:11434/api/chat -d '{
|
||||||
"model": "llama2",
|
"model": "llama3",
|
||||||
"messages": [
|
"messages": [
|
||||||
{
|
{
|
||||||
"role": "user",
|
"role": "user",
|
||||||
@@ -609,7 +606,7 @@ curl http://localhost:11434/api/chat -d '{
|
|||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"model": "registry.ollama.ai/library/llama2:latest",
|
"model": "registry.ollama.ai/library/llama3:latest",
|
||||||
"created_at": "2023-12-12T14:13:43.416799Z",
|
"created_at": "2023-12-12T14:13:43.416799Z",
|
||||||
"message": {
|
"message": {
|
||||||
"role": "assistant",
|
"role": "assistant",
|
||||||
@@ -651,7 +648,7 @@ Create a new model from a `Modelfile`.
|
|||||||
```shell
|
```shell
|
||||||
curl http://localhost:11434/api/create -d '{
|
curl http://localhost:11434/api/create -d '{
|
||||||
"name": "mario",
|
"name": "mario",
|
||||||
"modelfile": "FROM llama2\nSYSTEM You are mario from Super Mario Bros."
|
"modelfile": "FROM llama3\nSYSTEM You are mario from Super Mario Bros."
|
||||||
}'
|
}'
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -758,7 +755,7 @@ A single JSON object will be returned.
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "llama2:latest",
|
"name": "llama3:latest",
|
||||||
"modified_at": "2023-12-07T09:32:18.757212583-08:00",
|
"modified_at": "2023-12-07T09:32:18.757212583-08:00",
|
||||||
"size": 3825819519,
|
"size": 3825819519,
|
||||||
"digest": "fe938a131f40e6f6d40083c9f0f430a515233eb2edaa6d72eb85c50d64f2300e",
|
"digest": "fe938a131f40e6f6d40083c9f0f430a515233eb2edaa6d72eb85c50d64f2300e",
|
||||||
@@ -792,7 +789,7 @@ Show information about a model including details, modelfile, template, parameter
|
|||||||
|
|
||||||
```shell
|
```shell
|
||||||
curl http://localhost:11434/api/show -d '{
|
curl http://localhost:11434/api/show -d '{
|
||||||
"name": "llama2"
|
"name": "llama3"
|
||||||
}'
|
}'
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -800,9 +797,9 @@ curl http://localhost:11434/api/show -d '{
|
|||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"modelfile": "# Modelfile generated by \"ollama show\"\n# To build a new Modelfile based on this one, replace the FROM line with:\n# FROM llava:latest\n\nFROM /Users/matt/.ollama/models/blobs/sha256:200765e1283640ffbd013184bf496e261032fa75b99498a9613be4e94d63ad52\nTEMPLATE \"\"\"{{ .System }}\nUSER: {{ .Prompt }}\nASSSISTANT: \"\"\"\nPARAMETER num_ctx 4096\nPARAMETER stop \"\u003c/s\u003e\"\nPARAMETER stop \"USER:\"\nPARAMETER stop \"ASSSISTANT:\"",
|
"modelfile": "# Modelfile generated by \"ollama show\"\n# To build a new Modelfile based on this one, replace the FROM line with:\n# FROM llava:latest\n\nFROM /Users/matt/.ollama/models/blobs/sha256:200765e1283640ffbd013184bf496e261032fa75b99498a9613be4e94d63ad52\nTEMPLATE \"\"\"{{ .System }}\nUSER: {{ .Prompt }}\nASSISTANT: \"\"\"\nPARAMETER num_ctx 4096\nPARAMETER stop \"\u003c/s\u003e\"\nPARAMETER stop \"USER:\"\nPARAMETER stop \"ASSISTANT:\"",
|
||||||
"parameters": "num_ctx 4096\nstop \u003c/s\u003e\nstop USER:\nstop ASSSISTANT:",
|
"parameters": "num_ctx 4096\nstop \u003c/s\u003e\nstop USER:\nstop ASSISTANT:",
|
||||||
"template": "{{ .System }}\nUSER: {{ .Prompt }}\nASSSISTANT: ",
|
"template": "{{ .System }}\nUSER: {{ .Prompt }}\nASSISTANT: ",
|
||||||
"details": {
|
"details": {
|
||||||
"format": "gguf",
|
"format": "gguf",
|
||||||
"family": "llama",
|
"family": "llama",
|
||||||
@@ -827,8 +824,8 @@ Copy a model. Creates a model with another name from an existing model.
|
|||||||
|
|
||||||
```shell
|
```shell
|
||||||
curl http://localhost:11434/api/copy -d '{
|
curl http://localhost:11434/api/copy -d '{
|
||||||
"source": "llama2",
|
"source": "llama3",
|
||||||
"destination": "llama2-backup"
|
"destination": "llama3-backup"
|
||||||
}'
|
}'
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -854,7 +851,7 @@ Delete a model and its data.
|
|||||||
|
|
||||||
```shell
|
```shell
|
||||||
curl -X DELETE http://localhost:11434/api/delete -d '{
|
curl -X DELETE http://localhost:11434/api/delete -d '{
|
||||||
"name": "llama2:13b"
|
"name": "llama3:13b"
|
||||||
}'
|
}'
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -882,7 +879,7 @@ Download a model from the ollama library. Cancelled pulls are resumed from where
|
|||||||
|
|
||||||
```shell
|
```shell
|
||||||
curl http://localhost:11434/api/pull -d '{
|
curl http://localhost:11434/api/pull -d '{
|
||||||
"name": "llama2"
|
"name": "llama3"
|
||||||
}'
|
}'
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|||||||
@@ -6,6 +6,8 @@ Install required tools:
|
|||||||
- go version 1.22 or higher
|
- go version 1.22 or higher
|
||||||
- gcc version 11.4.0 or higher
|
- gcc version 11.4.0 or higher
|
||||||
|
|
||||||
|
### MacOS
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
brew install go cmake gcc
|
brew install go cmake gcc
|
||||||
```
|
```
|
||||||
@@ -51,7 +53,7 @@ Typically the build scripts will auto-detect CUDA, however, if your Linux distro
|
|||||||
or installation approach uses unusual paths, you can specify the location by
|
or installation approach uses unusual paths, you can specify the location by
|
||||||
specifying an environment variable `CUDA_LIB_DIR` to the location of the shared
|
specifying an environment variable `CUDA_LIB_DIR` to the location of the shared
|
||||||
libraries, and `CUDACXX` to the location of the nvcc compiler. You can customize
|
libraries, and `CUDACXX` to the location of the nvcc compiler. You can customize
|
||||||
set set of target CUDA architectues by setting `CMAKE_CUDA_ARCHITECTURES` (e.g. "50;60;70")
|
a set of target CUDA architectures by setting `CMAKE_CUDA_ARCHITECTURES` (e.g. "50;60;70")
|
||||||
|
|
||||||
Then generate dependencies:
|
Then generate dependencies:
|
||||||
|
|
||||||
@@ -142,4 +144,4 @@ In addition to the common Windows development tools described above, install AMD
|
|||||||
- [AMD HIP](https://www.amd.com/en/developer/resources/rocm-hub/hip-sdk.html)
|
- [AMD HIP](https://www.amd.com/en/developer/resources/rocm-hub/hip-sdk.html)
|
||||||
- [Strawberry Perl](https://strawberryperl.com/)
|
- [Strawberry Perl](https://strawberryperl.com/)
|
||||||
|
|
||||||
Lastly, add `ninja.exe` included with MSVC to the system path (e.g. `C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\Common7\IDE\CommonExtensions\Microsoft\CMake\Ninja`).
|
Lastly, add `ninja.exe` included with MSVC to the system path (e.g. `C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\Common7\IDE\CommonExtensions\Microsoft\CMake\Ninja`).
|
||||||
|
|||||||
71
docs/docker.md
Normal file
71
docs/docker.md
Normal file
@@ -0,0 +1,71 @@
|
|||||||
|
# Ollama Docker image
|
||||||
|
|
||||||
|
### CPU only
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker run -d -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama
|
||||||
|
```
|
||||||
|
|
||||||
|
### Nvidia GPU
|
||||||
|
Install the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html#installation).
|
||||||
|
|
||||||
|
#### Install with Apt
|
||||||
|
1. Configure the repository
|
||||||
|
```bash
|
||||||
|
curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey \
|
||||||
|
| sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg
|
||||||
|
curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list \
|
||||||
|
| sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' \
|
||||||
|
| sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list
|
||||||
|
sudo apt-get update
|
||||||
|
```
|
||||||
|
2. Install the NVIDIA Container Toolkit packages
|
||||||
|
```bash
|
||||||
|
sudo apt-get install -y nvidia-container-toolkit
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Install with Yum or Dnf
|
||||||
|
1. Configure the repository
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl -s -L https://nvidia.github.io/libnvidia-container/stable/rpm/nvidia-container-toolkit.repo \
|
||||||
|
| sudo tee /etc/yum.repos.d/nvidia-container-toolkit.repo
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Install the NVIDIA Container Toolkit packages
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo yum install -y nvidia-container-toolkit
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Configure Docker to use Nvidia driver
|
||||||
|
```
|
||||||
|
sudo nvidia-ctk runtime configure --runtime=docker
|
||||||
|
sudo systemctl restart docker
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Start the container
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker run -d --gpus=all -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama
|
||||||
|
```
|
||||||
|
|
||||||
|
### AMD GPU
|
||||||
|
|
||||||
|
To run Ollama using Docker with AMD GPUs, use the `rocm` tag and the following command:
|
||||||
|
|
||||||
|
```
|
||||||
|
docker run -d --device /dev/kfd --device /dev/dri -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama:rocm
|
||||||
|
```
|
||||||
|
|
||||||
|
### Run model locally
|
||||||
|
|
||||||
|
Now you can run a model:
|
||||||
|
|
||||||
|
```
|
||||||
|
docker exec -it ollama ollama run llama3
|
||||||
|
```
|
||||||
|
|
||||||
|
### Try different models
|
||||||
|
|
||||||
|
More models can be found on the [Ollama library](https://ollama.com/library).
|
||||||
179
docs/faq.md
179
docs/faq.md
@@ -6,7 +6,7 @@ Ollama on macOS and Windows will automatically download updates. Click on the ta
|
|||||||
|
|
||||||
On Linux, re-run the install script:
|
On Linux, re-run the install script:
|
||||||
|
|
||||||
```
|
```shell
|
||||||
curl -fsSL https://ollama.com/install.sh | sh
|
curl -fsSL https://ollama.com/install.sh | sh
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -30,9 +30,9 @@ To change this when using `ollama run`, use `/set parameter`:
|
|||||||
|
|
||||||
When using the API, specify the `num_ctx` parameter:
|
When using the API, specify the `num_ctx` parameter:
|
||||||
|
|
||||||
```
|
```shell
|
||||||
curl http://localhost:11434/api/generate -d '{
|
curl http://localhost:11434/api/generate -d '{
|
||||||
"model": "llama2",
|
"model": "llama3",
|
||||||
"prompt": "Why is the sky blue?",
|
"prompt": "Why is the sky blue?",
|
||||||
"options": {
|
"options": {
|
||||||
"num_ctx": 4096
|
"num_ctx": 4096
|
||||||
@@ -40,6 +40,21 @@ curl http://localhost:11434/api/generate -d '{
|
|||||||
}'
|
}'
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## How can I tell if my model was loaded onto the GPU?
|
||||||
|
|
||||||
|
Use the `ollama ps` command to see what models are currently loaded into memory.
|
||||||
|
|
||||||
|
```shell
|
||||||
|
ollama ps
|
||||||
|
NAME ID SIZE PROCESSOR UNTIL
|
||||||
|
llama3:70b bcfb190ca3a7 42 GB 100% GPU 4 minutes from now
|
||||||
|
```
|
||||||
|
|
||||||
|
The `Processor` column will show which memory the model was loaded into:
|
||||||
|
* `100% GPU` means the model was loaded entirely into the GPU
|
||||||
|
* `100% CPU` means the model was loaded entirely in system memory
|
||||||
|
* `48%/52% CPU/GPU` means the model was loaded partially onto both the GPU and into system memory
|
||||||
|
|
||||||
## How do I configure Ollama server?
|
## How do I configure Ollama server?
|
||||||
|
|
||||||
Ollama server can be configured with environment variables.
|
Ollama server can be configured with environment variables.
|
||||||
@@ -80,81 +95,19 @@ If Ollama is run as a systemd service, environment variables should be set using
|
|||||||
|
|
||||||
### Setting environment variables on Windows
|
### Setting environment variables on Windows
|
||||||
|
|
||||||
On windows, Ollama inherits your user and system environment variables.
|
On Windows, Ollama inherits your user and system environment variables.
|
||||||
|
|
||||||
1. First Quit Ollama by clicking on it in the task bar
|
1. First Quit Ollama by clicking on it in the task bar.
|
||||||
|
|
||||||
2. Edit system environment variables from the control panel
|
2. Start the Settings (Windows 11) or Control Panel (Windows 10) application and search for _environment variables_.
|
||||||
|
|
||||||
3. Edit or create New variable(s) for your user account for `OLLAMA_HOST`, `OLLAMA_MODELS`, etc.
|
3. Click on _Edit environment variables for your account_.
|
||||||
|
|
||||||
4. Click OK/Apply to save
|
4. Edit or create a new variable for your user account for `OLLAMA_HOST`, `OLLAMA_MODELS`, etc.
|
||||||
|
|
||||||
5. Run `ollama` from a new terminal window
|
5. Click OK/Apply to save.
|
||||||
|
|
||||||
|
6. Start the Ollama application from the Windows Start menu.
|
||||||
## How can I expose Ollama on my network?
|
|
||||||
|
|
||||||
Ollama binds 127.0.0.1 port 11434 by default. Change the bind address with the `OLLAMA_HOST` environment variable.
|
|
||||||
|
|
||||||
Refer to the section [above](#how-do-i-configure-ollama-server) for how to set environment variables on your platform.
|
|
||||||
|
|
||||||
## How can I use Ollama with a proxy server?
|
|
||||||
|
|
||||||
Ollama runs an HTTP server and can be exposed using a proxy server such as Nginx. To do so, configure the proxy to forward requests and optionally set required headers (if not exposing Ollama on the network). For example, with Nginx:
|
|
||||||
|
|
||||||
```
|
|
||||||
server {
|
|
||||||
listen 80;
|
|
||||||
server_name example.com; # Replace with your domain or IP
|
|
||||||
location / {
|
|
||||||
proxy_pass http://localhost:11434;
|
|
||||||
proxy_set_header Host localhost:11434;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## How can I use Ollama with ngrok?
|
|
||||||
|
|
||||||
Ollama can be accessed using a range of tools for tunneling tools. For example with Ngrok:
|
|
||||||
|
|
||||||
```
|
|
||||||
ngrok http 11434 --host-header="localhost:11434"
|
|
||||||
```
|
|
||||||
|
|
||||||
## How can I use Ollama with Cloudflare Tunnel?
|
|
||||||
|
|
||||||
To use Ollama with Cloudflare Tunnel, use the `--url` and `--http-host-header` flags:
|
|
||||||
|
|
||||||
```
|
|
||||||
cloudflared tunnel --url http://localhost:11434 --http-host-header="localhost:11434"
|
|
||||||
```
|
|
||||||
|
|
||||||
## How can I allow additional web origins to access Ollama?
|
|
||||||
|
|
||||||
Ollama allows cross-origin requests from `127.0.0.1` and `0.0.0.0` by default. Additional origins can be configured with `OLLAMA_ORIGINS`.
|
|
||||||
|
|
||||||
Refer to the section [above](#how-do-i-configure-ollama-server) for how to set environment variables on your platform.
|
|
||||||
|
|
||||||
## Where are models stored?
|
|
||||||
|
|
||||||
- macOS: `~/.ollama/models`
|
|
||||||
- Linux: `/usr/share/ollama/.ollama/models`
|
|
||||||
- Windows: `C:\Users\<username>\.ollama\models`
|
|
||||||
|
|
||||||
### How do I set them to a different location?
|
|
||||||
|
|
||||||
If a different directory needs to be used, set the environment variable `OLLAMA_MODELS` to the chosen directory.
|
|
||||||
|
|
||||||
Refer to the section [above](#how-do-i-configure-ollama-server) for how to set environment variables on your platform.
|
|
||||||
|
|
||||||
## Does Ollama send my prompts and answers back to ollama.com?
|
|
||||||
|
|
||||||
No. Ollama runs locally, and conversation data does not leave your machine.
|
|
||||||
|
|
||||||
## How can I use Ollama in Visual Studio Code?
|
|
||||||
|
|
||||||
There is already a large collection of plugins available for VSCode as well as other editors that leverage Ollama. See the list of [extensions & plugins](https://github.com/ollama/ollama#extensions--plugins) at the bottom of the main repository readme.
|
|
||||||
|
|
||||||
## How do I use Ollama behind a proxy?
|
## How do I use Ollama behind a proxy?
|
||||||
|
|
||||||
@@ -181,6 +134,69 @@ docker build -t ollama-with-ca .
|
|||||||
docker run -d -e HTTPS_PROXY=https://my.proxy.example.com -p 11434:11434 ollama-with-ca
|
docker run -d -e HTTPS_PROXY=https://my.proxy.example.com -p 11434:11434 ollama-with-ca
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Does Ollama send my prompts and answers back to ollama.com?
|
||||||
|
|
||||||
|
No. Ollama runs locally, and conversation data does not leave your machine.
|
||||||
|
|
||||||
|
## How can I expose Ollama on my network?
|
||||||
|
|
||||||
|
Ollama binds 127.0.0.1 port 11434 by default. Change the bind address with the `OLLAMA_HOST` environment variable.
|
||||||
|
|
||||||
|
Refer to the section [above](#how-do-i-configure-ollama-server) for how to set environment variables on your platform.
|
||||||
|
|
||||||
|
## How can I use Ollama with a proxy server?
|
||||||
|
|
||||||
|
Ollama runs an HTTP server and can be exposed using a proxy server such as Nginx. To do so, configure the proxy to forward requests and optionally set required headers (if not exposing Ollama on the network). For example, with Nginx:
|
||||||
|
|
||||||
|
```
|
||||||
|
server {
|
||||||
|
listen 80;
|
||||||
|
server_name example.com; # Replace with your domain or IP
|
||||||
|
location / {
|
||||||
|
proxy_pass http://localhost:11434;
|
||||||
|
proxy_set_header Host localhost:11434;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## How can I use Ollama with ngrok?
|
||||||
|
|
||||||
|
Ollama can be accessed using a range of tunneling tools. For example, with Ngrok:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
ngrok http 11434 --host-header="localhost:11434"
|
||||||
|
```
|
||||||
|
|
||||||
|
## How can I use Ollama with Cloudflare Tunnel?
|
||||||
|
|
||||||
|
To use Ollama with Cloudflare Tunnel, use the `--url` and `--http-host-header` flags:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
cloudflared tunnel --url http://localhost:11434 --http-host-header="localhost:11434"
|
||||||
|
```
|
||||||
|
|
||||||
|
## How can I allow additional web origins to access Ollama?
|
||||||
|
|
||||||
|
Ollama allows cross-origin requests from `127.0.0.1` and `0.0.0.0` by default. Additional origins can be configured with `OLLAMA_ORIGINS`.
|
||||||
|
|
||||||
|
Refer to the section [above](#how-do-i-configure-ollama-server) for how to set environment variables on your platform.
|
||||||
|
|
||||||
|
## Where are models stored?
|
||||||
|
|
||||||
|
- macOS: `~/.ollama/models`
|
||||||
|
- Linux: `/usr/share/ollama/.ollama/models`
|
||||||
|
- Windows: `C:\Users\%username%\.ollama\models`
|
||||||
|
|
||||||
|
### How do I set them to a different location?
|
||||||
|
|
||||||
|
If a different directory needs to be used, set the environment variable `OLLAMA_MODELS` to the chosen directory.
|
||||||
|
|
||||||
|
Refer to the section [above](#how-do-i-configure-ollama-server) for how to set environment variables on your platform.
|
||||||
|
|
||||||
|
## How can I use Ollama in Visual Studio Code?
|
||||||
|
|
||||||
|
There is already a large collection of plugins available for VSCode as well as other editors that leverage Ollama. See the list of [extensions & plugins](https://github.com/ollama/ollama#extensions--plugins) at the bottom of the main repository readme.
|
||||||
|
|
||||||
## How do I use Ollama with GPU acceleration in Docker?
|
## How do I use Ollama with GPU acceleration in Docker?
|
||||||
|
|
||||||
The Ollama Docker container can be configured with GPU acceleration in Linux or Windows (with WSL2). This requires the [nvidia-container-toolkit](https://github.com/NVIDIA/nvidia-container-toolkit). See [ollama/ollama](https://hub.docker.com/r/ollama/ollama) for more details.
|
The Ollama Docker container can be configured with GPU acceleration in Linux or Windows (with WSL2). This requires the [nvidia-container-toolkit](https://github.com/NVIDIA/nvidia-container-toolkit). See [ollama/ollama](https://hub.docker.com/r/ollama/ollama) for more details.
|
||||||
@@ -195,7 +211,7 @@ Open `Control Panel > Networking and Internet > View network status and tasks` a
|
|||||||
Click on `Configure` and open the `Advanced` tab. Search through each of the properties until you find `Large Send Offload Version 2 (IPv4)` and `Large Send Offload Version 2 (IPv6)`. *Disable* both of these
|
Click on `Configure` and open the `Advanced` tab. Search through each of the properties until you find `Large Send Offload Version 2 (IPv4)` and `Large Send Offload Version 2 (IPv6)`. *Disable* both of these
|
||||||
properties.
|
properties.
|
||||||
|
|
||||||
## How can I pre-load a model to get faster response times?
|
## How can I preload a model into Ollama to get faster response times?
|
||||||
|
|
||||||
If you are using the API you can preload a model by sending the Ollama server an empty request. This works with both the `/api/generate` and `/api/chat` API endpoints.
|
If you are using the API you can preload a model by sending the Ollama server an empty request. This works with both the `/api/generate` and `/api/chat` API endpoints.
|
||||||
|
|
||||||
@@ -209,6 +225,11 @@ To use the chat completions endpoint, use:
|
|||||||
curl http://localhost:11434/api/chat -d '{"model": "mistral"}'
|
curl http://localhost:11434/api/chat -d '{"model": "mistral"}'
|
||||||
```
|
```
|
||||||
|
|
||||||
|
To preload a model using the CLI, use the command:
|
||||||
|
```shell
|
||||||
|
ollama run llama3 ""
|
||||||
|
```
|
||||||
|
|
||||||
## How do I keep a model loaded in memory or make it unload immediately?
|
## How do I keep a model loaded in memory or make it unload immediately?
|
||||||
|
|
||||||
By default models are kept in memory for 5 minutes before being unloaded. This allows for quicker response times if you are making numerous requests to the LLM. You may, however, want to free up the memory before the 5 minutes have elapsed or keep the model loaded indefinitely. Use the `keep_alive` parameter with either the `/api/generate` and `/api/chat` API endpoints to control how long the model is left in memory.
|
By default models are kept in memory for 5 minutes before being unloaded. This allows for quicker response times if you are making numerous requests to the LLM. You may, however, want to free up the memory before the 5 minutes have elapsed or keep the model loaded indefinitely. Use the `keep_alive` parameter with either the `/api/generate` and `/api/chat` API endpoints to control how long the model is left in memory.
|
||||||
@@ -221,10 +242,18 @@ The `keep_alive` parameter can be set to:
|
|||||||
|
|
||||||
For example, to preload a model and leave it in memory use:
|
For example, to preload a model and leave it in memory use:
|
||||||
```shell
|
```shell
|
||||||
curl http://localhost:11434/api/generate -d '{"model": "llama2", "keep_alive": -1}'
|
curl http://localhost:11434/api/generate -d '{"model": "llama3", "keep_alive": -1}'
|
||||||
```
|
```
|
||||||
|
|
||||||
To unload the model and free up memory use:
|
To unload the model and free up memory use:
|
||||||
```shell
|
```shell
|
||||||
curl http://localhost:11434/api/generate -d '{"model": "llama2", "keep_alive": 0}'
|
curl http://localhost:11434/api/generate -d '{"model": "llama3", "keep_alive": 0}'
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Alternatively, you can change the amount of time all models are loaded into memory by setting the `OLLAMA_KEEP_ALIVE` environment variable when starting the Ollama server. The `OLLAMA_KEEP_ALIVE` variable uses the same parameter types as the `keep_alive` parameter types mentioned above. Refer to section explaining [how to configure the Ollama server](#how-do-i-configure-ollama-server) to correctly set the environment variable.
|
||||||
|
|
||||||
|
If you wish to override the `OLLAMA_KEEP_ALIVE` setting, use the `keep_alive` API parameter with the `/api/generate` or `/api/chat` API endpoints.
|
||||||
|
|
||||||
|
## How do I manage the maximum number of requests the Ollama server can queue?
|
||||||
|
|
||||||
|
If too many requests are sent to the server, it will respond with a 503 error indicating the server is overloaded. You can adjust how many requests may be queued by setting `OLLAMA_MAX_QUEUE`.
|
||||||
|
|||||||
@@ -125,7 +125,7 @@ Publishing models is in early alpha. If you'd like to publish your model to shar
|
|||||||
|
|
||||||
1. Create [an account](https://ollama.com/signup)
|
1. Create [an account](https://ollama.com/signup)
|
||||||
2. Copy your Ollama public key:
|
2. Copy your Ollama public key:
|
||||||
- macOS: `cat ~/.ollama/id_ed25519.pub`
|
- macOS: `cat ~/.ollama/id_ed25519.pub | pbcopy`
|
||||||
- Windows: `type %USERPROFILE%\.ollama\id_ed25519.pub`
|
- Windows: `type %USERPROFILE%\.ollama\id_ed25519.pub`
|
||||||
- Linux: `cat /usr/share/ollama/.ollama/id_ed25519.pub`
|
- Linux: `cat /usr/share/ollama/.ollama/id_ed25519.pub`
|
||||||
3. Add your public key to your [Ollama account](https://ollama.com/settings/keys)
|
3. Add your public key to your [Ollama account](https://ollama.com/settings/keys)
|
||||||
@@ -136,6 +136,8 @@ Next, copy your model to your username's namespace:
|
|||||||
ollama cp example <your username>/example
|
ollama cp example <your username>/example
|
||||||
```
|
```
|
||||||
|
|
||||||
|
> Note: model names may only contain lowercase letters, digits, and the characters `.`, `-`, and `_`.
|
||||||
|
|
||||||
Then push the model:
|
Then push the model:
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|||||||
@@ -105,7 +105,7 @@ sudo chmod +x /usr/bin/ollama
|
|||||||
To view logs of Ollama running as a startup service, run:
|
To view logs of Ollama running as a startup service, run:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
journalctl -u ollama
|
journalctl -e -u ollama
|
||||||
```
|
```
|
||||||
|
|
||||||
## Uninstall
|
## Uninstall
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ A model file is the blueprint to create and share models with Ollama.
|
|||||||
- [Examples](#examples)
|
- [Examples](#examples)
|
||||||
- [Instructions](#instructions)
|
- [Instructions](#instructions)
|
||||||
- [FROM (Required)](#from-required)
|
- [FROM (Required)](#from-required)
|
||||||
- [Build from llama2](#build-from-llama2)
|
- [Build from llama3](#build-from-llama3)
|
||||||
- [Build from a bin file](#build-from-a-bin-file)
|
- [Build from a bin file](#build-from-a-bin-file)
|
||||||
- [PARAMETER](#parameter)
|
- [PARAMETER](#parameter)
|
||||||
- [Valid Parameters and Values](#valid-parameters-and-values)
|
- [Valid Parameters and Values](#valid-parameters-and-values)
|
||||||
@@ -48,7 +48,7 @@ INSTRUCTION arguments
|
|||||||
An example of a `Modelfile` creating a mario blueprint:
|
An example of a `Modelfile` creating a mario blueprint:
|
||||||
|
|
||||||
```modelfile
|
```modelfile
|
||||||
FROM llama2
|
FROM llama3
|
||||||
# sets the temperature to 1 [higher is more creative, lower is more coherent]
|
# sets the temperature to 1 [higher is more creative, lower is more coherent]
|
||||||
PARAMETER temperature 1
|
PARAMETER temperature 1
|
||||||
# sets the context window size to 4096, this controls how many tokens the LLM can use as context to generate the next token
|
# sets the context window size to 4096, this controls how many tokens the LLM can use as context to generate the next token
|
||||||
@@ -67,33 +67,25 @@ To use this:
|
|||||||
|
|
||||||
More examples are available in the [examples directory](../examples).
|
More examples are available in the [examples directory](../examples).
|
||||||
|
|
||||||
### `Modelfile`s in [ollama.com/library][1]
|
To view the Modelfile of a given model, use the `ollama show --modelfile` command.
|
||||||
|
|
||||||
There are two ways to view `Modelfile`s underlying the models in [ollama.com/library][1]:
|
|
||||||
|
|
||||||
- Option 1: view a details page from a model's tags page:
|
|
||||||
1. Go to a particular model's tags (e.g. https://ollama.com/library/llama2/tags)
|
|
||||||
2. Click on a tag (e.g. https://ollama.com/library/llama2:13b)
|
|
||||||
3. Scroll down to "Layers"
|
|
||||||
- Note: if the [`FROM` instruction](#from-required) is not present,
|
|
||||||
it means the model was created from a local file
|
|
||||||
- Option 2: use `ollama show` to print the `Modelfile` for any local models like so:
|
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
> ollama show --modelfile llama2:13b
|
> ollama show --modelfile llama3
|
||||||
# Modelfile generated by "ollama show"
|
# Modelfile generated by "ollama show"
|
||||||
# To build a new Modelfile based on this one, replace the FROM line with:
|
# To build a new Modelfile based on this one, replace the FROM line with:
|
||||||
# FROM llama2:13b
|
# FROM llama3:latest
|
||||||
|
FROM /Users/pdevine/.ollama/models/blobs/sha256-00e1317cbf74d901080d7100f57580ba8dd8de57203072dc6f668324ba545f29
|
||||||
|
TEMPLATE """{{ if .System }}<|start_header_id|>system<|end_header_id|>
|
||||||
|
|
||||||
FROM /root/.ollama/models/blobs/sha256:123abc
|
{{ .System }}<|eot_id|>{{ end }}{{ if .Prompt }}<|start_header_id|>user<|end_header_id|>
|
||||||
TEMPLATE """[INST] {{ if .System }}<<SYS>>{{ .System }}<</SYS>>
|
|
||||||
|
|
||||||
{{ end }}{{ .Prompt }} [/INST] """
|
{{ .Prompt }}<|eot_id|>{{ end }}<|start_header_id|>assistant<|end_header_id|>
|
||||||
SYSTEM """"""
|
|
||||||
PARAMETER stop [INST]
|
{{ .Response }}<|eot_id|>"""
|
||||||
PARAMETER stop [/INST]
|
PARAMETER stop "<|start_header_id|>"
|
||||||
PARAMETER stop <<SYS>>
|
PARAMETER stop "<|end_header_id|>"
|
||||||
PARAMETER stop <</SYS>>
|
PARAMETER stop "<|eot_id|>"
|
||||||
|
PARAMETER stop "<|reserved_special_token"
|
||||||
```
|
```
|
||||||
|
|
||||||
## Instructions
|
## Instructions
|
||||||
@@ -106,10 +98,10 @@ The `FROM` instruction defines the base model to use when creating a model.
|
|||||||
FROM <model name>:<tag>
|
FROM <model name>:<tag>
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Build from llama2
|
#### Build from llama3
|
||||||
|
|
||||||
```modelfile
|
```modelfile
|
||||||
FROM llama2
|
FROM llama3
|
||||||
```
|
```
|
||||||
|
|
||||||
A list of available base models:
|
A list of available base models:
|
||||||
|
|||||||
@@ -25,7 +25,7 @@ chat_completion = client.chat.completions.create(
|
|||||||
'content': 'Say this is a test',
|
'content': 'Say this is a test',
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
model='llama2',
|
model='llama3',
|
||||||
)
|
)
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -43,7 +43,7 @@ const openai = new OpenAI({
|
|||||||
|
|
||||||
const chatCompletion = await openai.chat.completions.create({
|
const chatCompletion = await openai.chat.completions.create({
|
||||||
messages: [{ role: 'user', content: 'Say this is a test' }],
|
messages: [{ role: 'user', content: 'Say this is a test' }],
|
||||||
model: 'llama2',
|
model: 'llama3',
|
||||||
})
|
})
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -53,7 +53,7 @@ const chatCompletion = await openai.chat.completions.create({
|
|||||||
curl http://localhost:11434/v1/chat/completions \
|
curl http://localhost:11434/v1/chat/completions \
|
||||||
-H "Content-Type: application/json" \
|
-H "Content-Type: application/json" \
|
||||||
-d '{
|
-d '{
|
||||||
"model": "llama2",
|
"model": "llama3",
|
||||||
"messages": [
|
"messages": [
|
||||||
{
|
{
|
||||||
"role": "system",
|
"role": "system",
|
||||||
@@ -113,7 +113,7 @@ curl http://localhost:11434/v1/chat/completions \
|
|||||||
Before using a model, pull it locally `ollama pull`:
|
Before using a model, pull it locally `ollama pull`:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
ollama pull llama2
|
ollama pull llama3
|
||||||
```
|
```
|
||||||
|
|
||||||
### Default model names
|
### Default model names
|
||||||
@@ -121,7 +121,7 @@ ollama pull llama2
|
|||||||
For tooling that relies on default OpenAI model names such as `gpt-3.5-turbo`, use `ollama cp` to copy an existing model name to a temporary name:
|
For tooling that relies on default OpenAI model names such as `gpt-3.5-turbo`, use `ollama cp` to copy an existing model name to a temporary name:
|
||||||
|
|
||||||
```
|
```
|
||||||
ollama cp llama2 gpt-3.5-turbo
|
ollama cp llama3 gpt-3.5-turbo
|
||||||
```
|
```
|
||||||
|
|
||||||
Afterwards, this new model name can be specified the `model` field:
|
Afterwards, this new model name can be specified the `model` field:
|
||||||
|
|||||||
@@ -1,85 +1,87 @@
|
|||||||
# How to troubleshoot issues
|
# How to troubleshoot issues
|
||||||
|
|
||||||
Sometimes Ollama may not perform as expected. One of the best ways to figure out what happened is to take a look at the logs. Find the logs on **Mac** by running the command:
|
Sometimes Ollama may not perform as expected. One of the best ways to figure out what happened is to take a look at the logs. Find the logs on **Mac** by running the command:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
cat ~/.ollama/logs/server.log
|
cat ~/.ollama/logs/server.log
|
||||||
```
|
```
|
||||||
|
|
||||||
On **Linux** systems with systemd, the logs can be found with this command:
|
On **Linux** systems with systemd, the logs can be found with this command:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
journalctl -u ollama
|
journalctl -u ollama
|
||||||
```
|
```
|
||||||
|
|
||||||
When you run Ollama in a **container**, the logs go to stdout/stderr in the container:
|
When you run Ollama in a **container**, the logs go to stdout/stderr in the container:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
docker logs <container-name>
|
docker logs <container-name>
|
||||||
```
|
```
|
||||||
(Use `docker ps` to find the container name)
|
(Use `docker ps` to find the container name)
|
||||||
|
|
||||||
If manually running `ollama serve` in a terminal, the logs will be on that terminal.
|
If manually running `ollama serve` in a terminal, the logs will be on that terminal.
|
||||||
|
|
||||||
When you run Ollama on **Windows**, there are a few different locations. You can view them in the explorer window by hitting `<cmd>+R` and type in:
|
When you run Ollama on **Windows**, there are a few different locations. You can view them in the explorer window by hitting `<cmd>+R` and type in:
|
||||||
- `explorer %LOCALAPPDATA%\Ollama` to view logs
|
- `explorer %LOCALAPPDATA%\Ollama` to view logs
|
||||||
- `explorer %LOCALAPPDATA%\Programs\Ollama` to browse the binaries (The installer adds this to your user PATH)
|
- `explorer %LOCALAPPDATA%\Programs\Ollama` to browse the binaries (The installer adds this to your user PATH)
|
||||||
- `explorer %HOMEPATH%\.ollama` to browse where models and configuration is stored
|
- `explorer %HOMEPATH%\.ollama` to browse where models and configuration is stored
|
||||||
- `explorer %TEMP%` where temporary executable files are stored in one or more `ollama*` directories
|
- `explorer %TEMP%` where temporary executable files are stored in one or more `ollama*` directories
|
||||||
|
|
||||||
To enable additional debug logging to help troubleshoot problems, first **Quit the running app from the tray menu** then in a powershell terminal
|
To enable additional debug logging to help troubleshoot problems, first **Quit the running app from the tray menu** then in a powershell terminal
|
||||||
```powershell
|
```powershell
|
||||||
$env:OLLAMA_DEBUG="1"
|
$env:OLLAMA_DEBUG="1"
|
||||||
& "ollama app.exe"
|
& "ollama app.exe"
|
||||||
```
|
```
|
||||||
|
|
||||||
Join the [Discord](https://discord.gg/ollama) for help interpreting the logs.
|
Join the [Discord](https://discord.gg/ollama) for help interpreting the logs.
|
||||||
|
|
||||||
## LLM libraries
|
## LLM libraries
|
||||||
|
|
||||||
Ollama includes multiple LLM libraries compiled for different GPUs and CPU
|
Ollama includes multiple LLM libraries compiled for different GPUs and CPU vector features. Ollama tries to pick the best one based on the capabilities of your system. If this autodetection has problems, or you run into other problems (e.g. crashes in your GPU) you can work around this by forcing a specific LLM library. `cpu_avx2` will perform the best, followed by `cpu_avx`, and the slowest but most compatible is `cpu`. Rosetta emulation under MacOS will work with the `cpu` library.
|
||||||
vector features. Ollama tries to pick the best one based on the capabilities of
|
|
||||||
your system. If this autodetection has problems, or you run into other problems
|
In the server log, you will see a message that looks something like this (varies from release to release):
|
||||||
(e.g. crashes in your GPU) you can workaround this by forcing a specific LLM
|
|
||||||
library. `cpu_avx2` will perform the best, followed by `cpu_avx` an the slowest
|
```
|
||||||
but most compatible is `cpu`. Rosetta emulation under MacOS will work with the
|
Dynamic LLM libraries [rocm_v6 cpu cpu_avx cpu_avx2 cuda_v11 rocm_v5]
|
||||||
`cpu` library.
|
```
|
||||||
|
|
||||||
In the server log, you will see a message that looks something like this (varies
|
**Experimental LLM Library Override**
|
||||||
from release to release):
|
|
||||||
|
You can set OLLAMA_LLM_LIBRARY to any of the available LLM libraries to bypass autodetection, so for example, if you have a CUDA card, but want to force the CPU LLM library with AVX2 vector support, use:
|
||||||
```
|
|
||||||
Dynamic LLM libraries [rocm_v6 cpu cpu_avx cpu_avx2 cuda_v11 rocm_v5]
|
```
|
||||||
```
|
OLLAMA_LLM_LIBRARY="cpu_avx2" ollama serve
|
||||||
|
```
|
||||||
**Experimental LLM Library Override**
|
|
||||||
|
You can see what features your CPU has with the following.
|
||||||
You can set OLLAMA_LLM_LIBRARY to any of the available LLM libraries to bypass
|
```
|
||||||
autodetection, so for example, if you have a CUDA card, but want to force the
|
cat /proc/cpuinfo| grep flags | head -1
|
||||||
CPU LLM library with AVX2 vector support, use:
|
```
|
||||||
|
|
||||||
```
|
## Installing older or pre-release versions on Linux
|
||||||
OLLAMA_LLM_LIBRARY="cpu_avx2" ollama serve
|
|
||||||
```
|
If you run into problems on Linux and want to install an older version, or you'd like to try out a pre-release before it's officially released, you can tell the install script which version to install.
|
||||||
|
|
||||||
You can see what features your CPU has with the following.
|
```sh
|
||||||
```
|
curl -fsSL https://ollama.com/install.sh | OLLAMA_VERSION="0.1.29" sh
|
||||||
cat /proc/cpuinfo| grep flags | head -1
|
```
|
||||||
```
|
|
||||||
|
## Linux tmp noexec
|
||||||
## Installing older or pre-release versions on Linux
|
|
||||||
|
If your system is configured with the "noexec" flag where Ollama stores its temporary executable files, you can specify an alternate location by setting OLLAMA_TMPDIR to a location writable by the user ollama runs as. For example OLLAMA_TMPDIR=/usr/share/ollama/
|
||||||
If you run into problems on Linux and want to install an older version, or you'd
|
|
||||||
like to try out a pre-release before it's officially released, you can tell the
|
## Container fails to run on NVIDIA GPU
|
||||||
install script which version to install.
|
|
||||||
|
Make sure you've set up the container runtime first as described in [docker.md](./docker.md)
|
||||||
```sh
|
|
||||||
curl -fsSL https://ollama.com/install.sh | OLLAMA_VERSION="0.1.29" sh
|
Sometimes the container runtime can have difficulties initializing the GPU. When you check the server logs, this can show up as various error codes, such as "3" (not initialized), "46" (device unavailable), "100" (no device), "999" (unknown), or others. The following troubleshooting techniques may help resolve the problem
|
||||||
```
|
|
||||||
|
- Is the container runtime working? Try `docker run --gpus all ubuntu nvidia-smi` - if this doesn't work, Ollama wont be able to see your NVIDIA GPU.
|
||||||
## Linux tmp noexec
|
- Is the uvm driver not loaded? `sudo nvidia-modprobe -u`
|
||||||
|
- Try reloading the nvidia_uvm driver - `sudo rmmod nvidia_uvm` then `sudo modprobe nvidia_uvm`
|
||||||
If your system is configured with the "noexec" flag where Ollama stores its
|
- Try rebooting
|
||||||
temporary executable files, you can specify an alternate location by setting
|
- Make sure you're running the latest nvidia drivers
|
||||||
OLLAMA_TMPDIR to a location writable by the user ollama runs as. For example
|
|
||||||
OLLAMA_TMPDIR=/usr/share/ollama/
|
If none of those resolve the problem, gather additional information and file an issue:
|
||||||
|
- Set `CUDA_ERROR_LEVEL=50` and try again to get more diagnostic logs
|
||||||
|
- Check dmesg for any errors `sudo dmesg | grep -i nvrm` and `sudo dmesg | grep -i nvidia`
|
||||||
|
|||||||
@@ -5,17 +5,17 @@ In this tutorial, we are going to use JavaScript with LangChain and Ollama to le
|
|||||||
To get started, let's just use **LangChain** to ask a simple question to a model. To do this with JavaScript, we need to install **LangChain**:
|
To get started, let's just use **LangChain** to ask a simple question to a model. To do this with JavaScript, we need to install **LangChain**:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
npm install langchain
|
npm install @langchain/community
|
||||||
```
|
```
|
||||||
|
|
||||||
Now we can start building out our JavaScript:
|
Now we can start building out our JavaScript:
|
||||||
|
|
||||||
```javascript
|
```javascript
|
||||||
import { Ollama } from "langchain/llms/ollama";
|
import { Ollama } from "@langchain/community/llms/ollama";
|
||||||
|
|
||||||
const ollama = new Ollama({
|
const ollama = new Ollama({
|
||||||
baseUrl: "http://localhost:11434",
|
baseUrl: "http://localhost:11434",
|
||||||
model: "llama2",
|
model: "llama3",
|
||||||
});
|
});
|
||||||
|
|
||||||
const answer = await ollama.invoke(`why is the sky blue?`);
|
const answer = await ollama.invoke(`why is the sky blue?`);
|
||||||
@@ -23,10 +23,10 @@ const answer = await ollama.invoke(`why is the sky blue?`);
|
|||||||
console.log(answer);
|
console.log(answer);
|
||||||
```
|
```
|
||||||
|
|
||||||
That will get us the same thing as if we ran `ollama run llama2 "why is the sky blue"` in the terminal. But we want to load a document from the web to ask a question against. **Cheerio** is a great library for ingesting a webpage, and **LangChain** uses it in their **CheerioWebBaseLoader**. So let's install **Cheerio** and build that part of the app.
|
That will get us the same thing as if we ran `ollama run llama3 "why is the sky blue"` in the terminal. But we want to load a document from the web to ask a question against. **Cheerio** is a great library for ingesting a webpage, and **LangChain** uses it in their **CheerioWebBaseLoader**. So let's install **Cheerio** and build that part of the app.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
npm install cheerio
|
npm install cheerio
|
||||||
```
|
```
|
||||||
|
|
||||||
```javascript
|
```javascript
|
||||||
|
|||||||
@@ -12,15 +12,17 @@ So let's figure out how we can use **LangChain** with Ollama to ask our question
|
|||||||
|
|
||||||
Let's start by asking a simple question that we can get an answer to from the **Llama2** model using **Ollama**. First, we need to install the **LangChain** package:
|
Let's start by asking a simple question that we can get an answer to from the **Llama2** model using **Ollama**. First, we need to install the **LangChain** package:
|
||||||
|
|
||||||
`pip install langchain`
|
`pip install langchain_community`
|
||||||
|
|
||||||
Then we can create a model and ask the question:
|
Then we can create a model and ask the question:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
from langchain.llms import Ollama
|
from langchain_community.llms import Ollama
|
||||||
ollama = Ollama(base_url='http://localhost:11434',
|
ollama = Ollama(
|
||||||
model="llama2")
|
base_url='http://localhost:11434',
|
||||||
print(ollama("why is the sky blue"))
|
model="llama3"
|
||||||
|
)
|
||||||
|
print(ollama.invoke("why is the sky blue"))
|
||||||
```
|
```
|
||||||
|
|
||||||
Notice that we are defining the model and the base URL for Ollama.
|
Notice that we are defining the model and the base URL for Ollama.
|
||||||
|
|||||||
108
docs/windows.md
108
docs/windows.md
@@ -1,47 +1,61 @@
|
|||||||
# Ollama Windows Preview
|
# Ollama Windows Preview
|
||||||
|
|
||||||
Welcome to the Ollama Windows preview.
|
Welcome to the Ollama Windows preview.
|
||||||
|
|
||||||
No more WSL required!
|
No more WSL required!
|
||||||
|
|
||||||
Ollama now runs as a native Windows application, including NVIDIA and AMD Radeon GPU support.
|
Ollama now runs as a native Windows application, including NVIDIA and AMD Radeon GPU support.
|
||||||
After installing Ollama Windows Preview, Ollama will run in the background and
|
After installing Ollama Windows Preview, Ollama will run in the background and
|
||||||
the `ollama` command line is available in `cmd`, `powershell` or your favorite
|
the `ollama` command line is available in `cmd`, `powershell` or your favorite
|
||||||
terminal application. As usual the Ollama [api](./api.md) will be served on
|
terminal application. As usual the Ollama [api](./api.md) will be served on
|
||||||
`http://localhost:11434`.
|
`http://localhost:11434`.
|
||||||
|
|
||||||
As this is a preview release, you should expect a few bugs here and there. If
|
As this is a preview release, you should expect a few bugs here and there. If
|
||||||
you run into a problem you can reach out on
|
you run into a problem you can reach out on
|
||||||
[Discord](https://discord.gg/ollama), or file an
|
[Discord](https://discord.gg/ollama), or file an
|
||||||
[issue](https://github.com/ollama/ollama/issues).
|
[issue](https://github.com/ollama/ollama/issues).
|
||||||
Logs will often be helpful in dianosing the problem (see
|
Logs will often be helpful in diagnosing the problem (see
|
||||||
[Troubleshooting](#troubleshooting) below)
|
[Troubleshooting](#troubleshooting) below)
|
||||||
|
|
||||||
## System Requirements
|
## System Requirements
|
||||||
|
|
||||||
* Windows 10 or newer, Home or Pro
|
* Windows 10 or newer, Home or Pro
|
||||||
* NVIDIA 452.39 or newer Drivers if you have an NVIDIA card
|
* NVIDIA 452.39 or newer Drivers if you have an NVIDIA card
|
||||||
* AMD Radeon Driver https://www.amd.com/en/support if you have a Radeon card
|
* AMD Radeon Driver https://www.amd.com/en/support if you have a Radeon card
|
||||||
|
|
||||||
## API Access
|
## API Access
|
||||||
|
|
||||||
Here's a quick example showing API access from `powershell`
|
Here's a quick example showing API access from `powershell`
|
||||||
```powershell
|
```powershell
|
||||||
(Invoke-WebRequest -method POST -Body '{"model":"llama2", "prompt":"Why is the sky blue?", "stream": false}' -uri http://localhost:11434/api/generate ).Content | ConvertFrom-json
|
(Invoke-WebRequest -method POST -Body '{"model":"llama3", "prompt":"Why is the sky blue?", "stream": false}' -uri http://localhost:11434/api/generate ).Content | ConvertFrom-json
|
||||||
```
|
```
|
||||||
|
|
||||||
## Troubleshooting
|
## Troubleshooting
|
||||||
|
|
||||||
While we're in preview, `OLLAMA_DEBUG` is always enabled, which adds
|
While we're in preview, `OLLAMA_DEBUG` is always enabled, which adds
|
||||||
a "view logs" menu item to the app, and increses logging for the GUI app and
|
a "view logs" menu item to the app, and increases logging for the GUI app and
|
||||||
server.
|
server.
|
||||||
|
|
||||||
Ollama on Windows stores files in a few different locations. You can view them in
|
Ollama on Windows stores files in a few different locations. You can view them in
|
||||||
the explorer window by hitting `<cmd>+R` and type in:
|
the explorer window by hitting `<cmd>+R` and type in:
|
||||||
- `explorer %LOCALAPPDATA%\Ollama` contains logs, and downloaded updates
|
- `explorer %LOCALAPPDATA%\Ollama` contains logs, and downloaded updates
|
||||||
- *app.log* contains logs from the GUI application
|
- *app.log* contains logs from the GUI application
|
||||||
- *server.log* contains the server logs
|
- *server.log* contains the server logs
|
||||||
- *upgrade.log* contains log output for upgrades
|
- *upgrade.log* contains log output for upgrades
|
||||||
- `explorer %LOCALAPPDATA%\Programs\Ollama` contains the binaries (The installer adds this to your user PATH)
|
- `explorer %LOCALAPPDATA%\Programs\Ollama` contains the binaries (The installer adds this to your user PATH)
|
||||||
- `explorer %HOMEPATH%\.ollama` contains models and configuration
|
- `explorer %HOMEPATH%\.ollama` contains models and configuration
|
||||||
- `explorer %TEMP%` contains temporary executable files in one or more `ollama*` directories
|
- `explorer %TEMP%` contains temporary executable files in one or more `ollama*` directories
|
||||||
|
|
||||||
|
|
||||||
|
## Standalone CLI
|
||||||
|
|
||||||
|
The easiest way to install Ollama on Windows is to use the `OllamaSetup.exe`
|
||||||
|
installer. It installs in your account without requiring Administrator rights.
|
||||||
|
We update Ollama regularly to support the latest models, and this installer will
|
||||||
|
help you keep up to date.
|
||||||
|
|
||||||
|
If you'd like to install or integrate Ollama as a service, a standalone
|
||||||
|
`ollama-windows-amd64.zip` zip file is available containing only the Ollama CLI
|
||||||
|
and GPU library dependencies for Nvidia and AMD. This allows for embedding
|
||||||
|
Ollama in existing applications, or running it as a system service via `ollama
|
||||||
|
serve` with tools such as [NSSM](https://nssm.cc/).
|
||||||
|
|||||||
212
envconfig/config.go
Normal file
212
envconfig/config.go
Normal file
@@ -0,0 +1,212 @@
|
|||||||
|
package envconfig
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"runtime"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// Set via OLLAMA_ORIGINS in the environment
|
||||||
|
AllowOrigins []string
|
||||||
|
// Set via OLLAMA_DEBUG in the environment
|
||||||
|
Debug bool
|
||||||
|
// Experimental flash attention
|
||||||
|
FlashAttention bool
|
||||||
|
// Set via OLLAMA_KEEP_ALIVE in the environment
|
||||||
|
KeepAlive string
|
||||||
|
// Set via OLLAMA_LLM_LIBRARY in the environment
|
||||||
|
LLMLibrary string
|
||||||
|
// Set via OLLAMA_MAX_LOADED_MODELS in the environment
|
||||||
|
MaxRunners int
|
||||||
|
// Set via OLLAMA_MAX_QUEUE in the environment
|
||||||
|
MaxQueuedRequests int
|
||||||
|
// Set via OLLAMA_MAX_VRAM in the environment
|
||||||
|
MaxVRAM uint64
|
||||||
|
// Set via OLLAMA_NOHISTORY in the environment
|
||||||
|
NoHistory bool
|
||||||
|
// Set via OLLAMA_NOPRUNE in the environment
|
||||||
|
NoPrune bool
|
||||||
|
// Set via OLLAMA_NUM_PARALLEL in the environment
|
||||||
|
NumParallel int
|
||||||
|
// Set via OLLAMA_RUNNERS_DIR in the environment
|
||||||
|
RunnersDir string
|
||||||
|
// Set via OLLAMA_TMPDIR in the environment
|
||||||
|
TmpDir string
|
||||||
|
)
|
||||||
|
|
||||||
|
type EnvVar struct {
|
||||||
|
Name string
|
||||||
|
Value any
|
||||||
|
Description string
|
||||||
|
}
|
||||||
|
|
||||||
|
func AsMap() map[string]EnvVar {
|
||||||
|
return map[string]EnvVar{
|
||||||
|
"OLLAMA_DEBUG": {"OLLAMA_DEBUG", Debug, "Show additional debug information (e.g. OLLAMA_DEBUG=1)"},
|
||||||
|
"OLLAMA_FLASH_ATTENTION": {"OLLAMA_FLASH_ATTENTION", FlashAttention, "Enabled flash attention"},
|
||||||
|
"OLLAMA_HOST": {"OLLAMA_HOST", "", "IP Address for the ollama server (default 127.0.0.1:11434)"},
|
||||||
|
"OLLAMA_KEEP_ALIVE": {"OLLAMA_KEEP_ALIVE", KeepAlive, "The duration that models stay loaded in memory (default \"5m\")"},
|
||||||
|
"OLLAMA_LLM_LIBRARY": {"OLLAMA_LLM_LIBRARY", LLMLibrary, "Set LLM library to bypass autodetection"},
|
||||||
|
"OLLAMA_MAX_LOADED_MODELS": {"OLLAMA_MAX_LOADED_MODELS", MaxRunners, "Maximum number of loaded models (default 1)"},
|
||||||
|
"OLLAMA_MAX_QUEUE": {"OLLAMA_MAX_QUEUE", MaxQueuedRequests, "Maximum number of queued requests"},
|
||||||
|
"OLLAMA_MAX_VRAM": {"OLLAMA_MAX_VRAM", MaxVRAM, "Maximum VRAM"},
|
||||||
|
"OLLAMA_MODELS": {"OLLAMA_MODELS", "", "The path to the models directory"},
|
||||||
|
"OLLAMA_NOHISTORY": {"OLLAMA_NOHISTORY", NoHistory, "Do not preserve readline history"},
|
||||||
|
"OLLAMA_NOPRUNE": {"OLLAMA_NOPRUNE", NoPrune, "Do not prune model blobs on startup"},
|
||||||
|
"OLLAMA_NUM_PARALLEL": {"OLLAMA_NUM_PARALLEL", NumParallel, "Maximum number of parallel requests (default 1)"},
|
||||||
|
"OLLAMA_ORIGINS": {"OLLAMA_ORIGINS", AllowOrigins, "A comma separated list of allowed origins"},
|
||||||
|
"OLLAMA_RUNNERS_DIR": {"OLLAMA_RUNNERS_DIR", RunnersDir, "Location for runners"},
|
||||||
|
"OLLAMA_TMPDIR": {"OLLAMA_TMPDIR", TmpDir, "Location for temporary files"},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Values() map[string]string {
|
||||||
|
vals := make(map[string]string)
|
||||||
|
for k, v := range AsMap() {
|
||||||
|
vals[k] = fmt.Sprintf("%v", v.Value)
|
||||||
|
}
|
||||||
|
return vals
|
||||||
|
}
|
||||||
|
|
||||||
|
var defaultAllowOrigins = []string{
|
||||||
|
"localhost",
|
||||||
|
"127.0.0.1",
|
||||||
|
"0.0.0.0",
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clean quotes and spaces from the value
|
||||||
|
func clean(key string) string {
|
||||||
|
return strings.Trim(os.Getenv(key), "\"' ")
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
// default values
|
||||||
|
NumParallel = 1
|
||||||
|
MaxRunners = 1
|
||||||
|
MaxQueuedRequests = 512
|
||||||
|
|
||||||
|
LoadConfig()
|
||||||
|
}
|
||||||
|
|
||||||
|
func LoadConfig() {
|
||||||
|
if debug := clean("OLLAMA_DEBUG"); debug != "" {
|
||||||
|
d, err := strconv.ParseBool(debug)
|
||||||
|
if err == nil {
|
||||||
|
Debug = d
|
||||||
|
} else {
|
||||||
|
Debug = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if fa := clean("OLLAMA_FLASH_ATTENTION"); fa != "" {
|
||||||
|
d, err := strconv.ParseBool(fa)
|
||||||
|
if err == nil {
|
||||||
|
FlashAttention = d
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
RunnersDir = clean("OLLAMA_RUNNERS_DIR")
|
||||||
|
if runtime.GOOS == "windows" && RunnersDir == "" {
|
||||||
|
// On Windows we do not carry the payloads inside the main executable
|
||||||
|
appExe, err := os.Executable()
|
||||||
|
if err != nil {
|
||||||
|
slog.Error("failed to lookup executable path", "error", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
cwd, err := os.Getwd()
|
||||||
|
if err != nil {
|
||||||
|
slog.Error("failed to lookup working directory", "error", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var paths []string
|
||||||
|
for _, root := range []string{filepath.Dir(appExe), cwd} {
|
||||||
|
paths = append(paths,
|
||||||
|
filepath.Join(root),
|
||||||
|
filepath.Join(root, "windows-"+runtime.GOARCH),
|
||||||
|
filepath.Join(root, "dist", "windows-"+runtime.GOARCH),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Try a few variations to improve developer experience when building from source in the local tree
|
||||||
|
for _, p := range paths {
|
||||||
|
candidate := filepath.Join(p, "ollama_runners")
|
||||||
|
_, err := os.Stat(candidate)
|
||||||
|
if err == nil {
|
||||||
|
RunnersDir = candidate
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if RunnersDir == "" {
|
||||||
|
slog.Error("unable to locate llm runner directory. Set OLLAMA_RUNNERS_DIR to the location of 'ollama_runners'")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
TmpDir = clean("OLLAMA_TMPDIR")
|
||||||
|
|
||||||
|
userLimit := clean("OLLAMA_MAX_VRAM")
|
||||||
|
if userLimit != "" {
|
||||||
|
avail, err := strconv.ParseUint(userLimit, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
slog.Error("invalid setting, ignoring", "OLLAMA_MAX_VRAM", userLimit, "error", err)
|
||||||
|
} else {
|
||||||
|
MaxVRAM = avail
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
LLMLibrary = clean("OLLAMA_LLM_LIBRARY")
|
||||||
|
|
||||||
|
if onp := clean("OLLAMA_NUM_PARALLEL"); onp != "" {
|
||||||
|
val, err := strconv.Atoi(onp)
|
||||||
|
if err != nil || val <= 0 {
|
||||||
|
slog.Error("invalid setting must be greater than zero", "OLLAMA_NUM_PARALLEL", onp, "error", err)
|
||||||
|
} else {
|
||||||
|
NumParallel = val
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if nohistory := clean("OLLAMA_NOHISTORY"); nohistory != "" {
|
||||||
|
NoHistory = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if noprune := clean("OLLAMA_NOPRUNE"); noprune != "" {
|
||||||
|
NoPrune = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if origins := clean("OLLAMA_ORIGINS"); origins != "" {
|
||||||
|
AllowOrigins = strings.Split(origins, ",")
|
||||||
|
}
|
||||||
|
for _, allowOrigin := range defaultAllowOrigins {
|
||||||
|
AllowOrigins = append(AllowOrigins,
|
||||||
|
fmt.Sprintf("http://%s", allowOrigin),
|
||||||
|
fmt.Sprintf("https://%s", allowOrigin),
|
||||||
|
fmt.Sprintf("http://%s:*", allowOrigin),
|
||||||
|
fmt.Sprintf("https://%s:*", allowOrigin),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
maxRunners := clean("OLLAMA_MAX_LOADED_MODELS")
|
||||||
|
if maxRunners != "" {
|
||||||
|
m, err := strconv.Atoi(maxRunners)
|
||||||
|
if err != nil {
|
||||||
|
slog.Error("invalid setting", "OLLAMA_MAX_LOADED_MODELS", maxRunners, "error", err)
|
||||||
|
} else {
|
||||||
|
MaxRunners = m
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if onp := os.Getenv("OLLAMA_MAX_QUEUE"); onp != "" {
|
||||||
|
p, err := strconv.Atoi(onp)
|
||||||
|
if err != nil || p <= 0 {
|
||||||
|
slog.Error("invalid setting", "OLLAMA_MAX_QUEUE", onp, "error", err)
|
||||||
|
} else {
|
||||||
|
MaxQueuedRequests = p
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
KeepAlive = clean("OLLAMA_KEEP_ALIVE")
|
||||||
|
}
|
||||||
23
envconfig/config_test.go
Normal file
23
envconfig/config_test.go
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
package envconfig
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestConfig(t *testing.T) {
|
||||||
|
Debug = false // Reset whatever was loaded in init()
|
||||||
|
t.Setenv("OLLAMA_DEBUG", "")
|
||||||
|
LoadConfig()
|
||||||
|
require.False(t, Debug)
|
||||||
|
t.Setenv("OLLAMA_DEBUG", "false")
|
||||||
|
LoadConfig()
|
||||||
|
require.False(t, Debug)
|
||||||
|
t.Setenv("OLLAMA_DEBUG", "1")
|
||||||
|
LoadConfig()
|
||||||
|
require.True(t, Debug)
|
||||||
|
t.Setenv("OLLAMA_FLASH_ATTENTION", "1")
|
||||||
|
LoadConfig()
|
||||||
|
require.True(t, FlashAttention)
|
||||||
|
}
|
||||||
@@ -1,10 +0,0 @@
|
|||||||
# Bash Shell examples
|
|
||||||
|
|
||||||
When calling `ollama`, you can pass it a file to run all the prompts in the file, one after the other:
|
|
||||||
|
|
||||||
`ollama run llama2 < sourcequestions.txt`
|
|
||||||
|
|
||||||
This concept is used in the following example.
|
|
||||||
|
|
||||||
## Compare Models
|
|
||||||
`comparemodels.sh` is a script that runs all the questions in `sourcequestions.txt` using any 4 models you choose that you have already pulled from the Ollama library or have created locally.
|
|
||||||
@@ -1,64 +0,0 @@
|
|||||||
#! /usr/bin/env bash
|
|
||||||
# Compare multiple models by running them with the same questions
|
|
||||||
|
|
||||||
NUMBEROFCHOICES=4
|
|
||||||
SELECTIONS=()
|
|
||||||
declare -a SUMS=()
|
|
||||||
|
|
||||||
# Get the list of models
|
|
||||||
CHOICES=$(ollama list | awk '{print $1}')
|
|
||||||
|
|
||||||
# Select which models to run as a comparison
|
|
||||||
echo "Select $NUMBEROFCHOICES models to compare:"
|
|
||||||
select ITEM in $CHOICES; do
|
|
||||||
if [[ -n $ITEM ]]; then
|
|
||||||
echo "You have selected $ITEM"
|
|
||||||
SELECTIONS+=("$ITEM")
|
|
||||||
((COUNT++))
|
|
||||||
if [[ $COUNT -eq $NUMBEROFCHOICES ]]; then
|
|
||||||
break
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
echo "Invalid selection"
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
# Loop through each of the selected models
|
|
||||||
for ITEM in "${SELECTIONS[@]}"; do
|
|
||||||
echo "--------------------------------------------------------------"
|
|
||||||
echo "Loading the model $ITEM into memory"
|
|
||||||
ollama run "$ITEM" ""
|
|
||||||
echo "--------------------------------------------------------------"
|
|
||||||
echo "Running the questions through the model $ITEM"
|
|
||||||
COMMAND_OUTPUT=$(ollama run "$ITEM" --verbose < sourcequestions.txt 2>&1| tee /dev/stderr)
|
|
||||||
|
|
||||||
# eval duration is sometimes listed in seconds and sometimes in milliseconds.
|
|
||||||
# Add up the values for each model
|
|
||||||
SUM=$(echo "$COMMAND_OUTPUT" | awk '
|
|
||||||
/eval duration:/ {
|
|
||||||
value = $3
|
|
||||||
if (index(value, "ms") > 0) {
|
|
||||||
gsub("ms", "", value)
|
|
||||||
value /= 1000
|
|
||||||
} else {
|
|
||||||
gsub("s", "", value)
|
|
||||||
}
|
|
||||||
sum += value
|
|
||||||
}
|
|
||||||
END { print sum }')
|
|
||||||
|
|
||||||
|
|
||||||
SUMS+=("All questions for $ITEM completed in $SUM seconds")
|
|
||||||
done
|
|
||||||
|
|
||||||
echo ""
|
|
||||||
echo "--------------------------------------------------------------"
|
|
||||||
echo -e "Sums of eval durations for each run:"
|
|
||||||
for val in "${SUMS[@]}"; do
|
|
||||||
echo "$val"
|
|
||||||
done
|
|
||||||
|
|
||||||
echo "--------------------------------------------------------------"
|
|
||||||
echo "Comparison complete. Now you can decide"
|
|
||||||
echo "which model is best."
|
|
||||||
echo "--------------------------------------------------------------"
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
Why is the sky blue
|
|
||||||
What is a black hole
|
|
||||||
Explain the big bang theory like I am 5?
|
|
||||||
What is the quickest way to win a game of Monopoly with 3 others?
|
|
||||||
Why does a vacuum bottle keep my coffee hot and my milkshake cold?
|
|
||||||
What is the difference between a meteor, a meteorite, and a meteoroid?
|
|
||||||
Create an array with 5 items and print to the console. Do this in Python, C#, Typescript, and Rust.
|
|
||||||
1
examples/flyio/.gitignore
vendored
Normal file
1
examples/flyio/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
fly.toml
|
||||||
67
examples/flyio/README.md
Normal file
67
examples/flyio/README.md
Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
# Deploy Ollama to Fly.io
|
||||||
|
|
||||||
|
> Note: this example exposes a public endpoint and does not configure authentication. Use with care.
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
- Ollama: https://ollama.com/download
|
||||||
|
- Fly.io account. Sign up for a free account: https://fly.io/app/sign-up
|
||||||
|
|
||||||
|
## Steps
|
||||||
|
|
||||||
|
1. Login to Fly.io
|
||||||
|
|
||||||
|
```bash
|
||||||
|
fly auth login
|
||||||
|
```
|
||||||
|
|
||||||
|
1. Create a new Fly app
|
||||||
|
|
||||||
|
```bash
|
||||||
|
fly launch --name <name> --image ollama/ollama --internal-port 11434 --vm-size shared-cpu-8x --now
|
||||||
|
```
|
||||||
|
|
||||||
|
1. Pull and run `orca-mini:3b`
|
||||||
|
|
||||||
|
```bash
|
||||||
|
OLLAMA_HOST=https://<name>.fly.dev ollama run orca-mini:3b
|
||||||
|
```
|
||||||
|
|
||||||
|
`shared-cpu-8x` is a free-tier eligible machine type. For better performance, switch to a `performance` or `dedicated` machine type or attach a GPU for hardware acceleration (see below).
|
||||||
|
|
||||||
|
## (Optional) Persistent Volume
|
||||||
|
|
||||||
|
By default Fly Machines use ephemeral storage which is problematic if you want to use the same model across restarts without pulling it again. Create and attach a persistent volume to store the downloaded models:
|
||||||
|
|
||||||
|
1. Create the Fly Volume
|
||||||
|
|
||||||
|
```bash
|
||||||
|
fly volume create ollama
|
||||||
|
```
|
||||||
|
|
||||||
|
1. Update `fly.toml` and add `[mounts]`
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[mounts]
|
||||||
|
source = "ollama"
|
||||||
|
destination = "/mnt/ollama/models"
|
||||||
|
```
|
||||||
|
|
||||||
|
1. Update `fly.toml` and add `[env]`
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[env]
|
||||||
|
OLLAMA_MODELS = "/mnt/ollama/models"
|
||||||
|
```
|
||||||
|
|
||||||
|
1. Deploy your app
|
||||||
|
|
||||||
|
```bash
|
||||||
|
fly deploy
|
||||||
|
```
|
||||||
|
|
||||||
|
## (Optional) Hardware Acceleration
|
||||||
|
|
||||||
|
Fly.io GPU is currently in waitlist. Sign up for the waitlist: https://fly.io/gpu
|
||||||
|
|
||||||
|
Once you've been accepted, create the app with the additional flags `--vm-gpu-kind a100-pcie-40gb` or `--vm-gpu-kind a100-pcie-80gb`.
|
||||||
@@ -35,7 +35,7 @@ func main() {
|
|||||||
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
req := &api.ChatRequest{
|
req := &api.ChatRequest{
|
||||||
Model: "llama2",
|
Model: "llama3",
|
||||||
Messages: messages,
|
Messages: messages,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -19,7 +19,7 @@ func main() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
defer resp.Body.Close()
|
defer resp.Body.Close()
|
||||||
|
|
||||||
responseData, err := io.ReadAll(resp.Body)
|
responseData, err := io.ReadAll(resp.Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
|
|||||||
@@ -7,12 +7,24 @@
|
|||||||
|
|
||||||
## Steps
|
## Steps
|
||||||
|
|
||||||
1. Create the Ollama namespace, daemon set, and service
|
1. Create the Ollama namespace, deployment, and service
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
kubectl apply -f cpu.yaml
|
kubectl apply -f cpu.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## (Optional) Hardware Acceleration
|
||||||
|
|
||||||
|
Hardware acceleration in Kubernetes requires NVIDIA's [`k8s-device-plugin`](https://github.com/NVIDIA/k8s-device-plugin) which is deployed in Kubernetes in form of daemonset. Follow the link for more details.
|
||||||
|
|
||||||
|
Once configured, create a GPU enabled Ollama deployment.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
kubectl apply -f gpu.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
## Test
|
||||||
|
|
||||||
1. Port forward the Ollama service to connect and use it locally
|
1. Port forward the Ollama service to connect and use it locally
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
@@ -23,14 +35,4 @@
|
|||||||
|
|
||||||
```bash
|
```bash
|
||||||
ollama run orca-mini:3b
|
ollama run orca-mini:3b
|
||||||
```
|
```
|
||||||
|
|
||||||
## (Optional) Hardware Acceleration
|
|
||||||
|
|
||||||
Hardware acceleration in Kubernetes requires NVIDIA's [`k8s-device-plugin`](https://github.com/NVIDIA/k8s-device-plugin). Follow the link for more details.
|
|
||||||
|
|
||||||
Once configured, create a GPU enabled Ollama deployment.
|
|
||||||
|
|
||||||
```bash
|
|
||||||
kubectl apply -f gpu.yaml
|
|
||||||
```
|
|
||||||
@@ -40,9 +40,9 @@ while True:
|
|||||||
continue
|
continue
|
||||||
|
|
||||||
# Prompt
|
# Prompt
|
||||||
template = """Use the following pieces of context to answer the question at the end.
|
template = """Use the following pieces of context to answer the question at the end.
|
||||||
If you don't know the answer, just say that you don't know, don't try to make up an answer.
|
If you don't know the answer, just say that you don't know, don't try to make up an answer.
|
||||||
Use three sentences maximum and keep the answer as concise as possible.
|
Use three sentences maximum and keep the answer as concise as possible.
|
||||||
{context}
|
{context}
|
||||||
Question: {question}
|
Question: {question}
|
||||||
Helpful Answer:"""
|
Helpful Answer:"""
|
||||||
@@ -51,11 +51,11 @@ while True:
|
|||||||
template=template,
|
template=template,
|
||||||
)
|
)
|
||||||
|
|
||||||
llm = Ollama(model="llama2:13b", callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]))
|
llm = Ollama(model="llama3:8b", callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]))
|
||||||
qa_chain = RetrievalQA.from_chain_type(
|
qa_chain = RetrievalQA.from_chain_type(
|
||||||
llm,
|
llm,
|
||||||
retriever=vectorstore.as_retriever(),
|
retriever=vectorstore.as_retriever(),
|
||||||
chain_type_kwargs={"prompt": QA_CHAIN_PROMPT},
|
chain_type_kwargs={"prompt": QA_CHAIN_PROMPT},
|
||||||
)
|
)
|
||||||
|
|
||||||
result = qa_chain({"query": query})
|
result = qa_chain({"query": query})
|
||||||
|
|||||||
@@ -1,12 +1,12 @@
|
|||||||
from langchain.llms import Ollama
|
from langchain_community.llms import Ollama
|
||||||
from langchain.document_loaders import WebBaseLoader
|
from langchain_community.document_loaders import WebBaseLoader
|
||||||
from langchain.chains.summarize import load_summarize_chain
|
from langchain.chains.summarize import load_summarize_chain
|
||||||
|
|
||||||
loader = WebBaseLoader("https://ollama.com/blog/run-llama2-uncensored-locally")
|
loader = WebBaseLoader("https://ollama.com/blog/run-llama2-uncensored-locally")
|
||||||
docs = loader.load()
|
docs = loader.load()
|
||||||
|
|
||||||
llm = Ollama(model="llama2")
|
llm = Ollama(model="llama3")
|
||||||
chain = load_summarize_chain(llm, chain_type="stuff")
|
chain = load_summarize_chain(llm, chain_type="stuff")
|
||||||
|
|
||||||
result = chain.run(docs)
|
result = chain.invoke(docs)
|
||||||
print(result)
|
print(result)
|
||||||
|
|||||||
@@ -4,10 +4,10 @@ This example is a basic "hello world" of using LangChain with Ollama.
|
|||||||
|
|
||||||
## Running the Example
|
## Running the Example
|
||||||
|
|
||||||
1. Ensure you have the `llama2` model installed:
|
1. Ensure you have the `llama3` model installed:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
ollama pull llama2
|
ollama pull llama3
|
||||||
```
|
```
|
||||||
|
|
||||||
2. Install the Python Requirements.
|
2. Install the Python Requirements.
|
||||||
@@ -21,4 +21,3 @@ This example is a basic "hello world" of using LangChain with Ollama.
|
|||||||
```bash
|
```bash
|
||||||
python main.py
|
python main.py
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -1,6 +1,6 @@
|
|||||||
from langchain.llms import Ollama
|
from langchain.llms import Ollama
|
||||||
|
|
||||||
input = input("What is your question?")
|
input = input("What is your question?")
|
||||||
llm = Ollama(model="llama2")
|
llm = Ollama(model="llama3")
|
||||||
res = llm.predict(input)
|
res = llm.predict(input)
|
||||||
print (res)
|
print (res)
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
FROM llama2
|
FROM llama3
|
||||||
PARAMETER temperature 1
|
PARAMETER temperature 1
|
||||||
SYSTEM """
|
SYSTEM """
|
||||||
You are Mario from super mario bros, acting as an assistant.
|
You are Mario from super mario bros, acting as an assistant.
|
||||||
|
|||||||
@@ -2,12 +2,12 @@
|
|||||||
|
|
||||||
# Example character: Mario
|
# Example character: Mario
|
||||||
|
|
||||||
This example shows how to create a basic character using Llama2 as the base model.
|
This example shows how to create a basic character using Llama3 as the base model.
|
||||||
|
|
||||||
To run this example:
|
To run this example:
|
||||||
|
|
||||||
1. Download the Modelfile
|
1. Download the Modelfile
|
||||||
2. `ollama pull llama2` to get the base model used in the model file.
|
2. `ollama pull llama3` to get the base model used in the model file.
|
||||||
3. `ollama create NAME -f ./Modelfile`
|
3. `ollama create NAME -f ./Modelfile`
|
||||||
4. `ollama run NAME`
|
4. `ollama run NAME`
|
||||||
|
|
||||||
@@ -18,7 +18,7 @@ Ask it some questions like "Who are you?" or "Is Peach in trouble again?"
|
|||||||
What the model file looks like:
|
What the model file looks like:
|
||||||
|
|
||||||
```
|
```
|
||||||
FROM llama2
|
FROM llama3
|
||||||
PARAMETER temperature 1
|
PARAMETER temperature 1
|
||||||
SYSTEM """
|
SYSTEM """
|
||||||
You are Mario from Super Mario Bros, acting as an assistant.
|
You are Mario from Super Mario Bros, acting as an assistant.
|
||||||
|
|||||||
@@ -2,16 +2,16 @@ import requests
|
|||||||
import json
|
import json
|
||||||
import random
|
import random
|
||||||
|
|
||||||
model = "llama2"
|
model = "llama3"
|
||||||
template = {
|
template = {
|
||||||
"firstName": "",
|
"firstName": "",
|
||||||
"lastName": "",
|
"lastName": "",
|
||||||
"address": {
|
"address": {
|
||||||
"street": "",
|
"street": "",
|
||||||
"city": "",
|
"city": "",
|
||||||
"state": "",
|
"state": "",
|
||||||
"zipCode": ""
|
"zipCode": ""
|
||||||
},
|
},
|
||||||
"phoneNumber": ""
|
"phoneNumber": ""
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ countries = [
|
|||||||
"France",
|
"France",
|
||||||
]
|
]
|
||||||
country = random.choice(countries)
|
country = random.choice(countries)
|
||||||
model = "llama2"
|
model = "llama3"
|
||||||
|
|
||||||
prompt = f"generate one realistically believable sample data set of a persons first name, last name, address in {country}, and phone number. Do not use common names. Respond using JSON. Key names should have no backslashes, values should use plain ascii with no special characters."
|
prompt = f"generate one realistically believable sample data set of a persons first name, last name, address in {country}, and phone number. Do not use common names. Respond using JSON. Key names should have no backslashes, values should use plain ascii with no special characters."
|
||||||
|
|
||||||
|
|||||||
@@ -6,10 +6,10 @@ There are two python scripts in this example. `randomaddresses.py` generates ran
|
|||||||
|
|
||||||
## Running the Example
|
## Running the Example
|
||||||
|
|
||||||
1. Ensure you have the `llama2` model installed:
|
1. Ensure you have the `llama3` model installed:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
ollama pull llama2
|
ollama pull llama3
|
||||||
```
|
```
|
||||||
|
|
||||||
2. Install the Python Requirements.
|
2. Install the Python Requirements.
|
||||||
|
|||||||
@@ -2,13 +2,14 @@ import json
|
|||||||
import requests
|
import requests
|
||||||
|
|
||||||
# NOTE: ollama must be running for this to work, start the ollama app or run `ollama serve`
|
# NOTE: ollama must be running for this to work, start the ollama app or run `ollama serve`
|
||||||
model = "llama2" # TODO: update this for whatever model you wish to use
|
model = "llama3" # TODO: update this for whatever model you wish to use
|
||||||
|
|
||||||
|
|
||||||
def chat(messages):
|
def chat(messages):
|
||||||
r = requests.post(
|
r = requests.post(
|
||||||
"http://0.0.0.0:11434/api/chat",
|
"http://0.0.0.0:11434/api/chat",
|
||||||
json={"model": model, "messages": messages, "stream": True},
|
json={"model": model, "messages": messages, "stream": True},
|
||||||
|
stream=True
|
||||||
)
|
)
|
||||||
r.raise_for_status()
|
r.raise_for_status()
|
||||||
output = ""
|
output = ""
|
||||||
|
|||||||
@@ -4,10 +4,10 @@ The **chat** endpoint is one of two ways to generate text from an LLM with Ollam
|
|||||||
|
|
||||||
## Running the Example
|
## Running the Example
|
||||||
|
|
||||||
1. Ensure you have the `llama2` model installed:
|
1. Ensure you have the `llama3` model installed:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
ollama pull llama2
|
ollama pull llama3
|
||||||
```
|
```
|
||||||
|
|
||||||
2. Install the Python Requirements.
|
2. Install the Python Requirements.
|
||||||
|
|||||||
@@ -4,10 +4,10 @@ This example demonstrates how one would create a set of 'mentors' you can have a
|
|||||||
|
|
||||||
## Usage
|
## Usage
|
||||||
|
|
||||||
1. Add llama2 to have the mentors ask your questions:
|
1. Add llama3 to have the mentors ask your questions:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
ollama pull llama2
|
ollama pull llama3
|
||||||
```
|
```
|
||||||
|
|
||||||
2. Install prerequisites:
|
2. Install prerequisites:
|
||||||
|
|||||||
@@ -15,7 +15,7 @@ async function characterGenerator() {
|
|||||||
ollama.setModel("stablebeluga2:70b-q4_K_M");
|
ollama.setModel("stablebeluga2:70b-q4_K_M");
|
||||||
const bio = await ollama.generate(`create a bio of ${character} in a single long paragraph. Instead of saying '${character} is...' or '${character} was...' use language like 'You are...' or 'You were...'. Then create a paragraph describing the speaking mannerisms and style of ${character}. Don't include anything about how ${character} looked or what they sounded like, just focus on the words they said. Instead of saying '${character} would say...' use language like 'You should say...'. If you use quotes, always use single quotes instead of double quotes. If there are any specific words or phrases you used a lot, show how you used them. `);
|
const bio = await ollama.generate(`create a bio of ${character} in a single long paragraph. Instead of saying '${character} is...' or '${character} was...' use language like 'You are...' or 'You were...'. Then create a paragraph describing the speaking mannerisms and style of ${character}. Don't include anything about how ${character} looked or what they sounded like, just focus on the words they said. Instead of saying '${character} would say...' use language like 'You should say...'. If you use quotes, always use single quotes instead of double quotes. If there are any specific words or phrases you used a lot, show how you used them. `);
|
||||||
|
|
||||||
const thecontents = `FROM llama2\nSYSTEM """\n${bio.response.replace(/(\r\n|\n|\r)/gm, " ").replace('would', 'should')} All answers to questions should be related back to what you are most known for.\n"""`;
|
const thecontents = `FROM llama3\nSYSTEM """\n${bio.response.replace(/(\r\n|\n|\r)/gm, " ").replace('would', 'should')} All answers to questions should be related back to what you are most known for.\n"""`;
|
||||||
|
|
||||||
fs.writeFile(path.join(directory, 'Modelfile'), thecontents, (err: any) => {
|
fs.writeFile(path.join(directory, 'Modelfile'), thecontents, (err: any) => {
|
||||||
if (err) throw err;
|
if (err) throw err;
|
||||||
@@ -23,4 +23,4 @@ async function characterGenerator() {
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
characterGenerator();
|
characterGenerator();
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
import * as readline from "readline";
|
import * as readline from "readline";
|
||||||
|
|
||||||
const model = "llama2";
|
const model = "llama3";
|
||||||
type Message = {
|
type Message = {
|
||||||
role: "assistant" | "user" | "system";
|
role: "assistant" | "user" | "system";
|
||||||
content: string;
|
content: string;
|
||||||
@@ -74,4 +74,4 @@ async function main() {
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
main();
|
main();
|
||||||
|
|||||||
@@ -15,6 +15,7 @@ const (
|
|||||||
|
|
||||||
KibiByte = Byte * 1024
|
KibiByte = Byte * 1024
|
||||||
MebiByte = KibiByte * 1024
|
MebiByte = KibiByte * 1024
|
||||||
|
GibiByte = MebiByte * 1024
|
||||||
)
|
)
|
||||||
|
|
||||||
func HumanBytes(b int64) string {
|
func HumanBytes(b int64) string {
|
||||||
@@ -52,6 +53,8 @@ func HumanBytes(b int64) string {
|
|||||||
|
|
||||||
func HumanBytes2(b uint64) string {
|
func HumanBytes2(b uint64) string {
|
||||||
switch {
|
switch {
|
||||||
|
case b >= GibiByte:
|
||||||
|
return fmt.Sprintf("%.1f GiB", float64(b)/GibiByte)
|
||||||
case b >= MebiByte:
|
case b >= MebiByte:
|
||||||
return fmt.Sprintf("%.1f MiB", float64(b)/MebiByte)
|
return fmt.Sprintf("%.1f MiB", float64(b)/MebiByte)
|
||||||
case b >= KibiByte:
|
case b >= KibiByte:
|
||||||
|
|||||||
@@ -2,24 +2,41 @@ package format
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"math"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
Thousand = 1000
|
Thousand = 1000
|
||||||
Million = Thousand * 1000
|
Million = Thousand * 1000
|
||||||
Billion = Million * 1000
|
Billion = Million * 1000
|
||||||
|
Trillion = Billion * 1000
|
||||||
)
|
)
|
||||||
|
|
||||||
func HumanNumber(b uint64) string {
|
func HumanNumber(b uint64) string {
|
||||||
switch {
|
switch {
|
||||||
case b > Billion:
|
case b >= Trillion:
|
||||||
return fmt.Sprintf("%.0fB", math.Round(float64(b)/Billion))
|
number := float64(b) / Trillion
|
||||||
case b > Million:
|
return fmt.Sprintf("%sT", DecimalPlace(number))
|
||||||
return fmt.Sprintf("%.0fM", math.Round(float64(b)/Million))
|
case b >= Billion:
|
||||||
case b > Thousand:
|
number := float64(b) / Billion
|
||||||
return fmt.Sprintf("%.0fK", math.Round(float64(b)/Thousand))
|
return fmt.Sprintf("%sB", DecimalPlace(number))
|
||||||
|
case b >= Million:
|
||||||
|
number := float64(b) / Million
|
||||||
|
return fmt.Sprintf("%sM", DecimalPlace(number))
|
||||||
|
case b >= Thousand:
|
||||||
|
number := float64(b) / Thousand
|
||||||
|
return fmt.Sprintf("%sK", DecimalPlace(number))
|
||||||
default:
|
default:
|
||||||
return fmt.Sprintf("%d", b)
|
return fmt.Sprintf("%d", b)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func DecimalPlace(number float64) string {
|
||||||
|
switch {
|
||||||
|
case number >= 100:
|
||||||
|
return fmt.Sprintf("%.0f", number)
|
||||||
|
case number >= 10:
|
||||||
|
return fmt.Sprintf("%.1f", number)
|
||||||
|
default:
|
||||||
|
return fmt.Sprintf("%.2f", number)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
35
format/format_test.go
Normal file
35
format/format_test.go
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
package format
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestHumanNumber(t *testing.T) {
|
||||||
|
|
||||||
|
type testCase struct {
|
||||||
|
input uint64
|
||||||
|
expected string
|
||||||
|
}
|
||||||
|
|
||||||
|
testCases := []testCase{
|
||||||
|
{0, "0"},
|
||||||
|
{1000000, "1.00M"},
|
||||||
|
{125000000, "125M"},
|
||||||
|
{500500000, "500M"},
|
||||||
|
{500550000, "501M"},
|
||||||
|
{1000000000, "1.00B"},
|
||||||
|
{2800000000, "2.80B"},
|
||||||
|
{2850000000, "2.85B"},
|
||||||
|
{28550000000, "28.6B"},
|
||||||
|
{1000000000000, "1.00T"},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range testCases {
|
||||||
|
t.Run(tc.expected, func(t *testing.T) {
|
||||||
|
result := HumanNumber(tc.input)
|
||||||
|
if result != tc.expected {
|
||||||
|
t.Errorf("Expected %s, got %s", tc.expected, result)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -60,7 +60,9 @@ func humanTime(t time.Time, zeroValue string) string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
delta := time.Since(t)
|
delta := time.Since(t)
|
||||||
if delta < 0 {
|
if int(delta.Hours())/24/365 < -20 {
|
||||||
|
return "Forever"
|
||||||
|
} else if delta < 0 {
|
||||||
return humanDuration(-delta) + " from now"
|
return humanDuration(-delta) + " from now"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -32,4 +32,14 @@ func TestHumanTime(t *testing.T) {
|
|||||||
v := now.Add(800 * time.Millisecond)
|
v := now.Add(800 * time.Millisecond)
|
||||||
assertEqual(t, HumanTime(v, ""), "Less than a second from now")
|
assertEqual(t, HumanTime(v, ""), "Less than a second from now")
|
||||||
})
|
})
|
||||||
|
|
||||||
|
t.Run("time way in the future", func(t *testing.T) {
|
||||||
|
v := now.Add(24 * time.Hour * 365 * 200)
|
||||||
|
assertEqual(t, HumanTime(v, ""), "Forever")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("time way in the future lowercase", func(t *testing.T) {
|
||||||
|
v := now.Add(24 * time.Hour * 365 * 200)
|
||||||
|
assertEqual(t, HumanTimeLower(v, ""), "forever")
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|||||||
67
go.mod
67
go.mod
@@ -1,77 +1,76 @@
|
|||||||
module github.com/ollama/ollama
|
module github.com/ollama/ollama
|
||||||
|
|
||||||
go 1.22
|
go 1.22.0
|
||||||
|
|
||||||
toolchain go1.22.0
|
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/containerd/console v1.0.3
|
github.com/containerd/console v1.0.3
|
||||||
github.com/d4l3k/go-bfloat16 v0.0.0-20211005043715-690c3bdd05f1
|
|
||||||
github.com/emirpasic/gods v1.18.1
|
github.com/emirpasic/gods v1.18.1
|
||||||
github.com/gin-gonic/gin v1.9.1
|
github.com/gin-gonic/gin v1.10.0
|
||||||
github.com/golang/protobuf v1.5.0 // indirect
|
github.com/golang/protobuf v1.5.4 // indirect
|
||||||
github.com/google/uuid v1.0.0
|
github.com/google/uuid v1.1.2
|
||||||
github.com/mitchellh/mapstructure v1.5.0
|
|
||||||
github.com/olekukonko/tablewriter v0.0.5
|
github.com/olekukonko/tablewriter v0.0.5
|
||||||
github.com/spf13/cobra v1.7.0
|
github.com/spf13/cobra v1.7.0
|
||||||
github.com/stretchr/testify v1.8.4
|
github.com/stretchr/testify v1.9.0
|
||||||
github.com/x448/float16 v0.8.4
|
github.com/x448/float16 v0.8.4
|
||||||
golang.org/x/sync v0.3.0
|
golang.org/x/sync v0.3.0
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
|
github.com/d4l3k/go-bfloat16 v0.0.0-20211005043715-690c3bdd05f1
|
||||||
|
github.com/mattn/go-runewidth v0.0.14
|
||||||
github.com/nlpodyssey/gopickle v0.3.0
|
github.com/nlpodyssey/gopickle v0.3.0
|
||||||
github.com/pdevine/tensor v0.0.0-20240228013915-64ccaa8d9ca9
|
github.com/pdevine/tensor v0.0.0-20240510204454-f88f4562727c
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/apache/arrow/go/arrow v0.0.0-20201229220542-30ce2eb5d4dc // indirect
|
github.com/apache/arrow/go/arrow v0.0.0-20211112161151-bc219186db40 // indirect
|
||||||
|
github.com/bytedance/sonic/loader v0.1.1 // indirect
|
||||||
github.com/chewxy/hm v1.0.0 // indirect
|
github.com/chewxy/hm v1.0.0 // indirect
|
||||||
github.com/chewxy/math32 v1.0.8 // indirect
|
github.com/chewxy/math32 v1.10.1 // indirect
|
||||||
|
github.com/cloudwego/base64x v0.1.4 // indirect
|
||||||
|
github.com/cloudwego/iasm v0.2.0 // indirect
|
||||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||||
github.com/gogo/protobuf v1.3.2 // indirect
|
github.com/gogo/protobuf v1.3.2 // indirect
|
||||||
github.com/google/flatbuffers v1.12.0 // indirect
|
github.com/google/flatbuffers v24.3.25+incompatible // indirect
|
||||||
github.com/mattn/go-runewidth v0.0.14 // indirect
|
github.com/kr/text v0.2.0 // indirect
|
||||||
github.com/pkg/errors v0.9.1 // indirect
|
github.com/pkg/errors v0.9.1 // indirect
|
||||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||||
github.com/rivo/uniseg v0.2.0 // indirect
|
github.com/rivo/uniseg v0.2.0 // indirect
|
||||||
github.com/xtgo/set v1.0.0 // indirect
|
github.com/xtgo/set v1.0.0 // indirect
|
||||||
go4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6 // indirect
|
go4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6 // indirect
|
||||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
|
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
|
||||||
gonum.org/v1/gonum v0.8.2 // indirect
|
gonum.org/v1/gonum v0.15.0 // indirect
|
||||||
gorgonia.org/vecf32 v0.9.0 // indirect
|
gorgonia.org/vecf32 v0.9.0 // indirect
|
||||||
gorgonia.org/vecf64 v0.9.0 // indirect
|
gorgonia.org/vecf64 v0.9.0 // indirect
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/bytedance/sonic v1.9.1 // indirect
|
github.com/bytedance/sonic v1.11.6 // indirect
|
||||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
|
github.com/gabriel-vasile/mimetype v1.4.3 // indirect
|
||||||
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
|
github.com/gin-contrib/cors v1.7.2
|
||||||
github.com/gin-contrib/cors v1.4.0
|
|
||||||
github.com/gin-contrib/sse v0.1.0 // indirect
|
github.com/gin-contrib/sse v0.1.0 // indirect
|
||||||
github.com/go-playground/locales v0.14.1 // indirect
|
github.com/go-playground/locales v0.14.1 // indirect
|
||||||
github.com/go-playground/universal-translator v0.18.1 // indirect
|
github.com/go-playground/universal-translator v0.18.1 // indirect
|
||||||
github.com/go-playground/validator/v10 v10.14.0 // indirect
|
github.com/go-playground/validator/v10 v10.20.0 // indirect
|
||||||
github.com/goccy/go-json v0.10.2 // indirect
|
github.com/goccy/go-json v0.10.2 // indirect
|
||||||
github.com/google/go-cmp v0.5.9 // indirect
|
|
||||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||||
github.com/json-iterator/go v1.1.12 // indirect
|
github.com/json-iterator/go v1.1.12 // indirect
|
||||||
github.com/klauspost/cpuid/v2 v2.2.4 // indirect
|
github.com/klauspost/cpuid/v2 v2.2.7 // indirect
|
||||||
github.com/leodido/go-urn v1.2.4 // indirect
|
github.com/leodido/go-urn v1.4.0 // indirect
|
||||||
github.com/mattn/go-isatty v0.0.19 // indirect
|
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||||
github.com/pelletier/go-toml/v2 v2.0.8 // indirect
|
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
|
||||||
github.com/spf13/pflag v1.0.5 // indirect
|
github.com/spf13/pflag v1.0.5 // indirect
|
||||||
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
|
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
|
||||||
github.com/ugorji/go/codec v1.2.11 // indirect
|
github.com/ugorji/go/codec v1.2.12 // indirect
|
||||||
golang.org/x/arch v0.3.0 // indirect
|
golang.org/x/arch v0.8.0 // indirect
|
||||||
golang.org/x/crypto v0.14.0
|
golang.org/x/crypto v0.23.0
|
||||||
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63
|
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa
|
||||||
golang.org/x/net v0.17.0 // indirect
|
golang.org/x/net v0.25.0 // indirect
|
||||||
golang.org/x/sys v0.13.0
|
golang.org/x/sys v0.20.0
|
||||||
golang.org/x/term v0.13.0
|
golang.org/x/term v0.20.0
|
||||||
golang.org/x/text v0.14.0 // indirect
|
golang.org/x/text v0.15.0 // indirect
|
||||||
google.golang.org/protobuf v1.30.0
|
google.golang.org/protobuf v1.34.1
|
||||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||||
)
|
)
|
||||||
|
|||||||
246
go.sum
246
go.sum
@@ -1,22 +1,32 @@
|
|||||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
|
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
|
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||||
|
gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8=
|
||||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||||
|
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||||
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
|
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
|
||||||
github.com/apache/arrow/go/arrow v0.0.0-20201229220542-30ce2eb5d4dc h1:zvQ6w7KwtQWgMQiewOF9tFtundRMVZFSAksNV6ogzuY=
|
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||||
github.com/apache/arrow/go/arrow v0.0.0-20201229220542-30ce2eb5d4dc/go.mod h1:c9sxoIT3YgLxH4UhLOCKaBlEojuMhVYpk4Ntv3opUTQ=
|
github.com/apache/arrow/go/arrow v0.0.0-20211112161151-bc219186db40 h1:q4dksr6ICHXqG5hm0ZW5IHyeEJXoIJSOZeBLmWPNeIQ=
|
||||||
github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=
|
github.com/apache/arrow/go/arrow v0.0.0-20211112161151-bc219186db40/go.mod h1:Q7yQnSMnLvcXlZ8RV+jwz/6y1rQTqbX6C82SndT52Zs=
|
||||||
github.com/bytedance/sonic v1.9.1 h1:6iJ6NqdoxCDr6mbY8h18oSO+cShGSMRGCEo7F2h0x8s=
|
github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
|
||||||
github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=
|
github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0=
|
||||||
|
github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4=
|
||||||
|
github.com/bytedance/sonic/loader v0.1.1 h1:c+e5Pt1k/cy5wMveRDyk2X4B9hF4g7an8N3zCYjJFNM=
|
||||||
|
github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
|
||||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||||
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
|
|
||||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
|
|
||||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
|
|
||||||
github.com/chewxy/hm v1.0.0 h1:zy/TSv3LV2nD3dwUEQL2VhXeoXbb9QkpmdRAVUFiA6k=
|
github.com/chewxy/hm v1.0.0 h1:zy/TSv3LV2nD3dwUEQL2VhXeoXbb9QkpmdRAVUFiA6k=
|
||||||
github.com/chewxy/hm v1.0.0/go.mod h1:qg9YI4q6Fkj/whwHR1D+bOGeF7SniIP40VweVepLjg0=
|
github.com/chewxy/hm v1.0.0/go.mod h1:qg9YI4q6Fkj/whwHR1D+bOGeF7SniIP40VweVepLjg0=
|
||||||
github.com/chewxy/math32 v1.0.0/go.mod h1:Miac6hA1ohdDUTagnvJy/q+aNnEk16qWUdb8ZVhvCN0=
|
github.com/chewxy/math32 v1.0.0/go.mod h1:Miac6hA1ohdDUTagnvJy/q+aNnEk16qWUdb8ZVhvCN0=
|
||||||
github.com/chewxy/math32 v1.0.8 h1:fU5E4Ec4Z+5RtRAi3TovSxUjQPkgRh+HbP7tKB2OFbM=
|
github.com/chewxy/math32 v1.10.1 h1:LFpeY0SLJXeaiej/eIp2L40VYfscTvKh/FSEZ68uMkU=
|
||||||
github.com/chewxy/math32 v1.0.8/go.mod h1:dOB2rcuFrCn6UHrze36WSLVPKtzPMRAQvBvUwkSsLqs=
|
github.com/chewxy/math32 v1.10.1/go.mod h1:dOB2rcuFrCn6UHrze36WSLVPKtzPMRAQvBvUwkSsLqs=
|
||||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||||
|
github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y=
|
||||||
|
github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
|
||||||
|
github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg=
|
||||||
|
github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
|
||||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||||
|
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||||
|
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||||
github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw=
|
github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw=
|
||||||
github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
|
github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||||
@@ -31,30 +41,35 @@ github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FM
|
|||||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||||
|
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||||
|
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||||
|
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
|
||||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||||
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
|
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
|
||||||
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
|
github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
|
||||||
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
|
github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
|
||||||
github.com/gin-contrib/cors v1.4.0 h1:oJ6gwtUl3lqV0WEIwM/LxPF1QZ5qe2lGWdY2+bz7y0g=
|
github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk=
|
||||||
github.com/gin-contrib/cors v1.4.0/go.mod h1:bs9pNM0x/UsmHPBWT2xZz9ROh8xYjYkiURUfmBoMlcs=
|
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||||
|
github.com/gin-contrib/cors v1.7.2 h1:oLDHxdg8W/XDoN/8zamqk/Drgt4oVZDvaV0YmvVICQw=
|
||||||
|
github.com/gin-contrib/cors v1.7.2/go.mod h1:SUJVARKgQ40dmrzgXEVxj2m7Ig1v1qIboQkPDTQ9t2E=
|
||||||
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
|
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
|
||||||
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
|
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
|
||||||
github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk=
|
github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU=
|
||||||
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
|
github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y=
|
||||||
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
|
github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g=
|
||||||
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
|
github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks=
|
||||||
|
github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
|
||||||
|
github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY=
|
||||||
|
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||||
|
github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U=
|
||||||
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
|
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
|
||||||
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
|
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
|
||||||
github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
|
|
||||||
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
|
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
|
||||||
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
|
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
|
||||||
github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
|
|
||||||
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
|
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
|
||||||
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
|
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
|
||||||
github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos=
|
github.com/go-playground/validator/v10 v10.20.0 h1:K9ISHbSaI0lyB2eWMPJo+kOS/FBExVwjEviJTixqxL8=
|
||||||
github.com/go-playground/validator/v10 v10.14.0 h1:vgvQWe3XCz3gIeFDm/HnTIbj6UGmg/+t63MyGU2n5js=
|
github.com/go-playground/validator/v10 v10.20.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
|
||||||
github.com/go-playground/validator/v10 v10.14.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
|
|
||||||
github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
|
||||||
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
|
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
|
||||||
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||||
@@ -72,51 +87,54 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
|
|||||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||||
github.com/golang/protobuf v1.5.0 h1:LUVKkCeviFUMKqHa4tXIIij/lbhnMbP7Fn5wKdKkRh4=
|
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||||
github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
|
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||||
github.com/google/flatbuffers v1.12.0 h1:/PtAHvnBY4Kqnx/xCQ3OIV9uYcSFGScBsWI3Oogeh6w=
|
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||||
github.com/google/flatbuffers v1.12.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
|
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||||
|
github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
|
||||||
|
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||||
|
github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
|
||||||
|
github.com/google/flatbuffers v24.3.25+incompatible h1:CX395cjN9Kke9mmalRoL3d81AtFUxJM+yDthflgJGkI=
|
||||||
|
github.com/google/flatbuffers v24.3.25+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
|
||||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||||
|
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||||
github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA=
|
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
|
||||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
|
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||||
|
github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
|
||||||
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
|
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
|
||||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||||
|
github.com/klauspost/compress v1.13.1 h1:wXr2uRxZTJXHLly6qhJabee5JqIhTRoLBhDOA74hDEQ=
|
||||||
|
github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
|
||||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||||
github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk=
|
github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
|
||||||
github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
|
github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
||||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
|
||||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
|
||||||
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
|
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
|
||||||
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
|
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
|
||||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
|
||||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
|
||||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||||
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
|
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
|
||||||
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
|
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
|
||||||
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
|
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||||
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
|
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||||
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
|
|
||||||
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
|
||||||
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
|
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
|
||||||
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
|
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
|
||||||
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
|
||||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
|
||||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||||
@@ -126,12 +144,15 @@ github.com/nlpodyssey/gopickle v0.3.0 h1:BLUE5gxFLyyNOPzlXxt6GoHEMMxD0qhsE4p0CIQ
|
|||||||
github.com/nlpodyssey/gopickle v0.3.0/go.mod h1:f070HJ/yR+eLi5WmM1OXJEGaTpuJEUiib19olXgYha0=
|
github.com/nlpodyssey/gopickle v0.3.0/go.mod h1:f070HJ/yR+eLi5WmM1OXJEGaTpuJEUiib19olXgYha0=
|
||||||
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
|
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
|
||||||
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
|
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
|
||||||
github.com/pdevine/tensor v0.0.0-20240228013915-64ccaa8d9ca9 h1:DV4iXjNn6fGeDl1AkZ1I0QB/0DBjrc7kPpxHrmuDzW4=
|
github.com/pdevine/tensor v0.0.0-20240510204454-f88f4562727c h1:GwiUUjKefgvSNmv3NCvI/BL0kDebW6Xa+kcdpdc1mTY=
|
||||||
github.com/pdevine/tensor v0.0.0-20240228013915-64ccaa8d9ca9/go.mod h1:nR7l3gM6ubiOm+mCkmmUyIBUcBAyiUmW6dQrDZhugFE=
|
github.com/pdevine/tensor v0.0.0-20240510204454-f88f4562727c/go.mod h1:PSojXDXF7TbgQiD6kkd98IHOS0QqTyUEaWRiS8+BLu8=
|
||||||
github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
|
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
|
||||||
github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ=
|
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
|
||||||
github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4=
|
github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY=
|
||||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
|
||||||
|
github.com/pierrec/lz4/v4 v4.1.8 h1:ieHkV+i2BRzngO4Wd/3HGowuZStgq6QkPsD1eolNAO4=
|
||||||
|
github.com/pierrec/lz4/v4 v4.1.8/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
||||||
|
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
@@ -139,10 +160,11 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
|
|||||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||||
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
|
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
|
||||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||||
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||||
github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
|
github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
|
||||||
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
|
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
|
||||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||||
|
github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w=
|
||||||
github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
|
github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
|
||||||
github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
|
github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
|
||||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||||
@@ -150,96 +172,119 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
|
|||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||||
|
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||||
github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||||
github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
|
||||||
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
|
||||||
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
|
|
||||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||||
|
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||||
|
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||||
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
|
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
|
||||||
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
|
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
|
||||||
github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
|
github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE=
|
||||||
github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
|
github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
|
||||||
github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
|
|
||||||
github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
|
|
||||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||||
github.com/xtgo/set v1.0.0 h1:6BCNBRv3ORNDQ7fyoJXRv+tstJz3m1JVFQErfeZz2pY=
|
github.com/xtgo/set v1.0.0 h1:6BCNBRv3ORNDQ7fyoJXRv+tstJz3m1JVFQErfeZz2pY=
|
||||||
github.com/xtgo/set v1.0.0/go.mod h1:d3NHzGzSa0NmB2NhFyECA+QdRp29oEn2xbT+TpeFoM8=
|
github.com/xtgo/set v1.0.0/go.mod h1:d3NHzGzSa0NmB2NhFyECA+QdRp29oEn2xbT+TpeFoM8=
|
||||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
|
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||||
|
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
||||||
go4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6 h1:lGdhQUN/cnWdSH3291CUuxSEqc+AsGTiDxPP3r2J0l4=
|
go4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6 h1:lGdhQUN/cnWdSH3291CUuxSEqc+AsGTiDxPP3r2J0l4=
|
||||||
go4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E=
|
go4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E=
|
||||||
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
||||||
golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k=
|
golang.org/x/arch v0.8.0 h1:3wRIsP3pM4yUptoR96otTUOXI367OS0+c9eeRi9doIc=
|
||||||
golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
golang.org/x/arch v0.8.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
|
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
|
||||||
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
|
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
||||||
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
|
|
||||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ=
|
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8=
|
golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE=
|
||||||
|
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ=
|
||||||
|
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE=
|
||||||
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
|
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
|
||||||
|
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||||
|
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||||
|
golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||||
|
golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||||
|
golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||||
|
golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||||
|
golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||||
|
golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
|
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||||
|
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||||
|
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||||
|
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
|
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||||
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
|
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||||
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
|
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
|
||||||
|
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
|
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
|
||||||
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
|
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
|
||||||
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
|
golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw=
|
||||||
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
|
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
|
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
|
||||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||||
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
@@ -247,34 +292,40 @@ golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGm
|
|||||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
|
golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
|
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
||||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
|
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
|
||||||
gonum.org/v1/gonum v0.8.2 h1:CCXrcPKiGGotvnN6jfUsKk4rRqm7q09/YbKb5xCEvtM=
|
|
||||||
gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
|
gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
|
||||||
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc=
|
gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0=
|
||||||
|
gonum.org/v1/gonum v0.15.0 h1:2lYxjRbTYyxkJxlhC+LvJIx3SsANPdRybu1tGj9/OrQ=
|
||||||
|
gonum.org/v1/gonum v0.15.0/go.mod h1:xzZVBJBtS+Mz4q0Yl2LJTk+OxOg4jiXZ7qBoM0uISGo=
|
||||||
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
|
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
|
||||||
gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
|
gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
|
||||||
|
gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY=
|
||||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||||
|
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||||
google.golang.org/genproto v0.0.0-20200911024640-645f7a48b24f h1:Yv4xsIx7HZOoyUGSJ2ksDyWE2qIBXROsZKt2ny3hCGM=
|
google.golang.org/genproto v0.0.0-20210630183607-d20f26d13c79/go.mod h1:yiaVoXHpRzHGyxV3o4DktVWY4mSUErTKaeEOq6C3t3U=
|
||||||
google.golang.org/genproto v0.0.0-20200911024640-645f7a48b24f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
|
||||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||||
google.golang.org/grpc v1.32.0 h1:zWTV+LMdc3kaiJMSTOFz2UgSBgx8RNQoTGiZu3fR9S0=
|
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
|
||||||
google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v0.0.0-20200910201057-6591123024b3/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
|
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||||
|
google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
|
||||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||||
@@ -283,20 +334,18 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
|
|||||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
|
||||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||||
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||||
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
|
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||||
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
|
||||||
|
google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
gorgonia.org/vecf32 v0.9.0 h1:PClazic1r+JVJ1dEzRXgeiVl4g1/Hf/w+wUSqnco1Xg=
|
gorgonia.org/vecf32 v0.9.0 h1:PClazic1r+JVJ1dEzRXgeiVl4g1/Hf/w+wUSqnco1Xg=
|
||||||
@@ -305,4 +354,5 @@ gorgonia.org/vecf64 v0.9.0 h1:bgZDP5x0OzBF64PjMGC3EvTdOoMEcmfAh1VCUnZFm1A=
|
|||||||
gorgonia.org/vecf64 v0.9.0/go.mod h1:hp7IOWCnRiVQKON73kkC/AUMtEXyf9kGlVrtPQ9ccVA=
|
gorgonia.org/vecf64 v0.9.0/go.mod h1:hp7IOWCnRiVQKON73kkC/AUMtEXyf9kGlVrtPQ9ccVA=
|
||||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
|
nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=
|
||||||
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
|
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ import (
|
|||||||
"log/slog"
|
"log/slog"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strconv"
|
"runtime"
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -35,22 +35,66 @@ func GetSupportedGFX(libDir string) ([]string, error) {
|
|||||||
return ret, nil
|
return ret, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func amdSetVisibleDevices(ids []int, skip map[int]interface{}) {
|
func rocmGetVisibleDevicesEnv(gpuInfo []GpuInfo) (string, string) {
|
||||||
// Set the visible devices if not already set
|
ids := []string{}
|
||||||
// TODO - does sort order matter?
|
for _, info := range gpuInfo {
|
||||||
devices := []string{}
|
if info.Library != "rocm" {
|
||||||
for i := range ids {
|
// TODO shouldn't happen if things are wired correctly...
|
||||||
if _, skipped := skip[i]; skipped {
|
slog.Debug("rocmGetVisibleDevicesEnv skipping over non-rocm device", "library", info.Library)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
devices = append(devices, strconv.Itoa(i))
|
ids = append(ids, info.ID)
|
||||||
|
}
|
||||||
|
return "HIP_VISIBLE_DEVICES", strings.Join(ids, ",")
|
||||||
|
}
|
||||||
|
|
||||||
|
func commonAMDValidateLibDir() (string, error) {
|
||||||
|
// We try to favor system paths first, so that we can wire up the subprocess to use
|
||||||
|
// the system version. Only use our bundled version if the system version doesn't work
|
||||||
|
// This gives users a more recovery options if versions have subtle problems at runtime
|
||||||
|
|
||||||
|
// Prefer explicit HIP env var
|
||||||
|
hipPath := os.Getenv("HIP_PATH")
|
||||||
|
if hipPath != "" {
|
||||||
|
hipLibDir := filepath.Join(hipPath, "bin")
|
||||||
|
if rocmLibUsable(hipLibDir) {
|
||||||
|
slog.Debug("detected ROCM via HIP_PATH=" + hipPath)
|
||||||
|
return hipLibDir, nil
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
val := strings.Join(devices, ",")
|
// Scan the LD_LIBRARY_PATH or PATH
|
||||||
err := os.Setenv("HIP_VISIBLE_DEVICES", val)
|
pathEnv := "LD_LIBRARY_PATH"
|
||||||
if err != nil {
|
if runtime.GOOS == "windows" {
|
||||||
slog.Warn(fmt.Sprintf("failed to set env: %s", err))
|
pathEnv = "PATH"
|
||||||
} else {
|
|
||||||
slog.Info("Setting HIP_VISIBLE_DEVICES=" + val)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
paths := os.Getenv(pathEnv)
|
||||||
|
for _, path := range filepath.SplitList(paths) {
|
||||||
|
d, err := filepath.Abs(path)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if rocmLibUsable(d) {
|
||||||
|
return d, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Well known location(s)
|
||||||
|
for _, path := range RocmStandardLocations {
|
||||||
|
if rocmLibUsable(path) {
|
||||||
|
return path, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Installer payload location if we're running the installed binary
|
||||||
|
exe, err := os.Executable()
|
||||||
|
if err == nil {
|
||||||
|
rocmTargetDir := filepath.Join(filepath.Dir(exe), "rocm")
|
||||||
|
if rocmLibUsable(rocmTargetDir) {
|
||||||
|
slog.Debug("detected ROCM next to ollama executable " + rocmTargetDir)
|
||||||
|
return rocmTargetDir, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return "", fmt.Errorf("no suitable rocm found, falling back to CPU")
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,7 +3,6 @@ package gpu
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"log/slog"
|
"log/slog"
|
||||||
"strconv"
|
|
||||||
"syscall"
|
"syscall"
|
||||||
"unsafe"
|
"unsafe"
|
||||||
|
|
||||||
@@ -69,21 +68,27 @@ func NewHipLib() (*HipLib, error) {
|
|||||||
func (hl *HipLib) Release() {
|
func (hl *HipLib) Release() {
|
||||||
err := windows.FreeLibrary(hl.dll)
|
err := windows.FreeLibrary(hl.dll)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Warn(fmt.Sprintf("failed to unload amdhip64.dll: %s", err))
|
slog.Warn("failed to unload amdhip64.dll", "error", err)
|
||||||
}
|
}
|
||||||
hl.dll = 0
|
hl.dll = 0
|
||||||
}
|
}
|
||||||
|
|
||||||
func (hl *HipLib) AMDDriverVersion() (string, error) {
|
func (hl *HipLib) AMDDriverVersion() (driverMajor, driverMinor int, err error) {
|
||||||
if hl.dll == 0 {
|
if hl.dll == 0 {
|
||||||
return "", fmt.Errorf("dll has been unloaded")
|
return 0, 0, fmt.Errorf("dll has been unloaded")
|
||||||
}
|
}
|
||||||
var version int
|
var version int
|
||||||
status, _, err := syscall.SyscallN(hl.hipDriverGetVersion, uintptr(unsafe.Pointer(&version)))
|
status, _, err := syscall.SyscallN(hl.hipDriverGetVersion, uintptr(unsafe.Pointer(&version)))
|
||||||
if status != hipSuccess {
|
if status != hipSuccess {
|
||||||
return "", fmt.Errorf("failed call to hipDriverGetVersion: %d %s", status, err)
|
return 0, 0, fmt.Errorf("failed call to hipDriverGetVersion: %d %s", status, err)
|
||||||
}
|
}
|
||||||
return strconv.Itoa(version), nil
|
|
||||||
|
slog.Debug("hipDriverGetVersion", "version", version)
|
||||||
|
// TODO - this isn't actually right, but the docs claim hipDriverGetVersion isn't accurate anyway...
|
||||||
|
driverMajor = version / 1000
|
||||||
|
driverMinor = (version - (driverMajor * 1000)) / 10
|
||||||
|
|
||||||
|
return driverMajor, driverMinor, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (hl *HipLib) HipGetDeviceCount() int {
|
func (hl *HipLib) HipGetDeviceCount() int {
|
||||||
@@ -98,7 +103,7 @@ func (hl *HipLib) HipGetDeviceCount() int {
|
|||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
if status != hipSuccess {
|
if status != hipSuccess {
|
||||||
slog.Warn(fmt.Sprintf("failed call to hipGetDeviceCount: %d %s", status, err))
|
slog.Warn("failed call to hipGetDeviceCount", "status", status, "error", err)
|
||||||
}
|
}
|
||||||
return count
|
return count
|
||||||
}
|
}
|
||||||
|
|||||||
545
gpu/amd_linux.go
545
gpu/amd_linux.go
@@ -8,9 +8,12 @@ import (
|
|||||||
"log/slog"
|
"log/slog"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"regexp"
|
||||||
"slices"
|
"slices"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/ollama/ollama/format"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Discovery logic for AMD/ROCm GPUs
|
// Discovery logic for AMD/ROCm GPUs
|
||||||
@@ -23,187 +26,157 @@ const (
|
|||||||
// Prefix with the node dir
|
// Prefix with the node dir
|
||||||
GPUTotalMemoryFileGlob = "mem_banks/*/properties" // size_in_bytes line
|
GPUTotalMemoryFileGlob = "mem_banks/*/properties" // size_in_bytes line
|
||||||
GPUUsedMemoryFileGlob = "mem_banks/*/used_memory"
|
GPUUsedMemoryFileGlob = "mem_banks/*/used_memory"
|
||||||
RocmStandardLocation = "/opt/rocm/lib"
|
|
||||||
|
|
||||||
// TODO find a better way to detect iGPU instead of minimum memory
|
|
||||||
IGPUMemLimit = 1024 * 1024 * 1024 // 512G is what they typically report, so anything less than 1G must be iGPU
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
// Used to validate if the given ROCm lib is usable
|
// Used to validate if the given ROCm lib is usable
|
||||||
ROCmLibGlobs = []string{"libhipblas.so.2*", "rocblas"} // TODO - probably include more coverage of files here...
|
ROCmLibGlobs = []string{"libhipblas.so.2*", "rocblas"} // TODO - probably include more coverage of files here...
|
||||||
|
RocmStandardLocations = []string{"/opt/rocm/lib", "/usr/lib64"}
|
||||||
)
|
)
|
||||||
|
|
||||||
// Gather GPU information from the amdgpu driver if any supported GPUs are detected
|
// Gather GPU information from the amdgpu driver if any supported GPUs are detected
|
||||||
// HIP_VISIBLE_DEVICES will be set if we detect a mix of unsupported and supported devices
|
func AMDGetGPUInfo() []GpuInfo {
|
||||||
// and the user hasn't already set this variable
|
resp := []GpuInfo{}
|
||||||
func AMDGetGPUInfo(resp *GpuInfo) {
|
|
||||||
// TODO - DRY this out with windows
|
|
||||||
if !AMDDetected() {
|
if !AMDDetected() {
|
||||||
return
|
return resp
|
||||||
}
|
}
|
||||||
skip := map[int]interface{}{}
|
|
||||||
|
|
||||||
// Opportunistic logging of driver version to aid in troubleshooting
|
// Opportunistic logging of driver version to aid in troubleshooting
|
||||||
ver, err := AMDDriverVersion()
|
driverMajor, driverMinor, err := AMDDriverVersion()
|
||||||
if err == nil {
|
|
||||||
slog.Info("AMD Driver: " + ver)
|
|
||||||
} else {
|
|
||||||
// TODO - if we see users crash and burn with the upstreamed kernel this can be adjusted to hard-fail rocm support and fallback to CPU
|
|
||||||
slog.Warn(fmt.Sprintf("ollama recommends running the https://www.amd.com/en/support/linux-drivers: %s", err))
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the user has specified exactly which GPUs to use, look up their memory
|
|
||||||
visibleDevices := os.Getenv("HIP_VISIBLE_DEVICES")
|
|
||||||
if visibleDevices != "" {
|
|
||||||
ids := []int{}
|
|
||||||
for _, idStr := range strings.Split(visibleDevices, ",") {
|
|
||||||
id, err := strconv.Atoi(idStr)
|
|
||||||
if err != nil {
|
|
||||||
slog.Warn(fmt.Sprintf("malformed HIP_VISIBLE_DEVICES=%s %s", visibleDevices, err))
|
|
||||||
} else {
|
|
||||||
ids = append(ids, id)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
amdProcMemLookup(resp, nil, ids)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Gather GFX version information from all detected cards
|
|
||||||
gfx := AMDGFXVersions()
|
|
||||||
verStrings := []string{}
|
|
||||||
for i, v := range gfx {
|
|
||||||
verStrings = append(verStrings, v.ToGFXString())
|
|
||||||
if v.Major == 0 {
|
|
||||||
// Silently skip CPUs
|
|
||||||
skip[i] = struct{}{}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if v.Major < 9 {
|
|
||||||
// TODO consider this a build-time setting if we can support 8xx family GPUs
|
|
||||||
slog.Warn(fmt.Sprintf("amdgpu [%d] too old %s", i, v.ToGFXString()))
|
|
||||||
skip[i] = struct{}{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
slog.Info(fmt.Sprintf("detected amdgpu versions %v", verStrings))
|
|
||||||
|
|
||||||
// Abort if all GPUs are skipped
|
|
||||||
if len(skip) >= len(gfx) {
|
|
||||||
slog.Info("all detected amdgpus are skipped, falling back to CPU")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// If we got this far, then we have at least 1 GPU that's a ROCm candidate, so make sure we have a lib
|
|
||||||
libDir, err := AMDValidateLibDir()
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Warn(fmt.Sprintf("unable to verify rocm library, will use cpu: %s", err))
|
// TODO - if we see users crash and burn with the upstreamed kernel this can be adjusted to hard-fail rocm support and fallback to CPU
|
||||||
return
|
slog.Warn("ollama recommends running the https://www.amd.com/en/support/linux-drivers", "error", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
updateLibPath(libDir)
|
// Determine if the user has already pre-selected which GPUs to look at, then ignore the others
|
||||||
|
var visibleDevices []string
|
||||||
|
hipVD := os.Getenv("HIP_VISIBLE_DEVICES") // zero based index only
|
||||||
|
rocrVD := os.Getenv("ROCR_VISIBLE_DEVICES") // zero based index or UUID, but consumer cards seem to not support UUID
|
||||||
|
gpuDO := os.Getenv("GPU_DEVICE_ORDINAL") // zero based index
|
||||||
|
switch {
|
||||||
|
// TODO is this priorty order right?
|
||||||
|
case hipVD != "":
|
||||||
|
visibleDevices = strings.Split(hipVD, ",")
|
||||||
|
case rocrVD != "":
|
||||||
|
visibleDevices = strings.Split(rocrVD, ",")
|
||||||
|
// TODO - since we don't yet support UUIDs, consider detecting and reporting here
|
||||||
|
// all our test systems show GPU-XX indicating UUID is not supported
|
||||||
|
case gpuDO != "":
|
||||||
|
visibleDevices = strings.Split(gpuDO, ",")
|
||||||
|
}
|
||||||
|
|
||||||
gfxOverride := os.Getenv("HSA_OVERRIDE_GFX_VERSION")
|
gfxOverride := os.Getenv("HSA_OVERRIDE_GFX_VERSION")
|
||||||
if gfxOverride == "" {
|
var supported []string
|
||||||
supported, err := GetSupportedGFX(libDir)
|
libDir := ""
|
||||||
|
|
||||||
|
// The amdgpu driver always exposes the host CPU(s) first, but we have to skip them and subtract
|
||||||
|
// from the other IDs to get alignment with the HIP libraries expectations (zero is the first GPU, not the CPU)
|
||||||
|
matches, _ := filepath.Glob(GPUPropertiesFileGlob)
|
||||||
|
cpuCount := 0
|
||||||
|
for _, match := range matches {
|
||||||
|
slog.Debug("evaluating amdgpu node " + match)
|
||||||
|
fp, err := os.Open(match)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Warn(fmt.Sprintf("failed to lookup supported GFX types, falling back to CPU mode: %s", err))
|
slog.Debug("failed to open sysfs node", "file", match, "error", err)
|
||||||
return
|
|
||||||
}
|
|
||||||
slog.Debug(fmt.Sprintf("rocm supported GPU types %v", supported))
|
|
||||||
|
|
||||||
for i, v := range gfx {
|
|
||||||
if !slices.Contains[[]string, string](supported, v.ToGFXString()) {
|
|
||||||
slog.Warn(fmt.Sprintf("amdgpu [%d] %s is not supported by %s %v", i, v.ToGFXString(), libDir, supported))
|
|
||||||
// TODO - consider discrete markdown just for ROCM troubleshooting?
|
|
||||||
slog.Warn("See https://github.com/ollama/ollama/blob/main/docs/gpu.md#overrides for HSA_OVERRIDE_GFX_VERSION usage")
|
|
||||||
skip[i] = struct{}{}
|
|
||||||
} else {
|
|
||||||
slog.Info(fmt.Sprintf("amdgpu [%d] %s is supported", i, v.ToGFXString()))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
slog.Debug("skipping rocm gfx compatibility check with HSA_OVERRIDE_GFX_VERSION=" + gfxOverride)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(skip) >= len(gfx) {
|
|
||||||
slog.Info("all detected amdgpus are skipped, falling back to CPU")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
ids := make([]int, len(gfx))
|
|
||||||
i := 0
|
|
||||||
for k := range gfx {
|
|
||||||
ids[i] = k
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
amdProcMemLookup(resp, skip, ids)
|
|
||||||
if resp.memInfo.DeviceCount == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if len(skip) > 0 {
|
|
||||||
amdSetVisibleDevices(ids, skip)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func updateLibPath(libDir string) {
|
|
||||||
ldPaths := []string{}
|
|
||||||
if val, ok := os.LookupEnv("LD_LIBRARY_PATH"); ok {
|
|
||||||
ldPaths = strings.Split(val, ":")
|
|
||||||
}
|
|
||||||
for _, d := range ldPaths {
|
|
||||||
if d == libDir {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
val := strings.Join(append(ldPaths, libDir), ":")
|
|
||||||
slog.Debug("updated lib path", "LD_LIBRARY_PATH", val)
|
|
||||||
os.Setenv("LD_LIBRARY_PATH", val)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Walk the sysfs nodes for the available GPUs and gather information from them
|
|
||||||
// skipping over any devices in the skip map
|
|
||||||
func amdProcMemLookup(resp *GpuInfo, skip map[int]interface{}, ids []int) {
|
|
||||||
resp.memInfo.DeviceCount = 0
|
|
||||||
resp.memInfo.TotalMemory = 0
|
|
||||||
resp.memInfo.FreeMemory = 0
|
|
||||||
slog.Debug("discovering VRAM for amdgpu devices")
|
|
||||||
if len(ids) == 0 {
|
|
||||||
entries, err := os.ReadDir(AMDNodesSysfsDir)
|
|
||||||
if err != nil {
|
|
||||||
slog.Warn(fmt.Sprintf("failed to read amdgpu sysfs %s - %s", AMDNodesSysfsDir, err))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for _, node := range entries {
|
|
||||||
if !node.IsDir() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
id, err := strconv.Atoi(node.Name())
|
|
||||||
if err != nil {
|
|
||||||
slog.Warn("malformed amdgpu sysfs node id " + node.Name())
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
ids = append(ids, id)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
slog.Debug(fmt.Sprintf("amdgpu devices %v", ids))
|
|
||||||
|
|
||||||
for _, id := range ids {
|
|
||||||
if _, skipped := skip[id]; skipped {
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
defer fp.Close()
|
||||||
|
nodeID, err := strconv.Atoi(filepath.Base(filepath.Dir(match)))
|
||||||
|
if err != nil {
|
||||||
|
slog.Debug("failed to parse node ID", "error", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
scanner := bufio.NewScanner(fp)
|
||||||
|
isCPU := false
|
||||||
|
var major, minor, patch uint64
|
||||||
|
var vendor, device uint64
|
||||||
|
for scanner.Scan() {
|
||||||
|
line := strings.TrimSpace(scanner.Text())
|
||||||
|
// Note: we could also use "cpu_cores_count X" where X is greater than zero to detect CPUs
|
||||||
|
if strings.HasPrefix(line, "gfx_target_version") {
|
||||||
|
ver := strings.Fields(line)
|
||||||
|
|
||||||
|
// Detect CPUs
|
||||||
|
if len(ver) == 2 && ver[1] == "0" {
|
||||||
|
slog.Debug("detected CPU " + match)
|
||||||
|
isCPU = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(ver) != 2 || len(ver[1]) < 5 {
|
||||||
|
slog.Warn("malformed "+match, "gfx_target_version", line)
|
||||||
|
// If this winds up being a CPU, our offsets may be wrong
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
l := len(ver[1])
|
||||||
|
var err1, err2, err3 error
|
||||||
|
patch, err1 = strconv.ParseUint(ver[1][l-2:l], 10, 32)
|
||||||
|
minor, err2 = strconv.ParseUint(ver[1][l-4:l-2], 10, 32)
|
||||||
|
major, err3 = strconv.ParseUint(ver[1][:l-4], 10, 32)
|
||||||
|
if err1 != nil || err2 != nil || err3 != nil {
|
||||||
|
slog.Debug("malformed int " + line)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
} else if strings.HasPrefix(line, "vendor_id") {
|
||||||
|
ver := strings.Fields(line)
|
||||||
|
if len(ver) != 2 {
|
||||||
|
slog.Debug("malformed vendor_id", "vendor_id", line)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
vendor, err = strconv.ParseUint(ver[1], 10, 32)
|
||||||
|
if err != nil {
|
||||||
|
slog.Debug("malformed vendor_id" + line)
|
||||||
|
}
|
||||||
|
} else if strings.HasPrefix(line, "device_id") {
|
||||||
|
ver := strings.Fields(line)
|
||||||
|
if len(ver) != 2 {
|
||||||
|
slog.Debug("malformed device_id", "device_id", line)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
device, err = strconv.ParseUint(ver[1], 10, 32)
|
||||||
|
if err != nil {
|
||||||
|
slog.Debug("malformed device_id" + line)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO - any other properties we want to extract and record?
|
||||||
|
// vendor_id + device_id -> pci lookup for "Name"
|
||||||
|
// Other metrics that may help us understand relative performance between multiple GPUs
|
||||||
|
}
|
||||||
|
|
||||||
|
if isCPU {
|
||||||
|
cpuCount++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// CPUs are always first in the list
|
||||||
|
gpuID := nodeID - cpuCount
|
||||||
|
|
||||||
|
// Shouldn't happen, but just in case...
|
||||||
|
if gpuID < 0 {
|
||||||
|
slog.Error("unexpected amdgpu sysfs data resulted in negative GPU ID, please set OLLAMA_DEBUG=1 and report an issue")
|
||||||
|
return []GpuInfo{}
|
||||||
|
}
|
||||||
|
|
||||||
|
if int(major) < RocmComputeMin {
|
||||||
|
slog.Warn(fmt.Sprintf("amdgpu too old gfx%d%x%x", major, minor, patch), "gpu", gpuID)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Look up the memory for the current node
|
||||||
totalMemory := uint64(0)
|
totalMemory := uint64(0)
|
||||||
usedMemory := uint64(0)
|
usedMemory := uint64(0)
|
||||||
// Adjust for sysfs vs HIP ids
|
propGlob := filepath.Join(AMDNodesSysfsDir, strconv.Itoa(nodeID), GPUTotalMemoryFileGlob)
|
||||||
propGlob := filepath.Join(AMDNodesSysfsDir, strconv.Itoa(id+1), GPUTotalMemoryFileGlob)
|
|
||||||
propFiles, err := filepath.Glob(propGlob)
|
propFiles, err := filepath.Glob(propGlob)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Warn(fmt.Sprintf("error looking up total GPU memory: %s %s", propGlob, err))
|
slog.Warn("error looking up total GPU memory", "glob", propGlob, "error", err)
|
||||||
}
|
}
|
||||||
// 1 or more memory banks - sum the values of all of them
|
// 1 or more memory banks - sum the values of all of them
|
||||||
for _, propFile := range propFiles {
|
for _, propFile := range propFiles {
|
||||||
fp, err := os.Open(propFile)
|
fp, err := os.Open(propFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Warn(fmt.Sprintf("failed to open sysfs node file %s: %s", propFile, err))
|
slog.Warn("failed to open sysfs node", "file", propFile, "erroir", err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
defer fp.Close()
|
defer fp.Close()
|
||||||
@@ -226,49 +199,118 @@ func amdProcMemLookup(resp *GpuInfo, skip map[int]interface{}, ids []int) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
if totalMemory == 0 {
|
if totalMemory == 0 {
|
||||||
slog.Warn(fmt.Sprintf("amdgpu [%d] reports zero total memory, skipping", id))
|
slog.Warn("amdgpu reports zero total memory", "gpu", gpuID)
|
||||||
skip[id] = struct{}{}
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if totalMemory < IGPUMemLimit {
|
usedGlob := filepath.Join(AMDNodesSysfsDir, strconv.Itoa(nodeID), GPUUsedMemoryFileGlob)
|
||||||
slog.Info(fmt.Sprintf("amdgpu [%d] appears to be an iGPU with %dM reported total memory, skipping", id, totalMemory/1024/1024))
|
|
||||||
skip[id] = struct{}{}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
usedGlob := filepath.Join(AMDNodesSysfsDir, strconv.Itoa(id), GPUUsedMemoryFileGlob)
|
|
||||||
usedFiles, err := filepath.Glob(usedGlob)
|
usedFiles, err := filepath.Glob(usedGlob)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Warn(fmt.Sprintf("error looking up used GPU memory: %s %s", usedGlob, err))
|
slog.Warn("error looking up used GPU memory", "glob", usedGlob, "error", err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
for _, usedFile := range usedFiles {
|
for _, usedFile := range usedFiles {
|
||||||
fp, err := os.Open(usedFile)
|
fp, err := os.Open(usedFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Warn(fmt.Sprintf("failed to open sysfs node file %s: %s", usedFile, err))
|
slog.Warn("failed to open sysfs node", "file", usedFile, "error", err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
defer fp.Close()
|
defer fp.Close()
|
||||||
data, err := io.ReadAll(fp)
|
data, err := io.ReadAll(fp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Warn(fmt.Sprintf("failed to read sysfs node file %s: %s", usedFile, err))
|
slog.Warn("failed to read sysfs node", "file", usedFile, "error", err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
used, err := strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
|
used, err := strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Warn(fmt.Sprintf("malformed used memory %s: %s", string(data), err))
|
slog.Warn("malformed used memory", "data", string(data), "error", err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
usedMemory += used
|
usedMemory += used
|
||||||
}
|
}
|
||||||
slog.Info(fmt.Sprintf("[%d] amdgpu totalMemory %dM", id, totalMemory/1024/1024))
|
|
||||||
slog.Info(fmt.Sprintf("[%d] amdgpu freeMemory %dM", id, (totalMemory-usedMemory)/1024/1024))
|
// iGPU detection, remove this check once we can support an iGPU variant of the rocm library
|
||||||
resp.memInfo.DeviceCount++
|
if totalMemory < IGPUMemLimit {
|
||||||
resp.memInfo.TotalMemory += totalMemory
|
slog.Info("unsupported Radeon iGPU detected skipping", "id", gpuID, "total", format.HumanBytes2(totalMemory))
|
||||||
resp.memInfo.FreeMemory += (totalMemory - usedMemory)
|
continue
|
||||||
|
}
|
||||||
|
var name string
|
||||||
|
// TODO - PCI ID lookup
|
||||||
|
if vendor > 0 && device > 0 {
|
||||||
|
name = fmt.Sprintf("%04x:%04x", vendor, device)
|
||||||
|
}
|
||||||
|
|
||||||
|
slog.Debug("amdgpu memory", "gpu", gpuID, "total", format.HumanBytes2(totalMemory))
|
||||||
|
slog.Debug("amdgpu memory", "gpu", gpuID, "available", format.HumanBytes2(totalMemory-usedMemory))
|
||||||
|
gpuInfo := GpuInfo{
|
||||||
|
Library: "rocm",
|
||||||
|
memInfo: memInfo{
|
||||||
|
TotalMemory: totalMemory,
|
||||||
|
FreeMemory: (totalMemory - usedMemory),
|
||||||
|
},
|
||||||
|
ID: fmt.Sprintf("%d", gpuID),
|
||||||
|
Name: name,
|
||||||
|
Compute: fmt.Sprintf("gfx%d%x%x", major, minor, patch),
|
||||||
|
MinimumMemory: rocmMinimumMemory,
|
||||||
|
DriverMajor: driverMajor,
|
||||||
|
DriverMinor: driverMinor,
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the user wants to filter to a subset of devices, filter out if we aren't a match
|
||||||
|
if len(visibleDevices) > 0 {
|
||||||
|
include := false
|
||||||
|
for _, visible := range visibleDevices {
|
||||||
|
if visible == gpuInfo.ID {
|
||||||
|
include = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !include {
|
||||||
|
slog.Info("filtering out device per user request", "id", gpuInfo.ID, "visible_devices", visibleDevices)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Final validation is gfx compatibility - load the library if we haven't already loaded it
|
||||||
|
// even if the user overrides, we still need to validate the library
|
||||||
|
if libDir == "" {
|
||||||
|
libDir, err = AMDValidateLibDir()
|
||||||
|
if err != nil {
|
||||||
|
slog.Warn("unable to verify rocm library, will use cpu", "error", err)
|
||||||
|
return []GpuInfo{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
gpuInfo.DependencyPath = libDir
|
||||||
|
|
||||||
|
if gfxOverride == "" {
|
||||||
|
// Only load supported list once
|
||||||
|
if len(supported) == 0 {
|
||||||
|
supported, err = GetSupportedGFX(libDir)
|
||||||
|
if err != nil {
|
||||||
|
slog.Warn("failed to lookup supported GFX types, falling back to CPU mode", "error", err)
|
||||||
|
return []GpuInfo{}
|
||||||
|
}
|
||||||
|
slog.Debug("rocm supported GPUs", "types", supported)
|
||||||
|
}
|
||||||
|
gfx := gpuInfo.Compute
|
||||||
|
if !slices.Contains[[]string, string](supported, gfx) {
|
||||||
|
slog.Warn("amdgpu is not supported", "gpu", gpuInfo.ID, "gpu_type", gfx, "library", libDir, "supported_types", supported)
|
||||||
|
// TODO - consider discrete markdown just for ROCM troubleshooting?
|
||||||
|
slog.Warn("See https://github.com/ollama/ollama/blob/main/docs/gpu.md#overrides for HSA_OVERRIDE_GFX_VERSION usage")
|
||||||
|
continue
|
||||||
|
} else {
|
||||||
|
slog.Info("amdgpu is supported", "gpu", gpuInfo.ID, "gpu_type", gfx)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
slog.Info("skipping rocm gfx compatibility check", "HSA_OVERRIDE_GFX_VERSION", gfxOverride)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The GPU has passed all the verification steps and is supported
|
||||||
|
resp = append(resp, gpuInfo)
|
||||||
}
|
}
|
||||||
if resp.memInfo.DeviceCount > 0 {
|
if len(resp) == 0 {
|
||||||
resp.Library = "rocm"
|
slog.Info("no compatible amdgpu devices detected")
|
||||||
}
|
}
|
||||||
|
return resp
|
||||||
}
|
}
|
||||||
|
|
||||||
// Quick check for AMD driver so we can skip amdgpu discovery if not present
|
// Quick check for AMD driver so we can skip amdgpu discovery if not present
|
||||||
@@ -280,87 +322,24 @@ func AMDDetected() bool {
|
|||||||
slog.Debug("amdgpu driver not detected " + sysfsDir)
|
slog.Debug("amdgpu driver not detected " + sysfsDir)
|
||||||
return false
|
return false
|
||||||
} else if err != nil {
|
} else if err != nil {
|
||||||
slog.Debug(fmt.Sprintf("error looking up amd driver %s %s", sysfsDir, err))
|
slog.Debug("error looking up amd driver", "path", sysfsDir, "error", err)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func setupLink(source, target string) error {
|
|
||||||
if err := os.RemoveAll(target); err != nil {
|
|
||||||
return fmt.Errorf("failed to remove old rocm directory %s %w", target, err)
|
|
||||||
}
|
|
||||||
if err := os.Symlink(source, target); err != nil {
|
|
||||||
return fmt.Errorf("failed to create link %s => %s %w", source, target, err)
|
|
||||||
}
|
|
||||||
slog.Debug(fmt.Sprintf("host rocm linked %s => %s", source, target))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure the AMD rocm lib dir is wired up
|
|
||||||
// Prefer to use host installed ROCm, as long as it meets our minimum requirements
|
// Prefer to use host installed ROCm, as long as it meets our minimum requirements
|
||||||
// failing that, tell the user how to download it on their own
|
// failing that, tell the user how to download it on their own
|
||||||
func AMDValidateLibDir() (string, error) {
|
func AMDValidateLibDir() (string, error) {
|
||||||
// We rely on the rpath compiled into our library to find rocm
|
libDir, err := commonAMDValidateLibDir()
|
||||||
// so we establish a symlink to wherever we find it on the system
|
|
||||||
// to <payloads>/rocm
|
|
||||||
payloadsDir, err := PayloadsDir()
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
// If we already have a rocm dependency wired, nothing more to do
|
|
||||||
rocmTargetDir := filepath.Clean(filepath.Join(payloadsDir, "..", "rocm"))
|
|
||||||
if rocmLibUsable(rocmTargetDir) {
|
|
||||||
return rocmTargetDir, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// next to the running binary
|
|
||||||
exe, err := os.Executable()
|
|
||||||
if err == nil {
|
if err == nil {
|
||||||
peerDir := filepath.Dir(exe)
|
return libDir, nil
|
||||||
if rocmLibUsable(peerDir) {
|
|
||||||
slog.Debug("detected ROCM next to ollama executable " + peerDir)
|
|
||||||
return rocmTargetDir, setupLink(peerDir, rocmTargetDir)
|
|
||||||
}
|
|
||||||
peerDir = filepath.Join(filepath.Dir(exe), "rocm")
|
|
||||||
if rocmLibUsable(peerDir) {
|
|
||||||
slog.Debug("detected ROCM next to ollama executable " + peerDir)
|
|
||||||
return rocmTargetDir, setupLink(peerDir, rocmTargetDir)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Well known ollama installer path
|
// Well known ollama installer path
|
||||||
installedRocmDir := "/usr/share/ollama/lib/rocm"
|
installedRocmDir := "/usr/share/ollama/lib/rocm"
|
||||||
if rocmLibUsable(installedRocmDir) {
|
if rocmLibUsable(installedRocmDir) {
|
||||||
return rocmTargetDir, setupLink(installedRocmDir, rocmTargetDir)
|
return installedRocmDir, nil
|
||||||
}
|
|
||||||
|
|
||||||
// Prefer explicit HIP env var
|
|
||||||
hipPath := os.Getenv("HIP_PATH")
|
|
||||||
if hipPath != "" {
|
|
||||||
hipLibDir := filepath.Join(hipPath, "lib")
|
|
||||||
if rocmLibUsable(hipLibDir) {
|
|
||||||
slog.Debug("detected ROCM via HIP_PATH=" + hipPath)
|
|
||||||
return rocmTargetDir, setupLink(hipLibDir, rocmTargetDir)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Scan the library path for potential matches
|
|
||||||
ldPaths := strings.Split(os.Getenv("LD_LIBRARY_PATH"), ":")
|
|
||||||
for _, ldPath := range ldPaths {
|
|
||||||
d, err := filepath.Abs(ldPath)
|
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if rocmLibUsable(d) {
|
|
||||||
return rocmTargetDir, setupLink(d, rocmTargetDir)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Well known location(s)
|
|
||||||
if rocmLibUsable("/opt/rocm/lib") {
|
|
||||||
return rocmTargetDir, setupLink("/opt/rocm/lib", rocmTargetDir)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// If we still haven't found a usable rocm, the user will have to install it on their own
|
// If we still haven't found a usable rocm, the user will have to install it on their own
|
||||||
@@ -368,84 +347,34 @@ func AMDValidateLibDir() (string, error) {
|
|||||||
return "", fmt.Errorf("no suitable rocm found, falling back to CPU")
|
return "", fmt.Errorf("no suitable rocm found, falling back to CPU")
|
||||||
}
|
}
|
||||||
|
|
||||||
func AMDDriverVersion() (string, error) {
|
func AMDDriverVersion() (driverMajor, driverMinor int, err error) {
|
||||||
_, err := os.Stat(DriverVersionFile)
|
_, err = os.Stat(DriverVersionFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", fmt.Errorf("amdgpu version file missing: %s %w", DriverVersionFile, err)
|
return 0, 0, fmt.Errorf("amdgpu version file missing: %s %w", DriverVersionFile, err)
|
||||||
}
|
}
|
||||||
fp, err := os.Open(DriverVersionFile)
|
fp, err := os.Open(DriverVersionFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return 0, 0, err
|
||||||
}
|
}
|
||||||
defer fp.Close()
|
defer fp.Close()
|
||||||
verString, err := io.ReadAll(fp)
|
verString, err := io.ReadAll(fp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return 0, 0, err
|
||||||
}
|
}
|
||||||
return strings.TrimSpace(string(verString)), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func AMDGFXVersions() map[int]Version {
|
pattern := `\A(\d+)\.(\d+).*`
|
||||||
// The amdgpu driver always exposes the host CPU as node 0, but we have to skip that and subtract one
|
regex := regexp.MustCompile(pattern)
|
||||||
// from the other IDs to get alignment with the HIP libraries expectations (zero is the first GPU, not the CPU)
|
match := regex.FindStringSubmatch(string(verString))
|
||||||
res := map[int]Version{}
|
if len(match) < 2 {
|
||||||
matches, _ := filepath.Glob(GPUPropertiesFileGlob)
|
return 0, 0, fmt.Errorf("malformed version string %s", string(verString))
|
||||||
for _, match := range matches {
|
|
||||||
fp, err := os.Open(match)
|
|
||||||
if err != nil {
|
|
||||||
slog.Debug(fmt.Sprintf("failed to open sysfs node file %s: %s", match, err))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
defer fp.Close()
|
|
||||||
i, err := strconv.Atoi(filepath.Base(filepath.Dir(match)))
|
|
||||||
if err != nil {
|
|
||||||
slog.Debug(fmt.Sprintf("failed to parse node ID %s", err))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if i == 0 {
|
|
||||||
// Skipping the CPU
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// Align with HIP IDs (zero is first GPU, not CPU)
|
|
||||||
i -= 1
|
|
||||||
|
|
||||||
scanner := bufio.NewScanner(fp)
|
|
||||||
for scanner.Scan() {
|
|
||||||
line := strings.TrimSpace(scanner.Text())
|
|
||||||
if strings.HasPrefix(line, "gfx_target_version") {
|
|
||||||
ver := strings.Fields(line)
|
|
||||||
if len(ver) != 2 || len(ver[1]) < 5 {
|
|
||||||
if ver[1] != "0" {
|
|
||||||
slog.Debug("malformed " + line)
|
|
||||||
}
|
|
||||||
res[i] = Version{
|
|
||||||
Major: 0,
|
|
||||||
Minor: 0,
|
|
||||||
Patch: 0,
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
l := len(ver[1])
|
|
||||||
patch, err1 := strconv.ParseUint(ver[1][l-2:l], 10, 32)
|
|
||||||
minor, err2 := strconv.ParseUint(ver[1][l-4:l-2], 10, 32)
|
|
||||||
major, err3 := strconv.ParseUint(ver[1][:l-4], 10, 32)
|
|
||||||
if err1 != nil || err2 != nil || err3 != nil {
|
|
||||||
slog.Debug("malformed int " + line)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
res[i] = Version{
|
|
||||||
Major: uint(major),
|
|
||||||
Minor: uint(minor),
|
|
||||||
Patch: uint(patch),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return res
|
driverMajor, err = strconv.Atoi(match[1])
|
||||||
}
|
if err != nil {
|
||||||
|
return 0, 0, err
|
||||||
func (v Version) ToGFXString() string {
|
}
|
||||||
return fmt.Sprintf("gfx%d%d%d", v.Major, v.Minor, v.Patch)
|
driverMinor, err = strconv.Atoi(match[2])
|
||||||
|
if err != nil {
|
||||||
|
return 0, 0, err
|
||||||
|
}
|
||||||
|
return driverMajor, driverMinor, nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -8,10 +8,11 @@ import (
|
|||||||
"path/filepath"
|
"path/filepath"
|
||||||
"slices"
|
"slices"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/ollama/ollama/format"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
RocmStandardLocation = "C:\\Program Files\\AMD\\ROCm\\5.7\\bin" // TODO glob?
|
|
||||||
|
|
||||||
// TODO We're lookinng for this exact name to detect iGPUs since hipGetDeviceProperties never reports integrated==true
|
// TODO We're lookinng for this exact name to detect iGPUs since hipGetDeviceProperties never reports integrated==true
|
||||||
iGPUName = "AMD Radeon(TM) Graphics"
|
iGPUName = "AMD Radeon(TM) Graphics"
|
||||||
@@ -19,39 +20,35 @@ const (
|
|||||||
|
|
||||||
var (
|
var (
|
||||||
// Used to validate if the given ROCm lib is usable
|
// Used to validate if the given ROCm lib is usable
|
||||||
ROCmLibGlobs = []string{"hipblas.dll", "rocblas"} // TODO - probably include more coverage of files here...
|
ROCmLibGlobs = []string{"hipblas.dll", "rocblas"} // TODO - probably include more coverage of files here...
|
||||||
|
RocmStandardLocations = []string{"C:\\Program Files\\AMD\\ROCm\\5.7\\bin"} // TODO glob?
|
||||||
)
|
)
|
||||||
|
|
||||||
func AMDGetGPUInfo(resp *GpuInfo) {
|
func AMDGetGPUInfo() []GpuInfo {
|
||||||
|
resp := []GpuInfo{}
|
||||||
hl, err := NewHipLib()
|
hl, err := NewHipLib()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Debug(err.Error())
|
slog.Debug(err.Error())
|
||||||
return
|
return nil
|
||||||
}
|
}
|
||||||
defer hl.Release()
|
defer hl.Release()
|
||||||
skip := map[int]interface{}{}
|
|
||||||
ids := []int{}
|
|
||||||
resp.memInfo.DeviceCount = 0
|
|
||||||
resp.memInfo.TotalMemory = 0
|
|
||||||
resp.memInfo.FreeMemory = 0
|
|
||||||
|
|
||||||
ver, err := hl.AMDDriverVersion()
|
// TODO - this reports incorrect version information, so omitting for now
|
||||||
if err == nil {
|
// driverMajor, driverMinor, err := hl.AMDDriverVersion()
|
||||||
slog.Info("AMD Driver: " + ver)
|
// if err != nil {
|
||||||
} else {
|
// // For now this is benign, but we may eventually need to fail compatibility checks
|
||||||
// For now this is benign, but we may eventually need to fail compatibility checks
|
// slog.Debug("error looking up amd driver version", "error", err)
|
||||||
slog.Debug(fmt.Sprintf("error looking up amd driver version: %s", err))
|
// }
|
||||||
}
|
|
||||||
|
|
||||||
// Note: the HIP library automatically handles HIP_VISIBLE_DEVICES
|
// Note: the HIP library automatically handles subsetting to any HIP_VISIBLE_DEVICES the user specified
|
||||||
count := hl.HipGetDeviceCount()
|
count := hl.HipGetDeviceCount()
|
||||||
if count == 0 {
|
if count == 0 {
|
||||||
return
|
return nil
|
||||||
}
|
}
|
||||||
libDir, err := AMDValidateLibDir()
|
libDir, err := AMDValidateLibDir()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Warn(fmt.Sprintf("unable to verify rocm library, will use cpu: %s", err))
|
slog.Warn("unable to verify rocm library, will use cpu", "error", err)
|
||||||
return
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
var supported []string
|
var supported []string
|
||||||
@@ -59,95 +56,94 @@ func AMDGetGPUInfo(resp *GpuInfo) {
|
|||||||
if gfxOverride == "" {
|
if gfxOverride == "" {
|
||||||
supported, err = GetSupportedGFX(libDir)
|
supported, err = GetSupportedGFX(libDir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Warn(fmt.Sprintf("failed to lookup supported GFX types, falling back to CPU mode: %s", err))
|
slog.Warn("failed to lookup supported GFX types, falling back to CPU mode", "error", err)
|
||||||
return
|
return nil
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
slog.Debug("skipping rocm gfx compatibility check with HSA_OVERRIDE_GFX_VERSION=" + gfxOverride)
|
slog.Info("skipping rocm gfx compatibility check", "HSA_OVERRIDE_GFX_VERSION", gfxOverride)
|
||||||
}
|
}
|
||||||
|
|
||||||
slog.Info(fmt.Sprintf("detected %d hip devices", count))
|
slog.Debug("detected hip devices", "count", count)
|
||||||
|
// TODO how to determine the underlying device ID when visible devices is causing this to subset?
|
||||||
for i := 0; i < count; i++ {
|
for i := 0; i < count; i++ {
|
||||||
ids = append(ids, i)
|
|
||||||
err = hl.HipSetDevice(i)
|
err = hl.HipSetDevice(i)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Warn(fmt.Sprintf("[%d] %s", i, err))
|
slog.Warn("set device", "id", i, "error", err)
|
||||||
skip[i] = struct{}{}
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
props, err := hl.HipGetDeviceProperties(i)
|
props, err := hl.HipGetDeviceProperties(i)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Warn(fmt.Sprintf("[%d] %s", i, err))
|
slog.Warn("get properties", "id", i, "error", err)
|
||||||
skip[i] = struct{}{}
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
n := bytes.IndexByte(props.Name[:], 0)
|
n := bytes.IndexByte(props.Name[:], 0)
|
||||||
name := string(props.Name[:n])
|
name := string(props.Name[:n])
|
||||||
slog.Info(fmt.Sprintf("[%d] Name: %s", i, name))
|
// TODO is UUID actually populated on windows?
|
||||||
|
// Can luid be used on windows for setting visible devices (and is it actually set?)
|
||||||
n = bytes.IndexByte(props.GcnArchName[:], 0)
|
n = bytes.IndexByte(props.GcnArchName[:], 0)
|
||||||
gfx := string(props.GcnArchName[:n])
|
gfx := string(props.GcnArchName[:n])
|
||||||
slog.Info(fmt.Sprintf("[%d] GcnArchName: %s", i, gfx))
|
slog.Debug("hip device", "id", i, "name", name, "gfx", gfx)
|
||||||
//slog.Info(fmt.Sprintf("[%d] Integrated: %d", i, props.iGPU)) // DOESN'T REPORT CORRECTLY! Always 0
|
//slog.Info(fmt.Sprintf("[%d] Integrated: %d", i, props.iGPU)) // DOESN'T REPORT CORRECTLY! Always 0
|
||||||
// TODO Why isn't props.iGPU accurate!?
|
// TODO Why isn't props.iGPU accurate!?
|
||||||
if strings.EqualFold(name, iGPUName) {
|
if strings.EqualFold(name, iGPUName) {
|
||||||
slog.Info(fmt.Sprintf("iGPU detected [%d] skipping", i))
|
slog.Info("unsupported Radeon iGPU detected skipping", "id", i, "name", name, "gfx", gfx)
|
||||||
skip[i] = struct{}{}
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if gfxOverride == "" {
|
if gfxOverride == "" {
|
||||||
if !slices.Contains[[]string, string](supported, gfx) {
|
if !slices.Contains[[]string, string](supported, gfx) {
|
||||||
slog.Warn(fmt.Sprintf("amdgpu [%d] %s is not supported by %s %v", i, gfx, libDir, supported))
|
slog.Warn("amdgpu is not supported", "gpu", i, "gpu_type", gfx, "library", libDir, "supported_types", supported)
|
||||||
// TODO - consider discrete markdown just for ROCM troubleshooting?
|
// TODO - consider discrete markdown just for ROCM troubleshooting?
|
||||||
slog.Warn("See https://github.com/ollama/ollama/blob/main/docs/troubleshooting.md for HSA_OVERRIDE_GFX_VERSION usage")
|
slog.Warn("See https://github.com/ollama/ollama/blob/main/docs/troubleshooting.md for HSA_OVERRIDE_GFX_VERSION usage")
|
||||||
skip[i] = struct{}{}
|
|
||||||
continue
|
continue
|
||||||
} else {
|
} else {
|
||||||
slog.Info(fmt.Sprintf("amdgpu [%d] %s is supported", i, gfx))
|
slog.Debug("amdgpu is supported", "gpu", i, "gpu_type", gfx)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
totalMemory, freeMemory, err := hl.HipMemGetInfo()
|
freeMemory, totalMemory, err := hl.HipMemGetInfo()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Warn(fmt.Sprintf("[%d] %s", i, err))
|
slog.Warn("get mem info", "id", i, "error", err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO according to docs, freeMem may lie on windows!
|
// iGPU detection, remove this check once we can support an iGPU variant of the rocm library
|
||||||
slog.Info(fmt.Sprintf("[%d] Total Mem: %d", i, totalMemory))
|
if totalMemory < IGPUMemLimit {
|
||||||
slog.Info(fmt.Sprintf("[%d] Free Mem: %d", i, freeMemory))
|
slog.Info("amdgpu appears to be an iGPU, skipping", "gpu", i, "total", format.HumanBytes2(totalMemory))
|
||||||
resp.memInfo.DeviceCount++
|
continue
|
||||||
resp.memInfo.TotalMemory += totalMemory
|
}
|
||||||
resp.memInfo.FreeMemory += freeMemory
|
|
||||||
|
// TODO revisit this once ROCm v6 is available on windows.
|
||||||
|
// v5.7 only reports VRAM used by this process, so it's completely wrong and unusable
|
||||||
|
slog.Debug("amdgpu memory", "gpu", i, "total", format.HumanBytes2(totalMemory))
|
||||||
|
slog.Debug("amdgpu memory", "gpu", i, "available", format.HumanBytes2(freeMemory))
|
||||||
|
gpuInfo := GpuInfo{
|
||||||
|
Library: "rocm",
|
||||||
|
memInfo: memInfo{
|
||||||
|
TotalMemory: totalMemory,
|
||||||
|
FreeMemory: freeMemory,
|
||||||
|
},
|
||||||
|
ID: fmt.Sprintf("%d", i), // TODO this is probably wrong if we specify visible devices
|
||||||
|
DependencyPath: libDir,
|
||||||
|
MinimumMemory: rocmMinimumMemory,
|
||||||
|
Name: name,
|
||||||
|
Compute: gfx,
|
||||||
|
|
||||||
|
// TODO - this information isn't accurate on windows, so don't report it until we find the right way to retrieve
|
||||||
|
// DriverMajor: driverMajor,
|
||||||
|
// DriverMinor: driverMinor,
|
||||||
|
}
|
||||||
|
|
||||||
|
resp = append(resp, gpuInfo)
|
||||||
}
|
}
|
||||||
if resp.memInfo.DeviceCount > 0 {
|
|
||||||
resp.Library = "rocm"
|
return resp
|
||||||
}
|
|
||||||
// Abort if all GPUs are skipped
|
|
||||||
if len(skip) >= count {
|
|
||||||
slog.Info("all detected amdgpus are skipped, falling back to CPU")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if len(skip) > 0 {
|
|
||||||
amdSetVisibleDevices(ids, skip)
|
|
||||||
}
|
|
||||||
UpdatePath(libDir)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func AMDValidateLibDir() (string, error) {
|
func AMDValidateLibDir() (string, error) {
|
||||||
// On windows non-admins typically can't create links
|
libDir, err := commonAMDValidateLibDir()
|
||||||
// so instead of trying to rely on rpath and a link in
|
|
||||||
// $LibDir/rocm, we instead rely on setting PATH to point
|
|
||||||
// to the location of the ROCm library
|
|
||||||
|
|
||||||
// Installer payload location if we're running the installed binary
|
|
||||||
exe, err := os.Executable()
|
|
||||||
if err == nil {
|
if err == nil {
|
||||||
rocmTargetDir := filepath.Join(filepath.Dir(exe), "rocm")
|
return libDir, nil
|
||||||
if rocmLibUsable(rocmTargetDir) {
|
|
||||||
slog.Debug("detected ROCM next to ollama executable " + rocmTargetDir)
|
|
||||||
return rocmTargetDir, nil
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Installer payload (if we're running from some other location)
|
// Installer payload (if we're running from some other location)
|
||||||
@@ -159,21 +155,6 @@ func AMDValidateLibDir() (string, error) {
|
|||||||
return rocmTargetDir, nil
|
return rocmTargetDir, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Prefer explicit HIP env var
|
|
||||||
hipPath := os.Getenv("HIP_PATH")
|
|
||||||
if hipPath != "" {
|
|
||||||
hipLibDir := filepath.Join(hipPath, "bin")
|
|
||||||
if rocmLibUsable(hipLibDir) {
|
|
||||||
slog.Debug("detected ROCM via HIP_PATH=" + hipPath)
|
|
||||||
return hipLibDir, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Well known location(s)
|
|
||||||
if rocmLibUsable(RocmStandardLocation) {
|
|
||||||
return RocmStandardLocation, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Should not happen on windows since we include it in the installer, but stand-alone binary might hit this
|
// Should not happen on windows since we include it in the installer, but stand-alone binary might hit this
|
||||||
slog.Warn("amdgpu detected, but no compatible rocm library found. Please install ROCm")
|
slog.Warn("amdgpu detected, but no compatible rocm library found. Please install ROCm")
|
||||||
return "", fmt.Errorf("no suitable rocm found, falling back to CPU")
|
return "", fmt.Errorf("no suitable rocm found, falling back to CPU")
|
||||||
|
|||||||
@@ -12,6 +12,8 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
"syscall"
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/ollama/ollama/envconfig"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@@ -24,8 +26,16 @@ func PayloadsDir() (string, error) {
|
|||||||
defer lock.Unlock()
|
defer lock.Unlock()
|
||||||
var err error
|
var err error
|
||||||
if payloadsDir == "" {
|
if payloadsDir == "" {
|
||||||
|
runnersDir := envconfig.RunnersDir
|
||||||
|
|
||||||
|
if runnersDir != "" {
|
||||||
|
payloadsDir = runnersDir
|
||||||
|
return payloadsDir, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// The remainder only applies on non-windows where we still carry payloads in the main executable
|
||||||
cleanupTmpDirs()
|
cleanupTmpDirs()
|
||||||
tmpDir := os.Getenv("OLLAMA_TMPDIR")
|
tmpDir := envconfig.TmpDir
|
||||||
if tmpDir == "" {
|
if tmpDir == "" {
|
||||||
tmpDir, err = os.MkdirTemp("", "ollama")
|
tmpDir, err = os.MkdirTemp("", "ollama")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -80,7 +90,7 @@ func cleanupTmpDirs() {
|
|||||||
}
|
}
|
||||||
err = os.RemoveAll(d)
|
err = os.RemoveAll(d)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Debug(fmt.Sprintf("unable to cleanup stale tmpdir %s: %s", d, err))
|
slog.Debug("unable to cleanup stale tmpdir", "path", d, "error", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -88,7 +98,8 @@ func cleanupTmpDirs() {
|
|||||||
func Cleanup() {
|
func Cleanup() {
|
||||||
lock.Lock()
|
lock.Lock()
|
||||||
defer lock.Unlock()
|
defer lock.Unlock()
|
||||||
if payloadsDir != "" {
|
runnersDir := envconfig.RunnersDir
|
||||||
|
if payloadsDir != "" && runnersDir == "" && runtime.GOOS != "windows" {
|
||||||
// We want to fully clean up the tmpdir parent of the payloads dir
|
// We want to fully clean up the tmpdir parent of the payloads dir
|
||||||
tmpDir := filepath.Clean(filepath.Join(payloadsDir, ".."))
|
tmpDir := filepath.Clean(filepath.Join(payloadsDir, ".."))
|
||||||
slog.Debug("cleaning up", "dir", tmpDir)
|
slog.Debug("cleaning up", "dir", tmpDir)
|
||||||
@@ -120,7 +131,7 @@ func UpdatePath(dir string) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
newPath := strings.Join(append([]string{dir}, pathComponents...), ";")
|
newPath := strings.Join(append([]string{dir}, pathComponents...), ";")
|
||||||
slog.Info(fmt.Sprintf("Updating PATH to %s", newPath))
|
slog.Info("updating", "PATH", newPath)
|
||||||
os.Setenv("PATH", newPath)
|
os.Setenv("PATH", newPath)
|
||||||
}
|
}
|
||||||
// linux and darwin rely on rpath
|
// linux and darwin rely on rpath
|
||||||
|
|||||||
@@ -8,14 +8,14 @@ import (
|
|||||||
|
|
||||||
func GetCPUVariant() string {
|
func GetCPUVariant() string {
|
||||||
if cpu.X86.HasAVX2 {
|
if cpu.X86.HasAVX2 {
|
||||||
slog.Info("CPU has AVX2")
|
slog.Debug("CPU has AVX2")
|
||||||
return "avx2"
|
return "avx2"
|
||||||
}
|
}
|
||||||
if cpu.X86.HasAVX {
|
if cpu.X86.HasAVX {
|
||||||
slog.Info("CPU has AVX")
|
slog.Debug("CPU has AVX")
|
||||||
return "avx"
|
return "avx"
|
||||||
}
|
}
|
||||||
slog.Info("CPU does not have vector extensions")
|
slog.Debug("CPU does not have vector extensions")
|
||||||
// else LCD
|
// else LCD
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
|||||||
22
gpu/cuda_common.go
Normal file
22
gpu/cuda_common.go
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
//go:build linux || windows
|
||||||
|
|
||||||
|
package gpu
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log/slog"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
func cudaGetVisibleDevicesEnv(gpuInfo []GpuInfo) (string, string) {
|
||||||
|
ids := []string{}
|
||||||
|
for _, info := range gpuInfo {
|
||||||
|
if info.Library != "cuda" {
|
||||||
|
// TODO shouldn't happen if things are wired correctly...
|
||||||
|
slog.Debug("cudaGetVisibleDevicesEnv skipping over non-cuda device", "library", info.Library)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
ids = append(ids, info.ID)
|
||||||
|
}
|
||||||
|
return "CUDA_VISIBLE_DEVICES", strings.Join(ids, ",")
|
||||||
|
|
||||||
|
}
|
||||||
360
gpu/gpu.go
360
gpu/gpu.go
@@ -16,17 +16,19 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strconv"
|
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"unsafe"
|
"unsafe"
|
||||||
|
|
||||||
|
"github.com/ollama/ollama/envconfig"
|
||||||
"github.com/ollama/ollama/format"
|
"github.com/ollama/ollama/format"
|
||||||
)
|
)
|
||||||
|
|
||||||
type handles struct {
|
type handles struct {
|
||||||
nvml *C.nvml_handle_t
|
deviceCount int
|
||||||
cudart *C.cudart_handle_t
|
cudart *C.cudart_handle_t
|
||||||
|
nvcuda *C.nvcuda_handle_t
|
||||||
|
oneapi *C.oneapi_handle_t
|
||||||
}
|
}
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -39,26 +41,10 @@ var gpuMutex sync.Mutex
|
|||||||
// With our current CUDA compile flags, older than 5.0 will not work properly
|
// With our current CUDA compile flags, older than 5.0 will not work properly
|
||||||
var CudaComputeMin = [2]C.int{5, 0}
|
var CudaComputeMin = [2]C.int{5, 0}
|
||||||
|
|
||||||
// Possible locations for the nvidia-ml library
|
var RocmComputeMin = 9
|
||||||
var NvmlLinuxGlobs = []string{
|
|
||||||
"/usr/local/cuda/lib64/libnvidia-ml.so*",
|
|
||||||
"/usr/lib/x86_64-linux-gnu/nvidia/current/libnvidia-ml.so*",
|
|
||||||
"/usr/lib/x86_64-linux-gnu/libnvidia-ml.so*",
|
|
||||||
"/usr/lib/wsl/lib/libnvidia-ml.so*",
|
|
||||||
"/usr/lib/wsl/drivers/*/libnvidia-ml.so*",
|
|
||||||
"/opt/cuda/lib64/libnvidia-ml.so*",
|
|
||||||
"/usr/lib*/libnvidia-ml.so*",
|
|
||||||
"/usr/lib/aarch64-linux-gnu/nvidia/current/libnvidia-ml.so*",
|
|
||||||
"/usr/lib/aarch64-linux-gnu/libnvidia-ml.so*",
|
|
||||||
"/usr/local/lib*/libnvidia-ml.so*",
|
|
||||||
|
|
||||||
// TODO: are these stubs ever valid?
|
// TODO find a better way to detect iGPU instead of minimum memory
|
||||||
"/opt/cuda/targets/x86_64-linux/lib/stubs/libnvidia-ml.so*",
|
const IGPUMemLimit = 1 * format.GibiByte // 512G is what they typically report, so anything less than 1G must be iGPU
|
||||||
}
|
|
||||||
|
|
||||||
var NvmlWindowsGlobs = []string{
|
|
||||||
"c:\\Windows\\System32\\nvml.dll",
|
|
||||||
}
|
|
||||||
|
|
||||||
var CudartLinuxGlobs = []string{
|
var CudartLinuxGlobs = []string{
|
||||||
"/usr/local/cuda/lib64/libcudart.so*",
|
"/usr/local/cuda/lib64/libcudart.so*",
|
||||||
@@ -79,6 +65,31 @@ var CudartWindowsGlobs = []string{
|
|||||||
"c:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v*\\bin\\cudart64_*.dll",
|
"c:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v*\\bin\\cudart64_*.dll",
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var NvcudaLinuxGlobs = []string{
|
||||||
|
"/usr/local/cuda*/targets/*/lib/libcuda.so*",
|
||||||
|
"/usr/lib/*-linux-gnu/nvidia/current/libcuda.so*",
|
||||||
|
"/usr/lib/*-linux-gnu/libcuda.so*",
|
||||||
|
"/usr/lib/wsl/lib/libcuda.so*",
|
||||||
|
"/usr/lib/wsl/drivers/*/libcuda.so*",
|
||||||
|
"/opt/cuda/lib*/libcuda.so*",
|
||||||
|
"/usr/local/cuda/lib*/libcuda.so*",
|
||||||
|
"/usr/lib*/libcuda.so*",
|
||||||
|
"/usr/local/lib*/libcuda.so*",
|
||||||
|
}
|
||||||
|
|
||||||
|
var NvcudaWindowsGlobs = []string{
|
||||||
|
"c:\\windows\\system*\\nvcuda.dll",
|
||||||
|
}
|
||||||
|
|
||||||
|
var OneapiWindowsGlobs = []string{
|
||||||
|
"c:\\Windows\\System32\\DriverStore\\FileRepository\\*\\ze_intel_gpu64.dll",
|
||||||
|
}
|
||||||
|
|
||||||
|
var OneapiLinuxGlobs = []string{
|
||||||
|
"/usr/lib/x86_64-linux-gnu/libze_intel_gpu.so*",
|
||||||
|
"/usr/lib*/libze_intel_gpu.so*",
|
||||||
|
}
|
||||||
|
|
||||||
// Jetson devices have JETSON_JETPACK="x.y.z" factory set to the Jetpack version installed.
|
// Jetson devices have JETSON_JETPACK="x.y.z" factory set to the Jetpack version installed.
|
||||||
// Included to drive logic for reducing Ollama-allocated overhead on L4T/Jetson devices.
|
// Included to drive logic for reducing Ollama-allocated overhead on L4T/Jetson devices.
|
||||||
var CudaTegra string = os.Getenv("JETSON_JETPACK")
|
var CudaTegra string = os.Getenv("JETSON_JETPACK")
|
||||||
@@ -88,61 +99,63 @@ func initGPUHandles() *handles {
|
|||||||
|
|
||||||
// TODO - if the ollama build is CPU only, don't do these checks as they're irrelevant and confusing
|
// TODO - if the ollama build is CPU only, don't do these checks as they're irrelevant and confusing
|
||||||
|
|
||||||
gpuHandles := &handles{nil, nil}
|
gpuHandles := &handles{}
|
||||||
var nvmlMgmtName string
|
|
||||||
var nvmlMgmtPatterns []string
|
|
||||||
var cudartMgmtName string
|
var cudartMgmtName string
|
||||||
var cudartMgmtPatterns []string
|
var cudartMgmtPatterns []string
|
||||||
|
var nvcudaMgmtName string
|
||||||
|
var nvcudaMgmtPatterns []string
|
||||||
|
|
||||||
tmpDir, _ := PayloadsDir()
|
tmpDir, _ := PayloadsDir()
|
||||||
switch runtime.GOOS {
|
switch runtime.GOOS {
|
||||||
case "windows":
|
case "windows":
|
||||||
nvmlMgmtName = "nvml.dll"
|
|
||||||
nvmlMgmtPatterns = make([]string, len(NvmlWindowsGlobs))
|
|
||||||
copy(nvmlMgmtPatterns, NvmlWindowsGlobs)
|
|
||||||
cudartMgmtName = "cudart64_*.dll"
|
cudartMgmtName = "cudart64_*.dll"
|
||||||
localAppData := os.Getenv("LOCALAPPDATA")
|
localAppData := os.Getenv("LOCALAPPDATA")
|
||||||
cudartMgmtPatterns = []string{filepath.Join(localAppData, "Programs", "Ollama", cudartMgmtName)}
|
cudartMgmtPatterns = []string{filepath.Join(localAppData, "Programs", "Ollama", cudartMgmtName)}
|
||||||
cudartMgmtPatterns = append(cudartMgmtPatterns, CudartWindowsGlobs...)
|
cudartMgmtPatterns = append(cudartMgmtPatterns, CudartWindowsGlobs...)
|
||||||
|
// Aligned with driver, we can't carry as payloads
|
||||||
|
nvcudaMgmtName = "nvcuda.dll"
|
||||||
|
nvcudaMgmtPatterns = NvcudaWindowsGlobs
|
||||||
case "linux":
|
case "linux":
|
||||||
nvmlMgmtName = "libnvidia-ml.so"
|
|
||||||
nvmlMgmtPatterns = make([]string, len(NvmlLinuxGlobs))
|
|
||||||
copy(nvmlMgmtPatterns, NvmlLinuxGlobs)
|
|
||||||
cudartMgmtName = "libcudart.so*"
|
cudartMgmtName = "libcudart.so*"
|
||||||
if tmpDir != "" {
|
if tmpDir != "" {
|
||||||
// TODO - add "payloads" for subprocess
|
// TODO - add "payloads" for subprocess
|
||||||
cudartMgmtPatterns = []string{filepath.Join(tmpDir, "cuda*", cudartMgmtName)}
|
cudartMgmtPatterns = []string{filepath.Join(tmpDir, "cuda*", cudartMgmtName)}
|
||||||
}
|
}
|
||||||
cudartMgmtPatterns = append(cudartMgmtPatterns, CudartLinuxGlobs...)
|
cudartMgmtPatterns = append(cudartMgmtPatterns, CudartLinuxGlobs...)
|
||||||
|
// Aligned with driver, we can't carry as payloads
|
||||||
|
nvcudaMgmtName = "libcuda.so*"
|
||||||
|
nvcudaMgmtPatterns = NvcudaLinuxGlobs
|
||||||
default:
|
default:
|
||||||
return gpuHandles
|
return gpuHandles
|
||||||
}
|
}
|
||||||
|
|
||||||
slog.Info("Detecting GPU type")
|
slog.Debug("Detecting GPUs")
|
||||||
cudartLibPaths := FindGPULibs(cudartMgmtName, cudartMgmtPatterns)
|
nvcudaLibPaths := FindGPULibs(nvcudaMgmtName, nvcudaMgmtPatterns)
|
||||||
if len(cudartLibPaths) > 0 {
|
if len(nvcudaLibPaths) > 0 {
|
||||||
cudart := LoadCUDARTMgmt(cudartLibPaths)
|
deviceCount, nvcuda, libPath := LoadNVCUDAMgmt(nvcudaLibPaths)
|
||||||
if cudart != nil {
|
if nvcuda != nil {
|
||||||
slog.Info("Nvidia GPU detected via cudart")
|
slog.Debug("detected GPUs", "count", deviceCount, "library", libPath)
|
||||||
gpuHandles.cudart = cudart
|
gpuHandles.nvcuda = nvcuda
|
||||||
|
gpuHandles.deviceCount = deviceCount
|
||||||
return gpuHandles
|
return gpuHandles
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO once we build confidence, remove this and the gpu_info_nvml.[ch] files
|
cudartLibPaths := FindGPULibs(cudartMgmtName, cudartMgmtPatterns)
|
||||||
nvmlLibPaths := FindGPULibs(nvmlMgmtName, nvmlMgmtPatterns)
|
if len(cudartLibPaths) > 0 {
|
||||||
if len(nvmlLibPaths) > 0 {
|
deviceCount, cudart, libPath := LoadCUDARTMgmt(cudartLibPaths)
|
||||||
nvml := LoadNVMLMgmt(nvmlLibPaths)
|
if cudart != nil {
|
||||||
if nvml != nil {
|
slog.Debug("detected GPUs", "library", libPath, "count", deviceCount)
|
||||||
slog.Info("Nvidia GPU detected via nvidia-ml")
|
gpuHandles.cudart = cudart
|
||||||
gpuHandles.nvml = nvml
|
gpuHandles.deviceCount = deviceCount
|
||||||
return gpuHandles
|
return gpuHandles
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return gpuHandles
|
return gpuHandles
|
||||||
}
|
}
|
||||||
|
|
||||||
func GetGPUInfo() GpuInfo {
|
func GetGPUInfo() GpuInfoList {
|
||||||
// TODO - consider exploring lspci (and equivalent on windows) to check for
|
// TODO - consider exploring lspci (and equivalent on windows) to check for
|
||||||
// GPUs so we can report warnings if we see Nvidia/AMD but fail to load the libraries
|
// GPUs so we can report warnings if we see Nvidia/AMD but fail to load the libraries
|
||||||
gpuMutex.Lock()
|
gpuMutex.Lock()
|
||||||
@@ -150,12 +163,12 @@ func GetGPUInfo() GpuInfo {
|
|||||||
|
|
||||||
gpuHandles := initGPUHandles()
|
gpuHandles := initGPUHandles()
|
||||||
defer func() {
|
defer func() {
|
||||||
if gpuHandles.nvml != nil {
|
|
||||||
C.nvml_release(*gpuHandles.nvml)
|
|
||||||
}
|
|
||||||
if gpuHandles.cudart != nil {
|
if gpuHandles.cudart != nil {
|
||||||
C.cudart_release(*gpuHandles.cudart)
|
C.cudart_release(*gpuHandles.cudart)
|
||||||
}
|
}
|
||||||
|
if gpuHandles.nvcuda != nil {
|
||||||
|
C.nvcuda_release(*gpuHandles.nvcuda)
|
||||||
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
// All our GPU builds on x86 have AVX enabled, so fallback to CPU if we don't detect at least AVX
|
// All our GPU builds on x86 have AVX enabled, so fallback to CPU if we don't detect at least AVX
|
||||||
@@ -164,73 +177,83 @@ func GetGPUInfo() GpuInfo {
|
|||||||
slog.Warn("CPU does not have AVX or AVX2, disabling GPU support.")
|
slog.Warn("CPU does not have AVX or AVX2, disabling GPU support.")
|
||||||
}
|
}
|
||||||
|
|
||||||
var memInfo C.mem_info_t
|
// On windows we bundle the nvidia library one level above the runner dir
|
||||||
resp := GpuInfo{}
|
depPath := ""
|
||||||
if gpuHandles.nvml != nil && (cpuVariant != "" || runtime.GOARCH != "amd64") {
|
if runtime.GOOS == "windows" && envconfig.RunnersDir != "" {
|
||||||
C.nvml_check_vram(*gpuHandles.nvml, &memInfo)
|
depPath = filepath.Dir(envconfig.RunnersDir)
|
||||||
if memInfo.err != nil {
|
}
|
||||||
slog.Info(fmt.Sprintf("[nvidia-ml] error looking up NVML GPU memory: %s", C.GoString(memInfo.err)))
|
|
||||||
C.free(unsafe.Pointer(memInfo.err))
|
var memInfo C.mem_info_t
|
||||||
} else if memInfo.count > 0 {
|
resp := []GpuInfo{}
|
||||||
// Verify minimum compute capability
|
|
||||||
var cc C.nvml_compute_capability_t
|
// NVIDIA first
|
||||||
C.nvml_compute_capability(*gpuHandles.nvml, &cc)
|
for i := 0; i < gpuHandles.deviceCount; i++ {
|
||||||
if cc.err != nil {
|
// TODO once we support CPU compilation variants of GPU libraries refine this...
|
||||||
slog.Info(fmt.Sprintf("[nvidia-ml] error looking up NVML GPU compute capability: %s", C.GoString(cc.err)))
|
if cpuVariant == "" && runtime.GOARCH == "amd64" {
|
||||||
C.free(unsafe.Pointer(cc.err))
|
continue
|
||||||
} else if cc.major > CudaComputeMin[0] || (cc.major == CudaComputeMin[0] && cc.minor >= CudaComputeMin[1]) {
|
}
|
||||||
slog.Info(fmt.Sprintf("[nvidia-ml] NVML CUDA Compute Capability detected: %d.%d", cc.major, cc.minor))
|
if gpuHandles.cudart != nil || gpuHandles.nvcuda != nil {
|
||||||
resp.Library = "cuda"
|
gpuInfo := GpuInfo{
|
||||||
resp.MinimumMemory = cudaMinimumMemory
|
Library: "cuda",
|
||||||
} else {
|
}
|
||||||
slog.Info(fmt.Sprintf("[nvidia-ml] CUDA GPU is too old. Falling back to CPU mode. Compute Capability detected: %d.%d", cc.major, cc.minor))
|
var driverMajor int
|
||||||
}
|
var driverMinor int
|
||||||
}
|
if gpuHandles.cudart != nil {
|
||||||
} else if gpuHandles.cudart != nil && (cpuVariant != "" || runtime.GOARCH != "amd64") {
|
C.cudart_check_vram(*gpuHandles.cudart, C.int(i), &memInfo)
|
||||||
C.cudart_check_vram(*gpuHandles.cudart, &memInfo)
|
} else {
|
||||||
if memInfo.err != nil {
|
C.nvcuda_check_vram(*gpuHandles.nvcuda, C.int(i), &memInfo)
|
||||||
slog.Info(fmt.Sprintf("[cudart] error looking up CUDART GPU memory: %s", C.GoString(memInfo.err)))
|
driverMajor = int(gpuHandles.nvcuda.driver_major)
|
||||||
C.free(unsafe.Pointer(memInfo.err))
|
driverMinor = int(gpuHandles.nvcuda.driver_minor)
|
||||||
} else if memInfo.count > 0 {
|
}
|
||||||
// Verify minimum compute capability
|
if memInfo.err != nil {
|
||||||
var cc C.cudart_compute_capability_t
|
slog.Info("error looking up nvidia GPU memory", "error", C.GoString(memInfo.err))
|
||||||
C.cudart_compute_capability(*gpuHandles.cudart, &cc)
|
C.free(unsafe.Pointer(memInfo.err))
|
||||||
if cc.err != nil {
|
continue
|
||||||
slog.Info(fmt.Sprintf("[cudart] error looking up CUDA compute capability: %s", C.GoString(cc.err)))
|
}
|
||||||
C.free(unsafe.Pointer(cc.err))
|
if memInfo.major < CudaComputeMin[0] || (memInfo.major == CudaComputeMin[0] && memInfo.minor < CudaComputeMin[1]) {
|
||||||
} else if cc.major > CudaComputeMin[0] || (cc.major == CudaComputeMin[0] && cc.minor >= CudaComputeMin[1]) {
|
slog.Info(fmt.Sprintf("[%d] CUDA GPU is too old. Compute Capability detected: %d.%d", i, memInfo.major, memInfo.minor))
|
||||||
slog.Info(fmt.Sprintf("[cudart] CUDART CUDA Compute Capability detected: %d.%d", cc.major, cc.minor))
|
continue
|
||||||
resp.Library = "cuda"
|
}
|
||||||
resp.MinimumMemory = cudaMinimumMemory
|
gpuInfo.TotalMemory = uint64(memInfo.total)
|
||||||
} else {
|
gpuInfo.FreeMemory = uint64(memInfo.free)
|
||||||
slog.Info(fmt.Sprintf("[cudart] CUDA GPU is too old. Falling back to CPU mode. Compute Capability detected: %d.%d", cc.major, cc.minor))
|
gpuInfo.ID = C.GoString(&memInfo.gpu_id[0])
|
||||||
}
|
gpuInfo.Compute = fmt.Sprintf("%d.%d", memInfo.major, memInfo.minor)
|
||||||
}
|
gpuInfo.MinimumMemory = cudaMinimumMemory
|
||||||
} else {
|
gpuInfo.DependencyPath = depPath
|
||||||
AMDGetGPUInfo(&resp)
|
gpuInfo.Name = C.GoString(&memInfo.gpu_name[0])
|
||||||
if resp.Library != "" {
|
gpuInfo.DriverMajor = int(driverMajor)
|
||||||
resp.MinimumMemory = rocmMinimumMemory
|
gpuInfo.DriverMinor = int(driverMinor)
|
||||||
return resp
|
|
||||||
}
|
// TODO potentially sort on our own algorithm instead of what the underlying GPU library does...
|
||||||
}
|
resp = append(resp, gpuInfo)
|
||||||
if resp.Library == "" {
|
}
|
||||||
C.cpu_check_ram(&memInfo)
|
}
|
||||||
resp.Library = "cpu"
|
|
||||||
resp.Variant = cpuVariant
|
// Then AMD
|
||||||
}
|
resp = append(resp, AMDGetGPUInfo()...)
|
||||||
if memInfo.err != nil {
|
|
||||||
slog.Info(fmt.Sprintf("error looking up CPU memory: %s", C.GoString(memInfo.err)))
|
if len(resp) == 0 {
|
||||||
C.free(unsafe.Pointer(memInfo.err))
|
C.cpu_check_ram(&memInfo)
|
||||||
return resp
|
if memInfo.err != nil {
|
||||||
|
slog.Info("error looking up CPU memory", "error", C.GoString(memInfo.err))
|
||||||
|
C.free(unsafe.Pointer(memInfo.err))
|
||||||
|
return resp
|
||||||
|
}
|
||||||
|
gpuInfo := GpuInfo{
|
||||||
|
Library: "cpu",
|
||||||
|
Variant: cpuVariant,
|
||||||
|
}
|
||||||
|
gpuInfo.TotalMemory = uint64(memInfo.total)
|
||||||
|
gpuInfo.FreeMemory = uint64(memInfo.free)
|
||||||
|
gpuInfo.ID = C.GoString(&memInfo.gpu_id[0])
|
||||||
|
|
||||||
|
resp = append(resp, gpuInfo)
|
||||||
}
|
}
|
||||||
|
|
||||||
resp.DeviceCount = uint32(memInfo.count)
|
|
||||||
resp.FreeMemory = uint64(memInfo.free)
|
|
||||||
resp.TotalMemory = uint64(memInfo.total)
|
|
||||||
return resp
|
return resp
|
||||||
}
|
}
|
||||||
|
|
||||||
func getCPUMem() (memInfo, error) {
|
func GetCPUMem() (memInfo, error) {
|
||||||
var ret memInfo
|
var ret memInfo
|
||||||
var info C.mem_info_t
|
var info C.mem_info_t
|
||||||
C.cpu_check_ram(&info)
|
C.cpu_check_ram(&info)
|
||||||
@@ -243,29 +266,12 @@ func getCPUMem() (memInfo, error) {
|
|||||||
return ret, nil
|
return ret, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func CheckVRAM() (uint64, error) {
|
func FindGPULibs(baseLibName string, defaultPatterns []string) []string {
|
||||||
userLimit := os.Getenv("OLLAMA_MAX_VRAM")
|
|
||||||
if userLimit != "" {
|
|
||||||
avail, err := strconv.ParseInt(userLimit, 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return 0, fmt.Errorf("Invalid OLLAMA_MAX_VRAM setting %s: %s", userLimit, err)
|
|
||||||
}
|
|
||||||
slog.Info(fmt.Sprintf("user override OLLAMA_MAX_VRAM=%d", avail))
|
|
||||||
return uint64(avail), nil
|
|
||||||
}
|
|
||||||
gpuInfo := GetGPUInfo()
|
|
||||||
if gpuInfo.FreeMemory > 0 && (gpuInfo.Library == "cuda" || gpuInfo.Library == "rocm") {
|
|
||||||
return gpuInfo.FreeMemory, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0, fmt.Errorf("no GPU detected") // TODO - better handling of CPU based memory determiniation
|
|
||||||
}
|
|
||||||
|
|
||||||
func FindGPULibs(baseLibName string, patterns []string) []string {
|
|
||||||
// Multiple GPU libraries may exist, and some may not work, so keep trying until we exhaust them
|
// Multiple GPU libraries may exist, and some may not work, so keep trying until we exhaust them
|
||||||
var ldPaths []string
|
var ldPaths []string
|
||||||
|
var patterns []string
|
||||||
gpuLibPaths := []string{}
|
gpuLibPaths := []string{}
|
||||||
slog.Info(fmt.Sprintf("Searching for GPU management library %s", baseLibName))
|
slog.Debug("Searching for GPU library", "name", baseLibName)
|
||||||
|
|
||||||
switch runtime.GOOS {
|
switch runtime.GOOS {
|
||||||
case "windows":
|
case "windows":
|
||||||
@@ -283,8 +289,14 @@ func FindGPULibs(baseLibName string, patterns []string) []string {
|
|||||||
}
|
}
|
||||||
patterns = append(patterns, filepath.Join(d, baseLibName+"*"))
|
patterns = append(patterns, filepath.Join(d, baseLibName+"*"))
|
||||||
}
|
}
|
||||||
slog.Debug(fmt.Sprintf("gpu management search paths: %v", patterns))
|
patterns = append(patterns, defaultPatterns...)
|
||||||
|
slog.Debug("gpu library search", "globs", patterns)
|
||||||
for _, pattern := range patterns {
|
for _, pattern := range patterns {
|
||||||
|
|
||||||
|
// Nvidia PhysX known to return bogus results
|
||||||
|
if strings.Contains(pattern, "PhysX") {
|
||||||
|
slog.Debug("skipping PhysX cuda library path", "path", pattern)
|
||||||
|
}
|
||||||
// Ignore glob discovery errors
|
// Ignore glob discovery errors
|
||||||
matches, _ := filepath.Glob(pattern)
|
matches, _ := filepath.Glob(pattern)
|
||||||
for _, match := range matches {
|
for _, match := range matches {
|
||||||
@@ -311,28 +323,11 @@ func FindGPULibs(baseLibName string, patterns []string) []string {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
slog.Info(fmt.Sprintf("Discovered GPU libraries: %v", gpuLibPaths))
|
slog.Debug("discovered GPU libraries", "paths", gpuLibPaths)
|
||||||
return gpuLibPaths
|
return gpuLibPaths
|
||||||
}
|
}
|
||||||
|
|
||||||
func LoadNVMLMgmt(nvmlLibPaths []string) *C.nvml_handle_t {
|
func LoadCUDARTMgmt(cudartLibPaths []string) (int, *C.cudart_handle_t, string) {
|
||||||
var resp C.nvml_init_resp_t
|
|
||||||
resp.ch.verbose = getVerboseState()
|
|
||||||
for _, libPath := range nvmlLibPaths {
|
|
||||||
lib := C.CString(libPath)
|
|
||||||
defer C.free(unsafe.Pointer(lib))
|
|
||||||
C.nvml_init(lib, &resp)
|
|
||||||
if resp.err != nil {
|
|
||||||
slog.Info(fmt.Sprintf("Unable to load NVML management library %s: %s", libPath, C.GoString(resp.err)))
|
|
||||||
C.free(unsafe.Pointer(resp.err))
|
|
||||||
} else {
|
|
||||||
return &resp.ch
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func LoadCUDARTMgmt(cudartLibPaths []string) *C.cudart_handle_t {
|
|
||||||
var resp C.cudart_init_resp_t
|
var resp C.cudart_init_resp_t
|
||||||
resp.ch.verbose = getVerboseState()
|
resp.ch.verbose = getVerboseState()
|
||||||
for _, libPath := range cudartLibPaths {
|
for _, libPath := range cudartLibPaths {
|
||||||
@@ -340,18 +335,73 @@ func LoadCUDARTMgmt(cudartLibPaths []string) *C.cudart_handle_t {
|
|||||||
defer C.free(unsafe.Pointer(lib))
|
defer C.free(unsafe.Pointer(lib))
|
||||||
C.cudart_init(lib, &resp)
|
C.cudart_init(lib, &resp)
|
||||||
if resp.err != nil {
|
if resp.err != nil {
|
||||||
slog.Info(fmt.Sprintf("Unable to load cudart CUDA management library %s: %s", libPath, C.GoString(resp.err)))
|
slog.Debug("Unable to load cudart", "library", libPath, "error", C.GoString(resp.err))
|
||||||
C.free(unsafe.Pointer(resp.err))
|
C.free(unsafe.Pointer(resp.err))
|
||||||
} else {
|
} else {
|
||||||
return &resp.ch
|
return int(resp.num_devices), &resp.ch, libPath
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return 0, nil, ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func LoadNVCUDAMgmt(nvcudaLibPaths []string) (int, *C.nvcuda_handle_t, string) {
|
||||||
|
var resp C.nvcuda_init_resp_t
|
||||||
|
resp.ch.verbose = getVerboseState()
|
||||||
|
for _, libPath := range nvcudaLibPaths {
|
||||||
|
lib := C.CString(libPath)
|
||||||
|
defer C.free(unsafe.Pointer(lib))
|
||||||
|
C.nvcuda_init(lib, &resp)
|
||||||
|
if resp.err != nil {
|
||||||
|
slog.Debug("Unable to load nvcuda", "library", libPath, "error", C.GoString(resp.err))
|
||||||
|
C.free(unsafe.Pointer(resp.err))
|
||||||
|
} else {
|
||||||
|
return int(resp.num_devices), &resp.ch, libPath
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0, nil, ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func LoadOneapiMgmt(oneapiLibPaths []string) (int, *C.oneapi_handle_t, string) {
|
||||||
|
var resp C.oneapi_init_resp_t
|
||||||
|
resp.oh.verbose = getVerboseState()
|
||||||
|
for _, libPath := range oneapiLibPaths {
|
||||||
|
lib := C.CString(libPath)
|
||||||
|
defer C.free(unsafe.Pointer(lib))
|
||||||
|
C.oneapi_init(lib, &resp)
|
||||||
|
if resp.err != nil {
|
||||||
|
slog.Debug("Unable to load oneAPI management library", "library", libPath, "error", C.GoString(resp.err))
|
||||||
|
C.free(unsafe.Pointer(resp.err))
|
||||||
|
} else {
|
||||||
|
return int(resp.num_devices), &resp.oh, libPath
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0, nil, ""
|
||||||
}
|
}
|
||||||
|
|
||||||
func getVerboseState() C.uint16_t {
|
func getVerboseState() C.uint16_t {
|
||||||
if debug := os.Getenv("OLLAMA_DEBUG"); debug != "" {
|
if envconfig.Debug {
|
||||||
return C.uint16_t(1)
|
return C.uint16_t(1)
|
||||||
}
|
}
|
||||||
return C.uint16_t(0)
|
return C.uint16_t(0)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Given the list of GPUs this instantiation is targeted for,
|
||||||
|
// figure out the visible devices environment variable
|
||||||
|
//
|
||||||
|
// If different libraries are detected, the first one is what we use
|
||||||
|
func (l GpuInfoList) GetVisibleDevicesEnv() (string, string) {
|
||||||
|
if len(l) == 0 {
|
||||||
|
return "", ""
|
||||||
|
}
|
||||||
|
switch l[0].Library {
|
||||||
|
case "cuda":
|
||||||
|
return cudaGetVisibleDevicesEnv(l)
|
||||||
|
case "rocm":
|
||||||
|
return rocmGetVisibleDevicesEnv(l)
|
||||||
|
case "oneapi":
|
||||||
|
return oneapiGetVisibleDevicesEnv(l)
|
||||||
|
default:
|
||||||
|
slog.Debug("no filter required for library " + l[0].Library)
|
||||||
|
return "", ""
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -9,52 +9,47 @@ package gpu
|
|||||||
*/
|
*/
|
||||||
import "C"
|
import "C"
|
||||||
import (
|
import (
|
||||||
"fmt"
|
|
||||||
"log/slog"
|
|
||||||
"os"
|
|
||||||
"runtime"
|
"runtime"
|
||||||
"strconv"
|
|
||||||
|
"github.com/ollama/ollama/format"
|
||||||
)
|
)
|
||||||
|
|
||||||
// CheckVRAM returns the free VRAM in bytes on Linux machines with NVIDIA GPUs
|
const (
|
||||||
func CheckVRAM() (uint64, error) {
|
metalMinimumMemory = 512 * format.MebiByte
|
||||||
userLimit := os.Getenv("OLLAMA_MAX_VRAM")
|
)
|
||||||
if userLimit != "" {
|
|
||||||
avail, err := strconv.ParseInt(userLimit, 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return 0, fmt.Errorf("Invalid OLLAMA_MAX_VRAM setting %s: %s", userLimit, err)
|
|
||||||
}
|
|
||||||
slog.Info(fmt.Sprintf("user override OLLAMA_MAX_VRAM=%d", avail))
|
|
||||||
return uint64(avail), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
|
func GetGPUInfo() GpuInfoList {
|
||||||
|
mem, _ := GetCPUMem()
|
||||||
if runtime.GOARCH == "amd64" {
|
if runtime.GOARCH == "amd64" {
|
||||||
// gpu not supported, this may not be metal
|
return []GpuInfo{
|
||||||
return 0, nil
|
{
|
||||||
}
|
Library: "cpu",
|
||||||
|
Variant: GetCPUVariant(),
|
||||||
return uint64(C.getRecommendedMaxVRAM()), nil
|
memInfo: mem,
|
||||||
}
|
},
|
||||||
|
|
||||||
func GetGPUInfo() GpuInfo {
|
|
||||||
mem, _ := getCPUMem()
|
|
||||||
if runtime.GOARCH == "amd64" {
|
|
||||||
return GpuInfo{
|
|
||||||
Library: "cpu",
|
|
||||||
Variant: GetCPUVariant(),
|
|
||||||
memInfo: mem,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return GpuInfo{
|
info := GpuInfo{
|
||||||
Library: "metal",
|
Library: "metal",
|
||||||
memInfo: mem,
|
ID: "0",
|
||||||
}
|
}
|
||||||
|
info.TotalMemory = uint64(C.getRecommendedMaxVRAM())
|
||||||
|
|
||||||
|
// TODO is there a way to gather actual allocated video memory? (currentAllocatedSize doesn't work)
|
||||||
|
info.FreeMemory = info.TotalMemory
|
||||||
|
|
||||||
|
info.MinimumMemory = metalMinimumMemory
|
||||||
|
return []GpuInfo{info}
|
||||||
}
|
}
|
||||||
|
|
||||||
func getCPUMem() (memInfo, error) {
|
func GetCPUMem() (memInfo, error) {
|
||||||
return memInfo{
|
return memInfo{
|
||||||
TotalMemory: uint64(C.getPhysicalMemory()),
|
TotalMemory: uint64(C.getPhysicalMemory()),
|
||||||
FreeMemory: 0,
|
FreeMemory: 0,
|
||||||
DeviceCount: 1,
|
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (l GpuInfoList) GetVisibleDevicesEnv() (string, string) {
|
||||||
|
// No-op on darwin
|
||||||
|
return "", ""
|
||||||
|
}
|
||||||
|
|||||||
@@ -38,12 +38,20 @@
|
|||||||
extern "C" {
|
extern "C" {
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
#define GPU_ID_LEN 64
|
||||||
|
#define GPU_NAME_LEN 96
|
||||||
|
|
||||||
typedef struct mem_info {
|
typedef struct mem_info {
|
||||||
|
char *err; // If non-nill, caller responsible for freeing
|
||||||
|
char gpu_id[GPU_ID_LEN];
|
||||||
|
char gpu_name[GPU_NAME_LEN];
|
||||||
uint64_t total;
|
uint64_t total;
|
||||||
uint64_t free;
|
uint64_t free;
|
||||||
unsigned int count;
|
|
||||||
int igpu_index; // If >= 0, we detected an integrated GPU to ignore
|
// Compute Capability
|
||||||
char *err; // If non-nill, caller responsible for freeing
|
int major;
|
||||||
|
int minor;
|
||||||
|
int patch;
|
||||||
} mem_info_t;
|
} mem_info_t;
|
||||||
|
|
||||||
void cpu_check_ram(mem_info_t *resp);
|
void cpu_check_ram(mem_info_t *resp);
|
||||||
@@ -52,8 +60,9 @@ void cpu_check_ram(mem_info_t *resp);
|
|||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#include "gpu_info_nvml.h"
|
|
||||||
#include "gpu_info_cudart.h"
|
#include "gpu_info_cudart.h"
|
||||||
|
#include "gpu_info_nvcuda.h"
|
||||||
|
#include "gpu_info_oneapi.h"
|
||||||
|
|
||||||
#endif // __GPU_INFO_H__
|
#endif // __GPU_INFO_H__
|
||||||
#endif // __APPLE__
|
#endif // __APPLE__
|
||||||
@@ -8,9 +8,9 @@ void cpu_check_ram(mem_info_t *resp) {
|
|||||||
MEMORYSTATUSEX info;
|
MEMORYSTATUSEX info;
|
||||||
info.dwLength = sizeof(info);
|
info.dwLength = sizeof(info);
|
||||||
if (GlobalMemoryStatusEx(&info) != 0) {
|
if (GlobalMemoryStatusEx(&info) != 0) {
|
||||||
resp->count = 1;
|
|
||||||
resp->total = info.ullTotalPhys;
|
resp->total = info.ullTotalPhys;
|
||||||
resp->free = info.ullAvailPhys;
|
resp->free = info.ullAvailPhys;
|
||||||
|
snprintf(&resp->gpu_id[0], GPU_ID_LEN, "0");
|
||||||
} else {
|
} else {
|
||||||
resp->err = LOAD_ERR();
|
resp->err = LOAD_ERR();
|
||||||
}
|
}
|
||||||
@@ -27,9 +27,9 @@ void cpu_check_ram(mem_info_t *resp) {
|
|||||||
if (sysinfo(&info) != 0) {
|
if (sysinfo(&info) != 0) {
|
||||||
resp->err = strdup(strerror(errno));
|
resp->err = strdup(strerror(errno));
|
||||||
} else {
|
} else {
|
||||||
resp->count = 1;
|
|
||||||
resp->total = info.totalram * info.mem_unit;
|
resp->total = info.totalram * info.mem_unit;
|
||||||
resp->free = info.freeram * info.mem_unit;
|
resp->free = info.freeram * info.mem_unit;
|
||||||
|
snprintf(&resp->gpu_id[0], GPU_ID_LEN, "0");
|
||||||
}
|
}
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -6,6 +6,7 @@
|
|||||||
void cudart_init(char *cudart_lib_path, cudart_init_resp_t *resp) {
|
void cudart_init(char *cudart_lib_path, cudart_init_resp_t *resp) {
|
||||||
cudartReturn_t ret;
|
cudartReturn_t ret;
|
||||||
resp->err = NULL;
|
resp->err = NULL;
|
||||||
|
resp->num_devices = 0;
|
||||||
const int buflen = 256;
|
const int buflen = 256;
|
||||||
char buf[buflen + 1];
|
char buf[buflen + 1];
|
||||||
int i;
|
int i;
|
||||||
@@ -21,6 +22,7 @@ void cudart_init(char *cudart_lib_path, cudart_init_resp_t *resp) {
|
|||||||
{"cudaGetDeviceCount", (void *)&resp->ch.cudaGetDeviceCount},
|
{"cudaGetDeviceCount", (void *)&resp->ch.cudaGetDeviceCount},
|
||||||
{"cudaDeviceGetAttribute", (void *)&resp->ch.cudaDeviceGetAttribute},
|
{"cudaDeviceGetAttribute", (void *)&resp->ch.cudaDeviceGetAttribute},
|
||||||
{"cudaDriverGetVersion", (void *)&resp->ch.cudaDriverGetVersion},
|
{"cudaDriverGetVersion", (void *)&resp->ch.cudaDriverGetVersion},
|
||||||
|
{"cudaGetDeviceProperties", (void *)&resp->ch.cudaGetDeviceProperties},
|
||||||
{NULL, NULL},
|
{NULL, NULL},
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -36,13 +38,7 @@ void cudart_init(char *cudart_lib_path, cudart_init_resp_t *resp) {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO once we've squashed the remaining corner cases remove this log
|
|
||||||
LOG(resp->ch.verbose, "wiring cudart library functions in %s\n", cudart_lib_path);
|
|
||||||
|
|
||||||
for (i = 0; l[i].s != NULL; i++) {
|
for (i = 0; l[i].s != NULL; i++) {
|
||||||
// TODO once we've squashed the remaining corner cases remove this log
|
|
||||||
LOG(resp->ch.verbose, "dlsym: %s\n", l[i].s);
|
|
||||||
|
|
||||||
*l[i].p = LOAD_SYMBOL(resp->ch.handle, l[i].s);
|
*l[i].p = LOAD_SYMBOL(resp->ch.handle, l[i].s);
|
||||||
if (!l[i].p) {
|
if (!l[i].p) {
|
||||||
char *msg = LOAD_ERR();
|
char *msg = LOAD_ERR();
|
||||||
@@ -63,7 +59,7 @@ void cudart_init(char *cudart_lib_path, cudart_init_resp_t *resp) {
|
|||||||
UNLOAD_LIBRARY(resp->ch.handle);
|
UNLOAD_LIBRARY(resp->ch.handle);
|
||||||
resp->ch.handle = NULL;
|
resp->ch.handle = NULL;
|
||||||
if (ret == CUDA_ERROR_INSUFFICIENT_DRIVER) {
|
if (ret == CUDA_ERROR_INSUFFICIENT_DRIVER) {
|
||||||
resp->err = strdup("your nvidia driver is too old or missing, please upgrade to run ollama");
|
resp->err = strdup("your nvidia driver is too old or missing. If you have a CUDA GPU please upgrade to run ollama");
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
snprintf(buf, buflen, "cudart init failure: %d", ret);
|
snprintf(buf, buflen, "cudart init failure: %d", ret);
|
||||||
@@ -85,110 +81,95 @@ void cudart_init(char *cudart_lib_path, cudart_init_resp_t *resp) {
|
|||||||
driverVersion.minor = (version - (driverVersion.major * 1000)) / 10;
|
driverVersion.minor = (version - (driverVersion.major * 1000)) / 10;
|
||||||
LOG(resp->ch.verbose, "CUDA driver version: %d-%d\n", driverVersion.major, driverVersion.minor);
|
LOG(resp->ch.verbose, "CUDA driver version: %d-%d\n", driverVersion.major, driverVersion.minor);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ret = (*resp->ch.cudaGetDeviceCount)(&resp->num_devices);
|
||||||
|
if (ret != CUDART_SUCCESS) {
|
||||||
|
LOG(resp->ch.verbose, "cudaGetDeviceCount err: %d\n", ret);
|
||||||
|
UNLOAD_LIBRARY(resp->ch.handle);
|
||||||
|
resp->ch.handle = NULL;
|
||||||
|
snprintf(buf, buflen, "unable to get device count: %d", ret);
|
||||||
|
resp->err = strdup(buf);
|
||||||
|
return;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void cudart_check_vram(cudart_handle_t h, mem_info_t *resp) {
|
void cudart_check_vram(cudart_handle_t h, int i, mem_info_t *resp) {
|
||||||
resp->err = NULL;
|
resp->err = NULL;
|
||||||
cudartMemory_t memInfo = {0,0,0};
|
cudartMemory_t memInfo = {0,0,0};
|
||||||
cudartReturn_t ret;
|
cudartReturn_t ret;
|
||||||
const int buflen = 256;
|
const int buflen = 256;
|
||||||
char buf[buflen + 1];
|
char buf[buflen + 1];
|
||||||
int i;
|
|
||||||
|
|
||||||
if (h.handle == NULL) {
|
if (h.handle == NULL) {
|
||||||
resp->err = strdup("cudart handle isn't initialized");
|
resp->err = strdup("cudart handle isn't initialized");
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
// cudaGetDeviceCount takes int type, resp-> count is uint
|
ret = (*h.cudaSetDevice)(i);
|
||||||
int deviceCount;
|
|
||||||
ret = (*h.cudaGetDeviceCount)(&deviceCount);
|
|
||||||
if (ret != CUDART_SUCCESS) {
|
if (ret != CUDART_SUCCESS) {
|
||||||
snprintf(buf, buflen, "unable to get device count: %d", ret);
|
snprintf(buf, buflen, "cudart device failed to initialize");
|
||||||
resp->err = strdup(buf);
|
resp->err = strdup(buf);
|
||||||
return;
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
cudaDeviceProp_t props;
|
||||||
|
ret = (*h.cudaGetDeviceProperties)(&props, i);
|
||||||
|
if (ret != CUDART_SUCCESS) {
|
||||||
|
LOG(h.verbose, "[%d] device properties lookup failure: %d\n", i, ret);
|
||||||
|
snprintf(&resp->gpu_id[0], GPU_ID_LEN, "%d", i);
|
||||||
|
resp->major = 0;
|
||||||
|
resp->minor = 0;
|
||||||
} else {
|
} else {
|
||||||
resp->count = (unsigned int)deviceCount;
|
int allNull = 1;
|
||||||
}
|
for (int j = 0; j < 16; j++) {
|
||||||
|
if (props.uuid.bytes[j] != 0) {
|
||||||
resp->total = 0;
|
allNull = 0;
|
||||||
resp->free = 0;
|
break;
|
||||||
for (i = 0; i < resp-> count; i++) {
|
}
|
||||||
ret = (*h.cudaSetDevice)(i);
|
|
||||||
if (ret != CUDART_SUCCESS) {
|
|
||||||
snprintf(buf, buflen, "cudart device failed to initialize");
|
|
||||||
resp->err = strdup(buf);
|
|
||||||
return;
|
|
||||||
}
|
}
|
||||||
ret = (*h.cudaMemGetInfo)(&memInfo.free, &memInfo.total);
|
if (allNull != 0) {
|
||||||
if (ret != CUDART_SUCCESS) {
|
snprintf(&resp->gpu_id[0], GPU_ID_LEN, "%d", i);
|
||||||
snprintf(buf, buflen, "cudart device memory info lookup failure %d", ret);
|
} else {
|
||||||
resp->err = strdup(buf);
|
// GPU-d110a105-ac29-1d54-7b49-9c90440f215b
|
||||||
return;
|
snprintf(&resp->gpu_id[0], GPU_ID_LEN,
|
||||||
|
"GPU-%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
|
||||||
|
props.uuid.bytes[0],
|
||||||
|
props.uuid.bytes[1],
|
||||||
|
props.uuid.bytes[2],
|
||||||
|
props.uuid.bytes[3],
|
||||||
|
props.uuid.bytes[4],
|
||||||
|
props.uuid.bytes[5],
|
||||||
|
props.uuid.bytes[6],
|
||||||
|
props.uuid.bytes[7],
|
||||||
|
props.uuid.bytes[8],
|
||||||
|
props.uuid.bytes[9],
|
||||||
|
props.uuid.bytes[10],
|
||||||
|
props.uuid.bytes[11],
|
||||||
|
props.uuid.bytes[12],
|
||||||
|
props.uuid.bytes[13],
|
||||||
|
props.uuid.bytes[14],
|
||||||
|
props.uuid.bytes[15]
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
resp->major = props.major;
|
||||||
|
resp->minor = props.minor;
|
||||||
|
|
||||||
LOG(h.verbose, "[%d] CUDA totalMem %lu\n", i, memInfo.total);
|
// TODO add other useful properties from props
|
||||||
LOG(h.verbose, "[%d] CUDA freeMem %lu\n", i, memInfo.free);
|
|
||||||
|
|
||||||
resp->total += memInfo.total;
|
|
||||||
resp->free += memInfo.free;
|
|
||||||
}
|
}
|
||||||
}
|
ret = (*h.cudaMemGetInfo)(&memInfo.free, &memInfo.total);
|
||||||
|
|
||||||
void cudart_compute_capability(cudart_handle_t h, cudart_compute_capability_t *resp) {
|
|
||||||
resp->err = NULL;
|
|
||||||
resp->major = 0;
|
|
||||||
resp->minor = 0;
|
|
||||||
int major = 0;
|
|
||||||
int minor = 0;
|
|
||||||
cudartReturn_t ret;
|
|
||||||
const int buflen = 256;
|
|
||||||
char buf[buflen + 1];
|
|
||||||
int i;
|
|
||||||
|
|
||||||
if (h.handle == NULL) {
|
|
||||||
resp->err = strdup("cudart handle not initialized");
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
int devices;
|
|
||||||
ret = (*h.cudaGetDeviceCount)(&devices);
|
|
||||||
if (ret != CUDART_SUCCESS) {
|
if (ret != CUDART_SUCCESS) {
|
||||||
snprintf(buf, buflen, "unable to get cudart device count: %d", ret);
|
snprintf(buf, buflen, "cudart device memory info lookup failure %d", ret);
|
||||||
resp->err = strdup(buf);
|
resp->err = strdup(buf);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
for (i = 0; i < devices; i++) {
|
resp->total = memInfo.total;
|
||||||
ret = (*h.cudaSetDevice)(i);
|
resp->free = memInfo.free;
|
||||||
if (ret != CUDART_SUCCESS) {
|
|
||||||
snprintf(buf, buflen, "cudart device failed to initialize");
|
|
||||||
resp->err = strdup(buf);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
ret = (*h.cudaDeviceGetAttribute)(&major, cudartDevAttrComputeCapabilityMajor, i);
|
LOG(h.verbose, "[%s] CUDA totalMem %lu\n", resp->gpu_id, resp->total);
|
||||||
if (ret != CUDART_SUCCESS) {
|
LOG(h.verbose, "[%s] CUDA freeMem %lu\n", resp->gpu_id, resp->free);
|
||||||
snprintf(buf, buflen, "device compute capability lookup failure %d: %d", i, ret);
|
LOG(h.verbose, "[%s] Compute Capability %d.%d\n", resp->gpu_id, resp->major, resp->minor);
|
||||||
resp->err = strdup(buf);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
ret = (*h.cudaDeviceGetAttribute)(&minor, cudartDevAttrComputeCapabilityMinor, i);
|
|
||||||
if (ret != CUDART_SUCCESS) {
|
|
||||||
snprintf(buf, buflen, "device compute capability lookup failure %d: %d", i, ret);
|
|
||||||
resp->err = strdup(buf);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Report the lowest major.minor we detect as that limits our compatibility
|
|
||||||
if (resp->major == 0 || resp->major > major ) {
|
|
||||||
resp->major = major;
|
|
||||||
resp->minor = minor;
|
|
||||||
} else if ( resp->major == major && resp->minor > minor ) {
|
|
||||||
resp->minor = minor;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void cudart_release(cudart_handle_t h) {
|
void cudart_release(cudart_handle_t h) {
|
||||||
|
|||||||
@@ -6,14 +6,20 @@
|
|||||||
// Just enough typedef's to dlopen/dlsym for memory information
|
// Just enough typedef's to dlopen/dlsym for memory information
|
||||||
typedef enum cudartReturn_enum {
|
typedef enum cudartReturn_enum {
|
||||||
CUDART_SUCCESS = 0,
|
CUDART_SUCCESS = 0,
|
||||||
CUDART_UNSUPPORTED = 1,
|
CUDART_ERROR_INVALID_VALUE = 1,
|
||||||
CUDA_ERROR_INSUFFICIENT_DRIVER = 35,
|
CUDART_ERROR_MEMORY_ALLOCATION = 2,
|
||||||
|
CUDART_ERROR_INSUFFICIENT_DRIVER = 35,
|
||||||
// Other values omitted for now...
|
// Other values omitted for now...
|
||||||
} cudartReturn_t;
|
} cudartReturn_t;
|
||||||
|
|
||||||
typedef enum cudartDeviceAttr_enum {
|
typedef enum cudartDeviceAttr_enum {
|
||||||
cudartDevAttrComputeCapabilityMajor = 75,
|
cudartDevAttrComputeCapabilityMajor = 75,
|
||||||
cudartDevAttrComputeCapabilityMinor = 76,
|
cudartDevAttrComputeCapabilityMinor = 76,
|
||||||
|
|
||||||
|
// TODO - not yet wired up but may be useful for Jetson or other
|
||||||
|
// integrated GPU scenarios with shared memory
|
||||||
|
cudaDevAttrIntegrated = 18
|
||||||
|
|
||||||
} cudartDeviceAttr_t;
|
} cudartDeviceAttr_t;
|
||||||
|
|
||||||
typedef void *cudartDevice_t; // Opaque is sufficient
|
typedef void *cudartDevice_t; // Opaque is sufficient
|
||||||
@@ -28,6 +34,92 @@ typedef struct cudartDriverVersion {
|
|||||||
int minor;
|
int minor;
|
||||||
} cudartDriverVersion_t;
|
} cudartDriverVersion_t;
|
||||||
|
|
||||||
|
typedef struct cudaUUID {
|
||||||
|
unsigned char bytes[16];
|
||||||
|
} cudaUUID_t;
|
||||||
|
typedef struct cudaDeviceProp {
|
||||||
|
char name[256]; /**< ASCII string identifying device */
|
||||||
|
cudaUUID_t uuid; /**< 16-byte unique identifier */
|
||||||
|
char luid[8]; /**< 8-byte locally unique identifier. Value is undefined on TCC and non-Windows platforms */
|
||||||
|
unsigned int luidDeviceNodeMask; /**< LUID device node mask. Value is undefined on TCC and non-Windows platforms */
|
||||||
|
size_t totalGlobalMem; /**< Global memory available on device in bytes */
|
||||||
|
size_t sharedMemPerBlock; /**< Shared memory available per block in bytes */
|
||||||
|
int regsPerBlock; /**< 32-bit registers available per block */
|
||||||
|
int warpSize; /**< Warp size in threads */
|
||||||
|
size_t memPitch; /**< Maximum pitch in bytes allowed by memory copies */
|
||||||
|
int maxThreadsPerBlock; /**< Maximum number of threads per block */
|
||||||
|
int maxThreadsDim[3]; /**< Maximum size of each dimension of a block */
|
||||||
|
int maxGridSize[3]; /**< Maximum size of each dimension of a grid */
|
||||||
|
int clockRate; /**< Clock frequency in kilohertz */
|
||||||
|
size_t totalConstMem; /**< Constant memory available on device in bytes */
|
||||||
|
int major; /**< Major compute capability */
|
||||||
|
int minor; /**< Minor compute capability */
|
||||||
|
size_t textureAlignment; /**< Alignment requirement for textures */
|
||||||
|
size_t texturePitchAlignment; /**< Pitch alignment requirement for texture references bound to pitched memory */
|
||||||
|
int deviceOverlap; /**< Device can concurrently copy memory and execute a kernel. Deprecated. Use instead asyncEngineCount. */
|
||||||
|
int multiProcessorCount; /**< Number of multiprocessors on device */
|
||||||
|
int kernelExecTimeoutEnabled; /**< Specified whether there is a run time limit on kernels */
|
||||||
|
int integrated; /**< Device is integrated as opposed to discrete */
|
||||||
|
int canMapHostMemory; /**< Device can map host memory with cudaHostAlloc/cudaHostGetDevicePointer */
|
||||||
|
int computeMode; /**< Compute mode (See ::cudaComputeMode) */
|
||||||
|
int maxTexture1D; /**< Maximum 1D texture size */
|
||||||
|
int maxTexture1DMipmap; /**< Maximum 1D mipmapped texture size */
|
||||||
|
int maxTexture1DLinear; /**< Deprecated, do not use. Use cudaDeviceGetTexture1DLinearMaxWidth() or cuDeviceGetTexture1DLinearMaxWidth() instead. */
|
||||||
|
int maxTexture2D[2]; /**< Maximum 2D texture dimensions */
|
||||||
|
int maxTexture2DMipmap[2]; /**< Maximum 2D mipmapped texture dimensions */
|
||||||
|
int maxTexture2DLinear[3]; /**< Maximum dimensions (width, height, pitch) for 2D textures bound to pitched memory */
|
||||||
|
int maxTexture2DGather[2]; /**< Maximum 2D texture dimensions if texture gather operations have to be performed */
|
||||||
|
int maxTexture3D[3]; /**< Maximum 3D texture dimensions */
|
||||||
|
int maxTexture3DAlt[3]; /**< Maximum alternate 3D texture dimensions */
|
||||||
|
int maxTextureCubemap; /**< Maximum Cubemap texture dimensions */
|
||||||
|
int maxTexture1DLayered[2]; /**< Maximum 1D layered texture dimensions */
|
||||||
|
int maxTexture2DLayered[3]; /**< Maximum 2D layered texture dimensions */
|
||||||
|
int maxTextureCubemapLayered[2];/**< Maximum Cubemap layered texture dimensions */
|
||||||
|
int maxSurface1D; /**< Maximum 1D surface size */
|
||||||
|
int maxSurface2D[2]; /**< Maximum 2D surface dimensions */
|
||||||
|
int maxSurface3D[3]; /**< Maximum 3D surface dimensions */
|
||||||
|
int maxSurface1DLayered[2]; /**< Maximum 1D layered surface dimensions */
|
||||||
|
int maxSurface2DLayered[3]; /**< Maximum 2D layered surface dimensions */
|
||||||
|
int maxSurfaceCubemap; /**< Maximum Cubemap surface dimensions */
|
||||||
|
int maxSurfaceCubemapLayered[2];/**< Maximum Cubemap layered surface dimensions */
|
||||||
|
size_t surfaceAlignment; /**< Alignment requirements for surfaces */
|
||||||
|
int concurrentKernels; /**< Device can possibly execute multiple kernels concurrently */
|
||||||
|
int ECCEnabled; /**< Device has ECC support enabled */
|
||||||
|
int pciBusID; /**< PCI bus ID of the device */
|
||||||
|
int pciDeviceID; /**< PCI device ID of the device */
|
||||||
|
int pciDomainID; /**< PCI domain ID of the device */
|
||||||
|
int tccDriver; /**< 1 if device is a Tesla device using TCC driver, 0 otherwise */
|
||||||
|
int asyncEngineCount; /**< Number of asynchronous engines */
|
||||||
|
int unifiedAddressing; /**< Device shares a unified address space with the host */
|
||||||
|
int memoryClockRate; /**< Peak memory clock frequency in kilohertz */
|
||||||
|
int memoryBusWidth; /**< Global memory bus width in bits */
|
||||||
|
int l2CacheSize; /**< Size of L2 cache in bytes */
|
||||||
|
int persistingL2CacheMaxSize; /**< Device's maximum l2 persisting lines capacity setting in bytes */
|
||||||
|
int maxThreadsPerMultiProcessor;/**< Maximum resident threads per multiprocessor */
|
||||||
|
int streamPrioritiesSupported; /**< Device supports stream priorities */
|
||||||
|
int globalL1CacheSupported; /**< Device supports caching globals in L1 */
|
||||||
|
int localL1CacheSupported; /**< Device supports caching locals in L1 */
|
||||||
|
size_t sharedMemPerMultiprocessor; /**< Shared memory available per multiprocessor in bytes */
|
||||||
|
int regsPerMultiprocessor; /**< 32-bit registers available per multiprocessor */
|
||||||
|
int managedMemory; /**< Device supports allocating managed memory on this system */
|
||||||
|
int isMultiGpuBoard; /**< Device is on a multi-GPU board */
|
||||||
|
int multiGpuBoardGroupID; /**< Unique identifier for a group of devices on the same multi-GPU board */
|
||||||
|
int hostNativeAtomicSupported; /**< Link between the device and the host supports native atomic operations */
|
||||||
|
int singleToDoublePrecisionPerfRatio; /**< Ratio of single precision performance (in floating-point operations per second) to double precision performance */
|
||||||
|
int pageableMemoryAccess; /**< Device supports coherently accessing pageable memory without calling cudaHostRegister on it */
|
||||||
|
int concurrentManagedAccess; /**< Device can coherently access managed memory concurrently with the CPU */
|
||||||
|
int computePreemptionSupported; /**< Device supports Compute Preemption */
|
||||||
|
int canUseHostPointerForRegisteredMem; /**< Device can access host registered memory at the same virtual address as the CPU */
|
||||||
|
int cooperativeLaunch; /**< Device supports launching cooperative kernels via ::cudaLaunchCooperativeKernel */
|
||||||
|
int cooperativeMultiDeviceLaunch; /**< Deprecated, cudaLaunchCooperativeKernelMultiDevice is deprecated. */
|
||||||
|
size_t sharedMemPerBlockOptin; /**< Per device maximum shared memory per block usable by special opt in */
|
||||||
|
int pageableMemoryAccessUsesHostPageTables; /**< Device accesses pageable memory via the host's page tables */
|
||||||
|
int directManagedMemAccessFromHost; /**< Host can directly access managed memory on the device without migration. */
|
||||||
|
int maxBlocksPerMultiProcessor; /**< Maximum number of resident blocks per multiprocessor */
|
||||||
|
int accessPolicyMaxWindowSize; /**< The maximum value of ::cudaAccessPolicyWindow::num_bytes. */
|
||||||
|
size_t reservedSharedMemPerBlock; /**< Shared memory reserved by CUDA driver per block in bytes */
|
||||||
|
} cudaDeviceProp_t;
|
||||||
|
|
||||||
typedef struct cudart_handle {
|
typedef struct cudart_handle {
|
||||||
void *handle;
|
void *handle;
|
||||||
uint16_t verbose;
|
uint16_t verbose;
|
||||||
@@ -38,23 +130,17 @@ typedef struct cudart_handle {
|
|||||||
cudartReturn_t (*cudaGetDeviceCount)(int *);
|
cudartReturn_t (*cudaGetDeviceCount)(int *);
|
||||||
cudartReturn_t (*cudaDeviceGetAttribute)(int* value, cudartDeviceAttr_t attr, int device);
|
cudartReturn_t (*cudaDeviceGetAttribute)(int* value, cudartDeviceAttr_t attr, int device);
|
||||||
cudartReturn_t (*cudaDriverGetVersion) (int *driverVersion);
|
cudartReturn_t (*cudaDriverGetVersion) (int *driverVersion);
|
||||||
|
cudartReturn_t (*cudaGetDeviceProperties) (cudaDeviceProp_t* prop, int device);
|
||||||
} cudart_handle_t;
|
} cudart_handle_t;
|
||||||
|
|
||||||
typedef struct cudart_init_resp {
|
typedef struct cudart_init_resp {
|
||||||
char *err; // If err is non-null handle is invalid
|
char *err; // If err is non-null handle is invalid
|
||||||
cudart_handle_t ch;
|
cudart_handle_t ch;
|
||||||
|
int num_devices;
|
||||||
} cudart_init_resp_t;
|
} cudart_init_resp_t;
|
||||||
|
|
||||||
typedef struct cudart_compute_capability {
|
|
||||||
char *err;
|
|
||||||
int major;
|
|
||||||
int minor;
|
|
||||||
} cudart_compute_capability_t;
|
|
||||||
|
|
||||||
|
|
||||||
void cudart_init(char *cudart_lib_path, cudart_init_resp_t *resp);
|
void cudart_init(char *cudart_lib_path, cudart_init_resp_t *resp);
|
||||||
void cudart_check_vram(cudart_handle_t ch, mem_info_t *resp);
|
void cudart_check_vram(cudart_handle_t ch, int device_id, mem_info_t *resp);
|
||||||
void cudart_compute_capability(cudart_handle_t th, cudart_compute_capability_t *cc);
|
|
||||||
void cudart_release(cudart_handle_t ch);
|
void cudart_release(cudart_handle_t ch);
|
||||||
|
|
||||||
#endif // __GPU_INFO_CUDART_H__
|
#endif // __GPU_INFO_CUDART_H__
|
||||||
|
|||||||
207
gpu/gpu_info_nvcuda.c
Normal file
207
gpu/gpu_info_nvcuda.c
Normal file
@@ -0,0 +1,207 @@
|
|||||||
|
#ifndef __APPLE__ // TODO - maybe consider nvidia support on intel macs?
|
||||||
|
|
||||||
|
#include <string.h>
|
||||||
|
#include "gpu_info_nvcuda.h"
|
||||||
|
|
||||||
|
void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp) {
|
||||||
|
CUresult ret;
|
||||||
|
resp->err = NULL;
|
||||||
|
resp->num_devices = 0;
|
||||||
|
const int buflen = 256;
|
||||||
|
char buf[buflen + 1];
|
||||||
|
int i;
|
||||||
|
|
||||||
|
struct lookup {
|
||||||
|
char *s;
|
||||||
|
void **p;
|
||||||
|
} l[] = {
|
||||||
|
|
||||||
|
{"cuInit", (void *)&resp->ch.cuInit},
|
||||||
|
{"cuDriverGetVersion", (void *)&resp->ch.cuDriverGetVersion},
|
||||||
|
{"cuDeviceGetCount", (void *)&resp->ch.cuDeviceGetCount},
|
||||||
|
{"cuDeviceGet", (void *)&resp->ch.cuDeviceGet},
|
||||||
|
{"cuDeviceGetAttribute", (void *)&resp->ch.cuDeviceGetAttribute},
|
||||||
|
{"cuDeviceGetUuid", (void *)&resp->ch.cuDeviceGetUuid},
|
||||||
|
{"cuDeviceGetName", (void *)&resp->ch.cuDeviceGetName},
|
||||||
|
{"cuCtxCreate_v3", (void *)&resp->ch.cuCtxCreate_v3},
|
||||||
|
{"cuMemGetInfo_v2", (void *)&resp->ch.cuMemGetInfo_v2},
|
||||||
|
{"cuCtxDestroy", (void *)&resp->ch.cuCtxDestroy},
|
||||||
|
{NULL, NULL},
|
||||||
|
};
|
||||||
|
|
||||||
|
resp->ch.handle = LOAD_LIBRARY(nvcuda_lib_path, RTLD_LAZY);
|
||||||
|
if (!resp->ch.handle) {
|
||||||
|
char *msg = LOAD_ERR();
|
||||||
|
LOG(resp->ch.verbose, "library %s load err: %s\n", nvcuda_lib_path, msg);
|
||||||
|
snprintf(buf, buflen,
|
||||||
|
"Unable to load %s library to query for Nvidia GPUs: %s",
|
||||||
|
nvcuda_lib_path, msg);
|
||||||
|
free(msg);
|
||||||
|
resp->err = strdup(buf);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
for (i = 0; l[i].s != NULL; i++) {
|
||||||
|
*l[i].p = LOAD_SYMBOL(resp->ch.handle, l[i].s);
|
||||||
|
if (!*l[i].p) {
|
||||||
|
char *msg = LOAD_ERR();
|
||||||
|
LOG(resp->ch.verbose, "dlerr: %s\n", msg);
|
||||||
|
UNLOAD_LIBRARY(resp->ch.handle);
|
||||||
|
resp->ch.handle = NULL;
|
||||||
|
snprintf(buf, buflen, "symbol lookup for %s failed: %s", l[i].s,
|
||||||
|
msg);
|
||||||
|
free(msg);
|
||||||
|
resp->err = strdup(buf);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ret = (*resp->ch.cuInit)(0);
|
||||||
|
if (ret != CUDA_SUCCESS) {
|
||||||
|
LOG(resp->ch.verbose, "cuInit err: %d\n", ret);
|
||||||
|
UNLOAD_LIBRARY(resp->ch.handle);
|
||||||
|
resp->ch.handle = NULL;
|
||||||
|
if (ret == CUDA_ERROR_INSUFFICIENT_DRIVER) {
|
||||||
|
resp->err = strdup("your nvidia driver is too old or missing. If you have a CUDA GPU please upgrade to run ollama");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
snprintf(buf, buflen, "nvcuda init failure: %d", ret);
|
||||||
|
resp->err = strdup(buf);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
int version = 0;
|
||||||
|
resp->ch.driver_major = 0;
|
||||||
|
resp->ch.driver_minor = 0;
|
||||||
|
|
||||||
|
// Report driver version if we're in verbose mode, ignore errors
|
||||||
|
ret = (*resp->ch.cuDriverGetVersion)(&version);
|
||||||
|
if (ret != CUDA_SUCCESS) {
|
||||||
|
LOG(resp->ch.verbose, "cuDriverGetVersion failed: %d\n", ret);
|
||||||
|
} else {
|
||||||
|
resp->ch.driver_major = version / 1000;
|
||||||
|
resp->ch.driver_minor = (version - (resp->ch.driver_major * 1000)) / 10;
|
||||||
|
LOG(resp->ch.verbose, "CUDA driver version: %d.%d\n", resp->ch.driver_major, resp->ch.driver_minor);
|
||||||
|
}
|
||||||
|
|
||||||
|
ret = (*resp->ch.cuDeviceGetCount)(&resp->num_devices);
|
||||||
|
if (ret != CUDA_SUCCESS) {
|
||||||
|
LOG(resp->ch.verbose, "cuDeviceGetCount err: %d\n", ret);
|
||||||
|
UNLOAD_LIBRARY(resp->ch.handle);
|
||||||
|
resp->ch.handle = NULL;
|
||||||
|
snprintf(buf, buflen, "unable to get device count: %d", ret);
|
||||||
|
resp->err = strdup(buf);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const int buflen = 256;
|
||||||
|
void nvcuda_check_vram(nvcuda_handle_t h, int i, mem_info_t *resp) {
|
||||||
|
resp->err = NULL;
|
||||||
|
nvcudaMemory_t memInfo = {0,0};
|
||||||
|
CUresult ret;
|
||||||
|
CUdevice device = -1;
|
||||||
|
CUcontext ctx = NULL;
|
||||||
|
char buf[buflen + 1];
|
||||||
|
CUuuid uuid = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
|
||||||
|
|
||||||
|
if (h.handle == NULL) {
|
||||||
|
resp->err = strdup("nvcuda handle isn't initialized");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
ret = (*h.cuDeviceGet)(&device, i);
|
||||||
|
if (ret != CUDA_SUCCESS) {
|
||||||
|
snprintf(buf, buflen, "nvcuda device failed to initialize");
|
||||||
|
resp->err = strdup(buf);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
int major = 0;
|
||||||
|
int minor = 0;
|
||||||
|
ret = (*h.cuDeviceGetAttribute)(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, device);
|
||||||
|
if (ret != CUDA_SUCCESS) {
|
||||||
|
LOG(h.verbose, "[%d] device major lookup failure: %d\n", i, ret);
|
||||||
|
} else {
|
||||||
|
ret = (*h.cuDeviceGetAttribute)(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, device);
|
||||||
|
if (ret != CUDA_SUCCESS) {
|
||||||
|
LOG(h.verbose, "[%d] device minor lookup failure: %d\n", i, ret);
|
||||||
|
} else {
|
||||||
|
resp->minor = minor;
|
||||||
|
resp->major = major;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ret = (*h.cuDeviceGetUuid)(&uuid, device);
|
||||||
|
if (ret != CUDA_SUCCESS) {
|
||||||
|
LOG(h.verbose, "[%d] device uuid lookup failure: %d\n", i, ret);
|
||||||
|
snprintf(&resp->gpu_id[0], GPU_ID_LEN, "%d", i);
|
||||||
|
} else {
|
||||||
|
// GPU-d110a105-ac29-1d54-7b49-9c90440f215b
|
||||||
|
snprintf(&resp->gpu_id[0], GPU_ID_LEN,
|
||||||
|
"GPU-%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
|
||||||
|
uuid.bytes[0],
|
||||||
|
uuid.bytes[1],
|
||||||
|
uuid.bytes[2],
|
||||||
|
uuid.bytes[3],
|
||||||
|
uuid.bytes[4],
|
||||||
|
uuid.bytes[5],
|
||||||
|
uuid.bytes[6],
|
||||||
|
uuid.bytes[7],
|
||||||
|
uuid.bytes[8],
|
||||||
|
uuid.bytes[9],
|
||||||
|
uuid.bytes[10],
|
||||||
|
uuid.bytes[11],
|
||||||
|
uuid.bytes[12],
|
||||||
|
uuid.bytes[13],
|
||||||
|
uuid.bytes[14],
|
||||||
|
uuid.bytes[15]
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
ret = (*h.cuDeviceGetName)(&resp->gpu_name[0], GPU_NAME_LEN, device);
|
||||||
|
if (ret != CUDA_SUCCESS) {
|
||||||
|
LOG(h.verbose, "[%d] device name lookup failure: %d\n", i, ret);
|
||||||
|
resp->gpu_name[0] = '\0';
|
||||||
|
}
|
||||||
|
|
||||||
|
// To get memory we have to set (and release) a context
|
||||||
|
ret = (*h.cuCtxCreate_v3)(&ctx, NULL, 0, 0, device);
|
||||||
|
if (ret != CUDA_SUCCESS) {
|
||||||
|
snprintf(buf, buflen, "nvcuda failed to get primary device context %d", ret);
|
||||||
|
resp->err = strdup(buf);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
ret = (*h.cuMemGetInfo_v2)(&memInfo.free, &memInfo.total);
|
||||||
|
if (ret != CUDA_SUCCESS) {
|
||||||
|
snprintf(buf, buflen, "nvcuda device memory info lookup failure %d", ret);
|
||||||
|
resp->err = strdup(buf);
|
||||||
|
// Best effort on failure...
|
||||||
|
(*h.cuCtxDestroy)(ctx);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
resp->total = memInfo.total;
|
||||||
|
resp->free = memInfo.free;
|
||||||
|
|
||||||
|
LOG(h.verbose, "[%s] CUDA totalMem %lu mb\n", resp->gpu_id, resp->total / 1024 / 1024);
|
||||||
|
LOG(h.verbose, "[%s] CUDA freeMem %lu mb\n", resp->gpu_id, resp->free / 1024 / 1024);
|
||||||
|
LOG(h.verbose, "[%s] Compute Capability %d.%d\n", resp->gpu_id, resp->major, resp->minor);
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
ret = (*h.cuCtxDestroy)(ctx);
|
||||||
|
if (ret != CUDA_SUCCESS) {
|
||||||
|
LOG(1, "nvcuda failed to release primary device context %d", ret);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void nvcuda_release(nvcuda_handle_t h) {
|
||||||
|
LOG(h.verbose, "releasing nvcuda library\n");
|
||||||
|
UNLOAD_LIBRARY(h.handle);
|
||||||
|
// TODO and other context release logic?
|
||||||
|
h.handle = NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif // __APPLE__
|
||||||
74
gpu/gpu_info_nvcuda.h
Normal file
74
gpu/gpu_info_nvcuda.h
Normal file
@@ -0,0 +1,74 @@
|
|||||||
|
#ifndef __APPLE__
|
||||||
|
#ifndef __GPU_INFO_NVCUDA_H__
|
||||||
|
#define __GPU_INFO_NVCUDA_H__
|
||||||
|
#include "gpu_info.h"
|
||||||
|
|
||||||
|
// Just enough typedef's to dlopen/dlsym for memory information
|
||||||
|
typedef enum cudaError_enum {
|
||||||
|
CUDA_SUCCESS = 0,
|
||||||
|
CUDA_ERROR_INVALID_VALUE = 1,
|
||||||
|
CUDA_ERROR_MEMORY_ALLOCATION = 2,
|
||||||
|
CUDA_ERROR_NOT_INITIALIZED = 3,
|
||||||
|
CUDA_ERROR_INSUFFICIENT_DRIVER = 35,
|
||||||
|
// Other values omitted for now...
|
||||||
|
} CUresult;
|
||||||
|
|
||||||
|
typedef enum CUdevice_attribute_enum {
|
||||||
|
CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR = 75,
|
||||||
|
CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR = 76,
|
||||||
|
|
||||||
|
// TODO - not yet wired up but may be useful for Jetson or other
|
||||||
|
// integrated GPU scenarios with shared memory
|
||||||
|
CU_DEVICE_ATTRIBUTE_INTEGRATED = 18
|
||||||
|
|
||||||
|
} CUdevice_attribute;
|
||||||
|
|
||||||
|
typedef void *nvcudaDevice_t; // Opaque is sufficient
|
||||||
|
typedef struct nvcudaMemory_st {
|
||||||
|
uint64_t total;
|
||||||
|
uint64_t free;
|
||||||
|
} nvcudaMemory_t;
|
||||||
|
|
||||||
|
typedef struct nvcudaDriverVersion {
|
||||||
|
int major;
|
||||||
|
int minor;
|
||||||
|
} nvcudaDriverVersion_t;
|
||||||
|
|
||||||
|
typedef struct CUuuid_st {
|
||||||
|
unsigned char bytes[16];
|
||||||
|
} CUuuid;
|
||||||
|
|
||||||
|
typedef int CUdevice;
|
||||||
|
typedef void* CUcontext;
|
||||||
|
|
||||||
|
typedef struct nvcuda_handle {
|
||||||
|
void *handle;
|
||||||
|
uint16_t verbose;
|
||||||
|
int driver_major;
|
||||||
|
int driver_minor;
|
||||||
|
CUresult (*cuInit)(unsigned int Flags);
|
||||||
|
CUresult (*cuDriverGetVersion)(int *driverVersion);
|
||||||
|
CUresult (*cuDeviceGetCount)(int *);
|
||||||
|
CUresult (*cuDeviceGet)(CUdevice* device, int ordinal);
|
||||||
|
CUresult (*cuDeviceGetAttribute)(int* pi, CUdevice_attribute attrib, CUdevice dev);
|
||||||
|
CUresult (*cuDeviceGetUuid)(CUuuid* uuid, CUdevice dev); // signature compatible with cuDeviceGetUuid_v2
|
||||||
|
CUresult (*cuDeviceGetName)(char *name, int len, CUdevice dev);
|
||||||
|
|
||||||
|
// Context specific aspects
|
||||||
|
CUresult (*cuCtxCreate_v3)(CUcontext* pctx, void *params, int len, unsigned int flags, CUdevice dev);
|
||||||
|
CUresult (*cuMemGetInfo_v2)(uint64_t* free, uint64_t* total);
|
||||||
|
CUresult (*cuCtxDestroy)(CUcontext ctx);
|
||||||
|
} nvcuda_handle_t;
|
||||||
|
|
||||||
|
typedef struct nvcuda_init_resp {
|
||||||
|
char *err; // If err is non-null handle is invalid
|
||||||
|
nvcuda_handle_t ch;
|
||||||
|
int num_devices;
|
||||||
|
} nvcuda_init_resp_t;
|
||||||
|
|
||||||
|
void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp);
|
||||||
|
void nvcuda_check_vram(nvcuda_handle_t ch, int device_id, mem_info_t *resp);
|
||||||
|
void nvcuda_release(nvcuda_handle_t ch);
|
||||||
|
|
||||||
|
#endif // __GPU_INFO_NVCUDA_H__
|
||||||
|
#endif // __APPLE__
|
||||||
@@ -1,221 +0,0 @@
|
|||||||
#ifndef __APPLE__ // TODO - maybe consider nvidia support on intel macs?
|
|
||||||
|
|
||||||
#include <string.h>
|
|
||||||
|
|
||||||
#include "gpu_info_nvml.h"
|
|
||||||
|
|
||||||
void nvml_init(char *nvml_lib_path, nvml_init_resp_t *resp) {
|
|
||||||
nvmlReturn_t ret;
|
|
||||||
resp->err = NULL;
|
|
||||||
const int buflen = 256;
|
|
||||||
char buf[buflen + 1];
|
|
||||||
int i;
|
|
||||||
|
|
||||||
struct lookup {
|
|
||||||
char *s;
|
|
||||||
void **p;
|
|
||||||
} l[] = {
|
|
||||||
{"nvmlInit_v2", (void *)&resp->ch.nvmlInit_v2},
|
|
||||||
{"nvmlShutdown", (void *)&resp->ch.nvmlShutdown},
|
|
||||||
{"nvmlDeviceGetHandleByIndex", (void *)&resp->ch.nvmlDeviceGetHandleByIndex},
|
|
||||||
{"nvmlDeviceGetMemoryInfo", (void *)&resp->ch.nvmlDeviceGetMemoryInfo},
|
|
||||||
{"nvmlDeviceGetCount_v2", (void *)&resp->ch.nvmlDeviceGetCount_v2},
|
|
||||||
{"nvmlDeviceGetCudaComputeCapability", (void *)&resp->ch.nvmlDeviceGetCudaComputeCapability},
|
|
||||||
{"nvmlSystemGetDriverVersion", (void *)&resp->ch.nvmlSystemGetDriverVersion},
|
|
||||||
{"nvmlDeviceGetName", (void *)&resp->ch.nvmlDeviceGetName},
|
|
||||||
{"nvmlDeviceGetSerial", (void *)&resp->ch.nvmlDeviceGetSerial},
|
|
||||||
{"nvmlDeviceGetVbiosVersion", (void *)&resp->ch.nvmlDeviceGetVbiosVersion},
|
|
||||||
{"nvmlDeviceGetBoardPartNumber", (void *)&resp->ch.nvmlDeviceGetBoardPartNumber},
|
|
||||||
{"nvmlDeviceGetBrand", (void *)&resp->ch.nvmlDeviceGetBrand},
|
|
||||||
{NULL, NULL},
|
|
||||||
};
|
|
||||||
|
|
||||||
resp->ch.handle = LOAD_LIBRARY(nvml_lib_path, RTLD_LAZY);
|
|
||||||
if (!resp->ch.handle) {
|
|
||||||
char *msg = LOAD_ERR();
|
|
||||||
LOG(resp->ch.verbose, "library %s load err: %s\n", nvml_lib_path, msg);
|
|
||||||
snprintf(buf, buflen,
|
|
||||||
"Unable to load %s library to query for Nvidia GPUs: %s",
|
|
||||||
nvml_lib_path, msg);
|
|
||||||
free(msg);
|
|
||||||
resp->err = strdup(buf);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO once we've squashed the remaining corner cases remove this log
|
|
||||||
LOG(resp->ch.verbose, "wiring nvidia management library functions in %s\n", nvml_lib_path);
|
|
||||||
|
|
||||||
for (i = 0; l[i].s != NULL; i++) {
|
|
||||||
// TODO once we've squashed the remaining corner cases remove this log
|
|
||||||
LOG(resp->ch.verbose, "dlsym: %s\n", l[i].s);
|
|
||||||
|
|
||||||
*l[i].p = LOAD_SYMBOL(resp->ch.handle, l[i].s);
|
|
||||||
if (!l[i].p) {
|
|
||||||
resp->ch.handle = NULL;
|
|
||||||
char *msg = LOAD_ERR();
|
|
||||||
LOG(resp->ch.verbose, "dlerr: %s\n", msg);
|
|
||||||
UNLOAD_LIBRARY(resp->ch.handle);
|
|
||||||
snprintf(buf, buflen, "symbol lookup for %s failed: %s", l[i].s,
|
|
||||||
msg);
|
|
||||||
free(msg);
|
|
||||||
resp->err = strdup(buf);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ret = (*resp->ch.nvmlInit_v2)();
|
|
||||||
if (ret != NVML_SUCCESS) {
|
|
||||||
LOG(resp->ch.verbose, "nvmlInit_v2 err: %d\n", ret);
|
|
||||||
UNLOAD_LIBRARY(resp->ch.handle);
|
|
||||||
resp->ch.handle = NULL;
|
|
||||||
snprintf(buf, buflen, "nvml vram init failure: %d", ret);
|
|
||||||
resp->err = strdup(buf);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Report driver version if we're in verbose mode, ignore errors
|
|
||||||
ret = (*resp->ch.nvmlSystemGetDriverVersion)(buf, buflen);
|
|
||||||
if (ret != NVML_SUCCESS) {
|
|
||||||
LOG(resp->ch.verbose, "nvmlSystemGetDriverVersion failed: %d\n", ret);
|
|
||||||
} else {
|
|
||||||
LOG(resp->ch.verbose, "CUDA driver version: %s\n", buf);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void nvml_check_vram(nvml_handle_t h, mem_info_t *resp) {
|
|
||||||
resp->err = NULL;
|
|
||||||
nvmlDevice_t device;
|
|
||||||
nvmlMemory_t memInfo = {0};
|
|
||||||
nvmlReturn_t ret;
|
|
||||||
const int buflen = 256;
|
|
||||||
char buf[buflen + 1];
|
|
||||||
int i;
|
|
||||||
|
|
||||||
if (h.handle == NULL) {
|
|
||||||
resp->err = strdup("nvml handle isn't initialized");
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
ret = (*h.nvmlDeviceGetCount_v2)(&resp->count);
|
|
||||||
if (ret != NVML_SUCCESS) {
|
|
||||||
snprintf(buf, buflen, "unable to get device count: %d", ret);
|
|
||||||
resp->err = strdup(buf);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
resp->total = 0;
|
|
||||||
resp->free = 0;
|
|
||||||
for (i = 0; i < resp->count; i++) {
|
|
||||||
ret = (*h.nvmlDeviceGetHandleByIndex)(i, &device);
|
|
||||||
if (ret != NVML_SUCCESS) {
|
|
||||||
snprintf(buf, buflen, "unable to get device handle %d: %d", i, ret);
|
|
||||||
resp->err = strdup(buf);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
ret = (*h.nvmlDeviceGetMemoryInfo)(device, &memInfo);
|
|
||||||
if (ret != NVML_SUCCESS) {
|
|
||||||
snprintf(buf, buflen, "device memory info lookup failure %d: %d", i, ret);
|
|
||||||
resp->err = strdup(buf);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
if (h.verbose) {
|
|
||||||
nvmlBrandType_t brand = 0;
|
|
||||||
// When in verbose mode, report more information about
|
|
||||||
// the card we discover, but don't fail on error
|
|
||||||
ret = (*h.nvmlDeviceGetName)(device, buf, buflen);
|
|
||||||
if (ret != NVML_SUCCESS) {
|
|
||||||
LOG(h.verbose, "nvmlDeviceGetName failed: %d\n", ret);
|
|
||||||
} else {
|
|
||||||
LOG(h.verbose, "[%d] CUDA device name: %s\n", i, buf);
|
|
||||||
}
|
|
||||||
ret = (*h.nvmlDeviceGetBoardPartNumber)(device, buf, buflen);
|
|
||||||
if (ret != NVML_SUCCESS) {
|
|
||||||
LOG(h.verbose, "nvmlDeviceGetBoardPartNumber failed: %d\n", ret);
|
|
||||||
} else {
|
|
||||||
LOG(h.verbose, "[%d] CUDA part number: %s\n", i, buf);
|
|
||||||
}
|
|
||||||
ret = (*h.nvmlDeviceGetSerial)(device, buf, buflen);
|
|
||||||
if (ret != NVML_SUCCESS) {
|
|
||||||
LOG(h.verbose, "nvmlDeviceGetSerial failed: %d\n", ret);
|
|
||||||
} else {
|
|
||||||
LOG(h.verbose, "[%d] CUDA S/N: %s\n", i, buf);
|
|
||||||
}
|
|
||||||
ret = (*h.nvmlDeviceGetVbiosVersion)(device, buf, buflen);
|
|
||||||
if (ret != NVML_SUCCESS) {
|
|
||||||
LOG(h.verbose, "nvmlDeviceGetVbiosVersion failed: %d\n", ret);
|
|
||||||
} else {
|
|
||||||
LOG(h.verbose, "[%d] CUDA vbios version: %s\n", i, buf);
|
|
||||||
}
|
|
||||||
ret = (*h.nvmlDeviceGetBrand)(device, &brand);
|
|
||||||
if (ret != NVML_SUCCESS) {
|
|
||||||
LOG(h.verbose, "nvmlDeviceGetBrand failed: %d\n", ret);
|
|
||||||
} else {
|
|
||||||
LOG(h.verbose, "[%d] CUDA brand: %d\n", i, brand);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
LOG(h.verbose, "[%d] CUDA totalMem %ld\n", i, memInfo.total);
|
|
||||||
LOG(h.verbose, "[%d] CUDA freeMem %ld\n", i, memInfo.free);
|
|
||||||
|
|
||||||
resp->total += memInfo.total;
|
|
||||||
resp->free += memInfo.free;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void nvml_compute_capability(nvml_handle_t h, nvml_compute_capability_t *resp) {
|
|
||||||
resp->err = NULL;
|
|
||||||
resp->major = 0;
|
|
||||||
resp->minor = 0;
|
|
||||||
nvmlDevice_t device;
|
|
||||||
int major = 0;
|
|
||||||
int minor = 0;
|
|
||||||
nvmlReturn_t ret;
|
|
||||||
const int buflen = 256;
|
|
||||||
char buf[buflen + 1];
|
|
||||||
int i;
|
|
||||||
|
|
||||||
if (h.handle == NULL) {
|
|
||||||
resp->err = strdup("nvml handle not initialized");
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
unsigned int devices;
|
|
||||||
ret = (*h.nvmlDeviceGetCount_v2)(&devices);
|
|
||||||
if (ret != NVML_SUCCESS) {
|
|
||||||
snprintf(buf, buflen, "unable to get device count: %d", ret);
|
|
||||||
resp->err = strdup(buf);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
for (i = 0; i < devices; i++) {
|
|
||||||
ret = (*h.nvmlDeviceGetHandleByIndex)(i, &device);
|
|
||||||
if (ret != NVML_SUCCESS) {
|
|
||||||
snprintf(buf, buflen, "unable to get device handle %d: %d", i, ret);
|
|
||||||
resp->err = strdup(buf);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
ret = (*h.nvmlDeviceGetCudaComputeCapability)(device, &major, &minor);
|
|
||||||
if (ret != NVML_SUCCESS) {
|
|
||||||
snprintf(buf, buflen, "device compute capability lookup failure %d: %d", i, ret);
|
|
||||||
resp->err = strdup(buf);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
// Report the lowest major.minor we detect as that limits our compatibility
|
|
||||||
if (resp->major == 0 || resp->major > major ) {
|
|
||||||
resp->major = major;
|
|
||||||
resp->minor = minor;
|
|
||||||
} else if ( resp->major == major && resp->minor > minor ) {
|
|
||||||
resp->minor = minor;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void nvml_release(nvml_handle_t h) {
|
|
||||||
LOG(h.verbose, "releasing nvml library\n");
|
|
||||||
UNLOAD_LIBRARY(h.handle);
|
|
||||||
h.handle = NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif // __APPLE__
|
|
||||||
@@ -1,57 +0,0 @@
|
|||||||
#ifndef __APPLE__
|
|
||||||
#ifndef __GPU_INFO_NVML_H__
|
|
||||||
#define __GPU_INFO_NVML_H__
|
|
||||||
#include "gpu_info.h"
|
|
||||||
|
|
||||||
// Just enough typedef's to dlopen/dlsym for memory information
|
|
||||||
typedef enum nvmlReturn_enum {
|
|
||||||
NVML_SUCCESS = 0,
|
|
||||||
// Other values omitted for now...
|
|
||||||
} nvmlReturn_t;
|
|
||||||
typedef void *nvmlDevice_t; // Opaque is sufficient
|
|
||||||
typedef struct nvmlMemory_st {
|
|
||||||
unsigned long long total;
|
|
||||||
unsigned long long free;
|
|
||||||
unsigned long long used;
|
|
||||||
} nvmlMemory_t;
|
|
||||||
|
|
||||||
typedef enum nvmlBrandType_enum
|
|
||||||
{
|
|
||||||
NVML_BRAND_UNKNOWN = 0,
|
|
||||||
} nvmlBrandType_t;
|
|
||||||
|
|
||||||
typedef struct nvml_handle {
|
|
||||||
void *handle;
|
|
||||||
uint16_t verbose;
|
|
||||||
nvmlReturn_t (*nvmlInit_v2)(void);
|
|
||||||
nvmlReturn_t (*nvmlShutdown)(void);
|
|
||||||
nvmlReturn_t (*nvmlDeviceGetHandleByIndex)(unsigned int, nvmlDevice_t *);
|
|
||||||
nvmlReturn_t (*nvmlDeviceGetMemoryInfo)(nvmlDevice_t, nvmlMemory_t *);
|
|
||||||
nvmlReturn_t (*nvmlDeviceGetCount_v2)(unsigned int *);
|
|
||||||
nvmlReturn_t (*nvmlDeviceGetCudaComputeCapability)(nvmlDevice_t, int* major, int* minor);
|
|
||||||
nvmlReturn_t (*nvmlSystemGetDriverVersion) (char* version, unsigned int length);
|
|
||||||
nvmlReturn_t (*nvmlDeviceGetName) (nvmlDevice_t device, char* name, unsigned int length);
|
|
||||||
nvmlReturn_t (*nvmlDeviceGetSerial) (nvmlDevice_t device, char* serial, unsigned int length);
|
|
||||||
nvmlReturn_t (*nvmlDeviceGetVbiosVersion) (nvmlDevice_t device, char* version, unsigned int length);
|
|
||||||
nvmlReturn_t (*nvmlDeviceGetBoardPartNumber) (nvmlDevice_t device, char* partNumber, unsigned int length);
|
|
||||||
nvmlReturn_t (*nvmlDeviceGetBrand) (nvmlDevice_t device, nvmlBrandType_t* type);
|
|
||||||
} nvml_handle_t;
|
|
||||||
|
|
||||||
typedef struct nvml_init_resp {
|
|
||||||
char *err; // If err is non-null handle is invalid
|
|
||||||
nvml_handle_t ch;
|
|
||||||
} nvml_init_resp_t;
|
|
||||||
|
|
||||||
typedef struct nvml_compute_capability {
|
|
||||||
char *err;
|
|
||||||
int major;
|
|
||||||
int minor;
|
|
||||||
} nvml_compute_capability_t;
|
|
||||||
|
|
||||||
void nvml_init(char *nvml_lib_path, nvml_init_resp_t *resp);
|
|
||||||
void nvml_check_vram(nvml_handle_t ch, mem_info_t *resp);
|
|
||||||
void nvml_compute_capability(nvml_handle_t ch, nvml_compute_capability_t *cc);
|
|
||||||
void nvml_release(nvml_handle_t ch);
|
|
||||||
|
|
||||||
#endif // __GPU_INFO_NVML_H__
|
|
||||||
#endif // __APPLE__
|
|
||||||
214
gpu/gpu_info_oneapi.c
Normal file
214
gpu/gpu_info_oneapi.c
Normal file
@@ -0,0 +1,214 @@
|
|||||||
|
#ifndef __APPLE__
|
||||||
|
|
||||||
|
#include "gpu_info_oneapi.h"
|
||||||
|
|
||||||
|
#include <string.h>
|
||||||
|
|
||||||
|
void oneapi_init(char *oneapi_lib_path, oneapi_init_resp_t *resp)
|
||||||
|
{
|
||||||
|
ze_result_t ret;
|
||||||
|
resp->err = NULL;
|
||||||
|
const int buflen = 256;
|
||||||
|
char buf[buflen + 1];
|
||||||
|
int i;
|
||||||
|
struct lookup
|
||||||
|
{
|
||||||
|
char *s;
|
||||||
|
void **p;
|
||||||
|
} l[] = {
|
||||||
|
{"zesInit", (void *)&resp->oh.zesInit},
|
||||||
|
{"zesDriverGet", (void *)&resp->oh.zesDriverGet},
|
||||||
|
{"zesDeviceGet", (void *)&resp->oh.zesDeviceGet},
|
||||||
|
{"zesDeviceGetProperties", (void *)&resp->oh.zesDeviceGetProperties},
|
||||||
|
{"zesDeviceEnumMemoryModules",
|
||||||
|
(void *)&resp->oh.zesDeviceEnumMemoryModules},
|
||||||
|
{"zesMemoryGetProperties", (void *)&resp->oh.zesMemoryGetProperties},
|
||||||
|
{"zesMemoryGetState", (void *)&resp->oh.zesMemoryGetState},
|
||||||
|
{NULL, NULL},
|
||||||
|
};
|
||||||
|
|
||||||
|
resp->oh.handle = LOAD_LIBRARY(oneapi_lib_path, RTLD_LAZY);
|
||||||
|
if (!resp->oh.handle)
|
||||||
|
{
|
||||||
|
char *msg = LOAD_ERR();
|
||||||
|
snprintf(buf, buflen,
|
||||||
|
"Unable to load %s library to query for Intel GPUs: %s\n",
|
||||||
|
oneapi_lib_path, msg);
|
||||||
|
free(msg);
|
||||||
|
resp->err = strdup(buf);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO once we've squashed the remaining corner cases remove this log
|
||||||
|
LOG(resp->oh.verbose,
|
||||||
|
"wiring Level-Zero management library functions in %s\n",
|
||||||
|
oneapi_lib_path);
|
||||||
|
|
||||||
|
for (i = 0; l[i].s != NULL; i++)
|
||||||
|
{
|
||||||
|
// TODO once we've squashed the remaining corner cases remove this log
|
||||||
|
LOG(resp->oh.verbose, "dlsym: %s\n", l[i].s);
|
||||||
|
|
||||||
|
*l[i].p = LOAD_SYMBOL(resp->oh.handle, l[i].s);
|
||||||
|
if (!l[i].p)
|
||||||
|
{
|
||||||
|
resp->oh.handle = NULL;
|
||||||
|
char *msg = LOAD_ERR();
|
||||||
|
LOG(resp->oh.verbose, "dlerr: %s\n", msg);
|
||||||
|
UNLOAD_LIBRARY(resp->oh.handle);
|
||||||
|
snprintf(buf, buflen, "symbol lookup for %s failed: %s", l[i].s, msg);
|
||||||
|
free(msg);
|
||||||
|
resp->err = strdup(buf);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ret = (*resp->oh.zesInit)(0);
|
||||||
|
if (ret != ZE_RESULT_SUCCESS)
|
||||||
|
{
|
||||||
|
LOG(resp->oh.verbose, "zesInit err: %d\n", ret);
|
||||||
|
UNLOAD_LIBRARY(resp->oh.handle);
|
||||||
|
resp->oh.handle = NULL;
|
||||||
|
snprintf(buf, buflen, "oneapi vram init failure: %d", ret);
|
||||||
|
resp->err = strdup(buf);
|
||||||
|
}
|
||||||
|
|
||||||
|
(*resp->oh.zesDriverGet)(&resp->num_devices, NULL);
|
||||||
|
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
void oneapi_check_vram(oneapi_handle_t h, mem_info_t *resp)
|
||||||
|
{
|
||||||
|
ze_result_t ret;
|
||||||
|
resp->err = NULL;
|
||||||
|
uint64_t totalMem = 0;
|
||||||
|
uint64_t usedMem = 0;
|
||||||
|
const int buflen = 256;
|
||||||
|
char buf[buflen + 1];
|
||||||
|
int i, d, m;
|
||||||
|
|
||||||
|
if (h.handle == NULL)
|
||||||
|
{
|
||||||
|
resp->err = strdup("Level-Zero handle not initialized");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
uint32_t driversCount = 0;
|
||||||
|
ret = (*h.zesDriverGet)(&driversCount, NULL);
|
||||||
|
if (ret != ZE_RESULT_SUCCESS)
|
||||||
|
{
|
||||||
|
snprintf(buf, buflen, "unable to get driver count: %d", ret);
|
||||||
|
resp->err = strdup(buf);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
LOG(h.verbose, "discovered %d Level-Zero drivers\n", driversCount);
|
||||||
|
|
||||||
|
zes_driver_handle_t *allDrivers =
|
||||||
|
malloc(driversCount * sizeof(zes_driver_handle_t));
|
||||||
|
(*h.zesDriverGet)(&driversCount, allDrivers);
|
||||||
|
|
||||||
|
resp->total = 0;
|
||||||
|
resp->free = 0;
|
||||||
|
|
||||||
|
for (d = 0; d < driversCount; d++)
|
||||||
|
{
|
||||||
|
uint32_t deviceCount = 0;
|
||||||
|
ret = (*h.zesDeviceGet)(allDrivers[d], &deviceCount, NULL);
|
||||||
|
if (ret != ZE_RESULT_SUCCESS)
|
||||||
|
{
|
||||||
|
snprintf(buf, buflen, "unable to get device count: %d", ret);
|
||||||
|
resp->err = strdup(buf);
|
||||||
|
free(allDrivers);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
LOG(h.verbose, "discovered %d Level-Zero devices\n", deviceCount);
|
||||||
|
|
||||||
|
zes_device_handle_t *devices =
|
||||||
|
malloc(deviceCount * sizeof(zes_device_handle_t));
|
||||||
|
(*h.zesDeviceGet)(allDrivers[d], &deviceCount, devices);
|
||||||
|
|
||||||
|
for (i = 0; i < deviceCount; i++)
|
||||||
|
{
|
||||||
|
zes_device_ext_properties_t ext_props;
|
||||||
|
ext_props.stype = ZES_STRUCTURE_TYPE_DEVICE_EXT_PROPERTIES;
|
||||||
|
ext_props.pNext = NULL;
|
||||||
|
|
||||||
|
zes_device_properties_t props;
|
||||||
|
props.stype = ZES_STRUCTURE_TYPE_DEVICE_PROPERTIES;
|
||||||
|
props.pNext = &ext_props;
|
||||||
|
|
||||||
|
ret = (*h.zesDeviceGetProperties)(devices[i], &props);
|
||||||
|
if (ret != ZE_RESULT_SUCCESS)
|
||||||
|
{
|
||||||
|
snprintf(buf, buflen, "unable to get device properties: %d", ret);
|
||||||
|
resp->err = strdup(buf);
|
||||||
|
free(allDrivers);
|
||||||
|
free(devices);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (h.verbose)
|
||||||
|
{
|
||||||
|
// When in verbose mode, report more information about
|
||||||
|
// the card we discover.
|
||||||
|
LOG(h.verbose, "[%d] oneAPI device name: %s\n", i,
|
||||||
|
props.modelName);
|
||||||
|
LOG(h.verbose, "[%d] oneAPI brand: %s\n", i,
|
||||||
|
props.brandName);
|
||||||
|
LOG(h.verbose, "[%d] oneAPI vendor: %s\n", i,
|
||||||
|
props.vendorName);
|
||||||
|
LOG(h.verbose, "[%d] oneAPI S/N: %s\n", i,
|
||||||
|
props.serialNumber);
|
||||||
|
LOG(h.verbose, "[%d] oneAPI board number: %s\n", i,
|
||||||
|
props.boardNumber);
|
||||||
|
}
|
||||||
|
|
||||||
|
uint32_t memCount = 0;
|
||||||
|
ret = (*h.zesDeviceEnumMemoryModules)(devices[i], &memCount, NULL);
|
||||||
|
if (ret != ZE_RESULT_SUCCESS)
|
||||||
|
{
|
||||||
|
snprintf(buf, buflen,
|
||||||
|
"unable to enumerate Level-Zero memory modules: %d", ret);
|
||||||
|
resp->err = strdup(buf);
|
||||||
|
free(allDrivers);
|
||||||
|
free(devices);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
LOG(h.verbose, "discovered %d Level-Zero memory modules\n", memCount);
|
||||||
|
|
||||||
|
zes_mem_handle_t *mems = malloc(memCount * sizeof(zes_mem_handle_t));
|
||||||
|
(*h.zesDeviceEnumMemoryModules)(devices[i], &memCount, mems);
|
||||||
|
|
||||||
|
for (m = 0; m < memCount; m++)
|
||||||
|
{
|
||||||
|
zes_mem_state_t state;
|
||||||
|
state.stype = ZES_STRUCTURE_TYPE_MEM_STATE;
|
||||||
|
state.pNext = NULL;
|
||||||
|
ret = (*h.zesMemoryGetState)(mems[m], &state);
|
||||||
|
if (ret != ZE_RESULT_SUCCESS)
|
||||||
|
{
|
||||||
|
snprintf(buf, buflen, "unable to get memory state: %d", ret);
|
||||||
|
resp->err = strdup(buf);
|
||||||
|
free(allDrivers);
|
||||||
|
free(devices);
|
||||||
|
free(mems);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
resp->total += state.size;
|
||||||
|
resp->free += state.free;
|
||||||
|
}
|
||||||
|
|
||||||
|
free(mems);
|
||||||
|
}
|
||||||
|
|
||||||
|
free(devices);
|
||||||
|
}
|
||||||
|
|
||||||
|
free(allDrivers);
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif // __APPLE__
|
||||||
211
gpu/gpu_info_oneapi.h
Normal file
211
gpu/gpu_info_oneapi.h
Normal file
@@ -0,0 +1,211 @@
|
|||||||
|
#ifndef __APPLE__
|
||||||
|
#ifndef __GPU_INFO_ONEAPI_H__
|
||||||
|
#define __GPU_INFO_ONEAPI_H__
|
||||||
|
#include "gpu_info.h"
|
||||||
|
|
||||||
|
#define ZE_MAX_DEVICE_NAME 256
|
||||||
|
#define ZE_MAX_DEVICE_UUID_SIZE 16
|
||||||
|
#define ZES_STRING_PROPERTY_SIZE 64
|
||||||
|
#define ZE_BIT(_i) (1 << _i)
|
||||||
|
|
||||||
|
// Just enough typedef's to dlopen/dlsym for memory information
|
||||||
|
typedef enum ze_result_t
|
||||||
|
{
|
||||||
|
ZE_RESULT_SUCCESS = 0,
|
||||||
|
// Other values omitted for now...
|
||||||
|
} ze_result_t;
|
||||||
|
|
||||||
|
typedef uint8_t ze_bool_t;
|
||||||
|
typedef struct _zes_driver_handle_t *zes_driver_handle_t;
|
||||||
|
typedef struct _zes_device_handle_t *zes_device_handle_t;
|
||||||
|
typedef struct _zes_mem_handle_t *zes_mem_handle_t;
|
||||||
|
|
||||||
|
typedef enum _ze_structure_type_t
|
||||||
|
{
|
||||||
|
ZE_STRUCTURE_TYPE_FORCE_UINT32 = 0x7fffffff
|
||||||
|
} ze_structure_type_t;
|
||||||
|
|
||||||
|
typedef enum _zes_structure_type_t
|
||||||
|
{
|
||||||
|
ZES_STRUCTURE_TYPE_DEVICE_PROPERTIES = 0x1,
|
||||||
|
ZES_STRUCTURE_TYPE_MEM_PROPERTIES = 0xb,
|
||||||
|
ZES_STRUCTURE_TYPE_MEM_STATE = 0x1e,
|
||||||
|
ZES_STRUCTURE_TYPE_DEVICE_EXT_PROPERTIES = 0x2d,
|
||||||
|
ZES_STRUCTURE_TYPE_FORCE_UINT32 = 0x7fffffff
|
||||||
|
} zes_structure_type_t;
|
||||||
|
|
||||||
|
typedef enum _zes_mem_type_t
|
||||||
|
{
|
||||||
|
ZES_MEM_TYPE_FORCE_UINT32 = 0x7fffffff
|
||||||
|
} zes_mem_type_t;
|
||||||
|
|
||||||
|
typedef enum _zes_mem_loc_t
|
||||||
|
{
|
||||||
|
ZES_MEM_LOC_SYSTEM = 0,
|
||||||
|
ZES_MEM_LOC_DEVICE = 1,
|
||||||
|
ZES_MEM_LOC_FORCE_UINT32 = 0x7fffffff
|
||||||
|
} zes_mem_loc_t;
|
||||||
|
|
||||||
|
typedef enum _zes_mem_health_t
|
||||||
|
{
|
||||||
|
ZES_MEM_HEALTH_FORCE_UINT32 = 0x7fffffff
|
||||||
|
} zes_mem_health_t;
|
||||||
|
|
||||||
|
typedef struct _ze_device_uuid_t
|
||||||
|
{
|
||||||
|
uint8_t id[ZE_MAX_DEVICE_UUID_SIZE];
|
||||||
|
} ze_device_uuid_t;
|
||||||
|
|
||||||
|
typedef struct _zes_uuid_t
|
||||||
|
{
|
||||||
|
uint8_t id[ZE_MAX_DEVICE_UUID_SIZE];
|
||||||
|
} zes_uuid_t;
|
||||||
|
|
||||||
|
typedef enum _ze_device_type_t
|
||||||
|
{
|
||||||
|
ZE_DEVICE_TYPE_GPU = 1,
|
||||||
|
ZE_DEVICE_TYPE_CPU = 2,
|
||||||
|
ZE_DEVICE_TYPE_FPGA = 3,
|
||||||
|
ZE_DEVICE_TYPE_MCA = 4,
|
||||||
|
ZE_DEVICE_TYPE_VPU = 5,
|
||||||
|
ZE_DEVICE_TYPE_FORCE_UINT32 = 0x7fffffff
|
||||||
|
} ze_device_type_t;
|
||||||
|
|
||||||
|
typedef enum _zes_device_type_t
|
||||||
|
{
|
||||||
|
ZES_DEVICE_TYPE_GPU = 1,
|
||||||
|
ZES_DEVICE_TYPE_CPU = 2,
|
||||||
|
ZES_DEVICE_TYPE_FPGA = 3,
|
||||||
|
ZES_DEVICE_TYPE_MCA = 4,
|
||||||
|
ZES_DEVICE_TYPE_VPU = 5,
|
||||||
|
ZES_DEVICE_TYPE_FORCE_UINT32 = 0x7fffffff
|
||||||
|
} zes_device_type_t;
|
||||||
|
|
||||||
|
typedef uint32_t ze_device_property_flags_t;
|
||||||
|
typedef enum _ze_device_property_flag_t
|
||||||
|
{
|
||||||
|
ZE_DEVICE_PROPERTY_FLAG_INTEGRATED = ZE_BIT(0),
|
||||||
|
ZE_DEVICE_PROPERTY_FLAG_SUBDEVICE = ZE_BIT(1),
|
||||||
|
ZE_DEVICE_PROPERTY_FLAG_ECC = ZE_BIT(2),
|
||||||
|
ZE_DEVICE_PROPERTY_FLAG_ONDEMANDPAGING = ZE_BIT(3),
|
||||||
|
ZE_DEVICE_PROPERTY_FLAG_FORCE_UINT32 = 0x7fffffff
|
||||||
|
} ze_device_property_flag_t;
|
||||||
|
|
||||||
|
typedef uint32_t zes_device_property_flags_t;
|
||||||
|
typedef enum _zes_device_property_flag_t
|
||||||
|
{
|
||||||
|
ZES_DEVICE_PROPERTY_FLAG_INTEGRATED = ZE_BIT(0),
|
||||||
|
ZES_DEVICE_PROPERTY_FLAG_SUBDEVICE = ZE_BIT(1),
|
||||||
|
ZES_DEVICE_PROPERTY_FLAG_ECC = ZE_BIT(2),
|
||||||
|
ZES_DEVICE_PROPERTY_FLAG_ONDEMANDPAGING = ZE_BIT(3),
|
||||||
|
ZES_DEVICE_PROPERTY_FLAG_FORCE_UINT32 = 0x7fffffff
|
||||||
|
} zes_device_property_flag_t;
|
||||||
|
|
||||||
|
typedef struct _ze_device_properties_t
|
||||||
|
{
|
||||||
|
ze_structure_type_t stype;
|
||||||
|
void *pNext;
|
||||||
|
ze_device_type_t type;
|
||||||
|
uint32_t vendorId;
|
||||||
|
uint32_t deviceId;
|
||||||
|
ze_device_property_flags_t flags;
|
||||||
|
uint32_t subdeviceId;
|
||||||
|
uint32_t coreClockRate;
|
||||||
|
uint64_t maxMemAllocSize;
|
||||||
|
uint32_t maxHardwareContexts;
|
||||||
|
uint32_t maxCommandQueuePriority;
|
||||||
|
uint32_t numThreadsPerEU;
|
||||||
|
uint32_t physicalEUSimdWidth;
|
||||||
|
uint32_t numEUsPerSubslice;
|
||||||
|
uint32_t numSubslicesPerSlice;
|
||||||
|
uint32_t numSlices;
|
||||||
|
uint64_t timerResolution;
|
||||||
|
uint32_t timestampValidBits;
|
||||||
|
uint32_t kernelTimestampValidBits;
|
||||||
|
ze_device_uuid_t uuid;
|
||||||
|
char name[ZE_MAX_DEVICE_NAME];
|
||||||
|
} ze_device_properties_t;
|
||||||
|
|
||||||
|
typedef struct _zes_device_properties_t
|
||||||
|
{
|
||||||
|
zes_structure_type_t stype;
|
||||||
|
void *pNext;
|
||||||
|
ze_device_properties_t core;
|
||||||
|
uint32_t numSubdevices;
|
||||||
|
char serialNumber[ZES_STRING_PROPERTY_SIZE];
|
||||||
|
char boardNumber[ZES_STRING_PROPERTY_SIZE];
|
||||||
|
char brandName[ZES_STRING_PROPERTY_SIZE];
|
||||||
|
char modelName[ZES_STRING_PROPERTY_SIZE];
|
||||||
|
char vendorName[ZES_STRING_PROPERTY_SIZE];
|
||||||
|
char driverVersion[ZES_STRING_PROPERTY_SIZE];
|
||||||
|
} zes_device_properties_t;
|
||||||
|
|
||||||
|
typedef struct _zes_device_ext_properties_t
|
||||||
|
{
|
||||||
|
zes_structure_type_t stype;
|
||||||
|
void *pNext;
|
||||||
|
zes_uuid_t uuid;
|
||||||
|
zes_device_type_t type;
|
||||||
|
zes_device_property_flags_t flags;
|
||||||
|
} zes_device_ext_properties_t;
|
||||||
|
|
||||||
|
typedef struct _zes_mem_properties_t
|
||||||
|
{
|
||||||
|
zes_structure_type_t stype;
|
||||||
|
void *pNext;
|
||||||
|
zes_mem_type_t type;
|
||||||
|
ze_bool_t onSubdevice;
|
||||||
|
uint32_t subdeviceId;
|
||||||
|
zes_mem_loc_t location;
|
||||||
|
uint64_t physicalSize;
|
||||||
|
int32_t busWidth;
|
||||||
|
int32_t numChannels;
|
||||||
|
} zes_mem_properties_t;
|
||||||
|
|
||||||
|
typedef struct _zes_mem_state_t
|
||||||
|
{
|
||||||
|
zes_structure_type_t stype;
|
||||||
|
const void *pNext;
|
||||||
|
zes_mem_health_t health;
|
||||||
|
uint64_t free;
|
||||||
|
uint64_t size;
|
||||||
|
} zes_mem_state_t;
|
||||||
|
|
||||||
|
typedef struct oneapi_handle
|
||||||
|
{
|
||||||
|
void *handle;
|
||||||
|
uint16_t verbose;
|
||||||
|
ze_result_t (*zesInit)(int);
|
||||||
|
ze_result_t (*zesDriverGet)(uint32_t *pCount, zes_driver_handle_t *phDrivers);
|
||||||
|
ze_result_t (*zesDeviceGet)(zes_driver_handle_t hDriver, uint32_t *pCount,
|
||||||
|
zes_device_handle_t *phDevices);
|
||||||
|
ze_result_t (*zesDeviceGetProperties)(zes_device_handle_t hDevice,
|
||||||
|
zes_device_properties_t *pProperties);
|
||||||
|
ze_result_t (*zesDeviceEnumMemoryModules)(zes_device_handle_t hDevice,
|
||||||
|
uint32_t *pCount,
|
||||||
|
zes_mem_handle_t *phMemory);
|
||||||
|
ze_result_t (*zesMemoryGetProperties)(zes_mem_handle_t hMemory,
|
||||||
|
zes_mem_properties_t *pProperties);
|
||||||
|
ze_result_t (*zesMemoryGetState)(zes_mem_handle_t hMemory,
|
||||||
|
zes_mem_state_t *pState);
|
||||||
|
|
||||||
|
} oneapi_handle_t;
|
||||||
|
|
||||||
|
typedef struct oneapi_init_resp
|
||||||
|
{
|
||||||
|
char *err; // If err is non-null handle is invalid
|
||||||
|
int num_devices;
|
||||||
|
oneapi_handle_t oh;
|
||||||
|
} oneapi_init_resp_t;
|
||||||
|
|
||||||
|
typedef struct oneapi_version_resp
|
||||||
|
{
|
||||||
|
ze_result_t status;
|
||||||
|
char *str; // Contains version or error string if status != 0
|
||||||
|
} oneapi_version_resp_t;
|
||||||
|
|
||||||
|
void oneapi_init(char *oneapi_lib_path, oneapi_init_resp_t *resp);
|
||||||
|
void oneapi_check_vram(oneapi_handle_t rh, mem_info_t *resp);
|
||||||
|
|
||||||
|
#endif // __GPU_INFO_INTEL_H__
|
||||||
|
#endif // __APPLE__
|
||||||
21
gpu/gpu_oneapi.go
Normal file
21
gpu/gpu_oneapi.go
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
//go:build linux || windows
|
||||||
|
|
||||||
|
package gpu
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log/slog"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
func oneapiGetVisibleDevicesEnv(gpuInfo []GpuInfo) (string, string) {
|
||||||
|
ids := []string{}
|
||||||
|
for _, info := range gpuInfo {
|
||||||
|
if info.Library != "oneapi" {
|
||||||
|
// TODO shouldn't happen if things are wired correctly...
|
||||||
|
slog.Debug("oneapiGetVisibleDevicesEnv skipping over non-sycl device", "library", info.Library)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
ids = append(ids, info.ID)
|
||||||
|
}
|
||||||
|
return "ONEAPI_DEVICE_SELECTOR", "level_zero:" + strings.Join(ids, ",")
|
||||||
|
}
|
||||||
@@ -9,23 +9,16 @@ import (
|
|||||||
|
|
||||||
func TestBasicGetGPUInfo(t *testing.T) {
|
func TestBasicGetGPUInfo(t *testing.T) {
|
||||||
info := GetGPUInfo()
|
info := GetGPUInfo()
|
||||||
assert.Contains(t, "cuda rocm cpu metal", info.Library)
|
assert.Greater(t, len(info), 0)
|
||||||
|
assert.Contains(t, "cuda rocm cpu metal", info[0].Library)
|
||||||
switch runtime.GOOS {
|
if info[0].Library != "cpu" {
|
||||||
case "darwin":
|
assert.Greater(t, info[0].TotalMemory, uint64(0))
|
||||||
// TODO - remove this once MacOS returns some size for CPU
|
assert.Greater(t, info[0].FreeMemory, uint64(0))
|
||||||
return
|
|
||||||
case "linux", "windows":
|
|
||||||
assert.Greater(t, info.TotalMemory, uint64(0))
|
|
||||||
assert.Greater(t, info.FreeMemory, uint64(0))
|
|
||||||
assert.Greater(t, info.DeviceCount, uint32(0))
|
|
||||||
default:
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestCPUMemInfo(t *testing.T) {
|
func TestCPUMemInfo(t *testing.T) {
|
||||||
info, err := getCPUMem()
|
info, err := GetCPUMem()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
switch runtime.GOOS {
|
switch runtime.GOOS {
|
||||||
case "darwin":
|
case "darwin":
|
||||||
|
|||||||
73
gpu/types.go
73
gpu/types.go
@@ -1,9 +1,15 @@
|
|||||||
package gpu
|
package gpu
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
|
|
||||||
|
"github.com/ollama/ollama/format"
|
||||||
|
)
|
||||||
|
|
||||||
type memInfo struct {
|
type memInfo struct {
|
||||||
TotalMemory uint64 `json:"total_memory,omitempty"`
|
TotalMemory uint64 `json:"total_memory,omitempty"`
|
||||||
FreeMemory uint64 `json:"free_memory,omitempty"`
|
FreeMemory uint64 `json:"free_memory,omitempty"`
|
||||||
DeviceCount uint32 `json:"device_count,omitempty"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Beginning of an `ollama info` command
|
// Beginning of an `ollama info` command
|
||||||
@@ -17,11 +23,66 @@ type GpuInfo struct {
|
|||||||
// MinimumMemory represents the minimum memory required to use the GPU
|
// MinimumMemory represents the minimum memory required to use the GPU
|
||||||
MinimumMemory uint64 `json:"-"`
|
MinimumMemory uint64 `json:"-"`
|
||||||
|
|
||||||
// TODO add other useful attributes about the card here for discovery information
|
// Any extra PATH/LD_LIBRARY_PATH dependencies required for the Library to operate properly
|
||||||
|
DependencyPath string `json:"lib_path,omitempty"`
|
||||||
|
|
||||||
|
// GPU information
|
||||||
|
ID string `json:"gpu_id"` // string to use for selection of this specific GPU
|
||||||
|
Name string `json:"name"` // user friendly name if available
|
||||||
|
Compute string `json:"compute"` // Compute Capability or gfx
|
||||||
|
|
||||||
|
// Driver Information - TODO no need to put this on each GPU
|
||||||
|
DriverMajor int `json:"driver_major,omitempty"`
|
||||||
|
DriverMinor int `json:"driver_minor,omitempty"`
|
||||||
|
|
||||||
|
// TODO other performance capability info to help in scheduling decisions
|
||||||
}
|
}
|
||||||
|
|
||||||
type Version struct {
|
type GpuInfoList []GpuInfo
|
||||||
Major uint
|
|
||||||
Minor uint
|
// Split up the set of gpu info's by Library and variant
|
||||||
Patch uint
|
func (l GpuInfoList) ByLibrary() []GpuInfoList {
|
||||||
|
resp := []GpuInfoList{}
|
||||||
|
libs := []string{}
|
||||||
|
for _, info := range l {
|
||||||
|
found := false
|
||||||
|
requested := info.Library
|
||||||
|
if info.Variant != "" {
|
||||||
|
requested += "_" + info.Variant
|
||||||
|
}
|
||||||
|
for i, lib := range libs {
|
||||||
|
if lib == requested {
|
||||||
|
resp[i] = append(resp[i], info)
|
||||||
|
found = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !found {
|
||||||
|
libs = append(libs, info.Library)
|
||||||
|
resp = append(resp, []GpuInfo{info})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return resp
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Report the GPU information into the log an Info level
|
||||||
|
func (l GpuInfoList) LogDetails() {
|
||||||
|
for _, g := range l {
|
||||||
|
slog.Info("inference compute",
|
||||||
|
"id", g.ID,
|
||||||
|
"library", g.Library,
|
||||||
|
"compute", g.Compute,
|
||||||
|
"driver", fmt.Sprintf("%d.%d", g.DriverMajor, g.DriverMinor),
|
||||||
|
"name", g.Name,
|
||||||
|
"total", format.HumanBytes2(g.TotalMemory),
|
||||||
|
"available", format.HumanBytes2(g.FreeMemory),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort by Free Space
|
||||||
|
type ByFreeMemory []GpuInfo
|
||||||
|
|
||||||
|
func (a ByFreeMemory) Len() int { return len(a) }
|
||||||
|
func (a ByFreeMemory) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||||
|
func (a ByFreeMemory) Less(i, j int) bool { return a[i].FreeMemory < a[j].FreeMemory }
|
||||||
|
|||||||
@@ -4,11 +4,14 @@ package integration
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"net/http"
|
"log/slog"
|
||||||
|
"os"
|
||||||
|
"runtime"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ollama/ollama/api"
|
"github.com/ollama/ollama/api"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestOrcaMiniBlueSky(t *testing.T) {
|
func TestOrcaMiniBlueSky(t *testing.T) {
|
||||||
@@ -24,5 +27,44 @@ func TestOrcaMiniBlueSky(t *testing.T) {
|
|||||||
"seed": 123,
|
"seed": 123,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
GenerateTestHelper(ctx, t, &http.Client{}, req, []string{"rayleigh", "scattering"})
|
GenerateTestHelper(ctx, t, req, []string{"rayleigh", "scattering"})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnicodeModelDir(t *testing.T) {
|
||||||
|
// This is only useful for Windows with utf-16 characters, so skip this test for other platforms
|
||||||
|
if runtime.GOOS != "windows" {
|
||||||
|
t.Skip("Unicode test only applicable to windows")
|
||||||
|
}
|
||||||
|
// Only works for local testing
|
||||||
|
if os.Getenv("OLLAMA_TEST_EXISTING") != "" {
|
||||||
|
t.Skip("TestUnicodeModelDir only works for local testing, skipping")
|
||||||
|
}
|
||||||
|
|
||||||
|
modelDir, err := os.MkdirTemp("", "ollama_埃")
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer os.RemoveAll(modelDir)
|
||||||
|
slog.Info("unicode", "OLLAMA_MODELS", modelDir)
|
||||||
|
|
||||||
|
oldModelsDir := os.Getenv("OLLAMA_MODELS")
|
||||||
|
if oldModelsDir == "" {
|
||||||
|
defer os.Unsetenv("OLLAMA_MODELS")
|
||||||
|
} else {
|
||||||
|
defer os.Setenv("OLLAMA_MODELS", oldModelsDir)
|
||||||
|
}
|
||||||
|
err = os.Setenv("OLLAMA_MODELS", modelDir)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
req := api.GenerateRequest{
|
||||||
|
Model: "orca-mini",
|
||||||
|
Prompt: "why is the sky blue?",
|
||||||
|
Stream: &stream,
|
||||||
|
Options: map[string]interface{}{
|
||||||
|
"temperature": 0,
|
||||||
|
"seed": 123,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
GenerateTestHelper(ctx, t, req, []string{"rayleigh", "scattering"})
|
||||||
}
|
}
|
||||||
|
|||||||
225
integration/concurrency_test.go
Normal file
225
integration/concurrency_test.go
Normal file
@@ -0,0 +1,225 @@
|
|||||||
|
//go:build integration
|
||||||
|
|
||||||
|
package integration
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"log/slog"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/ollama/ollama/api"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestMultiModelConcurrency(t *testing.T) {
|
||||||
|
var (
|
||||||
|
req = [2]api.GenerateRequest{
|
||||||
|
{
|
||||||
|
Model: "orca-mini",
|
||||||
|
Prompt: "why is the ocean blue?",
|
||||||
|
Stream: &stream,
|
||||||
|
Options: map[string]interface{}{
|
||||||
|
"seed": 42,
|
||||||
|
"temperature": 0.0,
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
Model: "tinydolphin",
|
||||||
|
Prompt: "what is the origin of the us thanksgiving holiday?",
|
||||||
|
Stream: &stream,
|
||||||
|
Options: map[string]interface{}{
|
||||||
|
"seed": 42,
|
||||||
|
"temperature": 0.0,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
resp = [2][]string{
|
||||||
|
[]string{"sunlight"},
|
||||||
|
[]string{"england", "english", "massachusetts", "pilgrims"},
|
||||||
|
}
|
||||||
|
)
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
wg.Add(len(req))
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), time.Second*120)
|
||||||
|
defer cancel()
|
||||||
|
for i := 0; i < len(req); i++ {
|
||||||
|
go func(i int) {
|
||||||
|
defer wg.Done()
|
||||||
|
GenerateTestHelper(ctx, t, req[i], resp[i])
|
||||||
|
}(i)
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIntegrationConcurrentPredictOrcaMini(t *testing.T) {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) // GTX 750 2G card takes ~9 minutes
|
||||||
|
defer cancel()
|
||||||
|
client, _, cleanup := InitServerConnection(ctx, t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
req, resp := GenerateRequests()
|
||||||
|
// Get the server running (if applicable) warm the model up with a single initial request
|
||||||
|
DoGenerate(ctx, t, client, req[0], resp[0], 60*time.Second, 5*time.Second)
|
||||||
|
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
wg.Add(len(req))
|
||||||
|
for i := 0; i < len(req); i++ {
|
||||||
|
go func(i int) {
|
||||||
|
defer wg.Done()
|
||||||
|
for j := 0; j < 5; j++ {
|
||||||
|
slog.Info("Starting", "req", i, "iter", j)
|
||||||
|
// On slower GPUs it can take a while to process the 4 concurrent requests
|
||||||
|
// so we allow a much longer initial timeout
|
||||||
|
DoGenerate(ctx, t, client, req[i], resp[i], 90*time.Second, 5*time.Second)
|
||||||
|
}
|
||||||
|
}(i)
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stress the system if we know how much VRAM it has, and attempt to load more models than will fit
|
||||||
|
func TestMultiModelStress(t *testing.T) {
|
||||||
|
vram := os.Getenv("OLLAMA_MAX_VRAM")
|
||||||
|
if vram == "" {
|
||||||
|
t.Skip("OLLAMA_MAX_VRAM not specified, can't pick the right models for the stress test")
|
||||||
|
}
|
||||||
|
max, err := strconv.ParseUint(vram, 10, 64)
|
||||||
|
require.NoError(t, err)
|
||||||
|
const MB = uint64(1024 * 1024)
|
||||||
|
type model struct {
|
||||||
|
name string
|
||||||
|
size uint64 // Approximate amount of VRAM they typically use when fully loaded in VRAM
|
||||||
|
}
|
||||||
|
|
||||||
|
smallModels := []model{
|
||||||
|
{
|
||||||
|
name: "orca-mini",
|
||||||
|
size: 2992 * MB,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "phi",
|
||||||
|
size: 2616 * MB,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "gemma:2b",
|
||||||
|
size: 2364 * MB,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "stable-code:3b",
|
||||||
|
size: 2608 * MB,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "starcoder2:3b",
|
||||||
|
size: 2166 * MB,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
mediumModels := []model{
|
||||||
|
{
|
||||||
|
name: "llama2",
|
||||||
|
size: 5118 * MB,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "mistral",
|
||||||
|
size: 4620 * MB,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "orca-mini:7b",
|
||||||
|
size: 5118 * MB,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "dolphin-mistral",
|
||||||
|
size: 4620 * MB,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "gemma:7b",
|
||||||
|
size: 5000 * MB,
|
||||||
|
},
|
||||||
|
// TODO - uncomment this once #3565 is merged and this is rebased on it
|
||||||
|
// {
|
||||||
|
// name: "codellama:7b",
|
||||||
|
// size: 5118 * MB,
|
||||||
|
// },
|
||||||
|
}
|
||||||
|
|
||||||
|
// These seem to be too slow to be useful...
|
||||||
|
// largeModels := []model{
|
||||||
|
// {
|
||||||
|
// name: "llama2:13b",
|
||||||
|
// size: 7400 * MB,
|
||||||
|
// },
|
||||||
|
// {
|
||||||
|
// name: "codellama:13b",
|
||||||
|
// size: 7400 * MB,
|
||||||
|
// },
|
||||||
|
// {
|
||||||
|
// name: "orca-mini:13b",
|
||||||
|
// size: 7400 * MB,
|
||||||
|
// },
|
||||||
|
// {
|
||||||
|
// name: "gemma:7b",
|
||||||
|
// size: 5000 * MB,
|
||||||
|
// },
|
||||||
|
// {
|
||||||
|
// name: "starcoder2:15b",
|
||||||
|
// size: 9100 * MB,
|
||||||
|
// },
|
||||||
|
// }
|
||||||
|
|
||||||
|
var chosenModels []model
|
||||||
|
switch {
|
||||||
|
case max < 10000*MB:
|
||||||
|
slog.Info("selecting small models")
|
||||||
|
chosenModels = smallModels
|
||||||
|
// case max < 30000*MB:
|
||||||
|
default:
|
||||||
|
slog.Info("selecting medium models")
|
||||||
|
chosenModels = mediumModels
|
||||||
|
// default:
|
||||||
|
// slog.Info("selecting large models")
|
||||||
|
// chosenModels = largModels
|
||||||
|
}
|
||||||
|
|
||||||
|
req, resp := GenerateRequests()
|
||||||
|
|
||||||
|
for i := range req {
|
||||||
|
if i > len(chosenModels) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
req[i].Model = chosenModels[i].name
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Minute) // TODO baseline -- 10m too short
|
||||||
|
defer cancel()
|
||||||
|
client, _, cleanup := InitServerConnection(ctx, t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
// Make sure all the models are pulled before we get started
|
||||||
|
for _, r := range req {
|
||||||
|
require.NoError(t, PullIfMissing(ctx, client, r.Model))
|
||||||
|
}
|
||||||
|
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
consumed := uint64(256 * MB) // Assume some baseline usage
|
||||||
|
for i := 0; i < len(req); i++ {
|
||||||
|
// Always get at least 2 models, but dont' overshoot VRAM too much or we'll take too long
|
||||||
|
if i > 1 && consumed > max {
|
||||||
|
slog.Info("achieved target vram exhaustion", "count", i, "vramMB", max/1024/1024, "modelsMB", consumed/1024/1024)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
consumed += chosenModels[i].size
|
||||||
|
slog.Info("target vram", "count", i, "vramMB", max/1024/1024, "modelsMB", consumed/1024/1024)
|
||||||
|
|
||||||
|
wg.Add(1)
|
||||||
|
go func(i int) {
|
||||||
|
defer wg.Done()
|
||||||
|
for j := 0; j < 3; j++ {
|
||||||
|
slog.Info("Starting", "req", i, "iter", j, "model", req[i].Model)
|
||||||
|
DoGenerate(ctx, t, client, req[i], resp[i], 120*time.Second, 5*time.Second)
|
||||||
|
}
|
||||||
|
}(i)
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
}
|
||||||
@@ -4,7 +4,6 @@ package integration
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"net/http"
|
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@@ -25,5 +24,5 @@ func TestContextExhaustion(t *testing.T) {
|
|||||||
"num_ctx": 128,
|
"num_ctx": 128,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
GenerateTestHelper(ctx, t, &http.Client{}, req, []string{"once", "upon", "lived"})
|
GenerateTestHelper(ctx, t, req, []string{"once", "upon", "lived"})
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -5,7 +5,6 @@ package integration
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"net/http"
|
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@@ -29,10 +28,11 @@ func TestIntegrationMultimodal(t *testing.T) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
resp := "the ollamas"
|
// Note: sometimes it returns "the ollamas" sometimes "the ollams"
|
||||||
|
resp := "the ollam"
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute)
|
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
GenerateTestHelper(ctx, t, &http.Client{}, req, []string{resp})
|
GenerateTestHelper(ctx, t, req, []string{resp})
|
||||||
}
|
}
|
||||||
|
|
||||||
const imageEncoding = `iVBORw0KGgoAAAANSUhEUgAAANIAAAB4CAYAAACHHqzKAAAAAXNSR0IArs4c6QAAAIRlWElmTU0AKgAAAAgABQESAAMAAAABAAEAAAEaAAUAAAABAAAASgEb
|
const imageEncoding = `iVBORw0KGgoAAAANSUhEUgAAANIAAAB4CAYAAACHHqzKAAAAAXNSR0IArs4c6QAAAIRlWElmTU0AKgAAAAgABQESAAMAAAABAAEAAAEaAAUAAAABAAAASgEb
|
||||||
|
|||||||
@@ -4,8 +4,6 @@ package integration
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"net/http"
|
|
||||||
"sync"
|
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@@ -45,25 +43,5 @@ var (
|
|||||||
func TestIntegrationSimpleOrcaMini(t *testing.T) {
|
func TestIntegrationSimpleOrcaMini(t *testing.T) {
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second*120)
|
ctx, cancel := context.WithTimeout(context.Background(), time.Second*120)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
GenerateTestHelper(ctx, t, &http.Client{}, req[0], resp[0])
|
GenerateTestHelper(ctx, t, req[0], resp[0])
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO
|
|
||||||
// The server always loads a new runner and closes the old one, which forces serial execution
|
|
||||||
// At present this test case fails with concurrency problems. Eventually we should try to
|
|
||||||
// get true concurrency working with n_parallel support in the backend
|
|
||||||
func TestIntegrationConcurrentPredictOrcaMini(t *testing.T) {
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
wg.Add(len(req))
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second*120)
|
|
||||||
defer cancel()
|
|
||||||
for i := 0; i < len(req); i++ {
|
|
||||||
go func(i int) {
|
|
||||||
defer wg.Done()
|
|
||||||
GenerateTestHelper(ctx, t, &http.Client{}, req[i], resp[i])
|
|
||||||
}(i)
|
|
||||||
}
|
|
||||||
wg.Wait()
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO - create a parallel test with 2 different models once we support concurrency
|
|
||||||
|
|||||||
122
integration/max_queue_test.go
Normal file
122
integration/max_queue_test.go
Normal file
@@ -0,0 +1,122 @@
|
|||||||
|
//go:build integration
|
||||||
|
|
||||||
|
package integration
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/ollama/ollama/api"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestMaxQueue(t *testing.T) {
|
||||||
|
if os.Getenv("OLLAMA_TEST_EXISTING") != "" {
|
||||||
|
t.Skip("Max Queue test requires spawing a local server so we can adjust the queue size")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Note: This test can be quite slow when running in CPU mode, so keep the threadCount low unless your on GPU
|
||||||
|
// Also note that by default Darwin can't sustain > ~128 connections without adjusting limits
|
||||||
|
threadCount := 32
|
||||||
|
mq := os.Getenv("OLLAMA_MAX_QUEUE")
|
||||||
|
if mq != "" {
|
||||||
|
var err error
|
||||||
|
threadCount, err = strconv.Atoi(mq)
|
||||||
|
require.NoError(t, err)
|
||||||
|
} else {
|
||||||
|
os.Setenv("OLLAMA_MAX_QUEUE", fmt.Sprintf("%d", threadCount))
|
||||||
|
}
|
||||||
|
|
||||||
|
req := api.GenerateRequest{
|
||||||
|
Model: "orca-mini",
|
||||||
|
Prompt: "write a long historical fiction story about christopher columbus. use at least 10 facts from his actual journey",
|
||||||
|
Options: map[string]interface{}{
|
||||||
|
"seed": 42,
|
||||||
|
"temperature": 0.0,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
resp := []string{"explore", "discover", "ocean"}
|
||||||
|
|
||||||
|
// CPU mode takes much longer at the limit with a large queue setting
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
|
||||||
|
defer cancel()
|
||||||
|
client, _, cleanup := InitServerConnection(ctx, t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
require.NoError(t, PullIfMissing(ctx, client, req.Model))
|
||||||
|
|
||||||
|
// Context for the worker threads so we can shut them down
|
||||||
|
// embedCtx, embedCancel := context.WithCancel(ctx)
|
||||||
|
embedCtx := ctx
|
||||||
|
|
||||||
|
var genwg sync.WaitGroup
|
||||||
|
go func() {
|
||||||
|
genwg.Add(1)
|
||||||
|
defer genwg.Done()
|
||||||
|
slog.Info("Starting generate request")
|
||||||
|
DoGenerate(ctx, t, client, req, resp, 45*time.Second, 5*time.Second)
|
||||||
|
slog.Info("generate completed")
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Give the generate a chance to get started before we start hammering on embed requests
|
||||||
|
time.Sleep(5 * time.Millisecond)
|
||||||
|
|
||||||
|
threadCount += 10 // Add a few extra to ensure we push the queue past its limit
|
||||||
|
busyCount := 0
|
||||||
|
resetByPeerCount := 0
|
||||||
|
canceledCount := 0
|
||||||
|
succesCount := 0
|
||||||
|
counterMu := sync.Mutex{}
|
||||||
|
var embedwg sync.WaitGroup
|
||||||
|
for i := 0; i < threadCount; i++ {
|
||||||
|
go func(i int) {
|
||||||
|
embedwg.Add(1)
|
||||||
|
defer embedwg.Done()
|
||||||
|
slog.Info("embed started", "id", i)
|
||||||
|
embedReq := api.EmbeddingRequest{
|
||||||
|
Model: req.Model,
|
||||||
|
Prompt: req.Prompt,
|
||||||
|
Options: req.Options,
|
||||||
|
}
|
||||||
|
// Fresh client for every request
|
||||||
|
client, _ = GetTestEndpoint()
|
||||||
|
|
||||||
|
resp, genErr := client.Embeddings(embedCtx, &embedReq)
|
||||||
|
counterMu.Lock()
|
||||||
|
defer counterMu.Unlock()
|
||||||
|
switch {
|
||||||
|
case genErr == nil:
|
||||||
|
succesCount++
|
||||||
|
require.Greater(t, len(resp.Embedding), 5) // somewhat arbitrary, but sufficient to be reasonable
|
||||||
|
case errors.Is(genErr, context.Canceled):
|
||||||
|
canceledCount++
|
||||||
|
case strings.Contains(genErr.Error(), "busy"):
|
||||||
|
busyCount++
|
||||||
|
case strings.Contains(genErr.Error(), "connection reset by peer"):
|
||||||
|
resetByPeerCount++
|
||||||
|
default:
|
||||||
|
require.NoError(t, genErr, "%d request failed", i)
|
||||||
|
}
|
||||||
|
|
||||||
|
slog.Info("embed finished", "id", i)
|
||||||
|
}(i)
|
||||||
|
}
|
||||||
|
genwg.Wait()
|
||||||
|
slog.Info("generate done, waiting for embeds")
|
||||||
|
embedwg.Wait()
|
||||||
|
|
||||||
|
slog.Info("embeds completed", "success", succesCount, "busy", busyCount, "reset", resetByPeerCount, "canceled", canceledCount)
|
||||||
|
require.Equal(t, resetByPeerCount, 0, "Connections reset by peer, have you updated your fd and socket limits?")
|
||||||
|
require.True(t, busyCount > 0, "no requests hit busy error but some should have")
|
||||||
|
require.True(t, canceledCount == 0, "no requests should have been canceled due to timeout")
|
||||||
|
|
||||||
|
}
|
||||||
@@ -5,13 +5,14 @@ package integration
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"log/slog"
|
"log/slog"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"runtime"
|
"runtime"
|
||||||
@@ -23,9 +24,13 @@ import (
|
|||||||
|
|
||||||
"github.com/ollama/ollama/api"
|
"github.com/ollama/ollama/api"
|
||||||
"github.com/ollama/ollama/app/lifecycle"
|
"github.com/ollama/ollama/app/lifecycle"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func Init() {
|
||||||
|
lifecycle.InitLogging()
|
||||||
|
}
|
||||||
|
|
||||||
func FindPort() string {
|
func FindPort() string {
|
||||||
port := 0
|
port := 0
|
||||||
if a, err := net.ResolveTCPAddr("tcp", "localhost:0"); err == nil {
|
if a, err := net.ResolveTCPAddr("tcp", "localhost:0"); err == nil {
|
||||||
@@ -41,7 +46,7 @@ func FindPort() string {
|
|||||||
return strconv.Itoa(port)
|
return strconv.Itoa(port)
|
||||||
}
|
}
|
||||||
|
|
||||||
func GetTestEndpoint() (string, string) {
|
func GetTestEndpoint() (*api.Client, string) {
|
||||||
defaultPort := "11434"
|
defaultPort := "11434"
|
||||||
ollamaHost := os.Getenv("OLLAMA_HOST")
|
ollamaHost := os.Getenv("OLLAMA_HOST")
|
||||||
|
|
||||||
@@ -67,16 +72,20 @@ func GetTestEndpoint() (string, string) {
|
|||||||
port = FindPort()
|
port = FindPort()
|
||||||
}
|
}
|
||||||
|
|
||||||
url := fmt.Sprintf("%s:%s", host, port)
|
slog.Info("server connection", "host", host, "port", port)
|
||||||
slog.Info("server connection", "url", url)
|
|
||||||
return scheme, url
|
return api.NewClient(
|
||||||
|
&url.URL{
|
||||||
|
Scheme: scheme,
|
||||||
|
Host: net.JoinHostPort(host, port),
|
||||||
|
},
|
||||||
|
http.DefaultClient), fmt.Sprintf("%s:%s", host, port)
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO make fanicier, grab logs, etc.
|
|
||||||
var serverMutex sync.Mutex
|
var serverMutex sync.Mutex
|
||||||
var serverReady bool
|
var serverReady bool
|
||||||
|
|
||||||
func StartServer(ctx context.Context, ollamaHost string) error {
|
func startServer(t *testing.T, ctx context.Context, ollamaHost string) error {
|
||||||
// Make sure the server has been built
|
// Make sure the server has been built
|
||||||
CLIName, err := filepath.Abs("../ollama")
|
CLIName, err := filepath.Abs("../ollama")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -98,7 +107,7 @@ func StartServer(ctx context.Context, ollamaHost string) error {
|
|||||||
|
|
||||||
if tmp := os.Getenv("OLLAMA_HOST"); tmp != ollamaHost {
|
if tmp := os.Getenv("OLLAMA_HOST"); tmp != ollamaHost {
|
||||||
slog.Info("setting env", "OLLAMA_HOST", ollamaHost)
|
slog.Info("setting env", "OLLAMA_HOST", ollamaHost)
|
||||||
os.Setenv("OLLAMA_HOST", ollamaHost)
|
t.Setenv("OLLAMA_HOST", ollamaHost)
|
||||||
}
|
}
|
||||||
|
|
||||||
slog.Info("starting server", "url", ollamaHost)
|
slog.Info("starting server", "url", ollamaHost)
|
||||||
@@ -125,67 +134,76 @@ func StartServer(ctx context.Context, ollamaHost string) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func PullIfMissing(ctx context.Context, client *http.Client, scheme, testEndpoint, modelName string) error {
|
func PullIfMissing(ctx context.Context, client *api.Client, modelName string) error {
|
||||||
slog.Info("checking status of model", "model", modelName)
|
slog.Info("checking status of model", "model", modelName)
|
||||||
showReq := &api.ShowRequest{Name: modelName}
|
showReq := &api.ShowRequest{Name: modelName}
|
||||||
requestJSON, err := json.Marshal(showReq)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
req, err := http.NewRequest("POST", scheme+"://"+testEndpoint+"/api/show", bytes.NewReader(requestJSON))
|
showCtx, cancel := context.WithDeadlineCause(
|
||||||
if err != nil {
|
ctx,
|
||||||
|
time.Now().Add(5*time.Second),
|
||||||
|
fmt.Errorf("show for existing model %s took too long", modelName),
|
||||||
|
)
|
||||||
|
defer cancel()
|
||||||
|
_, err := client.Show(showCtx, showReq)
|
||||||
|
var statusError api.StatusError
|
||||||
|
switch {
|
||||||
|
case errors.As(err, &statusError) && statusError.StatusCode == http.StatusNotFound:
|
||||||
|
break
|
||||||
|
case err != nil:
|
||||||
return err
|
return err
|
||||||
}
|
default:
|
||||||
|
|
||||||
// Make the request with the HTTP client
|
|
||||||
response, err := client.Do(req.WithContext(ctx))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer response.Body.Close()
|
|
||||||
if response.StatusCode == 200 {
|
|
||||||
slog.Info("model already present", "model", modelName)
|
slog.Info("model already present", "model", modelName)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
slog.Info("model missing", "status", response.StatusCode)
|
slog.Info("model missing", "model", modelName)
|
||||||
|
|
||||||
|
stallDuration := 30 * time.Second // This includes checksum verification, which can take a while on larger models
|
||||||
|
stallTimer := time.NewTimer(stallDuration)
|
||||||
|
fn := func(resp api.ProgressResponse) error {
|
||||||
|
// fmt.Print(".")
|
||||||
|
if !stallTimer.Reset(stallDuration) {
|
||||||
|
return fmt.Errorf("stall was detected, aborting status reporting")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
stream := true
|
||||||
pullReq := &api.PullRequest{Name: modelName, Stream: &stream}
|
pullReq := &api.PullRequest{Name: modelName, Stream: &stream}
|
||||||
requestJSON, err = json.Marshal(pullReq)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
req, err = http.NewRequest("POST", scheme+"://"+testEndpoint+"/api/pull", bytes.NewReader(requestJSON))
|
var pullError error
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
slog.Info("pulling", "model", modelName)
|
|
||||||
|
|
||||||
response, err = client.Do(req.WithContext(ctx))
|
done := make(chan int)
|
||||||
if err != nil {
|
go func() {
|
||||||
return err
|
pullError = client.Pull(ctx, pullReq, fn)
|
||||||
|
done <- 0
|
||||||
|
}()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-stallTimer.C:
|
||||||
|
return fmt.Errorf("download stalled")
|
||||||
|
case <-done:
|
||||||
|
return pullError
|
||||||
}
|
}
|
||||||
defer response.Body.Close()
|
|
||||||
if response.StatusCode != 200 {
|
|
||||||
return fmt.Errorf("failed to pull model") // TODO more details perhaps
|
|
||||||
}
|
|
||||||
slog.Info("model pulled", "model", modelName)
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var serverProcMutex sync.Mutex
|
var serverProcMutex sync.Mutex
|
||||||
|
|
||||||
func GenerateTestHelper(ctx context.Context, t *testing.T, client *http.Client, genReq api.GenerateRequest, anyResp []string) {
|
// Returns an Client, the testEndpoint, and a cleanup function, fails the test on errors
|
||||||
|
// Starts the server if needed
|
||||||
// TODO maybe stuff in an init routine?
|
func InitServerConnection(ctx context.Context, t *testing.T) (*api.Client, string, func()) {
|
||||||
lifecycle.InitLogging()
|
client, testEndpoint := GetTestEndpoint()
|
||||||
|
if os.Getenv("OLLAMA_TEST_EXISTING") == "" {
|
||||||
requestJSON, err := json.Marshal(genReq)
|
serverProcMutex.Lock()
|
||||||
if err != nil {
|
fp, err := os.CreateTemp("", "ollama-server-*.log")
|
||||||
t.Fatalf("Error serializing request: %v", err)
|
if err != nil {
|
||||||
|
t.Fatalf("failed to generate log file: %s", err)
|
||||||
|
}
|
||||||
|
lifecycle.ServerLogFile = fp.Name()
|
||||||
|
fp.Close()
|
||||||
|
require.NoError(t, startServer(t, ctx, testEndpoint))
|
||||||
}
|
}
|
||||||
defer func() {
|
|
||||||
|
return client, testEndpoint, func() {
|
||||||
if os.Getenv("OLLAMA_TEST_EXISTING") == "" {
|
if os.Getenv("OLLAMA_TEST_EXISTING") == "" {
|
||||||
defer serverProcMutex.Unlock()
|
defer serverProcMutex.Unlock()
|
||||||
if t.Failed() {
|
if t.Failed() {
|
||||||
@@ -203,63 +221,118 @@ func GenerateTestHelper(ctx context.Context, t *testing.T, client *http.Client,
|
|||||||
os.Stderr.Write(data)
|
os.Stderr.Write(data)
|
||||||
slog.Warn("END OF SERVER")
|
slog.Warn("END OF SERVER")
|
||||||
}
|
}
|
||||||
err = os.Remove(lifecycle.ServerLogFile)
|
err := os.Remove(lifecycle.ServerLogFile)
|
||||||
if err != nil && !os.IsNotExist(err) {
|
if err != nil && !os.IsNotExist(err) {
|
||||||
slog.Warn("failed to cleanup", "logfile", lifecycle.ServerLogFile, "error", err)
|
slog.Warn("failed to cleanup", "logfile", lifecycle.ServerLogFile, "error", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}()
|
|
||||||
scheme, testEndpoint := GetTestEndpoint()
|
|
||||||
|
|
||||||
if os.Getenv("OLLAMA_TEST_EXISTING") == "" {
|
|
||||||
serverProcMutex.Lock()
|
|
||||||
fp, err := os.CreateTemp("", "ollama-server-*.log")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("failed to generate log file: %s", err)
|
|
||||||
}
|
|
||||||
lifecycle.ServerLogFile = fp.Name()
|
|
||||||
fp.Close()
|
|
||||||
assert.NoError(t, StartServer(ctx, testEndpoint))
|
|
||||||
}
|
}
|
||||||
|
}
|
||||||
err = PullIfMissing(ctx, client, scheme, testEndpoint, genReq.Model)
|
|
||||||
if err != nil {
|
func GenerateTestHelper(ctx context.Context, t *testing.T, genReq api.GenerateRequest, anyResp []string) {
|
||||||
t.Fatalf("Error pulling model: %v", err)
|
client, _, cleanup := InitServerConnection(ctx, t)
|
||||||
}
|
defer cleanup()
|
||||||
|
require.NoError(t, PullIfMissing(ctx, client, genReq.Model))
|
||||||
// Make the request and get the response
|
DoGenerate(ctx, t, client, genReq, anyResp, 30*time.Second, 10*time.Second)
|
||||||
req, err := http.NewRequest("POST", scheme+"://"+testEndpoint+"/api/generate", bytes.NewReader(requestJSON))
|
}
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Error creating request: %v", err)
|
func DoGenerate(ctx context.Context, t *testing.T, client *api.Client, genReq api.GenerateRequest, anyResp []string, initialTimeout, streamTimeout time.Duration) {
|
||||||
}
|
stallTimer := time.NewTimer(initialTimeout)
|
||||||
|
var buf bytes.Buffer
|
||||||
// Set the content type for the request
|
fn := func(response api.GenerateResponse) error {
|
||||||
req.Header.Set("Content-Type", "application/json")
|
// fmt.Print(".")
|
||||||
|
buf.Write([]byte(response.Response))
|
||||||
// Make the request with the HTTP client
|
if !stallTimer.Reset(streamTimeout) {
|
||||||
response, err := client.Do(req.WithContext(ctx))
|
return fmt.Errorf("stall was detected while streaming response, aborting")
|
||||||
if err != nil {
|
}
|
||||||
t.Fatalf("Error making request: %v", err)
|
return nil
|
||||||
}
|
}
|
||||||
defer response.Body.Close()
|
|
||||||
body, err := io.ReadAll(response.Body)
|
stream := true
|
||||||
assert.NoError(t, err)
|
genReq.Stream = &stream
|
||||||
assert.Equal(t, response.StatusCode, 200, string(body))
|
done := make(chan int)
|
||||||
|
var genErr error
|
||||||
// Verify the response is valid JSON
|
go func() {
|
||||||
var payload api.GenerateResponse
|
genErr = client.Generate(ctx, &genReq, fn)
|
||||||
err = json.Unmarshal(body, &payload)
|
done <- 0
|
||||||
if err != nil {
|
}()
|
||||||
assert.NoError(t, err, body)
|
|
||||||
}
|
select {
|
||||||
|
case <-stallTimer.C:
|
||||||
// Verify the response contains the expected data
|
if buf.Len() == 0 {
|
||||||
atLeastOne := false
|
t.Errorf("generate never started. Timed out after :%s", initialTimeout.String())
|
||||||
for _, resp := range anyResp {
|
} else {
|
||||||
if strings.Contains(strings.ToLower(payload.Response), resp) {
|
t.Errorf("generate stalled. Response so far:%s", buf.String())
|
||||||
atLeastOne = true
|
}
|
||||||
break
|
case <-done:
|
||||||
}
|
require.NoError(t, genErr, "failed with %s request prompt %s ", genReq.Model, genReq.Prompt)
|
||||||
}
|
// Verify the response contains the expected data
|
||||||
assert.True(t, atLeastOne, "none of %v found in %s", anyResp, payload.Response)
|
response := buf.String()
|
||||||
|
atLeastOne := false
|
||||||
|
for _, resp := range anyResp {
|
||||||
|
if strings.Contains(strings.ToLower(response), resp) {
|
||||||
|
atLeastOne = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
require.True(t, atLeastOne, "none of %v found in %s", anyResp, response)
|
||||||
|
slog.Info("test pass", "model", genReq.Model, "prompt", genReq.Prompt, "contains", anyResp, "response", response)
|
||||||
|
case <-ctx.Done():
|
||||||
|
t.Error("outer test context done while waiting for generate")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate a set of requests
|
||||||
|
// By default each request uses orca-mini as the model
|
||||||
|
func GenerateRequests() ([]api.GenerateRequest, [][]string) {
|
||||||
|
return []api.GenerateRequest{
|
||||||
|
{
|
||||||
|
Model: "orca-mini",
|
||||||
|
Prompt: "why is the ocean blue?",
|
||||||
|
Stream: &stream,
|
||||||
|
Options: map[string]interface{}{
|
||||||
|
"seed": 42,
|
||||||
|
"temperature": 0.0,
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
Model: "orca-mini",
|
||||||
|
Prompt: "why is the color of dirt brown?",
|
||||||
|
Stream: &stream,
|
||||||
|
Options: map[string]interface{}{
|
||||||
|
"seed": 42,
|
||||||
|
"temperature": 0.0,
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
Model: "orca-mini",
|
||||||
|
Prompt: "what is the origin of the us thanksgiving holiday?",
|
||||||
|
Stream: &stream,
|
||||||
|
Options: map[string]interface{}{
|
||||||
|
"seed": 42,
|
||||||
|
"temperature": 0.0,
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
Model: "orca-mini",
|
||||||
|
Prompt: "what is the origin of independence day?",
|
||||||
|
Stream: &stream,
|
||||||
|
Options: map[string]interface{}{
|
||||||
|
"seed": 42,
|
||||||
|
"temperature": 0.0,
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
Model: "orca-mini",
|
||||||
|
Prompt: "what is the composition of air?",
|
||||||
|
Stream: &stream,
|
||||||
|
Options: map[string]interface{}{
|
||||||
|
"seed": 42,
|
||||||
|
"temperature": 0.0,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
[][]string{
|
||||||
|
[]string{"sunlight"},
|
||||||
|
[]string{"soil", "organic", "earth", "black", "tan"},
|
||||||
|
[]string{"england", "english", "massachusetts", "pilgrims"},
|
||||||
|
[]string{"fourth", "july", "declaration", "independence"},
|
||||||
|
[]string{"nitrogen", "oxygen", "carbon", "dioxide"},
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
238
llm/ext_server/server.cpp
vendored
238
llm/ext_server/server.cpp
vendored
@@ -66,7 +66,7 @@ struct server_params {
|
|||||||
};
|
};
|
||||||
|
|
||||||
bool server_verbose = false;
|
bool server_verbose = false;
|
||||||
bool server_log_json = true;
|
bool server_log_json = false;
|
||||||
|
|
||||||
enum stop_type {
|
enum stop_type {
|
||||||
STOP_FULL,
|
STOP_FULL,
|
||||||
@@ -140,7 +140,6 @@ struct server_slot {
|
|||||||
std::vector<llama_token> cache_tokens;
|
std::vector<llama_token> cache_tokens;
|
||||||
std::vector<completion_token_output> generated_token_probs;
|
std::vector<completion_token_output> generated_token_probs;
|
||||||
|
|
||||||
bool infill = false;
|
|
||||||
bool embedding = false;
|
bool embedding = false;
|
||||||
bool has_next_token = true;
|
bool has_next_token = true;
|
||||||
bool truncated = false;
|
bool truncated = false;
|
||||||
@@ -187,7 +186,6 @@ struct server_slot {
|
|||||||
n_past = 0;
|
n_past = 0;
|
||||||
n_sent_text = 0;
|
n_sent_text = 0;
|
||||||
n_sent_token_probs = 0;
|
n_sent_token_probs = 0;
|
||||||
infill = false;
|
|
||||||
ga_i = 0;
|
ga_i = 0;
|
||||||
n_past_se = 0;
|
n_past_se = 0;
|
||||||
|
|
||||||
@@ -266,7 +264,7 @@ struct server_slot {
|
|||||||
sprintf(buffer, "prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)",
|
sprintf(buffer, "prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)",
|
||||||
t_prompt_processing, n_prompt_tokens_processed,
|
t_prompt_processing, n_prompt_tokens_processed,
|
||||||
t_token, n_tokens_second);
|
t_token, n_tokens_second);
|
||||||
LOG_INFO(buffer, {
|
LOG_DEBUG(buffer, {
|
||||||
{"slot_id", id},
|
{"slot_id", id},
|
||||||
{"task_id", task_id},
|
{"task_id", task_id},
|
||||||
{"t_prompt_processing", t_prompt_processing},
|
{"t_prompt_processing", t_prompt_processing},
|
||||||
@@ -280,7 +278,7 @@ struct server_slot {
|
|||||||
sprintf(buffer, "generation eval time = %10.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)",
|
sprintf(buffer, "generation eval time = %10.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)",
|
||||||
t_token_generation, n_decoded,
|
t_token_generation, n_decoded,
|
||||||
t_token, n_tokens_second);
|
t_token, n_tokens_second);
|
||||||
LOG_INFO(buffer, {
|
LOG_DEBUG(buffer, {
|
||||||
{"slot_id", id},
|
{"slot_id", id},
|
||||||
{"task_id", task_id},
|
{"task_id", task_id},
|
||||||
{"t_token_generation", t_token_generation},
|
{"t_token_generation", t_token_generation},
|
||||||
@@ -290,7 +288,7 @@ struct server_slot {
|
|||||||
});
|
});
|
||||||
|
|
||||||
sprintf(buffer, " total time = %10.2f ms", t_prompt_processing + t_token_generation);
|
sprintf(buffer, " total time = %10.2f ms", t_prompt_processing + t_token_generation);
|
||||||
LOG_INFO(buffer, {
|
LOG_DEBUG(buffer, {
|
||||||
{"slot_id", id},
|
{"slot_id", id},
|
||||||
{"task_id", task_id},
|
{"task_id", task_id},
|
||||||
{"t_prompt_processing", t_prompt_processing},
|
{"t_prompt_processing", t_prompt_processing},
|
||||||
@@ -334,6 +332,7 @@ struct server_metrics {
|
|||||||
struct llama_server_context
|
struct llama_server_context
|
||||||
{
|
{
|
||||||
llama_model *model = nullptr;
|
llama_model *model = nullptr;
|
||||||
|
float modelProgress = 0.0;
|
||||||
llama_context *ctx = nullptr;
|
llama_context *ctx = nullptr;
|
||||||
|
|
||||||
clip_ctx *clp_ctx = nullptr;
|
clip_ctx *clp_ctx = nullptr;
|
||||||
@@ -371,7 +370,7 @@ struct llama_server_context
|
|||||||
{
|
{
|
||||||
if (clp_ctx)
|
if (clp_ctx)
|
||||||
{
|
{
|
||||||
LOG_INFO("freeing clip model", {});
|
LOG_DEBUG("freeing clip model", {});
|
||||||
clip_free(clp_ctx);
|
clip_free(clp_ctx);
|
||||||
clp_ctx = nullptr;
|
clp_ctx = nullptr;
|
||||||
}
|
}
|
||||||
@@ -392,7 +391,7 @@ struct llama_server_context
|
|||||||
params = params_;
|
params = params_;
|
||||||
if (!params.mmproj.empty()) {
|
if (!params.mmproj.empty()) {
|
||||||
multimodal = true;
|
multimodal = true;
|
||||||
LOG_INFO("Multi Modal Mode Enabled", {});
|
LOG_DEBUG("Multi Modal Mode Enabled", {});
|
||||||
clp_ctx = clip_model_load(params.mmproj.c_str(), /*verbosity=*/ 1);
|
clp_ctx = clip_model_load(params.mmproj.c_str(), /*verbosity=*/ 1);
|
||||||
if(clp_ctx == nullptr) {
|
if(clp_ctx == nullptr) {
|
||||||
LOG_ERROR("unable to load clip model", {{"model", params.mmproj}});
|
LOG_ERROR("unable to load clip model", {{"model", params.mmproj}});
|
||||||
@@ -445,7 +444,7 @@ struct llama_server_context
|
|||||||
|
|
||||||
const int32_t n_ctx_slot = n_ctx / params.n_parallel;
|
const int32_t n_ctx_slot = n_ctx / params.n_parallel;
|
||||||
|
|
||||||
LOG_INFO("initializing slots", {{"n_slots", params.n_parallel}});
|
LOG_DEBUG("initializing slots", {{"n_slots", params.n_parallel}});
|
||||||
for (int i = 0; i < params.n_parallel; i++)
|
for (int i = 0; i < params.n_parallel; i++)
|
||||||
{
|
{
|
||||||
server_slot slot;
|
server_slot slot;
|
||||||
@@ -454,7 +453,7 @@ struct llama_server_context
|
|||||||
slot.n_ctx = n_ctx_slot;
|
slot.n_ctx = n_ctx_slot;
|
||||||
slot.n_predict = params.n_predict;
|
slot.n_predict = params.n_predict;
|
||||||
|
|
||||||
LOG_INFO("new slot", {
|
LOG_DEBUG("new slot", {
|
||||||
{"slot_id", slot.id},
|
{"slot_id", slot.id},
|
||||||
{"n_ctx_slot", slot.n_ctx}
|
{"n_ctx_slot", slot.n_ctx}
|
||||||
});
|
});
|
||||||
@@ -468,7 +467,7 @@ struct llama_server_context
|
|||||||
//GGML_ASSERT(n_ctx_train % ga_w == 0 && "n_ctx_train must be a multiple of ga_w"); // NOLINT
|
//GGML_ASSERT(n_ctx_train % ga_w == 0 && "n_ctx_train must be a multiple of ga_w"); // NOLINT
|
||||||
//GGML_ASSERT(n_ctx >= n_ctx_train * ga_n && "n_ctx must be at least n_ctx_train * ga_n"); // NOLINT
|
//GGML_ASSERT(n_ctx >= n_ctx_train * ga_n && "n_ctx must be at least n_ctx_train * ga_n"); // NOLINT
|
||||||
|
|
||||||
LOG_INFO("slot self-extend", {
|
LOG_DEBUG("slot self-extend", {
|
||||||
{"slot_id", slot.id},
|
{"slot_id", slot.id},
|
||||||
{"ga_n", ga_n},
|
{"ga_n", ga_n},
|
||||||
{"ga_w", ga_w}
|
{"ga_w", ga_w}
|
||||||
@@ -599,16 +598,6 @@ struct llama_server_context
|
|||||||
slot->params.n_predict = slot->n_predict;
|
slot->params.n_predict = slot->n_predict;
|
||||||
}
|
}
|
||||||
|
|
||||||
// infill
|
|
||||||
if (data.count("input_prefix") != 0)
|
|
||||||
{
|
|
||||||
slot->params.input_prefix = data["input_prefix"];
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
slot->params.input_prefix = "";
|
|
||||||
}
|
|
||||||
|
|
||||||
if (data.count("input_suffix") != 0)
|
if (data.count("input_suffix") != 0)
|
||||||
{
|
{
|
||||||
slot->params.input_suffix = data["input_suffix"];
|
slot->params.input_suffix = data["input_suffix"];
|
||||||
@@ -737,7 +726,7 @@ struct llama_server_context
|
|||||||
sampler_names.emplace_back(sampler_name);
|
sampler_names.emplace_back(sampler_name);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
slot->sparams.samplers_sequence = sampler_types_from_names(sampler_names, false);
|
slot->sparams.samplers_sequence = llama_sampling_types_from_names(sampler_names, false);
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
@@ -827,7 +816,7 @@ struct llama_server_context
|
|||||||
|
|
||||||
all_slots_are_idle = false;
|
all_slots_are_idle = false;
|
||||||
|
|
||||||
LOG_INFO("slot is processing task", {
|
LOG_DEBUG("slot is processing task", {
|
||||||
{"slot_id", slot->id},
|
{"slot_id", slot->id},
|
||||||
{"task_id", slot->task_id},
|
{"task_id", slot->task_id},
|
||||||
});
|
});
|
||||||
@@ -896,15 +885,6 @@ struct llama_server_context
|
|||||||
system_need_update = true;
|
system_need_update = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
void system_prompt_process(const json &sys_props) {
|
|
||||||
system_prompt = sys_props.value("prompt", "");
|
|
||||||
name_user = sys_props.value("anti_prompt", "");
|
|
||||||
name_assistant = sys_props.value("assistant_name", "");
|
|
||||||
|
|
||||||
|
|
||||||
system_prompt_notify();
|
|
||||||
}
|
|
||||||
|
|
||||||
static size_t find_stopping_strings(const std::string &text, const size_t last_token_size,
|
static size_t find_stopping_strings(const std::string &text, const size_t last_token_size,
|
||||||
const stop_type type, server_slot &slot)
|
const stop_type type, server_slot &slot)
|
||||||
{
|
{
|
||||||
@@ -1032,7 +1012,7 @@ struct llama_server_context
|
|||||||
slot.has_next_token = false;
|
slot.has_next_token = false;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!slot.cache_tokens.empty() && result.tok == llama_token_eos(model))
|
if (!slot.cache_tokens.empty() && llama_token_is_eog(model, result.tok))
|
||||||
{
|
{
|
||||||
slot.stopped_eos = true;
|
slot.stopped_eos = true;
|
||||||
slot.has_next_token = false;
|
slot.has_next_token = false;
|
||||||
@@ -1095,7 +1075,7 @@ struct llama_server_context
|
|||||||
std::vector<std::string> samplers_sequence;
|
std::vector<std::string> samplers_sequence;
|
||||||
for (const auto &sampler_type : slot.sparams.samplers_sequence)
|
for (const auto &sampler_type : slot.sparams.samplers_sequence)
|
||||||
{
|
{
|
||||||
samplers_sequence.emplace_back(sampler_type_to_name_string(sampler_type));
|
samplers_sequence.emplace_back(llama_sampling_type_to_str(sampler_type));
|
||||||
}
|
}
|
||||||
|
|
||||||
return json {
|
return json {
|
||||||
@@ -1144,12 +1124,15 @@ struct llama_server_context
|
|||||||
|
|
||||||
res.result_json = json
|
res.result_json = json
|
||||||
{
|
{
|
||||||
{"content", tkn.text_to_send},
|
|
||||||
{"stop", false},
|
{"stop", false},
|
||||||
{"slot_id", slot.id},
|
{"slot_id", slot.id},
|
||||||
{"multimodal", multimodal}
|
{"multimodal", multimodal}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
if (!llama_token_is_eog(model, tkn.tok)) {
|
||||||
|
res.result_json["content"] = tkn.text_to_send;
|
||||||
|
}
|
||||||
|
|
||||||
if (slot.sparams.n_probs > 0)
|
if (slot.sparams.n_probs > 0)
|
||||||
{
|
{
|
||||||
std::vector<completion_token_output> probs_output = {};
|
std::vector<completion_token_output> probs_output = {};
|
||||||
@@ -1183,8 +1166,6 @@ struct llama_server_context
|
|||||||
{"model", params.model_alias},
|
{"model", params.model_alias},
|
||||||
{"tokens_predicted", slot.n_decoded},
|
{"tokens_predicted", slot.n_decoded},
|
||||||
{"tokens_evaluated", slot.n_prompt_tokens},
|
{"tokens_evaluated", slot.n_prompt_tokens},
|
||||||
{"generation_settings", get_formated_generation(slot)},
|
|
||||||
{"prompt", slot.prompt},
|
|
||||||
{"truncated", slot.truncated},
|
{"truncated", slot.truncated},
|
||||||
{"stopped_eos", slot.stopped_eos},
|
{"stopped_eos", slot.stopped_eos},
|
||||||
{"stopped_word", slot.stopped_word},
|
{"stopped_word", slot.stopped_word},
|
||||||
@@ -1261,13 +1242,12 @@ struct llama_server_context
|
|||||||
queue_results.send(res);
|
queue_results.send(res);
|
||||||
}
|
}
|
||||||
|
|
||||||
void request_completion(int task_id, json data, bool infill, bool embedding, int multitask_id)
|
void request_completion(int task_id, json data, bool embedding, int multitask_id)
|
||||||
{
|
{
|
||||||
task_server task;
|
task_server task;
|
||||||
task.id = task_id;
|
task.id = task_id;
|
||||||
task.target_id = 0;
|
task.target_id = 0;
|
||||||
task.data = std::move(data);
|
task.data = std::move(data);
|
||||||
task.infill_mode = infill;
|
|
||||||
task.embedding_mode = embedding;
|
task.embedding_mode = embedding;
|
||||||
task.type = TASK_TYPE_COMPLETION;
|
task.type = TASK_TYPE_COMPLETION;
|
||||||
task.multitask_id = multitask_id;
|
task.multitask_id = multitask_id;
|
||||||
@@ -1413,8 +1393,8 @@ struct llama_server_context
|
|||||||
json subtask_data = multiprompt_task.data;
|
json subtask_data = multiprompt_task.data;
|
||||||
subtask_data["prompt"] = subtask_data["prompt"][i];
|
subtask_data["prompt"] = subtask_data["prompt"][i];
|
||||||
|
|
||||||
// subtasks inherit everything else (infill mode, embedding mode, etc.)
|
// subtasks inherit everything else (embedding mode, etc.)
|
||||||
request_completion(subtask_ids[i], subtask_data, multiprompt_task.infill_mode, multiprompt_task.embedding_mode, multitask_id);
|
request_completion(subtask_ids[i], subtask_data, multiprompt_task.embedding_mode, multitask_id);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1432,26 +1412,8 @@ struct llama_server_context
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (task.data.contains("system_prompt"))
|
|
||||||
{
|
|
||||||
if (!all_slots_are_idle) {
|
|
||||||
send_error(task, "system prompt can only be updated when all slots are idle");
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
system_prompt_process(task.data["system_prompt"]);
|
|
||||||
|
|
||||||
// reset cache_tokens for all slots
|
|
||||||
for (server_slot &slot : slots)
|
|
||||||
{
|
|
||||||
slot.cache_tokens.clear();
|
|
||||||
slot.n_past = 0;
|
|
||||||
slot.n_past_se = 0;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
slot->reset();
|
slot->reset();
|
||||||
|
|
||||||
slot->infill = task.infill_mode;
|
|
||||||
slot->embedding = task.embedding_mode;
|
slot->embedding = task.embedding_mode;
|
||||||
slot->task_id = task.id;
|
slot->task_id = task.id;
|
||||||
slot->multitask_id = task.multitask_id;
|
slot->multitask_id = task.multitask_id;
|
||||||
@@ -1503,7 +1465,7 @@ struct llama_server_context
|
|||||||
}
|
}
|
||||||
slots_data.push_back(slot_data);
|
slots_data.push_back(slot_data);
|
||||||
}
|
}
|
||||||
LOG_INFO("slot data", {
|
LOG_DEBUG("slot data", {
|
||||||
{"task_id", task.id},
|
{"task_id", task.id},
|
||||||
{"n_idle_slots", n_idle_slots},
|
{"n_idle_slots", n_idle_slots},
|
||||||
{"n_processing_slots", n_processing_slots}
|
{"n_processing_slots", n_processing_slots}
|
||||||
@@ -1565,7 +1527,7 @@ struct llama_server_context
|
|||||||
bool update_slots() {
|
bool update_slots() {
|
||||||
if (system_need_update)
|
if (system_need_update)
|
||||||
{
|
{
|
||||||
LOG_INFO("updating system prompt", {});
|
LOG_DEBUG("updating system prompt", {});
|
||||||
system_prompt_update();
|
system_prompt_update();
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1575,7 +1537,7 @@ struct llama_server_context
|
|||||||
{
|
{
|
||||||
if (system_prompt.empty() && clean_kv_cache)
|
if (system_prompt.empty() && clean_kv_cache)
|
||||||
{
|
{
|
||||||
LOG_INFO("all slots are idle and system prompt is empty, clear the KV cache", {});
|
LOG_DEBUG("all slots are idle and system prompt is empty, clear the KV cache", {});
|
||||||
kv_cache_clear();
|
kv_cache_clear();
|
||||||
}
|
}
|
||||||
return true;
|
return true;
|
||||||
@@ -1598,7 +1560,7 @@ struct llama_server_context
|
|||||||
const int n_left = (int) system_tokens.size() + slot.n_past - n_keep;
|
const int n_left = (int) system_tokens.size() + slot.n_past - n_keep;
|
||||||
const int n_discard = n_left / 2;
|
const int n_discard = n_left / 2;
|
||||||
|
|
||||||
LOG_INFO("slot context shift", {
|
LOG_DEBUG("slot context shift", {
|
||||||
{"slot_id", slot.id},
|
{"slot_id", slot.id},
|
||||||
{"task_id", slot.task_id},
|
{"task_id", slot.task_id},
|
||||||
{"n_keep", n_keep},
|
{"n_keep", n_keep},
|
||||||
@@ -1637,7 +1599,7 @@ struct llama_server_context
|
|||||||
slot.command = NONE;
|
slot.command = NONE;
|
||||||
slot.t_last_used = ggml_time_us();
|
slot.t_last_used = ggml_time_us();
|
||||||
|
|
||||||
LOG_INFO("slot released", {
|
LOG_DEBUG("slot released", {
|
||||||
{"slot_id", slot.id},
|
{"slot_id", slot.id},
|
||||||
{"task_id", slot.task_id},
|
{"task_id", slot.task_id},
|
||||||
{"n_ctx", n_ctx},
|
{"n_ctx", n_ctx},
|
||||||
@@ -1677,8 +1639,7 @@ struct llama_server_context
|
|||||||
const bool has_prompt = slot.prompt.is_array() || (slot.prompt.is_string() && !slot.prompt.get<std::string>().empty()) || !slot.images.empty();
|
const bool has_prompt = slot.prompt.is_array() || (slot.prompt.is_string() && !slot.prompt.get<std::string>().empty()) || !slot.images.empty();
|
||||||
|
|
||||||
// empty prompt passed -> release the slot and send empty response
|
// empty prompt passed -> release the slot and send empty response
|
||||||
// note: infill mode allows empty prompt
|
if (slot.state == IDLE && slot.command == LOAD_PROMPT && !has_prompt)
|
||||||
if (slot.state == IDLE && slot.command == LOAD_PROMPT && !has_prompt && !slot.infill)
|
|
||||||
{
|
{
|
||||||
slot.release();
|
slot.release();
|
||||||
slot.print_timings();
|
slot.print_timings();
|
||||||
@@ -1695,33 +1656,7 @@ struct llama_server_context
|
|||||||
slot.t_start_process_prompt = ggml_time_us();
|
slot.t_start_process_prompt = ggml_time_us();
|
||||||
slot.t_start_genereration = 0;
|
slot.t_start_genereration = 0;
|
||||||
|
|
||||||
if (slot.infill)
|
prompt_tokens = tokenize(slot.prompt, system_prompt.empty() && add_bos_token); // add BOS if there isn't system prompt
|
||||||
{
|
|
||||||
bool suff_rm_leading_spc = true;
|
|
||||||
if (params.input_suffix.find_first_of(' ') == 0 && params.input_suffix.size() > 1)
|
|
||||||
{
|
|
||||||
params.input_suffix.erase(0, 1);
|
|
||||||
suff_rm_leading_spc = false;
|
|
||||||
}
|
|
||||||
auto prefix_tokens = tokenize(slot.params.input_prefix, false);
|
|
||||||
auto suffix_tokens = tokenize(slot.params.input_suffix, false);
|
|
||||||
|
|
||||||
const int space_token = 29871; // TODO: this should not be hardcoded
|
|
||||||
if (suff_rm_leading_spc && !suffix_tokens.empty() && suffix_tokens[0] == space_token) {
|
|
||||||
suffix_tokens.erase(suffix_tokens.begin());
|
|
||||||
}
|
|
||||||
|
|
||||||
prefix_tokens.insert(prefix_tokens.begin(), llama_token_prefix(model));
|
|
||||||
prefix_tokens.insert(prefix_tokens.begin(), llama_token_bos(model)); // always add BOS
|
|
||||||
prefix_tokens.insert(prefix_tokens.end(), llama_token_suffix(model));
|
|
||||||
prefix_tokens.insert(prefix_tokens.end(), suffix_tokens.begin(), suffix_tokens.end());
|
|
||||||
prefix_tokens.push_back(llama_token_middle(model));
|
|
||||||
prompt_tokens = prefix_tokens;
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
prompt_tokens = tokenize(slot.prompt, system_prompt.empty() && add_bos_token); // add BOS if there isn't system prompt
|
|
||||||
}
|
|
||||||
|
|
||||||
slot.n_prompt_tokens = prompt_tokens.size();
|
slot.n_prompt_tokens = prompt_tokens.size();
|
||||||
|
|
||||||
@@ -1806,7 +1741,7 @@ struct llama_server_context
|
|||||||
slot.ga_i = ga_i;
|
slot.ga_i = ga_i;
|
||||||
}
|
}
|
||||||
|
|
||||||
LOG_INFO("slot progression", {
|
LOG_DEBUG("slot progression", {
|
||||||
{ "slot_id", slot.id },
|
{ "slot_id", slot.id },
|
||||||
{ "task_id", slot.task_id },
|
{ "task_id", slot.task_id },
|
||||||
{ "n_past", slot.n_past },
|
{ "n_past", slot.n_past },
|
||||||
@@ -1821,7 +1756,7 @@ struct llama_server_context
|
|||||||
if (slot.n_past == slot.n_prompt_tokens && slot.n_past > 0)
|
if (slot.n_past == slot.n_prompt_tokens && slot.n_past > 0)
|
||||||
{
|
{
|
||||||
// we have to evaluate at least 1 token to generate logits.
|
// we have to evaluate at least 1 token to generate logits.
|
||||||
LOG_INFO("we have to evaluate at least 1 token to generate logits", {
|
LOG_DEBUG("we have to evaluate at least 1 token to generate logits", {
|
||||||
{ "slot_id", slot.id },
|
{ "slot_id", slot.id },
|
||||||
{ "task_id", slot.task_id }
|
{ "task_id", slot.task_id }
|
||||||
});
|
});
|
||||||
@@ -1833,7 +1768,7 @@ struct llama_server_context
|
|||||||
}
|
}
|
||||||
|
|
||||||
int p0 = (int) system_tokens.size() + slot.n_past;
|
int p0 = (int) system_tokens.size() + slot.n_past;
|
||||||
LOG_INFO("kv cache rm [p0, end)", {
|
LOG_DEBUG("kv cache rm [p0, end)", {
|
||||||
{ "slot_id", slot.id },
|
{ "slot_id", slot.id },
|
||||||
{ "task_id", slot.task_id },
|
{ "task_id", slot.task_id },
|
||||||
{ "p0", p0 }
|
{ "p0", p0 }
|
||||||
@@ -2103,6 +2038,7 @@ static void server_print_usage(const char *argv0, const gpt_params ¶ms,
|
|||||||
printf(" --embedding enable embedding vector output (default: %s)\n", params.embedding ? "enabled" : "disabled");
|
printf(" --embedding enable embedding vector output (default: %s)\n", params.embedding ? "enabled" : "disabled");
|
||||||
printf(" -np N, --parallel N number of slots for process requests (default: %d)\n", params.n_parallel);
|
printf(" -np N, --parallel N number of slots for process requests (default: %d)\n", params.n_parallel);
|
||||||
printf(" -cb, --cont-batching enable continuous batching (a.k.a dynamic batching) (default: disabled)\n");
|
printf(" -cb, --cont-batching enable continuous batching (a.k.a dynamic batching) (default: disabled)\n");
|
||||||
|
printf(" -fa, --flash-attn enable Flash Attention (default: %s)\n", params.flash_attn ? "enabled" : "disabled");
|
||||||
printf(" -spf FNAME, --system-prompt-file FNAME\n");
|
printf(" -spf FNAME, --system-prompt-file FNAME\n");
|
||||||
printf(" set a file to load a system prompt (initial prompt of all slots), this is useful for chat applications.\n");
|
printf(" set a file to load a system prompt (initial prompt of all slots), this is useful for chat applications.\n");
|
||||||
printf(" -ctk TYPE, --cache-type-k TYPE\n");
|
printf(" -ctk TYPE, --cache-type-k TYPE\n");
|
||||||
@@ -2127,8 +2063,7 @@ static void server_print_usage(const char *argv0, const gpt_params ¶ms,
|
|||||||
printf("\n");
|
printf("\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
static void server_params_parse(int argc, char **argv, server_params &sparams,
|
static void server_params_parse(int argc, char **argv, server_params &sparams, gpt_params ¶ms)
|
||||||
gpt_params ¶ms, llama_server_context& llama)
|
|
||||||
{
|
{
|
||||||
gpt_params default_params;
|
gpt_params default_params;
|
||||||
server_params default_sparams;
|
server_params default_sparams;
|
||||||
@@ -2490,11 +2425,7 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
|
|||||||
}
|
}
|
||||||
else if (arg == "-v" || arg == "--verbose")
|
else if (arg == "-v" || arg == "--verbose")
|
||||||
{
|
{
|
||||||
#if SERVER_VERBOSE != 1
|
|
||||||
LOG_WARNING("server.cpp is not built with verbose logging.", {});
|
|
||||||
#else
|
|
||||||
server_verbose = true;
|
server_verbose = true;
|
||||||
#endif
|
|
||||||
}
|
}
|
||||||
else if (arg == "--mlock")
|
else if (arg == "--mlock")
|
||||||
{
|
{
|
||||||
@@ -2504,7 +2435,8 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
|
|||||||
{
|
{
|
||||||
params.use_mmap = false;
|
params.use_mmap = false;
|
||||||
}
|
}
|
||||||
else if (arg == "--numa") {
|
else if (arg == "--numa")
|
||||||
|
{
|
||||||
if (++i >= argc) {
|
if (++i >= argc) {
|
||||||
invalid_param = true;
|
invalid_param = true;
|
||||||
break;
|
break;
|
||||||
@@ -2524,6 +2456,10 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
|
|||||||
{
|
{
|
||||||
params.cont_batching = true;
|
params.cont_batching = true;
|
||||||
}
|
}
|
||||||
|
else if (arg == "-fa" || arg == "--flash-attn")
|
||||||
|
{
|
||||||
|
params.flash_attn = true;
|
||||||
|
}
|
||||||
else if (arg == "-np" || arg == "--parallel")
|
else if (arg == "-np" || arg == "--parallel")
|
||||||
{
|
{
|
||||||
if (++i >= argc)
|
if (++i >= argc)
|
||||||
@@ -2532,7 +2468,8 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
params.n_parallel = std::stoi(argv[i]);
|
params.n_parallel = std::stoi(argv[i]);
|
||||||
} else if (arg == "-n" || arg == "--n-predict")
|
}
|
||||||
|
else if (arg == "-n" || arg == "--n-predict")
|
||||||
{
|
{
|
||||||
if (++i >= argc)
|
if (++i >= argc)
|
||||||
{
|
{
|
||||||
@@ -2540,26 +2477,6 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
params.n_predict = std::stoi(argv[i]);
|
params.n_predict = std::stoi(argv[i]);
|
||||||
} else if (arg == "-spf" || arg == "--system-prompt-file")
|
|
||||||
{
|
|
||||||
if (++i >= argc)
|
|
||||||
{
|
|
||||||
invalid_param = true;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
std::ifstream file(argv[i]);
|
|
||||||
if (!file) {
|
|
||||||
fprintf(stderr, "error: failed to open file '%s'\n", argv[i]);
|
|
||||||
invalid_param = true;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
std::string systm_content;
|
|
||||||
std::copy(
|
|
||||||
std::istreambuf_iterator<char>(file),
|
|
||||||
std::istreambuf_iterator<char>(),
|
|
||||||
std::back_inserter(systm_content)
|
|
||||||
);
|
|
||||||
llama.system_prompt_process(json::parse(systm_content));
|
|
||||||
}
|
}
|
||||||
else if (arg == "-ctk" || arg == "--cache-type-k") {
|
else if (arg == "-ctk" || arg == "--cache-type-k") {
|
||||||
params.cache_type_k = argv[++i];
|
params.cache_type_k = argv[++i];
|
||||||
@@ -2600,7 +2517,7 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
|
|||||||
else if (arg == "--log-disable")
|
else if (arg == "--log-disable")
|
||||||
{
|
{
|
||||||
log_set_target(stdout);
|
log_set_target(stdout);
|
||||||
LOG_INFO("logging to file is disabled.", {});
|
LOG_DEBUG("logging to file is disabled.", {});
|
||||||
}
|
}
|
||||||
else if (arg == "--slots-endpoint-disable")
|
else if (arg == "--slots-endpoint-disable")
|
||||||
{
|
{
|
||||||
@@ -2644,18 +2561,18 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
|
|||||||
if (strncmp(sep, "int:", 4) == 0) {
|
if (strncmp(sep, "int:", 4) == 0) {
|
||||||
sep += 4;
|
sep += 4;
|
||||||
kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT;
|
kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT;
|
||||||
kvo.int_value = std::atol(sep);
|
kvo.val_i64 = std::atol(sep);
|
||||||
} else if (strncmp(sep, "float:", 6) == 0) {
|
} else if (strncmp(sep, "float:", 6) == 0) {
|
||||||
sep += 6;
|
sep += 6;
|
||||||
kvo.tag = LLAMA_KV_OVERRIDE_TYPE_FLOAT;
|
kvo.tag = LLAMA_KV_OVERRIDE_TYPE_FLOAT;
|
||||||
kvo.float_value = std::atof(sep);
|
kvo.val_f64 = std::atof(sep);
|
||||||
} else if (strncmp(sep, "bool:", 5) == 0) {
|
} else if (strncmp(sep, "bool:", 5) == 0) {
|
||||||
sep += 5;
|
sep += 5;
|
||||||
kvo.tag = LLAMA_KV_OVERRIDE_TYPE_BOOL;
|
kvo.tag = LLAMA_KV_OVERRIDE_TYPE_BOOL;
|
||||||
if (std::strcmp(sep, "true") == 0) {
|
if (std::strcmp(sep, "true") == 0) {
|
||||||
kvo.bool_value = true;
|
kvo.val_bool = true;
|
||||||
} else if (std::strcmp(sep, "false") == 0) {
|
} else if (std::strcmp(sep, "false") == 0) {
|
||||||
kvo.bool_value = false;
|
kvo.val_bool = false;
|
||||||
} else {
|
} else {
|
||||||
fprintf(stderr, "error: Invalid boolean value for KV override: %s\n", argv[i]);
|
fprintf(stderr, "error: Invalid boolean value for KV override: %s\n", argv[i]);
|
||||||
invalid_param = true;
|
invalid_param = true;
|
||||||
@@ -2726,12 +2643,12 @@ static json format_detokenized_response(std::string content)
|
|||||||
static void log_server_request(const httplib::Request &req, const httplib::Response &res)
|
static void log_server_request(const httplib::Request &req, const httplib::Response &res)
|
||||||
{
|
{
|
||||||
// skip GH copilot requests when using default port
|
// skip GH copilot requests when using default port
|
||||||
if (req.path == "/v1/health" || req.path == "/v1/completions")
|
if (req.path == "/health" || req.path == "/v1/health" || req.path == "/v1/completions")
|
||||||
{
|
{
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
LOG_INFO("request", {
|
LOG_DEBUG("request", {
|
||||||
{"remote_addr", req.remote_addr},
|
{"remote_addr", req.remote_addr},
|
||||||
{"remote_port", req.remote_port},
|
{"remote_port", req.remote_port},
|
||||||
{"status", res.status},
|
{"status", res.status},
|
||||||
@@ -2774,6 +2691,12 @@ inline void signal_handler(int signal) {
|
|||||||
shutdown_handler(signal);
|
shutdown_handler(signal);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static bool update_load_progress(float progress, void *data)
|
||||||
|
{
|
||||||
|
((llama_server_context*)data)->modelProgress = progress;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
#if defined(_WIN32)
|
#if defined(_WIN32)
|
||||||
char* wchar_to_char(const wchar_t* wstr) {
|
char* wchar_to_char(const wchar_t* wstr) {
|
||||||
if (wstr == nullptr) return nullptr;
|
if (wstr == nullptr) return nullptr;
|
||||||
@@ -2806,7 +2729,7 @@ int main(int argc, char **argv) {
|
|||||||
// struct that contains llama context and inference
|
// struct that contains llama context and inference
|
||||||
llama_server_context llama;
|
llama_server_context llama;
|
||||||
|
|
||||||
server_params_parse(argc, argv, sparams, params, llama);
|
server_params_parse(argc, argv, sparams, params);
|
||||||
|
|
||||||
if (params.model_alias == "unknown")
|
if (params.model_alias == "unknown")
|
||||||
{
|
{
|
||||||
@@ -2879,7 +2802,9 @@ int main(int argc, char **argv) {
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
case SERVER_STATE_LOADING_MODEL:
|
case SERVER_STATE_LOADING_MODEL:
|
||||||
res.set_content(R"({"status": "loading model"})", "application/json");
|
char buf[128];
|
||||||
|
snprintf(&buf[0], 128, R"({"status": "loading model", "progress": %0.2f})", llama.modelProgress);
|
||||||
|
res.set_content(buf, "application/json");
|
||||||
res.status = 503; // HTTP Service Unavailable
|
res.status = 503; // HTTP Service Unavailable
|
||||||
break;
|
break;
|
||||||
case SERVER_STATE_ERROR:
|
case SERVER_STATE_ERROR:
|
||||||
@@ -3053,7 +2978,30 @@ int main(int argc, char **argv) {
|
|||||||
log_data["api_key"] = "api_key: " + std::to_string(sparams.api_keys.size()) + " keys loaded";
|
log_data["api_key"] = "api_key: " + std::to_string(sparams.api_keys.size()) + " keys loaded";
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (sparams.n_threads_http < 1) {
|
||||||
|
// +2 threads for monitoring endpoints
|
||||||
|
sparams.n_threads_http = std::max(params.n_parallel + 2, (int32_t) std::thread::hardware_concurrency() - 1);
|
||||||
|
}
|
||||||
|
log_data["n_threads_http"] = std::to_string(sparams.n_threads_http);
|
||||||
|
svr.new_task_queue = [&sparams] { return new httplib::ThreadPool(sparams.n_threads_http); };
|
||||||
|
|
||||||
|
LOG_INFO("HTTP server listening", log_data);
|
||||||
|
// run the HTTP server in a thread - see comment below
|
||||||
|
std::thread t([&]()
|
||||||
|
{
|
||||||
|
if (!svr.listen_after_bind())
|
||||||
|
{
|
||||||
|
state.store(SERVER_STATE_ERROR);
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
});
|
||||||
|
|
||||||
// load the model
|
// load the model
|
||||||
|
params.progress_callback = update_load_progress;
|
||||||
|
params.progress_callback_user_data = (void*)&llama;
|
||||||
|
|
||||||
if (!llama.load_model(params))
|
if (!llama.load_model(params))
|
||||||
{
|
{
|
||||||
state.store(SERVER_STATE_ERROR);
|
state.store(SERVER_STATE_ERROR);
|
||||||
@@ -3113,7 +3061,7 @@ int main(int argc, char **argv) {
|
|||||||
json data = json::parse(req.body);
|
json data = json::parse(req.body);
|
||||||
const int task_id = llama.queue_tasks.get_new_id();
|
const int task_id = llama.queue_tasks.get_new_id();
|
||||||
llama.queue_results.add_waiting_task_id(task_id);
|
llama.queue_results.add_waiting_task_id(task_id);
|
||||||
llama.request_completion(task_id, data, false, false, -1);
|
llama.request_completion(task_id, data, false, -1);
|
||||||
if (!json_value(data, "stream", false)) {
|
if (!json_value(data, "stream", false)) {
|
||||||
std::string completion_text;
|
std::string completion_text;
|
||||||
task_result result = llama.queue_results.recv(task_id);
|
task_result result = llama.queue_results.recv(task_id);
|
||||||
@@ -3235,7 +3183,7 @@ int main(int argc, char **argv) {
|
|||||||
// create and queue the task
|
// create and queue the task
|
||||||
const int task_id = llama.queue_tasks.get_new_id();
|
const int task_id = llama.queue_tasks.get_new_id();
|
||||||
llama.queue_results.add_waiting_task_id(task_id);
|
llama.queue_results.add_waiting_task_id(task_id);
|
||||||
llama.request_completion(task_id, { {"prompt", prompt}, { "n_predict", 0}, {"image_data", image_data} }, false, true, -1);
|
llama.request_completion(task_id, { {"prompt", prompt}, { "n_predict", 0}, {"image_data", image_data} }, true, -1);
|
||||||
|
|
||||||
// get the result
|
// get the result
|
||||||
task_result result = llama.queue_results.recv(task_id);
|
task_result result = llama.queue_results.recv(task_id);
|
||||||
@@ -3257,26 +3205,6 @@ int main(int argc, char **argv) {
|
|||||||
}*/
|
}*/
|
||||||
//);
|
//);
|
||||||
|
|
||||||
if (sparams.n_threads_http < 1) {
|
|
||||||
// +2 threads for monitoring endpoints
|
|
||||||
sparams.n_threads_http = std::max(params.n_parallel + 2, (int32_t) std::thread::hardware_concurrency() - 1);
|
|
||||||
}
|
|
||||||
log_data["n_threads_http"] = std::to_string(sparams.n_threads_http);
|
|
||||||
svr.new_task_queue = [&sparams] { return new httplib::ThreadPool(sparams.n_threads_http); };
|
|
||||||
|
|
||||||
LOG_INFO("HTTP server listening", log_data);
|
|
||||||
// run the HTTP server in a thread - see comment below
|
|
||||||
std::thread t([&]()
|
|
||||||
{
|
|
||||||
if (!svr.listen_after_bind())
|
|
||||||
{
|
|
||||||
state.store(SERVER_STATE_ERROR);
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
});
|
|
||||||
|
|
||||||
llama.queue_tasks.on_new_task(std::bind(
|
llama.queue_tasks.on_new_task(std::bind(
|
||||||
&llama_server_context::process_single_task, &llama, std::placeholders::_1));
|
&llama_server_context::process_single_task, &llama, std::placeholders::_1));
|
||||||
llama.queue_tasks.on_finish_multitask(std::bind(
|
llama.queue_tasks.on_finish_multitask(std::bind(
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user