handle igpu as gpu

Inforithmics 2025-10-05 16:20:10 +02:00
parent 908b31814d
commit d5a2462c8e
2 changed files with 8 additions and 2 deletions


@@ -69,7 +69,9 @@ func EnumerateGPUs() []ml.DeviceID {
 	for i := range C.ggml_backend_dev_count() {
 		device := C.ggml_backend_dev_get(i)
-		if C.ggml_backend_dev_type(device) == C.GGML_BACKEND_DEVICE_TYPE_GPU {
+		switch C.ggml_backend_dev_type(device) {
+		case C.GGML_BACKEND_DEVICE_TYPE_GPU:
+		case C.GGML_BACKEND_DEVICE_TYPE_IGPU:
 			var props C.struct_ggml_backend_dev_props
 			C.ggml_backend_dev_get_props(device, &props)
 			ids = append(ids, ml.DeviceID{

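A note on the switch introduced above: Go case clauses do not fall through implicitly, so a case with an empty body is a no-op. Read literally, the hunk leaves the GGML_BACKEND_DEVICE_TYPE_GPU case empty, and only GGML_BACKEND_DEVICE_TYPE_IGPU devices reach the props/append code; listing both constants in one case gives them a shared body. The sketch below illustrates the difference with plain Go constants standing in for the cgo enum values (the names are placeholders, not the real ggml bindings):

package main

import "fmt"

// Plain-Go stand-ins for the cgo enum values C.GGML_BACKEND_DEVICE_TYPE_GPU
// and C.GGML_BACKEND_DEVICE_TYPE_IGPU referenced in the diff.
const (
	deviceTypeGPU  = 1
	deviceTypeIGPU = 2
)

func main() {
	for _, dev := range []int{deviceTypeGPU, deviceTypeIGPU} {
		// Stacked labels: the GPU case has an empty body and Go does not
		// fall through, so this body runs only for deviceTypeIGPU.
		switch dev {
		case deviceTypeGPU:
		case deviceTypeIGPU:
			fmt.Println("stacked labels ran for:", dev)
		}

		// Multi-value case: one body shared by both device types.
		switch dev {
		case deviceTypeGPU, deviceTypeIGPU:
			fmt.Println("multi-value case ran for:", dev)
		}
	}
}

Run as-is, the first switch prints only for the IGPU stand-in, while the second prints for both values.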

@@ -58,6 +58,7 @@ var initDevices = sync.OnceFunc(func() {
 		case C.GGML_BACKEND_DEVICE_TYPE_ACCEL:
 			accels = append(accels, d)
 		case C.GGML_BACKEND_DEVICE_TYPE_GPU:
+		case C.GGML_BACKEND_DEVICE_TYPE_IGPU:
 			gpus = append(gpus, d)
 		}
@@ -470,7 +471,9 @@ func (b *Backend) Load(ctx context.Context, progress func(float32)) error {
 	// Mimic llama runner logs summarizing layers and memory
 	gpuLayers := 0
 	for layer := range maps.Values(b.layers) {
-		if C.ggml_backend_dev_type(layer.d) == C.GGML_BACKEND_DEVICE_TYPE_GPU {
+		switch C.ggml_backend_dev_type(layer.d) {
+		case C.GGML_BACKEND_DEVICE_TYPE_GPU:
+		case C.GGML_BACKEND_DEVICE_TYPE_IGPU:
 			gpuLayers++
 		}
 	}
@@ -480,6 +483,7 @@ func (b *Backend) Load(ctx context.Context, progress func(float32)) error {
 	case C.GGML_BACKEND_DEVICE_TYPE_CPU:
 		slog.Info("offloading output layer to CPU")
 	case C.GGML_BACKEND_DEVICE_TYPE_GPU:
+	case C.GGML_BACKEND_DEVICE_TYPE_IGPU:
 		slog.Info("offloading output layer to GPU")
 		gpuLayers++
 	case C.GGML_BACKEND_DEVICE_TYPE_ACCEL:
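The same consideration applies to the hunks above in initDevices and Backend.Load: a single case listing both device types keeps discrete and integrated GPUs on one path for device classification, layer counting, and the offload log line. A rough self-contained sketch under that assumption, with hypothetical stand-in types in place of the cgo device handles:

package main

import (
	"fmt"
	"log/slog"
)

// Hypothetical stand-ins for the cgo device-type constants; the real code
// compares C.ggml_backend_dev_type(d) against C.GGML_BACKEND_DEVICE_TYPE_*.
type deviceType int

const (
	deviceCPU deviceType = iota
	deviceAccel
	deviceGPU
	deviceIGPU
)

type device struct {
	name string
	typ  deviceType
}

func main() {
	devices := []device{
		{"cpu", deviceCPU},
		{"discrete", deviceGPU},
		{"integrated", deviceIGPU},
	}

	var gpus, accels []device
	gpuLayers := 0

	for _, d := range devices {
		switch d.typ {
		case deviceAccel:
			accels = append(accels, d)
		case deviceGPU, deviceIGPU:
			// One shared body: integrated GPUs are counted and offloaded
			// exactly like discrete ones.
			gpus = append(gpus, d)
			gpuLayers++
			slog.Info("offloading output layer to GPU", "device", d.name)
		}
	}

	fmt.Println("gpus:", len(gpus), "accels:", len(accels), "gpuLayers:", gpuLayers)
}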