cleanup + add tokenizer hash
parent e2f8845f1c
commit 5584bf1e19
```diff
@@ -147,8 +147,8 @@ func (p *mistralLarge3Model) KV(t *Tokenizer) ggml.KV {
         kv["deepseek2.spatial_merge_size"] = p.VisionEncoder.SpatialMergeSize
     }
 
-    // Set tokenizer type - use default for Mistral models
-    kv["tokenizer.ggml.pre"] = "tekken" // Let it use the default tokenizer preprocessing
+    // Set tokenizer type - use tekken preprocessing (now supported!)
+    kv["tokenizer.ggml.pre"] = "tekken"
 
     return kv
 }
```
```diff
@@ -101,6 +101,8 @@ func parseTokenizer(fsys fs.FS, specialTokenTypes []string) (*Tokenizer, error)
         t.Pre = "deepseek-coder"
     case "1ff7f41064896984db5d1bb6ff64fa4bc29007d08c1b439e505b7392777a319e":
         t.Pre = "qwen2"
+    case "1d64a9a8eaf9f1bd80331984d81fdd514e7feafe8df83a525dd31472f275699a":
+        t.Pre = "tekken"
     case "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855":
         // noop, empty pretokenizer
     default:
```
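For reference, these case labels are lowercase-hex SHA-256 digests of the model's pre-tokenizer configuration: `parseTokenizer` fingerprints the config and maps the digest to a `Pre` value. Below is a minimal sketch of that idea, assuming the digest is taken over the raw pre-tokenizer JSON from tokenizer.json (this diff does not show exactly which bytes are hashed). One thing that is certain: the `e3b0c442…` case is the SHA-256 of empty input, i.e. a model with no pre-tokenizer section at all.

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// preForDigest mirrors the shape of the switch in parseTokenizer: map a
// pre-tokenizer fingerprint to a known preprocessing scheme. The input
// bytes are an assumption for illustration; only hash-and-compare matters.
func preForDigest(preTokenizerJSON []byte) string {
	sum := sha256.Sum256(preTokenizerJSON)
	switch hex.EncodeToString(sum[:]) {
	case "1d64a9a8eaf9f1bd80331984d81fdd514e7feafe8df83a525dd31472f275699a":
		return "tekken"
	case "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855":
		return "" // SHA-256 of zero bytes: no pre-tokenizer shipped
	default:
		return "default"
	}
}

func main() {
	// sha256.Sum256(nil) yields the e3b0c442… digest from the switch above.
	fmt.Printf("%q\n", preForDigest(nil))
	fmt.Printf("%q\n", preForDigest([]byte(`{"type":"Sequence"}`)))
}
```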
```diff
@@ -216,7 +216,6 @@ type Layer struct {
 }
 
 func (t *Layer) Forward(ctx ml.Context, hiddenStates, positions, attentionScales, outputs ml.Tensor, cache kvcache.Cache, opts *Options) ml.Tensor {
-    fmt.Println("[LAYER] In the new engine")
     residual := hiddenStates
     hiddenStates = t.AttentionNorm.Forward(ctx, hiddenStates, opts.eps)
     hiddenStates = t.Attention.Forward(ctx, hiddenStates, positions, attentionScales, cache, opts)
@@ -249,8 +248,11 @@ type Model struct {
 }
 
 func New(c fs.Config) (model.Model, error) {
-    layers := make([]Layer, c.Uint("block_count"))
-    fmt.Printf("[MODEL DEBUG] Creating model with %d layers\n", c.Uint("block_count"))
+    // layers := make([]Layer, c.Uint("block_count"))
+    // fmt.Printf("[MODEL DEBUG] Creating model with %d layers\n", c.Uint("block_count"))
 
+    layers := make([]Layer, 4)
+    fmt.Printf("[MODEL DEBUG] Creating model with %d layers\n", 4)
+
     firstDenseLayerIndex := int(c.Uint("leading_dense_block_count"))
     for i := range layers {
@@ -269,7 +271,6 @@ func New(c fs.Config) (model.Model, error) {
     valueLength := int(cmp.Or(c.Uint("attention.value_length_mla"), c.Uint("attention.value_length")))
 
     var pre []string
-    fmt.Println("[TOKENIZER] Using tokenizer", c.String("tokenizer.ggml.pre"))
     switch c.String("tokenizer.ggml.pre") {
     case "deepseek-v3":
         pre = []string{
```
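A detail worth noting in the context above: `cmp.Or` (standard library, Go 1.22+) returns its first non-zero argument, so `attention.value_length_mla` acts as an override and `attention.value_length` as the fallback when the MLA-specific key is absent. A tiny illustration:

```go
package main

import (
	"cmp"
	"fmt"
)

func main() {
	// Unset config keys read back as zero, so cmp.Or falls through
	// to the next argument.
	fmt.Println(cmp.Or(uint32(0), uint32(128)))   // 128: value_length_mla unset
	fmt.Println(cmp.Or(uint32(512), uint32(128))) // 512: value_length_mla wins
}
```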
```diff
@@ -279,7 +280,6 @@ func New(c fs.Config) (model.Model, error) {
             "[!\"#$%&'()*+,\\-./:;<=>?@\\[\\\\\\]^_`{|}~][A-Za-z]+|[^\r\n\\p{L}\\p{P}\\p{S}]?[\\p{L}\\p{M}]+| ?[\\p{P}\\p{S}]+[\r\n]*|\\s*[\r\n]+|\\s+(?!\\S)|\\s+",
         }
     case "tekken":
-        fmt.Println("[TOKENIZER] Using Tekken tokenizer")
         pre = []string{
             "[^\\r\\n\\p{L}\\p{N}]?((?=[\\p{L}])([^a-z]))*((?=[\\p{L}])([^A-Z]))+|[^\\r\\n\\p{L}\\p{N}]?((?=[\\p{L}])([^a-z]))+((?=[\\p{L}])([^A-Z]))*|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n/]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
         }
```
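The "tekken" pattern in the context above relies on lookaround (`(?=…)`, `(?!\S)`), which Go's standard RE2-based `regexp` package rejects, so experimenting with it requires a backtracking engine. A minimal sketch using `github.com/dlclark/regexp2` — an assumption for the demo; the diff does not show which engine the model package actually compiles these patterns with:

```go
package main

import (
	"fmt"

	"github.com/dlclark/regexp2"
)

func main() {
	// The tekken pre-tokenizer regex from the case above, as a raw string.
	pattern := `[^\r\n\p{L}\p{N}]?((?=[\p{L}])([^a-z]))*((?=[\p{L}])([^A-Z]))+|[^\r\n\p{L}\p{N}]?((?=[\p{L}])([^a-z]))+((?=[\p{L}])([^A-Z]))*|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n/]*|\s*[\r\n]+|\s+(?!\S)|\s+`
	re := regexp2.MustCompile(pattern, regexp2.None)

	// Pre-tokenization splits the prompt into chunks; BPE merges then run
	// inside each chunk, never across chunk boundaries.
	m, _ := re.FindStringMatch("Hello, world! 42")
	for m != nil {
		fmt.Printf("%q\n", m.String())
		m, _ = re.FindNextMatch(m)
	}
}
```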
```diff
@@ -303,24 +303,7 @@ func New(c fs.Config) (model.Model, error) {
     tokenTypes := c.Ints("tokenizer.ggml.token_type")
     merges := c.Strings("tokenizer.ggml.merges")
 
-    fmt.Printf("[TOKENIZER DEBUG] Loading vocabulary:\n")
-    fmt.Printf("[TOKENIZER DEBUG] - Tokens count: %d\n", len(tokens))
-    fmt.Printf("[TOKENIZER DEBUG] - Token types count: %d\n", len(tokenTypes))
-    fmt.Printf("[TOKENIZER DEBUG] - Merges count: %d\n", len(merges))
-    fmt.Printf("[TOKENIZER DEBUG] - BOS token ID: %d\n", c.Uint("tokenizer.ggml.bos_token_id"))
-    fmt.Printf("[TOKENIZER DEBUG] - EOS token ID: %d\n", c.Uint("tokenizer.ggml.eos_token_id"))
-    fmt.Printf("[TOKENIZER DEBUG] - Add BOS: %v\n", c.Bool("tokenizer.ggml.add_bos_token", true))
-    fmt.Printf("[TOKENIZER DEBUG] - Add EOS: %v\n", c.Bool("tokenizer.ggml.add_eos_token", false))
-
-    if len(tokens) > 0 {
-        maxShow := 10
-        if len(tokens) < maxShow {
-            maxShow = len(tokens)
-        }
-        fmt.Printf("[TOKENIZER DEBUG] First %d tokens: %v\n", maxShow, tokens[:maxShow])
-    } else {
-        fmt.Printf("[TOKENIZER DEBUG] ERROR: No tokens loaded from GGUF!\n")
-    }
+    // Debug output removed for performance
 
     m := Model{
         BytePairEncoding: model.NewBytePairEncoding(
```
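The hunk above deletes the vocabulary debug prints outright. An alternative worth considering is gating them behind an environment variable, so they cost one boolean check in the hot path but can be re-enabled without editing code. A minimal sketch, where `OLLAMA_TOKENIZER_DEBUG` is a hypothetical flag, not an existing ollama option:

```go
package main

import (
	"fmt"
	"os"
)

// tokenizerDebug is read once at startup; each debugf call costs one bool check.
var tokenizerDebug = os.Getenv("OLLAMA_TOKENIZER_DEBUG") != ""

func debugf(format string, args ...any) {
	if tokenizerDebug {
		fmt.Printf(format, args...)
	}
}

func main() {
	tokens := []int32{8101, 1033, 29706}
	debugf("[TOKENIZER DEBUG] - Tokens count: %d\n", len(tokens))
}
```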
```diff
@@ -213,7 +213,6 @@ func (s *Server) NewSequence(prompt string, images []llm.ImageData, params NewSe
 func calculateLogprobs(logits []float32, selectedToken int32, topK int, textProcessor model.TextProcessor) []llm.Logprob {
     decoder := func(tokenID int) string {
         text, _ := textProcessor.Decode([]int32{int32(tokenID)})
-        fmt.Printf("[TOKENIZER] Decoded token %d to: %q\n", tokenID, text)
         return text
     }
     return common.CalculateLogprobs(logits, int(selectedToken), topK, decoder)
```
```diff
@@ -243,52 +242,10 @@ func (s *Server) inputs(prompt string, images []llm.ImageData) ([]*input.Input,
 
     for i, part := range parts {
         // text - tokenize
-        fmt.Printf("[TOKENIZER] Encoding text: %q\n", part)
-
-        // Debug: Test what token 0 decodes to
-        token0Text, _ := s.model.(model.TextProcessor).Decode([]int32{0})
-        fmt.Printf("[TOKENIZER] Token 0 decodes to: %q\n", token0Text)
-
-        // Debug: Test a few other common tokens
-        for testToken := int32(1); testToken <= 10; testToken++ {
-            testText, _ := s.model.(model.TextProcessor).Decode([]int32{testToken})
-            fmt.Printf("[TOKENIZER] Token %d decodes to: %q\n", testToken, testText)
-        }
-
-        // Debug: Test higher token IDs where real vocabulary might be
-        fmt.Printf("[TOKENIZER] Testing higher token IDs:\n")
-        testHighTokens := []int32{100, 1000, 10000, 50000, 100000, 131000}
-        for _, testToken := range testHighTokens {
-            testText, _ := s.model.(model.TextProcessor).Decode([]int32{testToken})
-            fmt.Printf("[TOKENIZER] Token %d decodes to: %q\n", testToken, testText)
-        }
-
         tokens, err := s.model.(model.TextProcessor).Encode(part, i == 0)
         if err != nil {
             return nil, nil, nil, err
         }
-        fmt.Printf("[TOKENIZER] Encoded to %d tokens: %v\n", len(tokens), tokens)
-
-        // Debug: Decode the encoded tokens back to text
-        if len(tokens) > 0 {
-            decodedText, _ := s.model.(model.TextProcessor).Decode(tokens)
-            fmt.Printf("[TOKENIZER] Tokens %v decode back to: %q\n", tokens, decodedText)
-
-            // Debug: Show each token individually
-            fmt.Printf("[TOKENIZER] Individual tokens:\n")
-            for i, token := range tokens {
-                singleText, _ := s.model.(model.TextProcessor).Decode([]int32{token})
-                fmt.Printf("[TOKENIZER] Token %d: %d → %q (hex: %x)\n", i, token, singleText, []byte(singleText))
-            }
-
-            // Debug: Test specific tokens that should be clean
-            fmt.Printf("[TOKENIZER] Testing specific clean tokens:\n")
-            testTokens := []int32{8101, 1033, 29706} // hi, !, hello
-            for _, testToken := range testTokens {
-                testText, _ := s.model.(model.TextProcessor).Decode([]int32{testToken})
-                fmt.Printf("[TOKENIZER] Clean test %d → %q (hex: %x)\n", testToken, testText, []byte(testText))
-            }
-        }
 
         for _, t := range tokens {
             inputs = append(inputs, &input.Input{Token: t})
```
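Most of what the deleted block above did by hand — encode, decode back, compare — is the classic round-trip property, which fits better in a test than in the request path. A minimal sketch against the Encode/Decode shapes visible in this diff (treat the exact `model.TextProcessor` signatures as assumptions), with a trivial byte-level processor standing in for a real tokenizer:

```go
package main

import "fmt"

// TextProcessor captures the two calls the deleted debug code exercised;
// the real interface lives in ollama's model package.
type TextProcessor interface {
	Encode(s string, addSpecial bool) ([]int32, error)
	Decode(tokens []int32) (string, error)
}

// checkRoundTrip fails when encode→decode does not reproduce the prompt,
// which is exactly the symptom the inline prints were chasing.
func checkRoundTrip(tp TextProcessor, prompt string) error {
	tokens, err := tp.Encode(prompt, true)
	if err != nil {
		return err
	}
	decoded, err := tp.Decode(tokens)
	if err != nil {
		return err
	}
	if decoded != prompt {
		return fmt.Errorf("round trip mismatch: %q -> %v -> %q", prompt, tokens, decoded)
	}
	return nil
}

// byteProcessor is a stand-in tokenizer: one token per byte.
type byteProcessor struct{}

func (byteProcessor) Encode(s string, _ bool) ([]int32, error) {
	out := make([]int32, 0, len(s))
	for _, b := range []byte(s) {
		out = append(out, int32(b))
	}
	return out, nil
}

func (byteProcessor) Decode(tokens []int32) (string, error) {
	buf := make([]byte, len(tokens))
	for i, t := range tokens {
		buf[i] = byte(t)
	}
	return string(buf), nil
}

func main() {
	fmt.Println(checkRoundTrip(byteProcessor{}, "hi!")) // <nil>
}
```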
```diff
@@ -823,9 +780,6 @@ func (s *Server) computeBatch(activeBatch batchState) {
             panic("failed to decode token")
         }
 
-        // DEBUG: Show what token is being generated
-        fmt.Printf("[GENERATION] Token %d → %q (hex: %x)\n", token, piece, []byte(piece))
-
         // Calculate logprobs if requested (after EOS check to avoid logprobs for EOS tokens)
         if seq.logprobs {
             logprobs := calculateLogprobs(logits, token, seq.topLogprobs, s.model.(model.TextProcessor))
```
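For context on the call that survives the cleanup: computing top-k logprobs amounts to a numerically stable log-softmax over the logits, then keeping the k largest entries (plus the sampled token, if it fell outside the top k). A self-contained sketch of that shape — not ollama's actual `common.CalculateLogprobs`:

```go
package main

import (
	"fmt"
	"math"
	"sort"
)

type logprob struct {
	Token   int
	Logprob float64
}

func topKLogprobs(logits []float32, selected, k int) []logprob {
	// Stable log-softmax: log p_i = x_i - (max + log Σ exp(x_j - max)).
	maxv := math.Inf(-1)
	for _, v := range logits {
		maxv = math.Max(maxv, float64(v))
	}
	var sum float64
	for _, v := range logits {
		sum += math.Exp(float64(v) - maxv)
	}
	lse := maxv + math.Log(sum)

	all := make([]logprob, len(logits))
	for i, v := range logits {
		all[i] = logprob{Token: i, Logprob: float64(v) - lse}
	}
	sort.Slice(all, func(i, j int) bool { return all[i].Logprob > all[j].Logprob })

	if k > len(all) {
		k = len(all)
	}
	out := all[:k:k] // full slice expression so append below cannot clobber all[k]
	for _, lp := range out {
		if lp.Token == selected {
			return out
		}
	}
	// Sampled token missed the top k; report it anyway, as logprob APIs usually do.
	return append(out, logprob{Token: selected, Logprob: float64(logits[selected]) - lse})
}

func main() {
	fmt.Println(topKLogprobs([]float32{2, 1, 0.5, -4}, 3, 2))
}
```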