chore(model gallery): add ai21labs_ai21-jamba-reasoning-3b (#6417)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
This commit is contained in:
committed by
GitHub
parent
99a72a4b11
commit
fa8037b21d
@@ -1,4 +1,29 @@
|
||||
---
|
||||
- &jamba
|
||||
icon: https://cdn-avatars.huggingface.co/v1/production/uploads/65e60c0ed5313c06372446ff/QwehUHgP2HtVAMW5MzJ2j.png
|
||||
name: "ai21labs_ai21-jamba-reasoning-3b"
|
||||
url: "github:mudler/LocalAI/gallery/jamba.yaml@master"
|
||||
license: apache-2.0
|
||||
tags:
|
||||
- gguf
|
||||
- GPU
|
||||
- CPU
|
||||
- text-to-text
|
||||
- jamba
|
||||
- mamba
|
||||
urls:
|
||||
- https://huggingface.co/ai21labs/AI21-Jamba-Reasoning-3B
|
||||
- https://huggingface.co/bartowski/ai21labs_AI21-Jamba-Reasoning-3B-GGUF
|
||||
description: |
|
||||
AI21’s Jamba Reasoning 3B is a top-performing reasoning model that packs leading scores on intelligence benchmarks and highly-efficient processing into a compact 3B build.
|
||||
The hybrid design combines Transformer attention with Mamba (a state-space model). Mamba layers are more efficient for sequence processing, while attention layers capture complex dependencies. This mix reduces memory overhead, improves throughput, and makes the model run smoothly on laptops, GPUs, and even mobile devices, while maintaining impressive quality.
|
||||
overrides:
|
||||
parameters:
|
||||
model: ai21labs_AI21-Jamba-Reasoning-3B-Q4_K_M.gguf
|
||||
files:
|
||||
- filename: ai21labs_AI21-Jamba-Reasoning-3B-Q4_K_M.gguf
|
||||
sha256: ac7ec0648dea62d1efb5ef6e7268c748ffc71f1c26eebe97eccff0a8d41608e6
|
||||
uri: huggingface://bartowski/ai21labs_AI21-Jamba-Reasoning-3B-GGUF/ai21labs_AI21-Jamba-Reasoning-3B-Q4_K_M.gguf
|
||||
- &granite4
|
||||
url: "github:mudler/LocalAI/gallery/granite4.yaml@master"
|
||||
name: "ibm-granite_granite-4.0-h-small"
|
||||
|
||||
57
gallery/jamba.yaml
Normal file
57
gallery/jamba.yaml
Normal file
@@ -0,0 +1,57 @@
|
||||
---
|
||||
name: "jamba"
|
||||
|
||||
config_file: |
|
||||
mmap: true
|
||||
backend: "llama-cpp"
|
||||
template:
|
||||
chat_message: |
|
||||
<|im_start|>{{if eq .RoleName "tool" }}user{{else}}{{ .RoleName }}{{end}}
|
||||
{{ if eq .RoleName "tool" -}}
|
||||
<tool_response>
|
||||
{{ end -}}
|
||||
{{ if .Content -}}
|
||||
{{.Content }}
|
||||
{{ end -}}
|
||||
{{ if eq .RoleName "tool" -}}
|
||||
</tool_response>
|
||||
{{ end -}}
|
||||
{{ if .FunctionCall -}}
|
||||
<tool_call>
|
||||
{{toJson .FunctionCall}}
|
||||
</tool_call>
|
||||
{{ end -}}<|im_end|>
|
||||
function: |
|
||||
<|im_start|>system
|
||||
# Tools
|
||||
You may call one or more functions to assist with the user query.
|
||||
You are provided with function signatures within <tools></tools> XML tags:
|
||||
<tools>
|
||||
{{range .Functions}}
|
||||
{'type': 'function', 'function': {'name': '{{.Name}}', 'description': '{{.Description}}', 'parameters': {{toJson .Parameters}} }}
|
||||
{{end}}
|
||||
</tools>
|
||||
For each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:
|
||||
<tool_call>
|
||||
{\"name\": <function-name>, \"arguments\": <args-json-object>}
|
||||
</tool_call>
|
||||
<|im_end|>
|
||||
{{.Input -}}
|
||||
<|im_start|>assistant
|
||||
chat: |
|
||||
{{.Input -}}
|
||||
<|im_start|>assistant
|
||||
<think>
|
||||
completion: |
|
||||
{{.Input}}
|
||||
context_size: 8192
|
||||
function:
|
||||
grammar:
|
||||
triggers:
|
||||
- word: "<tool_call>"
|
||||
f16: true
|
||||
stopwords:
|
||||
- '<|im_end|>'
|
||||
- '<dummy32000>'
|
||||
- '</s>'
|
||||
- '<|endoftext|>'
|
||||
Reference in New Issue
Block a user