Add files using upload-large-folder tool
- .gitattributes +2 -0
- README.md +404 -0
- SYSTEM_PROMPT.txt +29 -0
- _README.md +230 -0
- chat_template.jinja +121 -0
- consolidated-00025-of-00273.safetensors +3 -0
- consolidated-00032-of-00273.safetensors +3 -0
- consolidated-00053-of-00273.safetensors +3 -0
- consolidated-00056-of-00273.safetensors +3 -0
- consolidated-00060-of-00273.safetensors +3 -0
- consolidated-00073-of-00273.safetensors +3 -0
- consolidated-00077-of-00273.safetensors +3 -0
- consolidated-00082-of-00273.safetensors +3 -0
- consolidated-00088-of-00273.safetensors +3 -0
- consolidated-00090-of-00273.safetensors +3 -0
- consolidated-00095-of-00273.safetensors +3 -0
- consolidated-00105-of-00273.safetensors +3 -0
- consolidated-00109-of-00273.safetensors +3 -0
- consolidated-00123-of-00273.safetensors +3 -0
- consolidated-00124-of-00273.safetensors +3 -0
- consolidated-00130-of-00273.safetensors +3 -0
- consolidated-00137-of-00273.safetensors +3 -0
- consolidated-00141-of-00273.safetensors +3 -0
- consolidated-00146-of-00273.safetensors +3 -0
- consolidated-00151-of-00273.safetensors +3 -0
- consolidated-00152-of-00273.safetensors +3 -0
- consolidated-00153-of-00273.safetensors +3 -0
- consolidated-00160-of-00273.safetensors +3 -0
- consolidated-00166-of-00273.safetensors +3 -0
- consolidated-00170-of-00273.safetensors +3 -0
- consolidated-00172-of-00273.safetensors +3 -0
- consolidated-00174-of-00273.safetensors +3 -0
- consolidated-00176-of-00273.safetensors +3 -0
- consolidated-00181-of-00273.safetensors +3 -0
- consolidated-00183-of-00273.safetensors +3 -0
- consolidated-00184-of-00273.safetensors +3 -0
- consolidated-00185-of-00273.safetensors +3 -0
- consolidated-00190-of-00273.safetensors +3 -0
- consolidated-00193-of-00273.safetensors +3 -0
- consolidated-00200-of-00273.safetensors +3 -0
- consolidated-00202-of-00273.safetensors +3 -0
- consolidated-00209-of-00273.safetensors +3 -0
- consolidated-00229-of-00273.safetensors +3 -0
- consolidated.safetensors.index.json +0 -0
- params.json +113 -0
- processor_config.json +42 -0
- special_tokens_map.json +0 -0
- tekken.json +3 -0
- tokenizer.json +3 -0
- tokenizer_config.json +0 -0
.gitattributes
CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tekken.json filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,404 @@
---
library_name: vllm
language:
- en
- fr
- es
- de
- it
- pt
- nl
- zh
- ja
- ko
- ar
license: apache-2.0
inference: false
extra_gated_description: >-
  If you want to learn more about how we process your personal data, please read
  our <a href="https://mistral.ai/terms/">Privacy Policy</a>.
base_model:
- mistralai/Mistral-Large-3-675B-Base-2512
tags:
- mistral-common
---

# Mistral Large 3 675B Instruct 2512 NVFP4

From our family of large models, **Mistral Large 3** is a state-of-the-art general-purpose **Multimodal granular Mixture-of-Experts** model with **41B active parameters** and **675B total parameters**, trained from the ground up on 3,000 H200s.

This model is the instruct post-trained version, fine-tuned for instruction following, making it ideal for chat, agentic, and instruction-based use cases.
Designed for reliability and long-context comprehension, it is engineered for production-grade assistants, retrieval-augmented systems, scientific workloads, and complex enterprise workflows.

Mistral Large 3 is deployable on-premises in:
- [FP8](https://huggingface.co/mistralai/Mistral-Large-3-675B-Instruct-2512-FP8) on a single node of B200s or H200s.
- **NVFP4** on a single node of H100s or A100s.

## Key Features
Mistral Large 3 consists of two main architectural components:
- **A Granular MoE Language Model with 673B parameters, 39B active**
- **A 2.5B Vision Encoder**

The Mistral Large 3 Instruct model offers the following capabilities:
- **Vision**: Enables the model to analyze images and provide insights based on visual content, in addition to text.
- **Multilingual**: Supports dozens of languages, including English, French, Spanish, German, Italian, Portuguese, Dutch, Chinese, Japanese, Korean, and Arabic.
- **System Prompt**: Maintains strong adherence and support for system prompts.
- **Agentic**: Offers best-in-class agentic capabilities with native function calling and JSON output.
- **Frontier**: Delivers best-in-class performance.
- **Apache 2.0 License**: Open-source license allowing usage and modification for both commercial and non-commercial purposes.
- **Large Context Window**: Supports a 256k context window.

## Use Cases
With powerful long-context performance and stable, consistent cross-domain behavior, Mistral Large 3 is perfect for:
- Long Document Understanding
- Powerful Daily-Driver AI Assistants
- State-of-the-Art Agentic and Tool-Use Capabilities
- Enterprise Knowledge Work
- General Coding Assistance

And enterprise-grade use cases requiring frontier capabilities.

## Recommended Settings

We recommend deploying Large 3 in a client-server configuration with the following best practices:

- **System Prompt**: Define a clear environment and use case, including guidance on how to effectively leverage tools in agentic systems.
- **Sampling Parameters**: Use a temperature below 0.1 for daily-driver and production environments; higher temperatures may be explored for creative use cases, and developers are encouraged to experiment with alternative settings.
- **Tools**: Keep the set of tools well defined and limit their number to the minimum required for the use case; avoid overloading the model with an excessive number of tools.
- **Vision**: When deploying with vision capabilities, we recommend maintaining an aspect ratio close to 1:1 (width-to-height) for images. Avoid overly thin or wide images; crop them as needed to ensure optimal performance.

### Known Issues / Limitations

- **Not a dedicated reasoning model**: Dedicated reasoning models can outperform Mistral Large 3 in strict reasoning use cases.
- **Behind vision-first models in multimodal tasks**: Mistral Large 3 can lag behind models optimized for vision tasks and use cases.
- **Complex deployment**: Due to its large size and architecture, the model can be challenging to deploy efficiently with constrained resources or at scale.

## Benchmark Results

We compare Mistral Large 3 to similarly sized models.

### Text

### Vision

## Usage

The model can be used with the following frameworks:
- [`vllm`](https://github.com/vllm-project/vllm): See [here](#vllm)

### vLLM

We recommend using this model with [vLLM](https://github.com/vllm-project/vllm).

#### Installation

Make sure to install [`vLLM >= 0.12.0`](https://github.com/vllm-project/vllm/releases/tag/v0.12.0):

```
pip install vllm --upgrade
```

Doing so should automatically install [`mistral_common >= 1.8.6`](https://github.com/mistralai/mistral-common/releases/tag/v1.8.6).

To check:
```
python -c "import mistral_common; print(mistral_common.__version__)"
```

You can also make use of a ready-to-go [docker image](https://github.com/vllm-project/vllm/blob/main/Dockerfile) or one from [Docker Hub](https://hub.docker.com/layers/vllm/vllm-openai/latest/images/sha256-de9032a92ffea7b5c007dad80b38fd44aac11eddc31c435f8e52f3b7404bbf39).

#### Serve

We recommend using this format if you plan to deploy Mistral Large 3, as it achieves performance similar to FP8 with less memory. However, please note that for large contexts (`> 64k`) we observed a drop in performance; in such cases, please use the FP8 weights. Otherwise, on B200 (Blackwell) we observe a significant speed-up and a minor regression on vision datasets, probably because the calibration was performed mainly on text data.

To take full advantage of the NVFP4 format, consider using B200 GPUs, which have dedicated hardware support that maximizes its performance. If you do not have access to this GPU generation, vLLM can fall back to Marlin FP4, which lets you run the quantized model on older generations (A100, H100). You won't see a speed-up compared with FP8 quantization, but you still benefit from the memory savings.

A simple launch command is:

```bash
vllm serve mistralai/Mistral-Large-3-675B-Instruct-2512-NVFP4 \
  --tensor-parallel-size 4 \
  --enable-auto-tool-choice --tool-call-parser mistral
```

Key parameter notes:

* `enable-auto-tool-choice`: Required when enabling tool usage.
* `tool-call-parser mistral`: Required when enabling tool usage.


Additional flags:

* You can set `--max-model-len` to preserve memory. By default it is set to `262144`, which is quite large but not necessary for most scenarios.
* You can set `--max-num-batched-tokens` to balance throughput and latency: higher means higher throughput but also higher latency. A combined sketch is shown below.

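For instance, a launch combining these flags might look as follows (the values here are illustrative, not tuned recommendations):

```bash
vllm serve mistralai/Mistral-Large-3-675B-Instruct-2512-NVFP4 \
  --tensor-parallel-size 4 \
  --enable-auto-tool-choice --tool-call-parser mistral \
  --max-model-len 65536 \
  --max-num-batched-tokens 8192
```
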
#### Usage of the model

Here we assume that the model `mistralai/Mistral-Large-3-675B-Instruct-2512-NVFP4` is served and reachable at the domain `localhost` on port `8000`, which is the default for vLLM.

<details>
<summary>Vision Reasoning</summary>

Let's see if Mistral Large 3 knows when to pick a fight!

```python
from datetime import datetime, timedelta

from openai import OpenAI
from huggingface_hub import hf_hub_download

# Modify OpenAI's API key and API base to use vLLM's API server.
openai_api_key = "EMPTY"
openai_api_base = "http://localhost:8000/v1"

TEMP = 0.15
MAX_TOK = 262144

client = OpenAI(
    api_key=openai_api_key,
    base_url=openai_api_base,
)

models = client.models.list()
model = models.data[0].id


def load_system_prompt(repo_id: str, filename: str) -> str:
    file_path = hf_hub_download(repo_id=repo_id, filename=filename)
    with open(file_path, "r") as file:
        system_prompt = file.read()
    today = datetime.today().strftime("%Y-%m-%d")
    yesterday = (datetime.today() - timedelta(days=1)).strftime("%Y-%m-%d")
    model_name = repo_id.split("/")[-1]
    return system_prompt.format(name=model_name, today=today, yesterday=yesterday)


SYSTEM_PROMPT = load_system_prompt(model, "SYSTEM_PROMPT.txt")
image_url = "https://static.wikia.nocookie.net/essentialsdocs/images/7/70/Battle.png/revision/latest?cb=20220523172438"

messages = [
    {"role": "system", "content": SYSTEM_PROMPT},
    {
        "role": "user",
        "content": [
            {
                "type": "text",
                "text": "What action do you think I should take in this situation? List all the possible actions and explain why you think they are good or bad.",
            },
            {"type": "image_url", "image_url": {"url": image_url}},
        ],
    },
]


response = client.chat.completions.create(
    model=model,
    messages=messages,
    temperature=TEMP,
    max_tokens=MAX_TOK,
)

print(response.choices[0].message.content)
```
</details>

<details>
<summary>Function Calling</summary>

Let's solve some equations thanks to our simple Python calculator tool.

```python
import json
from openai import OpenAI
from huggingface_hub import hf_hub_download

# Modify OpenAI's API key and API base to use vLLM's API server.
openai_api_key = "EMPTY"
openai_api_base = "http://localhost:8000/v1"

TEMP = 0.15
MAX_TOK = 262144

client = OpenAI(
    api_key=openai_api_key,
    base_url=openai_api_base,
)

models = client.models.list()
model = models.data[0].id


def load_system_prompt(repo_id: str, filename: str) -> str:
    file_path = hf_hub_download(repo_id=repo_id, filename=filename)
    with open(file_path, "r") as file:
        system_prompt = file.read()
    return system_prompt


SYSTEM_PROMPT = load_system_prompt(model, "SYSTEM_PROMPT.txt")

image_url = "https://math-coaching.com/img/fiche/46/expressions-mathematiques.jpg"


def my_calculator(expression: str) -> str:
    # Note: eval is used here for brevity only; never eval untrusted input.
    return str(eval(expression))


tools = [
    {
        "type": "function",
        "function": {
            "name": "my_calculator",
            "description": "A calculator that can evaluate a mathematical equation and compute its results.",
            "parameters": {
                "type": "object",
                "properties": {
                    "expression": {
                        "type": "string",
                        "description": "The mathematical expression to evaluate.",
                    },
                },
                "required": ["expression"],
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "rewrite",
            "description": "Rewrite a given text for improved clarity",
            "parameters": {
                "type": "object",
                "properties": {
                    "text": {
                        "type": "string",
                        "description": "The input text to rewrite",
                    }
                },
            },
        },
    },
]

messages = [
    {"role": "system", "content": SYSTEM_PROMPT},
    {
        "role": "user",
        "content": [
            {
                "type": "text",
                "text": "Thanks to your calculator, compute the results for the equations that involve numbers displayed in the image.",
            },
            {
                "type": "image_url",
                "image_url": {
                    "url": image_url,
                },
            },
        ],
    },
]

response = client.chat.completions.create(
    model=model,
    messages=messages,
    temperature=TEMP,
    max_tokens=MAX_TOK,
    tools=tools,
    tool_choice="auto",
)

tool_calls = response.choices[0].message.tool_calls

results = []
for tool_call in tool_calls:
    function_name = tool_call.function.name
    function_args = tool_call.function.arguments
    if function_name == "my_calculator":
        result = my_calculator(**json.loads(function_args))
        results.append(result)

messages.append({"role": "assistant", "tool_calls": tool_calls})
for tool_call, result in zip(tool_calls, results):
    messages.append(
        {
            "role": "tool",
            "tool_call_id": tool_call.id,
            "name": tool_call.function.name,
            "content": result,
        }
    )


response = client.chat.completions.create(
    model=model,
    messages=messages,
    temperature=TEMP,
    max_tokens=MAX_TOK,
)

print(response.choices[0].message.content)
```

</details>

<details>
<summary>Text-Only Request</summary>

Mistral Large 3 can follow your instructions down to the letter.

```python
from openai import OpenAI
from huggingface_hub import hf_hub_download

# Modify OpenAI's API key and API base to use vLLM's API server.
openai_api_key = "EMPTY"
openai_api_base = "http://localhost:8000/v1"

TEMP = 0.15
MAX_TOK = 262144

client = OpenAI(
    api_key=openai_api_key,
    base_url=openai_api_base,
)

models = client.models.list()
model = models.data[0].id


def load_system_prompt(repo_id: str, filename: str) -> str:
    file_path = hf_hub_download(repo_id=repo_id, filename=filename)
    with open(file_path, "r") as file:
        system_prompt = file.read()
    return system_prompt


SYSTEM_PROMPT = load_system_prompt(model, "SYSTEM_PROMPT.txt")

messages = [
    {"role": "system", "content": SYSTEM_PROMPT},
    {
        "role": "user",
        "content": "Write me a sentence where every word starts with the next letter in the alphabet - start with 'a' and end with 'z'.",
    },
]

response = client.chat.completions.create(
    model=model,
    messages=messages,
    temperature=TEMP,
    max_tokens=MAX_TOK,
)

assistant_message = response.choices[0].message.content
print(assistant_message)
```

</details>

## License

This model is licensed under the [Apache 2.0 License](https://www.apache.org/licenses/LICENSE-2.0.txt).

*You must not use this model in a manner that infringes, misappropriates, or otherwise violates any third party’s rights, including intellectual property rights.*
SYSTEM_PROMPT.txt
ADDED
@@ -0,0 +1,29 @@
You are Mistral-Large-3-675B-Instruct-2512-NVFP4, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris.
You power an AI assistant called Le Chat.
Your knowledge base was last updated on 2023-10-01.
The current date is {today}.

When you're not sure about some information or when the user's request requires up-to-date or specific data, you must use the available tools to fetch the information. Do not hesitate to use tools whenever they can provide a more accurate or complete response. If no relevant tools are available, then clearly state that you don't have the information and avoid making up anything.
If the user's question is not clear, ambiguous, or does not provide enough context for you to accurately answer the question, you do not try to answer it right away and you rather ask the user to clarify their request (e.g. "What are some good restaurants around me?" => "Where are you?" or "When is the next flight to Tokyo" => "Where do you travel from?").
You are always very attentive to dates, in particular you try to resolve dates (e.g. "yesterday" is {yesterday}) and when asked about information at specific dates, you discard information that is at another date.
You follow these instructions in all languages, and always respond to the user in the language they use or request.
Next sections describe the capabilities that you have.

# WEB BROWSING INSTRUCTIONS

You cannot perform any web search or access internet to open URLs, links etc. If it seems like the user is expecting you to do so, you clarify the situation and ask the user to copy paste the text directly in the chat.

# MULTI-MODAL INSTRUCTIONS

You have the ability to read images, but you cannot generate images. You also cannot transcribe audio files or videos.
You cannot read nor transcribe audio files or videos.

# TOOL CALLING INSTRUCTIONS

You may have access to tools that you can use to fetch information or perform actions. You must use these tools in the following situations:

1. When the request requires up-to-date information.
2. When the request requires specific data that you do not have in your knowledge base.
3. When the request involves actions that you cannot perform without tools.

Always prioritize using tools to provide the most accurate and helpful response. If tools are not available, inform the user that you cannot perform the requested action at the moment.
_README.md
ADDED
@@ -0,0 +1,230 @@
---
license: apache-2.0
---

## Install vLLM

Use private vLLM branch: https://github.com/mistralai/vllm-private/compare/main...add_ml3_v4

You can install it by using the public vLLM docker image:

```sh
docker pull ghcr.io/mistralai/vllm/vllm-openai:latest
```

from https://hub.docker.com/layers/vllm/vllm-openai/latest/images/sha256-d731ee65c044ae0977421eed3d93f931d4b7d79614394184c939db35b8f28fc2,

and then install vLLM with:

```sh
cd vllm
VLLM_USE_PRECOMPILED=1 pip install --editable .
```

## Launch

We tested ML3 by launching it from two 8xH200 nodes in parallel.

You can do the following to start the model.
Connect to two 8xH200 nodes (connected via InfiniBand) and make sure to launch Ray on each node.
On one node you should start Ray as the "head", on the other as a "worker".

1.) Start Ray

As soon as everything is installed, make sure to start Ray on all GPU nodes.

**Important**: Make sure that on each node the command line has access to a `python` cmd. If it doesn't exist, symlink or alias it to `python3`.
Then save the code below in a "start_ray.py" script and execute it as follows:

```sh
python3 start_ray.py --is_head --address 172.17.199.135 --ray_port 6379 --num_gpus 8 --num_cpus 20 --nnodes 2
```

on the "HEAD" node with the correct IP address (you can retrieve it via `hostname -I`).

On the worker nodes, execute exactly the same command, just without `--is_head` and `--nnodes`, as shown next.

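For example, with the head address above, the worker invocation would be:

```sh
python3 start_ray.py --address 172.17.199.135 --ray_port 6379 --num_gpus 8 --num_cpus 20
```
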
```py
import argparse
import os
from typing import Union


def generate_head_script(
    address: str, ray_port: int, num_cpus: str, num_gpus: str, nnodes: int
) -> str:
    num_cpus_str = str(num_cpus) if num_cpus.isdigit() else num_cpus
    num_gpus_str = str(num_gpus) if num_gpus.isdigit() else num_gpus

    script = f"""# Spawning Ray cluster (head node)
echo "Ray: Starting HEAD at $(hostname)..."
export RAY_memory_monitor_refresh_ms=0
ray start \\
    --head \\
    --node-ip-address={address} \\
    --port={ray_port} \\
    --num-cpus {num_cpus_str} \\
    --num-gpus {num_gpus_str}

# Ray cluster needs to be initialized before spawning workers
echo "Waiting for {nnodes} worker nodes to connect..."
START_TIME=$(date +%s)
TIMEOUT=120  # seconds
INTERVAL=1
while :; do
    # Count alive nodes
    WORKER_COUNT=$(python -c 'import ray; ray.init(); print(sum(node["Alive"] for node in ray.nodes()))')
    if [ "$WORKER_COUNT" -ge "{nnodes}" ]; then
        echo "Ray: ✅ Found all ($WORKER_COUNT) nodes."
        break
    fi
    NOW=$(date +%s)
    ELAPSED=$(( NOW - START_TIME ))
    if [ "$ELAPSED" -ge "$TIMEOUT" ]; then
        echo "Ray: ❌ Timeout after $TIMEOUT seconds: not enough workers joined."
        exit 1
    fi
    echo "⏳ Still waiting... ($WORKER_COUNT found)"
    sleep "$INTERVAL"
done
"""
    return script


def get_start_ray_worker_cmd(
    main_address: str,
    ray_port: int,
    num_cpus: Union[int, str],
    num_gpus: Union[int, str],
) -> str:
    num_cpus_str = str(num_cpus) if isinstance(num_cpus, int) else num_cpus
    num_gpus_str = str(num_gpus) if isinstance(num_gpus, int) else num_gpus

    return f"""echo "Ray: Starting WORKER at $(hostname)..."
export RAY_memory_monitor_refresh_ms=0
ray start \\
    --address {main_address}:{ray_port} \\
    --num-cpus {num_cpus_str} \\
    --num-gpus {num_gpus_str} \\
    --block
"""


def main() -> None:
    parser = argparse.ArgumentParser(
        description="Generate Ray cluster scripts for head or worker nodes."
    )

    parser.add_argument(
        "--is_head",
        action="store_true",
        help="Generate script for the head node (default: worker).",
    )
    parser.add_argument(
        "--address", type=str, required=True, help="IP address of the head node."
    )
    parser.add_argument(
        "--ray_port", type=int, required=True, help="Port for the Ray cluster."
    )
    parser.add_argument(
        "--num_cpus",
        type=str,
        required=True,
        help="Number of CPUs to allocate (e.g., '4' or 'auto').",
    )
    parser.add_argument(
        "--num_gpus",
        type=str,
        required=True,
        help="Number of GPUs to allocate (e.g., '1' or 'auto').",
    )
    parser.add_argument(
        "--nnodes",
        type=int,
        help="Total number of nodes to wait for (only for head node).",
    )

    args = parser.parse_args()
    print(f"Ray: Args: {args}")

    if args.is_head:
        script = generate_head_script(
            address=args.address,
            ray_port=args.ray_port,
            num_cpus=args.num_cpus,
            num_gpus=args.num_gpus,
            nnodes=args.nnodes,
        )
    else:
        assert args.nnodes is None, "nnodes is not used for worker nodes"
        script = get_start_ray_worker_cmd(
            main_address=args.address,
            ray_port=args.ray_port,
            num_cpus=args.num_cpus,
            num_gpus=args.num_gpus,
        )

    os.system(script)


if __name__ == "__main__":
    main()
```

Run this script on both machines.

2.) Once you've started Ray, you can launch vLLM just on the HEAD node.

```sh
vllm serve mistralai/mistral-large-3 \
    --tokenizer_mode mistral --config_format mistral \
    --load_format mistral --tool-call-parser mistral \
    --enable-auto-tool-choice \
    --limit-mm-per-prompt '{"image":10}' \
    --tensor-parallel-size 16 \
    --max_model_len 65536 \
    --max_num_seqs 128 \
    --enforce_eager
```

Loading the checkpoint will take a while. Once the model is loaded, you can ping it, for example with the code below.

## Client

```py
import requests
import json

url = "http://<your-server-url>:8000/v1/chat/completions"
headers = {"Content-Type": "application/json", "Authorization": "Bearer token"}

model = "mistralai/mistral-large-3"
image_url = "https://huggingface.co/datasets/patrickvonplaten/random_img/resolve/main/europe.png"


messages = [
    {
        "role": "user",
        "content": "Without browsing the web, how many days ago was Mistral founded?"
    },
]

# messages = [
#     {
#         "role": "user",
#         "content": [
#             {
#                 "type": "text",
#                 "text": "Which of the depicted countries has the best food? Which the second and third and fourth? Name the country, its color on the map and one its city that is visible on the map, but is not the capital. Make absolutely sure to only name a city that can be seen on the map.",
#             },
#             {"type": "image_url", "image_url": {"url": image_url}},
#         ],
#     },
# ]

# max_tokens belongs in the request payload, not as a requests.post() argument.
data = {"model": model, "messages": messages, "max_tokens": 16}

response = requests.post(url, headers=headers, data=json.dumps(data))
print(response.json()["choices"][0]["message"]["content"])
```
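For quick debugging, an equivalent raw `curl` request might look like the following sketch (same placeholder URL and bearer token as above):

```sh
curl http://<your-server-url>:8000/v1/chat/completions \
    -H "Content-Type: application/json" \
    -H "Authorization: Bearer token" \
    -d '{"model": "mistralai/mistral-large-3", "messages": [{"role": "user", "content": "Hello!"}], "max_tokens": 16}'
```
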
chat_template.jinja
ADDED
@@ -0,0 +1,121 @@
{#- Default system message if no system prompt is passed. #}
{%- set default_system_message = 'You are Mistral-Large-3-675B-Instruct-2512-NVFP4, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris.\nYou power an AI assistant called Le Chat.\nYour knowledge base was last updated on 2023-10-01.\nThe current date is {today}.\n\nWhen you\'re not sure about some information or when the user\'s request requires up-to-date or specific data, you must use the available tools to fetch the information. Do not hesitate to use tools whenever they can provide a more accurate or complete response. If no relevant tools are available, then clearly state that you don\'t have the information and avoid making up anything.\nIf the user\'s question is not clear, ambiguous, or does not provide enough context for you to accurately answer the question, you do not try to answer it right away and you rather ask the user to clarify their request (e.g. "What are some good restaurants around me?" => "Where are you?" or "When is the next flight to Tokyo" => "Where do you travel from?").\nYou are always very attentive to dates, in particular you try to resolve dates (e.g. "yesterday" is {yesterday}) and when asked about information at specific dates, you discard information that is at another date.\nYou follow these instructions in all languages, and always respond to the user in the language they use or request.\nNext sections describe the capabilities that you have.\n\n# WEB BROWSING INSTRUCTIONS\n\nYou cannot perform any web search or access internet to open URLs, links etc. If it seems like the user is expecting you to do so, you clarify the situation and ask the user to copy paste the text directly in the chat.\n\n# MULTI-MODAL INSTRUCTIONS\n\nYou have the ability to read images, but you cannot generate images. You also cannot transcribe audio files or videos.\nYou cannot read nor transcribe audio files or videos.\n\n# TOOL CALLING INSTRUCTIONS\n\nYou may have access to tools that you can use to fetch information or perform actions. You must use these tools in the following situations:\n\n1. When the request requires up-to-date information.\n2. When the request requires specific data that you do not have in your knowledge base.\n3. When the request involves actions that you cannot perform without tools.\n\nAlways prioritize using tools to provide the most accurate and helpful response. If tools are not available, inform the user that you cannot perform the requested action at the moment.' %}

{#- Begin of sequence token. #}
{{- bos_token }}

{#- Handle system prompt if it exists. #}
{#- System prompt supports text content or text chunks. #}
{%- if messages[0]['role'] == 'system' %}
    {{- '[SYSTEM_PROMPT]' -}}
    {%- if messages[0]['content'] is string %}
        {{- messages[0]['content'] -}}
    {%- else %}
        {%- for block in messages[0]['content'] %}
            {%- if block['type'] == 'text' %}
                {{- block['text'] }}
            {%- else %}
                {{- raise_exception('Only text chunks are supported in system message contents.') }}
            {%- endif %}
        {%- endfor %}
    {%- endif %}
    {{- '[/SYSTEM_PROMPT]' -}}
    {%- set loop_messages = messages[1:] %}
{%- else %}
    {%- set loop_messages = messages %}
    {%- if default_system_message != '' %}
        {{- '[SYSTEM_PROMPT]' + default_system_message + '[/SYSTEM_PROMPT]' }}
    {%- endif %}
{%- endif %}


{#- Tools definition #}
{%- set tools_definition = '' %}
{%- set has_tools = false %}
{%- if tools is defined and tools is not none and tools|length > 0 %}
    {%- set has_tools = true %}
    {%- set tools_definition = '[AVAILABLE_TOOLS]' + (tools| tojson) + '[/AVAILABLE_TOOLS]' %}
    {{- tools_definition }}
{%- endif %}

{#- Checks for alternating user/assistant messages. #}
{%- set ns = namespace(index=0) %}
{%- for message in loop_messages %}
    {%- if message.role == 'user' or (message.role == 'assistant' and (message.tool_calls is not defined or message.tool_calls is none or message.tool_calls | length == 0)) %}
        {%- if (message['role'] == 'user') != (ns.index % 2 == 0) %}
            {{- raise_exception('After the optional system message, conversation roles must alternate user and assistant roles except for tool calls and results.') }}
        {%- endif %}
        {%- set ns.index = ns.index + 1 %}
    {%- endif %}
{%- endfor %}

{#- Handle conversation messages. #}
{%- for message in loop_messages %}

    {#- User messages support text content or text and image chunks. #}
    {%- if message['role'] == 'user' %}
        {%- if message['content'] is string %}
            {{- '[INST]' + message['content'] + '[/INST]' }}
        {%- elif message['content'] | length > 0 %}
            {{- '[INST]' }}
            {%- if message['content'] | length == 2 %}
                {%- set blocks = message['content'] | sort(attribute='type') %}
            {%- else %}
                {%- set blocks = message['content'] %}
            {%- endif %}
            {%- for block in blocks %}
                {%- if block['type'] == 'text' %}
                    {{- block['text'] }}
                {%- elif block['type'] in ['image', 'image_url'] %}
                    {{- '[IMG]' }}
                {%- else %}
                    {{- raise_exception('Only text, image and image_url chunks are supported in user message content.') }}
                {%- endif %}
            {%- endfor %}
            {{- '[/INST]' }}
        {%- else %}
            {{- raise_exception('User message must have a string or a list of chunks in content') }}
        {%- endif %}

    {#- Assistant messages support text content or text and image chunks. #}
    {%- elif message['role'] == 'assistant' %}
        {%- if (message['content'] is none or message['content'] == '' or message['content']|length == 0) and (message['tool_calls'] is not defined or message['tool_calls'] is none or message['tool_calls']|length == 0) %}
            {{- raise_exception('Assistant message must have a string or a list of chunks in content or a list of tool calls.') }}
        {%- endif %}

        {%- if message['content'] is string %}
            {{- message['content'] }}
        {%- elif message['content'] | length > 0 %}
            {%- for block in message['content'] %}
                {%- if block['type'] == 'text' %}
                    {{- block['text'] }}
                {%- else %}
                    {{- raise_exception('Only text chunks are supported in assistant message contents.') }}
                {%- endif %}
            {%- endfor %}
        {%- endif %}

        {%- if message['tool_calls'] is defined and message['tool_calls'] is not none and message['tool_calls']|length > 0 %}
            {%- for tool in message['tool_calls'] %}
                {%- set arguments = tool['function']['arguments'] %}
                {%- if arguments is not string %}
                    {%- set arguments = arguments|tojson|safe %}
                {%- elif arguments == '' %}
                    {%- set arguments = '{}' %}
                {%- endif %}
                {{- '[TOOL_CALLS]' + tool['function']['name'] + '[ARGS]' + arguments }}
            {%- endfor %}
        {%- endif %}

        {#- End of sequence token for each assistant message. #}
        {{- eos_token }}

    {#- Tool messages only support text content. #}
    {%- elif message['role'] == 'tool' %}
        {{- '[TOOL_RESULTS]' + message['content']|string + '[/TOOL_RESULTS]' }}

    {#- Raise exception for unsupported roles. #}
    {%- else %}
        {{- raise_exception('Only user, assistant and tool roles are supported, got ' + message['role'] + '.') }}
    {%- endif %}
{%- endfor %}
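As a minimal sketch (not part of the repo), the template above can be rendered directly with `jinja2`; in practice `apply_chat_template` does this for you, and `raise_exception` is provided by the tokenizer's template environment. The `bos`/`eos` strings below are illustrative assumptions.

```python
# Render chat_template.jinja by hand to inspect the prompt format.
from jinja2 import Environment


def raise_exception(message):
    # Mirrors the helper that chat-template runtimes expose to the template.
    raise ValueError(message)


env = Environment()
env.globals["raise_exception"] = raise_exception

with open("chat_template.jinja") as f:
    template = env.from_string(f.read())

prompt = template.render(
    bos_token="<s>",    # assumption for illustration
    eos_token="</s>",   # assumption for illustration
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello!"},
    ],
    tools=None,
)
# Expected shape: <s>[SYSTEM_PROMPT]...[/SYSTEM_PROMPT][INST]Hello![/INST]
print(prompt)
```
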
consolidated-00025-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ab9d2bf87a6b60a5e37a8f54b75233c9d4ee4f2ef6e4352f65c5c24401a91322
size 1403809268

consolidated-00032-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0664a1b9fa4796a8bb787680854f3374df2b4e5ef6745388dc70320a69e21f0b
size 1403809044

consolidated-00053-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:05770e2e8e0c3d1546df453849535987776e16e24140170ac4c35531888c6801
size 1420324688

consolidated-00056-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8fc0c7fb8476c304328957be47a827ecb33c7348e61e3563537e8564a85ba592
size 1420324440

consolidated-00060-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:75ca4f9a74b3b31d621b203f7cf6830e4caba16b61d9a7e48d11084d7dc89d66
size 1420324440

consolidated-00073-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b2fbbfb9c405be49b10b69c1758fc8d03d808a1777008718c4285f5586a313d4
size 1387293632

consolidated-00077-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3f098a7e4b5ca0b60bf74b83b6be926b2b16f7f1d0260ac02c1033da1f212b29
size 1387293672

consolidated-00082-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6839a852fc29e9992f5110bb18dd8c9cf4391ecb3cfe1a5eeae9c7742a7a88d3
size 1403809036

consolidated-00088-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4a61a1a6c7fcaaaf50664e7bc88c937cdd1a8709681df5d30c59428b5aa0d8c1
size 1387293640

consolidated-00090-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:32f426d8e487d3fe4b0b44c2df7d3391c878c13c3fc20ebc1b4ee45b973167d9
size 1403809300

consolidated-00095-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:09f09ef90c83fe35433dd225c351959ddd2fb9210126b8b9536a60da3e32c632
size 1420324360

consolidated-00105-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:03e6d03154721af5db4f7426ef46f59594224d89cdaa687f7428dd34d104ae84
size 1403809036

consolidated-00109-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:663756c104b3459b4c1404bcce9981c3ac37b6a4f6c28c0f81c43825dac9aecf
size 1403809196

consolidated-00123-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:375ab538a3505943690912a8a57fb3f9c0afce950278c94f2c8399f1c562bb57
size 1420324576

consolidated-00124-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:49b132a666d365117f850d422c6de6eb0a7dcb80f1b18b7938c8f591f7ab08a6
size 1387293632

consolidated-00130-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ec752a7c438d44e9724ce40940a49ecec6d5204fd271e8c46a9667fd81b46091
size 1420324448

consolidated-00137-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2058f954f9757ac03167b06e4d59a76550ea2428781b4656032242f2c6b43401
size 1403809148

consolidated-00141-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5cc1840c7b07ad92eabcc65fe90d5d00b833cb58053f6a489951e12fd4b059c6
size 1403809300

consolidated-00146-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:57e66a6ee20c183ceebe4962db769872b5e18a3f7ec5bb016b83f6f8aafdecd1
size 1420324360

consolidated-00151-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:edfd723ed05b5488eb340834d37ce81f1c56bd066c791aaccaac13f895f66cc6
size 1387293712

consolidated-00152-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:27c7c5256568962d2925b601dfd2a4848d78c88c7765cede0777906c0a6c3005
size 1403809044

consolidated-00153-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:27583d1a6610c20cc592a1a0358abc8463fdfbcf6c565ecee0c4deab5939c055
size 1420324440

consolidated-00160-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:767e64501f09e55f1d78608933f6260cc5dc262ec8782b4ff5d4562fbfed4b52
size 1403809196

consolidated-00166-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5b606f7e243dda92021dc39e3aa38654dccc07fb793b6bc63b0db27e3f4c8939
size 1420324448

consolidated-00170-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e83d4879836b1ea4c289b0e1450831ec49c788659f1b36afcdffb8afd265c414
size 1420324440

consolidated-00172-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4c4e7f80776ca7bddb1c94c466d0eda3aafcf525b7e402d48d5a4f79886f5cea
size 1403809036

consolidated-00174-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dd04ac9f1a2a9d2df913cf16023c66f47da5f76c6a6f3ada98eab4d2c940b3c8
size 1420324576

consolidated-00176-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dc0fd039bb18145d203b21c2617e0f29d26a861bf6c682505f232e76add25a95
size 1403809044

consolidated-00181-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b97758cdd8cbc2cf4779926f3644080e6bcce2e68f02f861d9f75e431dea0351
size 1420324448

consolidated-00183-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4844c03d8fe9a1d320482e954c419aa0c0de4830661e2a70558063397295ff4b
size 1387293832

consolidated-00184-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4f465e8b96cc0e274f827b394fa2358325af7fc3964d06fea28e034412a6f4f2
size 1403809036

consolidated-00185-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e8784a96e192e02155668072b57f695056b97b6f0aa4dfa5f013d5b6327b9ff0
size 1420324440

consolidated-00190-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:728dda60ea3cb06d2bf4b2b90cea9cb61a9a8e863f2b41d7dc03278157e150ce
size 1387293640

consolidated-00193-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:42a91b66ae81114d4da26ae9b5f84f2bf7b52daa2e52a6ba4dfcb1c954e19418
size 1420324464

consolidated-00200-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1d52dfafa6da7d4804da3f7243d70271c79cb95928c8958d43fa55a84712c794
size 1420324184

consolidated-00202-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3e48714e62ec11e249ad6fa45f21cb48bde36c4f68cb76fdc0ca4d32804ee581
size 1387293712

consolidated-00209-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7fa07ef27038b7927598ff1c96a5e157e70807b9b7bace3a3c5740942a7569f4
size 1387293632

consolidated-00229-of-00273.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bfe89feb369ba45be68de8e5c62a3e58f95d8c8b46e886e432156d9fdade3af0
size 1420324664
consolidated.safetensors.index.json
ADDED
The diff for this file is too large to render.
params.json
ADDED
@@ -0,0 +1,113 @@
{
  "dim": 7168,
  "head_dim": 192,
  "hidden_dim": 16384,
  "kv_lora_rank": 512,
  "llama_4_scaling": {
    "beta": 0.1,
    "original_max_position_embeddings": 8192
  },
  "max_position_embeddings": 294912,
  "max_seq_len": 262144,
  "moe": {
    "expert_hidden_dim": 4096,
    "expert_model_parallel": 1,
    "expert_parallel": 1,
    "first_k_dense_replace": 3,
    "num_expert_groups": 1,
    "num_expert_groups_per_tok": 1,
    "num_experts": 128,
    "num_experts_per_tok": 4,
    "num_shared_experts": 1,
    "route_every_n": 1,
    "routed_scale": 1.0
  },
  "n_heads": 128,
  "n_kv_heads": 128,
  "n_layers": 61,
  "norm_eps": 1e-06,
  "q_lora_rank": 1536,
  "qk_nope_head_dim": 128,
  "qk_rope_head_dim": 64,
  "quantization_config": {
    "config_groups": {
      "NVFP4": {
        "format": "nvfp4-pack-quantized",
        "input_activations": {
          "actorder": null,
          "block_structure": null,
          "dynamic": "local",
          "group_size": 16,
          "num_bits": 4,
          "observer": "static_minmax",
          "observer_kwargs": {},
          "strategy": "tensor_group",
          "symmetric": true,
          "type": "float"
        },
        "output_activations": null,
        "targets": [
          "Linear"
        ],
        "weights": {
          "actorder": null,
          "block_structure": null,
          "dynamic": false,
          "group_size": 16,
          "num_bits": 4,
          "observer": "static_minmax",
          "observer_kwargs": {},
          "strategy": "tensor_group",
          "symmetric": true,
          "type": "float"
        }
      }
    },
    "format": "nvfp4-pack-quantized",
    "global_compression_ratio": null,
    "ignore": [
      "model.embed_tokens",
      "re:patch_merger.*",
      "re:vision_encoder.*",
      "re:vision_language_adapter.*",
      "re:.*attn.*",
      "re:.*gate$",
      "lm_head"
    ],
    "kv_cache_scheme": null,
    "quant_method": "compressed-tensors",
    "quantization_status": "compressed",
    "sparsity_config": {},
    "transform_config": {},
    "version": "0.12.3.dev29+g73c2cf9.d20251119"
  },
  "rope_theta": 10000.0,
  "tied_embeddings": false,
  "v_head_dim": 128,
  "vision_encoder": {
    "adapter_bias": false,
    "add_pre_mm_projector_layer_norm": true,
    "hidden_size": 1664,
    "image_break_token_id": 12,
    "image_end_token_id": 13,
    "image_size": 1540,
    "image_token_id": 10,
    "intermediate_size": 8192,
    "max_image_size": 1540,
    "mm_projector_id": "patch_merge",
    "num_attention_heads": 16,
    "num_channels": 3,
    "num_hidden_layers": 48,
    "patch_size": 14,
    "rope_theta": 10000.0,
    "spatial_merge_size": 2
  },
  "vocab_size": 131072,
  "yarn": {
    "alpha": 1,
    "apply_scale": false,
    "beta": 32,
    "factor": 36,
    "original_max_position_embeddings": 8192
  }
}
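A quick sanity-check sketch over this config (illustrative, not part of the release): the per-head query/key dimension splits into a non-RoPE and a RoPE part, and each token activates 4 routed experts plus 1 shared expert.

```python
# Read params.json and verify a couple of arithmetic relationships it encodes.
import json

with open("params.json") as f:
    p = json.load(f)

# 128 (non-RoPE) + 64 (RoPE) == 192 total per-head query/key dim
assert p["qk_nope_head_dim"] + p["qk_rope_head_dim"] == p["head_dim"]

moe = p["moe"]
print(f'{moe["num_experts_per_tok"]} of {moe["num_experts"]} routed experts '
      f'+ {moe["num_shared_experts"]} shared expert active per token')
```
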
processor_config.json
ADDED
@@ -0,0 +1,42 @@
{
  "image_break_token": "[IMG_BREAK]",
  "image_end_token": "[IMG_END]",
  "image_processor": {
    "crop_size": null,
    "data_format": "channels_first",
    "device": null,
    "disable_grouping": null,
    "do_center_crop": null,
    "do_convert_rgb": true,
    "do_normalize": true,
    "do_pad": null,
    "do_rescale": true,
    "do_resize": true,
    "image_mean": [
      0.48145466,
      0.4578275,
      0.40821073
    ],
    "image_processor_type": "PixtralImageProcessorFast",
    "image_seq_length": null,
    "image_std": [
      0.26862954,
      0.26130258,
      0.27577711
    ],
    "input_data_format": null,
    "pad_size": null,
    "patch_size": 14,
    "processor_class": "PixtralProcessor",
    "resample": 3,
    "rescale_factor": 0.00392156862745098,
    "return_tensors": null,
    "size": {
      "longest_edge": 1540
    }
  },
  "image_token": "[IMG]",
  "patch_size": 14,
  "processor_class": "PixtralProcessor",
  "spatial_merge_size": 2
}
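As a rough sketch (our assumption, not an official formula): with 14-px patches merged 2x2 by the patch merger, each image token covers roughly a 28x28-px tile after the image is scaled to fit `longest_edge=1540`, which gives one way to estimate the `[IMG]` token cost of an image.

```python
# Approximate image-token count implied by this processor config (assumption).
import math

def approx_image_tokens(width: int, height: int,
                        patch: int = 14, merge: int = 2,
                        longest_edge: int = 1540) -> int:
    # Scale down so the longest edge fits, as in the processor's size config.
    scale = min(1.0, longest_edge / max(width, height))
    w, h = round(width * scale), round(height * scale)
    tile = patch * merge  # 28 px per merged token (assumption)
    return math.ceil(w / tile) * math.ceil(h / tile)

print(approx_image_tokens(1540, 1540))  # ceil(1540/28)**2 == 55 * 55 == 3025
```
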
special_tokens_map.json
ADDED
The diff for this file is too large to render.
tekken.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e29d19ea32eb7e26e6c0572d57cb7f9eca0f4420e0e0fe6ae1cf3be94da1c0d6
size 16753777
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:577575622324b2e099e2648be26bdeb5e5815ffe66d7004e9e3ddbf421db6bf1
size 17078110
tokenizer_config.json
ADDED
The diff for this file is too large to render.