Rainbowdesign committed
Commit 75654bd · verified · 1 Parent(s): 08127e8

Update default_models.json

Files changed (1)
  1. default_models.json +204 -56
default_models.json CHANGED
@@ -86,71 +86,219 @@
  }
  }
  },
- "Qwen-Coder": {
- "Qwen2.5-Coder-0.5B-Instruct": {
- "id": "Qwen/Qwen2.5-Coder-0.5B-Instruct",
- "description": "Lightweight coding model suitable for fast inference and simple code tasks.",
- "link": "https://huggingface.co/Qwen/Qwen2.5-Coder-0.5B-Instruct",
- "emoji": "💻"
+
+ "Coding": {
+ "Qwen-Coder": {
+ "Qwen2.5-Coder-0.5B-Instruct": {
+ "id": "Qwen/Qwen2.5-Coder-0.5B-Instruct",
+ "description": "Very lightweight coding model for fast inference and simple tasks.",
+ "link": "https://huggingface.co/Qwen/Qwen2.5-Coder-0.5B-Instruct",
+ "emoji": "💻"
+ },
+ "Qwen2.5-Coder-1.5B-Instruct": {
+ "id": "Qwen/Qwen2.5-Coder-1.5B-Instruct",
+ "description": "Small instruction-tuned coding model with better reasoning than the 0.5B variant.",
+ "link": "https://huggingface.co/Qwen/Qwen2.5-Coder-1.5B-Instruct",
+ "emoji": "💻"
+ },
+ "Qwen2.5-Coder-3B-Instruct": {
+ "id": "Qwen/Qwen2.5-Coder-3B-Instruct",
+ "description": "Mid-sized coding model with good performance for code generation and debugging.",
+ "link": "https://huggingface.co/Qwen/Qwen2.5-Coder-3B-Instruct",
+ "emoji": "💻"
+ },
+ "Qwen2.5-Coder-7B-Instruct": {
+ "id": "Qwen/Qwen2.5-Coder-7B-Instruct",
+ "description": "Popular all-round coder with strong capabilities for code generation, editing, and explanation.",
+ "link": "https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct",
+ "emoji": "💻"
+ },
+ "Qwen2.5-Coder-14B-Instruct": {
+ "id": "Qwen/Qwen2.5-Coder-14B-Instruct",
+ "description": "Large model with improved reasoning and long-context support, suitable for extensive projects.",
+ "link": "https://huggingface.co/Qwen/Qwen2.5-Coder-14B-Instruct",
+ "emoji": "💻"
+ },
+ "Qwen2.5-Coder-32B-Instruct": {
+ "id": "Qwen/Qwen2.5-Coder-32B-Instruct",
+ "description": "High-performance coding assistant with very high accuracy; large models require a lot of VRAM.",
+ "link": "https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct",
+ "emoji": "💻"
+ }
  },
- "Qwen2.5-Coder-1.5B-Instruct": {
- "id": "Qwen/Qwen2.5-Coder-1.5B-Instruct",
- "description": "Small instruction-tuned coding model with improved reasoning over the 0.5B version.",
- "link": "https://huggingface.co/Qwen/Qwen2.5-Coder-1.5B-Instruct",
- "emoji": "💻"
+
+ "CodeLlama": {
+ "CodeLlama-7B-Instruct": {
+ "id": "codellama/CodeLlama-7b-Instruct-hf",
+ "description": "Lightweight, instruction-tuned model; popular for coding chat and smaller projects.",
+ "link": "https://huggingface.co/codellama/CodeLlama-7b-Instruct-hf",
+ "emoji": "🧑‍💻"
+ },
+ "CodeLlama-13B-Instruct": {
+ "id": "codellama/CodeLlama-13b-Instruct-hf",
+ "description": "Mid-range model with good code generation and reasoning; versatile in use.",
+ "link": "https://huggingface.co/codellama/CodeLlama-13b-Instruct-hf",
+ "emoji": "🧑‍💻"
+ },
+ "CodeLlama-34B-Instruct": {
+ "id": "codellama/CodeLlama-34b-Instruct-hf",
+ "description": "Powerful model for larger and more complex projects and extensive codebases.",
+ "link": "https://huggingface.co/codellama/CodeLlama-34b-Instruct-hf",
+ "emoji": "🧑‍💻"
+ },
+ "CodeLlama-70B-Instruct": {
+ "id": "meta-llama/CodeLlama-70B-Instruct-hf",
+ "description": "Very large model; maximum capacity for complex tasks, but high resource requirements.",
+ "link": "https://huggingface.co/meta-llama/CodeLlama-70B-Instruct-hf",
+ "emoji": "🧑‍💻"
+ }
  },
- "Qwen2.5-Coder-3B-Instruct": {
- "id": "Qwen/Qwen2.5-Coder-3B-Instruct",
- "description": "Mid-sized coding model with strong performance for code generation and debugging.",
- "link": "https://huggingface.co/Qwen/Qwen2.5-Coder-3B-Instruct",
- "emoji": "💻"
+
+ "Llama3.1-Code": {
+ "Llama-3.1-8B-Instruct-Code": {
+ "id": "meta-llama/Llama-3.1-8B-Instruct",
+ "description": "Compact model for versatile tasks, including code; a good balance of size and performance.",
+ "link": "https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct",
+ "emoji": "🦙"
+ },
+ "Llama-3.1-70B-Instruct-Code": {
+ "id": "meta-llama/Llama-3.1-70B-Instruct",
+ "description": "High-end model for professional and complex coding workflows, including multilingual use.",
+ "link": "https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct",
+ "emoji": "🦙"
+ }
  },
- "Qwen2.5-Coder-7B-Instruct": {
- "id": "Qwen/Qwen2.5-Coder-7B-Instruct",
- "description": "Coding-focused Qwen 2.5 model with strong capabilities for code generation, editing, and explanation.",
- "link": "https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct",
- "emoji": "💻"
+
+ "DeepSeek-Coder": {
+ "DeepSeek-Coder-1.3B": {
+ "id": "deepseek-ai/DeepSeek-Coder-1.3B-instruct",
+ "description": "Small, fast model; suitable for simple tasks and quick prototyping.",
+ "link": "https://huggingface.co/deepseek-ai/DeepSeek-Coder-1.3B-instruct",
+ "emoji": "🚀"
+ },
+ "DeepSeek-Coder-6.7B": {
+ "id": "deepseek-ai/DeepSeek-Coder-6.7B-instruct",
+ "description": "Mid-range model with good multi-language and code performance; a good compromise between resources and capability.",
+ "link": "https://huggingface.co/deepseek-ai/DeepSeek-Coder-6.7B-instruct",
+ "emoji": "🚀"
+ },
+ "DeepSeek-Coder-33B": {
+ "id": "deepseek-ai/DeepSeek-Coder-33B-instruct",
+ "description": "Strong on large projects and complex code; popular for extensive codebases and general multi-language projects.",
+ "link": "https://huggingface.co/deepseek-ai/DeepSeek-Coder-33B-instruct",
+ "emoji": "🚀"
+ }
  },
- "Qwen2.5-Coder-14B-Instruct": {
- "id": "Qwen/Qwen2.5-Coder-14B-Instruct",
- "description": "Large coding model with improved reasoning and long-context capabilities.",
- "link": "https://huggingface.co/Qwen/Qwen2.5-Coder-14B-Instruct",
- "emoji": "💻"
+
+ "StarCoder": {
+ "StarCoder-1B": {
+ "id": "bigcode/starcoder",
+ "description": "Lightweight base code model; good for smaller scripts and simple tasks.",
+ "link": "https://huggingface.co/bigcode/starcoder",
+ "emoji": "⭐"
+ },
+ "StarCoder2-7B": {
+ "id": "bigcode/starcoder2-7b",
+ "description": "Current generation with good performance across many languages and mid-sized projects.",
+ "link": "https://huggingface.co/bigcode/starcoder2-7b",
+ "emoji": "⭐"
+ },
+ "StarCoder2-15B": {
+ "id": "bigcode/starcoder2-15b",
+ "description": "Capable model with broad language coverage and good code generation; popular for open-source projects.",
+ "link": "https://huggingface.co/bigcode/starcoder2-15b",
+ "emoji": "⭐"
+ }
  },
- "Qwen2.5-Coder-32B-Instruct": {
- "id": "Qwen/Qwen2.5-Coder-32B-Instruct",
- "description": "⚠️ Large model — may require paid HF Inference API. High-performance coding assistant with strong reasoning.",
- "link": "https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct",
- "emoji": "💻"
- }
- }
-
- "CodeLlama": {
- "CodeLlama-7B-Instruct": {
- "id": "codellama/CodeLlama-7b-Instruct-hf",
- "description": "Instruction-tuned CodeLlama model specialized for programming assistance and code chat.",
- "link": "https://huggingface.co/codellama/CodeLlama-7b-Instruct-hf",
- "emoji": "🧑‍💻"
+
+ "Phind-CodeLlama-v2": {
+ "Phind-CodeLlama-34B-v2": {
+ "id": "phind/Phind-CodeLlama-34B-v2",
+ "description": "Very strong code LLM, often at the top of open-source leaderboards; good performance in many languages including C#, C++, and Python.",
+ "link": "https://huggingface.co/phind/Phind-CodeLlama-34B-v2",
+ "emoji": "🔥"
+ }
+ },
+
+ "WizardCoder": {
+ "WizardCoder-15B": {
+ "id": "WizardLM/WizardCoder-15B-V1.0",
+ "description": "Popular chat-style coding model; good for creative tasks, explanations, and code generation in many languages.",
+ "link": "https://huggingface.co/WizardLM/WizardCoder-15B-V1.0",
+ "emoji": "🧙‍♂️"
+ }
+ },
+
+ "CodeGemma": {
+ "CodeGemma-2B": {
+ "id": "google/codegemma-2b-code",
+ "description": "Compact model; good for mobile or lightweight coding, with a Java & Kotlin focus.",
+ "link": "https://huggingface.co/google/codegemma-2b-code",
+ "emoji": "💎"
+ },
+ "CodeGemma-7B": {
+ "id": "google/codegemma-7b-code",
+ "description": "Stable and solid; good performance across diverse languages and projects.",
+ "link": "https://huggingface.co/google/codegemma-7b-code",
+ "emoji": "💎"
+ }
+ },
+
+ "CodeGen": {
+ "CodeGen-2B": {
+ "id": "salesforce/codegen-2B-multi",
+ "description": "Multilingual code generation; a solid baseline for many languages.",
+ "link": "https://huggingface.co/Salesforce/codegen-2B-multi",
+ "emoji": "📘"
+ },
+ "CodeGen-6B": {
+ "id": "salesforce/codegen-6B-multi",
+ "description": "Good balance between capability and resource requirements.",
+ "link": "https://huggingface.co/Salesforce/codegen-6B-multi",
+ "emoji": "📘"
+ },
+ "CodeGen-16B": {
+ "id": "salesforce/codegen-16B-multi",
+ "description": "Powerful for large and complex code-generation tasks.",
+ "link": "https://huggingface.co/Salesforce/codegen-16B-multi",
+ "emoji": "📘"
+ }
  },
- "CodeLlama-13B-Instruct": {
- "id": "codellama/CodeLlama-13b-Instruct-hf",
- "description": "Larger CodeLlama instruction model with improved coding and reasoning capabilities.",
- "link": "https://huggingface.co/codellama/CodeLlama-13b-Instruct-hf",
- "emoji": "🧑‍💻"
+
+ "InCoder": {
+ "InCoder-1B": {
+ "id": "facebook/incoder-1B",
+ "description": "Good infill model; practical for code completion and refactoring.",
+ "link": "https://huggingface.co/facebook/incoder-1B",
+ "emoji": "✨"
+ },
+ "InCoder-6B": {
+ "id": "facebook/incoder-6B",
+ "description": "More robust version with better code quality and context handling.",
+ "link": "https://huggingface.co/facebook/incoder-6B",
+ "emoji": "✨"
+ }
  },
- "CodeLlama-34B-Instruct": {
- "id": "codellama/CodeLlama-34b-Instruct-hf",
- "description": "⚠️ Large model — may require paid HF Inference API. High-performance CodeLlama variant for advanced coding tasks.",
- "link": "https://huggingface.co/codellama/CodeLlama-34b-Instruct-hf",
- "emoji": "🧑‍💻"
+
+ "PolyCoder": {
+ "PolyCoder-2.7B": {
+ "id": "NinedayWang/PolyCoder-2.7B",
+ "description": "Particularly strong in C, C++, and systems/performance languages; useful for low-level or cross-platform code.",
+ "link": "https://huggingface.co/NinedayWang/PolyCoder-2.7B",
+ "emoji": "🧩"
+ }
  },
- "CodeLlama-70B-Instruct": {
- "id": "meta-llama/CodeLlama-70B-Instruct-hf",
- "description": "⚠️ Extremely large model — requires paid HF Inference API. 🔒 Gated model — requires accepting the Meta license. Highest-capacity CodeLlama model for complex coding tasks.",
- "link": "https://huggingface.co/meta-llama/CodeLlama-70B-Instruct-hf",
- "emoji": "🧑‍💻"
+
+ "Replit-Code-LLM": {
+ "Replit-3B": {
+ "id": "replit/Replit-code-v1-3B",
+ "description": "Popular for web and API projects; loads quickly and performs well on small and medium projects.",
+ "link": "https://huggingface.co/replit/Replit-code-v1-3B",
+ "emoji": "🔧"
+ },
  }
- }
+ },
+


  "Reasoning": {