{ "Id": "B839B0CA-97B8-4603-9097-0BC641EDE391", "FileVersion": "1", "Created": "2025-03-07T00:00:00", "IsProtected": false, "Name": "FLUX.1-Kontext", "ImageIcon": "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/Icon.png", "Author": "BlackForestLabs", "Description": "FLUX.1 [Kontext] is a 12-billion parameter rectified flow transformer for in-context image generation and editing: given an input image and a text instruction, it produces a coherently modified output while preserving character and style consistency. This model offers impressive capabilities in transforming instruction prompts into precise, high-quality image edits.", "Rank": 30, "Group": "Online", "Template": "FluxKontext", "Category": "StableDiffusion", "StableDiffusionTemplate": { "PipelineType": "Flux", "ModelType": "Instruct", "SampleSize": 1024, "TokenizerLength": 768, "Tokenizer2Limit": 512, "Optimization": "None", "DiffuserTypes": [ "ImageToImage" ], "SchedulerDefaults": { "SchedulerType": "FlowMatchEulerDiscrete", "Steps": 28, "StepsMin": 1, "StepsMax": 100, "Guidance": 1, "GuidanceMin": 1, "GuidanceMax": 1, "Guidance2": 3.5, "Guidance2Min": 0, "Guidance2Max": 15, "TimestepSpacing": "Linspace", "BetaSchedule": "ScaledLinear", "BetaStart": 0.00085, "BetaEnd": 0.012 } }, "MemoryMin": 41, "MemoryMax": 52, "DownloadSize": 34, "Website": "https://blackforestlabs.ai", "Licence": "https://github.com/black-forest-labs/flux/blob/main/model_licenses/LICENSE-FLUX1-kontext", "LicenceType": "NonCommercial", "IsLicenceAccepted": false, "Repository": "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse", "RepositoryFiles": [ "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/text_encoder/model.onnx", "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/text_encoder/model.onnx.data", "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/text_encoder_2/model.onnx", "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/text_encoder_2/model.onnx.data", 
"https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/tokenizer/merges.txt", "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/tokenizer/special_tokens_map.json", "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/tokenizer/vocab.json", "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/tokenizer_2/special_tokens_map.json", "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/tokenizer_2/spiece.model", "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/tokenizer_2/tokenizer.json", "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/transformer/model.onnx", "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/transformer/model.onnx.data", "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/vae_decoder/model.onnx", "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/vae_decoder/model.onnx.data", "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/vae_encoder/model.onnx", "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/vae_encoder/model.onnx.data", "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/amuse_template.json", "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/README.md" ], "PreviewImages": [ "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/Sample.png", "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/Sample2.png", "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/Sample3.png", "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/Sample4.png" ], "Tags": [] }