🚀 🔥 Quantized Model: Mistral-Small-24B-Instruct-2501_GPTQ_G128_W4A16_MSE 🔥
This is a 4-bit quantized version of the mistralai/Mistral-Small-24B-Instruct-2501 model, quantized by ConfidentialMind.com 🤖✨. It uses the open-source GPTQModel quantization method with 4-bit precision and a group size of 128, producing a smaller, faster model with minimal performance loss.
The model runs on a single NVIDIA A100 GPU with 80GB of VRAM.
⚠️ Important note
Because the model is relatively small, `batch_size` was set quite high; you may need to adjust it to fit your GPU VRAM.
⚠️ Important note
Due to the "packed" nature of the mistral-small weights, MSE was used aggressively together with a higher damping factor. This yields lower loss and perplexity, but a group size of 32 (G32) is recommended instead. A sketch of both configurations is shown below.
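For reference, the sketch below reproduces the quantization settings used for this release (they appear in the quantize.py script further down) alongside a hypothetical G32 variant; the G32 values are illustrative, not something that was run here. It also notes the calibration `batch_size` knob mentioned above.

```python
from gptqmodel import QuantizeConfig

# Settings used for this release (see quantize.py below):
# 4-bit weights, group size 128, aggressive MSE, higher damping factor.
g128_config = QuantizeConfig(bits=4, group_size=128, mse=0.01, damp_percent=0.015)

# Hypothetical G32 variant suggested above: finer groups generally track the original
# weights more closely, at the cost of a somewhat larger quantized checkpoint.
g32_config = QuantizeConfig(bits=4, group_size=32, mse=0.01, damp_percent=0.015)

# The calibration batch size from the note above is passed to model.quantize();
# quantize.py uses batch_size=int(nsamples * 0.1), so lower it if you hit out-of-memory errors.
```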
🚀 Quick Start
This model is a GPTQ quantization of Mistral-Small-24B-Instruct-2501 that strikes a good balance between performance and model size. The sections below cover installation and usage in detail.
✨ Key Features
- Quantization benefits: 4-bit quantization with a group size of 128 significantly reduces model size while preserving performance.
- Efficient to run: fits on a single NVIDIA A100 GPU, making it well suited for research and experimentation.
📦 Installation
Package versions and installation instructions
For the exact Python package versions, see pyproject.toml (requires uv).
```bash
uv venv
source venv/bin/activate
uv sync
```
Environment variables
These can be exported in your shell or placed in a .env file, which quantize.py loads via python-dotenv:
```bash
HF_TOKEN=<YOUR_HF_TOKEN>
TOKENIZERS_PARALLELISM="true"
PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True
```
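A minimal sketch of how these variables are picked up, mirroring the `load_dotenv(find_dotenv())` call in quantize.py (it assumes a .env file next to the script; note that quantize.py also imports python-dotenv, which is not in the pip list below):

```python
import os

from dotenv import load_dotenv, find_dotenv

# find_dotenv() walks up from the current directory until it finds a .env file;
# load_dotenv() then exports its entries into the process environment.
load_dotenv(find_dotenv())

print("HF token present:", os.getenv("HF_TOKEN") is not None)
print("Tokenizers parallelism:", os.getenv("TOKENIZERS_PARALLELISM"))
```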
Alternatively, install the dependencies with:
```bash
pip install \
  gptqmodel==1.9.0 \
  typer==0.15.1 \
  huggingface_hub==<version> \
  datasets==3.3.0 \
  transformers==4.48.3 \
  safetensors==0.5.2 \
  torch==2.6.0
```
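As a quick sanity check after installation (a minimal sketch, not part of the original workflow), you can confirm that the pinned packages resolve and that CUDA is visible:

```python
from importlib.metadata import version

import torch

# Print the installed versions of the pinned packages and check GPU availability.
print("torch:", torch.__version__, "| CUDA available:", torch.cuda.is_available())
for pkg in ("gptqmodel", "transformers", "datasets", "safetensors", "typer"):
    print(pkg, version(pkg))
```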
💻 Usage Examples
Basic usage
```python
from gptqmodel import GPTQModel
from transformers import AutoTokenizer

# Use a local directory or the uploaded JustJaro/Mistral-Small-24B-Instruct-2501_gptq_g128_4bit
quantized_model_id = "/home/jaro/models/quantized/Mistral-Small-24B-Instruct-2501_gptq_g128_4bit"  # or "JustJaro/Mistral-Small-24B-Instruct-2501_gptq_g128_4bit"
tokenizer = AutoTokenizer.from_pretrained(quantized_model_id)
model = GPTQModel.load(quantized_model_id, device="cuda:0")  # or "cpu"

input_text = "This is a test prompt"
inputs = tokenizer(input_text, return_tensors="pt").to("cuda:0")
outputs = model.generate(**inputs)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```
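Since the base model is instruction-tuned, chat-style prompting through the tokenizer's chat template is usually the better fit. The following is a hedged sketch: it reuses `model` and `tokenizer` from the snippet above, assumes the bundled tokenizer ships Mistral's chat template, and uses an illustrative `max_new_tokens=128`.

```python
# Build a chat prompt with the tokenizer's chat template, then generate as before.
messages = [
    {"role": "user", "content": "Summarize in one sentence what GPTQ 4-bit quantization does."},
]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

chat_inputs = tokenizer(prompt, return_tensors="pt").to("cuda:0")
chat_outputs = model.generate(**chat_inputs, max_new_tokens=128)
print(tokenizer.decode(chat_outputs[0], skip_special_tokens=True))
```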
📚 Documentation
Model details

| Property | Details |
|---|---|
| Original model | mistralai/Mistral-Small-24B-Instruct-2501 |
| Quantized model | Mistral-Small-24B-Instruct-2501_gptq_g128_4bit (this repository) |
| Quantization method | GPTQ (4-bit, group size 128) |
| Quantization library | GPTQModel |
| Calibration dataset | neuralmagic/LLM_compression_calibration (512 samples, sequence length 4096; see the sketch below) |
| Quantized by | ConfidentialMind.com |
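The calibration dataset stores chat-style `messages` rather than a plain `text` column. The following sketch mirrors how the `get_text_from_example` helper in quantize.py extracts calibration text from it, assuming the dataset exposes a `train` split as the script expects:

```python
from datasets import load_dataset

# Load one example and flatten its chat messages into a single calibration string,
# the same way get_text_from_example() in quantize.py does.
ds = load_dataset("neuralmagic/LLM_compression_calibration", split="train")
example = ds[0]
if example.get("text"):
    text = example["text"]
else:
    contents = [m.get("content", "").strip() for m in example.get("messages", [])]
    text = " ".join(s for s in contents if s)
print(text[:200])
```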
Quantization script
The following quantize.py script (with the exact dependency versions) was used to generate this model:
```python
#!/usr/bin/env python3
"""
This script loads a source Hugging Face model and a calibration dataset,
quantizes the model using GPTQModel (with 4-bit precision and group size 128),
saves the quantized model using the Transformers API with safetensors (safe serialization)
under ~/models/quantized/, and then creates/updates a Hugging Face repository (with the
_gptq_g128_4bit suffix) by uploading the model, tokenizer, and an auto-generated README.md.
Usage example:
python quantize.py --source-model TinyLlama/TinyLlama-1.1B-Chat-v1.0 \
--calibration-dataset wikitext/wikitext-2-raw-v1 \
--seq-len 1024 --nsamples 256 --hf-token <YOUR_HF_TOKEN>
"""
import os
import shutil
import subprocess
from pathlib import Path
from typing import List
import torch
import typer
from datasets import load_dataset
from dotenv import load_dotenv, find_dotenv
from gptqmodel import GPTQModel, QuantizeConfig
from gptqmodel.utils import Perplexity
# For later pushing to the model hub
from huggingface_hub import HfApi
from transformers import AutoTokenizer, PreTrainedTokenizerBase
load_dotenv(find_dotenv())
HF_TOKEN = os.getenv("HF_TOKEN")
app = typer.Typer()
def get_text_from_example(example: dict) -> str:
    """
    Returns text from a dataset example.
    If the example contains a "text" field, and it is nonempty, that text is used.
    Otherwise, if it has a "messages" field (a list of dicts with a "content" key),
    the function returns the concatenation of all non-empty message contents.
    """
    if "text" in example and example["text"]:
        return example["text"]
    elif "messages" in example:
        contents = [msg.get("content", "").strip() for msg in example["messages"]]
        return " ".join([s for s in contents if s])
    else:
        return ""
def get_calibration_dataset(
    tokenizer: PreTrainedTokenizerBase,
    nsamples: int,
    seqlen: int,
    calibration_dataset: str
) -> List[dict]:
    """
    Loads a calibration dataset from the Hugging Face Hub (or from a local file).
    It accepts datasets with a single "text" field (like wikitext)
    or with a "messages" field (as in the Neural Magic LLM Compression Calibration dataset).
    Only examples whose extracted text length is at least 'seqlen' are kept.
    Each chosen example is tokenized (with truncation up to 'seqlen') and returned as a dict.
    """
    ds = None
    try:
        # Attempt to load from HF Hub.
        try:
            if "/" in calibration_dataset:
                parts = calibration_dataset.split("/", 1)
                ds = load_dataset(parts[0], parts[1], split="train")
            else:
                ds = load_dataset(calibration_dataset, split="train")
        except Exception as e:
            print(f"Error loading dataset '{calibration_dataset}' via load_dataset: {e}")
            ds = load_dataset(calibration_dataset, split="train")
        print(f"Loaded calibration dataset from full remote path {calibration_dataset}.")
    except Exception as e:
        print(f"Error loading dataset '{calibration_dataset}' via load_dataset: {e}")
        # Fallback: if the supplied calibration_dataset is a local path, try to load it as JSON-lines.
        if os.path.exists(calibration_dataset):
            try:
                ds = load_dataset("json", data_files=calibration_dataset, split="train")
                print(f"Loaded calibration dataset from local file {calibration_dataset}.")
            except Exception as e2:
                print(f"Error loading local json dataset from '{calibration_dataset}': {e2}")
                return []
        else:
            return []

    print(f"Dataset features: {ds.features}")

    # Filter examples that have at least 80% 'seqlen' of extracted text.
    ds = ds.filter(lambda x: len(get_text_from_example(x)) >= int(seqlen * 0.8))
    sample_range = min(nsamples, len(ds))
    calibration_data = []
    for i in range(sample_range):
        example = ds[i]
        text = get_text_from_example(example)
        tokenized = tokenizer(text, truncation=True, max_length=seqlen, return_tensors="pt")
        tokenized = {k: v.squeeze(0) for k, v in tokenized.items()}
        calibration_data.append(tokenized)
    return calibration_data
def calculate_avg_ppl(model, tokenizer):
    """
    Computes the average perplexity on the wikitext-2-raw-v1 train split using GPTQModel's Perplexity utility.
    """
    ppl = Perplexity(
        model=model,
        tokenizer=tokenizer,
        dataset_path="wikitext",
        dataset_name="wikitext-2-raw-v1",
        split="train",
        text_column="text",
    )
    ppl_values = ppl.calculate(n_ctx=512, n_batch=512)
    avg = sum(ppl_values) / len(ppl_values)
    return avg
def get_pinned_package_versions():
    """
    Retrieves pinned package versions using 'uv pip freeze'.
    Returns a dictionary mapping lowercased package names to their versions.
    """
    try:
        result = subprocess.run(["uv", "pip", "freeze"], capture_output=True, text=True, check=True)
        packages_output = result.stdout.strip()
        versions = {}
        for line in packages_output.splitlines():
            if "==" in line:
                package_name, package_version = line.split("==", 1)
                versions[package_name.lower()] = package_version
        return versions
    except subprocess.CalledProcessError as e:
        typer.echo(f"Error running 'uv pip freeze': {e}", err=True)
        return {}
    except FileNotFoundError:
        typer.echo("uv command not found. Make sure uv is installed and in your PATH.", err=True)
        return {}
@app.command()
def main(
    seq_len: int = typer.Option(4096, help="Sequence length for tokenization and calibration."),
    nsamples: int = typer.Option(512, help="Number of samples to use for calibration."),
    source_model: str = typer.Option("mistralai/Mistral-Small-24B-Instruct-2501",
                                     help="Source model HF repository identifier."),
    calibration_dataset: str = typer.Option("wikitext/wikitext-2-raw-v1",
                                            help="Calibration dataset identifier (in 'dataset/config' format) or local file path."),
    hf_token: str = typer.Option(HF_TOKEN,
                                 help="Hugging Face token for creating/updating your repo."),
):
    # Prepare destination directory and model names.
    model_name = source_model.split("/")[-1]
    quantized_model_name = f"{model_name}_gptq_g128_4bit"
    quantized_model_dir = os.path.expanduser(os.path.join("~/models/quantized", quantized_model_name))

    if not os.path.exists(quantized_model_dir):
        os.makedirs(quantized_model_dir, exist_ok=True)

        typer.echo("Loading tokenizer from source model...")
        tokenizer_obj = AutoTokenizer.from_pretrained(source_model, use_fast=True)

        typer.echo("Loading calibration dataset...")
        typer.echo(f"Calibration dataset: {calibration_dataset}")
        calibration_data = get_calibration_dataset(tokenizer_obj, nsamples, seq_len, calibration_dataset)
        if not calibration_data:
            typer.echo("Calibration dataset is empty. Aborting.", err=True)
            raise typer.Exit(code=1)

        quantize_config = QuantizeConfig(bits=4, group_size=128, mse=0.01, damp_percent=0.015)
        device = "cuda:0" if torch.cuda.is_available() else "cpu"
        typer.echo(f"Loading model in {device} mode...")
        model = GPTQModel.load(source_model, quantize_config)

        typer.echo("Quantizing model...")
        model.quantize(calibration_data, auto_gc=False, batch_size=int(nsamples * 0.1))

        # Retrieve Hugging Face user info for README generation.
        package_versions = get_pinned_package_versions()
        username = get_my_user(hf_token)
        script_content = self_read_script()

        typer.echo(f"Saving quantized model to {quantized_model_dir} using Transformers safe serialization...")
        try:
            model.save_pretrained(quantized_model_dir)
            tokenizer_obj.save_pretrained(quantized_model_dir)
        except Exception as ex:
            typer.echo(f"Error during saving with safe_serialization: {ex}. Aborting.")
            raise
        typer.echo(f"Model uploaded to Hugging Face repo: {quantized_model_name}")
    else:
        tokenizer_obj = AutoTokenizer.from_pretrained(source_model, use_fast=True)
        package_versions = get_pinned_package_versions()
        username = get_my_user(hf_token)
        script_content = self_read_script()
        device = "cuda:0" if torch.cuda.is_available() else "cpu"
        model = GPTQModel.load(quantized_model_dir, device=device)

    avg_ppl = calculate_avg_ppl(model, tokenizer_obj)
    typer.echo(f"Average perplexity (PPL) on wikitext v2 dataset: {avg_ppl}")

    deps = Path("./pyproject.toml")
    shutil.copy(deps, quantized_model_dir)

    generate_readme(calibration_dataset, nsamples, package_versions, quantized_model_dir,
                    quantized_model_name, script_content, seq_len, source_model, username, avg_ppl)
    GPTQModel.push_to_hub(quantized_path=quantized_model_dir, private=False, repo_id=quantized_model_name,
                          token=HF_TOKEN)
    typer.echo(f"Model uploaded to Hugging Face repo: {quantized_model_name}")

    demo_input = tokenizer_obj("test is", return_tensors="pt").to(device)
    generated_ids = model.generate(**demo_input)
    output_text = tokenizer_obj.decode(generated_ids[0])
    typer.echo(f"Inference demo output: {output_text}")
    typer.echo(f"Average perplexity (PPL) on calibration dataset: {avg_ppl}")
def self_read_script():
    try:
        script_path = os.path.abspath(__file__)
        with open(script_path, "r") as f:
            script_content = f.read()
    except Exception as e:
        script_content = "Error reading script content: " + str(e)
    return script_content
def get_my_user(hf_token):
    api = HfApi(token=hf_token)
    user_info = api.whoami()
    try:
        username = user_info.get("name") or user_info.get("username")
    except Exception as e:
        typer.echo(f"Error retrieving username from Hugging Face API: {e}. Using default username.")
        username = api.whoami()
    if not username:
        typer.echo("Could not determine your Hugging Face username from the token, defaulting to hard coded username.",
                   err=True)
        username = "JustJaro"
    return username
def generate_readme(calibration_dataset, nsamples, package_versions, quantized_model_dir,
                    quantized_model_name, script_content, seq_len, source_model, username, avg_ppl):
    readme_content = f"""{MakeYourown}""
```
🔧 Technical Details
Quantization performance
Average perplexity (PPL) on the wikitext v2 dataset: 23.63232087314638. A full evaluation harness will be provided in a future release.
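This figure comes from GPTQModel's Perplexity utility, which the script above wraps in `calculate_avg_ppl`. The following is a minimal sketch of reproducing the measurement on the quantized checkpoint; context and batch sizes match the script, and the model id is the repository name used in the usage example.

```python
from gptqmodel import GPTQModel
from gptqmodel.utils import Perplexity
from transformers import AutoTokenizer

model_id = "JustJaro/Mistral-Small-24B-Instruct-2501_gptq_g128_4bit"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = GPTQModel.load(model_id, device="cuda:0")

# Same settings as calculate_avg_ppl() in quantize.py: wikitext-2-raw-v1 train split,
# 512-token context, 512-token batches.
ppl = Perplexity(
    model=model,
    tokenizer=tokenizer,
    dataset_path="wikitext",
    dataset_name="wikitext-2-raw-v1",
    split="train",
    text_column="text",
)
values = ppl.calculate(n_ctx=512, n_batch=512)
print("Average PPL:", sum(values) / len(values))
```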
📄 License
This model inherits the license of the original model. For more details, please refer to the original model card: https://huggingface.co/mistralai/Mistral-Small-24B-Instruct-2501
Disclaimer
This model is intended for research purposes only. It may inherit limitations and biases from the original model and from the quantization process. Please use it responsibly and refer to the original model card for further details.
Contact
For questions or support, please visit ConfidentialMind.com or get in touch directly. You can reach me by email at jaro@confidentialmind.com or on LinkedIn.
Acknowledgements
This model was quantized by Jaro, with compute generously provided by ConfidentialMind. The quantization used the GPTQModel pipeline.
TODO
- Add `gptqmodel.utils.eval` integration and automatic generation of the evaluation table.
- Fix `README.md` generation.
Generated and quantized with GPTQModel.



