🚀 InternVL3-8B-Instruct
InternVL3-8B-Instruct is an advanced multimodal large language model (MLLM) with strong multimodal perception and reasoning capabilities, extending multimodal abilities to areas such as tool use, GUI agents, and industrial image analysis.
🚀 Quick Start
Model Loading
16-bit (bf16 / fp16)
import torch
from transformers import AutoTokenizer, AutoModel
path = "OpenGVLab/InternVL3-8B"
model = AutoModel.from_pretrained(
    path,
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True,
    use_flash_attn=True,
    trust_remote_code=True).eval().cuda()
BNB 8-bit Quantization
import torch
from transformers import AutoTokenizer, AutoModel
path = "OpenGVLab/InternVL3-8B"
model = AutoModel.from_pretrained(
    path,
    torch_dtype=torch.bfloat16,
    load_in_8bit=True,
    low_cpu_mem_usage=True,
    use_flash_attn=True,
    trust_remote_code=True).eval()
Multiple GPUs
import math
import torch
from transformers import AutoConfig, AutoTokenizer, AutoModel
def split_model(model_path):
    device_map = {}
    world_size = torch.cuda.device_count()
    config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
    num_layers = config.llm_config.num_hidden_layers
    # Since the first GPU will be used for ViT, treat it as half a GPU.
    num_layers_per_gpu = math.ceil(num_layers / (world_size - 0.5))
    num_layers_per_gpu = [num_layers_per_gpu] * world_size
    num_layers_per_gpu[0] = math.ceil(num_layers_per_gpu[0] * 0.5)
    layer_cnt = 0
    for i, num_layer in enumerate(num_layers_per_gpu):
        for j in range(num_layer):
            device_map[f'language_model.model.layers.{layer_cnt}'] = i
            layer_cnt += 1
    device_map['vision_model'] = 0
    device_map['mlp1'] = 0
    device_map['language_model.model.tok_embeddings'] = 0
    device_map['language_model.model.embed_tokens'] = 0
    device_map['language_model.output'] = 0
    device_map['language_model.model.norm'] = 0
    device_map['language_model.model.rotary_emb'] = 0
    device_map['language_model.lm_head'] = 0
    # Keep the first and last LLM layers on the same device to avoid errors.
    device_map[f'language_model.model.layers.{num_layers - 1}'] = 0
    return device_map
path = "OpenGVLab/InternVL3-8B"
device_map = split_model(path)
model = AutoModel.from_pretrained(
    path,
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True,
    use_flash_attn=True,
    trust_remote_code=True,
    device_map=device_map).eval()
Inference with Transformers
import math
import numpy as np
import torch
import torchvision.transforms as T
from decord import VideoReader, cpu
from PIL import Image
from torchvision.transforms.functional import InterpolationMode
from transformers import AutoConfig, AutoModel, AutoTokenizer
IMAGENET_MEAN = (0.485, 0.456, 0.406)
IMAGENET_STD = (0.229, 0.224, 0.225)
def build_transform(input_size):
    MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
    transform = T.Compose([
        T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
        T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
        T.ToTensor(),
        T.Normalize(mean=MEAN, std=STD)
    ])
    return transform
def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
    best_ratio_diff = float('inf')
    best_ratio = (1, 1)
    area = width * height
    for ratio in target_ratios:
        target_aspect_ratio = ratio[0] / ratio[1]
        ratio_diff = abs(aspect_ratio - target_aspect_ratio)
        if ratio_diff < best_ratio_diff:
            best_ratio_diff = ratio_diff
            best_ratio = ratio
        elif ratio_diff == best_ratio_diff:
            if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
                best_ratio = ratio
    return best_ratio
def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False):
    orig_width, orig_height = image.size
    aspect_ratio = orig_width / orig_height
    # calculate the existing image aspect ratio
    target_ratios = set(
        (i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
        i * j <= max_num and i * j >= min_num)
    target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])
    # find the closest aspect ratio to the target
    target_aspect_ratio = find_closest_aspect_ratio(
        aspect_ratio, target_ratios, orig_width, orig_height, image_size)
    # calculate the target width and height
    target_width = image_size * target_aspect_ratio[0]
    target_height = image_size * target_aspect_ratio[1]
    blocks = target_aspect_ratio[0] * target_aspect_ratio[1]
    # resize the image
    resized_img = image.resize((target_width, target_height))
    processed_images = []
    for i in range(blocks):
        box = (
            (i % (target_width // image_size)) * image_size,
            (i // (target_width // image_size)) * image_size,
            ((i % (target_width // image_size)) + 1) * image_size,
            ((i // (target_width // image_size)) + 1) * image_size
        )
        # split the image
        split_img = resized_img.crop(box)
        processed_images.append(split_img)
    assert len(processed_images) == blocks
    if use_thumbnail and len(processed_images) != 1:
        thumbnail_img = image.resize((image_size, image_size))
        processed_images.append(thumbnail_img)
    return processed_images
def load_image(image_file, input_size=448, max_num=12):
    image = Image.open(image_file).convert('RGB')
    transform = build_transform(input_size=input_size)
    images = dynamic_preprocess(image, image_size=input_size, use_thumbnail=True, max_num=max_num)
    pixel_values = [transform(image) for image in images]
    pixel_values = torch.stack(pixel_values)
    return pixel_values
def split_model(model_path):
    device_map = {}
    world_size = torch.cuda.device_count()
    config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
    num_layers = config.llm_config.num_hidden_layers
    # Since the first GPU will be used for ViT, treat it as half a GPU.
    num_layers_per_gpu = math.ceil(num_layers / (world_size - 0.5))
    num_layers_per_gpu = [num_layers_per_gpu] * world_size
    num_layers_per_gpu[0] = math.ceil(num_layers_per_gpu[0] * 0.5)
    layer_cnt = 0
    for i, num_layer in enumerate(num_layers_per_gpu):
        for j in range(num_layer):
            device_map[f'language_model.model.layers.{layer_cnt}'] = i
            layer_cnt += 1
    device_map['vision_model'] = 0
    device_map['mlp1'] = 0
    device_map['language_model.model.tok_embeddings'] = 0
    device_map['language_model.model.embed_tokens'] = 0
    device_map['language_model.output'] = 0
    device_map['language_model.model.norm'] = 0
    device_map['language_model.model.rotary_emb'] = 0
    device_map['language_model.lm_head'] = 0
    # Keep the first and last LLM layers on the same device to avoid errors.
    device_map[f'language_model.model.layers.{num_layers - 1}'] = 0
    return device_map
# Note: in bf16 the 8B model's weights alone take roughly 16 GB, so a single
# 24 GB GPU is usually sufficient; this split mainly matters for larger variants.
path = 'OpenGVLab/InternVL3-8B'
device_map = split_model(path)
model = AutoModel.from_pretrained(
    path,
    torch_dtype=torch.bfloat16,
    load_in_8bit=False,
    low_cpu_mem_usage=True,
    use_flash_attn=True,
    trust_remote_code=True,
    device_map=device_map).eval()
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=False)
# set the max number of tiles in `max_num`
pixel_values = load_image('./examples/image1.jpg', max_num=12).to(torch.bfloat16).cuda()
generation_config = dict(max_new_tokens=1024, do_sample=True)
# pure-text conversation
question = 'Hello, who are you?'
response, history = model.chat(tokenizer, None, question, generation_config, history=None, return_history=True)
print(f'User: {question}\nAssistant: {response}')
question = 'Can you tell me a story?'
response, history = model.chat(tokenizer, None, question, generation_config, history=history, return_history=True)
print(f'User: {question}\nAssistant: {response}')
# single-image single-round conversation
question = '<image>\nPlease describe the image shortly.'
response = model.chat(tokenizer, pixel_values, question, generation_config)
print(f'User: {question}\nAssistant: {response}')
# single-image multi-round conversation
question = '<image>\nPlease describe the image in detail.'
response, history = model.chat(tokenizer, pixel_values, question, generation_config, history=None, return_history=True)
print(f'User: {question}\nAssistant: {response}')
question = 'Please write a poem according to the image.'
response, history = model.chat(tokenizer, pixel_values, question, generation_config, history=history, return_history=True)
print(f'User: {question}\nAssistant: {response}')
# multi-image multi-round conversation, combined images
pixel_values1 = load_image('./examples/image1.jpg', max_num=12).to(torch.bfloat16).cuda()
pixel_values2 = load_image('./examples/image2.jpg', max_num=12).to(torch.bfloat16).cuda()
pixel_values = torch.cat((pixel_values1, pixel_values2), dim=0)
question = '<image>\nDescribe the two images in detail.'
response, history = model.chat(tokenizer, pixel_values, question, generation_config,
                               history=None, return_history=True)
print(f'User: {question}\nAssistant: {response}')
question = 'What are the similarities and differences between these two images.'
response, history = model.chat(tokenizer, pixel_values, question, generation_config,
                               history=history, return_history=True)
print(f'User: {question}\nAssistant: {response}')
# multi-image multi-round conversation, separate images
pixel_values1 = load_image('./examples/image1.jpg', max_num=12).to(torch.bfloat16).cuda()
pixel_values2 = load_image('./examples/image2.jpg', max_num=12).to(torch.bfloat16).cuda()
pixel_values = torch.cat((pixel_values1, pixel_values2), dim=0)
num_patches_list = [pixel_values1.size(0), pixel_values2.size(0)]
question = 'Image-1: <image>\nImage-2: <image>\nDescribe the two images in detail.'
response, history = model.chat(tokenizer, pixel_values, question, generation_config,
                               num_patches_list=num_patches_list,
                               history=None, return_history=True)
print(f'User: {question}\nAssistant: {response}')
question = 'What are the similarities and differences between these two images.'
response, history = model.chat(tokenizer, pixel_values, question, generation_config,
                               num_patches_list=num_patches_list,
                               history=history, return_history=True)
print(f'User: {question}\nAssistant: {response}')
# batch inference, single image per sample
pixel_values1 = load_image('./examples/image1.jpg', max_num=12).to(torch.bfloat16).cuda()
pixel_values2 = load_image('./examples/image2.jpg', max_num=12).to(torch.bfloat16).cuda()
num_patches_list = [pixel_values1.size(0), pixel_values2.size(0)]
pixel_values = torch.cat((pixel_values1, pixel_values2), dim=0)
questions = ['<image>\nDescribe the image in detail.'] * len(num_patches_list)
responses = model.batch_chat(tokenizer, pixel_values,
                             num_patches_list=num_patches_list,
                             questions=questions,
                             generation_config=generation_config)
for question, response in zip(questions, responses):
    print(f'User: {question}\nAssistant: {response}')
# video multi-round conversation
def get_index(bound, fps, max_frame, first_idx=0, num_segments=32):
    if bound:
        start, end = bound[0], bound[1]
    else:
        start, end = -100000, 100000
    start_idx = max(first_idx, round(start * fps))
    end_idx = min(round(end * fps), max_frame)
    seg_size = float(end_idx - start_idx) / num_segments
    frame_indices = np.array([
        int(start_idx + (seg_size / 2) + np.round(seg_size * idx))
        for idx in range(num_segments)
    ])
    return frame_indices
def load_video(video_path, bound=None, input_size=448, max_num=1, num_segments=32):
    vr = VideoReader(video_path, ctx=cpu(0), num_threads=1)
    max_frame = len(vr) - 1
    fps = float(vr.get_avg_fps())
    pixel_values_list, num_patches_list = [], []
    transform = build_transform(input_size=input_size)
    frame_indices = get_index(bound, fps, max_frame, first_idx=0, num_segments=num_segments)
    for frame_index in frame_indices:
        img = Image.fromarray(vr[frame_index].asnumpy()).convert('RGB')
        img = dynamic_preprocess(img, image_size=input_size, use_thumbnail=True, max_num=max_num)
        pixel_values = [transform(tile) for tile in img]
        pixel_values = torch.stack(pixel_values)
        num_patches_list.append(pixel_values.shape[0])
        pixel_values_list.append(pixel_values)
    pixel_values = torch.cat(pixel_values_list)
    return pixel_values, num_patches_list
video_path = './examples/red-panda.mp4'
pixel_values, num_patches_list = load_video(video_path, num_segments=8, max_num=1)
pixel_values = pixel_values.to(torch.bfloat16).cuda()
video_prefix = ''.join([f'Frame{i+1}: <image>\n' for i in range(len(num_patches_list))])
question = video_prefix + 'What is the red panda doing?'
# Frame1: <image>\nFrame2: <image>\n...\nFrame8: <image>\n{question}
response, history = model.chat(tokenizer, pixel_values, question, generation_config,
                               num_patches_list=num_patches_list, history=None, return_history=True)
print(f'User: {question}\nAssistant: {response}')
question = 'Describe this video in detail.'
response, history = model.chat(tokenizer, pixel_values, question, generation_config,
                               num_patches_list=num_patches_list, history=history, return_history=True)
print(f'User: {question}\nAssistant: {response}')
Streaming Output
from transformers import TextIteratorStreamer
from threading import Thread
# Initialize the streamer
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True, timeout=10)
# Define the generation configuration
generation_config = dict(max_new_tokens=1024, do_sample=False, streamer=streamer)
# Start the model chat in a separate thread
thread = Thread(target=model.chat, kwargs=dict(
    tokenizer=tokenizer, pixel_values=pixel_values, question=question,
    history=None, return_history=False, generation_config=generation_config,
))
thread.start()
# Initialize an empty string to store the generated text
generated_text = ''
# Loop through the streamer to get the new text as it is generated
for new_text in streamer:
    if new_text == model.conv_template.sep:
        break
    generated_text += new_text
    print(new_text, end='', flush=True)  # print each new chunk of generated text on the same line
✨ Key Features
- Advanced multimodal capabilities: compared with InternVL 2.5, InternVL3 demonstrates superior multimodal perception and reasoning, and extends its multimodal capabilities to tool use, GUI agents, industrial image analysis, 3D vision perception, and more.
- Strong language performance: thanks to native multimodal pre-training, the InternVL3 series even surpasses the Qwen2.5 series in overall text performance.
- Flexible model architecture: follows the "ViT-MLP-LLM" paradigm, integrating the newly incrementally pre-trained InternViT with various pre-trained LLMs such as InternLM 3 and Qwen 2.5.
- Innovative training strategies: employs native multimodal pre-training, supervised fine-tuning, mixed preference optimization, and test-time scaling to boost performance.
📦 Installation
LMDeploy
# if lmdeploy<0.7.3, you need to explicitly set chat_template_config=ChatTemplateConfig(model_name='internvl2_5')
pip install "lmdeploy>=0.7.3"
To use the OpenAI-style interface, install the OpenAI package:
pip install openai
💻 Usage Examples
Basic Usage
from lmdeploy import pipeline, TurbomindEngineConfig, ChatTemplateConfig
from lmdeploy.vl import load_image
model = 'OpenGVLab/InternVL3-8B'
image = load_image('https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/tests/data/tiger.jpeg')
pipe = pipeline(model, backend_config=TurbomindEngineConfig(session_len=16384, tp=1), chat_template_config=ChatTemplateConfig(model_name='internvl2_5'))
response = pipe(('describe this image', image))
print(response.text)
Advanced Usage
Multi-image Inference
from lmdeploy import pipeline, TurbomindEngineConfig, ChatTemplateConfig
from lmdeploy.vl import load_image
from lmdeploy.vl.constants import IMAGE_TOKEN
model = 'OpenGVLab/InternVL3-8B'
pipe = pipeline(model, backend_config=TurbomindEngineConfig(session_len=16384, tp=1), chat_template_config=ChatTemplateConfig(model_name='internvl2_5'))
image_urls = [
    'https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/demo/resources/human-pose.jpg',
    'https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/demo/resources/det.jpg'
]
images = [load_image(img_url) for img_url in image_urls]
# Numbering images improves multi-image conversations
response = pipe((f'Image-1: {IMAGE_TOKEN}\nImage-2: {IMAGE_TOKEN}\ndescribe these two images', images))
print(response.text)
Batched Prompt Inference
from lmdeploy import pipeline, TurbomindEngineConfig, ChatTemplateConfig
from lmdeploy.vl import load_image
model = 'OpenGVLab/InternVL3-8B'
pipe = pipeline(model, backend_config=TurbomindEngineConfig(session_len=16384, tp=1), chat_template_config=ChatTemplateConfig(model_name='internvl2_5'))
image_urls = [
    "https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/demo/resources/human-pose.jpg",
    "https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/demo/resources/det.jpg"
]
prompts = [('describe this image', load_image(img_url)) for img_url in image_urls]
response = pipe(prompts)
print(response)
Multi-turn Conversation
from lmdeploy import pipeline, TurbomindEngineConfig, GenerationConfig, ChatTemplateConfig
from lmdeploy.vl import load_image
model = 'OpenGVLab/InternVL3-8B'
pipe = pipeline(model, backend_config=TurbomindEngineConfig(session_len=16384, tp=1), chat_template_config=ChatTemplateConfig(model_name='internvl2_5'))
image = load_image('https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/demo/resources/human-pose.jpg')
gen_config = GenerationConfig(top_k=40, top_p=0.8, temperature=0.8)
sess = pipe.chat(('describe this image', image), gen_config=gen_config)
print(sess.response.text)
sess = pipe.chat('What is the woman doing?', session=sess, gen_config=gen_config)
print(sess.response.text)
Serving
lmdeploy serve api_server OpenGVLab/InternVL3-8B --chat-template internvl2_5 --server-port 23333 --tp 1
Call the API with the OpenAI-style interface:
from openai import OpenAI
client = OpenAI(api_key='YOUR_API_KEY', base_url='http://0.0.0.0:23333/v1')
model_name = client.models.list().data[0].id
response = client.chat.completions.create(
    model=model_name,
    messages=[{
        'role': 'user',
        'content': [{
            'type': 'text',
            'text': 'describe this image',
        }, {
            'type': 'image_url',
            'image_url': {
                'url': 'https://modelscope.oss-cn-beijing.aliyuncs.com/resource/tiger.jpeg',
            },
        }],
    }],
    temperature=0.8,
    top_p=0.8)
print(response)
📚 Documentation
The InternVL3 Family
| Model Name | Vision Part | Language Part | Hugging Face Link |
| --- | --- | --- | --- |
| InternVL3-1B | [InternViT-300M-448px-V2_5](https://huggingface.co/OpenGVLab/InternViT-300M-448px-V2_5) | [Qwen2.5-0.5B](https://huggingface.co/Qwen/Qwen2.5-0.5B) | [Link](https://huggingface.co/OpenGVLab/InternVL3-1B) |
| InternVL3-2B | [InternViT-300M-448px-V2_5](https://huggingface.co/OpenGVLab/InternViT-300M-448px-V2_5) | [Qwen2.5-1.5B](https://huggingface.co/Qwen/Qwen2.5-1.5B) | [Link](https://huggingface.co/OpenGVLab/InternVL3-2B) |
| InternVL3-8B | [InternViT-300M-448px-V2_5](https://huggingface.co/OpenGVLab/InternViT-300M-448px-V2_5) | [Qwen2.5-7B](https://huggingface.co/Qwen/Qwen2.5-7B) | [Link](https://huggingface.co/OpenGVLab/InternVL3-8B) |
| InternVL3-9B | [InternViT-300M-448px-V2_5](https://huggingface.co/OpenGVLab/InternViT-300M-448px-V2_5) | [internlm3-8b-instruct](https://huggingface.co/internlm/internlm3-8b-instruct) | [Link](https://huggingface.co/OpenGVLab/InternVL3-9B) |
| InternVL3-14B | [InternViT-300M-448px-V2_5](https://huggingface.co/OpenGVLab/InternViT-300M-448px-V2_5) | [Qwen2.5-14B](https://huggingface.co/Qwen/Qwen2.5-14B) | [Link](https://huggingface.co/OpenGVLab/InternVL3-14B) |
| InternVL3-38B | [InternViT-6B-448px-V2_5](https://huggingface.co/OpenGVLab/InternViT-6B-448px-V2_5) | [Qwen2.5-32B](https://huggingface.co/Qwen/Qwen2.5-32B) | [Link](https://huggingface.co/OpenGVLab/InternVL3-38B) |
| InternVL3-78B | [InternViT-6B-448px-V2_5](https://huggingface.co/OpenGVLab/InternViT-6B-448px-V2_5) | [Qwen2.5-72B](https://huggingface.co/Qwen/Qwen2.5-72B) | [Link](https://huggingface.co/OpenGVLab/InternVL3-78B) |
Model Architecture
[InternVL3](https://internvl.github.io/blog/2025-04-11-InternVL-3/) retains the model architecture of [InternVL 2.5](https://internvl.github.io/blog/2024-12-05-InternVL-2.5/) and its predecessors InternVL 1.5 and 2.0, following the "ViT-MLP-LLM" paradigm. In the new version, a randomly initialized MLP projector integrates the newly incrementally pre-trained InternViT with various pre-trained LLMs.

As before, a pixel-unshuffle operation reduces the number of visual tokens to one quarter of the original, and a dynamic-resolution strategy similar to InternVL 1.5 divides images into 448×448-pixel tiles. Starting from InternVL 2.0, support for multi-image and video data was added. In addition, InternVL3 integrates Variable Visual Position Encoding (V2PE), which gives the model better long-context understanding than its predecessors.
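To make the token-reduction step concrete, below is a minimal, self-contained sketch of a pixel-unshuffle operation (an illustration of the general idea, not InternVL3's actual implementation): folding each 2×2 block of visual features into the channel dimension cuts the token count to one quarter.

```python
import torch

def pixel_unshuffle(x: torch.Tensor, scale: int = 2) -> torch.Tensor:
    # x: visual features of shape (batch, height, width, channels)
    b, h, w, c = x.shape
    # fold each (scale x scale) spatial block into the channel dimension
    x = x.view(b, h // scale, scale, w // scale, scale, c)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous()
    return x.view(b, h // scale, w // scale, c * scale * scale)

feats = torch.randn(1, 32, 32, 1024)  # 32 x 32 = 1024 visual tokens
out = pixel_unshuffle(feats)          # (1, 16, 16, 4096): 256 tokens, a quarter of the original
```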
Training Strategy
Native Multimodal Pre-training
The authors propose a native multimodal pre-training approach that consolidates language and vision learning into a single pre-training stage. Unlike the standard paradigm of first training a language-only model and later adapting it to other modalities, this method interleaves multimodal data (e.g., image-text, video-text, or interleaved image-text sequences) with large-scale text corpora. The model thus learns linguistic and multimodal representations simultaneously and can handle vision-language tasks without separate alignment or bridging modules.
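As a rough illustration of the data-interleaving idea only: the sampler below, its mixing ratio, and the corpus arguments are assumptions for the sketch, not the actual InternVL3 pipeline.

```python
import random

def interleaved_batches(text_corpus, multimodal_corpus, mm_ratio=0.5, batch_size=8):
    """Yield pre-training batches mixing text-only and multimodal samples.

    mm_ratio is an illustrative probability of drawing a multimodal sample;
    the real mixture and schedule are defined by the InternVL3 training recipe.
    """
    while True:
        batch = [
            random.choice(multimodal_corpus if random.random() < mm_ratio else text_corpus)
            for _ in range(batch_size)
        ]
        yield batch
```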
Supervised Fine-tuning
The InternVL3 series reuses techniques introduced in InternVL2.5, such as random JPEG compression, square-loss re-weighting, and multimodal data packing. Compared with InternVL2.5, the main advance in the supervised fine-tuning stage is the use of higher-quality and more diverse training data, with expanded samples for tool use, 3D scene understanding, GUI operation, long-context tasks, video understanding, scientific charts, creative writing, and multimodal reasoning.
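Of these techniques, random JPEG compression is easy to illustrate: re-encoding each training image at a random quality makes the model robust to compression artifacts. A minimal PIL-based sketch (the quality range here is an arbitrary choice for illustration):

```python
import io
import random
from PIL import Image

def random_jpeg_compression(img: Image.Image, min_quality=30, max_quality=95) -> Image.Image:
    """Re-encode an image as JPEG at a random quality to simulate compression artifacts."""
    quality = random.randint(min_quality, max_quality)
    buf = io.BytesIO()
    img.convert('RGB').save(buf, format='JPEG', quality=quality)
    buf.seek(0)
    return Image.open(buf).convert('RGB')
```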
Mixed Preference Optimization
During pre-training and supervised fine-tuning, the model predicts the next token conditioned on previous ground-truth tokens, whereas at inference time it conditions on its own previous outputs. This mismatch between ground-truth tokens and model-predicted tokens introduces a distribution shift that degrades the model's chain-of-thought (CoT) reasoning. To mitigate this, MPO introduces additional supervision from positive and negative samples, aligning the model's response distribution with the ground-truth distribution and thereby improving reasoning performance. The MPO training objective is a combination of a preference loss \(\mathcal{L}_{\text{p}}\), a quality loss \(\mathcal{L}_{\text{q}}\), and a generation loss \(\mathcal{L}_{\text{g}}\):

$$
\mathcal{L} = w_{p}\cdot\mathcal{L}_{\text{p}} + w_{q}\cdot\mathcal{L}_{\text{q}} + w_{g}\cdot\mathcal{L}_{\text{g}},
$$

where \(w_{*}\) denotes the weight assigned to each loss component.
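A minimal sketch of how such a combined objective can be assembled. The DPO-style preference term and the loss weights below are illustrative assumptions; the exact definitions of the three losses are given in the MPO paper.

```python
import torch
import torch.nn.functional as F

def preference_loss(logp_chosen, logp_rejected, ref_logp_chosen, ref_logp_rejected, beta=0.1):
    # DPO-style pairwise term, shown here as one common choice for L_p
    logits = beta * ((logp_chosen - ref_logp_chosen) - (logp_rejected - ref_logp_rejected))
    return -F.logsigmoid(logits).mean()

def mpo_loss(loss_p, loss_q, loss_g, w_p=1.0, w_q=1.0, w_g=1.0):
    # weighted sum of preference, quality, and generation losses;
    # the weights here are placeholders, not the values used for InternVL3
    return w_p * loss_p + w_q * loss_q + w_g * loss_g
```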
Test-Time Scaling
Test-time scaling has proven to be an effective way to improve the reasoning ability of LLMs and MLLMs. In this work, a Best-of-N evaluation strategy is used, with [VisualPRM-8B](https://huggingface.co/OpenGVLab/VisualPRM-8B) as the critic model to select the best response for reasoning and mathematics evaluation.
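Best-of-N itself is simple: sample N candidate responses and keep the one the critic scores highest. In the sketch below, `generate` and `score` are hypothetical wrappers around the policy model and a critic such as VisualPRM-8B:

```python
def best_of_n(prompt, generate, score, n: int = 8) -> str:
    """Sample n candidates and return the one with the highest critic score."""
    candidates = [generate(prompt) for _ in range(n)]
    return max(candidates, key=lambda response: score(prompt, response))
```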
Evaluation of Multimodal Capability
- Multimodal reasoning and mathematics
- OCR, chart, and document understanding
- Multi-image and real-world understanding
- Comprehensive multimodal understanding and hallucination evaluation
- Visual grounding
- Multimodal multilingual understanding
- Video understanding
- GUI grounding
- Spatial reasoning
Evaluation of Language Capability
InternVL3 is compared with the Qwen2.5 chat models, whose corresponding pre-trained base models were used to initialize InternVL3's language component. Thanks to native multimodal pre-training, the InternVL3 series surpasses the Qwen2.5 series in overall text performance. Note that the evaluation scores of the Qwen2.5 series may differ from the officially reported ones, because the prompt versions provided in the table were used for the OpenCompass evaluation across all datasets.
Ablation Studies
Native Multimodal Pre-training
Experiments were conducted on the InternVL2-8B model while keeping its architecture, initialization parameters, and training data entirely unchanged. Traditionally, InternVL2-8B was trained with a pipeline that begins with an MLP warm-up phase for feature alignment, followed by instruction tuning. In the experiments, the conventional MLP warm-up phase was replaced with the native multimodal pre-training process. The evaluation results show that the model with native multimodal pre-training matches the fully multi-stage-trained InternVL2-8B baseline on most benchmarks, and that after instruction tuning on higher-quality data it shows further gains across the evaluated multimodal tasks. These results underscore the efficiency of native multimodal pre-training in endowing MLLMs with strong multimodal capabilities.
Mixed Preference Optimization
Models fine-tuned with MPO show better reasoning performance across seven multimodal reasoning benchmarks than their counterparts without MPO. Specifically, InternVL3-78B and InternVL3-38B outperform their counterparts by 4.1 and 4.5 points, respectively. Notably, the training data used for MPO is a subset of that used for supervised fine-tuning, indicating that the gains come primarily from the training algorithm rather than the data.
Variable Visual Position Encoding
Introducing V2PE leads to significant gains on most evaluation metrics. Ablations varying the positional increment \( \delta \) further show that, even for tasks dominated by conventional contexts, relatively small \( \delta \) values achieve optimal performance. These findings provide important insight for future refinements of position-encoding strategies for visual tokens in MLLMs.
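To illustrate the role of \( \delta \): V2PE advances the position index by a fractional increment for visual tokens instead of a full step, so long visual sequences consume less of the positional range. A minimal sketch (the increment value and token-type convention are illustrative assumptions):

```python
def v2pe_positions(token_types, delta=0.25):
    """Assign position indices: advance by delta for visual tokens, by 1 for text tokens."""
    positions, pos = [], 0.0
    for t in token_types:
        positions.append(pos)
        pos += delta if t == 'image' else 1.0
    return positions

# v2pe_positions(['text', 'image', 'image', 'image', 'image', 'text'])
# -> [0.0, 1.0, 1.25, 1.5, 1.75, 2.0]
```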
🔧 Technical Details
Model Architecture Details
- Paradigm: follows the "ViT-MLP-LLM" paradigm, integrating the newly incrementally pre-trained InternViT with various pre-trained LLMs.
- Pixel operation: applies pixel unshuffle to reduce the number of visual tokens.
- Resolution strategy: uses a dynamic-resolution strategy that divides images into tiles.
- Data support: supports multi-image and video data starting from InternVL 2.0.
- Position encoding: integrates Variable Visual Position Encoding (V2PE) to improve long-context understanding.
Training Strategy Details
Native Multimodal Pre-training
- Unified learning: consolidates language and vision learning into a single pre-training stage.
- Data interleaving: interleaves multimodal data with large-scale text corpora.
- No alignment module: handles vision-language tasks without separate alignment or bridging modules.
Supervised Fine-tuning
- Reused techniques: random JPEG compression, square-loss re-weighting, and multimodal data packing.
- Upgraded data: higher-quality and more diverse training data.
Mixed Preference Optimization
- Mitigating distribution shift: adds supervision from positive and negative samples to align the model's response distribution with the ground-truth distribution.
- Combined objective: the training objective combines preference, quality, and generation losses.
Test-Time Scaling
- Evaluation strategy: Best-of-N.
- Critic model: [VisualPRM-8B](https://huggingface.co/OpenGVLab/VisualPRM-8B).
📄 License
This project is released under the MIT License. It uses the pre-trained Qwen2.5 as a component, which is licensed under Apache-2.0.
Citation
If you find this project useful in your research, please consider citing:
@article{chen2024expanding,
  title={Expanding Performance Boundaries of Open-Source Multimodal Models with Model, Data, and Test-Time Scaling},
  author={Chen, Zhe and Wang, Weiyun and Cao, Yue and Liu, Yangzhou and Gao, Zhangwei and Cui, Erfei and Zhu, Jinguo and Ye, Shenglong and Tian, Hao and Liu, Zhaoyang and others},
  journal={arXiv preprint arXiv:2412.05271},
  year={2024}
}
@article{wang2024mpo,
  title={Enhancing the Reasoning Ability of Multimodal Large Language Models via Mixed Preference Optimization},
  author={Wang, Weiyun and Chen, Zhe and Wang, Wenhai and Cao, Yue and Liu, Yangzhou and Gao, Zhangwei and Zhu, Jinguo and Zhu, Xizhou and Lu, Lewei and Qiao, Yu and Dai, Jifeng},
  journal={arXiv preprint arXiv:2411.10442},
  year={2024}
}
@article{chen2024far,
  title={How Far Are We to GPT-4V? Closing the Gap to Commercial Multimodal Models with Open-Source Suites},
  author={Chen, Zhe and Wang, Weiyun and Tian, Hao and Ye, Shenglong and Gao, Zhangwei and Cui, Erfei and Tong, Wenwen and Hu, Kongzhi and Luo, Jiapeng and Ma, Zheng and others},
  journal={arXiv preprint arXiv:2404.16821},
  year={2024}
}
@inproceedings{chen2024internvl,
  title={Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks},
  author={Chen, Zhe and Wu, Jiannan and Wang, Wenhai and Su, Weijie and Chen, Guo and Xing, Sen and Zhong, Muyan and Zhang, Qinglong and Zhu, Xizhou and Lu, Lewei and others},
  booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
  pages={24185--24198},
  year={2024}
}
⚠️ Important Notes
Please use transformers >= 4.37.2 to ensure the model works correctly.
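A quick way to verify the installed version before loading the model (a convenience check, not something the model code requires):

```python
import transformers
from packaging import version

assert version.parse(transformers.__version__) >= version.parse('4.37.2'), \
    f'transformers >= 4.37.2 required, found {transformers.__version__}'
```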
💡 Usage Tips
- For multi-GPU inference, make sure the first and last layers of the LLM are on the same device to avoid errors.
- If an `ImportError` occurs when running the LMDeploy examples, install the required dependency packages as prompted.
- When using the OpenAI-style interface, set a valid API key.