# TimeSearch-R-raw / time_r1 / test_data.py
import copy
import torch
import json
import os
import sys
from PIL import Image
try:
from peft import PeftModel
except ImportError:  # PEFT is optional; only needed when loading LoRA checkpoints
pass
import tqdm
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader, DistributedSampler, SequentialSampler
import torch.distributed as dist
import glob
import datetime
import numpy as np
from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor, AutoTokenizer
from time_r1.dataset import LazyVLDataset, LazyVLDatasetBaseline, LazyVLDatasetSFT, get_dataset_class
from time_r1.utils import setup_ddp, cleanup_ddp, merge_results
from time_r1.utils.qwen_vl_utils import process_vision_info, replace_vision_info_with_placeholder, replace_vision_info_with_base64
from time_r1.environment.video_env import VideoInteraction
import re
import math
import ast
from datetime import datetime
# from time_r1.reward.llm_judge import llm_judge_score
MAX_TOOL_USE_NUM=10
# import debugpy
# try:
# # 5678 is the default attach port in the VS Code debug configurations. Unless a host and port are specified, host defaults to 127.0.0.1
# debugpy.listen(("localhost", 9519))
# print("Waiting for debugger attach")
# debugpy.wait_for_client()
# except Exception as e:
# pass
def parse_options_string(options_str):
"""
将选项字符串解析为candidates列表
例如: "A) First, a cartoon...\nB) First, an image..." -> ["First, a cartoon...", "First, an image..."]
"""
candidates = []
lines = options_str.strip().split('\n')
current_option = ""
for line in lines:
line = line.strip()
if not line:
continue
        # Check whether this line starts a new option (prefixed with "A)", "B)", etc.)
        if len(line) >= 2 and line[0].isalpha() and line[1] == ')':
            # If option text has already been collected, flush it into candidates
            if current_option:
                candidates.append(current_option.strip())
            # Start a new option, dropping the "A) " prefix
            current_option = line[3:] if len(line) > 3 else ""
        else:
            # Continue accumulating the current option's text
            if current_option:
                current_option += " " + line
    # Append the last option
if current_option:
candidates.append(current_option.strip())
return candidates
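# A minimal usage sketch of parse_options_string (illustrative helper, never called by the
# pipeline; the options string below is a made-up example in the "A) ...\nB) ..." format).
def _example_parse_options_string():
    opts = "A) First, a cartoon appears.\nB) First, an image appears."
    # Each "X) " prefix is stripped and the remaining text becomes one candidate.
    assert parse_options_string(opts) == [
        "First, a cartoon appears.",
        "First, an image appears.",
    ]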
def extract_prediction_from_message(messages):
"""
从消息列表中提取最后一个 assistant 消息中的 answer 标签内容
"""
answers = []
patterns = [
        r'<answer>(.*?)</answer>',  # original pattern
]
for message in messages:
if message['role'] == 'assistant':
for content in message['content']:
if content['type'] == 'text':
text = content['text']
                    # Try every regex pattern
for pattern in patterns:
all_answers = re.findall(pattern, text, re.DOTALL)
if all_answers:
answer = all_answers[-1].strip()
answers.append(answer)
break
if len(answers) > 0:
result = answers[-1]
else:
result = ""
for message in messages:
if message['role'] == 'assistant':
for content in message['content']:
if content['type'] == 'text':
text = content['text']
for pattern in patterns:
match = re.search(pattern, text)
if match:
result = match.group(1).strip()
break
if not result:
result = text.strip()
return result
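# Hedged sketch of extract_prediction_from_message on a hand-written assistant message
# (the message content is hypothetical): the text inside the <answer> tag is returned.
def _example_extract_prediction_from_message():
    messages = [
        {"role": "user", "content": [{"type": "text", "text": "Which option is correct?"}]},
        {"role": "assistant", "content": [
            {"type": "text", "text": "<think>comparing the options</think>\n<answer>B</answer>"},
        ]},
    ]
    assert extract_prediction_from_message(messages) == "B"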
def is_valid_json_time_format(s):
"""检查JSON格式是否正确"""
try:
item = json.loads(s)
start_time = item.get("start_time")
end_time = item.get("end_time")
if start_time is None or end_time is None:
return False
if start_time < 0 or end_time < 0:
return False
if start_time > end_time:
return False
if not isinstance(start_time, (int, float)) or not isinstance(end_time, (int, float)):
return False
return True
except Exception as e:
print(f"Error in is_valid_json_time_format: {e}, s: {s}")
return False
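# Hedged sketch of is_valid_json_time_format with made-up time strings: a well-formed JSON
# span with 0 <= start_time <= end_time passes, a reversed span fails (malformed JSON also
# fails, with a diagnostic print).
def _example_is_valid_json_time_format():
    assert is_valid_json_time_format('{"start_time": 12.0, "end_time": 34.5}') is True
    assert is_valid_json_time_format('{"start_time": 40, "end_time": 20}') is False  # reversed span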
def merge_intervals(intervals):
"""合并重叠或相邻的时间区间"""
if not intervals:
return []
intervals = [list(i) for i in intervals] # tuple to list
    # Sort by start time
    sorted_intervals = sorted(intervals, key=lambda x: x[0])
    merged = [sorted_intervals[0][:]]  # copy the first interval
for current in sorted_intervals[1:]:
last = merged[-1]
if current[0] <= last[1]:
            # Merge with the previous interval
merged[-1][1] = max(last[1], current[1])
else:
merged.append(current[:])
return merged
def compute_iou(list_a, list_b):
    # Example usage:
    #   list_a = [[0, 3], [2, 4], [22, 25]]
    #   list_b = [[1, 5], [2, 2], [2, 4]]
    #   iou = compute_iou(list_a, list_b)
    # Merge the intervals within each list
merged_a = merge_intervals(list_a)
merged_b = merge_intervals(list_b)
    # Total covered length of each list
    len_a = sum(end - start for start, end in merged_a)
    len_b = sum(end - start for start, end in merged_b)
    # Total length of the intersection
intersection = 0
i = j = 0
while i < len(merged_a) and j < len(merged_b):
a_start, a_end = merged_a[i]
b_start, b_end = merged_b[j]
        # Overlap between the current pair of intervals
start = max(a_start, b_start)
end = min(a_end, b_end)
if start < end:
intersection += end - start
        # Advance the pointer whose interval ends first
if a_end < b_end:
i += 1
else:
j += 1
    # Total length of the union
union = len_a + len_b - intersection
if union == 0:
return 1.0
return intersection / union
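# Worked sketch of merge_intervals / compute_iou using the sample intervals from the comment
# above (expected values computed by hand for illustration; not part of the pipeline).
def _example_compute_iou():
    list_a = [[0, 3], [2, 4], [22, 25]]
    list_b = [[1, 5], [2, 2], [2, 4]]
    assert merge_intervals(list_a) == [[0, 4], [22, 25]]  # total length 7
    assert merge_intervals(list_b) == [[1, 5]]            # total length 4
    # intersection is [1, 4] -> length 3; union is 7 + 4 - 3 = 8
    assert abs(compute_iou(list_a, list_b) - 3 / 8) < 1e-9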
def extract_sequence_index(answer):
# input: The sequence of the topics introduced in this video is (a) Men are setting up a tent in the dark, (c) Women do their beauty routine in the bathroom, (b) A baby is eating from a large platter of french fries on a black tray.
    # output: (a)(c)(b)
pattern = r'(\([a-g,1-6]\))'
matches = re.findall(pattern, answer)
return ''.join(matches)
def parse_multi_choice_response(response, all_choices, index2ans):
"""
Parse the prediction from the generated response.
Return the predicted index e.g., A, B, C, D.
https://github.com/MMMU-Benchmark/MMMU/blob/51ce7f3e829c16bb44bc5445782686b4c3508794/eval/eval_utils.py#L10
"""
for char in [",", ".", "!", "?", ";", ":", "'"]:
response = response.strip(char)
response = " " + response + " " # add space to avoid partial match
index_ans = True
ans_with_brack = False
candidates = []
for choice in all_choices: # e.g., (A) (B) (C) (D)
if f"({choice})" in response:
candidates.append(choice)
ans_with_brack = True
if len(candidates) == 0:
for choice in all_choices: # e.g., A B C D
if f"{choice} " in response:
candidates.append(choice)
if len(candidates) == 0:
for choice in all_choices: # e.g., A. B. C. D.
if f"{choice}." in response:
candidates.append(choice)
if len(candidates) == 0:
for choice in all_choices:
if f"{choice})" in response: # 匹配 "C)" 格式
candidates.append(choice)
if len(candidates) == 0:
for choice in all_choices:
if f"({choice}" in response: # 匹配 "(C" 格式
candidates.append(choice)
# if all above doesn't get candidates, check if the content is larger than 5 tokens and try to parse the example
if len(candidates) == 0 and len(response.split()) > 5:
for index, ans in index2ans.items():
if ans.lower() in response.lower():
candidates.append(index)
index_ans = False # it's content ans.
if len(candidates) == 0: # still not get answer, randomly choose one.
pred_index = None
elif len(candidates) > 1:
start_indexes = []
if index_ans:
if ans_with_brack:
for can in candidates:
index = response.rfind(f"({can})")
start_indexes.append(index) # -1 will be ignored anyway
else:
for can in candidates:
index = response.rfind(f" {can} ")
start_indexes.append(index)
else:
for can in candidates:
index = response.lower().rfind(index2ans[can].lower())
start_indexes.append(index)
# get the last one
pred_index = candidates[np.argmax(start_indexes)]
else: # if only one candidate, use it.
pred_index = candidates[0]
return pred_index
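# Minimal sketch of parse_multi_choice_response on made-up responses: a bracketed letter is
# matched first, and content matching against the option text is the fallback for long answers.
def _example_parse_multi_choice_response():
    index2ans = {"A": "a cartoon appears first", "B": "an image appears first"}
    all_choices = ["A", "B"]
    assert parse_multi_choice_response("The answer is (B).", all_choices, index2ans) == "B"
    # No letter is present, but the response repeats option B's text and is longer than 5 tokens,
    # so content matching selects "B".
    assert parse_multi_choice_response(
        "I believe an image appears first in this video clip", all_choices, index2ans) == "B"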
def has_tag(text: str, tag: str) -> bool:
    return re.search(fr"<{tag}>", text) is not None
def answer_format_check(text):
pattern = re.compile(r'<think>.*?</think>\s*<answer>.*?</answer>', re.DOTALL)
match = re.fullmatch(pattern, text.strip())
return 1.0 if match else 0.0
def tool_call_format_check(text):
pattern = re.compile(r'<think>.*?</think>\s*<tool_call>.*?</tool_call>', re.DOTALL)
match = re.fullmatch(pattern, text.strip())
return 1.0 if match else 0.0
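# Hedged sketch of the two single-turn format checks on hand-written strings: a turn must be
# exactly <think>...</think> followed by <answer>...</answer> (or <tool_call>...</tool_call>).
def _example_single_turn_format_checks():
    assert answer_format_check("<think>reason here</think>\n<answer>B</answer>") == 1.0
    assert answer_format_check("<answer>B</answer>") == 0.0  # missing <think> block
    assert tool_call_format_check("<think>seek</think><tool_call>{}</tool_call>") == 1.0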
def multiturn_format_check(messages, **kwargs):
"""
检查多轮对话中每条 assistant 消息的格式是否严格符合要求:
0. 必须有answer,且answer/tool_call都符合格式
1. 如果包含 answer,必须符合 answer_format_check
2. 如果包含 tool_call,必须符合 tool_call_format_check
3. answer 和 tool_call 不能同时出现在同一条消息中
"""
answer_format_stats = []
tool_call_format_stats = []
for message in messages:
if message["role"] == "assistant":
for content in message["content"]:
if isinstance(content, dict) and content["type"] == "text":
text = content["text"]
                    # answer and tool_call must not appear in the same message
if has_tag(text, "answer") and has_tag(text, "tool_call"):
return 0.0
if has_tag(text, "answer"):
answer_format_stats.append(answer_format_check(text))
elif has_tag(text, "tool_call"):
tool_call_format_stats.append(tool_call_format_check(text))
if len(answer_format_stats) > 0 and all(answer_format_stats) and all(tool_call_format_stats):
return 1.0
else:
return 0.0
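# Minimal sketch of multiturn_format_check on a fabricated two-turn trajectory: every assistant
# turn is well formed and the final turn carries an <answer>, so the score is 1.0.
def _example_multiturn_format_check():
    messages = [
        {"role": "user", "content": [{"type": "text", "text": "Question?"}]},
        {"role": "assistant", "content": [
            {"type": "text", "text": "<think>need more frames</think>\n<tool_call>{\"name\": \"seek_video_frames\"}</tool_call>"},
        ]},
        {"role": "tool", "name": "seek_video_frames", "content": [{"type": "video", "video": "placeholder"}]},
        {"role": "assistant", "content": [
            {"type": "text", "text": "<think>now I can answer</think>\n<answer>B</answer>"},
        ]},
    ]
    assert multiturn_format_check(messages) == 1.0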
def multiturn_format_reward(messages, **kwargs):
"""
Calculate the multiturn format reward.
"""
reward_list = []
for msg in messages:
reward_list.append(multiturn_format_check(msg))
return reward_list
def advanced_tool_success_check(messages):
"""
综合评估工具调用成功情况,包括:
1. 基础工具调用成功检查
2. 工具多样性和数量评估
3. 重复调用惩罚
4. 调用失败惩罚
NOTE: VideoInteraction.avoid_mm_missing=True时,这项永远为1;当使用counterfactual reasoning时,这项不再重要
"""
if not messages:
return 0.0
    # Basic tool-call success check
successful_tools = 0
total_tool_calls = 0
response_signitures_count = dict()
tool_failure_count = 0
for message in messages:
if message.get("role") == "tool" and message.get("name") == "parse_error":
tool_failure_count += 1
if message.get("role") == "tool" and message.get("name") != "parse_error":
total_tool_calls += 1
content = message.get("content", [])
if not isinstance(content, list):
continue
for item in content:
if isinstance(item, dict) and item.get("type") in ["video", "image"]:
successful_tools += 1
break
elif not isinstance(item, dict):
print(f"Error in tool_success_check: {item}, content: {content}")
tool_score = 1.0 / (1.0 + math.exp(-(successful_tools - 2)))
if successful_tools == 0:
tool_score = 0.0
return tool_score
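# Hedged sketch of advanced_tool_success_check on hypothetical tool messages: the score is a
# sigmoid over (successful multimodal tool returns - 2), and 0.0 when nothing succeeded.
def _example_advanced_tool_success_check():
    messages = [
        {"role": "assistant", "content": [{"type": "text", "text": "<think>seek</think><tool_call>{}</tool_call>"}]},
        {"role": "tool", "name": "seek_video_frames", "content": [{"type": "video", "video": "placeholder"}]},
        {"role": "tool", "name": "seek_video_frames", "content": [{"type": "image", "image": "placeholder"}]},
    ]
    # two successful tool returns -> 1 / (1 + exp(-(2 - 2))) = 0.5
    assert abs(advanced_tool_success_check(messages) - 0.5) < 1e-9
    assert advanced_tool_success_check([]) == 0.0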
def tool_usage_reward(batch_messages, **kwargs):
"""
适配器函数,用于直接放入 reward_functions 列表中
"""
return [advanced_tool_success_check(msg) for msg in batch_messages]
def accuracy_reward(messages, **kwargs):
    # completions: list of complete responses generated by the model
    # messages: list of conversation messages
    # target: list of target/reference answers (each element is a dict containing an "answer" key)
    # question: list of questions
    # type: list of question types
    # **kwargs: extra arguments
"""
Calculate the llm judge reward.
"""
reward_list = []
for msg, sol, q, options in zip(messages, kwargs["answer"], kwargs["question"], kwargs["options"]):
question = q
        # Parse the options string
        options_str = options
        candidates = parse_options_string(options_str)
        # The correct answer as a letter, e.g. "A", "B", "C"
        correct_answer = sol
        # Extract the model's predicted text
        pred_text = extract_prediction_from_message(msg)
        # Parse the prediction
        all_choices = []
        index2ans = {}
        for i, option in enumerate(candidates):
            index2ans[chr(ord("A") + i)] = option
            all_choices.append(chr(ord("A") + i))
        parsed_pred = parse_multi_choice_response(pred_text, all_choices, index2ans)  # extract the letter ("A", "B", ...) from the text
        if parsed_pred is None:
            # Parsing failed, score 0
            reward_list.append(0.0)
else:
is_correct = (parsed_pred.strip().upper() == sol.strip().upper())
reward_list.append(1.0 if is_correct else 0.0)
return reward_list
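# End-to-end sketch of accuracy_reward on a single fabricated sample (question, options and
# answer below are invented purely for illustration, not taken from the dataset).
def _example_accuracy_reward():
    msg = [
        {"role": "assistant", "content": [
            {"type": "text", "text": "<think>option B matches the video</think>\n<answer>B</answer>"},
        ]},
    ]
    rewards = accuracy_reward(
        [msg],
        answer=["B"],
        question=["What happens first in the video?"],
        options=["A) A cartoon appears.\nB) An image appears."],
    )
    assert rewards == [1.0]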
#################################################################################################
@torch.no_grad()
def forward_model(data_batch, model, processor, max_new_tokens=2048):
    # Compatible with both TP mode (no dist initialized) and DDP mode
if dist.is_initialized():
rank = dist.get_rank()
device = f"cuda:{rank}"
else:
rank = 0
device = "cuda"
messages_batch = [item["messages"] for item in data_batch]
image_inputs, video_inputs, video_kwargs = process_vision_info(messages_batch, return_video_kwargs=True)
text_inputs = processor.apply_chat_template(messages_batch, tokenize=False, add_generation_prompt=True)
inputs = processor(
text=text_inputs,
images=image_inputs,
videos=video_inputs,
fps=video_kwargs["fps"],
padding=True,
return_tensors="pt",
add_special_tokens=False,
)
inputs = inputs.to(device)
output_ids = model.generate(**inputs, max_new_tokens=max_new_tokens)
generated_ids = [output_ids[i][len(inputs.input_ids[i]):] for i in range(len(output_ids))]
output_text = processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
return output_text
def forward_model_with_env(data_batch, model, processor, use_vllm=False, save_vision_info=False, **kwargs):
env = VideoInteraction(processor, model, max_turns=10, max_new_tokens_per_turn=1024, use_vllm=use_vllm)
messages_batch = [item["messages"] for item in data_batch]
multimodal_cache_batch = [item["multimodal_cache"] for item in data_batch]
output_msgs = env.generate(messages_batch, multimodal_cache=multimodal_cache_batch, **kwargs)
if save_vision_info:
output_msgs = replace_vision_info_with_base64(output_msgs)
else:
output_msgs = replace_vision_info_with_placeholder(output_msgs)
return output_msgs
def setup_model(model_base, lora_checkpoint=None):
local_rank = int(os.environ.get("LOCAL_RANK", 0))
device = f"cuda:{local_rank}"
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
model_base,
torch_dtype=torch.bfloat16,
use_sliding_window=True,
attn_implementation="flash_attention_2",
device_map=device
)
if lora_checkpoint is not None:
print("Model loaded, type:", type(model))
model = PeftModel.from_pretrained(model, lora_checkpoint)
print("LORA loaded, type:", type(model))
model = model.merge_and_unload()
print("LORA merged, type:", type(model))
model.eval()
model = model.to(device)
processor = AutoProcessor.from_pretrained(model_base)
return model, processor
def setup_vllm(model_base, limit_image_per_prompt=1024, limit_video_per_prompt=10):
    # In TP mode there is no need to pin a specific device; vLLM manages all visible GPUs
print(f"Setting up VLLM model with TP={torch.cuda.device_count()}")
from vllm import LLM, SamplingParams
model = LLM(
model=model_base,
        tensor_parallel_size=torch.cuda.device_count(),  # automatically use all visible GPUs
        gpu_memory_utilization=0.9,  # memory utilization can be slightly higher in TP mode
        dtype="bfloat16",
        enable_prefix_caching=False,
        enable_chunked_prefill=False,
        max_model_len=None,
        seed=42,  # fixed random seed
limit_mm_per_prompt={"image": limit_image_per_prompt, "video": limit_video_per_prompt},
)
processor = AutoProcessor.from_pretrained(model_base)
tokenizer = AutoTokenizer.from_pretrained(model_base)
print('tokenizer.padding_side before:', tokenizer.padding_side)
    tokenizer.padding_side = "left"  # pad on the left
processor.tokenizer = tokenizer
sampling_params = SamplingParams(
n=1,
repetition_penalty=1.0,
max_tokens=1024,
        temperature=1.0,  # temperature for vLLM sampling; when generating data, consider trying 0.7, 1.0, or 0.0
top_p=0.95,
top_k=-1,
seed=42
)
return model, processor, sampling_params
def main(input_path, save_path,
data_root="datasets",
model_base="Qwen/Qwen2.5-VL-7B-Instruct",
prompt_template="v4",
tool_name_list=["seek_video_frames"],
use_env=True,
use_vllm=False,
batch_size=1,
lora_checkpoint=None,
dataset_type="lazy_dataset",
num_data_workers=4,
total_video_tokens=15360,
max_frames=768,
min_tokens=16,
max_tokens=192,
save_vision_info=False,
append_time_instruction=False,
):
    # [Change 1] Decide whether to use vLLM TP mode.
    # If vLLM is enabled and more than one GPU is visible, assume TP mode and do not start DDP.
is_tp_mode = use_vllm and torch.cuda.device_count() > 1
is_ddp_mode = "LOCAL_RANK" in os.environ
if is_tp_mode:
print(f"Running in vLLM TP Mode on {torch.cuda.device_count()} GPUs.")
rank = 0
world_size = 1
elif is_ddp_mode:
        # Only initialize DDP when the script was actually launched with torchrun
setup_ddp()
rank = dist.get_rank()
local_rank = int(os.environ.get("LOCAL_RANK", 0))
world_size = dist.get_world_size()
else:
        # 3. Single-GPU debug mode
        # Neither TP nor DDP, i.e. launched directly with `python script.py` on a single GPU
print("Running in Single-GPU Non-Distributed Mode (Debug).")
rank = 0
world_size = 1
        # Set the device manually so later code does not break
if torch.cuda.is_available():
torch.cuda.set_device(0)
dataset_class = get_dataset_class(dataset_type)
video_kwargs = {
"total_pixels": total_video_tokens * 28 * 28,
"min_pixels": min_tokens * 28 * 28,
"max_pixels": max_tokens * 28 * 28,
"max_frames": max_frames,
}
dataset = dataset_class(input_path, data_root, prompt_template=prompt_template, tool_name_list=tool_name_list, video_kwargs=video_kwargs, append_time_instruction=append_time_instruction)
    if use_vllm:
        model, processor, sampling_params = setup_vllm(model_base)
    else:
        model, processor = setup_model(model_base, lora_checkpoint)
        sampling_params = None  # only consumed by the vLLM generation path below
    # [Change 2] Sampler selection
    if is_tp_mode:
        # TP mode: a single process handles all the data, no sharding needed
        sampler = SequentialSampler(dataset)
    else:
        # DDP mode: the data must be sharded across ranks
sampler = DistributedSampler(dataset, num_replicas=world_size, rank=rank, shuffle=False)
dataloader = DataLoader(dataset, batch_size=batch_size, sampler=sampler, num_workers=num_data_workers, collate_fn=lambda x: x)
if not os.path.exists(save_path):
os.makedirs(save_path, exist_ok=True)
    # [Change 3] Output file naming
    if is_tp_mode:
        # TP mode writes the final file directly, no rank suffix needed
local_save_path = os.path.join(save_path, "prediction_tp.jsonl")
else:
local_save_path = os.path.join(save_path, f"rank{rank}.jsonl")
with open(local_save_path, "w") as writer:
for data_batch in tqdm.tqdm(dataloader):
if use_env:
pred_batch = forward_model_with_env(data_batch, model, processor, use_vllm=use_vllm, save_vision_info=save_vision_info, sampling_params=sampling_params)
else:
                # Simplification: a non-env vLLM call would need its own generate wrapper;
                # here we assume forward_model is the main path
pred_batch = forward_model(data_batch, model, processor)
batch_answers = [item.get('answer', '') for item in data_batch]
batch_questions = [item.get('question', '') for item in data_batch]
batch_options = [item.get('options', '') for item in data_batch]
            #### Filtering: samples whose tool calls failed still need to be handled later and are dropped as well
acc_rewards = accuracy_reward(pred_batch, answer=batch_answers, question=batch_questions, options=batch_options)
            tool_rewards = tool_usage_reward(pred_batch)
            format_rewards = multiturn_format_reward(pred_batch)
            for data, pred, score_1, score_2, score_3 in zip(data_batch, pred_batch, acc_rewards, tool_rewards, format_rewards):
if score_1 == 0 or score_2 < 0.1 or score_3 == 0:
data['select'] = False
else:
data['select'] = True
data['prediction'] = pred
data['score_accuracy'] = score_1
data['score_tool_usage'] = score_2
data['score_format'] = score_3
for k in ["image_inputs", "video_inputs", "video_kwargs", "messages", "multimodal_cache"]:
if k in data:
data.pop(k)
writer.write(json.dumps(data) + "\n")
writer.flush()
    # [Change 4] Cleanup
    if not is_tp_mode and dist.is_initialized():
        dist.barrier()
if rank == 0:
merge_results(save_path)
cleanup_ddp()
else:
print(f"Results saved to {local_save_path}")
print("Inference done.")
if __name__ == "__main__":
import multiprocessing
    # [Key fix] Force the multiprocessing start method to "spawn".
    # This must be set before any CUDA operation.
try:
multiprocessing.set_start_method('spawn', force=True)
except RuntimeError:
pass
import fire
fire.Fire(main)
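# Hedged launch sketches (paths and directories below are placeholders, not from the repo):
#   Single-GPU debug: python test_data.py --input_path=<annotations.json> --save_path=<out_dir>
#   DDP:              torchrun --nproc_per_node=8 test_data.py --input_path=<annotations.json> --save_path=<out_dir>
#   vLLM TP:          python test_data.py --use_vllm=True --input_path=<annotations.json> --save_path=<out_dir>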