import copy
import glob
import json
import math
import os
import re
import sys
import ast

import numpy as np
import torch
import torch.distributed as dist
import tqdm
from datetime import datetime
from PIL import Image
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader, DistributedSampler, SequentialSampler
from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor, AutoTokenizer

try:
    from peft import PeftModel
except ImportError:
    # peft is optional; it is only needed when loading a LoRA checkpoint.
    pass

from time_r1.dataset import LazyVLDataset, LazyVLDatasetBaseline, LazyVLDatasetSFT, get_dataset_class
from time_r1.utils import setup_ddp, cleanup_ddp, merge_results
from time_r1.utils.qwen_vl_utils import process_vision_info, replace_vision_info_with_placeholder, replace_vision_info_with_base64
from time_r1.environment.video_env import VideoInteraction

MAX_TOOL_USE_NUM = 10


def parse_options_string(options_str):
    """
    Parse an options string into a list of candidate answers.

    Example: "A) First, a cartoon...\nB) First, an image..." -> ["First, a cartoon...", "First, an image..."]
    """
    candidates = []
    lines = options_str.strip().split('\n')

    current_option = ""
    for line in lines:
        line = line.strip()
        if not line:
            continue

        # A new option starts with a letter followed by ')', e.g. "A) ...".
        if len(line) >= 2 and line[0].isalpha() and line[1] == ')':
            if current_option:
                candidates.append(current_option.strip())
            current_option = line[3:] if len(line) > 3 else ""
        else:
            # Continuation of the previous option wrapped across lines.
            if current_option:
                current_option += " " + line

    if current_option:
        candidates.append(current_option.strip())

    return candidates
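
# Illustrative usage (not part of the pipeline): the option texts are returned
# without their letter prefixes, e.g.
#   parse_options_string("A) a red car\nB) a blue car") -> ["a red car", "a blue car"]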


def extract_prediction_from_message(messages):
    """
    Extract the content of the last <answer> tag found in the assistant messages of a conversation.
    """
    answers = []
    patterns = [
        r'<answer>(.*?)</answer>',
    ]

    for message in messages:
        if message['role'] == 'assistant':
            for content in message['content']:
                if content['type'] == 'text':
                    text = content['text']

                    for pattern in patterns:
                        all_answers = re.findall(pattern, text, re.DOTALL)
                        if all_answers:
                            answer = all_answers[-1].strip()
                            answers.append(answer)
                            break

    if len(answers) > 0:
        result = answers[-1]
    else:
        # Fallback: no <answer> tag was found above, so fall back to the raw
        # text of the last assistant message (or a tag match if one appears).
        result = ""
        for message in messages:
            if message['role'] == 'assistant':
                for content in message['content']:
                    if content['type'] == 'text':
                        text = content['text']
                        for pattern in patterns:
                            match = re.search(pattern, text)
                            if match:
                                result = match.group(1).strip()
                                break
                        if not result:
                            result = text.strip()

    return result


def is_valid_json_time_format(s):
    """Check that `s` is valid JSON with well-formed start_time/end_time fields."""
    try:
        item = json.loads(s)
        start_time = item.get("start_time")
        end_time = item.get("end_time")
        if start_time is None or end_time is None:
            return False
        # Type-check before comparing, so non-numeric values do not raise.
        if not isinstance(start_time, (int, float)) or not isinstance(end_time, (int, float)):
            return False
        if start_time < 0 or end_time < 0:
            return False
        if start_time > end_time:
            return False
        return True
    except Exception as e:
        print(f"Error in is_valid_json_time_format: {e}, s: {s}")
        return False
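
# Illustrative examples (not part of the pipeline):
#   is_valid_json_time_format('{"start_time": 3.5, "end_time": 12.0}')  -> True
#   is_valid_json_time_format('{"start_time": 12.0, "end_time": 3.5}')  -> False (start after end)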


def merge_intervals(intervals):
    """Merge overlapping or adjacent time intervals."""
    if not intervals:
        return []
    intervals = [list(i) for i in intervals]

    sorted_intervals = sorted(intervals, key=lambda x: x[0])
    merged = [sorted_intervals[0][:]]
    for current in sorted_intervals[1:]:
        last = merged[-1]
        if current[0] <= last[1]:
            # Overlapping or touching: extend the previous interval.
            merged[-1][1] = max(last[1], current[1])
        else:
            merged.append(current[:])
    return merged
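
# Illustrative example (not part of the pipeline):
#   merge_intervals([[0, 5], [4, 10], [12, 15]]) -> [[0, 10], [12, 15]]
# The first two intervals overlap and are merged; the third stays separate.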


def compute_iou(list_a, list_b):
    """
    Compute the temporal IoU between two lists of [start, end] intervals.

    Both lists are merged first, so overlapping intervals within a list are
    counted only once.
    """
    merged_a = merge_intervals(list_a)
    merged_b = merge_intervals(list_b)

    # Total covered length of each side.
    len_a = sum(end - start for start, end in merged_a)
    len_b = sum(end - start for start, end in merged_b)

    # Two-pointer sweep over the sorted, merged intervals to accumulate the
    # length of the intersection.
    intersection = 0
    i = j = 0
    while i < len(merged_a) and j < len(merged_b):
        a_start, a_end = merged_a[i]
        b_start, b_end = merged_b[j]

        start = max(a_start, b_start)
        end = min(a_end, b_end)
        if start < end:
            intersection += end - start

        # Advance the interval that ends first.
        if a_end < b_end:
            i += 1
        else:
            j += 1

    union = len_a + len_b - intersection
    if union == 0:
        return 1.0
    return intersection / union
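
# Worked example (illustrative):
#   compute_iou([[0, 10]], [[5, 15]])
#   intersection = 10 - 5 = 5, union = 10 + 10 - 5 = 15, IoU = 5 / 15 ≈ 0.333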


def extract_sequence_index(answer):
    """Extract ordered index tokens such as "(a)(c)(b)" or "(1)(3)" from an answer string."""
    pattern = r'(\([a-g,1-6]\))'
    matches = re.findall(pattern, answer)
    return ''.join(matches)
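
# Illustrative example (not part of the pipeline):
#   extract_sequence_index("(b) then (a) then (c)") -> "(b)(a)(c)"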


def parse_multi_choice_response(response, all_choices, index2ans):
    """
    Parse the prediction from the generated response.
    Return the predicted index e.g., A, B, C, D.
    https://github.com/MMMU-Benchmark/MMMU/blob/51ce7f3e829c16bb44bc5445782686b4c3508794/eval/eval_utils.py#L10
    """
    for char in [",", ".", "!", "?", ";", ":", "'"]:
        response = response.strip(char)
    response = " " + response + " "

    index_ans = True
    ans_with_brack = False
    candidates = []
    # Look for bracketed choices first, e.g. "(A)".
    for choice in all_choices:
        if f"({choice})" in response:
            candidates.append(choice)
            ans_with_brack = True

    if len(candidates) == 0:
        # e.g. "A "
        for choice in all_choices:
            if f"{choice} " in response:
                candidates.append(choice)

    if len(candidates) == 0:
        # e.g. "A."
        for choice in all_choices:
            if f"{choice}." in response:
                candidates.append(choice)

    if len(candidates) == 0:
        # e.g. "A)"
        for choice in all_choices:
            if f"{choice})" in response:
                candidates.append(choice)

    if len(candidates) == 0:
        # e.g. "(A" without a closing bracket
        for choice in all_choices:
            if f"({choice}" in response:
                candidates.append(choice)

    # If no index was found and the response is long, match the option text itself.
    if len(candidates) == 0 and len(response.split()) > 5:
        for index, ans in index2ans.items():
            if ans.lower() in response.lower():
                candidates.append(index)
                index_ans = False

    if len(candidates) == 0:
        pred_index = None
    elif len(candidates) > 1:
        # Multiple matches: keep the one that appears last in the response.
        start_indexes = []
        if index_ans:
            if ans_with_brack:
                for can in candidates:
                    index = response.rfind(f"({can})")
                    start_indexes.append(index)
            else:
                for can in candidates:
                    index = response.rfind(f" {can} ")
                    start_indexes.append(index)
        else:
            for can in candidates:
                index = response.lower().rfind(index2ans[can].lower())
                start_indexes.append(index)

        pred_index = candidates[np.argmax(start_indexes)]
    else:
        pred_index = candidates[0]

    return pred_index
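
# Illustrative example (not part of the pipeline):
#   parse_multi_choice_response("The answer is (B).", ["A", "B", "C", "D"],
#                                {"A": "a cat", "B": "a dog", "C": "a car", "D": "a tree"})
#   -> "B"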


def has_tag(text: str, tag: str) -> bool:
    return re.search(fr"<{tag}>", text) is not None


def answer_format_check(text):
    pattern = re.compile(r'<think>.*?</think>\s*<answer>.*?</answer>', re.DOTALL)
    match = re.fullmatch(pattern, text.strip())
    return 1.0 if match else 0.0


def tool_call_format_check(text):
    pattern = re.compile(r'<think>.*?</think>\s*<tool_call>.*?</tool_call>', re.DOTALL)
    match = re.fullmatch(pattern, text.strip())
    return 1.0 if match else 0.0
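
# Illustrative examples (not part of the pipeline):
#   answer_format_check("<think>reasoning...</think>\n<answer>B</answer>")  -> 1.0
#   answer_format_check("<answer>B</answer>")                               -> 0.0 (missing <think> block)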


def multiturn_format_check(messages, **kwargs):
    """
    Check that every assistant message in a multi-turn conversation strictly follows the expected format:
    0. There must be at least one answer, and every answer/tool_call message must be well formed.
    1. A message containing an answer must pass answer_format_check.
    2. A message containing a tool_call must pass tool_call_format_check.
    3. answer and tool_call must never appear in the same message.
    """
    answer_format_stats = []
    tool_call_format_stats = []

    for message in messages:
        if message["role"] == "assistant":
            for content in message["content"]:
                if isinstance(content, dict) and content["type"] == "text":
                    text = content["text"]

                    if has_tag(text, "answer") and has_tag(text, "tool_call"):
                        return 0.0
                    if has_tag(text, "answer"):
                        answer_format_stats.append(answer_format_check(text))
                    elif has_tag(text, "tool_call"):
                        tool_call_format_stats.append(tool_call_format_check(text))
    if len(answer_format_stats) > 0 and all(answer_format_stats) and all(tool_call_format_stats):
        return 1.0
    else:
        return 0.0
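
# Illustrative example (not part of the pipeline): a conversation whose single
# assistant turn wraps its reasoning in <think> and its answer in <answer> scores 1.0:
#   multiturn_format_check([
#       {"role": "user", "content": [{"type": "text", "text": "Which option is correct?"}]},
#       {"role": "assistant", "content": [{"type": "text",
#           "text": "<think>...</think>\n<answer>A</answer>"}]},
#   ])  -> 1.0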


def multiturn_format_reward(messages, **kwargs):
    """
    Calculate the multiturn format reward.
    """
    reward_list = []
    for msg in messages:
        reward_list.append(multiturn_format_check(msg))
    return reward_list


def advanced_tool_success_check(messages):
    """
    Aggregate check of tool-call success for one conversation, intended to cover:
    1. Basic tool-call success.
    2. Tool diversity and call count.
    3. Penalty for repeated calls.
    4. Penalty for failed calls.
    NOTE: when VideoInteraction.avoid_mm_missing=True this score is always 1;
    when counterfactual reasoning is used, this term is no longer important.
    """
    if not messages:
        return 0.0

    successful_tools = 0
    total_tool_calls = 0
    response_signatures_count = dict()
    tool_failure_count = 0

    for message in messages:
        if message.get("role") == "tool" and message.get("name") == "parse_error":
            tool_failure_count += 1
        if message.get("role") == "tool" and message.get("name") != "parse_error":
            total_tool_calls += 1
            content = message.get("content", [])
            if not isinstance(content, list):
                continue
            for item in content:
                if isinstance(item, dict) and item.get("type") in ["video", "image"]:
                    # A tool call counts as successful if it returned visual content.
                    successful_tools += 1
                    break
                elif not isinstance(item, dict):
                    print(f"Error in tool_success_check: {item}, content: {content}")
    # Sigmoid centred at 2 successful tool calls; 0 successful calls score 0.
    tool_score = 1.0 / (1.0 + math.exp(-(successful_tools - 2)))
    if successful_tools == 0:
        tool_score = 0.0
    return tool_score
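
# Worked example of the sigmoid score (illustrative):
#   successful_tools = 0 -> 0.0 (explicitly zeroed)
#   successful_tools = 1 -> 1 / (1 + e^1)  ≈ 0.269
#   successful_tools = 2 -> 0.5
#   successful_tools = 3 -> 1 / (1 + e^-1) ≈ 0.731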


def tool_usage_reward(batch_messages, **kwargs):
    """
    Adapter so this check can be placed directly into a reward_functions list.
    """
    return [advanced_tool_success_check(msg) for msg in batch_messages]


def accuracy_reward(messages, **kwargs):
    """
    Calculate the multiple-choice accuracy reward: parse the predicted option
    letter from each conversation and compare it against the ground-truth answer.
    """
    reward_list = []
    for msg, sol, q, options in zip(messages, kwargs["answer"], kwargs["question"], kwargs["options"]):
        question = q

        options_str = options
        candidates = parse_options_string(options_str)

        correct_answer = sol

        pred_text = extract_prediction_from_message(msg)

        # Build the choice letters ("A", "B", ...) and the letter -> option text mapping.
        all_choices = []
        index2ans = {}
        for i, option in enumerate(candidates):
            index2ans[chr(ord("A") + i)] = option
            all_choices.append(chr(ord("A") + i))
        parsed_pred = parse_multi_choice_response(pred_text, all_choices, index2ans)
        if parsed_pred is None:
            # No option letter could be parsed from the prediction.
            reward_list.append(0.0)
        else:
            is_correct = (parsed_pred.strip().upper() == sol.strip().upper())
            reward_list.append(1.0 if is_correct else 0.0)
    return reward_list


@torch.no_grad()
def forward_model(data_batch, model, processor, max_new_tokens=2048):
    # Place inputs on this rank's GPU when running under torch.distributed.
    if dist.is_initialized():
        rank = dist.get_rank()
        device = f"cuda:{rank}"
    else:
        rank = 0
        device = "cuda"

    messages_batch = [item["messages"] for item in data_batch]
    image_inputs, video_inputs, video_kwargs = process_vision_info(messages_batch, return_video_kwargs=True)
    text_inputs = processor.apply_chat_template(messages_batch, tokenize=False, add_generation_prompt=True)
    inputs = processor(
        text=text_inputs,
        images=image_inputs,
        videos=video_inputs,
        fps=video_kwargs["fps"],
        padding=True,
        return_tensors="pt",
        add_special_tokens=False,
    )
    inputs = inputs.to(device)
    output_ids = model.generate(**inputs, max_new_tokens=max_new_tokens)
    # Strip the prompt tokens so only the newly generated continuation is decoded.
    generated_ids = [output_ids[i][len(inputs.input_ids[i]):] for i in range(len(output_ids))]
    output_text = processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
    return output_text


def forward_model_with_env(data_batch, model, processor, use_vllm=False, save_vision_info=False, **kwargs):
    env = VideoInteraction(processor, model, max_turns=10, max_new_tokens_per_turn=1024, use_vllm=use_vllm)
    messages_batch = [item["messages"] for item in data_batch]
    multimodal_cache_batch = [item["multimodal_cache"] for item in data_batch]
    output_msgs = env.generate(messages_batch, multimodal_cache=multimodal_cache_batch, **kwargs)
    # Either keep the vision inputs (as base64) for later inspection, or replace
    # them with placeholders so the saved JSONL stays small.
    if save_vision_info:
        output_msgs = replace_vision_info_with_base64(output_msgs)
    else:
        output_msgs = replace_vision_info_with_placeholder(output_msgs)
    return output_msgs


def setup_model(model_base, lora_checkpoint=None):
    local_rank = int(os.environ.get("LOCAL_RANK", 0))
    device = f"cuda:{local_rank}"
    model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
        model_base,
        torch_dtype=torch.bfloat16,
        use_sliding_window=True,
        attn_implementation="flash_attention_2",
        device_map=device
    )
    if lora_checkpoint is not None:
        # Load the LoRA adapter and merge it into the base weights for inference.
        print("Model loaded, type:", type(model))
        model = PeftModel.from_pretrained(model, lora_checkpoint)
        print("LORA loaded, type:", type(model))
        model = model.merge_and_unload()
        print("LORA merged, type:", type(model))
    model.eval()
    model = model.to(device)
    processor = AutoProcessor.from_pretrained(model_base)
    return model, processor


def setup_vllm(model_base, limit_image_per_prompt=1024, limit_video_per_prompt=10):
    print(f"Setting up VLLM model with TP={torch.cuda.device_count()}")

    from vllm import LLM, SamplingParams
    model = LLM(
        model=model_base,
        tensor_parallel_size=torch.cuda.device_count(),
        gpu_memory_utilization=0.9,
        dtype="bfloat16",
        enable_prefix_caching=False,
        enable_chunked_prefill=False,
        max_model_len=None,
        seed=42,
        limit_mm_per_prompt={"image": limit_image_per_prompt, "video": limit_video_per_prompt},
    )
    processor = AutoProcessor.from_pretrained(model_base)
    tokenizer = AutoTokenizer.from_pretrained(model_base)
    print('tokenizer.padding_side before:', tokenizer.padding_side)
    # Decoder-only generation requires left padding for batched prompts.
    tokenizer.padding_side = "left"
    processor.tokenizer = tokenizer
    sampling_params = SamplingParams(
        n=1,
        repetition_penalty=1.0,
        max_tokens=1024,
        temperature=1.0,
        top_p=0.95,
        top_k=-1,
        seed=42
    )
    return model, processor, sampling_params


def main(input_path, save_path,
         data_root="datasets",
         model_base="Qwen/Qwen2.5-VL-7B-Instruct",
         prompt_template="v4",
         tool_name_list=["seek_video_frames"],
         use_env=True,
         use_vllm=False,
         batch_size=1,
         lora_checkpoint=None,
         dataset_type="lazy_dataset",
         num_data_workers=4,
         total_video_tokens=15360,
         max_frames=768,
         min_tokens=16,
         max_tokens=192,
         save_vision_info=False,
         append_time_instruction=False,
         ):
    # Three execution modes: vLLM tensor parallelism, DDP data parallelism,
    # or single-GPU non-distributed (debug).
    is_tp_mode = use_vllm and torch.cuda.device_count() > 1
    is_ddp_mode = "LOCAL_RANK" in os.environ
    if is_tp_mode:
        print(f"Running in vLLM TP Mode on {torch.cuda.device_count()} GPUs.")
        rank = 0
        world_size = 1
    elif is_ddp_mode:
        setup_ddp()
        rank = dist.get_rank()
        local_rank = int(os.environ.get("LOCAL_RANK", 0))
        world_size = dist.get_world_size()
    else:
        print("Running in Single-GPU Non-Distributed Mode (Debug).")
        rank = 0
        world_size = 1

        if torch.cuda.is_available():
            torch.cuda.set_device(0)

    dataset_class = get_dataset_class(dataset_type)
    # Token budgets are expressed in 28x28 vision patches.
    video_kwargs = {
        "total_pixels": total_video_tokens * 28 * 28,
        "min_pixels": min_tokens * 28 * 28,
        "max_pixels": max_tokens * 28 * 28,
        "max_frames": max_frames,
    }
    dataset = dataset_class(input_path, data_root, prompt_template=prompt_template, tool_name_list=tool_name_list, video_kwargs=video_kwargs, append_time_instruction=append_time_instruction)

    if use_vllm:
        model, processor, sampling_params = setup_vllm(model_base)
    else:
        model, processor = setup_model(model_base, lora_checkpoint)
        sampling_params = None

    if is_tp_mode:
        # A single process drives all GPUs via tensor parallelism, so iterate sequentially.
        sampler = SequentialSampler(dataset)
    else:
        sampler = DistributedSampler(dataset, num_replicas=world_size, rank=rank, shuffle=False)

    dataloader = DataLoader(dataset, batch_size=batch_size, sampler=sampler, num_workers=num_data_workers, collate_fn=lambda x: x)

    os.makedirs(save_path, exist_ok=True)

    if is_tp_mode:
        local_save_path = os.path.join(save_path, "prediction_tp.jsonl")
    else:
        local_save_path = os.path.join(save_path, f"rank{rank}.jsonl")

    with open(local_save_path, "w") as writer:
        for data_batch in tqdm.tqdm(dataloader):
            if use_env:
                # sampling_params only exists in the vLLM path; pass it through only then.
                env_kwargs = {"sampling_params": sampling_params} if use_vllm else {}
                pred_batch = forward_model_with_env(data_batch, model, processor, use_vllm=use_vllm, save_vision_info=save_vision_info, **env_kwargs)
            else:
                pred_batch = forward_model(data_batch, model, processor)

            batch_answers = [item.get('answer', '') for item in data_batch]
            batch_questions = [item.get('question', '') for item in data_batch]
            batch_options = [item.get('options', '') for item in data_batch]

            acc_rewards = accuracy_reward(pred_batch, answer=batch_answers, question=batch_questions, options=batch_options)
            tool_rewards = tool_usage_reward(pred_batch)
            format_rewards = multiturn_format_reward(pred_batch)
            for data, pred, score_1, score_2, score_3 in zip(data_batch, pred_batch, acc_rewards, tool_rewards, format_rewards):
                # Keep a sample only if it is correct, used tools, and is well formatted.
                if score_1 == 0 or score_2 < 0.1 or score_3 == 0:
                    data['select'] = False
                else:
                    data['select'] = True
                data['prediction'] = pred
                data['score_accuracy'] = score_1
                data['score_tool_usage'] = score_2
                data['score_format'] = score_3
                # Drop bulky / non-serializable fields before writing to JSONL.
                for k in ["image_inputs", "video_inputs", "video_kwargs", "messages", "multimodal_cache"]:
                    if k in data:
                        data.pop(k)
                writer.write(json.dumps(data) + "\n")
                writer.flush()

    if is_ddp_mode:
        dist.barrier()
        if rank == 0:
            merge_results(save_path)
        cleanup_ddp()
    else:
        print(f"Results saved to {local_save_path}")

    print("Inference done.")


if __name__ == "__main__":
    import multiprocessing

    # CUDA does not work with the default "fork" start method when DataLoader
    # workers are used, so force "spawn".
    try:
        multiprocessing.set_start_method('spawn', force=True)
    except RuntimeError:
        pass

    import fire
    fire.Fire(main)