# TimeSearch-R-raw / prepare_frame_cache_1_delete.py
# CSDDSFSFSAFSAF's picture
# Add files using upload-large-folder tool
# bbaf249 verified
from time_r1.utils.qwen_vl_utils_prepare import floor_by_factor, FRAME_FACTOR, smart_resize, ceil_by_factor
import decord
import torch
import os
import tqdm
import glob
import multiprocessing
from torchvision import io, transforms
from torchvision.transforms import InterpolationMode
from functools import partial
import json
# Raise decord's EOF retry budget so near-end-of-stream reads on long/odd
# videos do not fail prematurely.
os.environ['DECORD_EOF_RETRY_MAX'] = '20480'
# ================= Configuration =================
# JSON file listing the videos to cache (entries carry a video path key).
DATA_path = "/data/shuimu.chen/Video-R1-data/Video-R1-103K_sft_candidates_60k_id.json"
# Root directory of the original source videos.
SOURCE_ROOT = "/data/shuimu.chen/Video-R1-data"
# Root directory where the precomputed frame caches are written
# (source tree structure is mirrored underneath it).
TARGET_ROOT = "/data/shuimu.chen/Video-R1-data_cache_60_sft"
# Minimum / maximum number of frames sampled per video.
FPS_MIN_FRAMES = 4
FPS_MAX_FRAMES = 60
# Frame counts are rounded to a multiple of this factor.
# NOTE(review): this shadows the FRAME_FACTOR imported from
# time_r1.utils.qwen_vl_utils_prepare at the top of the file — the local
# value 2 is what get_video_tensor actually uses.
FRAME_FACTOR = 2
# ===========================================
def get_video_tensor(video_path, target_fps=2, image_factor=28, min_pixels=28 * 28 * 4, max_pixels=28 * 28 * 128):
    """Decode a video at a fixed target FPS and return resized frames as a tensor.

    The frame count is clamped to [FPS_MIN_FRAMES, FPS_MAX_FRAMES] (and to the
    total number of frames), rounded down to a multiple of FRAME_FACTOR, and
    frames are sampled uniformly across the whole video. Frames are resized
    with smart_resize so the pixel budget stays within [min_pixels, max_pixels].

    Args:
        video_path: Path to the video file readable by decord.
        target_fps: Desired sampling rate in frames per second.
        image_factor: Spatial dimensions are rounded to multiples of this.
        min_pixels: Lower bound on resized H*W.
        max_pixels: Upper bound on resized H*W.

    Returns:
        dict with:
            "frame_tensor": uint8 tensor of shape (nframes, C, H, W),
            "fps": the effective sampling rate actually achieved.
    """
    reader = decord.VideoReader(video_path)
    n_total = len(reader)
    avg_fps = reader.get_avg_fps()

    # Bounds on the sampled frame count, both aligned to FRAME_FACTOR.
    lower = ceil_by_factor(FPS_MIN_FRAMES, FRAME_FACTOR)
    upper = floor_by_factor(min(FPS_MAX_FRAMES, n_total), FRAME_FACTOR)

    # Ideal count at the requested rate, then clamp and align.
    wanted = n_total / avg_fps * target_fps
    wanted = min(max(wanted, lower), upper, n_total)
    nframes = floor_by_factor(wanted, FRAME_FACTOR)

    # Uniformly spaced indices over the full video.
    indices = torch.linspace(0, n_total - 1, nframes).round().long().tolist()
    batch = reader.get_batch(indices).asnumpy()
    frames = torch.tensor(batch).permute(0, 3, 1, 2)  # NHWC -> NCHW

    # Effective FPS of the sampled sequence (guard against n_total == 0).
    sample_fps = nframes / max(n_total, 1e-6) * avg_fps

    h, w = frames.shape[2], frames.shape[3]
    new_h, new_w = smart_resize(
        h,
        w,
        factor=image_factor,
        min_pixels=min_pixels,
        max_pixels=max_pixels,
    )
    frames = transforms.functional.resize(
        frames,
        [new_h, new_w],
        interpolation=InterpolationMode.BICUBIC,
        antialias=True,
    )
    return {
        "frame_tensor": frames,
        "fps": sample_fps,
    }
def process_single_video(video_path, target_fps=1, image_factor=28, min_pixels=28*28*128, max_pixels=28*28*256):
    """Decode one video and persist its frame cache under TARGET_ROOT.

    The source tree layout is mirrored: a video at
    SOURCE_ROOT/<rel>/x.mp4 is cached at TARGET_ROOT/<rel>/x.mp4.frame_cache.
    Designed as a multiprocessing worker: never raises; returns None on
    success/skip and an error string on failure so the parent can log it.

    Args:
        video_path: Absolute path to the source video.
        target_fps/image_factor/min_pixels/max_pixels: forwarded to
            get_video_tensor.

    Returns:
        None on success or if the cache already exists, otherwise an
        "Error..." string describing what went wrong.
    """
    # 1. Compute the destination path by mirroring the relative layout,
    #    e.g. NeXT-QA/NextQA/NExTVideo/1012/8702139498.mp4 ->
    #    TARGET_ROOT/NeXT-QA/.../8702139498.mp4.frame_cache
    try:
        rel_path = os.path.relpath(video_path, SOURCE_ROOT)
        save_path = os.path.join(TARGET_ROOT, rel_path + ".frame_cache")
    except ValueError:
        # video_path cannot be expressed relative to SOURCE_ROOT
        # (e.g. on a different drive on Windows) — report instead of guessing.
        return f"Error Path: {video_path} is not under {SOURCE_ROOT}"
    # 2. Resume support: skip caches that already exist. Because the save
    #    below is atomic, an existing file is guaranteed complete.
    if os.path.exists(save_path):
        return None
    try:
        # 3. Ensure the mirrored directory exists.
        os.makedirs(os.path.dirname(save_path), exist_ok=True)
        # 4. Decode + resize the frames.
        frame_cache = get_video_tensor(video_path, target_fps, image_factor, min_pixels, max_pixels)
        # 5. Atomic save: write to a temp file, then rename into place.
        #    A crash mid-save therefore never leaves a corrupt cache that
        #    the existence check above would wrongly skip on the next run.
        tmp_path = save_path + ".tmp"
        try:
            torch.save(frame_cache, tmp_path)
            os.replace(tmp_path, save_path)
        except BaseException:
            # Clean up the partial temp file before reporting the failure.
            if os.path.exists(tmp_path):
                os.remove(tmp_path)
            raise
        return None
    except Exception as e:
        # Worker contract: swallow and report, so the pool keeps going.
        return f"Error: {video_path} -> {str(e)}"
def prepare_frame_cache(video_root=None, dataset_path=None, num_workers=8, target_fps=2, overwrite=False, image_factor=28, min_pixels=28*28*128, max_pixels=28*28*256):
    """Precompute frame caches for every video referenced by a dataset JSON.

    Fans the work out over a multiprocessing pool of process_single_video
    workers, shows a tqdm progress bar, and writes any failures to
    'failed_log.txt'.

    Args:
        video_root: Optional root used to absolutize relative video paths
            found in the dataset JSON.
        dataset_path: JSON file listing the videos; defaults to DATA_path.
            If no dataset is available, SOURCE_ROOT is scanned for *.mp4
            (slow, not recommended).
        num_workers: Size of the worker pool.
        target_fps/image_factor/min_pixels/max_pixels: forwarded to each
            worker (see get_video_tensor).
        overwrite: Currently unused — workers always skip existing caches.
            TODO(review): thread this through process_single_video or drop it.
    """
    # Bug fix: the old code unconditionally replaced dataset_path with
    # DATA_path, making the parameter dead. Only fall back when unset.
    if dataset_path is None:
        dataset_path = DATA_path
    print(f"Source Root: {SOURCE_ROOT}")
    print(f"Target Root: {TARGET_ROOT}")
    if dataset_path is not None:
        with open(dataset_path, 'r', encoding='utf-8') as f:
            video_data = json.load(f)
        # Deduplicate paths; tolerate the different keys used across datasets.
        video_paths = set()
        for v in video_data:
            path = v.get("video_path") or v.get("video") or v.get("path")
            if path:
                # Absolutize relative entries against video_root when given.
                if not os.path.isabs(path) and video_root:
                    path = os.path.join(video_root, path)
                video_paths.add(path)
        video_list = list(video_paths)
    else:
        # No JSON given: fall back to scanning the source tree (slow).
        video_list = glob.glob(os.path.join(SOURCE_ROOT, "**", "*.mp4"), recursive=True)
    print(f"Starting processing {len(video_list)} videos...")
    failed_videos = []
    # Bind the per-video parameters once; the pool only varies video_path.
    func = partial(process_single_video, target_fps=target_fps, image_factor=image_factor, min_pixels=min_pixels, max_pixels=max_pixels)
    with multiprocessing.Pool(processes=num_workers) as pool:
        # imap_unordered keeps the progress bar responsive regardless of
        # which videos finish first.
        for result in tqdm.tqdm(pool.imap_unordered(func, video_list), total=len(video_list)):
            if result is not None:
                failed_videos.append(result)
    print("\n" + "="*30)
    print("Processing Complete.")
    if failed_videos:
        print(f"❌ {len(failed_videos)} videos failed to process. Check 'failed_log.txt'")
        # Persist failures so they can be retried / inspected later.
        with open("failed_log.txt", "w") as f:
            for err in failed_videos:
                f.write(err + "\n")
                print(err)
    else:
        print("✅ All videos processed successfully.")
if __name__ == "__main__":
    # Expose prepare_frame_cache as a CLI: python-fire maps every keyword
    # argument (dataset_path, num_workers, target_fps, ...) to a flag.
    import fire
    fire.Fire(prepare_frame_cache)