# Extraction metadata (not part of the script): file size 6,335 bytes, commit bbaf249.
from time_r1.utils.qwen_vl_utils_prepare import floor_by_factor, FRAME_FACTOR, smart_resize, ceil_by_factor
import decord
import torch
import os
import tqdm
import glob
import multiprocessing
from torchvision import io, transforms
from torchvision.transforms import InterpolationMode
from functools import partial
from time_r1.utils.io import load_jsonl
import debugpy
# try:
# # 5678 is the default attach port in the VS Code debug configurations. Unless a host and port are specified, host defaults to 127.0.0.1
# debugpy.listen(("localhost", 9511))
# print("Waiting for debugger attach")
# debugpy.wait_for_client()
# except Exception as e:
# pass
FPS_MIN_FRAMES = 4  # lower bound on the number of frames sampled per video
FPS_MAX_FRAMES = 120 # upper bound on sampled frames; tune for available GPU memory (60-80 is a common range)
# NOTE(review): this redefinition shadows the FRAME_FACTOR imported from
# qwen_vl_utils_prepare above — confirm both are meant to be 2.
FRAME_FACTOR = 2
def get_video_tensor(video_path, target_fps=2, image_factor = 28, min_pixels = 28 * 28 * 4, max_pixels = 28 * 28 * 128):
    """Decode a video at a fixed sampling rate and return resized frames as a tensor.

    Frames are sampled uniformly so the effective rate is roughly ``target_fps``;
    the frame count is clamped to [FPS_MIN_FRAMES, FPS_MAX_FRAMES] and rounded to
    a multiple of FRAME_FACTOR, and each frame is resized via ``smart_resize`` so
    its pixel count stays within [min_pixels, max_pixels]. Intended to pre-compute
    frame caches for later training.

    Args:
        video_path: path to a video file readable by decord.
        target_fps: desired sampling rate, frames per second.
        image_factor: resized height/width are kept multiples of this.
        min_pixels: lower bound on per-frame pixel count after resize.
        max_pixels: upper bound on per-frame pixel count after resize.

    Returns:
        dict with keys:
            "frame_tensor": tensor of shape (T, C, H, W) with resized frames,
            "fps": the effective sampling fps actually achieved.
    """
    vr = decord.VideoReader(video_path)
    total_frames, video_fps = len(vr), vr.get_avg_fps()
    # ceil_by_factor keeps the lower bound a multiple of FRAME_FACTOR.
    min_nframes = ceil_by_factor(FPS_MIN_FRAMES, FRAME_FACTOR)
    max_frames = floor_by_factor(min(FPS_MAX_FRAMES, total_frames), FRAME_FACTOR)
    nframes = total_frames / video_fps * target_fps  # ideal frame count at target_fps
    if nframes > total_frames:
        print(f"smart_nframes: nframes[{nframes}] > total_frames[{total_frames}]")
    nframes = min(max(nframes, min_nframes), max_frames, total_frames)
    nframes = floor_by_factor(nframes, FRAME_FACTOR)
    # Guard against degenerate clips: for videos shorter than FRAME_FACTOR frames
    # the clamping above collapses to 0 and linspace would yield no indices.
    # Sampling duplicated indices is fine for decord's get_batch.
    nframes = max(nframes, FRAME_FACTOR)
    frame_idx = torch.linspace(0, total_frames - 1, nframes).round().long().tolist()  # uniform sampling
    frame_array = vr.get_batch(frame_idx).asnumpy()
    # from_numpy shares the buffer instead of copying like torch.tensor() would.
    frame_tensor = torch.from_numpy(frame_array).permute(0, 3, 1, 2)  # THWC -> TCHW
    sample_fps = nframes / max(total_frames, 1e-6) * video_fps  # effective fps after subsampling
    height, width = frame_tensor.shape[2], frame_tensor.shape[3]
    resized_height, resized_width = smart_resize(
        height,
        width,
        factor=image_factor,
        min_pixels=min_pixels,
        max_pixels=max_pixels,
    )
    frame_tensor = transforms.functional.resize(
        frame_tensor,
        [resized_height, resized_width],
        interpolation=InterpolationMode.BICUBIC,
        antialias=True,
    )
    frame_cache = {
        "frame_tensor": frame_tensor,
        "fps": sample_fps,
    }
    return frame_cache
def process_single_video(video_path, target_fps=1, image_factor = 28, min_pixels = 28 * 28 * 128, max_pixels = 28 * 28 * 256, save_dir = '/data/shuimu.chen/LongVideoBench/video_cache_300p_120'):
    """Decode one video and persist its frame cache with torch.save.

    The cache is written to ``save_dir`` as ``<video basename>.frame_cache``.
    Errors are reported but swallowed on purpose (best-effort): one bad video
    must not abort a whole multiprocessing batch, so this function never raises.

    Args:
        video_path: path of the video to process.
        target_fps: sampling rate forwarded to get_video_tensor.
        image_factor: resize factor forwarded to get_video_tensor.
        min_pixels: lower pixel bound forwarded to get_video_tensor.
        max_pixels: upper pixel bound forwarded to get_video_tensor.
        save_dir: directory the cache file is written to (new optional
            parameter; defaults to the previously hard-coded location).
    """
    os.makedirs(save_dir, exist_ok=True)
    video_filename = os.path.basename(video_path)
    save_path = os.path.join(save_dir, video_filename + '.frame_cache')
    print(f"Processing {video_path}...")
    try:
        frame_cache = get_video_tensor(video_path, target_fps, image_factor, min_pixels, max_pixels)
        torch.save(frame_cache, save_path)
        print(f"Successfully saved frame cache for {video_path}")
    except Exception as e:
        # Best-effort: report and continue so the worker pool keeps going.
        print(f"Error processing {video_path}: {e}")
# def process_single_video(video_path, target_fps=1, image_factor = 28, min_pixels = 28 * 28 * 128, max_pixels = 28 * 28 * 256):
# save_path = '/data/shuimu.chen/LongVideoBench/video_cache_3p_60'
# """Helper function to process and save frame cache for a single video."""
# print(f"Processing {video_path}...")
# try:
# frame_cache = get_video_tensor(video_path, target_fps, image_factor, min_pixels, max_pixels)
# torch.save(frame_cache, video_path + ".frame_cache")
# print(f"Successfully saved frame cache for {video_path}")
# except Exception as e:
# print(f"Error processing {video_path}: {e}")
def prepare_frame_cache(video_root, dataset_path=None, num_workers=8, target_fps=1, overwrite=False, image_factor = 28, min_pixels = 28 * 28 * 128, max_pixels = 28 * 28 * 256):
    """Build frame caches for a set of videos using a multiprocessing pool.

    Videos come either from ``dataset_path`` (a JSON array whose records carry a
    ``video_path`` or ``video`` field) or, when ``dataset_path`` is None, from
    all ``*.mp4`` files directly under ``video_root``.

    Args:
        video_root: directory containing the videos (also used to resolve
            relative ``video`` fields from the dataset file).
        dataset_path: optional JSON file listing videos; None means glob mp4s.
        num_workers: pool size; None means use all CPU cores.
        target_fps: sampling rate forwarded to process_single_video.
        overwrite: when False, videos whose cache file already exists are skipped.
        image_factor: resize factor forwarded to process_single_video.
        min_pixels: lower pixel bound forwarded to process_single_video.
        max_pixels: upper pixel bound forwarded to process_single_video.
    """
    if dataset_path is not None:
        # The dataset file is a JSON array (not JSONL), so use json.load directly.
        import json
        with open(dataset_path, 'r', encoding='utf-8') as f:
            video_data = json.load(f)
        # Collect unique video paths (the same video may appear in several records).
        video_paths = set()
        for record in video_data:
            if "video_path" in record:
                video_paths.add(record["video_path"])
            elif "video" in record:
                # A bare "video" field is resolved relative to video_root.
                video_paths.add(os.path.join(video_root, record["video"]))
        video_list = list(video_paths)
    else:
        video_list = glob.glob(os.path.join(video_root, "*.mp4"))
    if not video_list:
        print(f"No MP4 videos found in {video_root}")
        return
    if not overwrite:
        # BUGFIX: process_single_video writes caches into its save_dir (not next
        # to the source video), so the existence check must look there — the old
        # check on `v + ".frame_cache"` never matched and reprocessed everything.
        cache_dir = '/data/shuimu.chen/LongVideoBench/video_cache_300p_120'
        print("skipping videos that already have frame cache")
        num_total = len(video_list)
        video_list = [
            v for v in video_list
            if not os.path.exists(os.path.join(cache_dir, os.path.basename(v) + '.frame_cache'))
        ]
        num_skipped = num_total - len(video_list)
        print(f"skipped {num_skipped} videos")
    if num_workers is None:
        num_workers = multiprocessing.cpu_count()  # default to all available CPU cores
    print(f"Found {len(video_list)} videos. Starting processing with {num_workers} workers...")
    # Fan the per-video work out over a process pool; imap_unordered + tqdm gives
    # a progress bar without caring about completion order.
    with multiprocessing.Pool(processes=num_workers) as pool:
        func = partial(process_single_video, target_fps=target_fps, image_factor = image_factor, min_pixels = min_pixels, max_pixels = max_pixels)
        list(tqdm.tqdm(pool.imap_unordered(func, video_list), total=len(video_list)))
    print("All videos processed.")
if __name__ == "__main__":
    # Expose prepare_frame_cache as a command-line tool via python-fire.
    import fire

    fire.Fire(prepare_frame_cache)