# TimeSearch-R-raw / prepare_frame_cache_1.py
"""Pre-extract video frames at a fixed FPS, resize them with smart_resize, and
save them as uint8 tensors (.frame_cache files) for later training."""
import glob
import multiprocessing
import os
from functools import partial

import cv2
import decord
import numpy as np
import torch
import tqdm
from torchvision import transforms
from torchvision.transforms import InterpolationMode

from time_r1.utils.qwen_vl_utils_prepare import FRAME_FACTOR, ceil_by_factor, floor_by_factor, smart_resize
# Raise decord's EOF retry limit to tolerate videos that trigger EOF retry errors.
os.environ['DECORD_EOF_RETRY_MAX'] = '20480'
SAVE_DIR = '/data/shuimu.chen/videomarathon/downloaded_videos/activitynet_192_50_frame_cache_unit8'
VIDEO_PATH = "/data/shuimu.chen/videomarathon/downloaded_videos/activitynet"
FPS_MIN_FRAMES = 4
FPS_MAX_FRAMES = 50  # adjust for your GPU memory; 60-80 is a common range
FRAME_FACTOR = 2  # overrides the imported value; sampled frame counts are rounded to a multiple of this
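# A note on the rounding helpers imported above: assuming the usual qwen-vl-utils
# definitions, ceil_by_factor(x, f) rounds x up to a multiple of f and
# floor_by_factor(x, f) rounds it down, e.g. ceil_by_factor(5, 2) == 6 and
# floor_by_factor(5, 2) == 4, so sampled frame counts always land on a multiple
# of FRAME_FACTOR.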
def get_video_tensor(video_path, target_fps=2, image_factor=28, min_pixels=28 * 28 * 4, max_pixels=28 * 28 * 128):
    """Memory-friendly frame extraction.

    1. Prefer decord in CPU mode.
    2. On failure, fall back to OpenCV (streaming reads, low peak memory).
    """
    # ------------------------------------------------------------------
    # 1. Read video metadata & pick a reader
    # ------------------------------------------------------------------
vr = None
cap = None
method = "unknown"
total_frames = 0
video_fps = 0.0
    # === Option A: decord (CPU) ===
try:
vr = decord.VideoReader(video_path, ctx=decord.cpu(0), num_threads=1)
total_frames = len(vr)
video_fps = vr.get_avg_fps()
method = "decord"
except Exception:
        # === Option B: OpenCV (streaming reads, memory-safe) ===
try:
cap = cv2.VideoCapture(video_path)
if cap.isOpened():
total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
video_fps = cap.get(cv2.CAP_PROP_FPS)
method = "cv2"
except Exception:
pass
    # If both readers failed, or no frame count / fps could be read, give up on this video.
if total_frames <= 0 or video_fps <= 0:
if cap: cap.release()
raise RuntimeError(f"Failed to read video (or empty): {video_path}")
# ------------------------------------------------------------------
    # 2. Compute the sampling schedule
# ------------------------------------------------------------------
min_nframes = ceil_by_factor(FPS_MIN_FRAMES, FRAME_FACTOR)
max_frames_limit = floor_by_factor(min(FPS_MAX_FRAMES, total_frames), FRAME_FACTOR)
nframes = total_frames / video_fps * target_fps
nframes = min(min(max(nframes, min_nframes), max_frames_limit), total_frames)
nframes = floor_by_factor(nframes, FRAME_FACTOR)
nframes = int(nframes)
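    # Worked example (illustrative numbers): a 60 s clip at 30 fps has
    # total_frames = 1800, so target_fps = 2 would ideally yield 120 frames;
    # the FPS_MAX_FRAMES cap brings that down to 50, and floor_by_factor(50, 2)
    # keeps it at 50 (an even count, as required).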
    # Uniformly spaced sampling indices
frame_idx = torch.linspace(0, total_frames - 1, nframes).round().long()
# ------------------------------------------------------------------
    # 3. Extract frame data (memory-safe)
# ------------------------------------------------------------------
frames_list = []
if method == "decord":
        # decord: batched read (fast path)
try:
frame_tensor = vr.get_batch(frame_idx.tolist()).asnumpy()
frame_tensor = torch.tensor(frame_tensor).permute(0, 3, 1, 2) # (T, H, W, C) -> (T, C, H, W)
        except Exception:
            # decord opened the file but get_batch failed (e.g. corrupt frames);
            # fall back to OpenCV below.
            method = "cv2"
            cap = cv2.VideoCapture(video_path)  # reopen with OpenCV
if method == "cv2":
        # OpenCV: seek and read frame by frame (very low memory footprint)
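        # Note (general OpenCV caveat, not specific to this dataset): seeking via
        # CAP_PROP_POS_FRAMES is codec-dependent and can be slow or land on the
        # nearest keyframe, so this path trades speed/accuracy for robustness.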
for idx in frame_idx.tolist():
cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
ret, frame = cap.read()
if ret:
# BGR -> RGB
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frames_list.append(frame)
else:
                # Read failed (corrupt frame): repeat the previous frame, or use an all-black frame
if len(frames_list) > 0:
frames_list.append(frames_list[-1])
else:
frames_list.append(np.zeros((224, 224, 3), dtype=np.uint8))
cap.release()
if len(frames_list) == 0:
raise RuntimeError(f"OpenCV failed to read any frames: {video_path}")
        # Stack: list of (H, W, C) arrays -> (T, H, W, C), then permute to (T, C, H, W)
frame_tensor = torch.tensor(np.array(frames_list)).permute(0, 3, 1, 2) # (T, C, H, W)
# ------------------------------------------------------------------
    # 4. Post-process (resize + uint8 conversion)
# ------------------------------------------------------------------
sample_fps = nframes / max(total_frames, 1e-6) * video_fps
frame_tensor = frame_tensor.float()
height, width = frame_tensor.shape[2], frame_tensor.shape[3]
resized_height, resized_width = smart_resize(
height,
width,
factor=image_factor,
min_pixels=min_pixels,
max_pixels=max_pixels,
)
frame_tensor = transforms.functional.resize(
frame_tensor,
[resized_height, resized_width],
interpolation=InterpolationMode.BICUBIC,
antialias=True,
)
if frame_tensor.dtype != torch.uint8:
frame_tensor = frame_tensor.clamp(0, 255).to(torch.uint8)
frame_cache = {
"frame_tensor": frame_tensor,
"fps": sample_fps,
}
return frame_cache
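# Minimal inspection sketch (hypothetical helper, not used by the pipeline):
# load a saved .frame_cache produced by process_single_video below and print its
# contents. The dict layout ("frame_tensor", "fps") matches what
# get_video_tensor returns; cache_path is whatever cache file you want to check.
def _inspect_frame_cache(cache_path):
    cache = torch.load(cache_path, map_location="cpu")
    frames = cache["frame_tensor"]  # (T, C, H, W), uint8 after the clamp above
    print(f"frames: {tuple(frames.shape)}, dtype={frames.dtype}, sample fps={cache['fps']:.3f}")
    return cache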
def process_single_video(video_path, target_fps=1, image_factor=28, min_pixels=28*28*128, max_pixels=28*28*256):
os.makedirs(SAVE_DIR, exist_ok=True)
    # Filename handling: to avoid collisions between same-named videos from
    # different directories, consider keeping part of the directory structure or
    # prefixing the name with a hash of the full path (see the sketch after this
    # function); here we simply use the basename.
video_filename = os.path.basename(video_path)
save_path = os.path.join(SAVE_DIR, video_filename + '.frame_cache')
    # Skip if the cache file already exists (second line of defense alongside
    # the pre-filter in prepare_frame_cache)
    if os.path.exists(save_path):
        return None  # already cached, skipped
try:
frame_cache = get_video_tensor(video_path, target_fps, image_factor, min_pixels, max_pixels)
torch.save(frame_cache, save_path)
        return None  # success
except Exception as e:
        # Return the error message instead of printing it, so the main process can aggregate failures
return f"Error: {video_path} -> {str(e)}"
def prepare_frame_cache(video_root=None, dataset_path=None, num_workers=8, target_fps=2, overwrite=False, image_factor=28, min_pixels=28*28*128, max_pixels=28*28*256):
    # Fall back to the module-level default when no video root is passed on the CLI.
    video_root = video_root or VIDEO_PATH
    if dataset_path is not None:
        # The dataset is a JSON array, so use json.load directly (not load_jsonl)
        import json
        with open(dataset_path, 'r', encoding='utf-8') as f:
            video_data = json.load(f)  # list of records
        # Collect video paths and de-duplicate (one video may appear in several records)
        video_paths = set()
        for v in video_data:
            if "video_path" in v:
                video_paths.add(v["video_path"])
            elif "video" in v:
                # Only a relative "video" field: join it with video_root
                video_path = os.path.join(video_root, v["video"])
                video_paths.add(video_path)
        video_list = list(video_paths)
else:
video_list = glob.glob(os.path.join(video_root, "*.mp4"))
    # Skip logic: check the target directory for caches that already exist
if not overwrite:
print("Checking existing files in target directory...")
        # Pre-compute the set of filenames already present in SAVE_DIR
existing_files = set(os.listdir(SAVE_DIR)) if os.path.exists(SAVE_DIR) else set()
new_video_list = []
skipped_count = 0
duplicate_name_warning = []
seen_basenames = set()
for v in video_list:
basename = os.path.basename(v)
target_name = basename + '.frame_cache'
            # Detect basename collisions (same filename from different directories)
if basename in seen_basenames:
duplicate_name_warning.append(v)
seen_basenames.add(basename)
if target_name in existing_files:
skipped_count += 1
else:
new_video_list.append(v)
video_list = new_video_list
print(f"Skipped {skipped_count} existing files.")
if duplicate_name_warning:
print(f"⚠️ 警告: 发现 {len(duplicate_name_warning)} 个同名不同路径的视频!这会导致覆盖问题。")
# 打印前5个例子
for d in duplicate_name_warning[:5]:
print(f" - Duplicate basename: {d}")
print(f"Starting processing {len(video_list)} videos...")
failed_videos = []
with multiprocessing.Pool(processes=num_workers) as pool:
func = partial(process_single_video, target_fps=target_fps, image_factor=image_factor, min_pixels=min_pixels, max_pixels=max_pixels)
        # Consume results through tqdm so we get a progress bar while collecting errors
for result in tqdm.tqdm(pool.imap_unordered(func, video_list), total=len(video_list)):
            if result is not None:  # a non-None result is an error string from a worker
failed_videos.append(result)
print("\n" + "="*30)
print("Processing Complete.")
if failed_videos:
print(f"❌ {len(failed_videos)} videos failed to process:")
for err in failed_videos:
print(err)
else:
print("✅ All videos processed successfully.")
if __name__ == "__main__":
import fire
fire.Fire(prepare_frame_cache)
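# Example invocations (paths and flag values are illustrative; python-fire maps
# the keyword arguments of prepare_frame_cache to command-line flags):
#
#   python prepare_frame_cache_1.py --num_workers 16 --target_fps 2
#   python prepare_frame_cache_1.py --video_root /path/to/videos --overwrite True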