|
|
from time_r1.utils.qwen_vl_utils_prepare import floor_by_factor, FRAME_FACTOR, smart_resize, ceil_by_factor |
|
|
import decord |
|
|
import torch |
|
|
import os |
|
|
import tqdm |
|
|
import glob |
|
|
import multiprocessing |
|
|
from torchvision import io, transforms |
|
|
from torchvision.transforms import InterpolationMode |
|
|
from functools import partial |
|
|
from time_r1.utils.io import load_jsonl |
|
|
import debugpy |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os  # NOTE(review): duplicate of the top-of-file `import os`; harmless but redundant


# Raise decord's EOF retry budget — presumably to tolerate containers whose
# frame-count metadata is slightly inaccurate. TODO confirm against decord docs.
os.environ['DECORD_EOF_RETRY_MAX'] = '20480'


# Directory where serialized *.frame_cache files are written.
SAVE_DIR = '/data/shuimu.chen/videomarathon/downloaded_videos/activitynet_192_50_frame_cache_unit8'


# Root directory containing the source .mp4 videos.
VIDEO_PATH = "/data/shuimu.chen/videomarathon/downloaded_videos/activitynet"


# Lower bound on sampled frames per video (before rounding to FRAME_FACTOR).
FPS_MIN_FRAMES = 4


# Upper bound on sampled frames per video.
FPS_MAX_FRAMES = 50


# NOTE(review): this shadows the FRAME_FACTOR imported from
# qwen_vl_utils_prepare at the top of the file — confirm the override
# to 2 is intentional.
FRAME_FACTOR = 2
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import cv2 |
|
|
import numpy as np |
|
|
|
|
|
|
|
|
def get_video_tensor(video_path, target_fps=2, image_factor=28, min_pixels=28 * 28 * 4, max_pixels=28 * 28 * 128):
    """Decode, sample, and resize a video into a uint8 RGB frame tensor.

    Memory-optimized decoding strategy:
      1. Prefer Decord (CPU context, single decode thread).
      2. Fall back to OpenCV streaming reads on any Decord failure.

    Args:
        video_path: Path to the video file.
        target_fps: Desired temporal sampling rate (frames per second).
        image_factor: Both output spatial dims are made divisible by this.
        min_pixels: Lower bound on resized H*W, forwarded to smart_resize.
        max_pixels: Upper bound on resized H*W, forwarded to smart_resize.

    Returns:
        dict with:
            "frame_tensor": uint8 tensor of shape (nframes, 3, H, W), RGB order.
            "fps": effective sampling fps of the returned frames.

    Raises:
        RuntimeError: if the video cannot be opened, is empty, or no frame
            can be read by either backend.
    """
    vr = None
    cap = None
    method = "unknown"
    total_frames = 0
    video_fps = 0.0

    # --- Probe the video: Decord first, OpenCV as fallback. ---
    try:
        vr = decord.VideoReader(video_path, ctx=decord.cpu(0), num_threads=1)
        total_frames = len(vr)
        video_fps = vr.get_avg_fps()
        method = "decord"
    except Exception:
        vr = None
        try:
            cap = cv2.VideoCapture(video_path)
            if cap.isOpened():
                total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
                video_fps = cap.get(cv2.CAP_PROP_FPS)
                method = "cv2"
        except Exception:
            pass

    if total_frames <= 0 or video_fps <= 0:
        # Release whichever backend we managed to open before bailing out.
        if cap is not None:
            cap.release()
        del vr
        raise RuntimeError(f"Failed to read video (or empty): {video_path}")

    # --- Decide how many frames to sample. ---
    # Clamp between the configured min/max, never exceed the clip length,
    # and round down to a multiple of FRAME_FACTOR.
    min_nframes = ceil_by_factor(FPS_MIN_FRAMES, FRAME_FACTOR)
    max_frames_limit = floor_by_factor(min(FPS_MAX_FRAMES, total_frames), FRAME_FACTOR)

    nframes = total_frames / video_fps * target_fps
    nframes = min(min(max(nframes, min_nframes), max_frames_limit), total_frames)
    nframes = int(floor_by_factor(nframes, FRAME_FACTOR))
    # Guard degenerate clips shorter than FRAME_FACTOR frames: the rounding
    # above yields 0, which previously produced an empty index list and a
    # confusing downstream failure. Sample at least one frame instead.
    nframes = max(nframes, 1)

    # Evenly spaced frame indices across the whole clip.
    frame_idx = torch.linspace(0, total_frames - 1, nframes).round().long()

    frames_list = []
    frame_tensor = None

    if method == "decord":
        try:
            batch = vr.get_batch(frame_idx.tolist()).asnumpy()
            # from_numpy avoids the extra copy torch.tensor() would make.
            frame_tensor = torch.from_numpy(batch).permute(0, 3, 1, 2)
        except Exception:
            # Decord opened the file but failed mid-decode; retry with OpenCV.
            method = "cv2"
            cap = cv2.VideoCapture(video_path)
        finally:
            # Release the decord file handle promptly — long batch runs leak
            # handles otherwise.
            del vr
            vr = None

    if method == "cv2":
        for idx in frame_idx.tolist():
            cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
            ret, frame = cap.read()
            if ret:
                # OpenCV decodes BGR; the cache stores RGB.
                frames_list.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            elif frames_list:
                # Unreadable frame: pad with the last good one.
                frames_list.append(frames_list[-1])
            else:
                # No good frame yet: pad with black. NOTE(review): the 224x224
                # placeholder may mismatch real frame sizes and break stacking
                # if a later frame decodes — pre-existing behavior, kept.
                frames_list.append(np.zeros((224, 224, 3), dtype=np.uint8))

        cap.release()

        if len(frames_list) == 0:
            raise RuntimeError(f"OpenCV failed to read any frames: {video_path}")

        frame_tensor = torch.from_numpy(np.array(frames_list)).permute(0, 3, 1, 2)

    # Effective fps of the sampled sequence.
    sample_fps = nframes / max(total_frames, 1e-6) * video_fps

    # Resize in float, then quantize back to uint8 for compact storage.
    frame_tensor = frame_tensor.float()

    height, width = frame_tensor.shape[2], frame_tensor.shape[3]
    resized_height, resized_width = smart_resize(
        height,
        width,
        factor=image_factor,
        min_pixels=min_pixels,
        max_pixels=max_pixels,
    )

    frame_tensor = transforms.functional.resize(
        frame_tensor,
        [resized_height, resized_width],
        interpolation=InterpolationMode.BICUBIC,
        antialias=True,
    )

    if frame_tensor.dtype != torch.uint8:
        frame_tensor = frame_tensor.clamp(0, 255).to(torch.uint8)

    frame_cache = {
        "frame_tensor": frame_tensor,
        "fps": sample_fps,
    }
    return frame_cache
|
|
|
|
|
|
|
|
def process_single_video(video_path, target_fps=1, image_factor=28, min_pixels=28*28*128, max_pixels=28*28*256):
    """Decode one video into a frame cache and persist it under SAVE_DIR.

    Designed as a multiprocessing worker: it never raises. Returns None on
    success (or when the cache already exists), otherwise an error string
    the parent process can collect and report.
    """
    os.makedirs(SAVE_DIR, exist_ok=True)

    cache_name = os.path.basename(video_path) + '.frame_cache'
    cache_path = os.path.join(SAVE_DIR, cache_name)

    # Skip work a previous run already completed.
    if os.path.exists(cache_path):
        return None

    try:
        cache = get_video_tensor(video_path, target_fps, image_factor, min_pixels, max_pixels)
        torch.save(cache, cache_path)
        return None
    except Exception as e:
        return f"Error: {video_path} -> {str(e)}"
|
|
|
|
|
def prepare_frame_cache(video_root = None, dataset_path=None, num_workers=8, target_fps=2, overwrite=False, image_factor=28, min_pixels=28*28*128, max_pixels=28*28*256):
    """Build *.frame_cache files under SAVE_DIR for a set of videos.

    Args:
        video_root: Directory containing source videos; defaults to VIDEO_PATH.
        dataset_path: Optional JSON file listing videos. Each entry may carry
            an absolute "video_path", or a "video" filename resolved against
            video_root. When None, every *.mp4 directly under video_root is
            processed.
        num_workers: Size of the multiprocessing pool.
        target_fps: Temporal sampling rate forwarded to process_single_video.
        overwrite: When False, videos whose cache file already exists in
            SAVE_DIR are skipped up front.
        image_factor: Spatial factor forwarded to process_single_video.
        min_pixels: Lower resize bound forwarded to process_single_video.
        max_pixels: Upper resize bound forwarded to process_single_video.
    """
    # Default only when the caller did not supply a root. Previously both
    # video_root and dataset_path were unconditionally overwritten here
    # (debug leftovers), which made the parameters dead and the entire
    # dataset_path branch unreachable.
    if video_root is None:
        video_root = VIDEO_PATH

    if dataset_path is not None:
        import json
        with open(dataset_path, 'r', encoding='utf-8') as f:
            video_data = json.load(f)

        # Collect unique video paths from the dataset entries.
        video_paths = set()
        for v in video_data:
            if "video_path" in v:
                video_paths.add(v["video_path"])
            elif "video" in v:
                # Relative entry: resolve against the video root.
                video_paths.add(os.path.join(video_root, v["video"]))

        video_list = list(video_paths)
    else:
        video_list = glob.glob(os.path.join(video_root, "*.mp4"))

    if not overwrite:
        print("Checking existing files in target directory...")

        existing_files = set(os.listdir(SAVE_DIR)) if os.path.exists(SAVE_DIR) else set()

        new_video_list = []
        skipped_count = 0
        duplicate_name_warning = []
        seen_basenames = set()

        for v in video_list:
            basename = os.path.basename(v)
            target_name = basename + '.frame_cache'

            # Different source paths with the same basename map to the same
            # cache file and would silently overwrite each other.
            if basename in seen_basenames:
                duplicate_name_warning.append(v)
            seen_basenames.add(basename)

            if target_name in existing_files:
                skipped_count += 1
            else:
                new_video_list.append(v)

        video_list = new_video_list
        print(f"Skipped {skipped_count} existing files.")

        if duplicate_name_warning:
            print(f"⚠️ 警告: 发现 {len(duplicate_name_warning)} 个同名不同路径的视频!这会导致覆盖问题。")
            for d in duplicate_name_warning[:5]:
                print(f"  - Duplicate basename: {d}")

    print(f"Starting processing {len(video_list)} videos...")

    failed_videos = []

    with multiprocessing.Pool(processes=num_workers) as pool:
        func = partial(process_single_video, target_fps=target_fps, image_factor=image_factor, min_pixels=min_pixels, max_pixels=max_pixels)

        # imap_unordered keeps memory flat and lets tqdm track progress;
        # workers return None on success or an error string on failure.
        for result in tqdm.tqdm(pool.imap_unordered(func, video_list), total=len(video_list)):
            if result is not None:
                failed_videos.append(result)

    print("\n" + "="*30)
    print("Processing Complete.")
    if failed_videos:
        print(f"❌ {len(failed_videos)} videos failed to process:")
        for err in failed_videos:
            print(err)
    else:
        print("✅ All videos processed successfully.")
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Third-party CLI generator; imported lazily so library imports of this
    # module do not require it.
    import fire
    # Exposes prepare_frame_cache as a command line tool: each keyword
    # argument becomes a --flag.
    fire.Fire(prepare_frame_cache)
|
|
|