|
|
import os |
|
|
import json |
|
|
import yt_dlp |
|
|
from concurrent.futures import ThreadPoolExecutor, as_completed |
|
|
from tqdm import tqdm |
|
|
|
|
|
|
|
|
|
|
|
# JSON file listing the Panda-70M multiple-choice subset (~100k QA entries);
# each entry carries a 'video' relative path and a 'URL' to download from.
JSON_DATA_PATH = "/data/shuimu.chen/videomarathon/panda70m_mc_subset_100k.json"


# Root directory under which every video is saved (mirrors each entry's
# relative path, with the extension forced to .mp4).
SAVE_ROOT = "/data/shuimu.chen/videomarathon/100k_videos"


# Number of concurrent download workers. Kept at 1 to stay gentle on the
# video host and reduce the chance of rate limiting / bans.
MAX_WORKERS = 1


# Netscape-format cookies file passed to yt-dlp; required for age-gated or
# login-protected videos. main() refuses to run if this file is missing.
COOKIE_PATH = "/data/shuimu.chen/TimeSearch-R/data_prepare/cookies.txt"
|
|
|
|
|
|
|
|
def process_video(item):
    """Download one video with yt-dlp, skipping files that already exist.

    Args:
        item: dict with keys 'rel_path' (save path relative to SAVE_ROOT,
            extension ignored) and 'url' (the video page URL).

    Returns:
        One of four status strings:
        - "skipped": the target .mp4 already exists on disk,
        - "success": the download produced the expected .mp4,
        - "failed":  yt-dlp ran but no .mp4 appeared (e.g. suppressed error),
        - "error":   an exception escaped yt-dlp.
    """
    video_rel_path = item['rel_path']
    url = item['url']

    # Force the final container to .mp4 regardless of the source extension
    # recorded in rel_path.
    base_name = os.path.splitext(video_rel_path)[0]
    full_save_path_no_ext = os.path.join(SAVE_ROOT, base_name)
    final_file_path = full_save_path_no_ext + ".mp4"

    # Resume-friendly: anything downloaded on a previous run is skipped.
    if os.path.exists(final_file_path):
        return "skipped"

    os.makedirs(os.path.dirname(final_file_path), exist_ok=True)

    ydl_opts = {
        # Cap at 480p to limit disk and bandwidth usage.
        'format': 'bestvideo[height<=480]+bestaudio/best[height<=480]',
        # Randomized sleeps between downloads/requests to avoid rate limiting.
        'sleep_interval': 5,
        'max_sleep_interval': 15,
        'merge_output_format': 'mp4',
        'sleep_interval_requests': 1,
        # yt-dlp substitutes the real extension; merge step yields .mp4.
        'outtmpl': f"{full_save_path_no_ext}.%(ext)s",
        # Explicit Node.js runtime for yt-dlp's JS challenge solving.
        'js_runtimes': ['node:/data/shuimu.chen/TimeSearch-R/data_prepare/node-v24.12.0-linux-x64/bin/node'],
        'quiet': False,
        'no_warnings': False,
        # Keep going on per-video errors; we detect failure via the file check.
        'ignoreerrors': True,
        'cookiefile': COOKIE_PATH,
    }

    try:
        with yt_dlp.YoutubeDL(ydl_opts) as ydl:
            ydl.download([url])

        # 'ignoreerrors' means download() can return without raising even when
        # nothing was saved, so verify the merged .mp4 actually exists.
        if os.path.exists(final_file_path):
            return "success"
        return "failed"
    except Exception as e:
        # Fix: the exception was previously swallowed silently (unused `e`),
        # leaving failed URLs undiagnosable. Surface it before reporting.
        print(f"[error] {url}: {e}")
        return "error"
|
|
|
|
|
def _collect_unique_tasks(dataset):
    """Deduplicate QA entries into one download task per unique video.

    Keeps the first URL seen for each relative video path; entries missing
    either 'video' or 'URL' are ignored.

    Args:
        dataset: list of QA dicts loaded from JSON_DATA_PATH.

    Returns:
        List of {'rel_path': ..., 'url': ...} dicts, in first-seen order.
    """
    unique_videos = {}
    for item in tqdm(dataset, desc="筛选唯一视频"):
        video_rel_path = item.get('video')
        url = item.get('URL')
        if video_rel_path and url and video_rel_path not in unique_videos:
            unique_videos[video_rel_path] = url
    return [{'rel_path': k, 'url': v} for k, v in unique_videos.items()]


def main():
    """Entry point: load the QA JSON, dedupe videos, download them concurrently."""
    # The cookies file is mandatory (age-gated / authenticated downloads);
    # bail out early with a clear message rather than failing per-video.
    if not os.path.exists(COOKIE_PATH):
        print(f"❌ 错误: 找不到 Cookie 文件: {COOKIE_PATH}")
        print("请上传 cookies.txt 后再重试!")
        return

    print(f"正在加载JSON数据文件: {JSON_DATA_PATH} ...")
    with open(JSON_DATA_PATH, 'r', encoding='utf-8') as f:
        dataset = json.load(f)
    print(f"已加载 {len(dataset)} 条数据")

    # Many QA pairs reference the same video; download each video only once.
    print("正在从JSON数据中筛选唯一视频任务 (Panda-70M + Multiple Choice) ...")
    tasks = _collect_unique_tasks(dataset)
    print(f"筛选完成!共 {len(tasks)} 个唯一视频任务。")
    print(f"注: 原JSON中有 {len(dataset)} 条问答对,涉及 {len(tasks)} 个唯一视频。")

    print(f"\n开始并发下载 (Workers={MAX_WORKERS}, Cookie已启用)...")
    # Tally of per-video outcomes; keys match process_video's return values.
    results = {"success": 0, "skipped": 0, "failed": 0, "error": 0}

    with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
        future_to_video = {executor.submit(process_video, task): task for task in tasks}
        # as_completed lets the progress bar advance as each video finishes,
        # regardless of submission order.
        for future in tqdm(as_completed(future_to_video), total=len(tasks), desc="下载进度"):
            results[future.result()] += 1

    print("\n" + "=" * 40)
    print("处理完毕")
    print("=" * 40)
    print(f"JSON中问答对总数: {len(dataset)}")
    print(f"唯一视频任务数: {len(tasks)}")
    print(f"跳过(已存在): {results['skipped']}")
    print(f"下载成功: {results['success']}")
    print(f"下载失败: {results['failed'] + results['error']}")
    print(f"数据保存在: {os.path.abspath(SAVE_ROOT)}")

    # success > 0 implies tasks is non-empty, so the division is safe.
    if results['success'] > 0:
        coverage_rate = results['success'] / len(tasks) * 100
        print(f"视频覆盖率: {coverage_rate:.1f}%")
|
|
|
# Run the downloader only when executed as a script, not on import.
if __name__ == "__main__":


    main()