# Origin note: file uploaded via the upload-large-folder tool (commit 9848efb, verified).
import os
import json # 新增:用于读取JSON文件
import yt_dlp
from concurrent.futures import ThreadPoolExecutor, as_completed
from tqdm import tqdm
# ================= Configuration =================
# Path to the input JSON file (Panda-70M multiple-choice subset).
JSON_DATA_PATH = "/data/shuimu.chen/videomarathon/panda70m_mc_subset_100k.json"
# Root directory under which downloaded videos are saved.
SAVE_ROOT = "/data/shuimu.chen/videomarathon/100k_videos"
MAX_WORKERS = 1 # keep it single-threaded — more stable for this workload
# [Please confirm] absolute path to the cookies.txt file
COOKIE_PATH = "/data/shuimu.chen/TimeSearch-R/data_prepare/cookies.txt"
# ===========================================
def process_video(item):
    """
    Download a single video with yt-dlp.

    Args:
        item: dict with keys 'rel_path' (save path relative to SAVE_ROOT,
              e.g. "category/video_id.mp4") and 'url' (the video URL).

    Returns:
        One of:
          "skipped" - target .mp4 already exists on disk,
          "success" - download finished and the file exists,
          "failed"  - yt-dlp returned but no output file was produced
                      (possible with ignoreerrors=True),
          "error"   - an exception was raised during download.
    """
    video_rel_path = item['rel_path']
    url = item['url']

    # Build the save path; base_name looks like "category/video_id".
    base_name = os.path.splitext(video_rel_path)[0]
    full_save_path_no_ext = os.path.join(SAVE_ROOT, base_name)
    # Final path (with .mp4) used for the existence checks below.
    final_file_path = full_save_path_no_ext + ".mp4"

    # Skip work that is already done.
    if os.path.exists(final_file_path):
        return "skipped"

    # Make sure the target directory exists.
    os.makedirs(os.path.dirname(final_file_path), exist_ok=True)

    # yt-dlp options. NOTE: comments here now match the actual values
    # (the old comments claimed 30 s / 5 s where the values are 15 / 1).
    ydl_opts = {
        'format': 'bestvideo[height<=480]+bestaudio/best[height<=480]',
        'sleep_interval': 5,            # sleep at least 5 s after each download
        'max_sleep_interval': 15,       # random sleep is capped at 15 s
        'merge_output_format': 'mp4',
        'sleep_interval_requests': 1,   # also pause 1 s between metadata requests
        # outtmpl must end with .%(ext)s so yt-dlp appends the real
        # extension: final name = our path + "." + extension (mp4).
        'outtmpl': f"{full_save_path_no_ext}.%(ext)s",
        'js_runtimes': ['node:/data/shuimu.chen/TimeSearch-R/data_prepare/node-v24.12.0-linux-x64/bin/node'],
        'quiet': False,      # keep verbose for debugging; flip to True once stable
        'no_warnings': False,
        'ignoreerrors': True,
        'cookiefile': COOKIE_PATH,
    }
    try:
        with yt_dlp.YoutubeDL(ydl_opts) as ydl:
            ydl.download([url])
        # ignoreerrors=True means download() can return without producing
        # a file, so verify the output actually exists.
        if os.path.exists(final_file_path):
            return "success"
        else:
            return "failed"
    except Exception as e:
        # Report the failure instead of silently swallowing it, but keep
        # the "error" return contract so main() can tally it.
        print(f"[error] {url}: {e}")
        return "error"
def main():
    """Load the QA JSON, reduce it to unique video tasks, and download them."""
    # 0. Ensure the cookie file is present before doing any work.
    if not os.path.exists(COOKIE_PATH):
        print(f"❌ 错误: 找不到 Cookie 文件: {COOKIE_PATH}")
        print("请上传 cookies.txt 后再重试!")
        return

    # 1. Load the dataset and collapse it to one task per unique video.
    print(f"正在加载JSON数据文件: {JSON_DATA_PATH} ...")
    with open(JSON_DATA_PATH, 'r', encoding='utf-8') as fh:
        dataset = json.load(fh)
    print(f"已加载 {len(dataset)} 条数据")

    unique_videos = {}
    print("正在从JSON数据中筛选唯一视频任务 (Panda-70M + Multiple Choice) ...")
    for record in tqdm(dataset, desc="筛选唯一视频"):
        rel_path = record.get('video')
        link = record.get('URL')
        # Many QA pairs share one video; keep the first URL seen per path.
        if rel_path and link:
            unique_videos.setdefault(rel_path, link)

    tasks = [{'rel_path': path, 'url': link} for path, link in unique_videos.items()]
    print(f"筛选完成!共 {len(tasks)} 个唯一视频任务。")
    print(f"注: 原JSON中有 {len(dataset)} 条问答对,涉及 {len(tasks)} 个唯一视频。")

    # 2. Fan the tasks out over the thread pool and tally the outcomes.
    print(f"\n开始并发下载 (Workers={MAX_WORKERS}, Cookie已启用)...")
    results = dict.fromkeys(("success", "skipped", "failed", "error"), 0)
    with ThreadPoolExecutor(max_workers=MAX_WORKERS) as pool:
        pending = {pool.submit(process_video, task): task for task in tasks}
        for done in tqdm(as_completed(pending), total=len(tasks), desc="下载进度"):
            results[done.result()] += 1

    # 3. Summary.
    print("\n" + "=" * 40)
    print("处理完毕")
    print("=" * 40)
    print(f"JSON中问答对总数: {len(dataset)}")
    print(f"唯一视频任务数: {len(tasks)}")
    print(f"跳过(已存在): {results['skipped']}")
    print(f"下载成功: {results['success']}")
    print(f"下载失败: {results['failed'] + results['error']}")
    print(f"数据保存在: {os.path.abspath(SAVE_ROOT)}")
    # Extra statistic, only meaningful if something was downloaded.
    if results['success'] > 0:
        print(f"视频覆盖率: {results['success'] / len(tasks) * 100:.1f}%")


if __name__ == "__main__":
    main()