"""Embed a local audio dataset with CLAP, then upload vectors to Qdrant and files to S3."""
import gc
import hashlib
import os
from glob import glob
from pathlib import Path

import librosa
import torch
from diskcache import Cache
from dotenv import load_dotenv
from qdrant_client import QdrantClient
from qdrant_client.http import models
from tqdm import tqdm
from transformers import ClapModel, ClapProcessor

from s3_utils import s3_auth, upload_file_to_bucket

# Load .env exactly once, before any environment variable is read.
# (The original imported dotenv/os twice and called load_dotenv() twice.)
load_dotenv()

# Local paths (machine-specific; adjust per deployment).
CACHE_FOLDER = '/home/nahia/audio'
KAGGLE_DB_PATH = '/home/nahia/Documents/audio/actor/Actor_01'

# AWS credentials are required — fail fast with KeyError if missing.
AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
S3_BUCKET = "synthia-research"
S3_FOLDER = "huggingface_spaces_demo"
AWS_REGION = "eu-west-3"

# Qdrant settings are optional at import time (getenv returns None if unset).
QDRANT_URL = os.getenv('QDRANT_URL')
QDRANT_KEY = os.getenv('QDRANT_KEY')

s3 = s3_auth(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_REGION)
| |
|
| |
|
| | |
def get_md5(fpath):
    """Return the hexadecimal MD5 digest of the file at *fpath*.

    The file is streamed in 8 KiB chunks so arbitrarily large files
    can be hashed without loading them fully into memory.
    """
    digest = hashlib.md5()
    with open(fpath, "rb") as fh:
        # iter() with a b"" sentinel stops at EOF, like the walrus-read loop.
        for block in iter(lambda: fh.read(8192), b""):
            digest.update(block)
    return digest.hexdigest()
| |
|
| |
|
def get_audio_embedding(model, audio_file, cache):
    """Return the CLAP audio embedding for *audio_file*, memoized in *cache*.

    The cache key combines the model identifier with the file's MD5, so a
    changed file or a different model both cause a re-embed.

    NOTE(review): this relies on the module-level ``processor`` global even
    though ``model`` is passed in — confirm they always come from the same
    checkpoint.
    """
    file_key = f"{model.config._name_or_path}" + get_md5(audio_file)
    if file_key in cache:
        embedding = cache[file_key]
    else:
        # CLAP expects 48 kHz input; librosa resamples on load.
        y, sr = librosa.load(audio_file, sr=48000)
        inputs = processor(audios=y, sampling_rate=sr, return_tensors="pt")
        # Inference only: no_grad avoids building (and caching) an autograd graph.
        with torch.no_grad():
            embedding = model.get_audio_features(**inputs)[0]
        # Free transient buffers between files to keep peak memory down.
        gc.collect()
        torch.cuda.empty_cache()
        cache[file_key] = embedding
    return embedding
| |
|
| |
|
| |
|
| | |
| | |
# --- Model: CLAP checkpoint plus its paired preprocessor --------------------
print("[INFO] Loading the model...")
model_name = "laion/larger_clap_general"
model = ClapModel.from_pretrained(model_name)
processor = ClapProcessor.from_pretrained(model_name)

# --- Disk-backed embedding cache (created on first run) ---------------------
os.makedirs(CACHE_FOLDER, exist_ok=True)
cache = Cache(CACHE_FOLDER)
| |
|
| | |
# --- Vector database ---------------------------------------------------------
client = QdrantClient(QDRANT_URL, api_key=QDRANT_KEY)
print("[INFO] Client created...")

print("[INFO] Creating qdrant data collection...")
# Idempotent: only create the collection when it does not already exist.
if not client.collection_exists("demo_spaces_db"):
    vector_params = models.VectorParams(
        size=model.config.projection_dim,  # must match the CLAP embedding width
        distance=models.Distance.COSINE,
    )
    client.create_collection(
        collection_name="demo_spaces_db",
        vectors_config=vector_params,
    )
| |
|
| | |
# Collect every .wav one directory below the dataset root.
# glob() already returns a list — the original wrapped it in a redundant
# identity comprehension.
audio_files = glob(os.path.join(KAGGLE_DB_PATH, '*/*.wav'))
chunk_size, idx = 1, 0
# Ceiling division: a trailing partial chunk still counts as one chunk
# (the original truncated, undercounting when len is not a multiple).
total_chunks = -(-len(audio_files) // chunk_size)
| |
|
| | |
print("Uploading on DB + S3")
# Process files in chunks: each chunk is embedded, pushed to S3, and the
# corresponding points are uploaded to Qdrant in one batch.
for i in tqdm(range(0, len(audio_files), chunk_size),
              desc="[INFO] Uploading data records to data collection..."):
    chunk = audio_files[i:i + chunk_size]
    records = []
    for audio_file in chunk:
        embedding = get_audio_embedding(model, audio_file, cache)
        s3key = f'{S3_FOLDER}/{Path(audio_file).name}'
        # Context manager closes the handle; the original leaked one
        # open file descriptor per uploaded audio file.
        with open(audio_file, 'rb') as file_obj:
            upload_file_to_bucket(s3, file_obj, S3_BUCKET, s3key)
        records.append(
            models.PointStruct(
                id=idx, vector=embedding,
                payload={
                    "audio_path": audio_file,
                    "audio_s3url": f"https://{S3_BUCKET}.s3.amazonaws.com/{s3key}",
                    # NOTE(review): this stores the bare file name, not a
                    # style label — confirm downstream consumers expect that.
                    "style": audio_file.split('/')[-1]}
            )
        )
        # (Removed a bare f-string here — it was a no-op expression statement.)
        idx += 1
    client.upload_points(
        collection_name="demo_spaces_db",
        points=records
    )
print("[INFO] Successfully uploaded data records to data collection!")
| |
|
| |
|
| | |
# Flush pending writes and release the on-disk embedding cache.
cache.close()