Update app.py
app.py CHANGED
@@ -202,32 +202,75 @@ def install_flash_attn():
 def initialize_system():
     optimize_gpu_settings()
 
-    from huggingface_hub import snapshot_download
-    snapshot_download(
-        repo_id="m-a-p/xcodec_mini_infer",
-        local_dir=
-    )
+    try:
+        # create the base directory structure
+        base_dir = os.path.abspath("./inference")
+        os.makedirs(base_dir, exist_ok=True)
+        os.makedirs(os.path.join(base_dir, "models"), exist_ok=True)
+
+        # change the working directory
+        os.chdir(base_dir)
+        logging.info(f"Working directory changed to: {os.getcwd()}")
+
+        from huggingface_hub import snapshot_download, hf_hub_download
+
+        # download xcodec_mini_infer
+        xcodec_dir = "xcodec_mini_infer"
+        os.makedirs(xcodec_dir, exist_ok=True)
+
+        # download the required files
+        snapshot_download(
+            repo_id="m-a-p/xcodec_mini_infer",
+            local_dir=xcodec_dir,
+            force_download=True
+        )
+
+        # download infer.py
+        infer_script = hf_hub_download(
+            repo_id="m-a-p/xcodec_mini_infer",
+            filename="infer.py",
+            force_download=True
+        )
+
+        # copy infer.py into the current directory
+        shutil.copy2(infer_script, "infer.py")
+
+        # download the YuE models
+        models = [
+            "m-a-p/YuE-s1-7B-anneal-jp-kr-cot",
+            "m-a-p/YuE-s1-7B-anneal-en-cot",
+            "m-a-p/YuE-s1-7B-anneal-zh-cot",
+            "m-a-p/YuE-s2-1B-general"
+        ]
+
+        for model in models:
+            model_name = model.split('/')[-1]
+            model_path = os.path.join("models", model_name)
+            snapshot_download(
+                repo_id=model,
+                local_dir=model_path,
+                force_download=True
+            )
+
+        # verify that the required files exist
+        required_files = [
+            "infer.py",
+            os.path.join(xcodec_dir, "config.json"),
+            os.path.join(xcodec_dir, "vocal_decoder.pth"),
+            os.path.join(xcodec_dir, "inst_decoder.pth")
+        ]
+
+        for file_path in required_files:
+            if not os.path.exists(file_path):
+                raise FileNotFoundError(f"Required file not found: {file_path}")
+            else:
+                file_size = os.path.getsize(file_path)
+                logging.info(f"Verified {file_path}: {file_size} bytes")
+
+        logging.info("System initialization completed successfully")
+
+    except Exception as e:
+        logging.error(f"Initialization error: {e}")
         raise
 
 @lru_cache(maxsize=100)
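For reference, the new `initialize_system()` above is expected to leave a specific layout under `./inference`. The helper below is a minimal standalone sketch for checking that layout; the paths come from the diff, while `EXPECTED` and `check_layout` are hypothetical names introduced here only for illustration and are not part of app.py.

```python
import os

# Expected layout under ./inference after initialize_system() completes.
# Paths mirror the diff above; this checker is illustrative, not app.py code.
EXPECTED = [
    "infer.py",
    os.path.join("xcodec_mini_infer", "config.json"),
    os.path.join("xcodec_mini_infer", "vocal_decoder.pth"),
    os.path.join("xcodec_mini_infer", "inst_decoder.pth"),
    os.path.join("models", "YuE-s1-7B-anneal-jp-kr-cot"),
    os.path.join("models", "YuE-s1-7B-anneal-en-cot"),
    os.path.join("models", "YuE-s1-7B-anneal-zh-cot"),
    os.path.join("models", "YuE-s2-1B-general"),
]

def check_layout(base_dir="./inference"):
    """Print any expected file or directory that is missing under base_dir."""
    missing = [p for p in EXPECTED if not os.path.exists(os.path.join(base_dir, p))]
    for path in missing:
        print(f"missing: {path}")
    return not missing

if __name__ == "__main__":
    print("layout ok" if check_layout() else "layout incomplete")
```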
@@ -305,21 +348,22 @@ def infer(genre_txt_content, lyrics_txt_content, num_segments, max_new_tokens):
     os.makedirs(output_dir, exist_ok=True)
     empty_output_folder(output_dir)
 
-    # revised command: unsupported arguments removed
     command = [
-        "python", "infer.py",
+        "python", "./infer.py",  # changed to a relative path
         "--stage1_model", model_path,
         "--stage2_model", "m-a-p/YuE-s2-1B-general",
         "--genre_txt", genre_txt_path,
         "--lyrics_txt", lyrics_txt_path,
         "--run_n_segments", str(actual_num_segments),
         "--stage2_batch_size", "16",
-        "--output_dir",
+        "--output_dir", "./output",
         "--cuda_idx", "0",
         "--max_new_tokens", str(actual_max_tokens),
-        "--disable_offload_model"
+        "--disable_offload_model"
     ]
 
+
+
     env = os.environ.copy()
     if torch.cuda.is_available():
         env.update({
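The hunk above only builds the argument list and copies the environment; the call that actually runs `infer.py` sits outside the diff. The snippet below is a minimal sketch of how such a command is typically executed, assuming `subprocess` is used; the truncated argument list and the `CUDA_VISIBLE_DEVICES` entry are stand-ins, not values taken from app.py.

```python
import os
import subprocess

# Illustrative sketch only: a shortened command and an assumed environment
# variable stand in for the full command and env.update({...}) in app.py.
command = ["python", "./infer.py", "--output_dir", "./output"]

env = os.environ.copy()
env.update({"CUDA_VISIBLE_DEVICES": "0"})  # assumption, not from the diff

result = subprocess.run(command, env=env, capture_output=True, text=True)
if result.returncode != 0:
    # Surface the child's stderr so failures inside infer.py show up in the logs.
    raise RuntimeError(f"infer.py failed:\n{result.stderr}")
```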
@@ -496,7 +540,7 @@ def main():
     with gr.Row(elem_id="header"):
         gr.Markdown(
             """
-            # 🎵 AI Song Creator
+            # 🎵 AI Song Creator 'Open SUNO'
             ### Transform Your Lyrics into Complete Songs with Music
             Create professional songs from your lyrics in multiple languages
             """
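This last hunk only swaps the Markdown title string. For context, a minimal sketch of the kind of `gr.Blocks` layout it sits in is shown below; only the `gr.Row(elem_id="header")` and the Markdown text come from the diff, the surrounding structure is an assumption.

```python
import gradio as gr

# Minimal layout sketch; only the header row and Markdown text mirror the diff.
with gr.Blocks() as demo:
    with gr.Row(elem_id="header"):
        gr.Markdown(
            """
            # 🎵 AI Song Creator 'Open SUNO'
            ### Transform Your Lyrics into Complete Songs with Music
            Create professional songs from your lyrics in multiple languages
            """
        )

if __name__ == "__main__":
    demo.launch()
```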