rahul7star committed
Commit 488a414 · verified · 1 Parent(s): a58261c

Update app.py

Files changed (1)
  app.py  +70 -0
app.py CHANGED
@@ -12,6 +12,71 @@ from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3
 import math
 import os
 
+import os
+import spaces
+import torch
+from diffusers import AutoencoderKLWan, WanPipeline, WanImageToVideoPipeline, UniPCMultistepScheduler
+from diffusers.utils import export_to_video
+import gradio as gr
+import tempfile
+from huggingface_hub import hf_hub_download
+import numpy as np
+from PIL import Image
+import random
+
+
+HF_MODEL = os.environ.get("HF_UPLOAD_REPO", "rahul7star/qwen-edit-img-repo")
+
+# --- CPU-only upload function ---
+def upload_image_and_prompt_cpu(input_image, prompt_text) -> str:
+    from datetime import datetime
+    import tempfile, os, uuid, shutil
+    from huggingface_hub import HfApi
+
+    # Instantiate the HfApi class
+    api = HfApi()
+    print(prompt_text)
+
+    today_str = datetime.now().strftime("%Y-%m-%d")
+    unique_subfolder = f"Upload-Image-{uuid.uuid4().hex[:8]}"
+    hf_folder = f"{today_str}/{unique_subfolder}"
+
+    # Save image temporarily
+    with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp_img:
+        if isinstance(input_image, str):
+            shutil.copy(input_image, tmp_img.name)
+        else:
+            input_image.save(tmp_img.name, format="PNG")
+        tmp_img_path = tmp_img.name
+
+    # Upload image using HfApi instance
+    api.upload_file(
+        path_or_fileobj=tmp_img_path,
+        path_in_repo=f"{hf_folder}/input_image.png",
+        repo_id=HF_MODEL,
+        repo_type="model",
+        token=os.environ.get("HUGGINGFACE_HUB_TOKEN")
+    )
+
+    # Save prompt as summary.txt
+    summary_file = tempfile.NamedTemporaryFile(delete=False, suffix=".txt").name
+    with open(summary_file, "w", encoding="utf-8") as f:
+        f.write(prompt_text)
+
+    api.upload_file(
+        path_or_fileobj=summary_file,
+        path_in_repo=f"{hf_folder}/summary.txt",
+        repo_id=HF_MODEL,
+        repo_type="model",
+        token=os.environ.get("HUGGINGFACE_HUB_TOKEN")
+    )
+
+    # Cleanup
+    os.remove(tmp_img_path)
+    os.remove(summary_file)
+
+    return hf_folder
+
 # --- Model Loading ---
 dtype = torch.bfloat16
 device = "cuda" if torch.cuda.is_available() else "cpu"
@@ -77,6 +142,11 @@ def _append_prompt(base: str, extra: str) -> str:
 
 def generate_single_view(input_images, prompt, seed, num_inference_steps, true_guidance_scale):
     generator = torch.Generator(device=device).manual_seed(seed)
+    print(prompt)
+    try:
+        upload_image_and_prompt_cpu(input_images, prompt)
+    except Exception as e:
+        print("Upload failed:", e)
     result = pipe(
         image=input_images if input_images else None,
         prompt=prompt,
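
For context (not part of the commit): the new helper boils down to two HfApi.upload_file calls into a dated subfolder of a model repo. Below is a minimal standalone sketch of that pattern, assuming a write token in HUGGINGFACE_HUB_TOKEN; the repo id, local image path, and prompt text are placeholders.

# Standalone sketch of the upload pattern used by upload_image_and_prompt_cpu.
import os
import uuid
from datetime import datetime
from huggingface_hub import HfApi

repo_id = os.environ.get("HF_UPLOAD_REPO", "your-username/your-upload-repo")  # placeholder repo id
token = os.environ.get("HUGGINGFACE_HUB_TOKEN")

api = HfApi()
hf_folder = f"{datetime.now():%Y-%m-%d}/Upload-Image-{uuid.uuid4().hex[:8]}"

# Upload a local PNG (placeholder path) as input_image.png in the dated subfolder.
api.upload_file(
    path_or_fileobj="example_input.png",          # placeholder local file
    path_in_repo=f"{hf_folder}/input_image.png",
    repo_id=repo_id,
    repo_type="model",
    token=token,
)

# Upload the prompt text as summary.txt, passing encoded bytes directly.
api.upload_file(
    path_or_fileobj="example prompt".encode("utf-8"),
    path_in_repo=f"{hf_folder}/summary.txt",
    repo_id=repo_id,
    repo_type="model",
    token=token,
)
print("Uploaded under:", hf_folder)

Passing bytes for the prompt skips the temporary summary.txt file the helper writes; otherwise the behavior matches the committed code.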