Update app.py
app.py CHANGED

@@ -9,6 +9,7 @@ import numpy as np
 from diffusers import BriaFiboPipeline
 from diffusers.modular_pipelines import ModularPipeline
 import requests
+import io, base64  # base64 is used in generate_json_prompt below; assumed not imported earlier in the file
 
 MAX_SEED = np.iinfo(np.int32).max
 dtype = torch.bfloat16
@@ -44,25 +45,33 @@ def get_default_negative_prompt(existing_json: dict) -> str:
 
 def generate_json_prompt(
     prompt,
+    json_prompt=None,  # default added: several call sites omit this argument
     prompt_inspire_image=None,
     seed=42
 ):
-
-
+
     api_key = os.environ.get("BRIA_API_TOKEN")
 
-
-
-        "seed": seed,
-
-
-
-
-
-
+    url = "https://engine.prod.bria-api.com/v2/structured_prompt/generate/pro"
+
+    payload = {"seed": seed, "sync": True}
+    if json_prompt:
+        payload["structured_prompt"] = json_prompt  # fixed: was the undefined name `structured_prompt`
+    if prompt:
+        payload["prompt"] = prompt
+    if prompt_inspire_image:
+        buffered = io.BytesIO()
+        prompt_inspire_image.save(buffered, format="PNG")
+        image_bytes = base64.b64encode(buffered.getvalue()).decode('utf-8')
+        payload["images"] = [image_bytes]
+
+    headers = {"Content-Type": "application/json", "api_token": api_key}
+
     response = requests.post(url, json=payload, headers=headers)
+
     data = response.json()
-
+
+    return data["result"]["structured_prompt"]
 
 @spaces.GPU(duration=300)
 def generate_image(
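For reference, a minimal usage sketch of the reworked helper, assuming BRIA_API_TOKEN is set in the environment and Pillow is installed; the input file name is a hypothetical placeholder:

```python
from PIL import Image

# Text only: the endpoint expands a free-form prompt into a structured JSON prompt.
structured = generate_json_prompt(prompt="a red bicycle leaning on a brick wall", seed=42)

# Image-inspired: a PIL image is PNG-encoded to base64 and sent in payload["images"].
inspiration = Image.open("inspiration.png")  # hypothetical input file
structured_from_image = generate_json_prompt(
    prompt="a red bicycle leaning on a brick wall",
    prompt_inspire_image=inspiration,
    seed=42,
)
```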
@@ -86,7 +95,7 @@ def generate_image(
     if not prompt_in_json or prompt_in_json == "":
         if prompt_inspire_image is not None:
             #output = vlm_pipe(image=prompt_inspire_image, prompt="")
-            json_prompt = generate_json_prompt(prompt=prompt, seed=seed)
+            json_prompt = generate_json_prompt(prompt=prompt, prompt_inspire_image=prompt_inspire_image, seed=seed)
         else:
             # output = vlm_pipe(prompt=prompt)
             json_prompt = generate_json_prompt(prompt=prompt, seed=seed)
@@ -147,12 +156,14 @@ def refine_prompt(
         if isinstance(refine_json, (dict, list))
         else str(refine_json)
     )
-    output = vlm_pipe(json_prompt=json_prompt_str, prompt=refine_instruction)
-    json_prompt = output.values["json_prompt"]
+    # output = vlm_pipe(json_prompt=json_prompt_str, prompt=refine_instruction)
+    # json_prompt = output.values["json_prompt"]
+    json_prompt = generate_json_prompt(prompt=refine_instruction, json_prompt=json_prompt_str, seed=seed)
 
     if negative_prompt:
-        neg_output = vlm_pipe(prompt=negative_prompt)
-        neg_json_prompt = neg_output.values["json_prompt"]
+        # neg_output = vlm_pipe(prompt=negative_prompt)
+        # neg_json_prompt = neg_output.values["json_prompt"]
+        neg_json_prompt = generate_json_prompt(prompt=negative_prompt, seed=seed)
     else:
         neg_json_prompt = get_default_negative_prompt(json.loads(json_prompt))
 
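And a sketch of the refine round-trip that the last hunk now routes through the API instead of the local VLM pipeline; the prompts are illustrative:

```python
import json

# First pass: get a structured prompt from free text.
json_prompt = generate_json_prompt(prompt="a foggy harbor at dawn", seed=7)

# Refine pass: send the structured prompt back with an instruction, mirroring
# refine_prompt above; the endpoint returns an updated structured prompt.
json_prompt_str = (
    json.dumps(json_prompt)
    if isinstance(json_prompt, (dict, list))
    else str(json_prompt)
)
refined = generate_json_prompt(
    prompt="make the boats red",  # refine instruction
    json_prompt=json_prompt_str,
    seed=7,
)
```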