# Hugging Face Space: caption an image with BLIP, turn the caption into a
# chat persona, test it against a remote chat API, and generate FLUX images.
#Error communicating with the chatbot API: Object of type Textbox is not JSON serializable
import gradio as gr
import requests
from PIL import Image
import os
import io
from transformers import BlipProcessor, BlipForConditionalGeneration
import time
from gradio_client import Client
# API token is read from the environment; never hard-code it in a public Space.
token = os.getenv('HF_TOKEN')
# Model/Space IDs: BLIP for image captioning, a hosted Space as the chat backend.
blipper = "Salesforce/blip-image-captioning-large"
chatter = "K00B404/transcript_image_generator"
# Set your API endpoint and authorization details
API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
headers = {"Authorization": f"Bearer {token}"}  # Replace with your actual token
timeout = 60  # seconds (per FLUX inference request)
# Load BLIP model for image captioning (downloaded once at startup)
processor = BlipProcessor.from_pretrained(blipper)
model = BlipForConditionalGeneration.from_pretrained(blipper)
# Initialize the API client for the chatbot
chatbot_client = Client(chatter)
def caption_to_persona(caption):
    """Build a system-prompt persona string from an image caption.

    BLIP captions frequently start with the artifact token 'arafed'; the
    prefixes 'arafed image of ' and 'arafed ' are both normalized to 'a '
    before the caption is embedded in the persona template.
    """
    cleaned = caption.replace('arafed image of ', 'a ')
    cleaned = cleaned.replace('arafed ', 'a ')
    return f"""You are {cleaned}
Your personality, speech patterns, knowledge, and behavior should reflect this description.
When responding to users:
1. Stay in character at all times
2. Use speech patterns and vocabulary that would be natural for your character
3. Reference experiences, emotions, and perspectives that align with your character's background
4. Maintain a consistent personality throughout the conversation
Additional context: Your responses should vary in length based on what would be natural for your character.
Some characters might be terse while others might be more verbose."""
def helper_llm(message, system_prompt, max_tokens=256, temperature=0.5, top_p=0.95):
    """Send a single message to the remote chat API under the given system prompt.

    This was a verbatim duplicate of chat_with_persona()'s API call; it now
    delegates so the predict call and its error handling live in one place.
    The remote /chat endpoint does not receive chat history, so an empty
    history is passed.

    Returns:
        The model's reply string, or an error-description string on failure.
    """
    return chat_with_persona(message, [], system_prompt, max_tokens, temperature, top_p)
def generate_persona(img, min_len, max_len, persona_detail_level):
    """Caption an uploaded image with BLIP and expand it into a chat persona.

    Args:
        img: Filepath of the uploaded character image.
        min_len: Minimum BLIP caption length (tokens).
        max_len: Maximum BLIP caption length (tokens).
        persona_detail_level: "Basic", "Detailed", or "Comprehensive" —
            controls how much personality boilerplate is appended.

    Returns:
        (caption, persona, timing_message) tuple matching the Gradio outputs.
    """
    # Open inside a context manager so the underlying file handle is closed
    # (PIL keeps it open lazily otherwise); convert() forces the pixel load.
    with Image.open(img) as im:
        raw_image = im.convert('RGB')
    # Downscale to 256x256 to keep BLIP inference fast.
    raw_image = raw_image.resize((256, 256), Image.Resampling.LANCZOS)
    inputs = processor(raw_image, return_tensors="pt")

    # Generate caption with the requested length constraints; time the run.
    start = time.time()
    out = model.generate(**inputs, min_length=min_len, max_length=max_len)
    caption = processor.decode(out[0], skip_special_tokens=True)

    # Enrich the raw caption according to the selected detail level.
    if persona_detail_level == "Basic":
        enhanced_caption = caption
    elif persona_detail_level == "Detailed":
        enhanced_caption = f"{caption} You have a distinct personality with unique mannerisms and speech patterns."
    else:  # Comprehensive
        enhanced_caption = f"{caption} You have a complex backstory, rich emotional depth, unique perspectives, and distinctive speech patterns that set you apart."

    # Generate persona from caption
    persona = caption_to_persona(enhanced_caption)

    # Calculate processing time
    end = time.time()
    total_time = f"Processing time: {end - start:.2f} seconds"

    # Ask the helper LLM (acting as a dramaturg) to condense the persona into
    # compact role instructions for the "actor".
    system_prompt = "You are a Expert Dramaturg and your task is to use the input persona information and write a 'Role' description as compact instuctions for the actor"
    persona = helper_llm(persona, system_prompt=system_prompt)
    return caption, persona, total_time
def chat_with_persona(message, history, system_message, max_tokens, temperature, top_p):
    """Forward one user message to the remote character-chat API.

    ``history`` matches Gradio's chat-callback signature but is not sent to
    the endpoint — the remote /chat API call here is stateless.

    Returns the assistant's reply, or an error-description string on failure.
    """
    request_kwargs = dict(
        message=message,
        system_message=system_message,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
    )
    try:
        # Call the API with the current message and system prompt (persona)
        return chatbot_client.predict(api_name="/chat", **request_kwargs)
    except Exception as e:
        return f"Error communicating with the chatbot API: {str(e)}"
def generate_flux_image(final_prompt, is_negative, steps, cfg_scale, seed, strength):
    """Request an image from the FLUX model via the Hugging Face inference API.

    Posts the prompt/settings payload, saves the returned image to
    ``./output_<seed>.png``, and returns (path, seed-as-string, prompt).
    A non-200 response raises gr.Error (503 gets a "model is being loaded"
    message); undecodable image bytes yield (None, None, None).
    """
    payload = {
        "inputs": final_prompt,
        "is_negative": is_negative,
        "steps": steps,
        "cfg_scale": cfg_scale,
        "seed": seed,
        "strength": strength,
    }
    response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)

    if response.status_code != 200:
        print(f"Error: Failed to get image. Response status: {response.status_code}")
        print(f"Response content: {response.text}")
        # 503 means the hosted model is still warming up on the endpoint.
        if response.status_code == 503:
            raise gr.Error(f"{response.status_code} : The model is being loaded")
        raise gr.Error(f"{response.status_code}")

    try:
        image = Image.open(io.BytesIO(response.content))
        # Persist the result so Gradio can serve it by filepath.
        output_path = f"./output_{seed}.png"
        image.save(output_path)
        print(f'\033[1mGeneration completed!\033[0m (Prompt: {final_prompt})')
        return output_path, str(seed), final_prompt
    except Exception as e:
        print(f"Error when trying to open the image: {e}")
        return None, None, None
# Create Gradio interface with tabs
with gr.Blocks(title="Image Character Persona Generator") as iface:
# Store the generated persona in a state variable to share between tabs
persona_state = gr.State("")
with gr.Tabs():
# First tab: Persona Generator
with gr.TabItem("Generate Persona"):
gr.Markdown("# Image Character Persona Generator")
gr.Markdown("Upload an image containing a character to generate an LLM persona based on that character.")
with gr.Row():
with gr.Column():
input_image = gr.Image(type='filepath', label='Character Image')
min_length = gr.Slider(label='Minimum Description Length', minimum=10, maximum=500, value=50, step=5)
max_length = gr.Slider(label='Maximum Description Length', minimum=50, maximum=1000, value=200, step=10)
detail_level = gr.Radio(["Basic", "Detailed", "Comprehensive"], label="Persona Detail Level", value="Comprehensive")
submit_btn = gr.Button("Generate Character Persona")
with gr.Column():
caption_output = gr.Textbox(label='Character Description (Base Caption)')
persona_output = gr.Textbox(label='LLM Character Persona Prompt', lines=10)
time_output = gr.Textbox(label='Processing Information')
gr.Markdown("""
## How to use this tool
1. Upload an image containing a character (real or fictional)
2. Adjust the sliders to control description length
3. Select detail level for the persona
4. Click "Generate Character Persona"
5. Switch to the "Test Persona" tab to chat with your character
6. create similar images inspired by the 'role'
""")
# Second tab: Test Character Chat
with gr.TabItem("Test Persona"):
gr.Markdown("# Test Your Character Persona")
gr.Markdown("Chat with an AI using your generated character persona to see how it behaves.")
with gr.Row():
with gr.Column():
system_prompt = gr.Textbox(label="Character Persona (System Prompt)", lines=8)
with gr.Accordion("Advanced Settings", open=False):
max_tokens = gr.Slider(label="Max Tokens", minimum=50, maximum=2048, value=512, step=1)
temperature = gr.Slider(label="Temperature", minimum=0.1, maximum=1.5, value=0.7, step=0.1)
top_p = gr.Slider(label="Top P", minimum=0.1, maximum=1.0, value=0.95, step=0.05)
with gr.Column():
chatbot = gr.Chatbot(label="Conversation with Character")
msg = gr.Textbox(label="Your message")
clear_btn = gr.Button("Clear Conversation")
# Handle sending messages in the chat
def respond(message, chat_history, system_message, max_tokens, temperature, top_p):
if not message.strip():
return "", chat_history
# Add user message to history
chat_history.append((message, ""))
# Get response from API
bot_response = chat_with_persona(message, chat_history, system_message, max_tokens, temperature, top_p)
# Update the last response in history
chat_history[-1] = (message, bot_response)
return "", chat_history
# Clear chat history
def clear_chat():
return []
# Connect message input to chat response
msg.submit(respond,
[msg, chatbot, system_prompt, max_tokens, temperature, top_p],
[msg, chatbot])
clear_btn.click(clear_chat, outputs=chatbot)
# New Tab 3: Flux Image Generation
with gr.Tab("Flux Image Generation"):
gr.Markdown("### Flux Image Generation")
final_prompt = gr.Textbox(label="Prompt", lines=2, placeholder="Enter your prompt for Flux...")
is_negative = gr.Checkbox(label="Use Negative Prompt", value=False)
steps = gr.Slider(minimum=10, maximum=100, step=1, value=50, label="Steps")
cfg_scale = gr.Slider(minimum=1, maximum=20, step=1, value=7, label="CFG Scale")
seed = gr.Number(value=42, label="Seed")
strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, value=0.8, label="Strength")
generate_button = gr.Button("Generate Flux Image")
output_image = gr.Image(label="Generated Image")
output_seed = gr.Textbox(label="Seed Used")
output_prompt = gr.Textbox(label="Prompt Used")
generate_button.click(
fn=generate_flux_image,
inputs=[final_prompt, is_negative, steps, cfg_scale, seed, strength],
outputs=[output_image, output_seed, output_prompt]
)
# Function to update system prompt in Test tab when persona is generated
def update_persona_state(caption, persona, time_output):
return persona, persona
# Connect the persona generator to update the system prompt
submit_btn.click(fn=generate_persona,
inputs=[input_image, min_length, max_length, detail_level],
outputs=[caption_output, persona_output, time_output])
# Update the system prompt in Test tab when persona is generated
submit_btn.click(fn=update_persona_state,
inputs=[caption_output, persona_output, time_output],
outputs=[persona_state, system_prompt])
# Launch the interface
iface.launch() |