Update handle text and generate
app.py
CHANGED
@@ -91,6 +91,16 @@ def get_chat_response(chatbot, task_history):
 
 def handle_text_input(history, task_history, text):
     """Handle text input from the user."""
+    # Check whether the input field is empty
+    if not text:
+        # If the input field is empty, send a predefined query
+        text = "Describe the image for me..."
+        # Update the chat history and task history with the predefined query
+        history = history + [(format_text(text), None)]
+        task_history = task_history + [(text, None)]
+        # Call get_chat_response to generate a response
+        return get_chat_response(history, task_history)
+
     task_text = text
     if len(text) >= 2 and text[-1] in PUNCTUATION and text[-2] not in PUNCTUATION:
         task_text = text[:-1]
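The updated handler adds two normalisation rules for the submitted text: an empty textbox falls back to a predefined prompt, and a single trailing punctuation mark is stripped from the task text. The sketch below isolates just those two rules; normalise_query is a hypothetical helper and the PUNCTUATION value is an assumed placeholder rather than the constant defined elsewhere in app.py (in the real handler the empty-input branch also appends to both histories and calls get_chat_response directly).

# Minimal sketch of the two input rules above; the helper name and the
# PUNCTUATION value are illustrative placeholders, not taken from app.py.
PUNCTUATION = "!?.;:,！？。；：，"  # assumed set of sentence-ending marks

def normalise_query(text):
    """Return the query text that would be sent on to generation."""
    if not text:
        # Empty textbox: fall back to the predefined default prompt
        return "Describe the image for me..."
    # Strip one trailing punctuation mark, but keep doubled marks intact
    if len(text) >= 2 and text[-1] in PUNCTUATION and text[-2] not in PUNCTUATION:
        return text[:-1]
    return text

assert normalise_query("") == "Describe the image for me..."
assert normalise_query("What is in this photo?") == "What is in this photo"
assert normalise_query("Wow!!") == "Wow!!"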
@@ -113,22 +123,33 @@ def clear_history(task_history):
     task_history.clear()
     return []
 
-def handle_regeneration(chatbot, task_history):
+def handle_regeneration(chatbot, task_history, input_field):
     """Handle the regeneration of the last response."""
     print("Regenerate clicked")
     print("Before:", task_history, chatbot)
     if not task_history:
         return chatbot
-    item = task_history[-1]
-    if item[1] is None:
-        return chatbot
-    task_history[-1] = (item[0], None)
-    chatbot_item = chatbot.pop(-1)
-    if chatbot_item[0] is None:
-        chatbot[-1] = (chatbot[-1][0], None)
+
+    # Check whether the input field is empty
+    if not input_field.value:
+        # If the input field is empty, send a predefined query
+        predefined_query = "Describe this image for me..."
+        # Update the input field with the predefined query
+        input_field.update(value=predefined_query)
+        # Run the normal text-input handling
+        handle_text_input(chatbot, task_history, predefined_query)
     else:
-        chatbot.append((chatbot_item[0], None))
-    print("After:", task_history, chatbot)
+        item = task_history[-1]
+        if item[1] is None:
+            return chatbot
+        task_history[-1] = (item[0], None)
+        chatbot_item = chatbot.pop(-1)
+        if chatbot_item[0] is None:
+            chatbot[-1] = (chatbot[-1][0], None)
+        else:
+            chatbot.append((chatbot_item[0], None))
+        print("After:", task_history, chatbot)
+
     return get_chat_response(chatbot, task_history)
 
 # Custom CSS
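The else branch above rewinds the last exchange so that the following get_chat_response call produces a fresh reply. A standalone trace of that rewind, using toy history data and a hypothetical helper name, might look like this:

# Standalone trace of the history rewind in the else branch above.
# rewind_last_reply is a hypothetical name and the toy data is illustrative.
def rewind_last_reply(chatbot, task_history):
    item = task_history[-1]
    if item[1] is None:
        # Nothing has been answered yet, so there is nothing to regenerate
        return chatbot, task_history
    # Clear the recorded answer so the next generation call recomputes it
    task_history[-1] = (item[0], None)
    chatbot_item = chatbot.pop(-1)
    if chatbot_item[0] is None:
        # The popped row held only a bot reply; blank the previous row's reply
        chatbot[-1] = (chatbot[-1][0], None)
    else:
        # The popped row held the user turn too; re-add it without the reply
        chatbot.append((chatbot_item[0], None))
    return chatbot, task_history

chat = [("Describe the image", "A cat sitting on a sofa.")]
tasks = [("Describe the image", "A cat sitting on a sofa.")]
print(rewind_last_reply(chat, tasks))
# -> ([('Describe the image', None)], [('Describe the image', None)])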
@@ -194,14 +215,14 @@ with gr.Blocks(css=css) as demo:
 
     gr.Markdown("### Key Features:\n- **Strong Performance**: Surpasses existing LVLMs on multiple English benchmarks including Zero-shot Captioning and VQA.\n- **Multi-lingual Support**: Supports English, Chinese, and multi-lingual conversation.\n- **High Resolution**: Utilizes 448*448 resolution for fine-grained recognition and understanding.")
     submit_btn.click(handle_text_input, [chatbot, task_history, query], [chatbot, task_history]).then(
-
-
+        get_chat_response, [chatbot, task_history], [chatbot], show_progress=True
+    )
 
 
 
     submit_btn.click(clear_input, [], [query])
     clear_btn.click(clear_history, [task_history], [chatbot], show_progress=True)
-    regen_btn.click(handle_regeneration, [chatbot, task_history], [chatbot], show_progress=True)
+    regen_btn.click(handle_regeneration, [chatbot, task_history, query], [chatbot], show_progress=True)
     upload_btn.upload(handle_file_upload, [chatbot, task_history, upload_btn], [chatbot, task_history], show_progress=True)
 
 
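For context on how these handlers are typically wired up, here is a minimal, self-contained Gradio Blocks sketch of the same pattern: the submit click records the user turn and then chains the generation step with .then(), and the regenerate click also receives the textbox value as an extra input. Handler bodies and the stub reply are placeholders; only the wiring pattern mirrors the lines above.

import gradio as gr

# Placeholder handlers standing in for handle_text_input / get_chat_response /
# handle_regeneration; only the event wiring mirrors the diff above.
def add_user_turn(history, state, text):
    text = text or "Describe the image for me..."
    return history + [(text, None)], state + [(text, None)]

def generate(history, state):
    # Stub reply instead of a real model call
    history[-1] = (history[-1][0], "stub model reply")
    return history

def regenerate(history, state, text):
    if not history:
        return history
    history[-1] = (history[-1][0], None)
    return generate(history, state)

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    task_state = gr.State([])
    query = gr.Textbox()
    submit_btn = gr.Button("Submit")
    regen_btn = gr.Button("Regenerate")

    # Submit: record the turn, then chain the generation step
    submit_btn.click(add_user_turn, [chatbot, task_state, query],
                     [chatbot, task_state]).then(
        generate, [chatbot, task_state], [chatbot], show_progress=True
    )
    # Regenerate: the textbox value is passed to the handler as a plain string
    regen_btn.click(regenerate, [chatbot, task_state, query],
                    [chatbot], show_progress=True)

# demo.launch()  # uncomment to try the wiring locally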