Spaces:
Paused
Paused
import os

import google.generativeai as genai
import gradio as gr
from dotenv import load_dotenv

# Load a local .env file (if any) so the key below can live outside the repo.
load_dotenv()
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
# model_name = "gemini-1.5-flash-exp-0827"

# Static markdown fragments rendered in the Gradio UI.
# NOTE(review): the emoji in these strings appear mojibake-encoded (e.g. "๐ฎ");
# they are reproduced verbatim here -- confirm the original UTF-8 characters.
TITLE = """<h1 align="center">๐ฎChat with Gemini 1.5๐ฅ</h1>"""

NOTICE = """
**Notices** ๐:
- This app is still in development
- Some features may not work as expected
"""

ABOUT = """
**Updates (2024-9-25)** ๐: Upgrade model to SOTA Gemini 1.5 Flash Experimental 0924
"""

ERRORS = """
Known errors โ ๏ธ:
"""

FUTURE_IMPLEMENTATIONS = """
Future features ๐:
- More tools such as web search
"""
| # genai.configure(api_key=GEMINI_API_KEY) | |
| # model = genai.GenerativeModel( | |
| # model_name, | |
| # safety_settings=[ | |
| # { | |
| # "category": "HARM_CATEGORY_HARASSMENT", | |
| # "threshold": "BLOCK_NONE" | |
| # }, | |
| # { | |
| # "category": "HARM_CATEGORY_HATE_SPEECH", | |
| # "threshold": "BLOCK_NONE" | |
| # }, | |
| # { | |
| # "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", | |
| # "threshold": "BLOCK_NONE" | |
| # }, | |
| # { | |
| # "category": "HARM_CATEGORY_DANGEROUS_CONTENT", | |
| # "threshold": "BLOCK_NONE" | |
| # } | |
| # ], | |
| # generation_config={ | |
| # "temperature": 1, | |
| # "top_p": 0.95, | |
| # "top_k": 64, | |
| # "max_output_tokens": 8192, | |
| # "response_mime_type": "text/plain", | |
| # } | |
| # ) | |
| # chat = model.start_chat(history=[]) | |
| # def clear_chat_history(): | |
| # chat.history = [] | |
| # def undo_chat(): | |
| # last_send, last_received = chat.rewind() | |
| # def transform_history(history): | |
| # new_history = [] | |
| # for user_msg, model_msg in history: | |
| # new_history.append({"role": "user", "parts": [user_msg]}) | |
| # new_history.append({"role": "model", "parts": [model_msg]}) | |
| # return new_history | |
| # def chatbot_stable(message, history): | |
| # message_text = message["text"] | |
| # message_files = message["files"] | |
| # if message_files: | |
| # image_uris = [genai.upload_file(path=file_path["path"]) for file_path in message_files] | |
| # message_content = [message_text] + image_uris | |
| # else: | |
| # message_content = [message_text] | |
| # response = chat.send_message(message_content, stream=True) | |
| # response.resolve() | |
| # return response.text | |
| # gemini_chatbot_interface = gr.Chatbot( | |
| # height=400, | |
| # likeable=True, | |
| # avatar_images=( | |
| # None, | |
| # "https://media.roboflow.com/spaces/gemini-icon.png" | |
| # ), | |
| # show_copy_button=True, | |
| # show_share_button=True, | |
| # render_markdown=True | |
| # ) | |
| # clear_chat_button = gr.ClearButton( | |
| # components=[gemini_chatbot_interface], | |
| # value="๐๏ธ Clear" | |
| # ) | |
| # undo_chat_button = gr.Button( | |
| # value="โฉ๏ธ Undo" | |
| # ) | |
| # gemini_chatbot = gr.ChatInterface( | |
| # fn=chatbot_stable, | |
| # chatbot=gemini_chatbot_interface, | |
| # multimodal=True, | |
| # clear_btn=clear_chat_button, | |
| # undo_btn=undo_chat_button | |
| # ) | |
# Gemini model ids selectable from the UI dropdown.
model_list = [
    "gemini-1.5-pro",
    "gemini-1.5-pro-002",
    "gemini-1.5-pro-exp-0827",
    "gemini-1.5-flash",
    "gemini-1.5-flash-002",
    "gemini-1.5-flash-8b-exp-0924",
]

genai.configure(api_key=GEMINI_API_KEY)
def clear_chat_history():
    """Reset the active Gemini chat session's history.

    The global ``chat`` object is only created on the first message sent to
    the model, so this is guarded against being called before any session
    exists (previously that raised an unhandled ``NameError``).
    """
    try:
        chat.history = []
    except NameError:
        # No chat session has been started yet -- nothing to clear.
        pass
def undo_chat():
    """Drop the most recent send/receive pair from the Gemini chat session.

    Silently does nothing when no chat session exists yet (the global
    ``chat`` is created lazily on the first message) or when there is no
    turn left to rewind. The rewound messages are intentionally discarded;
    the original unpacked them into unused locals.
    """
    try:
        chat.rewind()
    except (NameError, IndexError, ValueError):
        # No session yet, or nothing to rewind -- best-effort undo.
        pass
def transform_history(history):
    """Convert Gradio ``(user, assistant)`` pairs into Gemini history dicts.

    Each pair expands to two entries: one with role ``"user"`` and one with
    role ``"model"``, each carrying its text as a single-element ``parts``
    list.
    """
    converted = []
    for user_text, model_text in history:
        converted.extend(
            (
                {"role": "user", "parts": [user_text]},
                {"role": "model", "parts": [model_text]},
            )
        )
    return converted
def chatbot_stable(message, history, model_id, system_message, max_tokens, temperature, top_p):
    """Run one multimodal chat turn against the selected Gemini model.

    Args:
        message: Gradio multimodal payload, a dict with ``"text"`` and
            ``"files"`` keys.
        history: List of ``(user, assistant)`` pairs from the Gradio chatbot.
        model_id: Model name chosen in the dropdown.
        system_message: System instruction for the model.
        max_tokens: Upper bound on generated tokens.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling probability mass.

    Returns:
        The model's reply text.
    """
    global model, chat
    model = genai.GenerativeModel(
        model_name=model_id,
        system_instruction=system_message,
        # All safety filters are disabled by design for this demo app.
        safety_settings=[
            {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
            {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
            {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"},
            {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
        ],
        generation_config={
            "temperature": temperature,
            "top_p": top_p,
            "top_k": 40,
            "max_output_tokens": max_tokens,
            "response_mime_type": "text/plain",
        },
    )
    # Bug fix: the chat was previously restarted with an EMPTY history on every
    # message, so the model forgot the entire conversation each turn even
    # though Gradio supplies it. Seed the fresh session with prior text-only
    # turns (file turns are skipped because their uploaded URIs are not
    # recoverable from the Gradio history).
    prior_turns = []
    for user_msg, model_msg in history:
        if isinstance(user_msg, str) and isinstance(model_msg, str):
            prior_turns.append({"role": "user", "parts": [user_msg]})
            prior_turns.append({"role": "model", "parts": [model_msg]})
    chat = model.start_chat(history=prior_turns)
    message_text = message["text"]
    message_files = message["files"]
    if message_files:
        # NOTE(review): assumes each entry is a dict with a "path" key --
        # newer Gradio versions pass plain path strings; confirm against the
        # installed Gradio version.
        image_uris = [genai.upload_file(path=file_path["path"]) for file_path in message_files]
        message_content = [message_text] + image_uris
    else:
        message_content = [message_text]
    # The original requested a stream and immediately resolve()d it, never
    # surfacing partial chunks; a plain blocking call is equivalent.
    response = chat.send_message(message_content)
    return response.text
# Chat transcript widget; the second avatar slot is the Gemini icon shown
# next to model replies.
gemini_chatbot_interface = gr.Chatbot(
    height=500,
    likeable=True,
    avatar_images=(None, "https://media.roboflow.com/spaces/gemini-icon.png"),
    show_copy_button=True,
    show_share_button=True,
    render_markdown=True,
)

# Wipes the visible transcript (UI side only).
clear_chat_button = gr.ClearButton(components=[gemini_chatbot_interface], value="๐๏ธ Clear")

# Steps back one exchange in the UI.
undo_chat_button = gr.Button(value="โฉ๏ธ Undo")
# Top-level multimodal chat UI: wires the turn handler to the transcript
# widget plus model-selection and sampling controls.
gemini_chatbot = gr.ChatInterface(
    chatbot_stable,
    chatbot=gemini_chatbot_interface,
    multimodal=True,
    clear_btn=clear_chat_button,
    undo_btn=undo_chat_button,
    additional_inputs=[
        gr.Dropdown(choices=model_list, value="gemini-1.5-flash-002", label="Models"),
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=8192, value=4096, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=1.0, value=1, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)