UltraBuGu committed on
Commit
0a5eb28
·
verified ·
1 Parent(s): 92d048a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -48
app.py CHANGED
@@ -7,22 +7,19 @@ from concurrent.futures import ThreadPoolExecutor
7
  gemini_key = os.getenv("GEMINI_API_KEY")
8
  samba_key = os.getenv("SAMBANOVA_API_KEY")
9
  github_key = os.getenv("GITHUB_TOKEN")
10
- hf_token = os.getenv("ONCHI_AI_V3_2") # For Qwen, Media, Video
11
 
12
- # --- 2. THE FULL 2025 ARSENAL (12 Models) ---
13
  GENERALS = [
14
- # --- THE BIG THREE ---
 
 
15
  ("πŸ’Ž Gemini 3.0 Pro", "google", "gemini-3.0-pro"),
16
  ("🦁 GPT-5", "github", "gpt-5"),
17
- ("πŸš€ Llama 4 Maverick", "samba", "Llama-4-Maverick-17B-128E-Instruct"),
18
-
19
- # --- THE SPECIALISTS ---
20
  ("πŸŒͺ️ Mistral 5 Le Grande", "github", "Mistral-5-LeGrande"),
21
  ("🧠 Phi-4 Genius", "github", "Phi-4-Genius"),
22
  ("⌘ Cohere Command X", "github", "Cohere-command-x"),
23
  ("🐍 Jamba 2 Hybrid", "github", "AI21-Jamba-2-Hybrid"),
24
-
25
- # --- THE VISIONARIES ---
26
  ("πŸ‘οΈ Llama Vision", "samba", "Llama-3.2-90B-Vision-Instruct"),
27
  ("πŸ”­ Llama Scout", "samba", "Llama-Scout-Verified"),
28
  ("πŸ‰ Qwen 3 Max", "hf", "Qwen/Qwen3-Max-Instruct"),
@@ -46,17 +43,15 @@ def generate_media(prompt, media_type):
46
  resp = requests.post(api_url, headers=headers, json={"inputs": prompt}, timeout=40)
47
  if resp.status_code != 200: return f"Media Error: {resp.text}"
48
 
49
- # Save file to disk so Gradio can show it
50
  filename = f"output.{'mp3' if media_type == 'audio' else 'mp4' if media_type == 'video' else 'jpg'}"
51
  with open(filename, "wb") as f:
52
  f.write(resp.content)
53
  return filename
54
  except Exception as e: return f"Error: {str(e)}"
55
 
56
- # --- 4. TEXT ENGINE (Routing to 4 Different Clouds) ---
57
  def call_text_model(prompt, model_type, model_id):
58
  try:
59
- # GOOGLE (Gemini 3)
60
  if model_type == "google":
61
  if not gemini_key: return "Error: No Key"
62
  url = f"https://generativelanguage.googleapis.com/v1beta/models/{model_id}:generateContent?key={gemini_key}"
@@ -64,7 +59,6 @@ def call_text_model(prompt, model_type, model_id):
64
  if resp.status_code != 200: return f"Error {resp.status_code}"
65
  return resp.json()['candidates'][0]['content']['parts'][0]['text']
66
 
67
- # SAMBANOVA (Llama 4, Scout, Vision)
68
  elif model_type == "samba":
69
  if not samba_key: return "Error: No Key"
70
  resp = requests.post(
@@ -75,7 +69,6 @@ def call_text_model(prompt, model_type, model_id):
75
  if resp.status_code != 200: return f"Error {resp.status_code}"
76
  return resp.json()['choices'][0]['message']['content']
77
 
78
- # GITHUB MODELS (GPT-5, Mistral 5, Phi 4, Cohere, Jamba)
79
  elif model_type == "github":
80
  if not github_key: return "Error: No Key"
81
  resp = requests.post(
@@ -83,10 +76,10 @@ def call_text_model(prompt, model_type, model_id):
83
  headers={"Authorization": f"Bearer {github_key}", "Content-Type": "application/json"},
84
  json={"model": model_id, "messages": [{"role": "user", "content": prompt}], "max_tokens": 1000}, timeout=25
85
  )
86
- if resp.status_code != 200: return f"Error {resp.status_code}"
 
87
  return resp.json()['choices'][0]['message']['content']
88
 
89
- # HUGGING FACE (Qwen 3 Max)
90
  elif model_type == "hf":
91
  if not hf_token: return "Error: No Key"
92
  resp = requests.post(
@@ -111,32 +104,21 @@ def call_boss(prompt, reports):
111
  return "πŸš€ **Llama 4 Maverick Synthesis:**\n\n" + resp.json()['choices'][0]['message']['content']
112
  except Exception as e: return f"Synthesis Error: {str(e)}"
113
 
114
- # --- 6. ORCHESTRATOR ---
115
  def run_swarm(prompt, history):
116
- if history is None: history = []
117
-
118
- # 1. Detect Media Requests
119
- lower_prompt = prompt.lower()
120
- if "image" in lower_prompt or "draw" in lower_prompt:
121
- history.append([prompt, "🎨 Generating Image (Flux)..."])
122
- yield history, None, None, None
123
- img_path = generate_media(prompt, "image")
124
- history[-1][1] = "🎨 Image Generated Below"
125
- yield history, img_path, None, None
126
- return
127
-
128
- if "music" in lower_prompt or "song" in lower_prompt:
129
- history.append([prompt, "🎡 Generating Audio (MusicGen)..."])
130
- yield history, None, None, None
131
- aud_path = generate_media(prompt, "audio")
132
- history[-1][1] = "🎡 Audio Generated Below"
133
- yield history, None, aud_path, None
134
- return
135
-
136
- # 2. Run Text Swarm
137
- history.append([prompt, "⚑ Deploying FULL 2025 SWARM (12 Models)..."])
138
  yield history, None, None, None
139
 
 
 
 
 
 
 
 
 
140
  reports = ""
141
  successful = 0
142
 
@@ -148,31 +130,29 @@ def run_swarm(prompt, history):
148
  res = future.result()
149
  if "Error" not in res: successful += 1
150
  reports += f"\n=== {name} ===\n{res}\n"
151
- history[-1][1] = f"βœ… {successful}/10 Models Reported...\nWaiting for Maverick..."
152
  yield history, None, None, None
153
  except: pass
154
 
155
  final = call_boss(prompt, reports)
156
- history[-1][1] = final
157
  yield history, None, None, None
158
 
159
- # --- 7. UI ---
160
  with gr.Blocks(title="ONCHI AI GOD MODE") as demo:
161
  gr.Markdown("# ⚑ ONCHI AI GOD MODE (2025)")
162
- gr.Markdown("Gemini 3.0 β€’ GPT-5 β€’ Llama 4 β€’ Qwen 3 β€’ Mistral 5 β€’ Phi 4 β€’ Vision β€’ Audio β€’ Video")
163
 
164
- chatbot = gr.Chatbot(height=500)
 
165
 
166
- # MEDIA OUTPUTS
167
  with gr.Row():
168
- img_out = gr.Image(label="Generated Image")
169
- aud_out = gr.Audio(label="Generated Audio")
170
- vid_out = gr.Video(label="Generated Video")
171
 
172
  msg = gr.Textbox(label="Command the Swarm")
173
  btn = gr.Button("IGNITE")
174
 
175
- # Connect everything
176
  msg.submit(run_swarm, [msg, chatbot], [chatbot, img_out, aud_out, vid_out])
177
  btn.click(run_swarm, [msg, chatbot], [chatbot, img_out, aud_out, vid_out])
178
 
 
7
  gemini_key = os.getenv("GEMINI_API_KEY")
8
  samba_key = os.getenv("SAMBANOVA_API_KEY")
9
  github_key = os.getenv("GITHUB_TOKEN")
10
+ hf_token = os.getenv("ONCHI_AI_V3_2")
11
 
12
+ # --- 2. THE 2025 GOD SQUAD (12 Models) ---
13
  GENERALS = [
14
+ # verified: Llama 4 Maverick (SambaNova)
15
+ ("πŸš€ Llama 4 Maverick", "samba", "Llama-4-Maverick-17B-128E-Instruct"),
16
+ ("πŸ¦™ Llama 3.3 70B", "samba", "Meta-Llama-3.3-70B-Instruct"),
17
  ("πŸ’Ž Gemini 3.0 Pro", "google", "gemini-3.0-pro"),
18
  ("🦁 GPT-5", "github", "gpt-5"),
 
 
 
19
  ("πŸŒͺ️ Mistral 5 Le Grande", "github", "Mistral-5-LeGrande"),
20
  ("🧠 Phi-4 Genius", "github", "Phi-4-Genius"),
21
  ("⌘ Cohere Command X", "github", "Cohere-command-x"),
22
  ("🐍 Jamba 2 Hybrid", "github", "AI21-Jamba-2-Hybrid"),
 
 
23
  ("πŸ‘οΈ Llama Vision", "samba", "Llama-3.2-90B-Vision-Instruct"),
24
  ("πŸ”­ Llama Scout", "samba", "Llama-Scout-Verified"),
25
  ("πŸ‰ Qwen 3 Max", "hf", "Qwen/Qwen3-Max-Instruct"),
 
43
  resp = requests.post(api_url, headers=headers, json={"inputs": prompt}, timeout=40)
44
  if resp.status_code != 200: return f"Media Error: {resp.text}"
45
 
 
46
  filename = f"output.{'mp3' if media_type == 'audio' else 'mp4' if media_type == 'video' else 'jpg'}"
47
  with open(filename, "wb") as f:
48
  f.write(resp.content)
49
  return filename
50
  except Exception as e: return f"Error: {str(e)}"
51
 
52
+ # --- 4. TEXT ENGINE ---
53
  def call_text_model(prompt, model_type, model_id):
54
  try:
 
55
  if model_type == "google":
56
  if not gemini_key: return "Error: No Key"
57
  url = f"https://generativelanguage.googleapis.com/v1beta/models/{model_id}:generateContent?key={gemini_key}"
 
59
  if resp.status_code != 200: return f"Error {resp.status_code}"
60
  return resp.json()['candidates'][0]['content']['parts'][0]['text']
61
 
 
62
  elif model_type == "samba":
63
  if not samba_key: return "Error: No Key"
64
  resp = requests.post(
 
69
  if resp.status_code != 200: return f"Error {resp.status_code}"
70
  return resp.json()['choices'][0]['message']['content']
71
 
 
72
  elif model_type == "github":
73
  if not github_key: return "Error: No Key"
74
  resp = requests.post(
 
76
  headers={"Authorization": f"Bearer {github_key}", "Content-Type": "application/json"},
77
  json={"model": model_id, "messages": [{"role": "user", "content": prompt}], "max_tokens": 1000}, timeout=25
78
  )
79
+ # Fallback for experimental IDs
80
+ if resp.status_code != 200: return " [Waitlist/Error] "
81
  return resp.json()['choices'][0]['message']['content']
82
 
 
83
  elif model_type == "hf":
84
  if not hf_token: return "Error: No Key"
85
  resp = requests.post(
 
104
  return "πŸš€ **Llama 4 Maverick Synthesis:**\n\n" + resp.json()['choices'][0]['message']['content']
105
  except Exception as e: return f"Synthesis Error: {str(e)}"
106
 
107
+ # --- 6. ORCHESTRATOR (GRADIO 4 COMPATIBLE) ---
108
  def run_swarm(prompt, history):
109
+ # Gradio 4 Format: List of Dictionaries
110
+ history.append({"role": "user", "content": prompt})
111
+ history.append({"role": "assistant", "content": "⚑ Deploying 2025 GOD SQUAD..."})
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
112
  yield history, None, None, None
113
 
114
+ # Check Media
115
+ lower = prompt.lower()
116
+ if "draw" in lower or "image" in lower:
117
+ img = generate_media(prompt, "image")
118
+ history[-1]['content'] = "🎨 Image Generated!"
119
+ yield history, img, None, None
120
+ return
121
+
122
  reports = ""
123
  successful = 0
124
 
 
130
  res = future.result()
131
  if "Error" not in res: successful += 1
132
  reports += f"\n=== {name} ===\n{res}\n"
133
+ history[-1]['content'] = f"βœ… {successful}/10 Models Reported...\nWaiting for Maverick..."
134
  yield history, None, None, None
135
  except: pass
136
 
137
  final = call_boss(prompt, reports)
138
+ history[-1]['content'] = final
139
  yield history, None, None, None
140
 
141
+ # --- 7. UI (MODERN) ---
142
  with gr.Blocks(title="ONCHI AI GOD MODE") as demo:
143
  gr.Markdown("# ⚑ ONCHI AI GOD MODE (2025)")
 
144
 
145
+ # CRITICAL: type="messages" matches the Dictionary format used above
146
+ chatbot = gr.Chatbot(height=500, type="messages")
147
 
 
148
  with gr.Row():
149
+ img_out = gr.Image(label="Image")
150
+ aud_out = gr.Audio(label="Audio")
151
+ vid_out = gr.Video(label="Video")
152
 
153
  msg = gr.Textbox(label="Command the Swarm")
154
  btn = gr.Button("IGNITE")
155
 
 
156
  msg.submit(run_swarm, [msg, chatbot], [chatbot, img_out, aud_out, vid_out])
157
  btn.click(run_swarm, [msg, chatbot], [chatbot, img_out, aud_out, vid_out])
158