Oviya committed · bf503a3
Parent(s): d7101fa

update chatbot

Browse files
- assistant.py       +43 -0
- pytrade.py         +10 -58
- requirements.txt   +2 -0
assistant.py  ADDED

@@ -0,0 +1,43 @@
+# assistant.py
+import os
+from typing import Optional
+
+from langchain_openai import ChatOpenAI
+from langchain_core.prompts import ChatPromptTemplate
+from langchain_core.output_parsers import StrOutputParser
+
+
+# ---- Configuration (via env, with safe defaults) ----
+HF_TOKEN: Optional[str] = os.environ.get("HF_TOKEN")
+MODEL_ID: str = os.environ.get("MODEL_ID", "openai/gpt-oss-20b:nebius")
+BASE_URL: str = os.environ.get("BASE_URL", "https://router.huggingface.co/v1")
+TEMP: float = float(os.environ.get("TEMPERATURE", "0.2"))
+
+
+# ---- Build the chain once (module-level cache) ----
+if not HF_TOKEN:
+    raise RuntimeError("HF_TOKEN is not set. Add it to your environment or Spaces → Settings → Secrets.")
+
+_llm = ChatOpenAI(
+    model=MODEL_ID,
+    api_key=HF_TOKEN,
+    base_url=BASE_URL,
+    temperature=TEMP,
+)
+
+_prompt = ChatPromptTemplate.from_messages([
+    ("system", "You are a helpful, precise assistant. Reply in simple, neutral English."),
+    ("user", "{message}")
+])
+
+_chain = _prompt | _llm | StrOutputParser()
+
+
+def get_answer(message: str) -> str:
+    """
+    Generate a single reply for the given user message.
+    Keeps LangChain initialization separate from the web layer.
+    """
+    if not message or not message.strip():
+        raise ValueError("message cannot be empty.")
+    return _chain.invoke({"message": message.strip()})
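A quick way to sanity-check the new module outside the web layer (a minimal sketch: it assumes HF_TOKEN is already exported and the default MODEL_ID/BASE_URL above are reachable; the file name is illustrative, not part of this commit):

# smoke_test.py - hypothetical local check, not part of this commit
import os

# Fail with a friendly message before assistant.py raises its own RuntimeError on import.
if not os.environ.get("HF_TOKEN"):
    raise SystemExit("export HF_TOKEN first")

from assistant import get_answer

# One round trip through prompt -> ChatOpenAI -> StrOutputParser.
print(get_answer("In one sentence, what does this chatbot do?"))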
pytrade.py  CHANGED

@@ -16,17 +16,7 @@ import os
 import time
 import requests
 from typing import List, Dict
-from
-    normalize_entities,
-    needs_live_context,
-    pick_is_news,
-    serpapi_search,
-    compose_live_user_prompt,
-    compose_general_user_prompt,
-    call_llm,
-    BASE_SYSTEM_PROMPT,
-    SEARCH_TOPK
-)
+from assistant import get_answer
 
 app = Flask(__name__)
 
@@ -125,56 +115,18 @@ def analyze_all():
 
 @app.route("/chat", methods=["POST"])
 def chat():
-    """
-    Request JSON:
-    { "question": "your question" }
-
-    Response JSON:
-    {
-      "answer": "...",
-      "live": true/false,
-      "sources": [{title, link, source, snippet}]
-    }
-    """
-    data = request.get_json(force=True, silent=True) or {}
-    message = (data.get("message") or data.get("question") or "").strip()
-
-    if not message:
-        return jsonify({"error": "message or question is required"}), 400
-
-    # Normalize common aliases (e.g., TCS -> Tata Consultancy Services)
-    message = normalize_entities(message)
-
-    # Decide if this needs live context
-    live = needs_live_context(message)
-
-    hits: List[Dict[str, str]] = []
-    if live:
-        is_news = pick_is_news(message)
-        try:
-            hits = serpapi_search(message, is_news=is_news, num=SEARCH_TOPK)
-        except Exception:
-            hits = []
-            live = False
+    data = request.get_json(silent=True) or {}
+    user_message = (data.get("message") or "").strip()
+    if not user_message:
+        return jsonify({"error": "message is required"}), 400
 
     try:
-        if live:
-            user_prompt = compose_live_user_prompt(message, hits)
-            answer = call_llm(BASE_SYSTEM_PROMPT, user_prompt)
-            return jsonify({"answer": answer, "live": True, "sources": hits})
-        else:
-            user_prompt = compose_general_user_prompt(message)
-            answer = call_llm(BASE_SYSTEM_PROMPT, user_prompt)
-            return jsonify({"answer": answer, "live": False, "sources": []})
+        reply = get_answer(user_message)
+        return jsonify({"answer": reply})
     except Exception as e:
-        return jsonify({
-            "error": str(e),
-            "live": live,
-            "sources": hits
-        }), 500
+        # Keep error neutral and simple for the client
+        return jsonify({"error": str(e)}), 500
+
 
 if __name__ == "__main__":
     # Default to 5000 locally; on Hugging Face Spaces the platform injects PORT.
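The endpoint contract is now smaller: clients POST {"message": "..."} and receive {"answer": "..."}; the old "question" alias and the "live"/"sources" fields are gone. A minimal client sketch using requests (illustrative; assumes the app is running on the default local port 5000 noted in the __main__ block):

# chat_client.py - illustrative, not part of this commit
import requests

resp = requests.post(
    "http://localhost:5000/chat",
    json={"message": "Hello! What can you do?"},
    timeout=60,
)
# A 400 here means "message" was missing or empty; a 500 carries {"error": ...}.
resp.raise_for_status()
print(resp.json()["answer"])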
requirements.txt  CHANGED

@@ -21,4 +21,6 @@ gunicorn
 torch
 python-dotenv
 openai>=1.0.0
+langchain==0.3.4
+langchain-openai==0.2.3
 
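The two new pins match the imports that assistant.py relies on. A tiny import check (illustrative, not part of this commit) to confirm the pinned pair installed cleanly:

# verify_pins.py - optional sanity check, not part of this commit
# If these imports succeed, langchain==0.3.4 and langchain-openai==0.2.3 resolved together.
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser

print("LangChain stack imports OK")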