Oviya committed on
Commit
6c90094
·
1 Parent(s): 51b884b

update indices

Browse files
Files changed (3) hide show
  1. analysestock.py +2 -2
  2. list.py +220 -31
  3. pytrade.py +60 -17
analysestock.py CHANGED
@@ -39,7 +39,7 @@ BASE_DIR = Path(__file__).resolve().parent # folder where analysestock.py lives
39
  TRAIN_XLSX_PATH = (
40
  os.getenv("TRAIN_XLSX_PATH")
41
  or (str(BASE_DIR / "analysedata.xlsx") if (BASE_DIR / "analysedata.xlsx").exists() else None)
42
- or (r"D:\PY-Trade\backend alone\analysedata.xlsx" if os.name == "nt" else None)
43
  )
44
 
45
  if not TRAIN_XLSX_PATH or not Path(TRAIN_XLSX_PATH).exists():
@@ -51,7 +51,7 @@ if not TRAIN_XLSX_PATH or not Path(TRAIN_XLSX_PATH).exists():
51
  MODEL_BUNDLE_PATH = (
52
  os.getenv("MODEL_BUNDLE_PATH")
53
  or (str(BASE_DIR / "gps_highlow_extratrees.pkl") if (BASE_DIR / "gps_highlow_extratrees.pkl").exists() else None)
54
- or (r"D:\PY-Trade\backend alone\gps_highlow_extratrees.pkl" if os.name == "nt" else None)
55
  or "/tmp/pytrade-models/gps_highlow_extratrees.pkl"
56
  )
57
 
 
39
# Training workbook location: env var wins, then a copy next to this file,
# then (Windows only) a hard-coded development path.
_local_xlsx = BASE_DIR / "analysedata.xlsx"
TRAIN_XLSX_PATH = (
    os.getenv("TRAIN_XLSX_PATH")
    or (str(_local_xlsx) if _local_xlsx.exists() else None)
    or (r"C:\VIJI\huggingface-deployment\deployment\pytrade-backend\analysedata.xlsx" if os.name == "nt" else None)
)
44
 
45
  if not TRAIN_XLSX_PATH or not Path(TRAIN_XLSX_PATH).exists():
 
51
# Model bundle location: env var, then a copy next to this file, then the
# Windows dev path, finally the writable /tmp fallback used on Spaces.
_local_pkl = BASE_DIR / "gps_highlow_extratrees.pkl"
MODEL_BUNDLE_PATH = (
    os.getenv("MODEL_BUNDLE_PATH")
    or (str(_local_pkl) if _local_pkl.exists() else None)
    or (r"C:\VIJI\huggingface-deployment\deployment\pytrade-backend\gps_highlow_extratrees.pkl" if os.name == "nt" else None)
    or "/tmp/pytrade-models/gps_highlow_extratrees.pkl"
)
57
 
list.py CHANGED
@@ -1,31 +1,30 @@
1
  # -*- coding: utf-8 -*-
2
- """
3
- Minimal API for PY-Trade filters & companies
4
- - /getfilters -> countries -> exchanges -> indices
5
- - /getcompanies?code=NIFTY50 -> { code, asOf, count, constituents[] }
6
- """
7
-
8
  from __future__ import annotations
9
  import csv, io, json, time, os
10
- from typing import Dict, List, Any
11
  from pathlib import Path
 
12
 
13
  import requests
14
- from flask import Flask, request, jsonify
15
- from flask_cors import CORS
16
 
17
- # ---------- configuration ----------
 
 
 
 
 
 
 
18
  UA = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127 Safari/537.36"
19
  REFERER = "https://www.niftyindices.com/indices/equity/broad-based-indices"
20
  TTL_SECONDS = 60 * 60 * 12 # 12h cache
21
- # Use /data/cache on HF Spaces (writable), else fall back to local folder "cache" next to this file.
22
  DEFAULT_CACHE_DIR = os.getenv("CACHE_DIR", "/data/cache")
23
  CACHE_DIR = Path(DEFAULT_CACHE_DIR if DEFAULT_CACHE_DIR else ".").expanduser()
24
  if CACHE_DIR == Path("."):
25
  CACHE_DIR = Path(__file__).with_name("cache")
26
  CACHE_DIR.mkdir(parents=True, exist_ok=True)
27
 
28
- # Official CSV endpoints for NSE indices
29
  NIFTY_URLS: Dict[str, str] = {
30
  "NIFTY50": "https://www.niftyindices.com/IndexConstituent/ind_nifty50list.csv",
31
  "NIFTY100": "https://www.niftyindices.com/IndexConstituent/ind_nifty100list.csv",
@@ -34,7 +33,7 @@ NIFTY_URLS: Dict[str, str] = {
34
  "NIFTY500": "https://www.niftyindices.com/IndexConstituent/ind_nifty500list.csv",
35
  }
36
 
37
- # Filters payload for the UI (add more countries/exchanges here later)
38
  MARKETS: Dict[str, Dict[str, List[Dict[str, str]]]] = {
39
  "India": {
40
  "NSE (National Stock Exchange)": [
@@ -47,17 +46,41 @@ MARKETS: Dict[str, Dict[str, List[Dict[str, str]]]] = {
47
  }
48
  }
49
 
50
- # ---------- utilities ----------
51
- def http_get_text(url: str) -> str:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
52
  sess = requests.Session()
53
- sess.headers.update({"User-Agent": UA, "Referer": REFERER, "Accept": "text/csv,*/*"})
54
- r = sess.get(url, timeout=25)
55
  r.raise_for_status()
56
  r.encoding = r.encoding or "utf-8"
57
  return r.text
58
 
59
  def parse_nifty_csv(text: str) -> List[Dict[str, str]]:
60
- # Columns: Company Name, Industry, Symbol, Series, ISIN Code
61
  out: List[Dict[str, str]] = []
62
  rdr = csv.DictReader(io.StringIO(text))
63
  for row in rdr:
@@ -85,30 +108,196 @@ def save_cache(code: str, payload: Any) -> None:
85
  with fp.open("w", encoding="utf-8") as f:
86
  json.dump(payload, f, ensure_ascii=False, indent=2)
87
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
88
  def build_companies_payload(code: str) -> Dict[str, Any]:
89
- code = code.upper()
90
- # 1) try cache
 
 
91
  cached = load_cache(code)
92
  if cached:
93
  return cached
94
 
95
- # 2) fetch official CSV
96
- url = NIFTY_URLS.get(code)
97
- if not url:
 
 
 
 
 
 
 
 
 
98
  raise ValueError(f"Unknown index code: {code}")
99
- text = http_get_text(url)
100
- rows = parse_nifty_csv(text)
101
 
102
  payload = {
103
  "code": code,
104
- "exchange": "NSE",
105
- "country": "IN",
106
- "currency": "INR",
107
- "asOf": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
108
  "count": len(rows),
109
  "constituents": rows,
110
- "source": url,
111
  }
112
  save_cache(code, payload)
113
  return payload
114
-
 
1
  # -*- coding: utf-8 -*-
 
 
 
 
 
 
2
  from __future__ import annotations
3
  import csv, io, json, time, os
4
+ from typing import Dict, List, Any, Optional
5
  from pathlib import Path
6
+ from io import StringIO
7
 
8
  import requests
 
 
9
 
10
+ # optional (for Wikipedia tables)
11
+ try:
12
+ import pandas as pd # requires: pip install pandas lxml
13
+ HAS_PANDAS = True
14
+ except Exception:
15
+ HAS_PANDAS = False
16
+
17
+ # ---------- configuration (unchanged names) ----------
18
  UA = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127 Safari/537.36"
19
  REFERER = "https://www.niftyindices.com/indices/equity/broad-based-indices"
20
  TTL_SECONDS = 60 * 60 * 12 # 12h cache
 
21
  DEFAULT_CACHE_DIR = os.getenv("CACHE_DIR", "/data/cache")
22
  CACHE_DIR = Path(DEFAULT_CACHE_DIR if DEFAULT_CACHE_DIR else ".").expanduser()
23
  if CACHE_DIR == Path("."):
24
  CACHE_DIR = Path(__file__).with_name("cache")
25
  CACHE_DIR.mkdir(parents=True, exist_ok=True)
26
 
27
+ # Official CSV endpoints for NSE indices (unchanged name)
28
  NIFTY_URLS: Dict[str, str] = {
29
  "NIFTY50": "https://www.niftyindices.com/IndexConstituent/ind_nifty50list.csv",
30
  "NIFTY100": "https://www.niftyindices.com/IndexConstituent/ind_nifty100list.csv",
 
33
  "NIFTY500": "https://www.niftyindices.com/IndexConstituent/ind_nifty500list.csv",
34
  }
35
 
36
+ # Filters payload for the UI (unchanged variable name)
37
  MARKETS: Dict[str, Dict[str, List[Dict[str, str]]]] = {
38
  "India": {
39
  "NSE (National Stock Exchange)": [
 
46
  }
47
  }
48
 
49
+ # ---------- extras (new, additive) ----------
50
+ WIKI_PAGES: Dict[str, str] = {
51
+ "NASDAQ100": "https://en.wikipedia.org/wiki/NASDAQ-100",
52
+ "DAX40": "https://en.wikipedia.org/wiki/DAX",
53
+ "OMXS30": "https://en.wikipedia.org/wiki/OMX_Stockholm_30",
54
+ }
55
+
56
+ EXTRA_MARKETS: Dict[str, Dict[str, List[Dict[str, str]]]] = {
57
+ "United States": {
58
+ "NASDAQ": [
59
+ {"code": "NASDAQ100", "name": "NASDAQ-100"}
60
+ ]
61
+ },
62
+ "Germany": {
63
+ "XETRA (Deutsche Börse)": [
64
+ {"code": "DAX40", "name": "DAX 40"}
65
+ ]
66
+ },
67
+ "Sweden": {
68
+ "OMX Stockholm": [
69
+ {"code": "OMXS30", "name": "OMX Stockholm 30"}
70
+ ]
71
+ }
72
+ }
73
+
74
+ # ---------- utilities (kept original names) ----------
75
def http_get_text(url: str, accept: str = "text/csv,*/*") -> str:
    """Fetch *url* with browser-like headers and return the response body as text.

    Raises requests.HTTPError on non-2xx responses.
    """
    session = requests.Session()
    session.headers.update({
        "User-Agent": UA,
        "Referer": REFERER,
        "Accept": accept,
    })
    resp = session.get(url, timeout=30)
    resp.raise_for_status()
    # Some servers omit the charset; fall back to UTF-8 before decoding.
    resp.encoding = resp.encoding or "utf-8"
    return resp.text
82
 
83
  def parse_nifty_csv(text: str) -> List[Dict[str, str]]:
 
84
  out: List[Dict[str, str]] = []
85
  rdr = csv.DictReader(io.StringIO(text))
86
  for row in rdr:
 
108
  with fp.open("w", encoding="utf-8") as f:
109
  json.dump(payload, f, ensure_ascii=False, indent=2)
110
 
111
+ def _now_iso_utc() -> str:
112
+ return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
113
+
114
+ # ---- Wikipedia helpers ----
115
def _fetch_wiki_tables(url: str):
    """Download *url* and parse every HTML table on the page with pandas.

    Raises RuntimeError when the optional pandas/lxml dependencies are missing.
    """
    if not HAS_PANDAS:
        raise RuntimeError("pandas/lxml not installed. Run: pip install pandas lxml")
    page_html = http_get_text(url, accept="text/html,*/*")
    return pd.read_html(StringIO(page_html))
120
+
121
+ def _pick_table_and_columns(tables, ticker_candidates, company_candidates):
122
+ for t in tables:
123
+ cols_map = {str(c).strip().lower(): c for c in t.columns}
124
+ ticker_col = next((cols_map[c] for c in ticker_candidates if c in cols_map), None)
125
+ company_col = next((cols_map[c] for c in company_candidates if c in cols_map), None)
126
+ if ticker_col is not None and company_col is not None:
127
+ return t, ticker_col, company_col
128
+ raise RuntimeError(
129
+ f"No suitable table found. Ticker in {ticker_candidates}, company in {company_candidates}."
130
+ )
131
+
132
def _parse_wiki_constituents(url: str, ticker_candidates, company_candidates, suffix: str, upper_tickers: bool) -> List[Dict[str, str]]:
    """Scrape an index page on Wikipedia and return its constituents.

    Each returned row is {"symbol": <ticker + suffix>, "company": <name>};
    blank tickers/names are skipped. Raises RuntimeError when zero rows parse.
    """
    tables = _fetch_wiki_tables(url)
    table, sym_col, com_col = _pick_table_and_columns(tables, ticker_candidates, company_candidates)
    constituents: List[Dict[str, str]] = []
    for raw_sym, raw_com in zip(table[sym_col], table[com_col]):
        symbol = str(raw_sym).strip()
        company = str(raw_com).strip()
        if not symbol or not company:
            continue
        if upper_tickers:
            symbol = symbol.upper()
        constituents.append({"symbol": f"{symbol}{suffix}", "company": company})
    if not constituents:
        raise RuntimeError("Parsed zero rows from Wikipedia table.")
    return constituents
147
+
148
def _parse_nasdaq100():
    """Scrape NASDAQ-100 constituents; returns (rows, exchange, country, currency, source_url)."""
    source = WIKI_PAGES["NASDAQ100"]
    constituents = _parse_wiki_constituents(
        source,
        ticker_candidates=["ticker", "symbol"],
        company_candidates=["company", "name"],
        suffix="",  # US tickers need no Yahoo-style suffix
        upper_tickers=True,
    )
    return constituents, "NASDAQ", "US", "USD", source
158
+
159
def _parse_dax40():
    """Scrape DAX-40 constituents; returns (rows, exchange, country, currency, source_url)."""
    source = WIKI_PAGES["DAX40"]
    constituents = _parse_wiki_constituents(
        source,
        ticker_candidates=["ticker symbol", "ticker", "symbol"],
        company_candidates=["company", "name"],
        suffix=".DE",  # XETRA listings on Yahoo use the .DE suffix
        upper_tickers=True,
    )
    return constituents, "XETRA", "DE", "EUR", source
169
+
170
def _parse_omxs30():
    """Scrape OMX Stockholm 30 constituents; returns (rows, exchange, country, currency, source_url)."""
    source = WIKI_PAGES["OMXS30"]
    constituents = _parse_wiki_constituents(
        source,
        ticker_candidates=["ticker", "symbol"],
        company_candidates=["company", "name"],
        suffix=".ST",  # Stockholm listings on Yahoo use the .ST suffix
        upper_tickers=True,
    )
    return constituents, "OMX Stockholm", "SE", "SEK", source
180
+
181
+ # ---------- public helpers ----------
182
def get_markets() -> Dict[str, Dict[str, List[Dict[str, str]]]]:
    """Build the UI filter tree: MARKETS merged with EXTRA_MARKETS.

    Neither source dict is mutated — each exchange's index list is copied
    before the extras are appended.
    """
    combined: Dict[str, Dict[str, List[Dict[str, str]]]] = {
        country: {exchange: list(refs) for exchange, refs in exchanges.items()}
        for country, exchanges in MARKETS.items()
    }
    for country, exchanges in EXTRA_MARKETS.items():
        bucket = combined.setdefault(country, {})
        for exchange, refs in exchanges.items():
            bucket.setdefault(exchange, []).extend(refs)
    return combined
199
+
200
+ def _all_supported_index_codes(markets: Dict[str, Dict[str, List[Dict[str, str]]]]) -> List[str]:
201
+ codes: List[str] = []
202
+ for _country, exchanges in markets.items():
203
+ for _exch, refs in exchanges.items():
204
+ for ref in refs:
205
+ codes.append(ref["code"])
206
+ return codes
207
+
208
+ def _index_display_name(code: str, markets: Dict[str, Dict[str, List[Dict[str, str]]]]) -> str:
209
+ cu = code.upper()
210
+ for _country, exchanges in markets.items():
211
+ for _exch, refs in exchanges.items():
212
+ for ref in refs:
213
+ if ref["code"].upper() == cu:
214
+ return ref.get("name", cu)
215
+ return cu
216
+
217
def search_companies(q: str,
                     indices: Optional[List[str]] = None,
                     limit: int = 50) -> List[Dict[str, Any]]:
    """Case-insensitive substring search over the constituents of supported indices.

    Constituents come from build_companies_payload (so lookups hit its cache).
    Each hit is {symbol, company, indexCode, indexName, exchange, country}.
    Collection stops once *limit* raw matches are gathered; the collected set
    is then ranked (exact match, prefix match, substring) before returning.
    """
    needle = (q or "").strip().lower()
    if not needle:
        return []

    markets = get_markets()
    codes = indices or _all_supported_index_codes(markets)

    matches: List[Dict[str, Any]] = []
    for code in codes:
        try:
            payload = build_companies_payload(code)
        except Exception:
            # An index we cannot fetch simply contributes no results.
            continue
        display = _index_display_name(code, markets)
        for entry in payload.get("constituents", []):
            symbol = str(entry.get("symbol", "")).strip()
            company = str(entry.get("company", "")).strip()
            if not symbol or not company:
                continue
            if needle not in symbol.lower() and needle not in company.lower():
                continue
            matches.append({
                "symbol": symbol,
                "company": company,
                "indexCode": payload.get("code"),
                "indexName": display,
                "exchange": payload.get("exchange"),
                "country": payload.get("country"),
            })
            if len(matches) >= limit:
                break
        if len(matches) >= limit:
            break

    def _rank(item):
        sym = item["symbol"].lower()
        com = item["company"].lower()
        if sym == needle or com == needle:
            return 0
        if sym.startswith(needle) or com.startswith(needle):
            return 1
        return 2

    matches.sort(key=_rank)
    return matches[:limit]
267
+
268
+ # ---------- core (unchanged name, extended) ----------
269
def build_companies_payload(code: str) -> Dict[str, Any]:
    """Resolve an index *code* to a constituents payload, serving from cache when present.

    Supported sources: the official NSE CSV endpoints (NIFTY_URLS) and the
    Wikipedia-backed NASDAQ100 / DAX40 / OMXS30 extras.
    Raises ValueError for a missing or unknown code.
    """
    code = (code or "").upper().strip()
    if not code:
        raise ValueError("Index code is required.")

    cached = load_cache(code)
    if cached:
        return cached

    if code in NIFTY_URLS:
        source = NIFTY_URLS[code]
        rows = parse_nifty_csv(http_get_text(source))
        exchange, country, currency = "NSE", "IN", "INR"
    elif code == "NASDAQ100":
        rows, exchange, country, currency, source = _parse_nasdaq100()
    elif code == "DAX40":
        rows, exchange, country, currency, source = _parse_dax40()
    elif code == "OMXS30":
        rows, exchange, country, currency, source = _parse_omxs30()
    else:
        raise ValueError(f"Unknown index code: {code}")

    payload = {
        "code": code,
        "exchange": exchange,
        "country": country,
        "currency": currency,
        "asOf": _now_iso_utc(),
        "count": len(rows),
        "constituents": rows,
        "source": source,
    }
    save_cache(code, payload)
    return payload
 
pytrade.py CHANGED
@@ -1,16 +1,22 @@
1
- from flask import Flask, request, jsonify
 
 
 
2
  from flask_cors import CORS
3
- from flask import Response
4
  from analysestock import analysestock
5
- from list import build_companies_payload, MARKETS
6
- import yfinance as yf
 
 
 
 
 
7
  import json
8
  import os
9
- import time # <-- add
10
- import requests # <-- add (used in the /getcompanies exception handler)
11
 
12
  app = Flask(__name__)
13
- # CORS(app) # allow all origins for simplicity
14
 
15
  # Allow your Angular Space + local dev
16
  FRONTEND_ORIGIN = os.environ.get(
@@ -28,10 +34,21 @@ def health():
28
 
29
  @app.get("/getfilters")
30
  def get_filters():
31
- return jsonify({"asOf": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()), "markets": MARKETS})
 
 
 
 
 
 
 
 
32
 
33
  @app.get("/getcompanies")
34
  def get_companies():
 
 
 
35
  code = (request.args.get("code") or request.args.get("index") or "").upper()
36
  if not code:
37
  return jsonify({"error": "Missing ?code=<INDEXCODE>"}), 400
@@ -43,31 +60,57 @@ def get_companies():
43
  except Exception as e:
44
  return jsonify({"error": str(e)}), 500
45
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
46
 
47
  @app.route('/analysestock', methods=['POST'])
48
  def analyze_all():
49
  try:
50
- data = request.get_json()
51
  tickersSymbol = data['ticker']
52
  results = []
53
-
54
  for ticker in tickersSymbol:
55
  try:
56
  results.append(analysestock(ticker))
57
  except Exception as e:
58
  results.append({"ticker": ticker, "error": str(e)})
59
-
60
- # Use Response with json.dumps to preserve OrderedDict order
61
  return Response(json.dumps(results, indent=2), mimetype='application/json')
62
  except Exception as e:
63
  return jsonify({"error": str(e)}), 500
64
 
65
- # if __name__ == "__main__":
66
- # app.run(host="127.0.0.1", port=5000, debug=True)
67
-
68
  if __name__ == "__main__":
69
  # Default to 5000 locally; on Hugging Face Spaces the platform injects PORT.
70
  port = int(os.environ.get("PORT", "5000"))
71
- # Bind only to localhost in dev; bind to 0.0.0.0 when running on a platform port.
72
  host = "127.0.0.1" if port == 5000 else "0.0.0.0"
73
- app.run(host=host, port=port, debug=(host == "127.0.0.1"))
 
1
+ # -*- coding: utf-8 -*-
2
+ from __future__ import annotations
3
+
4
+ from flask import Flask, request, jsonify, Response
5
  from flask_cors import CORS
 
6
  from analysestock import analysestock
7
+ from list import (
8
+ build_companies_payload,
9
+ MARKETS, # kept for backward compat (not mutated)
10
+ get_markets, # merged filters (MARKETS + extras)
11
+ search_companies, # global search helper
12
+ )
13
+ import yfinance as yf # (kept as in your file; used elsewhere if needed)
14
  import json
15
  import os
16
+ import time
17
+ import requests
18
 
19
  app = Flask(__name__)
 
20
 
21
  # Allow your Angular Space + local dev
22
  FRONTEND_ORIGIN = os.environ.get(
 
34
 
35
  @app.get("/getfilters")
36
  def get_filters():
37
+ """
38
+ Returns UI filter tree.
39
+ Uses get_markets() so you get MARKETS + extra markets (NASDAQ-100, DAX-40, OMXS-30)
40
+ without changing your original MARKETS object.
41
+ """
42
+ return jsonify({
43
+ "asOf": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
44
+ "markets": get_markets()
45
+ })
46
 
47
  @app.get("/getcompanies")
48
  def get_companies():
49
+ """
50
+ /getcompanies?code=NIFTY50 (or ?index=...)
51
+ """
52
  code = (request.args.get("code") or request.args.get("index") or "").upper()
53
  if not code:
54
  return jsonify({"error": "Missing ?code=<INDEXCODE>"}), 400
 
60
  except Exception as e:
61
  return jsonify({"error": str(e)}), 500
62
 
63
@app.get("/search_companies")
def route_search_companies():
    """
    /search_companies?q=INFY&indices=NIFTY50,NIFTY100&limit=50
    - q: search term (symbol or company substring)
    - indices: optional CSV of index codes; defaults to all supported
    - limit: 1..200 (default 50)
    """
    q = request.args.get("q", "")
    indices_csv = request.args.get("indices", "")

    # Clamp limit to [1, 200]; any unparsable value falls back to 50.
    try:
        limit_i = max(1, min(200, int(request.args.get("limit", "50"))))
    except Exception:
        limit_i = 50

    indices = None
    if indices_csv.strip():
        indices = [c.strip().upper() for c in indices_csv.split(",") if c.strip()]

    try:
        hits = search_companies(q, indices=indices, limit=limit_i)
        return jsonify({
            "query": q,
            "count": len(hits),
            "results": hits
        })
    except RuntimeError as e:
        # e.g., pandas/lxml not installed for Wikipedia-backed indices
        return jsonify({"error": str(e)}), 500
    except Exception as e:
        return jsonify({"error": f"Unexpected error: {e}"}), 500
96
 
97
@app.route('/analysestock', methods=['POST'])
def analyze_all():
    """POST {"ticker": [...]} -> list of per-ticker analyses.

    Per-ticker failures are reported inline as {"ticker", "error"} entries so
    one bad symbol does not sink the whole batch. A body without a 'ticker'
    list is a client error (400), not an opaque 500.
    """
    try:
        data = request.get_json(force=True)
        tickers = (data or {}).get('ticker')
        if not isinstance(tickers, list):
            # Previously an unvalidated data['ticker'] raised and surfaced as a 500.
            return jsonify({"error": "Body must be JSON with a 'ticker' list."}), 400
        results = []
        for ticker in tickers:
            try:
                results.append(analysestock(ticker))
            except Exception as e:
                results.append({"ticker": ticker, "error": str(e)})
        # json.dumps (not jsonify) preserves OrderedDict key order in the output.
        return Response(json.dumps(results, indent=2), mimetype='application/json')
    except Exception as e:
        return jsonify({"error": str(e)}), 500
111
 
 
 
 
112
if __name__ == "__main__":
    # Default to 5000 locally; on Hugging Face Spaces the platform injects PORT.
    port = int(os.environ.get("PORT", "5000"))
    # Local dev stays on loopback; a platform-assigned port means bind all interfaces.
    if port == 5000:
        host = "127.0.0.1"
    else:
        host = "0.0.0.0"
    app.run(host=host, port=port, debug=(host == "127.0.0.1"))