Oviya committed on
Commit 9c5679c · 1 Parent(s): 84f053e

update list

Files changed (1)
  1. list.py +62 -169
list.py CHANGED
@@ -1,26 +1,29 @@
  # -*- coding: utf-8 -*-
+ """
+ Minimal API for PY-Trade filters & companies
+ - /getfilters -> countries -> exchanges -> indices
+ - /getcompanies?code=NIFTY50 -> { code, asOf, count, constituents[] }
+ """

  from __future__ import annotations
- import csv, io, json, time
- from typing import Dict, List, Any, Optional
+ import csv, io, json, time, os
+ from typing import Dict, List, Any
  from pathlib import Path
- from io import StringIO

  import requests
-
- # optional (for Wikipedia tables)
- try:
-     import pandas as pd  # requires: pip install pandas lxml
-     HAS_PANDAS = True
- except Exception:
-     HAS_PANDAS = False
+ from flask import Flask, request, jsonify
+ from flask_cors import CORS

  # ---------- configuration ----------
  UA = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127 Safari/537.36"
  REFERER = "https://www.niftyindices.com/indices/equity/broad-based-indices"
- TTL_SECONDS = 60 * 60 * 12  # 12 hours
- CACHE_DIR = Path(__file__).with_name("cache")
- CACHE_DIR.mkdir(exist_ok=True)
+ TTL_SECONDS = 60 * 60 * 12  # 12h cache
+ # Use /data/cache on HF Spaces (writable), else fall back to local folder "cache" next to this file.
+ DEFAULT_CACHE_DIR = os.getenv("CACHE_DIR", "/data/cache")
+ CACHE_DIR = Path(DEFAULT_CACHE_DIR if DEFAULT_CACHE_DIR else ".").expanduser()
+ if CACHE_DIR == Path("."):
+     CACHE_DIR = Path(__file__).with_name("cache")
+ CACHE_DIR.mkdir(parents=True, exist_ok=True)

  # Official CSV endpoints for NSE indices
  NIFTY_URLS: Dict[str, str] = {
@@ -31,15 +34,8 @@ NIFTY_URLS: Dict[str, str] = {
      "NIFTY500": "https://www.niftyindices.com/IndexConstituent/ind_nifty500list.csv",
  }

- # Wikipedia pages for other markets
- WIKI_PAGES: Dict[str, str] = {
-     "NASDAQ100": "https://en.wikipedia.org/wiki/NASDAQ-100",
-     "DAX40": "https://en.wikipedia.org/wiki/DAX",
-     "OMXS30": "https://en.wikipedia.org/wiki/OMX_Stockholm_30",
- }
-
- # Filters payload
- _MARKETS: Dict[str, Dict[str, List[Dict[str, str]]]] = {
+ # Filters payload for the UI (add more countries/exchanges here later)
+ MARKETS: Dict[str, Dict[str, List[Dict[str, str]]]] = {
      "India": {
          "NSE (National Stock Exchange)": [
              {"code": "NIFTY50", "name": "NIFTY 50"},
@@ -48,86 +44,34 @@ _MARKETS: Dict[str, Dict[str, List[Dict[str, str]]]] = {
              {"code": "NIFTYMID100", "name": "NIFTY Midcap 100"},
              {"code": "NIFTY500", "name": "NIFTY 500"},
          ]
-     },
-     "United States": {
-         "NASDAQ": [
-             {"code": "NASDAQ100", "name": "NASDAQ-100"}
-         ]
-     },
-     "Germany": {
-         "XETRA (Deutsche Börse)": [
-             {"code": "DAX40", "name": "DAX 40"}
-         ]
-     },
-     "Sweden": {
-         "OMX Stockholm": [
-             {"code": "OMXS30", "name": "OMX Stockholm 30"}
-         ]
      }
  }

- # ---------- public API (for routes) ----------
- def get_markets() -> Dict[str, Dict[str, List[Dict[str, str]]]]:
-     """Return filters structure used by UI."""
-     return _MARKETS
-
- def build_companies_payload(code: str) -> Dict[str, Any]:
-     """Return standardized payload for an index (with caching)."""
-     code = (code or "").upper().strip()
-     if not code:
-         raise ValueError("Index code is required.")
-
-     # try cache
-     cached = _load_cache(code)
-     if cached:
-         return cached
-
-     # route per code
-     if code in NIFTY_URLS:
-         url = NIFTY_URLS[code]
-         text = _http_get_text(url)
-         rows = _parse_nifty_csv(text)
-         exchange, country, currency, source = "NSE", "IN", "INR", url
-
-     elif code == "NASDAQ100":
-         rows, exchange, country, currency, source = _parse_nasdaq100()
-
-     elif code == "DAX40":
-         rows, exchange, country, currency, source = _parse_dax40()
-
-     elif code == "OMXS30":
-         rows, exchange, country, currency, source = _parse_omxs30()
-
-     else:
-         raise ValueError(f"Unknown index code: {code}")
-
-     payload = {
-         "code": code,
-         "exchange": exchange,
-         "country": country,
-         "currency": currency,
-         "asOf": _now_iso_utc(),
-         "count": len(rows),
-         "constituents": rows,
-         "source": source,
-     }
-     _save_cache(code, payload)
-     return payload
-
- # ---------- internals ----------
- def _http_get_text(url: str, accept: str = "text/csv,*/*") -> str:
+ # ---------- utilities ----------
+ def http_get_text(url: str) -> str:
      sess = requests.Session()
-     sess.headers.update({"User-Agent": UA, "Referer": REFERER, "Accept": accept})
-     r = sess.get(url, timeout=30)
+     sess.headers.update({"User-Agent": UA, "Referer": REFERER, "Accept": "text/csv,*/*"})
+     r = sess.get(url, timeout=25)
      r.raise_for_status()
      r.encoding = r.encoding or "utf-8"
      return r.text

- def _cache_path(code: str) -> Path:
+ def parse_nifty_csv(text: str) -> List[Dict[str, str]]:
+     # Columns: Company Name, Industry, Symbol, Series, ISIN Code
+     out: List[Dict[str, str]] = []
+     rdr = csv.DictReader(io.StringIO(text))
+     for row in rdr:
+         sym = (row.get("Symbol") or "").strip()
+         name = (row.get("Company Name") or "").strip()
+         if sym and name:
+             out.append({"symbol": f"{sym}.NS", "company": name})
+     return out
+
+ def cache_path(code: str) -> Path:
      return CACHE_DIR / f"{code.lower()}.json"

- def _load_cache(code: str) -> Optional[Any]:
-     fp = _cache_path(code)
+ def load_cache(code: str) -> Any | None:
+     fp = cache_path(code)
      if not fp.exists():
          return None
      age = time.time() - fp.stat().st_mtime
@@ -136,86 +80,35 @@ def _load_cache(code: str) -> Optional[Any]:
      with fp.open("r", encoding="utf-8") as f:
          return json.load(f)

- def _save_cache(code: str, payload: Any) -> None:
-     fp = _cache_path(code)
+ def save_cache(code: str, payload: Any) -> None:
+     fp = cache_path(code)
      with fp.open("w", encoding="utf-8") as f:
          json.dump(payload, f, ensure_ascii=False, indent=2)

- def _now_iso_utc() -> str:
-     return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
-
- def _parse_nifty_csv(text: str) -> List[Dict[str, str]]:
-     out: List[Dict[str, str]] = []
-     rdr = csv.DictReader(io.StringIO(text))
-     for row in rdr:
-         sym = (row.get("Symbol") or "").strip()
-         name = (row.get("Company Name") or "").strip()
-         if sym and name:
-             out.append({"symbol": f"{sym}.NS", "company": name})
-     return out
-
- # ---- Wikipedia helpers ----
- def _fetch_wiki_tables(url: str):
-     if not HAS_PANDAS:
-         raise RuntimeError("pandas/lxml not installed. Run: pip install pandas lxml")
-     html = _http_get_text(url, accept="text/html,*/*")
-     return pd.read_html(StringIO(html))
-
- def _pick_table_and_columns(tables, ticker_candidates, company_candidates):
-     for t in tables:
-         cols_map = {str(c).strip().lower(): c for c in t.columns}
-         ticker_col = next((cols_map[c] for c in ticker_candidates if c in cols_map), None)
-         company_col = next((cols_map[c] for c in company_candidates if c in cols_map), None)
-         if ticker_col is not None and company_col is not None:
-             return t, ticker_col, company_col
-     raise RuntimeError(
-         f"No suitable table found. Ticker in {ticker_candidates}, company in {company_candidates}."
-     )
-
- def _parse_wiki_constituents(url: str, ticker_candidates, company_candidates, suffix: str, upper_tickers: bool) -> List[Dict[str, str]]:
-     tables = _fetch_wiki_tables(url)
-     df, t_col, c_col = _pick_table_and_columns(tables, ticker_candidates, company_candidates)
-     rows: List[Dict[str, str]] = []
-     for sym, name in zip(df[t_col], df[c_col]):
-         s = str(sym).strip()
-         if not s or not str(name).strip():
-             continue
-         if upper_tickers:
-             s = s.upper()
-         rows.append({"symbol": f"{s}{suffix}", "company": str(name).strip()})
-     if not rows:
-         raise RuntimeError("Parsed zero rows from Wikipedia table.")
-     return rows
+ def build_companies_payload(code: str) -> Dict[str, Any]:
+     code = code.upper()
+     # 1) try cache
+     cached = load_cache(code)
+     if cached:
+         return cached

- def _parse_nasdaq100():
-     url = WIKI_PAGES["NASDAQ100"]
-     rows = _parse_wiki_constituents(
-         url,
-         ticker_candidates=["ticker", "symbol"],
-         company_candidates=["company", "name"],
-         suffix="",
-         upper_tickers=True,
-     )
-     return rows, "NASDAQ", "US", "USD", url
+     # 2) fetch official CSV
+     url = NIFTY_URLS.get(code)
+     if not url:
+         raise ValueError(f"Unknown index code: {code}")
+     text = http_get_text(url)
+     rows = parse_nifty_csv(text)

- def _parse_dax40():
-     url = WIKI_PAGES["DAX40"]
-     rows = _parse_wiki_constituents(
-         url,
-         ticker_candidates=["ticker symbol", "ticker", "symbol"],
-         company_candidates=["company", "name"],
-         suffix=".DE",
-         upper_tickers=True,
-     )
-     return rows, "XETRA", "DE", "EUR", url
-
- def _parse_omxs30():
-     url = WIKI_PAGES["OMXS30"]
-     rows = _parse_wiki_constituents(
-         url,
-         ticker_candidates=["ticker", "symbol"],
-         company_candidates=["company", "name"],
-         suffix=".ST",
-         upper_tickers=True,
-     )
-     return rows, "OMX Stockholm", "SE", "SEK", url
+     payload = {
+         "code": code,
+         "exchange": "NSE",
+         "country": "IN",
+         "currency": "INR",
+         "asOf": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
+         "count": len(rows),
+         "constituents": rows,
+         "source": url,
+     }
+     save_cache(code, payload)
+     return payload
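
Note: the hunks above add the Flask and flask-cors imports and a module docstring advertising /getfilters and /getcompanies, but the route definitions themselves are not in the visible diff. A minimal sketch of how those routes might wire up to MARKETS and build_companies_payload (the endpoint paths come from the docstring; the app object, error handling, and port choice are assumptions, not code from this commit):

# Hypothetical route wiring, assumed to live in the same module as the code above.
from flask import Flask, jsonify, request
from flask_cors import CORS

app = Flask(__name__)
CORS(app)  # let the PY-Trade UI call this API from another origin

@app.get("/getfilters")
def getfilters():
    # countries -> exchanges -> indices, straight from the MARKETS dict
    return jsonify(MARKETS)

@app.get("/getcompanies")
def getcompanies():
    code = (request.args.get("code") or "").strip()
    if not code:
        return jsonify({"error": "query parameter 'code' is required"}), 400
    try:
        return jsonify(build_companies_payload(code))
    except ValueError as exc:  # unknown index code
        return jsonify({"error": str(exc)}), 400

if __name__ == "__main__":
    # 7860 is the customary HF Spaces port; also an assumption here
    app.run(host="0.0.0.0", port=7860)

With wiring like this running, curl "http://localhost:7860/getcompanies?code=NIFTY50" would return the cached JSON payload that build_companies_payload assembles above.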