import spaces  # Hugging Face Spaces SDK (imported first for the Spaces runtime)

import http.client
import logging
import os
import re
from urllib.parse import urlparse

import emoji
import gradio as gr
import pandas as pd
import requests
import unicodedata as uni
from dotenv import load_dotenv
from langchain.chains import RetrievalQA
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import DataFrameLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_openai import ChatOpenAI

# Load environment variables (e.g. OPENAI_API_KEY) from a .env file
load_dotenv()

# Set command line arguments for Gradio
os.environ["COMMANDLINE_ARGS"] = "--no-gradio-queue"

# Configure logging
logging.basicConfig(
    level=logging.DEBUG,
    format="%(asctime)s [%(levelname)s] %(message)s",
    handlers=[logging.StreamHandler()],
)
logger = logging.getLogger(__name__)

# Log low-level HTTP traffic to help debug the Tokopedia requests
http.client.HTTPConnection.debuglevel = 1
req_log = logging.getLogger("requests.packages.urllib3")
req_log.setLevel(logging.DEBUG)
req_log.propagate = True

# Constants
LIMIT = 1000  # Cap at 1000 reviews to avoid long processing times
OPENAI_MODEL = "gpt-3.5-turbo"

# Globals shared across requests
db = None
qa = None
cache = {}
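# Pipeline overview:
#   product URL -> product ID (PDPGetLayoutQuery)
#   -> scrape paginated reviews (productReviewList)
#   -> clean text -> split into chunks -> embed into a FAISS index
#   -> RetrievalQA answers the user's query over retrieved review chunks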
# Request the product ID for a shop/product pair from Tokopedia's GraphQL API
def request_product_id(shop_domain, product_key):
    endpoint = "https://gql.tokopedia.com/graphql/PDPGetLayoutQuery"
    payload = {
        "operationName": "PDPGetLayoutQuery",
        "variables": {
            "shopDomain": shop_domain,
            "productKey": product_key,
            "apiVersion": 1,
        },
        "query": "fragment ProductVariant on pdpDataProductVariant { errorCode parentID defaultChild children { productID } __typename } query PDPGetLayoutQuery($shopDomain: String, $productKey: String, $layoutID: String, $apiVersion: Float, $userLocation: pdpUserLocation, $extParam: String, $tokonow: pdpTokoNow, $deviceID: String) { pdpGetLayout(shopDomain: $shopDomain, productKey: $productKey, layoutID: $layoutID, apiVersion: $apiVersion, userLocation: $userLocation, extParam: $extParam, tokonow: $tokonow, deviceID: $deviceID) { requestID name pdpSession basicInfo { id: productID } components { name type position data { ...ProductVariant __typename } __typename } __typename } }",
    }
    headers = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36",
        "Referer": "https://www.tokopedia.com",
        "X-TKPD-AKAMAI": "pdpGetLayout",
    }
    return requests.post(endpoint, json=payload, headers=headers, timeout=30)
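# The PDPGetLayoutQuery response nests the product ID under
# data.pdpGetLayout.basicInfo.id; get_product_id() below unpacks it.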
# Request one page of product reviews from Tokopedia's GraphQL API
def request_product_review(product_id, page=1, limit=20):
    ENDPOINT = "https://gql.tokopedia.com/graphql/productReviewList"
    payload = {
        "operationName": "productReviewList",
        "variables": {
            "productID": str(product_id),
            "page": page,
            "limit": limit,
            "sortBy": "",
            "filterBy": "",
        },
        "query": """query productReviewList($productID: String!, $page: Int!, $limit: Int!, $sortBy: String, $filterBy: String) {
  productrevGetProductReviewList(productID: $productID, page: $page, limit: $limit, sortBy: $sortBy, filterBy: $filterBy) {
    productID
    list {
      id: feedbackID
      variantName
      message
      productRating
      reviewCreateTime
      reviewCreateTimestamp
      isReportable
      isAnonymous
      reviewResponse {
        message
        createTime
        __typename
      }
      user {
        userID
        fullName
        image
        url
        __typename
      }
      likeDislike {
        totalLike
        likeStatus
        __typename
      }
      stats {
        key
        formatted
        count
        __typename
      }
      badRatingReasonFmt
      __typename
    }
    shop {
      shopID
      name
      url
      image
      __typename
    }
    hasNext
    totalReviews
    __typename
  }
}
""",
    }
    headers = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36",
        "Referer": "https://www.tokopedia.com",
        "X-TKPD-AKAMAI": "productReviewList",
    }
    try:
        response = requests.post(ENDPOINT, json=payload, headers=headers, timeout=60)
        response.raise_for_status()
        logger.info(f"Request successful. Status code: {response.status_code}")
        return response
    except requests.exceptions.RequestException as e:
        logger.error(f"Request failed: {e}")
        return None
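# Pagination contract: `page`/`limit` select a window of reviews, and the
# response's `hasNext` flag tells scrape() below whether to keep fetching.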
# Scrape up to max_reviews reviews for a product
def scrape(product_id, max_reviews=LIMIT):
    all_reviews = []
    page = 1
    has_next = True
    logger.info("Extracting product reviews...")
    while has_next and len(all_reviews) < max_reviews:
        response = request_product_review(product_id, page=page)
        if not response:
            break
        data = response.json()["data"]["productrevGetProductReviewList"]
        all_reviews.extend(data["list"])
        has_next = data["hasNext"]
        page += 1
    if not all_reviews:
        return pd.DataFrame(columns=["comment"])  # No reviews: empty frame, caller checks .empty
    reviews_df = pd.json_normalize(all_reviews[:max_reviews])
    reviews_df = reviews_df.rename(columns={"message": "comment"})[["comment"]]
    logger.info(reviews_df.head())
    return reviews_df
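# With the default page size of 20, reaching LIMIT=1000 reviews takes up to 50 requests.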
# Extract the product ID from a Tokopedia product URL
def get_product_id(URL):
    parsed_url = urlparse(URL)
    *_, shop, product_key = parsed_url.path.split("/")
    response = request_product_id(shop, product_key)
    if response:
        product_id = response.json()["data"]["pdpGetLayout"]["basicInfo"]["id"]
        logger.info(f"Product ID: {product_id}")
        return product_id
    logger.error("Failed to get product ID")
    return None
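# Tokopedia product URLs end in /<shop>/<product-key>; for a hypothetical
# https://www.tokopedia.com/some-shop/some-product this yields
# shop="some-shop" and product_key="some-product".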
# Clean the reviews DataFrame
def clean(df):
    df = df.dropna().copy().reset_index(drop=True)  # Drop reviews with missing comments
    df = df[df["comment"] != ""].reset_index(drop=True)  # Remove empty reviews
    df["comment"] = df["comment"].apply(clean_text)  # Clean the review text
    df = df[df["comment"] != ""].reset_index(drop=True)  # Remove reviews emptied by cleaning
    logger.info("Cleaned reviews DataFrame")
    return df

# Clean an individual review text
def clean_text(text):
    text = uni.normalize("NFKD", text)  # Normalize Unicode characters
    text = emoji.replace_emoji(text, "")  # Remove emoji
    text = re.sub(r"(\w)\1{2,}", r"\1", text)  # Collapse characters repeated 3+ times
    text = re.sub(r"[ ]+", " ", text).strip()  # Collapse runs of spaces and trim
    return text
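# Illustrative example: clean_text("mantaaap 👍👍") -> "mantap"
# (emoji stripped, the tripled "a" collapsed, trailing whitespace trimmed)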
# Initialize the LLM and the embedding model
llm = ChatOpenAI(model=OPENAI_MODEL, temperature=0.1)
embeddings = HuggingFaceEmbeddings(model_name="LazarusNLP/all-indobert-base-v2")
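# Note: LazarusNLP/all-indobert-base-v2 is an IndoBERT-based sentence-embedding
# model, matching the Indonesian-language reviews; ChatOpenAI reads
# OPENAI_API_KEY from the environment populated by load_dotenv() above.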
# Generate an answer about a product's reviews (RAG over the scraped reviews)
async def generate(URL, query):
    global db, qa, cache
    if not URL or not query:
        return "Input kosong"  # "Empty input"
    try:
        product_id = get_product_id(URL)
        if not product_id:
            return "Gagal mendapatkan product ID"  # "Failed to get the product ID"
        if URL not in cache:
            reviews = scrape(product_id)
            if reviews.empty:
                return "Tidak ada ulasan ditemukan"  # "No reviews found"
            cleaned_reviews = clean(reviews)
            loader = DataFrameLoader(cleaned_reviews, page_content_column="comment")
            documents = loader.load()
            text_splitter = RecursiveCharacterTextSplitter(
                chunk_size=1000, chunk_overlap=50
            )
            docs = text_splitter.split_documents(documents)
            db = FAISS.from_documents(docs, embeddings)
            cache[URL] = (docs, db)
        else:
            docs, db = cache[URL]
        qa = RetrievalQA.from_chain_type(llm=llm, retriever=db.as_retriever())
        res = await qa.ainvoke(query)
        return res["result"]
    except Exception as e:
        logger.error(f"Error in generating response: {e}")
        return "Gagal mendapatkan review dari URL"  # "Failed to fetch reviews from the URL"
# Set up the Gradio interface
product_box = gr.Textbox(
    label="URL Produk",  # "Product URL"
    placeholder="URL produk dari Tokopedia",  # "Product URL from Tokopedia"
)
query_box = gr.Textbox(
    lines=2,
    label="Kueri",  # "Query"
    placeholder=(
        "Contoh: Apa yang orang katakan tentang kualitas produknya?, "
        "Bagaimana pendapat orang yang kurang puas dengan produknya?"
    ),  # "Example: What do people say about the product's quality?, What do buyers who were less satisfied think?"
)

gr.Interface(
    fn=generate,
    inputs=[product_box, query_box],
    outputs=[gr.Textbox(label="Jawaban")],  # "Answer"
    title="RingkasUlas",
    description=(
        "Bot percakapan yang bisa meringkas ulasan-ulasan produk di Tokopedia "
        "Indonesia (https://tokopedia.com/). Harap bersabar, bot ini dapat "
        "memakan waktu agak lama saat mengambil ulasan dari Tokopedia dan "
        "menyiapkan jawabannya."
    ),  # "A chatbot that summarizes product reviews on Tokopedia Indonesia. Please be patient; fetching reviews and preparing an answer can take a while."
    allow_flagging="never",
).launch(debug=True)
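# To run locally (a sketch; assumes this file is saved as app.py and a .env
# file provides OPENAI_API_KEY):
#   pip install gradio requests pandas emoji python-dotenv faiss-cpu \
#       sentence-transformers langchain langchain-openai langchain-community
#   python app.py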