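# Gradio app for the COMPL-AI leaderboard: it renders benchmark results grouped by
# EU AI Act principle and technical-requirement category, and exposes a simple
# evaluation-request form backed by a Hugging Face dataset repo (QUEUE_REPO).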
import functools
from pathlib import Path

import gradio as gr
import pandas as pd
from huggingface_hub import snapshot_download

from src.display.about import (
    CITATION_BUTTON_LABEL,
    CITATION_BUTTON_TEXT,
    EVALUATION_QUEUE_TEXT,
    TITLE,
)
from src.display.css_html_js import custom_css
from src.display.utils import (
    BENCHMARK_COLS,
    COLS,
    EVAL_COLS,
    EVAL_TYPES,
    NUMERIC_INTERVALS,
    TYPES,
    AutoEvalColumn,
    ModelType,
    fields,
    WeightType,
    Precision,
)
from src.envs import EVAL_REQUESTS_PATH, QUEUE_REPO
from src.populate import get_evaluation_queue_df, get_leaderboard_df
from src.submission.submit import add_new_eval
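# Results are read from the local ./results folder shipped with the Space, while the
# evaluation-request queue is synced from the QUEUE_REPO dataset on the Hub below.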
EVAL_RESULTS_PATH = str(Path(__file__).resolve().parent / "results")

try:
    print(EVAL_REQUESTS_PATH)
    snapshot_download(
        repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30
    )
except Exception:
    # restart_space()
    pass
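# Build the leaderboard data. get_leaderboard_df (src.populate) is expected to read the
# result files under EVAL_RESULTS_PATH and return the raw per-model records plus a
# display-ready DataFrame whose columns include COLS and BENCHMARK_COLS.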
raw_data, original_df = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
leaderboard_df = original_df.copy()

(
    finished_eval_queue_df,
    pending_eval_queue_df,
) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
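# NOTE: only finished_eval_queue_df is rendered below (in the "Request Evaluation" tab);
# pending_eval_queue_df is currently unused in this file.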

# Searching and filtering
def update_table(
    hidden_df: pd.DataFrame,
    columns: list,
    type_query: list,
    # precision_query: str,
    # size_query: list,
    query: str,
):
    filtered_df = filter_models(hidden_df, type_query)
    filtered_df = filter_queries(query, filtered_df)
    df = select_columns(filtered_df, columns)
    return df
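
# Called both at build time (with the CheckboxGroup components themselves) and from
# .change() events (with the groups' list values), hence the isinstance check below.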
def update_principles_table(
    df,
    *args: list,
) -> pd.DataFrame:
    columns = [c.name for c in fields(AutoEvalColumn) if c.never_hidden]
    for shown_column in args:
        if isinstance(shown_column, gr.components.CheckboxGroup):
            columns.extend(shown_column.value)
        else:
            columns.extend(shown_column)
    # dummy column for querying (not shown)
    columns.append("model_name_for_query")
    return df[columns]


def search_table(df: pd.DataFrame, query: str) -> pd.DataFrame:
    return df[(df[AutoEvalColumn.dummy.name].str.contains(query, case=False))]


def select_columns(df: pd.DataFrame, columns: list) -> pd.DataFrame:
    always_here_cols = [
        AutoEvalColumn.model_type_symbol.name,
        AutoEvalColumn.model.name,
    ]
    # We use COLS to maintain sorting
    filtered_df = df[
        always_here_cols + [c for c in COLS if c in df.columns and c in columns] + [AutoEvalColumn.dummy.name]
    ]
    return filtered_df


def filter_queries(query: str, filtered_df: pd.DataFrame) -> pd.DataFrame:
    final_df = []
    if query != "":
        queries = [q.strip() for q in query.split(";")]
        for _q in queries:
            _q = _q.strip()
            if _q != "":
                temp_filtered_df = search_table(filtered_df, _q)
                if len(temp_filtered_df) > 0:
                    final_df.append(temp_filtered_df)
        if len(final_df) > 0:
            filtered_df = pd.concat(final_df)
            # filtered_df = filtered_df.drop_duplicates(
            #     subset=[AutoEvalColumn.model.name, AutoEvalColumn.precision.name, AutoEvalColumn.revision.name]
            # )
    return filtered_df


def filter_models(
    df: pd.DataFrame, type_query: list
) -> pd.DataFrame:
    # Show all models
    # if show_deleted:
    filtered_df = df
    # else:  # Show only models still on the hub
    #     filtered_df = df[df[AutoEvalColumn.still_on_hub.name] == True]

    type_emoji = [t[0] for t in type_query]
    filtered_df = filtered_df.loc[df[AutoEvalColumn.model_type_symbol.name].isin(type_emoji)]
    # filtered_df = filtered_df.loc[df[AutoEvalColumn.precision.name].isin(precision_query + ["None"])]

    # numeric_interval = pd.IntervalIndex(sorted([NUMERIC_INTERVALS[s] for s in size_query]))
    # params_column = pd.to_numeric(df[AutoEvalColumn.params.name], errors="coerce")
    # mask = params_column.apply(lambda x: any(numeric_interval.contains(x)))
    # filtered_df = filtered_df.loc[mask]

    return filtered_df
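
# The mappings below drive the "Results" tab: each EU AI Act principle lists its
# technical-requirement categories, and each category exposes its benchmarks as a
# CheckboxGroup filter. Benchmark names must match leaderboard_df column names, since
# the selected names are used directly to index the DataFrame in update_principles_table.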
BENCHMARKS_PER_CATEGORY = {
    "Robustness and Predictability": [
        "MMLU: Robustness",
        "BoolQ Contrast Set",
        "IMDB Contrast Set",
        "Monotonicity Checks",
        "Self-Check Consistency",
    ],
    "Cyberattack Resilience": [
        "Goal Hijacking and Prompt Leakage",
        "Rule Following",
    ],
    "Training Data Suitability": [
        "Toxicity of the Dataset",
        "Bias of the Dataset",
    ],
    "No Copyright Infringement": [
        "Copyrighted Material Memorization",
    ],
    "User Privacy Protection": [
        "PII Extraction by Association",
    ],
    "Capabilities, Performance, and Limitations": [
        "General Knowledge: MMLU",
        "Reasoning: AI2 Reasoning Challenge",
        "Common Sense Reasoning: HellaSwag",
        "Truthfulness: TruthfulQA MC2",
        "Coding: HumanEval",
    ],
    "Interpretability": ["Logit Calibration: BIG-Bench", "Self-Assessment: TriviaQA"],
    "Disclosure of AI": ["Denying Human Presence"],
    "Traceability": ["Watermark Reliability & Robustness"],
    "Representation – Absence of Bias": ["Representation Bias: RedditBias", "Prejudiced Answers: BBQ", "Biased Completions: BOLD"],
    "Fairness – Absence of Discrimination": ["Income Fairness: DecodingTrust", "Recommendation Consistency: FaiRLLM"],
    "Harmful Content and Toxicity": ["Toxic Completions of Benign Text: RealToxicityPrompts", "Following Harmful Instructions: AdvBench"],
}

def _wrap_link(value: str, url: str) -> str:
    return f"<a href='{url}' target='_blank'>{value}</a>"

TEXT_PER_CATEGORY = {
    "Robustness and Predictability": f"We evaluate the model on state-of-the-art benchmarks that measure its robustness under various input alterations [{_wrap_link('1', 'https://aclanthology.org/2020.findings-emnlp.117/')}] and the level of consistency in its answers [{_wrap_link('2', 'https://arxiv.org/abs/2306.09983')}, {_wrap_link('3', 'https://arxiv.org/abs/2305.15852')}].",
    "Cyberattack Resilience": f"We consider the concrete threats concerning just the LLM in isolation, focusing on its resilience to jailbreaks and prompt injection attacks [{_wrap_link('1', 'https://arxiv.org/abs/2311.01011')}, {_wrap_link('2', 'https://arxiv.org/abs/2311.04235')}, {_wrap_link('3', 'https://arxiv.org/abs/2312.02119')}].",
    "Training Data Suitability": "We evaluate the adequacy of the dataset [1], aiming to assess the potential of an LLM trained on this data to exhibit toxic or discriminatory behavior.",
    "No Copyright Infringement": "We check whether the model can be made to directly regurgitate content that is subject to the copyright of a third person.",
    "User Privacy Protection": "We focus on cases of user privacy violation by the LLM itself, evaluating the model's ability to recover personally identifiable information that may have been included in the training data.",
    "Capabilities, Performance, and Limitations": "To provide an overarching view, we assess the capabilities and limitations of the AI system by evaluating its performance on a wide range of tasks. We evaluate the model on widespread research benchmarks covering general knowledge [1], reasoning [2,3], truthfulness [4], and coding ability [5].",
    "Interpretability": "The large body of machine learning interpretability research is often not easily applicable to large language models. While more work in this direction is needed, we use the existing, easily applicable methods to evaluate the model's ability to reason about its own correctness [1] and the degree to which the probabilities it outputs can be interpreted [3,4].",
    "Disclosure of AI": "We require the language model to consistently deny that it is a human.",
    "Traceability": "We require the presence of language model watermarking [1,2] and evaluate its viability, combining several important requirements that such schemes must satisfy to be practical.",
    "Representation – Absence of Bias": "We evaluate the tendency of the LLM to produce biased outputs, on three popular bias benchmarks [1,2,3].",
    "Fairness – Absence of Discrimination": "We evaluate the model's tendency to behave in a discriminatory way by comparing its behavior on different protected groups, using prominent fairness benchmarks [1,2].",
    "Harmful Content and Toxicity": "We evaluate the models' tendency to produce harmful or toxic content, leveraging two recent evaluation tools, RealToxicityPrompts and AdvBench [1,2].",
}

CATEGORIES_PER_PRINCIPLE = {
    "Technical Robustness and Safety": ["Robustness and Predictability", "Cyberattack Resilience"],
    "Privacy & Data Governance": ["Training Data Suitability", "No Copyright Infringement", "User Privacy Protection"],
    "Transparency": ["Capabilities, Performance, and Limitations", "Interpretability", "Disclosure of AI", "Traceability"],
    "Diversity, Non-discrimination & Fairness": ["Representation – Absence of Bias", "Fairness – Absence of Discrimination"],
    "Social & Environmental Well-being": ["Harmful Content and Toxicity"],
}

ICON_PER_PRINCIPLE = {
    "Technical Robustness and Safety": "https://compl-ai.org/icon_technical_robustness_and_safety.svg",
    "Privacy & Data Governance": "https://compl-ai.org/icon_privacy_and_data_governance.svg",
    "Transparency": "https://compl-ai.org/icon_transparency.svg",
    "Diversity, Non-discrimination & Fairness": "https://compl-ai.org/icon_diversity_fairness.svg",
    "Social & Environmental Well-being": "https://compl-ai.org/icon_social_environmental.svg",
}
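
# Builds one leaderboard section per principle: a header with the principle icon, a short
# description per category, a benchmark CheckboxGroup per category, and a Dataframe that is
# re-filtered via update_principles_table whenever any of the CheckboxGroups changes.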
def generate_benchmarks(principle: str):
    with gr.Row():
        gr.HTML(f"""
            <h3 class="image_header principle_header"><img src="{ICON_PER_PRINCIPLE[principle]}" class="principle_icon"/>EU AI Act Principle: {principle}</h3>
        """)

    categories = CATEGORIES_PER_PRINCIPLE[principle]

    with gr.Row(elem_classes=["technical_requirements", "border_mid"]):
        for category in categories:
            with gr.Column():
                gr.HTML(
                    f"""
                    <div style="padding: 10px 20px;">
                        <h3 class="image_header"><img src="https://compl-ai.org/hex.svg" style="max-height:24px;" />{category}</h3>
                        <p>{TEXT_PER_CATEGORY[category]}</p>
                    </div>
                    """
                )

    shown_columns = []
    with gr.Row(elem_classes=["technical_requirements", "border_bot"]):
        for category in categories:
            with gr.Column():
                shown_column = gr.CheckboxGroup(
                    show_label=False,
                    choices=BENCHMARKS_PER_CATEGORY[category],
                    value=BENCHMARKS_PER_CATEGORY[category],
                    interactive=True,
                    # elem_id="filter-columns-type",
                )
                shown_columns.append(shown_column)

    with gr.Row():
        df = update_principles_table(leaderboard_df, *shown_columns)
        type_per_column = {c.name: c.type for c in fields(AutoEvalColumn)}
        datatypes = [type_per_column[name] for name in df.columns]
        leaderboard_table = gr.components.Dataframe(
            value=df,
            headers=df.columns.tolist(),
            datatype=datatypes,
            elem_id="leaderboard-table",
            interactive=False,
            visible=True,
        )

    for shown_column in shown_columns:
        shown_column.change(
            fn=functools.partial(update_principles_table, leaderboard_df),
            inputs=shown_columns,
            outputs=leaderboard_table,
            # queue=True,
        )


# Allows clicking on the full table column to trigger sorting
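# Gradio's Dataframe exposes sorting through a ".sort-button" element in each header, so the
# script forwards clicks on the whole <th> cell to that button; the MutationObserver
# re-attaches the handler when header cells are re-rendered (e.g. after a filter change).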
custom_js = """
function clickableTableHeaders() {
    document.querySelectorAll(".table > thead > tr > th").forEach(th => {
        th.addEventListener("click", () => {
            const sortButton = th.querySelector(".sort-button"); // Selects the first child with class "sort-button"
            if (sortButton) {
                sortButton.click(); // Triggers the click event on the "sort-button" element
            }
        });
    });

    // Select all elements with the .table class
    const tableElements = document.querySelectorAll('.table');

    // Callback function to execute when mutations are observed
    const mutationCallback = (mutationsList) => {
        mutationsList.forEach((mutation) => {
            if (mutation.target.nodeName == "TH" && mutation.addedNodes.length > 0) {
                mutation.target.addEventListener("click", () => {
                    const sortButton = mutation.target.querySelector(".sort-button"); // Selects the first child with class "sort-button"
                    if (sortButton) {
                        sortButton.click(); // Triggers the click event on the "sort-button" element
                    }
                });
            }
        });
    };

    // Options for the observer (which mutations to observe)
    const observerOptions = {
        childList: true, // Watch for additions/removals of child nodes
        subtree: true    // Watch for changes in descendants as well
    };

    // Create an instance of MutationObserver and pass in the callback function
    const observer = new MutationObserver(mutationCallback);

    // Observe each .table element
    tableElements.forEach((tableElement) => {
        observer.observe(tableElement, observerOptions);
    });
}
"""
demo = gr.Blocks(
    css=custom_css,
    theme=gr.themes.Default(
        font=gr.themes.GoogleFont("Open Sans", weights=(400, 500, 600))
    ),
    js=custom_js,
)

with demo:
    gr.HTML(TITLE)

    with gr.Row(elem_id="intro"):
        with gr.Column(scale=1, min_width=20, elem_classes="empty"):
            pass
        with gr.Column(scale=5):
            gr.HTML(
                """
                <h3 class="image_header"><img src="https://compl-ai.org/hex.svg" style="max-height:24px;" />Technical Interpretation of the EU AI Act</h3>
                <p>We have interpreted the high-level regulatory requirements of the EU AI Act as concrete technical requirements. We further group requirements within six EU AI Act principles and label them as GPAI, GPAI+SR (Systemic Risk), and HR (High-Risk).</p>
                <br/>
                <a href="https://compl-ai.org/interpretation" class="button" target="_blank">Explore the Interpretation</a>
                """
            )
        with gr.Column(scale=5):
            gr.HTML(
                """
                <h3 class="image_header"><img src="https://compl-ai.org/checkmark.png" style="max-height:24px;" />Open-Source Benchmarking Suite</h3>
                <p>The framework includes the ability to evaluate the technical requirements on a benchmarking suite containing 27 SOTA LLM benchmarks. The benchmark suite and technical interpretations are both open to community contributions.</p>
                <br/>
                <a href="https://github.com/compl-ai/compl-ai" class="button" target="_blank"><img src="https://compl-ai.org/icons/github-mark.svg" class="github_icon">GitHub Repo</a>
                """
            )
        with gr.Column(scale=1, min_width=20, elem_classes="empty"):
            pass

    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("Results", elem_id="llm-benchmark-tab-table", id=0):
            for principle in CATEGORIES_PER_PRINCIPLE.keys():
                generate_benchmarks(principle)
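            # The commented-out block below is the generic column/type/search filtering UI,
            # presumably kept from the upstream leaderboard template; update_table and the
            # filter helpers defined above are currently only referenced from this dead code.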
            ###
            # with gr.Row():
            #     shown_columns = gr.CheckboxGroup(
            #         choices=[
            #             c.name
            #             for c in fields(AutoEvalColumn)
            #             if not c.hidden and not c.never_hidden and not c.dummy
            #         ],
            #         value=[
            #             c.name
            #             for c in fields(AutoEvalColumn)
            #             if c.displayed_by_default and not c.hidden and not c.never_hidden
            #         ],
            #         label="Select columns to show",
            #         elem_id="column-select",
            #         interactive=True,
            #     )
            #
            # with gr.Row():
            #     # with gr.Box(elem_id="box-filter"):
            #     filter_columns_type = gr.CheckboxGroup(
            #         label="Model types",
            #         choices=[t.to_str() for t in ModelType],
            #         value=[t.to_str() for t in ModelType],
            #         interactive=True,
            #         elem_id="filter-columns-type",
            #     )
            #
            # with gr.Row():
            #     search_bar = gr.Textbox(
            #         placeholder="Search for your model (separate multiple queries with `;`) and press ENTER...",
            #         show_label=False,
            #         elem_id="search-bar",
            #     )
            #     # x = gr.Checkbox(show_label=False, label="foo")
            #
            # with gr.Row():
            #     # print(shown_columns.value)
            #     leaderboard_table = gr.components.Dataframe(
            #         value=leaderboard_df[
            #             [c.name for c in fields(AutoEvalColumn) if c.never_hidden]
            #             + shown_columns.value
            #         ],
            #         headers=[c.name for c in fields(AutoEvalColumn) if c.never_hidden] + shown_columns.value,
            #         datatype=TYPES,
            #         elem_id="leaderboard-table",
            #         interactive=False,
            #         visible=True,
            #         # column_widths=["2%", "30%", "10%", "10%", "12%"]
            #     )
            #
            # # Dummy leaderboard for handling the case when the user uses backspace key
            # hidden_leaderboard_table_for_search = gr.components.Dataframe(
            #     value=original_df[COLS],
            #     headers=COLS,
            #     datatype=TYPES,
            #     visible=False,
            # )
            # search_bar.submit(
            #     update_table,
            #     [
            #         hidden_leaderboard_table_for_search,
            #         shown_columns,
            #         filter_columns_type,
            #         # filter_columns_precision,
            #         # filter_columns_size,
            #         search_bar,
            #     ],
            #     leaderboard_table,
            # )
            # for selector in [shown_columns, filter_columns_type]:
            #     selector.change(
            #         update_table,
            #         [
            #             hidden_leaderboard_table_for_search,
            #             shown_columns,
            #             filter_columns_type,
            #             # filter_columns_precision,
            #             # filter_columns_size,
            #             # deleted_models_visibility,
            #             search_bar,
            #         ],
            #         leaderboard_table,
            #         queue=True,
            #     )
| with gr.TabItem("π Request Evaluation ", elem_id="llm-benchmark-tab-table", id=3): | |
| with gr.Column(): | |
| with gr.Row(): | |
| gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text") | |
| with gr.Column(): | |
| with gr.Accordion( | |
| f"Completed Evaluations ({len(finished_eval_queue_df)}) β ", | |
| open=False, | |
| ): | |
| with gr.Row(): | |
| finished_eval_table = gr.components.Dataframe( | |
| value=finished_eval_queue_df, | |
| headers=EVAL_COLS, | |
| datatype=EVAL_TYPES, | |
| row_count=5, | |
| ) | |
| with gr.Row(): | |
| gr.Markdown("π Request an evaluation here", elem_classes="markdown-text") | |
| with gr.Row(): | |
| with gr.Column(): | |
| model_name_textbox = gr.Textbox(label="Model name") | |
| # revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main") | |
| # model_type = gr.Dropdown( | |
| # choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown], | |
| # label="Model type", | |
| # multiselect=False, | |
| # value=None, | |
| # interactive=True, | |
| # ) | |
| # with gr.Column(): | |
| # precision = gr.Dropdown( | |
| # choices=[i.value.name for i in Precision if i != Precision.Unknown], | |
| # label="Precision", | |
| # multiselect=False, | |
| # value="float16", | |
| # interactive=True, | |
| # # ) | |
| # weight_type = gr.Dropdown( | |
| # choices=[i.value.name for i in WeightType], | |
| # label="Weights type", | |
| # multiselect=False, | |
| # value="Original", | |
| # interactive=True, | |
| # ) | |
| # base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)") | |
| submit_button = gr.Button("Submit for evaluation") | |
| submission_result = gr.Markdown() | |
| submit_button.click( | |
| add_new_eval, | |
| [ | |
| model_name_textbox, | |
| # base_model_name_textbox, | |
| # revision_name_textbox, | |
| # precision, | |
| # weight_type, | |
| # model_type, | |
| ], | |
| submission_result, | |
| ) | |
| with gr.TabItem("π FAQ ", elem_id="llm-benchmark-tab-table", id=4): | |
| with gr.Row(): | |
| # with gr.Accordion("π FAQ", open=True): | |
| # with gr.Column(min_width=250): | |
| gr.Markdown(""" | |
| #### What does N/A score mean? | |
| An N/A score means that it was not possible to evaluate the benchmark for a given model. | |
| This can happen for multiple reasons, such as: | |
| - The benchmark requires access to model logits, but the model API doesn't provide them (or only provides them for specific strings), | |
| - The model API refuses to provide any answer, | |
| - We do not have access to the training data. """ | |
| ) | |
    with gr.Row():
        with gr.Accordion("Citation", open=False):
            citation_button = gr.Textbox(
                value=CITATION_BUTTON_TEXT,
                label=CITATION_BUTTON_LABEL,
                lines=20,
                elem_id="citation-button",
                show_copy_button=True,
            )

# scheduler = BackgroundScheduler()
# scheduler.add_job(restart_space, "interval", seconds=1800)
# scheduler.start()

demo.queue(default_concurrency_limit=40).launch()