# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "marimo",
#     "datasets",
#     "transformers[torch]",
#     "evaluate",
#     "scikit-learn",
#     "accelerate",
#     "pillow",
#     "numpy",
# ]
# ///
| | """ |
| | Train an Image Classifier |
| | |
| | This marimo notebook fine-tunes a Vision Transformer (ViT) for image classification. |
| | |
| | Two ways to run: |
| | - Tutorial: uvx marimo edit --sandbox train-image-classifier.py |
| | - Script: uv run train-image-classifier.py --dataset beans --output-repo user/my-model |
| | |
| | On HF Jobs (GPU): |
| | hf jobs uv run --flavor l4x1 --secrets HF_TOKEN \ |
| | https://huggingface.co/datasets/uv-scripts/marimo/raw/main/train-image-classifier.py \ |
| | -- --dataset beans --output-repo user/beans-vit --epochs 5 |
| | """ |

import marimo

__generated_with = "0.19.6"
app = marimo.App(width="medium")


@app.cell
def _():
    import marimo as mo
    return (mo,)


@app.cell
def _(mo):
    mo.md("""
    # Train an Image Classifier

    This notebook fine-tunes a Vision Transformer (ViT) for image classification.

    **Two ways to run:**
    - **Tutorial**: `uvx marimo edit --sandbox train-image-classifier.py`
    - **Script**: `uv run train-image-classifier.py --dataset beans --output-repo user/my-model`

    The same code powers both experiences!
    """)
    return


@app.cell
def _(mo):
    mo.md("""
    ## Running on HF Jobs (GPU)

    This notebook can run on [Hugging Face Jobs](https://huggingface.co/docs/hub/jobs) for GPU training.
    No local GPU needed - just run:

    ```bash
    hf jobs uv run --flavor l4x1 --secrets HF_TOKEN \\
        https://huggingface.co/datasets/uv-scripts/marimo/raw/main/train-image-classifier.py \\
        -- --dataset beans --output-repo your-username/beans-vit --epochs 5 --push-to-hub
    ```

    **GPU Flavors:**

    | Flavor | GPU | VRAM | Best for |
    |--------|-----|------|----------|
    | `l4x1` | L4 | 24GB | Most fine-tuning tasks |
    | `a10gx1` | A10G | 24GB | Slightly faster than L4 |
    | `a100x1` | A100 | 40GB | Large models, big batches |

    **Key flags:**
    - `--secrets HF_TOKEN` - Passes your HF token so the job can push models
    - `--` - Separates `hf jobs` args from script args
    - `--push-to-hub` - Actually pushes the model (otherwise it is only saved to the job's ephemeral local storage)

    **Tip:** Start with the `beans` dataset and 1-3 epochs to test, then scale up!
    """)
    return


@app.cell
def _(mo):
    mo.md("""
    ## Step 1: Configuration

    Set up training parameters. In interactive mode, use the controls below.
    In script mode, pass command-line arguments.
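
    For example (all flags are defined by the argument parser in the next cell):

    ```bash
    uv run train-image-classifier.py --dataset beans --epochs 5 --batch-size 32 --lr 2e-5
    ```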
| | """) |
| | return |
| |
|
| |
|
@app.cell
def _(mo):
    import argparse

    # CLI arguments for script mode; parse_known_args ignores marimo's own flags.
    parser = argparse.ArgumentParser(description="Fine-tune ViT for image classification")
    parser.add_argument(
        "--dataset",
        default="beans",
        help="HF dataset name (must be an image classification dataset)",
    )
    parser.add_argument(
        "--model",
        default="google/vit-base-patch16-224-in21k",
        help="Pretrained model to fine-tune",
    )
    parser.add_argument(
        "--output-repo",
        default=None,
        help="Where to push the trained model (e.g., user/my-model)",
    )
    parser.add_argument("--epochs", type=int, default=3, help="Number of training epochs")
    parser.add_argument("--batch-size", type=int, default=16, help="Batch size")
    parser.add_argument("--lr", type=float, default=5e-5, help="Learning rate")
    parser.add_argument(
        "--push-to-hub",
        action="store_true",
        default=False,
        help="Push model to Hub after training",
    )
    args, _ = parser.parse_known_args()

    # Interactive controls for tutorial mode; the CLI args provide their defaults.
    dataset_input = mo.ui.text(value=args.dataset, label="Dataset")
    model_input = mo.ui.text(value=args.model, label="Model")
    output_input = mo.ui.text(value=args.output_repo or "", label="Output Repo")
    epochs_input = mo.ui.slider(1, 20, value=args.epochs, label="Epochs")
    batch_size_input = mo.ui.dropdown(
        options=["8", "16", "32", "64"], value=str(args.batch_size), label="Batch Size"
    )
    lr_input = mo.ui.dropdown(
        options=["1e-5", "2e-5", "5e-5", "1e-4"],
        value=f"{args.lr:.0e}".replace("e-0", "e-"),
        label="Learning Rate",
    )

    mo.vstack(
        [
            mo.hstack([dataset_input, model_input]),
            mo.hstack([output_input]),
            mo.hstack([epochs_input, batch_size_input, lr_input]),
        ]
    )
    return (
        args,
        batch_size_input,
        dataset_input,
        epochs_input,
        lr_input,
        model_input,
        output_input,
    )


@app.cell
def _(
    args,
    batch_size_input,
    dataset_input,
    epochs_input,
    lr_input,
    model_input,
    output_input,
):
    # UI values take precedence; fall back to CLI args in script mode.
    dataset_name = dataset_input.value or args.dataset
    model_name = model_input.value or args.model
    output_repo = output_input.value or args.output_repo
    num_epochs = epochs_input.value or args.epochs
    batch_size = int(batch_size_input.value) if batch_size_input.value else args.batch_size
    learning_rate = float(lr_input.value) if lr_input.value else args.lr

    print("Configuration:")
    print(f"  Dataset: {dataset_name}")
    print(f"  Model: {model_name}")
    print(f"  Output: {output_repo or '(not pushing to Hub)'}")
    print(f"  Epochs: {num_epochs}, Batch Size: {batch_size}, LR: {learning_rate}")
    return (
        batch_size,
        dataset_name,
        learning_rate,
        model_name,
        num_epochs,
        output_repo,
    )


@app.cell
def _(mo):
    mo.md("""
    ## Step 2: Load Dataset

    We'll load an image classification dataset from the Hub.
    The `beans` dataset is small (~1,000 images) and trains quickly - perfect for learning!
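
    Any dataset with an `image` column and a class-label column should work. A quick
    way to inspect a dataset's schema before training (for example, `beans` keeps its
    classes in a `labels` column, which is why the next cell checks for both `label`
    and `labels`):

    ```python
    from datasets import load_dataset

    print(load_dataset("beans")["train"].features)
    ```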
| | """) |
| | return |
| |
|
| |
|
@app.cell
def _(dataset_name, mo):
    from datasets import load_dataset

    print(f"Loading dataset: {dataset_name}...")
    dataset = load_dataset(dataset_name)
    print(f"Train: {len(dataset['train']):,} samples")
    print(f"Test: {len(dataset['test']):,} samples")

    # Find the label column (datasets vary between "label" and "labels").
    _features = dataset["train"].features
    label_column = "label" if "label" in _features else "labels"
    label_feature = _features[label_column]
    labels = label_feature.names if hasattr(label_feature, "names") else None
    num_labels = (
        label_feature.num_classes
        if hasattr(label_feature, "num_classes")
        else len(set(dataset["train"][label_column]))
    )

    print(f"Label column: '{label_column}'")
    print(f"Labels ({num_labels}): {labels}")

    mo.md(f"**Loaded {len(dataset['train']):,} training samples with {num_labels} classes**")
    return dataset, label_column, labels, num_labels


@app.cell
def _(dataset, label_column, labels, mo):
    import base64 as _base64
    from io import BytesIO as _BytesIO

    def _image_to_base64(img, max_size=150):
        """Convert a PIL image to base64 for HTML display."""
        _img_copy = img.copy()
        _img_copy.thumbnail((max_size, max_size))
        _buffered = _BytesIO()
        _img_copy.save(_buffered, format="PNG")
        return _base64.b64encode(_buffered.getvalue()).decode()

    # Show a random sample of training images with their labels.
    _samples = dataset["train"].shuffle(seed=42).select(range(6))

    _images_html = []
    for _sample in _samples:
        _img_b64 = _image_to_base64(_sample["image"])
        _label_name = labels[_sample[label_column]] if labels else _sample[label_column]
        _images_html.append(
            f"""
            <div style="text-align: center; margin: 5px;">
                <img src="data:image/png;base64,{_img_b64}" style="border-radius: 8px;"/>
                <br/><small>{_label_name}</small>
            </div>
            """
        )

    mo.md(f"""
    ### Sample Images
    <div style="display: flex; flex-wrap: wrap; gap: 10px;">
    {"".join(_images_html)}
    </div>
    """)
    return


@app.cell
def _(mo):
    mo.md("""
    ## Step 3: Prepare Model and Processor

    We load a pretrained Vision Transformer and its image processor.
    The processor handles resizing and normalization to match the model's pretraining.
    Because the checkpoint was not trained on our classes, `from_pretrained` attaches a
    freshly initialized classification head sized for our labels; `ignore_mismatched_sizes=True`
    allows this even when a checkpoint ships a head of a different size.
    """)
    return


@app.cell
def _(labels, model_name, num_labels):
    from transformers import AutoImageProcessor, AutoModelForImageClassification

    print(f"Loading model: {model_name}...")

    image_processor = AutoImageProcessor.from_pretrained(model_name)
    print(f"Image size: {image_processor.size}")

    # Label mappings are stored in the model config and used by inference pipelines.
    label2id = {label: i for i, label in enumerate(labels)} if labels else None
    id2label = {i: label for i, label in enumerate(labels)} if labels else None

    model = AutoModelForImageClassification.from_pretrained(
        model_name,
        num_labels=num_labels,
        label2id=label2id,
        id2label=id2label,
        ignore_mismatched_sizes=True,
    )
    print(f"Model loaded with {num_labels} output classes")
    return id2label, image_processor, model


@app.cell
def _(mo):
    mo.md("""
    ## Step 4: Preprocess Data

    Apply the image processor to convert images into tensors suitable for the model.
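
    A quick sanity check you can run in a scratch cell (the shape assumes the default
    224x224 ViT processor):

    ```python
    _batch = image_processor(dataset["train"][0]["image"].convert("RGB"), return_tensors="pt")
    print(_batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])
    ```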
| | """) |
| | return |
| |
|
| |
|
@app.cell
def _(dataset, image_processor, label_column):
    def preprocess(examples):
        """Apply the image processor to a batch of images."""
        images = [img.convert("RGB") for img in examples["image"]]
        inputs = image_processor(images, return_tensors="pt")
        inputs["labels"] = examples[label_column]
        return inputs

    # with_transform applies preprocessing lazily, on access, so nothing is cached to disk.
    processed_dataset = dataset.with_transform(preprocess)
    print("Preprocessing will be applied on the fly (transforms are lazy)")
    return (processed_dataset,)


@app.cell
def _(mo):
    mo.md("""
    ## Step 5: Training

    We use the Hugging Face Trainer for a clean training loop with built-in logging.
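
    As a rough sense of scale, assuming the `beans` defaults (1,034 training images,
    batch size 16, 3 epochs): ceil(1034 / 16) = 65 steps per epoch, about 195 steps total.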
| | """) |
| | return |
| |
|
| |
|
@app.cell
def _(
    batch_size,
    learning_rate,
    model,
    num_epochs,
    output_repo,
    processed_dataset,
):
    import evaluate
    import numpy as np
    from transformers import Trainer, TrainingArguments

    accuracy_metric = evaluate.load("accuracy")

    def compute_metrics(eval_pred):
        predictions, labels = eval_pred
        predictions = np.argmax(predictions, axis=1)
        return accuracy_metric.compute(predictions=predictions, references=labels)

    training_args = TrainingArguments(
        output_dir="./image-classifier-output",
        num_train_epochs=num_epochs,
        per_device_train_batch_size=batch_size,
        per_device_eval_batch_size=batch_size,
        learning_rate=learning_rate,
        eval_strategy="epoch",
        save_strategy="epoch",
        logging_steps=10,
        load_best_model_at_end=True,
        metric_for_best_model="accuracy",
        # Pushing is handled explicitly in Step 7, gated on --push-to-hub;
        # push_to_hub=True here would upload checkpoints during training.
        push_to_hub=False,
        hub_model_id=output_repo if output_repo else None,
        remove_unused_columns=False,  # keep the "image" column for the lazy transform
        report_to="none",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=processed_dataset["train"],
        eval_dataset=processed_dataset["test"],
        compute_metrics=compute_metrics,
    )

    print(f"Starting training for {num_epochs} epochs...")
    return (trainer,)


@app.cell
def _(trainer):
    train_result = trainer.train()
    print("\nTraining complete!")
    print(f"  Total steps: {train_result.global_step}")
    print(f"  Training loss: {train_result.training_loss:.4f}")
    return


@app.cell
def _(mo):
    mo.md("""
    ## Step 6: Evaluation

    Let's see how well our model performs on the test set.
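
    Beyond overall accuracy, a confusion matrix shows which classes get mixed up with
    each other. A minimal sketch using `trainer.predict` (run it in a scratch cell
    after training):

    ```python
    import numpy as np
    from sklearn.metrics import confusion_matrix

    _preds = trainer.predict(processed_dataset["test"])
    print(confusion_matrix(_preds.label_ids, np.argmax(_preds.predictions, axis=1)))
    ```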
| | """) |
| | return |
| |
|
| |
|
@app.cell
def _(trainer):
    eval_results = trainer.evaluate()
    print("\nEvaluation Results:")
    print(f"  Accuracy: {eval_results['eval_accuracy']:.2%}")
    print(f"  Loss: {eval_results['eval_loss']:.4f}")
    return


@app.cell
def _(dataset, id2label, image_processor, label_column, mo, model):
    import torch
    import base64 as _b64
    from io import BytesIO as _BIO

    model.eval()
    _test_samples = dataset["test"].shuffle(seed=42).select(range(4))

    _prediction_html = []
    for _sample in _test_samples:
        _img = _sample["image"].convert("RGB")
        _inputs = image_processor(_img, return_tensors="pt")
        # Move inputs to the model's device (e.g., CUDA after GPU training).
        _inputs = {k: v.to(model.device) for k, v in _inputs.items()}

        with torch.no_grad():
            _outputs = model(**_inputs)
        _pred_idx = _outputs.logits.argmax(-1).item()

        _true_idx = _sample[label_column]
        _true_label = id2label[_true_idx] if id2label else _true_idx
        _pred_label = id2label[_pred_idx] if id2label else _pred_idx
        _is_correct = _pred_idx == _true_idx

        _img_copy = _img.copy()
        _img_copy.thumbnail((120, 120))
        _buffered = _BIO()
        _img_copy.save(_buffered, format="PNG")
        _img_b64 = _b64.b64encode(_buffered.getvalue()).decode()

        _border_color = "#4ade80" if _is_correct else "#f87171"
        _prediction_html.append(
            f"""
            <div style="text-align: center; margin: 5px; padding: 10px; border: 2px solid {_border_color}; border-radius: 8px;">
                <img src="data:image/png;base64,{_img_b64}" style="border-radius: 4px;"/>
                <br/><small>True: <b>{_true_label}</b></small>
                <br/><small>Pred: <b>{_pred_label}</b></small>
            </div>
            """
        )

    mo.md(f"""
    ### Sample Predictions
    <div style="display: flex; flex-wrap: wrap; gap: 10px;">
    {"".join(_prediction_html)}
    </div>
    <small>Green border = correct, red border = wrong</small>
    """)
    return


@app.cell
def _(mo):
    mo.md("""
    ## Step 7: Push to Hub

    If you specified both `--output-repo` and `--push-to-hub`, the model is pushed
    to the Hugging Face Hub.
    """)
    return


@app.cell
def _(args, output_repo, trainer):
    if output_repo and args.push_to_hub:
        print(f"Pushing model to: https://huggingface.co/{output_repo}")
        trainer.push_to_hub()
        print("Model pushed successfully!")
    elif output_repo:
        print("Model saved locally. To push to Hub, add the --push-to-hub flag.")
        print("  Or run: trainer.push_to_hub()")
    else:
        print("No output repo specified. Model saved locally to ./image-classifier-output")
        print("To push to Hub, run with: --output-repo your-username/model-name --push-to-hub")
    return


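@app.cell
def _(mo):
    mo.md("""
    Once pushed, anyone can load the model for inference. A minimal sketch (the repo
    name is the example from above - substitute your own):

    ```python
    from transformers import pipeline

    classifier = pipeline("image-classification", model="your-username/beans-vit")
    print(classifier("path/to/image.jpg"))  # list of {"label": ..., "score": ...} dicts
    ```
    """)
    return

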
@app.cell
def _(mo):
    mo.md("""
    ## Next Steps

    ### Try different datasets
    - `food101` - 101 food categories (75k train images)
    - `cifar10` - 10 classes of objects (50k train images; stores images in an `img` column, so adjust the preprocessing)
    - `oxford_flowers102` - 102 flower species
    - `fashion_mnist` - Clothing items (grayscale)

    ### Try different models
    - `microsoft/resnet-50` - Classic CNN architecture
    - `facebook/deit-base-patch16-224` - Data-efficient ViT
    - `google/vit-large-patch16-224` - Larger ViT (needs more VRAM)

    ### Scale up with HF Jobs

    ```bash
    # Train on food101 with more epochs
    hf jobs uv run --flavor l4x1 --secrets HF_TOKEN \\
        https://huggingface.co/datasets/uv-scripts/marimo/raw/main/train-image-classifier.py \\
        -- --dataset food101 --epochs 10 --batch-size 32 \\
        --output-repo your-username/food101-vit --push-to-hub
    ```

    **More UV scripts**: [huggingface.co/uv-scripts](https://huggingface.co/uv-scripts)
    """)
    return


if __name__ == "__main__":
    app.run()