yuto0o commited on
Commit
b4ef713
·
1 Parent(s): 2c7f82d

initial commit

Browse files
.dockerignore ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+
2
+ .git
3
+ .venv
4
+ __pycache__
5
+ *.pyc
6
+ .env
.gitattributes copy ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Python-generated files
2
+ __pycache__/
3
+ *.py[oc]
4
+ build/
5
+ dist/
6
+ wheels/
7
+ *.egg-info
8
+
9
+ # Virtual environments
10
+ .venv
11
+
12
+ db.sqlite3
.python-version ADDED
@@ -0,0 +1 @@
 
 
1
+ 3.12
Dockerfile ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Official Astral image: uv + Python 3.12 on bookworm-slim
FROM ghcr.io/astral-sh/uv:0.9.2-python3.12-bookworm-slim

WORKDIR /code

# Create the non-root user (uid=1000) first, before any chown below
RUN useradd -m -u 1000 user

# Copy the requirements.txt generated by uv (see README_2.md)
COPY requirements.txt .

# Install dependencies into the system interpreter (no venv inside the image)
RUN uv pip install --system --no-cache -r requirements.txt

# --- Fix notes ---
# 1. Use the currently recommended HF_HOME variable for the
#    Hugging Face cache directory (replaces the older TRANSFORMERS_CACHE)
ENV HF_HOME=/code/cache

# 2. Create the cache directory and hand ownership to "user"
#    so the non-root process can write model downloads there
RUN mkdir -p /code/cache && chown -R user:user /code/cache

# --------------------

USER user

ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH

# Copy the project code owned by "user"
COPY --chown=user . /code

CMD ["sh", "-c", "python manage.py migrate && python manage.py runserver 0.0.0.0:7860"]
README copy.md ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Django Ai Chat
3
+ emoji: 🌍
4
+ colorFrom: blue
5
+ colorTo: pink
6
+ sdk: docker
7
+ pinned: false
8
+ license: apache-2.0
9
+ ---
10
+
11
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
README_2.md ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## ローカルで作業時
2
+
3
+ ```
4
+ uv sync
5
+ ```
6
+
7
+ ## requirement.txt 更新するときは
8
+
9
+ ```
10
+ uv pip compile pyproject.toml -o requirements.txt
11
+ ```
12
+
13
+ を使うこと
14
+
15
+ ## build 実行
16
+
17
+ ```
18
+ docker build -t my-django-bot .
19
+ docker run -p 7860:7860 my-django-bot
20
+
21
+ ```
22
+
23
+ ## view とか書き換えただけの時?毎回ダウンロードいらないようです
24
+
25
+ ```
26
+ docker run -p 7860:7860 \
27
+ -v ~/.cache/huggingface:/root/.cache/huggingface \
28
+ my-django-bot
29
+ ```
30
+
31
+ ## 容量解放
32
+
33
+ ```
34
+
35
+ # 特定のモデルだけ消すのが面倒な場合、hubフォルダごと消しても、
36
+ # 次回使う時に再ダウンロードされるだけなので安全です。
37
+ rm -rf ~/.cache/huggingface/hub/
38
+
39
+
40
+ # 使っていないイメージやコンテナを一括削除
41
+ docker system prune -a
42
+
43
+ # uv cache clean
44
+ uv cache clean
45
+ ```
46
+
47
+ ## huggingface に push するとき
48
+
49
+ ```
50
+ uv run hf auth login
51
+ # あとはtokenとかを貼る?
52
+
53
+ git config --global credential.helper store
54
+ #トークン確認
55
+ cat ~/.cache/huggingface/token
56
+
57
+ # なんかpushできないとき
58
+ git push https://yuto0o:YOUR_HF_TOKEN@huggingface.co/spaces/yuto0o/django-ai-chat main
59
+
60
+ ```
config/__init__.py ADDED
File without changes
config/asgi.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ ASGI config for config project.
3
+
4
+ It exposes the ASGI callable as a module-level variable named ``application``.
5
+
6
+ For more information on this file, see
7
+ https://docs.djangoproject.com/en/6.0/howto/deployment/asgi/
8
+ """
9
+
10
+ import os
11
+
12
+ from django.core.asgi import get_asgi_application
13
+
14
+ os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings')
15
+
16
+ application = get_asgi_application()
config/settings.py ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Django settings for config project.
3
+
4
+ Generated by 'django-admin startproject' using Django 6.0.
5
+
6
+ For more information on this file, see
7
+ https://docs.djangoproject.com/en/6.0/topics/settings/
8
+
9
+ For the full list of settings and their values, see
10
+ https://docs.djangoproject.com/en/6.0/ref/settings/
11
+ """
12
+
13
+ from pathlib import Path
14
+
15
+ # Build paths inside the project like this: BASE_DIR / 'subdir'.
16
+ BASE_DIR = Path(__file__).resolve().parent.parent
17
+
18
+
19
+ # Quick-start development settings - unsuitable for production
20
+ # See https://docs.djangoproject.com/en/6.0/howto/deployment/checklist/
21
+
22
+ # SECURITY WARNING: keep the secret key used in production secret!
23
+ SECRET_KEY = "django-insecure-t1!a(5z1#&355c2nl%w$nbiavye67l9f(4t$64uxg4g45y-xtl"
24
+
25
+ # SECURITY WARNING: don't run with debug turned on in production!
26
+ DEBUG = True
27
+
28
+ ALLOWED_HOSTS = ["huggingface.co", ".hf.space", "localhost", "127.0.0.1"]
29
+
30
+
31
+ # Application definition
32
+
33
+ INSTALLED_APPS = [
34
+ "django.contrib.admin",
35
+ "django.contrib.auth",
36
+ "django.contrib.contenttypes",
37
+ "django.contrib.sessions",
38
+ "django.contrib.messages",
39
+ "django.contrib.staticfiles",
40
+ "rest_framework",
41
+ "ml_api",
42
+ ]
43
+
44
+ MIDDLEWARE = [
45
+ "django.middleware.security.SecurityMiddleware",
46
+ "django.contrib.sessions.middleware.SessionMiddleware",
47
+ "django.middleware.common.CommonMiddleware",
48
+ "django.middleware.csrf.CsrfViewMiddleware",
49
+ "django.contrib.auth.middleware.AuthenticationMiddleware",
50
+ "django.contrib.messages.middleware.MessageMiddleware",
51
+ "django.middleware.clickjacking.XFrameOptionsMiddleware",
52
+ ]
53
+
54
+ ROOT_URLCONF = "config.urls"
55
+
56
+ TEMPLATES = [
57
+ {
58
+ "BACKEND": "django.template.backends.django.DjangoTemplates",
59
+ "DIRS": [],
60
+ "APP_DIRS": True,
61
+ "OPTIONS": {
62
+ "context_processors": [
63
+ "django.template.context_processors.request",
64
+ "django.contrib.auth.context_processors.auth",
65
+ "django.contrib.messages.context_processors.messages",
66
+ ],
67
+ },
68
+ },
69
+ ]
70
+
71
+ WSGI_APPLICATION = "config.wsgi.application"
72
+
73
+
74
+ # Database
75
+ # https://docs.djangoproject.com/en/6.0/ref/settings/#databases
76
+
77
+ DATABASES = {
78
+ "default": {
79
+ "ENGINE": "django.db.backends.sqlite3",
80
+ "NAME": BASE_DIR / "db.sqlite3",
81
+ }
82
+ }
83
+
84
+
85
+ # Password validation
86
+ # https://docs.djangoproject.com/en/6.0/ref/settings/#auth-password-validators
87
+
88
+ AUTH_PASSWORD_VALIDATORS = [
89
+ {
90
+ "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
91
+ },
92
+ {
93
+ "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
94
+ },
95
+ {
96
+ "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
97
+ },
98
+ {
99
+ "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
100
+ },
101
+ ]
102
+
103
+
104
+ # Internationalization
105
+ # https://docs.djangoproject.com/en/6.0/topics/i18n/
106
+
107
+ LANGUAGE_CODE = "en-us"
108
+
109
+ TIME_ZONE = "UTC"
110
+
111
+ USE_I18N = True
112
+
113
+ USE_TZ = True
114
+
115
+
116
+ # Static files (CSS, JavaScript, Images)
117
+ # https://docs.djangoproject.com/en/6.0/howto/static-files/
118
+
119
+ STATIC_URL = "static/"
config/urls.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ URL configuration for config project.
3
+
4
+ The `urlpatterns` list routes URLs to views. For more information please see:
5
+ https://docs.djangoproject.com/en/6.0/topics/http/urls/
6
+ Examples:
7
+ Function views
8
+ 1. Add an import: from my_app import views
9
+ 2. Add a URL to urlpatterns: path('', views.home, name='home')
10
+ Class-based views
11
+ 1. Add an import: from other_app.views import Home
12
+ 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
13
+ Including another URLconf
14
+ 1. Import the include() function: from django.urls import include, path
15
+ 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
16
+ """
17
+
18
+ from django.contrib import admin
19
+ from django.urls import include, path
20
+
21
+ urlpatterns = [path("admin/", admin.site.urls), path("api/", include("ml_api.urls"))]
config/wsgi.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""
WSGI entry point for the config project.

Exposes the WSGI callable as a module-level variable named ``application``.

See https://docs.djangoproject.com/en/6.0/howto/deployment/wsgi/
"""

import os

from django.core.wsgi import get_wsgi_application

# Point Django at this project's settings before building the application.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings")

application = get_wsgi_application()
main.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
def main() -> None:
    """Print a greeting confirming the project scaffold runs."""
    greeting = "Hello from django-ai-chat!"
    print(greeting)


if __name__ == "__main__":
    main()
manage.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""

import os
import sys


def main():
    """Entry point: select the settings module, then hand sys.argv to Django.

    Raises:
        ImportError: when Django is not importable (with a hint about
            PYTHONPATH / virtual environments), chained to the original error.
    """
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    main()
ml_api/__init__.py ADDED
File without changes
ml_api/admin.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
# Django admin registrations for the ml_api app.
# Nothing is registered yet; the app currently exposes only the chat API.
from django.contrib import admin

# Register your models here.
ml_api/apps.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import torch
from django.apps import AppConfig
from transformers import AutoModelForCausalLM, AutoTokenizer


class MlApiConfig(AppConfig):
    """App config that loads the chat model once at startup.

    The tokenizer and model are stored as class attributes so that every
    request handled by ml_api.views can reuse the same loaded instances
    via ``apps.get_app_config("ml_api")``.
    """

    default_auto_field = "django.db.models.BigAutoField"
    name = "ml_api"

    # Populated by ready(); None until the first load completes.
    tokenizer = None
    model = None

    def ready(self):
        """Load the tokenizer and model if they are not loaded yet.

        NOTE(review): Django may call ready() more than once per process
        (e.g. under the runserver autoreloader); the ``model is None``
        guard only protects against repeat loads within one process.
        """
        if MlApiConfig.model is None:
            # Qwen2.5-3B-Instruct chosen for its quality at this size
            model_name = "Qwen/Qwen2.5-3B-Instruct"

            print(f"Loading {model_name}... This will take time and RAM.")

            MlApiConfig.tokenizer = AutoTokenizer.from_pretrained(model_name)

            # float32 for stability/speed when running on CPU;
            # device_map="cpu" made explicit so no GPU is assumed.
            MlApiConfig.model = AutoModelForCausalLM.from_pretrained(
                model_name,
                torch_dtype=torch.float32,
                device_map="cpu",
                trust_remote_code=True,
            )

            print("Strong Model Loaded!")
ml_api/migrations/__init__.py ADDED
File without changes
ml_api/models.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
# Database models for the ml_api app.
# The chat endpoint is stateless, so no models are defined yet.
from django.db import models

# Create your models here.
ml_api/tests.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
# Test suite for the ml_api app (currently empty).
from django.test import TestCase

# Create your tests here.
ml_api/urls.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
"""URL routes for the ml_api app."""

from django.urls import path

from .views import ChatView

# Mounted under the "api/" prefix by config.urls, so the full
# endpoint for ChatView is /api/chat/.
urlpatterns = [
    path("chat/", ChatView.as_view(), name="chat"),
]
ml_api/views.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import torch
from django.apps import apps
from rest_framework.response import Response
from rest_framework.views import APIView


class ChatView(APIView):
    """POST endpoint that generates a chat reply with the preloaded model.

    The tokenizer/model are loaded once at startup by MlApiConfig.ready()
    and fetched here through the app registry, so no model loading happens
    per request.
    """

    def post(self, request):
        """Generate a reply to ``request.data["text"]``.

        Returns:
            Response: ``{"result": <generated reply text>}``.
        """
        # Message from the user (empty string when the key is missing).
        user_input = request.data.get("text", "")

        app_config = apps.get_app_config("ml_api")
        tokenizer = app_config.tokenizer
        model = app_config.model

        # 1. Build the conversation in the role/content format expected by
        #    the model's chat template; the system message sets the persona.
        messages = [
            {
                "role": "system",
                "content": "あなたは親切でフレンドリーなAIアシスタントです。自然な日本語で簡潔に返事をしてください。",
            },
            {"role": "user", "content": user_input},
        ]

        # 2. Render the conversation into a single prompt string.
        text = tokenizer.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True
        )

        inputs = tokenizer([text], return_tensors="pt").to(model.device)

        # 3. Generate; kept short so the calling client does not time out.
        with torch.no_grad():
            generated_ids = model.generate(
                **inputs,
                max_new_tokens=128,  # short replies to avoid LINE timeouts
                do_sample=True,  # sample for some variety in replies
                temperature=0.7,
                top_p=0.9,
            )

        # 4. Strip the prompt tokens so only the newly generated reply remains.
        generated_ids = [
            output_ids[len(input_ids) :]
            for input_ids, output_ids in zip(inputs.input_ids, generated_ids)
        ]
        response_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[
            0
        ]

        return Response({"result": response_text})
pyproject.toml ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [project]
2
+ name = "django-ai-chat"
3
+ version = "0.1.0"
4
+ description = "Add your description here"
5
+ readme = "README.md"
6
+ requires-python = ">=3.12"
7
+ dependencies = [
8
+ "accelerate>=1.12.0",
9
+ "django>=6.0",
10
+ "djangorestframework>=3.16.1",
11
+ "protobuf>=6.33.2",
12
+ "sentencepiece>=0.2.1",
13
+ "torch>=2.9.1",
14
+ "transformers>=4.57.3",
15
+ ]
requirements.txt ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file was autogenerated by uv via the following command:
2
+ # uv pip compile pyproject.toml -o requirements.txt
3
+ accelerate==1.12.0
4
+ # via django-ai-chat (pyproject.toml)
5
+ asgiref==3.11.0
6
+ # via django
7
+ certifi==2025.11.12
8
+ # via requests
9
+ charset-normalizer==3.4.4
10
+ # via requests
11
+ django==6.0
12
+ # via
13
+ # django-ai-chat (pyproject.toml)
14
+ # djangorestframework
15
+ djangorestframework==3.16.1
16
+ # via django-ai-chat (pyproject.toml)
17
+ filelock==3.20.0
18
+ # via
19
+ # huggingface-hub
20
+ # torch
21
+ # transformers
22
+ fsspec==2025.12.0
23
+ # via
24
+ # huggingface-hub
25
+ # torch
26
+ hf-xet==1.2.0
27
+ # via huggingface-hub
28
+ huggingface-hub==0.36.0
29
+ # via
30
+ # accelerate
31
+ # tokenizers
32
+ # transformers
33
+ idna==3.11
34
+ # via requests
35
+ jinja2==3.1.6
36
+ # via torch
37
+ markupsafe==3.0.3
38
+ # via jinja2
39
+ mpmath==1.3.0
40
+ # via sympy
41
+ networkx==3.6
42
+ # via torch
43
+ numpy==2.3.5
44
+ # via
45
+ # accelerate
46
+ # transformers
47
+ nvidia-cublas-cu12==12.8.4.1
48
+ # via
49
+ # nvidia-cudnn-cu12
50
+ # nvidia-cusolver-cu12
51
+ # torch
52
+ nvidia-cuda-cupti-cu12==12.8.90
53
+ # via torch
54
+ nvidia-cuda-nvrtc-cu12==12.8.93
55
+ # via torch
56
+ nvidia-cuda-runtime-cu12==12.8.90
57
+ # via torch
58
+ nvidia-cudnn-cu12==9.10.2.21
59
+ # via torch
60
+ nvidia-cufft-cu12==11.3.3.83
61
+ # via torch
62
+ nvidia-cufile-cu12==1.13.1.3
63
+ # via torch
64
+ nvidia-curand-cu12==10.3.9.90
65
+ # via torch
66
+ nvidia-cusolver-cu12==11.7.3.90
67
+ # via torch
68
+ nvidia-cusparse-cu12==12.5.8.93
69
+ # via
70
+ # nvidia-cusolver-cu12
71
+ # torch
72
+ nvidia-cusparselt-cu12==0.7.1
73
+ # via torch
74
+ nvidia-nccl-cu12==2.27.5
75
+ # via torch
76
+ nvidia-nvjitlink-cu12==12.8.93
77
+ # via
78
+ # nvidia-cufft-cu12
79
+ # nvidia-cusolver-cu12
80
+ # nvidia-cusparse-cu12
81
+ # torch
82
+ nvidia-nvshmem-cu12==3.3.20
83
+ # via torch
84
+ nvidia-nvtx-cu12==12.8.90
85
+ # via torch
86
+ packaging==25.0
87
+ # via
88
+ # accelerate
89
+ # huggingface-hub
90
+ # transformers
91
+ protobuf==6.33.2
92
+ # via django-ai-chat (pyproject.toml)
93
+ psutil==7.1.3
94
+ # via accelerate
95
+ pyyaml==6.0.3
96
+ # via
97
+ # accelerate
98
+ # huggingface-hub
99
+ # transformers
100
+ regex==2025.11.3
101
+ # via transformers
102
+ requests==2.32.5
103
+ # via
104
+ # huggingface-hub
105
+ # transformers
106
+ safetensors==0.7.0
107
+ # via
108
+ # accelerate
109
+ # transformers
110
+ sentencepiece==0.2.1
111
+ # via django-ai-chat (pyproject.toml)
112
+ setuptools==80.9.0
113
+ # via torch
114
+ sqlparse==0.5.4
115
+ # via django
116
+ sympy==1.14.0
117
+ # via torch
118
+ tokenizers==0.22.1
119
+ # via transformers
120
+ torch==2.9.1
121
+ # via
122
+ # django-ai-chat (pyproject.toml)
123
+ # accelerate
124
+ tqdm==4.67.1
125
+ # via
126
+ # huggingface-hub
127
+ # transformers
128
+ transformers==4.57.3
129
+ # via django-ai-chat (pyproject.toml)
130
+ triton==3.5.1
131
+ # via torch
132
+ typing-extensions==4.15.0
133
+ # via
134
+ # huggingface-hub
135
+ # torch
136
+ urllib3==2.6.0
137
+ # via requests
uv.lock ADDED
The diff for this file is too large to render. See raw diff