-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathllama-install.bat
More file actions
36 lines (31 loc) · 1.27 KB
/
llama-install.bat
File metadata and controls
36 lines (31 loc) · 1.27 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
@echo off
rem llama-install.bat
rem Downloads a CUDA 13.1 build of llama.cpp (release b7964) plus its CUDA
rem runtime DLLs, links model/cache dirs from an existing ComfyUI install,
rem creates a Python 3.11 venv with Open WebUI, pre-fetches the ruri-v3
rem embedding/reranker models, and generates run.bat to launch everything.
rem
rem NOTE(review): mklink needs an elevated prompt or Windows Developer Mode,
rem and assumes ..\ComfyUI exists next to this script -- confirm before use.

rem Quote %~dp0 so the script works from a path containing spaces.
cd /D "%~dp0" || exit /b 1
mkdir llama || exit /b 1
cd llama || exit /b 1

rem --fail: on HTTP errors, fail instead of saving the error page as a .zip.
curl --fail -o llama.zip -L https://github.com/ggml-org/llama.cpp/releases/download/b7964/llama-b7964-bin-win-cuda-13.1-x64.zip || exit /b 1
curl --fail -o llama-dll.zip -L https://github.com/ggml-org/llama.cpp/releases/download/b7964/cudart-llama-bin-win-cuda-13.1-x64.zip || exit /b 1

rem Windows 10+ ships bsdtar, which extracts .zip archives natively.
tar -xf llama.zip || exit /b 1
tar -xf llama-dll.zip || exit /b 1
del llama.zip
del llama-dll.zip

rem Share the transformers cache and LLM model dir with ComfyUI via symlinks.
mklink /D transformers ..\ComfyUI\transformers || exit /b 1
mklink /D models ..\ComfyUI\models\llm || exit /b 1

rem Isolated Python 3.11 environment for Open WebUI (requires uv on PATH).
uv venv -p 3.11 || exit /b 1
call .venv\Scripts\activate
uv pip install open-webui || exit /b 1

rem Pre-download the Japanese embedding + reranker models so the first RAG
rem query does not block on a network fetch.
hf download --local-dir transformers\cl-nagoya--ruri-v3-310m cl-nagoya/ruri-v3-310m || exit /b 1
hf download --local-dir transformers\cl-nagoya--ruri-v3-reranker-310m cl-nagoya/ruri-v3-reranker-310m || exit /b 1

rem Generate run.bat: opens the browser, starts llama-server on :8081,
rem then serves Open WebUI on :8080 pointed at the local OpenAI-style API.
rem (echo lines below are written verbatim into run.bat -- do not reformat.)
(
echo start http://127.0.0.1:8080
echo start llama-server -c 8192 --port 8081 --no-webui --models-max 1 --models-dir models
echo set WEBUI_ADMIN_EMAIL=root@example.com
echo set WEBUI_ADMIN_PASSWORD=root
echo set ENABLE_OLLAMA_API=false
echo set OPENAI_API_BASE_URL=http://127.0.0.1:8081/v1
echo set RAG_EMBEDDING_MODEL=transformers\cl-nagoya--ruri-v3-310m
echo set RAG_TOP_K=12
echo set RAG_TOP_K_RERANKER=6
echo set ENABLE_RAG_HYBRID_SEARCH=true
echo set RAG_RERANKING_MODEL=transformers\cl-nagoya--ruri-v3-reranker-310m
echo call .venv\Scripts\activate
echo open-webui serve --host 127.0.0.1
)> run.bat