Manually installing Open-webui
1. Build the frontend with Docker, using the following Dockerfile:
# syntax=docker/dockerfile:1
# Initialize device type args
# use build args in the docker build command with --build-arg="BUILDARG=true"
ARG USE_CUDA=false
ARG USE_OLLAMA=false
# Tested with cu117 for CUDA 11 and cu121 for CUDA 12 (default)
ARG USE_CUDA_VER=cu121
# any sentence transformer model; models to use can be found at https://huggingface.co/models?library=sentence-transformers
# Leaderboard: https://huggingface.co/spaces/mteb/leaderboard
# for better performance and multilanguage support use "intfloat/multilingual-e5-large" (~2.5GB) or "intfloat/multilingual-e5-base" (~1.5GB)
# IMPORTANT: if you change the embedding model (default: sentence-transformers/all-MiniLM-L6-v2), you can no longer use RAG Chat with documents previously loaded in the WebUI; you need to re-embed them.
ARG USE_EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2
ARG USE_RERANKING_MODEL=""
# Tiktoken encoding name; models to use can be found at https://huggingface.co/models?library=tiktoken
ARG USE_TIKTOKEN_ENCODING_NAME="cl100k_base"
ARG BUILD_HASH=dev-build
# Override at your own risk - non-root configurations are untested
ARG UID=0
ARG GID=0
######## WebUI frontend ########
FROM --platform=$BUILDPLATFORM node:22-alpine3.20 AS build
ARG BUILD_HASH
WORKDIR /app
COPY package.json package-lock.json ./
RUN npm ci
COPY . .
ENV APP_BUILD_HASH=${BUILD_HASH}
RUN npm run build
RUN find * | grep -v '\(CHANGELOG.md\|package.json\|build\|backend\)'| xargs rm -rf
RUN rm -rf .svelte-kit/
RUN tar -czvf /root/app.tar.gz /app
######## WebUI backend ########
FROM node:22-alpine3.20 AS base
COPY --from=build /root/app.tar.gz /root
ENTRYPOINT [ "/bin/sh"]
Build the image, create a container from it, and copy app.tar.gz out of the container.
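A minimal sketch of that step; the image and container names (open-webui-frontend, owui-tmp) are arbitrary placeholders, and docker cp works on a created container without starting it:
# build the frontend image from the Dockerfile above
docker build -t open-webui-frontend .
# create (not run) a throwaway container and copy the archive out
docker create --name owui-tmp open-webui-frontend
docker cp owui-tmp:/root/app.tar.gz .
docker rm owui-tmp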
2. Deploy on the host: extract the archive to / (which recreates /app), then run the deployment script below as root.
tar -xvzf app.tar.gz -C /
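As an optional sanity check (based on the find/grep filter in the Dockerfile above, not a guarantee of the exact listing), /app should now contain at least the backend sources and the built frontend:
ls /app
# expected to include: CHANGELOG.md  backend  build  package.json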
#!/bin/bash
# Configuration (mirrors the Dockerfile build args)
USE_CUDA=0
USE_OLLAMA=0
USE_CUDA_VER=""
# These should match the models chosen in step 1, e.g. sentence-transformers/all-MiniLM-L6-v2
USE_EMBEDDING_MODEL_DOCKER="model-name"
USE_RERANKING_MODEL_DOCKER="reranking-model-name"
# UID is a readonly shell variable in bash, so use dedicated names for the service account
APP_UID=1000
APP_GID=1000
# tar xvf app.tar -C /
# Create user and group (only needed when not running the service as root)
groupadd --gid $APP_GID app
useradd --uid $APP_UID --gid $APP_GID --home /root -s /bin/bash app
# Set environment variables
export ENV="prod"
export PORT=8080
export OLLAMA_BASE_URL="/ollama"
export OPENAI_API_KEY=""
export WEBUI_SECRET_KEY=""
export SCARF_NO_ANALYTICS=true
export DO_NOT_TRACK=true
export ANONYMIZED_TELEMETRY=false
export USE_CUDA_VER=0
# Set other model settings
export WHISPER_MODEL="base"
export WHISPER_MODEL_DIR="/app/backend/data/cache/whisper/models"
export RAG_EMBEDDING_MODEL="$USE_EMBEDDING_MODEL_DOCKER"
export RAG_RERANKING_MODEL="$USE_RERANKING_MODEL_DOCKER"
export SENTENCE_TRANSFORMERS_HOME="/app/backend/data/cache/embedding/models"
# Set Tiktoken model settings
export TIKTOKEN_ENCODING_NAME="cl100k_base"
export TIKTOKEN_CACHE_DIR="/app/backend/data/cache/tiktoken"
# Set Torch Extensions directory
# export TORCH_EXTENSIONS_DIR="/.cache/torch_extensions"
# Create necessary directories and set permissions
mkdir -p /root/.cache/chroma
echo -n 00000000-0000-0000-0000-000000000000 > /root/.cache/chroma/telemetry_user_id
# Install dependencies
apt-get update
apt-get install -y --no-install-recommends git build-essential pandoc gcc netcat-openbsd curl jq
apt-get install -y --no-install-recommends gcc python3-dev
# for RAG OCR
apt-get install -y --no-install-recommends ffmpeg libsm6 libxext6
# cleanup
rm -rf /var/lib/apt/lists/*
pip3 install uv
# Make uv available on PATH (path depends on your Python 3.11 installation prefix)
cp /usr/local/python3.11/bin/uv* /usr/bin/
# Create and activate a virtual environment under /app; `uv pip install` needs an active venv (or --system)
cd /app && uv venv && source /app/.venv/bin/activate
uv pip install torch --no-cache-dir
uv pip install torchvision --no-cache-dir
uv pip install torchaudio --no-cache-dir
uv pip install transformers pysqlite3-binary --no-cache-dir
# Install Python dependencies from the requirements file and pre-download the models
uv pip install --no-cache-dir -r /app/backend/requirements.txt && \
python -c "import os; from sentence_transformers import SentenceTransformer; SentenceTransformer(os.environ['RAG_EMBEDDING_MODEL'], device='cpu')" && \
python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])"; \
python -c "import os; import tiktoken; tiktoken.get_encoding(os.environ['TIKTOKEN_ENCODING_NAME'])";
# Change ownership of directories
chown -R $APP_UID:$APP_GID /root/.cache/chroma
# Patch chromadb in the venv to load the bundled pysqlite3 in place of the system sqlite3 module
sed -i "1i\__import__('pysqlite3')\nimport sys\nsys.modules['sqlite3'] = sys.modules.pop('pysqlite3')\n" /app/.venv/lib/python3.11/site-packages/chromadb/__init__.py
# If you hit an error like "could not find _sqlite3", copy the shared object as shown below:
#cp /usr/local/python3.11/lib/python3.11/site-packages/pysqlite3/_sqlite3.cpython-311-x86_64-linux-gnu.so /usr/local/python3.11/lib/python3.11/lib-dynload/
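An optional check that the pysqlite3 shim took effect (assumes the venv created above is active; chromadb requires sqlite3 >= 3.35.0, which pysqlite3-binary bundles):
python -c "import chromadb, sqlite3; print(sqlite3.sqlite_version)"
# should print the sqlite version bundled with pysqlite3, not the (often older) system one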
# Run application
exec bash /app/backend/start.sh
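Once start.sh is listening on port 8080, you can probe the service from another shell; the /health endpoint is the same one the official Docker image's HEALTHCHECK uses, and curl/jq were installed above:
curl --silent --fail http://localhost:8080/health | jq .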
