https://ollama.com/download/linux

mkdir ollama && cd ollama
wget https://ollama.com/download/ollama-linux-amd64.tgz  # Or download it on your local machine and upload it to the server
tar xvf ollama-linux-amd64.tgz
# Set the environment variable; without it, the server only listens on 127.0.0.1
export OLLAMA_HOST=0.0.0.0
./bin/ollama serve
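ollama serve stays in the foreground and exits when the terminal closes. For an ad-hoc background run, a minimal sketch with nohup (the log path ollama.log is just an example name):

# Keep the server running after logout; write output to a log file (example path)
export OLLAMA_HOST=0.0.0.0
nohup ./bin/ollama serve > ollama.log 2>&1 &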

./bin/ollama run deepseek-r1:1.5b
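With the server listening on 0.0.0.0, the HTTP API on the default port 11434 is reachable from other machines. A quick test with curl, where <server-ip> is a placeholder for your server's actual address:

# Replace <server-ip> with the server's address
curl http://<server-ip>:11434/api/generate -d '{
  "model": "deepseek-r1:1.5b",
  "prompt": "Hello",
  "stream": false
}'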



The following start script launches the server and loads the model in one step:

#!/bin/bash
# Model name and path to the ollama binary
MODEL_NAME="deepseek-r1:1.5b"
OLLAMA_PATH="/data/ollama/bin/ollama"

# Listen on all interfaces instead of the default 127.0.0.1
export OLLAMA_HOST=0.0.0.0

# Start the ollama server on its default port
echo "Starting ollama serve on $OLLAMA_HOST with default port..."
$OLLAMA_PATH serve &
SERVE_PID=$!

# Give the server a few seconds to finish starting
echo "Waiting for ollama serve to start..."
sleep 5

# Check that the server is up (the default port is 11434; adjust if yours differs)
DEFAULT_PORT=11434
if ! nc -zv 127.0.0.1 $DEFAULT_PORT; then
    echo "Failed to start ollama serve."
    exit 1
fi
echo "ollama serve started successfully."

# Load the model
echo "Starting model $MODEL_NAME..."
$OLLAMA_PATH run $MODEL_NAME &
MODEL_PID=$!

echo "Model service started with PID $MODEL_PID, listening on $OLLAMA_HOST:$DEFAULT_PORT"
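The fixed sleep 5 in the script can be too short on slow machines. A sketch of a polling loop that could replace the sleep/check pair, waiting until the port answers (the 30-second ceiling is an arbitrary choice):

# Poll until the port answers instead of sleeping a fixed interval; give up after ~30 s
DEFAULT_PORT=11434
for i in $(seq 1 30); do
    if nc -z 127.0.0.1 $DEFAULT_PORT; then
        break
    fi
    sleep 1
done
if ! nc -z 127.0.0.1 $DEFAULT_PORT; then
    echo "Failed to start ollama serve."
    exit 1
fi
echo "ollama serve started successfully."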