42 lines
1.0 KiB
Bash
42 lines
1.0 KiB
Bash
#!/bin/bash
# Provision a GPU dev environment: PyTorch nightly (cu128), flash-attn, and
# vLLM (gpt-oss build). Relies on ../../functions/base.sh for mark_done.

set -euo pipefail

# Load shared helpers (provides mark_done).
# FIX: original read `source source ../../functions/base.sh` — the duplicated
# word made bash try to source a file literally named "source".
source ../../functions/base.sh

# Short-circuit: environment is already provisioned, so just record completion
# and stop. NOTE(review): everything below this exit is intentionally dead
# code, kept as a reference for re-provisioning; remove the two lines below to
# run the full install again.
mark_done
exit 0

# Earlier torch-nightly variants tried:
# uv pip install --pre torch --index-url https://download.pytorch.org/whl/nightly/cu121
# uv pip install --pre torch --index-url https://download.pytorch.org/whl/nightly/cu128

touch "$0.done"
exit 0

# 1. Pin the exact torch nightly build from the cu128 index.
uv pip install --pre torch==2.9.0.dev20250804+cu128 \
  --index-url https://download.pytorch.org/whl/nightly/cu128

# 2. Supporting Python packages.
uv pip install tiktoken ipython numpy psutil

# 3. Confirm the installed torch build and CUDA version.
python -c "import torch; print(torch.__version__, torch.version.cuda)"
# expected output: 2.9.0.dev20250804+cu128 12.8

# 4. Build flash-attn inside the project venv.
cd /root
source .venv/bin/activate
uv pip install --upgrade pip setuptools wheel ninja

export MAX_JOBS=8                        # cap parallel compile jobs
export TORCH_CUDA_ARCH_LIST="12.0"       # target GPU compute capability
export NCCL_P2P_DISABLE=0
export NCCL_DEBUG=INFO
export CUDA_DEVICE_MAX_CONNECTIONS=1

pip install flash-attn --no-build-isolation

# 5. vLLM gpt-oss build; unsafe-best-match lets uv pick the best version
#    across the extra indexes rather than the first index that has a match.
uv pip install --pre vllm==0.10.1+gptoss \
  --extra-index-url https://wheels.vllm.ai/gpt-oss/ \
  --extra-index-url https://download.pytorch.org/whl/nightly/cu128 \
  --index-strategy unsafe-best-match

# FIX: original ended with `mark_done |` — a dangling pipe is a bash syntax
# error at end of file.
mark_done