This commit is contained in:
2025-08-25 07:06:50 +02:00
parent e8d09164ff
commit 581fb0c0f0
15 changed files with 287 additions and 2 deletions

47
atest.py Normal file
View File

@@ -0,0 +1,47 @@
#!/usr/bin/env python3
"""Transcribe an audio file with a Whisper model served by LM Studio.

Steps: list the models LM Studio exposes on its OpenAI-compatible API,
pick the first one whose id contains "whisper", then POST the audio file
to /audio/transcriptions and print the returned text.
"""
import requests
import sys
import os

# ---- Config ----
LMSTUDIO_URL = "http://172.22.22.210:1234/v1"
AUDIO_FILE = "/Users/despiegk/Downloads/harvard.wav"  # change to your input file
TIMEOUT = 300  # seconds; without a timeout, requests can hang indefinitely


def find_whisper_model():
    """Return the id of the first Whisper model LM Studio reports, or None."""
    models_resp = requests.get(f"{LMSTUDIO_URL}/models", timeout=TIMEOUT)
    models_resp.raise_for_status()
    models = [m["id"] for m in models_resp.json().get("data", [])]
    print("Available models:", models)
    return next((m for m in models if "whisper" in m.lower()), None)


def transcribe(model):
    """POST AUDIO_FILE to the transcription endpoint and return the parsed JSON."""
    with open(AUDIO_FILE, "rb") as f:
        resp = requests.post(
            f"{LMSTUDIO_URL}/audio/transcriptions",
            headers={"Authorization": "Bearer no-key"},  # LM Studio ignores key
            files={"file": f},
            data={"model": model},
            timeout=TIMEOUT,
        )
    resp.raise_for_status()
    return resp.json()


def main():
    # ---- Step 1 + 2: List models and find Whisper ----
    whisper_model = find_whisper_model()
    if not whisper_model:
        print("❌ No Whisper model found in LM Studio. Please download/start one.")
        sys.exit(1)
    print(f"✅ Found Whisper model: {whisper_model}")
    # ---- Step 3: Transcribe ----
    if not os.path.exists(AUDIO_FILE):
        print(f"❌ Audio file '{AUDIO_FILE}' not found.")
        sys.exit(1)
    result = transcribe(whisper_model)
    print("📝 Transcription result:")
    # Fall back to printing the whole payload if "text" is absent.
    print(result.get("text", result))


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,32 @@
# Common error handling setup
# Sourced by task scripts: on any failing command it writes the failure
# details to "<script>.error"; on success the script calls mark_done to
# create "<script>.done". An external runner can poll these marker files.
# set -euo pipefail
SCRIPT="${BASH_SOURCE[-1]}" # last sourced = the actual script file
ERROR_FILE="$SCRIPT.error"
DONE_FILE="$SCRIPT.done"
# Reset markers
rm -f "$ERROR_FILE" "$DONE_FILE"
# ERR-trap handler: receives the failing line number ($1) and command ($2)
# from the trap below, and records them plus the exit code to ERROR_FILE.
error_handler() {
local exit_code=$?
local line_no=$1
local cmd="$2"
{
echo "EXIT_CODE=$exit_code"
echo "LINE=$line_no"
echo "COMMAND=$cmd"
} > "$ERROR_FILE"
# If we are inside a sourced script, don't kill the shell
if [[ "${BASH_SOURCE[0]}" != "$0" ]]; then
return $exit_code
else
exit $exit_code
fi
}
# ${LINENO} and "$BASH_COMMAND" are expanded at trap time, so the handler
# sees the location and text of the command that actually failed.
trap 'error_handler ${LINENO} "$BASH_COMMAND"' ERR
# Called by scripts as their final step to signal successful completion.
mark_done() {
touch "$DONE_FILE"
}

View File

@@ -0,0 +1,12 @@
# Run python from the current directory's .venv without touching the
# caller's environment: activation happens inside a subshell.
hpy() {
    local activate=".venv/bin/activate"
    if [ ! -f "$activate" ]; then
        echo "Error: .venv not found in current directory" >&2
        return 1
    fi
    # Subshell keeps the activation local to this one invocation.
    (
        . "$activate"
        python "$@"
    )
}

View File

@@ -0,0 +1,19 @@
# Resolve the name of the single running tmux session into SESSIONNAME
# (also echoed). Fails when there are zero sessions or more than one,
# since the caller needs an unambiguous target.
get_session() {
    local sessions
    sessions=$(tmux ls 2>/dev/null | cut -d: -f1)
    # Bug fix: `echo "" | wc -l` prints 1, so line-counting can never
    # detect the empty case — test for emptiness directly instead.
    if [ -z "$sessions" ]; then
        echo "Error: no tmux sessions found." >&2
        return 1
    fi
    local count
    count=$(printf '%s\n' "$sessions" | wc -l)
    if [ "$count" -gt 1 ]; then
        echo "Error: more than one tmux session found:" >&2
        echo "$sessions" >&2
        return 1
    fi
    export SESSIONNAME="$sessions"
    echo "$SESSIONNAME"
}

View File

@@ -0,0 +1,3 @@
# SSH endpoint for the remote host — presumably the GPU box these setup
# scripts target; confirm against the caller. Sourced, not executed.
export SSH_SERVER=108.5.176.71
export SSH_PORT=10200

View File

@@ -0,0 +1,10 @@
#!/bin/bash
# Base host setup: update packages and install common terminal/monitoring tools.
set -euo pipefail
# Resolve base.sh relative to this script's location so the script works
# regardless of the caller's current working directory.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$SCRIPT_DIR/../../functions/base.sh"
apt update
apt upgrade -y
# NOTE(review): "psutils" is the PostScript utilities package; if the Python
# process library was intended, the package would be python3-psutil — confirm.
apt install -y tmux btop nvtop psutils htop
mark_done

View File

@@ -0,0 +1,57 @@
#!/usr/bin/env bash
# Write a ~/.tmux.conf with mouse support, vi copy-mode, a themed status
# bar and clipboard integration, then reload tmux if it is running.
set -euo pipefail
# Fix: the original read "source source ..." which tries to source a file
# literally named "source" and aborts under `set -e`.
source ../../functions/base.sh
# --- create ~/.tmux.conf ---
TMUX_CONF="$HOME/.tmux.conf"
cat > "$TMUX_CONF" <<'EOF'
# ~/.tmux.conf
# Enable mouse support (scroll, resize, select panes/windows)
set -g mouse on
# Use the mouse wheel to scroll in copy mode automatically
bind -T root WheelUpPane if-shell -F -t = "#{mouse_any_flag}" \
"send-keys -M" "if -Ft= '#{pane_in_mode}' 'send-keys -M' 'copy-mode -e'"
# Allow resizing panes by dragging borders
setw -g aggressive-resize on
# Easier navigation in copy mode
setw -g mode-keys vi
# Status bar improvements
set -g status-bg black
set -g status-fg green
set -g status-left-length 40
set -g status-left '#S '
set -g status-right '#(whoami)@#H %Y-%m-%d %H:%M'
# Pane borders more visible
set -g pane-border-style fg=cyan
set -g pane-active-border-style fg=yellow
# Reload config quickly
bind r source-file ~/.tmux.conf \; display-message "Reloaded tmux.conf"
# Use system clipboard on macOS
if-shell "command -v pbcopy >/dev/null 2>&1" \
"bind -T copy-mode-vi y send -X copy-pipe-and-cancel 'pbcopy'" \
"bind -T copy-mode-vi y send -X copy-pipe-and-cancel 'xclip -selection clipboard -in'"
EOF
echo "✅ Wrote $TMUX_CONF"
# --- apply config if tmux is running ---
if pgrep -x tmux >/dev/null 2>&1; then
echo "🔄 Reloading tmux config..."
tmux source-file "$TMUX_CONF"
else
echo " tmux is not running yet. Config will apply on next start."
fi
mark_done

View File

@@ -0,0 +1,42 @@
#!/bin/bash
# Install a pinned ollama release from GitHub and pre-pull a model.
set -euo pipefail
# Fix: the original read "source source ..." which tries to source a file
# literally named "source" and aborts under `set -e`.
source ../../functions/base.sh
# NOTE(review): this early mark_done/exit disables everything below —
# presumably the install has already been run once; remove to re-enable.
mark_done
exit 0
URL="https://github.com/ollama/ollama/releases/download/v0.11.6/ollama-linux-amd64.tgz"
TGZ="/tmp/ollama.tgz"
INSTALL_PATH="/usr/bin/ollama"
echo "[*] Checking for running ollama serve..."
if pgrep -x "ollama" > /dev/null; then
echo "[*] Stopping running ollama process..."
pkill -9 ollama
sleep 2
fi
echo "[*] Downloading ollama..."
curl -L "$URL" -o "$TGZ"
echo "[*] Extracting..."
tar -xzf "$TGZ" -C /tmp
echo "[*] Installing to $INSTALL_PATH..."
sudo mv /tmp/ollama "$INSTALL_PATH"
sudo chmod +x "$INSTALL_PATH"
pkill -9 ollama
SESSION=$(tmux display-message -p '#S')
echo "[*] Using tmux session: $SESSION"
echo "[*] Started ollama pulls in tmux windows."
ollama pull adhishtanaka/llama_3.2_1b-SQL
mark_done

View File

@@ -0,0 +1,14 @@
#!/bin/bash
# Placeholder for serving gpt-oss models with vLLM; the actual serve
# commands are kept commented below as reference.
set -euo pipefail
# Fix: the original read "source source ..." which tries to source a file
# literally named "source" and aborts under `set -e`.
source ../../functions/base.sh
# vllm serve openai/gpt-oss-20b
# vllm serve openai/gpt-oss-20b --tensor-parallel-size 2
# For 120B
# vllm serve openai/gpt-oss-120b
mark_done

View File

@@ -0,0 +1,42 @@
#!/bin/bash
# Install nightly CUDA PyTorch, flash-attn and the gpt-oss vLLM build.
set -euo pipefail
# Fix: the original read "source source ..." which tries to source a file
# literally named "source" and aborts under `set -e`.
source ../../functions/base.sh
# NOTE(review): this early mark_done/exit disables everything below —
# presumably the install has already been run once; remove to re-enable.
mark_done
exit 0
# uv pip install --pre torch --index-url https://download.pytorch.org/whl/nightly/cu121
# uv pip install --pre torch --index-url https://download.pytorch.org/whl/nightly/cu128
touch "$0.done"
exit 0
uv pip install --pre torch==2.9.0.dev20250804+cu128 \
--index-url https://download.pytorch.org/whl/nightly/cu128
uv pip install tiktoken ipython numpy psutil
# 4. Confirm it's correct
python -c "import torch; print(torch.__version__, torch.version.cuda)"
# 2.9.0.dev20250804+cu128 12.8
cd /root
source .venv/bin/activate
uv pip install --upgrade pip setuptools wheel ninja
export MAX_JOBS=8
export TORCH_CUDA_ARCH_LIST="12.0"
export NCCL_P2P_DISABLE=0
export NCCL_DEBUG=INFO
export CUDA_DEVICE_MAX_CONNECTIONS=1
pip install flash-attn --no-build-isolation
uv pip install --pre vllm==0.10.1+gptoss \
--extra-index-url https://wheels.vllm.ai/gpt-oss/ \
--extra-index-url https://download.pytorch.org/whl/nightly/cu128 \
--index-strategy unsafe-best-match
mark_done

View File

@@ -11,6 +11,8 @@ Requires-Dist: fastapi>=0.100.0
Requires-Dist: uvicorn>=0.23.0
Requires-Dist: toml>=0.10.2
Requires-Dist: libtmux>=0.25.0
Requires-Dist: lmstudio
Requires-Dist: requests
# herolib_python

View File

@@ -64,7 +64,10 @@ herolib/downloader/scrape_scapegraph/scrape_md.py
herolib/downloader/scrape_scapegraph/scrape_search.py
herolib/downloader/scrape_scapegraph/scrape_with_local_llm.py
herolib/downloader/scrape_scapegraph/scrape_with_local_llm_search.py
herolib/infra/tmuxrunner/enhanced_runner.py
herolib/infra/tmuxrunner/model.py
herolib/infra/tmuxrunner/process_monitor.py
herolib/infra/tmuxrunner/task_runner.py
herolib/infra/tmuxrunner/task_runner_api.py
herolib/tools/__init__.py
herolib/tools/extensions.py
herolib/tools/gitscanner.py

View File

@@ -4,3 +4,5 @@ fastapi>=0.100.0
uvicorn>=0.23.0
toml>=0.10.2
libtmux>=0.25.0
lmstudio
requests

View File

@@ -7,7 +7,7 @@ authors = [
]
readme = "README.md"
requires-python = ">=3.12"
dependencies = ["peewee", "psutil>=5.9.0", "fastapi>=0.100.0", "uvicorn>=0.23.0", "toml>=0.10.2", "libtmux>=0.25.0"]
dependencies = ["peewee", "psutil>=5.9.0", "fastapi>=0.100.0", "uvicorn>=0.23.0", "toml>=0.10.2", "libtmux>=0.25.0", "lmstudio", "requests"]
[build-system]