feat: update runpod example and client

- Update the RunPod example to use a new API key and reduce pod resource allocation.
- Add stop-pod functionality to the RunPod client and example.
- Update the RunPod client to use the new API endpoints.
- Update the base URL for the RunPod client.
- Add an Authorization header to the RunPod client's HTTP requests.

Co-authored-by: mariobassem12 <mariobassem12@gmail.com>
Author: Mahmoud Emad
Date:   2025-01-21 15:54:48 +02:00
Parent: 50116651de
Commit: 7486d561ec
6 changed files with 100 additions and 42 deletions

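The diff below covers only the example file. The client-side changes listed in the
message (new API endpoints, base URL, Authorization header) live in other files of
this commit. A minimal sketch of how the header might be attached, assuming a RunPod
struct with api_key and base_url fields (names, endpoint, and Bearer scheme are
assumptions, not taken from herolib):

import net.http

// Hypothetical client shape; field names and the default endpoint are assumptions.
struct RunPod {
	api_key  string
	base_url string = 'https://api.runpod.io/graphql' // assumed default
}

// Send a JSON payload to the API with the key in an Authorization header,
// as the commit message describes for the updated HTTP client.
fn (rp RunPod) call_api(payload string) !string {
	mut req := http.Request{
		method: .post
		url:    rp.base_url
		data:   payload
	}
	// Bearer scheme assumed here.
	req.add_custom_header('Authorization', 'Bearer ${rp.api_key}')!
	req.add_header(.content_type, 'application/json')
	resp := req.do()!
	return resp.body
}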

@@ -4,9 +4,9 @@
 import freeflowuniverse.herolib.clients.runpod
 // Example 1: Create client with direct API key
-mut rp := runpod.get_or_create(
+mut rp := runpod.get(
 	name: 'example1'
-	api_key: 'rpa_YYQ2HSM1AVP55MKX39R3LTH5KDCSWJBVKG5Y52Z2oryd46'
+	api_key: 'rpa_P77PNL3UHJ2XP0EC3XKYCH8M8BZREVLY9U4VGK4E1p4j68'
 )!
 // Create a new on demand pod
@@ -15,11 +15,11 @@ on_demand_pod_response := rp.create_on_demand_pod(
 	image_name: 'runpod/tensorflow'
 	cloud_type: .all
 	gpu_count: 1
-	volume_in_gb: 40
-	container_disk_in_gb: 40
-	min_memory_in_gb: 15
-	min_vcpu_count: 2
-	gpu_type_id: 'NVIDIA RTX A6000'
+	volume_in_gb: 5
+	container_disk_in_gb: 5
+	min_memory_in_gb: 4
+	min_vcpu_count: 1
+	gpu_type_id: 'NVIDIA RTX 2000 Ada'
 	ports: '8888/http'
 	volume_mount_path: '/workspace'
 	env: [
@@ -38,13 +38,13 @@ spot_pod_response := rp.create_spot_pod(
 	bid_per_gpu: 0.2
 	cloud_type: .secure
 	gpu_count: 1
-	volume_in_gb: 40
-	container_disk_in_gb: 40
-	min_vcpu_count: 2
-	min_memory_in_gb: 15
-	gpu_type_id: 'NVIDIA RTX A6000'
+	volume_in_gb: 5
+	container_disk_in_gb: 5
+	min_vcpu_count: 1
+	min_memory_in_gb: 4
+	gpu_type_id: 'NVIDIA RTX 2000 Ada'
 	name: 'RunPod Pytorch'
-	image_name: 'runpod/pytorc2h'
+	image_name: 'runpod/pytorch'
 	docker_args: ''
 	ports: '8888/http'
 	volume_mount_path: '/workspace'
@@ -57,6 +57,18 @@ spot_pod_response := rp.create_spot_pod(
 )!
 println('Created spot pod with ID: ${spot_pod_response.id}')
+// stop on-demand pod
+stop_on_demand_pod := rp.stop_pod(
+	pod_id: '${on_demand_pod_response.id}'
+)!
+println('Stopped on-demand pod with ID: ${stop_on_demand_pod.id}')
+// stop spot pod
+stop_spot_pod := rp.stop_pod(
+	pod_id: '${spot_pod_response.id}'
+)!
+println('Stopped spot pod with ID: ${stop_spot_pod.id}')
+// start on-demand pod
+start_on_demand_pod := rp.start_on_demand_pod(pod_id: '${on_demand_pod_response.id}', gpu_count: 1)!
 println('Started on demand pod with ID: ${start_on_demand_pod.id}')
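The stop_pod and start_on_demand_pod calls used above are implemented in the client,
outside this file's diff. A rough sketch of what stop_pod might look like, building on
the hypothetical call_api helper above and assuming RunPod's public podStop GraphQL
mutation (argument and result shapes are illustrative, not read from herolib):

import json

// Illustrative argument and result shapes; not herolib's actual types.
@[params]
struct StopPodArgs {
	pod_id string
}

struct StoppedPod {
	id             string
	desired_status string @[json: 'desiredStatus']
}

struct StopPodData {
	pod_stop StoppedPod @[json: 'podStop']
}

struct StopPodResponse {
	data StopPodData
}

struct GqlRequest {
	query string
}

// Stop a pod by id; one podStop mutation per call (mutation shape assumed).
fn (rp RunPod) stop_pod(args StopPodArgs) !StoppedPod {
	query := 'mutation { podStop(input: {podId: "${args.pod_id}"}) { id desiredStatus } }'
	payload := json.encode(GqlRequest{ query: query })
	resp := rp.call_api(payload)!
	decoded := json.decode(StopPodResponse, resp)!
	return decoded.data.pod_stop
}

With a client shaped like this, each rp.stop_pod(pod_id: ...) call in the example maps
to a single podStop request authorized by the new header.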