WIP: refactor RunPod client

- Refactor RunPod client to use a new GraphQL builder.
- This improves the readability and maintainability of the code.
- The old `build_query` function was removed; the new `QueryBuilder` struct
  is now used instead, allowing a more flexible and extensible approach to
  constructing GraphQL queries.
- The example in `runpod_example.vsh` is commented out until the new
  GraphQL builder is fully implemented.

Co-authored-by: mariobassem12 <mariobassem12@gmail.com>
This commit is contained in:
Mahmoud Emad
2025-01-22 20:35:45 +02:00
parent 7486d561ec
commit 6f9d570a93
5 changed files with 340 additions and 188 deletions

View File

@@ -32,51 +32,51 @@ on_demand_pod_response := rp.create_on_demand_pod(
// Report the on-demand pod created above.
println('Created pod with ID: ${on_demand_pod_response.id}')

// Environment injected into the container; the runpod/pytorch image reads
// JUPYTER_PASSWORD on startup.
jupyter_env := [
	runpod.EnvironmentVariableInput{
		key:   'JUPYTER_PASSWORD'
		value: 'rn51hunbpgtltcpac3ol'
	},
]

// create a spot pod (bid-priced, interruptible)
spot_pod_response := rp.create_spot_pod(
	port:                 1826
	bid_per_gpu:          0.2
	cloud_type:           .secure
	gpu_count:            1
	volume_in_gb:         5
	container_disk_in_gb: 5
	min_vcpu_count:       1
	min_memory_in_gb:     4
	gpu_type_id:          'NVIDIA RTX 2000 Ada'
	name:                 'RunPod Pytorch'
	image_name:           'runpod/pytorch'
	docker_args:          ''
	ports:                '8888/http'
	volume_mount_path:    '/workspace'
	env:                  jupyter_env
)!
println('Created spot pod with ID: ${spot_pod_response.id}')
// // create a spot pod
// spot_pod_response := rp.create_spot_pod(
// port: 1826
// bid_per_gpu: 0.2
// cloud_type: .secure
// gpu_count: 1
// volume_in_gb: 5
// container_disk_in_gb: 5
// min_vcpu_count: 1
// min_memory_in_gb: 4
// gpu_type_id: 'NVIDIA RTX 2000 Ada'
// name: 'RunPod Pytorch'
// image_name: 'runpod/pytorch'
// docker_args: ''
// ports: '8888/http'
// volume_mount_path: '/workspace'
// env: [
// runpod.EnvironmentVariableInput{
// key: 'JUPYTER_PASSWORD'
// value: 'rn51hunbpgtltcpac3ol'
// },
// ]
// )!
// println('Created spot pod with ID: ${spot_pod_response.id}')
// stop on-demand pod
// NOTE(review): stop_pod is given the pod ID as an interpolated string and the
// `!` propagates any API error to the caller — confirm against the runpod module.
stop_on_demand_pod := rp.stop_pod(
pod_id: '${on_demand_pod_response.id}'
)!
println('Stopped on-demand pod with ID: ${stop_on_demand_pod.id}')
// // stop on-demand pod
// stop_on_demand_pod := rp.stop_pod(
// pod_id: '${on_demand_pod_response.id}'
// )!
// println('Stopped on-demand pod with ID: ${stop_on_demand_pod.id}')
// stop spot pod
// Same stop_pod call as above, targeting the spot pod created earlier.
stop_spot_pod := rp.stop_pod(
pod_id: '${spot_pod_response.id}'
)!
println('Stopped spot pod with ID: ${stop_spot_pod.id}')
// // stop spot pod
// stop_spot_pod := rp.stop_pod(
// pod_id: '${spot_pod_response.id}'
// )!
// println('Stopped spot pod with ID: ${stop_spot_pod.id}')
// start on-demand pod
// Restarts the on-demand pod stopped above, attaching a single GPU.
start_on_demand_pod := rp.start_on_demand_pod(pod_id: '${on_demand_pod_response.id}', gpu_count: 1)!
println('Started on demand pod with ID: ${start_on_demand_pod.id}')
// // start on-demand pod
// start_on_demand_pod := rp.start_on_demand_pod(pod_id: '${on_demand_pod_response.id}', gpu_count: 1)!
// println('Started on demand pod with ID: ${start_on_demand_pod.id}')
// start spot pod
// Restarts the spot pod stopped above, re-submitting a bid of 0.2 per GPU.
start_spot_pod := rp.start_spot_pod(
	pod_id:      '${spot_pod_response.id}'
	gpu_count:   1
	bid_per_gpu: 0.2
)!
// BUG FIX: this previously printed start_on_demand_pod.id, reporting the
// wrong pod's ID; it must print the spot pod just started.
println('Started spot pod with ID: ${start_spot_pod.id}')
// // start spot pod
// start_spot_pod := rp.start_spot_pod(
// pod_id: '${spot_pod_response.id}'
// gpu_count: 1
// bid_per_gpu: 0.2
// )!
// println('Started spot pod with ID: ${start_spot_pod.id}')