feat: update runpod example and client
- Update the RunPod example to use a new API key and reduce resource allocation for pods.
- Added stop pod functionality to the RunPod client and example.
- Updated the RunPod client to use new API endpoints.
- Updated the base URL for the RunPod client.
- Added authorization header to HTTP client.

Co-authored-by: mariobassem12 <mariobassem12@gmail.com>
@@ -4,9 +4,9 @@
 import freeflowuniverse.herolib.clients.runpod

 // Example 1: Create client with direct API key
-mut rp := runpod.get_or_create(
+mut rp := runpod.get(
 	name: 'example1'
-	api_key: 'rpa_YYQ2HSM1AVP55MKX39R3LTH5KDCSWJBVKG5Y52Z2oryd46'
+	api_key: 'rpa_P77PNL3UHJ2XP0EC3XKYCH8M8BZREVLY9U4VGK4E1p4j68'
 )!

 // Create a new on demand pod
@@ -15,11 +15,11 @@ on_demand_pod_response := rp.create_on_demand_pod(
 	image_name: 'runpod/tensorflow'
 	cloud_type: .all
 	gpu_count: 1
-	volume_in_gb: 40
-	container_disk_in_gb: 40
-	min_memory_in_gb: 15
-	min_vcpu_count: 2
-	gpu_type_id: 'NVIDIA RTX A6000'
+	volume_in_gb: 5
+	container_disk_in_gb: 5
+	min_memory_in_gb: 4
+	min_vcpu_count: 1
+	gpu_type_id: 'NVIDIA RTX 2000 Ada'
 	ports: '8888/http'
 	volume_mount_path: '/workspace'
 	env: [
@@ -38,13 +38,13 @@ spot_pod_response := rp.create_spot_pod(
 	bid_per_gpu: 0.2
 	cloud_type: .secure
 	gpu_count: 1
-	volume_in_gb: 40
-	container_disk_in_gb: 40
-	min_vcpu_count: 2
-	min_memory_in_gb: 15
-	gpu_type_id: 'NVIDIA RTX A6000'
+	volume_in_gb: 5
+	container_disk_in_gb: 5
+	min_vcpu_count: 1
+	min_memory_in_gb: 4
+	gpu_type_id: 'NVIDIA RTX 2000 Ada'
 	name: 'RunPod Pytorch'
-	image_name: 'runpod/pytorc2h'
+	image_name: 'runpod/pytorch'
 	docker_args: ''
 	ports: '8888/http'
 	volume_mount_path: '/workspace'
@@ -57,6 +57,18 @@ spot_pod_response := rp.create_spot_pod(
 )!
 println('Created spot pod with ID: ${spot_pod_response.id}')

+// stop on-demand pod
+stop_on_demand_pod := rp.stop_pod(
+	pod_id: '${on_demand_pod_response.id}'
+)!
+println('Stopped on-demand pod with ID: ${stop_on_demand_pod.id}')
+
+// stop spot pod
+stop_spot_pod := rp.stop_pod(
+	pod_id: '${spot_pod_response.id}'
+)!
+println('Stopped spot pod with ID: ${stop_spot_pod.id}')
+
 // start on-demand pod
 start_on_demand_pod := rp.start_on_demand_pod(pod_id: '${on_demand_pod_response.id}', gpu_count: 1)!
 println('Started on demand pod with ID: ${start_on_demand_pod.id}')
@@ -38,7 +38,7 @@ pub mut:

 // Create On-Demand Pod
 pub fn (mut rp RunPod) create_on_demand_pod(input PodFindAndDeployOnDemandRequest) !PodResult {
-	return rp.create_pod_find_and_deploy_on_demand_request(input)!
+	return rp.create_on_demand_pod_request(input)!
 }

 @[params]
@@ -78,7 +78,7 @@ pub mut:

 // Create Spot Pod
 pub fn (mut rp RunPod) create_spot_pod(input PodRentInterruptableInput) !PodResult {
-	return rp.create_create_spot_pod_request(input)!
+	return rp.create_spot_pod_request(input)!
 }

 @[params]
@@ -86,7 +86,6 @@ pub struct PodResume {
 pub mut:
 	pod_id    string @[json: 'podId']
 	gpu_count int    @[json: 'gpuCount']
-	bid_per_gpu f32 @[json: 'bidPerGpu']
 }

 // Start On-Demand Pod
@@ -94,7 +93,20 @@ pub fn (mut rp RunPod) start_on_demand_pod(input PodResume) !PodResult {
 	return rp.start_on_demand_pod_request(input)!
 }

+@[params]
+pub struct PodBidResume {
+pub mut:
+	pod_id      string @[json: 'podId']
+	gpu_count   int    @[json: 'gpuCount']
+	bid_per_gpu f32    @[json: 'bidPerGpu']
+}
+
 // Start Spot Pod
-pub fn (mut rp RunPod) start_spot_pod(input PodResume) !PodResult {
+pub fn (mut rp RunPod) start_spot_pod(input PodBidResume) !PodResult {
 	return rp.start_spot_pod_request(input)!
 }
+
+// Stop Pod
+pub fn (mut rp RunPod) stop_pod(input PodResume) !PodResult {
+	return rp.stop_pod_request(input)!
+}
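With these signatures in place, spot pods are started from a `PodBidResume` and any pod is stopped through `stop_pod`. A minimal usage sketch follows; the pod id and bid value are illustrative, and `rp` is assumed to be a client obtained via `runpod.get`:

// resume a previously created spot pod, bidding per GPU (values illustrative)
started := rp.start_spot_pod(
	pod_id:      'abc123xyz'
	gpu_count:   1
	bid_per_gpu: 0.2
)!
println('spot pod ${started.id} is starting')

// stop it again through the shared stop_pod call
stopped := rp.stop_pod(pod_id: 'abc123xyz')!
println('pod ${stopped.id} stopped')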
@@ -18,8 +18,8 @@ pub mut:
 	api_key string // RunPod API key
 }

-// get_or_create gets an existing RunPod instance or creates a new one
-pub fn get_or_create(args_ ArgsGet) !&RunPod {
+// The get method gets an existing RunPod instance or creates a new one
+pub fn get(args_ ArgsGet) !&RunPod {
 	mut args := args_

 	if args.name == '' {
@@ -5,16 +5,16 @@ module runpod
 // - Decode the response received from the API into two objects `Data` and `Error`.
 // - The data field should contains the pod details same as `PodResult` struct.
 // - The error field should contain the error message.
-fn (mut rp RunPod) create_pod_find_and_deploy_on_demand_request(request PodFindAndDeployOnDemandRequest) !PodResult {
+fn (mut rp RunPod) create_on_demand_pod_request(request PodFindAndDeployOnDemandRequest) !PodResult {
 	gql := build_query(
 		query_type: .mutation
 		method_name: 'podFindAndDeployOnDemand'
 		request_model: request
 		response_model: PodResult{}
 	)
-	response_ := rp.make_request[GqlResponse[PodResult]](.post, '/graphql', gql)!
-	return response_.data['podFindAndDeployOnDemand'] or {
-		return error('Could not find podFindAndDeployOnDemand in response data: ${response_.data}')
+	response := rp.make_request[GqlResponse[PodResult]](.post, '/graphql', gql)!
+	return response.data['podFindAndDeployOnDemand'] or {
+		return error('Could not find "podFindAndDeployOnDemand" in response data: ${response.data}')
 	}
 }

@@ -23,16 +23,16 @@ fn (mut rp RunPod) create_pod_find_and_deploy_on_demand_request(request PodFindA
 // - Decode the response received from the API into two objects `Data` and `Error`.
 // - The data field should contains the pod details same as `PodResult` struct.
 // - The error field should contain the error message.
-fn (mut rp RunPod) create_create_spot_pod_request(input PodRentInterruptableInput) !PodResult {
+fn (mut rp RunPod) create_spot_pod_request(input PodRentInterruptableInput) !PodResult {
 	gql := build_query(
 		query_type: .mutation
 		method_name: 'podRentInterruptable'
 		request_model: input
 		response_model: PodResult{}
 	)
-	response_ := rp.make_request[GqlResponse[PodResult]](.post, '/graphql', gql)!
-	return response_.data['podRentInterruptable'] or {
-		return error('Could not find podRentInterruptable in response data: ${response_.data}')
+	response := rp.make_request[GqlResponse[PodResult]](.post, '/graphql', gql)!
+	return response.data['podRentInterruptable'] or {
+		return error('Could not find "podRentInterruptable" in response data: ${response.data}')
 	}
 }

@@ -48,9 +48,9 @@ fn (mut rp RunPod) start_on_demand_pod_request(input PodResume) !PodResult {
 		request_model: input
 		response_model: PodResult{}
 	)
-	response_ := rp.make_request[GqlResponse[PodResult]](.post, '/graphql', gql)!
-	return response_.data['podResume'] or {
-		return error('Could not find podRentInterruptable in response data: ${response_.data}')
+	response := rp.make_request[GqlResponse[PodResult]](.post, '/graphql', gql)!
+	return response.data['podResume'] or {
+		return error('Could not find "podResume" in response data: ${response.data}')
 	}
 }

@@ -59,15 +59,33 @@ fn (mut rp RunPod) start_on_demand_pod_request(input PodResume) !PodResult {
 // - Decode the response received from the API into two objects `Data` and `Error`.
 // - The data field should contains the pod details same as `PodResult` struct.
 // - The error field should contain the error message.
-fn (mut rp RunPod) start_spot_pod_request(input PodResume) !PodResult {
+fn (mut rp RunPod) start_spot_pod_request(input PodBidResume) !PodResult {
 	gql := build_query(
 		query_type: .mutation
 		method_name: 'podBidResume'
 		request_model: input
 		response_model: PodResult{}
 	)
-	response_ := rp.make_request[GqlResponse[PodResult]](.post, '/graphql', gql)!
-	return response_.data['podBidResume'] or {
-		return error('Could not find podRentInterruptable in response data: ${response_.data}')
+	response := rp.make_request[GqlResponse[PodResult]](.post, '/graphql', gql)!
+	return response.data['podBidResume'] or {
+		return error('Could not find "podBidResume" in response data: ${response.data}')
 	}
 }
+
+// #### Internally method doing a network call to stop a pod.
+// - Build the required query based pn the input sent by the user and send the request.
+// - Decode the response received from the API into two objects `Data` and `Error`.
+// - The data field should contains the pod details same as `PodResult` struct.
+// - The error field should contain the error message.
+fn (mut rp RunPod) stop_pod_request(input PodResume) !PodResult {
+	gql := build_query(
+		query_type: .mutation
+		method_name: 'podStop'
+		request_model: input
+		response_model: PodResult{}
+	)
+	response := rp.make_request[GqlResponse[PodResult]](.post, '/graphql', gql)!
+	return response.data['podStop'] or {
+		return error('Could not find "podStop" in response data: ${response.data}')
+	}
+}
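For reference, the GraphQL document that build_query is expected to assemble for stop_pod_request has roughly the shape sketched below; the podId value and the selected result fields (which follow whatever is declared on PodResult) are illustrative, not taken from the diff:

// rough shape of the generated podStop mutation (placeholder values)
expected := 'mutation { podStop(input: { podId: "abc123xyz", gpuCount: 1 }) { id desiredStatus } }'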
@@ -10,7 +10,7 @@ pub fn heroscript_default() !string {
 	!!runpod.configure
 		name:'default'
 		api_key:''
-		base_url:'https://api.runpod.io/v1'
+		base_url:'https://api.runpod.io/'
 	"
 }

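Filled in, such a heroscript configuration block would look roughly like the sketch below (the key value is illustrative); an instance configured this way can then be fetched by name, e.g. with `runpod.get(name: 'default')!`:

!!runpod.configure
	name:'default'
	api_key:'rpa_XXXXXXXXXXXXXXXXXXXXXXXX'
	base_url:'https://api.runpod.io/'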
@@ -20,7 +20,7 @@ pub struct RunPod {
 pub mut:
 	name     string = 'default'
 	api_key  string
-	base_url string = 'https://api.runpod.io/v1'
+	base_url string = 'https://api.runpod.io/'
 }

 pub enum CloudType {
@@ -12,6 +12,7 @@ fn (mut rp RunPod) httpclient() !&httpconnection.HTTPConnection {
 		cache: true
 		retry: 3
 	)!
+	http_conn.default_header.add(.authorization, 'Bearer ${rp.api_key}')
 	return http_conn
 }

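Every request issued through this connection now carries the API key, i.e. a header of the form shown below (value illustrative):

Authorization: Bearer rpa_XXXXXXXXXXXXXXXXXXXXXXXX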
@@ -110,11 +111,26 @@ pub:

 // Builds a GraphQL query or mutation string from provided arguments.
 fn build_query[T, R](args BuildQueryArgs[T, R]) string {
-	mut request_fields := get_request_fields(args.request_model)
-	mut response_fields := get_response_fields(args.response_model)
+	mut request_fields := T{}
+	mut response_fields := R{}
+
+	// Wrap the query correctly
+	if args.request_model {
+		request_fields = get_request_fields(args.request_model)
+	}
+
+	if args.response_model {
+		response_fields = get_response_fields(args.response_model)
+	}
+
+	mut query := ''
+
+	if args.request_model && args.response_model{
+		query := '${args.query_type.to_string()} { ${args.method_name}(input: ${request_fields}) ${response_fields} }'
+	}
+
+	if args.response_model && !args.request_model{
+		query := '${args.query_type.to_string()} { ${response_fields} }'
+	}

 	// Wrap in the final structure
 	gql := GqlQuery{