feat: Add RunPod client

- Added a new RunPod client to the project.
- Updated the example to use the new client.
- Improved error handling in the client.
- Refactored the code for better readability.

Co-authored-by: mariobassem12 <mariobassem12@gmail.com>
This commit is contained in:
Mahmoud Emad
2025-01-21 10:50:07 +02:00
parent 4422d67701
commit 309496ef5d
6 changed files with 107 additions and 110 deletions

View File

@@ -35,7 +35,7 @@ spot_pod_resp := rp.create_spot_pod(
min_memory_in_gb: 15 min_memory_in_gb: 15
gpu_type_id: 'NVIDIA RTX A6000' gpu_type_id: 'NVIDIA RTX A6000'
name: 'RunPod Pytorch' name: 'RunPod Pytorch'
image_name: 'runpod/pytorch' image_name: 'runpod/pytorch'
docker_args: '' docker_args: ''
ports: '8888/http' ports: '8888/http'
volume_mount_path: '/workspace' volume_mount_path: '/workspace'

View File

@@ -0,0 +1,64 @@
// runpod: V client for the RunPod GraphQL API.
module runpod
// Input structure for the mutation
// Request payload for the `podFindAndDeployOnDemand` GraphQL mutation
// (creates an on-demand pod). Defaults describe a ready-to-run Tensorflow
// pod; JSON attribute names follow the RunPod schema's camelCase convention.
// NOTE(review): default values mirror the example in the module docs —
// confirm they are still sensible defaults for the target account.
@[params]
pub struct PodFindAndDeployOnDemandRequest {
pub mut:
cloud_type CloudType = .all @[json: 'cloudType']
gpu_count int = 1 @[json: 'gpuCount']
volume_in_gb int = 40 @[json: 'volumeInGb']
container_disk_in_gb int = 40 @[json: 'containerDiskInGb']
min_vcpu_count int = 2 @[json: 'minVcpuCount']
min_memory_in_gb int = 15 @[json: 'minMemoryInGb']
gpu_type_id string = 'NVIDIA RTX A6000' @[json: 'gpuTypeId']
name string = 'RunPod Tensorflow' @[json: 'name']
image_name string = 'runpod/tensorflow' @[json: 'imageName']
docker_args string = '' @[json: 'dockerArgs']
ports string = '8888/http' @[json: 'ports']
volume_mount_path string = '/workspace' @[json: 'volumeMountPath']
env []EnvironmentVariableInput @[json: 'env']
}
// create_on_demand_pod provisions an on-demand pod through the
// `podFindAndDeployOnDemand` GraphQL mutation and returns the
// resulting PodResult, or propagates the request error.
pub fn (mut rp RunPod) create_on_demand_pod(input PodFindAndDeployOnDemandRequest) !PodResult {
result := rp.create_pod_find_and_deploy_on_demand_request(input)!
return result
}
// Input structure for the `podRentInterruptable` GraphQL mutation
// (rents an interruptible "spot" pod at the given bid price).
// JSON attribute names follow the RunPod schema's camelCase convention;
// fields tagged `omitempty` are dropped from the payload when unset.
@[params]
pub struct PodRentInterruptableInput {
pub mut:
port int @[json: 'port']
network_volume_id string @[json: 'networkVolumeId'; omitempty]
start_jupyter bool @[json: 'startJupyter']
start_ssh bool @[json: 'startSsh']
bid_per_gpu f32 @[json: 'bidPerGpu']
cloud_type CloudType @[json: 'cloudType']
container_disk_in_gb int @[json: 'containerDiskInGb']
country_code string @[json: 'countryCode'; omitempty]
docker_args string @[json: 'dockerArgs'; omitempty]
env []EnvironmentVariableInput @[json: 'env']
gpu_count int @[json: 'gpuCount']
gpu_type_id string @[json: 'gpuTypeId'; omitempty]
image_name string @[json: 'imageName'; omitempty]
min_disk int @[json: 'minDisk']
min_download int @[json: 'minDownload']
min_memory_in_gb int @[json: 'minMemoryInGb']
min_upload int @[json: 'minUpload']
min_vcpu_count int @[json: 'minVcpuCount']
name string @[json: 'name'; omitempty]
ports string @[json: 'ports'; omitempty]
stop_after string @[json: 'stopAfter'; omitempty]
support_public_ip bool @[json: 'supportPublicIp']
template_id string @[json: 'templateId'; omitempty]
terminate_after string @[json: 'terminateAfter'; omitempty]
volume_in_gb int @[json: 'volumeInGb']
volume_key string @[json: 'volumeKey'; omitempty]
volume_mount_path string @[json: 'volumeMountPath'; omitempty]
data_center_id string @[json: 'dataCenterId'; omitempty]
// Fixed JSON key: was 'cudeVersion', which the RunPod schema does not
// define, so the cuda_version setting would have been silently ignored.
// TODO(review): confirm 'cudaVersion' against the live RunPod GraphQL schema.
cuda_version string @[json: 'cudaVersion'; omitempty]
allowed_cuda_versions []string @[json: 'allowedCudaVersions']
}
// create_spot_pod rents an interruptible (spot) pod via the
// `podRentInterruptable` GraphQL mutation and returns the resulting
// PodResult, or propagates the request error.
pub fn (mut rp RunPod) create_spot_pod(input PodRentInterruptableInput) !PodResult {
result := rp.create_create_spot_pod_request(input)!
return result
}

View File

@@ -16,37 +16,28 @@ fn (mut rp RunPod) httpclient() !&httpconnection.HTTPConnection {
return http_conn return http_conn
} }
// Represents the entire mutation and input structure fn (mut rp RunPod) create_pod_find_and_deploy_on_demand_request(request PodFindAndDeployOnDemandRequest) !PodResult {
struct PodFindAndDeployOnDemand[T, V] { gql := build_query(
input T @[json: 'input'] query_type: .mutation
response V @[json: 'response'] method_name: 'podFindAndDeployOnDemand'
} request_model: request
response_model: PodResult{}
// GraphQL query structs )
struct GqlQuery { response_ := rp.make_request[GqlResponse[PodResult]](.post, '/graphql', gql)!
query string
}
// GraphQL response wrapper
struct GqlResponse[T] {
data map[string]T
}
// struct GqlResponseData[T] {
// pod_find_and_deploy_on_demand T @[json: 'podFindAndDeployOnDemand']
// }
fn (mut rp RunPod) create_pop_find_and_deploy_on_demand_request(request PodFindAndDeployOnDemandRequest) !PodFindAndDeployOnDemandResponse {
gql := build_query(BuildQueryArgs{
query_type: .mutation
method_name: 'podFindAndDeployOnDemand'
}, request, PodFindAndDeployOnDemandResponse{})
println('gql: ${gql}')
response_ := rp.make_request[GqlResponse[PodFindAndDeployOnDemandResponse]](.post,
'/graphql', gql)!
println('response: ${json.encode(response_)}')
return response_.data['podFindAndDeployOnDemand'] or { return response_.data['podFindAndDeployOnDemand'] or {
return error('Could not find podFindAndDeployOnDemand in response data: ${response_.data}') return error('Could not find podFindAndDeployOnDemand in response data: ${response_.data}')
} }
// return response.data.pod_find_and_deploy_on_demand }
fn (mut rp RunPod) create_create_spot_pod_request(input PodRentInterruptableInput) !PodResult {
gql := build_query(
query_type: .mutation
method_name: 'podRentInterruptable'
request_model: input
response_model: PodResult{}
)
response_ := rp.make_request[GqlResponse[PodResult]](.post, '/graphql', gql)!
return response_.data['podRentInterruptable'] or {
return error('Could not find podRentInterruptable in response data: ${response_.data}')
}
} }

View File

@@ -43,25 +43,6 @@ fn (ct CloudType) to_string() string {
} }
} }
// Input structure for the mutation
@[params]
pub struct PodFindAndDeployOnDemandRequest {
pub mut:
cloud_type CloudType = .all @[json: 'cloudType']
gpu_count int = 1 @[json: 'gpuCount']
volume_in_gb int = 40 @[json: 'volumeInGb']
container_disk_in_gb int = 40 @[json: 'containerDiskInGb']
min_vcpu_count int = 2 @[json: 'minVcpuCount']
min_memory_in_gb int = 15 @[json: 'minMemoryInGb']
gpu_type_id string = 'NVIDIA RTX A6000' @[json: 'gpuTypeId']
name string = 'RunPod Tensorflow' @[json: 'name']
image_name string = 'runpod/tensorflow' @[json: 'imageName']
docker_args string = '' @[json: 'dockerArgs']
ports string = '8888/http' @[json: 'ports']
volume_mount_path string = '/workspace' @[json: 'volumeMountPath']
env []EnvironmentVariableInput @[json: 'env']
}
pub struct EnvironmentVariableInput { pub struct EnvironmentVariableInput {
pub: pub:
key string key string
@@ -75,7 +56,7 @@ pub:
} }
// Response structure for the mutation // Response structure for the mutation
pub struct PodFindAndDeployOnDemandResponse { pub struct PodResult {
pub: pub:
id string @[json: 'id'] id string @[json: 'id']
image_name string @[json: 'imageName'] image_name string @[json: 'imageName']
@@ -94,59 +75,14 @@ pub fn new(api_key string) !&RunPod {
} }
} }
// create_endpoint creates a new endpoint // GraphQL query structs
pub fn (mut rp RunPod) create_on_demand_pod(pod PodFindAndDeployOnDemandRequest) !PodFindAndDeployOnDemandResponse { struct GqlQuery {
response_type := PodFindAndDeployOnDemandResponse{} query string
request_type := pod
response := rp.create_pop_find_and_deploy_on_demand_request(request_type)!
return response
} }
@[params] // GraphQL response wrapper
pub struct PodRentInterruptableInput { struct GqlResponse[T] {
pub mut: pub mut:
port int @[json: 'port'] data map[string]T
network_volume_id string @[json: 'networkVolumeId'; omitempty] errors []map[string]string
start_jupyter bool @[json: 'startJupyter']
start_ssh bool @[json: 'startSsh']
bid_per_gpu f32 @[json: 'bidPerGpu']
cloud_type CloudType @[json: 'cloudType']
container_disk_in_gb int @[json: 'containerDiskInGb']
country_code string @[json: 'countryCode'; omitempty]
docker_args string @[json: 'dockerArgs'; omitempty]
env []EnvironmentVariableInput @[json: 'env']
gpu_count int @[json: 'gpuCount']
gpu_type_id string @[json: 'gpuTypeId'; omitempty]
image_name string @[json: 'imageName'; omitempty]
min_disk int @[json: 'minDisk']
min_download int @[json: 'minDownload']
min_memory_in_gb int @[json: 'minMemoryInGb']
min_upload int @[json: 'minUpload']
min_vcpu_count int @[json: 'minVcpuCount']
name string @[json: 'name'; omitempty]
ports string @[json: 'ports'; omitempty]
stop_after string @[json: 'stopAfter'; omitempty]
support_public_ip bool @[json: 'supportPublicIp']
template_id string @[json: 'templateId'; omitempty]
terminate_after string @[json: 'terminateAfter'; omitempty]
volume_in_gb int @[json: 'volumeInGb']
volume_key string @[json: 'volumeKey'; omitempty]
volume_mount_path string @[json: 'volumeMountPath'; omitempty]
data_center_id string @[json: 'dataCenterId'; omitempty]
cuda_version string @[json: 'cudeVersion'; omitempty]
allowed_cuda_versions []string @[json: 'allowedCudaVersions']
}
pub fn (mut rp RunPod) create_spot_pod(input PodRentInterruptableInput) !PodFindAndDeployOnDemandResponse {
gql := build_query(BuildQueryArgs{
query_type: .mutation
method_name: 'podRentInterruptable'
}, input, PodFindAndDeployOnDemandResponse{})
println('gql: ${gql}')
response_ := rp.make_request[GqlResponse[PodFindAndDeployOnDemandResponse]](.post,
'/graphql', gql)!
println('response: ${response_}')
return response_.data['podRentInterruptable'] or {
return error('Could not find podRentInterruptable in response data: ${response_.data}')
}
} }

View File

@@ -86,19 +86,21 @@ pub enum QueryType {
} }
@[params] @[params]
pub struct BuildQueryArgs { pub struct BuildQueryArgs[T, R] {
pub: pub:
query_type QueryType // query or mutation query_type QueryType // query or mutation
method_name string method_name string
request_model T @[required]
response_model R @[required]
} }
fn build_query[T, R](args BuildQueryArgs, request T, response R) string { fn build_query[T, R](args BuildQueryArgs[T, R]) string {
// Convert input to JSON // Convert input to JSON
// input_json := json.encode(request) // input_json := json.encode(request)
// Build the GraphQL mutation string // Build the GraphQL mutation string
mut request_fields := get_request_fields(request) mut request_fields := get_request_fields(args.request_model)
mut response_fields := get_response_fields(response) mut response_fields := get_response_fields(args.response_model)
// Wrap the query correctly // Wrap the query correctly
query := '${args.query_type.to_string()} { ${args.method_name}(input: ${request_fields}) ${response_fields} }' query := '${args.query_type.to_string()} { ${args.method_name}(input: ${request_fields}) ${response_fields} }'
@@ -159,5 +161,10 @@ fn (mut rp RunPod) make_request[T](method HTTPMethod, path string, data string)
response = http.delete_json_generic[T](request)! response = http.delete_json_generic[T](request)!
} }
} }
if response.errors.len > 0 {
return error('Error while sending the request due to: ${response.errors[0]['message']}')
}
return response return response
} }

View File

@@ -9,7 +9,6 @@ pub fn (mut h HTTPConnection) get_json_generic[T](req Request) !T {
pub fn (mut h HTTPConnection) post_json_generic[T](req Request) !T { pub fn (mut h HTTPConnection) post_json_generic[T](req Request) !T {
data := h.post_json_str(req)! data := h.post_json_str(req)!
println('data: ${data}')
return json.decode(T, data) or { return error("couldn't decode json for ${req} for ${data}") } return json.decode(T, data) or { return error("couldn't decode json for ${req} for ${data}") }
} }