From 309496ef5d95ca51765134b6958924f149aa1361 Mon Sep 17 00:00:00 2001
From: Mahmoud Emad
Date: Tue, 21 Jan 2025 10:50:07 +0200
Subject: [PATCH] feat: Add RunPod client

- Added a new RunPod client to the project.
- Updated the example to use the new client.
- Improved error handling in the client.
- Refactored the code for better readability.

Co-authored-by: mariobassem12
---
 examples/develop/runpod/runpod_example.vsh |  2 +-
 lib/clients/runpod/client.v                | 64 +++++++++++++++
 lib/clients/runpod/runpod_http.v           | 51 +++++-------
 lib/clients/runpod/runpod_model.v          | 80 ++-----------------
 lib/clients/runpod/utils.v                 | 19 +++--
 .../connection_methods_generic.v           |  1 -
 6 files changed, 107 insertions(+), 110 deletions(-)
 create mode 100644 lib/clients/runpod/client.v

diff --git a/examples/develop/runpod/runpod_example.vsh b/examples/develop/runpod/runpod_example.vsh
index b7d44f9c..ce7286c1 100755
--- a/examples/develop/runpod/runpod_example.vsh
+++ b/examples/develop/runpod/runpod_example.vsh
@@ -35,7 +35,7 @@ spot_pod_resp := rp.create_spot_pod(
 	min_memory_in_gb: 15
 	gpu_type_id: 'NVIDIA RTX A6000'
 	name: 'RunPod Pytorch'
-	image_name: 'runpod/pytorch'
+	image_name: 'runpod/pytorch'
 	docker_args: ''
 	ports: '8888/http'
 	volume_mount_path: '/workspace'
diff --git a/lib/clients/runpod/client.v b/lib/clients/runpod/client.v
new file mode 100644
index 00000000..770b2fd4
--- /dev/null
+++ b/lib/clients/runpod/client.v
@@ -0,0 +1,64 @@
+module runpod
+
+// Input structure for the mutation
+@[params]
+pub struct PodFindAndDeployOnDemandRequest {
+pub mut:
+	cloud_type           CloudType = .all @[json: 'cloudType']
+	gpu_count            int       = 1 @[json: 'gpuCount']
+	volume_in_gb         int       = 40 @[json: 'volumeInGb']
+	container_disk_in_gb int       = 40 @[json: 'containerDiskInGb']
+	min_vcpu_count       int       = 2 @[json: 'minVcpuCount']
+	min_memory_in_gb     int       = 15 @[json: 'minMemoryInGb']
+	gpu_type_id          string    = 'NVIDIA RTX A6000' @[json: 'gpuTypeId']
+	name                 string    = 'RunPod Tensorflow' @[json: 'name']
+	image_name           string    = 'runpod/tensorflow' @[json: 'imageName']
+	docker_args          string    = '' @[json: 'dockerArgs']
+	ports                string    = '8888/http' @[json: 'ports']
+	volume_mount_path    string    = '/workspace' @[json: 'volumeMountPath']
+	env                  []EnvironmentVariableInput @[json: 'env']
+}
+
+// create_on_demand_pod deploys a new on-demand pod
+pub fn (mut rp RunPod) create_on_demand_pod(input PodFindAndDeployOnDemandRequest) !PodResult {
+	return rp.create_pod_find_and_deploy_on_demand_request(input)!
+}
+
+@[params]
+pub struct PodRentInterruptableInput {
+pub mut:
+	port                  int       @[json: 'port']
+	network_volume_id     string    @[json: 'networkVolumeId'; omitempty]
+	start_jupyter         bool      @[json: 'startJupyter']
+	start_ssh             bool      @[json: 'startSsh']
+	bid_per_gpu           f32       @[json: 'bidPerGpu']
+	cloud_type            CloudType @[json: 'cloudType']
+	container_disk_in_gb  int       @[json: 'containerDiskInGb']
+	country_code          string    @[json: 'countryCode'; omitempty]
+	docker_args           string    @[json: 'dockerArgs'; omitempty]
+	env                   []EnvironmentVariableInput @[json: 'env']
+	gpu_count             int       @[json: 'gpuCount']
+	gpu_type_id           string    @[json: 'gpuTypeId'; omitempty]
+	image_name            string    @[json: 'imageName'; omitempty]
+	min_disk              int       @[json: 'minDisk']
+	min_download          int       @[json: 'minDownload']
+	min_memory_in_gb      int       @[json: 'minMemoryInGb']
+	min_upload            int       @[json: 'minUpload']
+	min_vcpu_count        int       @[json: 'minVcpuCount']
+	name                  string    @[json: 'name'; omitempty]
+	ports                 string    @[json: 'ports'; omitempty]
+	stop_after            string    @[json: 'stopAfter'; omitempty]
+	support_public_ip     bool      @[json: 'supportPublicIp']
+	template_id           string    @[json: 'templateId'; omitempty]
+	terminate_after       string    @[json: 'terminateAfter'; omitempty]
+	volume_in_gb          int       @[json: 'volumeInGb']
+	volume_key            string    @[json: 'volumeKey'; omitempty]
+	volume_mount_path     string    @[json: 'volumeMountPath'; omitempty]
+	data_center_id        string    @[json: 'dataCenterId'; omitempty]
+	cuda_version          string    @[json: 'cudaVersion'; omitempty]
+	allowed_cuda_versions []string  @[json: 'allowedCudaVersions']
+}
+
+pub fn (mut rp RunPod) create_spot_pod(input PodRentInterruptableInput) !PodResult {
+	return rp.create_create_spot_pod_request(input)!
+}
diff --git a/lib/clients/runpod/runpod_http.v b/lib/clients/runpod/runpod_http.v
index 2fc7e698..6f2cdea1 100644
--- a/lib/clients/runpod/runpod_http.v
+++ b/lib/clients/runpod/runpod_http.v
@@ -16,37 +16,28 @@ fn (mut rp RunPod) httpclient() !&httpconnection.HTTPConnection {
 	return http_conn
 }
 
-// Represents the entire mutation and input structure
-struct PodFindAndDeployOnDemand[T, V] {
-	input    T @[json: 'input']
-	response V @[json: 'response']
-}
-
-// GraphQL query structs
-struct GqlQuery {
-	query string
-}
-
-// GraphQL response wrapper
-struct GqlResponse[T] {
-	data map[string]T
-}
-
-// struct GqlResponseData[T] {
-// 	pod_find_and_deploy_on_demand T @[json: 'podFindAndDeployOnDemand']
-// }
-
-fn (mut rp RunPod) create_pop_find_and_deploy_on_demand_request(request PodFindAndDeployOnDemandRequest) !PodFindAndDeployOnDemandResponse {
-	gql := build_query(BuildQueryArgs{
-		query_type:  .mutation
-		method_name: 'podFindAndDeployOnDemand'
-	}, request, PodFindAndDeployOnDemandResponse{})
-	println('gql: ${gql}')
-	response_ := rp.make_request[GqlResponse[PodFindAndDeployOnDemandResponse]](.post,
-		'/graphql', gql)!
-	println('response: ${json.encode(response_)}')
+fn (mut rp RunPod) create_pod_find_and_deploy_on_demand_request(request PodFindAndDeployOnDemandRequest) !PodResult {
+	gql := build_query(
+		query_type:     .mutation
+		method_name:    'podFindAndDeployOnDemand'
+		request_model:  request
+		response_model: PodResult{}
+	)
+	response_ := rp.make_request[GqlResponse[PodResult]](.post, '/graphql', gql)!
 	return response_.data['podFindAndDeployOnDemand'] or {
 		return error('Could not find podFindAndDeployOnDemand in response data: ${response_.data}')
 	}
-	// return response.data.pod_find_and_deploy_on_demand
+}
+
+fn (mut rp RunPod) create_create_spot_pod_request(input PodRentInterruptableInput) !PodResult {
+	gql := build_query(
+		query_type:     .mutation
+		method_name:    'podRentInterruptable'
+		request_model:  input
+		response_model: PodResult{}
+	)
+	response_ := rp.make_request[GqlResponse[PodResult]](.post, '/graphql', gql)!
+	return response_.data['podRentInterruptable'] or {
+		return error('Could not find podRentInterruptable in response data: ${response_.data}')
+	}
 }
diff --git a/lib/clients/runpod/runpod_model.v b/lib/clients/runpod/runpod_model.v
index af3b03ba..9aebdd36 100644
--- a/lib/clients/runpod/runpod_model.v
+++ b/lib/clients/runpod/runpod_model.v
@@ -43,25 +43,6 @@ fn (ct CloudType) to_string() string {
 	}
 }
 
-// Input structure for the mutation
-@[params]
-pub struct PodFindAndDeployOnDemandRequest {
-pub mut:
-	cloud_type           CloudType = .all @[json: 'cloudType']
-	gpu_count            int       = 1 @[json: 'gpuCount']
-	volume_in_gb         int       = 40 @[json: 'volumeInGb']
-	container_disk_in_gb int       = 40 @[json: 'containerDiskInGb']
-	min_vcpu_count       int       = 2 @[json: 'minVcpuCount']
-	min_memory_in_gb     int       = 15 @[json: 'minMemoryInGb']
-	gpu_type_id          string    = 'NVIDIA RTX A6000' @[json: 'gpuTypeId']
-	name                 string    = 'RunPod Tensorflow' @[json: 'name']
-	image_name           string    = 'runpod/tensorflow' @[json: 'imageName']
-	docker_args          string    = '' @[json: 'dockerArgs']
-	ports                string    = '8888/http' @[json: 'ports']
-	volume_mount_path    string    = '/workspace' @[json: 'volumeMountPath']
-	env                  []EnvironmentVariableInput @[json: 'env']
-}
-
 pub struct EnvironmentVariableInput {
 pub:
 	key   string
@@ -75,7 +56,7 @@ pub:
 }
 
 // Response structure for the mutation
-pub struct PodFindAndDeployOnDemandResponse {
+pub struct PodResult {
 pub:
 	id         string @[json: 'id']
 	image_name string @[json: 'imageName']
@@ -94,59 +75,14 @@ pub fn new(api_key string) !&RunPod {
 	}
 }
 
-// create_endpoint creates a new endpoint
-pub fn (mut rp RunPod) create_on_demand_pod(pod PodFindAndDeployOnDemandRequest) !PodFindAndDeployOnDemandResponse {
-	response_type := PodFindAndDeployOnDemandResponse{}
-	request_type := pod
-	response := rp.create_pop_find_and_deploy_on_demand_request(request_type)!
-	return response
+// GraphQL query structs
+struct GqlQuery {
+	query string
 }
 
-@[params]
-pub struct PodRentInterruptableInput {
+// GraphQL response wrapper
+struct GqlResponse[T] {
 pub mut:
-	port                  int       @[json: 'port']
-	network_volume_id     string    @[json: 'networkVolumeId'; omitempty]
-	start_jupyter         bool      @[json: 'startJupyter']
-	start_ssh             bool      @[json: 'startSsh']
-	bid_per_gpu           f32       @[json: 'bidPerGpu']
-	cloud_type            CloudType @[json: 'cloudType']
-	container_disk_in_gb  int       @[json: 'containerDiskInGb']
-	country_code          string    @[json: 'countryCode'; omitempty]
-	docker_args           string    @[json: 'dockerArgs'; omitempty]
-	env                   []EnvironmentVariableInput @[json: 'env']
-	gpu_count             int       @[json: 'gpuCount']
-	gpu_type_id           string    @[json: 'gpuTypeId'; omitempty]
-	image_name            string    @[json: 'imageName'; omitempty]
-	min_disk              int       @[json: 'minDisk']
-	min_download          int       @[json: 'minDownload']
-	min_memory_in_gb      int       @[json: 'minMemoryInGb']
-	min_upload            int       @[json: 'minUpload']
-	min_vcpu_count        int       @[json: 'minVcpuCount']
-	name                  string    @[json: 'name'; omitempty]
-	ports                 string    @[json: 'ports'; omitempty]
-	stop_after            string    @[json: 'stopAfter'; omitempty]
-	support_public_ip     bool      @[json: 'supportPublicIp']
-	template_id           string    @[json: 'templateId'; omitempty]
-	terminate_after       string    @[json: 'terminateAfter'; omitempty]
-	volume_in_gb          int       @[json: 'volumeInGb']
-	volume_key            string    @[json: 'volumeKey'; omitempty]
-	volume_mount_path     string    @[json: 'volumeMountPath'; omitempty]
-	data_center_id        string    @[json: 'dataCenterId'; omitempty]
-	cuda_version          string    @[json: 'cudeVersion'; omitempty]
-	allowed_cuda_versions []string  @[json: 'allowedCudaVersions']
-}
-
-pub fn (mut rp RunPod) create_spot_pod(input PodRentInterruptableInput) !PodFindAndDeployOnDemandResponse {
-	gql := build_query(BuildQueryArgs{
-		query_type:  .mutation
-		method_name: 'podRentInterruptable'
-	}, input, PodFindAndDeployOnDemandResponse{})
-	println('gql: ${gql}')
-	response_ := rp.make_request[GqlResponse[PodFindAndDeployOnDemandResponse]](.post,
-		'/graphql', gql)!
-	println('response: ${response_}')
-	return response_.data['podRentInterruptable'] or {
-		return error('Could not find podRentInterruptable in response data: ${response_.data}')
-	}
+	data   map[string]T
+	errors []map[string]string
 }
diff --git a/lib/clients/runpod/utils.v b/lib/clients/runpod/utils.v
index cf6a7b2e..88d8b4ad 100644
--- a/lib/clients/runpod/utils.v
+++ b/lib/clients/runpod/utils.v
@@ -86,19 +86,21 @@ pub enum QueryType {
 }
 
 @[params]
-pub struct BuildQueryArgs {
+pub struct BuildQueryArgs[T, R] {
 pub:
-	query_type  QueryType // query or mutation
-	method_name string
+	query_type     QueryType // query or mutation
+	method_name    string
+	request_model  T @[required]
+	response_model R @[required]
 }
 
-fn build_query[T, R](args BuildQueryArgs, request T, response R) string {
+fn build_query[T, R](args BuildQueryArgs[T, R]) string {
 	// Convert input to JSON
 	// input_json := json.encode(request)
 	// Build the GraphQL mutation string
-	mut request_fields := get_request_fields(request)
-	mut response_fields := get_response_fields(response)
+	mut request_fields := get_request_fields(args.request_model)
+	mut response_fields := get_response_fields(args.response_model)
 
 	// Wrap the query correctly
 	query := '${args.query_type.to_string()} { ${args.method_name}(input: ${request_fields}) ${response_fields} }'
@@ -159,5 +161,10 @@ fn (mut rp RunPod) make_request[T](method HTTPMethod, path string, data string)
 			response = http.delete_json_generic[T](request)!
 		}
 	}
+
+	if response.errors.len > 0 {
+		return error('Error while sending the request: ${response.errors[0]['message']}')
+	}
+
 	return response
 }
diff --git a/lib/core/httpconnection/connection_methods_generic.v b/lib/core/httpconnection/connection_methods_generic.v
index 12aa1191..897c8039 100644
--- a/lib/core/httpconnection/connection_methods_generic.v
+++ b/lib/core/httpconnection/connection_methods_generic.v
@@ -9,7 +9,6 @@ pub fn (mut h HTTPConnection) get_json_generic[T](req Request) !T {
 
 pub fn (mut h HTTPConnection) post_json_generic[T](req Request) !T {
 	data := h.post_json_str(req)!
-	println('data: ${data}')
 	return json.decode(T, data) or { return error("couldn't decode json for ${req} for ${data}") }
 }
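
Usage sketch (not part of the patch): a minimal .vsh-style script exercising the client API added above. The module import path, the API key placeholder, and the chosen image/bid values are assumptions based on the repo layout under lib/clients/runpod, not taken from this diff; adjust them to the actual module tree.

	// usage_sketch.vsh -- illustrative only; the import path below is assumed, not confirmed by this patch
	import freeflowuniverse.herolib.clients.runpod

	mut rp := runpod.new('YOUR_RUNPOD_API_KEY') or { panic(err) }

	// On-demand pod: every field of PodFindAndDeployOnDemandRequest has a default,
	// so only the overrides need to be passed.
	pod := rp.create_on_demand_pod(
		name:        'RunPod Pytorch'
		image_name:  'runpod/pytorch'
		gpu_type_id: 'NVIDIA RTX A6000'
	) or { panic(err) }
	println('on-demand pod id: ${pod.id}')

	// Spot (interruptable) pod: PodRentInterruptableInput has no defaults,
	// so the bid and sizing fields are set explicitly here (values are illustrative).
	spot := rp.create_spot_pod(
		name:                 'RunPod Pytorch spot'
		image_name:           'runpod/pytorch'
		gpu_type_id:          'NVIDIA RTX A6000'
		cloud_type:           .all
		bid_per_gpu:          0.2
		gpu_count:            1
		volume_in_gb:         40
		container_disk_in_gb: 40
		min_vcpu_count:       2
		min_memory_in_gb:     15
		ports:                '8888/http'
		volume_mount_path:    '/workspace'
	) or { panic(err) }
	println('spot pod id: ${spot.id}')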