ExAws.MachineLearning.Core

Amazon Machine Learning

Definition of the public APIs exposed by Amazon Machine Learning

Source

Summary

create_batch_prediction!(client, input)

Same as create_batch_prediction/2 but raise on error

create_batch_prediction(client, input)

CreateBatchPrediction

create_data_source_from_rds!(client, input)

Same as create_data_source_from_rds/2 but raise on error

create_data_source_from_rds(client, input)

CreateDataSourceFromRDS

create_data_source_from_redshift!(client, input)

Same as create_data_source_from_redshift/2 but raise on error

create_data_source_from_redshift(client, input)

CreateDataSourceFromRedshift

create_data_source_from_s3!(client, input)

Same as create_data_source_from_s3/2 but raise on error

create_data_source_from_s3(client, input)

CreateDataSourceFromS3

create_evaluation!(client, input)

Same as create_evaluation/2 but raise on error

create_evaluation(client, input)

CreateEvaluation

create_ml_model!(client, input)

Same as create_ml_model/2 but raise on error

create_ml_model(client, input)

CreateMLModel

create_realtime_endpoint!(client, input)

Same as create_realtime_endpoint/2 but raise on error

create_realtime_endpoint(client, input)

CreateRealtimeEndpoint

delete_batch_prediction!(client, input)

Same as delete_batch_prediction/2 but raise on error

delete_batch_prediction(client, input)

DeleteBatchPrediction

delete_data_source!(client, input)

Same as delete_data_source/2 but raise on error

delete_data_source(client, input)

DeleteDataSource

delete_evaluation!(client, input)

Same as delete_evaluation/2 but raise on error

delete_evaluation(client, input)

DeleteEvaluation

delete_ml_model!(client, input)

Same as delete_ml_model/2 but raise on error

delete_ml_model(client, input)

DeleteMLModel

delete_realtime_endpoint!(client, input)

Same as delete_realtime_endpoint/2 but raise on error

delete_realtime_endpoint(client, input)

DeleteRealtimeEndpoint

describe_batch_predictions!(client, input)

Same as describe_batch_predictions/2 but raise on error

describe_batch_predictions(client, input)

DescribeBatchPredictions

describe_data_sources!(client, input)

Same as describe_data_sources/2 but raise on error

describe_data_sources(client, input)

DescribeDataSources

describe_evaluations!(client, input)

Same as describe_evaluations/2 but raise on error

describe_evaluations(client, input)

DescribeEvaluations

describe_ml_models!(client, input)

Same as describe_ml_models/2 but raise on error

describe_ml_models(client, input)

DescribeMLModels

get_batch_prediction!(client, input)

Same as get_batch_prediction/2 but raise on error

get_batch_prediction(client, input)

GetBatchPrediction

get_data_source!(client, input)

Same as get_data_source/2 but raise on error

get_data_source(client, input)

GetDataSource

get_evaluation!(client, input)

Same as get_evaluation/2 but raise on error

get_evaluation(client, input)

GetEvaluation

get_ml_model!(client, input)

Same as get_ml_model/2 but raise on error

get_ml_model(client, input)

GetMLModel

predict!(client, input)

Same as predict/2 but raise on error

predict(client, input)

Predict

update_batch_prediction!(client, input)

Same as update_batch_prediction/2 but raise on error

update_batch_prediction(client, input)

UpdateBatchPrediction

update_data_source!(client, input)

Same as update_data_source/2 but raise on error

update_data_source(client, input)

UpdateDataSource

update_evaluation!(client, input)

Same as update_evaluation/2 but raise on error

update_evaluation(client, input)

UpdateEvaluation

update_ml_model!(client, input)

Same as update_ml_model/2 but raise on error

update_ml_model(client, input)

UpdateMLModel

Types

rds_metadata :: [data_pipeline_id: edp_pipeline_id, database: rds_database, database_user_name: rds_database_username, resource_role: edp_resource_role, select_sql_query: rds_select_sql_query, service_role: edp_service_role]

update_batch_prediction_input :: [batch_prediction_id: entity_id, batch_prediction_name: entity_name]

recipe :: binary

float_label :: float

get_batch_prediction_output :: [batch_prediction_data_source_id: entity_id, batch_prediction_id: entity_id, created_at: epoch_time, created_by_iam_user: aws_user_arn, input_data_location_s3: s3_url, last_updated_at: epoch_time, log_uri: presigned_s3_url, ml_model_id: entity_id, message: message, name: entity_name, output_uri: s3_url, status: entity_status]

entity_id :: binary

get_ml_model_output :: [created_at: epoch_time, created_by_iam_user: aws_user_arn, endpoint_info: realtime_endpoint_info, input_data_location_s3: s3_url, last_updated_at: epoch_time, log_uri: presigned_s3_url, ml_model_id: entity_id, ml_model_type: ml_model_type, message: message, name: ml_model_name, recipe: recipe, schema: data_schema, score_threshold: score_threshold, score_threshold_last_updated_at: epoch_time, size_in_bytes: long_type, status: entity_status, training_data_source_id: entity_id, training_parameters: training_parameters]

redshift_metadata :: [database_user_name: redshift_database_username, redshift_database: redshift_database, select_sql_query: redshift_select_sql_query]

delete_realtime_endpoint_output :: [ml_model_id: entity_id, realtime_endpoint_info: realtime_endpoint_info]

edp_resource_role :: binary

create_batch_prediction_input :: [batch_prediction_data_source_id: entity_id, batch_prediction_id: entity_id, batch_prediction_name: entity_name, ml_model_id: entity_id, output_uri: s3_url]

get_data_source_output :: [compute_statistics: compute_statistics, created_at: epoch_time, created_by_iam_user: aws_user_arn, data_location_s3: s3_url, data_rearrangement: data_rearrangement, data_size_in_bytes: long_type, data_source_id: entity_id, data_source_schema: data_schema, last_updated_at: epoch_time, log_uri: presigned_s3_url, message: message, name: entity_name, number_of_files: long_type, rds_metadata: rds_metadata, redshift_metadata: redshift_metadata, role_arn: role_arn, status: entity_status]

update_evaluation_input :: [evaluation_id: entity_id, evaluation_name: entity_name]

s3_data_spec :: [data_location_s3: s3_url, data_rearrangement: data_rearrangement, data_schema: data_schema, data_schema_location_s3: s3_url]

error_message :: binary

get_evaluation_output :: [created_at: epoch_time, created_by_iam_user: aws_user_arn, evaluation_data_source_id: entity_id, evaluation_id: entity_id, input_data_location_s3: s3_url, last_updated_at: epoch_time, log_uri: presigned_s3_url, ml_model_id: entity_id, message: message, name: entity_name, performance_metrics: performance_metrics, status: entity_status]

ml_model_type :: binary

aws_user_arn :: binary

sort_order :: binary

details_attributes :: binary

entity_status :: binary

predict_output :: [{:prediction, prediction}]

update_ml_model_input :: [ml_model_id: entity_id, ml_model_name: entity_name, score_threshold: score_threshold]

delete_batch_prediction_input :: [{:batch_prediction_id, entity_id}]

delete_batch_prediction_output :: [{:batch_prediction_id, entity_id}]

score_value :: float

variable_value :: binary

rds_data_spec :: [data_rearrangement: data_rearrangement, data_schema: data_schema, data_schema_uri: s3_url, database_credentials: rds_database_credentials, database_information: rds_database, resource_role: edp_resource_role, s3_staging_location: s3_url, security_group_ids: edp_security_group_ids, select_sql_query: rds_select_sql_query, service_role: edp_service_role, subnet_id: edp_subnet_id]

create_realtime_endpoint_output :: [ml_model_id: entity_id, realtime_endpoint_info: realtime_endpoint_info]

edp_service_role :: binary

create_evaluation_input :: [evaluation_data_source_id: entity_id, evaluation_id: entity_id, evaluation_name: entity_name, ml_model_id: entity_id]

role_arn :: binary

data_schema :: binary

prediction :: [details: details_map, predicted_label: label, predicted_scores: score_value_per_label_map, predicted_value: float_label]

details_value :: binary

update_data_source_output :: [{:data_source_id, entity_id}]

error_code :: integer

delete_data_source_output :: [{:data_source_id, entity_id}]

message :: binary

evaluation :: [created_at: epoch_time, created_by_iam_user: aws_user_arn, evaluation_data_source_id: entity_id, evaluation_id: entity_id, input_data_location_s3: s3_url, last_updated_at: epoch_time, ml_model_id: entity_id, message: message, name: entity_name, performance_metrics: performance_metrics, status: entity_status]

update_evaluation_output :: [{:evaluation_id, entity_id}]

integer_type :: integer

presigned_s3_url :: binary

edp_subnet_id :: binary

delete_data_source_input :: [{:data_source_id, entity_id}]

predict_input :: [ml_model_id: entity_id, predict_endpoint: vip_url, record: machine_learning_record]

vip_url :: binary

entity_name :: binary

ml_model_name :: binary

rds_database_name :: binary

edp_pipeline_id :: binary

batch_prediction :: [batch_prediction_data_source_id: entity_id, batch_prediction_id: entity_id, created_at: epoch_time, created_by_iam_user: aws_user_arn, input_data_location_s3: s3_url, last_updated_at: epoch_time, ml_model_id: entity_id, message: message, name: entity_name, output_uri: s3_url, status: entity_status]

create_ml_model_output :: [{:ml_model_id, entity_id}]

rds_database :: [database_name: rds_database_name, instance_identifier: rds_instance_identifier]

delete_evaluation_output :: [{:evaluation_id, entity_id}]

redshift_data_spec :: [data_rearrangement: data_rearrangement, data_schema: data_schema, data_schema_uri: s3_url, database_credentials: redshift_database_credentials, database_information: redshift_database, s3_staging_location: s3_url, select_sql_query: redshift_select_sql_query]

data_rearrangement :: binary

delete_ml_model_input :: [{:ml_model_id, entity_id}]

get_ml_model_input :: [ml_model_id: entity_id, verbose: verbose]

comparator_value :: binary

get_batch_prediction_input :: [{:batch_prediction_id, entity_id}]

algorithm :: binary

epoch_time :: integer

create_data_source_from_s3_input :: [compute_statistics: compute_statistics, data_source_id: entity_id, data_source_name: entity_name, data_spec: s3_data_spec]

update_ml_model_output :: [{:ml_model_id, entity_id}]

ml_model :: [algorithm: algorithm, created_at: epoch_time, created_by_iam_user: aws_user_arn, endpoint_info: realtime_endpoint_info, input_data_location_s3: s3_url, last_updated_at: epoch_time, ml_model_id: entity_id, ml_model_type: ml_model_type, message: message, name: ml_model_name, score_threshold: score_threshold, score_threshold_last_updated_at: epoch_time, size_in_bytes: long_type, status: entity_status, training_data_source_id: entity_id, training_parameters: training_parameters]

create_evaluation_output :: [{:evaluation_id, entity_id}]

get_evaluation_input :: [{:evaluation_id, entity_id}]

create_data_source_from_rds_input :: [compute_statistics: compute_statistics, data_source_id: entity_id, data_source_name: entity_name, rds_data: rds_data_spec, role_arn: role_arn]

update_data_source_input :: [data_source_id: entity_id, data_source_name: entity_name]

delete_ml_model_output :: [{:ml_model_id, entity_id}]

realtime_endpoint_info :: [created_at: epoch_time, endpoint_status: realtime_endpoint_status, endpoint_url: vip_url, peak_requests_per_second: integer_type]

describe_ml_models_output :: [next_token: string_type, results: ml_models]

update_batch_prediction_output :: [{:batch_prediction_id, entity_id}]

score_threshold :: float

compute_statistics :: boolean

create_batch_prediction_output :: [{:batch_prediction_id, entity_id}]

redshift_database :: [cluster_identifier: redshift_cluster_identifier, database_name: redshift_database_name]

delete_evaluation_input :: [{:evaluation_id, entity_id}]

label :: binary

verbose :: boolean

long_type :: integer

get_data_source_input :: [data_source_id: entity_id, verbose: verbose]

variable_name :: binary

page_limit :: integer

s3_url :: binary

create_ml_model_input :: [ml_model_id: entity_id, ml_model_name: entity_name, ml_model_type: ml_model_type, parameters: training_parameters, recipe: recipe, recipe_uri: s3_url, training_data_source_id: entity_id]

data_source :: [compute_statistics: compute_statistics, created_at: epoch_time, created_by_iam_user: aws_user_arn, data_location_s3: s3_url, data_rearrangement: data_rearrangement, data_size_in_bytes: long_type, data_source_id: entity_id, last_updated_at: epoch_time, message: message, name: entity_name, number_of_files: long_type, rds_metadata: rds_metadata, redshift_metadata: redshift_metadata, role_arn: role_arn, status: entity_status]

string_type :: binary

create_data_source_from_redshift_input :: [compute_statistics: compute_statistics, data_source_id: entity_id, data_source_name: entity_name, data_spec: redshift_data_spec, role_arn: role_arn]

Functions

create_batch_prediction(client, input)

Specs:

  • create_batch_prediction(client :: ExAws.MachineLearning.t, input :: create_batch_prediction_input) :: ExAws.Request.JSON.response_t

CreateBatchPrediction

Generates predictions for a group of observations. The observations to process exist in one or more data files referenced by a DataSource. This operation creates a new BatchPrediction, and uses an MLModel and the data files referenced by the DataSource as information sources.

CreateBatchPrediction is an asynchronous operation. In response to CreateBatchPrediction, Amazon Machine Learning (Amazon ML) immediately returns and sets the BatchPrediction status to PENDING. After the BatchPrediction completes, Amazon ML sets the status to COMPLETED.

You can poll for status updates by using the GetBatchPrediction operation and checking the Status parameter of the result. After the COMPLETED status appears, the results are available in the location specified by the OutputUri parameter.
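
A minimal sketch of this flow, assuming `client` is an ExAws.MachineLearning.t struct configured elsewhere and that every ID, name, and bucket below is a placeholder:

    # `client` is an ExAws.MachineLearning.t built per your ExAws configuration (assumed).
    input = [
      batch_prediction_id: "bp-nightly-001",
      batch_prediction_name: "Nightly scoring run",
      ml_model_id: "ml-churn-001",
      batch_prediction_data_source_id: "ds-observations-001",
      output_uri: "s3://my-bucket/batch-output/"
    ]

    {:ok, _} = ExAws.MachineLearning.Core.create_batch_prediction(client, input)

    # The call returns while the job is still PENDING; poll GetBatchPrediction until
    # the reported status is COMPLETED, then read the results from output_uri.
    {:ok, _batch_prediction} =
      ExAws.MachineLearning.Core.get_batch_prediction(client, batch_prediction_id: "bp-nightly-001")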

Source
create_batch_prediction!(client, input)

Specs:

  • create_batch_prediction!(client :: ExAws.MachineLearning.t, input :: create_batch_prediction_input) :: ExAws.Request.JSON.success_t | no_return

Same as create_batch_prediction/2 but raise on error.

Source
create_data_source_from_rds(client, input)

Specs:

  • create_data_source_from_rds(client :: ExAws.MachineLearning.t, input :: create_data_source_from_rds_input) :: ExAws.Request.JSON.response_t

CreateDataSourceFromRDS

Creates a DataSource object from an Amazon Relational Database Service (Amazon RDS). A DataSource references data that can be used to perform CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.

CreateDataSourceFromRDS is an asynchronous operation. In response to CreateDataSourceFromRDS, Amazon Machine Learning (Amazon ML) immediately returns and sets the DataSource status to PENDING. After the DataSource is created and ready for use, Amazon ML sets the Status parameter to COMPLETED. A DataSource in COMPLETED or PENDING status can only be used to perform CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.

If Amazon ML cannot accept the input source, it sets the Status parameter to FAILED and includes an error message in the Message attribute of the GetDataSource operation response.

Source
create_data_source_from_rds!(client, input)

Specs:

  • create_data_source_from_rds!(client :: ExAws.MachineLearning.t, input :: create_data_source_from_rds_input) :: ExAws.Request.JSON.success_t | no_return

Same as create_data_source_from_rds/2 but raise on error.

Source
create_data_source_from_redshift(client, input)

Specs:

  • create_data_source_from_redshift(client :: ExAws.MachineLearning.t, input :: create_data_source_from_redshift_input) :: ExAws.Request.JSON.response_t

CreateDataSourceFromRedshift

Creates a DataSource from Amazon Redshift. A DataSource references data that can be used to perform CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.

CreateDataSourceFromRedshift is an asynchronous operation. In response to CreateDataSourceFromRedshift, Amazon Machine Learning (Amazon ML) immediately returns and sets the DataSource status to PENDING. After the DataSource is created and ready for use, Amazon ML sets the Status parameter to COMPLETED. A DataSource in COMPLETED or PENDING status can only be used to perform CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.

If Amazon ML cannot accept the input source, it sets the Status parameter to FAILED and includes an error message in the Message attribute of the GetDataSource operation response.

The observations should exist in the database hosted on an Amazon Redshift cluster and should be specified by a SelectSqlQuery. Amazon ML executes an Unload command in Amazon Redshift to transfer the result set of the SelectSqlQuery to the S3StagingLocation.

After the DataSource is created, it’s ready for use in evaluations and batch predictions. If you plan to use the DataSource to train an MLModel, the DataSource requires another item: a recipe. A recipe describes the observation variables that participate in training an MLModel. A recipe describes how each input variable will be used in training. Will the variable be included or excluded from training? Will the variable be manipulated, for example, combined with another variable or split apart into word combinations? The recipe provides answers to these questions. For more information, see the Amazon Machine Learning Developer Guide.
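
A minimal sketch, assuming `client` is an ExAws.MachineLearning.t struct configured elsewhere; the cluster, credentials, SQL, role ARN, and S3 locations are placeholders, and the credential field names are assumed (they are not listed in the Types section):

    # `client` is an ExAws.MachineLearning.t built per your ExAws configuration (assumed).
    data_spec = [
      database_information: [cluster_identifier: "my-cluster", database_name: "dev"],
      database_credentials: [username: "awsuser", password: "example"], # field names assumed
      select_sql_query: "SELECT * FROM observations",
      s3_staging_location: "s3://my-bucket/staging/",
      data_schema_uri: "s3://my-bucket/observations.csv.schema"
    ]

    {:ok, _} =
      ExAws.MachineLearning.Core.create_data_source_from_redshift(client,
        data_source_id: "ds-redshift-001",
        data_source_name: "Redshift observations",
        data_spec: data_spec,
        role_arn: "arn:aws:iam::123456789012:role/AmazonMLRedshiftRole",
        compute_statistics: true # required later if this DataSource will train an MLModel
      )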

Source
create_data_source_from_redshift!(client, input)

Specs:

  • create_data_source_from_redshift!(client :: ExAws.MachineLearning.t, input :: create_data_source_from_redshift_input) :: ExAws.Request.JSON.success_t | no_return

Same as create_data_source_from_redshift/2 but raise on error.

Source
create_data_source_from_s3(client, input)

Specs:

  • create_data_source_from_s3(client :: ExAws.MachineLearning.t, input :: create_data_source_from_s3_input) :: ExAws.Request.JSON.response_t

CreateDataSourceFromS3

Creates a DataSource object. A DataSource references data that can be used to perform CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.

CreateDataSourceFromS3 is an asynchronous operation. In response to CreateDataSourceFromS3, Amazon Machine Learning (Amazon ML) immediately returns and sets the DataSource status to PENDING. After the DataSource is created and ready for use, Amazon ML sets the Status parameter to COMPLETED. A DataSource in COMPLETED or PENDING status can only be used to perform CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.

If Amazon ML cannot accept the input source, it sets the Status parameter to FAILED and includes an error message in the Message attribute of the GetDataSource operation response.

The observation data used in a DataSource should be ready to use; that is, it should have a consistent structure, and missing data values should be kept to a minimum. The observation data must reside in one or more CSV files in an Amazon Simple Storage Service (Amazon S3) bucket, along with a schema that describes the data items by name and type. The same schema must be used for all of the data files referenced by the DataSource.

After the DataSource has been created, it’s ready to use in evaluations and batch predictions. If you plan to use the DataSource to train an MLModel, the DataSource requires another item: a recipe. A recipe describes the observation variables that participate in training an MLModel. A recipe describes how each input variable will be used in training. Will the variable be included or excluded from training? Will the variable be manipulated, for example, combined with another variable, or split apart into word combinations? The recipe provides answers to these questions. For more information, see the Amazon Machine Learning Developer Guide.
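
A minimal sketch, assuming `client` is an ExAws.MachineLearning.t struct configured elsewhere and that the CSV file and schema already exist at the (placeholder) S3 locations:

    # `client` is an ExAws.MachineLearning.t built per your ExAws configuration (assumed).
    {:ok, _} =
      ExAws.MachineLearning.Core.create_data_source_from_s3(client,
        data_source_id: "ds-s3-001",
        data_source_name: "S3 observations",
        compute_statistics: true,
        data_spec: [
          data_location_s3: "s3://my-bucket/observations.csv",
          data_schema_location_s3: "s3://my-bucket/observations.csv.schema"
        ]
      )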

Source
create_data_source_from_s3!(client, input)

Specs:

  • create_data_source_from_s3!(client :: ExAws.MachineLearning.t, input :: create_data_source_from_s3_input) :: ExAws.Request.JSON.success_t | no_return

Same as create_data_source_from_s3/2 but raise on error.

Source
create_evaluation(client, input)

Specs:

  • create_evaluation(client :: ExAws.MachineLearning.t, input :: create_evaluation_input) :: ExAws.Request.JSON.response_t

CreateEvaluation

Creates a new Evaluation of an MLModel. An MLModel is evaluated on a set of observations associated with a DataSource. Like a DataSource for an MLModel, the DataSource for an Evaluation contains values for the Target Variable. The Evaluation compares the predicted result for each observation to the actual outcome and provides a summary so that you know how well the MLModel performs on the test data. Evaluation generates a relevant performance metric, such as BinaryAUC, RegressionRMSE, or MulticlassAvgFScore, based on the corresponding MLModelType: BINARY, REGRESSION, or MULTICLASS.

CreateEvaluation is an asynchronous operation. In response to CreateEvaluation, Amazon Machine Learning (Amazon ML) immediately returns and sets the evaluation status to PENDING. After the Evaluation is created and ready for use, Amazon ML sets the status to COMPLETED.

You can use the GetEvaluation operation to check progress of the evaluation during the creation operation.
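
A minimal sketch, assuming `client` is an ExAws.MachineLearning.t struct configured elsewhere and that the MLModel and held-out DataSource referenced by the placeholder IDs already exist:

    # `client` is an ExAws.MachineLearning.t built per your ExAws configuration (assumed).
    {:ok, _} =
      ExAws.MachineLearning.Core.create_evaluation(client,
        evaluation_id: "ev-holdout-001",
        evaluation_name: "Holdout evaluation",
        ml_model_id: "ml-churn-001",
        evaluation_data_source_id: "ds-holdout-001"
      )

    # CreateEvaluation is asynchronous; check progress with GetEvaluation.
    {:ok, _evaluation} =
      ExAws.MachineLearning.Core.get_evaluation(client, evaluation_id: "ev-holdout-001")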

Source
create_evaluation!(client, input)

Specs:

  • create_evaluation!(client :: ExAws.MachineLearning.t, input :: create_evaluation_input) :: ExAws.Request.JSON.success_t | no_return

Same as create_evaluation/2 but raise on error.

Source
create_ml_model(client, input)

Specs:

  • create_ml_model(client :: ExAws.MachineLearning.t, input :: create_ml_model_input) :: ExAws.Request.JSON.response_t

CreateMLModel

Creates a new MLModel using the data files and the recipe as information sources.

An MLModel is nearly immutable. Users can only update the MLModelName and the ScoreThreshold in an MLModel without creating a new MLModel.

CreateMLModel is an asynchronous operation. In response to CreateMLModel, Amazon Machine Learning (Amazon ML) immediately returns and sets the MLModel status to PENDING. After the MLModel is created and ready for use, Amazon ML sets the status to COMPLETED.

You can use the GetMLModel operation to check progress of the MLModel during the creation operation.

CreateMLModel requires a DataSource with computed statistics, which can be created by setting ComputeStatistics to true in CreateDataSourceFromRDS, CreateDataSourceFromS3, or CreateDataSourceFromRedshift operations.
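
A minimal sketch, assuming `client` is an ExAws.MachineLearning.t struct configured elsewhere, the training DataSource was created with compute_statistics set to true, and the recipe URI and IDs are placeholders:

    # `client` is an ExAws.MachineLearning.t built per your ExAws configuration (assumed).
    {:ok, _} =
      ExAws.MachineLearning.Core.create_ml_model(client,
        ml_model_id: "ml-churn-001",
        ml_model_name: "Churn model",
        ml_model_type: "BINARY",
        training_data_source_id: "ds-s3-001",
        recipe_uri: "s3://my-bucket/recipe.json" # describes how input variables are used in training
      )

    # CreateMLModel is asynchronous; check progress with GetMLModel.
    {:ok, _ml_model} = ExAws.MachineLearning.Core.get_ml_model(client, ml_model_id: "ml-churn-001")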

Source
create_ml_model!(client, input)

Specs:

  • create_ml_model!(client :: ExAws.MachineLearning.t, input :: create_ml_model_input) :: ExAws.Request.JSON.success_t | no_return

Same as create_ml_model/2 but raise on error.

Source
create_realtime_endpoint(client, input)

Specs:

CreateRealtimeEndpoint

Creates a real-time endpoint for the MLModel. The endpoint contains the URI of the MLModel; that is, the location to send real-time prediction requests for the specified MLModel.
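
A minimal sketch, assuming `client` is an ExAws.MachineLearning.t struct configured elsewhere; the ml_model_id input key is an assumption here (the input type is not listed above), mirroring the ml_model_id field of create_realtime_endpoint_output:

    # `client` is an ExAws.MachineLearning.t built per your ExAws configuration (assumed).
    # The ml_model_id key is assumed; see the note above.
    {:ok, _response} =
      ExAws.MachineLearning.Core.create_realtime_endpoint(client, ml_model_id: "ml-churn-001")
    # The response carries realtime_endpoint_info, including the endpoint_url to
    # which Predict requests can be sent once the endpoint is ready.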

Source
create_realtime_endpoint!(client, input)

Specs:

Same as create_realtime_endpoint/2 but raise on error.

Source
delete_batch_prediction(client, input)

Specs:

  • delete_batch_prediction(client :: ExAws.MachineLearning.t, input :: delete_batch_prediction_input) :: ExAws.Request.JSON.response_t

DeleteBatchPrediction

Assigns the DELETED status to a BatchPrediction, rendering it unusable.

After using the DeleteBatchPrediction operation, you can use the GetBatchPrediction operation to verify that the status of the BatchPrediction changed to DELETED.

Caution: The result of the `DeleteBatchPrediction` operation is irreversible.
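
A minimal sketch, assuming `client` is an ExAws.MachineLearning.t struct configured elsewhere and the ID is a placeholder:

    # `client` is an ExAws.MachineLearning.t built per your ExAws configuration (assumed).
    {:ok, _} =
      ExAws.MachineLearning.Core.delete_batch_prediction(client, batch_prediction_id: "bp-nightly-001")

    # Verify the transition with GetBatchPrediction; the returned status should now be DELETED.
    {:ok, _batch_prediction} =
      ExAws.MachineLearning.Core.get_batch_prediction(client, batch_prediction_id: "bp-nightly-001")
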
Source
delete_batch_prediction!(client, input)

Specs:

  • delete_batch_prediction!(client :: ExAws.MachineLearning.t, input :: delete_batch_prediction_input) :: ExAws.Request.JSON.success_t | no_return

Same as delete_batch_prediction/2 but raise on error.

Source
delete_data_source(client, input)

Specs:

  • delete_data_source(client :: ExAws.MachineLearning.t, input :: delete_data_source_input) :: ExAws.Request.JSON.response_t

DeleteDataSource

Assigns the DELETED status to a DataSource, rendering it unusable.

After using the DeleteDataSource operation, you can use the GetDataSource operation to verify that the status of the DataSource changed to DELETED.

Caution: The results of the `DeleteDataSource` operation are irreversible.

Source
delete_data_source!(client, input)

Specs:

  • delete_data_source!(client :: ExAws.MachineLearning.t, input :: delete_data_source_input) :: ExAws.Request.JSON.success_t | no_return

Same as delete_data_source/2 but raise on error.

Source
delete_evaluation(client, input)

Specs:

  • delete_evaluation(client :: ExAws.MachineLearning.t, input :: delete_evaluation_input) :: ExAws.Request.JSON.response_t

DeleteEvaluation

Assigns the DELETED status to an Evaluation, rendering it unusable.

After invoking the DeleteEvaluation operation, you can use the GetEvaluation operation to verify that the status of the Evaluation changed to DELETED.

Caution: The results of the `DeleteEvaluation` operation are irreversible.

Source
delete_evaluation!(client, input)

Specs:

  • delete_evaluation!(client :: ExAws.MachineLearning.t, input :: delete_evaluation_input) :: ExAws.Request.JSON.success_t | no_return

Same as delete_evaluation/2 but raise on error.

Source
delete_ml_model(client, input)

Specs:

  • delete_ml_model(client :: ExAws.MachineLearning.t, input :: delete_ml_model_input) :: ExAws.Request.JSON.response_t

DeleteMLModel

Assigns the DELETED status to an MLModel, rendering it unusable.

After using the DeleteMLModel operation, you can use the GetMLModel operation to verify that the status of the MLModel changed to DELETED.

Caution: The result of the `DeleteMLModel` operation is irreversible.

Source
delete_ml_model!(client, input)

Specs:

  • delete_ml_model!(client :: ExAws.MachineLearning.t, input :: delete_ml_model_input) :: ExAws.Request.JSON.success_t | no_return

Same as delete_ml_model/2 but raise on error.

Source
delete_realtime_endpoint(client, input)

Specs:

DeleteRealtimeEndpoint

Deletes a real-time endpoint of an MLModel.

Source
delete_realtime_endpoint!(client, input)

Specs:

Same as delete_realtime_endpoint/2 but raise on error.

Source
describe_batch_predictions(client, input)

Specs:

DescribeBatchPredictions

Returns a list of BatchPrediction operations that match the search criteria in the request.

Source
describe_batch_predictions!(client, input)

Specs:

Same as describe_batch_predictions/2 but raise on error.

Source
describe_data_sources(client, input)

Specs:

  • describe_data_sources(client :: ExAws.MachineLearning.t, input :: describe_data_sources_input) :: ExAws.Request.JSON.response_t

DescribeDataSources

Returns a list of DataSources that match the search criteria in the request.

Source
describe_data_sources!(client, input)

Specs:

  • describe_data_sources!(client :: ExAws.MachineLearning.t, input :: describe_data_sources_input) :: ExAws.Request.JSON.success_t | no_return

Same as describe_data_sources/2 but raise on error.

Source
describe_evaluations(client, input)

Specs:

  • describe_evaluations(client :: ExAws.MachineLearning.t, input :: describe_evaluations_input) :: ExAws.Request.JSON.response_t

DescribeEvaluations

Returns a list of Evaluations that match the search criteria in the request.

Source
describe_evaluations!(client, input)

Specs:

  • describe_evaluations!(client :: ExAws.MachineLearning.t, input :: describe_evaluations_input) :: ExAws.Request.JSON.success_t | no_return

Same as describe_evaluations/2 but raise on error.

Source
describe_ml_models(client, input)

Specs:

  • describe_ml_models(client :: ExAws.MachineLearning.t, input :: describe_ml_models_input) :: ExAws.Request.JSON.response_t

DescribeMLModels

Returns a list of MLModels that match the search criteria in the request.
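
A minimal sketch, assuming `client` is an ExAws.MachineLearning.t struct configured elsewhere; an empty input lists everything, and the search-filter keys (not reproduced here) go in the same keyword list:

    # `client` is an ExAws.MachineLearning.t built per your ExAws configuration (assumed).
    {:ok, _page} = ExAws.MachineLearning.Core.describe_ml_models(client, [])
    # Per describe_ml_models_output, the response holds the matching models under
    # results and, when more remain, a next_token for a follow-up request.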

Source
describe_ml_models!(client, input)

Specs:

  • describe_ml_models!(client :: ExAws.MachineLearning.t, input :: describe_ml_models_input) :: ExAws.Request.JSON.success_t | no_return

Same as describe_ml_models/2 but raise on error.

Source
get_batch_prediction(client, input)

Specs:

  • get_batch_prediction(client :: ExAws.MachineLearning.t, input :: get_batch_prediction_input) :: ExAws.Request.JSON.response_t

GetBatchPrediction

Returns a BatchPrediction that includes detailed metadata, status, and data file information for a Batch Prediction request.

Source
get_batch_prediction!(client, input)

Specs:

  • get_batch_prediction!(client :: ExAws.MachineLearning.t, input :: get_batch_prediction_input) :: ExAws.Request.JSON.success_t | no_return

Same as get_batch_prediction/2 but raise on error.

Source
get_data_source(client, input)

Specs:

  • get_data_source(client :: ExAws.MachineLearning.t, input :: get_data_source_input) :: ExAws.Request.JSON.response_t

GetDataSource

Returns a DataSource that includes metadata and data file information, as well as the current status of the DataSource.

GetDataSource provides results in normal or verbose format. The verbose format adds the schema description and the list of files pointed to by the DataSource to the normal format.
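
A minimal sketch, assuming `client` is an ExAws.MachineLearning.t struct configured elsewhere and the ID is a placeholder:

    # `client` is an ExAws.MachineLearning.t built per your ExAws configuration (assumed).
    {:ok, _data_source} =
      ExAws.MachineLearning.Core.get_data_source(client,
        data_source_id: "ds-s3-001",
        verbose: true # adds the schema description to the response
      )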

Source
get_data_source!(client, input)

Specs:

  • get_data_source!(client :: ExAws.MachineLearning.t, input :: get_data_source_input) :: ExAws.Request.JSON.success_t | no_return

Same as get_data_source/2 but raise on error.

Source
get_evaluation(client, input)

Specs:

  • get_evaluation(client :: ExAws.MachineLearning.t, input :: get_evaluation_input) :: ExAws.Request.JSON.response_t

GetEvaluation

Returns an Evaluation that includes metadata as well as the current status of the Evaluation.

Source
get_evaluation!(client, input)

Specs:

  • get_evaluation!(client :: ExAws.MachineLearning.t, input :: get_evaluation_input) :: ExAws.Request.JSON.success_t | no_return

Same as get_evaluation/2 but raise on error.

Source
get_ml_model(client, input)

Specs:

  • get_ml_model(client :: ExAws.MachineLearning.t, input :: get_ml_model_input) :: ExAws.Request.JSON.response_t

GetMLModel

Returns an MLModel that includes detailed metadata, data source information, and the current status of the MLModel.

GetMLModel provides results in normal or verbose format.

Source
get_ml_model!(client, input)

Specs:

  • get_ml_model!(client :: ExAws.MachineLearning.t, input :: get_ml_model_input) :: ExAws.Request.JSON.success_t | no_return

Same as get_ml_model/2 but raise on error.

Source
predict(client, input)

Specs:

  • predict(client :: ExAws.MachineLearning.t, input :: predict_input) :: ExAws.Request.JSON.response_t

Predict

Generates a prediction for the observation using the specified MLModel.

Note: Not all response parameters will be populated, because this depends on the type of the requested model.
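
A minimal sketch, assuming `client` is an ExAws.MachineLearning.t struct configured elsewhere, a real-time endpoint already exists for the MLModel, and the endpoint URL, model ID, and feature names are placeholders; the record is a map of variable names to string values:

    # `client` is an ExAws.MachineLearning.t built per your ExAws configuration (assumed).
    {:ok, _response} =
      ExAws.MachineLearning.Core.predict(client,
        ml_model_id: "ml-churn-001",
        predict_endpoint: "https://realtime.machinelearning.us-east-1.amazonaws.com", # placeholder
        record: %{"age" => "42", "plan" => "premium"} # hypothetical feature names
      )
    # The response holds the prediction: predicted_label, predicted_value, or
    # predicted_scores, depending on the MLModel type.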

Source
predict!(client, input)

Specs:

  • predict!(client :: ExAws.MachineLearning.t, input :: predict_input) :: ExAws.Request.JSON.success_t | no_return

Same as predict/2 but raise on error.

Source
update_batch_prediction(client, input)

Specs:

  • update_batch_prediction(client :: ExAws.MachineLearning.t, input :: update_batch_prediction_input) :: ExAws.Request.JSON.response_t

UpdateBatchPrediction

Updates the BatchPredictionName of a BatchPrediction.

You can use the GetBatchPrediction operation to view the contents of the updated data element.

Source
update_batch_prediction!(client, input)

Specs:

  • update_batch_prediction!(client :: ExAws.MachineLearning.t, input :: update_batch_prediction_input) :: ExAws.Request.JSON.success_t | no_return

Same as update_batch_prediction/2 but raise on error.

Source
update_data_source(client, input)

Specs:

  • update_data_source(client :: ExAws.MachineLearning.t, input :: update_data_source_input) :: ExAws.Request.JSON.response_t

UpdateDataSource

Updates the DataSourceName of a DataSource.

You can use the GetDataSource operation to view the contents of the updated data element.

Source
update_data_source!(client, input)

Specs:

  • update_data_source!(client :: ExAws.MachineLearning.t, input :: update_data_source_input) :: ExAws.Request.JSON.success_t | no_return

Same as update_data_source/2 but raise on error.

Source
update_evaluation(client, input)

Specs:

  • update_evaluation(client :: ExAws.MachineLearning.t, input :: update_evaluation_input) :: ExAws.Request.JSON.response_t

UpdateEvaluation

Updates the EvaluationName of an Evaluation.

You can use the GetEvaluation operation to view the contents of the updated data element.

Source
update_evaluation!(client, input)

Specs:

  • update_evaluation!(client :: ExAws.MachineLearning.t, input :: update_evaluation_input) :: ExAws.Request.JSON.success_t | no_return

Same as update_evaluation/2 but raise on error.

Source
update_ml_model(client, input)

Specs:

  • update_ml_model(client :: ExAws.MachineLearning.t, input :: update_ml_model_input) :: ExAws.Request.JSON.response_t

UpdateMLModel

Updates the MLModelName and the ScoreThreshold of an MLModel.

You can use the GetMLModel operation to view the contents of the updated data element.
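
A minimal sketch, assuming `client` is an ExAws.MachineLearning.t struct configured elsewhere and the ID is a placeholder; per update_ml_model_input, only the name and score threshold can be changed:

    # `client` is an ExAws.MachineLearning.t built per your ExAws configuration (assumed).
    {:ok, _} =
      ExAws.MachineLearning.Core.update_ml_model(client,
        ml_model_id: "ml-churn-001",
        ml_model_name: "Churn model v2",
        score_threshold: 0.65
      )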

Source
update_ml_model!(client, input)

Specs:

  • update_ml_model!(client :: ExAws.MachineLearning.t, input :: update_ml_model_input) :: ExAws.Request.JSON.success_t | no_return

Same as update_ml_model/2 but raise on error.

Source