Oracle Cloud Infrastructure v2.33.0 published on Thursday, May 1, 2025 by Pulumi
oci.AiLanguage.getModelEvaluationResults
This data source provides the list of Model Evaluation Results in the Oracle Cloud Infrastructure AI Language service.
Get a (paginated) list of evaluation results for a given model.
Example Usage
import * as pulumi from "@pulumi/pulumi";
import * as oci from "@pulumi/oci";
// testModel is assumed to be an existing oci.AiLanguage.Model resource defined elsewhere in the program.
const testModelEvaluationResults = oci.AiLanguage.getModelEvaluationResults({
    modelId: testModel.id,
});
import pulumi
import pulumi_oci as oci
test_model_evaluation_results = oci.AiLanguage.get_model_evaluation_results(model_id=test_model["id"])
package main
import (
	"github.com/pulumi/pulumi-oci/sdk/v2/go/oci/ailanguage"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := ailanguage.GetModelEvaluationResults(ctx, &ailanguage.GetModelEvaluationResultsArgs{
			ModelId: testModel.Id,
		}, nil)
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Oci = Pulumi.Oci;
return await Deployment.RunAsync(() => 
{
    var testModelEvaluationResults = Oci.AiLanguage.GetModelEvaluationResults.Invoke(new()
    {
        ModelId = testModel.Id,
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.oci.AiLanguage.AiLanguageFunctions;
import com.pulumi.oci.AiLanguage.inputs.GetModelEvaluationResultsArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        final var testModelEvaluationResults = AiLanguageFunctions.getModelEvaluationResults(GetModelEvaluationResultsArgs.builder()
            .modelId(testModel.id())
            .build());
    }
}
variables:
  testModelEvaluationResults:
    fn::invoke:
      function: oci:AiLanguage:getModelEvaluationResults
      arguments:
        modelId: ${testModel.id}
Using getModelEvaluationResults
Two invocation forms are available. The direct form accepts plain arguments and either blocks until the result value is available, or returns a Promise-wrapped result. The output form accepts Input-wrapped arguments and returns an Output-wrapped result.
function getModelEvaluationResults(args: GetModelEvaluationResultsArgs, opts?: InvokeOptions): Promise<GetModelEvaluationResultsResult>
function getModelEvaluationResultsOutput(args: GetModelEvaluationResultsOutputArgs, opts?: InvokeOptions): Output<GetModelEvaluationResultsResult>
def get_model_evaluation_results(filters: Optional[Sequence[GetModelEvaluationResultsFilter]] = None,
                                 model_id: Optional[str] = None,
                                 opts: Optional[InvokeOptions] = None) -> GetModelEvaluationResultsResult
def get_model_evaluation_results_output(filters: Optional[pulumi.Input[Sequence[pulumi.Input[GetModelEvaluationResultsFilterArgs]]]] = None,
                                        model_id: Optional[pulumi.Input[str]] = None,
                                        opts: Optional[InvokeOptions] = None) -> Output[GetModelEvaluationResultsResult]
func GetModelEvaluationResults(ctx *Context, args *GetModelEvaluationResultsArgs, opts ...InvokeOption) (*GetModelEvaluationResultsResult, error)
func GetModelEvaluationResultsOutput(ctx *Context, args *GetModelEvaluationResultsOutputArgs, opts ...InvokeOption) GetModelEvaluationResultsResultOutput
Note: This function is named GetModelEvaluationResults in the Go SDK.
public static class GetModelEvaluationResults
{
    public static Task<GetModelEvaluationResultsResult> InvokeAsync(GetModelEvaluationResultsArgs args, InvokeOptions? opts = null)
    public static Output<GetModelEvaluationResultsResult> Invoke(GetModelEvaluationResultsInvokeArgs args, InvokeOptions? opts = null)
}
public static CompletableFuture<GetModelEvaluationResultsResult> getModelEvaluationResults(GetModelEvaluationResultsArgs args, InvokeOptions options)
public static Output<GetModelEvaluationResultsResult> getModelEvaluationResults(GetModelEvaluationResultsArgs args, InvokeOptions options)
fn::invoke:
  function: oci:AiLanguage/getModelEvaluationResults:getModelEvaluationResults
  arguments:
    # arguments dictionary
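For example, both forms can be combined in a TypeScript program. The sketch below is illustrative only: the model OCID is a placeholder, and the final export assumes the result shape documented under "getModelEvaluationResults Result" below.
import * as pulumi from "@pulumi/pulumi";
import * as oci from "@pulumi/oci";

// Placeholder model OCID used only for illustration.
const modelId = "ocid1.ailanguagemodel.oc1..exampleuniqueid";

// Direct form: plain arguments, Promise-wrapped result.
const direct = oci.AiLanguage.getModelEvaluationResults({ modelId: modelId });

// Output form: Input-wrapped arguments, Output-wrapped result, so the call can
// depend on values that are not known until deployment.
const viaOutput = oci.AiLanguage.getModelEvaluationResultsOutput({
    modelId: pulumi.output(modelId),
});

export const evaluationResultCount = viaOutput.apply(r => r.evaluationResultCollections.length);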
The following arguments are supported:
- ModelId string
- unique model OCID.
- Filters List<GetModelEvaluationResultsFilter>
- ModelId string
- unique model OCID.
- Filters []GetModelEvaluationResultsFilter
- modelId String
- unique model OCID.
- filters List<GetModelEvaluationResultsFilter>
- modelId string
- unique model OCID.
- filters GetModelEvaluationResultsFilter[]
- model_id str
- unique model OCID.
- filters Sequence[GetModelEvaluationResultsFilter]
- modelId String
- unique model OCID.
- filters List<Property Map>
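The filters argument is optional and, when present, narrows the returned collection. A hedged TypeScript sketch, assuming the usual pulumi-oci data-source filter shape (name, values, and an optional regex flag); the model OCID and filter attribute are placeholders, not values taken from this page.
import * as oci from "@pulumi/oci";

// Hypothetical filter: keep only text-classification evaluation results.
const filtered = oci.AiLanguage.getModelEvaluationResultsOutput({
    modelId: "ocid1.ailanguagemodel.oc1..exampleuniqueid",
    filters: [{
        name: "model_type",              // assumed filterable attribute
        values: ["TEXT_CLASSIFICATION"], // assumed value for illustration
    }],
});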
getModelEvaluationResults Result
The following output properties are available:
- EvaluationResultCollections List<GetModelEvaluationResultsEvaluationResultCollection>
- The list of evaluation_result_collection.
- Id string
- The provider-assigned unique ID for this managed resource.
- ModelId string
- Filters List<GetModelEvaluationResultsFilter>
- EvaluationResultCollections []GetModelEvaluationResultsEvaluationResultCollection
- The list of evaluation_result_collection.
- Id string
- The provider-assigned unique ID for this managed resource.
- ModelId string
- Filters []GetModelEvaluationResultsFilter
- evaluationResultCollections List<GetModelEvaluationResultsEvaluationResultCollection>
- The list of evaluation_result_collection.
- id String
- The provider-assigned unique ID for this managed resource.
- modelId String
- filters List<GetModelEvaluationResultsFilter>
- evaluationResultCollections GetModelEvaluationResultsEvaluationResultCollection[]
- The list of evaluation_result_collection.
- id string
- The provider-assigned unique ID for this managed resource.
- modelId string
- filters GetModelEvaluationResultsFilter[]
- evaluation_result_collections Sequence[GetModelEvaluationResultsEvaluationResultCollection]
- The list of evaluation_result_collection.
- id str
- The provider-assigned unique ID for this managed resource.
- model_id str
- filters Sequence[GetModelEvaluationResultsFilter]
- evaluationResultCollections List<Property Map>
- The list of evaluation_result_collection.
- id String
- The provider-assigned unique ID for this managed resource.
- modelId String
- filters List<Property Map>
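The result is a nested collection: each entry in evaluationResultCollections carries an items array with the per-record evaluation details described under Supporting Types below. A minimal TypeScript sketch of reading it (placeholder OCID):
import * as oci from "@pulumi/oci";

const results = oci.AiLanguage.getModelEvaluationResultsOutput({
    modelId: "ocid1.ailanguagemodel.oc1..exampleuniqueid", // placeholder OCID
});

// Number of evaluation records in the first returned collection (0 if none).
export const firstCollectionItemCount = results.apply(
    r => r.evaluationResultCollections[0]?.items.length ?? 0,
);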
Supporting Types
GetModelEvaluationResultsEvaluationResultCollection
- Items List<GetModelEvaluationResultsEvaluationResultCollectionItem>
- List of model evaluation analysis
- Items []GetModelEvaluationResultsEvaluationResultCollectionItem
- List of model evaluation analysis
- items List<GetModelEvaluationResultsEvaluationResultCollectionItem>
- List of model evaluation analysis
- items GetModelEvaluationResultsEvaluationResultCollectionItem[]
- List of model evaluation analysis
- items Sequence[GetModelEvaluationResultsEvaluationResultCollectionItem]
- List of model evaluation analysis
- items List<Property Map>
- List of model evaluation analysis
GetModelEvaluationResultsEvaluationResultCollectionItem
- DefinedTags Dictionary<string, string>
- Defined tags for this resource. Each key is predefined and scoped to a namespace. Example: {"foo-namespace.bar-key": "value"}
- FreeformTags Dictionary<string, string>
- Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: {"bar-key": "value"}
- Location string
- For CSV format, location is the rowId (1 is the header); for JSONL format, location is the JSONL line sequence (1 is metadata)
- ModelType string
- Model type
- PredictedEntities List<GetModelEvaluationResultsEvaluationResultCollectionItemPredictedEntity>
- List of predicted entities by custom NER model
- PredictedLabels List<string>
- List of predicted labels by custom multi class or multi label TextClassification model
- Record string
- For CSV format, location is the rowId (1 is the header); for JSONL format, location is the JSONL line sequence (1 is metadata)
- TrueEntities List<GetModelEvaluationResultsEvaluationResultCollectionItemTrueEntity>
- List of true (actual) entities in test data for NER model
- TrueLabels List<string>
- List of true (actual) labels in test data for multi class or multi label TextClassification
- DefinedTags map[string]string
- Defined tags for this resource. Each key is predefined and scoped to a namespace. Example: {"foo-namespace.bar-key": "value"}
- FreeformTags map[string]string
- Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: {"bar-key": "value"}
- Location string
- For CSV format, location is the rowId (1 is the header); for JSONL format, location is the JSONL line sequence (1 is metadata)
- ModelType string
- Model type
- PredictedEntities []GetModelEvaluationResultsEvaluationResultCollectionItemPredictedEntity
- List of predicted entities by custom NER model
- PredictedLabels []string
- List of predicted labels by custom multi class or multi label TextClassification model
- Record string
- For CSV format, location is the rowId (1 is the header); for JSONL format, location is the JSONL line sequence (1 is metadata)
- TrueEntities []GetModelEvaluationResultsEvaluationResultCollectionItemTrueEntity
- List of true (actual) entities in test data for NER model
- TrueLabels []string
- List of true (actual) labels in test data for multi class or multi label TextClassification
- definedTags Map<String,String>
- Defined tags for this resource. Each key is predefined and scoped to a namespace. Example: {"foo-namespace.bar-key": "value"}
- freeformTags Map<String,String>
- Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: {"bar-key": "value"}
- location String
- For CSV format, location is the rowId (1 is the header); for JSONL format, location is the JSONL line sequence (1 is metadata)
- modelType String
- Model type
- predictedEntities List<GetModelEvaluationResultsEvaluationResultCollectionItemPredictedEntity>
- List of predicted entities by custom NER model
- predictedLabels List<String>
- List of predicted labels by custom multi class or multi label TextClassification model
- record String
- For CSV format, location is the rowId (1 is the header); for JSONL format, location is the JSONL line sequence (1 is metadata)
- trueEntities List<GetModelEvaluationResultsEvaluationResultCollectionItemTrueEntity>
- List of true (actual) entities in test data for NER model
- trueLabels List<String>
- List of true (actual) labels in test data for multi class or multi label TextClassification
- definedTags {[key: string]: string}
- Defined tags for this resource. Each key is predefined and scoped to a namespace. Example: {"foo-namespace.bar-key": "value"}
- freeformTags {[key: string]: string}
- Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: {"bar-key": "value"}
- location string
- For CSV format, location is the rowId (1 is the header); for JSONL format, location is the JSONL line sequence (1 is metadata)
- modelType string
- Model type
- predictedEntities GetModelEvaluationResultsEvaluationResultCollectionItemPredictedEntity[]
- List of predicted entities by custom NER model
- predictedLabels string[]
- List of predicted labels by custom multi class or multi label TextClassification model
- record string
- For CSV format, location is the rowId (1 is the header); for JSONL format, location is the JSONL line sequence (1 is metadata)
- trueEntities GetModelEvaluationResultsEvaluationResultCollectionItemTrueEntity[]
- List of true (actual) entities in test data for NER model
- trueLabels string[]
- List of true (actual) labels in test data for multi class or multi label TextClassification
- defined_tags Mapping[str, str]
- Defined tags for this resource. Each key is predefined and scoped to a namespace. Example: {"foo-namespace.bar-key": "value"}
- freeform_tags Mapping[str, str]
- Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: {"bar-key": "value"}
- location str
- For CSV format, location is the rowId (1 is the header); for JSONL format, location is the JSONL line sequence (1 is metadata)
- model_type str
- Model type
- predicted_entities Sequence[GetModelEvaluationResultsEvaluationResultCollectionItemPredictedEntity]
- List of predicted entities by custom NER model
- predicted_labels Sequence[str]
- List of predicted labels by custom multi class or multi label TextClassification model
- record str
- For CSV format, location is the rowId (1 is the header); for JSONL format, location is the JSONL line sequence (1 is metadata)
- true_entities Sequence[GetModelEvaluationResultsEvaluationResultCollectionItemTrueEntity]
- List of true (actual) entities in test data for NER model
- true_labels Sequence[str]
- List of true (actual) labels in test data for multi class or multi label TextClassification
- definedTags Map<String>
- Defined tags for this resource. Each key is predefined and scoped to a namespace. Example: {"foo-namespace.bar-key": "value"}
- freeformTags Map<String>
- Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: {"bar-key": "value"}
- location String
- For CSV format, location is the rowId (1 is the header); for JSONL format, location is the JSONL line sequence (1 is metadata)
- modelType String
- Model type
- predictedEntities List<Property Map>
- List of predicted entities by custom NER model
- predictedLabels List<String>
- List of predicted labels by custom multi class or multi label TextClassification model
- record String
- For CSV format, location is the rowId (1 is the header); for JSONL format, location is the JSONL line sequence (1 is metadata)
- trueEntities List<Property Map>
- List of true (actual) entities in test data for NER model
- trueLabels List<String>
- List of true (actual) labels in test data for multi class or multi label TextClassification
GetModelEvaluationResultsEvaluationResultCollectionItemPredictedEntity
GetModelEvaluationResultsEvaluationResultCollectionItemTrueEntity
GetModelEvaluationResultsFilter
Package Details
- Repository
- oci pulumi/pulumi-oci
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the oci Terraform Provider.