databricks.QualityMonitor

Databricks v1.46.1 published on Friday, Jun 28, 2024 by Pulumi

    This resource allows you to manage Lakehouse Monitors in Databricks.

    A databricks.QualityMonitor is attached to a databricks.SqlTable and can be of type timeseries, snapshot, or inference.

    Example Usage

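    A TypeScript sketch equivalent to the Java and YAML programs below (same resources and values):

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";

    const sandbox = new databricks.Catalog("sandbox", {
        name: "sandbox",
        comment: "this catalog is managed by terraform",
        properties: { purpose: "testing" },
    });

    const things = new databricks.Schema("things", {
        catalogName: sandbox.id,
        name: "things",
        comment: "this database is managed by terraform",
        properties: { kind: "various" },
    });

    const myTestTable = new databricks.SqlTable("myTestTable", {
        catalogName: sandbox.id,
        schemaName: things.name,
        name: "bar",
        tableType: "MANAGED",
        dataSourceFormat: "DELTA",
        columns: [{
            name: "timestamp",
            position: 1,
            type: "int",
        }],
    });

    // pulumi.interpolate lifts the Output values into a single Output<string>.
    const testTimeseriesMonitor = new databricks.QualityMonitor("testTimeseriesMonitor", {
        tableName: pulumi.interpolate`${sandbox.name}.${things.name}.${myTestTable.name}`,
        assetsDir: pulumi.interpolate`/Shared/provider-test/databricks_quality_monitoring/${myTestTable.name}`,
        outputSchemaName: pulumi.interpolate`${sandbox.name}.${things.name}`,
        timeSeries: {
            granularities: ["1 hour"],
            timestampCol: "timestamp",
        },
    });
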
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.Catalog;
    import com.pulumi.databricks.CatalogArgs;
    import com.pulumi.databricks.Schema;
    import com.pulumi.databricks.SchemaArgs;
    import com.pulumi.databricks.SqlTable;
    import com.pulumi.databricks.SqlTableArgs;
    import com.pulumi.databricks.inputs.SqlTableColumnArgs;
    import com.pulumi.databricks.QualityMonitor;
    import com.pulumi.databricks.QualityMonitorArgs;
    import com.pulumi.databricks.inputs.QualityMonitorTimeSeriesArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var sandbox = new Catalog("sandbox", CatalogArgs.builder()
                .name("sandbox")
                .comment("this catalog is managed by terraform")
                .properties(Map.of("purpose", "testing"))
                .build());
    
            var things = new Schema("things", SchemaArgs.builder()
                .catalogName(sandbox.id())
                .name("things")
                .comment("this database is managed by terraform")
                .properties(Map.of("kind", "various"))
                .build());
    
            var myTestTable = new SqlTable("myTestTable", SqlTableArgs.builder()
                .catalogName("main")
                .schemaName(things.name())
                .name("bar")
                .tableType("MANAGED")
                .dataSourceFormat("DELTA")
                .columns(SqlTableColumnArgs.builder()
                    .name("timestamp")
                    .position(1)
                    .type("int")
                    .build())
                .build());
    
            var testTimeseriesMonitor = new QualityMonitor("testTimeseriesMonitor", QualityMonitorArgs.builder()
                .tableName(Output.tuple(sandbox.name(), things.name(), myTestTable.name()).applyValue(values -> {
                    var sandboxName = values.t1;
                    var thingsName = values.t2;
                    var myTestTableName = values.t3;
                    return String.format("%s.%s.%s", sandboxName,thingsName,myTestTableName);
                }))
                .assetsDir(myTestTable.name().applyValue(name -> String.format("/Shared/provider-test/databricks_quality_monitoring/%s", name)))
                .outputSchemaName(Output.tuple(sandbox.name(), things.name()).applyValue(values -> {
                    var sandboxName = values.t1;
                    var thingsName = values.t2;
                    return String.format("%s.%s", sandboxName,thingsName);
                }))
                .timeSeries(QualityMonitorTimeSeriesArgs.builder()
                    .granularities("1 hour")
                    .timestampCol("timestamp")
                    .build())
                .build());
    
        }
    }
    
    resources:
      sandbox:
        type: databricks:Catalog
        properties:
          name: sandbox
          comment: this catalog is managed by terraform
          properties:
            purpose: testing
      things:
        type: databricks:Schema
        properties:
          catalogName: ${sandbox.id}
          name: things
          comment: this database is managed by terraform
          properties:
            kind: various
      myTestTable:
        type: databricks:SqlTable
        properties:
          catalogName: ${sandbox.id}
          schemaName: ${things.name}
          name: bar
          tableType: MANAGED
          dataSourceFormat: DELTA
          columns:
            - name: timestamp
              position: 1
              type: int
      testTimeseriesMonitor:
        type: databricks:QualityMonitor
        properties:
          tableName: ${sandbox.name}.${things.name}.${myTestTable.name}
          assetsDir: /Shared/provider-test/databricks_quality_monitoring/${myTestTable.name}
          outputSchemaName: ${sandbox.name}.${things.name}
          timeSeries:
            granularities:
              - 1 hour
            timestampCol: timestamp
    

    Inference Monitor

    These examples reference the sandbox, things, and myTestTable resources defined in the example above.

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    
    const testMonitorInference = new databricks.QualityMonitor("testMonitorInference", {
        tableName: pulumi.interpolate`${sandbox.name}.${things.name}.${myTestTable.name}`,
        assetsDir: pulumi.interpolate`/Shared/provider-test/databricks_quality_monitoring/${myTestTable.name}`,
        outputSchemaName: pulumi.interpolate`${sandbox.name}.${things.name}`,
        inferenceLog: {
            granularities: ["1 hour"],
            timestampCol: "timestamp",
            predictionCol: "prediction",
            modelIdCol: "model_id",
            problemType: "PROBLEM_TYPE_REGRESSION",
        },
    });
    
    import pulumi
    import pulumi_databricks as databricks
    
    test_monitor_inference = databricks.QualityMonitor("testMonitorInference",
        table_name=f"{sandbox['name']}.{things['name']}.{my_test_table['name']}",
        assets_dir=f"/Shared/provider-test/databricks_quality_monitoring/{my_test_table['name']}",
        output_schema_name=f"{sandbox['name']}.{things['name']}",
        inference_log=databricks.QualityMonitorInferenceLogArgs(
            granularities=["1 hour"],
            timestamp_col="timestamp",
            prediction_col="prediction",
            model_id_col="model_id",
            problem_type="PROBLEM_TYPE_REGRESSION",
        ))
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := databricks.NewQualityMonitor(ctx, "testMonitorInference", &databricks.QualityMonitorArgs{
    			TableName:        pulumi.Sprintf("%v.%v.%v", sandbox.Name, things.Name, myTestTable.Name),
    			AssetsDir:        pulumi.Sprintf("/Shared/provider-test/databricks_quality_monitoring/%v", myTestTable.Name),
    			OutputSchemaName: pulumi.Sprintf("%v.%v", sandbox.Name, things.Name),
    			InferenceLog: &databricks.QualityMonitorInferenceLogArgs{
    				Granularities: pulumi.StringArray{
    					pulumi.String("1 hour"),
    				},
    				TimestampCol:  pulumi.String("timestamp"),
    				PredictionCol: pulumi.String("prediction"),
    				ModelIdCol:    pulumi.String("model_id"),
    				ProblemType:   pulumi.String("PROBLEM_TYPE_REGRESSION"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var testMonitorInference = new Databricks.QualityMonitor("testMonitorInference", new()
        {
            TableName = $"{sandbox.Name}.{things.Name}.{myTestTable.Name}",
            AssetsDir = $"/Shared/provider-test/databricks_quality_monitoring/{myTestTable.Name}",
            OutputSchemaName = $"{sandbox.Name}.{things.Name}",
            InferenceLog = new Databricks.Inputs.QualityMonitorInferenceLogArgs
            {
                Granularities = new[]
                {
                    "1 hour",
                },
                TimestampCol = "timestamp",
                PredictionCol = "prediction",
                ModelIdCol = "model_id",
                ProblemType = "PROBLEM_TYPE_REGRESSION",
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.QualityMonitor;
    import com.pulumi.databricks.QualityMonitorArgs;
    import com.pulumi.databricks.inputs.QualityMonitorInferenceLogArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var testMonitorInference = new QualityMonitor("testMonitorInference", QualityMonitorArgs.builder()
                .tableName(Output.tuple(sandbox.name(), things.name(), myTestTable.name()).applyValue(values -> String.format("%s.%s.%s", values.t1, values.t2, values.t3)))
                .assetsDir(myTestTable.name().applyValue(name -> String.format("/Shared/provider-test/databricks_quality_monitoring/%s", name)))
                .outputSchemaName(Output.tuple(sandbox.name(), things.name()).applyValue(values -> String.format("%s.%s", values.t1, values.t2)))
                .inferenceLog(QualityMonitorInferenceLogArgs.builder()
                    .granularities("1 hour")
                    .timestampCol("timestamp")
                    .predictionCol("prediction")
                    .modelIdCol("model_id")
                    .problemType("PROBLEM_TYPE_REGRESSION")
                    .build())
                .build());
    
        }
    }
    
    resources:
      testMonitorInference:
        type: databricks:QualityMonitor
        properties:
          tableName: ${sandbox.name}.${things.name}.${myTestTable.name}
          assetsDir: /Shared/provider-test/databricks_quality_monitoring/${myTestTable.name}
          outputSchemaName: ${sandbox.name}.${things.name}
          inferenceLog:
            granularities:
              - 1 hour
            timestampCol: timestamp
            predictionCol: prediction
            modelIdCol: model_id
            problemType: PROBLEM_TYPE_REGRESSION
    

    Snapshot Monitor

    As above, these examples reference the resources defined in the first example.

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    
    const testMonitorSnapshot = new databricks.QualityMonitor("testMonitorSnapshot", {
        tableName: pulumi.interpolate`${sandbox.name}.${things.name}.${myTestTable.name}`,
        assetsDir: pulumi.interpolate`/Shared/provider-test/databricks_quality_monitoring/${myTestTable.name}`,
        outputSchemaName: pulumi.interpolate`${sandbox.name}.${things.name}`,
        snapshot: {},
    });
    
    import pulumi
    import pulumi_databricks as databricks
    
    test_monitor_snapshot = databricks.QualityMonitor("testMonitorSnapshot",
        table_name=pulumi.Output.concat(sandbox.name, ".", things.name, ".", my_test_table.name),
        assets_dir=pulumi.Output.concat("/Shared/provider-test/databricks_quality_monitoring/", my_test_table.name),
        output_schema_name=pulumi.Output.concat(sandbox.name, ".", things.name),
        snapshot=databricks.QualityMonitorSnapshotArgs())
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := databricks.NewQualityMonitor(ctx, "testMonitorSnapshot", &databricks.QualityMonitorArgs{
    			TableName:        pulumi.Sprintf("%v.%v.%v", sandbox.Name, things.Name, myTestTable.Name),
    			AssetsDir:        pulumi.Sprintf("/Shared/provider-test/databricks_quality_monitoring/%v", myTestTable.Name),
    			OutputSchemaName: pulumi.Sprintf("%v.%v", sandbox.Name, things.Name),
    			Snapshot:         &databricks.QualityMonitorSnapshotArgs{},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var testMonitorSnapshot = new Databricks.QualityMonitor("testMonitorSnapshot", new()
        {
            TableName = Output.Format($"{sandbox.Name}.{things.Name}.{myTestTable.Name}"),
            AssetsDir = Output.Format($"/Shared/provider-test/databricks_quality_monitoring/{myTestTable.Name}"),
            OutputSchemaName = Output.Format($"{sandbox.Name}.{things.Name}"),
            Snapshot = new Databricks.Inputs.QualityMonitorSnapshotArgs(),
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.QualityMonitor;
    import com.pulumi.databricks.QualityMonitorArgs;
    import com.pulumi.databricks.inputs.QualityMonitorSnapshotArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var testMonitorSnapshot = new QualityMonitor("testMonitorSnapshot", QualityMonitorArgs.builder()
                .tableName(Output.tuple(sandbox.name(), things.name(), myTestTable.name()).applyValue(values -> String.format("%s.%s.%s", values.t1, values.t2, values.t3)))
                .assetsDir(myTestTable.name().applyValue(name -> String.format("/Shared/provider-test/databricks_quality_monitoring/%s", name)))
                .outputSchemaName(Output.tuple(sandbox.name(), things.name()).applyValue(values -> String.format("%s.%s", values.t1, values.t2)))
                .snapshot(QualityMonitorSnapshotArgs.builder().build())
                .build());
    
        }
    }
    
    resources:
      testMonitorSnapshot:
        type: databricks:QualityMonitor
        properties:
          tableName: ${sandbox.name}.${things.name}.${myTestTable.name}
          assetsDir: /Shared/provider-test/databricks_quality_monitoring/${myTestTable.name}
          outputSchemaName: ${sandbox.name}.${things.name}
          snapshot: {}
    

    Related Resources

    The following resources are often used in the same context:

    • databricks.Catalog
    • databricks.Schema
    • databricks.SqlTable

    Create QualityMonitor Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new QualityMonitor(name: string, args: QualityMonitorArgs, opts?: CustomResourceOptions);
    @overload
    def QualityMonitor(resource_name: str,
                       args: QualityMonitorArgs,
                       opts: Optional[ResourceOptions] = None)
    
    @overload
    def QualityMonitor(resource_name: str,
                       opts: Optional[ResourceOptions] = None,
                       assets_dir: Optional[str] = None,
                       table_name: Optional[str] = None,
                       output_schema_name: Optional[str] = None,
                       notifications: Optional[QualityMonitorNotificationsArgs] = None,
                       inference_log: Optional[QualityMonitorInferenceLogArgs] = None,
                       latest_monitor_failure_msg: Optional[str] = None,
                       data_classification_config: Optional[QualityMonitorDataClassificationConfigArgs] = None,
                       custom_metrics: Optional[Sequence[QualityMonitorCustomMetricArgs]] = None,
                       schedule: Optional[QualityMonitorScheduleArgs] = None,
                       skip_builtin_dashboard: Optional[bool] = None,
                       slicing_exprs: Optional[Sequence[str]] = None,
                       snapshot: Optional[QualityMonitorSnapshotArgs] = None,
                       baseline_table_name: Optional[str] = None,
                       time_series: Optional[QualityMonitorTimeSeriesArgs] = None,
                       warehouse_id: Optional[str] = None)
    func NewQualityMonitor(ctx *Context, name string, args QualityMonitorArgs, opts ...ResourceOption) (*QualityMonitor, error)
    public QualityMonitor(string name, QualityMonitorArgs args, CustomResourceOptions? opts = null)
    public QualityMonitor(String name, QualityMonitorArgs args)
    public QualityMonitor(String name, QualityMonitorArgs args, CustomResourceOptions options)
    
    type: databricks:QualityMonitor
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args QualityMonitorArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args QualityMonitorArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args QualityMonitorArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args QualityMonitorArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args QualityMonitorArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Constructor example

    The following reference example uses placeholder values for all input properties.

    var qualityMonitorResource = new Databricks.QualityMonitor("qualityMonitorResource", new()
    {
        AssetsDir = "string",
        TableName = "string",
        OutputSchemaName = "string",
        Notifications = new Databricks.Inputs.QualityMonitorNotificationsArgs
        {
            OnFailure = new Databricks.Inputs.QualityMonitorNotificationsOnFailureArgs
            {
                EmailAddresses = new[]
                {
                    "string",
                },
            },
            OnNewClassificationTagDetected = new Databricks.Inputs.QualityMonitorNotificationsOnNewClassificationTagDetectedArgs
            {
                EmailAddresses = new[]
                {
                    "string",
                },
            },
        },
        InferenceLog = new Databricks.Inputs.QualityMonitorInferenceLogArgs
        {
            Granularities = new[]
            {
                "string",
            },
            ModelIdCol = "string",
            PredictionCol = "string",
            ProblemType = "string",
            TimestampCol = "string",
            LabelCol = "string",
            PredictionProbaCol = "string",
        },
        LatestMonitorFailureMsg = "string",
        DataClassificationConfig = new Databricks.Inputs.QualityMonitorDataClassificationConfigArgs
        {
            Enabled = false,
        },
        CustomMetrics = new[]
        {
            new Databricks.Inputs.QualityMonitorCustomMetricArgs
            {
                Definition = "string",
                InputColumns = new[]
                {
                    "string",
                },
                Name = "string",
                OutputDataType = "string",
                Type = "string",
            },
        },
        Schedule = new Databricks.Inputs.QualityMonitorScheduleArgs
        {
            QuartzCronExpression = "string",
            TimezoneId = "string",
            PauseStatus = "string",
        },
        SkipBuiltinDashboard = false,
        SlicingExprs = new[]
        {
            "string",
        },
        Snapshot = new Databricks.Inputs.QualityMonitorSnapshotArgs(),
        BaselineTableName = "string",
        TimeSeries = new Databricks.Inputs.QualityMonitorTimeSeriesArgs
        {
            Granularities = new[]
            {
                "string",
            },
            TimestampCol = "string",
        },
        WarehouseId = "string",
    });
    
    example, err := databricks.NewQualityMonitor(ctx, "qualityMonitorResource", &databricks.QualityMonitorArgs{
    	AssetsDir:        pulumi.String("string"),
    	TableName:        pulumi.String("string"),
    	OutputSchemaName: pulumi.String("string"),
    	Notifications: &databricks.QualityMonitorNotificationsArgs{
    		OnFailure: &databricks.QualityMonitorNotificationsOnFailureArgs{
    			EmailAddresses: pulumi.StringArray{
    				pulumi.String("string"),
    			},
    		},
    		OnNewClassificationTagDetected: &databricks.QualityMonitorNotificationsOnNewClassificationTagDetectedArgs{
    			EmailAddresses: pulumi.StringArray{
    				pulumi.String("string"),
    			},
    		},
    	},
    	InferenceLog: &databricks.QualityMonitorInferenceLogArgs{
    		Granularities: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		ModelIdCol:         pulumi.String("string"),
    		PredictionCol:      pulumi.String("string"),
    		ProblemType:        pulumi.String("string"),
    		TimestampCol:       pulumi.String("string"),
    		LabelCol:           pulumi.String("string"),
    		PredictionProbaCol: pulumi.String("string"),
    	},
    	LatestMonitorFailureMsg: pulumi.String("string"),
    	DataClassificationConfig: &databricks.QualityMonitorDataClassificationConfigArgs{
    		Enabled: pulumi.Bool(false),
    	},
    	CustomMetrics: databricks.QualityMonitorCustomMetricArray{
    		&databricks.QualityMonitorCustomMetricArgs{
    			Definition: pulumi.String("string"),
    			InputColumns: pulumi.StringArray{
    				pulumi.String("string"),
    			},
    			Name:           pulumi.String("string"),
    			OutputDataType: pulumi.String("string"),
    			Type:           pulumi.String("string"),
    		},
    	},
    	Schedule: &databricks.QualityMonitorScheduleArgs{
    		QuartzCronExpression: pulumi.String("string"),
    		TimezoneId:           pulumi.String("string"),
    		PauseStatus:          pulumi.String("string"),
    	},
    	SkipBuiltinDashboard: pulumi.Bool(false),
    	SlicingExprs: pulumi.StringArray{
    		pulumi.String("string"),
    	},
    	Snapshot:          &databricks.QualityMonitorSnapshotArgs{},
    	BaselineTableName: pulumi.String("string"),
    	TimeSeries: &databricks.QualityMonitorTimeSeriesArgs{
    		Granularities: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		TimestampCol: pulumi.String("string"),
    	},
    	WarehouseId: pulumi.String("string"),
    })
    
    var qualityMonitorResource = new QualityMonitor("qualityMonitorResource", QualityMonitorArgs.builder()
        .assetsDir("string")
        .tableName("string")
        .outputSchemaName("string")
        .notifications(QualityMonitorNotificationsArgs.builder()
            .onFailure(QualityMonitorNotificationsOnFailureArgs.builder()
                .emailAddresses("string")
                .build())
            .onNewClassificationTagDetected(QualityMonitorNotificationsOnNewClassificationTagDetectedArgs.builder()
                .emailAddresses("string")
                .build())
            .build())
        .inferenceLog(QualityMonitorInferenceLogArgs.builder()
            .granularities("string")
            .modelIdCol("string")
            .predictionCol("string")
            .problemType("string")
            .timestampCol("string")
            .labelCol("string")
            .predictionProbaCol("string")
            .build())
        .latestMonitorFailureMsg("string")
        .dataClassificationConfig(QualityMonitorDataClassificationConfigArgs.builder()
            .enabled(false)
            .build())
        .customMetrics(QualityMonitorCustomMetricArgs.builder()
            .definition("string")
            .inputColumns("string")
            .name("string")
            .outputDataType("string")
            .type("string")
            .build())
        .schedule(QualityMonitorScheduleArgs.builder()
            .quartzCronExpression("string")
            .timezoneId("string")
            .pauseStatus("string")
            .build())
        .skipBuiltinDashboard(false)
        .slicingExprs("string")
        .snapshot(QualityMonitorSnapshotArgs.builder().build())
        .baselineTableName("string")
        .timeSeries(QualityMonitorTimeSeriesArgs.builder()
            .granularities("string")
            .timestampCol("string")
            .build())
        .warehouseId("string")
        .build());
    
    quality_monitor_resource = databricks.QualityMonitor("qualityMonitorResource",
        assets_dir="string",
        table_name="string",
        output_schema_name="string",
        notifications=databricks.QualityMonitorNotificationsArgs(
            on_failure=databricks.QualityMonitorNotificationsOnFailureArgs(
                email_addresses=["string"],
            ),
            on_new_classification_tag_detected=databricks.QualityMonitorNotificationsOnNewClassificationTagDetectedArgs(
                email_addresses=["string"],
            ),
        ),
        inference_log=databricks.QualityMonitorInferenceLogArgs(
            granularities=["string"],
            model_id_col="string",
            prediction_col="string",
            problem_type="string",
            timestamp_col="string",
            label_col="string",
            prediction_proba_col="string",
        ),
        latest_monitor_failure_msg="string",
        data_classification_config=databricks.QualityMonitorDataClassificationConfigArgs(
            enabled=False,
        ),
        custom_metrics=[databricks.QualityMonitorCustomMetricArgs(
            definition="string",
            input_columns=["string"],
            name="string",
            output_data_type="string",
            type="string",
        )],
        schedule=databricks.QualityMonitorScheduleArgs(
            quartz_cron_expression="string",
            timezone_id="string",
            pause_status="string",
        ),
        skip_builtin_dashboard=False,
        slicing_exprs=["string"],
        snapshot=databricks.QualityMonitorSnapshotArgs(),
        baseline_table_name="string",
        time_series=databricks.QualityMonitorTimeSeriesArgs(
            granularities=["string"],
            timestamp_col="string",
        ),
        warehouse_id="string")
    
    const qualityMonitorResource = new databricks.QualityMonitor("qualityMonitorResource", {
        assetsDir: "string",
        tableName: "string",
        outputSchemaName: "string",
        notifications: {
            onFailure: {
                emailAddresses: ["string"],
            },
            onNewClassificationTagDetected: {
                emailAddresses: ["string"],
            },
        },
        inferenceLog: {
            granularities: ["string"],
            modelIdCol: "string",
            predictionCol: "string",
            problemType: "string",
            timestampCol: "string",
            labelCol: "string",
            predictionProbaCol: "string",
        },
        latestMonitorFailureMsg: "string",
        dataClassificationConfig: {
            enabled: false,
        },
        customMetrics: [{
            definition: "string",
            inputColumns: ["string"],
            name: "string",
            outputDataType: "string",
            type: "string",
        }],
        schedule: {
            quartzCronExpression: "string",
            timezoneId: "string",
            pauseStatus: "string",
        },
        skipBuiltinDashboard: false,
        slicingExprs: ["string"],
        snapshot: {},
        baselineTableName: "string",
        timeSeries: {
            granularities: ["string"],
            timestampCol: "string",
        },
        warehouseId: "string",
    });
    
    type: databricks:QualityMonitor
    properties:
        assetsDir: string
        baselineTableName: string
        customMetrics:
            - definition: string
              inputColumns:
                - string
              name: string
              outputDataType: string
              type: string
        dataClassificationConfig:
            enabled: false
        inferenceLog:
            granularities:
                - string
            labelCol: string
            modelIdCol: string
            predictionCol: string
            predictionProbaCol: string
            problemType: string
            timestampCol: string
        latestMonitorFailureMsg: string
        notifications:
            onFailure:
                emailAddresses:
                    - string
            onNewClassificationTagDetected:
                emailAddresses:
                    - string
        outputSchemaName: string
        schedule:
            pauseStatus: string
            quartzCronExpression: string
            timezoneId: string
        skipBuiltinDashboard: false
        slicingExprs:
            - string
        snapshot: {}
        tableName: string
        timeSeries:
            granularities:
                - string
            timestampCol: string
        warehouseId: string
    

    QualityMonitor Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    The QualityMonitor resource accepts the following input properties:

    AssetsDir string
    The directory to store the monitoring assets (e.g., dashboards and metric tables).
    OutputSchemaName string
    Schema where output metric tables are created
    TableName string
    The full name of the table to attach the monitor to. It has the format {catalog}.{schema}.{tableName}.
    BaselineTableName string
    Name of the baseline table from which drift metrics are computed. Columns in the monitored table should also be present in the baseline table.
    CustomMetrics List<QualityMonitorCustomMetric>
    Custom metrics to compute on the monitored table. These can be aggregate metrics, derived metrics (from already computed aggregate metrics), or drift metrics (comparing metrics across time windows).
    DataClassificationConfig QualityMonitorDataClassificationConfig
    The data classification config for the monitor
    InferenceLog QualityMonitorInferenceLog
    Configuration for the inference log monitor
    LatestMonitorFailureMsg string
    Notifications QualityMonitorNotifications
    The notification settings for the monitor. Each supported optional block consists of a single string-array field named email_addresses containing the list of emails to notify.
    Schedule QualityMonitorSchedule
    The schedule for automatically updating and refreshing metric tables.
    SkipBuiltinDashboard bool
    Whether to skip creating a default dashboard summarizing data quality metrics.
    SlicingExprs List<string>
    List of column expressions to slice data with for targeted analysis. The data is grouped by each expression independently, resulting in a separate slice for each predicate and its complements. For high-cardinality columns, only the top 100 unique values by frequency will generate slices.
    Snapshot QualityMonitorSnapshot
    Configuration for monitoring snapshot tables.
    TimeSeries QualityMonitorTimeSeries
    Configuration for monitoring timeseries tables.
    WarehouseId string
    Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used.
    AssetsDir string
    The directory to store the monitoring assets (e.g., dashboards and metric tables).
    OutputSchemaName string
    Schema where output metric tables are created
    TableName string
    The full name of the table to attach the monitor to. It has the format {catalog}.{schema}.{tableName}.
    BaselineTableName string
    Name of the baseline table from which drift metrics are computed. Columns in the monitored table should also be present in the baseline table.
    CustomMetrics []QualityMonitorCustomMetricArgs
    Custom metrics to compute on the monitored table. These can be aggregate metrics, derived metrics (from already computed aggregate metrics), or drift metrics (comparing metrics across time windows).
    DataClassificationConfig QualityMonitorDataClassificationConfigArgs
    The data classification config for the monitor
    InferenceLog QualityMonitorInferenceLogArgs
    Configuration for the inference log monitor
    LatestMonitorFailureMsg string
    Notifications QualityMonitorNotificationsArgs
    The notification settings for the monitor. Each supported optional block consists of a single string-array field named email_addresses containing the list of emails to notify.
    Schedule QualityMonitorScheduleArgs
    The schedule for automatically updating and refreshing metric tables.
    SkipBuiltinDashboard bool
    Whether to skip creating a default dashboard summarizing data quality metrics.
    SlicingExprs []string
    List of column expressions to slice data with for targeted analysis. The data is grouped by each expression independently, resulting in a separate slice for each predicate and its complements. For high-cardinality columns, only the top 100 unique values by frequency will generate slices.
    Snapshot QualityMonitorSnapshotArgs
    Configuration for monitoring snapshot tables.
    TimeSeries QualityMonitorTimeSeriesArgs
    Configuration for monitoring timeseries tables.
    WarehouseId string
    Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used.
    assetsDir String
    The directory to store the monitoring assets (e.g., dashboards and metric tables).
    outputSchemaName String
    Schema where output metric tables are created
    tableName String
    The full name of the table to attach the monitor to. It has the format {catalog}.{schema}.{tableName}.
    baselineTableName String
    Name of the baseline table from which drift metrics are computed. Columns in the monitored table should also be present in the baseline table.
    customMetrics List<QualityMonitorCustomMetric>
    Custom metrics to compute on the monitored table. These can be aggregate metrics, derived metrics (from already computed aggregate metrics), or drift metrics (comparing metrics across time windows).
    dataClassificationConfig QualityMonitorDataClassificationConfig
    The data classification config for the monitor
    inferenceLog QualityMonitorInferenceLog
    Configuration for the inference log monitor
    latestMonitorFailureMsg String
    notifications QualityMonitorNotifications
    The notification settings for the monitor. Each supported optional block consists of a single string-array field named email_addresses containing the list of emails to notify.
    schedule QualityMonitorSchedule
    The schedule for automatically updating and refreshing metric tables.
    skipBuiltinDashboard Boolean
    Whether to skip creating a default dashboard summarizing data quality metrics.
    slicingExprs List<String>
    List of column expressions to slice data with for targeted analysis. The data is grouped by each expression independently, resulting in a separate slice for each predicate and its complements. For high-cardinality columns, only the top 100 unique values by frequency will generate slices.
    snapshot QualityMonitorSnapshot
    Configuration for monitoring snapshot tables.
    timeSeries QualityMonitorTimeSeries
    Configuration for monitoring timeseries tables.
    warehouseId String
    Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used.
    assetsDir string
    The directory to store the monitoring assets (e.g., dashboards and metric tables).
    outputSchemaName string
    Schema where output metric tables are created
    tableName string
    The full name of the table to attach the monitor to. It has the format {catalog}.{schema}.{tableName}.
    baselineTableName string
    Name of the baseline table from which drift metrics are computed. Columns in the monitored table should also be present in the baseline table.
    customMetrics QualityMonitorCustomMetric[]
    Custom metrics to compute on the monitored table. These can be aggregate metrics, derived metrics (from already computed aggregate metrics), or drift metrics (comparing metrics across time windows).
    dataClassificationConfig QualityMonitorDataClassificationConfig
    The data classification config for the monitor
    inferenceLog QualityMonitorInferenceLog
    Configuration for the inference log monitor
    latestMonitorFailureMsg string
    notifications QualityMonitorNotifications
    The notification settings for the monitor. Each supported optional block consists of a single string-array field named email_addresses containing the list of emails to notify.
    schedule QualityMonitorSchedule
    The schedule for automatically updating and refreshing metric tables.
    skipBuiltinDashboard boolean
    Whether to skip creating a default dashboard summarizing data quality metrics.
    slicingExprs string[]
    List of column expressions to slice data with for targeted analysis. The data is grouped by each expression independently, resulting in a separate slice for each predicate and its complements. For high-cardinality columns, only the top 100 unique values by frequency will generate slices.
    snapshot QualityMonitorSnapshot
    Configuration for monitoring snapshot tables.
    timeSeries QualityMonitorTimeSeries
    Configuration for monitoring timeseries tables.
    warehouseId string
    Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used.
    assets_dir str
    The directory to store the monitoring assets (e.g., dashboards and metric tables).
    output_schema_name str
    Schema where output metric tables are created
    table_name str
    The full name of the table to attach the monitor to. It has the format {catalog}.{schema}.{tableName}.
    baseline_table_name str
    Name of the baseline table from which drift metrics are computed. Columns in the monitored table should also be present in the baseline table.
    custom_metrics Sequence[QualityMonitorCustomMetricArgs]
    Custom metrics to compute on the monitored table. These can be aggregate metrics, derived metrics (from already computed aggregate metrics), or drift metrics (comparing metrics across time windows).
    data_classification_config QualityMonitorDataClassificationConfigArgs
    The data classification config for the monitor
    inference_log QualityMonitorInferenceLogArgs
    Configuration for the inference log monitor
    latest_monitor_failure_msg str
    notifications QualityMonitorNotificationsArgs
    The notification settings for the monitor. Each supported optional block consists of a single string-array field named email_addresses containing the list of emails to notify.
    schedule QualityMonitorScheduleArgs
    The schedule for automatically updating and refreshing metric tables.
    skip_builtin_dashboard bool
    Whether to skip creating a default dashboard summarizing data quality metrics.
    slicing_exprs Sequence[str]
    List of column expressions to slice data with for targeted analysis. The data is grouped by each expression independently, resulting in a separate slice for each predicate and its complements. For high-cardinality columns, only the top 100 unique values by frequency will generate slices.
    snapshot QualityMonitorSnapshotArgs
    Configuration for monitoring snapshot tables.
    time_series QualityMonitorTimeSeriesArgs
    Configuration for monitoring timeseries tables.
    warehouse_id str
    Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used.
    assetsDir String
    The directory to store the monitoring assets (e.g., dashboards and metric tables).
    outputSchemaName String
    Schema where output metric tables are created
    tableName String
    The full name of the table to attach the monitor to. It has the format {catalog}.{schema}.{tableName}.
    baselineTableName String
    Name of the baseline table from which drift metrics are computed. Columns in the monitored table should also be present in the baseline table.
    customMetrics List<Property Map>
    Custom metrics to compute on the monitored table. These can be aggregate metrics, derived metrics (from already computed aggregate metrics), or drift metrics (comparing metrics across time windows).
    dataClassificationConfig Property Map
    The data classification config for the monitor
    inferenceLog Property Map
    Configuration for the inference log monitor
    latestMonitorFailureMsg String
    notifications Property Map
    The notification settings for the monitor. Each supported optional block consists of a single string-array field named email_addresses containing the list of emails to notify.
    schedule Property Map
    The schedule for automatically updating and refreshing metric tables.
    skipBuiltinDashboard Boolean
    Whether to skip creating a default dashboard summarizing data quality metrics.
    slicingExprs List<String>
    List of column expressions to slice data with for targeted analysis. The data is grouped by each expression independently, resulting in a separate slice for each predicate and its complements. For high-cardinality columns, only the top 100 unique values by frequency will generate slices.
    snapshot Property Map
    Configuration for monitoring snapshot tables.
    timeSeries Property Map
    Configuration for monitoring timeseries tables.
    warehouseId String
    Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used.
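
    As a concrete illustration of several of the optional inputs documented above, the sketch below (TypeScript, with hypothetical table and email values) combines a snapshot monitor with a schedule, slicing expressions, and failure notifications:

    import * as databricks from "@pulumi/databricks";

    const scheduledMonitor = new databricks.QualityMonitor("scheduledMonitor", {
        tableName: "sandbox.things.bar",
        assetsDir: "/Shared/monitoring/bar",
        outputSchemaName: "sandbox.things",
        snapshot: {},
        // Refresh daily at 06:00 UTC (Quartz cron: sec min hour dom month dow).
        schedule: {
            quartzCronExpression: "0 0 6 * * ?",
            timezoneId: "UTC",
        },
        // Compute metrics separately for each slice of these column expressions.
        slicingExprs: ["region", "age > 18"],
        notifications: {
            onFailure: {
                emailAddresses: ["data-team@example.com"],
            },
        },
    });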

    Outputs

    All input properties are implicitly available as output properties. Additionally, the QualityMonitor resource produces the following output properties:

    DashboardId string
    The ID of the generated dashboard.
    DriftMetricsTableName string
    The full name of the drift metrics table. Format: catalog_name.schema_name.table_name.
    Id string
    The provider-assigned unique ID for this managed resource.
    MonitorVersion string
    The version of the monitor configuration (e.g., 1, 2, 3). If negative, the monitor may be corrupted.
    ProfileMetricsTableName string
    The full name of the profile metrics table. Format: catalog_name.schema_name.table_name.
    Status string
    Status of the Monitor
    DashboardId string
    The ID of the generated dashboard.
    DriftMetricsTableName string
    The full name of the drift metrics table. Format: catalog_name.schema_name.table_name.
    Id string
    The provider-assigned unique ID for this managed resource.
    MonitorVersion string
    The version of the monitor configuration (e.g., 1, 2, 3). If negative, the monitor may be corrupted.
    ProfileMetricsTableName string
    The full name of the profile metrics table. Format: catalog_name.schema_name.table_name.
    Status string
    Status of the Monitor
    dashboardId String
    The ID of the generated dashboard.
    driftMetricsTableName String
    The full name of the drift metrics table. Format: catalog_name.schema_name.table_name.
    id String
    The provider-assigned unique ID for this managed resource.
    monitorVersion String
    The version of the monitor configuration (e.g., 1, 2, 3). If negative, the monitor may be corrupted.
    profileMetricsTableName String
    The full name of the profile metrics table. Format: catalog_name.schema_name.table_name.
    status String
    Status of the Monitor
    dashboardId string
    The ID of the generated dashboard.
    driftMetricsTableName string
    The full name of the drift metrics table. Format: catalog_name.schema_name.table_name.
    id string
    The provider-assigned unique ID for this managed resource.
    monitorVersion string
    The version of the monitor configuration (e.g., 1, 2, 3). If negative, the monitor may be corrupted.
    profileMetricsTableName string
    The full name of the profile metrics table. Format: catalog_name.schema_name.table_name.
    status string
    Status of the Monitor
    dashboard_id str
    The ID of the generated dashboard.
    drift_metrics_table_name str
    The full name of the drift metrics table. Format: catalog_name.schema_name.table_name.
    id str
    The provider-assigned unique ID for this managed resource.
    monitor_version str
    The version of the monitor configuration (e.g., 1, 2, 3). If negative, the monitor may be corrupted.
    profile_metrics_table_name str
    The full name of the profile metrics table. Format: catalog_name.schema_name.table_name.
    status str
    Status of the Monitor
    dashboardId String
    The ID of the generated dashboard.
    driftMetricsTableName String
    The full name of the drift metrics table. Format: catalog_name.schema_name.table_name.
    id String
    The provider-assigned unique ID for this managed resource.
    monitorVersion String
    The version of the monitor configuration (e.g., 1, 2, 3). If negative, the monitor may be corrupted.
    profileMetricsTableName String
    The full name of the profile metrics table. Format: catalog_name.schema_name.table_name.
    status String
    Status of the Monitor
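
    For example, the generated dashboard and metric table names can be exported as stack outputs. A minimal TypeScript sketch (hypothetical table names; any QualityMonitor works the same way):

    import * as databricks from "@pulumi/databricks";

    const monitor = new databricks.QualityMonitor("exampleMonitor", {
        tableName: "sandbox.things.bar",
        assetsDir: "/Shared/monitoring/bar",
        outputSchemaName: "sandbox.things",
        snapshot: {},
    });

    // Each of these is an Output<string>, resolved once the monitor exists.
    export const dashboardId = monitor.dashboardId;
    export const profileMetricsTable = monitor.profileMetricsTableName;
    export const driftMetricsTable = monitor.driftMetricsTableName;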

    Look up Existing QualityMonitor Resource

    Get an existing QualityMonitor resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: QualityMonitorState, opts?: CustomResourceOptions): QualityMonitor
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            assets_dir: Optional[str] = None,
            baseline_table_name: Optional[str] = None,
            custom_metrics: Optional[Sequence[QualityMonitorCustomMetricArgs]] = None,
            dashboard_id: Optional[str] = None,
            data_classification_config: Optional[QualityMonitorDataClassificationConfigArgs] = None,
            drift_metrics_table_name: Optional[str] = None,
            inference_log: Optional[QualityMonitorInferenceLogArgs] = None,
            latest_monitor_failure_msg: Optional[str] = None,
            monitor_version: Optional[str] = None,
            notifications: Optional[QualityMonitorNotificationsArgs] = None,
            output_schema_name: Optional[str] = None,
            profile_metrics_table_name: Optional[str] = None,
            schedule: Optional[QualityMonitorScheduleArgs] = None,
            skip_builtin_dashboard: Optional[bool] = None,
            slicing_exprs: Optional[Sequence[str]] = None,
            snapshot: Optional[QualityMonitorSnapshotArgs] = None,
            status: Optional[str] = None,
            table_name: Optional[str] = None,
            time_series: Optional[QualityMonitorTimeSeriesArgs] = None,
            warehouse_id: Optional[str] = None) -> QualityMonitor
    func GetQualityMonitor(ctx *Context, name string, id IDInput, state *QualityMonitorState, opts ...ResourceOption) (*QualityMonitor, error)
    public static QualityMonitor Get(string name, Input<string> id, QualityMonitorState? state, CustomResourceOptions? opts = null)
    public static QualityMonitor get(String name, Output<String> id, QualityMonitorState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
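
    For example, in TypeScript (the ID string below is a placeholder for the provider-assigned ID of an existing monitor):

    import * as databricks from "@pulumi/databricks";

    // "monitor-id" is a placeholder; use the ID reported by the provider,
    // e.g. from `pulumi stack export` or a previous deployment.
    const existing = databricks.QualityMonitor.get("existingMonitor", "monitor-id");

    export const existingStatus = existing.status;
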
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to look up.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to look up.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to look up.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to look up.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to look up.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    AssetsDir string
    The directory to store the monitoring assets (e.g., dashboards and metric tables).
    BaselineTableName string
    Name of the baseline table from which drift metrics are computed. Columns in the monitored table should also be present in the baseline table.
    CustomMetrics List<QualityMonitorCustomMetric>
    Custom metrics to compute on the monitored table. These can be aggregate metrics, derived metrics (from already computed aggregate metrics), or drift metrics (comparing metrics across time windows).
    DashboardId string
    The ID of the generated dashboard.
    DataClassificationConfig QualityMonitorDataClassificationConfig
    The data classification config for the monitor
    DriftMetricsTableName string
    The full name of the drift metrics table. Format: catalog_name.schema_name.table_name.
    InferenceLog QualityMonitorInferenceLog
    Configuration for the inference log monitor
    LatestMonitorFailureMsg string
    MonitorVersion string
    The version of the monitor configuration (e.g., 1, 2, 3). If negative, the monitor may be corrupted.
    Notifications QualityMonitorNotifications
    The notification settings for the monitor. Each supported optional block consists of a single string-array field named email_addresses containing the list of emails to notify.
    OutputSchemaName string
    Schema where output metric tables are created
    ProfileMetricsTableName string
    The full name of the profile metrics table. Format: catalog_name.schema_name.table_name.
    Schedule QualityMonitorSchedule
    The schedule for automatically updating and refreshing metric tables.
    SkipBuiltinDashboard bool
    Whether to skip creating a default dashboard summarizing data quality metrics.
    SlicingExprs List<string>
    List of column expressions to slice data with for targeted analysis. The data is grouped by each expression independently, resulting in a separate slice for each predicate and its complements. For high-cardinality columns, only the top 100 unique values by frequency will generate slices.
    Snapshot QualityMonitorSnapshot
    Configuration for monitoring snapshot tables.
    Status string
    Status of the Monitor
    TableName string
    The full name of the table to attach the monitor to. It has the format {catalog}.{schema}.{tableName}.
    TimeSeries QualityMonitorTimeSeries
    Configuration for monitoring timeseries tables.
    WarehouseId string
    Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used.
    AssetsDir string
    The directory to store the monitoring assets (e.g., dashboards and metric tables).
    BaselineTableName string
    Name of the baseline table from which drift metrics are computed. Columns in the monitored table should also be present in the baseline table.
    CustomMetrics []QualityMonitorCustomMetricArgs
    Custom metrics to compute on the monitored table. These can be aggregate metrics, derived metrics (from already computed aggregate metrics), or drift metrics (comparing metrics across time windows).
    DashboardId string
    The ID of the generated dashboard.
    DataClassificationConfig QualityMonitorDataClassificationConfigArgs
    The data classification config for the monitor
    DriftMetricsTableName string
    The full name of the drift metrics table. Format: catalog_name.schema_name.table_name.
    InferenceLog QualityMonitorInferenceLogArgs
    Configuration for the inference log monitor
    LatestMonitorFailureMsg string
    MonitorVersion string
    The version of the monitor configuration (e.g., 1, 2, 3). If negative, the monitor may be corrupted.
    Notifications QualityMonitorNotificationsArgs
    The notification settings for the monitor. Each supported optional block consists of a single string-array field named email_addresses containing the list of emails to notify.
    OutputSchemaName string
    Schema where output metric tables are created
    ProfileMetricsTableName string
    The full name of the profile metrics table. Format: catalog_name.schema_name.table_name.
    Schedule QualityMonitorScheduleArgs
    The schedule for automatically updating and refreshing metric tables.
    SkipBuiltinDashboard bool
    Whether to skip creating a default dashboard summarizing data quality metrics.
    SlicingExprs []string
    List of column expressions to slice data with for targeted analysis. The data is grouped by each expression independently, resulting in a separate slice for each predicate and its complements. For high-cardinality columns, only the top 100 unique values by frequency will generate slices.
    Snapshot QualityMonitorSnapshotArgs
    Configuration for monitoring snapshot tables.
    Status string
    Status of the Monitor
    TableName string
    The full name of the table to attach the monitor to. It has the format {catalog}.{schema}.{tableName}.
    TimeSeries QualityMonitorTimeSeriesArgs
    Configuration for monitoring timeseries tables.
    WarehouseId string
    Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used.
    assetsDir String
    The directory to store the monitoring assets (e.g., dashboards and metric tables).
    baselineTableName String
    Name of the baseline table from which drift metrics are computed. Columns in the monitored table should also be present in the baseline table.
    customMetrics List<QualityMonitorCustomMetric>
    Custom metrics to compute on the monitored table. These can be aggregate metrics, derived metrics (from already computed aggregate metrics), or drift metrics (comparing metrics across time windows).
    dashboardId String
    The ID of the generated dashboard.
    dataClassificationConfig QualityMonitorDataClassificationConfig
    The data classification config for the monitor
    driftMetricsTableName String
    The full name of the drift metrics table. Format: catalog_name.schema_name.table_name.
    inferenceLog QualityMonitorInferenceLog
    Configuration for the inference log monitor
    latestMonitorFailureMsg String
    monitorVersion String
    The version of the monitor configuration (e.g., 1, 2, 3). If negative, the monitor may be corrupted.
    notifications QualityMonitorNotifications
    The notification settings for the monitor. Each supported optional block consists of a single string-array field named email_addresses containing the list of emails to notify.
    outputSchemaName String
    Schema where output metric tables are created
    profileMetricsTableName String
    The full name of the profile metrics table. Format: catalog_name.schema_name.table_name.
    schedule QualityMonitorSchedule
    The schedule for automatically updating and refreshing metric tables.
    skipBuiltinDashboard Boolean
    Whether to skip creating a default dashboard summarizing data quality metrics.
    slicingExprs List<String>
    List of column expressions to slice data with for targeted analysis. The data is grouped by each expression independently, resulting in a separate slice for each predicate and its complements. For high-cardinality columns, only the top 100 unique values by frequency will generate slices.
    snapshot QualityMonitorSnapshot
    Configuration for monitoring snapshot tables.
    status String
    Status of the Monitor
    tableName String
    The full name of the table to attach the monitor to. It has the format {catalog}.{schema}.{tableName}.
    timeSeries QualityMonitorTimeSeries
    Configuration for monitoring timeseries tables.
    warehouseId String
    Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used.
    assetsDir string
    The directory in which to store the monitoring assets (e.g., the dashboard and metric tables).
    baselineTableName string
    Name of the baseline table from which drift metrics are computed. Columns in the monitored table should also be present in the baseline table.
    customMetrics QualityMonitorCustomMetric[]
    Custom metrics to compute on the monitored table. These can be aggregate metrics, derived metrics (from already computed aggregate metrics), or drift metrics (comparing metrics across time windows).
    dashboardId string
    The ID of the generated dashboard.
    dataClassificationConfig QualityMonitorDataClassificationConfig
    The data classification config for the monitor
    driftMetricsTableName string
    The full name of the drift metrics table. Format: catalog_name.schema_name.table_name.
    inferenceLog QualityMonitorInferenceLog
    Configuration for the inference log monitor
    latestMonitorFailureMsg string
    The latest failure message of the monitor, if any.
    monitorVersion string
    The version of the monitor config (e.g., 1, 2, 3). If negative, the monitor may be corrupted.
    notifications QualityMonitorNotifications
    The notification settings for the monitor. The following optional blocks are supported, each consisting of a single string-array field named email_addresses containing the list of emails to notify:
    outputSchemaName string
    Schema where output metric tables are created
    profileMetricsTableName string
    The full name of the profile metrics table. Format: catalog_name.schema_name.table_name.
    schedule QualityMonitorSchedule
    The schedule for automatically updating and refreshing metric tables. This block consists of the following fields:
    skipBuiltinDashboard boolean
    Whether to skip creating a default dashboard summarizing data quality metrics.
    slicingExprs string[]
    List of column expressions to slice data with for targeted analysis. The data is grouped by each expression independently, resulting in a separate slice for each predicate and its complements. For high-cardinality columns, only the top 100 unique values by frequency will generate slices.
    snapshot QualityMonitorSnapshot
    Configuration for monitoring snapshot tables.
    status string
    Status of the monitor.
    tableName string
    The full name of the table to attach the monitor to. It is of the format {catalog}.{schema}.{tableName}.
    timeSeries QualityMonitorTimeSeries
    Configuration for monitoring timeseries tables.
    warehouseId string
    Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used.
    assets_dir str
    The directory in which to store the monitoring assets (e.g., the dashboard and metric tables).
    baseline_table_name str
    Name of the baseline table from which drift metrics are computed. Columns in the monitored table should also be present in the baseline table.
    custom_metrics Sequence[QualityMonitorCustomMetricArgs]
    Custom metrics to compute on the monitored table. These can be aggregate metrics, derived metrics (from already computed aggregate metrics), or drift metrics (comparing metrics across time windows).
    dashboard_id str
    The ID of the generated dashboard.
    data_classification_config QualityMonitorDataClassificationConfigArgs
    The data classification config for the monitor
    drift_metrics_table_name str
    The full name of the drift metrics table. Format: catalog_name.schema_name.table_name.
    inference_log QualityMonitorInferenceLogArgs
    Configuration for the inference log monitor
    latest_monitor_failure_msg str
    The latest failure message of the monitor, if any.
    monitor_version str
    The version of the monitor config (e.g., 1, 2, 3). If negative, the monitor may be corrupted.
    notifications QualityMonitorNotificationsArgs
    The notification settings for the monitor. The following optional blocks are supported, each consisting of a single string-array field named email_addresses containing the list of emails to notify:
    output_schema_name str
    Schema where output metric tables are created
    profile_metrics_table_name str
    The full name of the profile metrics table. Format: catalog_name.schema_name.table_name.
    schedule QualityMonitorScheduleArgs
    The schedule for automatically updating and refreshing metric tables. This block consists of the following fields:
    skip_builtin_dashboard bool
    Whether to skip creating a default dashboard summarizing data quality metrics.
    slicing_exprs Sequence[str]
    List of column expressions to slice data with for targeted analysis. The data is grouped by each expression independently, resulting in a separate slice for each predicate and its complements. For high-cardinality columns, only the top 100 unique values by frequency will generate slices.
    snapshot QualityMonitorSnapshotArgs
    Configuration for monitoring snapshot tables.
    status str
    Status of the monitor.
    table_name str
    The full name of the table to attach the monitor to. It is of the format {catalog}.{schema}.{tableName}.
    time_series QualityMonitorTimeSeriesArgs
    Configuration for monitoring timeseries tables.
    warehouse_id str
    Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used.
    assetsDir String
    The directory in which to store the monitoring assets (e.g., the dashboard and metric tables).
    baselineTableName String
    Name of the baseline table from which drift metrics are computed. Columns in the monitored table should also be present in the baseline table.
    customMetrics List<Property Map>
    Custom metrics to compute on the monitored table. These can be aggregate metrics, derived metrics (from already computed aggregate metrics), or drift metrics (comparing metrics across time windows).
    dashboardId String
    The ID of the generated dashboard.
    dataClassificationConfig Property Map
    The data classification config for the monitor
    driftMetricsTableName String
    The full name of the drift metrics table. Format: catalog_name.schema_name.table_name.
    inferenceLog Property Map
    Configuration for the inference log monitor
    latestMonitorFailureMsg String
    The latest failure message of the monitor, if any.
    monitorVersion String
    The version of the monitor config (e.g., 1, 2, 3). If negative, the monitor may be corrupted.
    notifications Property Map
    The notification settings for the monitor. The following optional blocks are supported, each consisting of a single string-array field named email_addresses containing the list of emails to notify:
    outputSchemaName String
    Schema where output metric tables are created
    profileMetricsTableName String
    The full name of the profile metrics table. Format: catalog_name.schema_name.table_name.
    schedule Property Map
    The schedule for automatically updating and refreshing metric tables. This block consists of the following fields:
    skipBuiltinDashboard Boolean
    Whether to skip creating a default dashboard summarizing data quality metrics.
    slicingExprs List<String>
    List of column expressions to slice data with for targeted analysis. The data is grouped by each expression independently, resulting in a separate slice for each predicate and its complements. For high-cardinality columns, only the top 100 unique values by frequency will generate slices.
    snapshot Property Map
    Configuration for monitoring snapshot tables.
    status String
    Status of the monitor.
    tableName String
    The full name of the table to attach the monitor to. It is of the format {catalog}.{schema}.{tableName}.
    timeSeries Property Map
    Configuration for monitoring timeseries tables.
    warehouseId String
    Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used.
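
    As a complement to the time-series example at the top of this page, a snapshot monitor needs only a table name, an assets directory, an output schema, and an empty snapshot block (the snapshot type exposes no fields of its own, which is why it has no supporting-type section below). A minimal sketch, assuming an existing Unity Catalog table sandbox.things.bar; all names are illustrative:

    package generated_program;

    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.databricks.QualityMonitor;
    import com.pulumi.databricks.QualityMonitorArgs;
    import com.pulumi.databricks.inputs.QualityMonitorSnapshotArgs;

    public class SnapshotMonitorExample {
        public static void main(String[] args) {
            Pulumi.run(SnapshotMonitorExample::stack);
        }

        public static void stack(Context ctx) {
            // Attach a snapshot monitor to an existing Unity Catalog table;
            // the empty snapshot block selects the monitor type.
            var monitor = new QualityMonitor("testSnapshotMonitor", QualityMonitorArgs.builder()
                .tableName("sandbox.things.bar")                                   // {catalog}.{schema}.{table}
                .assetsDir("/Shared/provider-test/databricks_quality_monitoring/bar")
                .outputSchemaName("sandbox.things")                                // schema for the metric tables
                .snapshot(QualityMonitorSnapshotArgs.builder().build())
                .build());
        }
    }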

    Supporting Types

    QualityMonitorCustomMetric, QualityMonitorCustomMetricArgs

    Definition string
    Create metric definition.
    InputColumns List<string>
    Columns on the monitored table to apply the custom metrics to.
    Name string
    Name of the custom metric.
    OutputDataType string
    The output type of the custom metric.
    Type string
    The type of the custom metric.
    Definition string
    Create metric definition.
    InputColumns []string
    Columns on the monitored table to apply the custom metrics to.
    Name string
    Name of the custom metric.
    OutputDataType string
    The output type of the custom metric.
    Type string
    The type of the custom metric.
    definition String
    Create metric definition.
    inputColumns List<String>
    Columns on the monitored table to apply the custom metrics to.
    name String
    Name of the custom metric.
    outputDataType String
    The output type of the custom metric.
    type String
    The type of the custom metric.
    definition string
    Create metric definition.
    inputColumns string[]
    Columns on the monitored table to apply the custom metrics to.
    name string
    Name of the custom metric.
    outputDataType string
    The output type of the custom metric.
    type string
    The type of the custom metric.
    definition str
    Create metric definition.
    input_columns Sequence[str]
    Columns on the monitored table to apply the custom metrics to.
    name str
    Name of the custom metric.
    output_data_type str
    The output type of the custom metric.
    type str
    The type of the custom metric.
    definition String
    Create metric definition.
    inputColumns List<String>
    Columns on the monitored table to apply the custom metrics to.
    name String
    Name of the custom metric.
    outputDataType String
    The output type of the custom metric.
    type String
    The type of the custom metric.
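
    A minimal sketch of one aggregate custom metric, mirroring the fields above. The {{input_column}} template and the CUSTOM_METRIC_TYPE_AGGREGATE value come from the underlying Databricks monitoring API; the column name f1 and the metric name are illustrative:

    package generated_program;

    import com.pulumi.databricks.inputs.QualityMonitorCustomMetricArgs;

    public class CustomMetricExample {
        // Builds the args for a single aggregate metric; pass the result to
        // QualityMonitorArgs.builder().customMetrics(...).
        static QualityMonitorCustomMetricArgs avgLogMetric() {
            return QualityMonitorCustomMetricArgs.builder()
                .name("avg_log")                                    // metric column name in the output tables
                .definition("avg(log(abs({{input_column}}) + 1))")  // SQL template applied per input column
                .inputColumns("f1")                                 // monitored-table columns to compute over
                .outputDataType("double")                           // output type of the metric
                .type("CUSTOM_METRIC_TYPE_AGGREGATE")               // aggregate, derived, or drift
                .build();
        }
    }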

    QualityMonitorDataClassificationConfig, QualityMonitorDataClassificationConfigArgs

    Enabled bool
    Enabled bool
    enabled Boolean
    enabled boolean
    enabled bool
    enabled Boolean
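
    The block has a single boolean switch. A minimal sketch of enabling it:

    package generated_program;

    import com.pulumi.databricks.inputs.QualityMonitorDataClassificationConfigArgs;

    public class DataClassificationExample {
        // Pass the result to QualityMonitorArgs.builder().dataClassificationConfig(...).
        static QualityMonitorDataClassificationConfigArgs classification() {
            return QualityMonitorDataClassificationConfigArgs.builder()
                .enabled(true) // enabled is the block's only field
                .build();
        }
    }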

    QualityMonitorInferenceLog, QualityMonitorInferenceLogArgs

    Granularities List<string>
    List of granularities to use when aggregating data into time windows based on their timestamp.
    ModelIdCol string
    Column of the model ID or version
    PredictionCol string
    Column of the model prediction
    ProblemType string
    Problem type the model aims to solve. Either PROBLEM_TYPE_CLASSIFICATION or PROBLEM_TYPE_REGRESSION
    TimestampCol string
    Column of the timestamp of predictions
    LabelCol string
    Column of the model label
    PredictionProbaCol string
    Column of the model prediction probabilities
    Granularities []string
    List of granularities to use when aggregating data into time windows based on their timestamp.
    ModelIdCol string
    Column of the model ID or version
    PredictionCol string
    Column of the model prediction
    ProblemType string
    Problem type the model aims to solve. Either PROBLEM_TYPE_CLASSIFICATION or PROBLEM_TYPE_REGRESSION
    TimestampCol string
    Column of the timestamp of predictions
    LabelCol string
    Column of the model label
    PredictionProbaCol string
    Column of the model prediction probabilities
    granularities List<String>
    List of granularities to use when aggregating data into time windows based on their timestamp.
    modelIdCol String
    Column of the model ID or version
    predictionCol String
    Column of the model prediction
    problemType String
    Problem type the model aims to solve. Either PROBLEM_TYPE_CLASSIFICATION or PROBLEM_TYPE_REGRESSION
    timestampCol String
    Column of the timestamp of predictions
    labelCol String
    Column of the model label
    predictionProbaCol String
    Column of the model prediction probabilities
    granularities string[]
    List of granularities to use when aggregating data into time windows based on their timestamp.
    modelIdCol string
    Column of the model ID or version
    predictionCol string
    Column of the model prediction
    problemType string
    Problem type the model aims to solve. Either PROBLEM_TYPE_CLASSIFICATION or PROBLEM_TYPE_REGRESSION
    timestampCol string
    Column of the timestamp of predictions
    labelCol string
    Column of the model label
    predictionProbaCol string
    Column of the model prediction probabilities
    granularities Sequence[str]
    List of granularities to use when aggregating data into time windows based on their timestamp.
    model_id_col str
    Column of the model ID or version
    prediction_col str
    Column of the model prediction
    problem_type str
    Problem type the model aims to solve. Either PROBLEM_TYPE_CLASSIFICATION or PROBLEM_TYPE_REGRESSION
    timestamp_col str
    Column of the timestamp of predictions
    label_col str
    Column of the model label
    prediction_proba_col str
    Column of the model prediction probabilities
    granularities List<String>
    List of granularities to use when aggregating data into time windows based on their timestamp.
    modelIdCol String
    Column of the model ID or version
    predictionCol String
    Column of the model prediction
    problemType String
    Problem type the model aims to solve. Either PROBLEM_TYPE_CLASSIFICATION or PROBLEM_TYPE_REGRESSION
    timestampCol String
    Column of the timestamp of predictions
    labelCol String
    Column of the model label
    predictionProbaCol String
    Column of the model prediction probabilities
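
    A minimal sketch of the args for a classification inference-log monitor. The problem-type constants appear above; the "1 day" granularity follows the window syntax of the underlying Databricks monitoring API, and all column names are illustrative:

    package generated_program;

    import com.pulumi.databricks.inputs.QualityMonitorInferenceLogArgs;

    public class InferenceLogExample {
        // Pass the result to QualityMonitorArgs.builder().inferenceLog(...).
        static QualityMonitorInferenceLogArgs inferenceLog() {
            return QualityMonitorInferenceLogArgs.builder()
                .granularities("1 day")                         // aggregate predictions into daily windows
                .timestampCol("ts")                             // when each prediction was made
                .modelIdCol("model_version")                    // which model produced the row
                .predictionCol("prediction")                    // the model output
                .problemType("PROBLEM_TYPE_CLASSIFICATION")     // or PROBLEM_TYPE_REGRESSION
                .labelCol("label")                              // optional ground-truth labels
                .predictionProbaCol("prediction_proba")         // optional class probabilities
                .build();
        }
    }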

    QualityMonitorNotifications, QualityMonitorNotificationsArgs

    OnFailure QualityMonitorNotificationsOnFailure
    Who to send notifications to on monitor failure.
    OnNewClassificationTagDetected QualityMonitorNotificationsOnNewClassificationTagDetected
    Who to send notifications to when new data classification tags are detected.
    OnFailure QualityMonitorNotificationsOnFailure
    Who to send notifications to on monitor failure.
    OnNewClassificationTagDetected QualityMonitorNotificationsOnNewClassificationTagDetected
    Who to send notifications to when new data classification tags are detected.
    onFailure QualityMonitorNotificationsOnFailure
    Who to send notifications to on monitor failure.
    onNewClassificationTagDetected QualityMonitorNotificationsOnNewClassificationTagDetected
    Who to send notifications to when new data classification tags are detected.
    onFailure QualityMonitorNotificationsOnFailure
    Who to send notifications to on monitor failure.
    onNewClassificationTagDetected QualityMonitorNotificationsOnNewClassificationTagDetected
    Who to send notifications to when new data classification tags are detected.
    on_failure QualityMonitorNotificationsOnFailure
    Who to send notifications to on monitor failure.
    on_new_classification_tag_detected QualityMonitorNotificationsOnNewClassificationTagDetected
    Who to send notifications to when new data classification tags are detected.
    onFailure Property Map
    Who to send notifications to on monitor failure.
    onNewClassificationTagDetected Property Map
    Who to send notifications to when new data classification tags are detected.

    QualityMonitorNotificationsOnFailure, QualityMonitorNotificationsOnFailureArgs

    EmailAddresses List<string>
    emailAddresses List<String>
    email_addresses Sequence[str]
    emailAddresses List<String>

    QualityMonitorNotificationsOnNewClassificationTagDetected, QualityMonitorNotificationsOnNewClassificationTagDetectedArgs

    EmailAddresses List<string>
    emailAddresses List<String>
    email_addresses Sequence[str]
    emailAddresses List<String>
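
    A minimal sketch wiring both notification blocks together; the addresses are illustrative:

    package generated_program;

    import com.pulumi.databricks.inputs.QualityMonitorNotificationsArgs;
    import com.pulumi.databricks.inputs.QualityMonitorNotificationsOnFailureArgs;
    import com.pulumi.databricks.inputs.QualityMonitorNotificationsOnNewClassificationTagDetectedArgs;

    public class NotificationsExample {
        // Pass the result to QualityMonitorArgs.builder().notifications(...).
        static QualityMonitorNotificationsArgs notifications() {
            return QualityMonitorNotificationsArgs.builder()
                .onFailure(QualityMonitorNotificationsOnFailureArgs.builder()
                    .emailAddresses("oncall@example.com")       // notified when a refresh fails
                    .build())
                .onNewClassificationTagDetected(QualityMonitorNotificationsOnNewClassificationTagDetectedArgs.builder()
                    .emailAddresses("governance@example.com")   // notified on new classification tags
                    .build())
                .build();
        }
    }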

    QualityMonitorSchedule, QualityMonitorScheduleArgs

    QuartzCronExpression string
    String expression that determines when to run the monitor. See the Quartz documentation for examples.
    TimezoneId string
    String with the timezone ID (e.g., PST) in which to evaluate the Quartz expression.
    PauseStatus string
    Optional string field that indicates whether a schedule is paused (PAUSED) or not (UNPAUSED).
    QuartzCronExpression string
    String expression that determines when to run the monitor. See the Quartz documentation for examples.
    TimezoneId string
    String with the timezone ID (e.g., PST) in which to evaluate the Quartz expression.
    PauseStatus string
    Optional string field that indicates whether a schedule is paused (PAUSED) or not (UNPAUSED).
    quartzCronExpression String
    String expression that determines when to run the monitor. See the Quartz documentation for examples.
    timezoneId String
    String with the timezone ID (e.g., PST) in which to evaluate the Quartz expression.
    pauseStatus String
    Optional string field that indicates whether a schedule is paused (PAUSED) or not (UNPAUSED).
    quartzCronExpression string
    String expression that determines when to run the monitor. See the Quartz documentation for examples.
    timezoneId string
    String with the timezone ID (e.g., PST) in which to evaluate the Quartz expression.
    pauseStatus string
    Optional string field that indicates whether a schedule is paused (PAUSED) or not (UNPAUSED).
    quartz_cron_expression str
    String expression that determines when to run the monitor. See the Quartz documentation for examples.
    timezone_id str
    String with the timezone ID (e.g., PST) in which to evaluate the Quartz expression.
    pause_status str
    Optional string field that indicates whether a schedule is paused (PAUSED) or not (UNPAUSED).
    quartzCronExpression String
    String expression that determines when to run the monitor. See the Quartz documentation for examples.
    timezoneId String
    String with the timezone ID (e.g., PST) in which to evaluate the Quartz expression.
    pauseStatus String
    Optional string field that indicates whether a schedule is paused (PAUSED) or not (UNPAUSED).
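
    A minimal sketch of a daily refresh schedule. "0 0 12 * * ?" is standard Quartz syntax (seconds, minutes, hours, day-of-month, month, day-of-week), here firing at 12:00 every day:

    package generated_program;

    import com.pulumi.databricks.inputs.QualityMonitorScheduleArgs;

    public class ScheduleExample {
        // Pass the result to QualityMonitorArgs.builder().schedule(...).
        static QualityMonitorScheduleArgs dailyAtNoon() {
            return QualityMonitorScheduleArgs.builder()
                .quartzCronExpression("0 0 12 * * ?")   // every day at 12:00
                .timezoneId("UTC")                      // timezone in which the cron is evaluated
                // .pauseStatus("PAUSED")               // optionally pause the schedule
                .build();
        }
    }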

    QualityMonitorTimeSeries, QualityMonitorTimeSeriesArgs

    Granularities List<string>
    List of granularities to use when aggregating data into time windows based on their timestamp.
    TimestampCol string
    Column of the timestamp of predictions
    Granularities []string
    List of granularities to use when aggregating data into time windows based on their timestamp.
    TimestampCol string
    Column of the timestamp of predictions
    granularities List<String>
    List of granularities to use when aggregating data into time windows based on their timestamp.
    timestampCol String
    Column of the timestamp of predictions
    granularities string[]
    List of granularities to use when aggregating data into time windows based on their timestamp.
    timestampCol string
    Column of the timestamp of predictions
    granularities Sequence[str]
    List of granularities to use when aggregating data into time windows based on their timestamp.
    timestamp_col str
    Column of the timestamp of predictions
    granularities List<String>
    List of granularities to use when aggregating data into time windows based on their timestamp.
    timestampCol String
    Column of the timestamp of predictions
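
    A minimal sketch of the time-series args on their own; granularity strings use the same window syntax as the inference log, and the column name is illustrative:

    package generated_program;

    import com.pulumi.databricks.inputs.QualityMonitorTimeSeriesArgs;

    public class TimeSeriesExample {
        // Pass the result to QualityMonitorArgs.builder().timeSeries(...).
        static QualityMonitorTimeSeriesArgs timeSeries() {
            return QualityMonitorTimeSeriesArgs.builder()
                .granularities("1 hour", "1 day")   // one set of metric windows per entry
                .timestampCol("event_ts")           // event timestamp to aggregate by
                .build();
        }
    }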

    Package Details

    Repository
    pulumi/pulumi-databricks
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the databricks Terraform Provider.