aiven.ServiceIntegration
Explore with Pulumi AI
Creates and manages an Aiven service integration.
You can set up an integration between two Aiven services or an Aiven service and an external service. For example, you can send metrics from a Kafka service to an M3DB service, send metrics from an M3DB service to a Grafana service to show dashboards, and send logs from any service to OpenSearch.
Service integrations are not supported for services running on hobbyist plans.
Example Usage
import * as pulumi from "@pulumi/pulumi";
import * as aiven from "@pulumi/aiven";
// Send metrics from a Kafka service to an M3DB service.
// NOTE(review): `exampleProject`, `exampleKafka`, and `exampleM3db` are
// resources assumed to be defined elsewhere in the program.
const exampleIntegration = new aiven.ServiceIntegration("example_integration", {
project: exampleProject.project,
integrationType: "metrics",
sourceServiceName: exampleKafka.serviceName,
destinationServiceName: exampleM3db.serviceName,
});
import pulumi
import pulumi_aiven as aiven

# Send metrics from a Kafka service to an M3DB service.
# NOTE(review): `example_project`, `example_kafka`, and `example_m3db` are
# resources assumed to be defined elsewhere in the program.
example_integration = aiven.ServiceIntegration("example_integration",
project=example_project["project"],
integration_type="metrics",
source_service_name=example_kafka["serviceName"],
destination_service_name=example_m3db["serviceName"])
package main

import (
"github.com/pulumi/pulumi-aiven/sdk/v6/go/aiven"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
// Send metrics from a Kafka service to an M3DB service.
// NOTE(review): exampleProject, exampleKafka, and exampleM3db are
// resources assumed to be defined elsewhere in the program.
_, err := aiven.NewServiceIntegration(ctx, "example_integration", &aiven.ServiceIntegrationArgs{
Project: pulumi.Any(exampleProject.Project),
IntegrationType: pulumi.String("metrics"),
SourceServiceName: pulumi.Any(exampleKafka.ServiceName),
DestinationServiceName: pulumi.Any(exampleM3db.ServiceName),
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aiven = Pulumi.Aiven;

return await Deployment.RunAsync(() =>
{
// Send metrics from a Kafka service to an M3DB service.
// NOTE(review): exampleProject, exampleKafka, and exampleM3db are
// resources assumed to be defined elsewhere in the program.
var exampleIntegration = new Aiven.ServiceIntegration("example_integration", new()
{
Project = exampleProject.Project,
IntegrationType = "metrics",
SourceServiceName = exampleKafka.ServiceName,
DestinationServiceName = exampleM3db.ServiceName,
});
});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aiven.ServiceIntegration;
import com.pulumi.aiven.ServiceIntegrationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}

public static void stack(Context ctx) {
// Send metrics from a Kafka service to an M3DB service.
// NOTE(review): exampleProject, exampleKafka, and exampleM3db are
// resources assumed to be defined elsewhere in the program.
var exampleIntegration = new ServiceIntegration("exampleIntegration", ServiceIntegrationArgs.builder()
.project(exampleProject.project())
.integrationType("metrics")
.sourceServiceName(exampleKafka.serviceName())
.destinationServiceName(exampleM3db.serviceName())
.build());
}
}
resources:
# Send metrics from a Kafka service to an M3DB service.
# NOTE(review): ${exampleProject}, ${exampleKafka}, and ${exampleM3db} refer
# to resources assumed to be defined elsewhere in the stack.
exampleIntegration:
type: aiven:ServiceIntegration
name: example_integration
properties:
project: ${exampleProject.project}
integrationType: metrics
sourceServiceName: ${exampleKafka.serviceName}
destinationServiceName: ${exampleM3db.serviceName}
Create ServiceIntegration Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new ServiceIntegration(name: string, args: ServiceIntegrationArgs, opts?: CustomResourceOptions);
@overload
def ServiceIntegration(resource_name: str,
args: ServiceIntegrationArgs,
opts: Optional[ResourceOptions] = None)
@overload
def ServiceIntegration(resource_name: str,
opts: Optional[ResourceOptions] = None,
integration_type: Optional[str] = None,
project: Optional[str] = None,
external_opensearch_logs_user_config: Optional[ServiceIntegrationExternalOpensearchLogsUserConfigArgs] = None,
kafka_logs_user_config: Optional[ServiceIntegrationKafkaLogsUserConfigArgs] = None,
destination_service_name: Optional[str] = None,
external_aws_cloudwatch_logs_user_config: Optional[ServiceIntegrationExternalAwsCloudwatchLogsUserConfigArgs] = None,
external_aws_cloudwatch_metrics_user_config: Optional[ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigArgs] = None,
external_elasticsearch_logs_user_config: Optional[ServiceIntegrationExternalElasticsearchLogsUserConfigArgs] = None,
clickhouse_kafka_user_config: Optional[ServiceIntegrationClickhouseKafkaUserConfigArgs] = None,
datadog_user_config: Optional[ServiceIntegrationDatadogUserConfigArgs] = None,
kafka_connect_user_config: Optional[ServiceIntegrationKafkaConnectUserConfigArgs] = None,
destination_endpoint_id: Optional[str] = None,
kafka_mirrormaker_user_config: Optional[ServiceIntegrationKafkaMirrormakerUserConfigArgs] = None,
logs_user_config: Optional[ServiceIntegrationLogsUserConfigArgs] = None,
metrics_user_config: Optional[ServiceIntegrationMetricsUserConfigArgs] = None,
clickhouse_postgresql_user_config: Optional[ServiceIntegrationClickhousePostgresqlUserConfigArgs] = None,
prometheus_user_config: Optional[ServiceIntegrationPrometheusUserConfigArgs] = None,
source_endpoint_id: Optional[str] = None,
source_service_name: Optional[str] = None)
func NewServiceIntegration(ctx *Context, name string, args ServiceIntegrationArgs, opts ...ResourceOption) (*ServiceIntegration, error)
public ServiceIntegration(string name, ServiceIntegrationArgs args, CustomResourceOptions? opts = null)
public ServiceIntegration(String name, ServiceIntegrationArgs args)
public ServiceIntegration(String name, ServiceIntegrationArgs args, CustomResourceOptions options)
type: aiven:ServiceIntegration
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args ServiceIntegrationArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args ServiceIntegrationArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args ServiceIntegrationArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args ServiceIntegrationArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args ServiceIntegrationArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var serviceIntegrationResource = new Aiven.ServiceIntegration("serviceIntegrationResource", new()
{
IntegrationType = "string",
Project = "string",
ExternalOpensearchLogsUserConfig = new Aiven.Inputs.ServiceIntegrationExternalOpensearchLogsUserConfigArgs
{
SelectedLogFields = new[]
{
"string",
},
},
KafkaLogsUserConfig = new Aiven.Inputs.ServiceIntegrationKafkaLogsUserConfigArgs
{
KafkaTopic = "string",
SelectedLogFields = new[]
{
"string",
},
},
DestinationServiceName = "string",
ExternalAwsCloudwatchLogsUserConfig = new Aiven.Inputs.ServiceIntegrationExternalAwsCloudwatchLogsUserConfigArgs
{
SelectedLogFields = new[]
{
"string",
},
},
ExternalAwsCloudwatchMetricsUserConfig = new Aiven.Inputs.ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigArgs
{
DroppedMetrics = new[]
{
new Aiven.Inputs.ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigDroppedMetricArgs
{
Field = "string",
Metric = "string",
},
},
ExtraMetrics = new[]
{
new Aiven.Inputs.ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigExtraMetricArgs
{
Field = "string",
Metric = "string",
},
},
},
ExternalElasticsearchLogsUserConfig = new Aiven.Inputs.ServiceIntegrationExternalElasticsearchLogsUserConfigArgs
{
SelectedLogFields = new[]
{
"string",
},
},
ClickhouseKafkaUserConfig = new Aiven.Inputs.ServiceIntegrationClickhouseKafkaUserConfigArgs
{
Tables = new[]
{
new Aiven.Inputs.ServiceIntegrationClickhouseKafkaUserConfigTableArgs
{
Name = "string",
Columns = new[]
{
new Aiven.Inputs.ServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs
{
Name = "string",
Type = "string",
},
},
DataFormat = "string",
Topics = new[]
{
new Aiven.Inputs.ServiceIntegrationClickhouseKafkaUserConfigTableTopicArgs
{
Name = "string",
},
},
GroupName = "string",
MaxBlockSize = 0,
AutoOffsetReset = "string",
MaxRowsPerMessage = 0,
HandleErrorMode = "string",
NumConsumers = 0,
PollMaxBatchSize = 0,
SkipBrokenMessages = 0,
DateTimeInputFormat = "string",
},
},
},
DatadogUserConfig = new Aiven.Inputs.ServiceIntegrationDatadogUserConfigArgs
{
DatadogDbmEnabled = false,
DatadogPgbouncerEnabled = false,
DatadogTags = new[]
{
new Aiven.Inputs.ServiceIntegrationDatadogUserConfigDatadogTagArgs
{
Tag = "string",
Comment = "string",
},
},
ExcludeConsumerGroups = new[]
{
"string",
},
ExcludeTopics = new[]
{
"string",
},
IncludeConsumerGroups = new[]
{
"string",
},
IncludeTopics = new[]
{
"string",
},
KafkaCustomMetrics = new[]
{
"string",
},
MaxJmxMetrics = 0,
MirrormakerCustomMetrics = new[]
{
"string",
},
Opensearch = new Aiven.Inputs.ServiceIntegrationDatadogUserConfigOpensearchArgs
{
ClusterStatsEnabled = false,
IndexStatsEnabled = false,
PendingTaskStatsEnabled = false,
PshardStatsEnabled = false,
},
Redis = new Aiven.Inputs.ServiceIntegrationDatadogUserConfigRedisArgs
{
CommandStatsEnabled = false,
},
},
KafkaConnectUserConfig = new Aiven.Inputs.ServiceIntegrationKafkaConnectUserConfigArgs
{
KafkaConnect = new Aiven.Inputs.ServiceIntegrationKafkaConnectUserConfigKafkaConnectArgs
{
ConfigStorageTopic = "string",
GroupId = "string",
OffsetStorageTopic = "string",
StatusStorageTopic = "string",
},
},
DestinationEndpointId = "string",
KafkaMirrormakerUserConfig = new Aiven.Inputs.ServiceIntegrationKafkaMirrormakerUserConfigArgs
{
ClusterAlias = "string",
KafkaMirrormaker = new Aiven.Inputs.ServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormakerArgs
{
ConsumerAutoOffsetReset = "string",
ConsumerFetchMinBytes = 0,
ConsumerMaxPollRecords = 0,
ProducerBatchSize = 0,
ProducerBufferMemory = 0,
ProducerCompressionType = "string",
ProducerLingerMs = 0,
ProducerMaxRequestSize = 0,
},
},
LogsUserConfig = new Aiven.Inputs.ServiceIntegrationLogsUserConfigArgs
{
ElasticsearchIndexDaysMax = 0,
ElasticsearchIndexPrefix = "string",
SelectedLogFields = new[]
{
"string",
},
},
MetricsUserConfig = new Aiven.Inputs.ServiceIntegrationMetricsUserConfigArgs
{
Database = "string",
RetentionDays = 0,
RoUsername = "string",
SourceMysql = new Aiven.Inputs.ServiceIntegrationMetricsUserConfigSourceMysqlArgs
{
Telegraf = new Aiven.Inputs.ServiceIntegrationMetricsUserConfigSourceMysqlTelegrafArgs
{
GatherEventWaits = false,
GatherFileEventsStats = false,
GatherIndexIoWaits = false,
GatherInfoSchemaAutoInc = false,
GatherInnodbMetrics = false,
GatherPerfEventsStatements = false,
GatherProcessList = false,
GatherSlaveStatus = false,
GatherTableIoWaits = false,
GatherTableLockWaits = false,
GatherTableSchema = false,
PerfEventsStatementsDigestTextLimit = 0,
PerfEventsStatementsLimit = 0,
PerfEventsStatementsTimeLimit = 0,
},
},
Username = "string",
},
ClickhousePostgresqlUserConfig = new Aiven.Inputs.ServiceIntegrationClickhousePostgresqlUserConfigArgs
{
Databases = new[]
{
new Aiven.Inputs.ServiceIntegrationClickhousePostgresqlUserConfigDatabaseArgs
{
Database = "string",
Schema = "string",
},
},
},
PrometheusUserConfig = new Aiven.Inputs.ServiceIntegrationPrometheusUserConfigArgs
{
SourceMysql = new Aiven.Inputs.ServiceIntegrationPrometheusUserConfigSourceMysqlArgs
{
Telegraf = new Aiven.Inputs.ServiceIntegrationPrometheusUserConfigSourceMysqlTelegrafArgs
{
GatherEventWaits = false,
GatherFileEventsStats = false,
GatherIndexIoWaits = false,
GatherInfoSchemaAutoInc = false,
GatherInnodbMetrics = false,
GatherPerfEventsStatements = false,
GatherProcessList = false,
GatherSlaveStatus = false,
GatherTableIoWaits = false,
GatherTableLockWaits = false,
GatherTableSchema = false,
PerfEventsStatementsDigestTextLimit = 0,
PerfEventsStatementsLimit = 0,
PerfEventsStatementsTimeLimit = 0,
},
},
},
SourceEndpointId = "string",
SourceServiceName = "string",
});
example, err := aiven.NewServiceIntegration(ctx, "serviceIntegrationResource", &aiven.ServiceIntegrationArgs{
IntegrationType: pulumi.String("string"),
Project: pulumi.String("string"),
ExternalOpensearchLogsUserConfig: &aiven.ServiceIntegrationExternalOpensearchLogsUserConfigArgs{
SelectedLogFields: pulumi.StringArray{
pulumi.String("string"),
},
},
KafkaLogsUserConfig: &aiven.ServiceIntegrationKafkaLogsUserConfigArgs{
KafkaTopic: pulumi.String("string"),
SelectedLogFields: pulumi.StringArray{
pulumi.String("string"),
},
},
DestinationServiceName: pulumi.String("string"),
ExternalAwsCloudwatchLogsUserConfig: &aiven.ServiceIntegrationExternalAwsCloudwatchLogsUserConfigArgs{
SelectedLogFields: pulumi.StringArray{
pulumi.String("string"),
},
},
ExternalAwsCloudwatchMetricsUserConfig: &aiven.ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigArgs{
DroppedMetrics: aiven.ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigDroppedMetricArray{
&aiven.ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigDroppedMetricArgs{
Field: pulumi.String("string"),
Metric: pulumi.String("string"),
},
},
ExtraMetrics: aiven.ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigExtraMetricArray{
&aiven.ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigExtraMetricArgs{
Field: pulumi.String("string"),
Metric: pulumi.String("string"),
},
},
},
ExternalElasticsearchLogsUserConfig: &aiven.ServiceIntegrationExternalElasticsearchLogsUserConfigArgs{
SelectedLogFields: pulumi.StringArray{
pulumi.String("string"),
},
},
ClickhouseKafkaUserConfig: &aiven.ServiceIntegrationClickhouseKafkaUserConfigArgs{
Tables: aiven.ServiceIntegrationClickhouseKafkaUserConfigTableArray{
&aiven.ServiceIntegrationClickhouseKafkaUserConfigTableArgs{
Name: pulumi.String("string"),
Columns: aiven.ServiceIntegrationClickhouseKafkaUserConfigTableColumnArray{
&aiven.ServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs{
Name: pulumi.String("string"),
Type: pulumi.String("string"),
},
},
DataFormat: pulumi.String("string"),
Topics: aiven.ServiceIntegrationClickhouseKafkaUserConfigTableTopicArray{
&aiven.ServiceIntegrationClickhouseKafkaUserConfigTableTopicArgs{
Name: pulumi.String("string"),
},
},
GroupName: pulumi.String("string"),
MaxBlockSize: pulumi.Int(0),
AutoOffsetReset: pulumi.String("string"),
MaxRowsPerMessage: pulumi.Int(0),
HandleErrorMode: pulumi.String("string"),
NumConsumers: pulumi.Int(0),
PollMaxBatchSize: pulumi.Int(0),
SkipBrokenMessages: pulumi.Int(0),
DateTimeInputFormat: pulumi.String("string"),
},
},
},
DatadogUserConfig: &aiven.ServiceIntegrationDatadogUserConfigArgs{
DatadogDbmEnabled: pulumi.Bool(false),
DatadogPgbouncerEnabled: pulumi.Bool(false),
DatadogTags: aiven.ServiceIntegrationDatadogUserConfigDatadogTagArray{
&aiven.ServiceIntegrationDatadogUserConfigDatadogTagArgs{
Tag: pulumi.String("string"),
Comment: pulumi.String("string"),
},
},
ExcludeConsumerGroups: pulumi.StringArray{
pulumi.String("string"),
},
ExcludeTopics: pulumi.StringArray{
pulumi.String("string"),
},
IncludeConsumerGroups: pulumi.StringArray{
pulumi.String("string"),
},
IncludeTopics: pulumi.StringArray{
pulumi.String("string"),
},
KafkaCustomMetrics: pulumi.StringArray{
pulumi.String("string"),
},
MaxJmxMetrics: pulumi.Int(0),
MirrormakerCustomMetrics: pulumi.StringArray{
pulumi.String("string"),
},
Opensearch: &aiven.ServiceIntegrationDatadogUserConfigOpensearchArgs{
ClusterStatsEnabled: pulumi.Bool(false),
IndexStatsEnabled: pulumi.Bool(false),
PendingTaskStatsEnabled: pulumi.Bool(false),
PshardStatsEnabled: pulumi.Bool(false),
},
Redis: &aiven.ServiceIntegrationDatadogUserConfigRedisArgs{
CommandStatsEnabled: pulumi.Bool(false),
},
},
KafkaConnectUserConfig: &aiven.ServiceIntegrationKafkaConnectUserConfigArgs{
KafkaConnect: &aiven.ServiceIntegrationKafkaConnectUserConfigKafkaConnectArgs{
ConfigStorageTopic: pulumi.String("string"),
GroupId: pulumi.String("string"),
OffsetStorageTopic: pulumi.String("string"),
StatusStorageTopic: pulumi.String("string"),
},
},
DestinationEndpointId: pulumi.String("string"),
KafkaMirrormakerUserConfig: &aiven.ServiceIntegrationKafkaMirrormakerUserConfigArgs{
ClusterAlias: pulumi.String("string"),
KafkaMirrormaker: &aiven.ServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormakerArgs{
ConsumerAutoOffsetReset: pulumi.String("string"),
ConsumerFetchMinBytes: pulumi.Int(0),
ConsumerMaxPollRecords: pulumi.Int(0),
ProducerBatchSize: pulumi.Int(0),
ProducerBufferMemory: pulumi.Int(0),
ProducerCompressionType: pulumi.String("string"),
ProducerLingerMs: pulumi.Int(0),
ProducerMaxRequestSize: pulumi.Int(0),
},
},
LogsUserConfig: &aiven.ServiceIntegrationLogsUserConfigArgs{
ElasticsearchIndexDaysMax: pulumi.Int(0),
ElasticsearchIndexPrefix: pulumi.String("string"),
SelectedLogFields: pulumi.StringArray{
pulumi.String("string"),
},
},
MetricsUserConfig: &aiven.ServiceIntegrationMetricsUserConfigArgs{
Database: pulumi.String("string"),
RetentionDays: pulumi.Int(0),
RoUsername: pulumi.String("string"),
SourceMysql: &aiven.ServiceIntegrationMetricsUserConfigSourceMysqlArgs{
Telegraf: &aiven.ServiceIntegrationMetricsUserConfigSourceMysqlTelegrafArgs{
GatherEventWaits: pulumi.Bool(false),
GatherFileEventsStats: pulumi.Bool(false),
GatherIndexIoWaits: pulumi.Bool(false),
GatherInfoSchemaAutoInc: pulumi.Bool(false),
GatherInnodbMetrics: pulumi.Bool(false),
GatherPerfEventsStatements: pulumi.Bool(false),
GatherProcessList: pulumi.Bool(false),
GatherSlaveStatus: pulumi.Bool(false),
GatherTableIoWaits: pulumi.Bool(false),
GatherTableLockWaits: pulumi.Bool(false),
GatherTableSchema: pulumi.Bool(false),
PerfEventsStatementsDigestTextLimit: pulumi.Int(0),
PerfEventsStatementsLimit: pulumi.Int(0),
PerfEventsStatementsTimeLimit: pulumi.Int(0),
},
},
Username: pulumi.String("string"),
},
ClickhousePostgresqlUserConfig: &aiven.ServiceIntegrationClickhousePostgresqlUserConfigArgs{
Databases: aiven.ServiceIntegrationClickhousePostgresqlUserConfigDatabaseArray{
&aiven.ServiceIntegrationClickhousePostgresqlUserConfigDatabaseArgs{
Database: pulumi.String("string"),
Schema: pulumi.String("string"),
},
},
},
PrometheusUserConfig: &aiven.ServiceIntegrationPrometheusUserConfigArgs{
SourceMysql: &aiven.ServiceIntegrationPrometheusUserConfigSourceMysqlArgs{
Telegraf: &aiven.ServiceIntegrationPrometheusUserConfigSourceMysqlTelegrafArgs{
GatherEventWaits: pulumi.Bool(false),
GatherFileEventsStats: pulumi.Bool(false),
GatherIndexIoWaits: pulumi.Bool(false),
GatherInfoSchemaAutoInc: pulumi.Bool(false),
GatherInnodbMetrics: pulumi.Bool(false),
GatherPerfEventsStatements: pulumi.Bool(false),
GatherProcessList: pulumi.Bool(false),
GatherSlaveStatus: pulumi.Bool(false),
GatherTableIoWaits: pulumi.Bool(false),
GatherTableLockWaits: pulumi.Bool(false),
GatherTableSchema: pulumi.Bool(false),
PerfEventsStatementsDigestTextLimit: pulumi.Int(0),
PerfEventsStatementsLimit: pulumi.Int(0),
PerfEventsStatementsTimeLimit: pulumi.Int(0),
},
},
},
SourceEndpointId: pulumi.String("string"),
SourceServiceName: pulumi.String("string"),
})
var serviceIntegrationResource = new ServiceIntegration("serviceIntegrationResource", ServiceIntegrationArgs.builder()
.integrationType("string")
.project("string")
.externalOpensearchLogsUserConfig(ServiceIntegrationExternalOpensearchLogsUserConfigArgs.builder()
.selectedLogFields("string")
.build())
.kafkaLogsUserConfig(ServiceIntegrationKafkaLogsUserConfigArgs.builder()
.kafkaTopic("string")
.selectedLogFields("string")
.build())
.destinationServiceName("string")
.externalAwsCloudwatchLogsUserConfig(ServiceIntegrationExternalAwsCloudwatchLogsUserConfigArgs.builder()
.selectedLogFields("string")
.build())
.externalAwsCloudwatchMetricsUserConfig(ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigArgs.builder()
.droppedMetrics(ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigDroppedMetricArgs.builder()
.field("string")
.metric("string")
.build())
.extraMetrics(ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigExtraMetricArgs.builder()
.field("string")
.metric("string")
.build())
.build())
.externalElasticsearchLogsUserConfig(ServiceIntegrationExternalElasticsearchLogsUserConfigArgs.builder()
.selectedLogFields("string")
.build())
.clickhouseKafkaUserConfig(ServiceIntegrationClickhouseKafkaUserConfigArgs.builder()
.tables(ServiceIntegrationClickhouseKafkaUserConfigTableArgs.builder()
.name("string")
.columns(ServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs.builder()
.name("string")
.type("string")
.build())
.dataFormat("string")
.topics(ServiceIntegrationClickhouseKafkaUserConfigTableTopicArgs.builder()
.name("string")
.build())
.groupName("string")
.maxBlockSize(0)
.autoOffsetReset("string")
.maxRowsPerMessage(0)
.handleErrorMode("string")
.numConsumers(0)
.pollMaxBatchSize(0)
.skipBrokenMessages(0)
.dateTimeInputFormat("string")
.build())
.build())
.datadogUserConfig(ServiceIntegrationDatadogUserConfigArgs.builder()
.datadogDbmEnabled(false)
.datadogPgbouncerEnabled(false)
.datadogTags(ServiceIntegrationDatadogUserConfigDatadogTagArgs.builder()
.tag("string")
.comment("string")
.build())
.excludeConsumerGroups("string")
.excludeTopics("string")
.includeConsumerGroups("string")
.includeTopics("string")
.kafkaCustomMetrics("string")
.maxJmxMetrics(0)
.mirrormakerCustomMetrics("string")
.opensearch(ServiceIntegrationDatadogUserConfigOpensearchArgs.builder()
.clusterStatsEnabled(false)
.indexStatsEnabled(false)
.pendingTaskStatsEnabled(false)
.pshardStatsEnabled(false)
.build())
.redis(ServiceIntegrationDatadogUserConfigRedisArgs.builder()
.commandStatsEnabled(false)
.build())
.build())
.kafkaConnectUserConfig(ServiceIntegrationKafkaConnectUserConfigArgs.builder()
.kafkaConnect(ServiceIntegrationKafkaConnectUserConfigKafkaConnectArgs.builder()
.configStorageTopic("string")
.groupId("string")
.offsetStorageTopic("string")
.statusStorageTopic("string")
.build())
.build())
.destinationEndpointId("string")
.kafkaMirrormakerUserConfig(ServiceIntegrationKafkaMirrormakerUserConfigArgs.builder()
.clusterAlias("string")
.kafkaMirrormaker(ServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormakerArgs.builder()
.consumerAutoOffsetReset("string")
.consumerFetchMinBytes(0)
.consumerMaxPollRecords(0)
.producerBatchSize(0)
.producerBufferMemory(0)
.producerCompressionType("string")
.producerLingerMs(0)
.producerMaxRequestSize(0)
.build())
.build())
.logsUserConfig(ServiceIntegrationLogsUserConfigArgs.builder()
.elasticsearchIndexDaysMax(0)
.elasticsearchIndexPrefix("string")
.selectedLogFields("string")
.build())
.metricsUserConfig(ServiceIntegrationMetricsUserConfigArgs.builder()
.database("string")
.retentionDays(0)
.roUsername("string")
.sourceMysql(ServiceIntegrationMetricsUserConfigSourceMysqlArgs.builder()
.telegraf(ServiceIntegrationMetricsUserConfigSourceMysqlTelegrafArgs.builder()
.gatherEventWaits(false)
.gatherFileEventsStats(false)
.gatherIndexIoWaits(false)
.gatherInfoSchemaAutoInc(false)
.gatherInnodbMetrics(false)
.gatherPerfEventsStatements(false)
.gatherProcessList(false)
.gatherSlaveStatus(false)
.gatherTableIoWaits(false)
.gatherTableLockWaits(false)
.gatherTableSchema(false)
.perfEventsStatementsDigestTextLimit(0)
.perfEventsStatementsLimit(0)
.perfEventsStatementsTimeLimit(0)
.build())
.build())
.username("string")
.build())
.clickhousePostgresqlUserConfig(ServiceIntegrationClickhousePostgresqlUserConfigArgs.builder()
.databases(ServiceIntegrationClickhousePostgresqlUserConfigDatabaseArgs.builder()
.database("string")
.schema("string")
.build())
.build())
.prometheusUserConfig(ServiceIntegrationPrometheusUserConfigArgs.builder()
.sourceMysql(ServiceIntegrationPrometheusUserConfigSourceMysqlArgs.builder()
.telegraf(ServiceIntegrationPrometheusUserConfigSourceMysqlTelegrafArgs.builder()
.gatherEventWaits(false)
.gatherFileEventsStats(false)
.gatherIndexIoWaits(false)
.gatherInfoSchemaAutoInc(false)
.gatherInnodbMetrics(false)
.gatherPerfEventsStatements(false)
.gatherProcessList(false)
.gatherSlaveStatus(false)
.gatherTableIoWaits(false)
.gatherTableLockWaits(false)
.gatherTableSchema(false)
.perfEventsStatementsDigestTextLimit(0)
.perfEventsStatementsLimit(0)
.perfEventsStatementsTimeLimit(0)
.build())
.build())
.build())
.sourceEndpointId("string")
.sourceServiceName("string")
.build());
service_integration_resource = aiven.ServiceIntegration("serviceIntegrationResource",
integration_type="string",
project="string",
external_opensearch_logs_user_config=aiven.ServiceIntegrationExternalOpensearchLogsUserConfigArgs(
selected_log_fields=["string"],
),
kafka_logs_user_config=aiven.ServiceIntegrationKafkaLogsUserConfigArgs(
kafka_topic="string",
selected_log_fields=["string"],
),
destination_service_name="string",
external_aws_cloudwatch_logs_user_config=aiven.ServiceIntegrationExternalAwsCloudwatchLogsUserConfigArgs(
selected_log_fields=["string"],
),
external_aws_cloudwatch_metrics_user_config=aiven.ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigArgs(
dropped_metrics=[aiven.ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigDroppedMetricArgs(
field="string",
metric="string",
)],
extra_metrics=[aiven.ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigExtraMetricArgs(
field="string",
metric="string",
)],
),
external_elasticsearch_logs_user_config=aiven.ServiceIntegrationExternalElasticsearchLogsUserConfigArgs(
selected_log_fields=["string"],
),
clickhouse_kafka_user_config=aiven.ServiceIntegrationClickhouseKafkaUserConfigArgs(
tables=[aiven.ServiceIntegrationClickhouseKafkaUserConfigTableArgs(
name="string",
columns=[aiven.ServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs(
name="string",
type="string",
)],
data_format="string",
topics=[aiven.ServiceIntegrationClickhouseKafkaUserConfigTableTopicArgs(
name="string",
)],
group_name="string",
max_block_size=0,
auto_offset_reset="string",
max_rows_per_message=0,
handle_error_mode="string",
num_consumers=0,
poll_max_batch_size=0,
skip_broken_messages=0,
date_time_input_format="string",
)],
),
datadog_user_config=aiven.ServiceIntegrationDatadogUserConfigArgs(
datadog_dbm_enabled=False,
datadog_pgbouncer_enabled=False,
datadog_tags=[aiven.ServiceIntegrationDatadogUserConfigDatadogTagArgs(
tag="string",
comment="string",
)],
exclude_consumer_groups=["string"],
exclude_topics=["string"],
include_consumer_groups=["string"],
include_topics=["string"],
kafka_custom_metrics=["string"],
max_jmx_metrics=0,
mirrormaker_custom_metrics=["string"],
opensearch=aiven.ServiceIntegrationDatadogUserConfigOpensearchArgs(
cluster_stats_enabled=False,
index_stats_enabled=False,
pending_task_stats_enabled=False,
pshard_stats_enabled=False,
),
redis=aiven.ServiceIntegrationDatadogUserConfigRedisArgs(
command_stats_enabled=False,
),
),
kafka_connect_user_config=aiven.ServiceIntegrationKafkaConnectUserConfigArgs(
kafka_connect=aiven.ServiceIntegrationKafkaConnectUserConfigKafkaConnectArgs(
config_storage_topic="string",
group_id="string",
offset_storage_topic="string",
status_storage_topic="string",
),
),
destination_endpoint_id="string",
kafka_mirrormaker_user_config=aiven.ServiceIntegrationKafkaMirrormakerUserConfigArgs(
cluster_alias="string",
kafka_mirrormaker=aiven.ServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormakerArgs(
consumer_auto_offset_reset="string",
consumer_fetch_min_bytes=0,
consumer_max_poll_records=0,
producer_batch_size=0,
producer_buffer_memory=0,
producer_compression_type="string",
producer_linger_ms=0,
producer_max_request_size=0,
),
),
logs_user_config=aiven.ServiceIntegrationLogsUserConfigArgs(
elasticsearch_index_days_max=0,
elasticsearch_index_prefix="string",
selected_log_fields=["string"],
),
metrics_user_config=aiven.ServiceIntegrationMetricsUserConfigArgs(
database="string",
retention_days=0,
ro_username="string",
source_mysql=aiven.ServiceIntegrationMetricsUserConfigSourceMysqlArgs(
telegraf=aiven.ServiceIntegrationMetricsUserConfigSourceMysqlTelegrafArgs(
gather_event_waits=False,
gather_file_events_stats=False,
gather_index_io_waits=False,
gather_info_schema_auto_inc=False,
gather_innodb_metrics=False,
gather_perf_events_statements=False,
gather_process_list=False,
gather_slave_status=False,
gather_table_io_waits=False,
gather_table_lock_waits=False,
gather_table_schema=False,
perf_events_statements_digest_text_limit=0,
perf_events_statements_limit=0,
perf_events_statements_time_limit=0,
),
),
username="string",
),
clickhouse_postgresql_user_config=aiven.ServiceIntegrationClickhousePostgresqlUserConfigArgs(
databases=[aiven.ServiceIntegrationClickhousePostgresqlUserConfigDatabaseArgs(
database="string",
schema="string",
)],
),
prometheus_user_config=aiven.ServiceIntegrationPrometheusUserConfigArgs(
source_mysql=aiven.ServiceIntegrationPrometheusUserConfigSourceMysqlArgs(
telegraf=aiven.ServiceIntegrationPrometheusUserConfigSourceMysqlTelegrafArgs(
gather_event_waits=False,
gather_file_events_stats=False,
gather_index_io_waits=False,
gather_info_schema_auto_inc=False,
gather_innodb_metrics=False,
gather_perf_events_statements=False,
gather_process_list=False,
gather_slave_status=False,
gather_table_io_waits=False,
gather_table_lock_waits=False,
gather_table_schema=False,
perf_events_statements_digest_text_limit=0,
perf_events_statements_limit=0,
perf_events_statements_time_limit=0,
),
),
),
source_endpoint_id="string",
source_service_name="string")
const serviceIntegrationResource = new aiven.ServiceIntegration("serviceIntegrationResource", {
integrationType: "string",
project: "string",
externalOpensearchLogsUserConfig: {
selectedLogFields: ["string"],
},
kafkaLogsUserConfig: {
kafkaTopic: "string",
selectedLogFields: ["string"],
},
destinationServiceName: "string",
externalAwsCloudwatchLogsUserConfig: {
selectedLogFields: ["string"],
},
externalAwsCloudwatchMetricsUserConfig: {
droppedMetrics: [{
field: "string",
metric: "string",
}],
extraMetrics: [{
field: "string",
metric: "string",
}],
},
externalElasticsearchLogsUserConfig: {
selectedLogFields: ["string"],
},
clickhouseKafkaUserConfig: {
tables: [{
name: "string",
columns: [{
name: "string",
type: "string",
}],
dataFormat: "string",
topics: [{
name: "string",
}],
groupName: "string",
maxBlockSize: 0,
autoOffsetReset: "string",
maxRowsPerMessage: 0,
handleErrorMode: "string",
numConsumers: 0,
pollMaxBatchSize: 0,
skipBrokenMessages: 0,
dateTimeInputFormat: "string",
}],
},
datadogUserConfig: {
datadogDbmEnabled: false,
datadogPgbouncerEnabled: false,
datadogTags: [{
tag: "string",
comment: "string",
}],
excludeConsumerGroups: ["string"],
excludeTopics: ["string"],
includeConsumerGroups: ["string"],
includeTopics: ["string"],
kafkaCustomMetrics: ["string"],
maxJmxMetrics: 0,
mirrormakerCustomMetrics: ["string"],
opensearch: {
clusterStatsEnabled: false,
indexStatsEnabled: false,
pendingTaskStatsEnabled: false,
pshardStatsEnabled: false,
},
redis: {
commandStatsEnabled: false,
},
},
kafkaConnectUserConfig: {
kafkaConnect: {
configStorageTopic: "string",
groupId: "string",
offsetStorageTopic: "string",
statusStorageTopic: "string",
},
},
destinationEndpointId: "string",
kafkaMirrormakerUserConfig: {
clusterAlias: "string",
kafkaMirrormaker: {
consumerAutoOffsetReset: "string",
consumerFetchMinBytes: 0,
consumerMaxPollRecords: 0,
producerBatchSize: 0,
producerBufferMemory: 0,
producerCompressionType: "string",
producerLingerMs: 0,
producerMaxRequestSize: 0,
},
},
logsUserConfig: {
elasticsearchIndexDaysMax: 0,
elasticsearchIndexPrefix: "string",
selectedLogFields: ["string"],
},
metricsUserConfig: {
database: "string",
retentionDays: 0,
roUsername: "string",
sourceMysql: {
telegraf: {
gatherEventWaits: false,
gatherFileEventsStats: false,
gatherIndexIoWaits: false,
gatherInfoSchemaAutoInc: false,
gatherInnodbMetrics: false,
gatherPerfEventsStatements: false,
gatherProcessList: false,
gatherSlaveStatus: false,
gatherTableIoWaits: false,
gatherTableLockWaits: false,
gatherTableSchema: false,
perfEventsStatementsDigestTextLimit: 0,
perfEventsStatementsLimit: 0,
perfEventsStatementsTimeLimit: 0,
},
},
username: "string",
},
clickhousePostgresqlUserConfig: {
databases: [{
database: "string",
schema: "string",
}],
},
prometheusUserConfig: {
sourceMysql: {
telegraf: {
gatherEventWaits: false,
gatherFileEventsStats: false,
gatherIndexIoWaits: false,
gatherInfoSchemaAutoInc: false,
gatherInnodbMetrics: false,
gatherPerfEventsStatements: false,
gatherProcessList: false,
gatherSlaveStatus: false,
gatherTableIoWaits: false,
gatherTableLockWaits: false,
gatherTableSchema: false,
perfEventsStatementsDigestTextLimit: 0,
perfEventsStatementsLimit: 0,
perfEventsStatementsTimeLimit: 0,
},
},
},
sourceEndpointId: "string",
sourceServiceName: "string",
});
type: aiven:ServiceIntegration
properties:
clickhouseKafkaUserConfig:
tables:
- autoOffsetReset: string
columns:
- name: string
type: string
dataFormat: string
dateTimeInputFormat: string
groupName: string
handleErrorMode: string
maxBlockSize: 0
maxRowsPerMessage: 0
name: string
numConsumers: 0
pollMaxBatchSize: 0
skipBrokenMessages: 0
topics:
- name: string
clickhousePostgresqlUserConfig:
databases:
- database: string
schema: string
datadogUserConfig:
datadogDbmEnabled: false
datadogPgbouncerEnabled: false
datadogTags:
- comment: string
tag: string
excludeConsumerGroups:
- string
excludeTopics:
- string
includeConsumerGroups:
- string
includeTopics:
- string
kafkaCustomMetrics:
- string
maxJmxMetrics: 0
mirrormakerCustomMetrics:
- string
opensearch:
clusterStatsEnabled: false
indexStatsEnabled: false
pendingTaskStatsEnabled: false
pshardStatsEnabled: false
redis:
commandStatsEnabled: false
destinationEndpointId: string
destinationServiceName: string
externalAwsCloudwatchLogsUserConfig:
selectedLogFields:
- string
externalAwsCloudwatchMetricsUserConfig:
droppedMetrics:
- field: string
metric: string
extraMetrics:
- field: string
metric: string
externalElasticsearchLogsUserConfig:
selectedLogFields:
- string
externalOpensearchLogsUserConfig:
selectedLogFields:
- string
integrationType: string
kafkaConnectUserConfig:
kafkaConnect:
configStorageTopic: string
groupId: string
offsetStorageTopic: string
statusStorageTopic: string
kafkaLogsUserConfig:
kafkaTopic: string
selectedLogFields:
- string
kafkaMirrormakerUserConfig:
clusterAlias: string
kafkaMirrormaker:
consumerAutoOffsetReset: string
consumerFetchMinBytes: 0
consumerMaxPollRecords: 0
producerBatchSize: 0
producerBufferMemory: 0
producerCompressionType: string
producerLingerMs: 0
producerMaxRequestSize: 0
logsUserConfig:
elasticsearchIndexDaysMax: 0
elasticsearchIndexPrefix: string
selectedLogFields:
- string
metricsUserConfig:
database: string
retentionDays: 0
roUsername: string
sourceMysql:
telegraf:
gatherEventWaits: false
gatherFileEventsStats: false
gatherIndexIoWaits: false
gatherInfoSchemaAutoInc: false
gatherInnodbMetrics: false
gatherPerfEventsStatements: false
gatherProcessList: false
gatherSlaveStatus: false
gatherTableIoWaits: false
gatherTableLockWaits: false
gatherTableSchema: false
perfEventsStatementsDigestTextLimit: 0
perfEventsStatementsLimit: 0
perfEventsStatementsTimeLimit: 0
username: string
project: string
prometheusUserConfig:
sourceMysql:
telegraf:
gatherEventWaits: false
gatherFileEventsStats: false
gatherIndexIoWaits: false
gatherInfoSchemaAutoInc: false
gatherInnodbMetrics: false
gatherPerfEventsStatements: false
gatherProcessList: false
gatherSlaveStatus: false
gatherTableIoWaits: false
gatherTableLockWaits: false
gatherTableSchema: false
perfEventsStatementsDigestTextLimit: 0
perfEventsStatementsLimit: 0
perfEventsStatementsTimeLimit: 0
sourceEndpointId: string
sourceServiceName: string
ServiceIntegration Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
The ServiceIntegration resource accepts the following input properties:
- IntegrationType string - Type of the service integration. Possible values: alertmanager, autoscaler, caching, cassandra_cross_service_cluster, clickhouse_credentials, clickhouse_kafka, clickhouse_postgresql, dashboard, datadog, datasource, external_aws_cloudwatch_logs, external_aws_cloudwatch_metrics, external_elasticsearch_logs, external_google_cloud_logging, external_opensearch_logs, flink, flink_external_bigquery, flink_external_kafka, flink_external_postgresql, internal_connectivity, jolokia, kafka_connect, kafka_connect_postgresql, kafka_logs, kafka_mirrormaker, logs, m3aggregator, m3coordinator, metrics, opensearch_cross_cluster_replication, opensearch_cross_cluster_search, prometheus, read_replica, rsyslog, schema_registry_proxy, stresstester, thanos_migrate, thanoscompactor, thanosquery, thanosstore, vector, vmalert
- Project string
- Project the integration belongs to.
- ClickhouseKafkaUserConfig ServiceIntegrationClickhouseKafkaUserConfig - ClickhouseKafka user configurable settings
- ClickhousePostgresqlUserConfig ServiceIntegrationClickhousePostgresqlUserConfig - ClickhousePostgresql user configurable settings
- DatadogUserConfig ServiceIntegrationDatadogUserConfig - Datadog user configurable settings
- DestinationEndpointId string - Destination endpoint for the integration.
- DestinationServiceName string - Destination service for the integration.
- ExternalAwsCloudwatchLogsUserConfig ServiceIntegrationExternalAwsCloudwatchLogsUserConfig - ExternalAwsCloudwatchLogs user configurable settings
- ExternalAwsCloudwatchMetricsUserConfig ServiceIntegrationExternalAwsCloudwatchMetricsUserConfig - ExternalAwsCloudwatchMetrics user configurable settings
- ExternalElasticsearchLogsUserConfig ServiceIntegrationExternalElasticsearchLogsUserConfig - ExternalElasticsearchLogs user configurable settings
- ExternalOpensearchLogsUserConfig ServiceIntegrationExternalOpensearchLogsUserConfig - ExternalOpensearchLogs user configurable settings
- KafkaConnectUserConfig ServiceIntegrationKafkaConnectUserConfig - KafkaConnect user configurable settings
- KafkaLogsUserConfig ServiceIntegrationKafkaLogsUserConfig - KafkaLogs user configurable settings
- KafkaMirrormakerUserConfig ServiceIntegrationKafkaMirrormakerUserConfig - KafkaMirrormaker user configurable settings
- LogsUserConfig ServiceIntegrationLogsUserConfig - Logs user configurable settings
- MetricsUserConfig ServiceIntegrationMetricsUserConfig - Metrics user configurable settings
- PrometheusUserConfig ServiceIntegrationPrometheusUserConfig - Prometheus user configurable settings
- SourceEndpointId string - Source endpoint for the integration.
- SourceServiceName string - Source service for the integration (if any)
- IntegrationType string - Type of the service integration. Possible values: alertmanager, autoscaler, caching, cassandra_cross_service_cluster, clickhouse_credentials, clickhouse_kafka, clickhouse_postgresql, dashboard, datadog, datasource, external_aws_cloudwatch_logs, external_aws_cloudwatch_metrics, external_elasticsearch_logs, external_google_cloud_logging, external_opensearch_logs, flink, flink_external_bigquery, flink_external_kafka, flink_external_postgresql, internal_connectivity, jolokia, kafka_connect, kafka_connect_postgresql, kafka_logs, kafka_mirrormaker, logs, m3aggregator, m3coordinator, metrics, opensearch_cross_cluster_replication, opensearch_cross_cluster_search, prometheus, read_replica, rsyslog, schema_registry_proxy, stresstester, thanos_migrate, thanoscompactor, thanosquery, thanosstore, vector, vmalert
- Project string
- Project the integration belongs to.
- ClickhouseKafkaUserConfig ServiceIntegrationClickhouseKafkaUserConfigArgs - ClickhouseKafka user configurable settings
- ClickhousePostgresqlUserConfig ServiceIntegrationClickhousePostgresqlUserConfigArgs - ClickhousePostgresql user configurable settings
- DatadogUserConfig ServiceIntegrationDatadogUserConfigArgs - Datadog user configurable settings
- DestinationEndpointId string - Destination endpoint for the integration.
- DestinationServiceName string - Destination service for the integration.
- ExternalAwsCloudwatchLogsUserConfig ServiceIntegrationExternalAwsCloudwatchLogsUserConfigArgs - ExternalAwsCloudwatchLogs user configurable settings
- ExternalAwsCloudwatchMetricsUserConfig ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigArgs - ExternalAwsCloudwatchMetrics user configurable settings
- ExternalElasticsearchLogsUserConfig ServiceIntegrationExternalElasticsearchLogsUserConfigArgs - ExternalElasticsearchLogs user configurable settings
- ExternalOpensearchLogsUserConfig ServiceIntegrationExternalOpensearchLogsUserConfigArgs - ExternalOpensearchLogs user configurable settings
- KafkaConnectUserConfig ServiceIntegrationKafkaConnectUserConfigArgs - KafkaConnect user configurable settings
- KafkaLogsUserConfig ServiceIntegrationKafkaLogsUserConfigArgs - KafkaLogs user configurable settings
- KafkaMirrormakerUserConfig ServiceIntegrationKafkaMirrormakerUserConfigArgs - KafkaMirrormaker user configurable settings
- LogsUserConfig ServiceIntegrationLogsUserConfigArgs - Logs user configurable settings
- MetricsUserConfig ServiceIntegrationMetricsUserConfigArgs - Metrics user configurable settings
- PrometheusUserConfig ServiceIntegrationPrometheusUserConfigArgs - Prometheus user configurable settings
- SourceEndpointId string - Source endpoint for the integration.
- SourceServiceName string - Source service for the integration (if any)
- integrationType String - Type of the service integration. Possible values: alertmanager, autoscaler, caching, cassandra_cross_service_cluster, clickhouse_credentials, clickhouse_kafka, clickhouse_postgresql, dashboard, datadog, datasource, external_aws_cloudwatch_logs, external_aws_cloudwatch_metrics, external_elasticsearch_logs, external_google_cloud_logging, external_opensearch_logs, flink, flink_external_bigquery, flink_external_kafka, flink_external_postgresql, internal_connectivity, jolokia, kafka_connect, kafka_connect_postgresql, kafka_logs, kafka_mirrormaker, logs, m3aggregator, m3coordinator, metrics, opensearch_cross_cluster_replication, opensearch_cross_cluster_search, prometheus, read_replica, rsyslog, schema_registry_proxy, stresstester, thanos_migrate, thanoscompactor, thanosquery, thanosstore, vector, vmalert
- project String
- Project the integration belongs to.
- clickhouseKafkaUserConfig ServiceIntegrationClickhouseKafkaUserConfig - ClickhouseKafka user configurable settings
- clickhousePostgresqlUserConfig ServiceIntegrationClickhousePostgresqlUserConfig - ClickhousePostgresql user configurable settings
- datadogUserConfig ServiceIntegrationDatadogUserConfig - Datadog user configurable settings
- destinationEndpointId String - Destination endpoint for the integration.
- destinationServiceName String - Destination service for the integration.
- externalAwsCloudwatchLogsUserConfig ServiceIntegrationExternalAwsCloudwatchLogsUserConfig - ExternalAwsCloudwatchLogs user configurable settings
- externalAwsCloudwatchMetricsUserConfig ServiceIntegrationExternalAwsCloudwatchMetricsUserConfig - ExternalAwsCloudwatchMetrics user configurable settings
- externalElasticsearchLogsUserConfig ServiceIntegrationExternalElasticsearchLogsUserConfig - ExternalElasticsearchLogs user configurable settings
- externalOpensearchLogsUserConfig ServiceIntegrationExternalOpensearchLogsUserConfig - ExternalOpensearchLogs user configurable settings
- kafkaConnectUserConfig ServiceIntegrationKafkaConnectUserConfig - KafkaConnect user configurable settings
- kafkaLogsUserConfig ServiceIntegrationKafkaLogsUserConfig - KafkaLogs user configurable settings
- kafkaMirrormakerUserConfig ServiceIntegrationKafkaMirrormakerUserConfig - KafkaMirrormaker user configurable settings
- logsUserConfig ServiceIntegrationLogsUserConfig - Logs user configurable settings
- metricsUserConfig ServiceIntegrationMetricsUserConfig - Metrics user configurable settings
- prometheusUserConfig ServiceIntegrationPrometheusUserConfig - Prometheus user configurable settings
- sourceEndpointId String - Source endpoint for the integration.
- sourceServiceName String - Source service for the integration (if any)
- integrationType string - Type of the service integration. Possible values: alertmanager, autoscaler, caching, cassandra_cross_service_cluster, clickhouse_credentials, clickhouse_kafka, clickhouse_postgresql, dashboard, datadog, datasource, external_aws_cloudwatch_logs, external_aws_cloudwatch_metrics, external_elasticsearch_logs, external_google_cloud_logging, external_opensearch_logs, flink, flink_external_bigquery, flink_external_kafka, flink_external_postgresql, internal_connectivity, jolokia, kafka_connect, kafka_connect_postgresql, kafka_logs, kafka_mirrormaker, logs, m3aggregator, m3coordinator, metrics, opensearch_cross_cluster_replication, opensearch_cross_cluster_search, prometheus, read_replica, rsyslog, schema_registry_proxy, stresstester, thanos_migrate, thanoscompactor, thanosquery, thanosstore, vector, vmalert
- project string
- Project the integration belongs to.
- clickhouseKafkaUserConfig ServiceIntegrationClickhouseKafkaUserConfig - ClickhouseKafka user configurable settings
- clickhousePostgresqlUserConfig ServiceIntegrationClickhousePostgresqlUserConfig - ClickhousePostgresql user configurable settings
- datadogUserConfig ServiceIntegrationDatadogUserConfig - Datadog user configurable settings
- destinationEndpointId string - Destination endpoint for the integration.
- destinationServiceName string - Destination service for the integration.
- externalAwsCloudwatchLogsUserConfig ServiceIntegrationExternalAwsCloudwatchLogsUserConfig - ExternalAwsCloudwatchLogs user configurable settings
- externalAwsCloudwatchMetricsUserConfig ServiceIntegrationExternalAwsCloudwatchMetricsUserConfig - ExternalAwsCloudwatchMetrics user configurable settings
- externalElasticsearchLogsUserConfig ServiceIntegrationExternalElasticsearchLogsUserConfig - ExternalElasticsearchLogs user configurable settings
- externalOpensearchLogsUserConfig ServiceIntegrationExternalOpensearchLogsUserConfig - ExternalOpensearchLogs user configurable settings
- kafkaConnectUserConfig ServiceIntegrationKafkaConnectUserConfig - KafkaConnect user configurable settings
- kafkaLogsUserConfig ServiceIntegrationKafkaLogsUserConfig - KafkaLogs user configurable settings
- kafkaMirrormakerUserConfig ServiceIntegrationKafkaMirrormakerUserConfig - KafkaMirrormaker user configurable settings
- logsUserConfig ServiceIntegrationLogsUserConfig - Logs user configurable settings
- metricsUserConfig ServiceIntegrationMetricsUserConfig - Metrics user configurable settings
- prometheusUserConfig ServiceIntegrationPrometheusUserConfig - Prometheus user configurable settings
- sourceEndpointId string - Source endpoint for the integration.
- sourceServiceName string - Source service for the integration (if any)
- integration_type str - Type of the service integration. Possible values: alertmanager, autoscaler, caching, cassandra_cross_service_cluster, clickhouse_credentials, clickhouse_kafka, clickhouse_postgresql, dashboard, datadog, datasource, external_aws_cloudwatch_logs, external_aws_cloudwatch_metrics, external_elasticsearch_logs, external_google_cloud_logging, external_opensearch_logs, flink, flink_external_bigquery, flink_external_kafka, flink_external_postgresql, internal_connectivity, jolokia, kafka_connect, kafka_connect_postgresql, kafka_logs, kafka_mirrormaker, logs, m3aggregator, m3coordinator, metrics, opensearch_cross_cluster_replication, opensearch_cross_cluster_search, prometheus, read_replica, rsyslog, schema_registry_proxy, stresstester, thanos_migrate, thanoscompactor, thanosquery, thanosstore, vector, vmalert
- project str
- Project the integration belongs to.
- clickhouse_kafka_user_config ServiceIntegrationClickhouseKafkaUserConfigArgs - ClickhouseKafka user configurable settings
- clickhouse_postgresql_user_config ServiceIntegrationClickhousePostgresqlUserConfigArgs - ClickhousePostgresql user configurable settings
- datadog_user_config ServiceIntegrationDatadogUserConfigArgs - Datadog user configurable settings
- destination_endpoint_id str - Destination endpoint for the integration.
- destination_service_name str - Destination service for the integration.
- external_aws_cloudwatch_logs_user_config ServiceIntegrationExternalAwsCloudwatchLogsUserConfigArgs - ExternalAwsCloudwatchLogs user configurable settings
- external_aws_cloudwatch_metrics_user_config ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigArgs - ExternalAwsCloudwatchMetrics user configurable settings
- external_elasticsearch_logs_user_config ServiceIntegrationExternalElasticsearchLogsUserConfigArgs - ExternalElasticsearchLogs user configurable settings
- external_opensearch_logs_user_config ServiceIntegrationExternalOpensearchLogsUserConfigArgs - ExternalOpensearchLogs user configurable settings
- kafka_connect_user_config ServiceIntegrationKafkaConnectUserConfigArgs - KafkaConnect user configurable settings
- kafka_logs_user_config ServiceIntegrationKafkaLogsUserConfigArgs - KafkaLogs user configurable settings
- kafka_mirrormaker_user_config ServiceIntegrationKafkaMirrormakerUserConfigArgs - KafkaMirrormaker user configurable settings
- logs_user_config ServiceIntegrationLogsUserConfigArgs - Logs user configurable settings
- metrics_user_config ServiceIntegrationMetricsUserConfigArgs - Metrics user configurable settings
- prometheus_user_config ServiceIntegrationPrometheusUserConfigArgs - Prometheus user configurable settings
- source_endpoint_id str - Source endpoint for the integration.
- source_service_name str - Source service for the integration (if any)
- integrationType String - Type of the service integration. Possible values: alertmanager, autoscaler, caching, cassandra_cross_service_cluster, clickhouse_credentials, clickhouse_kafka, clickhouse_postgresql, dashboard, datadog, datasource, external_aws_cloudwatch_logs, external_aws_cloudwatch_metrics, external_elasticsearch_logs, external_google_cloud_logging, external_opensearch_logs, flink, flink_external_bigquery, flink_external_kafka, flink_external_postgresql, internal_connectivity, jolokia, kafka_connect, kafka_connect_postgresql, kafka_logs, kafka_mirrormaker, logs, m3aggregator, m3coordinator, metrics, opensearch_cross_cluster_replication, opensearch_cross_cluster_search, prometheus, read_replica, rsyslog, schema_registry_proxy, stresstester, thanos_migrate, thanoscompactor, thanosquery, thanosstore, vector, vmalert
- project String
- Project the integration belongs to.
- clickhouseKafkaUserConfig Property Map - ClickhouseKafka user configurable settings
- clickhousePostgresqlUserConfig Property Map - ClickhousePostgresql user configurable settings
- datadogUserConfig Property Map - Datadog user configurable settings
- destinationEndpointId String - Destination endpoint for the integration.
- destinationServiceName String - Destination service for the integration.
- externalAwsCloudwatchLogsUserConfig Property Map - ExternalAwsCloudwatchLogs user configurable settings
- externalAwsCloudwatchMetricsUserConfig Property Map - ExternalAwsCloudwatchMetrics user configurable settings
- externalElasticsearchLogsUserConfig Property Map - ExternalElasticsearchLogs user configurable settings
- externalOpensearchLogsUserConfig Property Map - ExternalOpensearchLogs user configurable settings
- kafkaConnectUserConfig Property Map - KafkaConnect user configurable settings
- kafkaLogsUserConfig Property Map - KafkaLogs user configurable settings
- kafkaMirrormakerUserConfig Property Map - KafkaMirrormaker user configurable settings
- logsUserConfig Property Map - Logs user configurable settings
- metricsUserConfig Property Map - Metrics user configurable settings
- prometheusUserConfig Property Map - Prometheus user configurable settings
- sourceEndpointId String - Source endpoint for the integration.
- sourceServiceName String - Source service for the integration (if any)
Outputs
All input properties are implicitly available as output properties. Additionally, the ServiceIntegration resource produces the following output properties:
- Id string - The provider-assigned unique ID for this managed resource.
- IntegrationId string - The ID of the Aiven service integration.
- Id string - The provider-assigned unique ID for this managed resource.
- IntegrationId string - The ID of the Aiven service integration.
- id String - The provider-assigned unique ID for this managed resource.
- integrationId String - The ID of the Aiven service integration.
- id string - The provider-assigned unique ID for this managed resource.
- integrationId string - The ID of the Aiven service integration.
- id str - The provider-assigned unique ID for this managed resource.
- integration_id str - The ID of the Aiven service integration.
- id String - The provider-assigned unique ID for this managed resource.
- integrationId String - The ID of the Aiven service integration.
Look up Existing ServiceIntegration Resource
Get an existing ServiceIntegration resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: ServiceIntegrationState, opts?: CustomResourceOptions): ServiceIntegration
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
clickhouse_kafka_user_config: Optional[ServiceIntegrationClickhouseKafkaUserConfigArgs] = None,
clickhouse_postgresql_user_config: Optional[ServiceIntegrationClickhousePostgresqlUserConfigArgs] = None,
datadog_user_config: Optional[ServiceIntegrationDatadogUserConfigArgs] = None,
destination_endpoint_id: Optional[str] = None,
destination_service_name: Optional[str] = None,
external_aws_cloudwatch_logs_user_config: Optional[ServiceIntegrationExternalAwsCloudwatchLogsUserConfigArgs] = None,
external_aws_cloudwatch_metrics_user_config: Optional[ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigArgs] = None,
external_elasticsearch_logs_user_config: Optional[ServiceIntegrationExternalElasticsearchLogsUserConfigArgs] = None,
external_opensearch_logs_user_config: Optional[ServiceIntegrationExternalOpensearchLogsUserConfigArgs] = None,
integration_id: Optional[str] = None,
integration_type: Optional[str] = None,
kafka_connect_user_config: Optional[ServiceIntegrationKafkaConnectUserConfigArgs] = None,
kafka_logs_user_config: Optional[ServiceIntegrationKafkaLogsUserConfigArgs] = None,
kafka_mirrormaker_user_config: Optional[ServiceIntegrationKafkaMirrormakerUserConfigArgs] = None,
logs_user_config: Optional[ServiceIntegrationLogsUserConfigArgs] = None,
metrics_user_config: Optional[ServiceIntegrationMetricsUserConfigArgs] = None,
project: Optional[str] = None,
prometheus_user_config: Optional[ServiceIntegrationPrometheusUserConfigArgs] = None,
source_endpoint_id: Optional[str] = None,
source_service_name: Optional[str] = None) -> ServiceIntegration
func GetServiceIntegration(ctx *Context, name string, id IDInput, state *ServiceIntegrationState, opts ...ResourceOption) (*ServiceIntegration, error)
public static ServiceIntegration Get(string name, Input<string> id, ServiceIntegrationState? state, CustomResourceOptions? opts = null)
public static ServiceIntegration get(String name, Output<String> id, ServiceIntegrationState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- ClickhouseKafkaUserConfig ServiceIntegrationClickhouseKafkaUserConfig - ClickhouseKafka user configurable settings
- ClickhousePostgresqlUserConfig ServiceIntegrationClickhousePostgresqlUserConfig - ClickhousePostgresql user configurable settings
- DatadogUserConfig ServiceIntegrationDatadogUserConfig - Datadog user configurable settings
- DestinationEndpointId string - Destination endpoint for the integration.
- DestinationServiceName string - Destination service for the integration.
- ExternalAwsCloudwatchLogsUserConfig ServiceIntegrationExternalAwsCloudwatchLogsUserConfig - ExternalAwsCloudwatchLogs user configurable settings
- ExternalAwsCloudwatchMetricsUserConfig ServiceIntegrationExternalAwsCloudwatchMetricsUserConfig - ExternalAwsCloudwatchMetrics user configurable settings
- ExternalElasticsearchLogsUserConfig ServiceIntegrationExternalElasticsearchLogsUserConfig - ExternalElasticsearchLogs user configurable settings
- ExternalOpensearchLogsUserConfig ServiceIntegrationExternalOpensearchLogsUserConfig - ExternalOpensearchLogs user configurable settings
- IntegrationId string - The ID of the Aiven service integration.
- IntegrationType string - Type of the service integration. Possible values:
alertmanager
,autoscaler
,caching
,cassandra_cross_service_cluster
,clickhouse_credentials
,clickhouse_kafka
,clickhouse_postgresql
,dashboard
,datadog
,datasource
,external_aws_cloudwatch_logs
,external_aws_cloudwatch_metrics
,external_elasticsearch_logs
,external_google_cloud_logging
,external_opensearch_logs
,flink
,flink_external_bigquery
,flink_external_kafka
,flink_external_postgresql
,internal_connectivity
,jolokia
,kafka_connect
,kafka_connect_postgresql
,kafka_logs
,kafka_mirrormaker
,logs
,m3aggregator
,m3coordinator
,metrics
,opensearch_cross_cluster_replication
,opensearch_cross_cluster_search
,prometheus
,read_replica
,rsyslog
,schema_registry_proxy
,stresstester
,thanos_migrate
,thanoscompactor
,thanosquery
,thanosstore
,vector
,vmalert
- Kafka
Connect ServiceUser Config Integration Kafka Connect User Config - KafkaConnect user configurable settings
- Kafka
Logs ServiceUser Config Integration Kafka Logs User Config - KafkaLogs user configurable settings
- Kafka
Mirrormaker ServiceUser Config Integration Kafka Mirrormaker User Config - KafkaMirrormaker user configurable settings
- Logs
User ServiceConfig Integration Logs User Config - Logs user configurable settings
- Metrics
User ServiceConfig Integration Metrics User Config - Metrics user configurable settings
- Project string
- Project the integration belongs to.
- Prometheus
User ServiceConfig Integration Prometheus User Config - Prometheus user configurable settings
- Source
Endpoint stringId - Source endpoint for the integration.
- Source
Service stringName - Source service for the integration (if any)
- Clickhouse
Kafka ServiceUser Config Integration Clickhouse Kafka User Config Args - ClickhouseKafka user configurable settings
- Clickhouse
Postgresql ServiceUser Config Integration Clickhouse Postgresql User Config Args - ClickhousePostgresql user configurable settings
- Datadog
User ServiceConfig Integration Datadog User Config Args - Datadog user configurable settings
- Destination
Endpoint stringId - Destination endpoint for the integration.
- Destination
Service stringName - Destination service for the integration.
- External
Aws ServiceCloudwatch Logs User Config Integration External Aws Cloudwatch Logs User Config Args - ExternalAwsCloudwatchLogs user configurable settings
- External
Aws ServiceCloudwatch Metrics User Config Integration External Aws Cloudwatch Metrics User Config Args - ExternalAwsCloudwatchMetrics user configurable settings
- External
Elasticsearch ServiceLogs User Config Integration External Elasticsearch Logs User Config Args - ExternalElasticsearchLogs user configurable settings
- External
Opensearch ServiceLogs User Config Integration External Opensearch Logs User Config Args - ExternalOpensearchLogs user configurable settings
- Integration
Id string - The ID of the Aiven service integration.
- Integration
Type string - Type of the service integration. Possible values:
alertmanager
,autoscaler
,caching
,cassandra_cross_service_cluster
,clickhouse_credentials
,clickhouse_kafka
,clickhouse_postgresql
,dashboard
,datadog
,datasource
,external_aws_cloudwatch_logs
,external_aws_cloudwatch_metrics
,external_elasticsearch_logs
,external_google_cloud_logging
,external_opensearch_logs
,flink
,flink_external_bigquery
,flink_external_kafka
,flink_external_postgresql
,internal_connectivity
,jolokia
,kafka_connect
,kafka_connect_postgresql
,kafka_logs
,kafka_mirrormaker
,logs
,m3aggregator
,m3coordinator
,metrics
,opensearch_cross_cluster_replication
,opensearch_cross_cluster_search
,prometheus
,read_replica
,rsyslog
,schema_registry_proxy
,stresstester
,thanos_migrate
,thanoscompactor
,thanosquery
,thanosstore
,vector
,vmalert
- Kafka
Connect ServiceUser Config Integration Kafka Connect User Config Args - KafkaConnect user configurable settings
- Kafka
Logs ServiceUser Config Integration Kafka Logs User Config Args - KafkaLogs user configurable settings
- Kafka
Mirrormaker ServiceUser Config Integration Kafka Mirrormaker User Config Args - KafkaMirrormaker user configurable settings
- Logs
User ServiceConfig Integration Logs User Config Args - Logs user configurable settings
- Metrics
User ServiceConfig Integration Metrics User Config Args - Metrics user configurable settings
- Project string
- Project the integration belongs to.
- Prometheus
User ServiceConfig Integration Prometheus User Config Args - Prometheus user configurable settings
- Source
Endpoint stringId - Source endpoint for the integration.
- Source
Service stringName - Source service for the integration (if any)
- clickhouse
Kafka ServiceUser Config Integration Clickhouse Kafka User Config - ClickhouseKafka user configurable settings
- clickhouse
Postgresql ServiceUser Config Integration Clickhouse Postgresql User Config - ClickhousePostgresql user configurable settings
- datadog
User ServiceConfig Integration Datadog User Config - Datadog user configurable settings
- destination
Endpoint StringId - Destination endpoint for the integration.
- destination
Service StringName - Destination service for the integration.
- external
Aws ServiceCloudwatch Logs User Config Integration External Aws Cloudwatch Logs User Config - ExternalAwsCloudwatchLogs user configurable settings
- external
Aws ServiceCloudwatch Metrics User Config Integration External Aws Cloudwatch Metrics User Config - ExternalAwsCloudwatchMetrics user configurable settings
- external
Elasticsearch ServiceLogs User Config Integration External Elasticsearch Logs User Config - ExternalElasticsearchLogs user configurable settings
- external
Opensearch ServiceLogs User Config Integration External Opensearch Logs User Config - ExternalOpensearchLogs user configurable settings
- integration
Id String - The ID of the Aiven service integration.
- integration
Type String - Type of the service integration. Possible values:
alertmanager
,autoscaler
,caching
,cassandra_cross_service_cluster
,clickhouse_credentials
,clickhouse_kafka
,clickhouse_postgresql
,dashboard
,datadog
,datasource
,external_aws_cloudwatch_logs
,external_aws_cloudwatch_metrics
,external_elasticsearch_logs
,external_google_cloud_logging
,external_opensearch_logs
,flink
,flink_external_bigquery
,flink_external_kafka
,flink_external_postgresql
,internal_connectivity
,jolokia
,kafka_connect
,kafka_connect_postgresql
,kafka_logs
,kafka_mirrormaker
,logs
,m3aggregator
,m3coordinator
,metrics
,opensearch_cross_cluster_replication
,opensearch_cross_cluster_search
,prometheus
,read_replica
,rsyslog
,schema_registry_proxy
,stresstester
,thanos_migrate
,thanoscompactor
,thanosquery
,thanosstore
,vector
,vmalert
- kafka
Connect ServiceUser Config Integration Kafka Connect User Config - KafkaConnect user configurable settings
- kafka
Logs ServiceUser Config Integration Kafka Logs User Config - KafkaLogs user configurable settings
- kafka
Mirrormaker ServiceUser Config Integration Kafka Mirrormaker User Config - KafkaMirrormaker user configurable settings
- logs
User ServiceConfig Integration Logs User Config - Logs user configurable settings
- metrics
User ServiceConfig Integration Metrics User Config - Metrics user configurable settings
- project String
- Project the integration belongs to.
- prometheus
User ServiceConfig Integration Prometheus User Config - Prometheus user configurable settings
- source
Endpoint StringId - Source endpoint for the integration.
- source
Service StringName - Source service for the integration (if any)
- clickhouse
Kafka ServiceUser Config Integration Clickhouse Kafka User Config - ClickhouseKafka user configurable settings
- clickhouse
Postgresql ServiceUser Config Integration Clickhouse Postgresql User Config - ClickhousePostgresql user configurable settings
- datadog
User ServiceConfig Integration Datadog User Config - Datadog user configurable settings
- destination
Endpoint stringId - Destination endpoint for the integration.
- destination
Service stringName - Destination service for the integration.
- external
Aws ServiceCloudwatch Logs User Config Integration External Aws Cloudwatch Logs User Config - ExternalAwsCloudwatchLogs user configurable settings
- external
Aws ServiceCloudwatch Metrics User Config Integration External Aws Cloudwatch Metrics User Config - ExternalAwsCloudwatchMetrics user configurable settings
- external
Elasticsearch ServiceLogs User Config Integration External Elasticsearch Logs User Config - ExternalElasticsearchLogs user configurable settings
- external
Opensearch ServiceLogs User Config Integration External Opensearch Logs User Config - ExternalOpensearchLogs user configurable settings
- integration
Id string - The ID of the Aiven service integration.
- integration
Type string - Type of the service integration. Possible values:
alertmanager
,autoscaler
,caching
,cassandra_cross_service_cluster
,clickhouse_credentials
,clickhouse_kafka
,clickhouse_postgresql
,dashboard
,datadog
,datasource
,external_aws_cloudwatch_logs
,external_aws_cloudwatch_metrics
,external_elasticsearch_logs
,external_google_cloud_logging
,external_opensearch_logs
,flink
,flink_external_bigquery
,flink_external_kafka
,flink_external_postgresql
,internal_connectivity
,jolokia
,kafka_connect
,kafka_connect_postgresql
,kafka_logs
,kafka_mirrormaker
,logs
,m3aggregator
,m3coordinator
,metrics
,opensearch_cross_cluster_replication
,opensearch_cross_cluster_search
,prometheus
,read_replica
,rsyslog
,schema_registry_proxy
,stresstester
,thanos_migrate
,thanoscompactor
,thanosquery
,thanosstore
,vector
,vmalert
- kafka
Connect ServiceUser Config Integration Kafka Connect User Config - KafkaConnect user configurable settings
- kafka
Logs ServiceUser Config Integration Kafka Logs User Config - KafkaLogs user configurable settings
- kafka
Mirrormaker ServiceUser Config Integration Kafka Mirrormaker User Config - KafkaMirrormaker user configurable settings
- logs
User ServiceConfig Integration Logs User Config - Logs user configurable settings
- metrics
User ServiceConfig Integration Metrics User Config - Metrics user configurable settings
- project string
- Project the integration belongs to.
- prometheus
User ServiceConfig Integration Prometheus User Config - Prometheus user configurable settings
- source
Endpoint stringId - Source endpoint for the integration.
- source
Service stringName - Source service for the integration (if any)
- clickhouse_kafka_user_config ServiceIntegrationClickhouseKafkaUserConfigArgs - ClickhouseKafka user configurable settings
- clickhouse_postgresql_user_config ServiceIntegrationClickhousePostgresqlUserConfigArgs - ClickhousePostgresql user configurable settings
- datadog_user_config ServiceIntegrationDatadogUserConfigArgs - Datadog user configurable settings
- destination_endpoint_id str - Destination endpoint for the integration.
- destination_service_name str - Destination service for the integration.
- external_aws_cloudwatch_logs_user_config ServiceIntegrationExternalAwsCloudwatchLogsUserConfigArgs - ExternalAwsCloudwatchLogs user configurable settings
- external_aws_cloudwatch_metrics_user_config ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigArgs - ExternalAwsCloudwatchMetrics user configurable settings
- external_elasticsearch_logs_user_config ServiceIntegrationExternalElasticsearchLogsUserConfigArgs - ExternalElasticsearchLogs user configurable settings
- external_opensearch_logs_user_config ServiceIntegrationExternalOpensearchLogsUserConfigArgs - ExternalOpensearchLogs user configurable settings
- integration_id str - The ID of the Aiven service integration.
- integration_type str - Type of the service integration. Possible values:
alertmanager
,autoscaler
,caching
,cassandra_cross_service_cluster
,clickhouse_credentials
,clickhouse_kafka
,clickhouse_postgresql
,dashboard
,datadog
,datasource
,external_aws_cloudwatch_logs
,external_aws_cloudwatch_metrics
,external_elasticsearch_logs
,external_google_cloud_logging
,external_opensearch_logs
,flink
,flink_external_bigquery
,flink_external_kafka
,flink_external_postgresql
,internal_connectivity
,jolokia
,kafka_connect
,kafka_connect_postgresql
,kafka_logs
,kafka_mirrormaker
,logs
,m3aggregator
,m3coordinator
,metrics
,opensearch_cross_cluster_replication
,opensearch_cross_cluster_search
,prometheus
,read_replica
,rsyslog
,schema_registry_proxy
,stresstester
,thanos_migrate
,thanoscompactor
,thanosquery
,thanosstore
,vector
,vmalert
- kafka_
connect_ Serviceuser_ config Integration Kafka Connect User Config Args - KafkaConnect user configurable settings
- kafka_
logs_ Serviceuser_ config Integration Kafka Logs User Config Args - KafkaLogs user configurable settings
- kafka_
mirrormaker_ Serviceuser_ config Integration Kafka Mirrormaker User Config Args - KafkaMirrormaker user configurable settings
- logs_
user_ Serviceconfig Integration Logs User Config Args - Logs user configurable settings
- metrics_
user_ Serviceconfig Integration Metrics User Config Args - Metrics user configurable settings
- project str
- Project the integration belongs to.
- prometheus_
user_ Serviceconfig Integration Prometheus User Config Args - Prometheus user configurable settings
- source_
endpoint_ strid - Source endpoint for the integration.
- source_
service_ strname - Source service for the integration (if any)
- clickhouse
Kafka Property MapUser Config - ClickhouseKafka user configurable settings
- clickhouse
Postgresql Property MapUser Config - ClickhousePostgresql user configurable settings
- datadog
User Property MapConfig - Datadog user configurable settings
- destination
Endpoint StringId - Destination endpoint for the integration.
- destination
Service StringName - Destination service for the integration.
- external
Aws Property MapCloudwatch Logs User Config - ExternalAwsCloudwatchLogs user configurable settings
- external
Aws Property MapCloudwatch Metrics User Config - ExternalAwsCloudwatchMetrics user configurable settings
- external
Elasticsearch Property MapLogs User Config - ExternalElasticsearchLogs user configurable settings
- external
Opensearch Property MapLogs User Config - ExternalOpensearchLogs user configurable settings
- integration
Id String - The ID of the Aiven service integration.
- integration
Type String - Type of the service integration. Possible values:
alertmanager
,autoscaler
,caching
,cassandra_cross_service_cluster
,clickhouse_credentials
,clickhouse_kafka
,clickhouse_postgresql
,dashboard
,datadog
,datasource
,external_aws_cloudwatch_logs
,external_aws_cloudwatch_metrics
,external_elasticsearch_logs
,external_google_cloud_logging
,external_opensearch_logs
,flink
,flink_external_bigquery
,flink_external_kafka
,flink_external_postgresql
,internal_connectivity
,jolokia
,kafka_connect
,kafka_connect_postgresql
,kafka_logs
,kafka_mirrormaker
,logs
,m3aggregator
,m3coordinator
,metrics
,opensearch_cross_cluster_replication
,opensearch_cross_cluster_search
,prometheus
,read_replica
,rsyslog
,schema_registry_proxy
,stresstester
,thanos_migrate
,thanoscompactor
,thanosquery
,thanosstore
,vector
,vmalert
- kafka
Connect Property MapUser Config - KafkaConnect user configurable settings
- kafka
Logs Property MapUser Config - KafkaLogs user configurable settings
- kafka
Mirrormaker Property MapUser Config - KafkaMirrormaker user configurable settings
- logs
User Property MapConfig - Logs user configurable settings
- metrics
User Property MapConfig - Metrics user configurable settings
- project String
- Project the integration belongs to.
- prometheus
User Property MapConfig - Prometheus user configurable settings
- source
Endpoint StringId - Source endpoint for the integration.
- source
Service StringName - Source service for the integration (if any)
Supporting Types
ServiceIntegrationClickhouseKafkaUserConfig, ServiceIntegrationClickhouseKafkaUserConfigArgs
- Tables
List<Service
Integration Clickhouse Kafka User Config Table> - Tables to create
- Tables
[]Service
Integration Clickhouse Kafka User Config Table - Tables to create
- tables
List<Service
Integration Clickhouse Kafka User Config Table> - Tables to create
- tables
Service
Integration Clickhouse Kafka User Config Table[] - Tables to create
- tables
Sequence[Service
Integration Clickhouse Kafka User Config Table] - Tables to create
- tables List<Property Map>
- Tables to create
ServiceIntegrationClickhouseKafkaUserConfigTable, ServiceIntegrationClickhouseKafkaUserConfigTableArgs
- Columns
List<Service
Integration Clickhouse Kafka User Config Table Column> - Table columns
- Data
Format string - Enum:
Avro
,CSV
,JSONAsString
,JSONCompactEachRow
,JSONCompactStringsEachRow
,JSONEachRow
,JSONStringsEachRow
,MsgPack
,TSKV
,TSV
,TabSeparated
,RawBLOB
,AvroConfluent
,Parquet
. Message data format. Default:JSONEachRow
. - Group
Name string - Kafka consumers group. Default:
clickhouse
. - Name string
- Name of the table. Example:
events
. - Topics
List<Service
Integration Clickhouse Kafka User Config Table Topic> - Kafka topics
- Auto
Offset stringReset - Enum:
smallest
,earliest
,beginning
,largest
,latest
,end
. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default:earliest
. - Date
Time stringInput Format - Enum:
basic
,best_effort
,best_effort_us
. Method to read DateTime from text input formats. Default:basic
. - Handle
Error stringMode - Enum:
default
,stream
. How to handle errors for Kafka engine. Default:default
- MaxBlockSize int - Number of rows collected by poll(s) for flushing data from Kafka. Default: 0.
- MaxRowsPerMessage int - The maximum number of rows produced in one Kafka message for row-based formats. Default: 1.
- NumConsumers int - The number of consumers per table per replica. Default: 1.
- PollMaxBatchSize int - Maximum amount of messages to be polled in a single Kafka poll. Default: 0.
- SkipBrokenMessages int - Skip at least this number of broken messages from Kafka topic per block. Default: 0.
- Columns
[]Service
Integration Clickhouse Kafka User Config Table Column - Table columns
- Data
Format string - Enum:
Avro
,CSV
,JSONAsString
,JSONCompactEachRow
,JSONCompactStringsEachRow
,JSONEachRow
,JSONStringsEachRow
,MsgPack
,TSKV
,TSV
,TabSeparated
,RawBLOB
,AvroConfluent
,Parquet
. Message data format. Default:JSONEachRow
. - Group
Name string - Kafka consumers group. Default:
clickhouse
. - Name string
- Name of the table. Example:
events
. - Topics
[]Service
Integration Clickhouse Kafka User Config Table Topic - Kafka topics
- Auto
Offset stringReset - Enum:
smallest
,earliest
,beginning
,largest
,latest
,end
. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default:earliest
. - Date
Time stringInput Format - Enum:
basic
,best_effort
,best_effort_us
. Method to read DateTime from text input formats. Default:basic
. - Handle
Error stringMode - Enum:
default
,stream
. How to handle errors for Kafka engine. Default:default
. - Max
Block intSize - Number of row collected by poll(s) for flushing data from Kafka. Default:
0
. - Max
Rows intPer Message - The maximum number of rows produced in one kafka message for row-based formats. Default:
1
. - Num
Consumers int - The number of consumers per table per replica. Default:
1
. - Poll
Max intBatch Size - Maximum amount of messages to be polled in a single Kafka poll. Default:
0
. - Skip
Broken intMessages - Skip at least this number of broken messages from Kafka topic per block. Default:
0
.
- columns
List<Service
Integration Clickhouse Kafka User Config Table Column> - Table columns
- data
Format String - Enum:
Avro
,CSV
,JSONAsString
,JSONCompactEachRow
,JSONCompactStringsEachRow
,JSONEachRow
,JSONStringsEachRow
,MsgPack
,TSKV
,TSV
,TabSeparated
,RawBLOB
,AvroConfluent
,Parquet
. Message data format. Default:JSONEachRow
. - group
Name String - Kafka consumers group. Default:
clickhouse
. - name String
- Name of the table. Example:
events
. - topics
List<Service
Integration Clickhouse Kafka User Config Table Topic> - Kafka topics
- auto
Offset StringReset - Enum:
smallest
,earliest
,beginning
,largest
,latest
,end
. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default:earliest
. - date
Time StringInput Format - Enum:
basic
,best_effort
,best_effort_us
. Method to read DateTime from text input formats. Default:basic
. - handle
Error StringMode - Enum:
default
,stream
. How to handle errors for Kafka engine. Default:default
. - max
Block IntegerSize - Number of row collected by poll(s) for flushing data from Kafka. Default:
0
. - max
Rows IntegerPer Message - The maximum number of rows produced in one kafka message for row-based formats. Default:
1
. - num
Consumers Integer - The number of consumers per table per replica. Default:
1
. - poll
Max IntegerBatch Size - Maximum amount of messages to be polled in a single Kafka poll. Default:
0
. - skip
Broken IntegerMessages - Skip at least this number of broken messages from Kafka topic per block. Default:
0
.
- columns
Service
Integration Clickhouse Kafka User Config Table Column[] - Table columns
- data
Format string - Enum:
Avro
,CSV
,JSONAsString
,JSONCompactEachRow
,JSONCompactStringsEachRow
,JSONEachRow
,JSONStringsEachRow
,MsgPack
,TSKV
,TSV
,TabSeparated
,RawBLOB
,AvroConfluent
,Parquet
. Message data format. Default:JSONEachRow
. - group
Name string - Kafka consumers group. Default:
clickhouse
. - name string
- Name of the table. Example:
events
. - topics
Service
Integration Clickhouse Kafka User Config Table Topic[] - Kafka topics
- auto
Offset stringReset - Enum:
smallest
,earliest
,beginning
,largest
,latest
,end
. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default:earliest
. - date
Time stringInput Format - Enum:
basic
,best_effort
,best_effort_us
. Method to read DateTime from text input formats. Default:basic
. - handle
Error stringMode - Enum:
default
,stream
. How to handle errors for Kafka engine. Default:default
. - max
Block numberSize - Number of row collected by poll(s) for flushing data from Kafka. Default:
0
. - max
Rows numberPer Message - The maximum number of rows produced in one kafka message for row-based formats. Default:
1
. - num
Consumers number - The number of consumers per table per replica. Default:
1
. - poll
Max numberBatch Size - Maximum amount of messages to be polled in a single Kafka poll. Default:
0
. - skip
Broken numberMessages - Skip at least this number of broken messages from Kafka topic per block. Default:
0
.
- columns
Sequence[Service
Integration Clickhouse Kafka User Config Table Column] - Table columns
- data_
format str - Enum:
Avro
,CSV
,JSONAsString
,JSONCompactEachRow
,JSONCompactStringsEachRow
,JSONEachRow
,JSONStringsEachRow
,MsgPack
,TSKV
,TSV
,TabSeparated
,RawBLOB
,AvroConfluent
,Parquet
. Message data format. Default:JSONEachRow
. - group_
name str - Kafka consumers group. Default:
clickhouse
. - name str
- Name of the table. Example:
events
. - topics
Sequence[Service
Integration Clickhouse Kafka User Config Table Topic] - Kafka topics
- auto_
offset_ strreset - Enum:
smallest
,earliest
,beginning
,largest
,latest
,end
. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default:earliest
. - date_
time_ strinput_ format - Enum:
basic
,best_effort
,best_effort_us
. Method to read DateTime from text input formats. Default:basic
. - handle_
error_ strmode - Enum:
default
,stream
. How to handle errors for Kafka engine. Default:default
. - max_
block_ intsize - Number of row collected by poll(s) for flushing data from Kafka. Default:
0
. - max_
rows_ intper_ message - The maximum number of rows produced in one kafka message for row-based formats. Default:
1
. - num_
consumers int - The number of consumers per table per replica. Default:
1
. - poll_
max_ intbatch_ size - Maximum amount of messages to be polled in a single Kafka poll. Default:
0
. - skip_
broken_ intmessages - Skip at least this number of broken messages from Kafka topic per block. Default:
0
.
- columns List<Property Map>
- Table columns
- data
Format String - Enum:
Avro
,CSV
,JSONAsString
,JSONCompactEachRow
,JSONCompactStringsEachRow
,JSONEachRow
,JSONStringsEachRow
,MsgPack
,TSKV
,TSV
,TabSeparated
,RawBLOB
,AvroConfluent
,Parquet
. Message data format. Default:JSONEachRow
. - group
Name String - Kafka consumers group. Default:
clickhouse
. - name String
- Name of the table. Example:
events
. - topics List<Property Map>
- Kafka topics
- auto
Offset StringReset - Enum:
smallest
,earliest
,beginning
,largest
,latest
,end
. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default:earliest
. - date
Time StringInput Format - Enum:
basic
,best_effort
,best_effort_us
. Method to read DateTime from text input formats. Default:basic
. - handle
Error StringMode - Enum:
default
,stream
. How to handle errors for Kafka engine. Default:default
. - max
Block NumberSize - Number of row collected by poll(s) for flushing data from Kafka. Default:
0
. - max
Rows NumberPer Message - The maximum number of rows produced in one kafka message for row-based formats. Default:
1
. - num
Consumers Number - The number of consumers per table per replica. Default:
1
. - poll
Max NumberBatch Size - Maximum amount of messages to be polled in a single Kafka poll. Default:
0
. - skip
Broken NumberMessages - Skip at least this number of broken messages from Kafka topic per block. Default:
0
.
ServiceIntegrationClickhouseKafkaUserConfigTableColumn, ServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs
ServiceIntegrationClickhouseKafkaUserConfigTableTopic, ServiceIntegrationClickhouseKafkaUserConfigTableTopicArgs
- Name string
- Name of the topic. Example:
topic_name
.
- Name string
- Name of the topic. Example:
topic_name
.
- name String
- Name of the topic. Example:
topic_name
.
- name string
- Name of the topic. Example:
topic_name
.
- name str
- Name of the topic. Example:
topic_name
.
- name String
- Name of the topic. Example:
topic_name
.
ServiceIntegrationClickhousePostgresqlUserConfig, ServiceIntegrationClickhousePostgresqlUserConfigArgs
- Databases
List<Service
Integration Clickhouse Postgresql User Config Database> - Databases to expose
- Databases
[]Service
Integration Clickhouse Postgresql User Config Database - Databases to expose
- databases
List<Service
Integration Clickhouse Postgresql User Config Database> - Databases to expose
- databases
Service
Integration Clickhouse Postgresql User Config Database[] - Databases to expose
- databases
Sequence[Service
Integration Clickhouse Postgresql User Config Database] - Databases to expose
- databases List<Property Map>
- Databases to expose
ServiceIntegrationClickhousePostgresqlUserConfigDatabase, ServiceIntegrationClickhousePostgresqlUserConfigDatabaseArgs
ServiceIntegrationDatadogUserConfig, ServiceIntegrationDatadogUserConfigArgs
- Datadog
Dbm boolEnabled - Enable Datadog Database Monitoring.
- Datadog
Pgbouncer boolEnabled - Enable Datadog PgBouncer Metric Tracking.
- DatadogTags List<ServiceIntegrationDatadogUserConfigDatadogTag> - Custom tags provided by user
- Exclude
Consumer List<string>Groups - List of custom metrics.
- Exclude
Topics List<string> - List of topics to exclude.
- Include
Consumer List<string>Groups - List of custom metrics.
- Include
Topics List<string> - List of topics to include.
- Kafka
Custom List<string>Metrics - List of custom metrics.
- Max
Jmx intMetrics - Maximum number of JMX metrics to send. Example:
2000
. - Mirrormaker
Custom List<string>Metrics - List of custom metrics.
- Opensearch
Service
Integration Datadog User Config Opensearch - Datadog Opensearch Options
- Redis
Service
Integration Datadog User Config Redis - Datadog Redis Options
- Datadog
Dbm boolEnabled - Enable Datadog Database Monitoring.
- Datadog
Pgbouncer boolEnabled - Enable Datadog PgBouncer Metric Tracking.
- DatadogTags []ServiceIntegrationDatadogUserConfigDatadogTag - Custom tags provided by user
- Exclude
Consumer []stringGroups - List of custom metrics.
- Exclude
Topics []string - List of topics to exclude.
- Include
Consumer []stringGroups - List of custom metrics.
- Include
Topics []string - List of topics to include.
- Kafka
Custom []stringMetrics - List of custom metrics.
- Max
Jmx intMetrics - Maximum number of JMX metrics to send. Example:
2000
. - Mirrormaker
Custom []stringMetrics - List of custom metrics.
- Opensearch
Service
Integration Datadog User Config Opensearch - Datadog Opensearch Options
- Redis
Service
Integration Datadog User Config Redis - Datadog Redis Options
- datadog
Dbm BooleanEnabled - Enable Datadog Database Monitoring.
- datadog
Pgbouncer BooleanEnabled - Enable Datadog PgBouncer Metric Tracking.
- List<Service
Integration Datadog User Config Datadog Tag> - Custom tags provided by user
- exclude
Consumer List<String>Groups - List of custom metrics.
- exclude
Topics List<String> - List of topics to exclude.
- include
Consumer List<String>Groups - List of custom metrics.
- include
Topics List<String> - List of topics to include.
- kafka
Custom List<String>Metrics - List of custom metrics.
- max
Jmx IntegerMetrics - Maximum number of JMX metrics to send. Example:
2000
. - mirrormaker
Custom List<String>Metrics - List of custom metrics.
- opensearch
Service
Integration Datadog User Config Opensearch - Datadog Opensearch Options
- redis
Service
Integration Datadog User Config Redis - Datadog Redis Options
- datadogDbmEnabled boolean - Enable Datadog Database Monitoring.
- datadogPgbouncerEnabled boolean - Enable Datadog PgBouncer Metric Tracking.
- datadogTags ServiceIntegrationDatadogUserConfigDatadogTag[] - Custom tags provided by user
- excludeConsumerGroups string[] - List of custom metrics.
- excludeTopics string[] - List of topics to exclude.
- includeConsumerGroups string[] - List of custom metrics.
- includeTopics string[] - List of topics to include.
- kafkaCustomMetrics string[] - List of custom metrics.
- maxJmxMetrics number - Maximum number of JMX metrics to send. Example: 2000.
- mirrormakerCustomMetrics string[] - List of custom metrics.
- opensearch ServiceIntegrationDatadogUserConfigOpensearch - Datadog Opensearch Options
- redis ServiceIntegrationDatadogUserConfigRedis - Datadog Redis Options
- datadog_dbm_enabled bool - Enable Datadog Database Monitoring.
- datadog_pgbouncer_enabled bool - Enable Datadog PgBouncer Metric Tracking.
- datadog_tags Sequence[ServiceIntegrationDatadogUserConfigDatadogTag] - Custom tags provided by user
- exclude_consumer_groups Sequence[str] - List of custom metrics.
- exclude_topics Sequence[str] - List of topics to exclude.
- include_consumer_groups Sequence[str] - List of custom metrics.
- include_topics Sequence[str] - List of topics to include.
- kafka_custom_metrics Sequence[str] - List of custom metrics.
- max_jmx_metrics int - Maximum number of JMX metrics to send. Example: 2000.
- mirrormaker_custom_metrics Sequence[str] - List of custom metrics.
- opensearch ServiceIntegrationDatadogUserConfigOpensearch - Datadog Opensearch Options
- redis ServiceIntegrationDatadogUserConfigRedis - Datadog Redis Options
- datadogDbmEnabled Boolean - Enable Datadog Database Monitoring.
- datadogPgbouncerEnabled Boolean - Enable Datadog PgBouncer Metric Tracking.
- datadogTags List&lt;Property Map&gt; - Custom tags provided by user
- excludeConsumerGroups List&lt;String&gt; - List of custom metrics.
- excludeTopics List&lt;String&gt; - List of topics to exclude.
- includeConsumerGroups List&lt;String&gt; - List of custom metrics.
- includeTopics List&lt;String&gt; - List of topics to include.
- kafkaCustomMetrics List&lt;String&gt; - List of custom metrics.
- maxJmxMetrics Number - Maximum number of JMX metrics to send. Example: 2000.
- mirrormakerCustomMetrics List&lt;String&gt; - List of custom metrics.
- opensearch Property Map - Datadog Opensearch Options
- redis Property Map - Datadog Redis Options
ServiceIntegrationDatadogUserConfigDatadogTag, ServiceIntegrationDatadogUserConfigDatadogTagArgs
ServiceIntegrationDatadogUserConfigOpensearch, ServiceIntegrationDatadogUserConfigOpensearchArgs
- ClusterStatsEnabled bool - Enable Datadog Opensearch Cluster Monitoring.
- IndexStatsEnabled bool - Enable Datadog Opensearch Index Monitoring.
- PendingTaskStatsEnabled bool - Enable Datadog Opensearch Pending Task Monitoring.
- PshardStatsEnabled bool - Enable Datadog Opensearch Primary Shard Monitoring.
- ClusterStatsEnabled bool - Enable Datadog Opensearch Cluster Monitoring.
- IndexStatsEnabled bool - Enable Datadog Opensearch Index Monitoring.
- PendingTaskStatsEnabled bool - Enable Datadog Opensearch Pending Task Monitoring.
- PshardStatsEnabled bool - Enable Datadog Opensearch Primary Shard Monitoring.
- clusterStatsEnabled Boolean - Enable Datadog Opensearch Cluster Monitoring.
- indexStatsEnabled Boolean - Enable Datadog Opensearch Index Monitoring.
- pendingTaskStatsEnabled Boolean - Enable Datadog Opensearch Pending Task Monitoring.
- pshardStatsEnabled Boolean - Enable Datadog Opensearch Primary Shard Monitoring.
- clusterStatsEnabled boolean - Enable Datadog Opensearch Cluster Monitoring.
- indexStatsEnabled boolean - Enable Datadog Opensearch Index Monitoring.
- pendingTaskStatsEnabled boolean - Enable Datadog Opensearch Pending Task Monitoring.
- pshardStatsEnabled boolean - Enable Datadog Opensearch Primary Shard Monitoring.
- cluster_stats_enabled bool - Enable Datadog Opensearch Cluster Monitoring.
- index_stats_enabled bool - Enable Datadog Opensearch Index Monitoring.
- pending_task_stats_enabled bool - Enable Datadog Opensearch Pending Task Monitoring.
- pshard_stats_enabled bool - Enable Datadog Opensearch Primary Shard Monitoring.
- clusterStatsEnabled Boolean - Enable Datadog Opensearch Cluster Monitoring.
- indexStatsEnabled Boolean - Enable Datadog Opensearch Index Monitoring.
- pendingTaskStatsEnabled Boolean - Enable Datadog Opensearch Pending Task Monitoring.
- pshardStatsEnabled Boolean - Enable Datadog Opensearch Primary Shard Monitoring.
ServiceIntegrationDatadogUserConfigRedis, ServiceIntegrationDatadogUserConfigRedisArgs
- CommandStatsEnabled bool - Enable command_stats option in the agent's configuration. Default: false.
- CommandStatsEnabled bool - Enable command_stats option in the agent's configuration. Default: false.
- commandStatsEnabled Boolean - Enable command_stats option in the agent's configuration. Default: false.
- commandStatsEnabled boolean - Enable command_stats option in the agent's configuration. Default: false.
- command_stats_enabled bool - Enable command_stats option in the agent's configuration. Default: false.
- commandStatsEnabled Boolean - Enable command_stats option in the agent's configuration. Default: false.
ServiceIntegrationExternalAwsCloudwatchLogsUserConfig, ServiceIntegrationExternalAwsCloudwatchLogsUserConfigArgs
- SelectedLogFields List&lt;string&gt; - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- SelectedLogFields []string - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- selectedLogFields List&lt;String&gt; - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- selectedLogFields string[] - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- selected_log_fields Sequence[str] - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- selectedLogFields List&lt;String&gt; - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
ServiceIntegrationExternalAwsCloudwatchMetricsUserConfig, ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigArgs
- DroppedMetrics List&lt;ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigDroppedMetric&gt; - Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics)
- ExtraMetrics List&lt;ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigExtraMetric&gt; - Metrics to allow through to AWS CloudWatch (in addition to default metrics)
- DroppedMetrics []ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigDroppedMetric - Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics)
- ExtraMetrics []ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigExtraMetric - Metrics to allow through to AWS CloudWatch (in addition to default metrics)
- droppedMetrics List&lt;ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigDroppedMetric&gt; - Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics)
- extraMetrics List&lt;ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigExtraMetric&gt; - Metrics to allow through to AWS CloudWatch (in addition to default metrics)
- droppedMetrics ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigDroppedMetric[] - Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics)
- extraMetrics ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigExtraMetric[] - Metrics to allow through to AWS CloudWatch (in addition to default metrics)
- dropped_metrics Sequence[ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigDroppedMetric] - Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics)
- extra_metrics Sequence[ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigExtraMetric] - Metrics to allow through to AWS CloudWatch (in addition to default metrics)
- droppedMetrics List&lt;Property Map&gt; - Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics)
- extraMetrics List&lt;Property Map&gt; - Metrics to allow through to AWS CloudWatch (in addition to default metrics)
ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigDroppedMetric, ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigDroppedMetricArgs
ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigExtraMetric, ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigExtraMetricArgs
ServiceIntegrationExternalElasticsearchLogsUserConfig, ServiceIntegrationExternalElasticsearchLogsUserConfigArgs
- SelectedLogFields List&lt;string&gt; - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- SelectedLogFields []string - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- selectedLogFields List&lt;String&gt; - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- selectedLogFields string[] - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- selected_log_fields Sequence[str] - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- selectedLogFields List&lt;String&gt; - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
ServiceIntegrationExternalOpensearchLogsUserConfig, ServiceIntegrationExternalOpensearchLogsUserConfigArgs
- SelectedLogFields List&lt;string&gt; - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- SelectedLogFields []string - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- selectedLogFields List&lt;String&gt; - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- selectedLogFields string[] - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- selected_log_fields Sequence[str] - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- selectedLogFields List&lt;String&gt; - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
ServiceIntegrationKafkaConnectUserConfig, ServiceIntegrationKafkaConnectUserConfigArgs
- KafkaConnect ServiceIntegrationKafkaConnectUserConfigKafkaConnect - Kafka Connect service configuration values
- KafkaConnect ServiceIntegrationKafkaConnectUserConfigKafkaConnect - Kafka Connect service configuration values
- kafkaConnect ServiceIntegrationKafkaConnectUserConfigKafkaConnect - Kafka Connect service configuration values
- kafkaConnect ServiceIntegrationKafkaConnectUserConfigKafkaConnect - Kafka Connect service configuration values
- kafka_connect ServiceIntegrationKafkaConnectUserConfigKafkaConnect - Kafka Connect service configuration values
- kafkaConnect Property Map - Kafka Connect service configuration values
ServiceIntegrationKafkaConnectUserConfigKafkaConnect, ServiceIntegrationKafkaConnectUserConfigKafkaConnectArgs
- ConfigStorageTopic string - The name of the topic where connector and task configuration data are stored. This must be the same for all workers with the same group_id. Example: __connect_configs.
- GroupId string - A unique string that identifies the Connect cluster group this worker belongs to. Example: connect.
- OffsetStorageTopic string - The name of the topic where connector and task configuration offsets are stored. This must be the same for all workers with the same group_id. Example: __connect_offsets.
- StatusStorageTopic string - The name of the topic where connector and task configuration status updates are stored. This must be the same for all workers with the same group_id. Example: __connect_status.
- ConfigStorageTopic string - The name of the topic where connector and task configuration data are stored. This must be the same for all workers with the same group_id. Example: __connect_configs.
- GroupId string - A unique string that identifies the Connect cluster group this worker belongs to. Example: connect.
- OffsetStorageTopic string - The name of the topic where connector and task configuration offsets are stored. This must be the same for all workers with the same group_id. Example: __connect_offsets.
- StatusStorageTopic string - The name of the topic where connector and task configuration status updates are stored. This must be the same for all workers with the same group_id. Example: __connect_status.
- configStorageTopic String - The name of the topic where connector and task configuration data are stored. This must be the same for all workers with the same group_id. Example: __connect_configs.
- groupId String - A unique string that identifies the Connect cluster group this worker belongs to. Example: connect.
- offsetStorageTopic String - The name of the topic where connector and task configuration offsets are stored. This must be the same for all workers with the same group_id. Example: __connect_offsets.
- statusStorageTopic String - The name of the topic where connector and task configuration status updates are stored. This must be the same for all workers with the same group_id. Example: __connect_status.
- configStorageTopic string - The name of the topic where connector and task configuration data are stored. This must be the same for all workers with the same group_id. Example: __connect_configs.
- groupId string - A unique string that identifies the Connect cluster group this worker belongs to. Example: connect.
- offsetStorageTopic string - The name of the topic where connector and task configuration offsets are stored. This must be the same for all workers with the same group_id. Example: __connect_offsets.
- statusStorageTopic string - The name of the topic where connector and task configuration status updates are stored. This must be the same for all workers with the same group_id. Example: __connect_status.
- config_storage_topic str - The name of the topic where connector and task configuration data are stored. This must be the same for all workers with the same group_id. Example: __connect_configs.
- group_id str - A unique string that identifies the Connect cluster group this worker belongs to. Example: connect.
- offset_storage_topic str - The name of the topic where connector and task configuration offsets are stored. This must be the same for all workers with the same group_id. Example: __connect_offsets.
- status_storage_topic str - The name of the topic where connector and task configuration status updates are stored. This must be the same for all workers with the same group_id. Example: __connect_status.
- configStorageTopic String - The name of the topic where connector and task configuration data are stored. This must be the same for all workers with the same group_id. Example: __connect_configs.
- groupId String - A unique string that identifies the Connect cluster group this worker belongs to. Example: connect.
- offsetStorageTopic String - The name of the topic where connector and task configuration offsets are stored. This must be the same for all workers with the same group_id. Example: __connect_offsets.
- statusStorageTopic String - The name of the topic where connector and task configuration status updates are stored. This must be the same for all workers with the same group_id. Example: __connect_status.
ServiceIntegrationKafkaLogsUserConfig, ServiceIntegrationKafkaLogsUserConfigArgs
- KafkaTopic string - Topic name. Example: mytopic.
- SelectedLogFields List&lt;string&gt; - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- KafkaTopic string - Topic name. Example: mytopic.
- SelectedLogFields []string - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- kafkaTopic String - Topic name. Example: mytopic.
- selectedLogFields List&lt;String&gt; - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- kafkaTopic string - Topic name. Example: mytopic.
- selectedLogFields string[] - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- kafka_topic str - Topic name. Example: mytopic.
- selected_log_fields Sequence[str] - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- kafkaTopic String - Topic name. Example: mytopic.
- selectedLogFields List&lt;String&gt; - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
ServiceIntegrationKafkaMirrormakerUserConfig, ServiceIntegrationKafkaMirrormakerUserConfigArgs
- ClusterAlias string - The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics, ".", "_", and "-". Example: kafka-abc.
- KafkaMirrormaker ServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormaker - Kafka MirrorMaker configuration values
- ClusterAlias string - The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics, ".", "_", and "-". Example: kafka-abc.
- KafkaMirrormaker ServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormaker - Kafka MirrorMaker configuration values
- clusterAlias String - The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics, ".", "_", and "-". Example: kafka-abc.
- kafkaMirrormaker ServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormaker - Kafka MirrorMaker configuration values
- clusterAlias string - The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics, ".", "_", and "-". Example: kafka-abc.
- kafkaMirrormaker ServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormaker - Kafka MirrorMaker configuration values
- cluster_alias str - The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics, ".", "_", and "-". Example: kafka-abc.
- kafka_mirrormaker ServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormaker - Kafka MirrorMaker configuration values
- clusterAlias String - The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics, ".", "_", and "-". Example: kafka-abc.
- kafkaMirrormaker Property Map - Kafka MirrorMaker configuration values
ServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormaker, ServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormakerArgs
- ConsumerAutoOffsetReset string - Enum: earliest, latest. Set where consumer starts to consume data. Value earliest: Start replication from the earliest offset. Value latest: Start replication from the latest offset. Default is earliest.
- ConsumerFetchMinBytes int - The minimum amount of data the server should return for a fetch request. Example: 1024.
- ConsumerMaxPollRecords int - Set consumer max.poll.records. The default is 500. Example: 500.
- ProducerBatchSize int - The batch size in bytes producer will attempt to collect before publishing to broker. Example: 1024.
- ProducerBufferMemory int - The amount of bytes producer can use for buffering data before publishing to broker. Example: 8388608.
- ProducerCompressionType string - Enum: gzip, snappy, lz4, zstd, none. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none which is the default and equivalent to no compression.
- ProducerLingerMs int - The linger time (ms) for waiting new data to arrive for publishing. Example: 100.
- ProducerMaxRequestSize int - The maximum request size in bytes. Example: 1048576.
- ConsumerAutoOffsetReset string - Enum: earliest, latest. Set where consumer starts to consume data. Value earliest: Start replication from the earliest offset. Value latest: Start replication from the latest offset. Default is earliest.
- ConsumerFetchMinBytes int - The minimum amount of data the server should return for a fetch request. Example: 1024.
- ConsumerMaxPollRecords int - Set consumer max.poll.records. The default is 500. Example: 500.
- ProducerBatchSize int - The batch size in bytes producer will attempt to collect before publishing to broker. Example: 1024.
- ProducerBufferMemory int - The amount of bytes producer can use for buffering data before publishing to broker. Example: 8388608.
- ProducerCompressionType string - Enum: gzip, snappy, lz4, zstd, none. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none which is the default and equivalent to no compression.
- ProducerLingerMs int - The linger time (ms) for waiting new data to arrive for publishing. Example: 100.
- ProducerMaxRequestSize int - The maximum request size in bytes. Example: 1048576.
- consumerAutoOffsetReset String - Enum: earliest, latest. Set where consumer starts to consume data. Value earliest: Start replication from the earliest offset. Value latest: Start replication from the latest offset. Default is earliest.
- consumerFetchMinBytes Integer - The minimum amount of data the server should return for a fetch request. Example: 1024.
- consumerMaxPollRecords Integer - Set consumer max.poll.records. The default is 500. Example: 500.
- producerBatchSize Integer - The batch size in bytes producer will attempt to collect before publishing to broker. Example: 1024.
- producerBufferMemory Integer - The amount of bytes producer can use for buffering data before publishing to broker. Example: 8388608.
- producerCompressionType String - Enum: gzip, snappy, lz4, zstd, none. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none which is the default and equivalent to no compression.
- producerLingerMs Integer - The linger time (ms) for waiting new data to arrive for publishing. Example: 100.
- producerMaxRequestSize Integer - The maximum request size in bytes. Example: 1048576.
- consumerAutoOffsetReset string - Enum: earliest, latest. Set where consumer starts to consume data. Value earliest: Start replication from the earliest offset. Value latest: Start replication from the latest offset. Default is earliest.
- consumerFetchMinBytes number - The minimum amount of data the server should return for a fetch request. Example: 1024.
- consumerMaxPollRecords number - Set consumer max.poll.records. The default is 500. Example: 500.
- producerBatchSize number - The batch size in bytes producer will attempt to collect before publishing to broker. Example: 1024.
- producerBufferMemory number - The amount of bytes producer can use for buffering data before publishing to broker. Example: 8388608.
- producerCompressionType string - Enum: gzip, snappy, lz4, zstd, none. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none which is the default and equivalent to no compression.
- producerLingerMs number - The linger time (ms) for waiting new data to arrive for publishing. Example: 100.
- producerMaxRequestSize number - The maximum request size in bytes. Example: 1048576.
- consumer_auto_offset_reset str - Enum: earliest, latest. Set where consumer starts to consume data. Value earliest: Start replication from the earliest offset. Value latest: Start replication from the latest offset. Default is earliest.
- consumer_fetch_min_bytes int - The minimum amount of data the server should return for a fetch request. Example: 1024.
- consumer_max_poll_records int - Set consumer max.poll.records. The default is 500. Example: 500.
- producer_batch_size int - The batch size in bytes producer will attempt to collect before publishing to broker. Example: 1024.
- producer_buffer_memory int - The amount of bytes producer can use for buffering data before publishing to broker. Example: 8388608.
- producer_compression_type str - Enum: gzip, snappy, lz4, zstd, none. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none which is the default and equivalent to no compression.
- producer_linger_ms int - The linger time (ms) for waiting new data to arrive for publishing. Example: 100.
- producer_max_request_size int - The maximum request size in bytes. Example: 1048576.
- consumerAutoOffsetReset String - Enum: earliest, latest. Set where consumer starts to consume data. Value earliest: Start replication from the earliest offset. Value latest: Start replication from the latest offset. Default is earliest.
- consumerFetchMinBytes Number - The minimum amount of data the server should return for a fetch request. Example: 1024.
- consumerMaxPollRecords Number - Set consumer max.poll.records. The default is 500. Example: 500.
- producerBatchSize Number - The batch size in bytes producer will attempt to collect before publishing to broker. Example: 1024.
- producerBufferMemory Number - The amount of bytes producer can use for buffering data before publishing to broker. Example: 8388608.
- producerCompressionType String - Enum: gzip, snappy, lz4, zstd, none. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none which is the default and equivalent to no compression.
- producerLingerMs Number - The linger time (ms) for waiting new data to arrive for publishing. Example: 100.
- producerMaxRequestSize Number - The maximum request size in bytes. Example: 1048576.
ServiceIntegrationLogsUserConfig, ServiceIntegrationLogsUserConfigArgs
- ElasticsearchIndexDaysMax int - Elasticsearch index retention limit. Default: 3.
- ElasticsearchIndexPrefix string - Elasticsearch index prefix. Default: logs.
- SelectedLogFields List&lt;string&gt; - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- ElasticsearchIndexDaysMax int - Elasticsearch index retention limit. Default: 3.
- ElasticsearchIndexPrefix string - Elasticsearch index prefix. Default: logs.
- SelectedLogFields []string - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- elasticsearchIndexDaysMax Integer - Elasticsearch index retention limit. Default: 3.
- elasticsearchIndexPrefix String - Elasticsearch index prefix. Default: logs.
- selectedLogFields List&lt;String&gt; - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- elasticsearchIndexDaysMax number - Elasticsearch index retention limit. Default: 3.
- elasticsearchIndexPrefix string - Elasticsearch index prefix. Default: logs.
- selectedLogFields string[] - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- elasticsearch_index_days_max int - Elasticsearch index retention limit. Default: 3.
- elasticsearch_index_prefix str - Elasticsearch index prefix. Default: logs.
- selected_log_fields Sequence[str] - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- elasticsearchIndexDaysMax Number - Elasticsearch index retention limit. Default: 3.
- elasticsearchIndexPrefix String - Elasticsearch index prefix. Default: logs.
- selectedLogFields List&lt;String&gt; - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
ServiceIntegrationMetricsUserConfig, ServiceIntegrationMetricsUserConfigArgs
- Database string
- Name of the database where to store metric datapoints. Only affects PostgreSQL destinations. Defaults to
metrics
. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. - Retention
Days int - Number of days to keep old metrics. Only affects PostgreSQL destinations. Set to 0 for no automatic cleanup. Defaults to 30 days.
- Ro
Username string - Name of a user that can be used to read metrics. This will be used for Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to
metrics_reader
. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. - Source
Mysql ServiceIntegration Metrics User Config Source Mysql - Configuration options for metrics where source service is MySQL
- Username string
- Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to
metrics_writer
. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- Database string
- Name of the database where to store metric datapoints. Only affects PostgreSQL destinations. Defaults to
metrics
. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. - Retention
Days int - Number of days to keep old metrics. Only affects PostgreSQL destinations. Set to 0 for no automatic cleanup. Defaults to 30 days.
- Ro
Username string - Name of a user that can be used to read metrics. This will be used for Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to
metrics_reader
. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. - Source
Mysql ServiceIntegration Metrics User Config Source Mysql - Configuration options for metrics where source service is MySQL
- Username string
- Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to
metrics_writer
. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- database String - Name of the database where to store metric datapoints. Only affects PostgreSQL destinations. Defaults to metrics. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- retentionDays Integer - Number of days to keep old metrics. Only affects PostgreSQL destinations. Set to 0 for no automatic cleanup. Defaults to 30 days.
- roUsername String - Name of a user that can be used to read metrics. This will be used for Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to metrics_reader. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- sourceMysql ServiceIntegrationMetricsUserConfigSourceMysql - Configuration options for metrics where source service is MySQL
- username String - Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to metrics_writer. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- database string - Name of the database where to store metric datapoints. Only affects PostgreSQL destinations. Defaults to metrics. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- retentionDays number - Number of days to keep old metrics. Only affects PostgreSQL destinations. Set to 0 for no automatic cleanup. Defaults to 30 days.
- roUsername string - Name of a user that can be used to read metrics. This will be used for Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to metrics_reader. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- sourceMysql ServiceIntegrationMetricsUserConfigSourceMysql - Configuration options for metrics where source service is MySQL
- username string - Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to metrics_writer. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- database str - Name of the database where to store metric datapoints. Only affects PostgreSQL destinations. Defaults to metrics. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- retention_days int - Number of days to keep old metrics. Only affects PostgreSQL destinations. Set to 0 for no automatic cleanup. Defaults to 30 days.
- ro_username str - Name of a user that can be used to read metrics. This will be used for Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to metrics_reader. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- source_mysql ServiceIntegrationMetricsUserConfigSourceMysql - Configuration options for metrics where source service is MySQL
- username str - Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to metrics_writer. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- database String - Name of the database where to store metric datapoints. Only affects PostgreSQL destinations. Defaults to metrics. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- retentionDays Number - Number of days to keep old metrics. Only affects PostgreSQL destinations. Set to 0 for no automatic cleanup. Defaults to 30 days.
- roUsername String - Name of a user that can be used to read metrics. This will be used for Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to metrics_reader. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- sourceMysql Property Map - Configuration options for metrics where source service is MySQL
- username String - Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to metrics_writer. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
ServiceIntegrationMetricsUserConfigSourceMysql, ServiceIntegrationMetricsUserConfigSourceMysqlArgs
- Telegraf ServiceIntegrationMetricsUserConfigSourceMysqlTelegraf - Configuration options for Telegraf MySQL input plugin
- Telegraf ServiceIntegrationMetricsUserConfigSourceMysqlTelegraf - Configuration options for Telegraf MySQL input plugin
- telegraf ServiceIntegrationMetricsUserConfigSourceMysqlTelegraf - Configuration options for Telegraf MySQL input plugin
- telegraf ServiceIntegrationMetricsUserConfigSourceMysqlTelegraf - Configuration options for Telegraf MySQL input plugin
- telegraf ServiceIntegrationMetricsUserConfigSourceMysqlTelegraf - Configuration options for Telegraf MySQL input plugin
- telegraf Property Map - Configuration options for Telegraf MySQL input plugin
ServiceIntegrationMetricsUserConfigSourceMysqlTelegraf, ServiceIntegrationMetricsUserConfigSourceMysqlTelegrafArgs
- GatherEventWaits bool - Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
- GatherFileEventsStats bool - Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
- GatherIndexIoWaits bool - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
- GatherInfoSchemaAutoInc bool - Gather auto_increment columns and max values from information schema.
- GatherInnodbMetrics bool - Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
- GatherPerfEventsStatements bool - Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
- GatherProcessList bool - Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
- GatherSlaveStatus bool - Gather metrics from SHOW SLAVE STATUS command output.
- GatherTableIoWaits bool - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
- GatherTableLockWaits bool - Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
- GatherTableSchema bool - Gather metrics from INFORMATION_SCHEMA.TABLES.
- PerfEventsStatementsDigestTextLimit int - Truncates digest text from perf_events_statements into this many characters. Example: 120.
- PerfEventsStatementsLimit int - Limits metrics from perf_events_statements. Example: 250.
- PerfEventsStatementsTimeLimit int - Only include perf_events_statements whose last seen is less than this many seconds. Example: 86400.
- GatherEventWaits bool - Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
- GatherFileEventsStats bool - Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
- GatherIndexIoWaits bool - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
- GatherInfoSchemaAutoInc bool - Gather auto_increment columns and max values from information schema.
- GatherInnodbMetrics bool - Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
- GatherPerfEventsStatements bool - Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
- GatherProcessList bool - Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
- GatherSlaveStatus bool - Gather metrics from SHOW SLAVE STATUS command output.
- GatherTableIoWaits bool - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
- GatherTableLockWaits bool - Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
- GatherTableSchema bool - Gather metrics from INFORMATION_SCHEMA.TABLES.
- PerfEventsStatementsDigestTextLimit int - Truncates digest text from perf_events_statements into this many characters. Example: 120.
- PerfEventsStatementsLimit int - Limits metrics from perf_events_statements. Example: 250.
- PerfEventsStatementsTimeLimit int - Only include perf_events_statements whose last seen is less than this many seconds. Example: 86400.
- gatherEventWaits Boolean - Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
- gatherFileEventsStats Boolean - Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
- gatherIndexIoWaits Boolean - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
- gatherInfoSchemaAutoInc Boolean - Gather auto_increment columns and max values from information schema.
- gatherInnodbMetrics Boolean - Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
- gatherPerfEventsStatements Boolean - Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
- gatherProcessList Boolean - Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
- gatherSlaveStatus Boolean - Gather metrics from SHOW SLAVE STATUS command output.
- gatherTableIoWaits Boolean - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
- gatherTableLockWaits Boolean - Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
- gatherTableSchema Boolean - Gather metrics from INFORMATION_SCHEMA.TABLES.
- perfEventsStatementsDigestTextLimit Integer - Truncates digest text from perf_events_statements into this many characters. Example: 120.
- perfEventsStatementsLimit Integer - Limits metrics from perf_events_statements. Example: 250.
- perfEventsStatementsTimeLimit Integer - Only include perf_events_statements whose last seen is less than this many seconds. Example: 86400.
- gatherEventWaits boolean - Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
- gatherFileEventsStats boolean - Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
- gatherIndexIoWaits boolean - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
- gatherInfoSchemaAutoInc boolean - Gather auto_increment columns and max values from information schema.
- gatherInnodbMetrics boolean - Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
- gatherPerfEventsStatements boolean - Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
- gatherProcessList boolean - Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
- gatherSlaveStatus boolean - Gather metrics from SHOW SLAVE STATUS command output.
- gatherTableIoWaits boolean - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
- gatherTableLockWaits boolean - Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
- gatherTableSchema boolean - Gather metrics from INFORMATION_SCHEMA.TABLES.
- perfEventsStatementsDigestTextLimit number - Truncates digest text from perf_events_statements into this many characters. Example: 120.
- perfEventsStatementsLimit number - Limits metrics from perf_events_statements. Example: 250.
- perfEventsStatementsTimeLimit number - Only include perf_events_statements whose last seen is less than this many seconds. Example: 86400.
- gather_event_waits bool - Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
- gather_file_events_stats bool - Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
- gather_index_io_waits bool - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
- gather_info_schema_auto_inc bool - Gather auto_increment columns and max values from information schema.
- gather_innodb_metrics bool - Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
- gather_perf_events_statements bool - Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
- gather_process_list bool - Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
- gather_slave_status bool - Gather metrics from SHOW SLAVE STATUS command output.
- gather_table_io_waits bool - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
- gather_table_lock_waits bool - Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
- gather_table_schema bool - Gather metrics from INFORMATION_SCHEMA.TABLES.
- perf_events_statements_digest_text_limit int - Truncates digest text from perf_events_statements into this many characters. Example: 120.
- perf_events_statements_limit int - Limits metrics from perf_events_statements. Example: 250.
- perf_events_statements_time_limit int - Only include perf_events_statements whose last seen is less than this many seconds. Example: 86400.
- gatherEventWaits Boolean - Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
- gatherFileEventsStats Boolean - Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
- gatherIndexIoWaits Boolean - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
- gatherInfoSchemaAutoInc Boolean - Gather auto_increment columns and max values from information schema.
- gatherInnodbMetrics Boolean - Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
- gatherPerfEventsStatements Boolean - Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
- gatherProcessList Boolean - Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
- gatherSlaveStatus Boolean - Gather metrics from SHOW SLAVE STATUS command output.
- gatherTableIoWaits Boolean - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
- gatherTableLockWaits Boolean - Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
- gatherTableSchema Boolean - Gather metrics from INFORMATION_SCHEMA.TABLES.
- perfEventsStatementsDigestTextLimit Number - Truncates digest text from perf_events_statements into this many characters. Example: 120.
- perfEventsStatementsLimit Number - Limits metrics from perf_events_statements. Example: 250.
- perfEventsStatementsTimeLimit Number - Only include perf_events_statements whose last seen is less than this many seconds. Example: 86400.
ServiceIntegrationPrometheusUserConfig, ServiceIntegrationPrometheusUserConfigArgs
- SourceMysql ServiceIntegrationPrometheusUserConfigSourceMysql - Configuration options for metrics where source service is MySQL
- SourceMysql ServiceIntegrationPrometheusUserConfigSourceMysql - Configuration options for metrics where source service is MySQL
- sourceMysql ServiceIntegrationPrometheusUserConfigSourceMysql - Configuration options for metrics where source service is MySQL
- sourceMysql ServiceIntegrationPrometheusUserConfigSourceMysql - Configuration options for metrics where source service is MySQL
- source_mysql ServiceIntegrationPrometheusUserConfigSourceMysql - Configuration options for metrics where source service is MySQL
- sourceMysql Property Map - Configuration options for metrics where source service is MySQL
ServiceIntegrationPrometheusUserConfigSourceMysql, ServiceIntegrationPrometheusUserConfigSourceMysqlArgs
- Telegraf ServiceIntegrationPrometheusUserConfigSourceMysqlTelegraf - Configuration options for Telegraf MySQL input plugin
- Telegraf ServiceIntegrationPrometheusUserConfigSourceMysqlTelegraf - Configuration options for Telegraf MySQL input plugin
- telegraf ServiceIntegrationPrometheusUserConfigSourceMysqlTelegraf - Configuration options for Telegraf MySQL input plugin
- telegraf ServiceIntegrationPrometheusUserConfigSourceMysqlTelegraf - Configuration options for Telegraf MySQL input plugin
- telegraf ServiceIntegrationPrometheusUserConfigSourceMysqlTelegraf - Configuration options for Telegraf MySQL input plugin
- telegraf Property Map - Configuration options for Telegraf MySQL input plugin
ServiceIntegrationPrometheusUserConfigSourceMysqlTelegraf, ServiceIntegrationPrometheusUserConfigSourceMysqlTelegrafArgs
- GatherEventWaits bool - Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
- GatherFileEventsStats bool - Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
- GatherIndexIoWaits bool - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
- GatherInfoSchemaAutoInc bool - Gather auto_increment columns and max values from information schema.
- GatherInnodbMetrics bool - Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
- GatherPerfEventsStatements bool - Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
- GatherProcessList bool - Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
- GatherSlaveStatus bool - Gather metrics from SHOW SLAVE STATUS command output.
- GatherTableIoWaits bool - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
- GatherTableLockWaits bool - Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
- GatherTableSchema bool - Gather metrics from INFORMATION_SCHEMA.TABLES.
- PerfEventsStatementsDigestTextLimit int - Truncates digest text from perf_events_statements into this many characters. Example: 120.
- PerfEventsStatementsLimit int - Limits metrics from perf_events_statements. Example: 250.
- PerfEventsStatementsTimeLimit int - Only include perf_events_statements whose last seen is less than this many seconds. Example: 86400.
- GatherEventWaits bool - Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
- GatherFileEventsStats bool - Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
- GatherIndexIoWaits bool - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
- GatherInfoSchemaAutoInc bool - Gather auto_increment columns and max values from information schema.
- GatherInnodbMetrics bool - Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
- GatherPerfEventsStatements bool - Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
- GatherProcessList bool - Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
- GatherSlaveStatus bool - Gather metrics from SHOW SLAVE STATUS command output.
- GatherTableIoWaits bool - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
- GatherTableLockWaits bool - Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
- GatherTableSchema bool - Gather metrics from INFORMATION_SCHEMA.TABLES.
- PerfEventsStatementsDigestTextLimit int - Truncates digest text from perf_events_statements into this many characters. Example: 120.
- PerfEventsStatementsLimit int - Limits metrics from perf_events_statements. Example: 250.
- PerfEventsStatementsTimeLimit int - Only include perf_events_statements whose last seen is less than this many seconds. Example: 86400.
- gatherEventWaits Boolean - Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
- gatherFileEventsStats Boolean - Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
- gatherIndexIoWaits Boolean - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
- gatherInfoSchemaAutoInc Boolean - Gather auto_increment columns and max values from information schema.
- gatherInnodbMetrics Boolean - Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
- gatherPerfEventsStatements Boolean - Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
- gatherProcessList Boolean - Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
- gatherSlaveStatus Boolean - Gather metrics from SHOW SLAVE STATUS command output.
- gatherTableIoWaits Boolean - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
- gatherTableLockWaits Boolean - Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
- gatherTableSchema Boolean - Gather metrics from INFORMATION_SCHEMA.TABLES.
- perfEventsStatementsDigestTextLimit Integer - Truncates digest text from perf_events_statements into this many characters. Example: 120.
- perfEventsStatementsLimit Integer - Limits metrics from perf_events_statements. Example: 250.
- perfEventsStatementsTimeLimit Integer - Only include perf_events_statements whose last seen is less than this many seconds. Example: 86400.
- gatherEventWaits boolean - Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
- gatherFileEventsStats boolean - Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
- gatherIndexIoWaits boolean - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
- gatherInfoSchemaAutoInc boolean - Gather auto_increment columns and max values from information schema.
- gatherInnodbMetrics boolean - Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
- gatherPerfEventsStatements boolean - Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
- gatherProcessList boolean - Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
- gatherSlaveStatus boolean - Gather metrics from SHOW SLAVE STATUS command output.
- gatherTableIoWaits boolean - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
- gatherTableLockWaits boolean - Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
- gatherTableSchema boolean - Gather metrics from INFORMATION_SCHEMA.TABLES.
- perfEventsStatementsDigestTextLimit number - Truncates digest text from perf_events_statements into this many characters. Example: 120.
- perfEventsStatementsLimit number - Limits metrics from perf_events_statements. Example: 250.
- perfEventsStatementsTimeLimit number - Only include perf_events_statements whose last seen is less than this many seconds. Example: 86400.
- gather_event_waits bool - Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
- gather_file_events_stats bool - Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
- gather_index_io_waits bool - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
- gather_info_schema_auto_inc bool - Gather auto_increment columns and max values from information schema.
- gather_innodb_metrics bool - Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
- gather_perf_events_statements bool - Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
- gather_process_list bool - Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
- gather_slave_status bool - Gather metrics from SHOW SLAVE STATUS command output.
- gather_table_io_waits bool - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
- gather_table_lock_waits bool - Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
- gather_table_schema bool - Gather metrics from INFORMATION_SCHEMA.TABLES.
- perf_events_statements_digest_text_limit int - Truncates digest text from perf_events_statements into this many characters. Example: 120.
- perf_events_statements_limit int - Limits metrics from perf_events_statements. Example: 250.
- perf_events_statements_time_limit int - Only include perf_events_statements whose last seen is less than this many seconds. Example: 86400.
- gatherEventWaits Boolean - Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
- gatherFileEventsStats Boolean - Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
- gatherIndexIoWaits Boolean - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
- gatherInfoSchemaAutoInc Boolean - Gather auto_increment columns and max values from information schema.
- gatherInnodbMetrics Boolean - Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
- gatherPerfEventsStatements Boolean - Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
- gatherProcessList Boolean - Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
- gatherSlaveStatus Boolean - Gather metrics from SHOW SLAVE STATUS command output.
- gatherTableIoWaits Boolean - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
- gatherTableLockWaits Boolean - Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
- gatherTableSchema Boolean - Gather metrics from INFORMATION_SCHEMA.TABLES.
- perfEventsStatementsDigestTextLimit Number - Truncates digest text from perf_events_statements into this many characters. Example: 120.
- perfEventsStatementsLimit Number - Limits metrics from perf_events_statements. Example: 250.
- perfEventsStatementsTimeLimit Number - Only include perf_events_statements whose last seen is less than this many seconds. Example: 86400.
Import
$ pulumi import aiven:index/serviceIntegration:ServiceIntegration example_integration PROJECT/INTEGRATION_ID
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Aiven pulumi/pulumi-aiven
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the
aiven
Terraform Provider.