aws.mskconnect.Connector
Provides an Amazon MSK Connect Connector resource.
Example Usage
Basic configuration
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const example = new aws.mskconnect.Connector("example", {
name: "example",
kafkaconnectVersion: "2.7.1",
capacity: {
autoscaling: {
mcuCount: 1,
minWorkerCount: 1,
maxWorkerCount: 2,
scaleInPolicy: {
cpuUtilizationPercentage: 20,
},
scaleOutPolicy: {
cpuUtilizationPercentage: 80,
},
},
},
connectorConfiguration: {
"connector.class": "com.github.jcustenborder.kafka.connect.simulator.SimulatorSinkConnector",
"tasks.max": "1",
topics: "example",
},
kafkaCluster: {
apacheKafkaCluster: {
bootstrapServers: exampleAwsMskCluster.bootstrapBrokersTls,
vpc: {
securityGroups: [exampleAwsSecurityGroup.id],
subnets: [
example1.id,
example2.id,
example3.id,
],
},
},
},
kafkaClusterClientAuthentication: {
authenticationType: "NONE",
},
kafkaClusterEncryptionInTransit: {
encryptionType: "TLS",
},
plugins: [{
customPlugin: {
arn: exampleAwsMskconnectCustomPlugin.arn,
revision: exampleAwsMskconnectCustomPlugin.latestRevision,
},
}],
serviceExecutionRoleArn: exampleAwsIamRole.arn,
});
import pulumi
import pulumi_aws as aws
example = aws.mskconnect.Connector("example",
name="example",
kafkaconnect_version="2.7.1",
capacity={
"autoscaling": {
"mcuCount": 1,
"minWorkerCount": 1,
"maxWorkerCount": 2,
"scaleInPolicy": {
"cpuUtilizationPercentage": 20,
},
"scaleOutPolicy": {
"cpuUtilizationPercentage": 80,
},
},
},
connector_configuration={
"connector.class": "com.github.jcustenborder.kafka.connect.simulator.SimulatorSinkConnector",
"tasks.max": "1",
"topics": "example",
},
kafka_cluster={
"apacheKafkaCluster": {
"bootstrapServers": example_aws_msk_cluster["bootstrapBrokersTls"],
"vpc": {
"securityGroups": [example_aws_security_group["id"]],
"subnets": [
example1["id"],
example2["id"],
example3["id"],
],
},
},
},
kafka_cluster_client_authentication={
"authenticationType": "NONE",
},
kafka_cluster_encryption_in_transit={
"encryptionType": "TLS",
},
plugins=[{
"customPlugin": {
"arn": example_aws_mskconnect_custom_plugin["arn"],
"revision": example_aws_mskconnect_custom_plugin["latestRevision"],
},
}],
service_execution_role_arn=example_aws_iam_role["arn"])
package main
import (
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/mskconnect"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := mskconnect.NewConnector(ctx, "example", &mskconnect.ConnectorArgs{
Name: pulumi.String("example"),
KafkaconnectVersion: pulumi.String("2.7.1"),
Capacity: &mskconnect.ConnectorCapacityArgs{
Autoscaling: &mskconnect.ConnectorCapacityAutoscalingArgs{
McuCount: pulumi.Int(1),
MinWorkerCount: pulumi.Int(1),
MaxWorkerCount: pulumi.Int(2),
ScaleInPolicy: &mskconnect.ConnectorCapacityAutoscalingScaleInPolicyArgs{
CpuUtilizationPercentage: pulumi.Int(20),
},
ScaleOutPolicy: &mskconnect.ConnectorCapacityAutoscalingScaleOutPolicyArgs{
CpuUtilizationPercentage: pulumi.Int(80),
},
},
},
ConnectorConfiguration: pulumi.StringMap{
"connector.class": pulumi.String("com.github.jcustenborder.kafka.connect.simulator.SimulatorSinkConnector"),
"tasks.max": pulumi.String("1"),
"topics": pulumi.String("example"),
},
KafkaCluster: &mskconnect.ConnectorKafkaClusterArgs{
ApacheKafkaCluster: &mskconnect.ConnectorKafkaClusterApacheKafkaClusterArgs{
BootstrapServers: pulumi.Any(exampleAwsMskCluster.BootstrapBrokersTls),
Vpc: &mskconnect.ConnectorKafkaClusterApacheKafkaClusterVpcArgs{
SecurityGroups: pulumi.StringArray{
exampleAwsSecurityGroup.Id,
},
Subnets: pulumi.StringArray{
example1.Id,
example2.Id,
example3.Id,
},
},
},
},
KafkaClusterClientAuthentication: &mskconnect.ConnectorKafkaClusterClientAuthenticationArgs{
AuthenticationType: pulumi.String("NONE"),
},
KafkaClusterEncryptionInTransit: &mskconnect.ConnectorKafkaClusterEncryptionInTransitArgs{
EncryptionType: pulumi.String("TLS"),
},
Plugins: mskconnect.ConnectorPluginArray{
&mskconnect.ConnectorPluginArgs{
CustomPlugin: &mskconnect.ConnectorPluginCustomPluginArgs{
Arn: pulumi.Any(exampleAwsMskconnectCustomPlugin.Arn),
Revision: pulumi.Any(exampleAwsMskconnectCustomPlugin.LatestRevision),
},
},
},
ServiceExecutionRoleArn: pulumi.Any(exampleAwsIamRole.Arn),
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var example = new Aws.MskConnect.Connector("example", new()
{
Name = "example",
KafkaconnectVersion = "2.7.1",
Capacity = new Aws.MskConnect.Inputs.ConnectorCapacityArgs
{
Autoscaling = new Aws.MskConnect.Inputs.ConnectorCapacityAutoscalingArgs
{
McuCount = 1,
MinWorkerCount = 1,
MaxWorkerCount = 2,
ScaleInPolicy = new Aws.MskConnect.Inputs.ConnectorCapacityAutoscalingScaleInPolicyArgs
{
CpuUtilizationPercentage = 20,
},
ScaleOutPolicy = new Aws.MskConnect.Inputs.ConnectorCapacityAutoscalingScaleOutPolicyArgs
{
CpuUtilizationPercentage = 80,
},
},
},
ConnectorConfiguration =
{
{ "connector.class", "com.github.jcustenborder.kafka.connect.simulator.SimulatorSinkConnector" },
{ "tasks.max", "1" },
{ "topics", "example" },
},
KafkaCluster = new Aws.MskConnect.Inputs.ConnectorKafkaClusterArgs
{
ApacheKafkaCluster = new Aws.MskConnect.Inputs.ConnectorKafkaClusterApacheKafkaClusterArgs
{
BootstrapServers = exampleAwsMskCluster.BootstrapBrokersTls,
Vpc = new Aws.MskConnect.Inputs.ConnectorKafkaClusterApacheKafkaClusterVpcArgs
{
SecurityGroups = new[]
{
exampleAwsSecurityGroup.Id,
},
Subnets = new[]
{
example1.Id,
example2.Id,
example3.Id,
},
},
},
},
KafkaClusterClientAuthentication = new Aws.MskConnect.Inputs.ConnectorKafkaClusterClientAuthenticationArgs
{
AuthenticationType = "NONE",
},
KafkaClusterEncryptionInTransit = new Aws.MskConnect.Inputs.ConnectorKafkaClusterEncryptionInTransitArgs
{
EncryptionType = "TLS",
},
Plugins = new[]
{
new Aws.MskConnect.Inputs.ConnectorPluginArgs
{
CustomPlugin = new Aws.MskConnect.Inputs.ConnectorPluginCustomPluginArgs
{
Arn = exampleAwsMskconnectCustomPlugin.Arn,
Revision = exampleAwsMskconnectCustomPlugin.LatestRevision,
},
},
},
ServiceExecutionRoleArn = exampleAwsIamRole.Arn,
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.mskconnect.Connector;
import com.pulumi.aws.mskconnect.ConnectorArgs;
import com.pulumi.aws.mskconnect.inputs.ConnectorCapacityArgs;
import com.pulumi.aws.mskconnect.inputs.ConnectorCapacityAutoscalingArgs;
import com.pulumi.aws.mskconnect.inputs.ConnectorCapacityAutoscalingScaleInPolicyArgs;
import com.pulumi.aws.mskconnect.inputs.ConnectorCapacityAutoscalingScaleOutPolicyArgs;
import com.pulumi.aws.mskconnect.inputs.ConnectorKafkaClusterArgs;
import com.pulumi.aws.mskconnect.inputs.ConnectorKafkaClusterApacheKafkaClusterArgs;
import com.pulumi.aws.mskconnect.inputs.ConnectorKafkaClusterApacheKafkaClusterVpcArgs;
import com.pulumi.aws.mskconnect.inputs.ConnectorKafkaClusterClientAuthenticationArgs;
import com.pulumi.aws.mskconnect.inputs.ConnectorKafkaClusterEncryptionInTransitArgs;
import com.pulumi.aws.mskconnect.inputs.ConnectorPluginArgs;
import com.pulumi.aws.mskconnect.inputs.ConnectorPluginCustomPluginArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var example = new Connector("example", ConnectorArgs.builder()
.name("example")
.kafkaconnectVersion("2.7.1")
.capacity(ConnectorCapacityArgs.builder()
.autoscaling(ConnectorCapacityAutoscalingArgs.builder()
.mcuCount(1)
.minWorkerCount(1)
.maxWorkerCount(2)
.scaleInPolicy(ConnectorCapacityAutoscalingScaleInPolicyArgs.builder()
.cpuUtilizationPercentage(20)
.build())
.scaleOutPolicy(ConnectorCapacityAutoscalingScaleOutPolicyArgs.builder()
.cpuUtilizationPercentage(80)
.build())
.build())
.build())
.connectorConfiguration(Map.ofEntries(
Map.entry("connector.class", "com.github.jcustenborder.kafka.connect.simulator.SimulatorSinkConnector"),
Map.entry("tasks.max", "1"),
Map.entry("topics", "example")
))
.kafkaCluster(ConnectorKafkaClusterArgs.builder()
.apacheKafkaCluster(ConnectorKafkaClusterApacheKafkaClusterArgs.builder()
.bootstrapServers(exampleAwsMskCluster.bootstrapBrokersTls())
.vpc(ConnectorKafkaClusterApacheKafkaClusterVpcArgs.builder()
.securityGroups(exampleAwsSecurityGroup.id())
.subnets(
example1.id(),
example2.id(),
example3.id())
.build())
.build())
.build())
.kafkaClusterClientAuthentication(ConnectorKafkaClusterClientAuthenticationArgs.builder()
.authenticationType("NONE")
.build())
.kafkaClusterEncryptionInTransit(ConnectorKafkaClusterEncryptionInTransitArgs.builder()
.encryptionType("TLS")
.build())
.plugins(ConnectorPluginArgs.builder()
.customPlugin(ConnectorPluginCustomPluginArgs.builder()
.arn(exampleAwsMskconnectCustomPlugin.arn())
.revision(exampleAwsMskconnectCustomPlugin.latestRevision())
.build())
.build())
.serviceExecutionRoleArn(exampleAwsIamRole.arn())
.build());
}
}
resources:
example:
type: aws:mskconnect:Connector
properties:
name: example
kafkaconnectVersion: 2.7.1
capacity:
autoscaling:
mcuCount: 1
minWorkerCount: 1
maxWorkerCount: 2
scaleInPolicy:
cpuUtilizationPercentage: 20
scaleOutPolicy:
cpuUtilizationPercentage: 80
connectorConfiguration:
connector.class: com.github.jcustenborder.kafka.connect.simulator.SimulatorSinkConnector
tasks.max: '1'
topics: example
kafkaCluster:
apacheKafkaCluster:
bootstrapServers: ${exampleAwsMskCluster.bootstrapBrokersTls}
vpc:
securityGroups:
- ${exampleAwsSecurityGroup.id}
subnets:
- ${example1.id}
- ${example2.id}
- ${example3.id}
kafkaClusterClientAuthentication:
authenticationType: NONE
kafkaClusterEncryptionInTransit:
encryptionType: TLS
plugins:
- customPlugin:
arn: ${exampleAwsMskconnectCustomPlugin.arn}
revision: ${exampleAwsMskconnectCustomPlugin.latestRevision}
serviceExecutionRoleArn: ${exampleAwsIamRole.arn}
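The basic configuration above references resources defined elsewhere in the program: an MSK cluster (exampleAwsMskCluster), a security group, three subnets, a custom plugin, and a service execution IAM role (exampleAwsIamRole). As a minimal TypeScript sketch of that role, assuming the kafkaconnect.amazonaws.com service principal and that any destination permissions (for example, S3 write access for a sink connector) are attached separately:
import * as aws from "@pulumi/aws";

// Hypothetical service execution role for the connector. MSK Connect assumes
// this role, so the trust policy must allow the kafkaconnect.amazonaws.com
// service principal; permissions for the connector's destinations (e.g. an
// S3 write policy for an S3 sink) would be attached separately.
const exampleAwsIamRole = new aws.iam.Role("example", {
    assumeRolePolicy: JSON.stringify({
        Version: "2012-10-17",
        Statement: [{
            Effect: "Allow",
            Action: "sts:AssumeRole",
            Principal: { Service: "kafkaconnect.amazonaws.com" },
        }],
    }),
});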
Create Connector Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new Connector(name: string, args: ConnectorArgs, opts?: CustomResourceOptions);
@overload
def Connector(resource_name: str,
args: ConnectorArgs,
opts: Optional[ResourceOptions] = None)
@overload
def Connector(resource_name: str,
opts: Optional[ResourceOptions] = None,
capacity: Optional[ConnectorCapacityArgs] = None,
connector_configuration: Optional[Mapping[str, str]] = None,
kafka_cluster: Optional[ConnectorKafkaClusterArgs] = None,
kafka_cluster_client_authentication: Optional[ConnectorKafkaClusterClientAuthenticationArgs] = None,
kafka_cluster_encryption_in_transit: Optional[ConnectorKafkaClusterEncryptionInTransitArgs] = None,
kafkaconnect_version: Optional[str] = None,
plugins: Optional[Sequence[ConnectorPluginArgs]] = None,
service_execution_role_arn: Optional[str] = None,
description: Optional[str] = None,
log_delivery: Optional[ConnectorLogDeliveryArgs] = None,
name: Optional[str] = None,
worker_configuration: Optional[ConnectorWorkerConfigurationArgs] = None)
func NewConnector(ctx *Context, name string, args ConnectorArgs, opts ...ResourceOption) (*Connector, error)
public Connector(string name, ConnectorArgs args, CustomResourceOptions? opts = null)
public Connector(String name, ConnectorArgs args)
public Connector(String name, ConnectorArgs args, CustomResourceOptions options)
type: aws:mskconnect:Connector
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args ConnectorArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args ConnectorArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args ConnectorArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args ConnectorArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args ConnectorArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var connectorResource = new Aws.MskConnect.Connector("connectorResource", new()
{
Capacity = new Aws.MskConnect.Inputs.ConnectorCapacityArgs
{
Autoscaling = new Aws.MskConnect.Inputs.ConnectorCapacityAutoscalingArgs
{
MaxWorkerCount = 0,
MinWorkerCount = 0,
McuCount = 0,
ScaleInPolicy = new Aws.MskConnect.Inputs.ConnectorCapacityAutoscalingScaleInPolicyArgs
{
CpuUtilizationPercentage = 0,
},
ScaleOutPolicy = new Aws.MskConnect.Inputs.ConnectorCapacityAutoscalingScaleOutPolicyArgs
{
CpuUtilizationPercentage = 0,
},
},
ProvisionedCapacity = new Aws.MskConnect.Inputs.ConnectorCapacityProvisionedCapacityArgs
{
WorkerCount = 0,
McuCount = 0,
},
},
ConnectorConfiguration =
{
{ "string", "string" },
},
KafkaCluster = new Aws.MskConnect.Inputs.ConnectorKafkaClusterArgs
{
ApacheKafkaCluster = new Aws.MskConnect.Inputs.ConnectorKafkaClusterApacheKafkaClusterArgs
{
BootstrapServers = "string",
Vpc = new Aws.MskConnect.Inputs.ConnectorKafkaClusterApacheKafkaClusterVpcArgs
{
SecurityGroups = new[]
{
"string",
},
Subnets = new[]
{
"string",
},
},
},
},
KafkaClusterClientAuthentication = new Aws.MskConnect.Inputs.ConnectorKafkaClusterClientAuthenticationArgs
{
AuthenticationType = "string",
},
KafkaClusterEncryptionInTransit = new Aws.MskConnect.Inputs.ConnectorKafkaClusterEncryptionInTransitArgs
{
EncryptionType = "string",
},
KafkaconnectVersion = "string",
Plugins = new[]
{
new Aws.MskConnect.Inputs.ConnectorPluginArgs
{
CustomPlugin = new Aws.MskConnect.Inputs.ConnectorPluginCustomPluginArgs
{
Arn = "string",
Revision = 0,
},
},
},
ServiceExecutionRoleArn = "string",
Description = "string",
LogDelivery = new Aws.MskConnect.Inputs.ConnectorLogDeliveryArgs
{
WorkerLogDelivery = new Aws.MskConnect.Inputs.ConnectorLogDeliveryWorkerLogDeliveryArgs
{
CloudwatchLogs = new Aws.MskConnect.Inputs.ConnectorLogDeliveryWorkerLogDeliveryCloudwatchLogsArgs
{
Enabled = false,
LogGroup = "string",
},
Firehose = new Aws.MskConnect.Inputs.ConnectorLogDeliveryWorkerLogDeliveryFirehoseArgs
{
Enabled = false,
DeliveryStream = "string",
},
S3 = new Aws.MskConnect.Inputs.ConnectorLogDeliveryWorkerLogDeliveryS3Args
{
Enabled = false,
Bucket = "string",
Prefix = "string",
},
},
},
Name = "string",
WorkerConfiguration = new Aws.MskConnect.Inputs.ConnectorWorkerConfigurationArgs
{
Arn = "string",
Revision = 0,
},
});
example, err := mskconnect.NewConnector(ctx, "connectorResource", &mskconnect.ConnectorArgs{
Capacity: &mskconnect.ConnectorCapacityArgs{
Autoscaling: &mskconnect.ConnectorCapacityAutoscalingArgs{
MaxWorkerCount: pulumi.Int(0),
MinWorkerCount: pulumi.Int(0),
McuCount: pulumi.Int(0),
ScaleInPolicy: &mskconnect.ConnectorCapacityAutoscalingScaleInPolicyArgs{
CpuUtilizationPercentage: pulumi.Int(0),
},
ScaleOutPolicy: &mskconnect.ConnectorCapacityAutoscalingScaleOutPolicyArgs{
CpuUtilizationPercentage: pulumi.Int(0),
},
},
ProvisionedCapacity: &mskconnect.ConnectorCapacityProvisionedCapacityArgs{
WorkerCount: pulumi.Int(0),
McuCount: pulumi.Int(0),
},
},
ConnectorConfiguration: pulumi.StringMap{
"string": pulumi.String("string"),
},
KafkaCluster: &mskconnect.ConnectorKafkaClusterArgs{
ApacheKafkaCluster: &mskconnect.ConnectorKafkaClusterApacheKafkaClusterArgs{
BootstrapServers: pulumi.String("string"),
Vpc: &mskconnect.ConnectorKafkaClusterApacheKafkaClusterVpcArgs{
SecurityGroups: pulumi.StringArray{
pulumi.String("string"),
},
Subnets: pulumi.StringArray{
pulumi.String("string"),
},
},
},
},
KafkaClusterClientAuthentication: &mskconnect.ConnectorKafkaClusterClientAuthenticationArgs{
AuthenticationType: pulumi.String("string"),
},
KafkaClusterEncryptionInTransit: &mskconnect.ConnectorKafkaClusterEncryptionInTransitArgs{
EncryptionType: pulumi.String("string"),
},
KafkaconnectVersion: pulumi.String("string"),
Plugins: mskconnect.ConnectorPluginArray{
&mskconnect.ConnectorPluginArgs{
CustomPlugin: &mskconnect.ConnectorPluginCustomPluginArgs{
Arn: pulumi.String("string"),
Revision: pulumi.Int(0),
},
},
},
ServiceExecutionRoleArn: pulumi.String("string"),
Description: pulumi.String("string"),
LogDelivery: &mskconnect.ConnectorLogDeliveryArgs{
WorkerLogDelivery: &mskconnect.ConnectorLogDeliveryWorkerLogDeliveryArgs{
CloudwatchLogs: &mskconnect.ConnectorLogDeliveryWorkerLogDeliveryCloudwatchLogsArgs{
Enabled: pulumi.Bool(false),
LogGroup: pulumi.String("string"),
},
Firehose: &mskconnect.ConnectorLogDeliveryWorkerLogDeliveryFirehoseArgs{
Enabled: pulumi.Bool(false),
DeliveryStream: pulumi.String("string"),
},
S3: &mskconnect.ConnectorLogDeliveryWorkerLogDeliveryS3Args{
Enabled: pulumi.Bool(false),
Bucket: pulumi.String("string"),
Prefix: pulumi.String("string"),
},
},
},
Name: pulumi.String("string"),
WorkerConfiguration: &mskconnect.ConnectorWorkerConfigurationArgs{
Arn: pulumi.String("string"),
Revision: pulumi.Int(0),
},
})
var connectorResource = new Connector("connectorResource", ConnectorArgs.builder()
.capacity(ConnectorCapacityArgs.builder()
.autoscaling(ConnectorCapacityAutoscalingArgs.builder()
.maxWorkerCount(0)
.minWorkerCount(0)
.mcuCount(0)
.scaleInPolicy(ConnectorCapacityAutoscalingScaleInPolicyArgs.builder()
.cpuUtilizationPercentage(0)
.build())
.scaleOutPolicy(ConnectorCapacityAutoscalingScaleOutPolicyArgs.builder()
.cpuUtilizationPercentage(0)
.build())
.build())
.provisionedCapacity(ConnectorCapacityProvisionedCapacityArgs.builder()
.workerCount(0)
.mcuCount(0)
.build())
.build())
.connectorConfiguration(Map.of("string", "string"))
.kafkaCluster(ConnectorKafkaClusterArgs.builder()
.apacheKafkaCluster(ConnectorKafkaClusterApacheKafkaClusterArgs.builder()
.bootstrapServers("string")
.vpc(ConnectorKafkaClusterApacheKafkaClusterVpcArgs.builder()
.securityGroups("string")
.subnets("string")
.build())
.build())
.build())
.kafkaClusterClientAuthentication(ConnectorKafkaClusterClientAuthenticationArgs.builder()
.authenticationType("string")
.build())
.kafkaClusterEncryptionInTransit(ConnectorKafkaClusterEncryptionInTransitArgs.builder()
.encryptionType("string")
.build())
.kafkaconnectVersion("string")
.plugins(ConnectorPluginArgs.builder()
.customPlugin(ConnectorPluginCustomPluginArgs.builder()
.arn("string")
.revision(0)
.build())
.build())
.serviceExecutionRoleArn("string")
.description("string")
.logDelivery(ConnectorLogDeliveryArgs.builder()
.workerLogDelivery(ConnectorLogDeliveryWorkerLogDeliveryArgs.builder()
.cloudwatchLogs(ConnectorLogDeliveryWorkerLogDeliveryCloudwatchLogsArgs.builder()
.enabled(false)
.logGroup("string")
.build())
.firehose(ConnectorLogDeliveryWorkerLogDeliveryFirehoseArgs.builder()
.enabled(false)
.deliveryStream("string")
.build())
.s3(ConnectorLogDeliveryWorkerLogDeliveryS3Args.builder()
.enabled(false)
.bucket("string")
.prefix("string")
.build())
.build())
.build())
.name("string")
.workerConfiguration(ConnectorWorkerConfigurationArgs.builder()
.arn("string")
.revision(0)
.build())
.build());
connector_resource = aws.mskconnect.Connector("connectorResource",
capacity={
"autoscaling": {
"maxWorkerCount": 0,
"minWorkerCount": 0,
"mcuCount": 0,
"scaleInPolicy": {
"cpuUtilizationPercentage": 0,
},
"scaleOutPolicy": {
"cpuUtilizationPercentage": 0,
},
},
"provisionedCapacity": {
"workerCount": 0,
"mcuCount": 0,
},
},
connector_configuration={
"string": "string",
},
kafka_cluster={
"apacheKafkaCluster": {
"bootstrapServers": "string",
"vpc": {
"securityGroups": ["string"],
"subnets": ["string"],
},
},
},
kafka_cluster_client_authentication={
"authenticationType": "string",
},
kafka_cluster_encryption_in_transit={
"encryptionType": "string",
},
kafkaconnect_version="string",
plugins=[{
"customPlugin": {
"arn": "string",
"revision": 0,
},
}],
service_execution_role_arn="string",
description="string",
log_delivery={
"workerLogDelivery": {
"cloudwatchLogs": {
"enabled": False,
"logGroup": "string",
},
"firehose": {
"enabled": False,
"deliveryStream": "string",
},
"s3": {
"enabled": False,
"bucket": "string",
"prefix": "string",
},
},
},
name="string",
worker_configuration={
"arn": "string",
"revision": 0,
})
const connectorResource = new aws.mskconnect.Connector("connectorResource", {
capacity: {
autoscaling: {
maxWorkerCount: 0,
minWorkerCount: 0,
mcuCount: 0,
scaleInPolicy: {
cpuUtilizationPercentage: 0,
},
scaleOutPolicy: {
cpuUtilizationPercentage: 0,
},
},
provisionedCapacity: {
workerCount: 0,
mcuCount: 0,
},
},
connectorConfiguration: {
string: "string",
},
kafkaCluster: {
apacheKafkaCluster: {
bootstrapServers: "string",
vpc: {
securityGroups: ["string"],
subnets: ["string"],
},
},
},
kafkaClusterClientAuthentication: {
authenticationType: "string",
},
kafkaClusterEncryptionInTransit: {
encryptionType: "string",
},
kafkaconnectVersion: "string",
plugins: [{
customPlugin: {
arn: "string",
revision: 0,
},
}],
serviceExecutionRoleArn: "string",
description: "string",
logDelivery: {
workerLogDelivery: {
cloudwatchLogs: {
enabled: false,
logGroup: "string",
},
firehose: {
enabled: false,
deliveryStream: "string",
},
s3: {
enabled: false,
bucket: "string",
prefix: "string",
},
},
},
name: "string",
workerConfiguration: {
arn: "string",
revision: 0,
},
});
type: aws:mskconnect:Connector
properties:
capacity:
autoscaling:
maxWorkerCount: 0
mcuCount: 0
minWorkerCount: 0
scaleInPolicy:
cpuUtilizationPercentage: 0
scaleOutPolicy:
cpuUtilizationPercentage: 0
provisionedCapacity:
mcuCount: 0
workerCount: 0
connectorConfiguration:
string: string
description: string
kafkaCluster:
apacheKafkaCluster:
bootstrapServers: string
vpc:
securityGroups:
- string
subnets:
- string
kafkaClusterClientAuthentication:
authenticationType: string
kafkaClusterEncryptionInTransit:
encryptionType: string
kafkaconnectVersion: string
logDelivery:
workerLogDelivery:
cloudwatchLogs:
enabled: false
logGroup: string
firehose:
deliveryStream: string
enabled: false
s3:
bucket: string
enabled: false
prefix: string
name: string
plugins:
- customPlugin:
arn: string
revision: 0
serviceExecutionRoleArn: string
workerConfiguration:
arn: string
revision: 0
Connector Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
The Connector resource accepts the following input properties:
- Capacity ConnectorCapacity - Information about the capacity allocated to the connector. See below.
- ConnectorConfiguration Dictionary<string, string> - A map of keys to values that represent the configuration for the connector.
- KafkaCluster ConnectorKafkaCluster - Specifies which Apache Kafka cluster to connect to. See below.
- KafkaClusterClientAuthentication ConnectorKafkaClusterClientAuthentication - Details of the client authentication used by the Apache Kafka cluster. See below.
- KafkaClusterEncryptionInTransit ConnectorKafkaClusterEncryptionInTransit - Details of encryption in transit to the Apache Kafka cluster. See below.
- KafkaconnectVersion string - The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins.
- Plugins List<ConnectorPlugin> - Specifies which plugins to use for the connector. See below.
- ServiceExecutionRoleArn string - The Amazon Resource Name (ARN) of the IAM role used by the connector to access the Amazon Web Services resources that it needs. The types of resources depend on the logic of the connector. For example, a connector that has Amazon S3 as a destination must have permissions that allow it to write to the S3 destination bucket.
- Description string - A summary description of the connector.
- LogDelivery ConnectorLogDelivery - Details about log delivery. See below.
- Name string - The name of the connector.
- WorkerConfiguration ConnectorWorkerConfiguration - Specifies which worker configuration to use with the connector. See below.
- Capacity ConnectorCapacityArgs - Information about the capacity allocated to the connector. See below.
- ConnectorConfiguration map[string]string - A map of keys to values that represent the configuration for the connector.
- KafkaCluster ConnectorKafkaClusterArgs - Specifies which Apache Kafka cluster to connect to. See below.
- KafkaClusterClientAuthentication ConnectorKafkaClusterClientAuthenticationArgs - Details of the client authentication used by the Apache Kafka cluster. See below.
- KafkaClusterEncryptionInTransit ConnectorKafkaClusterEncryptionInTransitArgs - Details of encryption in transit to the Apache Kafka cluster. See below.
- KafkaconnectVersion string - The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins.
- Plugins []ConnectorPluginArgs - Specifies which plugins to use for the connector. See below.
- ServiceExecutionRoleArn string - The Amazon Resource Name (ARN) of the IAM role used by the connector to access the Amazon Web Services resources that it needs. The types of resources depend on the logic of the connector. For example, a connector that has Amazon S3 as a destination must have permissions that allow it to write to the S3 destination bucket.
- Description string - A summary description of the connector.
- LogDelivery ConnectorLogDeliveryArgs - Details about log delivery. See below.
- Name string - The name of the connector.
- WorkerConfiguration ConnectorWorkerConfigurationArgs - Specifies which worker configuration to use with the connector. See below.
- capacity ConnectorCapacity - Information about the capacity allocated to the connector. See below.
- connectorConfiguration Map<String,String> - A map of keys to values that represent the configuration for the connector.
- kafkaCluster ConnectorKafkaCluster - Specifies which Apache Kafka cluster to connect to. See below.
- kafkaClusterClientAuthentication ConnectorKafkaClusterClientAuthentication - Details of the client authentication used by the Apache Kafka cluster. See below.
- kafkaClusterEncryptionInTransit ConnectorKafkaClusterEncryptionInTransit - Details of encryption in transit to the Apache Kafka cluster. See below.
- kafkaconnectVersion String - The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins.
- plugins List<ConnectorPlugin> - Specifies which plugins to use for the connector. See below.
- serviceExecutionRoleArn String - The Amazon Resource Name (ARN) of the IAM role used by the connector to access the Amazon Web Services resources that it needs. The types of resources depend on the logic of the connector. For example, a connector that has Amazon S3 as a destination must have permissions that allow it to write to the S3 destination bucket.
- description String - A summary description of the connector.
- logDelivery ConnectorLogDelivery - Details about log delivery. See below.
- name String - The name of the connector.
- workerConfiguration ConnectorWorkerConfiguration - Specifies which worker configuration to use with the connector. See below.
- capacity ConnectorCapacity - Information about the capacity allocated to the connector. See below.
- connectorConfiguration {[key: string]: string} - A map of keys to values that represent the configuration for the connector.
- kafkaCluster ConnectorKafkaCluster - Specifies which Apache Kafka cluster to connect to. See below.
- kafkaClusterClientAuthentication ConnectorKafkaClusterClientAuthentication - Details of the client authentication used by the Apache Kafka cluster. See below.
- kafkaClusterEncryptionInTransit ConnectorKafkaClusterEncryptionInTransit - Details of encryption in transit to the Apache Kafka cluster. See below.
- kafkaconnectVersion string - The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins.
- plugins ConnectorPlugin[] - Specifies which plugins to use for the connector. See below.
- serviceExecutionRoleArn string - The Amazon Resource Name (ARN) of the IAM role used by the connector to access the Amazon Web Services resources that it needs. The types of resources depend on the logic of the connector. For example, a connector that has Amazon S3 as a destination must have permissions that allow it to write to the S3 destination bucket.
- description string - A summary description of the connector.
- logDelivery ConnectorLogDelivery - Details about log delivery. See below.
- name string - The name of the connector.
- workerConfiguration ConnectorWorkerConfiguration - Specifies which worker configuration to use with the connector. See below.
- capacity ConnectorCapacityArgs - Information about the capacity allocated to the connector. See below.
- connector_configuration Mapping[str, str] - A map of keys to values that represent the configuration for the connector.
- kafka_cluster ConnectorKafkaClusterArgs - Specifies which Apache Kafka cluster to connect to. See below.
- kafka_cluster_client_authentication ConnectorKafkaClusterClientAuthenticationArgs - Details of the client authentication used by the Apache Kafka cluster. See below.
- kafka_cluster_encryption_in_transit ConnectorKafkaClusterEncryptionInTransitArgs - Details of encryption in transit to the Apache Kafka cluster. See below.
- kafkaconnect_version str - The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins.
- plugins Sequence[ConnectorPluginArgs] - Specifies which plugins to use for the connector. See below.
- service_execution_role_arn str - The Amazon Resource Name (ARN) of the IAM role used by the connector to access the Amazon Web Services resources that it needs. The types of resources depend on the logic of the connector. For example, a connector that has Amazon S3 as a destination must have permissions that allow it to write to the S3 destination bucket.
- description str - A summary description of the connector.
- log_delivery ConnectorLogDeliveryArgs - Details about log delivery. See below.
- name str - The name of the connector.
- worker_configuration ConnectorWorkerConfigurationArgs - Specifies which worker configuration to use with the connector. See below.
- capacity Property Map - Information about the capacity allocated to the connector. See below.
- connectorConfiguration Map<String> - A map of keys to values that represent the configuration for the connector.
- kafkaCluster Property Map - Specifies which Apache Kafka cluster to connect to. See below.
- kafkaClusterClientAuthentication Property Map - Details of the client authentication used by the Apache Kafka cluster. See below.
- kafkaClusterEncryptionInTransit Property Map - Details of encryption in transit to the Apache Kafka cluster. See below.
- kafkaconnectVersion String - The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins.
- plugins List<Property Map> - Specifies which plugins to use for the connector. See below.
- serviceExecutionRoleArn String - The Amazon Resource Name (ARN) of the IAM role used by the connector to access the Amazon Web Services resources that it needs. The types of resources depend on the logic of the connector. For example, a connector that has Amazon S3 as a destination must have permissions that allow it to write to the S3 destination bucket.
- description String - A summary description of the connector.
- logDelivery Property Map - Details about log delivery. See below.
- name String - The name of the connector.
- workerConfiguration Property Map - Specifies which worker configuration to use with the connector. See below.
Outputs
All input properties are implicitly available as output properties. Additionally, the Connector resource produces the following output properties:
- Arn string - The Amazon Resource Name (ARN) of the connector.
- Id string - The provider-assigned unique ID for this managed resource.
- Version string - The current version of the connector.
Look up Existing Connector Resource
Get an existing Connector resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: ConnectorState, opts?: CustomResourceOptions): Connector
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
arn: Optional[str] = None,
capacity: Optional[ConnectorCapacityArgs] = None,
connector_configuration: Optional[Mapping[str, str]] = None,
description: Optional[str] = None,
kafka_cluster: Optional[ConnectorKafkaClusterArgs] = None,
kafka_cluster_client_authentication: Optional[ConnectorKafkaClusterClientAuthenticationArgs] = None,
kafka_cluster_encryption_in_transit: Optional[ConnectorKafkaClusterEncryptionInTransitArgs] = None,
kafkaconnect_version: Optional[str] = None,
log_delivery: Optional[ConnectorLogDeliveryArgs] = None,
name: Optional[str] = None,
plugins: Optional[Sequence[ConnectorPluginArgs]] = None,
service_execution_role_arn: Optional[str] = None,
version: Optional[str] = None,
worker_configuration: Optional[ConnectorWorkerConfigurationArgs] = None) -> Connector
func GetConnector(ctx *Context, name string, id IDInput, state *ConnectorState, opts ...ResourceOption) (*Connector, error)
public static Connector Get(string name, Input<string> id, ConnectorState? state, CustomResourceOptions? opts = null)
public static Connector get(String name, Output<String> id, ConnectorState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
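For example, a minimal TypeScript sketch of looking up an existing connector, assuming the connector's ARN is used as the resource ID (the ARN shown is a placeholder):
import * as aws from "@pulumi/aws";

// Hypothetical lookup of an existing connector by name and ID.
const existing = aws.mskconnect.Connector.get(
    "existing",
    "arn:aws:kafkaconnect:us-east-1:123456789012:connector/example/abc123",
);

// Outputs of the looked-up resource can be used like any other.
export const existingVersion = existing.version;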
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Arn string - The Amazon Resource Name (ARN) of the connector.
- Capacity ConnectorCapacity - Information about the capacity allocated to the connector. See below.
- ConnectorConfiguration Dictionary<string, string> - A map of keys to values that represent the configuration for the connector.
- Description string - A summary description of the connector.
- KafkaCluster ConnectorKafkaCluster - Specifies which Apache Kafka cluster to connect to. See below.
- KafkaClusterClientAuthentication ConnectorKafkaClusterClientAuthentication - Details of the client authentication used by the Apache Kafka cluster. See below.
- KafkaClusterEncryptionInTransit ConnectorKafkaClusterEncryptionInTransit - Details of encryption in transit to the Apache Kafka cluster. See below.
- KafkaconnectVersion string - The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins.
- LogDelivery ConnectorLogDelivery - Details about log delivery. See below.
- Name string - The name of the connector.
- Plugins List<ConnectorPlugin> - Specifies which plugins to use for the connector. See below.
- ServiceExecutionRoleArn string - The Amazon Resource Name (ARN) of the IAM role used by the connector to access the Amazon Web Services resources that it needs. The types of resources depend on the logic of the connector. For example, a connector that has Amazon S3 as a destination must have permissions that allow it to write to the S3 destination bucket.
- Version string - The current version of the connector.
- WorkerConfiguration ConnectorWorkerConfiguration - Specifies which worker configuration to use with the connector. See below.
- Arn string - The Amazon Resource Name (ARN) of the connector.
- Capacity ConnectorCapacityArgs - Information about the capacity allocated to the connector. See below.
- ConnectorConfiguration map[string]string - A map of keys to values that represent the configuration for the connector.
- Description string - A summary description of the connector.
- KafkaCluster ConnectorKafkaClusterArgs - Specifies which Apache Kafka cluster to connect to. See below.
- KafkaClusterClientAuthentication ConnectorKafkaClusterClientAuthenticationArgs - Details of the client authentication used by the Apache Kafka cluster. See below.
- KafkaClusterEncryptionInTransit ConnectorKafkaClusterEncryptionInTransitArgs - Details of encryption in transit to the Apache Kafka cluster. See below.
- KafkaconnectVersion string - The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins.
- LogDelivery ConnectorLogDeliveryArgs - Details about log delivery. See below.
- Name string - The name of the connector.
- Plugins []ConnectorPluginArgs - Specifies which plugins to use for the connector. See below.
- ServiceExecutionRoleArn string - The Amazon Resource Name (ARN) of the IAM role used by the connector to access the Amazon Web Services resources that it needs. The types of resources depend on the logic of the connector. For example, a connector that has Amazon S3 as a destination must have permissions that allow it to write to the S3 destination bucket.
- Version string - The current version of the connector.
- WorkerConfiguration ConnectorWorkerConfigurationArgs - Specifies which worker configuration to use with the connector. See below.
- arn String - The Amazon Resource Name (ARN) of the connector.
- capacity ConnectorCapacity - Information about the capacity allocated to the connector. See below.
- connectorConfiguration Map<String,String> - A map of keys to values that represent the configuration for the connector.
- description String - A summary description of the connector.
- kafkaCluster ConnectorKafkaCluster - Specifies which Apache Kafka cluster to connect to. See below.
- kafkaClusterClientAuthentication ConnectorKafkaClusterClientAuthentication - Details of the client authentication used by the Apache Kafka cluster. See below.
- kafkaClusterEncryptionInTransit ConnectorKafkaClusterEncryptionInTransit - Details of encryption in transit to the Apache Kafka cluster. See below.
- kafkaconnectVersion String - The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins.
- logDelivery ConnectorLogDelivery - Details about log delivery. See below.
- name String - The name of the connector.
- plugins List<ConnectorPlugin> - Specifies which plugins to use for the connector. See below.
- serviceExecutionRoleArn String - The Amazon Resource Name (ARN) of the IAM role used by the connector to access the Amazon Web Services resources that it needs. The types of resources depend on the logic of the connector. For example, a connector that has Amazon S3 as a destination must have permissions that allow it to write to the S3 destination bucket.
- version String - The current version of the connector.
- workerConfiguration ConnectorWorkerConfiguration - Specifies which worker configuration to use with the connector. See below.
- arn string - The Amazon Resource Name (ARN) of the connector.
- capacity ConnectorCapacity - Information about the capacity allocated to the connector. See below.
- connectorConfiguration {[key: string]: string} - A map of keys to values that represent the configuration for the connector.
- description string - A summary description of the connector.
- kafkaCluster ConnectorKafkaCluster - Specifies which Apache Kafka cluster to connect to. See below.
- kafkaClusterClientAuthentication ConnectorKafkaClusterClientAuthentication - Details of the client authentication used by the Apache Kafka cluster. See below.
- kafkaClusterEncryptionInTransit ConnectorKafkaClusterEncryptionInTransit - Details of encryption in transit to the Apache Kafka cluster. See below.
- kafkaconnectVersion string - The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins.
- logDelivery ConnectorLogDelivery - Details about log delivery. See below.
- name string - The name of the connector.
- plugins ConnectorPlugin[] - Specifies which plugins to use for the connector. See below.
- serviceExecutionRoleArn string - The Amazon Resource Name (ARN) of the IAM role used by the connector to access the Amazon Web Services resources that it needs. The types of resources depend on the logic of the connector. For example, a connector that has Amazon S3 as a destination must have permissions that allow it to write to the S3 destination bucket.
- version string - The current version of the connector.
- workerConfiguration ConnectorWorkerConfiguration - Specifies which worker configuration to use with the connector. See below.
- arn str - The Amazon Resource Name (ARN) of the connector.
- capacity ConnectorCapacityArgs - Information about the capacity allocated to the connector. See below.
- connector_configuration Mapping[str, str] - A map of keys to values that represent the configuration for the connector.
- description str - A summary description of the connector.
- kafka_cluster ConnectorKafkaClusterArgs - Specifies which Apache Kafka cluster to connect to. See below.
- kafka_cluster_client_authentication ConnectorKafkaClusterClientAuthenticationArgs - Details of the client authentication used by the Apache Kafka cluster. See below.
- kafka_cluster_encryption_in_transit ConnectorKafkaClusterEncryptionInTransitArgs - Details of encryption in transit to the Apache Kafka cluster. See below.
- kafkaconnect_version str - The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins.
- log_delivery ConnectorLogDeliveryArgs - Details about log delivery. See below.
- name str - The name of the connector.
- plugins Sequence[ConnectorPluginArgs] - Specifies which plugins to use for the connector. See below.
- service_execution_role_arn str - The Amazon Resource Name (ARN) of the IAM role used by the connector to access the Amazon Web Services resources that it needs. The types of resources depend on the logic of the connector. For example, a connector that has Amazon S3 as a destination must have permissions that allow it to write to the S3 destination bucket.
- version str - The current version of the connector.
- worker_configuration ConnectorWorkerConfigurationArgs - Specifies which worker configuration to use with the connector. See below.
- arn String - The Amazon Resource Name (ARN) of the connector.
- capacity Property Map - Information about the capacity allocated to the connector. See below.
- connectorConfiguration Map<String> - A map of keys to values that represent the configuration for the connector.
- description String - A summary description of the connector.
- kafkaCluster Property Map - Specifies which Apache Kafka cluster to connect to. See below.
- kafkaClusterClientAuthentication Property Map - Details of the client authentication used by the Apache Kafka cluster. See below.
- kafkaClusterEncryptionInTransit Property Map - Details of encryption in transit to the Apache Kafka cluster. See below.
- kafkaconnectVersion String - The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins.
- logDelivery Property Map - Details about log delivery. See below.
- name String - The name of the connector.
- plugins List<Property Map> - Specifies which plugins to use for the connector. See below.
- serviceExecutionRoleArn String - The Amazon Resource Name (ARN) of the IAM role used by the connector to access the Amazon Web Services resources that it needs. The types of resources depend on the logic of the connector. For example, a connector that has Amazon S3 as a destination must have permissions that allow it to write to the S3 destination bucket.
- version String - The current version of the connector.
- workerConfiguration Property Map - Specifies which worker configuration to use with the connector. See below.
Supporting Types
ConnectorCapacity, ConnectorCapacityArgs
- Autoscaling ConnectorCapacityAutoscaling - Information about the auto scaling parameters for the connector. See below.
- ProvisionedCapacity ConnectorCapacityProvisionedCapacity - Details about a fixed capacity allocated to a connector. See below.
- Autoscaling ConnectorCapacityAutoscaling - Information about the auto scaling parameters for the connector. See below.
- ProvisionedCapacity ConnectorCapacityProvisionedCapacity - Details about a fixed capacity allocated to a connector. See below.
- autoscaling ConnectorCapacityAutoscaling - Information about the auto scaling parameters for the connector. See below.
- provisionedCapacity ConnectorCapacityProvisionedCapacity - Details about a fixed capacity allocated to a connector. See below.
- autoscaling ConnectorCapacityAutoscaling - Information about the auto scaling parameters for the connector. See below.
- provisionedCapacity ConnectorCapacityProvisionedCapacity - Details about a fixed capacity allocated to a connector. See below.
- autoscaling ConnectorCapacityAutoscaling - Information about the auto scaling parameters for the connector. See below.
- provisioned_capacity ConnectorCapacityProvisionedCapacity - Details about a fixed capacity allocated to a connector. See below.
- autoscaling Property Map - Information about the auto scaling parameters for the connector. See below.
- provisionedCapacity Property Map - Details about a fixed capacity allocated to a connector. See below.
ConnectorCapacityAutoscaling, ConnectorCapacityAutoscalingArgs
- MaxWorkerCount int - The maximum number of workers allocated to the connector.
- MinWorkerCount int - The minimum number of workers allocated to the connector.
- McuCount int - The number of microcontroller units (MCUs) allocated to each connector worker. Valid values: 1, 2, 4, 8. The default value is 1.
- ScaleInPolicy ConnectorCapacityAutoscalingScaleInPolicy - The scale-in policy for the connector. See below.
- ScaleOutPolicy ConnectorCapacityAutoscalingScaleOutPolicy - The scale-out policy for the connector. See below.
- MaxWorkerCount int - The maximum number of workers allocated to the connector.
- MinWorkerCount int - The minimum number of workers allocated to the connector.
- McuCount int - The number of microcontroller units (MCUs) allocated to each connector worker. Valid values: 1, 2, 4, 8. The default value is 1.
- ScaleInPolicy ConnectorCapacityAutoscalingScaleInPolicy - The scale-in policy for the connector. See below.
- ScaleOutPolicy ConnectorCapacityAutoscalingScaleOutPolicy - The scale-out policy for the connector. See below.
- maxWorkerCount Integer - The maximum number of workers allocated to the connector.
- minWorkerCount Integer - The minimum number of workers allocated to the connector.
- mcuCount Integer - The number of microcontroller units (MCUs) allocated to each connector worker. Valid values: 1, 2, 4, 8. The default value is 1.
- scaleInPolicy ConnectorCapacityAutoscalingScaleInPolicy - The scale-in policy for the connector. See below.
- scaleOutPolicy ConnectorCapacityAutoscalingScaleOutPolicy - The scale-out policy for the connector. See below.
- maxWorkerCount number - The maximum number of workers allocated to the connector.
- minWorkerCount number - The minimum number of workers allocated to the connector.
- mcuCount number - The number of microcontroller units (MCUs) allocated to each connector worker. Valid values: 1, 2, 4, 8. The default value is 1.
- scaleInPolicy ConnectorCapacityAutoscalingScaleInPolicy - The scale-in policy for the connector. See below.
- scaleOutPolicy ConnectorCapacityAutoscalingScaleOutPolicy - The scale-out policy for the connector. See below.
- max_worker_count int - The maximum number of workers allocated to the connector.
- min_worker_count int - The minimum number of workers allocated to the connector.
- mcu_count int - The number of microcontroller units (MCUs) allocated to each connector worker. Valid values: 1, 2, 4, 8. The default value is 1.
- scale_in_policy ConnectorCapacityAutoscalingScaleInPolicy - The scale-in policy for the connector. See below.
- scale_out_policy ConnectorCapacityAutoscalingScaleOutPolicy - The scale-out policy for the connector. See below.
- maxWorkerCount Number - The maximum number of workers allocated to the connector.
- minWorkerCount Number - The minimum number of workers allocated to the connector.
- mcuCount Number - The number of microcontroller units (MCUs) allocated to each connector worker. Valid values: 1, 2, 4, 8. The default value is 1.
- scaleInPolicy Property Map - The scale-in policy for the connector. See below.
- scaleOutPolicy Property Map - The scale-out policy for the connector. See below.
ConnectorCapacityAutoscalingScaleInPolicy, ConnectorCapacityAutoscalingScaleInPolicyArgs
- CpuUtilizationPercentage int - Specifies the CPU utilization percentage threshold at which you want connector scale in to be triggered.
- CpuUtilizationPercentage int - Specifies the CPU utilization percentage threshold at which you want connector scale in to be triggered.
- cpuUtilizationPercentage Integer - Specifies the CPU utilization percentage threshold at which you want connector scale in to be triggered.
- cpuUtilizationPercentage number - Specifies the CPU utilization percentage threshold at which you want connector scale in to be triggered.
- cpu_utilization_percentage int - Specifies the CPU utilization percentage threshold at which you want connector scale in to be triggered.
- cpuUtilizationPercentage Number - Specifies the CPU utilization percentage threshold at which you want connector scale in to be triggered.
ConnectorCapacityAutoscalingScaleOutPolicy, ConnectorCapacityAutoscalingScaleOutPolicyArgs
- CpuUtilizationPercentage int - The CPU utilization percentage threshold at which you want connector scale out to be triggered.
- CpuUtilizationPercentage int - The CPU utilization percentage threshold at which you want connector scale out to be triggered.
- cpuUtilizationPercentage Integer - The CPU utilization percentage threshold at which you want connector scale out to be triggered.
- cpuUtilizationPercentage number - The CPU utilization percentage threshold at which you want connector scale out to be triggered.
- cpu_utilization_percentage int - The CPU utilization percentage threshold at which you want connector scale out to be triggered.
- cpuUtilizationPercentage Number - The CPU utilization percentage threshold at which you want connector scale out to be triggered.
ConnectorCapacityProvisionedCapacity, ConnectorCapacityProvisionedCapacityArgs
- WorkerCount int - The number of workers that are allocated to the connector.
- McuCount int - The number of microcontroller units (MCUs) allocated to each connector worker. Valid values: 1, 2, 4, 8. The default value is 1.
- WorkerCount int - The number of workers that are allocated to the connector.
- McuCount int - The number of microcontroller units (MCUs) allocated to each connector worker. Valid values: 1, 2, 4, 8. The default value is 1.
- workerCount Integer - The number of workers that are allocated to the connector.
- mcuCount Integer - The number of microcontroller units (MCUs) allocated to each connector worker. Valid values: 1, 2, 4, 8. The default value is 1.
- workerCount number - The number of workers that are allocated to the connector.
- mcuCount number - The number of microcontroller units (MCUs) allocated to each connector worker. Valid values: 1, 2, 4, 8. The default value is 1.
- worker_count int - The number of workers that are allocated to the connector.
- mcu_count int - The number of microcontroller units (MCUs) allocated to each connector worker. Valid values: 1, 2, 4, 8. The default value is 1.
- workerCount Number - The number of workers that are allocated to the connector.
- mcuCount Number - The number of microcontroller units (MCUs) allocated to each connector worker. Valid values: 1, 2, 4, 8. The default value is 1.
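The basic configuration at the top of this page uses autoscaling. As a brief TypeScript sketch of the fixed-size alternative, a provisionedCapacity block can be supplied instead (a capacity block carries either autoscaling or provisionedCapacity, not both):
import * as aws from "@pulumi/aws";

// Fixed capacity: two workers with one MCU each, instead of autoscaling.
const capacity: aws.types.input.mskconnect.ConnectorCapacity = {
    provisionedCapacity: {
        mcuCount: 1,
        workerCount: 2,
    },
};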
ConnectorKafkaCluster, ConnectorKafkaClusterArgs
- ApacheKafkaCluster ConnectorKafkaClusterApacheKafkaCluster - The Apache Kafka cluster to which the connector is connected.
- ApacheKafkaCluster ConnectorKafkaClusterApacheKafkaCluster - The Apache Kafka cluster to which the connector is connected.
- apacheKafkaCluster ConnectorKafkaClusterApacheKafkaCluster - The Apache Kafka cluster to which the connector is connected.
- apacheKafkaCluster ConnectorKafkaClusterApacheKafkaCluster - The Apache Kafka cluster to which the connector is connected.
- apache_kafka_cluster ConnectorKafkaClusterApacheKafkaCluster - The Apache Kafka cluster to which the connector is connected.
- apacheKafkaCluster Property Map - The Apache Kafka cluster to which the connector is connected.
ConnectorKafkaClusterApacheKafkaCluster, ConnectorKafkaClusterApacheKafkaClusterArgs
- BootstrapServers string - The bootstrap servers of the cluster.
- Vpc ConnectorKafkaClusterApacheKafkaClusterVpc - Details of an Amazon VPC which has network connectivity to the Apache Kafka cluster.
- BootstrapServers string - The bootstrap servers of the cluster.
- Vpc ConnectorKafkaClusterApacheKafkaClusterVpc - Details of an Amazon VPC which has network connectivity to the Apache Kafka cluster.
- bootstrapServers String - The bootstrap servers of the cluster.
- vpc ConnectorKafkaClusterApacheKafkaClusterVpc - Details of an Amazon VPC which has network connectivity to the Apache Kafka cluster.
- bootstrapServers string - The bootstrap servers of the cluster.
- vpc ConnectorKafkaClusterApacheKafkaClusterVpc - Details of an Amazon VPC which has network connectivity to the Apache Kafka cluster.
- bootstrap_servers str - The bootstrap servers of the cluster.
- vpc ConnectorKafkaClusterApacheKafkaClusterVpc - Details of an Amazon VPC which has network connectivity to the Apache Kafka cluster.
- bootstrapServers String - The bootstrap servers of the cluster.
- vpc Property Map - Details of an Amazon VPC which has network connectivity to the Apache Kafka cluster.
ConnectorKafkaClusterApacheKafkaClusterVpc, ConnectorKafkaClusterApacheKafkaClusterVpcArgs
- SecurityGroups List<string> - The security groups for the connector.
- Subnets List<string> - The subnets for the connector.
- SecurityGroups []string - The security groups for the connector.
- Subnets []string - The subnets for the connector.
- securityGroups List<String> - The security groups for the connector.
- subnets List<String> - The subnets for the connector.
- securityGroups string[] - The security groups for the connector.
- subnets string[] - The subnets for the connector.
- security_groups Sequence[str] - The security groups for the connector.
- subnets Sequence[str] - The subnets for the connector.
- securityGroups List<String> - The security groups for the connector.
- subnets List<String> - The subnets for the connector.
ConnectorKafkaClusterClientAuthentication, ConnectorKafkaClusterClientAuthenticationArgs
- AuthenticationType string - The type of client authentication used to connect to the Apache Kafka cluster. Valid values: IAM, NONE. A value of NONE means that no client authentication is used. The default value is NONE.
- AuthenticationType string - The type of client authentication used to connect to the Apache Kafka cluster. Valid values: IAM, NONE. A value of NONE means that no client authentication is used. The default value is NONE.
- authenticationType String - The type of client authentication used to connect to the Apache Kafka cluster. Valid values: IAM, NONE. A value of NONE means that no client authentication is used. The default value is NONE.
- authenticationType string - The type of client authentication used to connect to the Apache Kafka cluster. Valid values: IAM, NONE. A value of NONE means that no client authentication is used. The default value is NONE.
- authentication_type str - The type of client authentication used to connect to the Apache Kafka cluster. Valid values: IAM, NONE. A value of NONE means that no client authentication is used. The default value is NONE.
- authenticationType String - The type of client authentication used to connect to the Apache Kafka cluster. Valid values: IAM, NONE. A value of NONE means that no client authentication is used. The default value is NONE.
ConnectorKafkaClusterEncryptionInTransit, ConnectorKafkaClusterEncryptionInTransitArgs
- encryptionType String - The type of encryption in transit to the Apache Kafka cluster. Valid values: PLAINTEXT, TLS. The default value is PLAINTEXT.
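A minimal sketch in TypeScript of the client authentication and encryption-in-transit blocks, assuming IAM authentication (which is used together with TLS in transit); they would be passed as the connector's kafkaClusterClientAuthentication and kafkaClusterEncryptionInTransit arguments:
import * as aws from "@pulumi/aws";

// IAM client authentication paired with TLS encryption in transit.
const kafkaClusterClientAuthentication: aws.types.input.mskconnect.ConnectorKafkaClusterClientAuthentication = {
    authenticationType: "IAM",
};

const kafkaClusterEncryptionInTransit: aws.types.input.mskconnect.ConnectorKafkaClusterEncryptionInTransit = {
    encryptionType: "TLS",
};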
ConnectorLogDelivery, ConnectorLogDeliveryArgs
- workerLogDelivery ConnectorLogDeliveryWorkerLogDelivery - The workers can send worker logs to different destination types. This configuration specifies the details of these destinations. See below.
ConnectorLogDeliveryWorkerLogDelivery, ConnectorLogDeliveryWorkerLogDeliveryArgs
- cloudwatchLogs ConnectorLogDeliveryWorkerLogDeliveryCloudwatchLogs - Details about delivering logs to Amazon CloudWatch Logs. See below.
- firehose ConnectorLogDeliveryWorkerLogDeliveryFirehose - Details about delivering logs to Amazon Kinesis Data Firehose. See below.
- s3 ConnectorLogDeliveryWorkerLogDeliveryS3 - Details about delivering logs to Amazon S3. See below.
ConnectorLogDeliveryWorkerLogDeliveryCloudwatchLogs, ConnectorLogDeliveryWorkerLogDeliveryCloudwatchLogsArgs
- enabled Boolean - Specifies whether connector logs get delivered to Amazon CloudWatch Logs.
- logGroup String - The name of the CloudWatch log group that is the destination for log delivery.
ConnectorLogDeliveryWorkerLogDeliveryFirehose, ConnectorLogDeliveryWorkerLogDeliveryFirehoseArgs
- enabled Boolean - Specifies whether connector logs get delivered to Amazon Kinesis Data Firehose.
- deliveryStream String - The name of the Kinesis Data Firehose delivery stream that is the destination for log delivery.
ConnectorLogDeliveryWorkerLogDeliveryS3, ConnectorLogDeliveryWorkerLogDeliveryS3Args
- enabled Boolean - Specifies whether connector logs get delivered to Amazon S3.
- bucket String - The name of the S3 bucket that is the destination for log delivery.
- prefix String - The S3 prefix that is the destination for log delivery.
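A minimal sketch in TypeScript of a logDelivery block that sends worker logs to CloudWatch Logs; the log group name is a placeholder, and the firehose and s3 blocks (omitted here) follow the same enabled-plus-destination pattern:
import * as aws from "@pulumi/aws";

// Worker log delivery to CloudWatch Logs; the log group name is a placeholder.
const logDelivery: aws.types.input.mskconnect.ConnectorLogDelivery = {
    workerLogDelivery: {
        cloudwatchLogs: {
            enabled: true,
            logGroup: "example-msk-connect-logs",
        },
    },
};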
ConnectorPlugin, ConnectorPluginArgs
- customPlugin ConnectorPluginCustomPlugin - Details about a custom plugin. See below.
ConnectorPluginCustomPlugin, ConnectorPluginCustomPluginArgs
- arn String - The Amazon Resource Name (ARN) of the custom plugin.
- revision Number - The revision of the custom plugin.
ConnectorWorkerConfiguration, ConnectorWorkerConfigurationArgs
- arn String - The Amazon Resource Name (ARN) of the worker configuration.
- revision Number - The revision of the worker configuration.
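The ARN and revision typically come from a companion aws.mskconnect.WorkerConfiguration resource. A minimal sketch in TypeScript; the worker properties shown are illustrative:
import * as aws from "@pulumi/aws";

// A worker configuration whose ARN and latest revision feed the connector's
// workerConfiguration block; the properties file content is illustrative.
const exampleWorkerConfig = new aws.mskconnect.WorkerConfiguration("example", {
    name: "example",
    propertiesFileContent: [
        "key.converter=org.apache.kafka.connect.storage.StringConverter",
        "value.converter=org.apache.kafka.connect.storage.StringConverter",
    ].join("\n"),
});

const workerConfiguration: aws.types.input.mskconnect.ConnectorWorkerConfiguration = {
    arn: exampleWorkerConfig.arn,
    revision: exampleWorkerConfig.latestRevision,
};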
Import
Using pulumi import, import MSK Connect Connector using the connector's arn. For example:
$ pulumi import aws:mskconnect/connector:Connector example 'arn:aws:kafkaconnect:eu-central-1:123456789012:connector/example/264edee4-17a3-412e-bd76-6681cfc93805-3'
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository: AWS Classic pulumi/pulumi-aws
- License: Apache-2.0
- Notes: This Pulumi package is based on the aws Terraform Provider.