aws.dms.Endpoint
Provides a DMS (Database Migration Service) endpoint resource. DMS endpoints can be created, updated, deleted, and imported.
Note: All arguments, including the password, will be stored in the raw state as plain text.
Note: The s3_settings argument is deprecated, may not be maintained, and will be removed in a future version. Use the aws.dms.S3Endpoint resource instead.
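For S3 targets, the note above points to the dedicated aws.dms.S3Endpoint resource. The following is a minimal TypeScript sketch of that approach; the bucket name and IAM role ARN are placeholders, and only a few common arguments are shown (see the aws.dms.S3Endpoint reference for the full argument set).

import * as aws from "@pulumi/aws";

// Placeholder ARN for an IAM role that grants DMS access to the target bucket.
const dmsS3AccessRoleArn = "arn:aws:iam::123456789012:role/dms-s3-access";

const s3Target = new aws.dms.S3Endpoint("s3-target", {
    endpointId: "test-dms-s3-endpoint",
    endpointType: "target",
    bucketName: "my-dms-target-bucket", // placeholder bucket
    serviceAccessRoleArn: dmsS3AccessRoleArn,
});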
Example Usage
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
// Create a new endpoint
const test = new aws.dms.Endpoint("test", {
certificateArn: "arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012",
databaseName: "test",
endpointId: "test-dms-endpoint-tf",
endpointType: "source",
engineName: "aurora",
extraConnectionAttributes: "",
kmsKeyArn: "arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012",
password: "test",
port: 3306,
serverName: "test",
sslMode: "none",
tags: {
Name: "test",
},
username: "test",
});
import pulumi
import pulumi_aws as aws
# Create a new endpoint
test = aws.dms.Endpoint("test",
certificate_arn="arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012",
database_name="test",
endpoint_id="test-dms-endpoint-tf",
endpoint_type="source",
engine_name="aurora",
extra_connection_attributes="",
kms_key_arn="arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012",
password="test",
port=3306,
server_name="test",
ssl_mode="none",
tags={
"Name": "test",
},
username="test")
package main
import (
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/dms"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
// Create a new endpoint
_, err := dms.NewEndpoint(ctx, "test", &dms.EndpointArgs{
CertificateArn: pulumi.String("arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012"),
DatabaseName: pulumi.String("test"),
EndpointId: pulumi.String("test-dms-endpoint-tf"),
EndpointType: pulumi.String("source"),
EngineName: pulumi.String("aurora"),
ExtraConnectionAttributes: pulumi.String(""),
KmsKeyArn: pulumi.String("arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012"),
Password: pulumi.String("test"),
Port: pulumi.Int(3306),
ServerName: pulumi.String("test"),
SslMode: pulumi.String("none"),
Tags: pulumi.StringMap{
"Name": pulumi.String("test"),
},
Username: pulumi.String("test"),
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
// Create a new endpoint
var test = new Aws.Dms.Endpoint("test", new()
{
CertificateArn = "arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012",
DatabaseName = "test",
EndpointId = "test-dms-endpoint-tf",
EndpointType = "source",
EngineName = "aurora",
ExtraConnectionAttributes = "",
KmsKeyArn = "arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012",
Password = "test",
Port = 3306,
ServerName = "test",
SslMode = "none",
Tags =
{
{ "Name", "test" },
},
Username = "test",
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.dms.Endpoint;
import com.pulumi.aws.dms.EndpointArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
// Create a new endpoint
var test = new Endpoint("test", EndpointArgs.builder()
.certificateArn("arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012")
.databaseName("test")
.endpointId("test-dms-endpoint-tf")
.endpointType("source")
.engineName("aurora")
.extraConnectionAttributes("")
.kmsKeyArn("arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012")
.password("test")
.port(3306)
.serverName("test")
.sslMode("none")
.tags(Map.of("Name", "test"))
.username("test")
.build());
}
}
resources:
# Create a new endpoint
test:
type: aws:dms:Endpoint
properties:
certificateArn: arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012
databaseName: test
endpointId: test-dms-endpoint-tf
endpointType: source
engineName: aurora
extraConnectionAttributes:
kmsKeyArn: arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
password: test
port: 3306
serverName: test
sslMode: none
tags:
Name: test
username: test
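Because all arguments, including the password, end up in the raw state as plain text, you may prefer to read the password from Pulumi config as a secret instead of hard-coding it. A minimal sketch in TypeScript, assuming a stack config value named dbPassword was set with pulumi config set --secret dbPassword (the host name is a placeholder):

import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

const config = new pulumi.Config();
// requireSecret returns the value marked as a secret, so Pulumi encrypts it in state.
const dbPassword = config.requireSecret("dbPassword");

const source = new aws.dms.Endpoint("source", {
    endpointId: "example-source-endpoint",
    endpointType: "source",
    engineName: "mysql",
    serverName: "db.example.com", // placeholder host
    port: 3306,
    username: "admin",
    password: dbPassword,
});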
Create Endpoint Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new Endpoint(name: string, args: EndpointArgs, opts?: CustomResourceOptions);
@overload
def Endpoint(resource_name: str,
args: EndpointArgs,
opts: Optional[ResourceOptions] = None)
@overload
def Endpoint(resource_name: str,
opts: Optional[ResourceOptions] = None,
endpoint_id: Optional[str] = None,
engine_name: Optional[str] = None,
endpoint_type: Optional[str] = None,
pause_replication_tasks: Optional[bool] = None,
postgres_settings: Optional[EndpointPostgresSettingsArgs] = None,
database_name: Optional[str] = None,
extra_connection_attributes: Optional[str] = None,
kafka_settings: Optional[EndpointKafkaSettingsArgs] = None,
kinesis_settings: Optional[EndpointKinesisSettingsArgs] = None,
kms_key_arn: Optional[str] = None,
mongodb_settings: Optional[EndpointMongodbSettingsArgs] = None,
password: Optional[str] = None,
certificate_arn: Optional[str] = None,
port: Optional[int] = None,
elasticsearch_settings: Optional[EndpointElasticsearchSettingsArgs] = None,
redis_settings: Optional[EndpointRedisSettingsArgs] = None,
redshift_settings: Optional[EndpointRedshiftSettingsArgs] = None,
s3_settings: Optional[EndpointS3SettingsArgs] = None,
secrets_manager_access_role_arn: Optional[str] = None,
secrets_manager_arn: Optional[str] = None,
server_name: Optional[str] = None,
service_access_role: Optional[str] = None,
ssl_mode: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None,
username: Optional[str] = None)
func NewEndpoint(ctx *Context, name string, args EndpointArgs, opts ...ResourceOption) (*Endpoint, error)
public Endpoint(string name, EndpointArgs args, CustomResourceOptions? opts = null)
public Endpoint(String name, EndpointArgs args)
public Endpoint(String name, EndpointArgs args, CustomResourceOptions options)
type: aws:dms:Endpoint
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name / resource_name - The unique name of the resource.
- args (EndpointArgs) - The arguments to resource properties.
- opts / options (CustomResourceOptions or ResourceOptions) - Bag of options to control the resource's behavior.
- ctx (Context, Go only) - Context object for the current deployment.
Constructor example
The following reference example uses placeholder values for all input properties.
var endpointResource = new Aws.Dms.Endpoint("endpointResource", new()
{
EndpointId = "string",
EngineName = "string",
EndpointType = "string",
PauseReplicationTasks = false,
PostgresSettings = new Aws.Dms.Inputs.EndpointPostgresSettingsArgs
{
AfterConnectScript = "string",
BabelfishDatabaseName = "string",
CaptureDdls = false,
DatabaseMode = "string",
DdlArtifactsSchema = "string",
ExecuteTimeout = 0,
FailTasksOnLobTruncation = false,
HeartbeatEnable = false,
HeartbeatFrequency = 0,
HeartbeatSchema = "string",
MapBooleanAsBoolean = false,
MapJsonbAsClob = false,
MapLongVarcharAs = "string",
MaxFileSize = 0,
PluginName = "string",
SlotName = "string",
},
DatabaseName = "string",
ExtraConnectionAttributes = "string",
KafkaSettings = new Aws.Dms.Inputs.EndpointKafkaSettingsArgs
{
Broker = "string",
IncludeControlDetails = false,
IncludeNullAndEmpty = false,
IncludePartitionValue = false,
IncludeTableAlterOperations = false,
IncludeTransactionDetails = false,
MessageFormat = "string",
MessageMaxBytes = 0,
NoHexPrefix = false,
PartitionIncludeSchemaTable = false,
SaslPassword = "string",
SaslUsername = "string",
SecurityProtocol = "string",
SslCaCertificateArn = "string",
SslClientCertificateArn = "string",
SslClientKeyArn = "string",
SslClientKeyPassword = "string",
Topic = "string",
},
KinesisSettings = new Aws.Dms.Inputs.EndpointKinesisSettingsArgs
{
IncludeControlDetails = false,
IncludeNullAndEmpty = false,
IncludePartitionValue = false,
IncludeTableAlterOperations = false,
IncludeTransactionDetails = false,
MessageFormat = "string",
PartitionIncludeSchemaTable = false,
ServiceAccessRoleArn = "string",
StreamArn = "string",
},
KmsKeyArn = "string",
MongodbSettings = new Aws.Dms.Inputs.EndpointMongodbSettingsArgs
{
AuthMechanism = "string",
AuthSource = "string",
AuthType = "string",
DocsToInvestigate = "string",
ExtractDocId = "string",
NestingLevel = "string",
},
Password = "string",
CertificateArn = "string",
Port = 0,
ElasticsearchSettings = new Aws.Dms.Inputs.EndpointElasticsearchSettingsArgs
{
EndpointUri = "string",
ServiceAccessRoleArn = "string",
ErrorRetryDuration = 0,
FullLoadErrorPercentage = 0,
UseNewMappingType = false,
},
RedisSettings = new Aws.Dms.Inputs.EndpointRedisSettingsArgs
{
AuthType = "string",
Port = 0,
ServerName = "string",
AuthPassword = "string",
AuthUserName = "string",
SslCaCertificateArn = "string",
SslSecurityProtocol = "string",
},
RedshiftSettings = new Aws.Dms.Inputs.EndpointRedshiftSettingsArgs
{
BucketFolder = "string",
BucketName = "string",
EncryptionMode = "string",
ServerSideEncryptionKmsKeyId = "string",
ServiceAccessRoleArn = "string",
},
S3Settings = new Aws.Dms.Inputs.EndpointS3SettingsArgs
{
AddColumnName = false,
BucketFolder = "string",
BucketName = "string",
CannedAclForObjects = "string",
CdcInsertsAndUpdates = false,
CdcInsertsOnly = false,
CdcMaxBatchInterval = 0,
CdcMinFileSize = 0,
CdcPath = "string",
CompressionType = "string",
CsvDelimiter = "string",
CsvNoSupValue = "string",
CsvNullValue = "string",
CsvRowDelimiter = "string",
DataFormat = "string",
DataPageSize = 0,
DatePartitionDelimiter = "string",
DatePartitionEnabled = false,
DatePartitionSequence = "string",
DictPageSizeLimit = 0,
EnableStatistics = false,
EncodingType = "string",
EncryptionMode = "string",
ExternalTableDefinition = "string",
GlueCatalogGeneration = false,
IgnoreHeaderRows = 0,
IncludeOpForFullLoad = false,
MaxFileSize = 0,
ParquetTimestampInMillisecond = false,
ParquetVersion = "string",
PreserveTransactions = false,
Rfc4180 = false,
RowGroupLength = 0,
ServerSideEncryptionKmsKeyId = "string",
ServiceAccessRoleArn = "string",
TimestampColumnName = "string",
UseCsvNoSupValue = false,
UseTaskStartTimeForFullLoadTimestamp = false,
},
SecretsManagerAccessRoleArn = "string",
SecretsManagerArn = "string",
ServerName = "string",
ServiceAccessRole = "string",
SslMode = "string",
Tags =
{
{ "string", "string" },
},
Username = "string",
});
example, err := dms.NewEndpoint(ctx, "endpointResource", &dms.EndpointArgs{
EndpointId: pulumi.String("string"),
EngineName: pulumi.String("string"),
EndpointType: pulumi.String("string"),
PauseReplicationTasks: pulumi.Bool(false),
PostgresSettings: &dms.EndpointPostgresSettingsArgs{
AfterConnectScript: pulumi.String("string"),
BabelfishDatabaseName: pulumi.String("string"),
CaptureDdls: pulumi.Bool(false),
DatabaseMode: pulumi.String("string"),
DdlArtifactsSchema: pulumi.String("string"),
ExecuteTimeout: pulumi.Int(0),
FailTasksOnLobTruncation: pulumi.Bool(false),
HeartbeatEnable: pulumi.Bool(false),
HeartbeatFrequency: pulumi.Int(0),
HeartbeatSchema: pulumi.String("string"),
MapBooleanAsBoolean: pulumi.Bool(false),
MapJsonbAsClob: pulumi.Bool(false),
MapLongVarcharAs: pulumi.String("string"),
MaxFileSize: pulumi.Int(0),
PluginName: pulumi.String("string"),
SlotName: pulumi.String("string"),
},
DatabaseName: pulumi.String("string"),
ExtraConnectionAttributes: pulumi.String("string"),
KafkaSettings: &dms.EndpointKafkaSettingsArgs{
Broker: pulumi.String("string"),
IncludeControlDetails: pulumi.Bool(false),
IncludeNullAndEmpty: pulumi.Bool(false),
IncludePartitionValue: pulumi.Bool(false),
IncludeTableAlterOperations: pulumi.Bool(false),
IncludeTransactionDetails: pulumi.Bool(false),
MessageFormat: pulumi.String("string"),
MessageMaxBytes: pulumi.Int(0),
NoHexPrefix: pulumi.Bool(false),
PartitionIncludeSchemaTable: pulumi.Bool(false),
SaslPassword: pulumi.String("string"),
SaslUsername: pulumi.String("string"),
SecurityProtocol: pulumi.String("string"),
SslCaCertificateArn: pulumi.String("string"),
SslClientCertificateArn: pulumi.String("string"),
SslClientKeyArn: pulumi.String("string"),
SslClientKeyPassword: pulumi.String("string"),
Topic: pulumi.String("string"),
},
KinesisSettings: &dms.EndpointKinesisSettingsArgs{
IncludeControlDetails: pulumi.Bool(false),
IncludeNullAndEmpty: pulumi.Bool(false),
IncludePartitionValue: pulumi.Bool(false),
IncludeTableAlterOperations: pulumi.Bool(false),
IncludeTransactionDetails: pulumi.Bool(false),
MessageFormat: pulumi.String("string"),
PartitionIncludeSchemaTable: pulumi.Bool(false),
ServiceAccessRoleArn: pulumi.String("string"),
StreamArn: pulumi.String("string"),
},
KmsKeyArn: pulumi.String("string"),
MongodbSettings: &dms.EndpointMongodbSettingsArgs{
AuthMechanism: pulumi.String("string"),
AuthSource: pulumi.String("string"),
AuthType: pulumi.String("string"),
DocsToInvestigate: pulumi.String("string"),
ExtractDocId: pulumi.String("string"),
NestingLevel: pulumi.String("string"),
},
Password: pulumi.String("string"),
CertificateArn: pulumi.String("string"),
Port: pulumi.Int(0),
ElasticsearchSettings: &dms.EndpointElasticsearchSettingsArgs{
EndpointUri: pulumi.String("string"),
ServiceAccessRoleArn: pulumi.String("string"),
ErrorRetryDuration: pulumi.Int(0),
FullLoadErrorPercentage: pulumi.Int(0),
UseNewMappingType: pulumi.Bool(false),
},
RedisSettings: &dms.EndpointRedisSettingsArgs{
AuthType: pulumi.String("string"),
Port: pulumi.Int(0),
ServerName: pulumi.String("string"),
AuthPassword: pulumi.String("string"),
AuthUserName: pulumi.String("string"),
SslCaCertificateArn: pulumi.String("string"),
SslSecurityProtocol: pulumi.String("string"),
},
RedshiftSettings: &dms.EndpointRedshiftSettingsArgs{
BucketFolder: pulumi.String("string"),
BucketName: pulumi.String("string"),
EncryptionMode: pulumi.String("string"),
ServerSideEncryptionKmsKeyId: pulumi.String("string"),
ServiceAccessRoleArn: pulumi.String("string"),
},
S3Settings: &dms.EndpointS3SettingsArgs{
AddColumnName: pulumi.Bool(false),
BucketFolder: pulumi.String("string"),
BucketName: pulumi.String("string"),
CannedAclForObjects: pulumi.String("string"),
CdcInsertsAndUpdates: pulumi.Bool(false),
CdcInsertsOnly: pulumi.Bool(false),
CdcMaxBatchInterval: pulumi.Int(0),
CdcMinFileSize: pulumi.Int(0),
CdcPath: pulumi.String("string"),
CompressionType: pulumi.String("string"),
CsvDelimiter: pulumi.String("string"),
CsvNoSupValue: pulumi.String("string"),
CsvNullValue: pulumi.String("string"),
CsvRowDelimiter: pulumi.String("string"),
DataFormat: pulumi.String("string"),
DataPageSize: pulumi.Int(0),
DatePartitionDelimiter: pulumi.String("string"),
DatePartitionEnabled: pulumi.Bool(false),
DatePartitionSequence: pulumi.String("string"),
DictPageSizeLimit: pulumi.Int(0),
EnableStatistics: pulumi.Bool(false),
EncodingType: pulumi.String("string"),
EncryptionMode: pulumi.String("string"),
ExternalTableDefinition: pulumi.String("string"),
GlueCatalogGeneration: pulumi.Bool(false),
IgnoreHeaderRows: pulumi.Int(0),
IncludeOpForFullLoad: pulumi.Bool(false),
MaxFileSize: pulumi.Int(0),
ParquetTimestampInMillisecond: pulumi.Bool(false),
ParquetVersion: pulumi.String("string"),
PreserveTransactions: pulumi.Bool(false),
Rfc4180: pulumi.Bool(false),
RowGroupLength: pulumi.Int(0),
ServerSideEncryptionKmsKeyId: pulumi.String("string"),
ServiceAccessRoleArn: pulumi.String("string"),
TimestampColumnName: pulumi.String("string"),
UseCsvNoSupValue: pulumi.Bool(false),
UseTaskStartTimeForFullLoadTimestamp: pulumi.Bool(false),
},
SecretsManagerAccessRoleArn: pulumi.String("string"),
SecretsManagerArn: pulumi.String("string"),
ServerName: pulumi.String("string"),
ServiceAccessRole: pulumi.String("string"),
SslMode: pulumi.String("string"),
Tags: pulumi.StringMap{
"string": pulumi.String("string"),
},
Username: pulumi.String("string"),
})
var endpointResource = new Endpoint("endpointResource", EndpointArgs.builder()
.endpointId("string")
.engineName("string")
.endpointType("string")
.pauseReplicationTasks(false)
.postgresSettings(EndpointPostgresSettingsArgs.builder()
.afterConnectScript("string")
.babelfishDatabaseName("string")
.captureDdls(false)
.databaseMode("string")
.ddlArtifactsSchema("string")
.executeTimeout(0)
.failTasksOnLobTruncation(false)
.heartbeatEnable(false)
.heartbeatFrequency(0)
.heartbeatSchema("string")
.mapBooleanAsBoolean(false)
.mapJsonbAsClob(false)
.mapLongVarcharAs("string")
.maxFileSize(0)
.pluginName("string")
.slotName("string")
.build())
.databaseName("string")
.extraConnectionAttributes("string")
.kafkaSettings(EndpointKafkaSettingsArgs.builder()
.broker("string")
.includeControlDetails(false)
.includeNullAndEmpty(false)
.includePartitionValue(false)
.includeTableAlterOperations(false)
.includeTransactionDetails(false)
.messageFormat("string")
.messageMaxBytes(0)
.noHexPrefix(false)
.partitionIncludeSchemaTable(false)
.saslPassword("string")
.saslUsername("string")
.securityProtocol("string")
.sslCaCertificateArn("string")
.sslClientCertificateArn("string")
.sslClientKeyArn("string")
.sslClientKeyPassword("string")
.topic("string")
.build())
.kinesisSettings(EndpointKinesisSettingsArgs.builder()
.includeControlDetails(false)
.includeNullAndEmpty(false)
.includePartitionValue(false)
.includeTableAlterOperations(false)
.includeTransactionDetails(false)
.messageFormat("string")
.partitionIncludeSchemaTable(false)
.serviceAccessRoleArn("string")
.streamArn("string")
.build())
.kmsKeyArn("string")
.mongodbSettings(EndpointMongodbSettingsArgs.builder()
.authMechanism("string")
.authSource("string")
.authType("string")
.docsToInvestigate("string")
.extractDocId("string")
.nestingLevel("string")
.build())
.password("string")
.certificateArn("string")
.port(0)
.elasticsearchSettings(EndpointElasticsearchSettingsArgs.builder()
.endpointUri("string")
.serviceAccessRoleArn("string")
.errorRetryDuration(0)
.fullLoadErrorPercentage(0)
.useNewMappingType(false)
.build())
.redisSettings(EndpointRedisSettingsArgs.builder()
.authType("string")
.port(0)
.serverName("string")
.authPassword("string")
.authUserName("string")
.sslCaCertificateArn("string")
.sslSecurityProtocol("string")
.build())
.redshiftSettings(EndpointRedshiftSettingsArgs.builder()
.bucketFolder("string")
.bucketName("string")
.encryptionMode("string")
.serverSideEncryptionKmsKeyId("string")
.serviceAccessRoleArn("string")
.build())
.s3Settings(EndpointS3SettingsArgs.builder()
.addColumnName(false)
.bucketFolder("string")
.bucketName("string")
.cannedAclForObjects("string")
.cdcInsertsAndUpdates(false)
.cdcInsertsOnly(false)
.cdcMaxBatchInterval(0)
.cdcMinFileSize(0)
.cdcPath("string")
.compressionType("string")
.csvDelimiter("string")
.csvNoSupValue("string")
.csvNullValue("string")
.csvRowDelimiter("string")
.dataFormat("string")
.dataPageSize(0)
.datePartitionDelimiter("string")
.datePartitionEnabled(false)
.datePartitionSequence("string")
.dictPageSizeLimit(0)
.enableStatistics(false)
.encodingType("string")
.encryptionMode("string")
.externalTableDefinition("string")
.glueCatalogGeneration(false)
.ignoreHeaderRows(0)
.includeOpForFullLoad(false)
.maxFileSize(0)
.parquetTimestampInMillisecond(false)
.parquetVersion("string")
.preserveTransactions(false)
.rfc4180(false)
.rowGroupLength(0)
.serverSideEncryptionKmsKeyId("string")
.serviceAccessRoleArn("string")
.timestampColumnName("string")
.useCsvNoSupValue(false)
.useTaskStartTimeForFullLoadTimestamp(false)
.build())
.secretsManagerAccessRoleArn("string")
.secretsManagerArn("string")
.serverName("string")
.serviceAccessRole("string")
.sslMode("string")
.tags(Map.of("string", "string"))
.username("string")
.build());
endpoint_resource = aws.dms.Endpoint("endpointResource",
endpoint_id="string",
engine_name="string",
endpoint_type="string",
pause_replication_tasks=False,
postgres_settings={
"afterConnectScript": "string",
"babelfishDatabaseName": "string",
"captureDdls": False,
"databaseMode": "string",
"ddlArtifactsSchema": "string",
"executeTimeout": 0,
"failTasksOnLobTruncation": False,
"heartbeatEnable": False,
"heartbeatFrequency": 0,
"heartbeatSchema": "string",
"mapBooleanAsBoolean": False,
"mapJsonbAsClob": False,
"mapLongVarcharAs": "string",
"maxFileSize": 0,
"pluginName": "string",
"slotName": "string",
},
database_name="string",
extra_connection_attributes="string",
kafka_settings={
"broker": "string",
"includeControlDetails": False,
"includeNullAndEmpty": False,
"includePartitionValue": False,
"includeTableAlterOperations": False,
"includeTransactionDetails": False,
"messageFormat": "string",
"messageMaxBytes": 0,
"noHexPrefix": False,
"partitionIncludeSchemaTable": False,
"saslPassword": "string",
"saslUsername": "string",
"securityProtocol": "string",
"sslCaCertificateArn": "string",
"sslClientCertificateArn": "string",
"sslClientKeyArn": "string",
"sslClientKeyPassword": "string",
"topic": "string",
},
kinesis_settings={
"includeControlDetails": False,
"includeNullAndEmpty": False,
"includePartitionValue": False,
"includeTableAlterOperations": False,
"includeTransactionDetails": False,
"messageFormat": "string",
"partitionIncludeSchemaTable": False,
"serviceAccessRoleArn": "string",
"streamArn": "string",
},
kms_key_arn="string",
mongodb_settings={
"authMechanism": "string",
"authSource": "string",
"authType": "string",
"docsToInvestigate": "string",
"extractDocId": "string",
"nestingLevel": "string",
},
password="string",
certificate_arn="string",
port=0,
elasticsearch_settings={
"endpointUri": "string",
"serviceAccessRoleArn": "string",
"errorRetryDuration": 0,
"fullLoadErrorPercentage": 0,
"useNewMappingType": False,
},
redis_settings={
"authType": "string",
"port": 0,
"serverName": "string",
"authPassword": "string",
"authUserName": "string",
"sslCaCertificateArn": "string",
"sslSecurityProtocol": "string",
},
redshift_settings={
"bucketFolder": "string",
"bucketName": "string",
"encryptionMode": "string",
"serverSideEncryptionKmsKeyId": "string",
"serviceAccessRoleArn": "string",
},
s3_settings={
"addColumnName": False,
"bucketFolder": "string",
"bucketName": "string",
"cannedAclForObjects": "string",
"cdcInsertsAndUpdates": False,
"cdcInsertsOnly": False,
"cdcMaxBatchInterval": 0,
"cdcMinFileSize": 0,
"cdcPath": "string",
"compressionType": "string",
"csvDelimiter": "string",
"csvNoSupValue": "string",
"csvNullValue": "string",
"csvRowDelimiter": "string",
"dataFormat": "string",
"dataPageSize": 0,
"datePartitionDelimiter": "string",
"datePartitionEnabled": False,
"datePartitionSequence": "string",
"dictPageSizeLimit": 0,
"enableStatistics": False,
"encodingType": "string",
"encryptionMode": "string",
"externalTableDefinition": "string",
"glueCatalogGeneration": False,
"ignoreHeaderRows": 0,
"includeOpForFullLoad": False,
"maxFileSize": 0,
"parquetTimestampInMillisecond": False,
"parquetVersion": "string",
"preserveTransactions": False,
"rfc4180": False,
"rowGroupLength": 0,
"serverSideEncryptionKmsKeyId": "string",
"serviceAccessRoleArn": "string",
"timestampColumnName": "string",
"useCsvNoSupValue": False,
"useTaskStartTimeForFullLoadTimestamp": False,
},
secrets_manager_access_role_arn="string",
secrets_manager_arn="string",
server_name="string",
service_access_role="string",
ssl_mode="string",
tags={
"string": "string",
},
username="string")
const endpointResource = new aws.dms.Endpoint("endpointResource", {
endpointId: "string",
engineName: "string",
endpointType: "string",
pauseReplicationTasks: false,
postgresSettings: {
afterConnectScript: "string",
babelfishDatabaseName: "string",
captureDdls: false,
databaseMode: "string",
ddlArtifactsSchema: "string",
executeTimeout: 0,
failTasksOnLobTruncation: false,
heartbeatEnable: false,
heartbeatFrequency: 0,
heartbeatSchema: "string",
mapBooleanAsBoolean: false,
mapJsonbAsClob: false,
mapLongVarcharAs: "string",
maxFileSize: 0,
pluginName: "string",
slotName: "string",
},
databaseName: "string",
extraConnectionAttributes: "string",
kafkaSettings: {
broker: "string",
includeControlDetails: false,
includeNullAndEmpty: false,
includePartitionValue: false,
includeTableAlterOperations: false,
includeTransactionDetails: false,
messageFormat: "string",
messageMaxBytes: 0,
noHexPrefix: false,
partitionIncludeSchemaTable: false,
saslPassword: "string",
saslUsername: "string",
securityProtocol: "string",
sslCaCertificateArn: "string",
sslClientCertificateArn: "string",
sslClientKeyArn: "string",
sslClientKeyPassword: "string",
topic: "string",
},
kinesisSettings: {
includeControlDetails: false,
includeNullAndEmpty: false,
includePartitionValue: false,
includeTableAlterOperations: false,
includeTransactionDetails: false,
messageFormat: "string",
partitionIncludeSchemaTable: false,
serviceAccessRoleArn: "string",
streamArn: "string",
},
kmsKeyArn: "string",
mongodbSettings: {
authMechanism: "string",
authSource: "string",
authType: "string",
docsToInvestigate: "string",
extractDocId: "string",
nestingLevel: "string",
},
password: "string",
certificateArn: "string",
port: 0,
elasticsearchSettings: {
endpointUri: "string",
serviceAccessRoleArn: "string",
errorRetryDuration: 0,
fullLoadErrorPercentage: 0,
useNewMappingType: false,
},
redisSettings: {
authType: "string",
port: 0,
serverName: "string",
authPassword: "string",
authUserName: "string",
sslCaCertificateArn: "string",
sslSecurityProtocol: "string",
},
redshiftSettings: {
bucketFolder: "string",
bucketName: "string",
encryptionMode: "string",
serverSideEncryptionKmsKeyId: "string",
serviceAccessRoleArn: "string",
},
s3Settings: {
addColumnName: false,
bucketFolder: "string",
bucketName: "string",
cannedAclForObjects: "string",
cdcInsertsAndUpdates: false,
cdcInsertsOnly: false,
cdcMaxBatchInterval: 0,
cdcMinFileSize: 0,
cdcPath: "string",
compressionType: "string",
csvDelimiter: "string",
csvNoSupValue: "string",
csvNullValue: "string",
csvRowDelimiter: "string",
dataFormat: "string",
dataPageSize: 0,
datePartitionDelimiter: "string",
datePartitionEnabled: false,
datePartitionSequence: "string",
dictPageSizeLimit: 0,
enableStatistics: false,
encodingType: "string",
encryptionMode: "string",
externalTableDefinition: "string",
glueCatalogGeneration: false,
ignoreHeaderRows: 0,
includeOpForFullLoad: false,
maxFileSize: 0,
parquetTimestampInMillisecond: false,
parquetVersion: "string",
preserveTransactions: false,
rfc4180: false,
rowGroupLength: 0,
serverSideEncryptionKmsKeyId: "string",
serviceAccessRoleArn: "string",
timestampColumnName: "string",
useCsvNoSupValue: false,
useTaskStartTimeForFullLoadTimestamp: false,
},
secretsManagerAccessRoleArn: "string",
secretsManagerArn: "string",
serverName: "string",
serviceAccessRole: "string",
sslMode: "string",
tags: {
string: "string",
},
username: "string",
});
type: aws:dms:Endpoint
properties:
certificateArn: string
databaseName: string
elasticsearchSettings:
endpointUri: string
errorRetryDuration: 0
fullLoadErrorPercentage: 0
serviceAccessRoleArn: string
useNewMappingType: false
endpointId: string
endpointType: string
engineName: string
extraConnectionAttributes: string
kafkaSettings:
broker: string
includeControlDetails: false
includeNullAndEmpty: false
includePartitionValue: false
includeTableAlterOperations: false
includeTransactionDetails: false
messageFormat: string
messageMaxBytes: 0
noHexPrefix: false
partitionIncludeSchemaTable: false
saslPassword: string
saslUsername: string
securityProtocol: string
sslCaCertificateArn: string
sslClientCertificateArn: string
sslClientKeyArn: string
sslClientKeyPassword: string
topic: string
kinesisSettings:
includeControlDetails: false
includeNullAndEmpty: false
includePartitionValue: false
includeTableAlterOperations: false
includeTransactionDetails: false
messageFormat: string
partitionIncludeSchemaTable: false
serviceAccessRoleArn: string
streamArn: string
kmsKeyArn: string
mongodbSettings:
authMechanism: string
authSource: string
authType: string
docsToInvestigate: string
extractDocId: string
nestingLevel: string
password: string
pauseReplicationTasks: false
port: 0
postgresSettings:
afterConnectScript: string
babelfishDatabaseName: string
captureDdls: false
databaseMode: string
ddlArtifactsSchema: string
executeTimeout: 0
failTasksOnLobTruncation: false
heartbeatEnable: false
heartbeatFrequency: 0
heartbeatSchema: string
mapBooleanAsBoolean: false
mapJsonbAsClob: false
mapLongVarcharAs: string
maxFileSize: 0
pluginName: string
slotName: string
redisSettings:
authPassword: string
authType: string
authUserName: string
port: 0
serverName: string
sslCaCertificateArn: string
sslSecurityProtocol: string
redshiftSettings:
bucketFolder: string
bucketName: string
encryptionMode: string
serverSideEncryptionKmsKeyId: string
serviceAccessRoleArn: string
s3Settings:
addColumnName: false
bucketFolder: string
bucketName: string
cannedAclForObjects: string
cdcInsertsAndUpdates: false
cdcInsertsOnly: false
cdcMaxBatchInterval: 0
cdcMinFileSize: 0
cdcPath: string
compressionType: string
csvDelimiter: string
csvNoSupValue: string
csvNullValue: string
csvRowDelimiter: string
dataFormat: string
dataPageSize: 0
datePartitionDelimiter: string
datePartitionEnabled: false
datePartitionSequence: string
dictPageSizeLimit: 0
enableStatistics: false
encodingType: string
encryptionMode: string
externalTableDefinition: string
glueCatalogGeneration: false
ignoreHeaderRows: 0
includeOpForFullLoad: false
maxFileSize: 0
parquetTimestampInMillisecond: false
parquetVersion: string
preserveTransactions: false
rfc4180: false
rowGroupLength: 0
serverSideEncryptionKmsKeyId: string
serviceAccessRoleArn: string
timestampColumnName: string
useCsvNoSupValue: false
useTaskStartTimeForFullLoadTimestamp: false
secretsManagerAccessRoleArn: string
secretsManagerArn: string
serverName: string
serviceAccessRole: string
sslMode: string
tags:
string: string
username: string
Endpoint Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
The Endpoint resource accepts the following input properties:
Property names below use the TypeScript/camelCase form; other SDKs use the casing shown in the constructor signatures above (for example, endpoint_id in Python).
- endpointId (string) - Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.
- endpointType (string) - Type of endpoint. Valid values are source and target.
- engineName (string) - Type of engine for the endpoint. Valid values are aurora, aurora-postgresql, azuredb, azure-sql-managed-instance, babelfish, db2, db2-zos, docdb, dynamodb, elasticsearch, kafka, kinesis, mariadb, mongodb, mysql, opensearch, oracle, postgres, redshift, s3, sqlserver, and sybase. Note that some engine names are available only for the target endpoint type (e.g. redshift).
- certificateArn (string) - ARN for the certificate.
- databaseName (string) - Name of the endpoint database.
- elasticsearchSettings (EndpointElasticsearchSettings) - Configuration block for OpenSearch settings. See below.
- extraConnectionAttributes (string) - Additional attributes associated with the connection. For available attributes for a source endpoint, see Sources for data migration. For available attributes for a target endpoint, see Targets for data migration.
- kafkaSettings (EndpointKafkaSettings) - Configuration block for Kafka settings. See below.
- kinesisSettings (EndpointKinesisSettings) - Configuration block for Kinesis settings. See below.
- kmsKeyArn (string) - ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, AWS DMS uses your default encryption key. AWS KMS creates the default encryption key for your AWS account, and your account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS key for the Redshift target, and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage.

The following arguments are optional:

- mongodbSettings (EndpointMongodbSettings) - Configuration block for MongoDB settings. See below.
- password (string) - Password to be used to log in to the endpoint database.
- pauseReplicationTasks (bool)
- port (int) - Port used by the endpoint database.
- postgresSettings (EndpointPostgresSettings) - Configuration block for Postgres settings. See below.
- redisSettings (EndpointRedisSettings) - Configuration block for Redis settings. See below.
- redshiftSettings (EndpointRedshiftSettings) - Configuration block for Redshift settings. See below.
- s3Settings (EndpointS3Settings) - (Deprecated, use the aws.dms.S3Endpoint resource instead.) Configuration block for S3 settings. See below.
- secretsManagerAccessRoleArn (string) - ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in the Secrets Manager secret referred to by secrets_manager_arn. The role must allow the iam:PassRole action. Note: You can specify one of two sets of values for these permissions: either the values for this setting and secrets_manager_arn, or clear-text values for username, password, server_name, and port. You can't specify both.
- secretsManagerArn (string) - Full ARN, partial ARN, or friendly name of the Secrets Manager secret that contains the endpoint connection details. Supported only when engine_name is aurora, aurora-postgresql, mariadb, mongodb, mysql, oracle, postgres, redshift, or sqlserver.
- serverName (string) - Host name of the server.
- serviceAccessRole (string) - ARN used by the service access IAM role for DynamoDB endpoints.
- sslMode (string) - SSL mode to use for the connection. Valid values are none, require, verify-ca, and verify-full.
- tags (Map<string, string>) - Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider level.
- username (string) - User name to be used to log in to the endpoint database.
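As described for secretsManagerArn and secretsManagerAccessRoleArn above, the connection details can come from a Secrets Manager secret instead of clear-text username, password, server_name, and port values. A TypeScript sketch of that variant follows; both ARNs are placeholders.

import * as aws from "@pulumi/aws";

const secretManaged = new aws.dms.Endpoint("secret-managed", {
    endpointId: "example-postgres-endpoint",
    endpointType: "source",
    engineName: "postgres",
    databaseName: "appdb",
    // Connection details are resolved from the secret; do not also set
    // username, password, serverName, or port.
    secretsManagerArn: "arn:aws:secretsmanager:us-east-1:123456789012:secret:dms-source-credentials",
    secretsManagerAccessRoleArn: "arn:aws:iam::123456789012:role/dms-secrets-access",
});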
Outputs
All input properties are implicitly available as output properties. Additionally, the Endpoint resource produces the following output properties:
- EndpointArn string - ARN for the endpoint.
- Id string - The provider-assigned unique ID for this managed resource.
- TagsAll Dictionary<string, string> - Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- EndpointArn string - ARN for the endpoint.
- Id string - The provider-assigned unique ID for this managed resource.
- TagsAll map[string]string - Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- endpointArn String - ARN for the endpoint.
- id String - The provider-assigned unique ID for this managed resource.
- tagsAll Map<String,String> - Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- endpointArn string - ARN for the endpoint.
- id string - The provider-assigned unique ID for this managed resource.
- tagsAll {[key: string]: string} - Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- endpoint_arn str - ARN for the endpoint.
- id str - The provider-assigned unique ID for this managed resource.
- tags_all Mapping[str, str] - Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- endpointArn String - ARN for the endpoint.
- id String - The provider-assigned unique ID for this managed resource.
- tagsAll Map<String> - Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
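The computed outputs above can be referenced like any other resource outputs, for example to export the endpoint ARN from a stack. A short sketch, using a minimal endpoint with placeholder connection details:

import * as aws from "@pulumi/aws";

// Minimal endpoint with placeholder connection details.
const source = new aws.dms.Endpoint("mysql-source", {
    endpointId: "mysql-source-endpoint",
    endpointType: "source",
    engineName: "mysql",
    serverName: "db.example.internal",
    port: 3306,
    username: "dms",
    password: "example-password",
    sslMode: "none",
});

// endpointArn and tagsAll are computed by the provider and known after deployment.
export const endpointArn = source.endpointArn;
export const endpointTags = source.tagsAll;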
Look up Existing Endpoint Resource
Get an existing Endpoint resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: EndpointState, opts?: CustomResourceOptions): Endpoint
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
certificate_arn: Optional[str] = None,
database_name: Optional[str] = None,
elasticsearch_settings: Optional[EndpointElasticsearchSettingsArgs] = None,
endpoint_arn: Optional[str] = None,
endpoint_id: Optional[str] = None,
endpoint_type: Optional[str] = None,
engine_name: Optional[str] = None,
extra_connection_attributes: Optional[str] = None,
kafka_settings: Optional[EndpointKafkaSettingsArgs] = None,
kinesis_settings: Optional[EndpointKinesisSettingsArgs] = None,
kms_key_arn: Optional[str] = None,
mongodb_settings: Optional[EndpointMongodbSettingsArgs] = None,
password: Optional[str] = None,
pause_replication_tasks: Optional[bool] = None,
port: Optional[int] = None,
postgres_settings: Optional[EndpointPostgresSettingsArgs] = None,
redis_settings: Optional[EndpointRedisSettingsArgs] = None,
redshift_settings: Optional[EndpointRedshiftSettingsArgs] = None,
s3_settings: Optional[EndpointS3SettingsArgs] = None,
secrets_manager_access_role_arn: Optional[str] = None,
secrets_manager_arn: Optional[str] = None,
server_name: Optional[str] = None,
service_access_role: Optional[str] = None,
ssl_mode: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None,
tags_all: Optional[Mapping[str, str]] = None,
username: Optional[str] = None) -> Endpoint
func GetEndpoint(ctx *Context, name string, id IDInput, state *EndpointState, opts ...ResourceOption) (*Endpoint, error)
public static Endpoint Get(string name, Input<string> id, EndpointState? state, CustomResourceOptions? opts = null)
public static Endpoint get(String name, Output<String> id, EndpointState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
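For example, an endpoint created outside the current program can be looked up and its outputs reused. A TypeScript sketch, assuming my-existing-endpoint is the identifier of an endpoint that already exists in the target account and region:

import * as aws from "@pulumi/aws";

// Look up an existing endpoint by its endpoint identifier (placeholder shown).
const existing = aws.dms.Endpoint.get("existing-endpoint", "my-existing-endpoint");

// The looked-up resource exposes the same outputs as a newly created one.
export const existingEndpointArn = existing.endpointArn;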
- Certificate
Arn string - ARN for the certificate.
- Database
Name string - Name of the endpoint database.
- Elasticsearch
Settings EndpointElasticsearch Settings - Configuration block for OpenSearch settings. See below.
- Endpoint
Arn string - ARN for the endpoint.
- Endpoint
Id string - Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.
- Endpoint
Type string - Type of endpoint. Valid values are
source
,target
. - Engine
Name string - Type of engine for the endpoint. Valid values are
aurora
,aurora-postgresql
,azuredb
,azure-sql-managed-instance
,babelfish
,db2
,db2-zos
,docdb
,dynamodb
,elasticsearch
,kafka
,kinesis
,mariadb
,mongodb
,mysql
,opensearch
,oracle
,postgres
,redshift
,s3
,sqlserver
,sybase
. Please note that some engine names are available only for the target
endpoint type (e.g. redshift
). - Extra
Connection stringAttributes - Additional attributes associated with the connection. For available attributes for a
source
Endpoint, see Sources for data migration. For available attributes for atarget
Endpoint, see Targets for data migration. - Kafka
Settings EndpointKafka Settings - Configuration block for Kafka settings. See below.
- Kinesis
Settings EndpointKinesis Settings - Configuration block for Kinesis settings. See below.
- KmsKeyArn string - ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS key for the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage.
The following arguments are optional:
- Mongodb
Settings EndpointMongodb Settings - Configuration block for MongoDB settings. See below.
- Password string
- Password to be used to login to the endpoint database.
- Pause
Replication boolTasks - Port int
- Port used by the endpoint database.
- Postgres
Settings EndpointPostgres Settings - Configuration block for Postgres settings. See below.
- Redis
Settings EndpointRedis Settings - Redshift
Settings EndpointRedshift Settings - Configuration block for Redshift settings. See below.
- S3Settings
Endpoint
S3Settings - (Deprecated, use the
aws.dms.S3Endpoint
resource instead) Configuration block for S3 settings. See below. - Secrets
Manager stringAccess Role Arn ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in the Secrets Manager secret referred to by
secrets_manager_arn
. The role must allow theiam:PassRole
action.Note: You can specify one of two sets of values for these permissions. You can specify the values for this setting and
secrets_manager_arn
. Or you can specify clear-text values forusername
,password
,server_name
, andport
. You can't specify both.- Secrets
Manager stringArn - Full ARN, partial ARN, or friendly name of the Secrets Manager secret that contains the endpoint connection details. Supported only when
engine_name
isaurora
,aurora-postgresql
,mariadb
,mongodb
,mysql
,oracle
,postgres
,redshift
, orsqlserver
. - Server
Name string - Host name of the server.
- Service
Access stringRole - ARN used by the service access IAM role for dynamodb endpoints.
- Ssl
Mode string - SSL mode to use for the connection. Valid values are
none
,require
,verify-ca
,verify-full
- Dictionary<string, string>
- Map of tags to assign to the resource. If configured with a provider
default_tags
configuration block present, tags with matching keys will overwrite those defined at the provider-level. - Dictionary<string, string>
- Map of tags assigned to the resource, including those inherited from the provider
default_tags
configuration block. - Username string
- User name to be used to login to the endpoint database.
- Certificate
Arn string - ARN for the certificate.
- Database
Name string - Name of the endpoint database.
- Elasticsearch
Settings EndpointElasticsearch Settings Args - Configuration block for OpenSearch settings. See below.
- Endpoint
Arn string - ARN for the endpoint.
- Endpoint
Id string - Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.
- Endpoint
Type string - Type of endpoint. Valid values are
source
,target
. - Engine
Name string - Type of engine for the endpoint. Valid values are
aurora
,aurora-postgresql
,azuredb
,azure-sql-managed-instance
,babelfish
,db2
,db2-zos
,docdb
,dynamodb
,elasticsearch
,kafka
,kinesis
,mariadb
,mongodb
,mysql
,opensearch
,oracle
,postgres
,redshift
,s3
,sqlserver
,sybase
. Please note that some engine names are available only for the target
endpoint type (e.g. redshift
). - Extra
Connection stringAttributes - Additional attributes associated with the connection. For available attributes for a
source
Endpoint, see Sources for data migration. For available attributes for atarget
Endpoint, see Targets for data migration. - Kafka
Settings EndpointKafka Settings Args - Configuration block for Kafka settings. See below.
- Kinesis
Settings EndpointKinesis Settings Args - Configuration block for Kinesis settings. See below.
- KmsKeyArn string - ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS key for the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage.
The following arguments are optional:
- Mongodb
Settings EndpointMongodb Settings Args - Configuration block for MongoDB settings. See below.
- Password string
- Password to be used to login to the endpoint database.
- Pause
Replication boolTasks - Port int
- Port used by the endpoint database.
- Postgres
Settings EndpointPostgres Settings Args - Configuration block for Postgres settings. See below.
- Redis
Settings EndpointRedis Settings Args - Redshift
Settings EndpointRedshift Settings Args - Configuration block for Redshift settings. See below.
- S3Settings
Endpoint
S3Settings Args - (Deprecated, use the
aws.dms.S3Endpoint
resource instead) Configuration block for S3 settings. See below. - Secrets
Manager stringAccess Role Arn ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in the Secrets Manager secret referred to by
secrets_manager_arn
. The role must allow theiam:PassRole
action.Note: You can specify one of two sets of values for these permissions. You can specify the values for this setting and
secrets_manager_arn
. Or you can specify clear-text values forusername
,password
,server_name
, andport
. You can't specify both.- Secrets
Manager stringArn - Full ARN, partial ARN, or friendly name of the Secrets Manager secret that contains the endpoint connection details. Supported only when
engine_name
isaurora
,aurora-postgresql
,mariadb
,mongodb
,mysql
,oracle
,postgres
,redshift
, orsqlserver
. - Server
Name string - Host name of the server.
- Service
Access stringRole - ARN used by the service access IAM role for dynamodb endpoints.
- Ssl
Mode string - SSL mode to use for the connection. Valid values are
none
,require
,verify-ca
,verify-full
- map[string]string
- Map of tags to assign to the resource. If configured with a provider
default_tags
configuration block present, tags with matching keys will overwrite those defined at the provider-level. - map[string]string
- Map of tags assigned to the resource, including those inherited from the provider
default_tags
configuration block. - Username string
- User name to be used to login to the endpoint database.
- certificate
Arn String - ARN for the certificate.
- database
Name String - Name of the endpoint database.
- elasticsearch
Settings EndpointElasticsearch Settings - Configuration block for OpenSearch settings. See below.
- endpoint
Arn String - ARN for the endpoint.
- endpoint
Id String - Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.
- endpoint
Type String - Type of endpoint. Valid values are
source
,target
. - engine
Name String - Type of engine for the endpoint. Valid values are
aurora
,aurora-postgresql
,azuredb
,azure-sql-managed-instance
,babelfish
,db2
,db2-zos
,docdb
,dynamodb
,elasticsearch
,kafka
,kinesis
,mariadb
,mongodb
,mysql
,opensearch
,oracle
,postgres
,redshift
,s3
,sqlserver
,sybase
. Please note that some engine names are available only for the target
endpoint type (e.g. redshift
). - extra
Connection StringAttributes - Additional attributes associated with the connection. For available attributes for a
source
Endpoint, see Sources for data migration. For available attributes for atarget
Endpoint, see Targets for data migration. - kafka
Settings EndpointKafka Settings - Configuration block for Kafka settings. See below.
- kinesis
Settings EndpointKinesis Settings - Configuration block for Kinesis settings. See below.
- kmsKeyArn String - ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS key for the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage.
The following arguments are optional:
- mongodb
Settings EndpointMongodb Settings - Configuration block for MongoDB settings. See below.
- password String
- Password to be used to login to the endpoint database.
- pause
Replication BooleanTasks - port Integer
- Port used by the endpoint database.
- postgres
Settings EndpointPostgres Settings - Configuration block for Postgres settings. See below.
- redis
Settings EndpointRedis Settings - redshift
Settings EndpointRedshift Settings - Configuration block for Redshift settings. See below.
- s3Settings
Endpoint
S3Settings - (Deprecated, use the
aws.dms.S3Endpoint
resource instead) Configuration block for S3 settings. See below. - secrets
Manager StringAccess Role Arn ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in the Secrets Manager secret referred to by
secrets_manager_arn
. The role must allow theiam:PassRole
action.Note: You can specify one of two sets of values for these permissions. You can specify the values for this setting and
secrets_manager_arn
. Or you can specify clear-text values forusername
,password
,server_name
, andport
. You can't specify both.- secrets
Manager StringArn - Full ARN, partial ARN, or friendly name of the Secrets Manager secret that contains the endpoint connection details. Supported only when
engine_name
isaurora
,aurora-postgresql
,mariadb
,mongodb
,mysql
,oracle
,postgres
,redshift
, orsqlserver
. - server
Name String - Host name of the server.
- service
Access StringRole - ARN used by the service access IAM role for dynamodb endpoints.
- ssl
Mode String - SSL mode to use for the connection. Valid values are
none
,require
,verify-ca
,verify-full
- Map<String,String>
- Map of tags to assign to the resource. If configured with a provider
default_tags
configuration block present, tags with matching keys will overwrite those defined at the provider-level. - Map<String,String>
- Map of tags assigned to the resource, including those inherited from the provider
default_tags
configuration block. - username String
- User name to be used to login to the endpoint database.
- certificate
Arn string - ARN for the certificate.
- database
Name string - Name of the endpoint database.
- elasticsearch
Settings EndpointElasticsearch Settings - Configuration block for OpenSearch settings. See below.
- endpoint
Arn string - ARN for the endpoint.
- endpoint
Id string - Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.
- endpoint
Type string - Type of endpoint. Valid values are
source
,target
. - engine
Name string - Type of engine for the endpoint. Valid values are
aurora
,aurora-postgresql
,azuredb
,azure-sql-managed-instance
,babelfish
,db2
,db2-zos
,docdb
,dynamodb
,elasticsearch
,kafka
,kinesis
,mariadb
,mongodb
,mysql
,opensearch
,oracle
,postgres
,redshift
,s3
,sqlserver
,sybase
. Please note that some engine names are available only for the target
endpoint type (e.g. redshift
). - extra
Connection stringAttributes - Additional attributes associated with the connection. For available attributes for a
source
Endpoint, see Sources for data migration. For available attributes for atarget
Endpoint, see Targets for data migration. - kafka
Settings EndpointKafka Settings - Configuration block for Kafka settings. See below.
- kinesis
Settings EndpointKinesis Settings - Configuration block for Kinesis settings. See below.
- kmsKeyArn string - ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS key for the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage.
The following arguments are optional:
- mongodb
Settings EndpointMongodb Settings - Configuration block for MongoDB settings. See below.
- password string
- Password to be used to login to the endpoint database.
- pause
Replication booleanTasks - port number
- Port used by the endpoint database.
- postgres
Settings EndpointPostgres Settings - Configuration block for Postgres settings. See below.
- redis
Settings EndpointRedis Settings - redshift
Settings EndpointRedshift Settings - Configuration block for Redshift settings. See below.
- s3Settings
Endpoint
S3Settings - (Deprecated, use the
aws.dms.S3Endpoint
resource instead) Configuration block for S3 settings. See below. - secrets
Manager stringAccess Role Arn ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in the Secrets Manager secret referred to by
secrets_manager_arn
. The role must allow theiam:PassRole
action.Note: You can specify one of two sets of values for these permissions. You can specify the values for this setting and
secrets_manager_arn
. Or you can specify clear-text values forusername
,password
,server_name
, andport
. You can't specify both.- secrets
Manager stringArn - Full ARN, partial ARN, or friendly name of the Secrets Manager secret that contains the endpoint connection details. Supported only when
engine_name
isaurora
,aurora-postgresql
,mariadb
,mongodb
,mysql
,oracle
,postgres
,redshift
, orsqlserver
. - server
Name string - Host name of the server.
- service
Access stringRole - ARN used by the service access IAM role for dynamodb endpoints.
- ssl
Mode string - SSL mode to use for the connection. Valid values are
none
,require
,verify-ca
,verify-full
- {[key: string]: string}
- Map of tags to assign to the resource. If configured with a provider
default_tags
configuration block present, tags with matching keys will overwrite those defined at the provider-level. - {[key: string]: string}
- Map of tags assigned to the resource, including those inherited from the provider
default_tags
configuration block. - username string
- User name to be used to login to the endpoint database.
- certificate_
arn str - ARN for the certificate.
- database_
name str - Name of the endpoint database.
- elasticsearch_
settings EndpointElasticsearch Settings Args - Configuration block for OpenSearch settings. See below.
- endpoint_
arn str - ARN for the endpoint.
- endpoint_
id str - Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.
- endpoint_
type str - Type of endpoint. Valid values are
source
,target
. - engine_
name str - Type of engine for the endpoint. Valid values are
aurora
,aurora-postgresql
,azuredb
,azure-sql-managed-instance
,babelfish
,db2
,db2-zos
,docdb
,dynamodb
,elasticsearch
,kafka
,kinesis
,mariadb
,mongodb
,mysql
,opensearch
,oracle
,postgres
,redshift
,s3
,sqlserver
,sybase
. Please note that some engine names are available only for the target
endpoint type (e.g. redshift
). - extra_
connection_ strattributes - Additional attributes associated with the connection. For available attributes for a
source
Endpoint, see Sources for data migration. For available attributes for atarget
Endpoint, see Targets for data migration. - kafka_
settings EndpointKafka Settings Args - Configuration block for Kafka settings. See below.
- kinesis_
settings EndpointKinesis Settings Args - Configuration block for Kinesis settings. See below.
- kms_key_arn str - ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS key for the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage.
The following arguments are optional:
- mongodb_
settings EndpointMongodb Settings Args - Configuration block for MongoDB settings. See below.
- password str
- Password to be used to login to the endpoint database.
- pause_
replication_ booltasks - port int
- Port used by the endpoint database.
- postgres_
settings EndpointPostgres Settings Args - Configuration block for Postgres settings. See below.
- redis_
settings EndpointRedis Settings Args - redshift_
settings EndpointRedshift Settings Args - Configuration block for Redshift settings. See below.
- s3_
settings EndpointS3Settings Args - (Deprecated, use the
aws.dms.S3Endpoint
resource instead) Configuration block for S3 settings. See below. - secrets_
manager_ straccess_ role_ arn ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in the Secrets Manager secret referred to by
secrets_manager_arn
. The role must allow theiam:PassRole
action.Note: You can specify one of two sets of values for these permissions. You can specify the values for this setting and
secrets_manager_arn
. Or you can specify clear-text values forusername
,password
,server_name
, andport
. You can't specify both.- secrets_
manager_ strarn - Full ARN, partial ARN, or friendly name of the Secrets Manager secret that contains the endpoint connection details. Supported only when
engine_name
isaurora
,aurora-postgresql
,mariadb
,mongodb
,mysql
,oracle
,postgres
,redshift
, orsqlserver
. - server_
name str - Host name of the server.
- service_
access_ strrole - ARN used by the service access IAM role for dynamodb endpoints.
- ssl_
mode str - SSL mode to use for the connection. Valid values are
none
,require
,verify-ca
,verify-full
- Mapping[str, str]
- Map of tags to assign to the resource. If configured with a provider
default_tags
configuration block present, tags with matching keys will overwrite those defined at the provider-level. - Mapping[str, str]
- Map of tags assigned to the resource, including those inherited from the provider
default_tags
configuration block. - username str
- User name to be used to login to the endpoint database.
- certificate
Arn String - ARN for the certificate.
- database
Name String - Name of the endpoint database.
- elasticsearch
Settings Property Map - Configuration block for OpenSearch settings. See below.
- endpoint
Arn String - ARN for the endpoint.
- endpoint
Id String - Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.
- endpoint
Type String - Type of endpoint. Valid values are
source
,target
. - engine
Name String - Type of engine for the endpoint. Valid values are
aurora
,aurora-postgresql
,azuredb
,azure-sql-managed-instance
,babelfish
,db2
,db2-zos
,docdb
,dynamodb
,elasticsearch
,kafka
,kinesis
,mariadb
,mongodb
,mysql
,opensearch
,oracle
,postgres
,redshift
,s3
,sqlserver
,sybase
. Please note that some engine names are available only for the target
endpoint type (e.g. redshift
). - extra
Connection StringAttributes - Additional attributes associated with the connection. For available attributes for a
source
Endpoint, see Sources for data migration. For available attributes for atarget
Endpoint, see Targets for data migration. - kafka
Settings Property Map - Configuration block for Kafka settings. See below.
- kinesis
Settings Property Map - Configuration block for Kinesis settings. See below.
- kmsKeyArn String - ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS key for the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage.
The following arguments are optional:
- mongodb
Settings Property Map - Configuration block for MongoDB settings. See below.
- password String
- Password to be used to login to the endpoint database.
- pause
Replication BooleanTasks - port Number
- Port used by the endpoint database.
- postgres
Settings Property Map - Configuration block for Postgres settings. See below.
- redis
Settings Property Map - redshift
Settings Property Map - Configuration block for Redshift settings. See below.
- s3Settings Property Map
- (Deprecated, use the
aws.dms.S3Endpoint
resource instead) Configuration block for S3 settings. See below. - secrets
Manager StringAccess Role Arn ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in the Secrets Manager secret referred to by
secrets_manager_arn
. The role must allow theiam:PassRole
action.Note: You can specify one of two sets of values for these permissions. You can specify the values for this setting and
secrets_manager_arn
. Or you can specify clear-text values forusername
,password
,server_name
, andport
. You can't specify both.- secrets
Manager StringArn - Full ARN, partial ARN, or friendly name of the Secrets Manager secret that contains the endpoint connection details. Supported only when
engine_name
isaurora
,aurora-postgresql
,mariadb
,mongodb
,mysql
,oracle
,postgres
,redshift
, orsqlserver
. - server
Name String - Host name of the server.
- service
Access StringRole - ARN used by the service access IAM role for dynamodb endpoints.
- ssl
Mode String - SSL mode to use for the connection. Valid values are
none
,require
,verify-ca
,verify-full
- Map<String>
- Map of tags to assign to the resource. If configured with a provider
default_tags
configuration block present, tags with matching keys will overwrite those defined at the provider-level. - Map<String>
- Map of tags assigned to the resource, including those inherited from the provider
default_tags
configuration block. - username String
- User name to be used to login to the endpoint database.
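As noted in the engine_name description above, some engines such as redshift are valid only for target endpoints. A sketch of a Redshift target follows; the host name and KMS key ARNs are placeholders, and depending on your encryption setup additional redshiftSettings fields may be required.

import * as aws from "@pulumi/aws";

// Redshift can only be used as a *target* engine. Host and ARNs are placeholders.
const redshiftTarget = new aws.dms.Endpoint("redshift-target", {
    endpointId: "redshift-target-endpoint",
    endpointType: "target",
    engineName: "redshift",
    serverName: "example-cluster.abc123xyz789.us-east-1.redshift.amazonaws.com",
    port: 5439,
    databaseName: "analytics",
    username: "dms",
    password: "example-password",
    sslMode: "require",
    // For redshift, kmsKeyArn is the KMS key for the Redshift target; the S3
    // intermediate storage is covered by redshiftSettings.serverSideEncryptionKmsKeyId.
    kmsKeyArn: "arn:aws:kms:us-east-1:123456789012:key/00000000-0000-0000-0000-000000000000",
    redshiftSettings: {
        serverSideEncryptionKmsKeyId: "arn:aws:kms:us-east-1:123456789012:key/00000000-0000-0000-0000-000000000000",
    },
});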
Supporting Types
EndpointElasticsearchSettings, EndpointElasticsearchSettingsArgs
- Endpoint
Uri string - Endpoint for the OpenSearch cluster.
- Service
Access stringRole Arn - ARN of the IAM Role with permissions to write to the OpenSearch cluster.
- Error
Retry intDuration - Maximum number of seconds for which DMS retries failed API requests to the OpenSearch cluster. Default is
300
. - Full
Load intError Percentage - Maximum percentage of records that can fail to be written before a full load operation stops. Default is
10
. - Use
New boolMapping Type - Enable to migrate documentation using the documentation type
_doc
. OpenSearch and Elasticsearch clusters only support the _doc documentation type in versions 7.x and later. The default value is false
.
- Endpoint
Uri string - Endpoint for the OpenSearch cluster.
- Service
Access stringRole Arn - ARN of the IAM Role with permissions to write to the OpenSearch cluster.
- Error
Retry intDuration - Maximum number of seconds for which DMS retries failed API requests to the OpenSearch cluster. Default is
300
. - Full
Load intError Percentage - Maximum percentage of records that can fail to be written before a full load operation stops. Default is
10
. - Use
New boolMapping Type - Enable to migrate documentation using the documentation type
_doc
. OpenSearch and Elasticsearch clusters only support the _doc documentation type in versions 7.x and later. The default value is false
.
- endpoint
Uri String - Endpoint for the OpenSearch cluster.
- service
Access StringRole Arn - ARN of the IAM Role with permissions to write to the OpenSearch cluster.
- error
Retry IntegerDuration - Maximum number of seconds for which DMS retries failed API requests to the OpenSearch cluster. Default is
300
. - full
Load IntegerError Percentage - Maximum percentage of records that can fail to be written before a full load operation stops. Default is
10
. - use
New BooleanMapping Type - Enable to migrate documentation using the documentation type
_doc
. OpenSearch and Elasticsearch clusters only support the _doc documentation type in versions 7.x and later. The default value is false
.
- endpoint
Uri string - Endpoint for the OpenSearch cluster.
- service
Access stringRole Arn - ARN of the IAM Role with permissions to write to the OpenSearch cluster.
- error
Retry numberDuration - Maximum number of seconds for which DMS retries failed API requests to the OpenSearch cluster. Default is
300
. - full
Load numberError Percentage - Maximum percentage of records that can fail to be written before a full load operation stops. Default is
10
. - use
New booleanMapping Type - Enable to migrate documentation using the documentation type
_doc
. OpenSearch and Elasticsearch clusters only support the _doc documentation type in versions 7.x and later. The default value is false
.
- endpoint_
uri str - Endpoint for the OpenSearch cluster.
- service_
access_ strrole_ arn - ARN of the IAM Role with permissions to write to the OpenSearch cluster.
- error_
retry_ intduration - Maximum number of seconds for which DMS retries failed API requests to the OpenSearch cluster. Default is
300
. - full_
load_ interror_ percentage - Maximum percentage of records that can fail to be written before a full load operation stops. Default is
10
. - use_
new_ boolmapping_ type - Enable to migrate documentation using the documentation type
_doc
. OpenSearch and Elasticsearch clusters only support the _doc documentation type in versions 7.x and later. The default value is false
.
- endpoint
Uri String - Endpoint for the OpenSearch cluster.
- service
Access StringRole Arn - ARN of the IAM Role with permissions to write to the OpenSearch cluster.
- error
Retry NumberDuration - Maximum number of seconds for which DMS retries failed API requests to the OpenSearch cluster. Default is
300
. - full
Load NumberError Percentage - Maximum percentage of records that can fail to be written before a full load operation stops. Default is
10
. - use
New BooleanMapping Type - Enable to migrate documentation using the documentation type
_doc
. OpenSearch and Elasticsearch clusters only support the _doc documentation type in versions 7.x and later. The default value is false
.
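For a target endpoint that writes to OpenSearch, the elasticsearchSettings block described above is set inline on the endpoint. A minimal sketch; the domain endpoint URI and service role ARN are placeholders:

import * as aws from "@pulumi/aws";

// OpenSearch target; domain endpoint and service role ARN are placeholders.
const opensearchTarget = new aws.dms.Endpoint("opensearch-target", {
    endpointId: "opensearch-target-endpoint",
    endpointType: "target",
    engineName: "opensearch",
    elasticsearchSettings: {
        endpointUri: "search-example-domain.us-east-1.es.amazonaws.com",
        serviceAccessRoleArn: "arn:aws:iam::123456789012:role/dms-opensearch-access",
        errorRetryDuration: 300,     // seconds to retry failed API requests
        fullLoadErrorPercentage: 10, // stop the full load if more than 10% of records fail
    },
});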
EndpointKafkaSettings, EndpointKafkaSettingsArgs
- Broker string
- Kafka broker location. Specify in the form broker-hostname-or-ip:port.
- Include
Control boolDetails - Shows detailed control information for table definition, column definition, and table and column changes in the Kafka message output. Default is
false
. - Include
Null boolAnd Empty - Include NULL and empty columns for records migrated to the endpoint. Default is
false
. - Include
Partition boolValue - Shows the partition value within the Kafka message output unless the partition type is
schema-table-type
. Default isfalse
. - Include
Table boolAlter Operations - Includes any data definition language (DDL) operations that change the table in the control data, such as
rename-table
,drop-table
,add-column
,drop-column
, andrename-column
. Default isfalse
. - Include
Transaction boolDetails - Provides detailed transaction information from the source database. This information includes a commit timestamp, a log position, and values for
transaction_id
, previoustransaction_id
, andtransaction_record_id
(the record offset within a transaction). Default isfalse
. - Message
Format string - Output format for the records created on the endpoint. Message format is
JSON
(default) orJSON_UNFORMATTED
(a single line with no tab). - Message
Max intBytes - Maximum size in bytes for records created on the endpoint Default is
1,000,000
. - No
Hex boolPrefix - Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal format. For example, by default, AWS DMS adds a '0x' prefix to the LOB column type in hexadecimal format moving from an Oracle source to a Kafka target. Use the
no_hex_prefix
endpoint setting to enable migration of RAW data type columns without adding the'0x'
prefix. - Partition
Include boolSchema Table - Prefixes schema and table names to partition values, when the partition type is
primary-key-type
. Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has thousands of tables and each table has only limited range for a primary key. In this case, the same primary key is sent from thousands of tables to the same partition, which causes throttling. Default isfalse
. - Sasl
Password string - Secure password you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
- Sasl
Username string - Secure user name you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
- Security
Protocol string - Set secure connection to a Kafka target endpoint using Transport Layer Security (TLS). Options include
ssl-encryption
,ssl-authentication
, andsasl-ssl
.sasl-ssl
requiressasl_username
andsasl_password
. - Ssl
Ca stringCertificate Arn - ARN for the private certificate authority (CA) cert that AWS DMS uses to securely connect to your Kafka target endpoint.
- Ssl
Client stringCertificate Arn - ARN of the client certificate used to securely connect to a Kafka target endpoint.
- Ssl
Client stringKey Arn - ARN for the client private key used to securely connect to a Kafka target endpoint.
- Ssl
Client stringKey Password - Password for the client private key used to securely connect to a Kafka target endpoint.
- Topic string
- Kafka topic for migration. Default is
kafka-default-topic
.
- Broker string
- Kafka broker location. Specify in the form broker-hostname-or-ip:port.
- Include
Control boolDetails - Shows detailed control information for table definition, column definition, and table and column changes in the Kafka message output. Default is
false
. - Include
Null boolAnd Empty - Include NULL and empty columns for records migrated to the endpoint. Default is
false
. - Include
Partition boolValue - Shows the partition value within the Kafka message output unless the partition type is
schema-table-type
. Default isfalse
. - Include
Table boolAlter Operations - Includes any data definition language (DDL) operations that change the table in the control data, such as
rename-table
,drop-table
,add-column
,drop-column
, andrename-column
. Default isfalse
. - Include
Transaction boolDetails - Provides detailed transaction information from the source database. This information includes a commit timestamp, a log position, and values for
transaction_id
, previoustransaction_id
, andtransaction_record_id
(the record offset within a transaction). Default isfalse
. - Message
Format string - Output format for the records created on the endpoint. Message format is
JSON
(default) orJSON_UNFORMATTED
(a single line with no tab). - Message
Max intBytes - Maximum size in bytes for records created on the endpoint Default is
1,000,000
. - No
Hex boolPrefix - Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal format. For example, by default, AWS DMS adds a '0x' prefix to the LOB column type in hexadecimal format moving from an Oracle source to a Kafka target. Use the
no_hex_prefix
endpoint setting to enable migration of RAW data type columns without adding the'0x'
prefix. - Partition
Include boolSchema Table - Prefixes schema and table names to partition values, when the partition type is
primary-key-type
. Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has thousands of tables and each table has only limited range for a primary key. In this case, the same primary key is sent from thousands of tables to the same partition, which causes throttling. Default isfalse
. - Sasl
Password string - Secure password you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
- Sasl
Username string - Secure user name you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
- Security
Protocol string - Set secure connection to a Kafka target endpoint using Transport Layer Security (TLS). Options include
ssl-encryption
,ssl-authentication
, andsasl-ssl
.sasl-ssl
requiressasl_username
andsasl_password
. - Ssl
Ca stringCertificate Arn - ARN for the private certificate authority (CA) cert that AWS DMS uses to securely connect to your Kafka target endpoint.
- Ssl
Client stringCertificate Arn - ARN of the client certificate used to securely connect to a Kafka target endpoint.
- Ssl
Client stringKey Arn - ARN for the client private key used to securely connect to a Kafka target endpoint.
- Ssl
Client stringKey Password - Password for the client private key used to securely connect to a Kafka target endpoint.
- Topic string
- Kafka topic for migration. Default is
kafka-default-topic
.
- broker String
- Kafka broker location. Specify in the form broker-hostname-or-ip:port.
- include
Control BooleanDetails - Shows detailed control information for table definition, column definition, and table and column changes in the Kafka message output. Default is
false
. - include
Null BooleanAnd Empty - Include NULL and empty columns for records migrated to the endpoint. Default is
false
. - include
Partition BooleanValue - Shows the partition value within the Kafka message output unless the partition type is
schema-table-type
. Default isfalse
. - include
Table BooleanAlter Operations - Includes any data definition language (DDL) operations that change the table in the control data, such as
rename-table
,drop-table
,add-column
,drop-column
, andrename-column
. Default isfalse
. - include
Transaction BooleanDetails - Provides detailed transaction information from the source database. This information includes a commit timestamp, a log position, and values for
transaction_id
, previoustransaction_id
, andtransaction_record_id
(the record offset within a transaction). Default isfalse
. - message
Format String - Output format for the records created on the endpoint. Message format is
JSON
(default) orJSON_UNFORMATTED
(a single line with no tab). - message
Max IntegerBytes - Maximum size in bytes for records created on the endpoint Default is
1,000,000
. - no
Hex BooleanPrefix - Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal format. For example, by default, AWS DMS adds a '0x' prefix to the LOB column type in hexadecimal format moving from an Oracle source to a Kafka target. Use the
no_hex_prefix
endpoint setting to enable migration of RAW data type columns without adding the'0x'
prefix. - partition
Include BooleanSchema Table - Prefixes schema and table names to partition values, when the partition type is
primary-key-type
. Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has thousands of tables and each table has only limited range for a primary key. In this case, the same primary key is sent from thousands of tables to the same partition, which causes throttling. Default isfalse
. - sasl
Password String - Secure password you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
- sasl
Username String - Secure user name you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
- security
Protocol String - Set secure connection to a Kafka target endpoint using Transport Layer Security (TLS). Options include
ssl-encryption
,ssl-authentication
, andsasl-ssl
.sasl-ssl
requiressasl_username
andsasl_password
. - ssl
Ca StringCertificate Arn - ARN for the private certificate authority (CA) cert that AWS DMS uses to securely connect to your Kafka target endpoint.
- ssl
Client StringCertificate Arn - ARN of the client certificate used to securely connect to a Kafka target endpoint.
- ssl
Client StringKey Arn - ARN for the client private key used to securely connect to a Kafka target endpoint.
- ssl
Client StringKey Password - Password for the client private key used to securely connect to a Kafka target endpoint.
- topic String
- Kafka topic for migration. Default is
kafka-default-topic
.
- broker string
- Kafka broker location. Specify in the form broker-hostname-or-ip:port.
- include
Control booleanDetails - Shows detailed control information for table definition, column definition, and table and column changes in the Kafka message output. Default is
false
. - include
Null booleanAnd Empty - Include NULL and empty columns for records migrated to the endpoint. Default is
false
. - include
Partition booleanValue - Shows the partition value within the Kafka message output unless the partition type is
schema-table-type
. Default isfalse
. - include
Table booleanAlter Operations - Includes any data definition language (DDL) operations that change the table in the control data, such as
rename-table
,drop-table
,add-column
,drop-column
, andrename-column
. Default isfalse
. - include
Transaction booleanDetails - Provides detailed transaction information from the source database. This information includes a commit timestamp, a log position, and values for
transaction_id
, previoustransaction_id
, andtransaction_record_id
(the record offset within a transaction). Default isfalse
. - message
Format string - Output format for the records created on the endpoint. Message format is
JSON
(default) orJSON_UNFORMATTED
(a single line with no tab). - message
Max numberBytes - Maximum size in bytes for records created on the endpoint Default is
1,000,000
. - no
Hex booleanPrefix - Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal format. For example, by default, AWS DMS adds a '0x' prefix to the LOB column type in hexadecimal format moving from an Oracle source to a Kafka target. Use the
no_hex_prefix
endpoint setting to enable migration of RAW data type columns without adding the'0x'
prefix. - partition
Include booleanSchema Table - Prefixes schema and table names to partition values, when the partition type is
primary-key-type
. Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has thousands of tables and each table has only limited range for a primary key. In this case, the same primary key is sent from thousands of tables to the same partition, which causes throttling. Default isfalse
. - sasl
Password string - Secure password you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
- sasl
Username string - Secure user name you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
- security
Protocol string - Set secure connection to a Kafka target endpoint using Transport Layer Security (TLS). Options include
ssl-encryption
,ssl-authentication
, andsasl-ssl
.sasl-ssl
requiressasl_username
andsasl_password
. - ssl
Ca stringCertificate Arn - ARN for the private certificate authority (CA) cert that AWS DMS uses to securely connect to your Kafka target endpoint.
- ssl
Client stringCertificate Arn - ARN of the client certificate used to securely connect to a Kafka target endpoint.
- ssl
Client stringKey Arn - ARN for the client private key used to securely connect to a Kafka target endpoint.
- ssl
Client stringKey Password - Password for the client private key used to securely connect to a Kafka target endpoint.
- topic string
- Kafka topic for migration. Default is
kafka-default-topic
.
- broker str
- Kafka broker location. Specify in the form broker-hostname-or-ip:port.
- include_
control_ booldetails - Shows detailed control information for table definition, column definition, and table and column changes in the Kafka message output. Default is
false
. - include_
null_ booland_ empty - Include NULL and empty columns for records migrated to the endpoint. Default is
false
. - include_
partition_ boolvalue - Shows the partition value within the Kafka message output unless the partition type is
schema-table-type
. Default isfalse
. - include_
table_ boolalter_ operations - Includes any data definition language (DDL) operations that change the table in the control data, such as
rename-table
,drop-table
,add-column
,drop-column
, andrename-column
. Default isfalse
. - include_
transaction_ booldetails - Provides detailed transaction information from the source database. This information includes a commit timestamp, a log position, and values for
transaction_id
, previoustransaction_id
, andtransaction_record_id
(the record offset within a transaction). Default isfalse
. - message_
format str - Output format for the records created on the endpoint. Message format is
JSON
(default) orJSON_UNFORMATTED
(a single line with no tab). - message_
max_ intbytes - Maximum size in bytes for records created on the endpoint Default is
1,000,000
. - no_
hex_ boolprefix - Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal format. For example, by default, AWS DMS adds a '0x' prefix to the LOB column type in hexadecimal format moving from an Oracle source to a Kafka target. Use the
no_hex_prefix
endpoint setting to enable migration of RAW data type columns without adding the'0x'
prefix. - partition_
include_ boolschema_ table - Prefixes schema and table names to partition values, when the partition type is
primary-key-type
. Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has thousands of tables and each table has only limited range for a primary key. In this case, the same primary key is sent from thousands of tables to the same partition, which causes throttling. Default isfalse
. - sasl_
password str - Secure password you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
- sasl_
username str - Secure user name you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
- security_
protocol str - Set secure connection to a Kafka target endpoint using Transport Layer Security (TLS). Options include
ssl-encryption
,ssl-authentication
, andsasl-ssl
.sasl-ssl
requiressasl_username
andsasl_password
. - ssl_
ca_ strcertificate_ arn - ARN for the private certificate authority (CA) cert that AWS DMS uses to securely connect to your Kafka target endpoint.
- ssl_
client_ strcertificate_ arn - ARN of the client certificate used to securely connect to a Kafka target endpoint.
- ssl_
client_ strkey_ arn - ARN for the client private key used to securely connect to a Kafka target endpoint.
- ssl_
client_ strkey_ password - Password for the client private key used to securely connect to a Kafka target endpoint.
- topic str
- Kafka topic for migration. Default is
kafka-default-topic
.
- broker String
- Kafka broker location. Specify in the form broker-hostname-or-ip:port.
- include
Control BooleanDetails - Shows detailed control information for table definition, column definition, and table and column changes in the Kafka message output. Default is
false
. - include
Null BooleanAnd Empty - Include NULL and empty columns for records migrated to the endpoint. Default is
false
. - include
Partition BooleanValue - Shows the partition value within the Kafka message output unless the partition type is
schema-table-type
. Default isfalse
. - include
Table BooleanAlter Operations - Includes any data definition language (DDL) operations that change the table in the control data, such as
rename-table
,drop-table
,add-column
,drop-column
, andrename-column
. Default isfalse
. - include
Transaction BooleanDetails - Provides detailed transaction information from the source database. This information includes a commit timestamp, a log position, and values for
transaction_id
, previoustransaction_id
, andtransaction_record_id
(the record offset within a transaction). Default isfalse
. - message
Format String - Output format for the records created on the endpoint. Message format is
JSON
(default) orJSON_UNFORMATTED
(a single line with no tab). - message
Max NumberBytes - Maximum size in bytes for records created on the endpoint Default is
1,000,000
. - no
Hex BooleanPrefix - Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal format. For example, by default, AWS DMS adds a '0x' prefix to the LOB column type in hexadecimal format moving from an Oracle source to a Kafka target. Use the
no_hex_prefix
endpoint setting to enable migration of RAW data type columns without adding the'0x'
prefix. - partition
Include BooleanSchema Table - Prefixes schema and table names to partition values, when the partition type is
primary-key-type
. Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has thousands of tables and each table has only limited range for a primary key. In this case, the same primary key is sent from thousands of tables to the same partition, which causes throttling. Default isfalse
. - sasl
Password String - Secure password you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
- sasl
Username String - Secure user name you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
- security
Protocol String - Set secure connection to a Kafka target endpoint using Transport Layer Security (TLS). Options include
ssl-encryption
,ssl-authentication
, andsasl-ssl
.sasl-ssl
requiressasl_username
andsasl_password
. - ssl
Ca StringCertificate Arn - ARN for the private certificate authority (CA) cert that AWS DMS uses to securely connect to your Kafka target endpoint.
- ssl
Client StringCertificate Arn - ARN of the client certificate used to securely connect to a Kafka target endpoint.
- ssl
Client StringKey Arn - ARN for the client private key used to securely connect to a Kafka target endpoint.
- ssl
Client StringKey Password - Password for the client private key used to securely connect to a Kafka target endpoint.
- topic String
- Kafka topic for migration. Default is
kafka-default-topic
.
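The Kafka settings above are likewise configured inline on a target endpoint. A sketch with a placeholder broker address and topic:

import * as aws from "@pulumi/aws";

// Kafka target; broker address and topic are placeholders.
const kafkaTarget = new aws.dms.Endpoint("kafka-target", {
    endpointId: "kafka-target-endpoint",
    endpointType: "target",
    engineName: "kafka",
    kafkaSettings: {
        broker: "b-1.example-cluster.kafka.us-east-1.amazonaws.com:9092",
        topic: "dms-replication",
        securityProtocol: "ssl-encryption",
        includePartitionValue: true,
        includeTransactionDetails: true,
    },
});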
EndpointKinesisSettings, EndpointKinesisSettingsArgs
The options below are listed once, using the camelCase property names from the Node.js (TypeScript/JavaScript) SDK. The .NET and Go SDKs expose the same options in PascalCase (for example, IncludeControlDetails), the Python SDK uses snake_case (include_control_details), and the Java and YAML interfaces use camelCase; scalar types map to each language accordingly (boolean/Boolean/bool, number/Integer/int, string/String/str). A short TypeScript sketch follows the list.
- includeControlDetails boolean - Shows detailed control information for table definition, column definition, and table and column changes in the Kinesis message output. Default is false.
- includeNullAndEmpty boolean - Include NULL and empty columns in the target. Default is false.
- includePartitionValue boolean - Shows the partition value within the Kinesis message output, unless the partition type is schema-table-type. Default is false.
- includeTableAlterOperations boolean - Includes any data definition language (DDL) operations that change the table in the control data. Default is false.
- includeTransactionDetails boolean - Provides detailed transaction information from the source database. Default is false.
- messageFormat string - Output format for the records created. Default is json. Valid values are json and json-unformatted (a single line with no tab).
- partitionIncludeSchemaTable boolean - Prefixes schema and table names to partition values, when the partition type is primary-key-type. Default is false.
- serviceAccessRoleArn string - ARN of the IAM Role with permissions to write to the Kinesis data stream.
- streamArn string - ARN of the Kinesis data stream.
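As an illustration of how these options plug into the resource, here is a minimal TypeScript sketch of a Kinesis target endpoint. The endpoint identifier and the stream and role ARNs are placeholder values, not part of this reference.
import * as aws from "@pulumi/aws";
// Kinesis target endpoint with a handful of the settings described above.
const kinesisTarget = new aws.dms.Endpoint("kinesis-target", {
    endpointId: "example-kinesis-target",
    endpointType: "target",
    engineName: "kinesis",
    kinesisSettings: {
        // Placeholder ARNs; point these at a real stream and an IAM role DMS can assume.
        streamArn: "arn:aws:kinesis:us-east-1:123456789012:stream/example-stream",
        serviceAccessRoleArn: "arn:aws:iam::123456789012:role/example-dms-kinesis-role",
        messageFormat: "json",
        includeControlDetails: true,
        includePartitionValue: true,
        partitionIncludeSchemaTable: true,
    },
});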
EndpointMongodbSettings, EndpointMongodbSettingsArgs
As with the Kinesis settings above, the options are listed once using camelCase names; each SDK exposes them with its own casing and scalar types. A short TypeScript sketch follows the list.
- authMechanism string - Authentication mechanism to access the MongoDB source endpoint. Default is default.
- authSource string - Authentication database name. Not used when auth_type is no. Default is admin.
- authType string - Authentication type to access the MongoDB source endpoint. Default is password.
- docsToInvestigate string - Number of documents to preview to determine the document organization. Use this setting when nesting_level is set to one. Default is 1000.
- extractDocId string - Document ID. Use this setting when nesting_level is set to none. Default is false.
- nestingLevel string - Specifies either document or table mode. Default is none. Valid values are one (table mode) and none (document mode).
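A minimal TypeScript sketch of a MongoDB source endpoint using these settings; the host, credentials, and database names are placeholders.
import * as aws from "@pulumi/aws";
// MongoDB source endpoint in table mode, authenticating against the admin database.
const mongodbSource = new aws.dms.Endpoint("mongodb-source", {
    endpointId: "example-mongodb-source",
    endpointType: "source",
    engineName: "mongodb",
    serverName: "mongodb.example.internal", // placeholder host
    port: 27017,
    databaseName: "appdb",
    username: "dms",
    password: "example-password",           // prefer a secret in real programs
    mongodbSettings: {
        authType: "password",
        authMechanism: "default",
        authSource: "admin",
        nestingLevel: "one",
        docsToInvestigate: "1000",          // string, per the reference above
    },
});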
EndpointPostgresSettings, EndpointPostgresSettingsArgs
As with the settings blocks above, the options are listed once using camelCase names; each SDK exposes them with its own casing and scalar types. A short TypeScript sketch follows the list.
- afterConnectScript string - For use with change data capture (CDC) only, this attribute has AWS DMS bypass foreign keys and user triggers to reduce the time it takes to bulk load data.
- babelfishDatabaseName string - The Babelfish for Aurora PostgreSQL database name for the endpoint.
- captureDdls boolean - To capture DDL events, AWS DMS creates various artifacts in the PostgreSQL database when the task starts.
- databaseMode string - Specifies the default behavior of the replication's handling of PostgreSQL-compatible endpoints that require some additional configuration, such as Babelfish endpoints.
- ddlArtifactsSchema string - Sets the schema in which the operational DDL database artifacts are created. Default is public.
- executeTimeout number - Sets the client statement timeout for the PostgreSQL instance, in seconds. Default value is 60.
- failTasksOnLobTruncation boolean - When set to true, this value causes a task to fail if the actual size of a LOB column is greater than the specified LobMaxSize. Default is false.
- heartbeatEnable boolean - The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this, it prevents idle logical replication slots from holding onto old WAL logs, which can result in storage-full situations on the source.
- heartbeatFrequency number - Sets the WAL heartbeat frequency (in minutes). Default value is 5.
- heartbeatSchema string - Sets the schema in which the heartbeat artifacts are created. Default value is public.
- mapBooleanAsBoolean boolean - You can use PostgreSQL endpoint settings to map a boolean as a boolean from your PostgreSQL source to an Amazon Redshift target. Default value is false.
- mapJsonbAsClob boolean - Optional. When true, DMS migrates JSONB values as CLOB.
- mapLongVarcharAs string - Optional. When true, DMS migrates LONG values as VARCHAR.
- maxFileSize number - Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL. Default is 32,768 KB.
- pluginName string - Specifies the plugin to use to create a replication slot. Valid values: pglogical, test_decoding.
- slotName string - Sets the name of a previously created logical replication slot for a CDC load of the PostgreSQL source instance.
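A minimal TypeScript sketch of a PostgreSQL source endpoint configured for CDC with a pre-created replication slot; the host, credentials, and slot name are placeholders.
import * as aws from "@pulumi/aws";
// PostgreSQL source endpoint using the pglogical plugin and a named replication slot.
const postgresSource = new aws.dms.Endpoint("postgres-source", {
    endpointId: "example-postgres-source",
    endpointType: "source",
    engineName: "postgres",
    serverName: "pg.example.internal",    // placeholder host
    port: 5432,
    databaseName: "appdb",
    username: "dms",
    password: "example-password",         // prefer a secret in real programs
    postgresSettings: {
        pluginName: "pglogical",
        slotName: "dms_replication_slot", // must already exist on the source
        heartbeatEnable: true,
        heartbeatFrequency: 5,
        failTasksOnLobTruncation: true,
    },
});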
EndpointRedisSettings, EndpointRedisSettingsArgs
As with the settings blocks above, the options are listed once using camelCase names; each SDK exposes them with its own casing and scalar types. A short TypeScript sketch follows the list.
- authType string - The type of authentication to perform when connecting to a Redis target. Options include none, auth-token, and auth-role. The auth-token option requires an auth_password value to be provided. The auth-role option requires auth_user_name and auth_password values to be provided.
- port number - Transmission Control Protocol (TCP) port for the endpoint.
- serverName string - Fully qualified domain name of the endpoint.
- authPassword string - The password provided with the auth-role and auth-token options of the AuthType setting for a Redis target endpoint.
- authUserName string - The username provided with the auth-role option of the AuthType setting for a Redis target endpoint.
- sslCaCertificateArn string - The Amazon Resource Name (ARN) for the certificate authority (CA) that DMS uses to connect to your Redis target endpoint.
- sslSecurityProtocol string - The plaintext option doesn't provide Transport Layer Security (TLS) encryption for traffic between endpoint and database. Options include plaintext and ssl-encryption. The default is ssl-encryption.
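A minimal TypeScript sketch of a Redis target endpoint that authenticates with a token over TLS; the host and token are placeholders.
import * as aws from "@pulumi/aws";
// Redis target endpoint using token authentication and the default TLS protocol.
const redisTarget = new aws.dms.Endpoint("redis-target", {
    endpointId: "example-redis-target",
    endpointType: "target",
    engineName: "redis",
    redisSettings: {
        serverName: "redis.example.internal", // placeholder host
        port: 6379,
        authType: "auth-token",
        authPassword: "example-token",        // prefer a secret in real programs
        sslSecurityProtocol: "ssl-encryption",
    },
});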
EndpointRedshiftSettings, EndpointRedshiftSettingsArgs
As with the settings blocks above, the options are listed once using camelCase names; each SDK exposes them with its own casing and scalar types. A short TypeScript sketch follows the list.
- bucketFolder string - Custom S3 Bucket Object prefix for intermediate storage.
- bucketName string - Custom S3 Bucket name for intermediate storage.
- encryptionMode string - The server-side encryption mode that you want to encrypt your intermediate .csv object files copied to S3. Defaults to SSE_S3. Valid values are SSE_S3 and SSE_KMS.
- serverSideEncryptionKmsKeyId string - ARN or Id of KMS Key to use when encryption_mode is SSE_KMS.
- serviceAccessRoleArn string - Amazon Resource Name (ARN) of the IAM Role with permissions to read from or write to the S3 Bucket for intermediate storage.
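A minimal TypeScript sketch of a Redshift target endpoint that stages intermediate files in S3 with KMS encryption; the cluster address, bucket, role, and key ARNs are placeholders.
import * as aws from "@pulumi/aws";
// Redshift target endpoint; DMS stages .csv files in the named bucket before loading.
const redshiftTarget = new aws.dms.Endpoint("redshift-target", {
    endpointId: "example-redshift-target",
    endpointType: "target",
    engineName: "redshift",
    serverName: "example-cluster.abc123xyz789.us-east-1.redshift.amazonaws.com",
    port: 5439,
    databaseName: "dev",
    username: "dms",
    password: "example-password", // prefer a secret in real programs
    redshiftSettings: {
        bucketName: "example-dms-staging",
        bucketFolder: "redshift-intermediate",
        encryptionMode: "SSE_KMS",
        serverSideEncryptionKmsKeyId: "arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012",
        serviceAccessRoleArn: "arn:aws:iam::123456789012:role/example-dms-redshift-s3-access",
    },
});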
EndpointS3Settings, EndpointS3SettingsArgs
- Add
Column boolName - Whether to add column name information to the .csv output file. Default is
false
. - Bucket
Folder string - S3 object prefix.
- Bucket
Name string - S3 bucket name.
- Canned
Acl stringFor Objects - Predefined (canned) access control list for objects created in an S3 bucket. Valid values include
none
,private
,public-read
,public-read-write
,authenticated-read
,aws-exec-read
,bucket-owner-read
, andbucket-owner-full-control
. Default isnone
. - Cdc
Inserts boolAnd Updates - Whether to write insert and update operations to .csv or .parquet output files. Default is
false
. - Cdc
Inserts boolOnly - Whether to write insert operations to .csv or .parquet output files. Default is
false
. - Cdc
Max intBatch Interval - Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. Default is
60
. - Cdc
Min intFile Size - Minimum file size condition as defined in kilobytes to output a file to Amazon S3. Default is
32000
. NOTE: Previously, this setting was measured in megabytes but now represents kilobytes. Update configurations accordingly. - Cdc
Path string - Folder path of CDC files. For an S3 source, this setting is required if a task captures change data; otherwise, it's optional. If
cdc_path
is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later. - Compression
Type string - Set to compress target files. Default is
NONE
. Valid values areGZIP
andNONE
. - Csv
Delimiter string - Delimiter used to separate columns in the source files. Default is
,
. - Csv
No stringSup Value - String to use for all columns not included in the supplemental log.
- Csv
Null stringValue - String to as null when writing to the target.
- Csv
Row stringDelimiter - Delimiter used to separate rows in the source files. Default is
\n
. - Data
Format string - Output format for the files that AWS DMS uses to create S3 objects. Valid values are
csv
andparquet
. Default iscsv
. - Data
Page intSize - Size of one data page in bytes. Default is
1048576
(1 MiB). - Date
Partition stringDelimiter - Date separating delimiter to use during folder partitioning. Valid values are
SLASH
,UNDERSCORE
,DASH
, andNONE
. Default isSLASH
. - Date
Partition boolEnabled - Partition S3 bucket folders based on transaction commit dates. Default is
false
. - Date
Partition stringSequence - Date format to use during folder partitioning. Use this parameter when
date_partition_enabled
is set to true. Valid values areYYYYMMDD
,YYYYMMDDHH
,YYYYMM
,MMYYYYDD
, andDDMMYYYY
. Default isYYYYMMDD
. - Dict
Page intSize Limit - Maximum size in bytes of an encoded dictionary page of a column. Default is
1048576
(1 MiB). - Enable
Statistics bool - Whether to enable statistics for Parquet pages and row groups. Default is
true
. - Encoding
Type string - Type of encoding to use. Value values are
rle_dictionary
,plain
, andplain_dictionary
. Default isrle_dictionary
. - Encryption
Mode string - Server-side encryption mode that you want to encrypt your .csv or .parquet object files copied to S3. Valid values are
SSE_S3
andSSE_KMS
. Default isSSE_S3
. - External
Table stringDefinition - JSON document that describes how AWS DMS should interpret the data.
- Glue
Catalog boolGeneration - Whether to integrate AWS Glue Data Catalog with an Amazon S3 target. See Using AWS Glue Data Catalog with an Amazon S3 target for AWS DMS for more information. Default is
false
. - Ignore
Header intRows - When this value is set to
1
, DMS ignores the first row header in a .csv file. Default is0
. - Include
Op boolFor Full Load - Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is
false
. - Max
File intSize - Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from
1
to1048576
. Default is1048576
(1 GB). - Parquet
Timestamp boolIn Millisecond - Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is
false
. - Parquet
Version string - Version of the .parquet file format. Default is
parquet-1-0
. Valid values areparquet-1-0
andparquet-2-0
. - Preserve
Transactions bool - Whether DMS saves the transaction order for a CDC load on the S3 target specified by
cdc_path
. Default isfalse
. - Rfc4180 bool
- For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is
true
. - Row
Group intLength - Number of rows in a row group. Default is
10000
. - Server
Side stringEncryption Kms Key Id - ARN or Id of KMS Key to use when
encryption_mode
isSSE_KMS
. - Service
Access stringRole Arn - ARN of the IAM Role with permissions to read from or write to the S3 Bucket.
- Timestamp
Column stringName - Column to add with timestamp information to the endpoint data for an Amazon S3 target.
- Use
Csv boolNo Sup Value - Whether to use
csv_no_sup_value
for columns not included in the supplemental log. - Use
Task boolStart Time For Full Load Timestamp - When set to true, uses the task start time as the timestamp column value instead of the time data is written to target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time data arrives at the target. Default is
false
.
- Add
Column boolName - Whether to add column name information to the .csv output file. Default is
false
. - Bucket
Folder string - S3 object prefix.
- Bucket
Name string - S3 bucket name.
- Canned
Acl stringFor Objects - Predefined (canned) access control list for objects created in an S3 bucket. Valid values include
none
,private
,public-read
,public-read-write
,authenticated-read
,aws-exec-read
,bucket-owner-read
, andbucket-owner-full-control
. Default isnone
. - Cdc
Inserts boolAnd Updates - Whether to write insert and update operations to .csv or .parquet output files. Default is
false
. - Cdc
Inserts boolOnly - Whether to write insert operations to .csv or .parquet output files. Default is
false
. - Cdc
Max intBatch Interval - Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. Default is
60
. - Cdc
Min intFile Size - Minimum file size condition as defined in kilobytes to output a file to Amazon S3. Default is
32000
. NOTE: Previously, this setting was measured in megabytes but now represents kilobytes. Update configurations accordingly. - Cdc
Path string - Folder path of CDC files. For an S3 source, this setting is required if a task captures change data; otherwise, it's optional. If
cdc_path
is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later. - Compression
Type string - Set to compress target files. Default is
NONE
. Valid values areGZIP
andNONE
. - Csv
Delimiter string - Delimiter used to separate columns in the source files. Default is
,
. - Csv
No stringSup Value - String to use for all columns not included in the supplemental log.
- Csv
Null stringValue - String to as null when writing to the target.
- Csv
Row stringDelimiter - Delimiter used to separate rows in the source files. Default is
\n
. - Data
Format string - Output format for the files that AWS DMS uses to create S3 objects. Valid values are
csv
andparquet
. Default iscsv
. - Data
Page intSize - Size of one data page in bytes. Default is
1048576
(1 MiB). - Date
Partition stringDelimiter - Date separating delimiter to use during folder partitioning. Valid values are
SLASH
,UNDERSCORE
,DASH
, andNONE
. Default isSLASH
. - Date
Partition boolEnabled - Partition S3 bucket folders based on transaction commit dates. Default is
false
. - Date
Partition stringSequence - Date format to use during folder partitioning. Use this parameter when
date_partition_enabled
is set to true. Valid values areYYYYMMDD
,YYYYMMDDHH
,YYYYMM
,MMYYYYDD
, andDDMMYYYY
. Default isYYYYMMDD
. - Dict
Page intSize Limit - Maximum size in bytes of an encoded dictionary page of a column. Default is
1048576
(1 MiB). - Enable
Statistics bool - Whether to enable statistics for Parquet pages and row groups. Default is
true
. - Encoding
Type string - Type of encoding to use. Value values are
rle_dictionary
,plain
, andplain_dictionary
. Default isrle_dictionary
. - Encryption
Mode string - Server-side encryption mode that you want to encrypt your .csv or .parquet object files copied to S3. Valid values are
SSE_S3
andSSE_KMS
. Default isSSE_S3
. - External
Table stringDefinition - JSON document that describes how AWS DMS should interpret the data.
- Glue
Catalog boolGeneration - Whether to integrate AWS Glue Data Catalog with an Amazon S3 target. See Using AWS Glue Data Catalog with an Amazon S3 target for AWS DMS for more information. Default is
false
. - Ignore
Header intRows - When this value is set to
1
, DMS ignores the first row header in a .csv file. Default is0
. - Include
Op boolFor Full Load - Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is
false
. - Max
File intSize - Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from
1
to1048576
. Default is1048576
(1 GB). - Parquet
Timestamp boolIn Millisecond - Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is
false
. - Parquet
Version string - Version of the .parquet file format. Default is
parquet-1-0
. Valid values areparquet-1-0
andparquet-2-0
. - Preserve
Transactions bool - Whether DMS saves the transaction order for a CDC load on the S3 target specified by
cdc_path
. Default isfalse
. - Rfc4180 bool
- For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is
true
. - Row
Group intLength - Number of rows in a row group. Default is
10000
. - Server
Side stringEncryption Kms Key Id - ARN or Id of KMS Key to use when
encryption_mode
isSSE_KMS
. - Service
Access stringRole Arn - ARN of the IAM Role with permissions to read from or write to the S3 Bucket.
- Timestamp
Column stringName - Column to add with timestamp information to the endpoint data for an Amazon S3 target.
- Use
Csv boolNo Sup Value - Whether to use
csv_no_sup_value
for columns not included in the supplemental log. - Use
Task boolStart Time For Full Load Timestamp - When set to true, uses the task start time as the timestamp column value instead of the time data is written to target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time data arrives at the target. Default is
false
.
- add
Column BooleanName - Whether to add column name information to the .csv output file. Default is
false
. - bucket
Folder String - S3 object prefix.
- bucket
Name String - S3 bucket name.
- canned
Acl StringFor Objects - Predefined (canned) access control list for objects created in an S3 bucket. Valid values include
none
,private
,public-read
,public-read-write
,authenticated-read
,aws-exec-read
,bucket-owner-read
, andbucket-owner-full-control
. Default isnone
. - cdc
Inserts BooleanAnd Updates - Whether to write insert and update operations to .csv or .parquet output files. Default is
false
. - cdc
Inserts BooleanOnly - Whether to write insert operations to .csv or .parquet output files. Default is
false
. - cdc
Max IntegerBatch Interval - Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. Default is
60
. - cdc
Min IntegerFile Size - Minimum file size condition as defined in kilobytes to output a file to Amazon S3. Default is
32000
. NOTE: Previously, this setting was measured in megabytes but now represents kilobytes. Update configurations accordingly. - cdc
Path String - Folder path of CDC files. For an S3 source, this setting is required if a task captures change data; otherwise, it's optional. If
cdc_path
is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later. - compression
Type String - Set to compress target files. Default is
NONE
. Valid values areGZIP
andNONE
. - csv
Delimiter String - Delimiter used to separate columns in the source files. Default is
,
. - csv
No StringSup Value - String to use for all columns not included in the supplemental log.
- csv
Null StringValue - String to as null when writing to the target.
- csv
Row StringDelimiter - Delimiter used to separate rows in the source files. Default is
\n
. - data
Format String - Output format for the files that AWS DMS uses to create S3 objects. Valid values are
csv
andparquet
. Default iscsv
. - data
Page IntegerSize - Size of one data page in bytes. Default is
1048576
(1 MiB). - date
Partition StringDelimiter - Date separating delimiter to use during folder partitioning. Valid values are
SLASH
,UNDERSCORE
,DASH
, andNONE
. Default isSLASH
. - date
Partition BooleanEnabled - Partition S3 bucket folders based on transaction commit dates. Default is
false
. - date
Partition StringSequence - Date format to use during folder partitioning. Use this parameter when
date_partition_enabled
is set to true. Valid values areYYYYMMDD
,YYYYMMDDHH
,YYYYMM
,MMYYYYDD
, andDDMMYYYY
. Default isYYYYMMDD
. - dict
Page IntegerSize Limit - Maximum size in bytes of an encoded dictionary page of a column. Default is
1048576
(1 MiB). - enable
Statistics Boolean - Whether to enable statistics for Parquet pages and row groups. Default is
true
. - encoding
Type String - Type of encoding to use. Value values are
rle_dictionary
,plain
, andplain_dictionary
. Default isrle_dictionary
. - encryption
Mode String - Server-side encryption mode that you want to encrypt your .csv or .parquet object files copied to S3. Valid values are
SSE_S3
andSSE_KMS
. Default isSSE_S3
. - external
Table StringDefinition - JSON document that describes how AWS DMS should interpret the data.
- glue
Catalog BooleanGeneration - Whether to integrate AWS Glue Data Catalog with an Amazon S3 target. See Using AWS Glue Data Catalog with an Amazon S3 target for AWS DMS for more information. Default is
false
. - ignore
Header IntegerRows - When this value is set to
1
, DMS ignores the first row header in a .csv file. Default is0
. - include
Op BooleanFor Full Load - Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is
false
. - max
File IntegerSize - Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from
1
to1048576
. Default is1048576
(1 GB). - parquet
Timestamp BooleanIn Millisecond - Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is
false
. - parquet
Version String - Version of the .parquet file format. Default is
parquet-1-0
. Valid values areparquet-1-0
andparquet-2-0
. - preserve
Transactions Boolean - Whether DMS saves the transaction order for a CDC load on the S3 target specified by
cdc_path
. Default isfalse
. - rfc4180 Boolean
- For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is
true
. - row
Group IntegerLength - Number of rows in a row group. Default is
10000
. - server
Side StringEncryption Kms Key Id - ARN or Id of KMS Key to use when
encryption_mode
isSSE_KMS
. - service
Access StringRole Arn - ARN of the IAM Role with permissions to read from or write to the S3 Bucket.
- timestamp
Column StringName - Column to add with timestamp information to the endpoint data for an Amazon S3 target.
- use
Csv BooleanNo Sup Value - Whether to use
csv_no_sup_value
for columns not included in the supplemental log. - use
Task BooleanStart Time For Full Load Timestamp - When set to true, uses the task start time as the timestamp column value instead of the time data is written to target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time data arrives at the target. Default is
false
.
- add
Column booleanName - Whether to add column name information to the .csv output file. Default is
false
. - bucket
Folder string - S3 object prefix.
- bucket
Name string - S3 bucket name.
- canned
Acl stringFor Objects - Predefined (canned) access control list for objects created in an S3 bucket. Valid values include
none
,private
,public-read
,public-read-write
,authenticated-read
,aws-exec-read
,bucket-owner-read
, andbucket-owner-full-control
. Default isnone
. - cdc
Inserts booleanAnd Updates - Whether to write insert and update operations to .csv or .parquet output files. Default is
false
. - cdc
Inserts booleanOnly - Whether to write insert operations to .csv or .parquet output files. Default is
false
. - cdc
Max numberBatch Interval - Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. Default is
60
. - cdc
Min numberFile Size - Minimum file size condition as defined in kilobytes to output a file to Amazon S3. Default is
32000
. NOTE: Previously, this setting was measured in megabytes but now represents kilobytes. Update configurations accordingly. - cdc
Path string - Folder path of CDC files. For an S3 source, this setting is required if a task captures change data; otherwise, it's optional. If
cdc_path
is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later. - compression
Type string - Set to compress target files. Default is
NONE
. Valid values areGZIP
andNONE
. - csv
Delimiter string - Delimiter used to separate columns in the source files. Default is
,
. - csv
No stringSup Value - String to use for all columns not included in the supplemental log.
- csv
Null stringValue - String to as null when writing to the target.
- csv
Row stringDelimiter - Delimiter used to separate rows in the source files. Default is
\n
. - data
Format string - Output format for the files that AWS DMS uses to create S3 objects. Valid values are
csv
andparquet
. Default iscsv
. - data
Page numberSize - Size of one data page in bytes. Default is
1048576
(1 MiB). - date
Partition stringDelimiter - Date separating delimiter to use during folder partitioning. Valid values are
SLASH
,UNDERSCORE
,DASH
, andNONE
. Default isSLASH
. - date
Partition booleanEnabled - Partition S3 bucket folders based on transaction commit dates. Default is
false
. - date
Partition stringSequence - Date format to use during folder partitioning. Use this parameter when
date_partition_enabled
is set to true. Valid values areYYYYMMDD
,YYYYMMDDHH
,YYYYMM
,MMYYYYDD
, andDDMMYYYY
. Default isYYYYMMDD
. - dict
Page numberSize Limit - Maximum size in bytes of an encoded dictionary page of a column. Default is
1048576
(1 MiB). - enable
Statistics boolean - Whether to enable statistics for Parquet pages and row groups. Default is
true
. - encoding
Type string - Type of encoding to use. Value values are
rle_dictionary
,plain
, andplain_dictionary
. Default isrle_dictionary
. - encryption
Mode string - Server-side encryption mode that you want to encrypt your .csv or .parquet object files copied to S3. Valid values are
SSE_S3
andSSE_KMS
. Default isSSE_S3
. - external
Table stringDefinition - JSON document that describes how AWS DMS should interpret the data.
- glue
Catalog booleanGeneration - Whether to integrate AWS Glue Data Catalog with an Amazon S3 target. See Using AWS Glue Data Catalog with an Amazon S3 target for AWS DMS for more information. Default is
false
. - ignore
Header numberRows - When this value is set to
1
, DMS ignores the first row header in a .csv file. Default is0
. - include
Op booleanFor Full Load - Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is
false
. - max
File numberSize - Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from
1
to1048576
. Default is1048576
(1 GB). - parquet
Timestamp booleanIn Millisecond - Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is
false
. - parquet
Version string - Version of the .parquet file format. Default is
parquet-1-0
. Valid values areparquet-1-0
andparquet-2-0
. - preserve
Transactions boolean - Whether DMS saves the transaction order for a CDC load on the S3 target specified by
cdc_path
. Default isfalse
. - rfc4180 boolean
- For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is
true
. - row
Group numberLength - Number of rows in a row group. Default is
10000
. - server
Side stringEncryption Kms Key Id - ARN or Id of KMS Key to use when
encryption_mode
isSSE_KMS
. - service
Access stringRole Arn - ARN of the IAM Role with permissions to read from or write to the S3 Bucket.
- timestamp
Column stringName - Column to add with timestamp information to the endpoint data for an Amazon S3 target.
- use
Csv booleanNo Sup Value - Whether to use
csv_no_sup_value
for columns not included in the supplemental log. - use
Task booleanStart Time For Full Load Timestamp - When set to true, uses the task start time as the timestamp column value instead of the time data is written to target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time data arrives at the target. Default is
false
.
- add_column_name (bool) - Whether to add column name information to the .csv output file. Default is false.
- bucket_folder (str) - S3 object prefix.
- bucket_name (str) - S3 bucket name.
- canned_acl_for_objects (str) - Predefined (canned) access control list for objects created in an S3 bucket. Valid values include none, private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, and bucket-owner-full-control. Default is none.
- cdc_inserts_and_updates (bool) - Whether to write insert and update operations to .csv or .parquet output files. Default is false.
- cdc_inserts_only (bool) - Whether to write insert operations to .csv or .parquet output files. Default is false.
- cdc_max_batch_interval (int) - Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. Default is 60.
- cdc_min_file_size (int) - Minimum file size condition as defined in kilobytes to output a file to Amazon S3. Default is 32000. NOTE: Previously, this setting was measured in megabytes but now represents kilobytes. Update configurations accordingly.
- cdc_path (str) - Folder path of CDC files. For an S3 source, this setting is required if a task captures change data; otherwise, it's optional. If cdc_path is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later.
- compression_type (str) - Set to compress target files. Default is NONE. Valid values are GZIP and NONE.
- csv_delimiter (str) - Delimiter used to separate columns in the source files. Default is ,.
- csv_no_sup_value (str) - String to use for all columns not included in the supplemental log.
- csv_null_value (str) - String to use as null when writing to the target.
- csv_row_delimiter (str) - Delimiter used to separate rows in the source files. Default is \n.
- data_format (str) - Output format for the files that AWS DMS uses to create S3 objects. Valid values are csv and parquet. Default is csv.
- data_page_size (int) - Size of one data page in bytes. Default is 1048576 (1 MiB).
- date_partition_delimiter (str) - Date separating delimiter to use during folder partitioning. Valid values are SLASH, UNDERSCORE, DASH, and NONE. Default is SLASH.
- date_partition_enabled (bool) - Partition S3 bucket folders based on transaction commit dates. Default is false.
- date_partition_sequence (str) - Date format to use during folder partitioning. Use this parameter when date_partition_enabled is set to true. Valid values are YYYYMMDD, YYYYMMDDHH, YYYYMM, MMYYYYDD, and DDMMYYYY. Default is YYYYMMDD.
- dict_page_size_limit (int) - Maximum size in bytes of an encoded dictionary page of a column. Default is 1048576 (1 MiB).
- enable_statistics (bool) - Whether to enable statistics for Parquet pages and row groups. Default is true.
- encoding_type (str) - Type of encoding to use. Valid values are rle_dictionary, plain, and plain_dictionary. Default is rle_dictionary.
- encryption_mode (str) - Server-side encryption mode to use for .csv or .parquet object files copied to S3. Valid values are SSE_S3 and SSE_KMS. Default is SSE_S3.
- external_table_definition (str) - JSON document that describes how AWS DMS should interpret the data.
- glue_catalog_generation (bool) - Whether to integrate AWS Glue Data Catalog with an Amazon S3 target. See Using AWS Glue Data Catalog with an Amazon S3 target for AWS DMS for more information. Default is false.
- ignore_header_rows (int) - When this value is set to 1, DMS ignores the first row header in a .csv file. Default is 0.
- include_op_for_full_load (bool) - Whether to enable a full load to write INSERT operations to the .csv output files only, to indicate how the rows were added to the source database. Default is false.
- max_file_size (int) - Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from 1 to 1048576. Default is 1048576 (1 GB).
- parquet_timestamp_in_millisecond (bool) - Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is false.
- parquet_version (str) - Version of the .parquet file format. Default is parquet-1-0. Valid values are parquet-1-0 and parquet-2-0.
- preserve_transactions (bool) - Whether DMS saves the transaction order for a CDC load on the S3 target specified by cdc_path. Default is false.
- rfc4180 (bool) - For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is true.
- row_group_length (int) - Number of rows in a row group. Default is 10000.
- server_side_encryption_kms_key_id (str) - ARN or Id of KMS Key to use when encryption_mode is SSE_KMS.
- service_access_role_arn (str) - ARN of the IAM Role with permissions to read from or write to the S3 Bucket.
- timestamp_column_name (str) - Column to add with timestamp information to the endpoint data for an Amazon S3 target.
- use_csv_no_sup_value (bool) - Whether to use csv_no_sup_value for columns not included in the supplemental log.
- use_task_start_time_for_full_load_timestamp (bool) - When set to true, uses the task start time as the timestamp column value instead of the time the data is written to the target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time data arrives at the target. Default is false.
- addColumnName (Boolean) - Whether to add column name information to the .csv output file. Default is false.
- bucketFolder (String) - S3 object prefix.
- bucketName (String) - S3 bucket name.
- cannedAclForObjects (String) - Predefined (canned) access control list for objects created in an S3 bucket. Valid values include none, private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, and bucket-owner-full-control. Default is none.
- cdcInsertsAndUpdates (Boolean) - Whether to write insert and update operations to .csv or .parquet output files. Default is false.
- cdcInsertsOnly (Boolean) - Whether to write insert operations to .csv or .parquet output files. Default is false.
- cdcMaxBatchInterval (Number) - Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. Default is 60.
- cdcMinFileSize (Number) - Minimum file size condition as defined in kilobytes to output a file to Amazon S3. Default is 32000. NOTE: Previously, this setting was measured in megabytes but now represents kilobytes. Update configurations accordingly.
- cdcPath (String) - Folder path of CDC files. For an S3 source, this setting is required if a task captures change data; otherwise, it's optional. If cdc_path is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later.
- compressionType (String) - Set to compress target files. Default is NONE. Valid values are GZIP and NONE.
- csvDelimiter (String) - Delimiter used to separate columns in the source files. Default is ,.
- csvNoSupValue (String) - String to use for all columns not included in the supplemental log.
- csvNullValue (String) - String to use as null when writing to the target.
- csvRowDelimiter (String) - Delimiter used to separate rows in the source files. Default is \n.
- dataFormat (String) - Output format for the files that AWS DMS uses to create S3 objects. Valid values are csv and parquet. Default is csv.
- dataPageSize (Number) - Size of one data page in bytes. Default is 1048576 (1 MiB).
- datePartitionDelimiter (String) - Date separating delimiter to use during folder partitioning. Valid values are SLASH, UNDERSCORE, DASH, and NONE. Default is SLASH.
- datePartitionEnabled (Boolean) - Partition S3 bucket folders based on transaction commit dates. Default is false.
- datePartitionSequence (String) - Date format to use during folder partitioning. Use this parameter when date_partition_enabled is set to true. Valid values are YYYYMMDD, YYYYMMDDHH, YYYYMM, MMYYYYDD, and DDMMYYYY. Default is YYYYMMDD.
- dictPageSizeLimit (Number) - Maximum size in bytes of an encoded dictionary page of a column. Default is 1048576 (1 MiB).
- enableStatistics (Boolean) - Whether to enable statistics for Parquet pages and row groups. Default is true.
- encodingType (String) - Type of encoding to use. Valid values are rle_dictionary, plain, and plain_dictionary. Default is rle_dictionary.
- encryptionMode (String) - Server-side encryption mode to use for .csv or .parquet object files copied to S3. Valid values are SSE_S3 and SSE_KMS. Default is SSE_S3.
- externalTableDefinition (String) - JSON document that describes how AWS DMS should interpret the data.
- glueCatalogGeneration (Boolean) - Whether to integrate AWS Glue Data Catalog with an Amazon S3 target. See Using AWS Glue Data Catalog with an Amazon S3 target for AWS DMS for more information. Default is false.
- ignoreHeaderRows (Number) - When this value is set to 1, DMS ignores the first row header in a .csv file. Default is 0.
- includeOpForFullLoad (Boolean) - Whether to enable a full load to write INSERT operations to the .csv output files only, to indicate how the rows were added to the source database. Default is false.
- maxFileSize (Number) - Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from 1 to 1048576. Default is 1048576 (1 GB).
- parquetTimestampInMillisecond (Boolean) - Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is false.
- parquetVersion (String) - Version of the .parquet file format. Default is parquet-1-0. Valid values are parquet-1-0 and parquet-2-0.
- preserveTransactions (Boolean) - Whether DMS saves the transaction order for a CDC load on the S3 target specified by cdc_path. Default is false.
- rfc4180 (Boolean) - For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is true.
- rowGroupLength (Number) - Number of rows in a row group. Default is 10000.
- serverSideEncryptionKmsKeyId (String) - ARN or Id of KMS Key to use when encryption_mode is SSE_KMS.
- serviceAccessRoleArn (String) - ARN of the IAM Role with permissions to read from or write to the S3 Bucket.
- timestampColumnName (String) - Column to add with timestamp information to the endpoint data for an Amazon S3 target.
- useCsvNoSupValue (Boolean) - Whether to use csv_no_sup_value for columns not included in the supplemental log.
- useTaskStartTimeForFullLoadTimestamp (Boolean) - When set to true, uses the task start time as the timestamp column value instead of the time the data is written to the target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time data arrives at the target. Default is false.
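As a quick reference for how several of these settings fit together, the sketch below shows an S3 target endpoint configured through the deprecated s3Settings argument (the aws.dms.S3Endpoint resource is the recommended replacement). It is illustrative only, not an example from this package's documentation: the endpoint identifier, bucket name, IAM role ARN, and KMS key ARN are placeholder assumptions.
import * as aws from "@pulumi/aws";
// Illustrative S3 target endpoint; all identifiers and ARNs below are placeholders.
const s3Target = new aws.dms.Endpoint("s3-target", {
    endpointId: "example-s3-target",
    endpointType: "target",
    engineName: "s3",
    s3Settings: {
        // IAM role that DMS assumes to write to the bucket (placeholder ARN).
        serviceAccessRoleArn: "arn:aws:iam::123456789012:role/dms-s3-access",
        bucketName: "example-dms-bucket",
        bucketFolder: "replication-output",
        // Write .parquet files instead of the default .csv output.
        dataFormat: "parquet",
        parquetVersion: "parquet-2-0",
        compressionType: "GZIP",
        // Encrypt objects with a customer-managed KMS key (placeholder ARN).
        encryptionMode: "SSE_KMS",
        serverSideEncryptionKmsKeyId: "arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012",
        // Partition folders by transaction commit date and add a timestamp column.
        datePartitionEnabled: true,
        datePartitionSequence: "YYYYMMDD",
        timestampColumnName: "replication_ts",
    },
});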
Import
Using pulumi import, import endpoints using the endpoint_id. For example:
$ pulumi import aws:dms/endpoint:Endpoint test test-dms-endpoint-tf
To learn more about importing existing cloud resources, see Importing resources.
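An existing endpoint can also be adopted from code with Pulumi's import resource option rather than the CLI. The sketch below is a hedged illustration: it assumes an endpoint with the endpoint_id test-dms-endpoint-tf already exists, and the endpointType and engineName values shown are assumptions that must match the live endpoint's configuration for the import to succeed.
import * as aws from "@pulumi/aws";
// Adopt the existing endpoint into the stack; once imported, the `import`
// option can be removed. Argument values must mirror the real endpoint.
const adopted = new aws.dms.Endpoint("test", {
    endpointId: "test-dms-endpoint-tf",
    endpointType: "source",   // assumption: match the existing endpoint
    engineName: "aurora",     // assumption: match the existing endpoint
}, { import: "test-dms-endpoint-tf" });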
Package Details
- Repository: AWS Classic pulumi/pulumi-aws
- License: Apache-2.0
- Notes: This Pulumi package is based on the aws Terraform Provider.