Aiven v6.18.0 published on Thursday, Jun 27, 2024 by Pulumi
aiven.getKafkaConnect
The Kafka Connect data source provides information about an existing Aiven Kafka Connect service.
Example Usage
The examples below reference an existing Aiven project, pr1, defined elsewhere in the program.
TypeScript
import * as pulumi from "@pulumi/pulumi";
import * as aiven from "@pulumi/aiven";
const kc1 = aiven.getKafkaConnect({
project: pr1.project,
serviceName: "my-kc1",
});
Python
import pulumi
import pulumi_aiven as aiven
kc1 = aiven.get_kafka_connect(project=pr1["project"],
service_name="my-kc1")
Go
package main
import (
"github.com/pulumi/pulumi-aiven/sdk/v6/go/aiven"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := aiven.LookupKafkaConnect(ctx, &aiven.LookupKafkaConnectArgs{
Project: pr1.Project,
ServiceName: "my-kc1",
}, nil)
if err != nil {
return err
}
return nil
})
}
C#
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aiven = Pulumi.Aiven;
return await Deployment.RunAsync(() =>
{
var kc1 = Aiven.GetKafkaConnect.Invoke(new()
{
Project = pr1.Project,
ServiceName = "my-kc1",
});
});
Java
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aiven.AivenFunctions;
import com.pulumi.aiven.inputs.GetKafkaConnectArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
final var kc1 = AivenFunctions.getKafkaConnect(GetKafkaConnectArgs.builder()
.project(pr1.project())
.serviceName("my-kc1")
.build());
}
}
YAML
variables:
kc1:
fn::invoke:
Function: aiven:getKafkaConnect
Arguments:
project: ${pr1.project}
serviceName: my-kc1
Using getKafkaConnect
Two invocation forms are available. The direct form accepts plain arguments and either blocks until the result value is available, or returns a Promise-wrapped result. The output form accepts Input-wrapped arguments and returns an Output-wrapped result.
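As a hedged TypeScript sketch of the difference (the project and service names are placeholders, and the aiven.Project resource named myProject is an assumption): the direct form takes plain strings, while the output form can consume values that are only known at deploy time.

import * as aiven from "@pulumi/aiven";

// Direct form: plain arguments, Promise-wrapped result.
const direct = aiven.getKafkaConnect({
    project: "my-project",
    serviceName: "my-kc1",
});

// Output form: accepts Input-wrapped arguments, e.g. an Output from a resource.
const myProject = new aiven.Project("my-project", { project: "my-project" });
const viaOutput = aiven.getKafkaConnectOutput({
    project: myProject.project,
    serviceName: "my-kc1",
});
export const host = viaOutput.serviceHost;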
TypeScript
function getKafkaConnect(args: GetKafkaConnectArgs, opts?: InvokeOptions): Promise<GetKafkaConnectResult>
function getKafkaConnectOutput(args: GetKafkaConnectOutputArgs, opts?: InvokeOptions): Output<GetKafkaConnectResult>
Python
def get_kafka_connect(project: Optional[str] = None,
service_name: Optional[str] = None,
opts: Optional[InvokeOptions] = None) -> GetKafkaConnectResult
def get_kafka_connect_output(project: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
opts: Optional[InvokeOptions] = None) -> Output[GetKafkaConnectResult]
Go
func LookupKafkaConnect(ctx *Context, args *LookupKafkaConnectArgs, opts ...InvokeOption) (*LookupKafkaConnectResult, error)
func LookupKafkaConnectOutput(ctx *Context, args *LookupKafkaConnectOutputArgs, opts ...InvokeOption) LookupKafkaConnectResultOutput
Note: This function is named LookupKafkaConnect in the Go SDK.
C#
public static class GetKafkaConnect
{
public static Task<GetKafkaConnectResult> InvokeAsync(GetKafkaConnectArgs args, InvokeOptions? opts = null)
public static Output<GetKafkaConnectResult> Invoke(GetKafkaConnectInvokeArgs args, InvokeOptions? opts = null)
}
Java
public static CompletableFuture<GetKafkaConnectResult> getKafkaConnect(GetKafkaConnectArgs args, InvokeOptions options)
// Output-based functions aren't available in Java yet
YAML
fn::invoke:
function: aiven:index/getKafkaConnect:getKafkaConnect
arguments:
# arguments dictionary
The following arguments are supported:
C#
- Project string - The name of the project this resource belongs to. To set up proper dependencies, refer to this variable as a reference. Changing this property forces recreation of the resource.
- ServiceName string - Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
Go
- Project string - The name of the project this resource belongs to. To set up proper dependencies, refer to this variable as a reference. Changing this property forces recreation of the resource.
- ServiceName string - Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
Java
- project String - The name of the project this resource belongs to. To set up proper dependencies, refer to this variable as a reference. Changing this property forces recreation of the resource.
- serviceName String - Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
TypeScript
- project string - The name of the project this resource belongs to. To set up proper dependencies, refer to this variable as a reference. Changing this property forces recreation of the resource.
- serviceName string - Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
Python
- project str - The name of the project this resource belongs to. To set up proper dependencies, refer to this variable as a reference. Changing this property forces recreation of the resource.
- service_name str - Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
YAML
- project String - The name of the project this resource belongs to. To set up proper dependencies, refer to this variable as a reference. Changing this property forces recreation of the resource.
- serviceName String - Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
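Both forms also accept invoke options as a final argument. A minimal TypeScript sketch pinning the lookup to an explicitly configured provider (the provider instance and the stack config key aivenApiToken are assumptions for illustration):

import * as pulumi from "@pulumi/pulumi";
import * as aiven from "@pulumi/aiven";

const config = new pulumi.Config();
// Hypothetical explicitly configured provider; the API token comes from stack config.
const aivenProvider = new aiven.Provider("aiven", {
    apiToken: config.requireSecret("aivenApiToken"),
});

// Pass InvokeOptions as the second argument to run the lookup against that provider.
const kc = aiven.getKafkaConnect({
    project: "my-project",
    serviceName: "my-kc1",
}, { provider: aivenProvider });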
getKafkaConnect Result
The following output properties are available:
C#
- AdditionalDiskSpace string - Additional disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
- CloudName string - Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value will trigger a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud provider's region name. These are documented in each cloud provider's own support articles.
- Components List<GetKafkaConnectComponent> - Service component information objects.
- DiskSpace string - Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
- DiskSpaceCap string - The maximum disk space of the service; possible values depend on the service type, the cloud provider and the project.
- DiskSpaceDefault string - The default disk space of the service; possible values depend on the service type, the cloud provider and the project. It is also the minimum value for disk_space.
- DiskSpaceStep string - The default disk space step of the service; possible values depend on the service type, the cloud provider and the project. disk_space needs to increment from disk_space_default in increments of this size.
- DiskSpaceUsed string - Disk space that the service is currently using.
- Id string - The provider-assigned unique ID for this managed resource.
- KafkaConnectUserConfigs List<GetKafkaConnectKafkaConnectUserConfig> - Kafka Connect user configurable settings.
- MaintenanceWindowDow string - Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- MaintenanceWindowTime string - Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- Plan string - Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and amount of disk space also vary, but naming is based on memory). The available options can be seen on the Aiven pricing page.
- Project string - The name of the project this resource belongs to. To set up proper dependencies, refer to this variable as a reference. Changing this property forces recreation of the resource.
- ProjectVpcId string - Specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved to and from a VPC after creation, but doing so triggers migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- ServiceHost string - The hostname of the service.
- ServiceIntegrations List<GetKafkaConnectServiceIntegration> - Service integrations to specify when creating a service. Not applied after initial service creation.
- ServiceName string - Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
- ServicePassword string - Password used for connecting to the service, if applicable.
- ServicePort int - The port of the service.
- ServiceType string - Aiven internal service type code.
- ServiceUri string - URI for connecting to the service. Service-specific info is under "kafka", "pg", etc.
- ServiceUsername string - Username used for connecting to the service, if applicable.
- State string - Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING.
- StaticIps List<string> - Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static IP resource is in the 'assigned' state it cannot be unbound from the node again.
- Tags List<GetKafkaConnectTag> - Tags are key-value pairs that allow you to categorize services.
- TechEmails List<GetKafkaConnectTechEmail> - The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
- TerminationProtection bool - Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup in case of accidental deletion.
Go
- AdditionalDiskSpace string - Additional disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
- CloudName string - Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value will trigger a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud provider's region name. These are documented in each cloud provider's own support articles.
- Components []GetKafkaConnectComponent - Service component information objects.
- DiskSpace string - Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
- DiskSpaceCap string - The maximum disk space of the service; possible values depend on the service type, the cloud provider and the project.
- DiskSpaceDefault string - The default disk space of the service; possible values depend on the service type, the cloud provider and the project. It is also the minimum value for disk_space.
- DiskSpaceStep string - The default disk space step of the service; possible values depend on the service type, the cloud provider and the project. disk_space needs to increment from disk_space_default in increments of this size.
- DiskSpaceUsed string - Disk space that the service is currently using.
- Id string - The provider-assigned unique ID for this managed resource.
- KafkaConnectUserConfigs []GetKafkaConnectKafkaConnectUserConfig - Kafka Connect user configurable settings.
- MaintenanceWindowDow string - Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- MaintenanceWindowTime string - Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- Plan string - Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and amount of disk space also vary, but naming is based on memory). The available options can be seen on the Aiven pricing page.
- Project string - The name of the project this resource belongs to. To set up proper dependencies, refer to this variable as a reference. Changing this property forces recreation of the resource.
- ProjectVpcId string - Specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved to and from a VPC after creation, but doing so triggers migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- ServiceHost string - The hostname of the service.
- ServiceIntegrations []GetKafkaConnectServiceIntegration - Service integrations to specify when creating a service. Not applied after initial service creation.
- ServiceName string - Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
- ServicePassword string - Password used for connecting to the service, if applicable.
- ServicePort int - The port of the service.
- ServiceType string - Aiven internal service type code.
- ServiceUri string - URI for connecting to the service. Service-specific info is under "kafka", "pg", etc.
- ServiceUsername string - Username used for connecting to the service, if applicable.
- State string - Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING.
- StaticIps []string - Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static IP resource is in the 'assigned' state it cannot be unbound from the node again.
- Tags []GetKafkaConnectTag - Tags are key-value pairs that allow you to categorize services.
- TechEmails []GetKafkaConnectTechEmail - The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
- TerminationProtection bool - Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup in case of accidental deletion.
Java
- additionalDiskSpace String - Additional disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
- cloudName String - Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value will trigger a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud provider's region name. These are documented in each cloud provider's own support articles.
- components List<GetKafkaConnectComponent> - Service component information objects.
- diskSpace String - Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
- diskSpaceCap String - The maximum disk space of the service; possible values depend on the service type, the cloud provider and the project.
- diskSpaceDefault String - The default disk space of the service; possible values depend on the service type, the cloud provider and the project. It is also the minimum value for disk_space.
- diskSpaceStep String - The default disk space step of the service; possible values depend on the service type, the cloud provider and the project. disk_space needs to increment from disk_space_default in increments of this size.
- diskSpaceUsed String - Disk space that the service is currently using.
- id String - The provider-assigned unique ID for this managed resource.
- kafkaConnectUserConfigs List<GetKafkaConnectKafkaConnectUserConfig> - Kafka Connect user configurable settings.
- maintenanceWindowDow String - Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- maintenanceWindowTime String - Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- plan String - Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and amount of disk space also vary, but naming is based on memory). The available options can be seen on the Aiven pricing page.
- project String - The name of the project this resource belongs to. To set up proper dependencies, refer to this variable as a reference. Changing this property forces recreation of the resource.
- projectVpcId String - Specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved to and from a VPC after creation, but doing so triggers migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- serviceHost String - The hostname of the service.
- serviceIntegrations List<GetKafkaConnectServiceIntegration> - Service integrations to specify when creating a service. Not applied after initial service creation.
- serviceName String - Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
- servicePassword String - Password used for connecting to the service, if applicable.
- servicePort Integer - The port of the service.
- serviceType String - Aiven internal service type code.
- serviceUri String - URI for connecting to the service. Service-specific info is under "kafka", "pg", etc.
- serviceUsername String - Username used for connecting to the service, if applicable.
- state String - Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING.
- staticIps List<String> - Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static IP resource is in the 'assigned' state it cannot be unbound from the node again.
- tags List<GetKafkaConnectTag> - Tags are key-value pairs that allow you to categorize services.
- techEmails List<GetKafkaConnectTechEmail> - The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
- terminationProtection Boolean - Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup in case of accidental deletion.
TypeScript
- additionalDiskSpace string - Additional disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
- cloudName string - Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value will trigger a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud provider's region name. These are documented in each cloud provider's own support articles.
- components GetKafkaConnectComponent[] - Service component information objects.
- diskSpace string - Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
- diskSpaceCap string - The maximum disk space of the service; possible values depend on the service type, the cloud provider and the project.
- diskSpaceDefault string - The default disk space of the service; possible values depend on the service type, the cloud provider and the project. It is also the minimum value for disk_space.
- diskSpaceStep string - The default disk space step of the service; possible values depend on the service type, the cloud provider and the project. disk_space needs to increment from disk_space_default in increments of this size.
- diskSpaceUsed string - Disk space that the service is currently using.
- id string - The provider-assigned unique ID for this managed resource.
- kafkaConnectUserConfigs GetKafkaConnectKafkaConnectUserConfig[] - Kafka Connect user configurable settings.
- maintenanceWindowDow string - Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- maintenanceWindowTime string - Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- plan string - Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and amount of disk space also vary, but naming is based on memory). The available options can be seen on the Aiven pricing page.
- project string - The name of the project this resource belongs to. To set up proper dependencies, refer to this variable as a reference. Changing this property forces recreation of the resource.
- projectVpcId string - Specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved to and from a VPC after creation, but doing so triggers migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- serviceHost string - The hostname of the service.
- serviceIntegrations GetKafkaConnectServiceIntegration[] - Service integrations to specify when creating a service. Not applied after initial service creation.
- serviceName string - Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
- servicePassword string - Password used for connecting to the service, if applicable.
- servicePort number - The port of the service.
- serviceType string - Aiven internal service type code.
- serviceUri string - URI for connecting to the service. Service-specific info is under "kafka", "pg", etc.
- serviceUsername string - Username used for connecting to the service, if applicable.
- state string - Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING.
- staticIps string[] - Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static IP resource is in the 'assigned' state it cannot be unbound from the node again.
- tags GetKafkaConnectTag[] - Tags are key-value pairs that allow you to categorize services.
- techEmails GetKafkaConnectTechEmail[] - The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
- terminationProtection boolean - Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup in case of accidental deletion.
Python
- additional_disk_space str - Additional disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
- cloud_name str - Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value will trigger a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud provider's region name. These are documented in each cloud provider's own support articles.
- components Sequence[GetKafkaConnectComponent] - Service component information objects.
- disk_space str - Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
- disk_space_cap str - The maximum disk space of the service; possible values depend on the service type, the cloud provider and the project.
- disk_space_default str - The default disk space of the service; possible values depend on the service type, the cloud provider and the project. It is also the minimum value for disk_space.
- disk_space_step str - The default disk space step of the service; possible values depend on the service type, the cloud provider and the project. disk_space needs to increment from disk_space_default in increments of this size.
- disk_space_used str - Disk space that the service is currently using.
- id str - The provider-assigned unique ID for this managed resource.
- kafka_connect_user_configs Sequence[GetKafkaConnectKafkaConnectUserConfig] - Kafka Connect user configurable settings.
- maintenance_window_dow str - Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- maintenance_window_time str - Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- plan str - Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and amount of disk space also vary, but naming is based on memory). The available options can be seen on the Aiven pricing page.
- project str - The name of the project this resource belongs to. To set up proper dependencies, refer to this variable as a reference. Changing this property forces recreation of the resource.
- project_vpc_id str - Specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved to and from a VPC after creation, but doing so triggers migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- service_host str - The hostname of the service.
- service_integrations Sequence[GetKafkaConnectServiceIntegration] - Service integrations to specify when creating a service. Not applied after initial service creation.
- service_name str - Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
- service_password str - Password used for connecting to the service, if applicable.
- service_port int - The port of the service.
- service_type str - Aiven internal service type code.
- service_uri str - URI for connecting to the service. Service-specific info is under "kafka", "pg", etc.
- service_username str - Username used for connecting to the service, if applicable.
- state str - Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING.
- static_ips Sequence[str] - Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static IP resource is in the 'assigned' state it cannot be unbound from the node again.
- tags Sequence[GetKafkaConnectTag] - Tags are key-value pairs that allow you to categorize services.
- tech_emails Sequence[GetKafkaConnectTechEmail] - The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
- termination_protection bool - Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup in case of accidental deletion.
YAML
- additionalDiskSpace String - Additional disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
- cloudName String - Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value will trigger a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud provider's region name. These are documented in each cloud provider's own support articles.
- components List<Property Map> - Service component information objects.
- diskSpace String - Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
- diskSpaceCap String - The maximum disk space of the service; possible values depend on the service type, the cloud provider and the project.
- diskSpaceDefault String - The default disk space of the service; possible values depend on the service type, the cloud provider and the project. It is also the minimum value for disk_space.
- diskSpaceStep String - The default disk space step of the service; possible values depend on the service type, the cloud provider and the project. disk_space needs to increment from disk_space_default in increments of this size.
- diskSpaceUsed String - Disk space that the service is currently using.
- id String - The provider-assigned unique ID for this managed resource.
- kafkaConnectUserConfigs List<Property Map> - Kafka Connect user configurable settings.
- maintenanceWindowDow String - Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- maintenanceWindowTime String - Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- plan String - Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and amount of disk space also vary, but naming is based on memory). The available options can be seen on the Aiven pricing page.
- project String - The name of the project this resource belongs to. To set up proper dependencies, refer to this variable as a reference. Changing this property forces recreation of the resource.
- projectVpcId String - Specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved to and from a VPC after creation, but doing so triggers migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- serviceHost String - The hostname of the service.
- serviceIntegrations List<Property Map> - Service integrations to specify when creating a service. Not applied after initial service creation.
- serviceName String - Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
- servicePassword String - Password used for connecting to the service, if applicable.
- servicePort Number - The port of the service.
- serviceType String - Aiven internal service type code.
- serviceUri String - URI for connecting to the service. Service-specific info is under "kafka", "pg", etc.
- serviceUsername String - Username used for connecting to the service, if applicable.
- state String - Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING.
- staticIps List<String> - Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static IP resource is in the 'assigned' state it cannot be unbound from the node again.
- tags List<Property Map> - Tags are key-value pairs that allow you to categorize services.
- techEmails List<Property Map> - The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
- terminationProtection Boolean - Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup in case of accidental deletion.
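As a small TypeScript sketch of consuming these outputs (project and service names are placeholders; serviceUri is wrapped as a secret on the assumption that it may embed credentials):

import * as pulumi from "@pulumi/pulumi";
import * as aiven from "@pulumi/aiven";

const kc = aiven.getKafkaConnectOutput({
    project: "my-project",
    serviceName: "my-kc1",
});

export const host = kc.serviceHost;   // hostname of the service
export const port = kc.servicePort;   // service port
export const state = kc.state;        // e.g. RUNNING
// The URI can contain credentials, so mark it secret before exporting.
export const uri = pulumi.secret(kc.serviceUri);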
Supporting Types
GetKafkaConnectComponent
C#
- Component string - Service component name.
- ConnectionUri string - Connection info for connecting to the service component. This is a combination of host and port.
- Host string - Host name for connecting to the service component.
- KafkaAuthenticationMethod string - Kafka authentication method. This is a value specific to the 'kafka' service component.
- Port int - Port number for connecting to the service component.
- Route string - Network access route.
- Ssl bool - Whether the endpoint is encrypted or accepts plaintext. By default, endpoints are always encrypted; this property is only included for service components that may disable encryption.
- Usage string - DNS usage name.
Go
- Component string - Service component name.
- ConnectionUri string - Connection info for connecting to the service component. This is a combination of host and port.
- Host string - Host name for connecting to the service component.
- KafkaAuthenticationMethod string - Kafka authentication method. This is a value specific to the 'kafka' service component.
- Port int - Port number for connecting to the service component.
- Route string - Network access route.
- Ssl bool - Whether the endpoint is encrypted or accepts plaintext. By default, endpoints are always encrypted; this property is only included for service components that may disable encryption.
- Usage string - DNS usage name.
Java
- component String - Service component name.
- connectionUri String - Connection info for connecting to the service component. This is a combination of host and port.
- host String - Host name for connecting to the service component.
- kafkaAuthenticationMethod String - Kafka authentication method. This is a value specific to the 'kafka' service component.
- port Integer - Port number for connecting to the service component.
- route String - Network access route.
- ssl Boolean - Whether the endpoint is encrypted or accepts plaintext. By default, endpoints are always encrypted; this property is only included for service components that may disable encryption.
- usage String - DNS usage name.
TypeScript
- component string - Service component name.
- connectionUri string - Connection info for connecting to the service component. This is a combination of host and port.
- host string - Host name for connecting to the service component.
- kafkaAuthenticationMethod string - Kafka authentication method. This is a value specific to the 'kafka' service component.
- port number - Port number for connecting to the service component.
- route string - Network access route.
- ssl boolean - Whether the endpoint is encrypted or accepts plaintext. By default, endpoints are always encrypted; this property is only included for service components that may disable encryption.
- usage string - DNS usage name.
Python
- component str - Service component name.
- connection_uri str - Connection info for connecting to the service component. This is a combination of host and port.
- host str - Host name for connecting to the service component.
- kafka_authentication_method str - Kafka authentication method. This is a value specific to the 'kafka' service component.
- port int - Port number for connecting to the service component.
- route str - Network access route.
- ssl bool - Whether the endpoint is encrypted or accepts plaintext. By default, endpoints are always encrypted; this property is only included for service components that may disable encryption.
- usage str - DNS usage name.
YAML
- component String - Service component name.
- connectionUri String - Connection info for connecting to the service component. This is a combination of host and port.
- host String - Host name for connecting to the service component.
- kafkaAuthenticationMethod String - Kafka authentication method. This is a value specific to the 'kafka' service component.
- port Number - Port number for connecting to the service component.
- route String - Network access route.
- ssl Boolean - Whether the endpoint is encrypted or accepts plaintext. By default, endpoints are always encrypted; this property is only included for service components that may disable encryption.
- usage String - DNS usage name.
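A sketch of reading these component objects in TypeScript; the component name "kafka_connect" is an assumption about what the API returns for this service type:

import * as aiven from "@pulumi/aiven";

const kc = aiven.getKafkaConnectOutput({
    project: "my-project",
    serviceName: "my-kc1",
});

// Pick out one component entry and surface its connection info.
const connect = kc.components.apply(cs =>
    cs.find(c => c.component === "kafka_connect"));
export const componentHost = connect.apply(c => c?.host);
export const componentPort = connect.apply(c => c?.port);
export const componentSsl = connect.apply(c => c?.ssl);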
GetKafkaConnectKafkaConnectUserConfig
C#
- AdditionalBackupRegions string - Additional Cloud Regions for Backup Replication.
- IpFilterObjects List<GetKafkaConnectKafkaConnectUserConfigIpFilterObject> - Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- IpFilterStrings List<string> - Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- IpFilters List<string> - Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- KafkaConnect GetKafkaConnectKafkaConnectUserConfigKafkaConnect - Kafka Connect configuration values.
- PrivateAccess GetKafkaConnectKafkaConnectUserConfigPrivateAccess - Allow access to selected service ports from private networks.
- PrivatelinkAccess GetKafkaConnectKafkaConnectUserConfigPrivatelinkAccess - Allow access to selected service components through Privatelink.
- PublicAccess GetKafkaConnectKafkaConnectUserConfigPublicAccess - Allow access to selected service ports from the public Internet.
- SecretProviders List<GetKafkaConnectKafkaConnectUserConfigSecretProvider>
- ServiceLog bool - Store logs for the service so that they are available in the HTTP API and console.
- StaticIps bool - Use static public IP addresses.
Go
- AdditionalBackupRegions string - Additional Cloud Regions for Backup Replication.
- IpFilterObjects []GetKafkaConnectKafkaConnectUserConfigIpFilterObject - Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- IpFilterStrings []string - Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- IpFilters []string - Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- KafkaConnect GetKafkaConnectKafkaConnectUserConfigKafkaConnect - Kafka Connect configuration values.
- PrivateAccess GetKafkaConnectKafkaConnectUserConfigPrivateAccess - Allow access to selected service ports from private networks.
- PrivatelinkAccess GetKafkaConnectKafkaConnectUserConfigPrivatelinkAccess - Allow access to selected service components through Privatelink.
- PublicAccess GetKafkaConnectKafkaConnectUserConfigPublicAccess - Allow access to selected service ports from the public Internet.
- SecretProviders []GetKafkaConnectKafkaConnectUserConfigSecretProvider
- ServiceLog bool - Store logs for the service so that they are available in the HTTP API and console.
- StaticIps bool - Use static public IP addresses.
Java
- additionalBackupRegions String - Additional Cloud Regions for Backup Replication.
- ipFilterObjects List<GetKafkaConnectKafkaConnectUserConfigIpFilterObject> - Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- ipFilterStrings List<String> - Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- ipFilters List<String> - Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- kafkaConnect GetKafkaConnectKafkaConnectUserConfigKafkaConnect - Kafka Connect configuration values.
- privateAccess GetKafkaConnectKafkaConnectUserConfigPrivateAccess - Allow access to selected service ports from private networks.
- privatelinkAccess GetKafkaConnectKafkaConnectUserConfigPrivatelinkAccess - Allow access to selected service components through Privatelink.
- publicAccess GetKafkaConnectKafkaConnectUserConfigPublicAccess - Allow access to selected service ports from the public Internet.
- secretProviders List<GetKafkaConnectKafkaConnectUserConfigSecretProvider>
- serviceLog Boolean - Store logs for the service so that they are available in the HTTP API and console.
- staticIps Boolean - Use static public IP addresses.
TypeScript
- additionalBackupRegions string - Additional Cloud Regions for Backup Replication.
- ipFilterObjects GetKafkaConnectKafkaConnectUserConfigIpFilterObject[] - Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- ipFilterStrings string[] - Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- ipFilters string[] - Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- kafkaConnect GetKafkaConnectKafkaConnectUserConfigKafkaConnect - Kafka Connect configuration values.
- privateAccess GetKafkaConnectKafkaConnectUserConfigPrivateAccess - Allow access to selected service ports from private networks.
- privatelinkAccess GetKafkaConnectKafkaConnectUserConfigPrivatelinkAccess - Allow access to selected service components through Privatelink.
- publicAccess GetKafkaConnectKafkaConnectUserConfigPublicAccess - Allow access to selected service ports from the public Internet.
- secretProviders GetKafkaConnectKafkaConnectUserConfigSecretProvider[]
- serviceLog boolean - Store logs for the service so that they are available in the HTTP API and console.
- staticIps boolean - Use static public IP addresses.
Python
- additional_backup_regions str - Additional Cloud Regions for Backup Replication.
- ip_filter_objects Sequence[GetKafkaConnectKafkaConnectUserConfigIpFilterObject] - Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- ip_filter_strings Sequence[str] - Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- ip_filters Sequence[str] - Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- kafka_connect GetKafkaConnectKafkaConnectUserConfigKafkaConnect - Kafka Connect configuration values.
- private_access GetKafkaConnectKafkaConnectUserConfigPrivateAccess - Allow access to selected service ports from private networks.
- privatelink_access GetKafkaConnectKafkaConnectUserConfigPrivatelinkAccess - Allow access to selected service components through Privatelink.
- public_access GetKafkaConnectKafkaConnectUserConfigPublicAccess - Allow access to selected service ports from the public Internet.
- secret_providers Sequence[GetKafkaConnectKafkaConnectUserConfigSecretProvider]
- service_log bool - Store logs for the service so that they are available in the HTTP API and console.
- static_ips bool - Use static public IP addresses.
YAML
- additionalBackupRegions String - Additional Cloud Regions for Backup Replication.
- ipFilterObjects List<Property Map> - Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- ipFilterStrings List<String> - Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- ipFilters List<String> - Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- kafkaConnect Property Map - Kafka Connect configuration values.
- privateAccess Property Map - Allow access to selected service ports from private networks.
- privatelinkAccess Property Map - Allow access to selected service components through Privatelink.
- publicAccess Property Map - Allow access to selected service ports from the public Internet.
- secretProviders List<Property Map>
- serviceLog Boolean - Store logs for the service so that they are available in the HTTP API and console.
- staticIps Boolean - Use static public IP addresses.
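Reading these settings back from the data source looks like the following TypeScript sketch; kafkaConnectUserConfigs is a list, and treating its first element as the service's config is an assumption:

import * as aiven from "@pulumi/aiven";

const kc = aiven.getKafkaConnectOutput({
    project: "my-project",
    serviceName: "my-kc1",
});

// Surface the configured IP filters and the service-log setting, if present.
const userConfig = kc.kafkaConnectUserConfigs.apply(cfgs => cfgs[0]);
export const ipFilters = userConfig.apply(c => c?.ipFilterStrings ?? []);
export const serviceLogsEnabled = userConfig.apply(c => c?.serviceLog);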
GetKafkaConnectKafkaConnectUserConfigIpFilterObject
C#
- Network string - CIDR address block. Example: 10.20.0.0/16.
- Description string - Description for IP filter list entry. Example: Production service IP range.
Go
- Network string - CIDR address block. Example: 10.20.0.0/16.
- Description string - Description for IP filter list entry. Example: Production service IP range.
Java
- network String - CIDR address block. Example: 10.20.0.0/16.
- description String - Description for IP filter list entry. Example: Production service IP range.
TypeScript
- network string - CIDR address block. Example: 10.20.0.0/16.
- description string - Description for IP filter list entry. Example: Production service IP range.
Python
- network str - CIDR address block. Example: 10.20.0.0/16.
- description str - Description for IP filter list entry. Example: Production service IP range.
YAML
- network String - CIDR address block. Example: 10.20.0.0/16.
- description String - Description for IP filter list entry. Example: Production service IP range.
GetKafkaConnectKafkaConnectUserConfigKafkaConnect
C#
- ConnectorClientConfigOverridePolicy string - Enum: None, All. Defines what client configurations can be overridden by the connector. Default is None.
- ConsumerAutoOffsetReset string - Enum: earliest, latest. What to do when there is no initial offset in Kafka or if the current offset no longer exists on the server. Default is earliest.
- ConsumerFetchMaxBytes int - Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum. Example: 52428800.
- ConsumerIsolationLevel string - Enum: read_uncommitted, read_committed. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
- ConsumerMaxPartitionFetchBytes int - Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: 1048576.
- ConsumerMaxPollIntervalMs int - The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
- ConsumerMaxPollRecords int - The maximum number of records returned in a single call to poll() (defaults to 500).
- OffsetFlushIntervalMs int - The interval at which to try committing offsets for tasks (defaults to 60000).
- OffsetFlushTimeoutMs int - Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
- ProducerBatchSize int - This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will linger for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
- ProducerBufferMemory int - The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
- ProducerCompressionType string - Enum: gzip, snappy, lz4, zstd, none. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none, which is the default and equivalent to no compression.
- ProducerLingerMs int - This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition, the producer will linger for the specified time waiting for more records to show up. Defaults to 0.
- ProducerMaxRequestSize int - This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: 1048576.
- ScheduledRebalanceMaxDelayMs int - The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
- SessionTimeoutMs int - The timeout in milliseconds used to detect failures when using Kafka's group management facilities (defaults to 10000).
Go
- ConnectorClientConfigOverridePolicy string - Enum: None, All. Defines what client configurations can be overridden by the connector. Default is None.
- ConsumerAutoOffsetReset string - Enum: earliest, latest. What to do when there is no initial offset in Kafka or if the current offset no longer exists on the server. Default is earliest.
- ConsumerFetchMaxBytes int - Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum. Example: 52428800.
- ConsumerIsolationLevel string - Enum: read_uncommitted, read_committed. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
- ConsumerMaxPartitionFetchBytes int - Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: 1048576.
- ConsumerMaxPollIntervalMs int - The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
- ConsumerMaxPollRecords int - The maximum number of records returned in a single call to poll() (defaults to 500).
- OffsetFlushIntervalMs int - The interval at which to try committing offsets for tasks (defaults to 60000).
- OffsetFlushTimeoutMs int - Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
- ProducerBatchSize int - This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will linger for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
- ProducerBufferMemory int - The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
- ProducerCompressionType string - Enum: gzip, snappy, lz4, zstd, none. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none, which is the default and equivalent to no compression.
- ProducerLingerMs int - This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition, the producer will linger for the specified time waiting for more records to show up. Defaults to 0.
- ProducerMaxRequestSize int - This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: 1048576.
- ScheduledRebalanceMaxDelayMs int - The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
- SessionTimeoutMs int - The timeout in milliseconds used to detect failures when using Kafka's group management facilities (defaults to 10000).
- connectorClientConfigOverridePolicy String - Enum: None, All. Defines what client configurations can be overridden by the connector. Default is None.
- consumerAutoOffsetReset String - Enum: earliest, latest. What to do when there is no initial offset in Kafka or if the current offset no longer exists on the server. Default is earliest.
- consumerFetchMaxBytes Integer - Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum. Example: 52428800.
- consumerIsolationLevel String - Enum: read_uncommitted, read_committed. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
- consumerMaxPartitionFetchBytes Integer - Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: 1048576.
- consumerMaxPollIntervalMs Integer - The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
- consumerMaxPollRecords Integer - The maximum number of records returned in a single call to poll() (defaults to 500).
- offsetFlushIntervalMs Integer - The interval at which to try committing offsets for tasks (defaults to 60000).
- offsetFlushTimeoutMs Integer - Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
- producerBatchSize Integer - This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will linger for the linger.ms time waiting for more records to show up. A batch size of zero disables batching entirely (defaults to 16384).
- producerBufferMemory Integer - The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
- producerCompressionType String - Enum: gzip, snappy, lz4, zstd, none. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none, which is the default and equivalent to no compression.
- producerLingerMs Integer - This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition, it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition, the producer will linger for the specified time waiting for more records to show up. Defaults to 0.
- producerMaxRequestSize Integer - This setting limits the number of record batches the producer will send in a single request to avoid sending huge requests. Example: 1048576.
- scheduledRebalanceMaxDelayMs Integer - The maximum delay scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
- sessionTimeoutMs Integer - The timeout in milliseconds used to detect failures when using Kafka's group management facilities (defaults to 10000).
- connectorClientConfigOverridePolicy string - Enum: None, All. Defines what client configurations can be overridden by the connector. Default is None.
- consumerAutoOffsetReset string - Enum: earliest, latest. What to do when there is no initial offset in Kafka or if the current offset no longer exists on the server. Default is earliest.
- consumerFetchMaxBytes number - Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum. Example: 52428800.
- consumerIsolationLevel string - Enum: read_uncommitted, read_committed. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
- consumerMaxPartitionFetchBytes number - Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: 1048576.
- consumerMaxPollIntervalMs number - The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
- consumerMaxPollRecords number - The maximum number of records returned in a single call to poll() (defaults to 500).
- offsetFlushIntervalMs number - The interval at which to try committing offsets for tasks (defaults to 60000).
- offsetFlushTimeoutMs number - Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
- producerBatchSize number - This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will linger for the linger.ms time waiting for more records to show up. A batch size of zero disables batching entirely (defaults to 16384).
- producerBufferMemory number - The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
- producerCompressionType string - Enum: gzip, snappy, lz4, zstd, none. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none, which is the default and equivalent to no compression.
- producerLingerMs number - This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition, it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition, the producer will linger for the specified time waiting for more records to show up. Defaults to 0.
- producerMaxRequestSize number - This setting limits the number of record batches the producer will send in a single request to avoid sending huge requests. Example: 1048576.
- scheduledRebalanceMaxDelayMs number - The maximum delay scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
- sessionTimeoutMs number - The timeout in milliseconds used to detect failures when using Kafka's group management facilities (defaults to 10000).
- connector_client_config_override_policy str - Enum: None, All. Defines what client configurations can be overridden by the connector. Default is None.
- consumer_auto_offset_reset str - Enum: earliest, latest. What to do when there is no initial offset in Kafka or if the current offset no longer exists on the server. Default is earliest.
- consumer_fetch_max_bytes int - Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum. Example: 52428800.
- consumer_isolation_level str - Enum: read_uncommitted, read_committed. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
- consumer_max_partition_fetch_bytes int - Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: 1048576.
- consumer_max_poll_interval_ms int - The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
- consumer_max_poll_records int - The maximum number of records returned in a single call to poll() (defaults to 500).
- offset_flush_interval_ms int - The interval at which to try committing offsets for tasks (defaults to 60000).
- offset_flush_timeout_ms int - Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
- producer_batch_size int - This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will linger for the linger.ms time waiting for more records to show up. A batch size of zero disables batching entirely (defaults to 16384).
- producer_buffer_memory int - The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
- producer_compression_type str - Enum: gzip, snappy, lz4, zstd, none. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none, which is the default and equivalent to no compression.
- producer_linger_ms int - This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition, it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition, the producer will linger for the specified time waiting for more records to show up. Defaults to 0.
- producer_max_request_size int - This setting limits the number of record batches the producer will send in a single request to avoid sending huge requests. Example: 1048576.
- scheduled_rebalance_max_delay_ms int - The maximum delay scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
- session_timeout_ms int - The timeout in milliseconds used to detect failures when using Kafka's group management facilities (defaults to 10000).
- connectorClientConfigOverridePolicy String - Enum: None, All. Defines what client configurations can be overridden by the connector. Default is None.
- consumerAutoOffsetReset String - Enum: earliest, latest. What to do when there is no initial offset in Kafka or if the current offset no longer exists on the server. Default is earliest.
- consumerFetchMaxBytes Number - Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum. Example: 52428800.
- consumerIsolationLevel String - Enum: read_uncommitted, read_committed. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
- consumerMaxPartitionFetchBytes Number - Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: 1048576.
- consumerMaxPollIntervalMs Number - The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
- consumerMaxPollRecords Number - The maximum number of records returned in a single call to poll() (defaults to 500).
- offsetFlushIntervalMs Number - The interval at which to try committing offsets for tasks (defaults to 60000).
- offsetFlushTimeoutMs Number - Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
- producerBatchSize Number - This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will linger for the linger.ms time waiting for more records to show up. A batch size of zero disables batching entirely (defaults to 16384).
- producerBufferMemory Number - The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
- producerCompressionType String - Enum: gzip, snappy, lz4, zstd, none. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none, which is the default and equivalent to no compression.
- producerLingerMs Number - This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition, it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition, the producer will linger for the specified time waiting for more records to show up. Defaults to 0.
- producerMaxRequestSize Number - This setting limits the number of record batches the producer will send in a single request to avoid sending huge requests. Example: 1048576.
- scheduledRebalanceMaxDelayMs Number - The maximum delay scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
- sessionTimeoutMs Number - The timeout in milliseconds used to detect failures when using Kafka's group management facilities (defaults to 10000).
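These worker tuning values surface on the data source result. A minimal TypeScript sketch of reading a few of them back, assuming the result exposes the user config under kafkaConnectUserConfigs (a property name inferred from the type names on this page, not confirmed by it); the project and service names are placeholders:
import * as aiven from "@pulumi/aiven";

// Look up the service and export a few effective producer/consumer settings.
// Values may be undefined when the service relies on the server-side
// defaults listed above.
const kc = aiven.getKafkaConnectOutput({
    project: "my-project",   // placeholder
    serviceName: "my-kc1",   // placeholder
});

export const workerTuning = kc.kafkaConnectUserConfigs.apply(configs => ({
    compressionType: configs[0]?.producerCompressionType, // gzip|snappy|lz4|zstd|none
    lingerMs: configs[0]?.producerLingerMs,               // defaults to 0
    maxPollRecords: configs[0]?.consumerMaxPollRecords,   // defaults to 500
}));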
GetKafkaConnectKafkaConnectUserConfigPrivateAccess
- KafkaConnect bool - Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- Prometheus bool - Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- KafkaConnect bool - Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- Prometheus bool - Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- kafkaConnect Boolean - Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- prometheus Boolean - Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- kafkaConnect boolean - Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- prometheus boolean - Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- kafka_connect bool - Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- prometheus bool - Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- kafkaConnect Boolean - Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- prometheus Boolean - Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
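A short sketch of checking these flags with the direct (Promise) invocation form; the privateAccess path under kafkaConnectUserConfigs is assumed from the type names on this page, and the project and service names are placeholders:
import * as aiven from "@pulumi/aiven";

// True when the kafka_connect endpoint resolves to private IPs.
export const kafkaConnectIsPrivate = aiven
    .getKafkaConnect({ project: "my-project", serviceName: "my-kc1" })
    .then(r => r.kafkaConnectUserConfigs[0]?.privateAccess?.kafkaConnect ?? false);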
GetKafkaConnectKafkaConnectUserConfigPrivatelinkAccess
- Jolokia bool - Enable jolokia.
- KafkaConnect bool - Enable kafka_connect.
- Prometheus bool - Enable prometheus.
- Jolokia bool - Enable jolokia.
- KafkaConnect bool - Enable kafka_connect.
- Prometheus bool - Enable prometheus.
- jolokia Boolean - Enable jolokia.
- kafkaConnect Boolean - Enable kafka_connect.
- prometheus Boolean - Enable prometheus.
- jolokia boolean - Enable jolokia.
- kafkaConnect boolean - Enable kafka_connect.
- prometheus boolean - Enable prometheus.
- jolokia bool - Enable jolokia.
- kafka_connect bool - Enable kafka_connect.
- prometheus bool - Enable prometheus.
- jolokia Boolean - Enable jolokia.
- kafkaConnect Boolean - Enable kafka_connect.
- prometheus Boolean - Enable prometheus.
GetKafkaConnectKafkaConnectUserConfigPublicAccess
- KafkaConnect bool - Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.
- Prometheus bool - Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
- KafkaConnect bool - Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.
- Prometheus bool - Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
- kafkaConnect Boolean - Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.
- prometheus Boolean - Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
- kafkaConnect boolean - Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.
- prometheus boolean - Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
- kafka_connect bool - Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.
- prometheus bool - Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
- kafkaConnect Boolean - Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.
- prometheus Boolean - Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
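The same shapes are used when configuring the service itself. A hedged sketch of how the private, privatelink, and public access blocks fit together on the corresponding aiven.KafkaConnect resource; the plan and cloud values are placeholders, and the kafkaConnectUserConfig input is assumed to mirror the data source types documented above:
import * as aiven from "@pulumi/aiven";

// Sketch only: expose Kafka Connect and Prometheus on the private network
// and over Privatelink, and keep both off the public internet.
const kc = new aiven.KafkaConnect("kc", {
    project: "my-project",             // placeholder
    serviceName: "my-kc1",             // placeholder
    cloudName: "google-europe-west1",  // placeholder
    plan: "startup-4",                 // placeholder
    kafkaConnectUserConfig: {
        privateAccess: { kafkaConnect: true, prometheus: true },
        privatelinkAccess: { jolokia: false, kafkaConnect: true, prometheus: true },
        publicAccess: { kafkaConnect: false, prometheus: false },
    },
});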
GetKafkaConnectKafkaConnectUserConfigSecretProvider
- Name string - Name of the secret provider. Used to reference secrets in connector config.
- Aws GetKafkaConnectKafkaConnectUserConfigSecretProviderAws - AWS config for Secret Provider.
- Vault GetKafkaConnectKafkaConnectUserConfigSecretProviderVault - Vault config for Secret Provider.
- Name string - Name of the secret provider. Used to reference secrets in connector config.
- Aws GetKafkaConnectKafkaConnectUserConfigSecretProviderAws - AWS config for Secret Provider.
- Vault GetKafkaConnectKafkaConnectUserConfigSecretProviderVault - Vault config for Secret Provider.
- name String - Name of the secret provider. Used to reference secrets in connector config.
- aws GetKafkaConnectKafkaConnectUserConfigSecretProviderAws - AWS config for Secret Provider.
- vault GetKafkaConnectKafkaConnectUserConfigSecretProviderVault - Vault config for Secret Provider.
- name string - Name of the secret provider. Used to reference secrets in connector config.
- aws GetKafkaConnectKafkaConnectUserConfigSecretProviderAws - AWS config for Secret Provider.
- vault GetKafkaConnectKafkaConnectUserConfigSecretProviderVault - Vault config for Secret Provider.
- name str - Name of the secret provider. Used to reference secrets in connector config.
- aws GetKafkaConnectKafkaConnectUserConfigSecretProviderAws - AWS config for Secret Provider.
- vault GetKafkaConnectKafkaConnectUserConfigSecretProviderVault - Vault config for Secret Provider.
- name String - Name of the secret provider. Used to reference secrets in connector config.
- aws Property Map - AWS config for Secret Provider.
- vault Property Map - Vault config for Secret Provider.
GetKafkaConnectKafkaConnectUserConfigSecretProviderAws
- AuthMethod string - Enum: credentials. Auth method of the secret provider.
- Region string - Region used to look up secrets with AWS Secrets Manager.
- AccessKey string - Access key used to authenticate with AWS.
- SecretKey string - Secret key used to authenticate with AWS.
- AuthMethod string - Enum: credentials. Auth method of the secret provider.
- Region string - Region used to look up secrets with AWS Secrets Manager.
- AccessKey string - Access key used to authenticate with AWS.
- SecretKey string - Secret key used to authenticate with AWS.
- authMethod String - Enum: credentials. Auth method of the secret provider.
- region String - Region used to look up secrets with AWS Secrets Manager.
- accessKey String - Access key used to authenticate with AWS.
- secretKey String - Secret key used to authenticate with AWS.
- authMethod string - Enum: credentials. Auth method of the secret provider.
- region string - Region used to look up secrets with AWS Secrets Manager.
- accessKey string - Access key used to authenticate with AWS.
- secretKey string - Secret key used to authenticate with AWS.
- auth_method str - Enum: credentials. Auth method of the secret provider.
- region str - Region used to look up secrets with AWS Secrets Manager.
- access_key str - Access key used to authenticate with AWS.
- secret_key str - Secret key used to authenticate with AWS.
- authMethod String - Enum: credentials. Auth method of the secret provider.
- region String - Region used to look up secrets with AWS Secrets Manager.
- accessKey String - Access key used to authenticate with AWS.
- secretKey String - Secret key used to authenticate with AWS.
GetKafkaConnectKafkaConnectUserConfigSecretProviderVault
- Address string - Address of the Vault server.
- AuthMethod string - Enum: token. Auth method of the Vault secret provider.
- EngineVersion int - Enum: 1, 2, and newer. KV Secrets Engine version of the Vault server instance.
- Token string - Token used to authenticate with Vault when using auth method token.
- Address string - Address of the Vault server.
- AuthMethod string - Enum: token. Auth method of the Vault secret provider.
- EngineVersion int - Enum: 1, 2, and newer. KV Secrets Engine version of the Vault server instance.
- Token string - Token used to authenticate with Vault when using auth method token.
- address String - Address of the Vault server.
- authMethod String - Enum: token. Auth method of the Vault secret provider.
- engineVersion Integer - Enum: 1, 2, and newer. KV Secrets Engine version of the Vault server instance.
- token String - Token used to authenticate with Vault when using auth method token.
- address string - Address of the Vault server.
- authMethod string - Enum: token. Auth method of the Vault secret provider.
- engineVersion number - Enum: 1, 2, and newer. KV Secrets Engine version of the Vault server instance.
- token string - Token used to authenticate with Vault when using auth method token.
- address str - Address of the Vault server.
- auth_method str - Enum: token. Auth method of the Vault secret provider.
- engine_version int - Enum: 1, 2, and newer. KV Secrets Engine version of the Vault server instance.
- token str - Token used to authenticate with Vault when using auth method token.
- address String - Address of the Vault server.
- authMethod String - Enum: token. Auth method of the Vault secret provider.
- engineVersion Number - Enum: 1, 2, and newer. KV Secrets Engine version of the Vault server instance.
- token String - Token used to authenticate with Vault when using auth method token.
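Putting the three secret provider types together, a hedged sketch of declaring one AWS-backed and one Vault-backed provider on the resource side; the secretProviders input is assumed to mirror the data source types above, the names and addresses are placeholders, and the credentials are pulled from Pulumi config secrets rather than hard-coded:
import * as pulumi from "@pulumi/pulumi";
import * as aiven from "@pulumi/aiven";

const cfg = new pulumi.Config();

// Sketch only: each provider's name is what connector configs reference.
const kc = new aiven.KafkaConnect("kc-secrets", {
    project: "my-project",       // placeholder
    serviceName: "my-kc2",       // placeholder
    cloudName: "aws-eu-west-1",  // placeholder
    plan: "startup-4",           // placeholder
    kafkaConnectUserConfig: {
        secretProviders: [
            {
                name: "aws",
                aws: {
                    authMethod: "credentials",  // only documented enum value
                    region: "eu-west-1",        // placeholder
                    accessKey: cfg.requireSecret("awsAccessKey"),
                    secretKey: cfg.requireSecret("awsSecretKey"),
                },
            },
            {
                name: "vault",
                vault: {
                    address: "https://vault.example.com:8200",  // placeholder
                    authMethod: "token",                        // only documented enum value
                    engineVersion: 2,                           // KV Secrets Engine v2
                    token: cfg.requireSecret("vaultToken"),
                },
            },
        ],
    },
});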
GetKafkaConnectServiceIntegration
- IntegrationType string - Type of the service integration. The only supported value at the moment is read_replica.
- SourceServiceName string - Name of the source service.
- IntegrationType string - Type of the service integration. The only supported value at the moment is read_replica.
- SourceServiceName string - Name of the source service.
- integrationType String - Type of the service integration. The only supported value at the moment is read_replica.
- sourceServiceName String - Name of the source service.
- integrationType string - Type of the service integration. The only supported value at the moment is read_replica.
- sourceServiceName string - Name of the source service.
- integration_type str - Type of the service integration. The only supported value at the moment is read_replica.
- source_service_name str - Name of the source service.
- integrationType String - Type of the service integration. The only supported value at the moment is read_replica.
- sourceServiceName String - Name of the source service.
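A small sketch that surfaces the read_replica integrations reported by the data source; the serviceIntegrations property name is assumed from the type name above, and the project and service names are placeholders:
import * as aiven from "@pulumi/aiven";

// List the source services feeding this Kafka Connect via read_replica
// integrations (currently the only supported integration type).
const kc = aiven.getKafkaConnectOutput({
    project: "my-project",  // placeholder
    serviceName: "my-kc1",  // placeholder
});

export const readReplicaSources = kc.serviceIntegrations.apply(list =>
    list.filter(i => i.integrationType === "read_replica")
        .map(i => i.sourceServiceName),
);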
GetKafkaConnectTag
GetKafkaConnectTechEmail
- Email string - An email address to contact for technical issues.
- Email string - An email address to contact for technical issues.
- email String - An email address to contact for technical issues.
- email string - An email address to contact for technical issues.
- email str - An email address to contact for technical issues.
- email String - An email address to contact for technical issues.
Package Details
- Repository: Aiven pulumi/pulumi-aiven
- License: Apache-2.0
- Notes: This Pulumi package is based on the aiven Terraform Provider.