aiven.Kafka
Creates and manages an Aiven for Apache Kafka® service.
Example Usage
import * as pulumi from "@pulumi/pulumi";
import * as aiven from "@pulumi/aiven";
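// This example assumes an existing Aiven project resource, referenced here as "exampleProject".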
const exampleKafka = new aiven.Kafka("example_kafka", {
    project: exampleProject.project,
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "example-kafka",
    maintenanceWindowDow: "monday",
    maintenanceWindowTime: "10:00:00",
    kafkaUserConfig: {
        kafkaRest: true,
        kafkaConnect: true,
        schemaRegistry: true,
        kafkaVersion: "3.8",
        kafka: {
            groupMaxSessionTimeoutMs: 70000,
            logRetentionBytes: 1000000000,
        },
        publicAccess: {
            kafkaRest: true,
            kafkaConnect: true,
        },
    },
});
import pulumi
import pulumi_aiven as aiven
example_kafka = aiven.Kafka("example_kafka",
    project=example_project["project"],
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-kafka",
    maintenance_window_dow="monday",
    maintenance_window_time="10:00:00",
    kafka_user_config={
        "kafka_rest": True,
        "kafka_connect": True,
        "schema_registry": True,
        "kafka_version": "3.8",
        "kafka": {
            "group_max_session_timeout_ms": 70000,
            "log_retention_bytes": 1000000000,
        },
        "public_access": {
            "kafka_rest": True,
            "kafka_connect": True,
        },
    })
package main
import (
	"github.com/pulumi/pulumi-aiven/sdk/v6/go/aiven"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := aiven.NewKafka(ctx, "example_kafka", &aiven.KafkaArgs{
			Project:               pulumi.Any(exampleProject.Project),
			CloudName:             pulumi.String("google-europe-west1"),
			Plan:                  pulumi.String("business-4"),
			ServiceName:           pulumi.String("example-kafka"),
			MaintenanceWindowDow:  pulumi.String("monday"),
			MaintenanceWindowTime: pulumi.String("10:00:00"),
			KafkaUserConfig: &aiven.KafkaKafkaUserConfigArgs{
				KafkaRest:      pulumi.Bool(true),
				KafkaConnect:   pulumi.Bool(true),
				SchemaRegistry: pulumi.Bool(true),
				KafkaVersion:   pulumi.String("3.8"),
				Kafka: &aiven.KafkaKafkaUserConfigKafkaArgs{
					GroupMaxSessionTimeoutMs: pulumi.Int(70000),
					LogRetentionBytes:        pulumi.Int(1000000000),
				},
				PublicAccess: &aiven.KafkaKafkaUserConfigPublicAccessArgs{
					KafkaRest:    pulumi.Bool(true),
					KafkaConnect: pulumi.Bool(true),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aiven = Pulumi.Aiven;
return await Deployment.RunAsync(() => 
{
    var exampleKafka = new Aiven.Kafka("example_kafka", new()
    {
        Project = exampleProject.Project,
        CloudName = "google-europe-west1",
        Plan = "business-4",
        ServiceName = "example-kafka",
        MaintenanceWindowDow = "monday",
        MaintenanceWindowTime = "10:00:00",
        KafkaUserConfig = new Aiven.Inputs.KafkaKafkaUserConfigArgs
        {
            KafkaRest = true,
            KafkaConnect = true,
            SchemaRegistry = true,
            KafkaVersion = "3.8",
            Kafka = new Aiven.Inputs.KafkaKafkaUserConfigKafkaArgs
            {
                GroupMaxSessionTimeoutMs = 70000,
                LogRetentionBytes = 1000000000,
            },
            PublicAccess = new Aiven.Inputs.KafkaKafkaUserConfigPublicAccessArgs
            {
                KafkaRest = true,
                KafkaConnect = true,
            },
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aiven.Kafka;
import com.pulumi.aiven.KafkaArgs;
import com.pulumi.aiven.inputs.KafkaKafkaUserConfigArgs;
import com.pulumi.aiven.inputs.KafkaKafkaUserConfigKafkaArgs;
import com.pulumi.aiven.inputs.KafkaKafkaUserConfigPublicAccessArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var exampleKafka = new Kafka("exampleKafka", KafkaArgs.builder()
            .project(exampleProject.project())
            .cloudName("google-europe-west1")
            .plan("business-4")
            .serviceName("example-kafka")
            .maintenanceWindowDow("monday")
            .maintenanceWindowTime("10:00:00")
            .kafkaUserConfig(KafkaKafkaUserConfigArgs.builder()
                .kafkaRest(true)
                .kafkaConnect(true)
                .schemaRegistry(true)
                .kafkaVersion("3.8")
                .kafka(KafkaKafkaUserConfigKafkaArgs.builder()
                    .groupMaxSessionTimeoutMs(70000)
                    .logRetentionBytes(1000000000)
                    .build())
                .publicAccess(KafkaKafkaUserConfigPublicAccessArgs.builder()
                    .kafkaRest(true)
                    .kafkaConnect(true)
                    .build())
                .build())
            .build());
    }
}
resources:
  exampleKafka:
    type: aiven:Kafka
    name: example_kafka
    properties:
      project: ${exampleProject.project}
      cloudName: google-europe-west1
      plan: business-4
      serviceName: example-kafka
      maintenanceWindowDow: monday
      maintenanceWindowTime: '10:00:00'
      kafkaUserConfig:
        kafkaRest: true
        kafkaConnect: true
        schemaRegistry: true
        kafkaVersion: '3.8'
        kafka:
          groupMaxSessionTimeoutMs: 70000
          logRetentionBytes: 1000000000
        publicAccess:
          kafkaRest: true
          kafkaConnect: true
Create Kafka Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new Kafka(name: string, args: KafkaArgs, opts?: CustomResourceOptions);
@overload
def Kafka(resource_name: str,
          args: KafkaArgs,
          opts: Optional[ResourceOptions] = None)
@overload
def Kafka(resource_name: str,
          opts: Optional[ResourceOptions] = None,
          plan: Optional[str] = None,
          service_name: Optional[str] = None,
          project: Optional[str] = None,
          maintenance_window_time: Optional[str] = None,
          default_acl: Optional[bool] = None,
          kafkas: Optional[Sequence[KafkaKafkaArgs]] = None,
          karapace: Optional[bool] = None,
          maintenance_window_dow: Optional[str] = None,
          additional_disk_space: Optional[str] = None,
          disk_space: Optional[str] = None,
          kafka_user_config: Optional[KafkaKafkaUserConfigArgs] = None,
          project_vpc_id: Optional[str] = None,
          service_integrations: Optional[Sequence[KafkaServiceIntegrationArgs]] = None,
          cloud_name: Optional[str] = None,
          static_ips: Optional[Sequence[str]] = None,
          tags: Optional[Sequence[KafkaTagArgs]] = None,
          tech_emails: Optional[Sequence[KafkaTechEmailArgs]] = None,
          termination_protection: Optional[bool] = None)
func NewKafka(ctx *Context, name string, args KafkaArgs, opts ...ResourceOption) (*Kafka, error)
public Kafka(string name, KafkaArgs args, CustomResourceOptions? opts = null)
type: aiven:Kafka
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args KafkaArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args KafkaArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args KafkaArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args KafkaArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args KafkaArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var kafkaResource = new Aiven.Kafka("kafkaResource", new()
{
    Plan = "string",
    ServiceName = "string",
    Project = "string",
    MaintenanceWindowTime = "string",
    DefaultAcl = false,
    Kafkas = new[]
    {
        new Aiven.Inputs.KafkaKafkaArgs
        {
            AccessCert = "string",
            AccessKey = "string",
            ConnectUri = "string",
            RestUri = "string",
            SchemaRegistryUri = "string",
            Uris = new[]
            {
                "string",
            },
        },
    },
    MaintenanceWindowDow = "string",
    AdditionalDiskSpace = "string",
    KafkaUserConfig = new Aiven.Inputs.KafkaKafkaUserConfigArgs
    {
        AivenKafkaTopicMessages = false,
        CustomDomain = "string",
        FollowerFetching = new Aiven.Inputs.KafkaKafkaUserConfigFollowerFetchingArgs
        {
            Enabled = false,
        },
        IpFilterObjects = new[]
        {
            new Aiven.Inputs.KafkaKafkaUserConfigIpFilterObjectArgs
            {
                Network = "string",
                Description = "string",
            },
        },
        IpFilterStrings = new[]
        {
            "string",
        },
        Kafka = new Aiven.Inputs.KafkaKafkaUserConfigKafkaArgs
        {
            AutoCreateTopicsEnable = false,
            CompressionType = "string",
            ConnectionsMaxIdleMs = 0,
            DefaultReplicationFactor = 0,
            GroupInitialRebalanceDelayMs = 0,
            GroupMaxSessionTimeoutMs = 0,
            GroupMinSessionTimeoutMs = 0,
            LogCleanerDeleteRetentionMs = 0,
            LogCleanerMaxCompactionLagMs = 0,
            LogCleanerMinCleanableRatio = 0,
            LogCleanerMinCompactionLagMs = 0,
            LogCleanupPolicy = "string",
            LogFlushIntervalMessages = 0,
            LogFlushIntervalMs = 0,
            LogIndexIntervalBytes = 0,
            LogIndexSizeMaxBytes = 0,
            LogLocalRetentionBytes = 0,
            LogLocalRetentionMs = 0,
            LogMessageDownconversionEnable = false,
            LogMessageTimestampDifferenceMaxMs = 0,
            LogMessageTimestampType = "string",
            LogPreallocate = false,
            LogRetentionBytes = 0,
            LogRetentionHours = 0,
            LogRetentionMs = 0,
            LogRollJitterMs = 0,
            LogRollMs = 0,
            LogSegmentBytes = 0,
            LogSegmentDeleteDelayMs = 0,
            MaxConnectionsPerIp = 0,
            MaxIncrementalFetchSessionCacheSlots = 0,
            MessageMaxBytes = 0,
            MinInsyncReplicas = 0,
            NumPartitions = 0,
            OffsetsRetentionMinutes = 0,
            ProducerPurgatoryPurgeIntervalRequests = 0,
            ReplicaFetchMaxBytes = 0,
            ReplicaFetchResponseMaxBytes = 0,
            SaslOauthbearerExpectedAudience = "string",
            SaslOauthbearerExpectedIssuer = "string",
            SaslOauthbearerJwksEndpointUrl = "string",
            SaslOauthbearerSubClaimName = "string",
            SocketRequestMaxBytes = 0,
            TransactionPartitionVerificationEnable = false,
            TransactionRemoveExpiredTransactionCleanupIntervalMs = 0,
            TransactionStateLogSegmentBytes = 0,
        },
        KafkaAuthenticationMethods = new Aiven.Inputs.KafkaKafkaUserConfigKafkaAuthenticationMethodsArgs
        {
            Certificate = false,
            Sasl = false,
        },
        KafkaConnect = false,
        KafkaConnectConfig = new Aiven.Inputs.KafkaKafkaUserConfigKafkaConnectConfigArgs
        {
            ConnectorClientConfigOverridePolicy = "string",
            ConsumerAutoOffsetReset = "string",
            ConsumerFetchMaxBytes = 0,
            ConsumerIsolationLevel = "string",
            ConsumerMaxPartitionFetchBytes = 0,
            ConsumerMaxPollIntervalMs = 0,
            ConsumerMaxPollRecords = 0,
            OffsetFlushIntervalMs = 0,
            OffsetFlushTimeoutMs = 0,
            ProducerBatchSize = 0,
            ProducerBufferMemory = 0,
            ProducerCompressionType = "string",
            ProducerLingerMs = 0,
            ProducerMaxRequestSize = 0,
            ScheduledRebalanceMaxDelayMs = 0,
            SessionTimeoutMs = 0,
        },
        KafkaConnectSecretProviders = new[]
        {
            new Aiven.Inputs.KafkaKafkaUserConfigKafkaConnectSecretProviderArgs
            {
                Name = "string",
                Aws = new Aiven.Inputs.KafkaKafkaUserConfigKafkaConnectSecretProviderAwsArgs
                {
                    AuthMethod = "string",
                    Region = "string",
                    AccessKey = "string",
                    SecretKey = "string",
                },
                Vault = new Aiven.Inputs.KafkaKafkaUserConfigKafkaConnectSecretProviderVaultArgs
                {
                    Address = "string",
                    AuthMethod = "string",
                    EngineVersion = 0,
                    PrefixPathDepth = 0,
                    Token = "string",
                },
            },
        },
        KafkaRest = false,
        KafkaRestAuthorization = false,
        KafkaRestConfig = new Aiven.Inputs.KafkaKafkaUserConfigKafkaRestConfigArgs
        {
            ConsumerEnableAutoCommit = false,
            ConsumerIdleDisconnectTimeout = 0,
            ConsumerRequestMaxBytes = 0,
            ConsumerRequestTimeoutMs = 0,
            NameStrategy = "string",
            NameStrategyValidation = false,
            ProducerAcks = "string",
            ProducerCompressionType = "string",
            ProducerLingerMs = 0,
            ProducerMaxRequestSize = 0,
            SimpleconsumerPoolSizeMax = 0,
        },
        KafkaSaslMechanisms = new Aiven.Inputs.KafkaKafkaUserConfigKafkaSaslMechanismsArgs
        {
            Plain = false,
            ScramSha256 = false,
            ScramSha512 = false,
        },
        KafkaVersion = "string",
        LetsencryptSaslPrivatelink = false,
        PrivateAccess = new Aiven.Inputs.KafkaKafkaUserConfigPrivateAccessArgs
        {
            Kafka = false,
            KafkaConnect = false,
            KafkaRest = false,
            Prometheus = false,
            SchemaRegistry = false,
        },
        PrivatelinkAccess = new Aiven.Inputs.KafkaKafkaUserConfigPrivatelinkAccessArgs
        {
            Jolokia = false,
            Kafka = false,
            KafkaConnect = false,
            KafkaRest = false,
            Prometheus = false,
            SchemaRegistry = false,
        },
        PublicAccess = new Aiven.Inputs.KafkaKafkaUserConfigPublicAccessArgs
        {
            Kafka = false,
            KafkaConnect = false,
            KafkaRest = false,
            Prometheus = false,
            SchemaRegistry = false,
        },
        SchemaRegistry = false,
        SchemaRegistryConfig = new Aiven.Inputs.KafkaKafkaUserConfigSchemaRegistryConfigArgs
        {
            LeaderEligibility = false,
            RetriableErrorsSilenced = false,
            SchemaReaderStrictMode = false,
            TopicName = "string",
        },
        ServiceLog = false,
        SingleZone = new Aiven.Inputs.KafkaKafkaUserConfigSingleZoneArgs
        {
            Enabled = false,
        },
        StaticIps = false,
        TieredStorage = new Aiven.Inputs.KafkaKafkaUserConfigTieredStorageArgs
        {
            Enabled = false,
        },
    },
    ProjectVpcId = "string",
    ServiceIntegrations = new[]
    {
        new Aiven.Inputs.KafkaServiceIntegrationArgs
        {
            IntegrationType = "string",
            SourceServiceName = "string",
        },
    },
    CloudName = "string",
    StaticIps = new[]
    {
        "string",
    },
    Tags = new[]
    {
        new Aiven.Inputs.KafkaTagArgs
        {
            Key = "string",
            Value = "string",
        },
    },
    TechEmails = new[]
    {
        new Aiven.Inputs.KafkaTechEmailArgs
        {
            Email = "string",
        },
    },
    TerminationProtection = false,
});
example, err := aiven.NewKafka(ctx, "kafkaResource", &aiven.KafkaArgs{
	Plan:                  pulumi.String("string"),
	ServiceName:           pulumi.String("string"),
	Project:               pulumi.String("string"),
	MaintenanceWindowTime: pulumi.String("string"),
	DefaultAcl:            pulumi.Bool(false),
	Kafkas: aiven.KafkaKafkaArray{
		&aiven.KafkaKafkaArgs{
			AccessCert:        pulumi.String("string"),
			AccessKey:         pulumi.String("string"),
			ConnectUri:        pulumi.String("string"),
			RestUri:           pulumi.String("string"),
			SchemaRegistryUri: pulumi.String("string"),
			Uris: pulumi.StringArray{
				pulumi.String("string"),
			},
		},
	},
	MaintenanceWindowDow: pulumi.String("string"),
	AdditionalDiskSpace:  pulumi.String("string"),
	KafkaUserConfig: &aiven.KafkaKafkaUserConfigArgs{
		AivenKafkaTopicMessages: pulumi.Bool(false),
		CustomDomain:            pulumi.String("string"),
		FollowerFetching: &aiven.KafkaKafkaUserConfigFollowerFetchingArgs{
			Enabled: pulumi.Bool(false),
		},
		IpFilterObjects: aiven.KafkaKafkaUserConfigIpFilterObjectArray{
			&aiven.KafkaKafkaUserConfigIpFilterObjectArgs{
				Network:     pulumi.String("string"),
				Description: pulumi.String("string"),
			},
		},
		IpFilterStrings: pulumi.StringArray{
			pulumi.String("string"),
		},
		Kafka: &aiven.KafkaKafkaUserConfigKafkaArgs{
			AutoCreateTopicsEnable:                               pulumi.Bool(false),
			CompressionType:                                      pulumi.String("string"),
			ConnectionsMaxIdleMs:                                 pulumi.Int(0),
			DefaultReplicationFactor:                             pulumi.Int(0),
			GroupInitialRebalanceDelayMs:                         pulumi.Int(0),
			GroupMaxSessionTimeoutMs:                             pulumi.Int(0),
			GroupMinSessionTimeoutMs:                             pulumi.Int(0),
			LogCleanerDeleteRetentionMs:                          pulumi.Int(0),
			LogCleanerMaxCompactionLagMs:                         pulumi.Int(0),
			LogCleanerMinCleanableRatio:                          pulumi.Float64(0),
			LogCleanerMinCompactionLagMs:                         pulumi.Int(0),
			LogCleanupPolicy:                                     pulumi.String("string"),
			LogFlushIntervalMessages:                             pulumi.Int(0),
			LogFlushIntervalMs:                                   pulumi.Int(0),
			LogIndexIntervalBytes:                                pulumi.Int(0),
			LogIndexSizeMaxBytes:                                 pulumi.Int(0),
			LogLocalRetentionBytes:                               pulumi.Int(0),
			LogLocalRetentionMs:                                  pulumi.Int(0),
			LogMessageDownconversionEnable:                       pulumi.Bool(false),
			LogMessageTimestampDifferenceMaxMs:                   pulumi.Int(0),
			LogMessageTimestampType:                              pulumi.String("string"),
			LogPreallocate:                                       pulumi.Bool(false),
			LogRetentionBytes:                                    pulumi.Int(0),
			LogRetentionHours:                                    pulumi.Int(0),
			LogRetentionMs:                                       pulumi.Int(0),
			LogRollJitterMs:                                      pulumi.Int(0),
			LogRollMs:                                            pulumi.Int(0),
			LogSegmentBytes:                                      pulumi.Int(0),
			LogSegmentDeleteDelayMs:                              pulumi.Int(0),
			MaxConnectionsPerIp:                                  pulumi.Int(0),
			MaxIncrementalFetchSessionCacheSlots:                 pulumi.Int(0),
			MessageMaxBytes:                                      pulumi.Int(0),
			MinInsyncReplicas:                                    pulumi.Int(0),
			NumPartitions:                                        pulumi.Int(0),
			OffsetsRetentionMinutes:                              pulumi.Int(0),
			ProducerPurgatoryPurgeIntervalRequests:               pulumi.Int(0),
			ReplicaFetchMaxBytes:                                 pulumi.Int(0),
			ReplicaFetchResponseMaxBytes:                         pulumi.Int(0),
			SaslOauthbearerExpectedAudience:                      pulumi.String("string"),
			SaslOauthbearerExpectedIssuer:                        pulumi.String("string"),
			SaslOauthbearerJwksEndpointUrl:                       pulumi.String("string"),
			SaslOauthbearerSubClaimName:                          pulumi.String("string"),
			SocketRequestMaxBytes:                                pulumi.Int(0),
			TransactionPartitionVerificationEnable:               pulumi.Bool(false),
			TransactionRemoveExpiredTransactionCleanupIntervalMs: pulumi.Int(0),
			TransactionStateLogSegmentBytes:                      pulumi.Int(0),
		},
		KafkaAuthenticationMethods: &aiven.KafkaKafkaUserConfigKafkaAuthenticationMethodsArgs{
			Certificate: pulumi.Bool(false),
			Sasl:        pulumi.Bool(false),
		},
		KafkaConnect: pulumi.Bool(false),
		KafkaConnectConfig: &aiven.KafkaKafkaUserConfigKafkaConnectConfigArgs{
			ConnectorClientConfigOverridePolicy: pulumi.String("string"),
			ConsumerAutoOffsetReset:             pulumi.String("string"),
			ConsumerFetchMaxBytes:               pulumi.Int(0),
			ConsumerIsolationLevel:              pulumi.String("string"),
			ConsumerMaxPartitionFetchBytes:      pulumi.Int(0),
			ConsumerMaxPollIntervalMs:           pulumi.Int(0),
			ConsumerMaxPollRecords:              pulumi.Int(0),
			OffsetFlushIntervalMs:               pulumi.Int(0),
			OffsetFlushTimeoutMs:                pulumi.Int(0),
			ProducerBatchSize:                   pulumi.Int(0),
			ProducerBufferMemory:                pulumi.Int(0),
			ProducerCompressionType:             pulumi.String("string"),
			ProducerLingerMs:                    pulumi.Int(0),
			ProducerMaxRequestSize:              pulumi.Int(0),
			ScheduledRebalanceMaxDelayMs:        pulumi.Int(0),
			SessionTimeoutMs:                    pulumi.Int(0),
		},
		KafkaConnectSecretProviders: aiven.KafkaKafkaUserConfigKafkaConnectSecretProviderArray{
			&aiven.KafkaKafkaUserConfigKafkaConnectSecretProviderArgs{
				Name: pulumi.String("string"),
				Aws: &aiven.KafkaKafkaUserConfigKafkaConnectSecretProviderAwsArgs{
					AuthMethod: pulumi.String("string"),
					Region:     pulumi.String("string"),
					AccessKey:  pulumi.String("string"),
					SecretKey:  pulumi.String("string"),
				},
				Vault: &aiven.KafkaKafkaUserConfigKafkaConnectSecretProviderVaultArgs{
					Address:         pulumi.String("string"),
					AuthMethod:      pulumi.String("string"),
					EngineVersion:   pulumi.Int(0),
					PrefixPathDepth: pulumi.Int(0),
					Token:           pulumi.String("string"),
				},
			},
		},
		KafkaRest:              pulumi.Bool(false),
		KafkaRestAuthorization: pulumi.Bool(false),
		KafkaRestConfig: &aiven.KafkaKafkaUserConfigKafkaRestConfigArgs{
			ConsumerEnableAutoCommit:      pulumi.Bool(false),
			ConsumerIdleDisconnectTimeout: pulumi.Int(0),
			ConsumerRequestMaxBytes:       pulumi.Int(0),
			ConsumerRequestTimeoutMs:      pulumi.Int(0),
			NameStrategy:                  pulumi.String("string"),
			NameStrategyValidation:        pulumi.Bool(false),
			ProducerAcks:                  pulumi.String("string"),
			ProducerCompressionType:       pulumi.String("string"),
			ProducerLingerMs:              pulumi.Int(0),
			ProducerMaxRequestSize:        pulumi.Int(0),
			SimpleconsumerPoolSizeMax:     pulumi.Int(0),
		},
		KafkaSaslMechanisms: &aiven.KafkaKafkaUserConfigKafkaSaslMechanismsArgs{
			Plain:       pulumi.Bool(false),
			ScramSha256: pulumi.Bool(false),
			ScramSha512: pulumi.Bool(false),
		},
		KafkaVersion:               pulumi.String("string"),
		LetsencryptSaslPrivatelink: pulumi.Bool(false),
		PrivateAccess: &aiven.KafkaKafkaUserConfigPrivateAccessArgs{
			Kafka:          pulumi.Bool(false),
			KafkaConnect:   pulumi.Bool(false),
			KafkaRest:      pulumi.Bool(false),
			Prometheus:     pulumi.Bool(false),
			SchemaRegistry: pulumi.Bool(false),
		},
		PrivatelinkAccess: &aiven.KafkaKafkaUserConfigPrivatelinkAccessArgs{
			Jolokia:        pulumi.Bool(false),
			Kafka:          pulumi.Bool(false),
			KafkaConnect:   pulumi.Bool(false),
			KafkaRest:      pulumi.Bool(false),
			Prometheus:     pulumi.Bool(false),
			SchemaRegistry: pulumi.Bool(false),
		},
		PublicAccess: &aiven.KafkaKafkaUserConfigPublicAccessArgs{
			Kafka:          pulumi.Bool(false),
			KafkaConnect:   pulumi.Bool(false),
			KafkaRest:      pulumi.Bool(false),
			Prometheus:     pulumi.Bool(false),
			SchemaRegistry: pulumi.Bool(false),
		},
		SchemaRegistry: pulumi.Bool(false),
		SchemaRegistryConfig: &aiven.KafkaKafkaUserConfigSchemaRegistryConfigArgs{
			LeaderEligibility:       pulumi.Bool(false),
			RetriableErrorsSilenced: pulumi.Bool(false),
			SchemaReaderStrictMode:  pulumi.Bool(false),
			TopicName:               pulumi.String("string"),
		},
		ServiceLog: pulumi.Bool(false),
		SingleZone: &aiven.KafkaKafkaUserConfigSingleZoneArgs{
			Enabled: pulumi.Bool(false),
		},
		StaticIps: pulumi.Bool(false),
		TieredStorage: &aiven.KafkaKafkaUserConfigTieredStorageArgs{
			Enabled: pulumi.Bool(false),
		},
	},
	ProjectVpcId: pulumi.String("string"),
	ServiceIntegrations: aiven.KafkaServiceIntegrationArray{
		&aiven.KafkaServiceIntegrationArgs{
			IntegrationType:   pulumi.String("string"),
			SourceServiceName: pulumi.String("string"),
		},
	},
	CloudName: pulumi.String("string"),
	StaticIps: pulumi.StringArray{
		pulumi.String("string"),
	},
	Tags: aiven.KafkaTagArray{
		&aiven.KafkaTagArgs{
			Key:   pulumi.String("string"),
			Value: pulumi.String("string"),
		},
	},
	TechEmails: aiven.KafkaTechEmailArray{
		&aiven.KafkaTechEmailArgs{
			Email: pulumi.String("string"),
		},
	},
	TerminationProtection: pulumi.Bool(false),
})
var kafkaResource = new Kafka("kafkaResource", KafkaArgs.builder()
    .plan("string")
    .serviceName("string")
    .project("string")
    .maintenanceWindowTime("string")
    .defaultAcl(false)
    .kafkas(KafkaKafkaArgs.builder()
        .accessCert("string")
        .accessKey("string")
        .connectUri("string")
        .restUri("string")
        .schemaRegistryUri("string")
        .uris("string")
        .build())
    .maintenanceWindowDow("string")
    .additionalDiskSpace("string")
    .kafkaUserConfig(KafkaKafkaUserConfigArgs.builder()
        .aivenKafkaTopicMessages(false)
        .customDomain("string")
        .followerFetching(KafkaKafkaUserConfigFollowerFetchingArgs.builder()
            .enabled(false)
            .build())
        .ipFilterObjects(KafkaKafkaUserConfigIpFilterObjectArgs.builder()
            .network("string")
            .description("string")
            .build())
        .ipFilterStrings("string")
        .kafka(KafkaKafkaUserConfigKafkaArgs.builder()
            .autoCreateTopicsEnable(false)
            .compressionType("string")
            .connectionsMaxIdleMs(0)
            .defaultReplicationFactor(0)
            .groupInitialRebalanceDelayMs(0)
            .groupMaxSessionTimeoutMs(0)
            .groupMinSessionTimeoutMs(0)
            .logCleanerDeleteRetentionMs(0)
            .logCleanerMaxCompactionLagMs(0)
            .logCleanerMinCleanableRatio(0.0)
            .logCleanerMinCompactionLagMs(0)
            .logCleanupPolicy("string")
            .logFlushIntervalMessages(0)
            .logFlushIntervalMs(0)
            .logIndexIntervalBytes(0)
            .logIndexSizeMaxBytes(0)
            .logLocalRetentionBytes(0)
            .logLocalRetentionMs(0)
            .logMessageDownconversionEnable(false)
            .logMessageTimestampDifferenceMaxMs(0)
            .logMessageTimestampType("string")
            .logPreallocate(false)
            .logRetentionBytes(0)
            .logRetentionHours(0)
            .logRetentionMs(0)
            .logRollJitterMs(0)
            .logRollMs(0)
            .logSegmentBytes(0)
            .logSegmentDeleteDelayMs(0)
            .maxConnectionsPerIp(0)
            .maxIncrementalFetchSessionCacheSlots(0)
            .messageMaxBytes(0)
            .minInsyncReplicas(0)
            .numPartitions(0)
            .offsetsRetentionMinutes(0)
            .producerPurgatoryPurgeIntervalRequests(0)
            .replicaFetchMaxBytes(0)
            .replicaFetchResponseMaxBytes(0)
            .saslOauthbearerExpectedAudience("string")
            .saslOauthbearerExpectedIssuer("string")
            .saslOauthbearerJwksEndpointUrl("string")
            .saslOauthbearerSubClaimName("string")
            .socketRequestMaxBytes(0)
            .transactionPartitionVerificationEnable(false)
            .transactionRemoveExpiredTransactionCleanupIntervalMs(0)
            .transactionStateLogSegmentBytes(0)
            .build())
        .kafkaAuthenticationMethods(KafkaKafkaUserConfigKafkaAuthenticationMethodsArgs.builder()
            .certificate(false)
            .sasl(false)
            .build())
        .kafkaConnect(false)
        .kafkaConnectConfig(KafkaKafkaUserConfigKafkaConnectConfigArgs.builder()
            .connectorClientConfigOverridePolicy("string")
            .consumerAutoOffsetReset("string")
            .consumerFetchMaxBytes(0)
            .consumerIsolationLevel("string")
            .consumerMaxPartitionFetchBytes(0)
            .consumerMaxPollIntervalMs(0)
            .consumerMaxPollRecords(0)
            .offsetFlushIntervalMs(0)
            .offsetFlushTimeoutMs(0)
            .producerBatchSize(0)
            .producerBufferMemory(0)
            .producerCompressionType("string")
            .producerLingerMs(0)
            .producerMaxRequestSize(0)
            .scheduledRebalanceMaxDelayMs(0)
            .sessionTimeoutMs(0)
            .build())
        .kafkaConnectSecretProviders(KafkaKafkaUserConfigKafkaConnectSecretProviderArgs.builder()
            .name("string")
            .aws(KafkaKafkaUserConfigKafkaConnectSecretProviderAwsArgs.builder()
                .authMethod("string")
                .region("string")
                .accessKey("string")
                .secretKey("string")
                .build())
            .vault(KafkaKafkaUserConfigKafkaConnectSecretProviderVaultArgs.builder()
                .address("string")
                .authMethod("string")
                .engineVersion(0)
                .prefixPathDepth(0)
                .token("string")
                .build())
            .build())
        .kafkaRest(false)
        .kafkaRestAuthorization(false)
        .kafkaRestConfig(KafkaKafkaUserConfigKafkaRestConfigArgs.builder()
            .consumerEnableAutoCommit(false)
            .consumerIdleDisconnectTimeout(0)
            .consumerRequestMaxBytes(0)
            .consumerRequestTimeoutMs(0)
            .nameStrategy("string")
            .nameStrategyValidation(false)
            .producerAcks("string")
            .producerCompressionType("string")
            .producerLingerMs(0)
            .producerMaxRequestSize(0)
            .simpleconsumerPoolSizeMax(0)
            .build())
        .kafkaSaslMechanisms(KafkaKafkaUserConfigKafkaSaslMechanismsArgs.builder()
            .plain(false)
            .scramSha256(false)
            .scramSha512(false)
            .build())
        .kafkaVersion("string")
        .letsencryptSaslPrivatelink(false)
        .privateAccess(KafkaKafkaUserConfigPrivateAccessArgs.builder()
            .kafka(false)
            .kafkaConnect(false)
            .kafkaRest(false)
            .prometheus(false)
            .schemaRegistry(false)
            .build())
        .privatelinkAccess(KafkaKafkaUserConfigPrivatelinkAccessArgs.builder()
            .jolokia(false)
            .kafka(false)
            .kafkaConnect(false)
            .kafkaRest(false)
            .prometheus(false)
            .schemaRegistry(false)
            .build())
        .publicAccess(KafkaKafkaUserConfigPublicAccessArgs.builder()
            .kafka(false)
            .kafkaConnect(false)
            .kafkaRest(false)
            .prometheus(false)
            .schemaRegistry(false)
            .build())
        .schemaRegistry(false)
        .schemaRegistryConfig(KafkaKafkaUserConfigSchemaRegistryConfigArgs.builder()
            .leaderEligibility(false)
            .retriableErrorsSilenced(false)
            .schemaReaderStrictMode(false)
            .topicName("string")
            .build())
        .serviceLog(false)
        .singleZone(KafkaKafkaUserConfigSingleZoneArgs.builder()
            .enabled(false)
            .build())
        .staticIps(false)
        .tieredStorage(KafkaKafkaUserConfigTieredStorageArgs.builder()
            .enabled(false)
            .build())
        .build())
    .projectVpcId("string")
    .serviceIntegrations(KafkaServiceIntegrationArgs.builder()
        .integrationType("string")
        .sourceServiceName("string")
        .build())
    .cloudName("string")
    .staticIps("string")
    .tags(KafkaTagArgs.builder()
        .key("string")
        .value("string")
        .build())
    .techEmails(KafkaTechEmailArgs.builder()
        .email("string")
        .build())
    .terminationProtection(false)
    .build());
kafka_resource = aiven.Kafka("kafkaResource",
    plan="string",
    service_name="string",
    project="string",
    maintenance_window_time="string",
    default_acl=False,
    kafkas=[{
        "access_cert": "string",
        "access_key": "string",
        "connect_uri": "string",
        "rest_uri": "string",
        "schema_registry_uri": "string",
        "uris": ["string"],
    }],
    maintenance_window_dow="string",
    additional_disk_space="string",
    kafka_user_config={
        "aiven_kafka_topic_messages": False,
        "custom_domain": "string",
        "follower_fetching": {
            "enabled": False,
        },
        "ip_filter_objects": [{
            "network": "string",
            "description": "string",
        }],
        "ip_filter_strings": ["string"],
        "kafka": {
            "auto_create_topics_enable": False,
            "compression_type": "string",
            "connections_max_idle_ms": 0,
            "default_replication_factor": 0,
            "group_initial_rebalance_delay_ms": 0,
            "group_max_session_timeout_ms": 0,
            "group_min_session_timeout_ms": 0,
            "log_cleaner_delete_retention_ms": 0,
            "log_cleaner_max_compaction_lag_ms": 0,
            "log_cleaner_min_cleanable_ratio": 0,
            "log_cleaner_min_compaction_lag_ms": 0,
            "log_cleanup_policy": "string",
            "log_flush_interval_messages": 0,
            "log_flush_interval_ms": 0,
            "log_index_interval_bytes": 0,
            "log_index_size_max_bytes": 0,
            "log_local_retention_bytes": 0,
            "log_local_retention_ms": 0,
            "log_message_downconversion_enable": False,
            "log_message_timestamp_difference_max_ms": 0,
            "log_message_timestamp_type": "string",
            "log_preallocate": False,
            "log_retention_bytes": 0,
            "log_retention_hours": 0,
            "log_retention_ms": 0,
            "log_roll_jitter_ms": 0,
            "log_roll_ms": 0,
            "log_segment_bytes": 0,
            "log_segment_delete_delay_ms": 0,
            "max_connections_per_ip": 0,
            "max_incremental_fetch_session_cache_slots": 0,
            "message_max_bytes": 0,
            "min_insync_replicas": 0,
            "num_partitions": 0,
            "offsets_retention_minutes": 0,
            "producer_purgatory_purge_interval_requests": 0,
            "replica_fetch_max_bytes": 0,
            "replica_fetch_response_max_bytes": 0,
            "sasl_oauthbearer_expected_audience": "string",
            "sasl_oauthbearer_expected_issuer": "string",
            "sasl_oauthbearer_jwks_endpoint_url": "string",
            "sasl_oauthbearer_sub_claim_name": "string",
            "socket_request_max_bytes": 0,
            "transaction_partition_verification_enable": False,
            "transaction_remove_expired_transaction_cleanup_interval_ms": 0,
            "transaction_state_log_segment_bytes": 0,
        },
        "kafka_authentication_methods": {
            "certificate": False,
            "sasl": False,
        },
        "kafka_connect": False,
        "kafka_connect_config": {
            "connector_client_config_override_policy": "string",
            "consumer_auto_offset_reset": "string",
            "consumer_fetch_max_bytes": 0,
            "consumer_isolation_level": "string",
            "consumer_max_partition_fetch_bytes": 0,
            "consumer_max_poll_interval_ms": 0,
            "consumer_max_poll_records": 0,
            "offset_flush_interval_ms": 0,
            "offset_flush_timeout_ms": 0,
            "producer_batch_size": 0,
            "producer_buffer_memory": 0,
            "producer_compression_type": "string",
            "producer_linger_ms": 0,
            "producer_max_request_size": 0,
            "scheduled_rebalance_max_delay_ms": 0,
            "session_timeout_ms": 0,
        },
        "kafka_connect_secret_providers": [{
            "name": "string",
            "aws": {
                "auth_method": "string",
                "region": "string",
                "access_key": "string",
                "secret_key": "string",
            },
            "vault": {
                "address": "string",
                "auth_method": "string",
                "engine_version": 0,
                "prefix_path_depth": 0,
                "token": "string",
            },
        }],
        "kafka_rest": False,
        "kafka_rest_authorization": False,
        "kafka_rest_config": {
            "consumer_enable_auto_commit": False,
            "consumer_idle_disconnect_timeout": 0,
            "consumer_request_max_bytes": 0,
            "consumer_request_timeout_ms": 0,
            "name_strategy": "string",
            "name_strategy_validation": False,
            "producer_acks": "string",
            "producer_compression_type": "string",
            "producer_linger_ms": 0,
            "producer_max_request_size": 0,
            "simpleconsumer_pool_size_max": 0,
        },
        "kafka_sasl_mechanisms": {
            "plain": False,
            "scram_sha256": False,
            "scram_sha512": False,
        },
        "kafka_version": "string",
        "letsencrypt_sasl_privatelink": False,
        "private_access": {
            "kafka": False,
            "kafka_connect": False,
            "kafka_rest": False,
            "prometheus": False,
            "schema_registry": False,
        },
        "privatelink_access": {
            "jolokia": False,
            "kafka": False,
            "kafka_connect": False,
            "kafka_rest": False,
            "prometheus": False,
            "schema_registry": False,
        },
        "public_access": {
            "kafka": False,
            "kafka_connect": False,
            "kafka_rest": False,
            "prometheus": False,
            "schema_registry": False,
        },
        "schema_registry": False,
        "schema_registry_config": {
            "leader_eligibility": False,
            "retriable_errors_silenced": False,
            "schema_reader_strict_mode": False,
            "topic_name": "string",
        },
        "service_log": False,
        "single_zone": {
            "enabled": False,
        },
        "static_ips": False,
        "tiered_storage": {
            "enabled": False,
        },
    },
    project_vpc_id="string",
    service_integrations=[{
        "integration_type": "string",
        "source_service_name": "string",
    }],
    cloud_name="string",
    static_ips=["string"],
    tags=[{
        "key": "string",
        "value": "string",
    }],
    tech_emails=[{
        "email": "string",
    }],
    termination_protection=False)
const kafkaResource = new aiven.Kafka("kafkaResource", {
    plan: "string",
    serviceName: "string",
    project: "string",
    maintenanceWindowTime: "string",
    defaultAcl: false,
    kafkas: [{
        accessCert: "string",
        accessKey: "string",
        connectUri: "string",
        restUri: "string",
        schemaRegistryUri: "string",
        uris: ["string"],
    }],
    maintenanceWindowDow: "string",
    additionalDiskSpace: "string",
    kafkaUserConfig: {
        aivenKafkaTopicMessages: false,
        customDomain: "string",
        followerFetching: {
            enabled: false,
        },
        ipFilterObjects: [{
            network: "string",
            description: "string",
        }],
        ipFilterStrings: ["string"],
        kafka: {
            autoCreateTopicsEnable: false,
            compressionType: "string",
            connectionsMaxIdleMs: 0,
            defaultReplicationFactor: 0,
            groupInitialRebalanceDelayMs: 0,
            groupMaxSessionTimeoutMs: 0,
            groupMinSessionTimeoutMs: 0,
            logCleanerDeleteRetentionMs: 0,
            logCleanerMaxCompactionLagMs: 0,
            logCleanerMinCleanableRatio: 0,
            logCleanerMinCompactionLagMs: 0,
            logCleanupPolicy: "string",
            logFlushIntervalMessages: 0,
            logFlushIntervalMs: 0,
            logIndexIntervalBytes: 0,
            logIndexSizeMaxBytes: 0,
            logLocalRetentionBytes: 0,
            logLocalRetentionMs: 0,
            logMessageDownconversionEnable: false,
            logMessageTimestampDifferenceMaxMs: 0,
            logMessageTimestampType: "string",
            logPreallocate: false,
            logRetentionBytes: 0,
            logRetentionHours: 0,
            logRetentionMs: 0,
            logRollJitterMs: 0,
            logRollMs: 0,
            logSegmentBytes: 0,
            logSegmentDeleteDelayMs: 0,
            maxConnectionsPerIp: 0,
            maxIncrementalFetchSessionCacheSlots: 0,
            messageMaxBytes: 0,
            minInsyncReplicas: 0,
            numPartitions: 0,
            offsetsRetentionMinutes: 0,
            producerPurgatoryPurgeIntervalRequests: 0,
            replicaFetchMaxBytes: 0,
            replicaFetchResponseMaxBytes: 0,
            saslOauthbearerExpectedAudience: "string",
            saslOauthbearerExpectedIssuer: "string",
            saslOauthbearerJwksEndpointUrl: "string",
            saslOauthbearerSubClaimName: "string",
            socketRequestMaxBytes: 0,
            transactionPartitionVerificationEnable: false,
            transactionRemoveExpiredTransactionCleanupIntervalMs: 0,
            transactionStateLogSegmentBytes: 0,
        },
        kafkaAuthenticationMethods: {
            certificate: false,
            sasl: false,
        },
        kafkaConnect: false,
        kafkaConnectConfig: {
            connectorClientConfigOverridePolicy: "string",
            consumerAutoOffsetReset: "string",
            consumerFetchMaxBytes: 0,
            consumerIsolationLevel: "string",
            consumerMaxPartitionFetchBytes: 0,
            consumerMaxPollIntervalMs: 0,
            consumerMaxPollRecords: 0,
            offsetFlushIntervalMs: 0,
            offsetFlushTimeoutMs: 0,
            producerBatchSize: 0,
            producerBufferMemory: 0,
            producerCompressionType: "string",
            producerLingerMs: 0,
            producerMaxRequestSize: 0,
            scheduledRebalanceMaxDelayMs: 0,
            sessionTimeoutMs: 0,
        },
        kafkaConnectSecretProviders: [{
            name: "string",
            aws: {
                authMethod: "string",
                region: "string",
                accessKey: "string",
                secretKey: "string",
            },
            vault: {
                address: "string",
                authMethod: "string",
                engineVersion: 0,
                prefixPathDepth: 0,
                token: "string",
            },
        }],
        kafkaRest: false,
        kafkaRestAuthorization: false,
        kafkaRestConfig: {
            consumerEnableAutoCommit: false,
            consumerIdleDisconnectTimeout: 0,
            consumerRequestMaxBytes: 0,
            consumerRequestTimeoutMs: 0,
            nameStrategy: "string",
            nameStrategyValidation: false,
            producerAcks: "string",
            producerCompressionType: "string",
            producerLingerMs: 0,
            producerMaxRequestSize: 0,
            simpleconsumerPoolSizeMax: 0,
        },
        kafkaSaslMechanisms: {
            plain: false,
            scramSha256: false,
            scramSha512: false,
        },
        kafkaVersion: "string",
        letsencryptSaslPrivatelink: false,
        privateAccess: {
            kafka: false,
            kafkaConnect: false,
            kafkaRest: false,
            prometheus: false,
            schemaRegistry: false,
        },
        privatelinkAccess: {
            jolokia: false,
            kafka: false,
            kafkaConnect: false,
            kafkaRest: false,
            prometheus: false,
            schemaRegistry: false,
        },
        publicAccess: {
            kafka: false,
            kafkaConnect: false,
            kafkaRest: false,
            prometheus: false,
            schemaRegistry: false,
        },
        schemaRegistry: false,
        schemaRegistryConfig: {
            leaderEligibility: false,
            retriableErrorsSilenced: false,
            schemaReaderStrictMode: false,
            topicName: "string",
        },
        serviceLog: false,
        singleZone: {
            enabled: false,
        },
        staticIps: false,
        tieredStorage: {
            enabled: false,
        },
    },
    projectVpcId: "string",
    serviceIntegrations: [{
        integrationType: "string",
        sourceServiceName: "string",
    }],
    cloudName: "string",
    staticIps: ["string"],
    tags: [{
        key: "string",
        value: "string",
    }],
    techEmails: [{
        email: "string",
    }],
    terminationProtection: false,
});
type: aiven:Kafka
properties:
    additionalDiskSpace: string
    cloudName: string
    defaultAcl: false
    kafkaUserConfig:
        aivenKafkaTopicMessages: false
        customDomain: string
        followerFetching:
            enabled: false
        ipFilterObjects:
            - description: string
              network: string
        ipFilterStrings:
            - string
        kafka:
            autoCreateTopicsEnable: false
            compressionType: string
            connectionsMaxIdleMs: 0
            defaultReplicationFactor: 0
            groupInitialRebalanceDelayMs: 0
            groupMaxSessionTimeoutMs: 0
            groupMinSessionTimeoutMs: 0
            logCleanerDeleteRetentionMs: 0
            logCleanerMaxCompactionLagMs: 0
            logCleanerMinCleanableRatio: 0
            logCleanerMinCompactionLagMs: 0
            logCleanupPolicy: string
            logFlushIntervalMessages: 0
            logFlushIntervalMs: 0
            logIndexIntervalBytes: 0
            logIndexSizeMaxBytes: 0
            logLocalRetentionBytes: 0
            logLocalRetentionMs: 0
            logMessageDownconversionEnable: false
            logMessageTimestampDifferenceMaxMs: 0
            logMessageTimestampType: string
            logPreallocate: false
            logRetentionBytes: 0
            logRetentionHours: 0
            logRetentionMs: 0
            logRollJitterMs: 0
            logRollMs: 0
            logSegmentBytes: 0
            logSegmentDeleteDelayMs: 0
            maxConnectionsPerIp: 0
            maxIncrementalFetchSessionCacheSlots: 0
            messageMaxBytes: 0
            minInsyncReplicas: 0
            numPartitions: 0
            offsetsRetentionMinutes: 0
            producerPurgatoryPurgeIntervalRequests: 0
            replicaFetchMaxBytes: 0
            replicaFetchResponseMaxBytes: 0
            saslOauthbearerExpectedAudience: string
            saslOauthbearerExpectedIssuer: string
            saslOauthbearerJwksEndpointUrl: string
            saslOauthbearerSubClaimName: string
            socketRequestMaxBytes: 0
            transactionPartitionVerificationEnable: false
            transactionRemoveExpiredTransactionCleanupIntervalMs: 0
            transactionStateLogSegmentBytes: 0
        kafkaAuthenticationMethods:
            certificate: false
            sasl: false
        kafkaConnect: false
        kafkaConnectConfig:
            connectorClientConfigOverridePolicy: string
            consumerAutoOffsetReset: string
            consumerFetchMaxBytes: 0
            consumerIsolationLevel: string
            consumerMaxPartitionFetchBytes: 0
            consumerMaxPollIntervalMs: 0
            consumerMaxPollRecords: 0
            offsetFlushIntervalMs: 0
            offsetFlushTimeoutMs: 0
            producerBatchSize: 0
            producerBufferMemory: 0
            producerCompressionType: string
            producerLingerMs: 0
            producerMaxRequestSize: 0
            scheduledRebalanceMaxDelayMs: 0
            sessionTimeoutMs: 0
        kafkaConnectSecretProviders:
            - aws:
                accessKey: string
                authMethod: string
                region: string
                secretKey: string
              name: string
              vault:
                address: string
                authMethod: string
                engineVersion: 0
                prefixPathDepth: 0
                token: string
        kafkaRest: false
        kafkaRestAuthorization: false
        kafkaRestConfig:
            consumerEnableAutoCommit: false
            consumerIdleDisconnectTimeout: 0
            consumerRequestMaxBytes: 0
            consumerRequestTimeoutMs: 0
            nameStrategy: string
            nameStrategyValidation: false
            producerAcks: string
            producerCompressionType: string
            producerLingerMs: 0
            producerMaxRequestSize: 0
            simpleconsumerPoolSizeMax: 0
        kafkaSaslMechanisms:
            plain: false
            scramSha256: false
            scramSha512: false
        kafkaVersion: string
        letsencryptSaslPrivatelink: false
        privateAccess:
            kafka: false
            kafkaConnect: false
            kafkaRest: false
            prometheus: false
            schemaRegistry: false
        privatelinkAccess:
            jolokia: false
            kafka: false
            kafkaConnect: false
            kafkaRest: false
            prometheus: false
            schemaRegistry: false
        publicAccess:
            kafka: false
            kafkaConnect: false
            kafkaRest: false
            prometheus: false
            schemaRegistry: false
        schemaRegistry: false
        schemaRegistryConfig:
            leaderEligibility: false
            retriableErrorsSilenced: false
            schemaReaderStrictMode: false
            topicName: string
        serviceLog: false
        singleZone:
            enabled: false
        staticIps: false
        tieredStorage:
            enabled: false
    kafkas:
        - accessCert: string
          accessKey: string
          connectUri: string
          restUri: string
          schemaRegistryUri: string
          uris:
            - string
    maintenanceWindowDow: string
    maintenanceWindowTime: string
    plan: string
    project: string
    projectVpcId: string
    serviceIntegrations:
        - integrationType: string
          sourceServiceName: string
    serviceName: string
    staticIps:
        - string
    tags:
        - key: string
          value: string
    techEmails:
        - email: string
    terminationProtection: false
Kafka Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
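For example, the kafka_user_config input can be passed either way. A minimal sketch, assuming a project named example-project (a placeholder):
import pulumi_aiven as aiven

# As a dictionary literal:
kafka_from_dict = aiven.Kafka("kafka-from-dict",
    project="example-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-kafka-dict",
    kafka_user_config={
        "kafka_rest": True,
        "schema_registry": True,
    })

# Equivalent, using the typed argument class:
kafka_from_args = aiven.Kafka("kafka-from-args",
    project="example-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-kafka-args",
    kafka_user_config=aiven.KafkaKafkaUserConfigArgs(
        kafka_rest=True,
        schema_registry=True,
    ))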
The Kafka resource accepts the following input properties:
- Plan string
- Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when going to a smaller plan, such as the new plan having to have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and amount of disk space also vary, but naming is based on memory). The available options can be seen on the Aiven pricing page.
- Project string
- The name of the project this resource belongs to. To set up proper dependencies please refer to this variable as a reference. Changing this property forces recreation of the resource.
- ServiceName string
- Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
- AdditionalDiskSpace string
- Add disk storage in increments of 30 GiB to scale your service. The maximum value depends on the service type and cloud provider. Removing additional storage causes the service nodes to go through a rolling restart, and there might be a short downtime for services without an autoscaler integration or high availability capabilities. The field can be safely removed when autoscaler is enabled without causing any changes.
- CloudName string
- The cloud provider and region the service is hosted in. The format is provider-region, for example: google-europe-west1. The available cloud regions can differ per project and service. Changing this value migrates the service to another cloud provider or region. The migration runs in the background and includes a DNS update to redirect traffic to the new region. Most services experience no downtime, but some databases may have a brief interruption during DNS propagation.
- DefaultAcl bool
- Create a default wildcard Kafka ACL.
- DiskSpace string
- Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing the disk space will result in the service rebalancing.
- Kafkas List<KafkaKafka>
- Kafka server connection details.
- KafkaUserConfig KafkaKafkaUserConfig
- Kafka user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later.
- Karapace bool
- Switch the service to use Karapace for schema registry and REST proxy.
- MaintenanceWindowDow string
- Day of week when maintenance operations should be performed. One of: monday, tuesday, wednesday, etc.
- MaintenanceWindowTime string
- Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- ProjectVpcId string
- Specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself (see the sketch after this list). The service can be freely moved to and from a VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- ServiceIntegrations List<KafkaServiceIntegration>
- Service integrations to specify when creating a service. Not applied after initial service creation.
- StaticIps List<string>
- Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static IP resource is in the 'assigned' state it cannot be unbound from the node again.
- Tags List<KafkaTag>
- Tags are key-value pairs that allow you to categorize services.
- TechEmails List<KafkaTechEmail>
- The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
- TerminationProtection bool
- Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if it is accidentally deleted.
- Plan string
- Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.
- Project string
- The name of the project this resource belongs to. To set up proper dependencies please refer to this variable as a reference. Changing this property forces recreation of the resource.
- ServiceName string
- Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service so name should be picked based on intended service usage rather than current attributes.
- AdditionalDiskSpace string
- Add disk storage in increments of 30 GiB to scale your service. The maximum value depends on the service type and cloud provider. Removing additional storage causes the service nodes to go through a rolling restart, and there might be a short downtime for services without an autoscaler integration or high availability capabilities. The field can be safely removed when autoscaler is enabled without causing any changes.
- CloudName string
- The cloud provider and region the service is hosted in. The format is provider-region, for example:google-europe-west1. The available cloud regions can differ per project and service. Changing this value migrates the service to another cloud provider or region. The migration runs in the background and includes a DNS update to redirect traffic to the new region. Most services experience no downtime, but some databases may have a brief interruption during DNS propagation.
- DefaultAcl bool
- Create a default wildcard Kafka ACL.
- DiskSpace string
- Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
- KafkaUserConfig KafkaKafkaUserConfigArgs
- Kafka user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- Kafkas []KafkaKafkaArgs
- Kafka server connection details.
- Karapace bool
- Switch the service to use Karapace for schema registry and REST proxy.
- MaintenanceWindowDow string
- Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- MaintenanceWindowTime string
- Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- ProjectVpcId string
- Specifies the VPC the service should run in. If the value is not set the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly and the VPC must be in the same cloud and region as the service itself. Project can be freely moved to and from VPC after creation but doing so triggers migration to new servers so the operation can take significant amount of time to complete if the service has a lot of data.
- ServiceIntegrations []KafkaServiceIntegrationArgs
- Service integrations to specify when creating a service. Not applied after initial service creation
- StaticIps []string
- Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static ip resource is in the 'assigned' state it cannot be unbound from the node again
- Tags []KafkaTagArgs
- Tags are key-value pairs that allow you to categorize services.
- TechEmails []KafkaTechEmailArgs
- The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
- TerminationProtection bool
- Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if it is accidentally deleted.
- plan String
- Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.
- project String
- The name of the project this resource belongs to. To set up proper dependencies please refer to this variable as a reference. Changing this property forces recreation of the resource.
- serviceName String
- Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service so name should be picked based on intended service usage rather than current attributes.
- additionalDiskSpace String
- Add disk storage in increments of 30 GiB to scale your service. The maximum value depends on the service type and cloud provider. Removing additional storage causes the service nodes to go through a rolling restart, and there might be a short downtime for services without an autoscaler integration or high availability capabilities. The field can be safely removed when autoscaler is enabled without causing any changes.
- cloudName String
- The cloud provider and region the service is hosted in. The format is provider-region, for example:google-europe-west1. The available cloud regions can differ per project and service. Changing this value migrates the service to another cloud provider or region. The migration runs in the background and includes a DNS update to redirect traffic to the new region. Most services experience no downtime, but some databases may have a brief interruption during DNS propagation.
- defaultAcl Boolean
- Create a default wildcard Kafka ACL.
- diskSpace String
- Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
- kafkaUserConfig KafkaKafkaUserConfig
- Kafka user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- kafkas List<KafkaKafka>
- Kafka server connection details.
- karapace Boolean
- Switch the service to use Karapace for schema registry and REST proxy.
- maintenanceWindowDow String
- Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- maintenanceWindowTime String
- Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- projectVpcId String
- Specifies the VPC the service should run in. If the value is not set the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly and the VPC must be in the same cloud and region as the service itself. Project can be freely moved to and from VPC after creation but doing so triggers migration to new servers so the operation can take significant amount of time to complete if the service has a lot of data.
- serviceIntegrations List<KafkaServiceIntegration>
- Service integrations to specify when creating a service. Not applied after initial service creation
- staticIps List<String>
- Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static ip resource is in the 'assigned' state it cannot be unbound from the node again
- tags List<KafkaTag>
- Tags are key-value pairs that allow you to categorize services.
- techEmails List<KafkaTechEmail>
- The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
- terminationProtection Boolean
- Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if it is accidentally deleted.
- plan string
- Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.
- project string
- The name of the project this resource belongs to. To set up proper dependencies please refer to this variable as a reference. Changing this property forces recreation of the resource.
- serviceName string
- Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service so name should be picked based on intended service usage rather than current attributes.
- additionalDiskSpace string
- Add disk storage in increments of 30 GiB to scale your service. The maximum value depends on the service type and cloud provider. Removing additional storage causes the service nodes to go through a rolling restart, and there might be a short downtime for services without an autoscaler integration or high availability capabilities. The field can be safely removed when autoscaler is enabled without causing any changes.
- cloudName string
- The cloud provider and region the service is hosted in. The format is provider-region, for example:google-europe-west1. The available cloud regions can differ per project and service. Changing this value migrates the service to another cloud provider or region. The migration runs in the background and includes a DNS update to redirect traffic to the new region. Most services experience no downtime, but some databases may have a brief interruption during DNS propagation.
- defaultAcl boolean
- Create a default wildcard Kafka ACL.
- diskSpace string
- Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
- kafkaUserConfig KafkaKafkaUserConfig
- Kafka user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- kafkas KafkaKafka[]
- Kafka server connection details.
- karapace boolean
- Switch the service to use Karapace for schema registry and REST proxy.
- maintenanceWindowDow string
- Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- maintenanceWindowTime string
- Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- projectVpcId string
- Specifies the VPC the service should run in. If the value is not set the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly and the VPC must be in the same cloud and region as the service itself. Project can be freely moved to and from VPC after creation but doing so triggers migration to new servers so the operation can take significant amount of time to complete if the service has a lot of data.
- serviceIntegrations KafkaServiceIntegration[]
- Service integrations to specify when creating a service. Not applied after initial service creation
- staticIps string[]
- Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static ip resource is in the 'assigned' state it cannot be unbound from the node again
- tags KafkaTag[]
- Tags are key-value pairs that allow you to categorize services.
- techEmails KafkaTechEmail[]
- The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
- terminationProtection boolean
- Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if it is accidentally deleted.
- plan str
- Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.
- project str
- The name of the project this resource belongs to. To set up proper dependencies please refer to this variable as a reference. Changing this property forces recreation of the resource.
- service_name str
- Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service so name should be picked based on intended service usage rather than current attributes.
- additional_disk_space str
- Add disk storage in increments of 30 GiB to scale your service. The maximum value depends on the service type and cloud provider. Removing additional storage causes the service nodes to go through a rolling restart, and there might be a short downtime for services without an autoscaler integration or high availability capabilities. The field can be safely removed when autoscaler is enabled without causing any changes.
- cloud_name str
- The cloud provider and region the service is hosted in. The format is provider-region, for example:google-europe-west1. The available cloud regions can differ per project and service. Changing this value migrates the service to another cloud provider or region. The migration runs in the background and includes a DNS update to redirect traffic to the new region. Most services experience no downtime, but some databases may have a brief interruption during DNS propagation.
- default_acl bool
- Create a default wildcard Kafka ACL.
- disk_space str
- Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
- kafka_user_config KafkaKafkaUserConfigArgs
- Kafka user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- kafkas Sequence[KafkaKafkaArgs]
- Kafka server connection details.
- karapace bool
- Switch the service to use Karapace for schema registry and REST proxy.
- maintenance_window_dow str
- Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- maintenance_window_time str
- Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- project_vpc_id str
- Specifies the VPC the service should run in. If the value is not set the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly and the VPC must be in the same cloud and region as the service itself. Project can be freely moved to and from VPC after creation but doing so triggers migration to new servers so the operation can take significant amount of time to complete if the service has a lot of data.
- service_integrations Sequence[KafkaServiceIntegrationArgs]
- Service integrations to specify when creating a service. Not applied after initial service creation
- static_ips Sequence[str]
- Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static ip resource is in the 'assigned' state it cannot be unbound from the node again
- tags Sequence[KafkaTagArgs]
- Tags are key-value pairs that allow you to categorize services.
- tech_emails Sequence[KafkaTechEmailArgs]
- The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
- termination_protection bool
- Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if it is accidentally deleted.
- plan String
- Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.
- project String
- The name of the project this resource belongs to. To set up proper dependencies please refer to this variable as a reference. Changing this property forces recreation of the resource.
- serviceName String
- Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service so name should be picked based on intended service usage rather than current attributes.
- additionalDiskSpace String
- Add disk storage in increments of 30 GiB to scale your service. The maximum value depends on the service type and cloud provider. Removing additional storage causes the service nodes to go through a rolling restart, and there might be a short downtime for services without an autoscaler integration or high availability capabilities. The field can be safely removed when autoscaler is enabled without causing any changes.
- cloudName String
- The cloud provider and region the service is hosted in. The format is provider-region, for example:google-europe-west1. The available cloud regions can differ per project and service. Changing this value migrates the service to another cloud provider or region. The migration runs in the background and includes a DNS update to redirect traffic to the new region. Most services experience no downtime, but some databases may have a brief interruption during DNS propagation.
- defaultAcl Boolean
- Create a default wildcard Kafka ACL.
- diskSpace String
- Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
- kafkaUserConfig Property Map
- Kafka user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- kafkas List<Property Map>
- Kafka server connection details.
- karapace Boolean
- Switch the service to use Karapace for schema registry and REST proxy.
- maintenanceWindowDow String
- Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- maintenanceWindowTime String
- Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- projectVpcId String
- Specifies the VPC the service should run in. If the value is not set the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly and the VPC must be in the same cloud and region as the service itself. Project can be freely moved to and from VPC after creation but doing so triggers migration to new servers so the operation can take significant amount of time to complete if the service has a lot of data.
- serviceIntegrations List<Property Map>
- Service integrations to specify when creating a service. Not applied after initial service creation
- staticIps List<String>
- Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static ip resource is in the 'assigned' state it cannot be unbound from the node again
- tags List<Property Map>
- Tags are key-value pairs that allow you to categorize services.
- techEmails List<Property Map>
- The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
- terminationProtection Boolean
- Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if it is accidentally deleted.
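To show how several of these optional inputs fit together, here is a minimal TypeScript sketch. It assumes an existing project resource named exampleProject (as in the example above) and a companion aiven.ProjectVpc resource; the disk size, tags, and email address are illustrative placeholders rather than recommended values.
import * as pulumi from "@pulumi/pulumi";
import * as aiven from "@pulumi/aiven";
// Hypothetical VPC used only to demonstrate projectVpcId; any VPC in the same cloud and region works.
const exampleVpc = new aiven.ProjectVpc("example_vpc", {
    project: exampleProject.project,
    cloudName: "google-europe-west1",
    networkCidr: "192.168.1.0/24",
});
const protectedKafka = new aiven.Kafka("protected_kafka", {
    project: exampleProject.project,
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "example-kafka-protected",
    projectVpcId: exampleVpc.id,              // pass the VPC as a reference to set up the dependency
    additionalDiskSpace: "30GiB",             // placeholder value, added on top of the plan's base disk
    maintenanceWindowDow: "sunday",
    maintenanceWindowTime: "03:00:00",
    terminationProtection: true,              // recommended for production services
    tags: [{ key: "env", value: "prod" }],    // tags are key/value pairs
    techEmails: [{ email: "ops@example.com" }],
});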
Outputs
All input properties are implicitly available as output properties. Additionally, the Kafka resource produces the following output properties:
- Components List<KafkaComponent>
- Service component information objects
- DiskSpaceCap string
- The maximum disk space of the service, possible values depend on the service type, the cloud provider and the project.
- DiskSpaceDefault string
- The default disk space of the service, possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
- DiskSpaceStep string
- The default disk space step of the service, possible values depend on the service type, the cloud provider and the project. disk_space needs to increment from disk_space_default by increments of this size.
- DiskSpaceUsed string
- Disk space that service is currently using
- Id string
- The provider-assigned unique ID for this managed resource.
- ServiceHost string
- The hostname of the service.
- ServicePassword string
- Password used for connecting to the service, if applicable
- ServicePort int
- The port of the service
- ServiceType string
- Aiven internal service type code
- ServiceUri string
- URI for connecting to the service. Service specific info is under "kafka", "pg", etc.
- ServiceUsername string
- Username used for connecting to the service, if applicable
- State string
- Components []KafkaComponent
- Service component information objects
- DiskSpaceCap string
- The maximum disk space of the service, possible values depend on the service type, the cloud provider and the project.
- DiskSpaceDefault string
- The default disk space of the service, possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
- DiskSpaceStep string
- The default disk space step of the service, possible values depend on the service type, the cloud provider and the project. disk_space needs to increment from disk_space_default by increments of this size.
- DiskSpaceUsed string
- Disk space that service is currently using
- Id string
- The provider-assigned unique ID for this managed resource.
- ServiceHost string
- The hostname of the service.
- ServicePassword string
- Password used for connecting to the service, if applicable
- ServicePort int
- The port of the service
- ServiceType string
- Aiven internal service type code
- ServiceUri string
- URI for connecting to the service. Service specific info is under "kafka", "pg", etc.
- ServiceUsername string
- Username used for connecting to the service, if applicable
- State string
- components List<KafkaComponent>
- Service component information objects
- diskSpaceCap String
- The maximum disk space of the service, possible values depend on the service type, the cloud provider and the project.
- diskSpaceDefault String
- The default disk space of the service, possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
- diskSpaceStep String
- The default disk space step of the service, possible values depend on the service type, the cloud provider and the project. disk_space needs to increment from disk_space_default by increments of this size.
- diskSpaceUsed String
- Disk space that service is currently using
- id String
- The provider-assigned unique ID for this managed resource.
- serviceHost String
- The hostname of the service.
- servicePassword String
- Password used for connecting to the service, if applicable
- servicePort Integer
- The port of the service
- serviceType String
- Aiven internal service type code
- serviceUri String
- URI for connecting to the service. Service specific info is under "kafka", "pg", etc.
- serviceUsername String
- Username used for connecting to the service, if applicable
- state String
- components KafkaComponent[]
- Service component information objects
- diskSpaceCap string
- The maximum disk space of the service, possible values depend on the service type, the cloud provider and the project.
- diskSpaceDefault string
- The default disk space of the service, possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
- diskSpaceStep string
- The default disk space step of the service, possible values depend on the service type, the cloud provider and the project. disk_space needs to increment from disk_space_default by increments of this size.
- diskSpaceUsed string
- Disk space that service is currently using
- id string
- The provider-assigned unique ID for this managed resource.
- serviceHost string
- The hostname of the service.
- servicePassword string
- Password used for connecting to the service, if applicable
- servicePort number
- The port of the service
- serviceType string
- Aiven internal service type code
- serviceUri string
- URI for connecting to the service. Service specific info is under "kafka", "pg", etc.
- serviceUsername string
- Username used for connecting to the service, if applicable
- state string
- components Sequence[KafkaComponent]
- Service component information objects
- disk_space_cap str
- The maximum disk space of the service, possible values depend on the service type, the cloud provider and the project.
- disk_space_default str
- The default disk space of the service, possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
- disk_space_step str
- The default disk space step of the service, possible values depend on the service type, the cloud provider and the project. disk_space needs to increment from disk_space_default by increments of this size.
- disk_space_used str
- Disk space that service is currently using
- id str
- The provider-assigned unique ID for this managed resource.
- service_host str
- The hostname of the service.
- service_password str
- Password used for connecting to the service, if applicable
- service_port int
- The port of the service
- service_type str
- Aiven internal service type code
- service_uri str
- URI for connecting to the service. Service specific info is under "kafka", "pg", etc.
- service_username str
- Username used for connecting to the service, if applicable
- state str
- components List<Property Map>
- Service component information objects
- diskSpaceCap String
- The maximum disk space of the service, possible values depend on the service type, the cloud provider and the project.
- diskSpaceDefault String
- The default disk space of the service, possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
- diskSpaceStep String
- The default disk space step of the service, possible values depend on the service type, the cloud provider and the project. disk_space needs to increment from disk_space_default by increments of this size.
- diskSpaceUsed String
- Disk space that service is currently using
- id String
- The provider-assigned unique ID for this managed resource.
- serviceHost String
- The hostname of the service.
- servicePassword String
- Password used for connecting to the service, if applicable
- servicePort Number
- The port of the service
- serviceType String
- Aiven internal service type code
- serviceUri String
- URI for connecting to the service. Service specific info is under "kafka", "pg", etc.
- serviceUsername String
- Username used for connecting to the service, if applicable
- state String
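Because all inputs are also outputs, these values can be read straight off the resource. For example, a short TypeScript sketch that exports a few connection details from the exampleKafka service defined earlier (serviceUri embeds credentials, so it is wrapped as a secret here):
export const kafkaServiceUri = pulumi.secret(exampleKafka.serviceUri); // URI includes the password
export const kafkaHost = exampleKafka.serviceHost;
export const kafkaPort = exampleKafka.servicePort;
export const kafkaState = exampleKafka.state;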
Look up Existing Kafka Resource
Get an existing Kafka resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: KafkaState, opts?: CustomResourceOptions): Kafka
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        additional_disk_space: Optional[str] = None,
        cloud_name: Optional[str] = None,
        components: Optional[Sequence[KafkaComponentArgs]] = None,
        default_acl: Optional[bool] = None,
        disk_space: Optional[str] = None,
        disk_space_cap: Optional[str] = None,
        disk_space_default: Optional[str] = None,
        disk_space_step: Optional[str] = None,
        disk_space_used: Optional[str] = None,
        kafka_user_config: Optional[KafkaKafkaUserConfigArgs] = None,
        kafkas: Optional[Sequence[KafkaKafkaArgs]] = None,
        karapace: Optional[bool] = None,
        maintenance_window_dow: Optional[str] = None,
        maintenance_window_time: Optional[str] = None,
        plan: Optional[str] = None,
        project: Optional[str] = None,
        project_vpc_id: Optional[str] = None,
        service_host: Optional[str] = None,
        service_integrations: Optional[Sequence[KafkaServiceIntegrationArgs]] = None,
        service_name: Optional[str] = None,
        service_password: Optional[str] = None,
        service_port: Optional[int] = None,
        service_type: Optional[str] = None,
        service_uri: Optional[str] = None,
        service_username: Optional[str] = None,
        state: Optional[str] = None,
        static_ips: Optional[Sequence[str]] = None,
        tags: Optional[Sequence[KafkaTagArgs]] = None,
        tech_emails: Optional[Sequence[KafkaTechEmailArgs]] = None,
        termination_protection: Optional[bool] = None) -> Kafka
func GetKafka(ctx *Context, name string, id IDInput, state *KafkaState, opts ...ResourceOption) (*Kafka, error)
public static Kafka Get(string name, Input<string> id, KafkaState? state, CustomResourceOptions? opts = null)
public static Kafka get(String name, Output<String> id, KafkaState state, CustomResourceOptions options)
resources:
  _:
    type: aiven:Kafka
    get:
      id: ${id}
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
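As a rough sketch, looking up an existing service in TypeScript could look like the following. The example-project/example-kafka ID is an assumption based on the provider's usual project/service-name identifiers; substitute the real ID of your service.
import * as aiven from "@pulumi/aiven";
// Look up an existing Kafka service by its provider ID and read one of its outputs.
const existing = aiven.Kafka.get("existing-kafka", "example-project/example-kafka");
export const existingKafkaUri = existing.serviceUri;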
- AdditionalDiskSpace string
- Add disk storage in increments of 30 GiB to scale your service. The maximum value depends on the service type and cloud provider. Removing additional storage causes the service nodes to go through a rolling restart, and there might be a short downtime for services without an autoscaler integration or high availability capabilities. The field can be safely removed when autoscaler is enabled without causing any changes.
- CloudName string
- The cloud provider and region the service is hosted in. The format is provider-region, for example:google-europe-west1. The available cloud regions can differ per project and service. Changing this value migrates the service to another cloud provider or region. The migration runs in the background and includes a DNS update to redirect traffic to the new region. Most services experience no downtime, but some databases may have a brief interruption during DNS propagation.
- Components List<KafkaComponent>
- Service component information objects
- DefaultAcl bool
- Create a default wildcard Kafka ACL.
- DiskSpace string
- Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
- DiskSpaceCap string
- The maximum disk space of the service, possible values depend on the service type, the cloud provider and the project.
- DiskSpaceDefault string
- The default disk space of the service, possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
- DiskSpaceStep string
- The default disk space step of the service, possible values depend on the service type, the cloud provider and the project. disk_space needs to increment from disk_space_default by increments of this size.
- DiskSpaceUsed string
- Disk space that service is currently using
- Kafkas List<KafkaKafka>
- Kafka server connection details.
- KafkaUserConfig KafkaKafkaUserConfig
- Kafka user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- Karapace bool
- Switch the service to use Karapace for schema registry and REST proxy.
- MaintenanceWindowDow string
- Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- MaintenanceWindowTime string
- Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- Plan string
- Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.
- Project string
- The name of the project this resource belongs to. To set up proper dependencies please refer to this variable as a reference. Changing this property forces recreation of the resource.
- ProjectVpcId string
- Specifies the VPC the service should run in. If the value is not set the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly and the VPC must be in the same cloud and region as the service itself. Project can be freely moved to and from VPC after creation but doing so triggers migration to new servers so the operation can take significant amount of time to complete if the service has a lot of data.
- ServiceHost string
- The hostname of the service.
- ServiceIntegrations List<KafkaServiceIntegration>
- Service integrations to specify when creating a service. Not applied after initial service creation
- ServiceName string
- Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service so name should be picked based on intended service usage rather than current attributes.
- ServicePassword string
- Password used for connecting to the service, if applicable
- ServicePort int
- The port of the service
- ServiceType string
- Aiven internal service type code
- ServiceUri string
- URI for connecting to the service. Service specific info is under "kafka", "pg", etc.
- ServiceUsername string
- Username used for connecting to the service, if applicable
- State string
- StaticIps List<string>
- Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static ip resource is in the 'assigned' state it cannot be unbound from the node again
- Tags List<KafkaTag>
- Tags are key-value pairs that allow you to categorize services.
- TechEmails List<KafkaTechEmail>
- The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
- TerminationProtection bool
- Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if it is accidentally deleted.
- AdditionalDiskSpace string
- Add disk storage in increments of 30 GiB to scale your service. The maximum value depends on the service type and cloud provider. Removing additional storage causes the service nodes to go through a rolling restart, and there might be a short downtime for services without an autoscaler integration or high availability capabilities. The field can be safely removed when autoscaler is enabled without causing any changes.
- CloudName string
- The cloud provider and region the service is hosted in. The format is provider-region, for example:google-europe-west1. The available cloud regions can differ per project and service. Changing this value migrates the service to another cloud provider or region. The migration runs in the background and includes a DNS update to redirect traffic to the new region. Most services experience no downtime, but some databases may have a brief interruption during DNS propagation.
- Components []KafkaComponentArgs
- Service component information objects
- DefaultAcl bool
- Create a default wildcard Kafka ACL.
- DiskSpace string
- Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
- DiskSpaceCap string
- The maximum disk space of the service, possible values depend on the service type, the cloud provider and the project.
- DiskSpaceDefault string
- The default disk space of the service, possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
- DiskSpaceStep string
- The default disk space step of the service, possible values depend on the service type, the cloud provider and the project. disk_space needs to increment from disk_space_default by increments of this size.
- DiskSpaceUsed string
- Disk space that service is currently using
- KafkaUserConfig KafkaKafkaUserConfigArgs
- Kafka user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- Kafkas []KafkaKafkaArgs
- Kafka server connection details.
- Karapace bool
- Switch the service to use Karapace for schema registry and REST proxy.
- MaintenanceWindowDow string
- Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- MaintenanceWindowTime string
- Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- Plan string
- Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.
- Project string
- The name of the project this resource belongs to. To set up proper dependencies please refer to this variable as a reference. Changing this property forces recreation of the resource.
- ProjectVpcId string
- Specifies the VPC the service should run in. If the value is not set the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly and the VPC must be in the same cloud and region as the service itself. Project can be freely moved to and from VPC after creation but doing so triggers migration to new servers so the operation can take significant amount of time to complete if the service has a lot of data.
- ServiceHost string
- The hostname of the service.
- ServiceIntegrations []KafkaServiceIntegrationArgs
- Service integrations to specify when creating a service. Not applied after initial service creation
- ServiceName string
- Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service so name should be picked based on intended service usage rather than current attributes.
- ServicePassword string
- Password used for connecting to the service, if applicable
- ServicePort int
- The port of the service
- ServiceType string
- Aiven internal service type code
- ServiceUri string
- URI for connecting to the service. Service specific info is under "kafka", "pg", etc.
- ServiceUsername string
- Username used for connecting to the service, if applicable
- State string
- StaticIps []string
- Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static ip resource is in the 'assigned' state it cannot be unbound from the node again
- Tags []KafkaTagArgs
- Tags are key-value pairs that allow you to categorize services.
- TechEmails []KafkaTechEmailArgs
- The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
- TerminationProtection bool
- Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if it is accidentally deleted.
- additionalDiskSpace String
- Add disk storage in increments of 30 GiB to scale your service. The maximum value depends on the service type and cloud provider. Removing additional storage causes the service nodes to go through a rolling restart, and there might be a short downtime for services without an autoscaler integration or high availability capabilities. The field can be safely removed when autoscaler is enabled without causing any changes.
- cloudName String
- The cloud provider and region the service is hosted in. The format is provider-region, for example:google-europe-west1. The available cloud regions can differ per project and service. Changing this value migrates the service to another cloud provider or region. The migration runs in the background and includes a DNS update to redirect traffic to the new region. Most services experience no downtime, but some databases may have a brief interruption during DNS propagation.
- components List<KafkaComponent>
- Service component information objects
- defaultAcl Boolean
- Create a default wildcard Kafka ACL.
- diskSpace String
- Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
- diskSpaceCap String
- The maximum disk space of the service, possible values depend on the service type, the cloud provider and the project.
- diskSpaceDefault String
- The default disk space of the service, possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
- diskSpaceStep String
- The default disk space step of the service, possible values depend on the service type, the cloud provider and the project. disk_space needs to increment from disk_space_default by increments of this size.
- diskSpaceUsed String
- Disk space that service is currently using
- kafkaUserConfig KafkaKafkaUserConfig
- Kafka user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- kafkas List<KafkaKafka>
- Kafka server connection details.
- karapace Boolean
- Switch the service to use Karapace for schema registry and REST proxy.
- maintenanceWindowDow String
- Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- maintenanceWindowTime String
- Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- plan String
- Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.
- project String
- The name of the project this resource belongs to. To set up proper dependencies please refer to this variable as a reference. Changing this property forces recreation of the resource.
- projectVpcId String
- Specifies the VPC the service should run in. If the value is not set the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly and the VPC must be in the same cloud and region as the service itself. Project can be freely moved to and from VPC after creation but doing so triggers migration to new servers so the operation can take significant amount of time to complete if the service has a lot of data.
- serviceHost String
- The hostname of the service.
- serviceIntegrations List<KafkaServiceIntegration>
- Service integrations to specify when creating a service. Not applied after initial service creation
- serviceName String
- Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service so name should be picked based on intended service usage rather than current attributes.
- servicePassword String
- Password used for connecting to the service, if applicable
- servicePort Integer
- The port of the service
- serviceType String
- Aiven internal service type code
- serviceUri String
- URI for connecting to the service. Service specific info is under "kafka", "pg", etc.
- serviceUsername String
- Username used for connecting to the service, if applicable
- state String
- staticIps List<String>
- Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static ip resource is in the 'assigned' state it cannot be unbound from the node again
- tags List<KafkaTag>
- Tags are key-value pairs that allow you to categorize services.
- techEmails List<KafkaTechEmail>
- The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
- terminationProtection Boolean
- Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if it is accidentally deleted.
- additionalDiskSpace string
- Add disk storage in increments of 30 GiB to scale your service. The maximum value depends on the service type and cloud provider. Removing additional storage causes the service nodes to go through a rolling restart, and there might be a short downtime for services without an autoscaler integration or high availability capabilities. The field can be safely removed when autoscaler is enabled without causing any changes.
- cloudName string
- The cloud provider and region the service is hosted in. The format is provider-region, for example:google-europe-west1. The available cloud regions can differ per project and service. Changing this value migrates the service to another cloud provider or region. The migration runs in the background and includes a DNS update to redirect traffic to the new region. Most services experience no downtime, but some databases may have a brief interruption during DNS propagation.
- components KafkaComponent[]
- Service component information objects
- defaultAcl boolean
- Create a default wildcard Kafka ACL.
- diskSpace string
- Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
- diskSpaceCap string
- The maximum disk space of the service, possible values depend on the service type, the cloud provider and the project.
- diskSpaceDefault string
- The default disk space of the service, possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
- diskSpaceStep string
- The default disk space step of the service, possible values depend on the service type, the cloud provider and the project. disk_space needs to increment from disk_space_default by increments of this size.
- diskSpaceUsed string
- Disk space that service is currently using
- kafkaUserConfig KafkaKafkaUserConfig
- Kafka user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- kafkas KafkaKafka[]
- Kafka server connection details.
- karapace boolean
- Switch the service to use Karapace for schema registry and REST proxy.
- maintenanceWindowDow string
- Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- maintenanceWindowTime string
- Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- plan string
- Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.
- project string
- The name of the project this resource belongs to. To set up proper dependencies please refer to this variable as a reference. Changing this property forces recreation of the resource.
- projectVpcId string
- Specifies the VPC the service should run in. If the value is not set the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly and the VPC must be in the same cloud and region as the service itself. Project can be freely moved to and from VPC after creation but doing so triggers migration to new servers so the operation can take significant amount of time to complete if the service has a lot of data.
- serviceHost string
- The hostname of the service.
- serviceIntegrations KafkaServiceIntegration[]
- Service integrations to specify when creating a service. Not applied after initial service creation
- serviceName string
- Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service so name should be picked based on intended service usage rather than current attributes.
- servicePassword string
- Password used for connecting to the service, if applicable
- servicePort number
- The port of the service
- serviceType string
- Aiven internal service type code
- serviceUri string
- URI for connecting to the service. Service specific info is under "kafka", "pg", etc.
- serviceUsername string
- Username used for connecting to the service, if applicable
- state string
- staticIps string[]
- Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static ip resource is in the 'assigned' state it cannot be unbound from the node again
- tags KafkaTag[]
- Tags are key-value pairs that allow you to categorize services.
- techEmails KafkaTechEmail[]
- The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
- terminationProtection boolean
- Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if it is accidentally deleted.
- additional_disk_space str
- Add disk storage in increments of 30 GiB to scale your service. The maximum value depends on the service type and cloud provider. Removing additional storage causes the service nodes to go through a rolling restart, and there might be a short downtime for services without an autoscaler integration or high availability capabilities. The field can be safely removed when autoscaler is enabled without causing any changes.
- cloud_name str
- The cloud provider and region the service is hosted in. The format is provider-region, for example:google-europe-west1. The available cloud regions can differ per project and service. Changing this value migrates the service to another cloud provider or region. The migration runs in the background and includes a DNS update to redirect traffic to the new region. Most services experience no downtime, but some databases may have a brief interruption during DNS propagation.
- components Sequence[KafkaComponentArgs]
- Service component information objects
- default_acl bool
- Create a default wildcard Kafka ACL.
- disk_space str
- Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value results in the service rebalancing.
- disk_space_cap str
- The maximum disk space of the service, possible values depend on the service type, the cloud provider and the project.
- disk_space_default str
- The default disk space of the service, possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
- disk_space_step str
- The default disk space step of the service, possible values depend on the service type, the cloud provider and the project. disk_space needs to increment from disk_space_default by increments of this size.
- disk_space_used str
- Disk space that service is currently using
- kafka_user_config KafkaKafkaUserConfigArgs
- Kafka user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- kafkas Sequence[KafkaKafkaArgs]
- Kafka server connection details.
- karapace bool
- Switch the service to use Karapace for schema registry and REST proxy.
- maintenance_window_dow str
- Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- maintenance_window_time str
- Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- plan str
- Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when going to a smaller plan such as the new plan must have sufficient amount of disk space to store all current data and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x where x is (roughly) the amount of memory on each node (also other attributes like number of CPUs and amount of disk space varies but naming is based on memory). The available options can be seen from the Aiven pricing page.
- project str
- The name of the project this resource belongs to. To set up proper dependencies please refer to this variable as a reference. Changing this property forces recreation of the resource.
- project_vpc_id str
- Specifies the VPC the service should run in. If the value is not set the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly and the VPC must be in the same cloud and region as the service itself. Project can be freely moved to and from VPC after creation but doing so triggers migration to new servers so the operation can take significant amount of time to complete if the service has a lot of data.
- service_host str
- The hostname of the service.
- service_integrations Sequence[KafkaServiceIntegrationArgs]
- Service integrations to specify when creating a service. Not applied after initial service creation
- service_name str
- Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service so name should be picked based on intended service usage rather than current attributes.
- service_password str
- Password used for connecting to the service, if applicable
- service_port int
- The port of the service
- service_type str
- Aiven internal service type code
- service_uri str
- URI for connecting to the service. Service specific info is under "kafka", "pg", etc.
- service_username str
- Username used for connecting to the service, if applicable
- state str
- static_ips Sequence[str]
- Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static ip resource is in the 'assigned' state it cannot be unbound from the node again
- tags Sequence[KafkaTagArgs]
- Tags are key-value pairs that allow you to categorize services.
- tech_emails Sequence[KafkaTechEmailArgs]
- The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
- termination_protection bool
- Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup in case of accidental deletion.
- additionalDiskSpace String
- Add disk storage in increments of 30 GiB to scale your service. The maximum value depends on the service type and cloud provider. Removing additional storage causes the service nodes to go through a rolling restart, and there might be a short downtime for services without an autoscaler integration or high availability capabilities. The field can be safely removed when autoscaler is enabled without causing any changes.
- cloudName String
- The cloud provider and region the service is hosted in. The format is provider-region, for example:google-europe-west1. The available cloud regions can differ per project and service. Changing this value migrates the service to another cloud provider or region. The migration runs in the background and includes a DNS update to redirect traffic to the new region. Most services experience no downtime, but some databases may have a brief interruption during DNS propagation.
- components List<Property Map>
- Service component information objects
- defaultAcl Boolean
- Create a default wildcard Kafka ACL.
- diskSpace String
- Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value results in the service rebalancing.
- diskSpaceCap String
- The maximum disk space of the service, possible values depend on the service type, the cloud provider and the project.
- diskSpaceDefault String
- The default disk space of the service, possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
- diskSpaceStep String
- The default disk space step of the service, possible values depend on the service type, the cloud provider and the project. disk_space needs to increment from disk_space_default by increments of this size.
- diskSpaceUsed String
- Disk space that service is currently using
- kafkaUserConfig Property Map
- Kafka user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- kafkas List<Property Map>
- Kafka server connection details.
- karapace Boolean
- Switch the service to use Karapace for schema registry and REST proxy.
- maintenanceWindowDow String
- Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- maintenanceWindowTime String
- Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- plan String
- Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when going to a smaller plan such as the new plan must have sufficient amount of disk space to store all current data and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x where x is (roughly) the amount of memory on each node (also other attributes like number of CPUs and amount of disk space varies but naming is based on memory). The available options can be seen from the Aiven pricing page.
- project String
- The name of the project this resource belongs to. To set up proper dependencies please refer to this variable as a reference. Changing this property forces recreation of the resource.
- projectVpcId String
- Specifies the VPC the service should run in. If the value is not set the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly and the VPC must be in the same cloud and region as the service itself. Project can be freely moved to and from VPC after creation but doing so triggers migration to new servers so the operation can take significant amount of time to complete if the service has a lot of data.
- serviceHost String
- The hostname of the service.
- serviceIntegrations List<Property Map>
- Service integrations to specify when creating a service. Not applied after initial service creation
- serviceName String
- Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service so name should be picked based on intended service usage rather than current attributes.
- servicePassword String
- Password used for connecting to the service, if applicable
- servicePort Number
- The port of the service
- serviceType String
- Aiven internal service type code
- serviceUri String
- URI for connecting to the service. Service specific info is under "kafka", "pg", etc.
- serviceUsername String
- Username used for connecting to the service, if applicable
- state String
- staticIps List<String>
- Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static ip resource is in the 'assigned' state it cannot be unbound from the node again
- tags List<Property Map>
- Tags are key-value pairs that allow you to categorize services.
- techEmails List<Property Map>
- The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
- terminationProtection Boolean
- Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup in case of accidental deletion. See the example below.
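For example, a minimal sketch of how the deletion-protection arguments above can be combined; the project, plan and region values are placeholders to adapt to your own setup:
import * as aiven from "@pulumi/aiven";
// Minimal sketch: a production-leaning service with termination protection on
// and without the default wildcard ACL. Project, plan and region are placeholders.
const protectedKafka = new aiven.Kafka("protected_kafka", {
    project: "my-project",
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "protected-kafka",
    defaultAcl: false,              // skip creating the default wildcard Kafka ACL
    terminationProtection: true,    // block accidental deletion of the service
});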
Supporting Types
KafkaComponent, KafkaComponentArgs    
- Component string
- Service component name
- ConnectionUri string
- Connection info for connecting to the service component. This is a combination of host and port.
- Host string
- Host name for connecting to the service component
- KafkaAuthenticationMethod string
- Kafka authentication method. This is a value specific to the 'kafka' service component
- KafkaSslCa string
- Kafka certificate used. The possible values are letsencrypt and project_ca.
- Port int
- Port number for connecting to the service component
- Route string
- Network access route
- Ssl bool
- Whether the endpoint is encrypted or accepts plaintext. By default endpoints are always encrypted and this property is only included for service components that may disable encryption.
- Usage string
- DNS usage name
- Component string
- Service component name
- ConnectionUri string
- Connection info for connecting to the service component. This is a combination of host and port.
- Host string
- Host name for connecting to the service component
- KafkaAuthenticationMethod string
- Kafka authentication method. This is a value specific to the 'kafka' service component
- KafkaSslCa string
- Kafka certificate used. The possible values are letsencrypt and project_ca.
- Port int
- Port number for connecting to the service component
- Route string
- Network access route
- Ssl bool
- Whether the endpoint is encrypted or accepts plaintext. By default endpoints are always encrypted and this property is only included for service components that may disable encryption.
- Usage string
- DNS usage name
- component String
- Service component name
- connectionUri String
- Connection info for connecting to the service component. This is a combination of host and port.
- host String
- Host name for connecting to the service component
- kafkaAuthenticationMethod String
- Kafka authentication method. This is a value specific to the 'kafka' service component
- kafkaSslCa String
- Kafka certificate used. The possible values are letsencrypt and project_ca.
- port Integer
- Port number for connecting to the service component
- route String
- Network access route
- ssl Boolean
- Whether the endpoint is encrypted or accepts plaintext. By default endpoints are always encrypted and this property is only included for service components that may disable encryption.
- usage String
- DNS usage name
- component string
- Service component name
- connectionUri string
- Connection info for connecting to the service component. This is a combination of host and port.
- host string
- Host name for connecting to the service component
- kafkaAuthenticationMethod string
- Kafka authentication method. This is a value specific to the 'kafka' service component
- kafkaSslCa string
- Kafka certificate used. The possible values are letsencrypt and project_ca.
- port number
- Port number for connecting to the service component
- route string
- Network access route
- ssl boolean
- Whether the endpoint is encrypted or accepts plaintext. By default endpoints are always encrypted and this property is only included for service components that may disable encryption.
- usage string
- DNS usage name
- component str
- Service component name
- connection_uri str
- Connection info for connecting to the service component. This is a combination of host and port.
- host str
- Host name for connecting to the service component
- kafka_authentication_method str
- Kafka authentication method. This is a value specific to the 'kafka' service component
- kafka_ssl_ca str
- Kafka certificate used. The possible values are letsencrypt and project_ca.
- port int
- Port number for connecting to the service component
- route str
- Network access route
- ssl bool
- Whether the endpoint is encrypted or accepts plaintext. By default endpoints are always encrypted and this property is only included for service components that may disable encryption.
- usage str
- DNS usage name
- component String
- Service component name
- connectionUri String
- Connection info for connecting to the service component. This is a combination of host and port.
- host String
- Host name for connecting to the service component
- kafkaAuthenticationMethod String
- Kafka authentication method. This is a value specific to the 'kafka' service component
- kafkaSslCa String
- Kafka certificate used. The possible values are letsencrypt and project_ca.
- port Number
- Port number for connecting to the service component
- route String
- Network access route
- ssl Boolean
- Whether the endpoint is encrypted or accepts plaintext. By default endpoints are always encrypted and this property is only included for service components that may disable encryption.
- usage String
- DNS usage name
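The components list is exported once the service exists. A sketch, with placeholder service arguments, of reading one component's connection details from it:
import * as aiven from "@pulumi/aiven";
// Placeholder service; swap in your own project, plan and region.
const service = new aiven.Kafka("component_example", {
    project: "my-project",
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "component-example",
});
// Export the connection URI of the "kafka" component, if present.
export const kafkaComponentUri = service.components.apply(components =>
    components.find(c => c.component === "kafka")?.connectionUri,
);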
KafkaKafka, KafkaKafkaArgs    
- AccessCert string
- The Kafka client certificate.
- AccessKey string
- The Kafka client certificate key.
- ConnectUri string
- The Kafka Connect URI.
- RestUri string
- The Kafka REST URI.
- SchemaRegistryUri string
- The Schema Registry URI.
- Uris List<string>
- Kafka server URIs.
- AccessCert string
- The Kafka client certificate.
- AccessKey string
- The Kafka client certificate key.
- ConnectUri string
- The Kafka Connect URI.
- RestUri string
- The Kafka REST URI.
- SchemaRegistryUri string
- The Schema Registry URI.
- Uris []string
- Kafka server URIs.
- accessCert String
- The Kafka client certificate.
- accessKey String
- The Kafka client certificate key.
- connectUri String
- The Kafka Connect URI.
- restUri String
- The Kafka REST URI.
- schemaRegistryUri String
- The Schema Registry URI.
- uris List<String>
- Kafka server URIs.
- accessCert string
- The Kafka client certificate.
- accessKey string
- The Kafka client certificate key.
- connectUri string
- The Kafka Connect URI.
- restUri string
- The Kafka REST URI.
- schemaRegistryUri string
- The Schema Registry URI.
- uris string[]
- Kafka server URIs.
- access_cert str
- The Kafka client certificate.
- access_key str
- The Kafka client certificate key.
- connect_uri str
- The Kafka Connect URI.
- rest_uri str
- The Kafka REST URI.
- schema_registry_uri str
- The Schema Registry URI.
- uris Sequence[str]
- Kafka server URIs.
- accessCert String
- The Kafka client certificate.
- accessKey String
- The Kafka client certificate key.
- connectUri String
- The Kafka Connect URI.
- restUri String
- The Kafka REST URI.
- schemaRegistryUri String
- The Schema Registry URI.
- uris List<String>
- Kafka server URIs.
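These connection details are exported under the resource's kafkas attribute. Continuing the sketch above, they can be surfaced as stack outputs; the client certificate and key are sensitive, so they are wrapped as secrets:
import * as pulumi from "@pulumi/pulumi";
// `service` is the aiven.Kafka resource declared in the previous sketch.
export const kafkaServiceUris = service.kafkas.apply(k => k[0]?.uris);
export const kafkaAccessCert = pulumi.secret(service.kafkas.apply(k => k[0]?.accessCert));
export const kafkaAccessKey = pulumi.secret(service.kafkas.apply(k => k[0]?.accessKey));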
KafkaKafkaUserConfig, KafkaKafkaUserConfigArgs        
- AdditionalBackupRegions string
- Additional Cloud Regions for Backup Replication.
- AivenKafkaTopicMessages bool
- Allow access to read Kafka topic messages in the Aiven Console and REST API.
- CustomDomain string
- Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: grafana.example.org.
- FollowerFetching KafkaKafkaUserConfigFollowerFetching
- Enable follower fetching
- IpFilterObjects List<KafkaKafkaUserConfigIpFilterObject>
- Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16
- IpFilterStrings List<string>
- Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- IpFilters List<string>
- Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- Kafka KafkaKafkaUserConfigKafka
- Kafka broker configuration values
- KafkaAuthenticationMethods KafkaKafkaUserConfigKafkaAuthenticationMethods
- Kafka authentication methods
- KafkaConnect bool
- Enable Kafka Connect service. Default: false.
- KafkaConnectConfig KafkaKafkaUserConfigKafkaConnectConfig
- Kafka Connect configuration values
- KafkaConnectSecretProviders List<KafkaKafkaUserConfigKafkaConnectSecretProvider>
- KafkaRest bool
- Enable Kafka-REST service. Default: false.
- KafkaRestAuthorization bool
- Enable authorization in Kafka-REST service.
- KafkaRestConfig KafkaKafkaUserConfigKafkaRestConfig
- Kafka REST configuration
- KafkaSaslMechanisms KafkaKafkaUserConfigKafkaSaslMechanisms
- Kafka SASL mechanisms
- KafkaVersion string
- Enum: 3.1,3.2,3.3,3.4,3.5,3.6,3.7,3.8,3.9, and newer. Kafka major version.
- LetsencryptSaslPrivatelink bool
- Use Letsencrypt CA for Kafka SASL via Privatelink.
- PrivateAccess KafkaKafkaUserConfigPrivateAccess
- Allow access to selected service ports from private networks
- PrivatelinkAccess KafkaKafkaUserConfigPrivatelinkAccess
- Allow access to selected service components through Privatelink
- PublicAccess KafkaKafkaUserConfigPublicAccess
- Allow access to selected service ports from the public Internet
- SchemaRegistry bool
- Enable Schema-Registry service. Default: false.
- SchemaRegistryConfig KafkaKafkaUserConfigSchemaRegistryConfig
- Schema Registry configuration
- ServiceLog bool
- Store logs for the service so that they are available in the HTTP API and console.
- SingleZone KafkaKafkaUserConfigSingleZone
- Single-zone configuration
- StaticIps bool
- Use static public IP addresses.
- TieredStorage KafkaKafkaUserConfigTieredStorage
- Tiered storage configuration
- AdditionalBackupRegions string
- Additional Cloud Regions for Backup Replication.
- AivenKafkaTopicMessages bool
- Allow access to read Kafka topic messages in the Aiven Console and REST API.
- CustomDomain string
- Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: grafana.example.org.
- FollowerFetching KafkaKafkaUserConfigFollowerFetching
- Enable follower fetching
- IpFilterObjects []KafkaKafkaUserConfigIpFilterObject
- Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16
- IpFilterStrings []string
- Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- IpFilters []string
- Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- Kafka KafkaKafkaUserConfigKafka
- Kafka broker configuration values
- KafkaAuthenticationMethods KafkaKafkaUserConfigKafkaAuthenticationMethods
- Kafka authentication methods
- KafkaConnect bool
- Enable Kafka Connect service. Default: false.
- KafkaConnectConfig KafkaKafkaUserConfigKafkaConnectConfig
- Kafka Connect configuration values
- KafkaConnectSecretProviders []KafkaKafkaUserConfigKafkaConnectSecretProvider
- KafkaRest bool
- Enable Kafka-REST service. Default: false.
- KafkaRestAuthorization bool
- Enable authorization in Kafka-REST service.
- KafkaRestConfig KafkaKafkaUserConfigKafkaRestConfig
- Kafka REST configuration
- KafkaSaslMechanisms KafkaKafkaUserConfigKafkaSaslMechanisms
- Kafka SASL mechanisms
- KafkaVersion string
- Enum: 3.1,3.2,3.3,3.4,3.5,3.6,3.7,3.8,3.9, and newer. Kafka major version.
- LetsencryptSaslPrivatelink bool
- Use Letsencrypt CA for Kafka SASL via Privatelink.
- PrivateAccess KafkaKafkaUserConfigPrivateAccess
- Allow access to selected service ports from private networks
- PrivatelinkAccess KafkaKafkaUserConfigPrivatelinkAccess
- Allow access to selected service components through Privatelink
- PublicAccess KafkaKafkaUserConfigPublicAccess
- Allow access to selected service ports from the public Internet
- SchemaRegistry bool
- Enable Schema-Registry service. Default: false.
- SchemaRegistryConfig KafkaKafkaUserConfigSchemaRegistryConfig
- Schema Registry configuration
- ServiceLog bool
- Store logs for the service so that they are available in the HTTP API and console.
- SingleZone KafkaKafkaUserConfigSingleZone
- Single-zone configuration
- StaticIps bool
- Use static public IP addresses.
- TieredStorage KafkaKafkaUserConfigTieredStorage
- Tiered storage configuration
- additionalBackupRegions String
- Additional Cloud Regions for Backup Replication.
- aivenKafkaTopicMessages Boolean
- Allow access to read Kafka topic messages in the Aiven Console and REST API.
- customDomain String
- Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: grafana.example.org.
- followerFetching KafkaKafkaUserConfigFollowerFetching
- Enable follower fetching
- ipFilterObjects List<KafkaKafkaUserConfigIpFilterObject>
- Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16
- ipFilterStrings List<String>
- Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- ipFilters List<String>
- Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- kafka KafkaKafkaUserConfigKafka
- Kafka broker configuration values
- kafkaAuthenticationMethods KafkaKafkaUserConfigKafkaAuthenticationMethods
- Kafka authentication methods
- kafkaConnect Boolean
- Enable Kafka Connect service. Default: false.
- kafkaConnectConfig KafkaKafkaUserConfigKafkaConnectConfig
- Kafka Connect configuration values
- kafkaConnectSecretProviders List<KafkaKafkaUserConfigKafkaConnectSecretProvider>
- kafkaRest Boolean
- Enable Kafka-REST service. Default: false.
- kafkaRestAuthorization Boolean
- Enable authorization in Kafka-REST service.
- kafkaRestConfig KafkaKafkaUserConfigKafkaRestConfig
- Kafka REST configuration
- kafkaSaslMechanisms KafkaKafkaUserConfigKafkaSaslMechanisms
- Kafka SASL mechanisms
- kafkaVersion String
- Enum: 3.1,3.2,3.3,3.4,3.5,3.6,3.7,3.8,3.9, and newer. Kafka major version.
- letsencryptSaslPrivatelink Boolean
- Use Letsencrypt CA for Kafka SASL via Privatelink.
- privateAccess KafkaKafkaUserConfigPrivateAccess
- Allow access to selected service ports from private networks
- privatelinkAccess KafkaKafkaUserConfigPrivatelinkAccess
- Allow access to selected service components through Privatelink
- publicAccess KafkaKafkaUserConfigPublicAccess
- Allow access to selected service ports from the public Internet
- schemaRegistry Boolean
- Enable Schema-Registry service. Default: false.
- schemaRegistryConfig KafkaKafkaUserConfigSchemaRegistryConfig
- Schema Registry configuration
- serviceLog Boolean
- Store logs for the service so that they are available in the HTTP API and console.
- singleZone KafkaKafkaUserConfigSingleZone
- Single-zone configuration
- staticIps Boolean
- Use static public IP addresses.
- tieredStorage KafkaKafkaUserConfigTieredStorage
- Tiered storage configuration
- additionalBackupRegions string
- Additional Cloud Regions for Backup Replication.
- aivenKafkaTopicMessages boolean
- Allow access to read Kafka topic messages in the Aiven Console and REST API.
- customDomain string
- Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: grafana.example.org.
- followerFetching KafkaKafkaUserConfigFollowerFetching
- Enable follower fetching
- ipFilterObjects KafkaKafkaUserConfigIpFilterObject[]
- Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16
- ipFilterStrings string[]
- Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- ipFilters string[]
- Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- kafka KafkaKafkaUserConfigKafka
- Kafka broker configuration values
- kafkaAuthenticationMethods KafkaKafkaUserConfigKafkaAuthenticationMethods
- Kafka authentication methods
- kafkaConnect boolean
- Enable Kafka Connect service. Default: false.
- kafkaConnectConfig KafkaKafkaUserConfigKafkaConnectConfig
- Kafka Connect configuration values
- kafkaConnectSecretProviders KafkaKafkaUserConfigKafkaConnectSecretProvider[]
- kafkaRest boolean
- Enable Kafka-REST service. Default: false.
- kafkaRestAuthorization boolean
- Enable authorization in Kafka-REST service.
- kafkaRestConfig KafkaKafkaUserConfigKafkaRestConfig
- Kafka REST configuration
- kafkaSaslMechanisms KafkaKafkaUserConfigKafkaSaslMechanisms
- Kafka SASL mechanisms
- kafkaVersion string
- Enum: 3.1,3.2,3.3,3.4,3.5,3.6,3.7,3.8,3.9, and newer. Kafka major version.
- letsencryptSaslPrivatelink boolean
- Use Letsencrypt CA for Kafka SASL via Privatelink.
- privateAccess KafkaKafkaUserConfigPrivateAccess
- Allow access to selected service ports from private networks
- privatelinkAccess KafkaKafkaUserConfigPrivatelinkAccess
- Allow access to selected service components through Privatelink
- publicAccess KafkaKafkaUserConfigPublicAccess
- Allow access to selected service ports from the public Internet
- schemaRegistry boolean
- Enable Schema-Registry service. Default: false.
- schemaRegistryConfig KafkaKafkaUserConfigSchemaRegistryConfig
- Schema Registry configuration
- serviceLog boolean
- Store logs for the service so that they are available in the HTTP API and console.
- singleZone KafkaKafkaUserConfigSingleZone
- Single-zone configuration
- staticIps boolean
- Use static public IP addresses.
- tieredStorage KafkaKafkaUserConfigTieredStorage
- Tiered storage configuration
- additional_backup_regions str
- Additional Cloud Regions for Backup Replication.
- aiven_kafka_topic_messages bool
- Allow access to read Kafka topic messages in the Aiven Console and REST API.
- custom_domain str
- Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: grafana.example.org.
- follower_fetching KafkaKafkaUserConfigFollowerFetching
- Enable follower fetching
- ip_filter_objects Sequence[KafkaKafkaUserConfigIpFilterObject]
- Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16
- ip_filter_strings Sequence[str]
- Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- ip_filters Sequence[str]
- Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- kafka KafkaKafkaUserConfigKafka
- Kafka broker configuration values
- kafka_authentication_methods KafkaKafkaUserConfigKafkaAuthenticationMethods
- Kafka authentication methods
- kafka_connect bool
- Enable Kafka Connect service. Default: false.
- kafka_connect_config KafkaKafkaUserConfigKafkaConnectConfig
- Kafka Connect configuration values
- kafka_connect_secret_providers Sequence[KafkaKafkaUserConfigKafkaConnectSecretProvider]
- kafka_rest bool
- Enable Kafka-REST service. Default: false.
- kafka_rest_authorization bool
- Enable authorization in Kafka-REST service.
- kafka_rest_config KafkaKafkaUserConfigKafkaRestConfig
- Kafka REST configuration
- kafka_sasl_mechanisms KafkaKafkaUserConfigKafkaSaslMechanisms
- Kafka SASL mechanisms
- kafka_version str
- Enum: 3.1,3.2,3.3,3.4,3.5,3.6,3.7,3.8,3.9, and newer. Kafka major version.
- letsencrypt_sasl_privatelink bool
- Use Letsencrypt CA for Kafka SASL via Privatelink.
- private_access KafkaKafkaUserConfigPrivateAccess
- Allow access to selected service ports from private networks
- privatelink_access KafkaKafkaUserConfigPrivatelinkAccess
- Allow access to selected service components through Privatelink
- public_access KafkaKafkaUserConfigPublicAccess
- Allow access to selected service ports from the public Internet
- schema_registry bool
- Enable Schema-Registry service. Default: false.
- schema_registry_config KafkaKafkaUserConfigSchemaRegistryConfig
- Schema Registry configuration
- service_log bool
- Store logs for the service so that they are available in the HTTP API and console.
- single_zone KafkaKafkaUserConfigSingleZone
- Single-zone configuration
- static_ips bool
- Use static public IP addresses.
- tiered_storage KafkaKafkaUserConfigTieredStorage
- Tiered storage configuration
- additionalBackupRegions String
- Additional Cloud Regions for Backup Replication.
- aivenKafkaTopicMessages Boolean
- Allow access to read Kafka topic messages in the Aiven Console and REST API.
- customDomain String
- Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: grafana.example.org.
- followerFetching Property Map
- Enable follower fetching
- ipFilterObjects List<Property Map>
- Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16
- ipFilterStrings List<String>
- Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- ipFilters List<String>
- Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- kafka Property Map
- Kafka broker configuration values
- kafkaAuthenticationMethods Property Map
- Kafka authentication methods
- kafkaConnect Boolean
- Enable Kafka Connect service. Default: false.
- kafkaConnectConfig Property Map
- Kafka Connect configuration values
- kafkaConnectSecretProviders List<Property Map>
- kafkaRest Boolean
- Enable Kafka-REST service. Default: false.
- kafkaRestAuthorization Boolean
- Enable authorization in Kafka-REST service.
- kafkaRestConfig Property Map
- Kafka REST configuration
- kafkaSaslMechanisms Property Map
- Kafka SASL mechanisms
- kafkaVersion String
- Enum: 3.1,3.2,3.3,3.4,3.5,3.6,3.7,3.8,3.9, and newer. Kafka major version.
- letsencryptSaslPrivatelink Boolean
- Use Letsencrypt CA for Kafka SASL via Privatelink.
- privateAccess Property Map
- Allow access to selected service ports from private networks
- privatelinkAccess Property Map
- Allow access to selected service components through Privatelink
- publicAccess Property Map
- Allow access to selected service ports from the public Internet
- schemaRegistry Boolean
- Enable Schema-Registry service. Default: false.
- schemaRegistryConfig Property Map
- Schema Registry configuration
- serviceLog Boolean
- Store logs for the service so that they are available in the HTTP API and console.
- singleZone Property Map
- Single-zone configuration
- staticIps Boolean
- Use static public IP addresses.
- tieredStorage Property Map
- Tiered storage configuration
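A sketch of how a few of these nested options are set from a program, using only fields documented above; the CIDR blocks and service details are placeholders:
import * as aiven from "@pulumi/aiven";
const restrictedKafka = new aiven.Kafka("restricted_kafka", {
    project: "my-project",
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "restricted-kafka",
    kafkaUserConfig: {
        // Only allow traffic from known CIDR blocks.
        ipFilterObjects: [
            { network: "10.20.0.0/16", description: "Production service IP range" },
            { network: "192.168.100.0/24", description: "Office VPN" },
        ],
        // Let consumers fetch from the closest replica.
        followerFetching: {
            enabled: true,
        },
    },
});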
KafkaKafkaUserConfigFollowerFetching, KafkaKafkaUserConfigFollowerFetchingArgs            
- Enabled bool
- Whether to enable the follower fetching functionality.
- Enabled bool
- Whether to enable the follower fetching functionality.
- enabled Boolean
- Whether to enable the follower fetching functionality.
- enabled boolean
- Whether to enable the follower fetching functionality.
- enabled bool
- Whether to enable the follower fetching functionality.
- enabled Boolean
- Whether to enable the follower fetching functionality.
KafkaKafkaUserConfigIpFilterObject, KafkaKafkaUserConfigIpFilterObjectArgs              
- Network string
- CIDR address block. Example: 10.20.0.0/16.
- Description string
- Description for IP filter list entry. Example: Production service IP range.
- Network string
- CIDR address block. Example: 10.20.0.0/16.
- Description string
- Description for IP filter list entry. Example: Production service IP range.
- network String
- CIDR address block. Example: 10.20.0.0/16.
- description String
- Description for IP filter list entry. Example: Production service IP range.
- network string
- CIDR address block. Example: 10.20.0.0/16.
- description string
- Description for IP filter list entry. Example: Production service IP range.
- network str
- CIDR address block. Example: 10.20.0.0/16.
- description str
- Description for IP filter list entry. Example: Production service IP range.
- network String
- CIDR address block. Example: 10.20.0.0/16.
- description String
- Description for IP filter list entry. Example: Production service IP range.
KafkaKafkaUserConfigKafka, KafkaKafkaUserConfigKafkaArgs          
- AutoCreateTopicsEnable bool
- Enable auto-creation of topics. (Default: true).
- CompressionType string
- Enum: gzip, lz4, producer, snappy, uncompressed, zstd. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts uncompressed which is equivalent to no compression, and producer which means retain the original compression codec set by the producer. (Default: producer).
- ConnectionsMaxIdleMs int
- Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). Example: 540000.
- DefaultReplicationFactor int
- Replication factor for auto-created topics (Default: 3).
- GroupInitialRebalanceDelayMs int
- The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. (Default: 3000 ms (3 seconds)). Example: 3000.
- GroupMaxSessionTimeoutMs int
- The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Default: 1800000 ms (30 minutes). Example: 1800000.
- GroupMinSessionTimeoutMs int
- The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. (Default: 6000 ms (6 seconds)). Example: 6000.
- LogCleanerDeleteRetentionMs int
- How long are delete records retained? (Default: 86400000 (1 day)). Example: 86400000.
- LogCleanerMaxCompactionLagMs int
- The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted. (Default: 9223372036854775807 ms (Long.MAX_VALUE)).
- LogCleanerMinCleanableRatio double
- Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. (Default: 0.5). Example: 0.5.
- LogCleanerMinCompactionLagMs int
- The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. (Default: 0 ms).
- LogCleanupPolicy string
- Enum: compact, compact,delete, delete. The default cleanup policy for segments beyond the retention window (Default: delete).
- LogFlushIntervalMessages int
- The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)).
- LogFlushIntervalMs int
- The maximum time in ms that a message in any topic is kept in memory (page-cache) before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used (Default: null).
- LogIndexIntervalBytes int
- The interval with which Kafka adds an entry to the offset index (Default: 4096 bytes (4 kibibytes)). Example: 4096.
- LogIndexSizeMaxBytes int
- The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)). Example: 10485760.
- LogLocalRetentionBytes int
- The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value. (Default: -2).
- LogLocalRetentionMs int
- The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value. (Default: -2).
- LogMessageDownconversionEnable bool
- This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. (Default: true).
- LogMessageTimestampDifferenceMaxMs int
- The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message (Default: 9223372036854775807 (Long.MAX_VALUE)).
- LogMessageTimestampType string
- Enum: CreateTime,LogAppendTime. Define whether the timestamp in the message is message create time or log append time. (Default: CreateTime).
- LogPreallocate bool
- Should pre allocate file when create new segment? (Default: false).
- LogRetentionBytes int
- The maximum size of the log before deleting messages (Default: -1).
- LogRetentionHours int
- The number of hours to keep a log file before deleting it (Default: 168 hours (1 week)).
- LogRetentionMs int
- The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. (Default: null, log.retention.hours applies).
- LogRollJitterMs int
- The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used (Default: null).
- LogRollMs int
- The maximum time before a new log segment is rolled out (in milliseconds). (Default: null, log.roll.hours applies (Default: 168, 7 days)).
- LogSegmentBytes int
- The maximum size of a single log file (Default: 1073741824 bytes (1 gibibyte)).
- LogSegmentDeleteDelayMs int
- The amount of time to wait before deleting a file from the filesystem (Default: 60000 ms (1 minute)). Example: 60000.
- MaxConnectionsPerIp int
- The maximum number of connections allowed from each ip address (Default: 2147483647).
- MaxIncrementalFetchSessionCacheSlots int
- The maximum number of incremental fetch sessions that the broker will maintain. (Default: 1000). Example: 1000.
- MessageMaxBytes int
- The maximum size of message that the server can receive. (Default: 1048588 bytes (1 mebibyte + 12 bytes)). Example: 1048588.
- MinInsyncReplicas int
- When a producer sets acks to all (or -1), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. (Default: 1). Example: 1.
- NumPartitions int
- Number of partitions for auto-created topics (Default: 1).
- OffsetsRetentionMinutes int
- Log retention window in minutes for offsets topic (Default: 10080 minutes (7 days)). Example: 10080.
- ProducerPurgatoryPurgeIntervalRequests int
- The purge interval (in number of requests) of the producer request purgatory (Default: 1000).
- ReplicaFetchMaxBytes int
- The number of bytes of messages to attempt to fetch for each partition . This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. (Default: 1048576 bytes (1 mebibytes)).
- ReplicaFetchResponseMaxBytes int
- Maximum bytes expected for the entire fetch response. Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum. (Default: 10485760 bytes (10 mebibytes)).
- SaslOauthbearerExpectedAudience string
- The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. (Default: null).
- SaslOauthbearerExpectedIssuer string
- Optional setting for the broker to use to verify that the JWT was created by the expected issuer.(Default: null).
- SaslOauthbearerJwksEndpointUrl string
- OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. (Default: null).
- SaslOauthbearerSubClaimName string
- Name of the scope from which to extract the subject claim from the JWT.(Default: sub).
- SocketRequestMaxBytes int
- The maximum number of bytes in a socket request (Default: 104857600 bytes).
- TransactionPartitionVerificationEnable bool
- Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition. (Default: true).
- TransactionRemoveExpiredTransactionCleanupIntervalMs int
- The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (Default: 3600000 ms (1 hour)). Example: 3600000.
- TransactionStateLogSegmentBytes int
- The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (Default: 104857600 bytes (100 mebibytes)). Example: 104857600.
- AutoCreateTopicsEnable bool
- Enable auto-creation of topics. (Default: true).
- CompressionType string
- Enum: gzip, lz4, producer, snappy, uncompressed, zstd. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts uncompressed which is equivalent to no compression, and producer which means retain the original compression codec set by the producer. (Default: producer).
- ConnectionsMaxIdleMs int
- Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). Example: 540000.
- DefaultReplicationFactor int
- Replication factor for auto-created topics (Default: 3).
- GroupInitialRebalanceDelayMs int
- The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. (Default: 3000 ms (3 seconds)). Example: 3000.
- GroupMaxSessionTimeoutMs int
- The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Default: 1800000 ms (30 minutes). Example: 1800000.
- GroupMinSessionTimeoutMs int
- The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. (Default: 6000 ms (6 seconds)). Example: 6000.
- LogCleanerDeleteRetentionMs int
- How long are delete records retained? (Default: 86400000 (1 day)). Example: 86400000.
- LogCleanerMaxCompactionLagMs int
- The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted. (Default: 9223372036854775807 ms (Long.MAX_VALUE)).
- LogCleanerMinCleanableRatio float64
- Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. (Default: 0.5). Example: 0.5.
- LogCleanerMinCompactionLagMs int
- The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. (Default: 0 ms).
- LogCleanupPolicy string
- Enum: compact, compact,delete, delete. The default cleanup policy for segments beyond the retention window (Default: delete).
- LogFlushIntervalMessages int
- The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)).
- LogFlushIntervalMs int
- The maximum time in ms that a message in any topic is kept in memory (page-cache) before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used (Default: null).
- LogIndexIntervalBytes int
- The interval with which Kafka adds an entry to the offset index (Default: 4096 bytes (4 kibibytes)). Example: 4096.
- LogIndexSizeMaxBytes int
- The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)). Example: 10485760.
- LogLocalRetentionBytes int
- The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value. (Default: -2).
- LogLocalRetentionMs int
- The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value. (Default: -2).
- LogMessageDownconversionEnable bool
- This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. (Default: true).
- LogMessageTimestampDifferenceMaxMs int
- The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message (Default: 9223372036854775807 (Long.MAX_VALUE)).
- LogMessageTimestampType string
- Enum: CreateTime,LogAppendTime. Define whether the timestamp in the message is message create time or log append time. (Default: CreateTime).
- LogPreallocate bool
- Should pre allocate file when create new segment? (Default: false).
- LogRetentionBytes int
- The maximum size of the log before deleting messages (Default: -1).
- LogRetentionHours int
- The number of hours to keep a log file before deleting it (Default: 168 hours (1 week)).
- LogRetentionMs int
- The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. (Default: null, log.retention.hours applies).
- LogRollJitterMs int
- The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used (Default: null).
- LogRollMs int
- The maximum time before a new log segment is rolled out (in milliseconds). (Default: null, log.roll.hours applies (Default: 168, 7 days)).
- LogSegmentBytes int
- The maximum size of a single log file (Default: 1073741824 bytes (1 gibibyte)).
- LogSegmentDeleteDelayMs int
- The amount of time to wait before deleting a file from the filesystem (Default: 60000 ms (1 minute)). Example: 60000.
- MaxConnectionsPerIp int
- The maximum number of connections allowed from each ip address (Default: 2147483647).
- MaxIncrementalFetchSessionCacheSlots int
- The maximum number of incremental fetch sessions that the broker will maintain. (Default: 1000). Example: 1000.
- MessageMaxBytes int
- The maximum size of message that the server can receive. (Default: 1048588 bytes (1 mebibyte + 12 bytes)). Example: 1048588.
- MinInsyncReplicas int
- When a producer sets acks to all (or -1), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. (Default: 1). Example: 1.
- NumPartitions int
- Number of partitions for auto-created topics (Default: 1).
- OffsetsRetentionMinutes int
- Log retention window in minutes for offsets topic (Default: 10080 minutes (7 days)). Example: 10080.
- ProducerPurgatoryPurgeIntervalRequests int
- The purge interval (in number of requests) of the producer request purgatory (Default: 1000).
- ReplicaFetchMaxBytes int
- The number of bytes of messages to attempt to fetch for each partition . This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. (Default: 1048576 bytes (1 mebibytes)).
- ReplicaFetchResponseMaxBytes int
- Maximum bytes expected for the entire fetch response. Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum. (Default: 10485760 bytes (10 mebibytes)).
- SaslOauthbearerExpectedAudience string
- The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. (Default: null).
- SaslOauthbearerExpectedIssuer string
- Optional setting for the broker to use to verify that the JWT was created by the expected issuer.(Default: null).
- SaslOauthbearerJwksEndpointUrl string
- OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. (Default: null).
- SaslOauthbearerSubClaimName string
- Name of the scope from which to extract the subject claim from the JWT.(Default: sub).
- SocketRequestMaxBytes int
- The maximum number of bytes in a socket request (Default: 104857600 bytes).
- TransactionPartitionVerificationEnable bool
- Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition. (Default: true).
- TransactionRemoveExpiredTransactionCleanupIntervalMs int
- The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (Default: 3600000 ms (1 hour)). Example: 3600000.
- TransactionStateLogSegmentBytes int
- The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (Default: 104857600 bytes (100 mebibytes)). Example: 104857600.
- autoCreateTopicsEnable Boolean
- Enable auto-creation of topics. (Default: true).
- compressionType String
- Enum: gzip, lz4, producer, snappy, uncompressed, zstd. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts uncompressed which is equivalent to no compression, and producer which means retain the original compression codec set by the producer. (Default: producer).
- connectionsMaxIdleMs Integer
- Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). Example: 540000.
- defaultReplicationFactor Integer
- Replication factor for auto-created topics (Default: 3).
- groupInitialRebalanceDelayMs Integer
- The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. (Default: 3000 ms (3 seconds)). Example: 3000.
- groupMaxSessionTimeoutMs Integer
- The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Default: 1800000 ms (30 minutes). Example: 1800000.
- groupMinSessionTimeoutMs Integer
- The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. (Default: 6000 ms (6 seconds)). Example: 6000.
- logCleanerDeleteRetentionMs Integer
- How long are delete records retained? (Default: 86400000 (1 day)). Example: 86400000.
- logCleanerMaxCompactionLagMs Integer
- The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted. (Default: 9223372036854775807 ms (Long.MAX_VALUE)).
- logCleanerMinCleanableRatio Double
- Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. (Default: 0.5). Example: 0.5.
- logCleanerMinCompactionLagMs Integer
- The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. (Default: 0 ms).
- logCleanupPolicy String
- Enum: compact, compact,delete, delete. The default cleanup policy for segments beyond the retention window (Default: delete).
- logFlushIntervalMessages Integer
- The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)).
- logFlushIntervalMs Integer
- The maximum time in ms that a message in any topic is kept in memory (page-cache) before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used (Default: null).
- logIndexIntervalBytes Integer
- The interval with which Kafka adds an entry to the offset index (Default: 4096 bytes (4 kibibytes)). Example: 4096.
- logIndexSizeMaxBytes Integer
- The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)). Example: 10485760.
- logLocalRetentionBytes Integer
- The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value. (Default: -2).
- logLocalRetentionMs Integer
- The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value. (Default: -2).
- logMessageDownconversionEnable Boolean
- This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. (Default: true).
- logMessageTimestampDifferenceMaxMs Integer
- The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message (Default: 9223372036854775807 (Long.MAX_VALUE)).
- logMessageTimestampType String
- Enum: CreateTime,LogAppendTime. Define whether the timestamp in the message is message create time or log append time. (Default: CreateTime).
- logPreallocate Boolean
- Should pre allocate file when create new segment? (Default: false).
- logRetentionBytes Integer
- The maximum size of the log before deleting messages (Default: -1).
- logRetentionHours Integer
- The number of hours to keep a log file before deleting it (Default: 168 hours (1 week)).
- logRetentionMs Integer
- The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. (Default: null, log.retention.hours applies).
- logRollJitterMs Integer
- The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used (Default: null).
- logRollMs Integer
- The maximum time before a new log segment is rolled out (in milliseconds). (Default: null, log.roll.hours applies (Default: 168, 7 days)).
- logSegmentBytes Integer
- The maximum size of a single log file (Default: 1073741824 bytes (1 gibibyte)).
- logSegmentDeleteDelayMs Integer
- The amount of time to wait before deleting a file from the filesystem (Default: 60000 ms (1 minute)). Example: 60000.
- maxConnectionsPerIp Integer
- The maximum number of connections allowed from each ip address (Default: 2147483647).
- maxIncrementalFetchSessionCacheSlots Integer
- The maximum number of incremental fetch sessions that the broker will maintain. (Default: 1000). Example: 1000.
- messageMaxBytes Integer
- The maximum size of message that the server can receive. (Default: 1048588 bytes (1 mebibyte + 12 bytes)). Example: 1048588.
- minInsync IntegerReplicas 
- When a producer sets acks to all (or -1), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. (Default: 1). Example: 1.
- numPartitions Integer
- Number of partitions for auto-created topics (Default: 1).
- offsetsRetentionMinutes Integer
- Log retention window in minutes for offsets topic (Default: 10080 minutes (7 days)). Example: 10080.
- producerPurgatoryPurgeIntervalRequests Integer
- The purge interval (in number of requests) of the producer request purgatory (Default: 1000).
- replicaFetchMaxBytes Integer
- The number of bytes of messages to attempt to fetch for each partition. This is not an absolute maximum: if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. (Default: 1048576 bytes (1 mebibyte)).
- replicaFetchResponseMaxBytes Integer
- Maximum bytes expected for the entire fetch response. Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum. (Default: 10485760 bytes (10 mebibytes)).
- saslOauthbearerExpectedAudience String
- The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. (Default: null).
- saslOauthbearerExpectedIssuer String
- Optional setting for the broker to use to verify that the JWT was created by the expected issuer.(Default: null).
- saslOauthbearerJwksEndpointUrl String
- OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. (Default: null).
- saslOauthbearerSubClaimName String
- Name of the scope from which to extract the subject claim from the JWT.(Default: sub).
- socketRequestMaxBytes Integer
- The maximum number of bytes in a socket request (Default: 104857600 bytes).
- transactionPartitionVerificationEnable Boolean
- Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition. (Default: true).
- transactionRemoveExpiredTransactionCleanupIntervalMs Integer
- The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (Default: 3600000 ms (1 hour)). Example: 3600000.
- transactionStateLogSegmentBytes Integer
- The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (Default: 104857600 bytes (100 mebibytes)). Example: 104857600.
- autoCreate booleanTopics Enable 
- Enable auto-creation of topics. (Default: true).
- compressionType string
- Enum: gzip,lz4,producer,snappy,uncompressed,zstd. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (gzip,snappy,lz4,zstd). It additionally acceptsuncompressedwhich is equivalent to no compression; andproducerwhich means retain the original compression codec set by the producer.(Default: producer).
- connectionsMax numberIdle Ms 
- Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). Example: 540000.
- defaultReplication numberFactor 
- Replication factor for auto-created topics (Default: 3).
- groupInitial numberRebalance Delay Ms 
- The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. (Default: 3000 ms (3 seconds)). Example: 3000.
- groupMax numberSession Timeout Ms 
- The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Default: 1800000 ms (30 minutes). Example: 1800000.
- groupMin numberSession Timeout Ms 
- The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. (Default: 6000 ms (6 seconds)). Example: 6000.
- logCleaner numberDelete Retention Ms 
- How long are delete records retained? (Default: 86400000 (1 day)). Example: 86400000.
- logCleaner numberMax Compaction Lag Ms 
- The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted. (Default: 9223372036854775807 ms (Long.MAX_VALUE)).
- logCleaner numberMin Cleanable Ratio 
- Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. (Default: 0.5). Example: 0.5.
- logCleaner numberMin Compaction Lag Ms 
- The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. (Default: 0 ms).
- logCleanup stringPolicy 
- Enum: compact,compact,delete,delete. The default cleanup policy for segments beyond the retention window (Default: delete).
- logFlush numberInterval Messages 
- The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)).
- logFlush numberInterval Ms 
- The maximum time in ms that a message in any topic is kept in memory (page-cache) before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used (Default: null).
- logIndex numberInterval Bytes 
- The interval with which Kafka adds an entry to the offset index (Default: 4096 bytes (4 kibibytes)). Example: 4096.
- logIndex numberSize Max Bytes 
- The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)). Example: 10485760.
- logLocal numberRetention Bytes 
- The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value. (Default: -2).
- logLocal numberRetention Ms 
- The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value. (Default: -2).
- logMessage booleanDownconversion Enable 
- This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. (Default: true).
- logMessage numberTimestamp Difference Max Ms 
- The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message (Default: 9223372036854775807 (Long.MAX_VALUE)).
- logMessage stringTimestamp Type 
- Enum: CreateTime,LogAppendTime. Define whether the timestamp in the message is message create time or log append time. (Default: CreateTime).
- logPreallocate boolean
- Should pre allocate file when create new segment? (Default: false).
- logRetention numberBytes 
- The maximum size of the log before deleting messages (Default: -1).
- logRetention numberHours 
- The number of hours to keep a log file before deleting it (Default: 168 hours (1 week)).
- logRetention numberMs 
- The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. (Default: null, log.retention.hours applies).
- logRoll numberJitter Ms 
- The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used (Default: null).
- logRoll numberMs 
- The maximum time before a new log segment is rolled out (in milliseconds). (Default: null, log.roll.hours applies (Default: 168, 7 days)).
- logSegment numberBytes 
- The maximum size of a single log file (Default: 1073741824 bytes (1 gibibyte)).
- logSegment numberDelete Delay Ms 
- The amount of time to wait before deleting a file from the filesystem (Default: 60000 ms (1 minute)). Example: 60000.
- maxConnections numberPer Ip 
- The maximum number of connections allowed from each ip address (Default: 2147483647).
- maxIncremental numberFetch Session Cache Slots 
- The maximum number of incremental fetch sessions that the broker will maintain. (Default: 1000). Example: 1000.
- messageMax numberBytes 
- The maximum size of message that the server can receive. (Default: 1048588 bytes (1 mebibyte + 12 bytes)). Example: 1048588.
- minInsync numberReplicas 
- When a producer sets acks to all(or-1), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. (Default: 1). Example:1.
- numPartitions number
- Number of partitions for auto-created topics (Default: 1).
- offsetsRetention numberMinutes 
- Log retention window in minutes for offsets topic (Default: 10080 minutes (7 days)). Example: 10080.
- producerPurgatory numberPurge Interval Requests 
- The purge interval (in number of requests) of the producer request purgatory (Default: 1000).
- replicaFetch numberMax Bytes 
- The number of bytes of messages to attempt to fetch for each partition . This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. (Default: 1048576 bytes (1 mebibytes)).
- replicaFetch numberResponse Max Bytes 
- Maximum bytes expected for the entire fetch response. Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum. (Default: 10485760 bytes (10 mebibytes)).
- saslOauthbearer stringExpected Audience 
- The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. (Default: null).
- saslOauthbearer stringExpected Issuer 
- Optional setting for the broker to use to verify that the JWT was created by the expected issuer.(Default: null).
- saslOauthbearer stringJwks Endpoint Url 
- OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. (Default: null).
- saslOauthbearer stringSub Claim Name 
- Name of the scope from which to extract the subject claim from the JWT.(Default: sub).
- socketRequest numberMax Bytes 
- The maximum number of bytes in a socket request (Default: 104857600 bytes).
- transactionPartition booleanVerification Enable 
- Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition. (Default: true).
- transactionRemove numberExpired Transaction Cleanup Interval Ms 
- The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (Default: 3600000 ms (1 hour)). Example: 3600000.
- transactionState numberLog Segment Bytes 
- The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (Default: 104857600 bytes (100 mebibytes)). Example: 104857600.
- auto_create_ booltopics_ enable 
- Enable auto-creation of topics. (Default: true).
- compression_type str
- Enum: gzip,lz4,producer,snappy,uncompressed,zstd. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (gzip,snappy,lz4,zstd). It additionally acceptsuncompressedwhich is equivalent to no compression; andproducerwhich means retain the original compression codec set by the producer.(Default: producer).
- connections_max_ intidle_ ms 
- Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). Example: 540000.
- default_replication_ intfactor 
- Replication factor for auto-created topics (Default: 3).
- group_initial_ intrebalance_ delay_ ms 
- The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. (Default: 3000 ms (3 seconds)). Example: 3000.
- group_max_ intsession_ timeout_ ms 
- The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Default: 1800000 ms (30 minutes). Example: 1800000.
- group_min_ intsession_ timeout_ ms 
- The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. (Default: 6000 ms (6 seconds)). Example: 6000.
- log_cleaner_ intdelete_ retention_ ms 
- How long are delete records retained? (Default: 86400000 (1 day)). Example: 86400000.
- log_cleaner_ intmax_ compaction_ lag_ ms 
- The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted. (Default: 9223372036854775807 ms (Long.MAX_VALUE)).
- log_cleaner_ floatmin_ cleanable_ ratio 
- Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. (Default: 0.5). Example: 0.5.
- log_cleaner_ intmin_ compaction_ lag_ ms 
- The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. (Default: 0 ms).
- log_cleanup_ strpolicy 
- Enum: compact,compact,delete,delete. The default cleanup policy for segments beyond the retention window (Default: delete).
- log_flush_ intinterval_ messages 
- The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)).
- log_flush_ intinterval_ ms 
- The maximum time in ms that a message in any topic is kept in memory (page-cache) before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used (Default: null).
- log_index_ intinterval_ bytes 
- The interval with which Kafka adds an entry to the offset index (Default: 4096 bytes (4 kibibytes)). Example: 4096.
- log_index_ intsize_ max_ bytes 
- The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)). Example: 10485760.
- log_local_ intretention_ bytes 
- The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value. (Default: -2).
- log_local_ intretention_ ms 
- The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value. (Default: -2).
- log_message_ booldownconversion_ enable 
- This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. (Default: true).
- log_message_ inttimestamp_ difference_ max_ ms 
- The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message (Default: 9223372036854775807 (Long.MAX_VALUE)).
- log_message_ strtimestamp_ type 
- Enum: CreateTime,LogAppendTime. Define whether the timestamp in the message is message create time or log append time. (Default: CreateTime).
- log_preallocate bool
- Should pre allocate file when create new segment? (Default: false).
- log_retention_ intbytes 
- The maximum size of the log before deleting messages (Default: -1).
- log_retention_ inthours 
- The number of hours to keep a log file before deleting it (Default: 168 hours (1 week)).
- log_retention_ intms 
- The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. (Default: null, log.retention.hours applies).
- log_roll_ intjitter_ ms 
- The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used (Default: null).
- log_roll_ intms 
- The maximum time before a new log segment is rolled out (in milliseconds). (Default: null, log.roll.hours applies (Default: 168, 7 days)).
- log_segment_ intbytes 
- The maximum size of a single log file (Default: 1073741824 bytes (1 gibibyte)).
- log_segment_ intdelete_ delay_ ms 
- The amount of time to wait before deleting a file from the filesystem (Default: 60000 ms (1 minute)). Example: 60000.
- max_connections_ intper_ ip 
- The maximum number of connections allowed from each ip address (Default: 2147483647).
- max_incremental_ intfetch_ session_ cache_ slots 
- The maximum number of incremental fetch sessions that the broker will maintain. (Default: 1000). Example: 1000.
- message_max_ intbytes 
- The maximum size of message that the server can receive. (Default: 1048588 bytes (1 mebibyte + 12 bytes)). Example: 1048588.
- min_insync_ intreplicas 
- When a producer sets acks to all(or-1), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. (Default: 1). Example:1.
- num_partitions int
- Number of partitions for auto-created topics (Default: 1).
- offsets_retention_ intminutes 
- Log retention window in minutes for offsets topic (Default: 10080 minutes (7 days)). Example: 10080.
- producer_purgatory_ intpurge_ interval_ requests 
- The purge interval (in number of requests) of the producer request purgatory (Default: 1000).
- replica_fetch_ intmax_ bytes 
- The number of bytes of messages to attempt to fetch for each partition . This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. (Default: 1048576 bytes (1 mebibytes)).
- replica_fetch_ intresponse_ max_ bytes 
- Maximum bytes expected for the entire fetch response. Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum. (Default: 10485760 bytes (10 mebibytes)).
- sasl_oauthbearer_ strexpected_ audience 
- The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. (Default: null).
- sasl_oauthbearer_ strexpected_ issuer 
- Optional setting for the broker to use to verify that the JWT was created by the expected issuer.(Default: null).
- sasl_oauthbearer_ strjwks_ endpoint_ url 
- OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. (Default: null).
- sasl_oauthbearer_ strsub_ claim_ name 
- Name of the scope from which to extract the subject claim from the JWT.(Default: sub).
- socket_request_ intmax_ bytes 
- The maximum number of bytes in a socket request (Default: 104857600 bytes).
- transaction_partition_ boolverification_ enable 
- Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition. (Default: true).
- transaction_remove_ intexpired_ transaction_ cleanup_ interval_ ms 
- The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (Default: 3600000 ms (1 hour)). Example: 3600000.
- transaction_state_ intlog_ segment_ bytes 
- The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (Default: 104857600 bytes (100 mebibytes)). Example: 104857600.
- autoCreate BooleanTopics Enable 
- Enable auto-creation of topics. (Default: true).
- compressionType String
- Enum: gzip,lz4,producer,snappy,uncompressed,zstd. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (gzip,snappy,lz4,zstd). It additionally acceptsuncompressedwhich is equivalent to no compression; andproducerwhich means retain the original compression codec set by the producer.(Default: producer).
- connectionsMax NumberIdle Ms 
- Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). Example: 540000.
- defaultReplication NumberFactor 
- Replication factor for auto-created topics (Default: 3).
- groupInitial NumberRebalance Delay Ms 
- The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. (Default: 3000 ms (3 seconds)). Example: 3000.
- groupMax NumberSession Timeout Ms 
- The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Default: 1800000 ms (30 minutes). Example: 1800000.
- groupMin NumberSession Timeout Ms 
- The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. (Default: 6000 ms (6 seconds)). Example: 6000.
- logCleaner NumberDelete Retention Ms 
- How long are delete records retained? (Default: 86400000 (1 day)). Example: 86400000.
- logCleaner NumberMax Compaction Lag Ms 
- The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted. (Default: 9223372036854775807 ms (Long.MAX_VALUE)).
- logCleaner NumberMin Cleanable Ratio 
- Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. (Default: 0.5). Example: 0.5.
- logCleaner NumberMin Compaction Lag Ms 
- The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. (Default: 0 ms).
- logCleanup StringPolicy 
- Enum: compact,compact,delete,delete. The default cleanup policy for segments beyond the retention window (Default: delete).
- logFlush NumberInterval Messages 
- The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)).
- logFlush NumberInterval Ms 
- The maximum time in ms that a message in any topic is kept in memory (page-cache) before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used (Default: null).
- logIndex NumberInterval Bytes 
- The interval with which Kafka adds an entry to the offset index (Default: 4096 bytes (4 kibibytes)). Example: 4096.
- logIndex NumberSize Max Bytes 
- The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)). Example: 10485760.
- logLocal NumberRetention Bytes 
- The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value. (Default: -2).
- logLocal NumberRetention Ms 
- The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value. (Default: -2).
- logMessage BooleanDownconversion Enable 
- This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. (Default: true).
- logMessage NumberTimestamp Difference Max Ms 
- The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message (Default: 9223372036854775807 (Long.MAX_VALUE)).
- logMessage StringTimestamp Type 
- Enum: CreateTime,LogAppendTime. Define whether the timestamp in the message is message create time or log append time. (Default: CreateTime).
- logPreallocate Boolean
- Should pre allocate file when create new segment? (Default: false).
- logRetention NumberBytes 
- The maximum size of the log before deleting messages (Default: -1).
- logRetention NumberHours 
- The number of hours to keep a log file before deleting it (Default: 168 hours (1 week)).
- logRetention NumberMs 
- The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. (Default: null, log.retention.hours applies).
- logRoll NumberJitter Ms 
- The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used (Default: null).
- logRoll NumberMs 
- The maximum time before a new log segment is rolled out (in milliseconds). (Default: null, log.roll.hours applies (Default: 168, 7 days)).
- logSegment NumberBytes 
- The maximum size of a single log file (Default: 1073741824 bytes (1 gibibyte)).
- logSegment NumberDelete Delay Ms 
- The amount of time to wait before deleting a file from the filesystem (Default: 60000 ms (1 minute)). Example: 60000.
- maxConnections NumberPer Ip 
- The maximum number of connections allowed from each ip address (Default: 2147483647).
- maxIncremental NumberFetch Session Cache Slots 
- The maximum number of incremental fetch sessions that the broker will maintain. (Default: 1000). Example: 1000.
- messageMax NumberBytes 
- The maximum size of message that the server can receive. (Default: 1048588 bytes (1 mebibyte + 12 bytes)). Example: 1048588.
- minInsync NumberReplicas 
- When a producer sets acks to all(or-1), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. (Default: 1). Example:1.
- numPartitions Number
- Number of partitions for auto-created topics (Default: 1).
- offsetsRetention NumberMinutes 
- Log retention window in minutes for offsets topic (Default: 10080 minutes (7 days)). Example: 10080.
- producerPurgatory NumberPurge Interval Requests 
- The purge interval (in number of requests) of the producer request purgatory (Default: 1000).
- replicaFetch NumberMax Bytes 
- The number of bytes of messages to attempt to fetch for each partition . This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. (Default: 1048576 bytes (1 mebibytes)).
- replicaFetch NumberResponse Max Bytes 
- Maximum bytes expected for the entire fetch response. Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum. (Default: 10485760 bytes (10 mebibytes)).
- saslOauthbearer StringExpected Audience 
- The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. (Default: null).
- saslOauthbearer StringExpected Issuer 
- Optional setting for the broker to use to verify that the JWT was created by the expected issuer.(Default: null).
- saslOauthbearer StringJwks Endpoint Url 
- OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. (Default: null).
- saslOauthbearer StringSub Claim Name 
- Name of the scope from which to extract the subject claim from the JWT.(Default: sub).
- socketRequest NumberMax Bytes 
- The maximum number of bytes in a socket request (Default: 104857600 bytes).
- transactionPartition BooleanVerification Enable 
- Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition. (Default: true).
- transactionRemove NumberExpired Transaction Cleanup Interval Ms 
- The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (Default: 3600000 ms (1 hour)). Example: 3600000.
- transactionState NumberLog Segment Bytes 
- The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (Default: 104857600 bytes (100 mebibytes)). Example: 104857600.
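The broker properties above map to the nested kafkaUserConfig.kafka block on the resource. The following TypeScript sketch shows how a few of them might be set together; the project name and numeric values are placeholders, not recommendations:
import * as aiven from "@pulumi/aiven";

// Sketch only: tune a handful of broker settings via kafka_user_config.kafka.
// "example-project" and the numeric values are placeholders.
const tunedKafka = new aiven.Kafka("tuned_kafka", {
    project: "example-project",
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "tuned-kafka",
    kafkaUserConfig: {
        kafka: {
            logCleanupPolicy: "compact,delete", // combined compaction + deletion
            logRetentionMs: 604800000,          // keep segments for 7 days
            logSegmentBytes: 1073741824,        // 1 GiB segments (the documented default)
            minInsyncReplicas: 2,
            messageMaxBytes: 1048588,
        },
    },
});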
KafkaKafkaUserConfigKafkaAuthenticationMethods, KafkaKafkaUserConfigKafkaAuthenticationMethodsArgs              
- Certificate bool
- Enable certificate/SSL authentication. Default: true.
- Sasl bool
- Enable SASL authentication. Default: false.
- Certificate bool
- Enable certificate/SSL authentication. Default: true.
- Sasl bool
- Enable SASL authentication. Default: false.
- certificate Boolean
- Enable certificate/SSL authentication. Default: true.
- sasl Boolean
- Enable SASL authentication. Default: false.
- certificate boolean
- Enable certificate/SSL authentication. Default: true.
- sasl boolean
- Enable SASL authentication. Default: false.
- certificate bool
- Enable certificate/SSL authentication. Default: true.
- sasl bool
- Enable SASL authentication. Default: false.
- certificate Boolean
- Enable certificate/SSL authentication. Default: true.
- sasl Boolean
- Enable SASL authentication. Default: false.
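Both authentication methods can be enabled side by side under kafkaUserConfig.kafkaAuthenticationMethods; a minimal sketch with placeholder service settings:
import * as aiven from "@pulumi/aiven";

// Sketch only: allow both certificate/SSL and SASL client authentication.
const kafkaWithSasl = new aiven.Kafka("kafka_with_sasl", {
    project: "example-project",          // placeholder project name
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "kafka-with-sasl",
    kafkaUserConfig: {
        kafkaAuthenticationMethods: {
            certificate: true,           // enabled by default per the table above
            sasl: true,                  // disabled by default
        },
    },
});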
KafkaKafkaUserConfigKafkaConnectConfig, KafkaKafkaUserConfigKafkaConnectConfigArgs              
- ConnectorClientConfigOverridePolicy string
- Enum: All,None. Defines what client configurations can be overridden by the connector. Default is None.
- ConsumerAutoOffsetReset string
- Enum: earliest,latest. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
- ConsumerFetchMaxBytes int
- Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum. Example: 52428800.
- ConsumerIsolationLevel string
- Enum: read_committed, read_uncommitted. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
- ConsumerMaxPartitionFetchBytes int
- Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: 1048576.
- ConsumerMaxPollIntervalMs int
- The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
- ConsumerMaxPollRecords int
- The maximum number of records returned in a single call to poll() (defaults to 500).
- OffsetFlushIntervalMs int
- The interval at which to try committing offsets for tasks (defaults to 60000).
- OffsetFlushTimeoutMs int
- Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
- ProducerBatchSize int
- This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will linger for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
- ProducerBufferMemory int
- The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
- ProducerCompressionType string
- Enum: gzip, lz4, none, snappy, zstd. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none, which is the default and equivalent to no compression.
- ProducerLingerMs int
- This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition, it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition, the producer will linger for the specified time waiting for more records to show up. Defaults to 0.
- ProducerMaxRequestSize int
- This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: 1048576.
- ScheduledRebalanceMaxDelayMs int
- The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
- SessionTimeoutMs int
- The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
- ConnectorClient stringConfig Override Policy 
- Enum: All,None. Defines what client configurations can be overridden by the connector. Default is None.
- ConsumerAuto stringOffset Reset 
- Enum: earliest,latest. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
- ConsumerFetch intMax Bytes 
- Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum. Example: 52428800.
- ConsumerIsolation stringLevel 
- Enum: read_committed,read_uncommitted. Transaction read isolation level. readuncommitted is the default, but readcommitted can be used if consume-exactly-once behavior is desired.
- ConsumerMax intPartition Fetch Bytes 
- Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: 1048576.
- ConsumerMax intPoll Interval Ms 
- The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
- ConsumerMax intPoll Records 
- The maximum number of records returned in a single call to poll() (defaults to 500).
- OffsetFlush intInterval Ms 
- The interval at which to try committing offsets for tasks (defaults to 60000).
- OffsetFlush intTimeout Ms 
- Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
- ProducerBatch intSize 
- This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will lingerfor the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
- ProducerBuffer intMemory 
- The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
- ProducerCompression stringType 
- Enum: gzip,lz4,none,snappy,zstd. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip,snappy,lz4,zstd). It additionally acceptsnonewhich is the default and equivalent to no compression.
- ProducerLinger intMs 
- This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will lingerfor the specified time waiting for more records to show up. Defaults to 0.
- ProducerMax intRequest Size 
- This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: 1048576.
- ScheduledRebalance intMax Delay Ms 
- The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
- SessionTimeout intMs 
- The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
- connectorClient StringConfig Override Policy 
- Enum: All,None. Defines what client configurations can be overridden by the connector. Default is None.
- consumerAuto StringOffset Reset 
- Enum: earliest,latest. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
- consumerFetch IntegerMax Bytes 
- Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum. Example: 52428800.
- consumerIsolation StringLevel 
- Enum: read_committed,read_uncommitted. Transaction read isolation level. readuncommitted is the default, but readcommitted can be used if consume-exactly-once behavior is desired.
- consumerMax IntegerPartition Fetch Bytes 
- Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: 1048576.
- consumerMax IntegerPoll Interval Ms 
- The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
- consumerMax IntegerPoll Records 
- The maximum number of records returned in a single call to poll() (defaults to 500).
- offsetFlush IntegerInterval Ms 
- The interval at which to try committing offsets for tasks (defaults to 60000).
- offsetFlush IntegerTimeout Ms 
- Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
- producerBatch IntegerSize 
- This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will lingerfor the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
- producerBuffer IntegerMemory 
- The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
- producerCompression StringType 
- Enum: gzip,lz4,none,snappy,zstd. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip,snappy,lz4,zstd). It additionally acceptsnonewhich is the default and equivalent to no compression.
- producerLinger IntegerMs 
- This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will lingerfor the specified time waiting for more records to show up. Defaults to 0.
- producerMax IntegerRequest Size 
- This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: 1048576.
- scheduledRebalance IntegerMax Delay Ms 
- The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
- sessionTimeout IntegerMs 
- The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
- connectorClient stringConfig Override Policy 
- Enum: All,None. Defines what client configurations can be overridden by the connector. Default is None.
- consumerAuto stringOffset Reset 
- Enum: earliest,latest. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
- consumerFetch numberMax Bytes 
- Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum. Example: 52428800.
- consumerIsolation stringLevel 
- Enum: read_committed,read_uncommitted. Transaction read isolation level. readuncommitted is the default, but readcommitted can be used if consume-exactly-once behavior is desired.
- consumerMax numberPartition Fetch Bytes 
- Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: 1048576.
- consumerMax numberPoll Interval Ms 
- The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
- consumerMax numberPoll Records 
- The maximum number of records returned in a single call to poll() (defaults to 500).
- offsetFlush numberInterval Ms 
- The interval at which to try committing offsets for tasks (defaults to 60000).
- offsetFlush numberTimeout Ms 
- Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
- producerBatch numberSize 
- This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will lingerfor the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
- producerBuffer numberMemory 
- The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
- producerCompression stringType 
- Enum: gzip,lz4,none,snappy,zstd. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip,snappy,lz4,zstd). It additionally acceptsnonewhich is the default and equivalent to no compression.
- producerLinger numberMs 
- This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will lingerfor the specified time waiting for more records to show up. Defaults to 0.
- producerMax numberRequest Size 
- This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: 1048576.
- scheduledRebalance numberMax Delay Ms 
- The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
- sessionTimeout numberMs 
- The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
- connector_client_ strconfig_ override_ policy 
- Enum: All,None. Defines what client configurations can be overridden by the connector. Default is None.
- consumer_auto_ stroffset_ reset 
- Enum: earliest,latest. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
- consumer_fetch_ intmax_ bytes 
- Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum. Example: 52428800.
- consumer_isolation_ strlevel 
- Enum: read_committed,read_uncommitted. Transaction read isolation level. readuncommitted is the default, but readcommitted can be used if consume-exactly-once behavior is desired.
- consumer_max_ intpartition_ fetch_ bytes 
- Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: 1048576.
- consumer_max_ intpoll_ interval_ ms 
- The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
- consumer_max_ intpoll_ records 
- The maximum number of records returned in a single call to poll() (defaults to 500).
- offset_flush_ intinterval_ ms 
- The interval at which to try committing offsets for tasks (defaults to 60000).
- offset_flush_ inttimeout_ ms 
- Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
- producer_batch_ intsize 
- This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will lingerfor the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
- producer_buffer_ intmemory 
- The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
- producer_compression_ strtype 
- Enum: gzip,lz4,none,snappy,zstd. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip,snappy,lz4,zstd). It additionally acceptsnonewhich is the default and equivalent to no compression.
- producer_linger_ intms 
- This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will lingerfor the specified time waiting for more records to show up. Defaults to 0.
- producer_max_ intrequest_ size 
- This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: 1048576.
- scheduled_rebalance_ intmax_ delay_ ms 
- The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
- session_timeout_ intms 
- The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
- connectorClient StringConfig Override Policy 
- Enum: All,None. Defines what client configurations can be overridden by the connector. Default is None.
- consumerAuto StringOffset Reset 
- Enum: earliest,latest. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
- consumerFetch NumberMax Bytes 
- Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum. Example: 52428800.
- consumerIsolation StringLevel 
- Enum: read_committed,read_uncommitted. Transaction read isolation level. readuncommitted is the default, but readcommitted can be used if consume-exactly-once behavior is desired.
- consumerMax NumberPartition Fetch Bytes 
- Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: 1048576.
- consumerMax NumberPoll Interval Ms 
- The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
- consumerMax NumberPoll Records 
- The maximum number of records returned in a single call to poll() (defaults to 500).
- offsetFlush NumberInterval Ms 
- The interval at which to try committing offsets for tasks (defaults to 60000).
- offsetFlush NumberTimeout Ms 
- Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
- producerBatch NumberSize 
- This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will lingerfor the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
- producerBuffer NumberMemory 
- The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
- producerCompression StringType 
- Enum: gzip,lz4,none,snappy,zstd. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip,snappy,lz4,zstd). It additionally acceptsnonewhich is the default and equivalent to no compression.
- producerLinger NumberMs 
- This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will lingerfor the specified time waiting for more records to show up. Defaults to 0.
- producerMax NumberRequest Size 
- This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: 1048576.
- scheduledRebalance NumberMax Delay Ms 
- The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
- sessionTimeout NumberMs 
- The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
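These worker settings are applied through kafkaUserConfig.kafkaConnectConfig when Kafka Connect runs on the Kafka service itself. A minimal sketch with illustrative values only:
import * as aiven from "@pulumi/aiven";

// Sketch only: enable Kafka Connect and adjust a few worker/consumer settings.
const kafkaWithConnect = new aiven.Kafka("kafka_with_connect", {
    project: "example-project",          // placeholder project name
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "kafka-with-connect",
    kafkaUserConfig: {
        kafkaConnect: true,
        kafkaConnectConfig: {
            consumerIsolationLevel: "read_committed",
            consumerMaxPollRecords: 500,
            offsetFlushIntervalMs: 60000,
            producerCompressionType: "lz4",
        },
    },
});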
KafkaKafkaUserConfigKafkaConnectSecretProvider, KafkaKafkaUserConfigKafkaConnectSecretProviderArgs                
- Name string
- Name of the secret provider. Used to reference secrets in connector config.
- Aws KafkaKafkaUserConfigKafkaConnectSecretProviderAws
- AWS secret provider configuration
- Vault KafkaKafkaUserConfigKafkaConnectSecretProviderVault
- Vault secret provider configuration
- Name string
- Name of the secret provider. Used to reference secrets in connector config.
- Aws KafkaKafkaUserConfigKafkaConnectSecretProviderAws
- AWS secret provider configuration
- Vault KafkaKafkaUserConfigKafkaConnectSecretProviderVault
- Vault secret provider configuration
- name String
- Name of the secret provider. Used to reference secrets in connector config.
- aws KafkaKafkaUserConfigKafkaConnectSecretProviderAws
- AWS secret provider configuration
- vault KafkaKafkaUserConfigKafkaConnectSecretProviderVault
- Vault secret provider configuration
- name string
- Name of the secret provider. Used to reference secrets in connector config.
- aws KafkaKafkaUserConfigKafkaConnectSecretProviderAws
- AWS secret provider configuration
- vault KafkaKafkaUserConfigKafkaConnectSecretProviderVault
- Vault secret provider configuration
- name str
- Name of the secret provider. Used to reference secrets in connector config.
- aws KafkaKafkaUserConfigKafkaConnectSecretProviderAws
- AWS secret provider configuration
- vault KafkaKafkaUserConfigKafkaConnectSecretProviderVault
- Vault secret provider configuration
- name String
- Name of the secret provider. Used to reference secrets in connector config.
- aws Property Map
- AWS secret provider configuration
- vault Property Map
- Vault secret provider configuration
KafkaKafkaUserConfigKafkaConnectSecretProviderAws, KafkaKafkaUserConfigKafkaConnectSecretProviderAwsArgs                  
- AuthMethod string
- Enum: credentials. Auth method of the vault secret provider.
- Region string
- Region used to lookup secrets with AWS SecretManager.
- AccessKey string
- Access key used to authenticate with aws.
- SecretKey string
- Secret key used to authenticate with aws.
- AuthMethod string
- Enum: credentials. Auth method of the vault secret provider.
- Region string
- Region used to lookup secrets with AWS SecretManager.
- AccessKey string
- Access key used to authenticate with aws.
- SecretKey string
- Secret key used to authenticate with aws.
- authMethod String
- Enum: credentials. Auth method of the vault secret provider.
- region String
- Region used to lookup secrets with AWS SecretManager.
- accessKey String
- Access key used to authenticate with aws.
- secretKey String
- Secret key used to authenticate with aws.
- authMethod string
- Enum: credentials. Auth method of the vault secret provider.
- region string
- Region used to lookup secrets with AWS SecretManager.
- accessKey string
- Access key used to authenticate with aws.
- secretKey string
- Secret key used to authenticate with aws.
- auth_method str
- Enum: credentials. Auth method of the vault secret provider.
- region str
- Region used to lookup secrets with AWS SecretManager.
- access_key str
- Access key used to authenticate with aws.
- secret_key str
- Secret key used to authenticate with aws.
- authMethod String
- Enum: credentials. Auth method of the vault secret provider.
- region String
- Region used to lookup secrets with AWS SecretManager.
- accessKey String
- Access key used to authenticate with aws.
- secretKey String
- Secret key used to authenticate with aws.
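A hedged sketch of wiring an AWS Secrets Manager provider into the service; it assumes the providers are passed as a kafkaConnectSecretProviders list on kafkaUserConfig (check the resource inputs above for the exact name) and reads placeholder credentials from Pulumi config:
import * as pulumi from "@pulumi/pulumi";
import * as aiven from "@pulumi/aiven";

// Sketch only: reference AWS Secrets Manager secrets from connector configs.
// The kafkaConnectSecretProviders list name and the config keys are assumptions.
const cfg = new pulumi.Config();
const kafkaWithAwsSecrets = new aiven.Kafka("kafka_with_aws_secrets", {
    project: "example-project",          // placeholder project name
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "kafka-with-aws-secrets",
    kafkaUserConfig: {
        kafkaConnect: true,
        kafkaConnectSecretProviders: [{
            name: "aws-sm",              // referenced by name from connector configs
            aws: {
                authMethod: "credentials",
                region: "eu-west-1",
                accessKey: cfg.requireSecret("awsAccessKey"),   // placeholder config keys
                secretKey: cfg.requireSecret("awsSecretKey"),
            },
        }],
    },
});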
KafkaKafkaUserConfigKafkaConnectSecretProviderVault, KafkaKafkaUserConfigKafkaConnectSecretProviderVaultArgs                  
- Address string
- Address of the Vault server.
- AuthMethod string
- Enum: token. Auth method of the vault secret provider.
- EngineVersion int
- Enum: 1, 2, and newer. KV Secrets Engine version of the Vault server instance.
- PrefixPathDepth int
- Prefix path depth of the secrets engine. Default is 1. If the secrets engine path has more than one segment, it has to be increased to the number of segments.
- Token string
- Token used to authenticate with Vault when using the token auth method.
- Address string
- Address of the Vault server.
- AuthMethod string
- Enum: token. Auth method of the vault secret provider.
- EngineVersion int
- Enum: 1, 2, and newer. KV Secrets Engine version of the Vault server instance.
- PrefixPathDepth int
- Prefix path depth of the secrets engine. Default is 1. If the secrets engine path has more than one segment, it has to be increased to the number of segments.
- Token string
- Token used to authenticate with Vault when using the token auth method.
- address String
- Address of the Vault server.
- authMethod String
- Enum: token. Auth method of the vault secret provider.
- engineVersion Integer
- Enum: 1, 2, and newer. KV Secrets Engine version of the Vault server instance.
- prefixPathDepth Integer
- Prefix path depth of the secrets engine. Default is 1. If the secrets engine path has more than one segment, it has to be increased to the number of segments.
- token String
- Token used to authenticate with Vault when using the token auth method.
- address string
- Address of the Vault server.
- authMethod string
- Enum: token. Auth method of the vault secret provider.
- engineVersion number
- Enum: 1, 2, and newer. KV Secrets Engine version of the Vault server instance.
- prefixPathDepth number
- Prefix path depth of the secrets engine. Default is 1. If the secrets engine path has more than one segment, it has to be increased to the number of segments.
- token string
- Token used to authenticate with Vault when using the token auth method.
- address str
- Address of the Vault server.
- auth_method str
- Enum: token. Auth method of the vault secret provider.
- engine_version int
- Enum: 1, 2, and newer. KV Secrets Engine version of the Vault server instance.
- prefix_path_depth int
- Prefix path depth of the secrets engine. Default is 1. If the secrets engine path has more than one segment, it has to be increased to the number of segments.
- token str
- Token used to authenticate with Vault when using the token auth method.
- address String
- Address of the Vault server.
- authMethod String
- Enum: token. Auth method of the vault secret provider.
- engineVersion Number
- Enum: 1, 2, and newer. KV Secrets Engine version of the Vault server instance.
- prefixPathDepth Number
- Prefix path depth of the secrets engine. Default is 1. If the secrets engine path has more than one segment, it has to be increased to the number of segments.
- token String
- Token used to authenticate with Vault when using the token auth method.
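A comparable TypeScript sketch for the Vault provider, again with assumed field names and placeholder values (the Vault address is hypothetical; keep the token in Pulumi config secrets):
import * as pulumi from "@pulumi/pulumi";
import * as aiven from "@pulumi/aiven";

const cfg = new pulumi.Config();

// Hypothetical sketch: resolve connector secrets from a Vault KV v2 secrets engine.
const kafkaWithVaultSecrets = new aiven.Kafka("kafka-with-vault-secrets", {
    project: cfg.require("aivenProject"),
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "kafka-vault-secrets",
    kafkaUserConfig: {
        kafkaConnect: true,
        kafkaConnectSecretProviders: [{
            name: "vault",
            vault: {
                address: "https://vault.example.com:8200",
                authMethod: "token",
                engineVersion: 2,
                prefixPathDepth: 1,
                token: cfg.requireSecret("vaultToken"),
            },
        }],
    },
});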
KafkaKafkaUserConfigKafkaRestConfig, KafkaKafkaUserConfigKafkaRestConfigArgs              
- ConsumerEnableAutoCommit bool
- If true the consumer's offset will be periodically committed to Kafka in the background. Default: true.
- ConsumerIdleDisconnectTimeout int
- Specifies the maximum duration (in seconds) a client can remain idle before it is deleted. If a consumer is inactive, it will exit the consumer group, and its state will be discarded. A value of 0 (default) indicates that the consumer will not be disconnected automatically due to inactivity. Default: 0.
- ConsumerRequestMaxBytes int
- Maximum number of bytes in unencoded message keys and values by a single request. Default: 67108864.
- ConsumerRequestTimeoutMs int
- Enum: 1000, 15000, 30000. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. Default: 1000.
- NameStrategy string
- Enum: record_name, topic_name, topic_record_name. Name strategy to use when selecting subject for storing schemas. Default: topic_name.
- NameStrategyValidation bool
- If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. Default: true.
- ProducerAcks string
- Enum: -1, 0, 1, all. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to all or -1, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: 1.
- ProducerCompressionType string
- Enum: gzip, lz4, none, snappy, zstd. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none, which is the default and equivalent to no compression.
- ProducerLingerMs int
- Wait for up to the given delay to allow batching records together. Default: 0.
- ProducerMaxRequestSize int
- The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. Default: 1048576.
- SimpleconsumerPoolSizeMax int
- Maximum number of SimpleConsumers that can be instantiated per broker. Default: 25.
- ConsumerEnableAutoCommit bool
- If true the consumer's offset will be periodically committed to Kafka in the background. Default: true.
- ConsumerIdleDisconnectTimeout int
- Specifies the maximum duration (in seconds) a client can remain idle before it is deleted. If a consumer is inactive, it will exit the consumer group, and its state will be discarded. A value of 0 (default) indicates that the consumer will not be disconnected automatically due to inactivity. Default: 0.
- ConsumerRequestMaxBytes int
- Maximum number of bytes in unencoded message keys and values by a single request. Default: 67108864.
- ConsumerRequestTimeoutMs int
- Enum: 1000, 15000, 30000. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. Default: 1000.
- NameStrategy string
- Enum: record_name, topic_name, topic_record_name. Name strategy to use when selecting subject for storing schemas. Default: topic_name.
- NameStrategyValidation bool
- If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. Default: true.
- ProducerAcks string
- Enum: -1, 0, 1, all. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to all or -1, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: 1.
- ProducerCompressionType string
- Enum: gzip, lz4, none, snappy, zstd. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none, which is the default and equivalent to no compression.
- ProducerLingerMs int
- Wait for up to the given delay to allow batching records together. Default: 0.
- ProducerMaxRequestSize int
- The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. Default: 1048576.
- SimpleconsumerPoolSizeMax int
- Maximum number of SimpleConsumers that can be instantiated per broker. Default: 25.
- consumerEnableAutoCommit Boolean
- If true the consumer's offset will be periodically committed to Kafka in the background. Default: true.
- consumerIdleDisconnectTimeout Integer
- Specifies the maximum duration (in seconds) a client can remain idle before it is deleted. If a consumer is inactive, it will exit the consumer group, and its state will be discarded. A value of 0 (default) indicates that the consumer will not be disconnected automatically due to inactivity. Default: 0.
- consumerRequestMaxBytes Integer
- Maximum number of bytes in unencoded message keys and values by a single request. Default: 67108864.
- consumerRequestTimeoutMs Integer
- Enum: 1000, 15000, 30000. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. Default: 1000.
- nameStrategy String
- Enum: record_name, topic_name, topic_record_name. Name strategy to use when selecting subject for storing schemas. Default: topic_name.
- nameStrategyValidation Boolean
- If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. Default: true.
- producerAcks String
- Enum: -1, 0, 1, all. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to all or -1, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: 1.
- producerCompressionType String
- Enum: gzip, lz4, none, snappy, zstd. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none, which is the default and equivalent to no compression.
- producerLingerMs Integer
- Wait for up to the given delay to allow batching records together. Default: 0.
- producerMaxRequestSize Integer
- The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. Default: 1048576.
- simpleconsumerPoolSizeMax Integer
- Maximum number of SimpleConsumers that can be instantiated per broker. Default: 25.
- consumerEnableAutoCommit boolean
- If true the consumer's offset will be periodically committed to Kafka in the background. Default: true.
- consumerIdleDisconnectTimeout number
- Specifies the maximum duration (in seconds) a client can remain idle before it is deleted. If a consumer is inactive, it will exit the consumer group, and its state will be discarded. A value of 0 (default) indicates that the consumer will not be disconnected automatically due to inactivity. Default: 0.
- consumerRequestMaxBytes number
- Maximum number of bytes in unencoded message keys and values by a single request. Default: 67108864.
- consumerRequestTimeoutMs number
- Enum: 1000, 15000, 30000. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. Default: 1000.
- nameStrategy string
- Enum: record_name, topic_name, topic_record_name. Name strategy to use when selecting subject for storing schemas. Default: topic_name.
- nameStrategyValidation boolean
- If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. Default: true.
- producerAcks string
- Enum: -1, 0, 1, all. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to all or -1, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: 1.
- producerCompressionType string
- Enum: gzip, lz4, none, snappy, zstd. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none, which is the default and equivalent to no compression.
- producerLingerMs number
- Wait for up to the given delay to allow batching records together. Default: 0.
- producerMaxRequestSize number
- The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. Default: 1048576.
- simpleconsumerPoolSizeMax number
- Maximum number of SimpleConsumers that can be instantiated per broker. Default: 25.
- consumer_enable_auto_commit bool
- If true the consumer's offset will be periodically committed to Kafka in the background. Default: true.
- consumer_idle_disconnect_timeout int
- Specifies the maximum duration (in seconds) a client can remain idle before it is deleted. If a consumer is inactive, it will exit the consumer group, and its state will be discarded. A value of 0 (default) indicates that the consumer will not be disconnected automatically due to inactivity. Default: 0.
- consumer_request_max_bytes int
- Maximum number of bytes in unencoded message keys and values by a single request. Default: 67108864.
- consumer_request_timeout_ms int
- Enum: 1000, 15000, 30000. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. Default: 1000.
- name_strategy str
- Enum: record_name, topic_name, topic_record_name. Name strategy to use when selecting subject for storing schemas. Default: topic_name.
- name_strategy_validation bool
- If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. Default: true.
- producer_acks str
- Enum: -1, 0, 1, all. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to all or -1, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: 1.
- producer_compression_type str
- Enum: gzip, lz4, none, snappy, zstd. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none, which is the default and equivalent to no compression.
- producer_linger_ms int
- Wait for up to the given delay to allow batching records together. Default: 0.
- producer_max_request_size int
- The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. Default: 1048576.
- simpleconsumer_pool_size_max int
- Maximum number of SimpleConsumers that can be instantiated per broker. Default: 25.
- consumerEnableAutoCommit Boolean
- If true the consumer's offset will be periodically committed to Kafka in the background. Default: true.
- consumerIdleDisconnectTimeout Number
- Specifies the maximum duration (in seconds) a client can remain idle before it is deleted. If a consumer is inactive, it will exit the consumer group, and its state will be discarded. A value of 0 (default) indicates that the consumer will not be disconnected automatically due to inactivity. Default: 0.
- consumerRequestMaxBytes Number
- Maximum number of bytes in unencoded message keys and values by a single request. Default: 67108864.
- consumerRequestTimeoutMs Number
- Enum: 1000, 15000, 30000. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. Default: 1000.
- nameStrategy String
- Enum: record_name, topic_name, topic_record_name. Name strategy to use when selecting subject for storing schemas. Default: topic_name.
- nameStrategyValidation Boolean
- If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. Default: true.
- producerAcks String
- Enum: -1, 0, 1, all. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to all or -1, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: 1.
- producerCompressionType String
- Enum: gzip, lz4, none, snappy, zstd. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none, which is the default and equivalent to no compression.
- producerLingerMs Number
- Wait for up to the given delay to allow batching records together. Default: 0.
- producerMaxRequestSize Number
- The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. Default: 1048576.
- simpleconsumerPoolSizeMax Number
- Maximum number of SimpleConsumers that can be instantiated per broker. Default: 25.
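As a non-authoritative TypeScript sketch, the REST proxy settings above map onto the kafkaRestConfig block of the user config; the project and service names below are placeholders.
import * as aiven from "@pulumi/aiven";

// Sketch: enable Kafka REST and tune a few of the settings documented above.
const kafkaWithRest = new aiven.Kafka("kafka-with-rest", {
    project: "example-project",
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "kafka-with-rest",
    kafkaUserConfig: {
        kafkaRest: true,
        kafkaRestConfig: {
            consumerEnableAutoCommit: true,
            consumerRequestTimeoutMs: 30000, // allowed values: 1000, 15000, 30000
            producerAcks: "all",             // wait for the full set of in-sync replicas
            producerCompressionType: "zstd",
            producerLingerMs: 5,
        },
    },
});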
KafkaKafkaUserConfigKafkaSaslMechanisms, KafkaKafkaUserConfigKafkaSaslMechanismsArgs              
- Plain bool
- Enable PLAIN mechanism. Default: true.
- ScramSha256 bool
- Enable SCRAM-SHA-256 mechanism. Default: true.
- ScramSha512 bool
- Enable SCRAM-SHA-512 mechanism. Default: true.
- Plain bool
- Enable PLAIN mechanism. Default: true.
- ScramSha256 bool
- Enable SCRAM-SHA-256 mechanism. Default: true.
- ScramSha512 bool
- Enable SCRAM-SHA-512 mechanism. Default: true.
- plain Boolean
- Enable PLAIN mechanism. Default: true.
- scramSha256 Boolean
- Enable SCRAM-SHA-256 mechanism. Default: true.
- scramSha512 Boolean
- Enable SCRAM-SHA-512 mechanism. Default: true.
- plain boolean
- Enable PLAIN mechanism. Default: true.
- scramSha256 boolean
- Enable SCRAM-SHA-256 mechanism. Default: true.
- scramSha512 boolean
- Enable SCRAM-SHA-512 mechanism. Default: true.
- plain bool
- Enable PLAIN mechanism. Default: true.
- scram_sha256 bool
- Enable SCRAM-SHA-256 mechanism. Default: true.
- scram_sha512 bool
- Enable SCRAM-SHA-512 mechanism. Default: true.
- plain Boolean
- Enable PLAIN mechanism. Default: true.
- scramSha256 Boolean
- Enable SCRAM-SHA-256 mechanism. Default: true.
- scramSha512 Boolean
- Enable SCRAM-SHA-512 mechanism. Default: true.
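A minimal TypeScript sketch, assuming the block is exposed as kafkaSaslMechanisms in the user config, that disables PLAIN and keeps only the SCRAM mechanisms (placeholder project and service names):
import * as aiven from "@pulumi/aiven";

// Sketch: allow only SCRAM-based SASL authentication.
const kafkaScramOnly = new aiven.Kafka("kafka-scram-only", {
    project: "example-project",
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "kafka-scram-only",
    kafkaUserConfig: {
        kafkaSaslMechanisms: {
            plain: false,
            scramSha256: true,
            scramSha512: true,
        },
    },
});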
KafkaKafkaUserConfigPrivateAccess, KafkaKafkaUserConfigPrivateAccessArgs            
- Kafka bool
- Allow clients to connect to kafka with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- KafkaConnect bool
- Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- KafkaRest bool
- Allow clients to connect to kafka_rest with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- Prometheus bool
- Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- SchemaRegistry bool
- Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- Kafka bool
- Allow clients to connect to kafka with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- KafkaConnect bool
- Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- KafkaRest bool
- Allow clients to connect to kafka_rest with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- Prometheus bool
- Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- SchemaRegistry bool
- Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- kafka Boolean
- Allow clients to connect to kafka with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- kafkaConnect Boolean
- Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- kafkaRest Boolean
- Allow clients to connect to kafka_rest with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- prometheus Boolean
- Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- schemaRegistry Boolean
- Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- kafka boolean
- Allow clients to connect to kafka with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- kafkaConnect boolean
- Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- kafkaRest boolean
- Allow clients to connect to kafka_rest with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- prometheus boolean
- Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- schemaRegistry boolean
- Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- kafka bool
- Allow clients to connect to kafka with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- kafka_connect bool
- Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- kafka_rest bool
- Allow clients to connect to kafka_rest with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- prometheus bool
- Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- schema_registry bool
- Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- kafka Boolean
- Allow clients to connect to kafka with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- kafkaConnect Boolean
- Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- kafkaRest Boolean
- Allow clients to connect to kafka_rest with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- prometheus Boolean
- Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- schemaRegistry Boolean
- Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
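For services running in a project VPC, a TypeScript sketch of the privateAccess block follows; the VPC ID and other values are placeholders.
import * as aiven from "@pulumi/aiven";

// Sketch: expose Kafka and Prometheus only over the service's private IP addresses.
const kafkaPrivate = new aiven.Kafka("kafka-private", {
    project: "example-project",
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "kafka-private",
    projectVpcId: "example-project/00000000-0000-0000-0000-000000000000", // placeholder VPC ID
    kafkaUserConfig: {
        privateAccess: {
            kafka: true,
            prometheus: true,
            schemaRegistry: false,
        },
    },
});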
KafkaKafkaUserConfigPrivatelinkAccess, KafkaKafkaUserConfigPrivatelinkAccessArgs            
- Jolokia bool
- Enable jolokia.
- Kafka bool
- Enable kafka.
- KafkaConnect bool
- Enable kafka_connect.
- KafkaRest bool
- Enable kafka_rest.
- Prometheus bool
- Enable prometheus.
- SchemaRegistry bool
- Enable schema_registry.
- Jolokia bool
- Enable jolokia.
- Kafka bool
- Enable kafka.
- KafkaConnect bool
- Enable kafka_connect.
- KafkaRest bool
- Enable kafka_rest.
- Prometheus bool
- Enable prometheus.
- SchemaRegistry bool
- Enable schema_registry.
- jolokia Boolean
- Enable jolokia.
- kafka Boolean
- Enable kafka.
- kafkaConnect Boolean
- Enable kafka_connect.
- kafkaRest Boolean
- Enable kafka_rest.
- prometheus Boolean
- Enable prometheus.
- schemaRegistry Boolean
- Enable schema_registry.
- jolokia boolean
- Enable jolokia.
- kafka boolean
- Enable kafka.
- kafkaConnect boolean
- Enable kafka_connect.
- kafkaRest boolean
- Enable kafka_rest.
- prometheus boolean
- Enable prometheus.
- schemaRegistry boolean
- Enable schema_registry.
- jolokia bool
- Enable jolokia.
- kafka bool
- Enable kafka.
- kafka_connect bool
- Enable kafka_connect.
- kafka_rest bool
- Enable kafka_rest.
- prometheus bool
- Enable prometheus.
- schema_registry bool
- Enable schema_registry.
- jolokia Boolean
- Enable jolokia.
- kafka Boolean
- Enable kafka.
- kafkaConnect Boolean
- Enable kafka_connect.
- kafkaRest Boolean
- Enable kafka_rest.
- prometheus Boolean
- Enable prometheus.
- schemaRegistry Boolean
- Enable schema_registry.
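Similarly, a TypeScript sketch of the privatelinkAccess block for a service reached through a private endpoint (all values are placeholders):
import * as aiven from "@pulumi/aiven";

// Sketch: choose which components are reachable over the PrivateLink connection.
const kafkaPrivatelink = new aiven.Kafka("kafka-privatelink", {
    project: "example-project",
    cloudName: "aws-eu-west-1",
    plan: "business-4",
    serviceName: "kafka-privatelink",
    kafkaUserConfig: {
        privatelinkAccess: {
            kafka: true,
            kafkaConnect: true,
            prometheus: true,
            schemaRegistry: true,
        },
    },
});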
KafkaKafkaUserConfigPublicAccess, KafkaKafkaUserConfigPublicAccessArgs            
- Kafka bool
- Allow clients to connect to kafka from the public internet for service nodes that are in a project VPC or another type of private network.
- KafkaConnect bool
- Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.
- KafkaRest bool
- Allow clients to connect to kafka_rest from the public internet for service nodes that are in a project VPC or another type of private network.
- Prometheus bool
- Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
- SchemaRegistry bool
- Allow clients to connect to schema_registry from the public internet for service nodes that are in a project VPC or another type of private network.
- Kafka bool
- Allow clients to connect to kafka from the public internet for service nodes that are in a project VPC or another type of private network.
- KafkaConnect bool
- Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.
- KafkaRest bool
- Allow clients to connect to kafka_rest from the public internet for service nodes that are in a project VPC or another type of private network.
- Prometheus bool
- Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
- SchemaRegistry bool
- Allow clients to connect to schema_registry from the public internet for service nodes that are in a project VPC or another type of private network.
- kafka Boolean
- Allow clients to connect to kafka from the public internet for service nodes that are in a project VPC or another type of private network.
- kafkaConnect Boolean
- Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.
- kafkaRest Boolean
- Allow clients to connect to kafka_rest from the public internet for service nodes that are in a project VPC or another type of private network.
- prometheus Boolean
- Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
- schemaRegistry Boolean
- Allow clients to connect to schema_registry from the public internet for service nodes that are in a project VPC or another type of private network.
- kafka boolean
- Allow clients to connect to kafka from the public internet for service nodes that are in a project VPC or another type of private network.
- kafkaConnect boolean
- Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.
- kafkaRest boolean
- Allow clients to connect to kafka_rest from the public internet for service nodes that are in a project VPC or another type of private network.
- prometheus boolean
- Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
- schemaRegistry boolean
- Allow clients to connect to schema_registry from the public internet for service nodes that are in a project VPC or another type of private network.
- kafka bool
- Allow clients to connect to kafka from the public internet for service nodes that are in a project VPC or another type of private network.
- kafka_connect bool
- Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.
- kafka_rest bool
- Allow clients to connect to kafka_rest from the public internet for service nodes that are in a project VPC or another type of private network.
- prometheus bool
- Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
- schema_registry bool
- Allow clients to connect to schema_registry from the public internet for service nodes that are in a project VPC or another type of private network.
- kafka Boolean
- Allow clients to connect to kafka from the public internet for service nodes that are in a project VPC or another type of private network.
- kafkaConnect Boolean
- Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.
- kafkaRest Boolean
- Allow clients to connect to kafka_rest from the public internet for service nodes that are in a project VPC or another type of private network.
- prometheus Boolean
- Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
- schemaRegistry Boolean
- Allow clients to connect to schema_registry from the public internet for service nodes that are in a project VPC or another type of private network.
KafkaKafkaUserConfigSchemaRegistryConfig, KafkaKafkaUserConfigSchemaRegistryConfigArgs              
- LeaderEligibility bool
- If true, Karapace / Schema Registry on the service nodes can participate in leader election. It might be needed to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to true.
- RetriableErrorsSilenced bool
- If enabled, Kafka errors which can be retried or custom errors specified for the service will not be raised; instead, a warning log is emitted. This will denoise issue tracking systems, e.g. Sentry. Defaults to true.
- SchemaReaderStrictMode bool
- If enabled, causes the Karapace schema-registry service to shut down when there are invalid schema records in the _schemas topic. Defaults to false.
- TopicName string
- The durable single partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas being inaccessible, data encoded with them potentially unreadable and schema ID sequence put out of order. It's only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to _schemas.
- LeaderEligibility bool
- If true, Karapace / Schema Registry on the service nodes can participate in leader election. It might be needed to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to true.
- RetriableErrorsSilenced bool
- If enabled, Kafka errors which can be retried or custom errors specified for the service will not be raised; instead, a warning log is emitted. This will denoise issue tracking systems, e.g. Sentry. Defaults to true.
- SchemaReaderStrictMode bool
- If enabled, causes the Karapace schema-registry service to shut down when there are invalid schema records in the _schemas topic. Defaults to false.
- TopicName string
- The durable single partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas being inaccessible, data encoded with them potentially unreadable and schema ID sequence put out of order. It's only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to _schemas.
- leaderEligibility Boolean
- If true, Karapace / Schema Registry on the service nodes can participate in leader election. It might be needed to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to true.
- retriableErrorsSilenced Boolean
- If enabled, Kafka errors which can be retried or custom errors specified for the service will not be raised; instead, a warning log is emitted. This will denoise issue tracking systems, e.g. Sentry. Defaults to true.
- schemaReaderStrictMode Boolean
- If enabled, causes the Karapace schema-registry service to shut down when there are invalid schema records in the _schemas topic. Defaults to false.
- topicName String
- The durable single partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas being inaccessible, data encoded with them potentially unreadable and schema ID sequence put out of order. It's only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to _schemas.
- leaderEligibility boolean
- If true, Karapace / Schema Registry on the service nodes can participate in leader election. It might be needed to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to true.
- retriableErrorsSilenced boolean
- If enabled, Kafka errors which can be retried or custom errors specified for the service will not be raised; instead, a warning log is emitted. This will denoise issue tracking systems, e.g. Sentry. Defaults to true.
- schemaReaderStrictMode boolean
- If enabled, causes the Karapace schema-registry service to shut down when there are invalid schema records in the _schemas topic. Defaults to false.
- topicName string
- The durable single partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas being inaccessible, data encoded with them potentially unreadable and schema ID sequence put out of order. It's only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to _schemas.
- leader_eligibility bool
- If true, Karapace / Schema Registry on the service nodes can participate in leader election. It might be needed to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to true.
- retriable_errors_silenced bool
- If enabled, Kafka errors which can be retried or custom errors specified for the service will not be raised; instead, a warning log is emitted. This will denoise issue tracking systems, e.g. Sentry. Defaults to true.
- schema_reader_strict_mode bool
- If enabled, causes the Karapace schema-registry service to shut down when there are invalid schema records in the _schemas topic. Defaults to false.
- topic_name str
- The durable single partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas being inaccessible, data encoded with them potentially unreadable and schema ID sequence put out of order. It's only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to _schemas.
- leaderEligibility Boolean
- If true, Karapace / Schema Registry on the service nodes can participate in leader election. It might be needed to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to true.
- retriableErrorsSilenced Boolean
- If enabled, Kafka errors which can be retried or custom errors specified for the service will not be raised; instead, a warning log is emitted. This will denoise issue tracking systems, e.g. Sentry. Defaults to true.
- schemaReaderStrictMode Boolean
- If enabled, causes the Karapace schema-registry service to shut down when there are invalid schema records in the _schemas topic. Defaults to false.
- topicName String
- The durable single partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas being inaccessible, data encoded with them potentially unreadable and schema ID sequence put out of order. It's only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to _schemas.
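A hedged TypeScript sketch of the schemaRegistryConfig block, for example on a secondary cluster whose _schemas topic is replicated from a primary (placeholder names and values):
import * as aiven from "@pulumi/aiven";

// Sketch: run Karapace / Schema Registry without leader eligibility on a secondary cluster.
const kafkaSchemaRegistry = new aiven.Kafka("kafka-schema-registry", {
    project: "example-project",
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "kafka-schema-registry",
    kafkaUserConfig: {
        schemaRegistry: true,
        schemaRegistryConfig: {
            leaderEligibility: false,
            schemaReaderStrictMode: false,
            topicName: "_schemas", // default; changing it on an existing setup is disruptive
        },
    },
});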
KafkaKafkaUserConfigSingleZone, KafkaKafkaUserConfigSingleZoneArgs            
- Enabled bool
- Whether to allocate nodes in the same availability zone or spread them across the available zones. By default, service nodes are spread across different AZs. Single-AZ support is best-effort and may temporarily allocate nodes in different AZs, e.g. in case of capacity limitations in one AZ.
- Enabled bool
- Whether to allocate nodes in the same availability zone or spread them across the available zones. By default, service nodes are spread across different AZs. Single-AZ support is best-effort and may temporarily allocate nodes in different AZs, e.g. in case of capacity limitations in one AZ.
- enabled Boolean
- Whether to allocate nodes in the same availability zone or spread them across the available zones. By default, service nodes are spread across different AZs. Single-AZ support is best-effort and may temporarily allocate nodes in different AZs, e.g. in case of capacity limitations in one AZ.
- enabled boolean
- Whether to allocate nodes in the same availability zone or spread them across the available zones. By default, service nodes are spread across different AZs. Single-AZ support is best-effort and may temporarily allocate nodes in different AZs, e.g. in case of capacity limitations in one AZ.
- enabled bool
- Whether to allocate nodes in the same availability zone or spread them across the available zones. By default, service nodes are spread across different AZs. Single-AZ support is best-effort and may temporarily allocate nodes in different AZs, e.g. in case of capacity limitations in one AZ.
- enabled Boolean
- Whether to allocate nodes in the same availability zone or spread them across the available zones. By default, service nodes are spread across different AZs. Single-AZ support is best-effort and may temporarily allocate nodes in different AZs, e.g. in case of capacity limitations in one AZ.
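A minimal TypeScript sketch of the singleZone block (placeholder project and service names):
import * as aiven from "@pulumi/aiven";

// Sketch: request best-effort placement of all service nodes in a single availability zone.
const kafkaSingleZone = new aiven.Kafka("kafka-single-zone", {
    project: "example-project",
    cloudName: "aws-eu-west-1",
    plan: "business-4",
    serviceName: "kafka-single-zone",
    kafkaUserConfig: {
        singleZone: {
            enabled: true,
        },
    },
});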
KafkaKafkaUserConfigTieredStorage, KafkaKafkaUserConfigTieredStorageArgs            
- Enabled bool
- Whether to enable the tiered storage functionality.
- LocalCache KafkaKafkaUserConfigTieredStorageLocalCache
- Local cache configuration
- Enabled bool
- Whether to enable the tiered storage functionality.
- LocalCache KafkaKafkaUserConfigTieredStorageLocalCache
- Local cache configuration
- enabled Boolean
- Whether to enable the tiered storage functionality.
- localCache KafkaKafkaUserConfigTieredStorageLocalCache
- Local cache configuration
- enabled boolean
- Whether to enable the tiered storage functionality.
- localCache KafkaKafkaUserConfigTieredStorageLocalCache
- Local cache configuration
- enabled bool
- Whether to enable the tiered storage functionality.
- local_cache KafkaKafkaUserConfigTieredStorageLocalCache
- Local cache configuration
- enabled Boolean
- Whether to enable the tiered storage functionality.
- localCache Property Map
- Local cache configuration
KafkaKafkaUserConfigTieredStorageLocalCache, KafkaKafkaUserConfigTieredStorageLocalCacheArgs                
- Size int
- Local cache size in bytes. Example: 1073741824.
- Size int
- Local cache size in bytes. Example: 1073741824.
- size Integer
- Local cache size in bytes. Example: 1073741824.
- size number
- Local cache size in bytes. Example: 1073741824.
- size int
- Local cache size in bytes. Example: 1073741824.
- size Number
- Local cache size in bytes. Example: 1073741824.
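A TypeScript sketch combining the tieredStorage and localCache blocks above; the project and service names are placeholders and the cache size is the documented example value.
import * as aiven from "@pulumi/aiven";

// Sketch: enable tiered storage with a 1 GiB local cache.
const kafkaTiered = new aiven.Kafka("kafka-tiered", {
    project: "example-project",
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "kafka-tiered",
    kafkaUserConfig: {
        tieredStorage: {
            enabled: true,
            localCache: {
                size: 1073741824, // bytes
            },
        },
    },
});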
KafkaServiceIntegration, KafkaServiceIntegrationArgs      
- IntegrationType string
- Type of the service integration
- SourceServiceName string
- Name of the source service
- IntegrationType string
- Type of the service integration
- SourceServiceName string
- Name of the source service
- integrationType String
- Type of the service integration
- sourceServiceName String
- Name of the source service
- integrationType string
- Type of the service integration
- sourceServiceName string
- Name of the source service
- integration_type str
- Type of the service integration
- source_service_name str
- Name of the source service
- integrationType String
- Type of the service integration
- sourceServiceName String
- Name of the source service
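A TypeScript sketch of the top-level serviceIntegrations argument; the integration type and source service name below are purely hypothetical and must match an integration supported for your project.
import * as aiven from "@pulumi/aiven";

// Sketch: attach an integration to the service at creation time.
const kafkaIntegrated = new aiven.Kafka("kafka-integrated", {
    project: "example-project",
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "kafka-integrated",
    serviceIntegrations: [{
        integrationType: "dashboard",         // hypothetical integration type
        sourceServiceName: "example-grafana", // hypothetical source service
    }],
});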
KafkaTag, KafkaTagArgs    
KafkaTechEmail, KafkaTechEmailArgs      
- Email string
- An email address to contact for technical issues
- Email string
- An email address to contact for technical issues
- email String
- An email address to contact for technical issues
- email string
- An email address to contact for technical issues
- email str
- An email address to contact for technical issues
- email String
- An email address to contact for technical issues
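A minimal TypeScript sketch of the techEmails argument (placeholder address and service names):
import * as aiven from "@pulumi/aiven";

// Sketch: register a technical contact for service notifications.
const kafkaWithContact = new aiven.Kafka("kafka-with-contact", {
    project: "example-project",
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "kafka-with-contact",
    techEmails: [{
        email: "platform-team@example.com",
    }],
});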
Import
$ pulumi import aiven:index/kafka:Kafka example_kafka PROJECT/SERVICE_NAME
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Aiven pulumi/pulumi-aiven
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the aiven Terraform Provider.