gcp.container.NodePool
Manages a node pool in a Google Kubernetes Engine (GKE) cluster separately from the cluster control plane. For more information see the official documentation and the API reference.
Example Usage
Using A Separately Managed Node Pool (Recommended)
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const _default = new gcp.serviceaccount.Account("default", {
accountId: "service-account-id",
displayName: "Service Account",
});
const primary = new gcp.container.Cluster("primary", {
name: "my-gke-cluster",
location: "us-central1",
removeDefaultNodePool: true,
initialNodeCount: 1,
});
const primaryPreemptibleNodes = new gcp.container.NodePool("primary_preemptible_nodes", {
name: "my-node-pool",
cluster: primary.id,
nodeCount: 1,
nodeConfig: {
preemptible: true,
machineType: "e2-medium",
serviceAccount: _default.email,
oauthScopes: ["https://www.googleapis.com/auth/cloud-platform"],
},
});
import pulumi
import pulumi_gcp as gcp
default = gcp.serviceaccount.Account("default",
account_id="service-account-id",
display_name="Service Account")
primary = gcp.container.Cluster("primary",
name="my-gke-cluster",
location="us-central1",
remove_default_node_pool=True,
initial_node_count=1)
primary_preemptible_nodes = gcp.container.NodePool("primary_preemptible_nodes",
name="my-node-pool",
cluster=primary.id,
node_count=1,
node_config=gcp.container.NodePoolNodeConfigArgs(
preemptible=True,
machine_type="e2-medium",
service_account=default.email,
oauth_scopes=["https://www.googleapis.com/auth/cloud-platform"],
))
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/container"
"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/serviceaccount"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_default, err := serviceaccount.NewAccount(ctx, "default", &serviceaccount.AccountArgs{
AccountId: pulumi.String("service-account-id"),
DisplayName: pulumi.String("Service Account"),
})
if err != nil {
return err
}
primary, err := container.NewCluster(ctx, "primary", &container.ClusterArgs{
Name: pulumi.String("my-gke-cluster"),
Location: pulumi.String("us-central1"),
RemoveDefaultNodePool: pulumi.Bool(true),
InitialNodeCount: pulumi.Int(1),
})
if err != nil {
return err
}
_, err = container.NewNodePool(ctx, "primary_preemptible_nodes", &container.NodePoolArgs{
Name: pulumi.String("my-node-pool"),
Cluster: primary.ID(),
NodeCount: pulumi.Int(1),
NodeConfig: &container.NodePoolNodeConfigArgs{
Preemptible: pulumi.Bool(true),
MachineType: pulumi.String("e2-medium"),
ServiceAccount: _default.Email,
OauthScopes: pulumi.StringArray{
pulumi.String("https://www.googleapis.com/auth/cloud-platform"),
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var @default = new Gcp.ServiceAccount.Account("default", new()
{
AccountId = "service-account-id",
DisplayName = "Service Account",
});
var primary = new Gcp.Container.Cluster("primary", new()
{
Name = "my-gke-cluster",
Location = "us-central1",
RemoveDefaultNodePool = true,
InitialNodeCount = 1,
});
var primaryPreemptibleNodes = new Gcp.Container.NodePool("primary_preemptible_nodes", new()
{
Name = "my-node-pool",
Cluster = primary.Id,
NodeCount = 1,
NodeConfig = new Gcp.Container.Inputs.NodePoolNodeConfigArgs
{
Preemptible = true,
MachineType = "e2-medium",
ServiceAccount = @default.Email,
OauthScopes = new[]
{
"https://www.googleapis.com/auth/cloud-platform",
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.serviceaccount.Account;
import com.pulumi.gcp.serviceaccount.AccountArgs;
import com.pulumi.gcp.container.Cluster;
import com.pulumi.gcp.container.ClusterArgs;
import com.pulumi.gcp.container.NodePool;
import com.pulumi.gcp.container.NodePoolArgs;
import com.pulumi.gcp.container.inputs.NodePoolNodeConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var default_ = new Account("default", AccountArgs.builder()
.accountId("service-account-id")
.displayName("Service Account")
.build());
var primary = new Cluster("primary", ClusterArgs.builder()
.name("my-gke-cluster")
.location("us-central1")
.removeDefaultNodePool(true)
.initialNodeCount(1)
.build());
var primaryPreemptibleNodes = new NodePool("primaryPreemptibleNodes", NodePoolArgs.builder()
.name("my-node-pool")
.cluster(primary.id())
.nodeCount(1)
.nodeConfig(NodePoolNodeConfigArgs.builder()
.preemptible(true)
.machineType("e2-medium")
.serviceAccount(default_.email())
.oauthScopes("https://www.googleapis.com/auth/cloud-platform")
.build())
.build());
}
}
resources:
default:
type: gcp:serviceaccount:Account
properties:
accountId: service-account-id
displayName: Service Account
primary:
type: gcp:container:Cluster
properties:
name: my-gke-cluster
location: us-central1
removeDefaultNodePool: true
initialNodeCount: 1
primaryPreemptibleNodes:
type: gcp:container:NodePool
name: primary_preemptible_nodes
properties:
name: my-node-pool
cluster: ${primary.id}
nodeCount: 1
nodeConfig:
preemptible: true
machineType: e2-medium
serviceAccount: ${default.email}
oauthScopes:
- https://www.googleapis.com/auth/cloud-platform
2 Node Pools, 1 Separately Managed + The Default Node Pool
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const _default = new gcp.serviceaccount.Account("default", {
accountId: "service-account-id",
displayName: "Service Account",
});
const primary = new gcp.container.Cluster("primary", {
name: "marcellus-wallace",
location: "us-central1-a",
initialNodeCount: 3,
nodeLocations: ["us-central1-c"],
nodeConfig: {
serviceAccount: _default.email,
oauthScopes: ["https://www.googleapis.com/auth/cloud-platform"],
guestAccelerators: [{
type: "nvidia-tesla-k80",
count: 1,
}],
},
});
const np = new gcp.container.NodePool("np", {
name: "my-node-pool",
cluster: primary.id,
nodeConfig: {
machineType: "e2-medium",
serviceAccount: _default.email,
oauthScopes: ["https://www.googleapis.com/auth/cloud-platform"],
},
});
import pulumi
import pulumi_gcp as gcp
default = gcp.serviceaccount.Account("default",
account_id="service-account-id",
display_name="Service Account")
primary = gcp.container.Cluster("primary",
name="marcellus-wallace",
location="us-central1-a",
initial_node_count=3,
node_locations=["us-central1-c"],
node_config=gcp.container.ClusterNodeConfigArgs(
service_account=default.email,
oauth_scopes=["https://www.googleapis.com/auth/cloud-platform"],
guest_accelerators=[gcp.container.ClusterNodeConfigGuestAcceleratorArgs(
type="nvidia-tesla-k80",
count=1,
)],
))
np = gcp.container.NodePool("np",
name="my-node-pool",
cluster=primary.id,
node_config=gcp.container.NodePoolNodeConfigArgs(
machine_type="e2-medium",
service_account=default.email,
oauth_scopes=["https://www.googleapis.com/auth/cloud-platform"],
))
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/container"
"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/serviceaccount"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_default, err := serviceaccount.NewAccount(ctx, "default", &serviceaccount.AccountArgs{
AccountId: pulumi.String("service-account-id"),
DisplayName: pulumi.String("Service Account"),
})
if err != nil {
return err
}
primary, err := container.NewCluster(ctx, "primary", &container.ClusterArgs{
Name: pulumi.String("marcellus-wallace"),
Location: pulumi.String("us-central1-a"),
InitialNodeCount: pulumi.Int(3),
NodeLocations: pulumi.StringArray{
pulumi.String("us-central1-c"),
},
NodeConfig: &container.ClusterNodeConfigArgs{
ServiceAccount: _default.Email,
OauthScopes: pulumi.StringArray{
pulumi.String("https://www.googleapis.com/auth/cloud-platform"),
},
GuestAccelerators: container.ClusterNodeConfigGuestAcceleratorArray{
&container.ClusterNodeConfigGuestAcceleratorArgs{
Type: pulumi.String("nvidia-tesla-k80"),
Count: pulumi.Int(1),
},
},
},
})
if err != nil {
return err
}
_, err = container.NewNodePool(ctx, "np", &container.NodePoolArgs{
Name: pulumi.String("my-node-pool"),
Cluster: primary.ID(),
NodeConfig: &container.NodePoolNodeConfigArgs{
MachineType: pulumi.String("e2-medium"),
ServiceAccount: _default.Email,
OauthScopes: pulumi.StringArray{
pulumi.String("https://www.googleapis.com/auth/cloud-platform"),
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var @default = new Gcp.ServiceAccount.Account("default", new()
{
AccountId = "service-account-id",
DisplayName = "Service Account",
});
var primary = new Gcp.Container.Cluster("primary", new()
{
Name = "marcellus-wallace",
Location = "us-central1-a",
InitialNodeCount = 3,
NodeLocations = new[]
{
"us-central1-c",
},
NodeConfig = new Gcp.Container.Inputs.ClusterNodeConfigArgs
{
ServiceAccount = @default.Email,
OauthScopes = new[]
{
"https://www.googleapis.com/auth/cloud-platform",
},
GuestAccelerators = new[]
{
new Gcp.Container.Inputs.ClusterNodeConfigGuestAcceleratorArgs
{
Type = "nvidia-tesla-k80",
Count = 1,
},
},
},
});
var np = new Gcp.Container.NodePool("np", new()
{
Name = "my-node-pool",
Cluster = primary.Id,
NodeConfig = new Gcp.Container.Inputs.NodePoolNodeConfigArgs
{
MachineType = "e2-medium",
ServiceAccount = @default.Email,
OauthScopes = new[]
{
"https://www.googleapis.com/auth/cloud-platform",
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.serviceaccount.Account;
import com.pulumi.gcp.serviceaccount.AccountArgs;
import com.pulumi.gcp.container.Cluster;
import com.pulumi.gcp.container.ClusterArgs;
import com.pulumi.gcp.container.inputs.ClusterNodeConfigArgs;
import com.pulumi.gcp.container.inputs.ClusterNodeConfigGuestAcceleratorArgs;
import com.pulumi.gcp.container.NodePool;
import com.pulumi.gcp.container.NodePoolArgs;
import com.pulumi.gcp.container.inputs.NodePoolNodeConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var default_ = new Account("default", AccountArgs.builder()
.accountId("service-account-id")
.displayName("Service Account")
.build());
var primary = new Cluster("primary", ClusterArgs.builder()
.name("marcellus-wallace")
.location("us-central1-a")
.initialNodeCount(3)
.nodeLocations("us-central1-c")
.nodeConfig(ClusterNodeConfigArgs.builder()
.serviceAccount(default_.email())
.oauthScopes("https://www.googleapis.com/auth/cloud-platform")
.guestAccelerators(ClusterNodeConfigGuestAcceleratorArgs.builder()
.type("nvidia-tesla-k80")
.count(1)
.build())
.build())
.build());
var np = new NodePool("np", NodePoolArgs.builder()
.name("my-node-pool")
.cluster(primary.id())
.nodeConfig(NodePoolNodeConfigArgs.builder()
.machineType("e2-medium")
.serviceAccount(default_.email())
.oauthScopes("https://www.googleapis.com/auth/cloud-platform")
.build())
.build());
}
}
resources:
default:
type: gcp:serviceaccount:Account
properties:
accountId: service-account-id
displayName: Service Account
np:
type: gcp:container:NodePool
properties:
name: my-node-pool
cluster: ${primary.id}
nodeConfig:
machineType: e2-medium
serviceAccount: ${default.email}
oauthScopes:
- https://www.googleapis.com/auth/cloud-platform
primary:
type: gcp:container:Cluster
properties:
name: marcellus-wallace
location: us-central1-a
initialNodeCount: 3
nodeLocations:
- us-central1-c
nodeConfig:
serviceAccount: ${default.email}
oauthScopes:
- https://www.googleapis.com/auth/cloud-platform
guestAccelerators:
- type: nvidia-tesla-k80
count: 1
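Both examples above size their pools with fixed node counts. The same resource also exposes autoscaling, management, and upgradeSettings inputs, documented under Inputs below. The following TypeScript sketch is illustrative only: it continues the TypeScript example above (so gcp and the primary cluster are already in scope), and the pool name, size limits, and surge values are placeholder assumptions rather than recommendations.
// Hypothetical autoscaled pool attached to the `primary` cluster from the examples above.
const autoscaled = new gcp.container.NodePool("autoscaled", {
    name: "my-autoscaled-pool",
    cluster: primary.id,
    autoscaling: {
        minNodeCount: 1, // per-zone lower bound (placeholder)
        maxNodeCount: 5, // per-zone upper bound (placeholder)
    },
    management: {
        autoRepair: true,
        autoUpgrade: true,
    },
    upgradeSettings: {
        maxSurge: 1,       // extra nodes allowed during an upgrade
        maxUnavailable: 0, // nodes that may be unavailable during an upgrade
    },
    nodeConfig: {
        machineType: "e2-medium",
        oauthScopes: ["https://www.googleapis.com/auth/cloud-platform"],
    },
});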
Create NodePool Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new NodePool(name: string, args: NodePoolArgs, opts?: CustomResourceOptions);
@overload
def NodePool(resource_name: str,
args: NodePoolArgs,
opts: Optional[ResourceOptions] = None)
@overload
def NodePool(resource_name: str,
opts: Optional[ResourceOptions] = None,
cluster: Optional[str] = None,
network_config: Optional[NodePoolNetworkConfigArgs] = None,
name_prefix: Optional[str] = None,
location: Optional[str] = None,
management: Optional[NodePoolManagementArgs] = None,
node_config: Optional[NodePoolNodeConfigArgs] = None,
name: Optional[str] = None,
initial_node_count: Optional[int] = None,
autoscaling: Optional[NodePoolAutoscalingArgs] = None,
max_pods_per_node: Optional[int] = None,
node_count: Optional[int] = None,
node_locations: Optional[Sequence[str]] = None,
placement_policy: Optional[NodePoolPlacementPolicyArgs] = None,
project: Optional[str] = None,
queued_provisioning: Optional[NodePoolQueuedProvisioningArgs] = None,
upgrade_settings: Optional[NodePoolUpgradeSettingsArgs] = None,
version: Optional[str] = None)
func NewNodePool(ctx *Context, name string, args NodePoolArgs, opts ...ResourceOption) (*NodePool, error)
public NodePool(string name, NodePoolArgs args, CustomResourceOptions? opts = null)
public NodePool(String name, NodePoolArgs args)
public NodePool(String name, NodePoolArgs args, CustomResourceOptions options)
type: gcp:container:NodePool
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args NodePoolArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args NodePoolArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args NodePoolArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args NodePoolArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args NodePoolArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
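The opts/options bag is the standard Pulumi resource-options argument and is not specific to NodePool. A minimal TypeScript sketch, assuming a primary cluster defined as in the examples above; the option choices are arbitrary illustrations, not requirements of this resource.
const pool = new gcp.container.NodePool("pool", {
    cluster: primary.id,
    nodeCount: 1,
}, {
    dependsOn: [primary], // explicit ordering; usually already implied by referencing primary.id
    protect: true,        // make `pulumi destroy` refuse to delete this pool
});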
Constructor example
The following reference example uses placeholder values for all input properties.
var nodePoolResource = new Gcp.Container.NodePool("nodePoolResource", new()
{
Cluster = "string",
NetworkConfig = new Gcp.Container.Inputs.NodePoolNetworkConfigArgs
{
AdditionalNodeNetworkConfigs = new[]
{
new Gcp.Container.Inputs.NodePoolNetworkConfigAdditionalNodeNetworkConfigArgs
{
Network = "string",
Subnetwork = "string",
},
},
AdditionalPodNetworkConfigs = new[]
{
new Gcp.Container.Inputs.NodePoolNetworkConfigAdditionalPodNetworkConfigArgs
{
MaxPodsPerNode = 0,
SecondaryPodRange = "string",
Subnetwork = "string",
},
},
CreatePodRange = false,
EnablePrivateNodes = false,
NetworkPerformanceConfig = new Gcp.Container.Inputs.NodePoolNetworkConfigNetworkPerformanceConfigArgs
{
TotalEgressBandwidthTier = "string",
},
PodCidrOverprovisionConfig = new Gcp.Container.Inputs.NodePoolNetworkConfigPodCidrOverprovisionConfigArgs
{
Disabled = false,
},
PodIpv4CidrBlock = "string",
PodRange = "string",
},
NamePrefix = "string",
Location = "string",
Management = new Gcp.Container.Inputs.NodePoolManagementArgs
{
AutoRepair = false,
AutoUpgrade = false,
},
NodeConfig = new Gcp.Container.Inputs.NodePoolNodeConfigArgs
{
AdvancedMachineFeatures = new Gcp.Container.Inputs.NodePoolNodeConfigAdvancedMachineFeaturesArgs
{
ThreadsPerCore = 0,
EnableNestedVirtualization = false,
},
BootDiskKmsKey = "string",
ConfidentialNodes = new Gcp.Container.Inputs.NodePoolNodeConfigConfidentialNodesArgs
{
Enabled = false,
},
ContainerdConfig = new Gcp.Container.Inputs.NodePoolNodeConfigContainerdConfigArgs
{
PrivateRegistryAccessConfig = new Gcp.Container.Inputs.NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigArgs
{
Enabled = false,
CertificateAuthorityDomainConfigs = new[]
{
new Gcp.Container.Inputs.NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigArgs
{
Fqdns = new[]
{
"string",
},
GcpSecretManagerCertificateConfig = new Gcp.Container.Inputs.NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfigArgs
{
SecretUri = "string",
},
},
},
},
},
DiskSizeGb = 0,
DiskType = "string",
EffectiveTaints = new[]
{
new Gcp.Container.Inputs.NodePoolNodeConfigEffectiveTaintArgs
{
Effect = "string",
Key = "string",
Value = "string",
},
},
EnableConfidentialStorage = false,
EphemeralStorageConfig = new Gcp.Container.Inputs.NodePoolNodeConfigEphemeralStorageConfigArgs
{
LocalSsdCount = 0,
},
EphemeralStorageLocalSsdConfig = new Gcp.Container.Inputs.NodePoolNodeConfigEphemeralStorageLocalSsdConfigArgs
{
LocalSsdCount = 0,
},
FastSocket = new Gcp.Container.Inputs.NodePoolNodeConfigFastSocketArgs
{
Enabled = false,
},
GcfsConfig = new Gcp.Container.Inputs.NodePoolNodeConfigGcfsConfigArgs
{
Enabled = false,
},
GuestAccelerators = new[]
{
new Gcp.Container.Inputs.NodePoolNodeConfigGuestAcceleratorArgs
{
Count = 0,
Type = "string",
GpuDriverInstallationConfig = new Gcp.Container.Inputs.NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfigArgs
{
GpuDriverVersion = "string",
},
GpuPartitionSize = "string",
GpuSharingConfig = new Gcp.Container.Inputs.NodePoolNodeConfigGuestAcceleratorGpuSharingConfigArgs
{
GpuSharingStrategy = "string",
MaxSharedClientsPerGpu = 0,
},
},
},
Gvnic = new Gcp.Container.Inputs.NodePoolNodeConfigGvnicArgs
{
Enabled = false,
},
HostMaintenancePolicy = new Gcp.Container.Inputs.NodePoolNodeConfigHostMaintenancePolicyArgs
{
MaintenanceInterval = "string",
},
ImageType = "string",
KubeletConfig = new Gcp.Container.Inputs.NodePoolNodeConfigKubeletConfigArgs
{
CpuManagerPolicy = "string",
CpuCfsQuota = false,
CpuCfsQuotaPeriod = "string",
PodPidsLimit = 0,
},
Labels =
{
{ "string", "string" },
},
LinuxNodeConfig = new Gcp.Container.Inputs.NodePoolNodeConfigLinuxNodeConfigArgs
{
CgroupMode = "string",
Sysctls =
{
{ "string", "string" },
},
},
LocalNvmeSsdBlockConfig = new Gcp.Container.Inputs.NodePoolNodeConfigLocalNvmeSsdBlockConfigArgs
{
LocalSsdCount = 0,
},
LocalSsdCount = 0,
LoggingVariant = "string",
MachineType = "string",
Metadata =
{
{ "string", "string" },
},
MinCpuPlatform = "string",
NodeGroup = "string",
OauthScopes = new[]
{
"string",
},
Preemptible = false,
ReservationAffinity = new Gcp.Container.Inputs.NodePoolNodeConfigReservationAffinityArgs
{
ConsumeReservationType = "string",
Key = "string",
Values = new[]
{
"string",
},
},
ResourceLabels =
{
{ "string", "string" },
},
ResourceManagerTags =
{
{ "string", "any" },
},
SandboxConfig = new Gcp.Container.Inputs.NodePoolNodeConfigSandboxConfigArgs
{
SandboxType = "string",
},
SecondaryBootDisks = new[]
{
new Gcp.Container.Inputs.NodePoolNodeConfigSecondaryBootDiskArgs
{
DiskImage = "string",
Mode = "string",
},
},
ServiceAccount = "string",
ShieldedInstanceConfig = new Gcp.Container.Inputs.NodePoolNodeConfigShieldedInstanceConfigArgs
{
EnableIntegrityMonitoring = false,
EnableSecureBoot = false,
},
SoleTenantConfig = new Gcp.Container.Inputs.NodePoolNodeConfigSoleTenantConfigArgs
{
NodeAffinities = new[]
{
new Gcp.Container.Inputs.NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs
{
Key = "string",
Operator = "string",
Values = new[]
{
"string",
},
},
},
},
Spot = false,
Tags = new[]
{
"string",
},
Taints = new[]
{
new Gcp.Container.Inputs.NodePoolNodeConfigTaintArgs
{
Effect = "string",
Key = "string",
Value = "string",
},
},
WorkloadMetadataConfig = new Gcp.Container.Inputs.NodePoolNodeConfigWorkloadMetadataConfigArgs
{
Mode = "string",
},
},
Name = "string",
InitialNodeCount = 0,
Autoscaling = new Gcp.Container.Inputs.NodePoolAutoscalingArgs
{
LocationPolicy = "string",
MaxNodeCount = 0,
MinNodeCount = 0,
TotalMaxNodeCount = 0,
TotalMinNodeCount = 0,
},
MaxPodsPerNode = 0,
NodeCount = 0,
NodeLocations = new[]
{
"string",
},
PlacementPolicy = new Gcp.Container.Inputs.NodePoolPlacementPolicyArgs
{
Type = "string",
PolicyName = "string",
TpuTopology = "string",
},
Project = "string",
QueuedProvisioning = new Gcp.Container.Inputs.NodePoolQueuedProvisioningArgs
{
Enabled = false,
},
UpgradeSettings = new Gcp.Container.Inputs.NodePoolUpgradeSettingsArgs
{
BlueGreenSettings = new Gcp.Container.Inputs.NodePoolUpgradeSettingsBlueGreenSettingsArgs
{
StandardRolloutPolicy = new Gcp.Container.Inputs.NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicyArgs
{
BatchNodeCount = 0,
BatchPercentage = 0,
BatchSoakDuration = "string",
},
NodePoolSoakDuration = "string",
},
MaxSurge = 0,
MaxUnavailable = 0,
Strategy = "string",
},
Version = "string",
});
example, err := container.NewNodePool(ctx, "nodePoolResource", &container.NodePoolArgs{
Cluster: pulumi.String("string"),
NetworkConfig: &container.NodePoolNetworkConfigArgs{
AdditionalNodeNetworkConfigs: container.NodePoolNetworkConfigAdditionalNodeNetworkConfigArray{
&container.NodePoolNetworkConfigAdditionalNodeNetworkConfigArgs{
Network: pulumi.String("string"),
Subnetwork: pulumi.String("string"),
},
},
AdditionalPodNetworkConfigs: container.NodePoolNetworkConfigAdditionalPodNetworkConfigArray{
&container.NodePoolNetworkConfigAdditionalPodNetworkConfigArgs{
MaxPodsPerNode: pulumi.Int(0),
SecondaryPodRange: pulumi.String("string"),
Subnetwork: pulumi.String("string"),
},
},
CreatePodRange: pulumi.Bool(false),
EnablePrivateNodes: pulumi.Bool(false),
NetworkPerformanceConfig: &container.NodePoolNetworkConfigNetworkPerformanceConfigArgs{
TotalEgressBandwidthTier: pulumi.String("string"),
},
PodCidrOverprovisionConfig: &container.NodePoolNetworkConfigPodCidrOverprovisionConfigArgs{
Disabled: pulumi.Bool(false),
},
PodIpv4CidrBlock: pulumi.String("string"),
PodRange: pulumi.String("string"),
},
NamePrefix: pulumi.String("string"),
Location: pulumi.String("string"),
Management: &container.NodePoolManagementArgs{
AutoRepair: pulumi.Bool(false),
AutoUpgrade: pulumi.Bool(false),
},
NodeConfig: &container.NodePoolNodeConfigArgs{
AdvancedMachineFeatures: &container.NodePoolNodeConfigAdvancedMachineFeaturesArgs{
ThreadsPerCore: pulumi.Int(0),
EnableNestedVirtualization: pulumi.Bool(false),
},
BootDiskKmsKey: pulumi.String("string"),
ConfidentialNodes: &container.NodePoolNodeConfigConfidentialNodesArgs{
Enabled: pulumi.Bool(false),
},
ContainerdConfig: &container.NodePoolNodeConfigContainerdConfigArgs{
PrivateRegistryAccessConfig: &container.NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigArgs{
Enabled: pulumi.Bool(false),
CertificateAuthorityDomainConfigs: container.NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigArray{
&container.NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigArgs{
Fqdns: pulumi.StringArray{
pulumi.String("string"),
},
GcpSecretManagerCertificateConfig: &container.NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfigArgs{
SecretUri: pulumi.String("string"),
},
},
},
},
},
DiskSizeGb: pulumi.Int(0),
DiskType: pulumi.String("string"),
EffectiveTaints: container.NodePoolNodeConfigEffectiveTaintArray{
&container.NodePoolNodeConfigEffectiveTaintArgs{
Effect: pulumi.String("string"),
Key: pulumi.String("string"),
Value: pulumi.String("string"),
},
},
EnableConfidentialStorage: pulumi.Bool(false),
EphemeralStorageConfig: &container.NodePoolNodeConfigEphemeralStorageConfigArgs{
LocalSsdCount: pulumi.Int(0),
},
EphemeralStorageLocalSsdConfig: &container.NodePoolNodeConfigEphemeralStorageLocalSsdConfigArgs{
LocalSsdCount: pulumi.Int(0),
},
FastSocket: &container.NodePoolNodeConfigFastSocketArgs{
Enabled: pulumi.Bool(false),
},
GcfsConfig: &container.NodePoolNodeConfigGcfsConfigArgs{
Enabled: pulumi.Bool(false),
},
GuestAccelerators: container.NodePoolNodeConfigGuestAcceleratorArray{
&container.NodePoolNodeConfigGuestAcceleratorArgs{
Count: pulumi.Int(0),
Type: pulumi.String("string"),
GpuDriverInstallationConfig: &container.NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfigArgs{
GpuDriverVersion: pulumi.String("string"),
},
GpuPartitionSize: pulumi.String("string"),
GpuSharingConfig: &container.NodePoolNodeConfigGuestAcceleratorGpuSharingConfigArgs{
GpuSharingStrategy: pulumi.String("string"),
MaxSharedClientsPerGpu: pulumi.Int(0),
},
},
},
Gvnic: &container.NodePoolNodeConfigGvnicArgs{
Enabled: pulumi.Bool(false),
},
HostMaintenancePolicy: &container.NodePoolNodeConfigHostMaintenancePolicyArgs{
MaintenanceInterval: pulumi.String("string"),
},
ImageType: pulumi.String("string"),
KubeletConfig: &container.NodePoolNodeConfigKubeletConfigArgs{
CpuManagerPolicy: pulumi.String("string"),
CpuCfsQuota: pulumi.Bool(false),
CpuCfsQuotaPeriod: pulumi.String("string"),
PodPidsLimit: pulumi.Int(0),
},
Labels: pulumi.StringMap{
"string": pulumi.String("string"),
},
LinuxNodeConfig: &container.NodePoolNodeConfigLinuxNodeConfigArgs{
CgroupMode: pulumi.String("string"),
Sysctls: pulumi.StringMap{
"string": pulumi.String("string"),
},
},
LocalNvmeSsdBlockConfig: &container.NodePoolNodeConfigLocalNvmeSsdBlockConfigArgs{
LocalSsdCount: pulumi.Int(0),
},
LocalSsdCount: pulumi.Int(0),
LoggingVariant: pulumi.String("string"),
MachineType: pulumi.String("string"),
Metadata: pulumi.StringMap{
"string": pulumi.String("string"),
},
MinCpuPlatform: pulumi.String("string"),
NodeGroup: pulumi.String("string"),
OauthScopes: pulumi.StringArray{
pulumi.String("string"),
},
Preemptible: pulumi.Bool(false),
ReservationAffinity: &container.NodePoolNodeConfigReservationAffinityArgs{
ConsumeReservationType: pulumi.String("string"),
Key: pulumi.String("string"),
Values: pulumi.StringArray{
pulumi.String("string"),
},
},
ResourceLabels: pulumi.StringMap{
"string": pulumi.String("string"),
},
ResourceManagerTags: pulumi.Map{
"string": pulumi.Any("any"),
},
SandboxConfig: &container.NodePoolNodeConfigSandboxConfigArgs{
SandboxType: pulumi.String("string"),
},
SecondaryBootDisks: container.NodePoolNodeConfigSecondaryBootDiskArray{
&container.NodePoolNodeConfigSecondaryBootDiskArgs{
DiskImage: pulumi.String("string"),
Mode: pulumi.String("string"),
},
},
ServiceAccount: pulumi.String("string"),
ShieldedInstanceConfig: &container.NodePoolNodeConfigShieldedInstanceConfigArgs{
EnableIntegrityMonitoring: pulumi.Bool(false),
EnableSecureBoot: pulumi.Bool(false),
},
SoleTenantConfig: &container.NodePoolNodeConfigSoleTenantConfigArgs{
NodeAffinities: container.NodePoolNodeConfigSoleTenantConfigNodeAffinityArray{
&container.NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs{
Key: pulumi.String("string"),
Operator: pulumi.String("string"),
Values: pulumi.StringArray{
pulumi.String("string"),
},
},
},
},
Spot: pulumi.Bool(false),
Tags: pulumi.StringArray{
pulumi.String("string"),
},
Taints: container.NodePoolNodeConfigTaintArray{
&container.NodePoolNodeConfigTaintArgs{
Effect: pulumi.String("string"),
Key: pulumi.String("string"),
Value: pulumi.String("string"),
},
},
WorkloadMetadataConfig: &container.NodePoolNodeConfigWorkloadMetadataConfigArgs{
Mode: pulumi.String("string"),
},
},
Name: pulumi.String("string"),
InitialNodeCount: pulumi.Int(0),
Autoscaling: &container.NodePoolAutoscalingArgs{
LocationPolicy: pulumi.String("string"),
MaxNodeCount: pulumi.Int(0),
MinNodeCount: pulumi.Int(0),
TotalMaxNodeCount: pulumi.Int(0),
TotalMinNodeCount: pulumi.Int(0),
},
MaxPodsPerNode: pulumi.Int(0),
NodeCount: pulumi.Int(0),
NodeLocations: pulumi.StringArray{
pulumi.String("string"),
},
PlacementPolicy: &container.NodePoolPlacementPolicyArgs{
Type: pulumi.String("string"),
PolicyName: pulumi.String("string"),
TpuTopology: pulumi.String("string"),
},
Project: pulumi.String("string"),
QueuedProvisioning: &container.NodePoolQueuedProvisioningArgs{
Enabled: pulumi.Bool(false),
},
UpgradeSettings: &container.NodePoolUpgradeSettingsArgs{
BlueGreenSettings: &container.NodePoolUpgradeSettingsBlueGreenSettingsArgs{
StandardRolloutPolicy: &container.NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicyArgs{
BatchNodeCount: pulumi.Int(0),
BatchPercentage: pulumi.Float64(0),
BatchSoakDuration: pulumi.String("string"),
},
NodePoolSoakDuration: pulumi.String("string"),
},
MaxSurge: pulumi.Int(0),
MaxUnavailable: pulumi.Int(0),
Strategy: pulumi.String("string"),
},
Version: pulumi.String("string"),
})
var nodePoolResource = new NodePool("nodePoolResource", NodePoolArgs.builder()
.cluster("string")
.networkConfig(NodePoolNetworkConfigArgs.builder()
.additionalNodeNetworkConfigs(NodePoolNetworkConfigAdditionalNodeNetworkConfigArgs.builder()
.network("string")
.subnetwork("string")
.build())
.additionalPodNetworkConfigs(NodePoolNetworkConfigAdditionalPodNetworkConfigArgs.builder()
.maxPodsPerNode(0)
.secondaryPodRange("string")
.subnetwork("string")
.build())
.createPodRange(false)
.enablePrivateNodes(false)
.networkPerformanceConfig(NodePoolNetworkConfigNetworkPerformanceConfigArgs.builder()
.totalEgressBandwidthTier("string")
.build())
.podCidrOverprovisionConfig(NodePoolNetworkConfigPodCidrOverprovisionConfigArgs.builder()
.disabled(false)
.build())
.podIpv4CidrBlock("string")
.podRange("string")
.build())
.namePrefix("string")
.location("string")
.management(NodePoolManagementArgs.builder()
.autoRepair(false)
.autoUpgrade(false)
.build())
.nodeConfig(NodePoolNodeConfigArgs.builder()
.advancedMachineFeatures(NodePoolNodeConfigAdvancedMachineFeaturesArgs.builder()
.threadsPerCore(0)
.enableNestedVirtualization(false)
.build())
.bootDiskKmsKey("string")
.confidentialNodes(NodePoolNodeConfigConfidentialNodesArgs.builder()
.enabled(false)
.build())
.containerdConfig(NodePoolNodeConfigContainerdConfigArgs.builder()
.privateRegistryAccessConfig(NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigArgs.builder()
.enabled(false)
.certificateAuthorityDomainConfigs(NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigArgs.builder()
.fqdns("string")
.gcpSecretManagerCertificateConfig(NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfigArgs.builder()
.secretUri("string")
.build())
.build())
.build())
.build())
.diskSizeGb(0)
.diskType("string")
.effectiveTaints(NodePoolNodeConfigEffectiveTaintArgs.builder()
.effect("string")
.key("string")
.value("string")
.build())
.enableConfidentialStorage(false)
.ephemeralStorageConfig(NodePoolNodeConfigEphemeralStorageConfigArgs.builder()
.localSsdCount(0)
.build())
.ephemeralStorageLocalSsdConfig(NodePoolNodeConfigEphemeralStorageLocalSsdConfigArgs.builder()
.localSsdCount(0)
.build())
.fastSocket(NodePoolNodeConfigFastSocketArgs.builder()
.enabled(false)
.build())
.gcfsConfig(NodePoolNodeConfigGcfsConfigArgs.builder()
.enabled(false)
.build())
.guestAccelerators(NodePoolNodeConfigGuestAcceleratorArgs.builder()
.count(0)
.type("string")
.gpuDriverInstallationConfig(NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfigArgs.builder()
.gpuDriverVersion("string")
.build())
.gpuPartitionSize("string")
.gpuSharingConfig(NodePoolNodeConfigGuestAcceleratorGpuSharingConfigArgs.builder()
.gpuSharingStrategy("string")
.maxSharedClientsPerGpu(0)
.build())
.build())
.gvnic(NodePoolNodeConfigGvnicArgs.builder()
.enabled(false)
.build())
.hostMaintenancePolicy(NodePoolNodeConfigHostMaintenancePolicyArgs.builder()
.maintenanceInterval("string")
.build())
.imageType("string")
.kubeletConfig(NodePoolNodeConfigKubeletConfigArgs.builder()
.cpuManagerPolicy("string")
.cpuCfsQuota(false)
.cpuCfsQuotaPeriod("string")
.podPidsLimit(0)
.build())
.labels(Map.of("string", "string"))
.linuxNodeConfig(NodePoolNodeConfigLinuxNodeConfigArgs.builder()
.cgroupMode("string")
.sysctls(Map.of("string", "string"))
.build())
.localNvmeSsdBlockConfig(NodePoolNodeConfigLocalNvmeSsdBlockConfigArgs.builder()
.localSsdCount(0)
.build())
.localSsdCount(0)
.loggingVariant("string")
.machineType("string")
.metadata(Map.of("string", "string"))
.minCpuPlatform("string")
.nodeGroup("string")
.oauthScopes("string")
.preemptible(false)
.reservationAffinity(NodePoolNodeConfigReservationAffinityArgs.builder()
.consumeReservationType("string")
.key("string")
.values("string")
.build())
.resourceLabels(Map.of("string", "string"))
.resourceManagerTags(Map.of("string", "any"))
.sandboxConfig(NodePoolNodeConfigSandboxConfigArgs.builder()
.sandboxType("string")
.build())
.secondaryBootDisks(NodePoolNodeConfigSecondaryBootDiskArgs.builder()
.diskImage("string")
.mode("string")
.build())
.serviceAccount("string")
.shieldedInstanceConfig(NodePoolNodeConfigShieldedInstanceConfigArgs.builder()
.enableIntegrityMonitoring(false)
.enableSecureBoot(false)
.build())
.soleTenantConfig(NodePoolNodeConfigSoleTenantConfigArgs.builder()
.nodeAffinities(NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs.builder()
.key("string")
.operator("string")
.values("string")
.build())
.build())
.spot(false)
.tags("string")
.taints(NodePoolNodeConfigTaintArgs.builder()
.effect("string")
.key("string")
.value("string")
.build())
.workloadMetadataConfig(NodePoolNodeConfigWorkloadMetadataConfigArgs.builder()
.mode("string")
.build())
.build())
.name("string")
.initialNodeCount(0)
.autoscaling(NodePoolAutoscalingArgs.builder()
.locationPolicy("string")
.maxNodeCount(0)
.minNodeCount(0)
.totalMaxNodeCount(0)
.totalMinNodeCount(0)
.build())
.maxPodsPerNode(0)
.nodeCount(0)
.nodeLocations("string")
.placementPolicy(NodePoolPlacementPolicyArgs.builder()
.type("string")
.policyName("string")
.tpuTopology("string")
.build())
.project("string")
.queuedProvisioning(NodePoolQueuedProvisioningArgs.builder()
.enabled(false)
.build())
.upgradeSettings(NodePoolUpgradeSettingsArgs.builder()
.blueGreenSettings(NodePoolUpgradeSettingsBlueGreenSettingsArgs.builder()
.standardRolloutPolicy(NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicyArgs.builder()
.batchNodeCount(0)
.batchPercentage(0)
.batchSoakDuration("string")
.build())
.nodePoolSoakDuration("string")
.build())
.maxSurge(0)
.maxUnavailable(0)
.strategy("string")
.build())
.version("string")
.build());
node_pool_resource = gcp.container.NodePool("nodePoolResource",
cluster="string",
network_config=gcp.container.NodePoolNetworkConfigArgs(
additional_node_network_configs=[gcp.container.NodePoolNetworkConfigAdditionalNodeNetworkConfigArgs(
network="string",
subnetwork="string",
)],
additional_pod_network_configs=[gcp.container.NodePoolNetworkConfigAdditionalPodNetworkConfigArgs(
max_pods_per_node=0,
secondary_pod_range="string",
subnetwork="string",
)],
create_pod_range=False,
enable_private_nodes=False,
network_performance_config=gcp.container.NodePoolNetworkConfigNetworkPerformanceConfigArgs(
total_egress_bandwidth_tier="string",
),
pod_cidr_overprovision_config=gcp.container.NodePoolNetworkConfigPodCidrOverprovisionConfigArgs(
disabled=False,
),
pod_ipv4_cidr_block="string",
pod_range="string",
),
name_prefix="string",
location="string",
management=gcp.container.NodePoolManagementArgs(
auto_repair=False,
auto_upgrade=False,
),
node_config=gcp.container.NodePoolNodeConfigArgs(
advanced_machine_features=gcp.container.NodePoolNodeConfigAdvancedMachineFeaturesArgs(
threads_per_core=0,
enable_nested_virtualization=False,
),
boot_disk_kms_key="string",
confidential_nodes=gcp.container.NodePoolNodeConfigConfidentialNodesArgs(
enabled=False,
),
containerd_config=gcp.container.NodePoolNodeConfigContainerdConfigArgs(
private_registry_access_config=gcp.container.NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigArgs(
enabled=False,
certificate_authority_domain_configs=[gcp.container.NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigArgs(
fqdns=["string"],
gcp_secret_manager_certificate_config=gcp.container.NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfigArgs(
secret_uri="string",
),
)],
),
),
disk_size_gb=0,
disk_type="string",
effective_taints=[gcp.container.NodePoolNodeConfigEffectiveTaintArgs(
effect="string",
key="string",
value="string",
)],
enable_confidential_storage=False,
ephemeral_storage_config=gcp.container.NodePoolNodeConfigEphemeralStorageConfigArgs(
local_ssd_count=0,
),
ephemeral_storage_local_ssd_config=gcp.container.NodePoolNodeConfigEphemeralStorageLocalSsdConfigArgs(
local_ssd_count=0,
),
fast_socket=gcp.container.NodePoolNodeConfigFastSocketArgs(
enabled=False,
),
gcfs_config=gcp.container.NodePoolNodeConfigGcfsConfigArgs(
enabled=False,
),
guest_accelerators=[gcp.container.NodePoolNodeConfigGuestAcceleratorArgs(
count=0,
type="string",
gpu_driver_installation_config=gcp.container.NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfigArgs(
gpu_driver_version="string",
),
gpu_partition_size="string",
gpu_sharing_config=gcp.container.NodePoolNodeConfigGuestAcceleratorGpuSharingConfigArgs(
gpu_sharing_strategy="string",
max_shared_clients_per_gpu=0,
),
)],
gvnic=gcp.container.NodePoolNodeConfigGvnicArgs(
enabled=False,
),
host_maintenance_policy=gcp.container.NodePoolNodeConfigHostMaintenancePolicyArgs(
maintenance_interval="string",
),
image_type="string",
kubelet_config=gcp.container.NodePoolNodeConfigKubeletConfigArgs(
cpu_manager_policy="string",
cpu_cfs_quota=False,
cpu_cfs_quota_period="string",
pod_pids_limit=0,
),
labels={
"string": "string",
},
linux_node_config=gcp.container.NodePoolNodeConfigLinuxNodeConfigArgs(
cgroup_mode="string",
sysctls={
"string": "string",
},
),
local_nvme_ssd_block_config=gcp.container.NodePoolNodeConfigLocalNvmeSsdBlockConfigArgs(
local_ssd_count=0,
),
local_ssd_count=0,
logging_variant="string",
machine_type="string",
metadata={
"string": "string",
},
min_cpu_platform="string",
node_group="string",
oauth_scopes=["string"],
preemptible=False,
reservation_affinity=gcp.container.NodePoolNodeConfigReservationAffinityArgs(
consume_reservation_type="string",
key="string",
values=["string"],
),
resource_labels={
"string": "string",
},
resource_manager_tags={
"string": "any",
},
sandbox_config=gcp.container.NodePoolNodeConfigSandboxConfigArgs(
sandbox_type="string",
),
secondary_boot_disks=[gcp.container.NodePoolNodeConfigSecondaryBootDiskArgs(
disk_image="string",
mode="string",
)],
service_account="string",
shielded_instance_config=gcp.container.NodePoolNodeConfigShieldedInstanceConfigArgs(
enable_integrity_monitoring=False,
enable_secure_boot=False,
),
sole_tenant_config=gcp.container.NodePoolNodeConfigSoleTenantConfigArgs(
node_affinities=[gcp.container.NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs(
key="string",
operator="string",
values=["string"],
)],
),
spot=False,
tags=["string"],
taints=[gcp.container.NodePoolNodeConfigTaintArgs(
effect="string",
key="string",
value="string",
)],
workload_metadata_config=gcp.container.NodePoolNodeConfigWorkloadMetadataConfigArgs(
mode="string",
),
),
name="string",
initial_node_count=0,
autoscaling=gcp.container.NodePoolAutoscalingArgs(
location_policy="string",
max_node_count=0,
min_node_count=0,
total_max_node_count=0,
total_min_node_count=0,
),
max_pods_per_node=0,
node_count=0,
node_locations=["string"],
placement_policy=gcp.container.NodePoolPlacementPolicyArgs(
type="string",
policy_name="string",
tpu_topology="string",
),
project="string",
queued_provisioning=gcp.container.NodePoolQueuedProvisioningArgs(
enabled=False,
),
upgrade_settings=gcp.container.NodePoolUpgradeSettingsArgs(
blue_green_settings=gcp.container.NodePoolUpgradeSettingsBlueGreenSettingsArgs(
standard_rollout_policy=gcp.container.NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicyArgs(
batch_node_count=0,
batch_percentage=0,
batch_soak_duration="string",
),
node_pool_soak_duration="string",
),
max_surge=0,
max_unavailable=0,
strategy="string",
),
version="string")
const nodePoolResource = new gcp.container.NodePool("nodePoolResource", {
cluster: "string",
networkConfig: {
additionalNodeNetworkConfigs: [{
network: "string",
subnetwork: "string",
}],
additionalPodNetworkConfigs: [{
maxPodsPerNode: 0,
secondaryPodRange: "string",
subnetwork: "string",
}],
createPodRange: false,
enablePrivateNodes: false,
networkPerformanceConfig: {
totalEgressBandwidthTier: "string",
},
podCidrOverprovisionConfig: {
disabled: false,
},
podIpv4CidrBlock: "string",
podRange: "string",
},
namePrefix: "string",
location: "string",
management: {
autoRepair: false,
autoUpgrade: false,
},
nodeConfig: {
advancedMachineFeatures: {
threadsPerCore: 0,
enableNestedVirtualization: false,
},
bootDiskKmsKey: "string",
confidentialNodes: {
enabled: false,
},
containerdConfig: {
privateRegistryAccessConfig: {
enabled: false,
certificateAuthorityDomainConfigs: [{
fqdns: ["string"],
gcpSecretManagerCertificateConfig: {
secretUri: "string",
},
}],
},
},
diskSizeGb: 0,
diskType: "string",
effectiveTaints: [{
effect: "string",
key: "string",
value: "string",
}],
enableConfidentialStorage: false,
ephemeralStorageConfig: {
localSsdCount: 0,
},
ephemeralStorageLocalSsdConfig: {
localSsdCount: 0,
},
fastSocket: {
enabled: false,
},
gcfsConfig: {
enabled: false,
},
guestAccelerators: [{
count: 0,
type: "string",
gpuDriverInstallationConfig: {
gpuDriverVersion: "string",
},
gpuPartitionSize: "string",
gpuSharingConfig: {
gpuSharingStrategy: "string",
maxSharedClientsPerGpu: 0,
},
}],
gvnic: {
enabled: false,
},
hostMaintenancePolicy: {
maintenanceInterval: "string",
},
imageType: "string",
kubeletConfig: {
cpuManagerPolicy: "string",
cpuCfsQuota: false,
cpuCfsQuotaPeriod: "string",
podPidsLimit: 0,
},
labels: {
string: "string",
},
linuxNodeConfig: {
cgroupMode: "string",
sysctls: {
string: "string",
},
},
localNvmeSsdBlockConfig: {
localSsdCount: 0,
},
localSsdCount: 0,
loggingVariant: "string",
machineType: "string",
metadata: {
string: "string",
},
minCpuPlatform: "string",
nodeGroup: "string",
oauthScopes: ["string"],
preemptible: false,
reservationAffinity: {
consumeReservationType: "string",
key: "string",
values: ["string"],
},
resourceLabels: {
string: "string",
},
resourceManagerTags: {
string: "any",
},
sandboxConfig: {
sandboxType: "string",
},
secondaryBootDisks: [{
diskImage: "string",
mode: "string",
}],
serviceAccount: "string",
shieldedInstanceConfig: {
enableIntegrityMonitoring: false,
enableSecureBoot: false,
},
soleTenantConfig: {
nodeAffinities: [{
key: "string",
operator: "string",
values: ["string"],
}],
},
spot: false,
tags: ["string"],
taints: [{
effect: "string",
key: "string",
value: "string",
}],
workloadMetadataConfig: {
mode: "string",
},
},
name: "string",
initialNodeCount: 0,
autoscaling: {
locationPolicy: "string",
maxNodeCount: 0,
minNodeCount: 0,
totalMaxNodeCount: 0,
totalMinNodeCount: 0,
},
maxPodsPerNode: 0,
nodeCount: 0,
nodeLocations: ["string"],
placementPolicy: {
type: "string",
policyName: "string",
tpuTopology: "string",
},
project: "string",
queuedProvisioning: {
enabled: false,
},
upgradeSettings: {
blueGreenSettings: {
standardRolloutPolicy: {
batchNodeCount: 0,
batchPercentage: 0,
batchSoakDuration: "string",
},
nodePoolSoakDuration: "string",
},
maxSurge: 0,
maxUnavailable: 0,
strategy: "string",
},
version: "string",
});
type: gcp:container:NodePool
properties:
autoscaling:
locationPolicy: string
maxNodeCount: 0
minNodeCount: 0
totalMaxNodeCount: 0
totalMinNodeCount: 0
cluster: string
initialNodeCount: 0
location: string
management:
autoRepair: false
autoUpgrade: false
maxPodsPerNode: 0
name: string
namePrefix: string
networkConfig:
additionalNodeNetworkConfigs:
- network: string
subnetwork: string
additionalPodNetworkConfigs:
- maxPodsPerNode: 0
secondaryPodRange: string
subnetwork: string
createPodRange: false
enablePrivateNodes: false
networkPerformanceConfig:
totalEgressBandwidthTier: string
podCidrOverprovisionConfig:
disabled: false
podIpv4CidrBlock: string
podRange: string
nodeConfig:
advancedMachineFeatures:
enableNestedVirtualization: false
threadsPerCore: 0
bootDiskKmsKey: string
confidentialNodes:
enabled: false
containerdConfig:
privateRegistryAccessConfig:
certificateAuthorityDomainConfigs:
- fqdns:
- string
gcpSecretManagerCertificateConfig:
secretUri: string
enabled: false
diskSizeGb: 0
diskType: string
effectiveTaints:
- effect: string
key: string
value: string
enableConfidentialStorage: false
ephemeralStorageConfig:
localSsdCount: 0
ephemeralStorageLocalSsdConfig:
localSsdCount: 0
fastSocket:
enabled: false
gcfsConfig:
enabled: false
guestAccelerators:
- count: 0
gpuDriverInstallationConfig:
gpuDriverVersion: string
gpuPartitionSize: string
gpuSharingConfig:
gpuSharingStrategy: string
maxSharedClientsPerGpu: 0
type: string
gvnic:
enabled: false
hostMaintenancePolicy:
maintenanceInterval: string
imageType: string
kubeletConfig:
cpuCfsQuota: false
cpuCfsQuotaPeriod: string
cpuManagerPolicy: string
podPidsLimit: 0
labels:
string: string
linuxNodeConfig:
cgroupMode: string
sysctls:
string: string
localNvmeSsdBlockConfig:
localSsdCount: 0
localSsdCount: 0
loggingVariant: string
machineType: string
metadata:
string: string
minCpuPlatform: string
nodeGroup: string
oauthScopes:
- string
preemptible: false
reservationAffinity:
consumeReservationType: string
key: string
values:
- string
resourceLabels:
string: string
resourceManagerTags:
string: any
sandboxConfig:
sandboxType: string
secondaryBootDisks:
- diskImage: string
mode: string
serviceAccount: string
shieldedInstanceConfig:
enableIntegrityMonitoring: false
enableSecureBoot: false
soleTenantConfig:
nodeAffinities:
- key: string
operator: string
values:
- string
spot: false
tags:
- string
taints:
- effect: string
key: string
value: string
workloadMetadataConfig:
mode: string
nodeCount: 0
nodeLocations:
- string
placementPolicy:
policyName: string
tpuTopology: string
type: string
project: string
queuedProvisioning:
enabled: false
upgradeSettings:
blueGreenSettings:
nodePoolSoakDuration: string
standardRolloutPolicy:
batchNodeCount: 0
batchPercentage: 0
batchSoakDuration: string
maxSurge: 0
maxUnavailable: 0
strategy: string
version: string
NodePool Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
The NodePool resource accepts the following input properties:
- Cluster string
- The cluster to create the node pool for. The cluster must be present in the location provided. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.
- Autoscaling NodePoolAutoscaling
- Configuration required by the cluster autoscaler to adjust the size of the node pool to the current cluster usage. Either total limits or per-zone limits are required. Structure is documented below.
- InitialNodeCount int
- The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field (a sketch follows this list).
- Location string
- The location (region or zone) of the cluster.
- Management NodePoolManagement
- Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
- MaxPodsPerNode int
- The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based", that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- Name string
- The name of the node pool. If left blank, the provider will auto-generate a unique name.
- NamePrefix string
- Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.
- NetworkConfig NodePoolNetworkConfig
- The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool or enabling private nodes. Structure is documented below.
- NodeConfig NodePoolNodeConfig
- Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- NodeCount int
- The number of nodes per instance group. This field can be used to update the number of nodes per instance group, but it should not be used alongside autoscaling.
- NodeLocations List<string>
- The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used. Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.
- PlacementPolicy NodePoolPlacementPolicy
- Specifies a custom placement policy for the nodes.
- Project string
- The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- QueuedProvisioning NodePoolQueuedProvisioning
- Specifies node pool-level settings of queued provisioning. Structure is documented below.
- UpgradeSettings NodePoolUpgradeSettings
- Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- Version string
- The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions, as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.
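As referenced in the InitialNodeCount description above, drift on this field can be ignored with the ignoreChanges resource option. A minimal TypeScript sketch, assuming a primary cluster defined as in the earlier examples and a placeholder node count:
const pool = new gcp.container.NodePool("pool", {
    cluster: primary.id,
    initialNodeCount: 3, // placeholder value
}, {
    // Ignore later drift on initialNodeCount so manual or autoscaler-driven
    // resizes do not force the pool to be destroyed and recreated.
    ignoreChanges: ["initialNodeCount"],
});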
- Cluster string
- The cluster to create the node pool for. The cluster must be present in the location provided. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.
- Autoscaling NodePoolAutoscalingArgs
- Configuration required by the cluster autoscaler to adjust the size of the node pool to the current cluster usage. Either total limits or per-zone limits are required. Structure is documented below.
- InitialNodeCount int
- The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
- Location string
- The location (region or zone) of the cluster.
- Management NodePoolManagementArgs
- Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
- MaxPodsPerNode int
- The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based", that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- Name string
- The name of the node pool. If left blank, the provider will auto-generate a unique name.
- NamePrefix string
- Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.
- NetworkConfig NodePoolNetworkConfigArgs
- The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool or enabling private nodes. Structure is documented below.
- NodeConfig NodePoolNodeConfigArgs
- Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- NodeCount int
- The number of nodes per instance group. This field can be used to update the number of nodes per instance group, but it should not be used alongside autoscaling.
- NodeLocations []string
- The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used. Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.
- PlacementPolicy NodePoolPlacementPolicyArgs
- Specifies a custom placement policy for the nodes.
- Project string
- The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- QueuedProvisioning NodePoolQueuedProvisioningArgs
- Specifies node pool-level settings of queued provisioning. Structure is documented below.
- UpgradeSettings NodePoolUpgradeSettingsArgs
- Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- Version string
- The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions, as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way (a sketch follows this list).
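The Version description above points to the gcp.container.getEngineVersions data source as a provider-friendly way to approximate fuzzy versions. A hedged TypeScript sketch; the location and version prefix are placeholder assumptions, and primary is a cluster defined as in the earlier examples:
const versions = gcp.container.getEngineVersions({
    location: "us-central1",
    versionPrefix: "1.29.", // placeholder prefix
});
const pinned = new gcp.container.NodePool("pinned", {
    cluster: primary.id,
    nodeCount: 1,
    // Resolve the prefix to a concrete node version so the provider does not
    // report spurious diffs on later runs.
    version: versions.then(v => v.latestNodeVersion),
});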
- cluster String
- The cluster to create the node pool for. Cluster must be present in
location
provided for clusters. May be specified in the formatprojects/{{project}}/locations/{{location}}/clusters/{{cluster}}
or as just the name of the cluster. - autoscaling
Node
Pool Autoscaling - Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- initial
Node IntegerCount - The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use a lifecycle block to ignore subsqeuent changes to this field.
- location String
- The location (region or zone) of the cluster.
- management
Node
Pool Management - Node management configuration, wherein auto-repair and auto-upgrade is configured. Structure is documented below.
- max
Pods IntegerPer Node - The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- name String
- The name of the node pool. If left blank, the provider will auto-generate a unique name.
- name
Prefix String - Creates a unique name for the node pool beginning
with the specified prefix. Conflicts with
name
. - network
Config NodePool Network Config - The network configuration of the pool. Such as configuration for Adding Pod IP address ranges) to the node pool. Or enabling private nodes. Structure is documented below
- node
Config NodePool Node Config - Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- node
Count Integer - The number of nodes per instance group. This field can be used to
update the number of nodes per instance group but should not be used alongside
autoscaling
. - node
Locations List<String> - The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level
node_locations
will be used. Note:
node_locations
will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster. - placement
Policy NodePool Placement Policy - Specifies a custom placement policy for the nodes.
- project String
- The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- queued
Provisioning NodePool Queued Provisioning - Specifies node pool-level settings of queued provisioning. Structure is documented below.
The autoscaling block supports the fields documented under NodePoolAutoscaling below (either total or per-zone limits are required).
- upgrade
Settings NodePool Upgrade Settings - Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- version String
- The Kubernetes version for the nodes in this pool. Note that if this field
and
auto_upgrade
are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions
data source's version_prefix
field to approximate fuzzy versions in a provider-compatible way.
- cluster string
- The cluster to create the node pool for. Cluster must be present in
location
provided for clusters. May be specified in the formatprojects/{{project}}/locations/{{location}}/clusters/{{cluster}}
or as just the name of the cluster. - autoscaling
Node
Pool Autoscaling - Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- initial
Node numberCount - The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
- location string
- The location (region or zone) of the cluster.
- management
Node
Pool Management - Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
- max
Pods numberPer Node - The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- name string
- The name of the node pool. If left blank, the provider will auto-generate a unique name.
- name
Prefix string - Creates a unique name for the node pool beginning
with the specified prefix. Conflicts with
name
. - network
Config NodePool Network Config - The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool or enabling private nodes. Structure is documented below.
- node
Config NodePool Node Config - Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- node
Count number - The number of nodes per instance group. This field can be used to
update the number of nodes per instance group but should not be used alongside
autoscaling
. - node
Locations string[] - The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level
node_locations
will be used. Note:
node_locations
will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster. - placement
Policy NodePool Placement Policy - Specifies a custom placement policy for the nodes.
- project string
- The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- queued
Provisioning NodePool Queued Provisioning - Specifies node pool-level settings of queued provisioning. Structure is documented below.
The autoscaling block supports the fields documented under NodePoolAutoscaling below (either total or per-zone limits are required).
- upgrade
Settings NodePool Upgrade Settings - Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- version string
- The Kubernetes version for the nodes in this pool. Note that if this field
and
auto_upgrade
are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions
data source's version_prefix
field to approximate fuzzy versions in a provider-compatible way.
- cluster str
- The cluster to create the node pool for. Cluster must be present in
location
provided for clusters. May be specified in the formatprojects/{{project}}/locations/{{location}}/clusters/{{cluster}}
or as just the name of the cluster. - autoscaling
Node
Pool Autoscaling Args - Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- initial_
node_ intcount - The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
- location str
- The location (region or zone) of the cluster.
- management
Node
Pool Management Args - Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
- max_
pods_ intper_ node - The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- name str
- The name of the node pool. If left blank, the provider will auto-generate a unique name.
- name_
prefix str - Creates a unique name for the node pool beginning
with the specified prefix. Conflicts with
name
. - network_
config NodePool Network Config Args - The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool or enabling private nodes. Structure is documented below.
- node_
config NodePool Node Config Args - Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- node_
count int - The number of nodes per instance group. This field can be used to
update the number of nodes per instance group but should not be used alongside
autoscaling
. - node_
locations Sequence[str] - The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level
node_locations
will be used. Note:
node_locations
will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster. - placement_
policy NodePool Placement Policy Args - Specifies a custom placement policy for the nodes.
- project str
- The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- queued_
provisioning NodePool Queued Provisioning Args - Specifies node pool-level settings of queued provisioning. Structure is documented below.
The autoscaling block supports the fields documented under NodePoolAutoscaling below (either total or per-zone limits are required).
- upgrade_
settings NodePool Upgrade Settings Args - Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- version str
- The Kubernetes version for the nodes in this pool. Note that if this field
and
auto_upgrade
are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions
data source's version_prefix
field to approximate fuzzy versions in a provider-compatible way.
- cluster String
- The cluster to create the node pool for. Cluster must be present in
location
provided for clusters. May be specified in the formatprojects/{{project}}/locations/{{location}}/clusters/{{cluster}}
or as just the name of the cluster. - autoscaling Property Map
- Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- initial
Node NumberCount - The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
- location String
- The location (region or zone) of the cluster.
- management Property Map
- Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
- max
Pods NumberPer Node - The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- name String
- The name of the node pool. If left blank, the provider will auto-generate a unique name.
- name
Prefix String - Creates a unique name for the node pool beginning
with the specified prefix. Conflicts with
name
. - network
Config Property Map - The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool or enabling private nodes. Structure is documented below.
- node
Config Property Map - Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- node
Count Number - The number of nodes per instance group. This field can be used to
update the number of nodes per instance group but should not be used alongside
autoscaling
. - node
Locations List<String> - The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level
node_locations
will be used. Note:
node_locations
will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster. - placement
Policy Property Map - Specifies a custom placement policy for the nodes.
- project String
- The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- queued
Provisioning Property Map - Specifies node pool-level settings of queued provisioning. Structure is documented below.
The autoscaling block supports the fields documented under NodePoolAutoscaling below (either total or per-zone limits are required).
- upgrade
Settings Property Map - Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- version String
- The Kubernetes version for the nodes in this pool. Note that if this field
and
auto_upgrade
are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions
data source's version_prefix
field to approximate fuzzy versions in a provider-compatible way.
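To make the warning on initialNodeCount and the guidance on nodeCount versus autoscaling concrete, here is a minimal TypeScript sketch. It assumes a cluster similar to the primary cluster in the example usage above; the resource names and sizes are placeholders, and the ignoreChanges resource option is used so a manual resize of the pool does not force recreation on the next update.
import * as gcp from "@pulumi/gcp";

// Placeholder cluster; in practice reference your existing gcp.container.Cluster resource.
const cluster = new gcp.container.Cluster("example-cluster", {
    name: "my-gke-cluster",
    location: "us-central1",
    removeDefaultNodePool: true,
    initialNodeCount: 1,
});

// Autoscaled pool: set `autoscaling` instead of `nodeCount`, and ignore drift on
// `initialNodeCount` so a manual resize does not trigger destroy/recreate later.
const autoscaledPool = new gcp.container.NodePool("autoscaled-pool", {
    cluster: cluster.id,
    location: "us-central1",
    initialNodeCount: 1,
    autoscaling: {
        minNodeCount: 1,
        maxNodeCount: 3,
    },
    nodeConfig: {
        machineType: "e2-medium",
        oauthScopes: ["https://www.googleapis.com/auth/cloud-platform"],
    },
}, { ignoreChanges: ["initialNodeCount"] });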
Outputs
All input properties are implicitly available as output properties. Additionally, the NodePool resource produces the following output properties:
- Id string
- The provider-assigned unique ID for this managed resource.
- Instance
Group List<string>Urls - The resource URLs of the managed instance groups associated with this node pool.
- Managed
Instance List<string>Group Urls - List of instance group URLs which have been assigned to this node pool.
- Operation string
- Id string
- The provider-assigned unique ID for this managed resource.
- Instance
Group []stringUrls - The resource URLs of the managed instance groups associated with this node pool.
- Managed
Instance []stringGroup Urls - List of instance group URLs which have been assigned to this node pool.
- Operation string
- id String
- The provider-assigned unique ID for this managed resource.
- instance
Group List<String>Urls - The resource URLs of the managed instance groups associated with this node pool.
- managed
Instance List<String>Group Urls - List of instance group URLs which have been assigned to this node pool.
- operation String
- id string
- The provider-assigned unique ID for this managed resource.
- instance
Group string[]Urls - The resource URLs of the managed instance groups associated with this node pool.
- managed
Instance string[]Group Urls - List of instance group URLs which have been assigned to this node pool.
- operation string
- id str
- The provider-assigned unique ID for this managed resource.
- instance_
group_ Sequence[str]urls - The resource URLs of the managed instance groups associated with this node pool.
- managed_
instance_ Sequence[str]group_ urls - List of instance group URLs which have been assigned to this node pool.
- operation str
- id String
- The provider-assigned unique ID for this managed resource.
- instance
Group List<String>Urls - The resource URLs of the managed instance groups associated with this node pool.
- managed
Instance List<String>Group Urls - List of instance group URLs which have been assigned to this node pool.
- operation String
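As a small sketch of consuming these outputs (TypeScript; the cluster name below is a placeholder), the instance group URLs can be exported as stack outputs:
import * as gcp from "@pulumi/gcp";

// Minimal pool in an existing cluster; "my-gke-cluster" is a placeholder name.
const pool = new gcp.container.NodePool("example-pool", {
    cluster: "my-gke-cluster",
    location: "us-central1",
    nodeCount: 1,
});

// Output properties resolve once the pool has been created.
export const instanceGroupUrls = pool.instanceGroupUrls;
export const managedInstanceGroupUrls = pool.managedInstanceGroupUrls;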
Look up Existing NodePool Resource
Get an existing NodePool resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: NodePoolState, opts?: CustomResourceOptions): NodePool
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
autoscaling: Optional[NodePoolAutoscalingArgs] = None,
cluster: Optional[str] = None,
initial_node_count: Optional[int] = None,
instance_group_urls: Optional[Sequence[str]] = None,
location: Optional[str] = None,
managed_instance_group_urls: Optional[Sequence[str]] = None,
management: Optional[NodePoolManagementArgs] = None,
max_pods_per_node: Optional[int] = None,
name: Optional[str] = None,
name_prefix: Optional[str] = None,
network_config: Optional[NodePoolNetworkConfigArgs] = None,
node_config: Optional[NodePoolNodeConfigArgs] = None,
node_count: Optional[int] = None,
node_locations: Optional[Sequence[str]] = None,
operation: Optional[str] = None,
placement_policy: Optional[NodePoolPlacementPolicyArgs] = None,
project: Optional[str] = None,
queued_provisioning: Optional[NodePoolQueuedProvisioningArgs] = None,
upgrade_settings: Optional[NodePoolUpgradeSettingsArgs] = None,
version: Optional[str] = None) -> NodePool
func GetNodePool(ctx *Context, name string, id IDInput, state *NodePoolState, opts ...ResourceOption) (*NodePool, error)
public static NodePool Get(string name, Input<string> id, NodePoolState? state, CustomResourceOptions? opts = null)
public static NodePool get(String name, Output<String> id, NodePoolState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
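For example, a hedged TypeScript sketch of looking up an existing pool. The project, cluster, and pool names below are placeholders, and the fully qualified ID shown is assumed to be an accepted format; check the resource's import documentation for the exact forms.
import * as gcp from "@pulumi/gcp";

// Look up an existing node pool by its provider ID; no new resource is created.
const existing = gcp.container.NodePool.get(
    "existing-pool",
    "projects/my-project/locations/us-central1/clusters/my-gke-cluster/nodePools/my-node-pool",
);

// State properties such as `version` are then available as outputs.
export const existingPoolVersion = existing.version;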
- Autoscaling
Node
Pool Autoscaling - Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- Cluster string
- The cluster to create the node pool for. Cluster must be present in
location
provided for clusters. May be specified in the formatprojects/{{project}}/locations/{{location}}/clusters/{{cluster}}
or as just the name of the cluster. - Initial
Node intCount - The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
- Instance
Group List<string>Urls - The resource URLs of the managed instance groups associated with this node pool.
- Location string
- The location (region or zone) of the cluster.
- Managed
Instance List<string>Group Urls - List of instance group URLs which have been assigned to this node pool.
- Management
Node
Pool Management - Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
- Max
Pods intPer Node - The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- Name string
- The name of the node pool. If left blank, the provider will auto-generate a unique name.
- Name
Prefix string - Creates a unique name for the node pool beginning
with the specified prefix. Conflicts with
name
. - Network
Config NodePool Network Config - The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool or enabling private nodes. Structure is documented below.
- Node
Config NodePool Node Config - Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- Node
Count int - The number of nodes per instance group. This field can be used to
update the number of nodes per instance group but should not be used alongside
autoscaling
. - Node
Locations List<string> - The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level
node_locations
will be used. Note:
node_locations
will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster. - Operation string
- Placement
Policy NodePool Placement Policy - Specifies a custom placement policy for the nodes.
- Project string
- The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- Queued
Provisioning NodePool Queued Provisioning - Specifies node pool-level settings of queued provisioning. Structure is documented below.
The autoscaling block supports the fields documented under NodePoolAutoscaling below (either total or per-zone limits are required).
- Upgrade
Settings NodePool Upgrade Settings - Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- Version string
- The Kubernetes version for the nodes in this pool. Note that if this field
and
auto_upgrade
are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions
data source's version_prefix
field to approximate fuzzy versions in a provider-compatible way.
- Autoscaling
Node
Pool Autoscaling Args - Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- Cluster string
- The cluster to create the node pool for. Cluster must be present in
location
provided for clusters. May be specified in the formatprojects/{{project}}/locations/{{location}}/clusters/{{cluster}}
or as just the name of the cluster. - Initial
Node intCount - The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
- Instance
Group []stringUrls - The resource URLs of the managed instance groups associated with this node pool.
- Location string
- The location (region or zone) of the cluster.
- Managed
Instance []stringGroup Urls - List of instance group URLs which have been assigned to this node pool.
- Management
Node
Pool Management Args - Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
- Max
Pods intPer Node - The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- Name string
- The name of the node pool. If left blank, the provider will auto-generate a unique name.
- Name
Prefix string - Creates a unique name for the node pool beginning
with the specified prefix. Conflicts with
name
. - Network
Config NodePool Network Config Args - The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool or enabling private nodes. Structure is documented below.
- Node
Config NodePool Node Config Args - Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- Node
Count int - The number of nodes per instance group. This field can be used to
update the number of nodes per instance group but should not be used alongside
autoscaling
. - Node
Locations []string - The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level
node_locations
will be used. Note:
node_locations
will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster. - Operation string
- Placement
Policy NodePool Placement Policy Args - Specifies a custom placement policy for the nodes.
- Project string
- The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- Queued
Provisioning NodePool Queued Provisioning Args - Specifies node pool-level settings of queued provisioning. Structure is documented below.
The autoscaling block supports the fields documented under NodePoolAutoscaling below (either total or per-zone limits are required).
- Upgrade
Settings NodePool Upgrade Settings Args - Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- Version string
- The Kubernetes version for the nodes in this pool. Note that if this field
and
auto_upgrade
are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions
data source's version_prefix
field to approximate fuzzy versions in a provider-compatible way.
- autoscaling
Node
Pool Autoscaling - Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- cluster String
- The cluster to create the node pool for. Cluster must be present in
location
provided for clusters. May be specified in the formatprojects/{{project}}/locations/{{location}}/clusters/{{cluster}}
or as just the name of the cluster. - initial
Node IntegerCount - The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
- instance
Group List<String>Urls - The resource URLs of the managed instance groups associated with this node pool.
- location String
- The location (region or zone) of the cluster.
- managed
Instance List<String>Group Urls - List of instance group URLs which have been assigned to this node pool.
- management
Node
Pool Management - Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
- max
Pods IntegerPer Node - The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- name String
- The name of the node pool. If left blank, the provider will auto-generate a unique name.
- name
Prefix String - Creates a unique name for the node pool beginning
with the specified prefix. Conflicts with
name
. - network
Config NodePool Network Config - The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool or enabling private nodes. Structure is documented below.
- node
Config NodePool Node Config - Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- node
Count Integer - The number of nodes per instance group. This field can be used to
update the number of nodes per instance group but should not be used alongside
autoscaling
. - node
Locations List<String> - The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level
node_locations
will be used. Note:
node_locations
will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster. - operation String
- placement
Policy NodePool Placement Policy - Specifies a custom placement policy for the nodes.
- project String
- The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- queued
Provisioning NodePool Queued Provisioning - Specifies node pool-level settings of queued provisioning. Structure is documented below.
The autoscaling block supports the fields documented under NodePoolAutoscaling below (either total or per-zone limits are required).
- upgrade
Settings NodePool Upgrade Settings - Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- version String
- The Kubernetes version for the nodes in this pool. Note that if this field
and
auto_upgrade
are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions
data source's version_prefix
field to approximate fuzzy versions in a provider-compatible way.
- autoscaling
Node
Pool Autoscaling - Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- cluster string
- The cluster to create the node pool for. Cluster must be present in
location
provided for clusters. May be specified in the formatprojects/{{project}}/locations/{{location}}/clusters/{{cluster}}
or as just the name of the cluster. - initial
Node numberCount - The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
- instance
Group string[]Urls - The resource URLs of the managed instance groups associated with this node pool.
- location string
- The location (region or zone) of the cluster.
- managed
Instance string[]Group Urls - List of instance group URLs which have been assigned to this node pool.
- management
Node
Pool Management - Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
- max
Pods numberPer Node - The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- name string
- The name of the node pool. If left blank, the provider will auto-generate a unique name.
- name
Prefix string - Creates a unique name for the node pool beginning
with the specified prefix. Conflicts with
name
. - network
Config NodePool Network Config - The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool or enabling private nodes. Structure is documented below.
- node
Config NodePool Node Config - Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- node
Count number - The number of nodes per instance group. This field can be used to
update the number of nodes per instance group but should not be used alongside
autoscaling
. - node
Locations string[] - The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level
node_locations
will be used. Note:
node_locations
will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster. - operation string
- placement
Policy NodePool Placement Policy - Specifies a custom placement policy for the nodes.
- project string
- The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- queued
Provisioning NodePool Queued Provisioning - Specifies node pool-level settings of queued provisioning. Structure is documented below.
The autoscaling block supports the fields documented under NodePoolAutoscaling below (either total or per-zone limits are required).
- upgrade
Settings NodePool Upgrade Settings - Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- version string
- The Kubernetes version for the nodes in this pool. Note that if this field
and
auto_upgrade
are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions
data source's version_prefix
field to approximate fuzzy versions in a provider-compatible way.
- autoscaling
Node
Pool Autoscaling Args - Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- cluster str
- The cluster to create the node pool for. Cluster must be present in
location
provided for clusters. May be specified in the formatprojects/{{project}}/locations/{{location}}/clusters/{{cluster}}
or as just the name of the cluster. - initial_
node_ intcount - The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
- instance_
group_ Sequence[str]urls - The resource URLs of the managed instance groups associated with this node pool.
- location str
- The location (region or zone) of the cluster.
- managed_
instance_ Sequence[str]group_ urls - List of instance group URLs which have been assigned to this node pool.
- management
Node
Pool Management Args - Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
- max_
pods_ intper_ node - The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- name str
- The name of the node pool. If left blank, the provider will auto-generate a unique name.
- name_
prefix str - Creates a unique name for the node pool beginning
with the specified prefix. Conflicts with
name
. - network_
config NodePool Network Config Args - The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool or enabling private nodes. Structure is documented below.
- node_
config NodePool Node Config Args - Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- node_
count int - The number of nodes per instance group. This field can be used to
update the number of nodes per instance group but should not be used alongside
autoscaling
. - node_
locations Sequence[str] - The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level
node_locations
will be used. Note:
node_locations
will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster. - operation str
- placement_
policy NodePool Placement Policy Args - Specifies a custom placement policy for the nodes.
- project str
- The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- queued_
provisioning NodePool Queued Provisioning Args - Specifies node pool-level settings of queued provisioning. Structure is documented below.
The autoscaling block supports the fields documented under NodePoolAutoscaling below (either total or per-zone limits are required).
- upgrade_
settings NodePool Upgrade Settings Args - Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- version str
- The Kubernetes version for the nodes in this pool. Note that if this field
and
auto_upgrade
are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions
data source's version_prefix
field to approximate fuzzy versions in a provider-compatible way.
- autoscaling Property Map
- Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- cluster String
- The cluster to create the node pool for. Cluster must be present in
location
provided for clusters. May be specified in the formatprojects/{{project}}/locations/{{location}}/clusters/{{cluster}}
or as just the name of the cluster. - initial
Node NumberCount - The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
- instance
Group List<String>Urls - The resource URLs of the managed instance groups associated with this node pool.
- location String
- The location (region or zone) of the cluster.
- managed
Instance List<String>Group Urls - List of instance group URLs which have been assigned to this node pool.
- management Property Map
- Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
- max
Pods NumberPer Node - The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- name String
- The name of the node pool. If left blank, the provider will auto-generate a unique name.
- name
Prefix String - Creates a unique name for the node pool beginning
with the specified prefix. Conflicts with
name
. - network
Config Property Map - The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool or enabling private nodes. Structure is documented below.
- node
Config Property Map - Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- node
Count Number - The number of nodes per instance group. This field can be used to
update the number of nodes per instance group but should not be used alongside
autoscaling
. - node
Locations List<String> - The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level
node_locations
will be used. Note:
node_locations
will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster. - operation String
- placement
Policy Property Map - Specifies a custom placement policy for the nodes.
- project String
- The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- queued
Provisioning Property Map - Specifies node pool-level settings of queued provisioning. Structure is documented below.
The autoscaling block supports the fields documented under NodePoolAutoscaling below (either total or per-zone limits are required).
- upgrade
Settings Property Map - Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- version String
- The Kubernetes version for the nodes in this pool. Note that if this field
and
auto_upgrade
are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions
data source's version_prefix
field to approximate fuzzy versions in a provider-compatible way.
Supporting Types
NodePoolAutoscaling, NodePoolAutoscalingArgs
- Location
Policy string - Location policy specifies the algorithm used when
scaling-up the node pool. Location policy is supported only in 1.24.1+ clusters.
- "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones.
- "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs.
- Max
Node intCount - Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.
- Min
Node intCount - Minimum number of nodes per zone in the NodePool.
Must be >=0 and <=
max_node_count
. Cannot be used with total limits. - Total
Max intNode Count - Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- Total
Min intNode Count - Total minimum number of nodes in the NodePool.
Must be >=0 and <=
total_max_node_count
. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- Location
Policy string - Location policy specifies the algorithm used when
scaling-up the node pool. Location policy is supported only in 1.24.1+ clusters.
- "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones.
- "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs.
- Max
Node intCount - Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.
- Min
Node intCount - Minimum number of nodes per zone in the NodePool.
Must be >=0 and <=
max_node_count
. Cannot be used with total limits. - Total
Max intNode Count - Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- Total
Min intNode Count - Total minimum number of nodes in the NodePool.
Must be >=0 and <=
total_max_node_count
. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- location
Policy String - Location policy specifies the algorithm used when
scaling-up the node pool. Location policy is supported only in 1.24.1+ clusters.
- "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones.
- "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs.
- max
Node IntegerCount - Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.
- min
Node IntegerCount - Minimum number of nodes per zone in the NodePool.
Must be >=0 and <=
max_node_count
. Cannot be used with total limits. - total
Max IntegerNode Count - Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- total
Min IntegerNode Count - Total minimum number of nodes in the NodePool.
Must be >=0 and <=
total_max_node_count
. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- location
Policy string - Location policy specifies the algorithm used when
scaling-up the node pool. Location policy is supported only in 1.24.1+ clusters.
- "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones.
- "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs.
- max
Node numberCount - Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.
- min
Node numberCount - Minimum number of nodes per zone in the NodePool.
Must be >=0 and <=
max_node_count
. Cannot be used with total limits. - total
Max numberNode Count - Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- total
Min numberNode Count - Total minimum number of nodes in the NodePool.
Must be >=0 and <=
total_max_node_count
. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- location_
policy str - Location policy specifies the algorithm used when
scaling-up the node pool. Location policy is supported only in 1.24.1+ clusters.
- "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones.
- "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs.
- max_
node_ intcount - Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.
- min_
node_ intcount - Minimum number of nodes per zone in the NodePool.
Must be >=0 and <=
max_node_count
. Cannot be used with total limits. - total_
max_ intnode_ count - Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- total_
min_ intnode_ count - Total minimum number of nodes in the NodePool.
Must be >=0 and <=
total_max_node_count
. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- location
Policy String - Location policy specifies the algorithm used when
scaling-up the node pool. Location policy is supported only in 1.24.1+ clusters.
- "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones.
- "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs.
- max
Node NumberCount - Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.
- min
Node NumberCount - Minimum number of nodes per zone in the NodePool.
Must be >=0 and <=
max_node_count
. Cannot be used with total limits. - total
Max NumberNode Count - Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- total
Min NumberNode Count - Total minimum number of nodes in the NodePool.
Must be >=0 and <=
total_max_node_count
. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
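A minimal TypeScript sketch of the total-limits variant (the cluster name is a placeholder); note that total limits and per-zone limits are mutually exclusive:
import * as gcp from "@pulumi/gcp";

// Uses total limits across all zones instead of per-zone min/max node counts.
const burstPool = new gcp.container.NodePool("burst-pool", {
    cluster: "my-gke-cluster",          // placeholder cluster name
    location: "us-central1",
    autoscaling: {
        totalMinNodeCount: 0,
        totalMaxNodeCount: 10,
        locationPolicy: "ANY",          // prefer unused reservations, reduce preemption risk
    },
    nodeConfig: {
        preemptible: true,
        machineType: "e2-medium",
    },
});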
NodePoolManagement, NodePoolManagementArgs
- Auto
Repair bool - Whether the nodes will be automatically repaired. Enabled by default.
- Auto
Upgrade bool - Whether the nodes will be automatically upgraded. Enabled by default.
- Auto
Repair bool - Whether the nodes will be automatically repaired. Enabled by default.
- Auto
Upgrade bool - Whether the nodes will be automatically upgraded. Enabled by default.
- auto
Repair Boolean - Whether the nodes will be automatically repaired. Enabled by default.
- auto
Upgrade Boolean - Whether the nodes will be automatically upgraded. Enabled by default.
- auto
Repair boolean - Whether the nodes will be automatically repaired. Enabled by default.
- auto
Upgrade boolean - Whether the nodes will be automatically upgraded. Enabled by default.
- auto_
repair bool - Whether the nodes will be automatically repaired. Enabled by default.
- auto_
upgrade bool - Whether the nodes will be automatically upgraded. Enabled by default.
- auto
Repair Boolean - Whether the nodes will be automatically repaired. Enabled by default.
- auto
Upgrade Boolean - Whether the nodes will be automatically upgraded. Enabled by default.
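For instance, a small TypeScript sketch (placeholder cluster name) that keeps auto-repair on but disables auto-upgrade so the pool's version can be pinned explicitly:
import * as gcp from "@pulumi/gcp";

// autoRepair and autoUpgrade both default to true; override them per pool as needed.
const pinnedPool = new gcp.container.NodePool("pinned-pool", {
    cluster: "my-gke-cluster",   // placeholder cluster name
    location: "us-central1",
    nodeCount: 1,
    management: {
        autoRepair: true,
        autoUpgrade: false,
    },
});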
NodePoolNetworkConfig, NodePoolNetworkConfigArgs
- Additional
Node List<NodeNetwork Configs Pool Network Config Additional Node Network Config> - The list of additional node networks for this node pool. Each node network corresponds to an additional interface. Structure is documented below.
- Additional
Pod List<NodeNetwork Configs Pool Network Config Additional Pod Network Config> - The list of additional pod networks for this node pool. Each pod network corresponds to an additional alias IP range for the node. Structure is documented below.
- Create
Pod boolRange - Whether to create a new range for pod IPs in this node pool. Defaults are provided for
pod_range
andpod_ipv4_cidr_block
if they are not specified. - Enable
Private boolNodes - Whether nodes have internal IP addresses only.
- Network
Performance NodeConfig Pool Network Config Network Performance Config - Network bandwidth tier configuration. Structure is documented below.
- Pod
Cidr NodeOverprovision Config Pool Network Config Pod Cidr Overprovision Config - Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited. Structure is documented below.
- Pod
Ipv4Cidr stringBlock - The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
- Pod
Range string - The ID of the secondary range for pod IPs. If
create_pod_range
is true, this ID is used for the new range. Ifcreate_pod_range
is false, uses an existing secondary range with this ID.
- Additional
Node []NodeNetwork Configs Pool Network Config Additional Node Network Config - The list of additional node networks for this node pool. Each node network corresponds to an additional interface. Structure is documented below.
- Additional
Pod []NodeNetwork Configs Pool Network Config Additional Pod Network Config - The list of additional pod networks for this node pool. Each pod network corresponds to an additional alias IP range for the node. Structure is documented below.
- Create
Pod boolRange - Whether to create a new range for pod IPs in this node pool. Defaults are provided for
pod_range
andpod_ipv4_cidr_block
if they are not specified. - Enable
Private boolNodes - Whether nodes have internal IP addresses only.
- Network
Performance NodeConfig Pool Network Config Network Performance Config - Network bandwidth tier configuration. Structure is documented below.
- Pod
Cidr NodeOverprovision Config Pool Network Config Pod Cidr Overprovision Config - Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited. Structure is documented below.
- Pod
Ipv4Cidr stringBlock - The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
- Pod
Range string - The ID of the secondary range for pod IPs. If
create_pod_range
is true, this ID is used for the new range. Ifcreate_pod_range
is false, uses an existing secondary range with this ID.
- additional
Node List<NodeNetwork Configs Pool Network Config Additional Node Network Config> - The list of additional node networks for this node pool. Each node network corresponds to an additional interface. Structure is documented below.
- additional
Pod List<NodeNetwork Configs Pool Network Config Additional Pod Network Config> - The list of additional pod networks for this node pool. Each pod network corresponds to an additional alias IP range for the node. Structure is documented below.
- create
Pod BooleanRange - Whether to create a new range for pod IPs in this node pool. Defaults are provided for
pod_range
andpod_ipv4_cidr_block
if they are not specified. - enable
Private BooleanNodes - Whether nodes have internal IP addresses only.
- network
Performance NodeConfig Pool Network Config Network Performance Config - Network bandwidth tier configuration. Structure is documented below.
- pod
Cidr NodeOverprovision Config Pool Network Config Pod Cidr Overprovision Config - Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited. Structure is documented below.
- pod
Ipv4Cidr StringBlock - The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
- pod
Range String - The ID of the secondary range for pod IPs. If
create_pod_range
is true, this ID is used for the new range. Ifcreate_pod_range
is false, uses an existing secondary range with this ID.
- additional
Node NodeNetwork Configs Pool Network Config Additional Node Network Config[] - The list of additional node networks for this node pool. Each node network corresponds to an additional interface. Structure is documented below.
- additional
Pod NodeNetwork Configs Pool Network Config Additional Pod Network Config[] - The list of additional pod networks for this node pool. Each pod network corresponds to an additional alias IP range for the node. Structure is documented below.
- create
Pod booleanRange - Whether to create a new range for pod IPs in this node pool. Defaults are provided for
pod_range
andpod_ipv4_cidr_block
if they are not specified. - enable
Private booleanNodes - Whether nodes have internal IP addresses only.
- network
Performance NodeConfig Pool Network Config Network Performance Config - Network bandwidth tier configuration. Structure is documented below.
- pod
Cidr NodeOverprovision Config Pool Network Config Pod Cidr Overprovision Config - Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited. Structure is documented below.
- pod
Ipv4Cidr stringBlock - The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
- pod
Range string - The ID of the secondary range for pod IPs. If
create_pod_range
is true, this ID is used for the new range. Ifcreate_pod_range
is false, uses an existing secondary range with this ID.
- additional_
node_ Sequence[Nodenetwork_ configs Pool Network Config Additional Node Network Config] - The list of additional node networks for this node pool. Each node network corresponds to an additional interface. Structure is documented below.
- additional_
pod_ Sequence[Nodenetwork_ configs Pool Network Config Additional Pod Network Config] - The list of additional pod networks for this node pool. Each pod network corresponds to an additional alias IP range for the node. Structure is documented below.
- create_
pod_ boolrange - Whether to create a new range for pod IPs in this node pool. Defaults are provided for
pod_range
andpod_ipv4_cidr_block
if they are not specified. - enable_
private_ boolnodes - Whether nodes have internal IP addresses only.
- network_
performance_ Nodeconfig Pool Network Config Network Performance Config - Network bandwidth tier configuration. Structure is documented below.
- pod_
cidr_ Nodeoverprovision_ config Pool Network Config Pod Cidr Overprovision Config - Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited. Structure is documented below.
- pod_
ipv4_ strcidr_ block - The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
- pod_
range str - The ID of the secondary range for pod IPs. If
create_pod_range
is true, this ID is used for the new range. Ifcreate_pod_range
is false, uses an existing secondary range with this ID.
- additional
Node List<Property Map>Network Configs - We specify the additional node networks for this node pool using this list. Each node network corresponds to an additional interface. Structure is documented below
- additional
Pod List<Property Map>Network Configs - We specify the additional pod networks for this node pool using this list. Each pod network corresponds to an additional alias IP range for the node. Structure is documented below
- create
Pod BooleanRange - Whether to create a new range for pod IPs in this node pool. Defaults are provided for
pod_range
andpod_ipv4_cidr_block
if they are not specified. - enable
Private BooleanNodes - Whether nodes have internal IP addresses only.
- network
Performance Property MapConfig - Network bandwidth tier configuration. Structure is documented below.
- pod
Cidr Property MapOverprovision Config - Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited. Structure is documented below.
- pod
Ipv4Cidr StringBlock - The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
- pod
Range String - The ID of the secondary range for pod IPs. If
create_pod_range
is true, this ID is used for the new range. Ifcreate_pod_range
is false, uses an existing secondary range with this ID.
NodePoolNetworkConfigAdditionalNodeNetworkConfig, NodePoolNetworkConfigAdditionalNodeNetworkConfigArgs
- Network string
- Name of the VPC where the additional interface belongs.
- Subnetwork string
- Name of the subnetwork where the additional interface belongs.
- Network string
- Name of the VPC where the additional interface belongs.
- Subnetwork string
- Name of the subnetwork where the additional interface belongs.
- network String
- Name of the VPC where the additional interface belongs.
- subnetwork String
- Name of the subnetwork where the additional interface belongs.
- network string
- Name of the VPC where the additional interface belongs.
- subnetwork string
- Name of the subnetwork where the additional interface belongs.
- network str
- Name of the VPC where the additional interface belongs.
- subnetwork str
- Name of the subnetwork where the additional interface belongs.
- network String
- Name of the VPC where the additional interface belongs.
- subnetwork String
- Name of the subnetwork where the additional interface belongs.
NodePoolNetworkConfigAdditionalPodNetworkConfig, NodePoolNetworkConfigAdditionalPodNetworkConfigArgs
- Max
Pods intPer Node - The maximum number of pods per node which use this pod network.
- Secondary
Pod stringRange - The name of the secondary range on the subnet which provides IP address for this pod range.
- Subnetwork string
- Name of the subnetwork where the additional pod network belongs.
- Max
Pods intPer Node - The maximum number of pods per node which use this pod network.
- Secondary
Pod stringRange - The name of the secondary range on the subnet which provides IP address for this pod range.
- Subnetwork string
- Name of the subnetwork where the additional pod network belongs.
- max
Pods IntegerPer Node - The maximum number of pods per node which use this pod network.
- secondary
Pod StringRange - The name of the secondary range on the subnet which provides IP address for this pod range.
- subnetwork String
- Name of the subnetwork where the additional pod network belongs.
- max
Pods numberPer Node - The maximum number of pods per node which use this pod network.
- secondary
Pod stringRange - The name of the secondary range on the subnet which provides IP address for this pod range.
- subnetwork string
- Name of the subnetwork where the additional pod network belongs.
- max_
pods_ intper_ node - The maximum number of pods per node which use this pod network.
- secondary_
pod_ strrange - The name of the secondary range on the subnet which provides IP address for this pod range.
- subnetwork str
- Name of the subnetwork where the additional pod network belongs.
- max
Pods NumberPer Node - The maximum number of pods per node which use this pod network.
- secondary
Pod StringRange - The name of the secondary range on the subnet which provides IP address for this pod range.
- subnetwork String
- Name of the subnetwork where the additional pod network belongs.
NodePoolNetworkConfigNetworkPerformanceConfig, NodePoolNetworkConfigNetworkPerformanceConfigArgs
- Total
Egress stringBandwidth Tier - Specifies the total network bandwidth tier for the NodePool.
- Total
Egress stringBandwidth Tier - Specifies the total network bandwidth tier for the NodePool.
- total
Egress StringBandwidth Tier - Specifies the total network bandwidth tier for the NodePool.
- total
Egress stringBandwidth Tier - Specifies the total network bandwidth tier for the NodePool.
- total_
egress_ strbandwidth_ tier - Specifies the total network bandwidth tier for the NodePool.
- total
Egress StringBandwidth Tier - Specifies the total network bandwidth tier for the NodePool.
NodePoolNetworkConfigPodCidrOverprovisionConfig, NodePoolNetworkConfigPodCidrOverprovisionConfigArgs
- Disabled bool
- Whether pod cidr overprovision is disabled.
- Disabled bool
- Whether pod cidr overprovision is disabled.
- disabled Boolean
- Whether pod cidr overprovision is disabled.
- disabled boolean
- Whether pod cidr overprovision is disabled.
- disabled bool
- Whether pod cidr overprovision is disabled.
- disabled Boolean
- Whether pod cidr overprovision is disabled.
NodePoolNodeConfig, NodePoolNodeConfigArgs
- Advanced
Machine NodeFeatures Pool Node Config Advanced Machine Features - Specifies options for controlling advanced machine features.
- Boot
Disk stringKms Key - The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
- Confidential
Nodes NodePool Node Config Confidential Nodes - Configuration for Confidential Nodes feature. Structure is documented below.
- Containerd
Config NodePool Node Config Containerd Config - Parameters for containerd configuration.
- Disk
Size intGb - Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
- Disk
Type string - Type of the disk attached to each node. Such as pd-standard, pd-balanced or pd-ssd
- Effective
Taints List<NodePool Node Config Effective Taint> - List of kubernetes taints applied to each node.
- Enable
Confidential boolStorage - If enabled boot disks are configured with confidential mode.
- Ephemeral
Storage NodeConfig Pool Node Config Ephemeral Storage Config - Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
- Ephemeral
Storage NodeLocal Ssd Config Pool Node Config Ephemeral Storage Local Ssd Config - Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
- Fast
Socket NodePool Node Config Fast Socket - Enable or disable NCCL Fast Socket in the node pool.
- Gcfs
Config NodePool Node Config Gcfs Config - GCFS configuration for this node.
- Guest
Accelerators List<NodePool Node Config Guest Accelerator> - List of the type and count of accelerator cards attached to the instance.
- Gvnic
Node
Pool Node Config Gvnic - Enable or disable gvnic in the node pool.
- Host
Maintenance NodePolicy Pool Node Config Host Maintenance Policy - The maintenance policy for the hosts on which the GKE VMs run on.
- Image
Type string - The image type to use for this node. Note that for a given image type, the latest version of it will be used.
- Kubelet
Config NodePool Node Config Kubelet Config - Node kubelet configs.
- Labels Dictionary<string, string>
- The map of Kubernetes labels (key/value pairs) to be applied to each node. These will added in addition to any default label(s) that Kubernetes may apply to the node.
- Linux
Node NodeConfig Pool Node Config Linux Node Config - Parameters that can be configured on Linux nodes.
- Local
Nvme NodeSsd Block Config Pool Node Config Local Nvme Ssd Block Config - Parameters for raw-block local NVMe SSDs.
- Local
Ssd intCount - The number of local SSD disks to be attached to the node.
- Logging
Variant string - Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
- Machine
Type string - The name of a Google Compute Engine machine type.
- Metadata Dictionary<string, string>
- The metadata key/value pairs assigned to instances in the cluster.
- Min
Cpu stringPlatform - Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform.
- Node
Group string - Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes.
- Oauth
Scopes List<string> - The set of Google API scopes to be made available on all of the node VMs.
- Preemptible bool
- Whether the nodes are created as preemptible VM instances.
- Reservation
Affinity NodePool Node Config Reservation Affinity - The reservation affinity configuration for the node pool.
- Resource
Labels Dictionary<string, string> - The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
- Dictionary<string, object>
- A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.
- Sandbox
Config NodePool Node Config Sandbox Config - Sandbox configuration for this node.
- Secondary
Boot List<NodeDisks Pool Node Config Secondary Boot Disk> - Secondary boot disks for preloading data or container images.
- Service
Account string - The Google Cloud Platform Service Account to be used by the node VMs.
- Shielded
Instance NodeConfig Pool Node Config Shielded Instance Config - Shielded Instance options.
- Sole
Tenant NodeConfig Pool Node Config Sole Tenant Config - Node affinity options for sole tenant node pools.
- Spot bool
- Whether the nodes are created as spot VM instances.
- List<string>
- The list of instance tags applied to all nodes.
- Taints
List<Node
Pool Node Config Taint> - List of Kubernetes taints to be applied to each node.
- Workload
Metadata NodeConfig Pool Node Config Workload Metadata Config - The workload metadata configuration for this node.
- Advanced
Machine NodeFeatures Pool Node Config Advanced Machine Features - Specifies options for controlling advanced machine features.
- Boot
Disk stringKms Key - The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
- Confidential
Nodes NodePool Node Config Confidential Nodes - Configuration for Confidential Nodes feature. Structure is documented below.
- Containerd
Config NodePool Node Config Containerd Config - Parameters for containerd configuration.
- Disk
Size intGb - Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
- Disk
Type string - Type of the disk attached to each node. Such as pd-standard, pd-balanced or pd-ssd
- Effective
Taints []NodePool Node Config Effective Taint - List of kubernetes taints applied to each node.
- Enable
Confidential boolStorage - If enabled boot disks are configured with confidential mode.
- Ephemeral
Storage NodeConfig Pool Node Config Ephemeral Storage Config - Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
- Ephemeral
Storage NodeLocal Ssd Config Pool Node Config Ephemeral Storage Local Ssd Config - Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
- Fast
Socket NodePool Node Config Fast Socket - Enable or disable NCCL Fast Socket in the node pool.
- Gcfs
Config NodePool Node Config Gcfs Config - GCFS configuration for this node.
- Guest
Accelerators []NodePool Node Config Guest Accelerator - List of the type and count of accelerator cards attached to the instance.
- Gvnic
Node
Pool Node Config Gvnic - Enable or disable gvnic in the node pool.
- Host
Maintenance NodePolicy Pool Node Config Host Maintenance Policy - The maintenance policy for the hosts on which the GKE VMs run on.
- Image
Type string - The image type to use for this node. Note that for a given image type, the latest version of it will be used.
- Kubelet
Config NodePool Node Config Kubelet Config - Node kubelet configs.
- Labels map[string]string
- The map of Kubernetes labels (key/value pairs) to be applied to each node. These will added in addition to any default label(s) that Kubernetes may apply to the node.
- Linux
Node NodeConfig Pool Node Config Linux Node Config - Parameters that can be configured on Linux nodes.
- Local
Nvme NodeSsd Block Config Pool Node Config Local Nvme Ssd Block Config - Parameters for raw-block local NVMe SSDs.
- Local
Ssd intCount - The number of local SSD disks to be attached to the node.
- Logging
Variant string - Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
- Machine
Type string - The name of a Google Compute Engine machine type.
- Metadata map[string]string
- The metadata key/value pairs assigned to instances in the cluster.
- Min
Cpu stringPlatform - Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform.
- Node
Group string - Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes.
- Oauth
Scopes []string - The set of Google API scopes to be made available on all of the node VMs.
- Preemptible bool
- Whether the nodes are created as preemptible VM instances.
- Reservation
Affinity NodePool Node Config Reservation Affinity - The reservation affinity configuration for the node pool.
- Resource
Labels map[string]string - The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
- map[string]interface{}
- A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.
- Sandbox
Config NodePool Node Config Sandbox Config - Sandbox configuration for this node.
- Secondary
Boot []NodeDisks Pool Node Config Secondary Boot Disk - Secondary boot disks for preloading data or container images.
- Service
Account string - The Google Cloud Platform Service Account to be used by the node VMs.
- Shielded
Instance NodeConfig Pool Node Config Shielded Instance Config - Shielded Instance options.
- Sole
Tenant NodeConfig Pool Node Config Sole Tenant Config - Node affinity options for sole tenant node pools.
- Spot bool
- Whether the nodes are created as spot VM instances.
- []string
- The list of instance tags applied to all nodes.
- Taints
[]Node
Pool Node Config Taint - List of Kubernetes taints to be applied to each node.
- Workload
Metadata NodeConfig Pool Node Config Workload Metadata Config - The workload metadata configuration for this node.
- advanced
Machine NodeFeatures Pool Node Config Advanced Machine Features - Specifies options for controlling advanced machine features.
- boot
Disk StringKms Key - The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
- confidential
Nodes NodePool Node Config Confidential Nodes - Configuration for Confidential Nodes feature. Structure is documented below.
- containerd
Config NodePool Node Config Containerd Config - Parameters for containerd configuration.
- disk
Size IntegerGb - Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
- disk
Type String - Type of the disk attached to each node. Such as pd-standard, pd-balanced or pd-ssd
- effective
Taints List<NodePool Node Config Effective Taint> - List of kubernetes taints applied to each node.
- enable
Confidential BooleanStorage - If enabled boot disks are configured with confidential mode.
- ephemeral
Storage NodeConfig Pool Node Config Ephemeral Storage Config - Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
- ephemeral
Storage NodeLocal Ssd Config Pool Node Config Ephemeral Storage Local Ssd Config - Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
- fast
Socket NodePool Node Config Fast Socket - Enable or disable NCCL Fast Socket in the node pool.
- gcfs
Config NodePool Node Config Gcfs Config - GCFS configuration for this node.
- guest
Accelerators List<NodePool Node Config Guest Accelerator> - List of the type and count of accelerator cards attached to the instance.
- gvnic
Node
Pool Node Config Gvnic - Enable or disable gvnic in the node pool.
- host
Maintenance NodePolicy Pool Node Config Host Maintenance Policy - The maintenance policy for the hosts on which the GKE VMs run on.
- image
Type String - The image type to use for this node. Note that for a given image type, the latest version of it will be used.
- kubelet
Config NodePool Node Config Kubelet Config - Node kubelet configs.
- labels Map<String,String>
- The map of Kubernetes labels (key/value pairs) to be applied to each node. These will added in addition to any default label(s) that Kubernetes may apply to the node.
- linux
Node NodeConfig Pool Node Config Linux Node Config - Parameters that can be configured on Linux nodes.
- local
Nvme NodeSsd Block Config Pool Node Config Local Nvme Ssd Block Config - Parameters for raw-block local NVMe SSDs.
- local
Ssd IntegerCount - The number of local SSD disks to be attached to the node.
- logging
Variant String - Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
- machine
Type String - The name of a Google Compute Engine machine type.
- metadata Map<String,String>
- The metadata key/value pairs assigned to instances in the cluster.
- min
Cpu StringPlatform - Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform.
- node
Group String - Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes.
- oauth
Scopes List<String> - The set of Google API scopes to be made available on all of the node VMs.
- preemptible Boolean
- Whether the nodes are created as preemptible VM instances.
- reservation
Affinity NodePool Node Config Reservation Affinity - The reservation affinity configuration for the node pool.
- resource
Labels Map<String,String> - The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
- Map<String,Object>
- A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.
- sandbox
Config NodePool Node Config Sandbox Config - Sandbox configuration for this node.
- secondary
Boot List<NodeDisks Pool Node Config Secondary Boot Disk> - Secondary boot disks for preloading data or container images.
- service
Account String - The Google Cloud Platform Service Account to be used by the node VMs.
- shielded
Instance NodeConfig Pool Node Config Shielded Instance Config - Shielded Instance options.
- sole
Tenant NodeConfig Pool Node Config Sole Tenant Config - Node affinity options for sole tenant node pools.
- spot Boolean
- Whether the nodes are created as spot VM instances.
- List<String>
- The list of instance tags applied to all nodes.
- taints
List<Node
Pool Node Config Taint> - List of Kubernetes taints to be applied to each node.
- workload
Metadata NodeConfig Pool Node Config Workload Metadata Config - The workload metadata configuration for this node.
- advanced
Machine NodeFeatures Pool Node Config Advanced Machine Features - Specifies options for controlling advanced machine features.
- boot
Disk stringKms Key - The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
- confidential
Nodes NodePool Node Config Confidential Nodes - Configuration for Confidential Nodes feature. Structure is documented below.
- containerd
Config NodePool Node Config Containerd Config - Parameters for containerd configuration.
- disk
Size numberGb - Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
- disk
Type string - Type of the disk attached to each node. Such as pd-standard, pd-balanced or pd-ssd
- effective
Taints NodePool Node Config Effective Taint[] - List of kubernetes taints applied to each node.
- enable
Confidential booleanStorage - If enabled boot disks are configured with confidential mode.
- ephemeral
Storage NodeConfig Pool Node Config Ephemeral Storage Config - Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
- ephemeral
Storage NodeLocal Ssd Config Pool Node Config Ephemeral Storage Local Ssd Config - Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
- fast
Socket NodePool Node Config Fast Socket - Enable or disable NCCL Fast Socket in the node pool.
- gcfs
Config NodePool Node Config Gcfs Config - GCFS configuration for this node.
- guest
Accelerators NodePool Node Config Guest Accelerator[] - List of the type and count of accelerator cards attached to the instance.
- gvnic
Node
Pool Node Config Gvnic - Enable or disable gvnic in the node pool.
- host
Maintenance NodePolicy Pool Node Config Host Maintenance Policy - The maintenance policy for the hosts on which the GKE VMs run on.
- image
Type string - The image type to use for this node. Note that for a given image type, the latest version of it will be used.
- kubelet
Config NodePool Node Config Kubelet Config - Node kubelet configs.
- labels {[key: string]: string}
- The map of Kubernetes labels (key/value pairs) to be applied to each node. These will added in addition to any default label(s) that Kubernetes may apply to the node.
- linux
Node NodeConfig Pool Node Config Linux Node Config - Parameters that can be configured on Linux nodes.
- local
Nvme NodeSsd Block Config Pool Node Config Local Nvme Ssd Block Config - Parameters for raw-block local NVMe SSDs.
- local
Ssd numberCount - The number of local SSD disks to be attached to the node.
- logging
Variant string - Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
- machine
Type string - The name of a Google Compute Engine machine type.
- metadata {[key: string]: string}
- The metadata key/value pairs assigned to instances in the cluster.
- min
Cpu stringPlatform - Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform.
- node
Group string - Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes.
- oauth
Scopes string[] - The set of Google API scopes to be made available on all of the node VMs.
- preemptible boolean
- Whether the nodes are created as preemptible VM instances.
- reservation
Affinity NodePool Node Config Reservation Affinity - The reservation affinity configuration for the node pool.
- resource
Labels {[key: string]: string} - The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
- {[key: string]: any}
- A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.
- sandbox
Config NodePool Node Config Sandbox Config - Sandbox configuration for this node.
- secondary
Boot NodeDisks Pool Node Config Secondary Boot Disk[] - Secondary boot disks for preloading data or container images.
- service
Account string - The Google Cloud Platform Service Account to be used by the node VMs.
- shielded
Instance NodeConfig Pool Node Config Shielded Instance Config - Shielded Instance options.
- sole
Tenant NodeConfig Pool Node Config Sole Tenant Config - Node affinity options for sole tenant node pools.
- spot boolean
- Whether the nodes are created as spot VM instances.
- string[]
- The list of instance tags applied to all nodes.
- taints
Node
Pool Node Config Taint[] - List of Kubernetes taints to be applied to each node.
- workload
Metadata NodeConfig Pool Node Config Workload Metadata Config - The workload metadata configuration for this node.
- advanced_
machine_ Nodefeatures Pool Node Config Advanced Machine Features - Specifies options for controlling advanced machine features.
- boot_
disk_ strkms_ key - The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
- confidential_
nodes NodePool Node Config Confidential Nodes - Configuration for Confidential Nodes feature. Structure is documented below.
- containerd_
config NodePool Node Config Containerd Config - Parameters for containerd configuration.
- disk_
size_ intgb - Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
- disk_
type str - Type of the disk attached to each node. Such as pd-standard, pd-balanced or pd-ssd
- effective_
taints Sequence[NodePool Node Config Effective Taint] - List of kubernetes taints applied to each node.
- enable_
confidential_ boolstorage - If enabled boot disks are configured with confidential mode.
- ephemeral_
storage_ Nodeconfig Pool Node Config Ephemeral Storage Config - Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
- ephemeral_
storage_ Nodelocal_ ssd_ config Pool Node Config Ephemeral Storage Local Ssd Config - Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
- fast_
socket NodePool Node Config Fast Socket - Enable or disable NCCL Fast Socket in the node pool.
- gcfs_
config NodePool Node Config Gcfs Config - GCFS configuration for this node.
- guest_
accelerators Sequence[NodePool Node Config Guest Accelerator] - List of the type and count of accelerator cards attached to the instance.
- gvnic
Node
Pool Node Config Gvnic - Enable or disable gvnic in the node pool.
- host_
maintenance_ Nodepolicy Pool Node Config Host Maintenance Policy - The maintenance policy for the hosts on which the GKE VMs run on.
- image_
type str - The image type to use for this node. Note that for a given image type, the latest version of it will be used.
- kubelet_
config NodePool Node Config Kubelet Config - Node kubelet configs.
- labels Mapping[str, str]
- The map of Kubernetes labels (key/value pairs) to be applied to each node. These will added in addition to any default label(s) that Kubernetes may apply to the node.
- linux_
node_ Nodeconfig Pool Node Config Linux Node Config - Parameters that can be configured on Linux nodes.
- local_
nvme_ Nodessd_ block_ config Pool Node Config Local Nvme Ssd Block Config - Parameters for raw-block local NVMe SSDs.
- local_
ssd_ intcount - The number of local SSD disks to be attached to the node.
- logging_
variant str - Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
- machine_
type str - The name of a Google Compute Engine machine type.
- metadata Mapping[str, str]
- The metadata key/value pairs assigned to instances in the cluster.
- min_
cpu_ strplatform - Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform.
- node_
group str - Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes.
- oauth_
scopes Sequence[str] - The set of Google API scopes to be made available on all of the node VMs.
- preemptible bool
- Whether the nodes are created as preemptible VM instances.
- reservation_
affinity NodePool Node Config Reservation Affinity - The reservation affinity configuration for the node pool.
- resource_
labels Mapping[str, str] - The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
- Mapping[str, Any]
- A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.
- sandbox_
config NodePool Node Config Sandbox Config - Sandbox configuration for this node.
- secondary_
boot_ Sequence[Nodedisks Pool Node Config Secondary Boot Disk] - Secondary boot disks for preloading data or container images.
- service_
account str - The Google Cloud Platform Service Account to be used by the node VMs.
- shielded_
instance_ Nodeconfig Pool Node Config Shielded Instance Config - Shielded Instance options.
- sole_
tenant_ Nodeconfig Pool Node Config Sole Tenant Config - Node affinity options for sole tenant node pools.
- spot bool
- Whether the nodes are created as spot VM instances.
- Sequence[str]
- The list of instance tags applied to all nodes.
- taints
Sequence[Node
Pool Node Config Taint] - List of Kubernetes taints to be applied to each node.
- workload_
metadata_ Nodeconfig Pool Node Config Workload Metadata Config - The workload metadata configuration for this node.
- advanced
Machine Property MapFeatures - Specifies options for controlling advanced machine features.
- boot
Disk StringKms Key - The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
- confidential
Nodes Property Map - Configuration for Confidential Nodes feature. Structure is documented below.
- containerd
Config Property Map - Parameters for containerd configuration.
- disk
Size NumberGb - Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
- disk
Type String - Type of the disk attached to each node. Such as pd-standard, pd-balanced or pd-ssd
- effective
Taints List<Property Map> - List of kubernetes taints applied to each node.
- enable
Confidential BooleanStorage - If enabled boot disks are configured with confidential mode.
- ephemeral
Storage Property MapConfig - Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
- ephemeral
Storage Property MapLocal Ssd Config - Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
- fast
Socket Property Map - Enable or disable NCCL Fast Socket in the node pool.
- gcfs
Config Property Map - GCFS configuration for this node.
- guest
Accelerators List<Property Map> - List of the type and count of accelerator cards attached to the instance.
- gvnic Property Map
- Enable or disable gvnic in the node pool.
- host
Maintenance Property MapPolicy - The maintenance policy for the hosts on which the GKE VMs run on.
- image
Type String - The image type to use for this node. Note that for a given image type, the latest version of it will be used.
- kubelet
Config Property Map - Node kubelet configs.
- labels Map<String>
- The map of Kubernetes labels (key/value pairs) to be applied to each node. These will added in addition to any default label(s) that Kubernetes may apply to the node.
- linux
Node Property MapConfig - Parameters that can be configured on Linux nodes.
- local
Nvme Property MapSsd Block Config - Parameters for raw-block local NVMe SSDs.
- local
Ssd NumberCount - The number of local SSD disks to be attached to the node.
- logging
Variant String - Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
- machine
Type String - The name of a Google Compute Engine machine type.
- metadata Map<String>
- The metadata key/value pairs assigned to instances in the cluster.
- min
Cpu StringPlatform - Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform.
- node
Group String - Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes.
- oauth
Scopes List<String> - The set of Google API scopes to be made available on all of the node VMs.
- preemptible Boolean
- Whether the nodes are created as preemptible VM instances.
- reservation
Affinity Property Map - The reservation affinity configuration for the node pool.
- resource
Labels Map<String> - The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
- Map<Any>
- A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.
- sandbox
Config Property Map - Sandbox configuration for this node.
- secondary
Boot List<Property Map>Disks - Secondary boot disks for preloading data or container images.
- service
Account String - The Google Cloud Platform Service Account to be used by the node VMs.
- shielded
Instance Property MapConfig - Shielded Instance options.
- sole
Tenant Property MapConfig - Node affinity options for sole tenant node pools.
- spot Boolean
- Whether the nodes are created as spot VM instances.
- List<String>
- The list of instance tags applied to all nodes.
- taints List<Property Map>
- List of Kubernetes taints to be applied to each node.
- workload
Metadata Property MapConfig - The workload metadata configuration for this node.
NodePoolNodeConfigAdvancedMachineFeatures, NodePoolNodeConfigAdvancedMachineFeaturesArgs
- Threads
Per intCore - The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
- Enable
Nested boolVirtualization - Whether the node should have nested virtualization enabled.
- Threads
Per intCore - The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
- Enable
Nested boolVirtualization - Whether the node should have nested virtualization enabled.
- threads
Per IntegerCore - The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
- enable
Nested BooleanVirtualization - Whether the node should have nested virtualization enabled.
- threads
Per numberCore - The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
- enable
Nested booleanVirtualization - Whether the node should have nested virtualization enabled.
- threads_
per_ intcore - The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
- enable_
nested_ boolvirtualization - Whether the node should have nested virtualization enabled.
- threads
Per NumberCore - The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
- enable
Nested BooleanVirtualization - Whether the node should have nested virtualization enabled.
NodePoolNodeConfigConfidentialNodes, NodePoolNodeConfigConfidentialNodesArgs
- Enabled bool
- Enable Confidential GKE Nodes for this cluster, to enforce encryption of data in-use.
- Enabled bool
- Enable Confidential GKE Nodes for this cluster, to enforce encryption of data in-use.
- enabled Boolean
- Enable Confidential GKE Nodes for this cluster, to enforce encryption of data in-use.
- enabled boolean
- Enable Confidential GKE Nodes for this cluster, to enforce encryption of data in-use.
- enabled bool
- Enable Confidential GKE Nodes for this cluster, to enforce encryption of data in-use.
- enabled Boolean
- Enable Confidential GKE Nodes for this cluster, to enforce encryption of data in-use.
NodePoolNodeConfigContainerdConfig, NodePoolNodeConfigContainerdConfigArgs
- Private
Registry NodeAccess Config Pool Node Config Containerd Config Private Registry Access Config - Parameters for private container registries configuration.
- Private
Registry NodeAccess Config Pool Node Config Containerd Config Private Registry Access Config - Parameters for private container registries configuration.
- private
Registry NodeAccess Config Pool Node Config Containerd Config Private Registry Access Config - Parameters for private container registries configuration.
- private
Registry NodeAccess Config Pool Node Config Containerd Config Private Registry Access Config - Parameters for private container registries configuration.
- private_
registry_ Nodeaccess_ config Pool Node Config Containerd Config Private Registry Access Config - Parameters for private container registries configuration.
- private
Registry Property MapAccess Config - Parameters for private container registries configuration.
NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfig, NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigArgs
- Enabled bool
- Whether or not private registries are configured.
- List<Node
Pool Node Config Containerd Config Private Registry Access Config Certificate Authority Domain Config> - Parameters for configuring CA certificate and domains.
- Enabled bool
- Whether or not private registries are configured.
- []Node
Pool Node Config Containerd Config Private Registry Access Config Certificate Authority Domain Config - Parameters for configuring CA certificate and domains.
- enabled Boolean
- Whether or not private registries are configured.
- List<Node
Pool Node Config Containerd Config Private Registry Access Config Certificate Authority Domain Config> - Parameters for configuring CA certificate and domains.
- enabled boolean
- Whether or not private registries are configured.
- Node
Pool Node Config Containerd Config Private Registry Access Config Certificate Authority Domain Config[] - Parameters for configuring CA certificate and domains.
- enabled bool
- Whether or not private registries are configured.
- Sequence[Node
Pool Node Config Containerd Config Private Registry Access Config Certificate Authority Domain Config] - Parameters for configuring CA certificate and domains.
- enabled Boolean
- Whether or not private registries are configured.
- List<Property Map>
- Parameters for configuring CA certificate and domains.
NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfig, NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigArgs
- Fqdns List<string>
- List of fully-qualified-domain-names. IPv4s and port specification are supported.
- Gcp
Secret NodeManager Certificate Config Pool Node Config Containerd Config Private Registry Access Config Certificate Authority Domain Config Gcp Secret Manager Certificate Config - Parameters for configuring a certificate hosted in GCP SecretManager.
- Fqdns []string
- List of fully-qualified-domain-names. IPv4s and port specification are supported.
- Gcp
Secret NodeManager Certificate Config Pool Node Config Containerd Config Private Registry Access Config Certificate Authority Domain Config Gcp Secret Manager Certificate Config - Parameters for configuring a certificate hosted in GCP SecretManager.
- fqdns List<String>
- List of fully-qualified-domain-names. IPv4s and port specification are supported.
- gcp
Secret NodeManager Certificate Config Pool Node Config Containerd Config Private Registry Access Config Certificate Authority Domain Config Gcp Secret Manager Certificate Config - Parameters for configuring a certificate hosted in GCP SecretManager.
- fqdns string[]
- List of fully-qualified-domain-names. IPv4s and port specification are supported.
- gcp
Secret NodeManager Certificate Config Pool Node Config Containerd Config Private Registry Access Config Certificate Authority Domain Config Gcp Secret Manager Certificate Config - Parameters for configuring a certificate hosted in GCP SecretManager.
- fqdns Sequence[str]
- List of fully-qualified-domain-names. IPv4s and port specification are supported.
- gcp_
secret_ Nodemanager_ certificate_ config Pool Node Config Containerd Config Private Registry Access Config Certificate Authority Domain Config Gcp Secret Manager Certificate Config - Parameters for configuring a certificate hosted in GCP SecretManager.
- fqdns List<String>
- List of fully-qualified-domain-names. IPv4s and port specification are supported.
- gcp
Secret Property MapManager Certificate Config - Parameters for configuring a certificate hosted in GCP SecretManager.
NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfig, NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfigArgs
- Secret
Uri string - URI for the secret that hosts a certificate. Must be in the format 'projects/PROJECT_NUM/secrets/SECRET_NAME/versions/VERSION_OR_LATEST'.
- Secret
Uri string - URI for the secret that hosts a certificate. Must be in the format 'projects/PROJECT_NUM/secrets/SECRET_NAME/versions/VERSION_OR_LATEST'.
- secret
Uri String - URI for the secret that hosts a certificate. Must be in the format 'projects/PROJECT_NUM/secrets/SECRET_NAME/versions/VERSION_OR_LATEST'.
- secret
Uri string - URI for the secret that hosts a certificate. Must be in the format 'projects/PROJECT_NUM/secrets/SECRET_NAME/versions/VERSION_OR_LATEST'.
- secret_
uri str - URI for the secret that hosts a certificate. Must be in the format 'projects/PROJECT_NUM/secrets/SECRET_NAME/versions/VERSION_OR_LATEST'.
- secret
Uri String - URI for the secret that hosts a certificate. Must be in the format 'projects/PROJECT_NUM/secrets/SECRET_NAME/versions/VERSION_OR_LATEST'.
NodePoolNodeConfigEffectiveTaint, NodePoolNodeConfigEffectiveTaintArgs
NodePoolNodeConfigEphemeralStorageConfig, NodePoolNodeConfigEphemeralStorageConfigArgs
- Local
Ssd intCount - Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
- Local
Ssd intCount - Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
- local
Ssd IntegerCount - Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
- local
Ssd numberCount - Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
- local_
ssd_ intcount - Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
- local
Ssd NumberCount - Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
NodePoolNodeConfigEphemeralStorageLocalSsdConfig, NodePoolNodeConfigEphemeralStorageLocalSsdConfigArgs
- Local
Ssd intCount - Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
- Local
Ssd intCount - Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
- local
Ssd IntegerCount - Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
- local
Ssd numberCount - Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
- local_
ssd_ intcount - Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
- local
Ssd NumberCount - Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
NodePoolNodeConfigFastSocket, NodePoolNodeConfigFastSocketArgs
- Enabled bool
- Whether or not NCCL Fast Socket is enabled
- Enabled bool
- Whether or not NCCL Fast Socket is enabled
- enabled Boolean
- Whether or not NCCL Fast Socket is enabled
- enabled boolean
- Whether or not NCCL Fast Socket is enabled
- enabled bool
- Whether or not NCCL Fast Socket is enabled
- enabled Boolean
- Whether or not NCCL Fast Socket is enabled
NodePoolNodeConfigGcfsConfig, NodePoolNodeConfigGcfsConfigArgs
- Enabled bool
- Whether or not GCFS is enabled
- Enabled bool
- Whether or not GCFS is enabled
- enabled Boolean
- Whether or not GCFS is enabled
- enabled boolean
- Whether or not GCFS is enabled
- enabled bool
- Whether or not GCFS is enabled
- enabled Boolean
- Whether or not GCFS is enabled
NodePoolNodeConfigGuestAccelerator, NodePoolNodeConfigGuestAcceleratorArgs
- Count int
- The number of the accelerator cards exposed to an instance.
- Type string
- The accelerator type resource name.
- Gpu
Driver NodeInstallation Config Pool Node Config Guest Accelerator Gpu Driver Installation Config - Configuration for auto installation of GPU driver.
- Gpu
Partition stringSize - Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning)
- Gpu
Sharing NodeConfig Pool Node Config Guest Accelerator Gpu Sharing Config - Configuration for GPU sharing.
- Count int
- The number of the accelerator cards exposed to an instance.
- Type string
- The accelerator type resource name.
- Gpu
Driver NodeInstallation Config Pool Node Config Guest Accelerator Gpu Driver Installation Config - Configuration for auto installation of GPU driver.
- Gpu
Partition stringSize - Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning)
- Gpu
Sharing NodeConfig Pool Node Config Guest Accelerator Gpu Sharing Config - Configuration for GPU sharing.
- count Integer
- The number of the accelerator cards exposed to an instance.
- type String
- The accelerator type resource name.
- gpu
Driver NodeInstallation Config Pool Node Config Guest Accelerator Gpu Driver Installation Config - Configuration for auto installation of GPU driver.
- gpu
Partition StringSize - Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning)
- gpu
Sharing NodeConfig Pool Node Config Guest Accelerator Gpu Sharing Config - Configuration for GPU sharing.
- count number
- The number of the accelerator cards exposed to an instance.
- type string
- The accelerator type resource name.
- gpu
Driver NodeInstallation Config Pool Node Config Guest Accelerator Gpu Driver Installation Config - Configuration for auto installation of GPU driver.
- gpu
Partition stringSize - Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning)
- gpu
Sharing NodeConfig Pool Node Config Guest Accelerator Gpu Sharing Config - Configuration for GPU sharing.
- count int
- The number of the accelerator cards exposed to an instance.
- type str
- The accelerator type resource name.
- gpu_
driver_ Nodeinstallation_ config Pool Node Config Guest Accelerator Gpu Driver Installation Config - Configuration for auto installation of GPU driver.
- gpu_
partition_ strsize - Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning)
- gpu_
sharing_ Nodeconfig Pool Node Config Guest Accelerator Gpu Sharing Config - Configuration for GPU sharing.
- count Number
- The number of the accelerator cards exposed to an instance.
- type String
- The accelerator type resource name.
- gpu
Driver Property MapInstallation Config - Configuration for auto installation of GPU driver.
- gpu
Partition StringSize - Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning)
- gpu
Sharing Property MapConfig - Configuration for GPU sharing.
NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfig, NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfigArgs
- Gpu
Driver stringVersion - Mode for how the GPU driver is installed.
- Gpu
Driver stringVersion - Mode for how the GPU driver is installed.
- gpu
Driver StringVersion - Mode for how the GPU driver is installed.
- gpu
Driver stringVersion - Mode for how the GPU driver is installed.
- gpu_
driver_ strversion - Mode for how the GPU driver is installed.
- gpu
Driver StringVersion - Mode for how the GPU driver is installed.
NodePoolNodeConfigGuestAcceleratorGpuSharingConfig, NodePoolNodeConfigGuestAcceleratorGpuSharingConfigArgs
- Gpu
Sharing stringStrategy - The type of GPU sharing strategy to enable on the GPU node. Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig)
- int
- The maximum number of containers that can share a GPU.
- Gpu
Sharing stringStrategy - The type of GPU sharing strategy to enable on the GPU node. Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig)
- int
- The maximum number of containers that can share a GPU.
- gpu
Sharing StringStrategy - The type of GPU sharing strategy to enable on the GPU node. Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig)
- Integer
- The maximum number of containers that can share a GPU.
- gpu
Sharing stringStrategy - The type of GPU sharing strategy to enable on the GPU node. Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig)
- number
- The maximum number of containers that can share a GPU.
- gpu_
sharing_ strstrategy - The type of GPU sharing strategy to enable on the GPU node. Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig)
- int
- The maximum number of containers that can share a GPU.
- gpu
Sharing StringStrategy - The type of GPU sharing strategy to enable on the GPU node. Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig)
- Number
- The maximum number of containers that can share a GPU.
NodePoolNodeConfigGvnic, NodePoolNodeConfigGvnicArgs
- Enabled bool
- Whether or not gvnic is enabled
- Enabled bool
- Whether or not gvnic is enabled
- enabled Boolean
- Whether or not gvnic is enabled
- enabled boolean
- Whether or not gvnic is enabled
- enabled bool
- Whether or not gvnic is enabled
- enabled Boolean
- Whether or not gvnic is enabled
NodePoolNodeConfigHostMaintenancePolicy, NodePoolNodeConfigHostMaintenancePolicyArgs
- Maintenance
Interval string - .
- Maintenance
Interval string - .
- maintenance
Interval String - .
- maintenance
Interval string - .
- maintenance_
interval str - .
- maintenance
Interval String - .
NodePoolNodeConfigKubeletConfig, NodePoolNodeConfigKubeletConfigArgs
- Cpu
Manager stringPolicy - Control the CPU management policy on the node.
- Cpu
Cfs boolQuota - Enable CPU CFS quota enforcement for containers that specify CPU limits.
- Cpu
Cfs stringQuota Period - Set the CPU CFS quota period value 'cpu.cfs_period_us'.
- Pod
Pids intLimit - Controls the maximum number of processes allowed to run in a pod.
- Cpu
Manager stringPolicy - Control the CPU management policy on the node.
- Cpu
Cfs boolQuota - Enable CPU CFS quota enforcement for containers that specify CPU limits.
- Cpu
Cfs stringQuota Period - Set the CPU CFS quota period value 'cpu.cfs_period_us'.
- Pod
Pids intLimit - Controls the maximum number of processes allowed to run in a pod.
- cpu
Manager StringPolicy - Control the CPU management policy on the node.
- cpu
Cfs BooleanQuota - Enable CPU CFS quota enforcement for containers that specify CPU limits.
- cpu
Cfs StringQuota Period - Set the CPU CFS quota period value 'cpu.cfs_period_us'.
- pod
Pids IntegerLimit - Controls the maximum number of processes allowed to run in a pod.
- cpu
Manager stringPolicy - Control the CPU management policy on the node.
- cpu
Cfs booleanQuota - Enable CPU CFS quota enforcement for containers that specify CPU limits.
- cpu
Cfs stringQuota Period - Set the CPU CFS quota period value 'cpu.cfs_period_us'.
- pod
Pids numberLimit - Controls the maximum number of processes allowed to run in a pod.
- cpu_
manager_ strpolicy - Control the CPU management policy on the node.
- cpu_
cfs_ boolquota - Enable CPU CFS quota enforcement for containers that specify CPU limits.
- cpu_
cfs_ strquota_ period - Set the CPU CFS quota period value 'cpu.cfs_period_us'.
- pod_
pids_ intlimit - Controls the maximum number of processes allowed to run in a pod.
- cpu
Manager StringPolicy - Control the CPU management policy on the node.
- cpu
Cfs BooleanQuota - Enable CPU CFS quota enforcement for containers that specify CPU limits.
- cpu
Cfs StringQuota Period - Set the CPU CFS quota period value 'cpu.cfs_period_us'.
- pod
Pids NumberLimit - Controls the maximum number of processes allowed to run in a pod.
NodePoolNodeConfigLinuxNodeConfig, NodePoolNodeConfigLinuxNodeConfigArgs
- CgroupMode string - cgroupMode specifies the cgroup mode to be used on the node.
- Sysctls Dictionary<string, string>
- The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
- CgroupMode string - cgroupMode specifies the cgroup mode to be used on the node.
- Sysctls map[string]string
- The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
- cgroupMode String - cgroupMode specifies the cgroup mode to be used on the node.
- sysctls Map<String,String>
- The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
- cgroupMode string - cgroupMode specifies the cgroup mode to be used on the node.
- sysctls {[key: string]: string}
- The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
- cgroup_mode str - cgroupMode specifies the cgroup mode to be used on the node.
- sysctls Mapping[str, str]
- The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
- cgroupMode String - cgroupMode specifies the cgroup mode to be used on the node.
- sysctls Map<String>
- The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
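A minimal TypeScript sketch of a node pool with Linux node settings is below; the cluster name is assumed, and sysctl keys must come from GKE's allowed list.
import * as gcp from "@pulumi/gcp";
// Minimal sketch: set the cgroup mode and one allowed sysctl on an assumed existing cluster.
const linuxPool = new gcp.container.NodePool("linux-pool", {
    cluster: "my-gke-cluster",
    location: "us-central1",
    nodeCount: 1,
    nodeConfig: {
        machineType: "e2-medium",
        linuxNodeConfig: {
            cgroupMode: "CGROUP_MODE_V2",
            sysctls: {
                "net.core.somaxconn": "4096",
            },
        },
    },
});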
NodePoolNodeConfigLocalNvmeSsdBlockConfig, NodePoolNodeConfigLocalNvmeSsdBlockConfigArgs
- LocalSsdCount int - Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size.
- LocalSsdCount int - Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size.
- localSsdCount Integer - Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size.
- localSsdCount number - Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size.
- local_ssd_count int - Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size.
- localSsdCount Number - Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size.
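A TypeScript sketch of a node pool with a raw-block local NVMe SSD is shown below; the cluster name, zone and machine type are placeholders, and the machine type and zone must support local SSD attachment.
import * as gcp from "@pulumi/gcp";
// Minimal sketch: attach one raw-block local NVMe SSD (375 GB) per node.
const nvmePool = new gcp.container.NodePool("nvme-pool", {
    cluster: "my-gke-cluster",
    location: "us-central1-a",
    nodeCount: 1,
    nodeConfig: {
        machineType: "n2-standard-8",
        localNvmeSsdBlockConfig: {
            localSsdCount: 1,
        },
    },
});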
NodePoolNodeConfigReservationAffinity, NodePoolNodeConfigReservationAffinityArgs
- ConsumeReservationType string - Corresponds to the type of reservation consumption.
- Key string
- The label key of a reservation resource.
- Values List<string>
- The label values of the reservation resource.
- ConsumeReservationType string - Corresponds to the type of reservation consumption.
- Key string
- The label key of a reservation resource.
- Values []string
- The label values of the reservation resource.
- consumeReservationType String - Corresponds to the type of reservation consumption.
- key String
- The label key of a reservation resource.
- values List<String>
- The label values of the reservation resource.
- consumeReservationType string - Corresponds to the type of reservation consumption.
- key string
- The label key of a reservation resource.
- values string[]
- The label values of the reservation resource.
- consume_reservation_type str - Corresponds to the type of reservation consumption.
- key str
- The label key of a reservation resource.
- values Sequence[str]
- The label values of the reservation resource.
- consumeReservationType String - Corresponds to the type of reservation consumption.
- key String
- The label key of a reservation resource.
- values List<String>
- The label values of the reservation resource.
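A TypeScript sketch that pins a node pool to a specific Compute Engine reservation follows; the cluster name, zone, machine type and reservation name "my-reservation" are all assumptions, and the reservation must exist in the same project and zone.
import * as gcp from "@pulumi/gcp";
// Minimal sketch: consume capacity only from a named reservation.
const reservedPool = new gcp.container.NodePool("reserved-pool", {
    cluster: "my-gke-cluster",
    location: "us-central1-a",
    nodeCount: 1,
    nodeConfig: {
        machineType: "n2-standard-4",
        reservationAffinity: {
            consumeReservationType: "SPECIFIC_RESERVATION",
            key: "compute.googleapis.com/reservation-name",
            values: ["my-reservation"],
        },
    },
});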
NodePoolNodeConfigSandboxConfig, NodePoolNodeConfigSandboxConfigArgs
- SandboxType string - Type of the sandbox to use for the node (e.g. 'gvisor')
- SandboxType string - Type of the sandbox to use for the node (e.g. 'gvisor')
- sandboxType String - Type of the sandbox to use for the node (e.g. 'gvisor')
- sandboxType string - Type of the sandbox to use for the node (e.g. 'gvisor')
- sandbox_type str - Type of the sandbox to use for the node (e.g. 'gvisor')
- sandboxType String - Type of the sandbox to use for the node (e.g. 'gvisor')
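A TypeScript sketch of a GKE Sandbox (gVisor) node pool is shown below; the cluster name is assumed, and GKE Sandbox needs a containerd image type and a machine type larger than e2-micro/small/medium.
import * as gcp from "@pulumi/gcp";
// Minimal sketch: run pods on this pool inside the gVisor sandbox.
const gvisorPool = new gcp.container.NodePool("gvisor-pool", {
    cluster: "my-gke-cluster",
    location: "us-central1",
    nodeCount: 1,
    nodeConfig: {
        machineType: "n2-standard-2",
        imageType: "COS_CONTAINERD",
        sandboxConfig: {
            sandboxType: "gvisor",
        },
    },
});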
NodePoolNodeConfigSecondaryBootDisk, NodePoolNodeConfigSecondaryBootDiskArgs
- disk_image str - Disk image to create the secondary boot disk from
- mode str
- Mode for how the secondary boot disk is used.
NodePoolNodeConfigShieldedInstanceConfig, NodePoolNodeConfigShieldedInstanceConfigArgs
- EnableIntegrityMonitoring bool - Defines whether the instance has integrity monitoring enabled.
- EnableSecureBoot bool - Defines whether the instance has Secure Boot enabled.
- EnableIntegrityMonitoring bool - Defines whether the instance has integrity monitoring enabled.
- EnableSecureBoot bool - Defines whether the instance has Secure Boot enabled.
- enableIntegrityMonitoring Boolean - Defines whether the instance has integrity monitoring enabled.
- enableSecureBoot Boolean - Defines whether the instance has Secure Boot enabled.
- enableIntegrityMonitoring boolean - Defines whether the instance has integrity monitoring enabled.
- enableSecureBoot boolean - Defines whether the instance has Secure Boot enabled.
- enable_integrity_monitoring bool - Defines whether the instance has integrity monitoring enabled.
- enable_secure_boot bool - Defines whether the instance has Secure Boot enabled.
- enableIntegrityMonitoring Boolean - Defines whether the instance has integrity monitoring enabled.
- enableSecureBoot Boolean - Defines whether the instance has Secure Boot enabled.
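For example, a Shielded GKE node pool with Secure Boot and integrity monitoring could be declared as in the TypeScript sketch below; the cluster name is an assumption.
import * as gcp from "@pulumi/gcp";
// Minimal sketch: enable Shielded VM features on the pool's nodes.
const shieldedPool = new gcp.container.NodePool("shielded-pool", {
    cluster: "my-gke-cluster",
    location: "us-central1",
    nodeCount: 1,
    nodeConfig: {
        machineType: "e2-medium",
        shieldedInstanceConfig: {
            enableSecureBoot: true,
            enableIntegrityMonitoring: true,
        },
    },
});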
NodePoolNodeConfigSoleTenantConfig, NodePoolNodeConfigSoleTenantConfigArgs
NodePoolNodeConfigSoleTenantConfigNodeAffinity, NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs
NodePoolNodeConfigTaint, NodePoolNodeConfigTaintArgs
NodePoolNodeConfigWorkloadMetadataConfig, NodePoolNodeConfigWorkloadMetadataConfigArgs
- Mode string
- Mode is the configuration for how to expose metadata to workloads running on the node.
- Mode string
- Mode is the configuration for how to expose metadata to workloads running on the node.
- mode String
- Mode is the configuration for how to expose metadata to workloads running on the node.
- mode string
- Mode is the configuration for how to expose metadata to workloads running on the node.
- mode str
- Mode is the configuration for how to expose metadata to workloads running on the node.
- mode String
- Mode is the configuration for how to expose metadata to workloads running on the node.
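A common use of this block is enabling the GKE metadata server for Workload Identity, as in the TypeScript sketch below; the cluster name is assumed and the cluster itself must have Workload Identity configured.
import * as gcp from "@pulumi/gcp";
// Minimal sketch: expose the GKE metadata server to workloads on this pool.
const wiPool = new gcp.container.NodePool("wi-pool", {
    cluster: "my-gke-cluster",
    location: "us-central1",
    nodeCount: 1,
    nodeConfig: {
        machineType: "e2-medium",
        workloadMetadataConfig: {
            mode: "GKE_METADATA",
        },
    },
});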
NodePoolPlacementPolicy, NodePoolPlacementPolicyArgs
- Type string
- The type of the policy. Supports a single value: COMPACT. Specifying COMPACT placement policy type places the node pool's nodes in closer physical proximity in order to reduce network latency between nodes.
- PolicyName string - If set, refers to the name of a custom resource policy supplied by the user. The resource policy must be in the same project and region as the node pool. If not found, an InvalidArgument error is returned.
- TpuTopology string - The TPU placement topology for the pod slice node pool.
- Type string
- The type of the policy. Supports a single value: COMPACT. Specifying COMPACT placement policy type places the node pool's nodes in closer physical proximity in order to reduce network latency between nodes.
- PolicyName string - If set, refers to the name of a custom resource policy supplied by the user. The resource policy must be in the same project and region as the node pool. If not found, an InvalidArgument error is returned.
- TpuTopology string - The TPU placement topology for the pod slice node pool.
- type String
- The type of the policy. Supports a single value: COMPACT. Specifying COMPACT placement policy type places the node pool's nodes in closer physical proximity in order to reduce network latency between nodes.
- policyName String - If set, refers to the name of a custom resource policy supplied by the user. The resource policy must be in the same project and region as the node pool. If not found, an InvalidArgument error is returned.
- tpuTopology String - The TPU placement topology for the pod slice node pool.
- type string
- The type of the policy. Supports a single value: COMPACT. Specifying COMPACT placement policy type places the node pool's nodes in closer physical proximity in order to reduce network latency between nodes.
- policyName string - If set, refers to the name of a custom resource policy supplied by the user. The resource policy must be in the same project and region as the node pool. If not found, an InvalidArgument error is returned.
- tpuTopology string - The TPU placement topology for the pod slice node pool.
- type str
- The type of the policy. Supports a single value: COMPACT. Specifying COMPACT placement policy type places the node pool's nodes in closer physical proximity in order to reduce network latency between nodes.
- policy_name str - If set, refers to the name of a custom resource policy supplied by the user. The resource policy must be in the same project and region as the node pool. If not found, an InvalidArgument error is returned.
- tpu_topology str - The TPU placement topology for the pod slice node pool.
- type String
- The type of the policy. Supports a single value: COMPACT. Specifying COMPACT placement policy type places the node pool's nodes in closer physical proximity in order to reduce network latency between nodes.
- policyName String - If set, refers to the name of a custom resource policy supplied by the user. The resource policy must be in the same project and region as the node pool. If not found, an InvalidArgument error is returned.
- tpuTopology String - The TPU placement topology for the pod slice node pool.
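A TypeScript sketch of a compact-placement node pool follows; the cluster name, zone and machine type are assumptions, and COMPACT placement is limited to certain machine families.
import * as gcp from "@pulumi/gcp";
// Minimal sketch: request compact placement to reduce inter-node latency.
const compactPool = new gcp.container.NodePool("compact-pool", {
    cluster: "my-gke-cluster",
    location: "us-central1-a",
    nodeCount: 2,
    nodeConfig: {
        machineType: "c2-standard-4",
    },
    placementPolicy: {
        type: "COMPACT",
    },
});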
NodePoolQueuedProvisioning, NodePoolQueuedProvisioningArgs
- Enabled bool
- Makes nodes obtainable through the ProvisioningRequest API exclusively.
- Enabled bool
- Makes nodes obtainable through the ProvisioningRequest API exclusively.
- enabled Boolean
- Makes nodes obtainable through the ProvisioningRequest API exclusively.
- enabled boolean
- Makes nodes obtainable through the ProvisioningRequest API exclusively.
- enabled bool
- Makes nodes obtainable through the ProvisioningRequest API exclusively.
- enabled Boolean
- Makes nodes obtainable through the ProvisioningRequest API exclusively.
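A hedged TypeScript sketch of a queued-provisioning node pool follows; the cluster name and autoscaling bounds are assumptions, and GKE imposes additional requirements on such pools (for example around autoscaling and reservation settings), so treat this as a starting point only.
import * as gcp from "@pulumi/gcp";
// Minimal sketch: nodes in this pool are created only through ProvisioningRequest objects.
const queuedPool = new gcp.container.NodePool("queued-pool", {
    cluster: "my-gke-cluster",
    location: "us-central1",
    initialNodeCount: 0,
    autoscaling: {
        totalMinNodeCount: 0,
        totalMaxNodeCount: 10,
        locationPolicy: "ANY",
    },
    queuedProvisioning: {
        enabled: true,
    },
    nodeConfig: {
        machineType: "e2-medium",
    },
});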
NodePoolUpgradeSettings, NodePoolUpgradeSettingsArgs
- BlueGreenSettings NodePoolUpgradeSettingsBlueGreenSettings - The settings to adjust blue green upgrades. Structure is documented below.
- MaxSurge int - The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.
- MaxUnavailable int - The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. max_surge and max_unavailable must not be negative and at least one of them must be greater than zero.
- Strategy string - The upgrade strategy to be used for upgrading the nodes.
- BlueGreenSettings NodePoolUpgradeSettingsBlueGreenSettings - The settings to adjust blue green upgrades. Structure is documented below.
- MaxSurge int - The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.
- MaxUnavailable int - The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. max_surge and max_unavailable must not be negative and at least one of them must be greater than zero.
- Strategy string - The upgrade strategy to be used for upgrading the nodes.
- blueGreenSettings NodePoolUpgradeSettingsBlueGreenSettings - The settings to adjust blue green upgrades. Structure is documented below.
- maxSurge Integer - The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.
- maxUnavailable Integer - The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. max_surge and max_unavailable must not be negative and at least one of them must be greater than zero.
- strategy String - The upgrade strategy to be used for upgrading the nodes.
- blueGreenSettings NodePoolUpgradeSettingsBlueGreenSettings - The settings to adjust blue green upgrades. Structure is documented below.
- maxSurge number - The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.
- maxUnavailable number - The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. max_surge and max_unavailable must not be negative and at least one of them must be greater than zero.
- strategy string - The upgrade strategy to be used for upgrading the nodes.
- blue_green_settings NodePoolUpgradeSettingsBlueGreenSettings - The settings to adjust blue green upgrades. Structure is documented below.
- max_surge int - The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.
- max_unavailable int - The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. max_surge and max_unavailable must not be negative and at least one of them must be greater than zero.
- strategy str - The upgrade strategy to be used for upgrading the nodes.
- blueGreenSettings Property Map - The settings to adjust blue green upgrades. Structure is documented below.
- maxSurge Number - The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.
- maxUnavailable Number - The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. max_surge and max_unavailable must not be negative and at least one of them must be greater than zero.
- strategy String - The upgrade strategy to be used for upgrading the nodes.
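For example, a surge-style upgrade that replaces one node at a time could be declared as in the TypeScript sketch below; the cluster name is assumed.
import * as gcp from "@pulumi/gcp";
// Minimal sketch: one extra node is created during upgrades, and no node is taken down early.
const surgePool = new gcp.container.NodePool("surge-pool", {
    cluster: "my-gke-cluster",
    location: "us-central1",
    nodeCount: 3,
    upgradeSettings: {
        strategy: "SURGE",
        maxSurge: 1,
        maxUnavailable: 0,
    },
    nodeConfig: {
        machineType: "e2-medium",
    },
});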
NodePoolUpgradeSettingsBlueGreenSettings, NodePoolUpgradeSettingsBlueGreenSettingsArgs
- StandardRolloutPolicy NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy - Specifies the standard policy settings for blue-green upgrades.
- NodePoolSoakDuration string - Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.
- StandardRolloutPolicy NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy - Specifies the standard policy settings for blue-green upgrades.
- NodePoolSoakDuration string - Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.
- standardRolloutPolicy NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy - Specifies the standard policy settings for blue-green upgrades.
- nodePoolSoakDuration String - Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.
- standardRolloutPolicy NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy - Specifies the standard policy settings for blue-green upgrades.
- nodePoolSoakDuration string - Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.
- standard_rollout_policy NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy - Specifies the standard policy settings for blue-green upgrades.
- node_pool_soak_duration str - Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.
- standardRolloutPolicy Property Map - Specifies the standard policy settings for blue-green upgrades.
- nodePoolSoakDuration String - Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.
NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy, NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicyArgs
- BatchNodeCount int - Number of blue nodes to drain in a batch.
- BatchPercentage double - Percentage of the blue pool nodes to drain in a batch.
- BatchSoakDuration string - Soak time after each batch gets drained.
- BatchNodeCount int - Number of blue nodes to drain in a batch.
- BatchPercentage float64 - Percentage of the blue pool nodes to drain in a batch.
- BatchSoakDuration string - Soak time after each batch gets drained.
- batchNodeCount Integer - Number of blue nodes to drain in a batch.
- batchPercentage Double - Percentage of the blue pool nodes to drain in a batch.
- batchSoakDuration String - Soak time after each batch gets drained.
- batchNodeCount number - Number of blue nodes to drain in a batch.
- batchPercentage number - Percentage of the blue pool nodes to drain in a batch.
- batchSoakDuration string - Soak time after each batch gets drained.
- batch_node_count int - Number of blue nodes to drain in a batch.
- batch_percentage float - Percentage of the blue pool nodes to drain in a batch.
- batch_soak_duration str - Soak time after each batch gets drained.
- batchNodeCount Number - Number of blue nodes to drain in a batch.
- batchPercentage Number - Percentage of the blue pool nodes to drain in a batch.
- batchSoakDuration String - Soak time after each batch gets drained.
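Putting the two blocks above together, a blue-green upgrade that drains one node per batch, soaks ten minutes per batch, and keeps the blue pool for an hour before cleanup might look like the TypeScript sketch below; the cluster name and durations are illustrative assumptions.
import * as gcp from "@pulumi/gcp";
// Minimal sketch: blue-green node pool upgrades with a standard rollout policy.
const blueGreenPool = new gcp.container.NodePool("blue-green-pool", {
    cluster: "my-gke-cluster",
    location: "us-central1",
    nodeCount: 3,
    upgradeSettings: {
        strategy: "BLUE_GREEN",
        blueGreenSettings: {
            standardRolloutPolicy: {
                batchNodeCount: 1,
                batchSoakDuration: "600s",
            },
            nodePoolSoakDuration: "3600s",
        },
    },
    nodeConfig: {
        machineType: "e2-medium",
    },
});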
Import
Node pools can be imported using the project, location, cluster and name. If the project is omitted, the project value in the provider configuration will be used. Examples:
{{project_id}}/{{location}}/{{cluster_id}}/{{pool_id}}
{{location}}/{{cluster_id}}/{{pool_id}}
When using the pulumi import command, node pools can be imported using one of the formats above. For example:
$ pulumi import gcp:container/nodePool:NodePool default {{project_id}}/{{location}}/{{cluster_id}}/{{pool_id}}
$ pulumi import gcp:container/nodePool:NodePool default {{location}}/{{cluster_id}}/{{pool_id}}
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Google Cloud (GCP) Classic pulumi/pulumi-gcp
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the google-beta Terraform Provider.