Try AWS Native preview for resources not in the classic version.
aws.batch.JobDefinition
Provides a Batch Job Definition resource.
Example Usage
Job definition of type container
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const test = new aws.batch.JobDefinition("test", {
name: "my_test_batch_job_definition",
type: "container",
containerProperties: JSON.stringify({
command: [
"ls",
"-la",
],
image: "busybox",
resourceRequirements: [
{
type: "VCPU",
value: "0.25",
},
{
type: "MEMORY",
value: "512",
},
],
volumes: [{
host: {
sourcePath: "/tmp",
},
name: "tmp",
}],
environment: [{
name: "VARNAME",
value: "VARVAL",
}],
mountPoints: [{
sourceVolume: "tmp",
containerPath: "/tmp",
readOnly: false,
}],
ulimits: [{
hardLimit: 1024,
name: "nofile",
softLimit: 1024,
}],
}),
});
import pulumi
import json
import pulumi_aws as aws
test = aws.batch.JobDefinition("test",
name="my_test_batch_job_definition",
type="container",
container_properties=json.dumps({
"command": [
"ls",
"-la",
],
"image": "busybox",
"resourceRequirements": [
{
"type": "VCPU",
"value": "0.25",
},
{
"type": "MEMORY",
"value": "512",
},
],
"volumes": [{
"host": {
"sourcePath": "/tmp",
},
"name": "tmp",
}],
"environment": [{
"name": "VARNAME",
"value": "VARVAL",
}],
"mountPoints": [{
"sourceVolume": "tmp",
"containerPath": "/tmp",
"readOnly": False,
}],
"ulimits": [{
"hardLimit": 1024,
"name": "nofile",
"softLimit": 1024,
}],
}))
package main
import (
"encoding/json"
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/batch"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
tmpJSON0, err := json.Marshal(map[string]interface{}{
"command": []string{
"ls",
"-la",
},
"image": "busybox",
"resourceRequirements": []map[string]interface{}{
map[string]interface{}{
"type": "VCPU",
"value": "0.25",
},
map[string]interface{}{
"type": "MEMORY",
"value": "512",
},
},
"volumes": []map[string]interface{}{
map[string]interface{}{
"host": map[string]interface{}{
"sourcePath": "/tmp",
},
"name": "tmp",
},
},
"environment": []map[string]interface{}{
map[string]interface{}{
"name": "VARNAME",
"value": "VARVAL",
},
},
"mountPoints": []map[string]interface{}{
map[string]interface{}{
"sourceVolume": "tmp",
"containerPath": "/tmp",
"readOnly": false,
},
},
"ulimits": []map[string]interface{}{
map[string]interface{}{
"hardLimit": 1024,
"name": "nofile",
"softLimit": 1024,
},
},
})
if err != nil {
return err
}
json0 := string(tmpJSON0)
_, err = batch.NewJobDefinition(ctx, "test", &batch.JobDefinitionArgs{
Name: pulumi.String("my_test_batch_job_definition"),
Type: pulumi.String("container"),
ContainerProperties: pulumi.String(json0),
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using System.Text.Json;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var test = new Aws.Batch.JobDefinition("test", new()
{
Name = "my_test_batch_job_definition",
Type = "container",
ContainerProperties = JsonSerializer.Serialize(new Dictionary<string, object?>
{
["command"] = new[]
{
"ls",
"-la",
},
["image"] = "busybox",
["resourceRequirements"] = new[]
{
new Dictionary<string, object?>
{
["type"] = "VCPU",
["value"] = "0.25",
},
new Dictionary<string, object?>
{
["type"] = "MEMORY",
["value"] = "512",
},
},
["volumes"] = new[]
{
new Dictionary<string, object?>
{
["host"] = new Dictionary<string, object?>
{
["sourcePath"] = "/tmp",
},
["name"] = "tmp",
},
},
["environment"] = new[]
{
new Dictionary<string, object?>
{
["name"] = "VARNAME",
["value"] = "VARVAL",
},
},
["mountPoints"] = new[]
{
new Dictionary<string, object?>
{
["sourceVolume"] = "tmp",
["containerPath"] = "/tmp",
["readOnly"] = false,
},
},
["ulimits"] = new[]
{
new Dictionary<string, object?>
{
["hardLimit"] = 1024,
["name"] = "nofile",
["softLimit"] = 1024,
},
},
}),
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.batch.JobDefinition;
import com.pulumi.aws.batch.JobDefinitionArgs;
import static com.pulumi.codegen.internal.Serialization.*;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var test = new JobDefinition("test", JobDefinitionArgs.builder()
.name("my_test_batch_job_definition")
.type("container")
.containerProperties(serializeJson(
jsonObject(
jsonProperty("command", jsonArray(
"ls",
"-la"
)),
jsonProperty("image", "busybox"),
jsonProperty("resourceRequirements", jsonArray(
jsonObject(
jsonProperty("type", "VCPU"),
jsonProperty("value", "0.25")
),
jsonObject(
jsonProperty("type", "MEMORY"),
jsonProperty("value", "512")
)
)),
jsonProperty("volumes", jsonArray(jsonObject(
jsonProperty("host", jsonObject(
jsonProperty("sourcePath", "/tmp")
)),
jsonProperty("name", "tmp")
))),
jsonProperty("environment", jsonArray(jsonObject(
jsonProperty("name", "VARNAME"),
jsonProperty("value", "VARVAL")
))),
jsonProperty("mountPoints", jsonArray(jsonObject(
jsonProperty("sourceVolume", "tmp"),
jsonProperty("containerPath", "/tmp"),
jsonProperty("readOnly", false)
))),
jsonProperty("ulimits", jsonArray(jsonObject(
jsonProperty("hardLimit", 1024),
jsonProperty("name", "nofile"),
jsonProperty("softLimit", 1024)
)))
)))
.build());
}
}
resources:
test:
type: aws:batch:JobDefinition
properties:
name: my_test_batch_job_definition
type: container
containerProperties:
fn::toJSON:
command:
- ls
- -la
image: busybox
resourceRequirements:
- type: VCPU
value: '0.25'
- type: MEMORY
value: '512'
volumes:
- host:
sourcePath: /tmp
name: tmp
environment:
- name: VARNAME
value: VARVAL
mountPoints:
- sourceVolume: tmp
containerPath: /tmp
readOnly: false
ulimits:
- hardLimit: 1024
name: nofile
softLimit: 1024
Job definition of type multinode
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const test = new aws.batch.JobDefinition("test", {
name: "tf_test_batch_job_definition_multinode",
type: "multinode",
nodeProperties: JSON.stringify({
mainNode: 0,
nodeRangeProperties: [
{
container: {
command: [
"ls",
"-la",
],
image: "busybox",
memory: 128,
vcpus: 1,
},
targetNodes: "0:",
},
{
container: {
command: [
"echo",
"test",
],
image: "busybox",
memory: 128,
vcpus: 1,
},
targetNodes: "1:",
},
],
numNodes: 2,
}),
});
import pulumi
import json
import pulumi_aws as aws
test = aws.batch.JobDefinition("test",
name="tf_test_batch_job_definition_multinode",
type="multinode",
node_properties=json.dumps({
"mainNode": 0,
"nodeRangeProperties": [
{
"container": {
"command": [
"ls",
"-la",
],
"image": "busybox",
"memory": 128,
"vcpus": 1,
},
"targetNodes": "0:",
},
{
"container": {
"command": [
"echo",
"test",
],
"image": "busybox",
"memory": 128,
"vcpus": 1,
},
"targetNodes": "1:",
},
],
"numNodes": 2,
}))
package main
import (
"encoding/json"
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/batch"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
tmpJSON0, err := json.Marshal(map[string]interface{}{
"mainNode": 0,
"nodeRangeProperties": []map[string]interface{}{
map[string]interface{}{
"container": map[string]interface{}{
"command": []string{
"ls",
"-la",
},
"image": "busybox",
"memory": 128,
"vcpus": 1,
},
"targetNodes": "0:",
},
map[string]interface{}{
"container": map[string]interface{}{
"command": []string{
"echo",
"test",
},
"image": "busybox",
"memory": 128,
"vcpus": 1,
},
"targetNodes": "1:",
},
},
"numNodes": 2,
})
if err != nil {
return err
}
json0 := string(tmpJSON0)
_, err = batch.NewJobDefinition(ctx, "test", &batch.JobDefinitionArgs{
Name: pulumi.String("tf_test_batch_job_definition_multinode"),
Type: pulumi.String("multinode"),
NodeProperties: pulumi.String(json0),
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using System.Text.Json;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var test = new Aws.Batch.JobDefinition("test", new()
{
Name = "tf_test_batch_job_definition_multinode",
Type = "multinode",
NodeProperties = JsonSerializer.Serialize(new Dictionary<string, object?>
{
["mainNode"] = 0,
["nodeRangeProperties"] = new[]
{
new Dictionary<string, object?>
{
["container"] = new Dictionary<string, object?>
{
["command"] = new[]
{
"ls",
"-la",
},
["image"] = "busybox",
["memory"] = 128,
["vcpus"] = 1,
},
["targetNodes"] = "0:",
},
new Dictionary<string, object?>
{
["container"] = new Dictionary<string, object?>
{
["command"] = new[]
{
"echo",
"test",
},
["image"] = "busybox",
["memory"] = 128,
["vcpus"] = 1,
},
["targetNodes"] = "1:",
},
},
["numNodes"] = 2,
}),
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.batch.JobDefinition;
import com.pulumi.aws.batch.JobDefinitionArgs;
import static com.pulumi.codegen.internal.Serialization.*;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var test = new JobDefinition("test", JobDefinitionArgs.builder()
.name("tf_test_batch_job_definition_multinode")
.type("multinode")
.nodeProperties(serializeJson(
jsonObject(
jsonProperty("mainNode", 0),
jsonProperty("nodeRangeProperties", jsonArray(
jsonObject(
jsonProperty("container", jsonObject(
jsonProperty("command", jsonArray(
"ls",
"-la"
)),
jsonProperty("image", "busybox"),
jsonProperty("memory", 128),
jsonProperty("vcpus", 1)
)),
jsonProperty("targetNodes", "0:")
),
jsonObject(
jsonProperty("container", jsonObject(
jsonProperty("command", jsonArray(
"echo",
"test"
)),
jsonProperty("image", "busybox"),
jsonProperty("memory", 128),
jsonProperty("vcpus", 1)
)),
jsonProperty("targetNodes", "1:")
)
)),
jsonProperty("numNodes", 2)
)))
.build());
}
}
resources:
test:
type: aws:batch:JobDefinition
properties:
name: tf_test_batch_job_definition_multinode
type: multinode
nodeProperties:
fn::toJSON:
mainNode: 0
nodeRangeProperties:
- container:
command:
- ls
- -la
image: busybox
memory: 128
vcpus: 1
targetNodes: '0:'
- container:
command:
- echo
- test
image: busybox
memory: 128
vcpus: 1
targetNodes: '1:'
numNodes: 2
Job definition of type EKS
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const test = new aws.batch.JobDefinition("test", {
name: " tf_test_batch_job_definition_eks",
type: "container",
eksProperties: {
podProperties: {
hostNetwork: true,
containers: {
image: "public.ecr.aws/amazonlinux/amazonlinux:1",
commands: [
"sleep",
"60",
],
resources: {
limits: {
cpu: "1",
memory: "1024Mi",
},
},
},
metadata: {
labels: {
environment: "test",
},
},
},
},
});
import pulumi
import pulumi_aws as aws
test = aws.batch.JobDefinition("test",
name=" tf_test_batch_job_definition_eks",
type="container",
eks_properties={
"podProperties": {
"hostNetwork": True,
"containers": {
"image": "public.ecr.aws/amazonlinux/amazonlinux:1",
"commands": [
"sleep",
"60",
],
"resources": {
"limits": {
"cpu": "1",
"memory": "1024Mi",
},
},
},
"metadata": {
"labels": {
"environment": "test",
},
},
},
})
package main
import (
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/batch"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := batch.NewJobDefinition(ctx, "test", &batch.JobDefinitionArgs{
Name: pulumi.String(" tf_test_batch_job_definition_eks"),
Type: pulumi.String("container"),
EksProperties: &batch.JobDefinitionEksPropertiesArgs{
PodProperties: &batch.JobDefinitionEksPropertiesPodPropertiesArgs{
HostNetwork: pulumi.Bool(true),
Containers: &batch.JobDefinitionEksPropertiesPodPropertiesContainersArgs{
Image: pulumi.String("public.ecr.aws/amazonlinux/amazonlinux:1"),
Commands: pulumi.StringArray{
pulumi.String("sleep"),
pulumi.String("60"),
},
Resources: &batch.JobDefinitionEksPropertiesPodPropertiesContainersResourcesArgs{
Limits: pulumi.StringMap{
"cpu": pulumi.String("1"),
"memory": pulumi.String("1024Mi"),
},
},
},
Metadata: &batch.JobDefinitionEksPropertiesPodPropertiesMetadataArgs{
Labels: pulumi.StringMap{
"environment": pulumi.String("test"),
},
},
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var test = new Aws.Batch.JobDefinition("test", new()
{
Name = " tf_test_batch_job_definition_eks",
Type = "container",
EksProperties = new Aws.Batch.Inputs.JobDefinitionEksPropertiesArgs
{
PodProperties = new Aws.Batch.Inputs.JobDefinitionEksPropertiesPodPropertiesArgs
{
HostNetwork = true,
Containers = new Aws.Batch.Inputs.JobDefinitionEksPropertiesPodPropertiesContainersArgs
{
Image = "public.ecr.aws/amazonlinux/amazonlinux:1",
Commands = new[]
{
"sleep",
"60",
},
Resources = new Aws.Batch.Inputs.JobDefinitionEksPropertiesPodPropertiesContainersResourcesArgs
{
Limits =
{
{ "cpu", "1" },
{ "memory", "1024Mi" },
},
},
},
Metadata = new Aws.Batch.Inputs.JobDefinitionEksPropertiesPodPropertiesMetadataArgs
{
Labels =
{
{ "environment", "test" },
},
},
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.batch.JobDefinition;
import com.pulumi.aws.batch.JobDefinitionArgs;
import com.pulumi.aws.batch.inputs.JobDefinitionEksPropertiesArgs;
import com.pulumi.aws.batch.inputs.JobDefinitionEksPropertiesPodPropertiesArgs;
import com.pulumi.aws.batch.inputs.JobDefinitionEksPropertiesPodPropertiesContainersArgs;
import com.pulumi.aws.batch.inputs.JobDefinitionEksPropertiesPodPropertiesContainersResourcesArgs;
import com.pulumi.aws.batch.inputs.JobDefinitionEksPropertiesPodPropertiesMetadataArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var test = new JobDefinition("test", JobDefinitionArgs.builder()
.name(" tf_test_batch_job_definition_eks")
.type("container")
.eksProperties(JobDefinitionEksPropertiesArgs.builder()
.podProperties(JobDefinitionEksPropertiesPodPropertiesArgs.builder()
.hostNetwork(true)
.containers(JobDefinitionEksPropertiesPodPropertiesContainersArgs.builder()
.image("public.ecr.aws/amazonlinux/amazonlinux:1")
.commands(
"sleep",
"60")
.resources(JobDefinitionEksPropertiesPodPropertiesContainersResourcesArgs.builder()
.limits(Map.ofEntries(
Map.entry("cpu", "1"),
Map.entry("memory", "1024Mi")
))
.build())
.build())
.metadata(JobDefinitionEksPropertiesPodPropertiesMetadataArgs.builder()
.labels(Map.of("environment", "test"))
.build())
.build())
.build())
.build());
}
}
resources:
test:
type: aws:batch:JobDefinition
properties:
name: 'tf_test_batch_job_definition_eks'
type: container
eksProperties:
podProperties:
hostNetwork: true
containers:
image: public.ecr.aws/amazonlinux/amazonlinux:1
commands:
- sleep
- '60'
resources:
limits:
cpu: '1'
memory: 1024Mi
metadata:
labels:
environment: test
Fargate Platform Capability
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const assumeRolePolicy = aws.iam.getPolicyDocument({
statements: [{
actions: ["sts:AssumeRole"],
principals: [{
type: "Service",
identifiers: ["ecs-tasks.amazonaws.com"],
}],
}],
});
const ecsTaskExecutionRole = new aws.iam.Role("ecs_task_execution_role", {
name: "my_test_batch_exec_role",
assumeRolePolicy: assumeRolePolicy.then(assumeRolePolicy => assumeRolePolicy.json),
});
const ecsTaskExecutionRolePolicy = new aws.iam.RolePolicyAttachment("ecs_task_execution_role_policy", {
role: ecsTaskExecutionRole.name,
policyArn: "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy",
});
const test = new aws.batch.JobDefinition("test", {
name: "my_test_batch_job_definition",
type: "container",
platformCapabilities: ["FARGATE"],
containerProperties: pulumi.jsonStringify({
command: [
"echo",
"test",
],
image: "busybox",
jobRoleArn: "arn:aws:iam::123456789012:role/AWSBatchS3ReadOnly",
fargatePlatformConfiguration: {
platformVersion: "LATEST",
},
resourceRequirements: [
{
type: "VCPU",
value: "0.25",
},
{
type: "MEMORY",
value: "512",
},
],
executionRoleArn: ecsTaskExecutionRole.arn,
}),
});
import pulumi
import json
import pulumi_aws as aws
assume_role_policy = aws.iam.get_policy_document(statements=[{
"actions": ["sts:AssumeRole"],
"principals": [{
"type": "Service",
"identifiers": ["ecs-tasks.amazonaws.com"],
}],
}])
ecs_task_execution_role = aws.iam.Role("ecs_task_execution_role",
name="my_test_batch_exec_role",
assume_role_policy=assume_role_policy.json)
ecs_task_execution_role_policy = aws.iam.RolePolicyAttachment("ecs_task_execution_role_policy",
role=ecs_task_execution_role.name,
policy_arn="arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy")
test = aws.batch.JobDefinition("test",
name="my_test_batch_job_definition",
type="container",
platform_capabilities=["FARGATE"],
container_properties=pulumi.Output.json_dumps({
"command": [
"echo",
"test",
],
"image": "busybox",
"jobRoleArn": "arn:aws:iam::123456789012:role/AWSBatchS3ReadOnly",
"fargatePlatformConfiguration": {
"platformVersion": "LATEST",
},
"resourceRequirements": [
{
"type": "VCPU",
"value": "0.25",
},
{
"type": "MEMORY",
"value": "512",
},
],
"executionRoleArn": ecs_task_execution_role.arn,
}))
package main
import (
"encoding/json"
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/batch"
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
assumeRolePolicy, err := iam.GetPolicyDocument(ctx, &iam.GetPolicyDocumentArgs{
Statements: []iam.GetPolicyDocumentStatement{
{
Actions: []string{
"sts:AssumeRole",
},
Principals: []iam.GetPolicyDocumentStatementPrincipal{
{
Type: "Service",
Identifiers: []string{
"ecs-tasks.amazonaws.com",
},
},
},
},
},
}, nil)
if err != nil {
return err
}
ecsTaskExecutionRole, err := iam.NewRole(ctx, "ecs_task_execution_role", &iam.RoleArgs{
Name: pulumi.String("my_test_batch_exec_role"),
AssumeRolePolicy: pulumi.String(assumeRolePolicy.Json),
})
if err != nil {
return err
}
_, err = iam.NewRolePolicyAttachment(ctx, "ecs_task_execution_role_policy", &iam.RolePolicyAttachmentArgs{
Role: ecsTaskExecutionRole.Name,
PolicyArn: pulumi.String("arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy"),
})
if err != nil {
return err
}
_, err = batch.NewJobDefinition(ctx, "test", &batch.JobDefinitionArgs{
Name: pulumi.String("my_test_batch_job_definition"),
Type: pulumi.String("container"),
PlatformCapabilities: pulumi.StringArray{
pulumi.String("FARGATE"),
},
ContainerProperties: ecsTaskExecutionRole.Arn.ApplyT(func(arn string) (pulumi.String, error) {
var _zero pulumi.String
tmpJSON0, err := json.Marshal(map[string]interface{}{
"command": []string{
"echo",
"test",
},
"image": "busybox",
"jobRoleArn": "arn:aws:iam::123456789012:role/AWSBatchS3ReadOnly",
"fargatePlatformConfiguration": map[string]interface{}{
"platformVersion": "LATEST",
},
"resourceRequirements": []map[string]interface{}{
map[string]interface{}{
"type": "VCPU",
"value": "0.25",
},
map[string]interface{}{
"type": "MEMORY",
"value": "512",
},
},
"executionRoleArn": arn,
})
if err != nil {
return _zero, err
}
json0 := string(tmpJSON0)
return pulumi.String(json0), nil
}).(pulumi.StringOutput),
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using System.Text.Json;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var assumeRolePolicy = Aws.Iam.GetPolicyDocument.Invoke(new()
{
Statements = new[]
{
new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs
{
Actions = new[]
{
"sts:AssumeRole",
},
Principals = new[]
{
new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs
{
Type = "Service",
Identifiers = new[]
{
"ecs-tasks.amazonaws.com",
},
},
},
},
},
});
var ecsTaskExecutionRole = new Aws.Iam.Role("ecs_task_execution_role", new()
{
Name = "my_test_batch_exec_role",
AssumeRolePolicy = assumeRolePolicy.Apply(getPolicyDocumentResult => getPolicyDocumentResult.Json),
});
var ecsTaskExecutionRolePolicy = new Aws.Iam.RolePolicyAttachment("ecs_task_execution_role_policy", new()
{
Role = ecsTaskExecutionRole.Name,
PolicyArn = "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy",
});
var test = new Aws.Batch.JobDefinition("test", new()
{
Name = "my_test_batch_job_definition",
Type = "container",
PlatformCapabilities = new[]
{
"FARGATE",
},
ContainerProperties = Output.JsonSerialize(Output.Create(new Dictionary<string, object?>
{
["command"] = new[]
{
"echo",
"test",
},
["image"] = "busybox",
["jobRoleArn"] = "arn:aws:iam::123456789012:role/AWSBatchS3ReadOnly",
["fargatePlatformConfiguration"] = new Dictionary<string, object?>
{
["platformVersion"] = "LATEST",
},
["resourceRequirements"] = new[]
{
new Dictionary<string, object?>
{
["type"] = "VCPU",
["value"] = "0.25",
},
new Dictionary<string, object?>
{
["type"] = "MEMORY",
["value"] = "512",
},
},
["executionRoleArn"] = ecsTaskExecutionRole.Arn,
})),
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.iam.IamFunctions;
import com.pulumi.aws.iam.inputs.GetPolicyDocumentArgs;
import com.pulumi.aws.iam.Role;
import com.pulumi.aws.iam.RoleArgs;
import com.pulumi.aws.iam.RolePolicyAttachment;
import com.pulumi.aws.iam.RolePolicyAttachmentArgs;
import com.pulumi.aws.batch.JobDefinition;
import com.pulumi.aws.batch.JobDefinitionArgs;
import static com.pulumi.codegen.internal.Serialization.*;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
final var assumeRolePolicy = IamFunctions.getPolicyDocument(GetPolicyDocumentArgs.builder()
.statements(GetPolicyDocumentStatementArgs.builder()
.actions("sts:AssumeRole")
.principals(GetPolicyDocumentStatementPrincipalArgs.builder()
.type("Service")
.identifiers("ecs-tasks.amazonaws.com")
.build())
.build())
.build());
var ecsTaskExecutionRole = new Role("ecsTaskExecutionRole", RoleArgs.builder()
.name("my_test_batch_exec_role")
.assumeRolePolicy(assumeRolePolicy.applyValue(getPolicyDocumentResult -> getPolicyDocumentResult.json()))
.build());
var ecsTaskExecutionRolePolicy = new RolePolicyAttachment("ecsTaskExecutionRolePolicy", RolePolicyAttachmentArgs.builder()
.role(ecsTaskExecutionRole.name())
.policyArn("arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy")
.build());
var test = new JobDefinition("test", JobDefinitionArgs.builder()
.name("my_test_batch_job_definition")
.type("container")
.platformCapabilities("FARGATE")
.containerProperties(ecsTaskExecutionRole.arn().applyValue(arn -> serializeJson(
jsonObject(
jsonProperty("command", jsonArray(
"echo",
"test"
)),
jsonProperty("image", "busybox"),
jsonProperty("jobRoleArn", "arn:aws:iam::123456789012:role/AWSBatchS3ReadOnly"),
jsonProperty("fargatePlatformConfiguration", jsonObject(
jsonProperty("platformVersion", "LATEST")
)),
jsonProperty("resourceRequirements", jsonArray(
jsonObject(
jsonProperty("type", "VCPU"),
jsonProperty("value", "0.25")
),
jsonObject(
jsonProperty("type", "MEMORY"),
jsonProperty("value", "512")
)
)),
jsonProperty("executionRoleArn", arn)
))))
.build());
}
}
resources:
ecsTaskExecutionRole:
type: aws:iam:Role
name: ecs_task_execution_role
properties:
name: my_test_batch_exec_role
assumeRolePolicy: ${assumeRolePolicy.json}
ecsTaskExecutionRolePolicy:
type: aws:iam:RolePolicyAttachment
name: ecs_task_execution_role_policy
properties:
role: ${ecsTaskExecutionRole.name}
policyArn: arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy
test:
type: aws:batch:JobDefinition
properties:
name: my_test_batch_job_definition
type: container
platformCapabilities:
- FARGATE
containerProperties:
fn::toJSON:
command:
- echo
- test
image: busybox
jobRoleArn: arn:aws:iam::123456789012:role/AWSBatchS3ReadOnly
fargatePlatformConfiguration:
platformVersion: LATEST
resourceRequirements:
- type: VCPU
value: '0.25'
- type: MEMORY
value: '512'
executionRoleArn: ${ecsTaskExecutionRole.arn}
variables:
assumeRolePolicy:
fn::invoke:
Function: aws:iam:getPolicyDocument
Arguments:
statements:
- actions:
- sts:AssumeRole
principals:
- type: Service
identifiers:
- ecs-tasks.amazonaws.com
Create JobDefinition Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new JobDefinition(name: string, args: JobDefinitionArgs, opts?: CustomResourceOptions);
@overload
def JobDefinition(resource_name: str,
args: JobDefinitionArgs,
opts: Optional[ResourceOptions] = None)
@overload
def JobDefinition(resource_name: str,
opts: Optional[ResourceOptions] = None,
type: Optional[str] = None,
platform_capabilities: Optional[Sequence[str]] = None,
eks_properties: Optional[JobDefinitionEksPropertiesArgs] = None,
name: Optional[str] = None,
node_properties: Optional[str] = None,
parameters: Optional[Mapping[str, str]] = None,
container_properties: Optional[str] = None,
propagate_tags: Optional[bool] = None,
retry_strategy: Optional[JobDefinitionRetryStrategyArgs] = None,
scheduling_priority: Optional[int] = None,
tags: Optional[Mapping[str, str]] = None,
timeout: Optional[JobDefinitionTimeoutArgs] = None,
deregister_on_new_revision: Optional[bool] = None)
func NewJobDefinition(ctx *Context, name string, args JobDefinitionArgs, opts ...ResourceOption) (*JobDefinition, error)
public JobDefinition(string name, JobDefinitionArgs args, CustomResourceOptions? opts = null)
public JobDefinition(String name, JobDefinitionArgs args)
public JobDefinition(String name, JobDefinitionArgs args, CustomResourceOptions options)
type: aws:batch:JobDefinition
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args JobDefinitionArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args JobDefinitionArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args JobDefinitionArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args JobDefinitionArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args JobDefinitionArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var jobDefinitionResource = new Aws.Batch.JobDefinition("jobDefinitionResource", new()
{
Type = "string",
PlatformCapabilities = new[]
{
"string",
},
EksProperties = new Aws.Batch.Inputs.JobDefinitionEksPropertiesArgs
{
PodProperties = new Aws.Batch.Inputs.JobDefinitionEksPropertiesPodPropertiesArgs
{
Containers = new Aws.Batch.Inputs.JobDefinitionEksPropertiesPodPropertiesContainersArgs
{
Image = "string",
Args = new[]
{
"string",
},
Commands = new[]
{
"string",
},
Envs = new[]
{
new Aws.Batch.Inputs.JobDefinitionEksPropertiesPodPropertiesContainersEnvArgs
{
Name = "string",
Value = "string",
},
},
ImagePullPolicy = "string",
Name = "string",
Resources = new Aws.Batch.Inputs.JobDefinitionEksPropertiesPodPropertiesContainersResourcesArgs
{
Limits =
{
{ "string", "string" },
},
Requests =
{
{ "string", "string" },
},
},
SecurityContext = new Aws.Batch.Inputs.JobDefinitionEksPropertiesPodPropertiesContainersSecurityContextArgs
{
Privileged = false,
ReadOnlyRootFileSystem = false,
RunAsGroup = 0,
RunAsNonRoot = false,
RunAsUser = 0,
},
VolumeMounts = new[]
{
new Aws.Batch.Inputs.JobDefinitionEksPropertiesPodPropertiesContainersVolumeMountArgs
{
MountPath = "string",
Name = "string",
ReadOnly = false,
},
},
},
DnsPolicy = "string",
HostNetwork = false,
Metadata = new Aws.Batch.Inputs.JobDefinitionEksPropertiesPodPropertiesMetadataArgs
{
Labels =
{
{ "string", "string" },
},
},
ServiceAccountName = "string",
Volumes = new[]
{
new Aws.Batch.Inputs.JobDefinitionEksPropertiesPodPropertiesVolumeArgs
{
EmptyDir = new Aws.Batch.Inputs.JobDefinitionEksPropertiesPodPropertiesVolumeEmptyDirArgs
{
SizeLimit = "string",
Medium = "string",
},
HostPath = new Aws.Batch.Inputs.JobDefinitionEksPropertiesPodPropertiesVolumeHostPathArgs
{
Path = "string",
},
Name = "string",
Secret = new Aws.Batch.Inputs.JobDefinitionEksPropertiesPodPropertiesVolumeSecretArgs
{
SecretName = "string",
Optional = false,
},
},
},
},
},
Name = "string",
NodeProperties = "string",
Parameters =
{
{ "string", "string" },
},
ContainerProperties = "string",
PropagateTags = false,
RetryStrategy = new Aws.Batch.Inputs.JobDefinitionRetryStrategyArgs
{
Attempts = 0,
EvaluateOnExits = new[]
{
new Aws.Batch.Inputs.JobDefinitionRetryStrategyEvaluateOnExitArgs
{
Action = "string",
OnExitCode = "string",
OnReason = "string",
OnStatusReason = "string",
},
},
},
SchedulingPriority = 0,
Tags =
{
{ "string", "string" },
},
Timeout = new Aws.Batch.Inputs.JobDefinitionTimeoutArgs
{
AttemptDurationSeconds = 0,
},
DeregisterOnNewRevision = false,
});
example, err := batch.NewJobDefinition(ctx, "jobDefinitionResource", &batch.JobDefinitionArgs{
Type: pulumi.String("string"),
PlatformCapabilities: pulumi.StringArray{
pulumi.String("string"),
},
EksProperties: &batch.JobDefinitionEksPropertiesArgs{
PodProperties: &batch.JobDefinitionEksPropertiesPodPropertiesArgs{
Containers: &batch.JobDefinitionEksPropertiesPodPropertiesContainersArgs{
Image: pulumi.String("string"),
Args: pulumi.StringArray{
pulumi.String("string"),
},
Commands: pulumi.StringArray{
pulumi.String("string"),
},
Envs: batch.JobDefinitionEksPropertiesPodPropertiesContainersEnvArray{
&batch.JobDefinitionEksPropertiesPodPropertiesContainersEnvArgs{
Name: pulumi.String("string"),
Value: pulumi.String("string"),
},
},
ImagePullPolicy: pulumi.String("string"),
Name: pulumi.String("string"),
Resources: &batch.JobDefinitionEksPropertiesPodPropertiesContainersResourcesArgs{
Limits: pulumi.StringMap{
"string": pulumi.String("string"),
},
Requests: pulumi.StringMap{
"string": pulumi.String("string"),
},
},
SecurityContext: &batch.JobDefinitionEksPropertiesPodPropertiesContainersSecurityContextArgs{
Privileged: pulumi.Bool(false),
ReadOnlyRootFileSystem: pulumi.Bool(false),
RunAsGroup: pulumi.Int(0),
RunAsNonRoot: pulumi.Bool(false),
RunAsUser: pulumi.Int(0),
},
VolumeMounts: batch.JobDefinitionEksPropertiesPodPropertiesContainersVolumeMountArray{
&batch.JobDefinitionEksPropertiesPodPropertiesContainersVolumeMountArgs{
MountPath: pulumi.String("string"),
Name: pulumi.String("string"),
ReadOnly: pulumi.Bool(false),
},
},
},
DnsPolicy: pulumi.String("string"),
HostNetwork: pulumi.Bool(false),
Metadata: &batch.JobDefinitionEksPropertiesPodPropertiesMetadataArgs{
Labels: pulumi.StringMap{
"string": pulumi.String("string"),
},
},
ServiceAccountName: pulumi.String("string"),
Volumes: batch.JobDefinitionEksPropertiesPodPropertiesVolumeArray{
&batch.JobDefinitionEksPropertiesPodPropertiesVolumeArgs{
EmptyDir: &batch.JobDefinitionEksPropertiesPodPropertiesVolumeEmptyDirArgs{
SizeLimit: pulumi.String("string"),
Medium: pulumi.String("string"),
},
HostPath: &batch.JobDefinitionEksPropertiesPodPropertiesVolumeHostPathArgs{
Path: pulumi.String("string"),
},
Name: pulumi.String("string"),
Secret: &batch.JobDefinitionEksPropertiesPodPropertiesVolumeSecretArgs{
SecretName: pulumi.String("string"),
Optional: pulumi.Bool(false),
},
},
},
},
},
Name: pulumi.String("string"),
NodeProperties: pulumi.String("string"),
Parameters: pulumi.StringMap{
"string": pulumi.String("string"),
},
ContainerProperties: pulumi.String("string"),
PropagateTags: pulumi.Bool(false),
RetryStrategy: &batch.JobDefinitionRetryStrategyArgs{
Attempts: pulumi.Int(0),
EvaluateOnExits: batch.JobDefinitionRetryStrategyEvaluateOnExitArray{
&batch.JobDefinitionRetryStrategyEvaluateOnExitArgs{
Action: pulumi.String("string"),
OnExitCode: pulumi.String("string"),
OnReason: pulumi.String("string"),
OnStatusReason: pulumi.String("string"),
},
},
},
SchedulingPriority: pulumi.Int(0),
Tags: pulumi.StringMap{
"string": pulumi.String("string"),
},
Timeout: &batch.JobDefinitionTimeoutArgs{
AttemptDurationSeconds: pulumi.Int(0),
},
DeregisterOnNewRevision: pulumi.Bool(false),
})
var jobDefinitionResource = new JobDefinition("jobDefinitionResource", JobDefinitionArgs.builder()
.type("string")
.platformCapabilities("string")
.eksProperties(JobDefinitionEksPropertiesArgs.builder()
.podProperties(JobDefinitionEksPropertiesPodPropertiesArgs.builder()
.containers(JobDefinitionEksPropertiesPodPropertiesContainersArgs.builder()
.image("string")
.args("string")
.commands("string")
.envs(JobDefinitionEksPropertiesPodPropertiesContainersEnvArgs.builder()
.name("string")
.value("string")
.build())
.imagePullPolicy("string")
.name("string")
.resources(JobDefinitionEksPropertiesPodPropertiesContainersResourcesArgs.builder()
.limits(Map.of("string", "string"))
.requests(Map.of("string", "string"))
.build())
.securityContext(JobDefinitionEksPropertiesPodPropertiesContainersSecurityContextArgs.builder()
.privileged(false)
.readOnlyRootFileSystem(false)
.runAsGroup(0)
.runAsNonRoot(false)
.runAsUser(0)
.build())
.volumeMounts(JobDefinitionEksPropertiesPodPropertiesContainersVolumeMountArgs.builder()
.mountPath("string")
.name("string")
.readOnly(false)
.build())
.build())
.dnsPolicy("string")
.hostNetwork(false)
.metadata(JobDefinitionEksPropertiesPodPropertiesMetadataArgs.builder()
.labels(Map.of("string", "string"))
.build())
.serviceAccountName("string")
.volumes(JobDefinitionEksPropertiesPodPropertiesVolumeArgs.builder()
.emptyDir(JobDefinitionEksPropertiesPodPropertiesVolumeEmptyDirArgs.builder()
.sizeLimit("string")
.medium("string")
.build())
.hostPath(JobDefinitionEksPropertiesPodPropertiesVolumeHostPathArgs.builder()
.path("string")
.build())
.name("string")
.secret(JobDefinitionEksPropertiesPodPropertiesVolumeSecretArgs.builder()
.secretName("string")
.optional(false)
.build())
.build())
.build())
.build())
.name("string")
.nodeProperties("string")
.parameters(Map.of("string", "string"))
.containerProperties("string")
.propagateTags(false)
.retryStrategy(JobDefinitionRetryStrategyArgs.builder()
.attempts(0)
.evaluateOnExits(JobDefinitionRetryStrategyEvaluateOnExitArgs.builder()
.action("string")
.onExitCode("string")
.onReason("string")
.onStatusReason("string")
.build())
.build())
.schedulingPriority(0)
.tags(Map.of("string", "string"))
.timeout(JobDefinitionTimeoutArgs.builder()
.attemptDurationSeconds(0)
.build())
.deregisterOnNewRevision(false)
.build());
job_definition_resource = aws.batch.JobDefinition("jobDefinitionResource",
type="string",
platform_capabilities=["string"],
eks_properties={
"podProperties": {
"containers": {
"image": "string",
"args": ["string"],
"commands": ["string"],
"envs": [{
"name": "string",
"value": "string",
}],
"imagePullPolicy": "string",
"name": "string",
"resources": {
"limits": {
"string": "string",
},
"requests": {
"string": "string",
},
},
"securityContext": {
"privileged": False,
"readOnlyRootFileSystem": False,
"runAsGroup": 0,
"runAsNonRoot": False,
"runAsUser": 0,
},
"volumeMounts": [{
"mountPath": "string",
"name": "string",
"readOnly": False,
}],
},
"dnsPolicy": "string",
"hostNetwork": False,
"metadata": {
"labels": {
"string": "string",
},
},
"serviceAccountName": "string",
"volumes": [{
"emptyDir": {
"sizeLimit": "string",
"medium": "string",
},
"hostPath": {
"path": "string",
},
"name": "string",
"secret": {
"secretName": "string",
"optional": False,
},
}],
},
},
name="string",
node_properties="string",
parameters={
"string": "string",
},
container_properties="string",
propagate_tags=False,
retry_strategy={
"attempts": 0,
"evaluateOnExits": [{
"action": "string",
"onExitCode": "string",
"onReason": "string",
"onStatusReason": "string",
}],
},
scheduling_priority=0,
tags={
"string": "string",
},
timeout={
"attemptDurationSeconds": 0,
},
deregister_on_new_revision=False)
const jobDefinitionResource = new aws.batch.JobDefinition("jobDefinitionResource", {
type: "string",
platformCapabilities: ["string"],
eksProperties: {
podProperties: {
containers: {
image: "string",
args: ["string"],
commands: ["string"],
envs: [{
name: "string",
value: "string",
}],
imagePullPolicy: "string",
name: "string",
resources: {
limits: {
string: "string",
},
requests: {
string: "string",
},
},
securityContext: {
privileged: false,
readOnlyRootFileSystem: false,
runAsGroup: 0,
runAsNonRoot: false,
runAsUser: 0,
},
volumeMounts: [{
mountPath: "string",
name: "string",
readOnly: false,
}],
},
dnsPolicy: "string",
hostNetwork: false,
metadata: {
labels: {
string: "string",
},
},
serviceAccountName: "string",
volumes: [{
emptyDir: {
sizeLimit: "string",
medium: "string",
},
hostPath: {
path: "string",
},
name: "string",
secret: {
secretName: "string",
optional: false,
},
}],
},
},
name: "string",
nodeProperties: "string",
parameters: {
string: "string",
},
containerProperties: "string",
propagateTags: false,
retryStrategy: {
attempts: 0,
evaluateOnExits: [{
action: "string",
onExitCode: "string",
onReason: "string",
onStatusReason: "string",
}],
},
schedulingPriority: 0,
tags: {
string: "string",
},
timeout: {
attemptDurationSeconds: 0,
},
deregisterOnNewRevision: false,
});
type: aws:batch:JobDefinition
properties:
containerProperties: string
deregisterOnNewRevision: false
eksProperties:
podProperties:
containers:
args:
- string
commands:
- string
envs:
- name: string
value: string
image: string
imagePullPolicy: string
name: string
resources:
limits:
string: string
requests:
string: string
securityContext:
privileged: false
readOnlyRootFileSystem: false
runAsGroup: 0
runAsNonRoot: false
runAsUser: 0
volumeMounts:
- mountPath: string
name: string
readOnly: false
dnsPolicy: string
hostNetwork: false
metadata:
labels:
string: string
serviceAccountName: string
volumes:
- emptyDir:
medium: string
sizeLimit: string
hostPath:
path: string
name: string
secret:
optional: false
secretName: string
name: string
nodeProperties: string
parameters:
string: string
platformCapabilities:
- string
propagateTags: false
retryStrategy:
attempts: 0
evaluateOnExits:
- action: string
onExitCode: string
onReason: string
onStatusReason: string
schedulingPriority: 0
tags:
string: string
timeout:
attemptDurationSeconds: 0
type: string
JobDefinition Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
The JobDefinition resource accepts the following input properties:
C#
- Type string - The type of job definition. Must be container or multinode. The following arguments are optional:
- ContainerProperties string - A valid container properties provided as a single valid JSON document. This parameter is only valid if the type parameter is container.
- DeregisterOnNewRevision bool - When updating a job definition a new revision is created. This parameter determines if the previous version is deregistered (INACTIVE) or left ACTIVE. Defaults to true.
- EksProperties JobDefinitionEksProperties - A valid eks properties. This parameter is only valid if the type parameter is container.
- Name string - Specifies the name of the job definition.
- NodeProperties string - A valid node properties provided as a single valid JSON document. This parameter is required if the type parameter is multinode.
- Parameters Dictionary<string, string> - Specifies the parameter substitution placeholders to set in the job definition.
- PlatformCapabilities List<string> - The platform capabilities required by the job definition. If no value is specified, it defaults to EC2. To run the job on Fargate resources, specify FARGATE.
- PropagateTags bool - Specifies whether to propagate the tags from the job definition to the corresponding Amazon ECS task. Default is false.
- RetryStrategy JobDefinitionRetryStrategy - Specifies the retry strategy to use for failed jobs that are submitted with this job definition. Maximum number of retry_strategy is 1. Defined below.
- SchedulingPriority int - The scheduling priority of the job definition. This only affects jobs in job queues with a fair share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower scheduling priority. Allowed values 0 through 9999.
- Tags Dictionary<string, string> - Key-value map of resource tags. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- Timeout JobDefinitionTimeout - Specifies the timeout for jobs so that if a job runs longer, AWS Batch terminates the job. Maximum number of timeout is 1. Defined below.
Go
- Type string - The type of job definition. Must be container or multinode. The following arguments are optional:
- ContainerProperties string - A valid container properties provided as a single valid JSON document. This parameter is only valid if the type parameter is container.
- DeregisterOnNewRevision bool - When updating a job definition a new revision is created. This parameter determines if the previous version is deregistered (INACTIVE) or left ACTIVE. Defaults to true.
- EksProperties JobDefinitionEksPropertiesArgs - A valid eks properties. This parameter is only valid if the type parameter is container.
- Name string - Specifies the name of the job definition.
- NodeProperties string - A valid node properties provided as a single valid JSON document. This parameter is required if the type parameter is multinode.
- Parameters map[string]string - Specifies the parameter substitution placeholders to set in the job definition.
- PlatformCapabilities []string - The platform capabilities required by the job definition. If no value is specified, it defaults to EC2. To run the job on Fargate resources, specify FARGATE.
- PropagateTags bool - Specifies whether to propagate the tags from the job definition to the corresponding Amazon ECS task. Default is false.
- RetryStrategy JobDefinitionRetryStrategyArgs - Specifies the retry strategy to use for failed jobs that are submitted with this job definition. Maximum number of retry_strategy is 1. Defined below.
- SchedulingPriority int - The scheduling priority of the job definition. This only affects jobs in job queues with a fair share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower scheduling priority. Allowed values 0 through 9999.
- Tags map[string]string - Key-value map of resource tags. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- Timeout JobDefinitionTimeoutArgs - Specifies the timeout for jobs so that if a job runs longer, AWS Batch terminates the job. Maximum number of timeout is 1. Defined below.
Java
- type String - The type of job definition. Must be container or multinode. The following arguments are optional:
- containerProperties String - A valid container properties provided as a single valid JSON document. This parameter is only valid if the type parameter is container.
- deregisterOnNewRevision Boolean - When updating a job definition a new revision is created. This parameter determines if the previous version is deregistered (INACTIVE) or left ACTIVE. Defaults to true.
- eksProperties JobDefinitionEksProperties - A valid eks properties. This parameter is only valid if the type parameter is container.
- name String - Specifies the name of the job definition.
- nodeProperties String - A valid node properties provided as a single valid JSON document. This parameter is required if the type parameter is multinode.
- parameters Map<String,String> - Specifies the parameter substitution placeholders to set in the job definition.
- platformCapabilities List<String> - The platform capabilities required by the job definition. If no value is specified, it defaults to EC2. To run the job on Fargate resources, specify FARGATE.
- propagateTags Boolean - Specifies whether to propagate the tags from the job definition to the corresponding Amazon ECS task. Default is false.
- retryStrategy JobDefinitionRetryStrategy - Specifies the retry strategy to use for failed jobs that are submitted with this job definition. Maximum number of retry_strategy is 1. Defined below.
- schedulingPriority Integer - The scheduling priority of the job definition. This only affects jobs in job queues with a fair share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower scheduling priority. Allowed values 0 through 9999.
- tags Map<String,String> - Key-value map of resource tags. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- timeout JobDefinitionTimeout - Specifies the timeout for jobs so that if a job runs longer, AWS Batch terminates the job. Maximum number of timeout is 1. Defined below.
TypeScript
- type string - The type of job definition. Must be container or multinode. The following arguments are optional:
- containerProperties string - A valid container properties provided as a single valid JSON document. This parameter is only valid if the type parameter is container.
- deregisterOnNewRevision boolean - When updating a job definition a new revision is created. This parameter determines if the previous version is deregistered (INACTIVE) or left ACTIVE. Defaults to true.
- eksProperties JobDefinitionEksProperties - A valid eks properties. This parameter is only valid if the type parameter is container.
- name string - Specifies the name of the job definition.
- nodeProperties string - A valid node properties provided as a single valid JSON document. This parameter is required if the type parameter is multinode.
- parameters {[key: string]: string} - Specifies the parameter substitution placeholders to set in the job definition.
- platformCapabilities string[] - The platform capabilities required by the job definition. If no value is specified, it defaults to EC2. To run the job on Fargate resources, specify FARGATE.
- propagateTags boolean - Specifies whether to propagate the tags from the job definition to the corresponding Amazon ECS task. Default is false.
- retryStrategy JobDefinitionRetryStrategy - Specifies the retry strategy to use for failed jobs that are submitted with this job definition. Maximum number of retry_strategy is 1. Defined below.
- schedulingPriority number - The scheduling priority of the job definition. This only affects jobs in job queues with a fair share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower scheduling priority. Allowed values 0 through 9999.
- tags {[key: string]: string} - Key-value map of resource tags. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- timeout JobDefinitionTimeout - Specifies the timeout for jobs so that if a job runs longer, AWS Batch terminates the job. Maximum number of timeout is 1. Defined below.
Python
- type str - The type of job definition. Must be container or multinode. The following arguments are optional:
- container_properties str - A valid container properties provided as a single valid JSON document. This parameter is only valid if the type parameter is container.
- deregister_on_new_revision bool - When updating a job definition a new revision is created. This parameter determines if the previous version is deregistered (INACTIVE) or left ACTIVE. Defaults to true.
- eks_properties JobDefinitionEksPropertiesArgs - A valid eks properties. This parameter is only valid if the type parameter is container.
- name str - Specifies the name of the job definition.
- node_properties str - A valid node properties provided as a single valid JSON document. This parameter is required if the type parameter is multinode.
- parameters Mapping[str, str] - Specifies the parameter substitution placeholders to set in the job definition.
- platform_capabilities Sequence[str] - The platform capabilities required by the job definition. If no value is specified, it defaults to EC2. To run the job on Fargate resources, specify FARGATE.
- propagate_tags bool - Specifies whether to propagate the tags from the job definition to the corresponding Amazon ECS task. Default is false.
- retry_strategy JobDefinitionRetryStrategyArgs - Specifies the retry strategy to use for failed jobs that are submitted with this job definition. Maximum number of retry_strategy is 1. Defined below.
- scheduling_priority int - The scheduling priority of the job definition. This only affects jobs in job queues with a fair share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower scheduling priority. Allowed values 0 through 9999.
- tags Mapping[str, str] - Key-value map of resource tags. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- timeout JobDefinitionTimeoutArgs - Specifies the timeout for jobs so that if a job runs longer, AWS Batch terminates the job. Maximum number of timeout is 1. Defined below.
YAML
- type String - The type of job definition. Must be container or multinode. The following arguments are optional:
- containerProperties String - A valid container properties provided as a single valid JSON document. This parameter is only valid if the type parameter is container.
- deregisterOnNewRevision Boolean - When updating a job definition a new revision is created. This parameter determines if the previous version is deregistered (INACTIVE) or left ACTIVE. Defaults to true.
- eksProperties Property Map - A valid eks properties. This parameter is only valid if the type parameter is container.
- name String - Specifies the name of the job definition.
- nodeProperties String - A valid node properties provided as a single valid JSON document. This parameter is required if the type parameter is multinode.
- parameters Map<String> - Specifies the parameter substitution placeholders to set in the job definition.
- platformCapabilities List<String> - The platform capabilities required by the job definition. If no value is specified, it defaults to EC2. To run the job on Fargate resources, specify FARGATE.
- propagateTags Boolean - Specifies whether to propagate the tags from the job definition to the corresponding Amazon ECS task. Default is false.
- retryStrategy Property Map - Specifies the retry strategy to use for failed jobs that are submitted with this job definition. Maximum number of retry_strategy is 1. Defined below.
- schedulingPriority Number - The scheduling priority of the job definition. This only affects jobs in job queues with a fair share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower scheduling priority. Allowed values 0 through 9999.
- tags Map<String> - Key-value map of resource tags. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- timeout Property Map - Specifies the timeout for jobs so that if a job runs longer, AWS Batch terminates the job. Maximum number of timeout is 1. Defined below.
Outputs
All input properties are implicitly available as output properties. Additionally, the JobDefinition resource produces the following output properties:
C#
- Arn string - The Amazon Resource Name of the job definition, includes revision (:#).
- ArnPrefix string - The ARN without the revision number.
- Id string - The provider-assigned unique ID for this managed resource.
- Revision int - The revision of the job definition.
- TagsAll Dictionary<string, string> - A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
Go
- Arn string - The Amazon Resource Name of the job definition, includes revision (:#).
- ArnPrefix string - The ARN without the revision number.
- Id string - The provider-assigned unique ID for this managed resource.
- Revision int - The revision of the job definition.
- TagsAll map[string]string - A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
Java
- arn String - The Amazon Resource Name of the job definition, includes revision (:#).
- arnPrefix String - The ARN without the revision number.
- id String - The provider-assigned unique ID for this managed resource.
- revision Integer - The revision of the job definition.
- tagsAll Map<String,String> - A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
TypeScript
- arn string - The Amazon Resource Name of the job definition, includes revision (:#).
- arnPrefix string - The ARN without the revision number.
- id string - The provider-assigned unique ID for this managed resource.
- revision number - The revision of the job definition.
- tagsAll {[key: string]: string} - A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
Python
- arn str - The Amazon Resource Name of the job definition, includes revision (:#).
- arn_prefix str - The ARN without the revision number.
- id str - The provider-assigned unique ID for this managed resource.
- revision int - The revision of the job definition.
- tags_all Mapping[str, str] - A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
YAML
- arn String - The Amazon Resource Name of the job definition, includes revision (:#).
- arnPrefix String - The ARN without the revision number.
- id String - The provider-assigned unique ID for this managed resource.
- revision Number - The revision of the job definition.
- tagsAll Map<String> - A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
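For example, these outputs can be exported from a program that declares the job definition (a sketch; test refers to the JobDefinition instance from the examples above):
export const jobDefinitionArn = test.arn;             // full ARN, including the :revision suffix
export const jobDefinitionArnPrefix = test.arnPrefix; // ARN without the revision number
export const jobDefinitionRevision = test.revision;   // numeric revision of this registration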
Look up Existing JobDefinition Resource
Get an existing JobDefinition resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: JobDefinitionState, opts?: CustomResourceOptions): JobDefinition
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
arn: Optional[str] = None,
arn_prefix: Optional[str] = None,
container_properties: Optional[str] = None,
deregister_on_new_revision: Optional[bool] = None,
eks_properties: Optional[JobDefinitionEksPropertiesArgs] = None,
name: Optional[str] = None,
node_properties: Optional[str] = None,
parameters: Optional[Mapping[str, str]] = None,
platform_capabilities: Optional[Sequence[str]] = None,
propagate_tags: Optional[bool] = None,
retry_strategy: Optional[JobDefinitionRetryStrategyArgs] = None,
revision: Optional[int] = None,
scheduling_priority: Optional[int] = None,
tags: Optional[Mapping[str, str]] = None,
tags_all: Optional[Mapping[str, str]] = None,
timeout: Optional[JobDefinitionTimeoutArgs] = None,
type: Optional[str] = None) -> JobDefinition
func GetJobDefinition(ctx *Context, name string, id IDInput, state *JobDefinitionState, opts ...ResourceOption) (*JobDefinition, error)
public static JobDefinition Get(string name, Input<string> id, JobDefinitionState? state, CustomResourceOptions? opts = null)
public static JobDefinition get(String name, Output<String> id, JobDefinitionState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
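As an illustration, the get function can rehydrate an existing job definition in TypeScript; the ARN below is a placeholder, and the looked-up resource is only read (nothing is created or modified):
import * as aws from "@pulumi/aws";
// Look up an existing job definition by logical name and ID (the ID is the job definition ARN).
const existing = aws.batch.JobDefinition.get(
    "existing",
    "arn:aws:batch:us-east-1:123456789012:job-definition/my_test_batch_job_definition:1",
);
// The resolved state is available as outputs, e.g. its revision:
export const existingRevision = existing.revision;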
- name - The unique name of the resulting resource.
- id - The unique provider ID of the resource to lookup.
- state - Any extra arguments used during the lookup.
- opts - A bag of options that control this resource's behavior.
- resource_name - The unique name of the resulting resource.
- id - The unique provider ID of the resource to lookup.
- name - The unique name of the resulting resource.
- id - The unique provider ID of the resource to lookup.
- state - Any extra arguments used during the lookup.
- opts - A bag of options that control this resource's behavior.
- name - The unique name of the resulting resource.
- id - The unique provider ID of the resource to lookup.
- state - Any extra arguments used during the lookup.
- opts - A bag of options that control this resource's behavior.
- name - The unique name of the resulting resource.
- id - The unique provider ID of the resource to lookup.
- state - Any extra arguments used during the lookup.
- opts - A bag of options that control this resource's behavior.
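For example, an existing job definition can be adopted into a program by its ARN. A minimal TypeScript sketch, assuming the ARN is supplied through stack configuration (the jobDefinitionArn config key is illustrative; the ID expected by get is the job definition ARN, the same identifier used by pulumi import below):
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

const config = new pulumi.Config();
// ARN of a job definition that already exists, e.g. created outside this stack.
const existingArn = config.require("jobDefinitionArn");

// Adopt the existing job definition's state under the logical name "existing".
const existing = aws.batch.JobDefinition.get("existing", existingArn);

export const existingRevision = existing.revision;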
The following state arguments are supported:
- Arn string - The Amazon Resource Name of the job definition, includes revision (:#).
- ArnPrefix string - The ARN without the revision number.
- ContainerProperties string - Valid container properties provided as a single JSON document. This parameter is only valid if the type parameter is container.
- DeregisterOnNewRevision bool - When updating a job definition a new revision is created. This parameter determines if the previous version is deregistered (INACTIVE) or left ACTIVE. Defaults to true.
- EksProperties JobDefinitionEksProperties - Valid EKS properties. This parameter is only valid if the type parameter is container.
- Name string - Specifies the name of the job definition.
- NodeProperties string - Valid node properties provided as a single JSON document. This parameter is required if the type parameter is multinode.
- Parameters Dictionary<string, string> - Specifies the parameter substitution placeholders to set in the job definition.
- PlatformCapabilities List<string> - The platform capabilities required by the job definition. If no value is specified, it defaults to EC2. To run the job on Fargate resources, specify FARGATE.
- PropagateTags bool - Specifies whether to propagate the tags from the job definition to the corresponding Amazon ECS task. Default is false.
- RetryStrategy JobDefinitionRetryStrategy - Specifies the retry strategy to use for failed jobs that are submitted with this job definition. Maximum number of retry_strategy is 1. Defined below.
- Revision int - The revision of the job definition.
- SchedulingPriority int - The scheduling priority of the job definition. This only affects jobs in job queues with a fair share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower scheduling priority. Allowed values 0 through 9999.
- Tags Dictionary<string, string> - Key-value map of resource tags. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- TagsAll Dictionary<string, string> - A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- Timeout JobDefinitionTimeout - Specifies the timeout for jobs so that if a job runs longer, AWS Batch terminates the job. Maximum number of timeout is 1. Defined below.
- Type string - The type of job definition. Must be container or multinode. The following arguments are optional:
- Arn string - The Amazon Resource Name of the job definition, includes revision (:#).
- ArnPrefix string - The ARN without the revision number.
- ContainerProperties string - Valid container properties provided as a single JSON document. This parameter is only valid if the type parameter is container.
- DeregisterOnNewRevision bool - When updating a job definition a new revision is created. This parameter determines if the previous version is deregistered (INACTIVE) or left ACTIVE. Defaults to true.
- EksProperties JobDefinitionEksPropertiesArgs - Valid EKS properties. This parameter is only valid if the type parameter is container.
- Name string - Specifies the name of the job definition.
- NodeProperties string - Valid node properties provided as a single JSON document. This parameter is required if the type parameter is multinode.
- Parameters map[string]string - Specifies the parameter substitution placeholders to set in the job definition.
- PlatformCapabilities []string - The platform capabilities required by the job definition. If no value is specified, it defaults to EC2. To run the job on Fargate resources, specify FARGATE.
- PropagateTags bool - Specifies whether to propagate the tags from the job definition to the corresponding Amazon ECS task. Default is false.
- RetryStrategy JobDefinitionRetryStrategyArgs - Specifies the retry strategy to use for failed jobs that are submitted with this job definition. Maximum number of retry_strategy is 1. Defined below.
- Revision int - The revision of the job definition.
- SchedulingPriority int - The scheduling priority of the job definition. This only affects jobs in job queues with a fair share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower scheduling priority. Allowed values 0 through 9999.
- Tags map[string]string - Key-value map of resource tags. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- TagsAll map[string]string - A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- Timeout JobDefinitionTimeoutArgs - Specifies the timeout for jobs so that if a job runs longer, AWS Batch terminates the job. Maximum number of timeout is 1. Defined below.
- Type string - The type of job definition. Must be container or multinode. The following arguments are optional:
- arn String - The Amazon Resource Name of the job definition, includes revision (:#).
- arnPrefix String - The ARN without the revision number.
- containerProperties String - Valid container properties provided as a single JSON document. This parameter is only valid if the type parameter is container.
- deregisterOnNewRevision Boolean - When updating a job definition a new revision is created. This parameter determines if the previous version is deregistered (INACTIVE) or left ACTIVE. Defaults to true.
- eksProperties JobDefinitionEksProperties - Valid EKS properties. This parameter is only valid if the type parameter is container.
- name String - Specifies the name of the job definition.
- nodeProperties String - Valid node properties provided as a single JSON document. This parameter is required if the type parameter is multinode.
- parameters Map<String,String> - Specifies the parameter substitution placeholders to set in the job definition.
- platformCapabilities List<String> - The platform capabilities required by the job definition. If no value is specified, it defaults to EC2. To run the job on Fargate resources, specify FARGATE.
- propagateTags Boolean - Specifies whether to propagate the tags from the job definition to the corresponding Amazon ECS task. Default is false.
- retryStrategy JobDefinitionRetryStrategy - Specifies the retry strategy to use for failed jobs that are submitted with this job definition. Maximum number of retry_strategy is 1. Defined below.
- revision Integer - The revision of the job definition.
- schedulingPriority Integer - The scheduling priority of the job definition. This only affects jobs in job queues with a fair share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower scheduling priority. Allowed values 0 through 9999.
- tags Map<String,String> - Key-value map of resource tags. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- tagsAll Map<String,String> - A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- timeout JobDefinitionTimeout - Specifies the timeout for jobs so that if a job runs longer, AWS Batch terminates the job. Maximum number of timeout is 1. Defined below.
- type String - The type of job definition. Must be container or multinode. The following arguments are optional:
- arn string - The Amazon Resource Name of the job definition, includes revision (:#).
- arnPrefix string - The ARN without the revision number.
- containerProperties string - Valid container properties provided as a single JSON document. This parameter is only valid if the type parameter is container.
- deregisterOnNewRevision boolean - When updating a job definition a new revision is created. This parameter determines if the previous version is deregistered (INACTIVE) or left ACTIVE. Defaults to true.
- eksProperties JobDefinitionEksProperties - Valid EKS properties. This parameter is only valid if the type parameter is container.
- name string - Specifies the name of the job definition.
- nodeProperties string - Valid node properties provided as a single JSON document. This parameter is required if the type parameter is multinode.
- parameters {[key: string]: string} - Specifies the parameter substitution placeholders to set in the job definition.
- platformCapabilities string[] - The platform capabilities required by the job definition. If no value is specified, it defaults to EC2. To run the job on Fargate resources, specify FARGATE.
- propagateTags boolean - Specifies whether to propagate the tags from the job definition to the corresponding Amazon ECS task. Default is false.
- retryStrategy JobDefinitionRetryStrategy - Specifies the retry strategy to use for failed jobs that are submitted with this job definition. Maximum number of retry_strategy is 1. Defined below.
- revision number - The revision of the job definition.
- schedulingPriority number - The scheduling priority of the job definition. This only affects jobs in job queues with a fair share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower scheduling priority. Allowed values 0 through 9999.
- tags {[key: string]: string} - Key-value map of resource tags. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- tagsAll {[key: string]: string} - A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- timeout JobDefinitionTimeout - Specifies the timeout for jobs so that if a job runs longer, AWS Batch terminates the job. Maximum number of timeout is 1. Defined below.
- type string - The type of job definition. Must be container or multinode. The following arguments are optional:
- arn str - The Amazon Resource Name of the job definition, includes revision (:#).
- arn_prefix str - The ARN without the revision number.
- container_properties str - Valid container properties provided as a single JSON document. This parameter is only valid if the type parameter is container.
- deregister_on_new_revision bool - When updating a job definition a new revision is created. This parameter determines if the previous version is deregistered (INACTIVE) or left ACTIVE. Defaults to true.
- eks_properties JobDefinitionEksPropertiesArgs - Valid EKS properties. This parameter is only valid if the type parameter is container.
- name str - Specifies the name of the job definition.
- node_properties str - Valid node properties provided as a single JSON document. This parameter is required if the type parameter is multinode.
- parameters Mapping[str, str] - Specifies the parameter substitution placeholders to set in the job definition.
- platform_capabilities Sequence[str] - The platform capabilities required by the job definition. If no value is specified, it defaults to EC2. To run the job on Fargate resources, specify FARGATE.
- propagate_tags bool - Specifies whether to propagate the tags from the job definition to the corresponding Amazon ECS task. Default is false.
- retry_strategy JobDefinitionRetryStrategyArgs - Specifies the retry strategy to use for failed jobs that are submitted with this job definition. Maximum number of retry_strategy is 1. Defined below.
- revision int - The revision of the job definition.
- scheduling_priority int - The scheduling priority of the job definition. This only affects jobs in job queues with a fair share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower scheduling priority. Allowed values 0 through 9999.
- tags Mapping[str, str] - Key-value map of resource tags. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- tags_all Mapping[str, str] - A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- timeout JobDefinitionTimeoutArgs - Specifies the timeout for jobs so that if a job runs longer, AWS Batch terminates the job. Maximum number of timeout is 1. Defined below.
- type str - The type of job definition. Must be container or multinode. The following arguments are optional:
- arn String - The Amazon Resource Name of the job definition, includes revision (:#).
- arnPrefix String - The ARN without the revision number.
- containerProperties String - Valid container properties provided as a single JSON document. This parameter is only valid if the type parameter is container.
- deregisterOnNewRevision Boolean - When updating a job definition a new revision is created. This parameter determines if the previous version is deregistered (INACTIVE) or left ACTIVE. Defaults to true.
- eksProperties Property Map - Valid EKS properties. This parameter is only valid if the type parameter is container.
- name String - Specifies the name of the job definition.
- nodeProperties String - Valid node properties provided as a single JSON document. This parameter is required if the type parameter is multinode.
- parameters Map<String> - Specifies the parameter substitution placeholders to set in the job definition.
- platformCapabilities List<String> - The platform capabilities required by the job definition. If no value is specified, it defaults to EC2. To run the job on Fargate resources, specify FARGATE.
- propagateTags Boolean - Specifies whether to propagate the tags from the job definition to the corresponding Amazon ECS task. Default is false.
- retryStrategy Property Map - Specifies the retry strategy to use for failed jobs that are submitted with this job definition. Maximum number of retry_strategy is 1. Defined below.
- revision Number - The revision of the job definition.
- schedulingPriority Number - The scheduling priority of the job definition. This only affects jobs in job queues with a fair share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower scheduling priority. Allowed values 0 through 9999.
- tags Map<String> - Key-value map of resource tags. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- tagsAll Map<String> - A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- timeout Property Map - Specifies the timeout for jobs so that if a job runs longer, AWS Batch terminates the job. Maximum number of timeout is 1. Defined below.
- type String - The type of job definition. Must be container or multinode. The following arguments are optional:
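Several of these arguments can be combined on a single resource. A hedged TypeScript sketch (the image, parameter names, bucket, and tag values are placeholders, not prescribed values):
import * as aws from "@pulumi/aws";

const tagged = new aws.batch.JobDefinition("tagged", {
    name: "my_tagged_batch_job_definition",
    type: "container",
    platformCapabilities: ["EC2"],
    schedulingPriority: 100,        // only honored by job queues with a fair share policy
    propagateTags: true,            // copy the tags below onto the corresponding ECS tasks
    deregisterOnNewRevision: false, // keep earlier revisions ACTIVE when a new revision is created
    parameters: {
        inputFile: "s3://placeholder-bucket/input.txt",
    },
    tags: {
        Environment: "test",
    },
    containerProperties: JSON.stringify({
        image: "busybox",
        command: ["echo", "Ref::inputFile"], // Ref::<name> substitutes the parameter above
        resourceRequirements: [
            { type: "VCPU", value: "0.25" },
            { type: "MEMORY", value: "512" },
        ],
    }),
});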
Supporting Types
JobDefinitionEksProperties, JobDefinitionEksPropertiesArgs
- PodProperties JobDefinitionEksPropertiesPodProperties - The properties for the Kubernetes pod resources of a job. See pod_properties below.
- PodProperties JobDefinitionEksPropertiesPodProperties - The properties for the Kubernetes pod resources of a job. See pod_properties below.
- podProperties JobDefinitionEksPropertiesPodProperties - The properties for the Kubernetes pod resources of a job. See pod_properties below.
- podProperties JobDefinitionEksPropertiesPodProperties - The properties for the Kubernetes pod resources of a job. See pod_properties below.
- pod_properties JobDefinitionEksPropertiesPodProperties - The properties for the Kubernetes pod resources of a job. See pod_properties below.
- podProperties Property Map - The properties for the Kubernetes pod resources of a job. See pod_properties below.
JobDefinitionEksPropertiesPodProperties, JobDefinitionEksPropertiesPodPropertiesArgs
- Containers JobDefinitionEksPropertiesPodPropertiesContainers - The properties of the container that's used on the Amazon EKS pod. See containers below.
- DnsPolicy string - The DNS policy for the pod. The default value is ClusterFirst. If the host_network argument is not specified, the default is ClusterFirstWithHostNet. ClusterFirst indicates that any DNS query that does not match the configured cluster domain suffix is forwarded to the upstream nameserver inherited from the node. For more information, see Pod's DNS policy in the Kubernetes documentation.
- HostNetwork bool - Indicates if the pod uses the hosts' network IP address. The default value is true. Setting this to false enables the Kubernetes pod networking model. Most AWS Batch workloads are egress-only and don't require the overhead of IP allocation for each pod for incoming connections.
- Metadata JobDefinitionEksPropertiesPodPropertiesMetadata - Metadata about the Kubernetes pod.
- ServiceAccountName string - The name of the service account that's used to run the pod.
- Volumes List<JobDefinitionEksPropertiesPodPropertiesVolume> - Specifies the volumes for a job definition that uses Amazon EKS resources. AWS Batch supports emptyDir, hostPath, and secret volume types.
- Containers JobDefinitionEksPropertiesPodPropertiesContainers - The properties of the container that's used on the Amazon EKS pod. See containers below.
- DnsPolicy string - The DNS policy for the pod. The default value is ClusterFirst. If the host_network argument is not specified, the default is ClusterFirstWithHostNet. ClusterFirst indicates that any DNS query that does not match the configured cluster domain suffix is forwarded to the upstream nameserver inherited from the node. For more information, see Pod's DNS policy in the Kubernetes documentation.
- HostNetwork bool - Indicates if the pod uses the hosts' network IP address. The default value is true. Setting this to false enables the Kubernetes pod networking model. Most AWS Batch workloads are egress-only and don't require the overhead of IP allocation for each pod for incoming connections.
- Metadata JobDefinitionEksPropertiesPodPropertiesMetadata - Metadata about the Kubernetes pod.
- ServiceAccountName string - The name of the service account that's used to run the pod.
- Volumes []JobDefinitionEksPropertiesPodPropertiesVolume - Specifies the volumes for a job definition that uses Amazon EKS resources. AWS Batch supports emptyDir, hostPath, and secret volume types.
- containers JobDefinitionEksPropertiesPodPropertiesContainers - The properties of the container that's used on the Amazon EKS pod. See containers below.
- dnsPolicy String - The DNS policy for the pod. The default value is ClusterFirst. If the host_network argument is not specified, the default is ClusterFirstWithHostNet. ClusterFirst indicates that any DNS query that does not match the configured cluster domain suffix is forwarded to the upstream nameserver inherited from the node. For more information, see Pod's DNS policy in the Kubernetes documentation.
- hostNetwork Boolean - Indicates if the pod uses the hosts' network IP address. The default value is true. Setting this to false enables the Kubernetes pod networking model. Most AWS Batch workloads are egress-only and don't require the overhead of IP allocation for each pod for incoming connections.
- metadata JobDefinitionEksPropertiesPodPropertiesMetadata - Metadata about the Kubernetes pod.
- serviceAccountName String - The name of the service account that's used to run the pod.
- volumes List<JobDefinitionEksPropertiesPodPropertiesVolume> - Specifies the volumes for a job definition that uses Amazon EKS resources. AWS Batch supports emptyDir, hostPath, and secret volume types.
- containers JobDefinitionEksPropertiesPodPropertiesContainers - The properties of the container that's used on the Amazon EKS pod. See containers below.
- dnsPolicy string - The DNS policy for the pod. The default value is ClusterFirst. If the host_network argument is not specified, the default is ClusterFirstWithHostNet. ClusterFirst indicates that any DNS query that does not match the configured cluster domain suffix is forwarded to the upstream nameserver inherited from the node. For more information, see Pod's DNS policy in the Kubernetes documentation.
- hostNetwork boolean - Indicates if the pod uses the hosts' network IP address. The default value is true. Setting this to false enables the Kubernetes pod networking model. Most AWS Batch workloads are egress-only and don't require the overhead of IP allocation for each pod for incoming connections.
- metadata JobDefinitionEksPropertiesPodPropertiesMetadata - Metadata about the Kubernetes pod.
- serviceAccountName string - The name of the service account that's used to run the pod.
- volumes JobDefinitionEksPropertiesPodPropertiesVolume[] - Specifies the volumes for a job definition that uses Amazon EKS resources. AWS Batch supports emptyDir, hostPath, and secret volume types.
- containers JobDefinitionEksPropertiesPodPropertiesContainers - The properties of the container that's used on the Amazon EKS pod. See containers below.
- dns_policy str - The DNS policy for the pod. The default value is ClusterFirst. If the host_network argument is not specified, the default is ClusterFirstWithHostNet. ClusterFirst indicates that any DNS query that does not match the configured cluster domain suffix is forwarded to the upstream nameserver inherited from the node. For more information, see Pod's DNS policy in the Kubernetes documentation.
- host_network bool - Indicates if the pod uses the hosts' network IP address. The default value is true. Setting this to false enables the Kubernetes pod networking model. Most AWS Batch workloads are egress-only and don't require the overhead of IP allocation for each pod for incoming connections.
- metadata JobDefinitionEksPropertiesPodPropertiesMetadata - Metadata about the Kubernetes pod.
- service_account_name str - The name of the service account that's used to run the pod.
- volumes Sequence[JobDefinitionEksPropertiesPodPropertiesVolume] - Specifies the volumes for a job definition that uses Amazon EKS resources. AWS Batch supports emptyDir, hostPath, and secret volume types.
- containers Property Map - The properties of the container that's used on the Amazon EKS pod. See containers below.
- dnsPolicy String - The DNS policy for the pod. The default value is ClusterFirst. If the host_network argument is not specified, the default is ClusterFirstWithHostNet. ClusterFirst indicates that any DNS query that does not match the configured cluster domain suffix is forwarded to the upstream nameserver inherited from the node. For more information, see Pod's DNS policy in the Kubernetes documentation.
- hostNetwork Boolean - Indicates if the pod uses the hosts' network IP address. The default value is true. Setting this to false enables the Kubernetes pod networking model. Most AWS Batch workloads are egress-only and don't require the overhead of IP allocation for each pod for incoming connections.
- metadata Property Map - Metadata about the Kubernetes pod.
- serviceAccountName String - The name of the service account that's used to run the pod.
- volumes List<Property Map> - Specifies the volumes for a job definition that uses Amazon EKS resources. AWS Batch supports emptyDir, hostPath, and secret volume types.
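A TypeScript sketch of a job definition that uses these pod properties (the image, labels, and resource limits are illustrative, and the resources block follows the limits shape assumed for the Resources type below):
import * as aws from "@pulumi/aws";

const eksJob = new aws.batch.JobDefinition("eksJob", {
    name: "my_eks_batch_job_definition",
    type: "container", // eksProperties is only valid when type is "container"
    eksProperties: {
        podProperties: {
            hostNetwork: true,
            dnsPolicy: "ClusterFirst",
            serviceAccountName: "default",
            containers: {
                name: "main",
                image: "public.ecr.aws/amazonlinux/amazonlinux:2",
                commands: ["sleep", "60"],
                imagePullPolicy: "IfNotPresent",
                resources: {
                    limits: {
                        cpu: "1",
                        memory: "1024Mi",
                    },
                },
            },
            metadata: {
                labels: {
                    environment: "example",
                },
            },
        },
    },
});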
JobDefinitionEksPropertiesPodPropertiesContainers, JobDefinitionEksPropertiesPodPropertiesContainersArgs
- Image string - The Docker image used to start the container.
- Args List<string> - An array of arguments to the entrypoint. If this isn't specified, the CMD of the container image is used. This corresponds to the args member in the Entrypoint portion of the Pod in Kubernetes. Environment variable references are expanded using the container's environment.
- Commands List<string> - The entrypoint for the container. This isn't run within a shell. If this isn't specified, the ENTRYPOINT of the container image is used. Environment variable references are expanded using the container's environment.
- Envs List<JobDefinitionEksPropertiesPodPropertiesContainersEnv> - The environment variables to pass to a container. See EKS Environment below.
- ImagePullPolicy string - The image pull policy for the container. Supported values are Always, IfNotPresent, and Never.
- Name string - The name of the container. If the name isn't specified, the default name "Default" is used. Each container in a pod must have a unique name.
- Resources JobDefinitionEksPropertiesPodPropertiesContainersResources - The type and amount of resources to assign to a container. The supported resources include memory, cpu, and nvidia.com/gpu.
- SecurityContext JobDefinitionEksPropertiesPodPropertiesContainersSecurityContext - The security context for a job.
- VolumeMounts List<JobDefinitionEksPropertiesPodPropertiesContainersVolumeMount> - The volume mounts for the container.
- Image string - The Docker image used to start the container.
- Args []string - An array of arguments to the entrypoint. If this isn't specified, the CMD of the container image is used. This corresponds to the args member in the Entrypoint portion of the Pod in Kubernetes. Environment variable references are expanded using the container's environment.
- Commands []string - The entrypoint for the container. This isn't run within a shell. If this isn't specified, the ENTRYPOINT of the container image is used. Environment variable references are expanded using the container's environment.
- Envs []JobDefinitionEksPropertiesPodPropertiesContainersEnv - The environment variables to pass to a container. See EKS Environment below.
- ImagePullPolicy string - The image pull policy for the container. Supported values are Always, IfNotPresent, and Never.
- Name string - The name of the container. If the name isn't specified, the default name "Default" is used. Each container in a pod must have a unique name.
- Resources JobDefinitionEksPropertiesPodPropertiesContainersResources - The type and amount of resources to assign to a container. The supported resources include memory, cpu, and nvidia.com/gpu.
- SecurityContext JobDefinitionEksPropertiesPodPropertiesContainersSecurityContext - The security context for a job.
- VolumeMounts []JobDefinitionEksPropertiesPodPropertiesContainersVolumeMount - The volume mounts for the container.
- image String - The Docker image used to start the container.
- args List<String> - An array of arguments to the entrypoint. If this isn't specified, the CMD of the container image is used. This corresponds to the args member in the Entrypoint portion of the Pod in Kubernetes. Environment variable references are expanded using the container's environment.
- commands List<String> - The entrypoint for the container. This isn't run within a shell. If this isn't specified, the ENTRYPOINT of the container image is used. Environment variable references are expanded using the container's environment.
- envs List<JobDefinitionEksPropertiesPodPropertiesContainersEnv> - The environment variables to pass to a container. See EKS Environment below.
- imagePullPolicy String - The image pull policy for the container. Supported values are Always, IfNotPresent, and Never.
- name String - The name of the container. If the name isn't specified, the default name "Default" is used. Each container in a pod must have a unique name.
- resources JobDefinitionEksPropertiesPodPropertiesContainersResources - The type and amount of resources to assign to a container. The supported resources include memory, cpu, and nvidia.com/gpu.
- securityContext JobDefinitionEksPropertiesPodPropertiesContainersSecurityContext - The security context for a job.
- volumeMounts List<JobDefinitionEksPropertiesPodPropertiesContainersVolumeMount> - The volume mounts for the container.
- image string - The Docker image used to start the container.
- args string[] - An array of arguments to the entrypoint. If this isn't specified, the CMD of the container image is used. This corresponds to the args member in the Entrypoint portion of the Pod in Kubernetes. Environment variable references are expanded using the container's environment.
- commands string[] - The entrypoint for the container. This isn't run within a shell. If this isn't specified, the ENTRYPOINT of the container image is used. Environment variable references are expanded using the container's environment.
- envs JobDefinitionEksPropertiesPodPropertiesContainersEnv[] - The environment variables to pass to a container. See EKS Environment below.
- imagePullPolicy string - The image pull policy for the container. Supported values are Always, IfNotPresent, and Never.
- name string - The name of the container. If the name isn't specified, the default name "Default" is used. Each container in a pod must have a unique name.
- resources JobDefinitionEksPropertiesPodPropertiesContainersResources - The type and amount of resources to assign to a container. The supported resources include memory, cpu, and nvidia.com/gpu.
- securityContext JobDefinitionEksPropertiesPodPropertiesContainersSecurityContext - The security context for a job.
- volumeMounts JobDefinitionEksPropertiesPodPropertiesContainersVolumeMount[] - The volume mounts for the container.
- image str - The Docker image used to start the container.
- args Sequence[str] - An array of arguments to the entrypoint. If this isn't specified, the CMD of the container image is used. This corresponds to the args member in the Entrypoint portion of the Pod in Kubernetes. Environment variable references are expanded using the container's environment.
- commands Sequence[str] - The entrypoint for the container. This isn't run within a shell. If this isn't specified, the ENTRYPOINT of the container image is used. Environment variable references are expanded using the container's environment.
- envs Sequence[JobDefinitionEksPropertiesPodPropertiesContainersEnv] - The environment variables to pass to a container. See EKS Environment below.
- image_pull_policy str - The image pull policy for the container. Supported values are Always, IfNotPresent, and Never.
- name str - The name of the container. If the name isn't specified, the default name "Default" is used. Each container in a pod must have a unique name.
- resources JobDefinitionEksPropertiesPodPropertiesContainersResources - The type and amount of resources to assign to a container. The supported resources include memory, cpu, and nvidia.com/gpu.
- security_context JobDefinitionEksPropertiesPodPropertiesContainersSecurityContext - The security context for a job.
- volume_mounts Sequence[JobDefinitionEksPropertiesPodPropertiesContainersVolumeMount] - The volume mounts for the container.
- image String - The Docker image used to start the container.
- args List<String> - An array of arguments to the entrypoint. If this isn't specified, the CMD of the container image is used. This corresponds to the args member in the Entrypoint portion of the Pod in Kubernetes. Environment variable references are expanded using the container's environment.
- commands List<String> - The entrypoint for the container. This isn't run within a shell. If this isn't specified, the ENTRYPOINT of the container image is used. Environment variable references are expanded using the container's environment.
- envs List<Property Map> - The environment variables to pass to a container. See EKS Environment below.
- imagePullPolicy String - The image pull policy for the container. Supported values are Always, IfNotPresent, and Never.
- name String - The name of the container. If the name isn't specified, the default name "Default" is used. Each container in a pod must have a unique name.
- resources Property Map - The type and amount of resources to assign to a container. The supported resources include memory, cpu, and nvidia.com/gpu.
- securityContext Property Map - The security context for a job.
- volumeMounts List<Property Map> - The volume mounts for the container.
JobDefinitionEksPropertiesPodPropertiesContainersEnv, JobDefinitionEksPropertiesPodPropertiesContainersEnvArgs
JobDefinitionEksPropertiesPodPropertiesContainersResources, JobDefinitionEksPropertiesPodPropertiesContainersResourcesArgs
JobDefinitionEksPropertiesPodPropertiesContainersSecurityContext, JobDefinitionEksPropertiesPodPropertiesContainersSecurityContextArgs
- Privileged bool
- ReadOnlyRootFileSystem bool
- RunAsGroup int
- RunAsNonRoot bool
- RunAsUser int
- Privileged bool
- ReadOnlyRootFileSystem bool
- RunAsGroup int
- RunAsNonRoot bool
- RunAsUser int
- privileged Boolean
- readOnlyRootFileSystem Boolean
- runAsGroup Integer
- runAsNonRoot Boolean
- runAsUser Integer
- privileged boolean
- readOnlyRootFileSystem boolean
- runAsGroup number
- runAsNonRoot boolean
- runAsUser number
- privileged bool
- read_only_root_file_system bool
- run_as_group int
- run_as_non_root bool
- run_as_user int
- privileged Boolean
- readOnlyRootFileSystem Boolean
- runAsGroup Number
- runAsNonRoot Boolean
- runAsUser Number
JobDefinitionEksPropertiesPodPropertiesContainersVolumeMount, JobDefinitionEksPropertiesPodPropertiesContainersVolumeMountArgs
- mount_path str
- name str - The name of the volume to mount.
- read_only bool
JobDefinitionEksPropertiesPodPropertiesMetadata, JobDefinitionEksPropertiesPodPropertiesMetadataArgs
- Labels Dictionary<string, string>
- Labels map[string]string
- labels Map<String,String>
- labels {[key: string]: string}
- labels Mapping[str, str]
- labels Map<String>
JobDefinitionEksPropertiesPodPropertiesVolume, JobDefinitionEksPropertiesPodPropertiesVolumeArgs
- EmptyDir JobDefinitionEksPropertiesPodPropertiesVolumeEmptyDir
- HostPath JobDefinitionEksPropertiesPodPropertiesVolumeHostPath
- Name string - The name of the volume.
- Secret JobDefinitionEksPropertiesPodPropertiesVolumeSecret
- EmptyDir JobDefinitionEksPropertiesPodPropertiesVolumeEmptyDir
- HostPath JobDefinitionEksPropertiesPodPropertiesVolumeHostPath
- Name string - The name of the volume.
- Secret JobDefinitionEksPropertiesPodPropertiesVolumeSecret
- emptyDir JobDefinitionEksPropertiesPodPropertiesVolumeEmptyDir
- hostPath JobDefinitionEksPropertiesPodPropertiesVolumeHostPath
- name String - The name of the volume.
- secret JobDefinitionEksPropertiesPodPropertiesVolumeSecret
- emptyDir JobDefinitionEksPropertiesPodPropertiesVolumeEmptyDir
- hostPath JobDefinitionEksPropertiesPodPropertiesVolumeHostPath
- name string - The name of the volume.
- secret JobDefinitionEksPropertiesPodPropertiesVolumeSecret
- empty_dir JobDefinitionEksPropertiesPodPropertiesVolumeEmptyDir
- host_path JobDefinitionEksPropertiesPodPropertiesVolumeHostPath
- name str - The name of the volume.
- secret JobDefinitionEksPropertiesPodPropertiesVolumeSecret
- emptyDir Property Map
- hostPath Property Map
- name String - The name of the volume.
- secret Property Map
JobDefinitionEksPropertiesPodPropertiesVolumeEmptyDir, JobDefinitionEksPropertiesPodPropertiesVolumeEmptyDirArgs
- size_limit str
- medium str
JobDefinitionEksPropertiesPodPropertiesVolumeHostPath, JobDefinitionEksPropertiesPodPropertiesVolumeHostPathArgs
- Path string
- Path string
- path String
- path string
- path str
- path String
JobDefinitionEksPropertiesPodPropertiesVolumeSecret, JobDefinitionEksPropertiesPodPropertiesVolumeSecretArgs
- SecretName string
- Optional bool
- SecretName string
- Optional bool
- secretName String
- optional Boolean
- secretName string
- optional boolean
- secret_name str
- optional bool
- secretName String
- optional Boolean
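A TypeScript sketch that wires the three supported volume types to matching volume mounts (the volume names, paths, size limit, and Kubernetes secret name are placeholders, and the resources block follows the limits shape assumed above):
import * as aws from "@pulumi/aws";

const eksVolumesJob = new aws.batch.JobDefinition("eksVolumesJob", {
    name: "my_eks_volumes_job_definition",
    type: "container",
    eksProperties: {
        podProperties: {
            containers: {
                image: "public.ecr.aws/amazonlinux/amazonlinux:2",
                commands: ["ls", "-la", "/scratch"],
                resources: {
                    limits: { cpu: "0.5", memory: "512Mi" },
                },
                // Each mount's name must match one of the pod volumes below.
                volumeMounts: [
                    { name: "scratch", mountPath: "/scratch", readOnly: false },
                    { name: "host-tmp", mountPath: "/host-tmp", readOnly: true },
                    { name: "app-secret", mountPath: "/secrets", readOnly: true },
                ],
            },
            volumes: [
                { name: "scratch", emptyDir: { sizeLimit: "1Gi" } },
                { name: "host-tmp", hostPath: { path: "/tmp" } },
                { name: "app-secret", secret: { secretName: "my-app-secret", optional: true } },
            ],
        },
    },
});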
JobDefinitionRetryStrategy, JobDefinitionRetryStrategyArgs
- Attempts int - The number of times to move a job to the RUNNABLE status. You may specify between 1 and 10 attempts.
- EvaluateOnExits List<JobDefinitionRetryStrategyEvaluateOnExit> - The evaluate on exit conditions under which the job should be retried or failed. If this parameter is specified, then the attempts parameter must also be specified. You may specify up to 5 configuration blocks.
- Attempts int - The number of times to move a job to the RUNNABLE status. You may specify between 1 and 10 attempts.
- EvaluateOnExits []JobDefinitionRetryStrategyEvaluateOnExit - The evaluate on exit conditions under which the job should be retried or failed. If this parameter is specified, then the attempts parameter must also be specified. You may specify up to 5 configuration blocks.
- attempts Integer - The number of times to move a job to the RUNNABLE status. You may specify between 1 and 10 attempts.
- evaluateOnExits List<JobDefinitionRetryStrategyEvaluateOnExit> - The evaluate on exit conditions under which the job should be retried or failed. If this parameter is specified, then the attempts parameter must also be specified. You may specify up to 5 configuration blocks.
- attempts number - The number of times to move a job to the RUNNABLE status. You may specify between 1 and 10 attempts.
- evaluateOnExits JobDefinitionRetryStrategyEvaluateOnExit[] - The evaluate on exit conditions under which the job should be retried or failed. If this parameter is specified, then the attempts parameter must also be specified. You may specify up to 5 configuration blocks.
- attempts int - The number of times to move a job to the RUNNABLE status. You may specify between 1 and 10 attempts.
- evaluate_on_exits Sequence[JobDefinitionRetryStrategyEvaluateOnExit] - The evaluate on exit conditions under which the job should be retried or failed. If this parameter is specified, then the attempts parameter must also be specified. You may specify up to 5 configuration blocks.
- attempts Number - The number of times to move a job to the RUNNABLE status. You may specify between 1 and 10 attempts.
- evaluateOnExits List<Property Map> - The evaluate on exit conditions under which the job should be retried or failed. If this parameter is specified, then the attempts parameter must also be specified. You may specify up to 5 configuration blocks.
JobDefinitionRetryStrategyEvaluateOnExit, JobDefinitionRetryStrategyEvaluateOnExitArgs
- Action string - Specifies the action to take if all of the specified conditions are met. The values are not case sensitive. Valid values: retry, exit.
- OnExitCode string - A glob pattern to match against the decimal representation of the exit code returned for a job.
- OnReason string - A glob pattern to match against the reason returned for a job.
- OnStatusReason string - A glob pattern to match against the status reason returned for a job.
- Action string - Specifies the action to take if all of the specified conditions are met. The values are not case sensitive. Valid values: retry, exit.
- OnExitCode string - A glob pattern to match against the decimal representation of the exit code returned for a job.
- OnReason string - A glob pattern to match against the reason returned for a job.
- OnStatusReason string - A glob pattern to match against the status reason returned for a job.
- action String - Specifies the action to take if all of the specified conditions are met. The values are not case sensitive. Valid values: retry, exit.
- onExitCode String - A glob pattern to match against the decimal representation of the exit code returned for a job.
- onReason String - A glob pattern to match against the reason returned for a job.
- onStatusReason String - A glob pattern to match against the status reason returned for a job.
- action string - Specifies the action to take if all of the specified conditions are met. The values are not case sensitive. Valid values: retry, exit.
- onExitCode string - A glob pattern to match against the decimal representation of the exit code returned for a job.
- onReason string - A glob pattern to match against the reason returned for a job.
- onStatusReason string - A glob pattern to match against the status reason returned for a job.
- action str - Specifies the action to take if all of the specified conditions are met. The values are not case sensitive. Valid values: retry, exit.
- on_exit_code str - A glob pattern to match against the decimal representation of the exit code returned for a job.
- on_reason str - A glob pattern to match against the reason returned for a job.
- on_status_reason str - A glob pattern to match against the status reason returned for a job.
- action String - Specifies the action to take if all of the specified conditions are met. The values are not case sensitive. Valid values: retry, exit.
- onExitCode String - A glob pattern to match against the decimal representation of the exit code returned for a job.
- onReason String - A glob pattern to match against the reason returned for a job.
- onStatusReason String - A glob pattern to match against the status reason returned for a job.
JobDefinitionTimeout, JobDefinitionTimeoutArgs
- AttemptDurationSeconds int - The time duration in seconds after which AWS Batch terminates your jobs if they have not finished. The minimum value for the timeout is 60 seconds.
- AttemptDurationSeconds int - The time duration in seconds after which AWS Batch terminates your jobs if they have not finished. The minimum value for the timeout is 60 seconds.
- attemptDurationSeconds Integer - The time duration in seconds after which AWS Batch terminates your jobs if they have not finished. The minimum value for the timeout is 60 seconds.
- attemptDurationSeconds number - The time duration in seconds after which AWS Batch terminates your jobs if they have not finished. The minimum value for the timeout is 60 seconds.
- attempt_duration_seconds int - The time duration in seconds after which AWS Batch terminates your jobs if they have not finished. The minimum value for the timeout is 60 seconds.
- attemptDurationSeconds Number - The time duration in seconds after which AWS Batch terminates your jobs if they have not finished. The minimum value for the timeout is 60 seconds.
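A TypeScript sketch combining a retry strategy, evaluate on exit conditions, and a timeout (the container properties, exit conditions, and durations are illustrative):
import * as aws from "@pulumi/aws";

const resilient = new aws.batch.JobDefinition("resilient", {
    name: "my_resilient_batch_job_definition",
    type: "container",
    containerProperties: JSON.stringify({
        image: "busybox",
        command: ["sh", "-c", "exit 1"],
        resourceRequirements: [
            { type: "VCPU", value: "0.25" },
            { type: "MEMORY", value: "512" },
        ],
    }),
    retryStrategy: {
        attempts: 3,
        evaluateOnExits: [
            // Retry when the underlying host is reclaimed; the glob pattern is a placeholder.
            { action: "RETRY", onStatusReason: "Host EC2*" },
            // Fail immediately for anything else.
            { action: "EXIT", onReason: "*" },
        ],
    },
    timeout: {
        attemptDurationSeconds: 600, // must be at least 60 seconds
    },
});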
Import
Using pulumi import, import Batch Job Definition using the arn. For example:
$ pulumi import aws:batch/jobDefinition:JobDefinition test arn:aws:batch:us-east-1:123456789012:job-definition/sample
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository: AWS Classic pulumi/pulumi-aws
- License: Apache-2.0
- Notes: This Pulumi package is based on the aws Terraform Provider.