gcp.bigquery.Job
Jobs are actions that BigQuery runs on your behalf to load data, export data, query data, or copy data. Once a BigQuery job is created, it cannot be changed or deleted.
To get more information about Job, see:
- API documentation
- How-to Guides
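Because a job is immutable, changing any configured field on a gcp.bigquery.Job causes Pulumi to replace the resource rather than update it, and BigQuery generally will not accept a job ID that has already been used in the project. The minimal TypeScript sketch below shows one way to handle this by folding a version suffix into the jobId; the project, dataset, and table names are placeholders and are not part of the documented examples that follow.
import * as gcp from "@pulumi/gcp";
// Minimal sketch with placeholder names, not one of the documented examples below.
// Every Job field forces a new resource, so bump queryVersion (and with it the
// jobId) whenever the query configuration changes.
const queryVersion = "v1";
const sketchJob = new gcp.bigquery.Job("sketch-job", {
    jobId: `example_query_${queryVersion}`,
    query: {
        query: "SELECT 1 AS placeholder_column",
        useLegacySql: false, // standard SQL; several examples below use legacy SQL table syntax
        createDisposition: "CREATE_IF_NEEDED",
        writeDisposition: "WRITE_TRUNCATE",
        destinationTable: {
            projectId: "my-project", // placeholder
            datasetId: "my_dataset", // placeholder
            tableId: "my_table",     // placeholder
        },
    },
});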
Example Usage
Bigquery Job Query
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const bar = new gcp.bigquery.Dataset("bar", {
datasetId: "job_query_dataset",
friendlyName: "test",
description: "This is a test description",
location: "US",
});
const foo = new gcp.bigquery.Table("foo", {
deletionProtection: false,
datasetId: bar.datasetId,
tableId: "job_query_table",
});
const job = new gcp.bigquery.Job("job", {
jobId: "job_query",
labels: {
"example-label": "example-value",
},
query: {
query: "SELECT state FROM [lookerdata:cdc.project_tycho_reports]",
destinationTable: {
projectId: foo.project,
datasetId: foo.datasetId,
tableId: foo.tableId,
},
allowLargeResults: true,
flattenResults: true,
scriptOptions: {
keyResultStatement: "LAST",
},
},
});
import pulumi
import pulumi_gcp as gcp
bar = gcp.bigquery.Dataset("bar",
dataset_id="job_query_dataset",
friendly_name="test",
description="This is a test description",
location="US")
foo = gcp.bigquery.Table("foo",
deletion_protection=False,
dataset_id=bar.dataset_id,
table_id="job_query_table")
job = gcp.bigquery.Job("job",
job_id="job_query",
labels={
"example-label": "example-value",
},
query=gcp.bigquery.JobQueryArgs(
query="SELECT state FROM [lookerdata:cdc.project_tycho_reports]",
destination_table=gcp.bigquery.JobQueryDestinationTableArgs(
project_id=foo.project,
dataset_id=foo.dataset_id,
table_id=foo.table_id,
),
allow_large_results=True,
flatten_results=True,
script_options=gcp.bigquery.JobQueryScriptOptionsArgs(
key_result_statement="LAST",
),
))
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/bigquery"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
bar, err := bigquery.NewDataset(ctx, "bar", &bigquery.DatasetArgs{
DatasetId: pulumi.String("job_query_dataset"),
FriendlyName: pulumi.String("test"),
Description: pulumi.String("This is a test description"),
Location: pulumi.String("US"),
})
if err != nil {
return err
}
foo, err := bigquery.NewTable(ctx, "foo", &bigquery.TableArgs{
DeletionProtection: pulumi.Bool(false),
DatasetId: bar.DatasetId,
TableId: pulumi.String("job_query_table"),
})
if err != nil {
return err
}
_, err = bigquery.NewJob(ctx, "job", &bigquery.JobArgs{
JobId: pulumi.String("job_query"),
Labels: pulumi.StringMap{
"example-label": pulumi.String("example-value"),
},
Query: &bigquery.JobQueryArgs{
Query: pulumi.String("SELECT state FROM [lookerdata:cdc.project_tycho_reports]"),
DestinationTable: &bigquery.JobQueryDestinationTableArgs{
ProjectId: foo.Project,
DatasetId: foo.DatasetId,
TableId: foo.TableId,
},
AllowLargeResults: pulumi.Bool(true),
FlattenResults: pulumi.Bool(true),
ScriptOptions: &bigquery.JobQueryScriptOptionsArgs{
KeyResultStatement: pulumi.String("LAST"),
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var bar = new Gcp.BigQuery.Dataset("bar", new()
{
DatasetId = "job_query_dataset",
FriendlyName = "test",
Description = "This is a test description",
Location = "US",
});
var foo = new Gcp.BigQuery.Table("foo", new()
{
DeletionProtection = false,
DatasetId = bar.DatasetId,
TableId = "job_query_table",
});
var job = new Gcp.BigQuery.Job("job", new()
{
JobId = "job_query",
Labels =
{
{ "example-label", "example-value" },
},
Query = new Gcp.BigQuery.Inputs.JobQueryArgs
{
Query = "SELECT state FROM [lookerdata:cdc.project_tycho_reports]",
DestinationTable = new Gcp.BigQuery.Inputs.JobQueryDestinationTableArgs
{
ProjectId = foo.Project,
DatasetId = foo.DatasetId,
TableId = foo.TableId,
},
AllowLargeResults = true,
FlattenResults = true,
ScriptOptions = new Gcp.BigQuery.Inputs.JobQueryScriptOptionsArgs
{
KeyResultStatement = "LAST",
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Table;
import com.pulumi.gcp.bigquery.TableArgs;
import com.pulumi.gcp.bigquery.Job;
import com.pulumi.gcp.bigquery.JobArgs;
import com.pulumi.gcp.bigquery.inputs.JobQueryArgs;
import com.pulumi.gcp.bigquery.inputs.JobQueryDestinationTableArgs;
import com.pulumi.gcp.bigquery.inputs.JobQueryScriptOptionsArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var bar = new Dataset("bar", DatasetArgs.builder()
.datasetId("job_query_dataset")
.friendlyName("test")
.description("This is a test description")
.location("US")
.build());
var foo = new Table("foo", TableArgs.builder()
.deletionProtection(false)
.datasetId(bar.datasetId())
.tableId("job_query_table")
.build());
var job = new Job("job", JobArgs.builder()
.jobId("job_query")
.labels(Map.of("example-label", "example-value"))
.query(JobQueryArgs.builder()
.query("SELECT state FROM [lookerdata:cdc.project_tycho_reports]")
.destinationTable(JobQueryDestinationTableArgs.builder()
.projectId(foo.project())
.datasetId(foo.datasetId())
.tableId(foo.tableId())
.build())
.allowLargeResults(true)
.flattenResults(true)
.scriptOptions(JobQueryScriptOptionsArgs.builder()
.keyResultStatement("LAST")
.build())
.build())
.build());
}
}
resources:
foo:
type: gcp:bigquery:Table
properties:
deletionProtection: false
datasetId: ${bar.datasetId}
tableId: job_query_table
bar:
type: gcp:bigquery:Dataset
properties:
datasetId: job_query_dataset
friendlyName: test
description: This is a test description
location: US
job:
type: gcp:bigquery:Job
properties:
jobId: job_query
labels:
example-label: example-value
query:
query: SELECT state FROM [lookerdata:cdc.project_tycho_reports]
destinationTable:
projectId: ${foo.project}
datasetId: ${foo.datasetId}
tableId: ${foo.tableId}
allowLargeResults: true
flattenResults: true
scriptOptions:
keyResultStatement: LAST
Bigquery Job Query Table Reference
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const bar = new gcp.bigquery.Dataset("bar", {
datasetId: "job_query_dataset",
friendlyName: "test",
description: "This is a test description",
location: "US",
});
const foo = new gcp.bigquery.Table("foo", {
deletionProtection: false,
datasetId: bar.datasetId,
tableId: "job_query_table",
});
const job = new gcp.bigquery.Job("job", {
jobId: "job_query",
labels: {
"example-label": "example-value",
},
query: {
query: "SELECT state FROM [lookerdata:cdc.project_tycho_reports]",
destinationTable: {
tableId: foo.id,
},
defaultDataset: {
datasetId: bar.id,
},
allowLargeResults: true,
flattenResults: true,
scriptOptions: {
keyResultStatement: "LAST",
},
},
});
import pulumi
import pulumi_gcp as gcp
bar = gcp.bigquery.Dataset("bar",
dataset_id="job_query_dataset",
friendly_name="test",
description="This is a test description",
location="US")
foo = gcp.bigquery.Table("foo",
deletion_protection=False,
dataset_id=bar.dataset_id,
table_id="job_query_table")
job = gcp.bigquery.Job("job",
job_id="job_query",
labels={
"example-label": "example-value",
},
query=gcp.bigquery.JobQueryArgs(
query="SELECT state FROM [lookerdata:cdc.project_tycho_reports]",
destination_table=gcp.bigquery.JobQueryDestinationTableArgs(
table_id=foo.id,
),
default_dataset=gcp.bigquery.JobQueryDefaultDatasetArgs(
dataset_id=bar.id,
),
allow_large_results=True,
flatten_results=True,
script_options=gcp.bigquery.JobQueryScriptOptionsArgs(
key_result_statement="LAST",
),
))
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/bigquery"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
bar, err := bigquery.NewDataset(ctx, "bar", &bigquery.DatasetArgs{
DatasetId: pulumi.String("job_query_dataset"),
FriendlyName: pulumi.String("test"),
Description: pulumi.String("This is a test description"),
Location: pulumi.String("US"),
})
if err != nil {
return err
}
foo, err := bigquery.NewTable(ctx, "foo", &bigquery.TableArgs{
DeletionProtection: pulumi.Bool(false),
DatasetId: bar.DatasetId,
TableId: pulumi.String("job_query_table"),
})
if err != nil {
return err
}
_, err = bigquery.NewJob(ctx, "job", &bigquery.JobArgs{
JobId: pulumi.String("job_query"),
Labels: pulumi.StringMap{
"example-label": pulumi.String("example-value"),
},
Query: &bigquery.JobQueryArgs{
Query: pulumi.String("SELECT state FROM [lookerdata:cdc.project_tycho_reports]"),
DestinationTable: &bigquery.JobQueryDestinationTableArgs{
TableId: foo.ID(),
},
DefaultDataset: &bigquery.JobQueryDefaultDatasetArgs{
DatasetId: bar.ID(),
},
AllowLargeResults: pulumi.Bool(true),
FlattenResults: pulumi.Bool(true),
ScriptOptions: &bigquery.JobQueryScriptOptionsArgs{
KeyResultStatement: pulumi.String("LAST"),
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var bar = new Gcp.BigQuery.Dataset("bar", new()
{
DatasetId = "job_query_dataset",
FriendlyName = "test",
Description = "This is a test description",
Location = "US",
});
var foo = new Gcp.BigQuery.Table("foo", new()
{
DeletionProtection = false,
DatasetId = bar.DatasetId,
TableId = "job_query_table",
});
var job = new Gcp.BigQuery.Job("job", new()
{
JobId = "job_query",
Labels =
{
{ "example-label", "example-value" },
},
Query = new Gcp.BigQuery.Inputs.JobQueryArgs
{
Query = "SELECT state FROM [lookerdata:cdc.project_tycho_reports]",
DestinationTable = new Gcp.BigQuery.Inputs.JobQueryDestinationTableArgs
{
TableId = foo.Id,
},
DefaultDataset = new Gcp.BigQuery.Inputs.JobQueryDefaultDatasetArgs
{
DatasetId = bar.Id,
},
AllowLargeResults = true,
FlattenResults = true,
ScriptOptions = new Gcp.BigQuery.Inputs.JobQueryScriptOptionsArgs
{
KeyResultStatement = "LAST",
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Table;
import com.pulumi.gcp.bigquery.TableArgs;
import com.pulumi.gcp.bigquery.Job;
import com.pulumi.gcp.bigquery.JobArgs;
import com.pulumi.gcp.bigquery.inputs.JobQueryArgs;
import com.pulumi.gcp.bigquery.inputs.JobQueryDestinationTableArgs;
import com.pulumi.gcp.bigquery.inputs.JobQueryDefaultDatasetArgs;
import com.pulumi.gcp.bigquery.inputs.JobQueryScriptOptionsArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var bar = new Dataset("bar", DatasetArgs.builder()
.datasetId("job_query_dataset")
.friendlyName("test")
.description("This is a test description")
.location("US")
.build());
var foo = new Table("foo", TableArgs.builder()
.deletionProtection(false)
.datasetId(bar.datasetId())
.tableId("job_query_table")
.build());
var job = new Job("job", JobArgs.builder()
.jobId("job_query")
.labels(Map.of("example-label", "example-value"))
.query(JobQueryArgs.builder()
.query("SELECT state FROM [lookerdata:cdc.project_tycho_reports]")
.destinationTable(JobQueryDestinationTableArgs.builder()
.tableId(foo.id())
.build())
.defaultDataset(JobQueryDefaultDatasetArgs.builder()
.datasetId(bar.id())
.build())
.allowLargeResults(true)
.flattenResults(true)
.scriptOptions(JobQueryScriptOptionsArgs.builder()
.keyResultStatement("LAST")
.build())
.build())
.build());
}
}
resources:
foo:
type: gcp:bigquery:Table
properties:
deletionProtection: false
datasetId: ${bar.datasetId}
tableId: job_query_table
bar:
type: gcp:bigquery:Dataset
properties:
datasetId: job_query_dataset
friendlyName: test
description: This is a test description
location: US
job:
type: gcp:bigquery:Job
properties:
jobId: job_query
labels:
example-label: example-value
query:
query: SELECT state FROM [lookerdata:cdc.project_tycho_reports]
destinationTable:
tableId: ${foo.id}
defaultDataset:
datasetId: ${bar.id}
allowLargeResults: true
flattenResults: true
scriptOptions:
keyResultStatement: LAST
Bigquery Job Load
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const bar = new gcp.bigquery.Dataset("bar", {
datasetId: "job_load_dataset",
friendlyName: "test",
description: "This is a test description",
location: "US",
});
const foo = new gcp.bigquery.Table("foo", {
deletionProtection: false,
datasetId: bar.datasetId,
tableId: "job_load_table",
});
const job = new gcp.bigquery.Job("job", {
jobId: "job_load",
labels: {
my_job: "load",
},
load: {
sourceUris: ["gs://cloud-samples-data/bigquery/us-states/us-states-by-date.csv"],
destinationTable: {
projectId: foo.project,
datasetId: foo.datasetId,
tableId: foo.tableId,
},
skipLeadingRows: 1,
schemaUpdateOptions: [
"ALLOW_FIELD_RELAXATION",
"ALLOW_FIELD_ADDITION",
],
writeDisposition: "WRITE_APPEND",
autodetect: true,
},
});
import pulumi
import pulumi_gcp as gcp
bar = gcp.bigquery.Dataset("bar",
dataset_id="job_load_dataset",
friendly_name="test",
description="This is a test description",
location="US")
foo = gcp.bigquery.Table("foo",
deletion_protection=False,
dataset_id=bar.dataset_id,
table_id="job_load_table")
job = gcp.bigquery.Job("job",
job_id="job_load",
labels={
"my_job": "load",
},
load=gcp.bigquery.JobLoadArgs(
source_uris=["gs://cloud-samples-data/bigquery/us-states/us-states-by-date.csv"],
destination_table=gcp.bigquery.JobLoadDestinationTableArgs(
project_id=foo.project,
dataset_id=foo.dataset_id,
table_id=foo.table_id,
),
skip_leading_rows=1,
schema_update_options=[
"ALLOW_FIELD_RELAXATION",
"ALLOW_FIELD_ADDITION",
],
write_disposition="WRITE_APPEND",
autodetect=True,
))
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/bigquery"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
bar, err := bigquery.NewDataset(ctx, "bar", &bigquery.DatasetArgs{
DatasetId: pulumi.String("job_load_dataset"),
FriendlyName: pulumi.String("test"),
Description: pulumi.String("This is a test description"),
Location: pulumi.String("US"),
})
if err != nil {
return err
}
foo, err := bigquery.NewTable(ctx, "foo", &bigquery.TableArgs{
DeletionProtection: pulumi.Bool(false),
DatasetId: bar.DatasetId,
TableId: pulumi.String("job_load_table"),
})
if err != nil {
return err
}
_, err = bigquery.NewJob(ctx, "job", &bigquery.JobArgs{
JobId: pulumi.String("job_load"),
Labels: pulumi.StringMap{
"my_job": pulumi.String("load"),
},
Load: &bigquery.JobLoadArgs{
SourceUris: pulumi.StringArray{
pulumi.String("gs://cloud-samples-data/bigquery/us-states/us-states-by-date.csv"),
},
DestinationTable: &bigquery.JobLoadDestinationTableArgs{
ProjectId: foo.Project,
DatasetId: foo.DatasetId,
TableId: foo.TableId,
},
SkipLeadingRows: pulumi.Int(1),
SchemaUpdateOptions: pulumi.StringArray{
pulumi.String("ALLOW_FIELD_RELAXATION"),
pulumi.String("ALLOW_FIELD_ADDITION"),
},
WriteDisposition: pulumi.String("WRITE_APPEND"),
Autodetect: pulumi.Bool(true),
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var bar = new Gcp.BigQuery.Dataset("bar", new()
{
DatasetId = "job_load_dataset",
FriendlyName = "test",
Description = "This is a test description",
Location = "US",
});
var foo = new Gcp.BigQuery.Table("foo", new()
{
DeletionProtection = false,
DatasetId = bar.DatasetId,
TableId = "job_load_table",
});
var job = new Gcp.BigQuery.Job("job", new()
{
JobId = "job_load",
Labels =
{
{ "my_job", "load" },
},
Load = new Gcp.BigQuery.Inputs.JobLoadArgs
{
SourceUris = new[]
{
"gs://cloud-samples-data/bigquery/us-states/us-states-by-date.csv",
},
DestinationTable = new Gcp.BigQuery.Inputs.JobLoadDestinationTableArgs
{
ProjectId = foo.Project,
DatasetId = foo.DatasetId,
TableId = foo.TableId,
},
SkipLeadingRows = 1,
SchemaUpdateOptions = new[]
{
"ALLOW_FIELD_RELAXATION",
"ALLOW_FIELD_ADDITION",
},
WriteDisposition = "WRITE_APPEND",
Autodetect = true,
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Table;
import com.pulumi.gcp.bigquery.TableArgs;
import com.pulumi.gcp.bigquery.Job;
import com.pulumi.gcp.bigquery.JobArgs;
import com.pulumi.gcp.bigquery.inputs.JobLoadArgs;
import com.pulumi.gcp.bigquery.inputs.JobLoadDestinationTableArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var bar = new Dataset("bar", DatasetArgs.builder()
.datasetId("job_load_dataset")
.friendlyName("test")
.description("This is a test description")
.location("US")
.build());
var foo = new Table("foo", TableArgs.builder()
.deletionProtection(false)
.datasetId(bar.datasetId())
.tableId("job_load_table")
.build());
var job = new Job("job", JobArgs.builder()
.jobId("job_load")
.labels(Map.of("my_job", "load"))
.load(JobLoadArgs.builder()
.sourceUris("gs://cloud-samples-data/bigquery/us-states/us-states-by-date.csv")
.destinationTable(JobLoadDestinationTableArgs.builder()
.projectId(foo.project())
.datasetId(foo.datasetId())
.tableId(foo.tableId())
.build())
.skipLeadingRows(1)
.schemaUpdateOptions(
"ALLOW_FIELD_RELAXATION",
"ALLOW_FIELD_ADDITION")
.writeDisposition("WRITE_APPEND")
.autodetect(true)
.build())
.build());
}
}
resources:
foo:
type: gcp:bigquery:Table
properties:
deletionProtection: false
datasetId: ${bar.datasetId}
tableId: job_load_table
bar:
type: gcp:bigquery:Dataset
properties:
datasetId: job_load_dataset
friendlyName: test
description: This is a test description
location: US
job:
type: gcp:bigquery:Job
properties:
jobId: job_load
labels:
my_job: load
load:
sourceUris:
- gs://cloud-samples-data/bigquery/us-states/us-states-by-date.csv
destinationTable:
projectId: ${foo.project}
datasetId: ${foo.datasetId}
tableId: ${foo.tableId}
skipLeadingRows: 1
schemaUpdateOptions:
- ALLOW_FIELD_RELAXATION
- ALLOW_FIELD_ADDITION
writeDisposition: WRITE_APPEND
autodetect: true
Bigquery Job Load Geojson
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const project = "my-project-name";
const bucket = new gcp.storage.Bucket("bucket", {
name: `${project}-bq-geojson`,
location: "US",
uniformBucketLevelAccess: true,
});
const object = new gcp.storage.BucketObject("object", {
name: "geojson-data.jsonl",
bucket: bucket.name,
content: `{"type":"Feature","properties":{"continent":"Europe","region":"Scandinavia"},"geometry":{"type":"Polygon","coordinates":[[[-30.94,53.33],[33.05,53.33],[33.05,71.86],[-30.94,71.86],[-30.94,53.33]]]}}
{"type":"Feature","properties":{"continent":"Africa","region":"West Africa"},"geometry":{"type":"Polygon","coordinates":[[[-23.91,0],[11.95,0],[11.95,18.98],[-23.91,18.98],[-23.91,0]]]}}
`,
});
const bar = new gcp.bigquery.Dataset("bar", {
datasetId: "job_load_dataset",
friendlyName: "test",
description: "This is a test description",
location: "US",
});
const foo = new gcp.bigquery.Table("foo", {
deletionProtection: false,
datasetId: bar.datasetId,
tableId: "job_load_table",
});
const job = new gcp.bigquery.Job("job", {
jobId: "job_load",
labels: {
my_job: "load",
},
load: {
sourceUris: [pulumi.interpolate`gs://${object.bucket}/${object.name}`],
destinationTable: {
projectId: foo.project,
datasetId: foo.datasetId,
tableId: foo.tableId,
},
writeDisposition: "WRITE_TRUNCATE",
autodetect: true,
sourceFormat: "NEWLINE_DELIMITED_JSON",
jsonExtension: "GEOJSON",
},
}, {
dependsOn: [object],
});
import pulumi
import pulumi_gcp as gcp
project = "my-project-name"
bucket = gcp.storage.Bucket("bucket",
name=f"{project}-bq-geojson",
location="US",
uniform_bucket_level_access=True)
object = gcp.storage.BucketObject("object",
name="geojson-data.jsonl",
bucket=bucket.name,
content="""{"type":"Feature","properties":{"continent":"Europe","region":"Scandinavia"},"geometry":{"type":"Polygon","coordinates":[[[-30.94,53.33],[33.05,53.33],[33.05,71.86],[-30.94,71.86],[-30.94,53.33]]]}}
{"type":"Feature","properties":{"continent":"Africa","region":"West Africa"},"geometry":{"type":"Polygon","coordinates":[[[-23.91,0],[11.95,0],[11.95,18.98],[-23.91,18.98],[-23.91,0]]]}}
""")
bar = gcp.bigquery.Dataset("bar",
dataset_id="job_load_dataset",
friendly_name="test",
description="This is a test description",
location="US")
foo = gcp.bigquery.Table("foo",
deletion_protection=False,
dataset_id=bar.dataset_id,
table_id="job_load_table")
job = gcp.bigquery.Job("job",
job_id="job_load",
labels={
"my_job": "load",
},
load=gcp.bigquery.JobLoadArgs(
    source_uris=[pulumi.Output.all(object.bucket, object.name).apply(lambda args: f"gs://{args[0]}/{args[1]}")],
destination_table=gcp.bigquery.JobLoadDestinationTableArgs(
project_id=foo.project,
dataset_id=foo.dataset_id,
table_id=foo.table_id,
),
write_disposition="WRITE_TRUNCATE",
autodetect=True,
source_format="NEWLINE_DELIMITED_JSON",
json_extension="GEOJSON",
),
opts = pulumi.ResourceOptions(depends_on=[object]))
package main
import (
"fmt"
"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/bigquery"
"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/storage"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
project := "my-project-name"
bucket, err := storage.NewBucket(ctx, "bucket", &storage.BucketArgs{
Name: pulumi.String(fmt.Sprintf("%v-bq-geojson", project)),
Location: pulumi.String("US"),
UniformBucketLevelAccess: pulumi.Bool(true),
})
if err != nil {
return err
}
object, err := storage.NewBucketObject(ctx, "object", &storage.BucketObjectArgs{
Name: pulumi.String("geojson-data.jsonl"),
Bucket: bucket.Name,
Content: pulumi.String("{\"type\":\"Feature\",\"properties\":{\"continent\":\"Europe\",\"region\":\"Scandinavia\"},\"geometry\":{\"type\":\"Polygon\",\"coordinates\":[[[-30.94,53.33],[33.05,53.33],[33.05,71.86],[-30.94,71.86],[-30.94,53.33]]]}}\n{\"type\":\"Feature\",\"properties\":{\"continent\":\"Africa\",\"region\":\"West Africa\"},\"geometry\":{\"type\":\"Polygon\",\"coordinates\":[[[-23.91,0],[11.95,0],[11.95,18.98],[-23.91,18.98],[-23.91,0]]]}}\n"),
})
if err != nil {
return err
}
bar, err := bigquery.NewDataset(ctx, "bar", &bigquery.DatasetArgs{
DatasetId: pulumi.String("job_load_dataset"),
FriendlyName: pulumi.String("test"),
Description: pulumi.String("This is a test description"),
Location: pulumi.String("US"),
})
if err != nil {
return err
}
foo, err := bigquery.NewTable(ctx, "foo", &bigquery.TableArgs{
DeletionProtection: pulumi.Bool(false),
DatasetId: bar.DatasetId,
TableId: pulumi.String("job_load_table"),
})
if err != nil {
return err
}
_, err = bigquery.NewJob(ctx, "job", &bigquery.JobArgs{
JobId: pulumi.String("job_load"),
Labels: pulumi.StringMap{
"my_job": pulumi.String("load"),
},
Load: &bigquery.JobLoadArgs{
SourceUris: pulumi.StringArray{
pulumi.All(object.Bucket, object.Name).ApplyT(func(_args []interface{}) (string, error) {
bucket := _args[0].(string)
name := _args[1].(string)
return fmt.Sprintf("gs://%v/%v", bucket, name), nil
}).(pulumi.StringOutput),
},
DestinationTable: &bigquery.JobLoadDestinationTableArgs{
ProjectId: foo.Project,
DatasetId: foo.DatasetId,
TableId: foo.TableId,
},
WriteDisposition: pulumi.String("WRITE_TRUNCATE"),
Autodetect: pulumi.Bool(true),
SourceFormat: pulumi.String("NEWLINE_DELIMITED_JSON"),
JsonExtension: pulumi.String("GEOJSON"),
},
}, pulumi.DependsOn([]pulumi.Resource{
object,
}))
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var project = "my-project-name";
var bucket = new Gcp.Storage.Bucket("bucket", new()
{
Name = $"{project}-bq-geojson",
Location = "US",
UniformBucketLevelAccess = true,
});
var @object = new Gcp.Storage.BucketObject("object", new()
{
Name = "geojson-data.jsonl",
Bucket = bucket.Name,
Content = @"{""type"":""Feature"",""properties"":{""continent"":""Europe"",""region"":""Scandinavia""},""geometry"":{""type"":""Polygon"",""coordinates"":[[[-30.94,53.33],[33.05,53.33],[33.05,71.86],[-30.94,71.86],[-30.94,53.33]]]}}
{""type"":""Feature"",""properties"":{""continent"":""Africa"",""region"":""West Africa""},""geometry"":{""type"":""Polygon"",""coordinates"":[[[-23.91,0],[11.95,0],[11.95,18.98],[-23.91,18.98],[-23.91,0]]]}}
",
});
var bar = new Gcp.BigQuery.Dataset("bar", new()
{
DatasetId = "job_load_dataset",
FriendlyName = "test",
Description = "This is a test description",
Location = "US",
});
var foo = new Gcp.BigQuery.Table("foo", new()
{
DeletionProtection = false,
DatasetId = bar.DatasetId,
TableId = "job_load_table",
});
var job = new Gcp.BigQuery.Job("job", new()
{
JobId = "job_load",
Labels =
{
{ "my_job", "load" },
},
Load = new Gcp.BigQuery.Inputs.JobLoadArgs
{
SourceUris = new[]
{
Output.Tuple(@object.Bucket, @object.Name).Apply(values =>
{
var bucket = values.Item1;
var name = values.Item2;
return $"gs://{bucket}/{name}";
}),
},
DestinationTable = new Gcp.BigQuery.Inputs.JobLoadDestinationTableArgs
{
ProjectId = foo.Project,
DatasetId = foo.DatasetId,
TableId = foo.TableId,
},
WriteDisposition = "WRITE_TRUNCATE",
Autodetect = true,
SourceFormat = "NEWLINE_DELIMITED_JSON",
JsonExtension = "GEOJSON",
},
}, new CustomResourceOptions
{
DependsOn =
{
@object,
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.storage.Bucket;
import com.pulumi.gcp.storage.BucketArgs;
import com.pulumi.gcp.storage.BucketObject;
import com.pulumi.gcp.storage.BucketObjectArgs;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Table;
import com.pulumi.gcp.bigquery.TableArgs;
import com.pulumi.gcp.bigquery.Job;
import com.pulumi.gcp.bigquery.JobArgs;
import com.pulumi.gcp.bigquery.inputs.JobLoadArgs;
import com.pulumi.gcp.bigquery.inputs.JobLoadDestinationTableArgs;
import com.pulumi.resources.CustomResourceOptions;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
final var project = "my-project-name";
var bucket = new Bucket("bucket", BucketArgs.builder()
.name(String.format("%s-bq-geojson", project))
.location("US")
.uniformBucketLevelAccess(true)
.build());
var object = new BucketObject("object", BucketObjectArgs.builder()
.name("geojson-data.jsonl")
.bucket(bucket.name())
.content("""
{"type":"Feature","properties":{"continent":"Europe","region":"Scandinavia"},"geometry":{"type":"Polygon","coordinates":[[[-30.94,53.33],[33.05,53.33],[33.05,71.86],[-30.94,71.86],[-30.94,53.33]]]}}
{"type":"Feature","properties":{"continent":"Africa","region":"West Africa"},"geometry":{"type":"Polygon","coordinates":[[[-23.91,0],[11.95,0],[11.95,18.98],[-23.91,18.98],[-23.91,0]]]}}
""")
.build());
var bar = new Dataset("bar", DatasetArgs.builder()
.datasetId("job_load_dataset")
.friendlyName("test")
.description("This is a test description")
.location("US")
.build());
var foo = new Table("foo", TableArgs.builder()
.deletionProtection(false)
.datasetId(bar.datasetId())
.tableId("job_load_table")
.build());
var job = new Job("job", JobArgs.builder()
.jobId("job_load")
.labels(Map.of("my_job", "load"))
.load(JobLoadArgs.builder()
.sourceUris(Output.tuple(object.bucket(), object.name()).applyValue(values -> {
var bucket = values.t1;
var name = values.t2;
return String.format("gs://%s/%s", bucket,name);
}))
.destinationTable(JobLoadDestinationTableArgs.builder()
.projectId(foo.project())
.datasetId(foo.datasetId())
.tableId(foo.tableId())
.build())
.writeDisposition("WRITE_TRUNCATE")
.autodetect(true)
.sourceFormat("NEWLINE_DELIMITED_JSON")
.jsonExtension("GEOJSON")
.build())
.build(), CustomResourceOptions.builder()
.dependsOn(object)
.build());
}
}
resources:
bucket:
type: gcp:storage:Bucket
properties:
name: ${project}-bq-geojson
location: US
uniformBucketLevelAccess: true
object:
type: gcp:storage:BucketObject
properties:
name: geojson-data.jsonl
bucket: ${bucket.name}
content: |
{"type":"Feature","properties":{"continent":"Europe","region":"Scandinavia"},"geometry":{"type":"Polygon","coordinates":[[[-30.94,53.33],[33.05,53.33],[33.05,71.86],[-30.94,71.86],[-30.94,53.33]]]}}
{"type":"Feature","properties":{"continent":"Africa","region":"West Africa"},"geometry":{"type":"Polygon","coordinates":[[[-23.91,0],[11.95,0],[11.95,18.98],[-23.91,18.98],[-23.91,0]]]}}
foo:
type: gcp:bigquery:Table
properties:
deletionProtection: false
datasetId: ${bar.datasetId}
tableId: job_load_table
bar:
type: gcp:bigquery:Dataset
properties:
datasetId: job_load_dataset
friendlyName: test
description: This is a test description
location: US
job:
type: gcp:bigquery:Job
properties:
jobId: job_load
labels:
my_job: load
load:
sourceUris:
- gs://${object.bucket}/${object.name}
destinationTable:
projectId: ${foo.project}
datasetId: ${foo.datasetId}
tableId: ${foo.tableId}
writeDisposition: WRITE_TRUNCATE
autodetect: true
sourceFormat: NEWLINE_DELIMITED_JSON
jsonExtension: GEOJSON
options:
    dependsOn:
- ${object}
variables:
project: my-project-name
Bigquery Job Load Parquet
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const test = new gcp.storage.Bucket("test", {
name: "job_load_bucket",
location: "US",
uniformBucketLevelAccess: true,
});
const testBucketObject = new gcp.storage.BucketObject("test", {
name: "job_load_bucket_object",
source: new pulumi.asset.FileAsset("./test-fixtures/test.parquet.gzip"),
bucket: test.name,
});
const testDataset = new gcp.bigquery.Dataset("test", {
datasetId: "job_load_dataset",
friendlyName: "test",
description: "This is a test description",
location: "US",
});
const testTable = new gcp.bigquery.Table("test", {
deletionProtection: false,
tableId: "job_load_table",
datasetId: testDataset.datasetId,
});
const job = new gcp.bigquery.Job("job", {
jobId: "job_load",
labels: {
my_job: "load",
},
load: {
sourceUris: [pulumi.interpolate`gs://${testBucketObject.bucket}/${testBucketObject.name}`],
destinationTable: {
projectId: testTable.project,
datasetId: testTable.datasetId,
tableId: testTable.tableId,
},
schemaUpdateOptions: [
"ALLOW_FIELD_RELAXATION",
"ALLOW_FIELD_ADDITION",
],
writeDisposition: "WRITE_APPEND",
sourceFormat: "PARQUET",
autodetect: true,
parquetOptions: {
enumAsString: true,
enableListInference: true,
},
},
});
import pulumi
import pulumi_gcp as gcp
test = gcp.storage.Bucket("test",
name="job_load_bucket",
location="US",
uniform_bucket_level_access=True)
test_bucket_object = gcp.storage.BucketObject("test",
name="job_load_bucket_object",
source=pulumi.FileAsset("./test-fixtures/test.parquet.gzip"),
bucket=test.name)
test_dataset = gcp.bigquery.Dataset("test",
dataset_id="job_load_dataset",
friendly_name="test",
description="This is a test description",
location="US")
test_table = gcp.bigquery.Table("test",
deletion_protection=False,
table_id="job_load_table",
dataset_id=test_dataset.dataset_id)
job = gcp.bigquery.Job("job",
job_id="job_load",
labels={
"my_job": "load",
},
load=gcp.bigquery.JobLoadArgs(
    source_uris=[pulumi.Output.all(test_bucket_object.bucket, test_bucket_object.name).apply(lambda args: f"gs://{args[0]}/{args[1]}")],
destination_table=gcp.bigquery.JobLoadDestinationTableArgs(
project_id=test_table.project,
dataset_id=test_table.dataset_id,
table_id=test_table.table_id,
),
schema_update_options=[
"ALLOW_FIELD_RELAXATION",
"ALLOW_FIELD_ADDITION",
],
write_disposition="WRITE_APPEND",
source_format="PARQUET",
autodetect=True,
parquet_options=gcp.bigquery.JobLoadParquetOptionsArgs(
enum_as_string=True,
enable_list_inference=True,
),
))
package main
import (
"fmt"
"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/bigquery"
"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/storage"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
test, err := storage.NewBucket(ctx, "test", &storage.BucketArgs{
Name: pulumi.String("job_load_bucket"),
Location: pulumi.String("US"),
UniformBucketLevelAccess: pulumi.Bool(true),
})
if err != nil {
return err
}
testBucketObject, err := storage.NewBucketObject(ctx, "test", &storage.BucketObjectArgs{
Name: pulumi.String("job_load_bucket_object"),
Source: pulumi.NewFileAsset("./test-fixtures/test.parquet.gzip"),
Bucket: test.Name,
})
if err != nil {
return err
}
testDataset, err := bigquery.NewDataset(ctx, "test", &bigquery.DatasetArgs{
DatasetId: pulumi.String("job_load_dataset"),
FriendlyName: pulumi.String("test"),
Description: pulumi.String("This is a test description"),
Location: pulumi.String("US"),
})
if err != nil {
return err
}
testTable, err := bigquery.NewTable(ctx, "test", &bigquery.TableArgs{
DeletionProtection: pulumi.Bool(false),
TableId: pulumi.String("job_load_table"),
DatasetId: testDataset.DatasetId,
})
if err != nil {
return err
}
_, err = bigquery.NewJob(ctx, "job", &bigquery.JobArgs{
JobId: pulumi.String("job_load"),
Labels: pulumi.StringMap{
"my_job": pulumi.String("load"),
},
Load: &bigquery.JobLoadArgs{
SourceUris: pulumi.StringArray{
pulumi.All(testBucketObject.Bucket, testBucketObject.Name).ApplyT(func(_args []interface{}) (string, error) {
bucket := _args[0].(string)
name := _args[1].(string)
return fmt.Sprintf("gs://%v/%v", bucket, name), nil
}).(pulumi.StringOutput),
},
DestinationTable: &bigquery.JobLoadDestinationTableArgs{
ProjectId: testTable.Project,
DatasetId: testTable.DatasetId,
TableId: testTable.TableId,
},
SchemaUpdateOptions: pulumi.StringArray{
pulumi.String("ALLOW_FIELD_RELAXATION"),
pulumi.String("ALLOW_FIELD_ADDITION"),
},
WriteDisposition: pulumi.String("WRITE_APPEND"),
SourceFormat: pulumi.String("PARQUET"),
Autodetect: pulumi.Bool(true),
ParquetOptions: &bigquery.JobLoadParquetOptionsArgs{
EnumAsString: pulumi.Bool(true),
EnableListInference: pulumi.Bool(true),
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var test = new Gcp.Storage.Bucket("test", new()
{
Name = "job_load_bucket",
Location = "US",
UniformBucketLevelAccess = true,
});
var testBucketObject = new Gcp.Storage.BucketObject("test", new()
{
Name = "job_load_bucket_object",
Source = new FileAsset("./test-fixtures/test.parquet.gzip"),
Bucket = test.Name,
});
var testDataset = new Gcp.BigQuery.Dataset("test", new()
{
DatasetId = "job_load_dataset",
FriendlyName = "test",
Description = "This is a test description",
Location = "US",
});
var testTable = new Gcp.BigQuery.Table("test", new()
{
DeletionProtection = false,
TableId = "job_load_table",
DatasetId = testDataset.DatasetId,
});
var job = new Gcp.BigQuery.Job("job", new()
{
JobId = "job_load",
Labels =
{
{ "my_job", "load" },
},
Load = new Gcp.BigQuery.Inputs.JobLoadArgs
{
SourceUris = new[]
{
Output.Tuple(testBucketObject.Bucket, testBucketObject.Name).Apply(values =>
{
var bucket = values.Item1;
var name = values.Item2;
return $"gs://{bucket}/{name}";
}),
},
DestinationTable = new Gcp.BigQuery.Inputs.JobLoadDestinationTableArgs
{
ProjectId = testTable.Project,
DatasetId = testTable.DatasetId,
TableId = testTable.TableId,
},
SchemaUpdateOptions = new[]
{
"ALLOW_FIELD_RELAXATION",
"ALLOW_FIELD_ADDITION",
},
WriteDisposition = "WRITE_APPEND",
SourceFormat = "PARQUET",
Autodetect = true,
ParquetOptions = new Gcp.BigQuery.Inputs.JobLoadParquetOptionsArgs
{
EnumAsString = true,
EnableListInference = true,
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.storage.Bucket;
import com.pulumi.gcp.storage.BucketArgs;
import com.pulumi.gcp.storage.BucketObject;
import com.pulumi.gcp.storage.BucketObjectArgs;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Table;
import com.pulumi.gcp.bigquery.TableArgs;
import com.pulumi.gcp.bigquery.Job;
import com.pulumi.gcp.bigquery.JobArgs;
import com.pulumi.gcp.bigquery.inputs.JobLoadArgs;
import com.pulumi.gcp.bigquery.inputs.JobLoadDestinationTableArgs;
import com.pulumi.gcp.bigquery.inputs.JobLoadParquetOptionsArgs;
import com.pulumi.asset.FileAsset;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var test = new Bucket("test", BucketArgs.builder()
.name("job_load_bucket")
.location("US")
.uniformBucketLevelAccess(true)
.build());
var testBucketObject = new BucketObject("testBucketObject", BucketObjectArgs.builder()
.name("job_load_bucket_object")
.source(new FileAsset("./test-fixtures/test.parquet.gzip"))
.bucket(test.name())
.build());
var testDataset = new Dataset("testDataset", DatasetArgs.builder()
.datasetId("job_load_dataset")
.friendlyName("test")
.description("This is a test description")
.location("US")
.build());
var testTable = new Table("testTable", TableArgs.builder()
.deletionProtection(false)
.tableId("job_load_table")
.datasetId(testDataset.datasetId())
.build());
var job = new Job("job", JobArgs.builder()
.jobId("job_load")
.labels(Map.of("my_job", "load"))
.load(JobLoadArgs.builder()
.sourceUris(Output.tuple(testBucketObject.bucket(), testBucketObject.name()).applyValue(values -> {
var bucket = values.t1;
var name = values.t2;
return String.format("gs://%s/%s", bucket,name);
}))
.destinationTable(JobLoadDestinationTableArgs.builder()
.projectId(testTable.project())
.datasetId(testTable.datasetId())
.tableId(testTable.tableId())
.build())
.schemaUpdateOptions(
"ALLOW_FIELD_RELAXATION",
"ALLOW_FIELD_ADDITION")
.writeDisposition("WRITE_APPEND")
.sourceFormat("PARQUET")
.autodetect(true)
.parquetOptions(JobLoadParquetOptionsArgs.builder()
.enumAsString(true)
.enableListInference(true)
.build())
.build())
.build());
}
}
resources:
test:
type: gcp:storage:Bucket
properties:
name: job_load_bucket
location: US
uniformBucketLevelAccess: true
testBucketObject:
type: gcp:storage:BucketObject
name: test
properties:
name: job_load_bucket_object
source:
fn::FileAsset: ./test-fixtures/test.parquet.gzip
bucket: ${test.name}
testDataset:
type: gcp:bigquery:Dataset
name: test
properties:
datasetId: job_load_dataset
friendlyName: test
description: This is a test description
location: US
testTable:
type: gcp:bigquery:Table
name: test
properties:
deletionProtection: false
tableId: job_load_table
datasetId: ${testDataset.datasetId}
job:
type: gcp:bigquery:Job
properties:
jobId: job_load
labels:
my_job: load
load:
sourceUris:
- gs://${testBucketObject.bucket}/${testBucketObject.name}
destinationTable:
projectId: ${testTable.project}
datasetId: ${testTable.datasetId}
tableId: ${testTable.tableId}
schemaUpdateOptions:
- ALLOW_FIELD_RELAXATION
- ALLOW_FIELD_ADDITION
writeDisposition: WRITE_APPEND
sourceFormat: PARQUET
autodetect: true
parquetOptions:
enumAsString: true
enableListInference: true
Bigquery Job Copy
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const count = 2;
const sourceDataset: gcp.bigquery.Dataset[] = [];
for (const range = {value: 0}; range.value < count; range.value++) {
sourceDataset.push(new gcp.bigquery.Dataset(`source-${range.value}`, {
datasetId: `job_copy_${range.value}_dataset`,
friendlyName: "test",
description: "This is a test description",
location: "US",
}));
}
const source: gcp.bigquery.Table[] = [];
for (const range = {value: 0}; range.value < count; range.value++) {
source.push(new gcp.bigquery.Table(`source-${range.value}`, {
deletionProtection: false,
datasetId: sourceDataset[range.value].datasetId,
tableId: `job_copy_${range.value}_table`,
schema: `[
{
"name": "name",
"type": "STRING",
"mode": "NULLABLE"
},
{
"name": "post_abbr",
"type": "STRING",
"mode": "NULLABLE"
},
{
"name": "date",
"type": "DATE",
"mode": "NULLABLE"
}
]
`,
}));
}
const destDataset = new gcp.bigquery.Dataset("dest", {
datasetId: "job_copy_dest_dataset",
friendlyName: "test",
description: "This is a test description",
location: "US",
});
const keyRing = new gcp.kms.KeyRing("key_ring", {
name: "example-keyring",
location: "global",
});
const cryptoKey = new gcp.kms.CryptoKey("crypto_key", {
name: "example-key",
keyRing: keyRing.id,
});
const project = gcp.organizations.getProject({
projectId: "my-project-name",
});
const encryptRole = new gcp.projects.IAMMember("encrypt_role", {
project: project.then(project => project.projectId),
role: "roles/cloudkms.cryptoKeyEncrypterDecrypter",
member: project.then(project => `serviceAccount:bq-${project.number}@bigquery-encryption.iam.gserviceaccount.com`),
});
const dest = new gcp.bigquery.Table("dest", {
deletionProtection: false,
datasetId: destDataset.datasetId,
tableId: "job_copy_dest_table",
schema: `[
{
"name": "name",
"type": "STRING",
"mode": "NULLABLE"
},
{
"name": "post_abbr",
"type": "STRING",
"mode": "NULLABLE"
},
{
"name": "date",
"type": "DATE",
"mode": "NULLABLE"
}
]
`,
encryptionConfiguration: {
kmsKeyName: cryptoKey.id,
},
}, {
dependsOn: [encryptRole],
});
const job = new gcp.bigquery.Job("job", {
jobId: "job_copy",
copy: {
sourceTables: [
{
projectId: source[0].project,
datasetId: source[0].datasetId,
tableId: source[0].tableId,
},
{
projectId: source[1].project,
datasetId: source[1].datasetId,
tableId: source[1].tableId,
},
],
destinationTable: {
projectId: dest.project,
datasetId: dest.datasetId,
tableId: dest.tableId,
},
destinationEncryptionConfiguration: {
kmsKeyName: cryptoKey.id,
},
},
}, {
dependsOn: [encryptRole],
});
import pulumi
import pulumi_gcp as gcp
count = 2
source_dataset = []
for i in range(0, count):
    source_dataset.append(gcp.bigquery.Dataset(f"source-{i}",
        dataset_id=f"job_copy_{i}_dataset",
friendly_name="test",
description="This is a test description",
location="US"))
source = []
for i in range(0, count):
    source.append(gcp.bigquery.Table(f"source-{i}",
        deletion_protection=False,
        dataset_id=source_dataset[i].dataset_id,
        table_id=f"job_copy_{i}_table",
schema="""[
{
"name": "name",
"type": "STRING",
"mode": "NULLABLE"
},
{
"name": "post_abbr",
"type": "STRING",
"mode": "NULLABLE"
},
{
"name": "date",
"type": "DATE",
"mode": "NULLABLE"
}
]
"""))
dest_dataset = gcp.bigquery.Dataset("dest",
dataset_id="job_copy_dest_dataset",
friendly_name="test",
description="This is a test description",
location="US")
key_ring = gcp.kms.KeyRing("key_ring",
name="example-keyring",
location="global")
crypto_key = gcp.kms.CryptoKey("crypto_key",
name="example-key",
key_ring=key_ring.id)
project = gcp.organizations.get_project(project_id="my-project-name")
encrypt_role = gcp.projects.IAMMember("encrypt_role",
project=project.project_id,
role="roles/cloudkms.cryptoKeyEncrypterDecrypter",
member=f"serviceAccount:bq-{project.number}@bigquery-encryption.iam.gserviceaccount.com")
dest = gcp.bigquery.Table("dest",
deletion_protection=False,
dataset_id=dest_dataset.dataset_id,
table_id="job_copy_dest_table",
schema="""[
{
"name": "name",
"type": "STRING",
"mode": "NULLABLE"
},
{
"name": "post_abbr",
"type": "STRING",
"mode": "NULLABLE"
},
{
"name": "date",
"type": "DATE",
"mode": "NULLABLE"
}
]
""",
encryption_configuration=gcp.bigquery.TableEncryptionConfigurationArgs(
kms_key_name=crypto_key.id,
),
opts = pulumi.ResourceOptions(depends_on=[encrypt_role]))
job = gcp.bigquery.Job("job",
job_id="job_copy",
copy=gcp.bigquery.JobCopyArgs(
source_tables=[
gcp.bigquery.JobCopySourceTableArgs(
project_id=source[0].project,
dataset_id=source[0].dataset_id,
table_id=source[0].table_id,
),
gcp.bigquery.JobCopySourceTableArgs(
project_id=source[1].project,
dataset_id=source[1].dataset_id,
table_id=source[1].table_id,
),
],
destination_table=gcp.bigquery.JobCopyDestinationTableArgs(
project_id=dest.project,
dataset_id=dest.dataset_id,
table_id=dest.table_id,
),
destination_encryption_configuration=gcp.bigquery.JobCopyDestinationEncryptionConfigurationArgs(
kms_key_name=crypto_key.id,
),
),
opts = pulumi.ResourceOptions(depends_on=[encrypt_role]))
package main
import (
"fmt"
"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/bigquery"
"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/kms"
"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/organizations"
"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/projects"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
count := 2
var sourceDataset []*bigquery.Dataset
for index := 0; index < count; index++ {
key0 := index
val0 := index
__res, err := bigquery.NewDataset(ctx, fmt.Sprintf("source-%v", key0), &bigquery.DatasetArgs{
DatasetId: pulumi.String(fmt.Sprintf("job_copy_%v_dataset", val0)),
FriendlyName: pulumi.String("test"),
Description: pulumi.String("This is a test description"),
Location: pulumi.String("US"),
})
if err != nil {
return err
}
sourceDataset = append(sourceDataset, __res)
}
var source []*bigquery.Table
for index := 0; index < count; index++ {
key0 := index
val0 := index
__res, err := bigquery.NewTable(ctx, fmt.Sprintf("source-%v", key0), &bigquery.TableArgs{
DeletionProtection: pulumi.Bool(false),
DatasetId: sourceDataset[val0].DatasetId,
TableId: pulumi.String(fmt.Sprintf("job_copy_%v_table", val0)),
Schema: pulumi.String(`[
{
"name": "name",
"type": "STRING",
"mode": "NULLABLE"
},
{
"name": "post_abbr",
"type": "STRING",
"mode": "NULLABLE"
},
{
"name": "date",
"type": "DATE",
"mode": "NULLABLE"
}
]
`),
})
if err != nil {
return err
}
source = append(source, __res)
}
destDataset, err := bigquery.NewDataset(ctx, "dest", &bigquery.DatasetArgs{
DatasetId: pulumi.String("job_copy_dest_dataset"),
FriendlyName: pulumi.String("test"),
Description: pulumi.String("This is a test description"),
Location: pulumi.String("US"),
})
if err != nil {
return err
}
keyRing, err := kms.NewKeyRing(ctx, "key_ring", &kms.KeyRingArgs{
Name: pulumi.String("example-keyring"),
Location: pulumi.String("global"),
})
if err != nil {
return err
}
cryptoKey, err := kms.NewCryptoKey(ctx, "crypto_key", &kms.CryptoKeyArgs{
Name: pulumi.String("example-key"),
KeyRing: keyRing.ID(),
})
if err != nil {
return err
}
project, err := organizations.LookupProject(ctx, &organizations.LookupProjectArgs{
ProjectId: pulumi.StringRef("my-project-name"),
}, nil)
if err != nil {
return err
}
encryptRole, err := projects.NewIAMMember(ctx, "encrypt_role", &projects.IAMMemberArgs{
Project: pulumi.String(project.ProjectId),
Role: pulumi.String("roles/cloudkms.cryptoKeyEncrypterDecrypter"),
Member: pulumi.String(fmt.Sprintf("serviceAccount:bq-%v@bigquery-encryption.iam.gserviceaccount.com", project.Number)),
})
if err != nil {
return err
}
dest, err := bigquery.NewTable(ctx, "dest", &bigquery.TableArgs{
DeletionProtection: pulumi.Bool(false),
DatasetId: destDataset.DatasetId,
TableId: pulumi.String("job_copy_dest_table"),
Schema: pulumi.String(`[
{
"name": "name",
"type": "STRING",
"mode": "NULLABLE"
},
{
"name": "post_abbr",
"type": "STRING",
"mode": "NULLABLE"
},
{
"name": "date",
"type": "DATE",
"mode": "NULLABLE"
}
]
`),
EncryptionConfiguration: &bigquery.TableEncryptionConfigurationArgs{
KmsKeyName: cryptoKey.ID(),
},
}, pulumi.DependsOn([]pulumi.Resource{
encryptRole,
}))
if err != nil {
return err
}
_, err = bigquery.NewJob(ctx, "job", &bigquery.JobArgs{
JobId: pulumi.String("job_copy"),
Copy: &bigquery.JobCopyArgs{
SourceTables: bigquery.JobCopySourceTableArray{
&bigquery.JobCopySourceTableArgs{
ProjectId: source[0].Project,
DatasetId: source[0].DatasetId,
TableId: source[0].TableId,
},
&bigquery.JobCopySourceTableArgs{
ProjectId: source[1].Project,
DatasetId: source[1].DatasetId,
TableId: source[1].TableId,
},
},
DestinationTable: &bigquery.JobCopyDestinationTableArgs{
ProjectId: dest.Project,
DatasetId: dest.DatasetId,
TableId: dest.TableId,
},
DestinationEncryptionConfiguration: &bigquery.JobCopyDestinationEncryptionConfigurationArgs{
KmsKeyName: cryptoKey.ID(),
},
},
}, pulumi.DependsOn([]pulumi.Resource{
encryptRole,
}))
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var count = 2;
var sourceDataset = new List<Gcp.BigQuery.Dataset>();
for (var rangeIndex = 0; rangeIndex < count; rangeIndex++)
{
var range = new { Value = rangeIndex };
sourceDataset.Add(new Gcp.BigQuery.Dataset($"source-{range.Value}", new()
{
DatasetId = $"job_copy_{range.Value}_dataset",
FriendlyName = "test",
Description = "This is a test description",
Location = "US",
}));
}
var source = new List<Gcp.BigQuery.Table>();
for (var rangeIndex = 0; rangeIndex < count; rangeIndex++)
{
var range = new { Value = rangeIndex };
source.Add(new Gcp.BigQuery.Table($"source-{range.Value}", new()
{
DeletionProtection = false,
DatasetId = sourceDataset[range.Value].DatasetId,
TableId = $"job_copy_{range.Value}_table",
Schema = @"[
{
""name"": ""name"",
""type"": ""STRING"",
""mode"": ""NULLABLE""
},
{
""name"": ""post_abbr"",
""type"": ""STRING"",
""mode"": ""NULLABLE""
},
{
""name"": ""date"",
""type"": ""DATE"",
""mode"": ""NULLABLE""
}
]
",
}));
}
var destDataset = new Gcp.BigQuery.Dataset("dest", new()
{
DatasetId = "job_copy_dest_dataset",
FriendlyName = "test",
Description = "This is a test description",
Location = "US",
});
var keyRing = new Gcp.Kms.KeyRing("key_ring", new()
{
Name = "example-keyring",
Location = "global",
});
var cryptoKey = new Gcp.Kms.CryptoKey("crypto_key", new()
{
Name = "example-key",
KeyRing = keyRing.Id,
});
var project = Gcp.Organizations.GetProject.Invoke(new()
{
ProjectId = "my-project-name",
});
var encryptRole = new Gcp.Projects.IAMMember("encrypt_role", new()
{
Project = project.Apply(getProjectResult => getProjectResult.ProjectId),
Role = "roles/cloudkms.cryptoKeyEncrypterDecrypter",
Member = $"serviceAccount:bq-{project.Apply(getProjectResult => getProjectResult.Number)}@bigquery-encryption.iam.gserviceaccount.com",
});
var dest = new Gcp.BigQuery.Table("dest", new()
{
DeletionProtection = false,
DatasetId = destDataset.DatasetId,
TableId = "job_copy_dest_table",
Schema = @"[
{
""name"": ""name"",
""type"": ""STRING"",
""mode"": ""NULLABLE""
},
{
""name"": ""post_abbr"",
""type"": ""STRING"",
""mode"": ""NULLABLE""
},
{
""name"": ""date"",
""type"": ""DATE"",
""mode"": ""NULLABLE""
}
]
",
EncryptionConfiguration = new Gcp.BigQuery.Inputs.TableEncryptionConfigurationArgs
{
KmsKeyName = cryptoKey.Id,
},
}, new CustomResourceOptions
{
DependsOn =
{
encryptRole,
},
});
var job = new Gcp.BigQuery.Job("job", new()
{
JobId = "job_copy",
Copy = new Gcp.BigQuery.Inputs.JobCopyArgs
{
SourceTables = new[]
{
new Gcp.BigQuery.Inputs.JobCopySourceTableArgs
{
ProjectId = source[0].Project,
DatasetId = source[0].DatasetId,
TableId = source[0].TableId,
},
new Gcp.BigQuery.Inputs.JobCopySourceTableArgs
{
ProjectId = source[1].Project,
DatasetId = source[1].DatasetId,
TableId = source[1].TableId,
},
},
DestinationTable = new Gcp.BigQuery.Inputs.JobCopyDestinationTableArgs
{
ProjectId = dest.Project,
DatasetId = dest.DatasetId,
TableId = dest.TableId,
},
DestinationEncryptionConfiguration = new Gcp.BigQuery.Inputs.JobCopyDestinationEncryptionConfigurationArgs
{
KmsKeyName = cryptoKey.Id,
},
},
}, new CustomResourceOptions
{
DependsOn =
{
encryptRole,
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Table;
import com.pulumi.gcp.bigquery.TableArgs;
import com.pulumi.gcp.kms.KeyRing;
import com.pulumi.gcp.kms.KeyRingArgs;
import com.pulumi.gcp.kms.CryptoKey;
import com.pulumi.gcp.kms.CryptoKeyArgs;
import com.pulumi.gcp.organizations.OrganizationsFunctions;
import com.pulumi.gcp.organizations.inputs.GetProjectArgs;
import com.pulumi.gcp.projects.IAMMember;
import com.pulumi.gcp.projects.IAMMemberArgs;
import com.pulumi.gcp.bigquery.inputs.TableEncryptionConfigurationArgs;
import com.pulumi.gcp.bigquery.Job;
import com.pulumi.gcp.bigquery.JobArgs;
import com.pulumi.gcp.bigquery.inputs.JobCopyArgs;
import com.pulumi.gcp.bigquery.inputs.JobCopySourceTableArgs;
import com.pulumi.gcp.bigquery.inputs.JobCopyDestinationTableArgs;
import com.pulumi.gcp.bigquery.inputs.JobCopyDestinationEncryptionConfigurationArgs;
import com.pulumi.codegen.internal.KeyedValue;
import com.pulumi.resources.CustomResourceOptions;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
        final var count = 2;
        final var sourceDataset = new ArrayList<Dataset>();
        for (var i = 0; i < count; i++) {
            sourceDataset.add(new Dataset("sourceDataset-" + i, DatasetArgs.builder()
                .datasetId(String.format("job_copy_%s_dataset", i))
                .friendlyName("test")
                .description("This is a test description")
                .location("US")
                .build()));
}
        final var source = new ArrayList<Table>();
        for (var i = 0; i < count; i++) {
            source.add(new Table("source-" + i, TableArgs.builder()
                .deletionProtection(false)
                .datasetId(sourceDataset.get(i).datasetId())
                .tableId(String.format("job_copy_%s_table", i))
.schema("""
[
{
"name": "name",
"type": "STRING",
"mode": "NULLABLE"
},
{
"name": "post_abbr",
"type": "STRING",
"mode": "NULLABLE"
},
{
"name": "date",
"type": "DATE",
"mode": "NULLABLE"
}
]
""")
                .build()));
}
var destDataset = new Dataset("destDataset", DatasetArgs.builder()
.datasetId("job_copy_dest_dataset")
.friendlyName("test")
.description("This is a test description")
.location("US")
.build());
var keyRing = new KeyRing("keyRing", KeyRingArgs.builder()
.name("example-keyring")
.location("global")
.build());
var cryptoKey = new CryptoKey("cryptoKey", CryptoKeyArgs.builder()
.name("example-key")
.keyRing(keyRing.id())
.build());
final var project = OrganizationsFunctions.getProject(GetProjectArgs.builder()
.projectId("my-project-name")
.build());
var encryptRole = new IAMMember("encryptRole", IAMMemberArgs.builder()
.project(project.applyValue(getProjectResult -> getProjectResult.projectId()))
.role("roles/cloudkms.cryptoKeyEncrypterDecrypter")
.member(String.format("serviceAccount:bq-%s@bigquery-encryption.iam.gserviceaccount.com", project.applyValue(getProjectResult -> getProjectResult.number())))
.build());
var dest = new Table("dest", TableArgs.builder()
.deletionProtection(false)
.datasetId(destDataset.datasetId())
.tableId("job_copy_dest_table")
.schema("""
[
{
"name": "name",
"type": "STRING",
"mode": "NULLABLE"
},
{
"name": "post_abbr",
"type": "STRING",
"mode": "NULLABLE"
},
{
"name": "date",
"type": "DATE",
"mode": "NULLABLE"
}
]
""")
.encryptionConfiguration(TableEncryptionConfigurationArgs.builder()
.kmsKeyName(cryptoKey.id())
.build())
.build(), CustomResourceOptions.builder()
.dependsOn(encryptRole)
.build());
var job = new Job("job", JobArgs.builder()
.jobId("job_copy")
.copy(JobCopyArgs.builder()
.sourceTables(
JobCopySourceTableArgs.builder()
                    .projectId(source.get(0).project())
                    .datasetId(source.get(0).datasetId())
                    .tableId(source.get(0).tableId())
.build(),
JobCopySourceTableArgs.builder()
                    .projectId(source.get(1).project())
                    .datasetId(source.get(1).datasetId())
                    .tableId(source.get(1).tableId())
.build())
.destinationTable(JobCopyDestinationTableArgs.builder()
.projectId(dest.project())
.datasetId(dest.datasetId())
.tableId(dest.tableId())
.build())
.destinationEncryptionConfiguration(JobCopyDestinationEncryptionConfigurationArgs.builder()
.kmsKeyName(cryptoKey.id())
.build())
.build())
.build(), CustomResourceOptions.builder()
.dependsOn(encryptRole)
.build());
}
}
A YAML example for this configuration is coming soon.
Bigquery Job Extract
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const source_oneDataset = new gcp.bigquery.Dataset("source-one", {
datasetId: "job_extract_dataset",
friendlyName: "test",
description: "This is a test description",
location: "US",
});
const source_one = new gcp.bigquery.Table("source-one", {
deletionProtection: false,
datasetId: source_oneDataset.datasetId,
tableId: "job_extract_table",
schema: `[
{
"name": "name",
"type": "STRING",
"mode": "NULLABLE"
},
{
"name": "post_abbr",
"type": "STRING",
"mode": "NULLABLE"
},
{
"name": "date",
"type": "DATE",
"mode": "NULLABLE"
}
]
`,
});
const dest = new gcp.storage.Bucket("dest", {
name: "job_extract_bucket",
location: "US",
forceDestroy: true,
});
const job = new gcp.bigquery.Job("job", {
jobId: "job_extract",
extract: {
destinationUris: [pulumi.interpolate`${dest.url}/extract`],
sourceTable: {
projectId: source_one.project,
datasetId: source_one.datasetId,
tableId: source_one.tableId,
},
destinationFormat: "NEWLINE_DELIMITED_JSON",
compression: "GZIP",
},
});
import pulumi
import pulumi_gcp as gcp
source_one_dataset = gcp.bigquery.Dataset("source-one",
dataset_id="job_extract_dataset",
friendly_name="test",
description="This is a test description",
location="US")
source_one = gcp.bigquery.Table("source-one",
deletion_protection=False,
dataset_id=source_one_dataset.dataset_id,
table_id="job_extract_table",
schema="""[
{
"name": "name",
"type": "STRING",
"mode": "NULLABLE"
},
{
"name": "post_abbr",
"type": "STRING",
"mode": "NULLABLE"
},
{
"name": "date",
"type": "DATE",
"mode": "NULLABLE"
}
]
""")
dest = gcp.storage.Bucket("dest",
name="job_extract_bucket",
location="US",
force_destroy=True)
job = gcp.bigquery.Job("job",
job_id="job_extract",
extract=gcp.bigquery.JobExtractArgs(
destination_uris=[dest.url.apply(lambda url: f"{url}/extract")],
source_table=gcp.bigquery.JobExtractSourceTableArgs(
project_id=source_one.project,
dataset_id=source_one.dataset_id,
table_id=source_one.table_id,
),
destination_format="NEWLINE_DELIMITED_JSON",
compression="GZIP",
))
package main
import (
"fmt"
"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/bigquery"
"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/storage"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
source_oneDataset, err := bigquery.NewDataset(ctx, "source-one", &bigquery.DatasetArgs{
DatasetId: pulumi.String("job_extract_dataset"),
FriendlyName: pulumi.String("test"),
Description: pulumi.String("This is a test description"),
Location: pulumi.String("US"),
})
if err != nil {
return err
}
source_one, err := bigquery.NewTable(ctx, "source-one", &bigquery.TableArgs{
DeletionProtection: pulumi.Bool(false),
DatasetId: source_oneDataset.DatasetId,
TableId: pulumi.String("job_extract_table"),
Schema: pulumi.String(`[
{
"name": "name",
"type": "STRING",
"mode": "NULLABLE"
},
{
"name": "post_abbr",
"type": "STRING",
"mode": "NULLABLE"
},
{
"name": "date",
"type": "DATE",
"mode": "NULLABLE"
}
]
`),
})
if err != nil {
return err
}
dest, err := storage.NewBucket(ctx, "dest", &storage.BucketArgs{
Name: pulumi.String("job_extract_bucket"),
Location: pulumi.String("US"),
ForceDestroy: pulumi.Bool(true),
})
if err != nil {
return err
}
_, err = bigquery.NewJob(ctx, "job", &bigquery.JobArgs{
JobId: pulumi.String("job_extract"),
Extract: &bigquery.JobExtractArgs{
DestinationUris: pulumi.StringArray{
dest.Url.ApplyT(func(url string) (string, error) {
return fmt.Sprintf("%v/extract", url), nil
}).(pulumi.StringOutput),
},
SourceTable: &bigquery.JobExtractSourceTableArgs{
ProjectId: source_one.Project,
DatasetId: source_one.DatasetId,
TableId: source_one.TableId,
},
DestinationFormat: pulumi.String("NEWLINE_DELIMITED_JSON"),
Compression: pulumi.String("GZIP"),
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var source_oneDataset = new Gcp.BigQuery.Dataset("source-one", new()
{
DatasetId = "job_extract_dataset",
FriendlyName = "test",
Description = "This is a test description",
Location = "US",
});
var source_one = new Gcp.BigQuery.Table("source-one", new()
{
DeletionProtection = false,
DatasetId = source_oneDataset.DatasetId,
TableId = "job_extract_table",
Schema = @"[
{
""name"": ""name"",
""type"": ""STRING"",
""mode"": ""NULLABLE""
},
{
""name"": ""post_abbr"",
""type"": ""STRING"",
""mode"": ""NULLABLE""
},
{
""name"": ""date"",
""type"": ""DATE"",
""mode"": ""NULLABLE""
}
]
",
});
var dest = new Gcp.Storage.Bucket("dest", new()
{
Name = "job_extract_bucket",
Location = "US",
ForceDestroy = true,
});
var job = new Gcp.BigQuery.Job("job", new()
{
JobId = "job_extract",
Extract = new Gcp.BigQuery.Inputs.JobExtractArgs
{
DestinationUris = new[]
{
dest.Url.Apply(url => $"{url}/extract"),
},
SourceTable = new Gcp.BigQuery.Inputs.JobExtractSourceTableArgs
{
ProjectId = source_one.Project,
DatasetId = source_one.DatasetId,
TableId = source_one.TableId,
},
DestinationFormat = "NEWLINE_DELIMITED_JSON",
Compression = "GZIP",
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Table;
import com.pulumi.gcp.bigquery.TableArgs;
import com.pulumi.gcp.storage.Bucket;
import com.pulumi.gcp.storage.BucketArgs;
import com.pulumi.gcp.bigquery.Job;
import com.pulumi.gcp.bigquery.JobArgs;
import com.pulumi.gcp.bigquery.inputs.JobExtractArgs;
import com.pulumi.gcp.bigquery.inputs.JobExtractSourceTableArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var source_oneDataset = new Dataset("source-oneDataset", DatasetArgs.builder()
.datasetId("job_extract_dataset")
.friendlyName("test")
.description("This is a test description")
.location("US")
.build());
var source_one = new Table("source-one", TableArgs.builder()
.deletionProtection(false)
.datasetId(source_oneDataset.datasetId())
.tableId("job_extract_table")
.schema("""
[
{
"name": "name",
"type": "STRING",
"mode": "NULLABLE"
},
{
"name": "post_abbr",
"type": "STRING",
"mode": "NULLABLE"
},
{
"name": "date",
"type": "DATE",
"mode": "NULLABLE"
}
]
""")
.build());
var dest = new Bucket("dest", BucketArgs.builder()
.name("job_extract_bucket")
.location("US")
.forceDestroy(true)
.build());
var job = new Job("job", JobArgs.builder()
.jobId("job_extract")
.extract(JobExtractArgs.builder()
.destinationUris(dest.url().applyValue(url -> String.format("%s/extract", url)))
.sourceTable(JobExtractSourceTableArgs.builder()
.projectId(source_one.project())
.datasetId(source_one.datasetId())
.tableId(source_one.tableId())
.build())
.destinationFormat("NEWLINE_DELIMITED_JSON")
.compression("GZIP")
.build())
.build());
}
}
resources:
source-one:
type: gcp:bigquery:Table
properties:
deletionProtection: false
datasetId: ${["source-oneDataset"].datasetId}
tableId: job_extract_table
schema: |
[
{
"name": "name",
"type": "STRING",
"mode": "NULLABLE"
},
{
"name": "post_abbr",
"type": "STRING",
"mode": "NULLABLE"
},
{
"name": "date",
"type": "DATE",
"mode": "NULLABLE"
}
]
source-oneDataset:
type: gcp:bigquery:Dataset
name: source-one
properties:
datasetId: job_extract_dataset
friendlyName: test
description: This is a test description
location: US
dest:
type: gcp:storage:Bucket
properties:
name: job_extract_bucket
location: US
forceDestroy: true
job:
type: gcp:bigquery:Job
properties:
jobId: job_extract
extract:
destinationUris:
- ${dest.url}/extract
sourceTable:
projectId: ${["source-one"].project}
datasetId: ${["source-one"].datasetId}
tableId: ${["source-one"].tableId}
destinationFormat: NEWLINE_DELIMITED_JSON
compression: GZIP
Create Job Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new Job(name: string, args: JobArgs, opts?: CustomResourceOptions);
@overload
def Job(resource_name: str,
args: JobArgs,
opts: Optional[ResourceOptions] = None)
@overload
def Job(resource_name: str,
opts: Optional[ResourceOptions] = None,
job_id: Optional[str] = None,
copy: Optional[JobCopyArgs] = None,
extract: Optional[JobExtractArgs] = None,
job_timeout_ms: Optional[str] = None,
labels: Optional[Mapping[str, str]] = None,
load: Optional[JobLoadArgs] = None,
location: Optional[str] = None,
project: Optional[str] = None,
query: Optional[JobQueryArgs] = None)
func NewJob(ctx *Context, name string, args JobArgs, opts ...ResourceOption) (*Job, error)
public Job(string name, JobArgs args, CustomResourceOptions? opts = null)
type: gcp:bigquery:Job
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args JobArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args JobArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args JobArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args JobArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args JobArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var jobResource = new Gcp.BigQuery.Job("jobResource", new()
{
JobId = "string",
Copy = new Gcp.BigQuery.Inputs.JobCopyArgs
{
SourceTables = new[]
{
new Gcp.BigQuery.Inputs.JobCopySourceTableArgs
{
TableId = "string",
DatasetId = "string",
ProjectId = "string",
},
},
CreateDisposition = "string",
DestinationEncryptionConfiguration = new Gcp.BigQuery.Inputs.JobCopyDestinationEncryptionConfigurationArgs
{
KmsKeyName = "string",
KmsKeyVersion = "string",
},
DestinationTable = new Gcp.BigQuery.Inputs.JobCopyDestinationTableArgs
{
TableId = "string",
DatasetId = "string",
ProjectId = "string",
},
WriteDisposition = "string",
},
Extract = new Gcp.BigQuery.Inputs.JobExtractArgs
{
DestinationUris = new[]
{
"string",
},
Compression = "string",
DestinationFormat = "string",
FieldDelimiter = "string",
PrintHeader = false,
SourceModel = new Gcp.BigQuery.Inputs.JobExtractSourceModelArgs
{
DatasetId = "string",
ModelId = "string",
ProjectId = "string",
},
SourceTable = new Gcp.BigQuery.Inputs.JobExtractSourceTableArgs
{
TableId = "string",
DatasetId = "string",
ProjectId = "string",
},
UseAvroLogicalTypes = false,
},
JobTimeoutMs = "string",
Labels =
{
{ "string", "string" },
},
Load = new Gcp.BigQuery.Inputs.JobLoadArgs
{
DestinationTable = new Gcp.BigQuery.Inputs.JobLoadDestinationTableArgs
{
TableId = "string",
DatasetId = "string",
ProjectId = "string",
},
SourceUris = new[]
{
"string",
},
MaxBadRecords = 0,
NullMarker = "string",
DestinationEncryptionConfiguration = new Gcp.BigQuery.Inputs.JobLoadDestinationEncryptionConfigurationArgs
{
KmsKeyName = "string",
KmsKeyVersion = "string",
},
Autodetect = false,
Encoding = "string",
FieldDelimiter = "string",
IgnoreUnknownValues = false,
JsonExtension = "string",
AllowJaggedRows = false,
CreateDisposition = "string",
ParquetOptions = new Gcp.BigQuery.Inputs.JobLoadParquetOptionsArgs
{
EnableListInference = false,
EnumAsString = false,
},
ProjectionFields = new[]
{
"string",
},
Quote = "string",
SchemaUpdateOptions = new[]
{
"string",
},
SkipLeadingRows = 0,
SourceFormat = "string",
AllowQuotedNewlines = false,
TimePartitioning = new Gcp.BigQuery.Inputs.JobLoadTimePartitioningArgs
{
Type = "string",
ExpirationMs = "string",
Field = "string",
},
WriteDisposition = "string",
},
Location = "string",
Project = "string",
Query = new Gcp.BigQuery.Inputs.JobQueryArgs
{
Query = "string",
ParameterMode = "string",
MaximumBytesBilled = "string",
DestinationEncryptionConfiguration = new Gcp.BigQuery.Inputs.JobQueryDestinationEncryptionConfigurationArgs
{
KmsKeyName = "string",
KmsKeyVersion = "string",
},
DestinationTable = new Gcp.BigQuery.Inputs.JobQueryDestinationTableArgs
{
TableId = "string",
DatasetId = "string",
ProjectId = "string",
},
Priority = "string",
MaximumBillingTier = 0,
DefaultDataset = new Gcp.BigQuery.Inputs.JobQueryDefaultDatasetArgs
{
DatasetId = "string",
ProjectId = "string",
},
AllowLargeResults = false,
FlattenResults = false,
CreateDisposition = "string",
SchemaUpdateOptions = new[]
{
"string",
},
ScriptOptions = new Gcp.BigQuery.Inputs.JobQueryScriptOptionsArgs
{
KeyResultStatement = "string",
StatementByteBudget = "string",
StatementTimeoutMs = "string",
},
UseLegacySql = false,
UseQueryCache = false,
UserDefinedFunctionResources = new[]
{
new Gcp.BigQuery.Inputs.JobQueryUserDefinedFunctionResourceArgs
{
InlineCode = "string",
ResourceUri = "string",
},
},
WriteDisposition = "string",
},
});
example, err := bigquery.NewJob(ctx, "jobResource", &bigquery.JobArgs{
JobId: pulumi.String("string"),
Copy: &bigquery.JobCopyArgs{
SourceTables: bigquery.JobCopySourceTableArray{
&bigquery.JobCopySourceTableArgs{
TableId: pulumi.String("string"),
DatasetId: pulumi.String("string"),
ProjectId: pulumi.String("string"),
},
},
CreateDisposition: pulumi.String("string"),
DestinationEncryptionConfiguration: &bigquery.JobCopyDestinationEncryptionConfigurationArgs{
KmsKeyName: pulumi.String("string"),
KmsKeyVersion: pulumi.String("string"),
},
DestinationTable: &bigquery.JobCopyDestinationTableArgs{
TableId: pulumi.String("string"),
DatasetId: pulumi.String("string"),
ProjectId: pulumi.String("string"),
},
WriteDisposition: pulumi.String("string"),
},
Extract: &bigquery.JobExtractArgs{
DestinationUris: pulumi.StringArray{
pulumi.String("string"),
},
Compression: pulumi.String("string"),
DestinationFormat: pulumi.String("string"),
FieldDelimiter: pulumi.String("string"),
PrintHeader: pulumi.Bool(false),
SourceModel: &bigquery.JobExtractSourceModelArgs{
DatasetId: pulumi.String("string"),
ModelId: pulumi.String("string"),
ProjectId: pulumi.String("string"),
},
SourceTable: &bigquery.JobExtractSourceTableArgs{
TableId: pulumi.String("string"),
DatasetId: pulumi.String("string"),
ProjectId: pulumi.String("string"),
},
UseAvroLogicalTypes: pulumi.Bool(false),
},
JobTimeoutMs: pulumi.String("string"),
Labels: pulumi.StringMap{
"string": pulumi.String("string"),
},
Load: &bigquery.JobLoadArgs{
DestinationTable: &bigquery.JobLoadDestinationTableArgs{
TableId: pulumi.String("string"),
DatasetId: pulumi.String("string"),
ProjectId: pulumi.String("string"),
},
SourceUris: pulumi.StringArray{
pulumi.String("string"),
},
MaxBadRecords: pulumi.Int(0),
NullMarker: pulumi.String("string"),
DestinationEncryptionConfiguration: &bigquery.JobLoadDestinationEncryptionConfigurationArgs{
KmsKeyName: pulumi.String("string"),
KmsKeyVersion: pulumi.String("string"),
},
Autodetect: pulumi.Bool(false),
Encoding: pulumi.String("string"),
FieldDelimiter: pulumi.String("string"),
IgnoreUnknownValues: pulumi.Bool(false),
JsonExtension: pulumi.String("string"),
AllowJaggedRows: pulumi.Bool(false),
CreateDisposition: pulumi.String("string"),
ParquetOptions: &bigquery.JobLoadParquetOptionsArgs{
EnableListInference: pulumi.Bool(false),
EnumAsString: pulumi.Bool(false),
},
ProjectionFields: pulumi.StringArray{
pulumi.String("string"),
},
Quote: pulumi.String("string"),
SchemaUpdateOptions: pulumi.StringArray{
pulumi.String("string"),
},
SkipLeadingRows: pulumi.Int(0),
SourceFormat: pulumi.String("string"),
AllowQuotedNewlines: pulumi.Bool(false),
TimePartitioning: &bigquery.JobLoadTimePartitioningArgs{
Type: pulumi.String("string"),
ExpirationMs: pulumi.String("string"),
Field: pulumi.String("string"),
},
WriteDisposition: pulumi.String("string"),
},
Location: pulumi.String("string"),
Project: pulumi.String("string"),
Query: &bigquery.JobQueryArgs{
Query: pulumi.String("string"),
ParameterMode: pulumi.String("string"),
MaximumBytesBilled: pulumi.String("string"),
DestinationEncryptionConfiguration: &bigquery.JobQueryDestinationEncryptionConfigurationArgs{
KmsKeyName: pulumi.String("string"),
KmsKeyVersion: pulumi.String("string"),
},
DestinationTable: &bigquery.JobQueryDestinationTableArgs{
TableId: pulumi.String("string"),
DatasetId: pulumi.String("string"),
ProjectId: pulumi.String("string"),
},
Priority: pulumi.String("string"),
MaximumBillingTier: pulumi.Int(0),
DefaultDataset: &bigquery.JobQueryDefaultDatasetArgs{
DatasetId: pulumi.String("string"),
ProjectId: pulumi.String("string"),
},
AllowLargeResults: pulumi.Bool(false),
FlattenResults: pulumi.Bool(false),
CreateDisposition: pulumi.String("string"),
SchemaUpdateOptions: pulumi.StringArray{
pulumi.String("string"),
},
ScriptOptions: &bigquery.JobQueryScriptOptionsArgs{
KeyResultStatement: pulumi.String("string"),
StatementByteBudget: pulumi.String("string"),
StatementTimeoutMs: pulumi.String("string"),
},
UseLegacySql: pulumi.Bool(false),
UseQueryCache: pulumi.Bool(false),
UserDefinedFunctionResources: bigquery.JobQueryUserDefinedFunctionResourceArray{
&bigquery.JobQueryUserDefinedFunctionResourceArgs{
InlineCode: pulumi.String("string"),
ResourceUri: pulumi.String("string"),
},
},
WriteDisposition: pulumi.String("string"),
},
})
var jobResource = new Job("jobResource", JobArgs.builder()
.jobId("string")
.copy(JobCopyArgs.builder()
.sourceTables(JobCopySourceTableArgs.builder()
.tableId("string")
.datasetId("string")
.projectId("string")
.build())
.createDisposition("string")
.destinationEncryptionConfiguration(JobCopyDestinationEncryptionConfigurationArgs.builder()
.kmsKeyName("string")
.kmsKeyVersion("string")
.build())
.destinationTable(JobCopyDestinationTableArgs.builder()
.tableId("string")
.datasetId("string")
.projectId("string")
.build())
.writeDisposition("string")
.build())
.extract(JobExtractArgs.builder()
.destinationUris("string")
.compression("string")
.destinationFormat("string")
.fieldDelimiter("string")
.printHeader(false)
.sourceModel(JobExtractSourceModelArgs.builder()
.datasetId("string")
.modelId("string")
.projectId("string")
.build())
.sourceTable(JobExtractSourceTableArgs.builder()
.tableId("string")
.datasetId("string")
.projectId("string")
.build())
.useAvroLogicalTypes(false)
.build())
.jobTimeoutMs("string")
.labels(Map.of("string", "string"))
.load(JobLoadArgs.builder()
.destinationTable(JobLoadDestinationTableArgs.builder()
.tableId("string")
.datasetId("string")
.projectId("string")
.build())
.sourceUris("string")
.maxBadRecords(0)
.nullMarker("string")
.destinationEncryptionConfiguration(JobLoadDestinationEncryptionConfigurationArgs.builder()
.kmsKeyName("string")
.kmsKeyVersion("string")
.build())
.autodetect(false)
.encoding("string")
.fieldDelimiter("string")
.ignoreUnknownValues(false)
.jsonExtension("string")
.allowJaggedRows(false)
.createDisposition("string")
.parquetOptions(JobLoadParquetOptionsArgs.builder()
.enableListInference(false)
.enumAsString(false)
.build())
.projectionFields("string")
.quote("string")
.schemaUpdateOptions("string")
.skipLeadingRows(0)
.sourceFormat("string")
.allowQuotedNewlines(false)
.timePartitioning(JobLoadTimePartitioningArgs.builder()
.type("string")
.expirationMs("string")
.field("string")
.build())
.writeDisposition("string")
.build())
.location("string")
.project("string")
.query(JobQueryArgs.builder()
.query("string")
.parameterMode("string")
.maximumBytesBilled("string")
.destinationEncryptionConfiguration(JobQueryDestinationEncryptionConfigurationArgs.builder()
.kmsKeyName("string")
.kmsKeyVersion("string")
.build())
.destinationTable(JobQueryDestinationTableArgs.builder()
.tableId("string")
.datasetId("string")
.projectId("string")
.build())
.priority("string")
.maximumBillingTier(0)
.defaultDataset(JobQueryDefaultDatasetArgs.builder()
.datasetId("string")
.projectId("string")
.build())
.allowLargeResults(false)
.flattenResults(false)
.createDisposition("string")
.schemaUpdateOptions("string")
.scriptOptions(JobQueryScriptOptionsArgs.builder()
.keyResultStatement("string")
.statementByteBudget("string")
.statementTimeoutMs("string")
.build())
.useLegacySql(false)
.useQueryCache(false)
.userDefinedFunctionResources(JobQueryUserDefinedFunctionResourceArgs.builder()
.inlineCode("string")
.resourceUri("string")
.build())
.writeDisposition("string")
.build())
.build());
job_resource = gcp.bigquery.Job("jobResource",
job_id="string",
copy=gcp.bigquery.JobCopyArgs(
source_tables=[gcp.bigquery.JobCopySourceTableArgs(
table_id="string",
dataset_id="string",
project_id="string",
)],
create_disposition="string",
destination_encryption_configuration=gcp.bigquery.JobCopyDestinationEncryptionConfigurationArgs(
kms_key_name="string",
kms_key_version="string",
),
destination_table=gcp.bigquery.JobCopyDestinationTableArgs(
table_id="string",
dataset_id="string",
project_id="string",
),
write_disposition="string",
),
extract=gcp.bigquery.JobExtractArgs(
destination_uris=["string"],
compression="string",
destination_format="string",
field_delimiter="string",
print_header=False,
source_model=gcp.bigquery.JobExtractSourceModelArgs(
dataset_id="string",
model_id="string",
project_id="string",
),
source_table=gcp.bigquery.JobExtractSourceTableArgs(
table_id="string",
dataset_id="string",
project_id="string",
),
use_avro_logical_types=False,
),
job_timeout_ms="string",
labels={
"string": "string",
},
load=gcp.bigquery.JobLoadArgs(
destination_table=gcp.bigquery.JobLoadDestinationTableArgs(
table_id="string",
dataset_id="string",
project_id="string",
),
source_uris=["string"],
max_bad_records=0,
null_marker="string",
destination_encryption_configuration=gcp.bigquery.JobLoadDestinationEncryptionConfigurationArgs(
kms_key_name="string",
kms_key_version="string",
),
autodetect=False,
encoding="string",
field_delimiter="string",
ignore_unknown_values=False,
json_extension="string",
allow_jagged_rows=False,
create_disposition="string",
parquet_options=gcp.bigquery.JobLoadParquetOptionsArgs(
enable_list_inference=False,
enum_as_string=False,
),
projection_fields=["string"],
quote="string",
schema_update_options=["string"],
skip_leading_rows=0,
source_format="string",
allow_quoted_newlines=False,
time_partitioning=gcp.bigquery.JobLoadTimePartitioningArgs(
type="string",
expiration_ms="string",
field="string",
),
write_disposition="string",
),
location="string",
project="string",
query=gcp.bigquery.JobQueryArgs(
query="string",
parameter_mode="string",
maximum_bytes_billed="string",
destination_encryption_configuration=gcp.bigquery.JobQueryDestinationEncryptionConfigurationArgs(
kms_key_name="string",
kms_key_version="string",
),
destination_table=gcp.bigquery.JobQueryDestinationTableArgs(
table_id="string",
dataset_id="string",
project_id="string",
),
priority="string",
maximum_billing_tier=0,
default_dataset=gcp.bigquery.JobQueryDefaultDatasetArgs(
dataset_id="string",
project_id="string",
),
allow_large_results=False,
flatten_results=False,
create_disposition="string",
schema_update_options=["string"],
script_options=gcp.bigquery.JobQueryScriptOptionsArgs(
key_result_statement="string",
statement_byte_budget="string",
statement_timeout_ms="string",
),
use_legacy_sql=False,
use_query_cache=False,
user_defined_function_resources=[gcp.bigquery.JobQueryUserDefinedFunctionResourceArgs(
inline_code="string",
resource_uri="string",
)],
write_disposition="string",
))
const jobResource = new gcp.bigquery.Job("jobResource", {
jobId: "string",
copy: {
sourceTables: [{
tableId: "string",
datasetId: "string",
projectId: "string",
}],
createDisposition: "string",
destinationEncryptionConfiguration: {
kmsKeyName: "string",
kmsKeyVersion: "string",
},
destinationTable: {
tableId: "string",
datasetId: "string",
projectId: "string",
},
writeDisposition: "string",
},
extract: {
destinationUris: ["string"],
compression: "string",
destinationFormat: "string",
fieldDelimiter: "string",
printHeader: false,
sourceModel: {
datasetId: "string",
modelId: "string",
projectId: "string",
},
sourceTable: {
tableId: "string",
datasetId: "string",
projectId: "string",
},
useAvroLogicalTypes: false,
},
jobTimeoutMs: "string",
labels: {
string: "string",
},
load: {
destinationTable: {
tableId: "string",
datasetId: "string",
projectId: "string",
},
sourceUris: ["string"],
maxBadRecords: 0,
nullMarker: "string",
destinationEncryptionConfiguration: {
kmsKeyName: "string",
kmsKeyVersion: "string",
},
autodetect: false,
encoding: "string",
fieldDelimiter: "string",
ignoreUnknownValues: false,
jsonExtension: "string",
allowJaggedRows: false,
createDisposition: "string",
parquetOptions: {
enableListInference: false,
enumAsString: false,
},
projectionFields: ["string"],
quote: "string",
schemaUpdateOptions: ["string"],
skipLeadingRows: 0,
sourceFormat: "string",
allowQuotedNewlines: false,
timePartitioning: {
type: "string",
expirationMs: "string",
field: "string",
},
writeDisposition: "string",
},
location: "string",
project: "string",
query: {
query: "string",
parameterMode: "string",
maximumBytesBilled: "string",
destinationEncryptionConfiguration: {
kmsKeyName: "string",
kmsKeyVersion: "string",
},
destinationTable: {
tableId: "string",
datasetId: "string",
projectId: "string",
},
priority: "string",
maximumBillingTier: 0,
defaultDataset: {
datasetId: "string",
projectId: "string",
},
allowLargeResults: false,
flattenResults: false,
createDisposition: "string",
schemaUpdateOptions: ["string"],
scriptOptions: {
keyResultStatement: "string",
statementByteBudget: "string",
statementTimeoutMs: "string",
},
useLegacySql: false,
useQueryCache: false,
userDefinedFunctionResources: [{
inlineCode: "string",
resourceUri: "string",
}],
writeDisposition: "string",
},
});
type: gcp:bigquery:Job
properties:
copy:
createDisposition: string
destinationEncryptionConfiguration:
kmsKeyName: string
kmsKeyVersion: string
destinationTable:
datasetId: string
projectId: string
tableId: string
sourceTables:
- datasetId: string
projectId: string
tableId: string
writeDisposition: string
extract:
compression: string
destinationFormat: string
destinationUris:
- string
fieldDelimiter: string
printHeader: false
sourceModel:
datasetId: string
modelId: string
projectId: string
sourceTable:
datasetId: string
projectId: string
tableId: string
useAvroLogicalTypes: false
jobId: string
jobTimeoutMs: string
labels:
string: string
load:
allowJaggedRows: false
allowQuotedNewlines: false
autodetect: false
createDisposition: string
destinationEncryptionConfiguration:
kmsKeyName: string
kmsKeyVersion: string
destinationTable:
datasetId: string
projectId: string
tableId: string
encoding: string
fieldDelimiter: string
ignoreUnknownValues: false
jsonExtension: string
maxBadRecords: 0
nullMarker: string
parquetOptions:
enableListInference: false
enumAsString: false
projectionFields:
- string
quote: string
schemaUpdateOptions:
- string
skipLeadingRows: 0
sourceFormat: string
sourceUris:
- string
timePartitioning:
expirationMs: string
field: string
type: string
writeDisposition: string
location: string
project: string
query:
allowLargeResults: false
createDisposition: string
defaultDataset:
datasetId: string
projectId: string
destinationEncryptionConfiguration:
kmsKeyName: string
kmsKeyVersion: string
destinationTable:
datasetId: string
projectId: string
tableId: string
flattenResults: false
maximumBillingTier: 0
maximumBytesBilled: string
parameterMode: string
priority: string
query: string
schemaUpdateOptions:
- string
scriptOptions:
keyResultStatement: string
statementByteBudget: string
statementTimeoutMs: string
useLegacySql: false
useQueryCache: false
userDefinedFunctionResources:
- inlineCode: string
resourceUri: string
writeDisposition: string
Job Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
The Job resource accepts the following input properties:
- Job
Id string - The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
- Copy
Job
Copy - Copies a table.
- Extract
Job
Extract - Configures an extract job.
- Job
Timeout stringMs - Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
- Labels Dictionary<string, string>
- The labels associated with this job. You can use these to organize and group your jobs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- Load
Job
Load - Configures a load job.
- Location string
- The geographic location of the job. The default value is US.
- Project string
- Query
Job
Query - Configures a query job.
- Job
Id string - The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
- Copy
Job
Copy Args - Copies a table.
- Extract
Job
Extract Args - Configures an extract job.
- Job
Timeout stringMs - Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
- Labels map[string]string
- The labels associated with this job. You can use these to organize and group your jobs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- Load
Job
Load Args - Configures a load job.
- Location string
- The geographic location of the job. The default value is US.
- Project string
- Query
Job
Query Args - Configures a query job.
- job
Id String - The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
- copy
Job
Copy - Copies a table.
- extract
Job
Extract - Configures an extract job.
- job
Timeout StringMs - Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
- labels Map<String,String>
- The labels associated with this job. You can use these to organize and group your jobs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- load
Job
Load - Configures a load job.
- location String
- The geographic location of the job. The default value is US.
- project String
- query
Job
Query - Configures a query job.
- job
Id string - The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
- copy
Job
Copy - Copies a table.
- extract
Job
Extract - Configures an extract job.
- job
Timeout stringMs - Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
- labels {[key: string]: string}
- The labels associated with this job. You can use these to organize and group your jobs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- load
Job
Load - Configures a load job.
- location string
- The geographic location of the job. The default value is US.
- project string
- query
Job
Query - Configures a query job.
- job_
id str - The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
- copy
Job
Copy Args - Copies a table.
- extract
Job
Extract Args - Configures an extract job.
- job_
timeout_ strms - Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
- labels Mapping[str, str]
- The labels associated with this job. You can use these to organize and group your jobs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- load
Job
Load Args - Configures a load job.
- location str
- The geographic location of the job. The default value is US.
- project str
- query
Job
Query Args - Configures a query job.
- job
Id String - The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
- copy Property Map
- Copies a table.
- extract Property Map
- Configures an extract job.
- job
Timeout StringMs - Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
- labels Map<String>
- The labels associated with this job. You can use these to organize and group your jobs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- load Property Map
- Configures a load job.
- location String
- The geographic location of the job. The default value is US.
- project String
- query Property Map
- Configures a query job.
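As a minimal TypeScript sketch (the resource name, label key, and timeout value are placeholders, not taken from the examples above), the scalar inputs above can be combined on a simple query job like this:
import * as gcp from "@pulumi/gcp";
// A labeled query job with a 10-minute timeout; labels are non-authoritative,
// so only the keys declared below are managed by Pulumi.
const labeledJob = new gcp.bigquery.Job("labeled-job", {
    jobId: "job_with_timeout",
    location: "US",
    labels: {
        team: "analytics",
    },
    jobTimeoutMs: "600000",
    query: {
        query: "SELECT 1",
    },
});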
Outputs
All input properties are implicitly available as output properties. Additionally, the Job resource produces the following output properties:
- Effective
Labels Dictionary<string, string> - (Output) All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- Id string
- The provider-assigned unique ID for this managed resource.
- Job
Type string - (Output) The type of the job.
- Pulumi
Labels Dictionary<string, string> - (Output) The combination of labels configured directly on the resource and default labels configured on the provider.
- Statuses
List<Job
Status> - The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
- User
Email string - Email address of the user who ran the job.
- Effective
Labels map[string]string - (Output) All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- Id string
- The provider-assigned unique ID for this managed resource.
- Job
Type string - (Output) The type of the job.
- Pulumi
Labels map[string]string - (Output) The combination of labels configured directly on the resource and default labels configured on the provider.
- Statuses
[]Job
Status - The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
- User
Email string - Email address of the user who ran the job.
- effective
Labels Map<String,String> - (Output) All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- id String
- The provider-assigned unique ID for this managed resource.
- job
Type String - (Output) The type of the job.
- pulumi
Labels Map<String,String> - (Output) The combination of labels configured directly on the resource and default labels configured on the provider.
- statuses
List<Job
Status> - The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
- user
Email String - Email address of the user who ran the job.
- effective
Labels {[key: string]: string} - (Output) All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- id string
- The provider-assigned unique ID for this managed resource.
- job
Type string - (Output) The type of the job.
- pulumi
Labels {[key: string]: string} - (Output) The combination of labels configured directly on the resource and default labels configured on the provider.
- statuses
Job
Status[] - The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
- user
Email string - Email address of the user who ran the job.
- effective_
labels Mapping[str, str] - (Output) All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- id str
- The provider-assigned unique ID for this managed resource.
- job_
type str - (Output) The type of the job.
- pulumi_
labels Mapping[str, str] - (Output) The combination of labels configured directly on the resource and default labels configured on the provider.
- statuses
Sequence[Job
Status] - The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
- user_
email str - Email address of the user who ran the job.
- effective
Labels Map<String> - (Output) All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- id String
- The provider-assigned unique ID for this managed resource.
- job
Type String - (Output) The type of the job.
- pulumi
Labels Map<String> - (Output) The combination of labels configured directly on the resource and default labels configured on the provider.
- statuses List<Property Map>
- The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
- user
Email String - Email address of the user who ran the job.
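For example, a short TypeScript sketch (the job below is a placeholder, not one of the examples above) that surfaces these output properties as stack exports:
import * as gcp from "@pulumi/gcp";
// A simple query job whose outputs are exported for inspection with `pulumi stack output`.
const job = new gcp.bigquery.Job("job", {
    jobId: "job_outputs_example",
    query: {
        query: "SELECT 1",
    },
});
export const jobType = job.jobType;
export const jobUserEmail = job.userEmail;
export const jobStatuses = job.statuses;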
Look up Existing Job Resource
Get an existing Job resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: JobState, opts?: CustomResourceOptions): Job
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
copy: Optional[JobCopyArgs] = None,
effective_labels: Optional[Mapping[str, str]] = None,
extract: Optional[JobExtractArgs] = None,
job_id: Optional[str] = None,
job_timeout_ms: Optional[str] = None,
job_type: Optional[str] = None,
labels: Optional[Mapping[str, str]] = None,
load: Optional[JobLoadArgs] = None,
location: Optional[str] = None,
project: Optional[str] = None,
pulumi_labels: Optional[Mapping[str, str]] = None,
query: Optional[JobQueryArgs] = None,
statuses: Optional[Sequence[JobStatusArgs]] = None,
user_email: Optional[str] = None) -> Job
func GetJob(ctx *Context, name string, id IDInput, state *JobState, opts ...ResourceOption) (*Job, error)
public static Job Get(string name, Input<string> id, JobState? state, CustomResourceOptions? opts = null)
public static Job get(String name, Output<String> id, JobState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
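As a hedged TypeScript sketch (the ID string below is a placeholder for the provider-assigned ID of an existing job):
import * as gcp from "@pulumi/gcp";
// Adopt the state of an existing job; replace the second argument with the real provider ID.
const existing = gcp.bigquery.Job.get("existing-job", "existing-job-id");
export const existingJobType = existing.jobType;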
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Copy
Job
Copy - Copies a table.
- Effective
Labels Dictionary<string, string> - (Output) All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- Extract
Job
Extract - Configures an extract job.
- Job
Id string - The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
- Job
Timeout stringMs - Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
- Job
Type string - (Output) The type of the job.
- Labels Dictionary<string, string>
- The labels associated with this job. You can use these to organize and group your jobs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- Load
Job
Load - Configures a load job.
- Location string
- The geographic location of the job. The default value is US.
- Project string
- Pulumi
Labels Dictionary<string, string> - (Output) The combination of labels configured directly on the resource and default labels configured on the provider.
- Query
Job
Query - Configures a query job.
- Statuses
List<Job
Status> - The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
- User
Email string - Email address of the user who ran the job.
- Copy
Job
Copy Args - Copies a table.
- Effective
Labels map[string]string - (Output) All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- Extract
Job
Extract Args - Configures an extract job.
- Job
Id string - The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
- Job
Timeout stringMs - Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
- Job
Type string - (Output) The type of the job.
- Labels map[string]string
- The labels associated with this job. You can use these to organize and group your jobs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- Load
Job
Load Args - Configures a load job.
- Location string
- The geographic location of the job. The default value is US.
- Project string
- Pulumi
Labels map[string]string - (Output) The combination of labels configured directly on the resource and default labels configured on the provider.
- Query
Job
Query Args - Configures a query job.
- Statuses
[]Job
Status Args - The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
- User
Email string - Email address of the user who ran the job.
- copy
Job
Copy - Copies a table.
- effective
Labels Map<String,String> - (Output) All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- extract
Job
Extract - Configures an extract job.
- job
Id String - The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
- job
Timeout StringMs - Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
- job
Type String - (Output) The type of the job.
- labels Map<String,String>
- The labels associated with this job. You can use these to organize and group your jobs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- load
Job
Load - Configures a load job.
- location String
- The geographic location of the job. The default value is US.
- project String
- pulumi
Labels Map<String,String> - (Output) The combination of labels configured directly on the resource and default labels configured on the provider.
- query
Job
Query - Configures a query job.
- statuses
List<Job
Status> - The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
- user
Email String - Email address of the user who ran the job.
- copy
Job
Copy - Copies a table.
- effective
Labels {[key: string]: string} - (Output) All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- extract
Job
Extract - Configures an extract job.
- job
Id string - The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
- job
Timeout stringMs - Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
- job
Type string - (Output) The type of the job.
- labels {[key: string]: string}
- The labels associated with this job. You can use these to organize and group your jobs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- load
Job
Load - Configures a load job.
- location string
- The geographic location of the job. The default value is US.
- project string
- pulumi
Labels {[key: string]: string} - (Output) The combination of labels configured directly on the resource and default labels configured on the provider.
- query
Job
Query - Configures a query job.
- statuses
Job
Status[] - The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
- user
Email string - Email address of the user who ran the job.
- copy
Job
Copy Args - Copies a table.
- effective_
labels Mapping[str, str] - (Output) All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- extract
Job
Extract Args - Configures an extract job.
- job_
id str - The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
- job_
timeout_ strms - Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
- job_
type str - (Output) The type of the job.
- labels Mapping[str, str]
- The labels associated with this job. You can use these to organize and group your jobs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- load
Job
Load Args - Configures a load job.
- location str
- The geographic location of the job. The default value is US.
- project str
- pulumi_
labels Mapping[str, str] - (Output) The combination of labels configured directly on the resource and default labels configured on the provider.
- query
Job
Query Args - Configures a query job.
- statuses
Sequence[Job
Status Args] - The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
- user_
email str - Email address of the user who ran the job.
- copy Property Map
- Copies a table.
- effective
Labels Map<String> - (Output) All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- extract Property Map
- Configures an extract job.
- job
Id String - The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
- job
Timeout StringMs - Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
- job
Type String - (Output) The type of the job.
- labels Map<String>
- The labels associated with this job. You can use these to organize and group your jobs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- load Property Map
- Configures a load job.
- location String
- The geographic location of the job. The default value is US.
- project String
- pulumi
Labels Map<String> - (Output) The combination of labels configured directly on the resource and default labels configured on the provider.
- query Property Map
- Configures a query job.
- statuses List<Property Map>
- The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
- user
Email String - Email address of the user who ran the job.
Supporting Types
JobCopy, JobCopyArgs
- Source
Tables List<JobCopy Source Table> - Source tables to copy. Structure is documented below.
- Create
Disposition string - Specifies whether the job is allowed to create new tables. The following values are supported:
CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table.
CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result.
Creation, truncation and append actions occur as one atomic update upon job completion
Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
- Destination
Encryption JobConfiguration Copy Destination Encryption Configuration - Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
- Destination
Table JobCopy Destination Table - The destination table. Structure is documented below.
- Write
Disposition string - Specifies the action that occurs if the destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result.
WRITE_APPEND: If the table already exists, BigQuery appends the data to the table.
WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result.
Each action is atomic and only occurs if BigQuery is able to complete the job successfully.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
- Source
Tables []JobCopy Source Table - Source tables to copy. Structure is documented below.
- Create
Disposition string - Specifies whether the job is allowed to create new tables. The following values are supported:
CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table.
CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result.
Creation, truncation and append actions occur as one atomic update upon job completion
Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
- Destination
Encryption JobConfiguration Copy Destination Encryption Configuration - Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
- Destination
Table JobCopy Destination Table - The destination table. Structure is documented below.
- Write
Disposition string - Specifies the action that occurs if the destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result.
WRITE_APPEND: If the table already exists, BigQuery appends the data to the table.
WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result.
Each action is atomic and only occurs if BigQuery is able to complete the job successfully.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
- source
Tables List<JobCopy Source Table> - Source tables to copy. Structure is documented below.
- create
Disposition String - Specifies whether the job is allowed to create new tables. The following values are supported:
CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table.
CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result.
Creation, truncation and append actions occur as one atomic update upon job completion
Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
- destination
Encryption JobConfiguration Copy Destination Encryption Configuration - Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
- destination
Table JobCopy Destination Table - The destination table. Structure is documented below.
- write
Disposition String - Specifies the action that occurs if the destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result.
WRITE_APPEND: If the table already exists, BigQuery appends the data to the table.
WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result.
Each action is atomic and only occurs if BigQuery is able to complete the job successfully.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
- source
Tables JobCopy Source Table[] - Source tables to copy. Structure is documented below.
- create
Disposition string - Specifies whether the job is allowed to create new tables. The following values are supported:
CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table.
CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result.
Creation, truncation and append actions occur as one atomic update upon job completion
Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
- destination
Encryption JobConfiguration Copy Destination Encryption Configuration - Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
- destination
Table JobCopy Destination Table - The destination table. Structure is documented below.
- write
Disposition string - Specifies the action that occurs if the destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result.
WRITE_APPEND: If the table already exists, BigQuery appends the data to the table.
WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result.
Each action is atomic and only occurs if BigQuery is able to complete the job successfully.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
- source_
tables Sequence[JobCopy Source Table] - Source tables to copy. Structure is documented below.
- create_
disposition str - Specifies whether the job is allowed to create new tables. The following values are supported:
CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table.
CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result.
Creation, truncation and append actions occur as one atomic update upon job completion
Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
- destination_
encryption_ Jobconfiguration Copy Destination Encryption Configuration - Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
- destination_
table JobCopy Destination Table - The destination table. Structure is documented below.
- write_
disposition str - Specifies the action that occurs if the destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result.
WRITE_APPEND: If the table already exists, BigQuery appends the data to the table.
WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result.
Each action is atomic and only occurs if BigQuery is able to complete the job successfully.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
- source
Tables List<Property Map> - Source tables to copy. Structure is documented below.
- create
Disposition String - Specifies whether the job is allowed to create new tables. The following values are supported:
CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table.
CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result.
Creation, truncation and append actions occur as one atomic update upon job completion
Default value is
CREATE_IF_NEEDED
. Possible values are:CREATE_IF_NEEDED
,CREATE_NEVER
. - destination
Encryption Property MapConfiguration - Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
- destination
Table Property Map - The destination table. Structure is documented below.
- write
Disposition String - Specifies the action that occurs if the destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result.
WRITE_APPEND: If the table already exists, BigQuery appends the data to the table.
WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result.
Each action is atomic and only occurs if BigQuery is able to complete the job successfully.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is
WRITE_EMPTY
. Possible values are:WRITE_TRUNCATE
,WRITE_APPEND
,WRITE_EMPTY
.
JobCopyDestinationEncryptionConfiguration, JobCopyDestinationEncryptionConfigurationArgs
- KmsKeyName string - Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
- KmsKeyVersion string - (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
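If the copy destination should be protected with a customer-managed key, the key is attached through this block. A hedged TypeScript sketch with a placeholder key resource name (the BigQuery service account must already have access to the key):
import * as gcp from "@pulumi/gcp";
// Copy into a table encrypted with a customer-managed Cloud KMS key.
// The table references and the key name are placeholders.
const encryptedCopy = new gcp.bigquery.Job("encrypted-copy", {
    jobId: "job_copy_kms_example",
    copy: {
        sourceTables: [{ tableId: "projects/my-project/datasets/src_ds/tables/src_table" }],
        destinationTable: { tableId: "projects/my-project/datasets/dest_ds/tables/dest_table" },
        destinationEncryptionConfiguration: {
            kmsKeyName: "projects/my-project/locations/us/keyRings/my-ring/cryptoKeys/my-key",
        },
    },
});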
JobCopyDestinationTable, JobCopyDestinationTableArgs
- TableId string - The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
- DatasetId string - The ID of the dataset containing this table.
- ProjectId string - The ID of the project containing this table.
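Both reference forms described above are accepted wherever a table block is expected. The two object literals below illustrate the short and the fully qualified form, using placeholder identifiers:
// Two equivalent ways to reference the same (hypothetical) table.
const shortForm = {
    projectId: "my-project",
    datasetId: "my_dataset",
    tableId: "my_table",  // bare table ID; project and dataset given separately
};
const longForm = {
    tableId: "projects/my-project/datasets/my_dataset/tables/my_table",  // fully qualified
};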
JobCopySourceTable, JobCopySourceTableArgs
- TableId string - The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
- DatasetId string - The ID of the dataset containing this table.
- ProjectId string - The ID of the project containing this table.
JobExtract, JobExtractArgs
- DestinationUris List<string> - A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written.
- Compression string - The compression type to use for exported files. Possible values include GZIP, DEFLATE, SNAPPY, and NONE. The default value is NONE. DEFLATE and SNAPPY are only supported for Avro.
- DestinationFormat string - The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO for tables and SAVED_MODEL for models. The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV. The default value for models is SAVED_MODEL.
- FieldDelimiter string - When extracting data in CSV format, this defines the delimiter to use between fields in the exported data. Default is ','.
- PrintHeader bool - Whether to print out a header row in the results. Default is true.
- SourceModel JobExtractSourceModel - A reference to the model being exported. Structure is documented below.
- SourceTable JobExtractSourceTable - A reference to the table being exported. Structure is documented below.
- UseAvroLogicalTypes bool - Whether to use logical types when extracting to AVRO format.
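A minimal TypeScript sketch of an extract configuration using these fields; the bucket and table identifiers are placeholders:
import * as gcp from "@pulumi/gcp";
// Export a table to Cloud Storage as gzip-compressed CSV.
const extractJob = new gcp.bigquery.Job("extract", {
    jobId: "job_extract_example",
    extract: {
        sourceTable: {
            projectId: "my-project",
            datasetId: "my_dataset",
            tableId: "my_table",
        },
        destinationUris: ["gs://my-bucket/exports/my_table-*.csv.gz"],
        destinationFormat: "CSV",
        compression: "GZIP",
        printHeader: true,
    },
});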
JobExtractSourceModel, JobExtractSourceModelArgs
- dataset_id str - The ID of the dataset containing this model.
- model_id str - The ID of the model.
- project_id str - The ID of the project containing this model.
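Exporting a model differs from exporting a table only in that sourceModel replaces sourceTable and the destination format is SAVED_MODEL. A hedged TypeScript sketch with placeholder identifiers:
import * as gcp from "@pulumi/gcp";
// Export a BigQuery ML model rather than a table; IDs are placeholders.
const modelExtract = new gcp.bigquery.Job("extract-model", {
    jobId: "job_extract_model_example",
    extract: {
        sourceModel: {
            projectId: "my-project",
            datasetId: "my_dataset",
            modelId: "my_model",
        },
        destinationUris: ["gs://my-bucket/models/my_model/"],
        destinationFormat: "SAVED_MODEL",
    },
});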
JobExtractSourceTable, JobExtractSourceTableArgs
- TableId string - The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
- DatasetId string - The ID of the dataset containing this table.
- ProjectId string - The ID of the project containing this table.
JobLoad, JobLoadArgs
- DestinationTable JobLoadDestinationTable - The destination table to load the data into. Structure is documented below.
- SourceUris List<string> - The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has to be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '*' wildcard character is not allowed.
- AllowJaggedRows bool - Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats.
- AllowQuotedNewlines bool - Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
- Autodetect bool - Indicates if we should automatically infer the options and schema for CSV and JSON sources.
- CreateDisposition string - Specifies whether the job is allowed to create new tables. The following values are supported:
CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table.
CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
- DestinationEncryptionConfiguration JobLoadDestinationEncryptionConfiguration - Custom encryption configuration (e.g., Cloud KMS keys). Structure is documented below.
- Encoding string - The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.
- FieldDelimiter string - The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').
- IgnoreUnknownValues bool - Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns. JSON: Named values that don't match any column names.
- JsonExtension string - If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON. For a sourceFormat other than JSON, omit this field. If the sourceFormat is newline-delimited JSON: for newline-delimited GeoJSON, set to GEOJSON.
- MaxBadRecords int - The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.
- NullMarker string - Specifies a string that represents a null value in a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as an empty value.
- ParquetOptions JobLoadParquetOptions - Parquet options for load and make external tables. Structure is documented below.
- ProjectionFields List<string> - If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result.
- Quote string - The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.
- SchemaUpdateOptions List<string> - Allows the schema of the destination table to be updated as a side effect of the load job if a schema is autodetected or supplied in the job configuration. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
- SkipLeadingRows int - The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped. When autodetect is on, the behavior is the following: skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data; otherwise data is read starting from the second row. skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped; otherwise row N is used to extract column names for the detected schema.
- SourceFormat string - The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET". For orc, specify "ORC". [Beta] For Bigtable, specify "BIGTABLE". The default value is CSV.
- TimePartitioning JobLoadTimePartitioning - Time-based partitioning specification for the destination table. Structure is documented below.
- WriteDisposition string - Specifies the action that occurs if the destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result.
WRITE_APPEND: If the table already exists, BigQuery appends the data to the table.
WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result.
Each action is atomic and only occurs if BigQuery is able to complete the job successfully.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
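A minimal TypeScript sketch of a load configuration built from the fields above; the bucket path and table identifiers are placeholders:
import * as gcp from "@pulumi/gcp";
// Load CSV files from Cloud Storage into a table, skipping the header row.
const loadJob = new gcp.bigquery.Job("load", {
    jobId: "job_load_example",
    load: {
        sourceUris: ["gs://my-bucket/incoming/*.csv"],
        destinationTable: {
            projectId: "my-project",
            datasetId: "my_dataset",
            tableId: "load_target",
        },
        sourceFormat: "CSV",
        skipLeadingRows: 1,
        autodetect: true,
        writeDisposition: "WRITE_APPEND",
    },
});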
- destinationTable Property Map - The destination table to load the data into. Structure is documented below.
- sourceUris List<String> - The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has to be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '*' wildcard character is not allowed.
- allowJaggedRows Boolean - Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats.
- allowQuotedNewlines Boolean - Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
- autodetect Boolean
- Indicates if we should automatically infer the options and schema for CSV and JSON sources.
- createDisposition String - Specifies whether the job is allowed to create new tables. The following values are supported:
CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table.
CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result.
Creation, truncation and append actions occur as one atomic update upon job completion
Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
- destinationEncryptionConfiguration Property Map - Custom encryption configuration (e.g., Cloud KMS keys). Structure is documented below.
- encoding String
- The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.
- fieldDelimiter String - The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').
- ignoreUnknownValues Boolean - Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names
- jsonExtension String - If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON. For a sourceFormat other than JSON, omit this field. If the sourceFormat is newline-delimited JSON: - for newline-delimited GeoJSON: set to GEOJSON.
- maxBadRecords Number - The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.
- nullMarker String - Specifies a string that represents a null value in a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as an empty value.
- parquetOptions Property Map - Parquet Options for load and make external tables. Structure is documented below.
- projectionFields List<String> - If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result.
- quote String
- The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.
- schemaUpdateOptions List<String> - Allows the schema of the destination table to be updated as a side effect of the load job if a schema is autodetected or supplied in the job configuration. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
- skipLeadingRows Number - The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped. When autodetect is on, the behavior is the following: skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema.
- sourceFormat String - The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET". For orc, specify "ORC". [Beta] For Bigtable, specify "BIGTABLE". The default value is CSV.
- timePartitioning Property Map - Time-based partitioning specification for the destination table. Structure is documented below.
- writeDisposition String - Specifies the action that occurs if the destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result.
WRITE_APPEND: If the table already exists, BigQuery appends the data to the table.
WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result.
Each action is atomic and only occurs if BigQuery is able to complete the job successfully.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
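To make the CSV-specific load options above concrete, here is a minimal load-job sketch in TypeScript; the bucket path, project, dataset, and table IDs are placeholders rather than values from this page.
import * as gcp from "@pulumi/gcp";

// Placeholder IDs throughout; the destination dataset and table are assumed to exist already.
const loadJob = new gcp.bigquery.Job("load-job", {
    jobId: "job_load_csv",
    load: {
        sourceUris: ["gs://example-bucket/exports/data-*.csv"],
        destinationTable: {
            projectId: "example-project",
            datasetId: "example_dataset",
            tableId: "example_table",
        },
        sourceFormat: "CSV",
        skipLeadingRows: 1,               // treat the first row as a header
        autodetect: true,                 // infer the schema from the file contents
        writeDisposition: "WRITE_APPEND", // append to existing data instead of requiring an empty table
        createDisposition: "CREATE_IF_NEEDED",
    },
});
With autodetect enabled, skipLeadingRows tells schema detection to treat the first row as a header, and WRITE_APPEND adds the loaded rows to any existing table data.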
JobLoadDestinationEncryptionConfiguration, JobLoadDestinationEncryptionConfigurationArgs
- kmsKeyName string - Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
- kmsKeyVersion string - (Output) Describes the Cloud KMS encryption key version used to protect destination BigQuery table.
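A hedged sketch of wiring this block into a load job; the KMS key name is a placeholder, and it assumes the BigQuery service account has already been granted permission to use the key.
import * as gcp from "@pulumi/gcp";

// The KMS key name is a placeholder; the BigQuery service account must already be able to use it.
const encryptedLoad = new gcp.bigquery.Job("encrypted-load", {
    jobId: "job_load_encrypted",
    load: {
        sourceUris: ["gs://example-bucket/data.csv"],
        destinationTable: {
            projectId: "example-project",
            datasetId: "example_dataset",
            tableId: "encrypted_table",
        },
        destinationEncryptionConfiguration: {
            kmsKeyName: "projects/example-project/locations/us/keyRings/example-ring/cryptoKeys/example-key",
        },
        autodetect: true,
    },
});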
JobLoadDestinationTable, JobLoadDestinationTableArgs
- tableId string - The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
- datasetId string - The ID of the dataset containing this table.
- projectId string - The ID of the project containing this table.
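The two accepted reference forms can be written as follows; all IDs are placeholders, and the input interface name is assumed from the Node.js SDK's usual naming.
import * as gcp from "@pulumi/gcp";

// Two equivalent ways to reference the same destination table.
const shortForm: gcp.types.input.bigquery.JobLoadDestinationTable = {
    projectId: "example-project",
    datasetId: "example_dataset",
    tableId: "example_table",
};
const fullyQualified: gcp.types.input.bigquery.JobLoadDestinationTable = {
    tableId: "projects/example-project/datasets/example_dataset/tables/example_table",
};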
JobLoadParquetOptions, JobLoadParquetOptionsArgs
- enableListInference boolean - If sourceFormat is set to PARQUET, indicates whether to use schema inference specifically for Parquet LIST logical type.
- enumAsString boolean - If sourceFormat is set to PARQUET, indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
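A minimal sketch of a Parquet load that sets both options; the URIs and IDs are placeholders.
import * as gcp from "@pulumi/gcp";

// Placeholder URIs and IDs; sourceFormat must be PARQUET for parquetOptions to apply.
const parquetLoad = new gcp.bigquery.Job("parquet-load", {
    jobId: "job_load_parquet",
    load: {
        sourceUris: ["gs://example-bucket/events/*.parquet"],
        destinationTable: {
            projectId: "example-project",
            datasetId: "example_dataset",
            tableId: "events",
        },
        sourceFormat: "PARQUET",
        parquetOptions: {
            enumAsString: true,        // infer Parquet ENUM as STRING instead of BYTES
            enableListInference: true, // use schema inference for the Parquet LIST logical type
        },
        writeDisposition: "WRITE_TRUNCATE",
    },
});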
JobLoadTimePartitioning, JobLoadTimePartitioningArgs
- type string - The only type supported is DAY, which will generate one partition per day. Providing an empty string used to cause an error, but in OnePlatform the field will be treated as unset.
- expirationMs string - Number of milliseconds for which to keep the storage for a partition. A wrapper is used here because 0 is an invalid value.
- field string - If not set, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the table is partitioned by this field. The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED. A wrapper is used here because an empty string is an invalid value.
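A sketch of loading into a day-partitioned destination; the partition column name and the 30-day expiration are illustrative values, not defaults.
import * as gcp from "@pulumi/gcp";

// The partition column and expiration are illustrative; expirationMs is a string per the schema above.
const partitionedLoad = new gcp.bigquery.Job("partitioned-load", {
    jobId: "job_load_partitioned",
    load: {
        sourceUris: ["gs://example-bucket/events.json"],
        sourceFormat: "NEWLINE_DELIMITED_JSON",
        autodetect: true,
        destinationTable: {
            projectId: "example-project",
            datasetId: "example_dataset",
            tableId: "events_partitioned",
        },
        timePartitioning: {
            type: "DAY",
            field: "event_time",        // must be a top-level TIMESTAMP or DATE column
            expirationMs: "2592000000", // keep each partition for 30 days
        },
    },
});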
JobQuery, JobQueryArgs
- query string - SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL.
NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".
- allowLargeResults boolean - If true and query uses legacy SQL dialect, allows the query to produce arbitrarily large result tables at a slight cost in performance. Requires destinationTable to be set. For standard SQL queries, this flag is ignored and large results are always allowed. However, you must still set destinationTable when result size exceeds the allowed maximum response size.
- createDisposition string - Specifies whether the job is allowed to create new tables. The following values are supported:
CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table.
CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
- defaultDataset JobQueryDefaultDataset - Specifies the default dataset to use for unqualified table names in the query. Note that this does not alter behavior of unqualified dataset names. Structure is documented below.
- destinationEncryptionConfiguration JobQueryDestinationEncryptionConfiguration - Custom encryption configuration (e.g., Cloud KMS keys). Structure is documented below.
- destinationTable JobQueryDestinationTable - Describes the table where the query results should be stored. This property must be set for large results that exceed the maximum response size. For queries that produce anonymous (cached) results, this field will be populated by BigQuery. Structure is documented below.
- flattenResults boolean - If true and query uses legacy SQL dialect, flattens all nested and repeated fields in the query results. allowLargeResults must be true if this is set to false. For standard SQL queries, this flag is ignored and results are never flattened.
- maximumBillingTier number - Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default.
- maximumBytesBilled string - Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). If unspecified, this will be set to your project default.
- parameterMode string - Standard SQL only. Set to POSITIONAL to use positional (?) query parameters or to NAMED to use named (@myparam) query parameters in this query.
- priority string - Specifies a priority for the query.
Default value is INTERACTIVE. Possible values are: INTERACTIVE, BATCH.
- schemaUpdateOptions string[] - Allows the schema of the destination table to be updated as a side effect of the query job. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
- scriptOptions JobQueryScriptOptions - Options controlling the execution of scripts. Structure is documented below.
- useLegacySql boolean - Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. If set to false, the query will use BigQuery's standard SQL.
- useQueryCache boolean - Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. Moreover, the query cache is only available when a query does not have a destination table specified. The default value is true.
- userDefinedFunctionResources JobQueryUserDefinedFunctionResource[] - Describes user-defined function resources used in the query. Structure is documented below.
- writeDisposition string - Specifies the action that occurs if the destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result.
WRITE_APPEND: If the table already exists, BigQuery appends the data to the table.
WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result.
Each action is atomic and only occurs if BigQuery is able to complete the job successfully.
Creation, truncation and append actions occur as one atomic update upon job completion.
Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
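As a complement to the legacy-SQL example earlier on this page, here is a hedged sketch of a standard-SQL query job that uses defaultDataset, BATCH priority, and a byte limit; the names and limits are placeholders.
import * as gcp from "@pulumi/gcp";

// Placeholder project, dataset, and table names; useLegacySql defaults to true, so it is disabled explicitly.
const batchQuery = new gcp.bigquery.Job("batch-query", {
    jobId: "job_query_batch",
    query: {
        query: "SELECT event_type, COUNT(*) AS n FROM events GROUP BY event_type",
        useLegacySql: false,
        priority: "BATCH",                // queue the query rather than run it interactively
        maximumBytesBilled: "1000000000", // fail instead of billing more than ~1 GB
        defaultDataset: {
            datasetId: "example_dataset", // lets the query reference `events` without qualification
            projectId: "example-project",
        },
        destinationTable: {
            projectId: "example-project",
            datasetId: "example_dataset",
            tableId: "event_counts",
        },
        writeDisposition: "WRITE_TRUNCATE",
    },
});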
JobQueryDefaultDataset, JobQueryDefaultDatasetArgs
- datasetId string - The dataset. Can be specified {{dataset_id}} if project_id is also set, or of the form projects/{{project}}/datasets/{{dataset_id}} if not.
- projectId string - The ID of the project containing this dataset.
JobQueryDestinationEncryptionConfiguration, JobQueryDestinationEncryptionConfigurationArgs
- kmsKeyName string - Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
- kmsKeyVersion string - (Output) Describes the Cloud KMS encryption key version used to protect destination BigQuery table.
JobQueryDestinationTable, JobQueryDestinationTableArgs
- tableId string - The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
- datasetId string - The ID of the dataset containing this table.
- projectId string - The ID of the project containing this table.
JobQueryScriptOptions, JobQueryScriptOptionsArgs
- keyResultStatement string - Determines which statement in the script represents the "key result", used to populate the schema and query results of the script job.
Possible values are: LAST, FIRST_SELECT.
- statementByteBudget string - Limit on the number of bytes billed per statement. Exceeding this budget results in an error.
- statementTimeoutMs string - Timeout period for each statement in a script.
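A hedged sketch of a multi-statement script that uses these options; the budget and timeout values are illustrative, and the dispositions are cleared on the assumption that a script, like DML, does not take a destination table.
import * as gcp from "@pulumi/gcp";

// Budget and timeout values are illustrative strings.
const scriptJob = new gcp.bigquery.Job("script-job", {
    jobId: "job_query_script",
    query: {
        query: "DECLARE threshold INT64 DEFAULT 100; SELECT COUNT(*) AS big_rows FROM example_dataset.events WHERE score > threshold;",
        useLegacySql: false,
        createDisposition: "", // cleared: a multi-statement script is assumed not to target a destination table
        writeDisposition: "",
        scriptOptions: {
            keyResultStatement: "FIRST_SELECT", // the SELECT supplies the job's result schema
            statementTimeoutMs: "60000",        // 60 seconds per statement
            statementByteBudget: "100000000",   // roughly 100 MB billed per statement
        },
    },
});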
JobQueryUserDefinedFunctionResource, JobQueryUserDefinedFunctionResourceArgs
- inlineCode string - An inline resource that contains code for a user-defined function (UDF). Providing an inline code resource is equivalent to providing a URI for a file containing the same code.
- resourceUri string - A code resource to load from a Google Cloud Storage URI (gs://bucket/path).
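A sketch of attaching both kinds of UDF resource to a legacy-SQL query job; the inline JavaScript, GCS URI, and table name are placeholders, and the query text would normally reference the functions those resources define.
import * as gcp from "@pulumi/gcp";

// UDF resources apply to legacy SQL queries.
const udfQuery = new gcp.bigquery.Job("udf-query", {
    jobId: "job_query_udf",
    query: {
        query: "SELECT name FROM [example_dataset.people]",
        useLegacySql: true,
        userDefinedFunctionResources: [
            { inlineCode: "function transform(row, emit) { emit({name: row.name.toUpperCase()}); }" },
            { resourceUri: "gs://example-bucket/udfs/helpers.js" },
        ],
    },
});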
JobStatus, JobStatusArgs
- Error
Results List<JobStatus Error Result> - (Output) Final error result of the job. If present, indicates that the job has completed and was unsuccessful. Structure is documented below.
- Errors
List<Job
Status Error> - (Output) The first errors encountered during the running of the job. The final message includes the number of errors that caused the process to stop. Errors here do not necessarily mean that the job has not completed or was unsuccessful. Structure is documented below.
- State string
- (Output) Running state of the job. Valid states include 'PENDING', 'RUNNING', and 'DONE'.
- Error
Results []JobStatus Error Result - (Output) Final error result of the job. If present, indicates that the job has completed and was unsuccessful. Structure is documented below.
- Errors
[]Job
Status Error - (Output) The first errors encountered during the running of the job. The final message includes the number of errors that caused the process to stop. Errors here do not necessarily mean that the job has not completed or was unsuccessful. Structure is documented below.
- State string
- (Output) Running state of the job. Valid states include 'PENDING', 'RUNNING', and 'DONE'.
- errorResults List<JobStatusErrorResult>
- (Output) Final error result of the job. If present, indicates that the job has completed and was unsuccessful. Structure is documented below.
- errors List<JobStatusError>
- (Output) The first errors encountered during the running of the job. The final message includes the number of errors that caused the process to stop. Errors here do not necessarily mean that the job has not completed or was unsuccessful. Structure is documented below.
- state String
- (Output) Running state of the job. Valid states include 'PENDING', 'RUNNING', and 'DONE'.
- errorResults JobStatusErrorResult[]
- (Output) Final error result of the job. If present, indicates that the job has completed and was unsuccessful. Structure is documented below.
- errors JobStatusError[]
- (Output) The first errors encountered during the running of the job. The final message includes the number of errors that caused the process to stop. Errors here do not necessarily mean that the job has not completed or was unsuccessful. Structure is documented below.
- state string
- (Output) Running state of the job. Valid states include 'PENDING', 'RUNNING', and 'DONE'.
- error_results Sequence[JobStatusErrorResult]
- (Output) Final error result of the job. If present, indicates that the job has completed and was unsuccessful. Structure is documented below.
- errors Sequence[JobStatusError]
- (Output) The first errors encountered during the running of the job. The final message includes the number of errors that caused the process to stop. Errors here do not necessarily mean that the job has not completed or was unsuccessful. Structure is documented below.
- state str
- (Output) Running state of the job. Valid states include 'PENDING', 'RUNNING', and 'DONE'.
- errorResults List<Property Map>
- (Output) Final error result of the job. If present, indicates that the job has completed and was unsuccessful. Structure is documented below.
- errors List<Property Map>
- (Output) The first errors encountered during the running of the job. The final message includes the number of errors that caused the process to stop. Errors here do not necessarily mean that the job has not completed or was unsuccessful. Structure is documented below.
- state String
- (Output) Running state of the job. Valid states include 'PENDING', 'RUNNING', and 'DONE'.
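A minimal sketch of reading these outputs in TypeScript, assuming the status list is exposed on the resource as the statuses output (the resource name and query below are placeholders):
import * as gcp from "@pulumi/gcp";

// A minimal sketch; "status_example" and the query are placeholders, and the
// status list is assumed to surface as the `statuses` output property.
const job = new gcp.bigquery.Job("status_example", {
    jobId: "job_status_example",
    query: { query: "SELECT 1" },
});

// Running state of the job: 'PENDING', 'RUNNING', or 'DONE'.
export const jobState = job.statuses.apply(s => s[0]?.state);

// Final error result, if the job completed unsuccessfully.
export const jobErrorResults = job.statuses.apply(s => s[0]?.errorResults);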
JobStatusError, JobStatusErrorArgs
JobStatusErrorResult, JobStatusErrorResultArgs
Import
Job can be imported using any of these accepted formats:
projects/{{project}}/jobs/{{job_id}}/location/{{location}}
projects/{{project}}/jobs/{{job_id}}
{{project}}/{{job_id}}/{{location}}
{{job_id}}/{{location}}
{{project}}/{{job_id}}
{{job_id}}
When using the pulumi import command, Job can be imported using one of the formats above. For example:
$ pulumi import gcp:bigquery/job:Job default projects/{{project}}/jobs/{{job_id}}/location/{{location}}
$ pulumi import gcp:bigquery/job:Job default projects/{{project}}/jobs/{{job_id}}
$ pulumi import gcp:bigquery/job:Job default {{project}}/{{job_id}}/{{location}}
$ pulumi import gcp:bigquery/job:Job default {{job_id}}/{{location}}
$ pulumi import gcp:bigquery/job:Job default {{project}}/{{job_id}}
$ pulumi import gcp:bigquery/job:Job default {{job_id}}
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Google Cloud (GCP) Classic pulumi/pulumi-gcp
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the google-beta Terraform Provider.