# google_bigquery_job

Jobs are actions that BigQuery runs on your behalf to load data, export data, query data, or copy data. Once a BigQuery job is created, it cannot be changed or deleted.

To get more information about Job, see:

* [API documentation](https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs)
* How-to Guides
    * [BigQuery Jobs Intro](https://cloud.google.com/bigquery/docs/jobs-overview)

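Because a job cannot be modified once created, changing any argument of this resource forces Terraform to replace it, and the replacement needs a `job_id` that has not already been used in the project. One way to handle this is to derive the ID from a value that changes with the job definition. The sketch below is illustrative only and assumes the `random_id` resource from the hashicorp/random provider:

```hcl
# Minimal sketch: regenerate the job_id whenever the query text changes,
# since BigQuery job IDs must be unique within a project.
resource "random_id" "job_suffix" {
  byte_length = 4

  # Changing any keeper value forces a new random suffix.
  keepers = {
    query = "SELECT 1"
  }
}

resource "google_bigquery_job" "example" {
  job_id = "job_query_${random_id.job_suffix.hex}"

  query {
    query = random_id.job_suffix.keepers.query
  }
}
```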

## Example Usage - Bigquery Job Query

resource "google_bigquery_table" "foo" {
  deletion_protection = false
  dataset_id = google_bigquery_dataset.bar.dataset_id
  table_id   = "job_query_table"
}

resource "google_bigquery_dataset" "bar" {
  dataset_id                  = "job_query_dataset"
  friendly_name               = "test"
  description                 = "This is a test description"
  location                    = "US"
}

resource "google_bigquery_job" "job" {
  job_id     = "job_query"

  labels = {
    "example-label" ="example-value"
  }

  query {
    query = "SELECT state FROM [lookerdata:cdc.project_tycho_reports]"

    destination_table {
      project_id = google_bigquery_table.foo.project
      dataset_id = google_bigquery_table.foo.dataset_id
      table_id   = google_bigquery_table.foo.table_id
    }

    allow_large_results = true
    flatten_results = true

    script_options {
      key_result_statement = "LAST"
    }
  }
}
```

## Example Usage - Bigquery Job Query Table Reference

resource "google_bigquery_table" "foo" {
  deletion_protection = false
  dataset_id = google_bigquery_dataset.bar.dataset_id
  table_id   = "job_query_table"
}

resource "google_bigquery_dataset" "bar" {
  dataset_id                  = "job_query_dataset"
  friendly_name               = "test"
  description                 = "This is a test description"
  location                    = "US"
}

resource "google_bigquery_job" "job" {
  job_id     = "job_query"

  labels = {
    "example-label" ="example-value"
  }

  query {
    query = "SELECT state FROM [lookerdata:cdc.project_tycho_reports]"

    destination_table {
      table_id = google_bigquery_table.foo.id
    }

    default_dataset {
      dataset_id = google_bigquery_dataset.bar.id
    }

    allow_large_results = true
    flatten_results = true

    script_options {
      key_result_statement = "LAST"
    }
  }
}
```

## Example Usage - Bigquery Job Load

resource "google_bigquery_table" "foo" {
  deletion_protection = false
  dataset_id = google_bigquery_dataset.bar.dataset_id
  table_id   = "job_load_table"
}

resource "google_bigquery_dataset" "bar" {
  dataset_id                  = "job_load_dataset"
  friendly_name               = "test"
  description                 = "This is a test description"
  location                    = "US"
}

resource "google_bigquery_job" "job" {
  job_id     = "job_load"

  labels = {
    "my_job" ="load"
  }

  load {
    source_uris = [
      "gs://cloud-samples-data/bigquery/us-states/us-states-by-date.csv",
    ]

    destination_table {
      project_id = google_bigquery_table.foo.project
      dataset_id = google_bigquery_table.foo.dataset_id
      table_id   = google_bigquery_table.foo.table_id
    }

    skip_leading_rows = 1
    schema_update_options = ["ALLOW_FIELD_RELAXATION", "ALLOW_FIELD_ADDITION"]

    write_disposition = "WRITE_APPEND"
    autodetect = true
  }
}
```

## Example Usage - Bigquery Job Load Geojson

```hcl
locals {
  project = "my-project-name" # Google Cloud Platform Project ID
}

resource "google_storage_bucket" "bucket" {
  name     = "${local.project}-bq-geojson"  # Every bucket name must be globally unique
  location = "US"
  uniform_bucket_level_access = true
}

resource "google_storage_bucket_object" "object" {
  name   = "geojson-data.jsonl"
  bucket = google_storage_bucket.bucket.name
  content = <<EOF
{"type":"Feature","properties":{"continent":"Europe","region":"Scandinavia"},"geometry":{"type":"Polygon","coordinates":[[[-30.94,53.33],[33.05,53.33],[33.05,71.86],[-30.94,71.86],[-30.94,53.33]]]}}
{"type":"Feature","properties":{"continent":"Africa","region":"West Africa"},"geometry":{"type":"Polygon","coordinates":[[[-23.91,0],[11.95,0],[11.95,18.98],[-23.91,18.98],[-23.91,0]]]}}
EOF
}

resource "google_bigquery_table" "foo" {
  deletion_protection = false
  dataset_id = google_bigquery_dataset.bar.dataset_id
  table_id   = "job_load_table"
}

resource "google_bigquery_dataset" "bar" {
  dataset_id                  = "job_load_dataset"
  friendly_name               = "test"
  description                 = "This is a test description"
  location                    = "US"
}

resource "google_bigquery_job" "job" {
  job_id     = "job_load"

  labels = {
    "my_job" = "load"
  }

  load {
    source_uris = [
      "gs://${google_storage_bucket_object.object.bucket}/${google_storage_bucket_object.object.name}"
    ]

    destination_table {
      project_id = google_bigquery_table.foo.project
      dataset_id = google_bigquery_table.foo.dataset_id
      table_id   = google_bigquery_table.foo.table_id
    }

    write_disposition = "WRITE_TRUNCATE"
    autodetect = true
    source_format = "NEWLINE_DELIMITED_JSON"
    json_extension = "GEOJSON"
  }

  depends_on = ["google_storage_bucket_object.object"]
}
```

## Example Usage - Bigquery Job Load Parquet

resource "google_storage_bucket" "test" {
  name                        = "job_load_bucket"
  location                    = "US"
  uniform_bucket_level_access = true
}

resource "google_storage_bucket_object" "test" {
  name   =  "job_load_bucket_object"
  source = "./test-fixtures/test.parquet.gzip"
  bucket = google_storage_bucket.test.name
}

resource "google_bigquery_dataset" "test" {
  dataset_id                  = "job_load_dataset"
  friendly_name               = "test"
  description                 = "This is a test description"
  location                    = "US"
}

resource "google_bigquery_table" "test" {
  deletion_protection = false
  table_id            = "job_load_table"
  dataset_id          = google_bigquery_dataset.test.dataset_id
}

resource "google_bigquery_job" "job" {
  job_id = "job_load"

  labels = {
    "my_job" ="load"
  }

  load {
    source_uris = [
      "gs://${google_storage_bucket_object.test.bucket}/${google_storage_bucket_object.test.name}"
    ]

    destination_table {
      project_id = google_bigquery_table.test.project
      dataset_id = google_bigquery_table.test.dataset_id
      table_id   = google_bigquery_table.test.table_id
    }

    schema_update_options = ["ALLOW_FIELD_RELAXATION", "ALLOW_FIELD_ADDITION"]
    write_disposition     = "WRITE_APPEND"
    source_format         = "PARQUET"
    autodetect            = true

    parquet_options {
      enum_as_string        = true
      enable_list_inference = true
    }
  }
}
```

## Example Usage - Bigquery Job Copy

```hcl
locals {
  count = 2
}

resource "google_bigquery_table" "source" {
  deletion_protection = false
  count = local.count

  dataset_id = google_bigquery_dataset.source[count.index].dataset_id
  table_id   = "job_copy_${count.index}_table"

  schema = <<EOF
[
  {
    "name": "name",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "post_abbr",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "date",
    "type": "DATE",
    "mode": "NULLABLE"
  }
]
EOF
}

resource "google_bigquery_dataset" "source" {
  count = local.count

  dataset_id                  = "job_copy_${count.index}_dataset"
  friendly_name               = "test"
  description                 = "This is a test description"
  location                    = "US"
}

resource "google_bigquery_table" "dest" {
  deletion_protection = false
  dataset_id = google_bigquery_dataset.dest.dataset_id
  table_id   = "job_copy_dest_table"

  schema = <<EOF
[
  {
    "name": "name",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "post_abbr",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "date",
    "type": "DATE",
    "mode": "NULLABLE"
  }
]
EOF

  encryption_configuration {
    kms_key_name = google_kms_crypto_key.crypto_key.id
  }

  depends_on = ["google_project_iam_member.encrypt_role"]
}

resource "google_bigquery_dataset" "dest" {
  dataset_id    = "job_copy_dest_dataset"
  friendly_name = "test"
  description   = "This is a test description"
  location      = "US"
}

resource "google_kms_crypto_key" "crypto_key" {
  name     = "example-key"
  key_ring = google_kms_key_ring.key_ring.id
}

resource "google_kms_key_ring" "key_ring" {
  name     = "example-keyring"
  location = "global"
}

data "google_project" "project" {
  project_id = "my-project-name"
}

resource "google_project_iam_member" "encrypt_role" {
  project = data.google_project.project.project_id
  role = "roles/cloudkms.cryptoKeyEncrypterDecrypter"
  member = "serviceAccount:bq-${data.google_project.project.number}@bigquery-encryption.iam.gserviceaccount.com"
}

resource "google_bigquery_job" "job" {
  job_id     = "job_copy"

  copy {
    source_tables {
      project_id = google_bigquery_table.source[0].project
      dataset_id = google_bigquery_table.source[0].dataset_id
      table_id   = google_bigquery_table.source[0].table_id
    }

    source_tables {
      project_id = google_bigquery_table.source[1].project
      dataset_id = google_bigquery_table.source[1].dataset_id
      table_id   = google_bigquery_table.source[1].table_id
    }

    destination_table {
      project_id = google_bigquery_table.dest.project
      dataset_id = google_bigquery_table.dest.dataset_id
      table_id   = google_bigquery_table.dest.table_id
    }

    destination_encryption_configuration {
      kms_key_name = google_kms_crypto_key.crypto_key.id
    }
  }

  depends_on = ["google_project_iam_member.encrypt_role"]
}
```

## Example Usage - Bigquery Job Extract

resource "google_bigquery_table" "source-one" {
  deletion_protection = false
  dataset_id = google_bigquery_dataset.source-one.dataset_id
  table_id   = "job_extract_table"

  schema = <<EOF
[
  {
    "name": "name",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "post_abbr",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "date",
    "type": "DATE",
    "mode": "NULLABLE"
  }
]
EOF
}

resource "google_bigquery_dataset" "source-one" {
  dataset_id    = "job_extract_dataset"
  friendly_name = "test"
  description   = "This is a test description"
  location      = "US"
}

resource "google_storage_bucket" "dest" {
  name          = "job_extract_bucket"
  location      = "US"
  force_destroy = true
}

resource "google_bigquery_job" "job" {
  job_id     = "job_extract"

  extract {
    destination_uris = ["${google_storage_bucket.dest.url}/extract"]

    source_table {
      project_id = google_bigquery_table.source-one.project
      dataset_id = google_bigquery_table.source-one.dataset_id
      table_id   = google_bigquery_table.source-one.table_id
    }

    destination_format = "NEWLINE_DELIMITED_JSON"
    compression = "GZIP"
  }
}
```

## Argument Reference

The following arguments are supported:

The query block supports:

The destination_table block supports:

The user_defined_function_resources block supports:

The default_dataset block supports:

The destination_encryption_configuration block supports:

The script_options block supports:

The load block supports:

The destination_table block supports:

The time_partitioning block supports:

The destination_encryption_configuration block supports:

The parquet_options block supports:

The copy block supports:

The source_tables block supports:

The destination_table block supports:

The destination_encryption_configuration block supports:

The extract block supports:

The source_table block supports:

The source_model block supports:
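
The examples above never exercise `user_defined_function_resources`. As a minimal sketch (the Cloud Storage path is a hypothetical placeholder), a legacy-SQL query job can reference a JavaScript UDF like this:

```hcl
resource "google_bigquery_job" "udf_job" {
  job_id = "job_query_udf"

  query {
    # UDF resources apply to legacy SQL queries.
    query          = "SELECT state FROM [lookerdata:cdc.project_tycho_reports]"
    use_legacy_sql = true

    user_defined_function_resources {
      # Hypothetical Cloud Storage path to a JavaScript UDF file.
      resource_uri = "gs://my-udf-bucket/udf.js"
    }
  }
}
```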


## Attributes Reference

In addition to the arguments listed above, the following computed attributes are exported:

The status block contains:

The error_result block contains:

The errors block contains:
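
As a usage sketch, the computed `status` attribute can be surfaced through an output. `status` is a list, so its first element carries the job state:

```hcl
output "job_state" {
  # One of "PENDING", "RUNNING", or "DONE" depending on job progress.
  value = google_bigquery_job.job.status[0].state
}
```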

## Timeouts

This resource provides the following Timeouts configuration options:
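
As a sketch, assuming the standard `create`, `update`, and `delete` options, the defaults can be overridden with a `timeouts` block:

```hcl
resource "google_bigquery_job" "job" {
  job_id = "job_query"

  query {
    query = "SELECT 1"
  }

  # Illustrative values; size these to how long your jobs normally take.
  timeouts {
    create = "10m"
    delete = "10m"
  }
}
```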

## Import

Job can be imported using any of these accepted formats:

* `projects/{{project}}/jobs/{{job_id}}/location/{{location}}`
* `projects/{{project}}/jobs/{{job_id}}`
* `{{project}}/{{job_id}}/{{location}}`
* `{{job_id}}/{{location}}`
* `{{project}}/{{job_id}}`
* `{{job_id}}`

In Terraform v1.5.0 and later, use an import block to import Job using one of the formats above. For example:

```hcl
import {
  id = "projects/{{project}}/jobs/{{job_id}}/location/{{location}}"
  to = google_bigquery_job.default
}
```

When using the terraform import command, Job can be imported using one of the formats above. For example:

```sh
$ terraform import google_bigquery_job.default projects/{{project}}/jobs/{{job_id}}/location/{{location}}
$ terraform import google_bigquery_job.default projects/{{project}}/jobs/{{job_id}}
$ terraform import google_bigquery_job.default {{project}}/{{job_id}}/{{location}}
$ terraform import google_bigquery_job.default {{job_id}}/{{location}}
$ terraform import google_bigquery_job.default {{project}}/{{job_id}}
$ terraform import google_bigquery_job.default {{job_id}}
```

## User Project Overrides

This resource supports User Project Overrides.
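
A minimal provider configuration enabling the override might look like the following (the billing project ID is a placeholder):

```hcl
provider "google" {
  user_project_override = true
  # Hypothetical project whose quota and billing are used for API calls.
  billing_project       = "my-project-name"
}
```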