databricks.Mount
This resource will mount your cloud storage in the workspace under dbfs:/mnt/<MOUNT_NAME>. There are several ways to configure the mount:
- s3 - to mount AWS S3 using an instance profile
- gs - to mount Google Cloud Storage
- abfs - to mount ADLS Gen2 using the Azure Blob Filesystem (ABFS) driver
- adl - to mount ADLS Gen1 using the Azure Data Lake (ADL) driver
- wasb - to mount Azure Blob Storage using the Windows Azure Storage Blob (WASB) driver
- Use generic arguments - you are responsible for providing all parameters required to mount the specific storage. This is the most flexible option.
Common arguments
- cluster_id - (Optional, String) Cluster to use for mounting. If no cluster is specified, a new cluster will be created, and it will mount the bucket for all of the clusters in this workspace. If the cluster is not running, it will be started, so make sure to set auto-termination rules on it.
- name - (Optional, String) Name under which the mount will be accessible in dbfs:/mnt/<MOUNT_NAME>. If not specified, the provider will try to infer it based on the resource type:
  - bucket_name for AWS S3 and Google Cloud Storage
  - container_name for ADLS Gen2 and Azure Blob Storage
  - storage_resource_name for ADLS Gen1
 
- uri - (Optional, String) the URI for accessing the specific storage (s3a://..., abfss://..., gs://..., etc.)
- extra_configs - (Optional, String map) configuration parameters that are necessary for mounting the specific storage
- resource_id - (Optional, String) resource ID for a given storage account. Can be used to fill in defaults, such as the storage account and container names on Azure.
- encryption_type - (Optional, String) encryption type. Currently used only for AWS S3 mounts.
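Once created, the mount is accessible from every cluster in the workspace under dbfs:/mnt/<MOUNT_NAME>. As a minimal sketch of consuming a mount from a Databricks notebook rather than from the Pulumi program (the mount name and file path below are illustrative):
# In a Databricks notebook, after a mount named "experiments" exists:
display(dbutils.fs.ls("/mnt/experiments"))  # list files in the mounted storage
df = spark.read.parquet("/mnt/experiments/some-dataset")  # hypothetical dataset path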
Example mounting ADLS Gen2 using uri and extra_configs
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
const tenantId = "00000000-1111-2222-3333-444444444444";
const clientId = "55555555-6666-7777-8888-999999999999";
const secretScope = "some-kv";
const secretKey = "some-sp-secret";
const container = "test";
const storageAcc = "lrs";
const _this = new databricks.Mount("this", {
    name: "tf-abfss",
    uri: `abfss://${container}@${storageAcc}.dfs.core.windows.net`,
    extraConfigs: {
        "fs.azure.account.auth.type": "OAuth",
        "fs.azure.account.oauth.provider.type": "org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider",
        "fs.azure.account.oauth2.client.id": clientId,
        "fs.azure.account.oauth2.client.secret": `{{secrets/${secretScope}/${secretKey}}}`,
        "fs.azure.account.oauth2.client.endpoint": `https://login.microsoftonline.com/${tenantId}/oauth2/token`,
        "fs.azure.createRemoteFileSystemDuringInitialization": "false",
    },
});
import pulumi
import pulumi_databricks as databricks
tenant_id = "00000000-1111-2222-3333-444444444444"
client_id = "55555555-6666-7777-8888-999999999999"
secret_scope = "some-kv"
secret_key = "some-sp-secret"
container = "test"
storage_acc = "lrs"
this = databricks.Mount("this",
    name="tf-abfss",
    uri=f"abfss://{container}@{storage_acc}.dfs.core.windows.net",
    extra_configs={
        "fs.azure.account.auth.type": "OAuth",
        "fs.azure.account.oauth.provider.type": "org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider",
        "fs.azure.account.oauth2.client.id": client_id,
        "fs.azure.account.oauth2.client.secret": f"{{{{secrets/{secret_scope}/{secret_key}}}}}",
        "fs.azure.account.oauth2.client.endpoint": f"https://login.microsoftonline.com/{tenant_id}/oauth2/token",
        "fs.azure.createRemoteFileSystemDuringInitialization": "false",
    })
package main
import (
	"fmt"
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		tenantId := "00000000-1111-2222-3333-444444444444"
		clientId := "55555555-6666-7777-8888-999999999999"
		secretScope := "some-kv"
		secretKey := "some-sp-secret"
		container := "test"
		storageAcc := "lrs"
		_, err := databricks.NewMount(ctx, "this", &databricks.MountArgs{
			Name: pulumi.String("tf-abfss"),
			Uri:  pulumi.String(fmt.Sprintf("abfss://%v@%v.dfs.core.windows.net", container, storageAcc)),
			ExtraConfigs: pulumi.Map{
				"fs.azure.account.auth.type":                          pulumi.Any("OAuth"),
				"fs.azure.account.oauth.provider.type":                pulumi.Any("org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider"),
				"fs.azure.account.oauth2.client.id":                   pulumi.String(clientId),
				"fs.azure.account.oauth2.client.secret":               pulumi.Any(fmt.Sprintf("{{secrets/%v/%v}}", secretScope, secretKey)),
				"fs.azure.account.oauth2.client.endpoint":             pulumi.Any(fmt.Sprintf("https://login.microsoftonline.com/%v/oauth2/token", tenantId)),
				"fs.azure.createRemoteFileSystemDuringInitialization": pulumi.Any("false"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;
return await Deployment.RunAsync(() => 
{
    var tenantId = "00000000-1111-2222-3333-444444444444";
    var clientId = "55555555-6666-7777-8888-999999999999";
    var secretScope = "some-kv";
    var secretKey = "some-sp-secret";
    var container = "test";
    var storageAcc = "lrs";
    var @this = new Databricks.Mount("this", new()
    {
        Name = "tf-abfss",
        Uri = $"abfss://{container}@{storageAcc}.dfs.core.windows.net",
        ExtraConfigs = 
        {
            { "fs.azure.account.auth.type", "OAuth" },
            { "fs.azure.account.oauth.provider.type", "org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider" },
            { "fs.azure.account.oauth2.client.id", clientId },
            { "fs.azure.account.oauth2.client.secret", $"{{{{secrets/{secretScope}/{secretKey}}}}}" },
            { "fs.azure.account.oauth2.client.endpoint", $"https://login.microsoftonline.com/{tenantId}/oauth2/token" },
            { "fs.azure.createRemoteFileSystemDuringInitialization", "false" },
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Mount;
import com.pulumi.databricks.MountArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        final var tenantId = "00000000-1111-2222-3333-444444444444";
        final var clientId = "55555555-6666-7777-8888-999999999999";
        final var secretScope = "some-kv";
        final var secretKey = "some-sp-secret";
        final var container = "test";
        final var storageAcc = "lrs";
        var this_ = new Mount("this", MountArgs.builder()
            .name("tf-abfss")
            .uri(String.format("abfss://%s@%s.dfs.core.windows.net", container,storageAcc))
            .extraConfigs(Map.ofEntries(
                Map.entry("fs.azure.account.auth.type", "OAuth"),
                Map.entry("fs.azure.account.oauth.provider.type", "org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider"),
                Map.entry("fs.azure.account.oauth2.client.id", clientId),
                Map.entry("fs.azure.account.oauth2.client.secret", String.format("{{{{secrets/%s/%s}}}}", secretScope,secretKey)),
                Map.entry("fs.azure.account.oauth2.client.endpoint", String.format("https://login.microsoftonline.com/%s/oauth2/token", tenantId)),
                Map.entry("fs.azure.createRemoteFileSystemDuringInitialization", "false")
            ))
            .build());
    }
}
resources:
  this:
    type: databricks:Mount
    properties:
      name: tf-abfss
      uri: abfss://${container}@${storageAcc}.dfs.core.windows.net
      extraConfigs:
        fs.azure.account.auth.type: OAuth
        fs.azure.account.oauth.provider.type: org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider
        fs.azure.account.oauth2.client.id: ${clientId}
        fs.azure.account.oauth2.client.secret: '{{secrets/${secretScope}/${secretKey}}}'
        fs.azure.account.oauth2.client.endpoint: https://login.microsoftonline.com/${tenantId}/oauth2/token
        fs.azure.createRemoteFileSystemDuringInitialization: 'false'
variables:
  tenantId: 00000000-1111-2222-3333-444444444444
  clientId: 55555555-6666-7777-8888-999999999999
  secretScope: some-kv
  secretKey: some-sp-secret
  container: test
  storageAcc: lrs
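The client secret above is not stored in plain text: the value {{secrets/<scope>/<key>}} in extra_configs is resolved from a Databricks secret scope at mount time. The example assumes that secret already exists; a minimal Python sketch of creating it (resource names are illustrative, and the secret value is read from Pulumi config here):
import pulumi
import pulumi_databricks as databricks

config = pulumi.Config()
# Service principal client secret, supplied as a Pulumi secret config value.
sp_client_secret = config.require_secret("spClientSecret")

# Secret scope and secret matching secrets/some-kv/some-sp-secret above.
kv_scope = databricks.SecretScope("some-kv", name="some-kv")
sp_secret = databricks.Secret("some-sp-secret",
    key="some-sp-secret",
    string_value=sp_client_secret,
    scope=kv_scope.name)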
Example mounting ADLS Gen2 with AAD passthrough
Note AAD passthrough is considered a legacy data access pattern. Use Unity Catalog for fine-grained data access control.
Note Mounts using AAD passthrough cannot be created using a service principal.
To mount ADLS Gen2 with Azure Active Directory credentials passthrough, we need to execute the mount commands on a cluster configured with AAD credentials passthrough and provide the necessary configuration parameters (see documentation for more details).
import * as pulumi from "@pulumi/pulumi";
import * as azure from "@pulumi/azure";
import * as databricks from "@pulumi/databricks";
const config = new pulumi.Config();
// Resource group for Databricks Workspace
const resourceGroup = config.require("resourceGroup");
// Name of the Databricks Workspace
const workspaceName = config.require("workspaceName");
const _this = azure.databricks.getWorkspace({
    name: workspaceName,
    resourceGroupName: resourceGroup,
});
const smallest = databricks.getNodeType({
    localDisk: true,
});
const latest = databricks.getSparkVersion({});
const sharedPassthrough = new databricks.Cluster("shared_passthrough", {
    clusterName: "Shared Passthrough for mount",
    sparkVersion: latest.then(latest => latest.id),
    nodeTypeId: smallest.then(smallest => smallest.id),
    autoterminationMinutes: 10,
    numWorkers: 1,
    sparkConf: {
        "spark.databricks.cluster.profile": "serverless",
        "spark.databricks.repl.allowedLanguages": "python,sql",
        "spark.databricks.passthrough.enabled": "true",
        "spark.databricks.pyspark.enableProcessIsolation": "true",
    },
    customTags: {
        ResourceClass: "Serverless",
    },
});
// Name of the ADLS Gen2 storage container
const storageAcc = config.require("storageAcc");
// Name of container inside storage account
const container = config.require("container");
const passthrough = new databricks.Mount("passthrough", {
    name: "passthrough-test",
    clusterId: sharedPassthrough.id,
    uri: `abfss://${container}@${storageAcc}.dfs.core.windows.net`,
    extraConfigs: {
        "fs.azure.account.auth.type": "CustomAccessToken",
        "fs.azure.account.custom.token.provider.class": "{{sparkconf/spark.databricks.passthrough.adls.gen2.tokenProviderClassName}}",
    },
});
import pulumi
import pulumi_azure as azure
import pulumi_databricks as databricks
config = pulumi.Config()
# Resource group for Databricks Workspace
resource_group = config.require("resourceGroup")
# Name of the Databricks Workspace
workspace_name = config.require("workspaceName")
this = azure.databricks.get_workspace(name=workspace_name,
    resource_group_name=resource_group)
smallest = databricks.get_node_type(local_disk=True)
latest = databricks.get_spark_version()
shared_passthrough = databricks.Cluster("shared_passthrough",
    cluster_name="Shared Passthrough for mount",
    spark_version=latest.id,
    node_type_id=smallest.id,
    autotermination_minutes=10,
    num_workers=1,
    spark_conf={
        "spark.databricks.cluster.profile": "serverless",
        "spark.databricks.repl.allowedLanguages": "python,sql",
        "spark.databricks.passthrough.enabled": "true",
        "spark.databricks.pyspark.enableProcessIsolation": "true",
    },
    custom_tags={
        "ResourceClass": "Serverless",
    })
# Name of the ADLS Gen2 storage container
storage_acc = config.require("storageAcc")
# Name of container inside storage account
container = config.require("container")
passthrough = databricks.Mount("passthrough",
    name="passthrough-test",
    cluster_id=shared_passthrough.id,
    uri=f"abfss://{container}@{storage_acc}.dfs.core.windows.net",
    extra_configs={
        "fs.azure.account.auth.type": "CustomAccessToken",
        "fs.azure.account.custom.token.provider.class": "{{sparkconf/spark.databricks.passthrough.adls.gen2.tokenProviderClassName}}",
    })
package main
import (
	"fmt"
	azuredatabricks "github.com/pulumi/pulumi-azure/sdk/v5/go/azure/databricks"
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		cfg := config.New(ctx, "")
		// Resource group for Databricks Workspace
		resourceGroup := cfg.Require("resourceGroup")
		// Name of the Databricks Workspace
		workspaceName := cfg.Require("workspaceName")
		_, err := azuredatabricks.LookupWorkspace(ctx, &azuredatabricks.LookupWorkspaceArgs{
			Name:              workspaceName,
			ResourceGroupName: resourceGroup,
		}, nil)
		if err != nil {
			return err
		}
		smallest, err := databricks.GetNodeType(ctx, &databricks.GetNodeTypeArgs{
			LocalDisk: pulumi.BoolRef(true),
		}, nil)
		if err != nil {
			return err
		}
		latest, err := databricks.GetSparkVersion(ctx, nil, nil)
		if err != nil {
			return err
		}
		sharedPassthrough, err := databricks.NewCluster(ctx, "shared_passthrough", &databricks.ClusterArgs{
			ClusterName:            pulumi.String("Shared Passthrough for mount"),
			SparkVersion:           pulumi.String(latest.Id),
			NodeTypeId:             pulumi.String(smallest.Id),
			AutoterminationMinutes: pulumi.Int(10),
			NumWorkers:             pulumi.Int(1),
			SparkConf: pulumi.Map{
				"spark.databricks.cluster.profile":                pulumi.Any("serverless"),
				"spark.databricks.repl.allowedLanguages":          pulumi.Any("python,sql"),
				"spark.databricks.passthrough.enabled":            pulumi.Any("true"),
				"spark.databricks.pyspark.enableProcessIsolation": pulumi.Any("true"),
			},
			CustomTags: pulumi.Map{
				"ResourceClass": pulumi.Any("Serverless"),
			},
		})
		if err != nil {
			return err
		}
		// Name of the ADLS Gen2 storage container
		storageAcc := cfg.Require("storageAcc")
		// Name of container inside storage account
		container := cfg.Require("container")
		_, err = databricks.NewMount(ctx, "passthrough", &databricks.MountArgs{
			Name:      pulumi.String("passthrough-test"),
			ClusterId: sharedPassthrough.ID(),
			Uri:       pulumi.String(fmt.Sprintf("abfss://%v@%v.dfs.core.windows.net", container, storageAcc)),
			ExtraConfigs: pulumi.Map{
				"fs.azure.account.auth.type":                   pulumi.Any("CustomAccessToken"),
				"fs.azure.account.custom.token.provider.class": pulumi.Any("{{sparkconf/spark.databricks.passthrough.adls.gen2.tokenProviderClassName}}"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Azure = Pulumi.Azure;
using Databricks = Pulumi.Databricks;
return await Deployment.RunAsync(() => 
{
    var config = new Config();
    // Resource group for Databricks Workspace
    var resourceGroup = config.Require("resourceGroup");
    // Name of the Databricks Workspace
    var workspaceName = config.Require("workspaceName");
    var @this = Azure.DataBricks.GetWorkspace.Invoke(new()
    {
        Name = workspaceName,
        ResourceGroupName = resourceGroup,
    });
    var smallest = Databricks.GetNodeType.Invoke(new()
    {
        LocalDisk = true,
    });
    var latest = Databricks.GetSparkVersion.Invoke();
    var sharedPassthrough = new Databricks.Cluster("shared_passthrough", new()
    {
        ClusterName = "Shared Passthrough for mount",
        SparkVersion = latest.Apply(getSparkVersionResult => getSparkVersionResult.Id),
        NodeTypeId = smallest.Apply(getNodeTypeResult => getNodeTypeResult.Id),
        AutoterminationMinutes = 10,
        NumWorkers = 1,
        SparkConf = 
        {
            { "spark.databricks.cluster.profile", "serverless" },
            { "spark.databricks.repl.allowedLanguages", "python,sql" },
            { "spark.databricks.passthrough.enabled", "true" },
            { "spark.databricks.pyspark.enableProcessIsolation", "true" },
        },
        CustomTags = 
        {
            { "ResourceClass", "Serverless" },
        },
    });
    // Name of the ADLS Gen2 storage container
    var storageAcc = config.Require("storageAcc");
    // Name of container inside storage account
    var container = config.Require("container");
    var passthrough = new Databricks.Mount("passthrough", new()
    {
        Name = "passthrough-test",
        ClusterId = sharedPassthrough.Id,
        Uri = $"abfss://{container}@{storageAcc}.dfs.core.windows.net",
        ExtraConfigs = 
        {
            { "fs.azure.account.auth.type", "CustomAccessToken" },
            { "fs.azure.account.custom.token.provider.class", "{{sparkconf/spark.databricks.passthrough.adls.gen2.tokenProviderClassName}}" },
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azure.databricks.inputs.GetWorkspaceArgs;
import com.pulumi.databricks.DatabricksFunctions;
import com.pulumi.databricks.inputs.GetNodeTypeArgs;
import com.pulumi.databricks.inputs.GetSparkVersionArgs;
import com.pulumi.databricks.Cluster;
import com.pulumi.databricks.ClusterArgs;
import com.pulumi.databricks.Mount;
import com.pulumi.databricks.MountArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        final var config = ctx.config();
        final var resourceGroup = config.get("resourceGroup");
        final var workspaceName = config.get("workspaceName");
        final var this_ = com.pulumi.azure.databricks.DatabricksFunctions.getWorkspace(GetWorkspaceArgs.builder()
            .name(workspaceName)
            .resourceGroupName(resourceGroup)
            .build());
        final var smallest = DatabricksFunctions.getNodeType(GetNodeTypeArgs.builder()
            .localDisk(true)
            .build());
        final var latest = DatabricksFunctions.getSparkVersion();
        var sharedPassthrough = new Cluster("sharedPassthrough", ClusterArgs.builder()
            .clusterName("Shared Passthrough for mount")
            .sparkVersion(latest.applyValue(getSparkVersionResult -> getSparkVersionResult.id()))
            .nodeTypeId(smallest.applyValue(getNodeTypeResult -> getNodeTypeResult.id()))
            .autoterminationMinutes(10)
            .numWorkers(1)
            .sparkConf(Map.ofEntries(
                Map.entry("spark.databricks.cluster.profile", "serverless"),
                Map.entry("spark.databricks.repl.allowedLanguages", "python,sql"),
                Map.entry("spark.databricks.passthrough.enabled", "true"),
                Map.entry("spark.databricks.pyspark.enableProcessIsolation", "true")
            ))
            .customTags(Map.of("ResourceClass", "Serverless"))
            .build());
        final var storageAcc = config.get("storageAcc");
        final var container = config.get("container");
        var passthrough = new Mount("passthrough", MountArgs.builder()
            .name("passthrough-test")
            .clusterId(sharedPassthrough.id())
            .uri(String.format("abfss://%s@%s.dfs.core.windows.net", container,storageAcc))
            .extraConfigs(Map.ofEntries(
                Map.entry("fs.azure.account.auth.type", "CustomAccessToken"),
                Map.entry("fs.azure.account.custom.token.provider.class", "{{sparkconf/spark.databricks.passthrough.adls.gen2.tokenProviderClassName}}")
            ))
            .build());
    }
}
configuration:
  resourceGroup:
    type: string
  workspaceName:
    type: string
  storageAcc:
    type: string
  container:
    type: string
resources:
  sharedPassthrough:
    type: databricks:Cluster
    name: shared_passthrough
    properties:
      clusterName: Shared Passthrough for mount
      sparkVersion: ${latest.id}
      nodeTypeId: ${smallest.id}
      autoterminationMinutes: 10
      numWorkers: 1
      sparkConf:
        spark.databricks.cluster.profile: serverless
        spark.databricks.repl.allowedLanguages: python,sql
        spark.databricks.passthrough.enabled: 'true'
        spark.databricks.pyspark.enableProcessIsolation: 'true'
      customTags:
        ResourceClass: Serverless
  passthrough:
    type: databricks:Mount
    properties:
      name: passthrough-test
      clusterId: ${sharedPassthrough.id}
      uri: abfss://${container}@${storageAcc}.dfs.core.windows.net
      extraConfigs:
        fs.azure.account.auth.type: CustomAccessToken
        fs.azure.account.custom.token.provider.class: '{{sparkconf/spark.databricks.passthrough.adls.gen2.tokenProviderClassName}}'
variables:
  this:
    fn::invoke:
      Function: azure:databricks:getWorkspace
      Arguments:
        name: ${workspaceName}
        resourceGroupName: ${resourceGroup}
  smallest:
    fn::invoke:
      Function: databricks:getNodeType
      Arguments:
        localDisk: true
  latest:
    fn::invoke:
      Function: databricks:getSparkVersion
      Arguments: {}
s3 block
This block allows specifying parameters for mounting of AWS S3. The following arguments are supported inside the s3 block:
- instance_profile - (Optional) (String) ARN of a registered instance profile for data access. If it's not specified, then cluster_id should be provided, and the cluster should have an instance profile attached to it. If both cluster_id and instance_profile are specified, then cluster_id takes precedence.
- bucket_name - (Required) (String) S3 bucket name to be mounted.
Example of mounting S3
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
// now you can do `%fs ls /mnt/experiments` in notebooks
const _this = new databricks.Mount("this", {
    name: "experiments",
    s3: {
        instanceProfile: ds.id,
        bucketName: thisAwsS3Bucket.bucket,
    },
});
import pulumi
import pulumi_databricks as databricks
# now you can do `%fs ls /mnt/experiments` in notebooks
this = databricks.Mount("this",
    name="experiments",
    s3=databricks.MountS3Args(
        instance_profile=ds["id"],
        bucket_name=this_aws_s3_bucket["bucket"],
    ))
package main
import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// now you can do `%fs ls /mnt/experiments` in notebooks
		_, err := databricks.NewMount(ctx, "this", &databricks.MountArgs{
			Name: pulumi.String("experiments"),
			S3: &databricks.MountS3Args{
				InstanceProfile: pulumi.Any(ds.Id),
				BucketName:      pulumi.Any(thisAwsS3Bucket.Bucket),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;
return await Deployment.RunAsync(() => 
{
    // now you can do `%fs ls /mnt/experiments` in notebooks
    var @this = new Databricks.Mount("this", new()
    {
        Name = "experiments",
        S3 = new Databricks.Inputs.MountS3Args
        {
            InstanceProfile = ds.Id,
            BucketName = thisAwsS3Bucket.Bucket,
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Mount;
import com.pulumi.databricks.MountArgs;
import com.pulumi.databricks.inputs.MountS3Args;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        // now you can do `%fs ls /mnt/experiments` in notebooks
        var this_ = new Mount("this", MountArgs.builder()
            .name("experiments")
            .s3(MountS3Args.builder()
                .instanceProfile(ds.id())
                .bucketName(thisAwsS3Bucket.bucket())
                .build())
            .build());
    }
}
resources:
  # now you can do `%fs ls /mnt/experiments` in notebooks
  this:
    type: databricks:Mount
    properties:
      name: experiments
      s3:
        instanceProfile: ${ds.id}
        bucketName: ${thisAwsS3Bucket.bucket}
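The ds and thisAwsS3Bucket references above point to an instance profile and an S3 bucket defined elsewhere in the program. A more self-contained Python sketch, with illustrative resource names and a placeholder instance profile ARN:
import pulumi_aws as aws
import pulumi_databricks as databricks

# S3 bucket to be mounted (name is illustrative).
experiments_bucket = aws.s3.Bucket("experiments")

# Register an existing AWS instance profile with Databricks;
# the ARN below is a placeholder.
shared_profile = databricks.InstanceProfile("shared",
    instance_profile_arn="arn:aws:iam::123456789012:instance-profile/databricks-shared")

this = databricks.Mount("this",
    name="experiments",
    s3=databricks.MountS3Args(
        instance_profile=shared_profile.id,
        bucket_name=experiments_bucket.bucket,
    ))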
abfs block
This block allows specifying parameters for mounting of ADLS Gen2. The following arguments are supported inside the abfs block:
- client_id - (Required) (String) This is the client_id (Application Object ID) of the enterprise application for the service principal.
- tenant_id - (Optional) (String) This is your Azure Directory tenant ID. It is required for creating the mount. (Can be omitted if Azure authentication is used and tenant_id can be extracted from it.)
- client_secret_key - (Required) (String) This is the secret key under which your service principal/enterprise app client secret is stored.
- client_secret_scope - (Required) (String) This is the secret scope in which your service principal/enterprise app client secret is stored.
- container_name - (Required) (String) ADLS Gen2 container name. (Can be omitted if resource_id is provided.)
- storage_account_name - (Required) (String) The name of the storage account in which the data is stored. (Can be omitted if resource_id is provided.)
- directory - (Computed) (String) Optional directory inside the container to mount. If provided, it must start with "/".
- initialize_file_system - (Required) (Bool) whether or not to initialize the file system on first use.
Creating mount for ADLS Gen2 using abfs block
In this example, we’re using Azure authentication, so we can omit some parameters (tenant_id, storage_account_name, and container_name) that will be detected automatically.
import * as pulumi from "@pulumi/pulumi";
import * as azure from "@pulumi/azure";
import * as databricks from "@pulumi/databricks";
const terraform = new databricks.SecretScope("terraform", {
    name: "application",
    initialManagePrincipal: "users",
});
const servicePrincipalKey = new databricks.Secret("service_principal_key", {
    key: "service_principal_key",
    stringValue: ARM_CLIENT_SECRET,
    scope: terraform.name,
});
const _this = new azure.storage.Account("this", {
    name: `${prefix}datalake`,
    resourceGroupName: resourceGroupName,
    location: resourceGroupLocation,
    accountTier: "Standard",
    accountReplicationType: "GRS",
    accountKind: "StorageV2",
    isHnsEnabled: true,
});
const thisAssignment = new azure.authorization.Assignment("this", {
    scope: _this.id,
    roleDefinitionName: "Storage Blob Data Contributor",
    principalId: current.objectId,
});
const thisContainer = new azure.storage.Container("this", {
    name: "marketing",
    storageAccountName: _this.name,
    containerAccessType: "private",
});
const marketing = new databricks.Mount("marketing", {
    name: "marketing",
    resourceId: thisContainer.resourceManagerId,
    abfs: {
        clientId: current.clientId,
        clientSecretScope: terraform.name,
        clientSecretKey: servicePrincipalKey.key,
        initializeFileSystem: true,
    },
});
import pulumi
import pulumi_azure as azure
import pulumi_databricks as databricks
terraform = databricks.SecretScope("terraform",
    name="application",
    initial_manage_principal="users")
service_principal_key = databricks.Secret("service_principal_key",
    key="service_principal_key",
    string_value=ARM_CLIENT_SECRET,
    scope=terraform.name)
this = azure.storage.Account("this",
    name=f"{prefix}datalake",
    resource_group_name=resource_group_name,
    location=resource_group_location,
    account_tier="Standard",
    account_replication_type="GRS",
    account_kind="StorageV2",
    is_hns_enabled=True)
this_assignment = azure.authorization.Assignment("this",
    scope=this.id,
    role_definition_name="Storage Blob Data Contributor",
    principal_id=current["objectId"])
this_container = azure.storage.Container("this",
    name="marketing",
    storage_account_name=this.name,
    container_access_type="private")
marketing = databricks.Mount("marketing",
    name="marketing",
    resource_id=this_container.resource_manager_id,
    abfs=databricks.MountAbfsArgs(
        client_id=current["clientId"],
        client_secret_scope=terraform.name,
        client_secret_key=service_principal_key.key,
        initialize_file_system=True,
    ))
package main
import (
	"fmt"
	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/authorization"
	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/storage"
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		terraform, err := databricks.NewSecretScope(ctx, "terraform", &databricks.SecretScopeArgs{
			Name:                   pulumi.String("application"),
			InitialManagePrincipal: pulumi.String("users"),
		})
		if err != nil {
			return err
		}
		servicePrincipalKey, err := databricks.NewSecret(ctx, "service_principal_key", &databricks.SecretArgs{
			Key:         pulumi.String("service_principal_key"),
			StringValue: pulumi.Any(ARM_CLIENT_SECRET),
			Scope:       terraform.Name,
		})
		if err != nil {
			return err
		}
		this, err := storage.NewAccount(ctx, "this", &storage.AccountArgs{
			Name:                   pulumi.String(fmt.Sprintf("%vdatalake", prefix)),
			ResourceGroupName:      pulumi.Any(resourceGroupName),
			Location:               pulumi.Any(resourceGroupLocation),
			AccountTier:            pulumi.String("Standard"),
			AccountReplicationType: pulumi.String("GRS"),
			AccountKind:            pulumi.String("StorageV2"),
			IsHnsEnabled:           pulumi.Bool(true),
		})
		if err != nil {
			return err
		}
		_, err = authorization.NewAssignment(ctx, "this", &authorization.AssignmentArgs{
			Scope:              this.ID(),
			RoleDefinitionName: pulumi.String("Storage Blob Data Contributor"),
			PrincipalId:        pulumi.Any(current.ObjectId),
		})
		if err != nil {
			return err
		}
		thisContainer, err := storage.NewContainer(ctx, "this", &storage.ContainerArgs{
			Name:                pulumi.String("marketing"),
			StorageAccountName:  this.Name,
			ContainerAccessType: pulumi.String("private"),
		})
		if err != nil {
			return err
		}
		_, err = databricks.NewMount(ctx, "marketing", &databricks.MountArgs{
			Name:       pulumi.String("marketing"),
			ResourceId: thisContainer.ResourceManagerId,
			Abfs: &databricks.MountAbfsArgs{
				ClientId:             pulumi.Any(current.ClientId),
				ClientSecretScope:    terraform.Name,
				ClientSecretKey:      servicePrincipalKey.Key,
				InitializeFileSystem: pulumi.Bool(true),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Azure = Pulumi.Azure;
using Databricks = Pulumi.Databricks;
return await Deployment.RunAsync(() => 
{
    var terraform = new Databricks.SecretScope("terraform", new()
    {
        Name = "application",
        InitialManagePrincipal = "users",
    });
    var servicePrincipalKey = new Databricks.Secret("service_principal_key", new()
    {
        Key = "service_principal_key",
        StringValue = ARM_CLIENT_SECRET,
        Scope = terraform.Name,
    });
    var @this = new Azure.Storage.Account("this", new()
    {
        Name = $"{prefix}datalake",
        ResourceGroupName = resourceGroupName,
        Location = resourceGroupLocation,
        AccountTier = "Standard",
        AccountReplicationType = "GRS",
        AccountKind = "StorageV2",
        IsHnsEnabled = true,
    });
    var thisAssignment = new Azure.Authorization.Assignment("this", new()
    {
        Scope = @this.Id,
        RoleDefinitionName = "Storage Blob Data Contributor",
        PrincipalId = current.ObjectId,
    });
    var thisContainer = new Azure.Storage.Container("this", new()
    {
        Name = "marketing",
        StorageAccountName = @this.Name,
        ContainerAccessType = "private",
    });
    var marketing = new Databricks.Mount("marketing", new()
    {
        Name = "marketing",
        ResourceId = thisContainer.ResourceManagerId,
        Abfs = new Databricks.Inputs.MountAbfsArgs
        {
            ClientId = current.ClientId,
            ClientSecretScope = terraform.Name,
            ClientSecretKey = servicePrincipalKey.Key,
            InitializeFileSystem = true,
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.SecretScope;
import com.pulumi.databricks.SecretScopeArgs;
import com.pulumi.databricks.Secret;
import com.pulumi.databricks.SecretArgs;
import com.pulumi.azure.storage.Account;
import com.pulumi.azure.storage.AccountArgs;
import com.pulumi.azure.authorization.Assignment;
import com.pulumi.azure.authorization.AssignmentArgs;
import com.pulumi.azure.storage.Container;
import com.pulumi.azure.storage.ContainerArgs;
import com.pulumi.databricks.Mount;
import com.pulumi.databricks.MountArgs;
import com.pulumi.databricks.inputs.MountAbfsArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var terraform = new SecretScope("terraform", SecretScopeArgs.builder()
            .name("application")
            .initialManagePrincipal("users")
            .build());
        var servicePrincipalKey = new Secret("servicePrincipalKey", SecretArgs.builder()
            .key("service_principal_key")
            .stringValue(ARM_CLIENT_SECRET)
            .scope(terraform.name())
            .build());
        var this_ = new Account("this", AccountArgs.builder()
            .name(String.format("%sdatalake", prefix))
            .resourceGroupName(resourceGroupName)
            .location(resourceGroupLocation)
            .accountTier("Standard")
            .accountReplicationType("GRS")
            .accountKind("StorageV2")
            .isHnsEnabled(true)
            .build());
        var thisAssignment = new Assignment("thisAssignment", AssignmentArgs.builder()
            .scope(this_.id())
            .roleDefinitionName("Storage Blob Data Contributor")
            .principalId(current.objectId())
            .build());
        var thisContainer = new Container("thisContainer", ContainerArgs.builder()
            .name("marketing")
            .storageAccountName(this_.name())
            .containerAccessType("private")
            .build());
        var marketing = new Mount("marketing", MountArgs.builder()
            .name("marketing")
            .resourceId(thisContainer.resourceManagerId())
            .abfs(MountAbfsArgs.builder()
                .clientId(current.clientId())
                .clientSecretScope(terraform.name())
                .clientSecretKey(servicePrincipalKey.key())
                .initializeFileSystem(true)
                .build())
            .build());
    }
}
resources:
  terraform:
    type: databricks:SecretScope
    properties:
      name: application
      initialManagePrincipal: users
  servicePrincipalKey:
    type: databricks:Secret
    name: service_principal_key
    properties:
      key: service_principal_key
      stringValue: ${ARM_CLIENT_SECRET}
      scope: ${terraform.name}
  this:
    type: azure:storage:Account
    properties:
      name: ${prefix}datalake
      resourceGroupName: ${resourceGroupName}
      location: ${resourceGroupLocation}
      accountTier: Standard
      accountReplicationType: GRS
      accountKind: StorageV2
      isHnsEnabled: true
  thisAssignment:
    type: azure:authorization:Assignment
    name: this
    properties:
      scope: ${this.id}
      roleDefinitionName: Storage Blob Data Contributor
      principalId: ${current.objectId}
  thisContainer:
    type: azure:storage:Container
    name: this
    properties:
      name: marketing
      storageAccountName: ${this.name}
      containerAccessType: private
  marketing:
    type: databricks:Mount
    properties:
      name: marketing
      resourceId: ${thisContainer.resourceManagerId}
      abfs:
        clientId: ${current.clientId}
        clientSecretScope: ${terraform.name}
        clientSecretKey: ${servicePrincipalKey.key}
        initializeFileSystem: true
gs block
This block allows specifying parameters for mounting of Google Cloud Storage. The following arguments are supported inside the gs block:
- service_account - (Optional) (String) email of a registered Google service account for data access. If it's not specified, then cluster_id should be provided, and the cluster should have a Google service account attached to it.
- bucket_name - (Required) (String) GCS bucket name to be mounted.
Example mounting Google Cloud Storage
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
const thisGs = new databricks.Mount("this_gs", {
    name: "gs-mount",
    gs: {
        serviceAccount: "acc@company.iam.gserviceaccount.com",
        bucketName: "mybucket",
    },
});
import pulumi
import pulumi_databricks as databricks
this_gs = databricks.Mount("this_gs",
    name="gs-mount",
    gs=databricks.MountGsArgs(
        service_account="acc@company.iam.gserviceaccount.com",
        bucket_name="mybucket",
    ))
package main
import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := databricks.NewMount(ctx, "this_gs", &databricks.MountArgs{
			Name: pulumi.String("gs-mount"),
			Gs: &databricks.MountGsArgs{
				ServiceAccount: pulumi.String("acc@company.iam.gserviceaccount.com"),
				BucketName:     pulumi.String("mybucket"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;
return await Deployment.RunAsync(() => 
{
    var thisGs = new Databricks.Mount("this_gs", new()
    {
        Name = "gs-mount",
        Gs = new Databricks.Inputs.MountGsArgs
        {
            ServiceAccount = "acc@company.iam.gserviceaccount.com",
            BucketName = "mybucket",
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Mount;
import com.pulumi.databricks.MountArgs;
import com.pulumi.databricks.inputs.MountGsArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var thisGs = new Mount("thisGs", MountArgs.builder()
            .name("gs-mount")
            .gs(MountGsArgs.builder()
                .serviceAccount("acc@company.iam.gserviceaccount.com")
                .bucketName("mybucket")
                .build())
            .build());
    }
}
resources:
  thisGs:
    type: databricks:Mount
    name: this_gs
    properties:
      name: gs-mount
      gs:
        serviceAccount: acc@company.iam.gserviceaccount.com
        bucketName: mybucket
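If service_account is omitted, the mount must instead be created via a cluster that has a Google service account attached. A minimal Python sketch of that variant (the Spark version, node type, and service account email are placeholders):
import pulumi_databricks as databricks

# Cluster with a Google service account attached via gcp_attributes.
cluster = databricks.Cluster("gcs_mount_cluster",
    spark_version="13.3.x-scala2.12",
    node_type_id="n2-standard-4",
    autotermination_minutes=10,
    num_workers=1,
    gcp_attributes=databricks.ClusterGcpAttributesArgs(
        google_service_account="acc@company.iam.gserviceaccount.com",
    ))

this_gs = databricks.Mount("this_gs",
    name="gs-mount",
    cluster_id=cluster.id,
    gs=databricks.MountGsArgs(
        bucket_name="mybucket",
    ))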
adl block
This block allows specifying parameters for mounting of ADLS Gen1. The following arguments are supported inside the adl block:
- client_id - (Required) (String) This is the client_id of the enterprise application for the service principal.
- tenant_id - (Optional) (String) This is your Azure Directory tenant ID. It is required for creating the mount. (Can be omitted if Azure authentication is used and tenant_id can be extracted from it.)
- client_secret_key - (Required) (String) This is the secret key under which your service principal/enterprise app client secret is stored.
- client_secret_scope - (Required) (String) This is the secret scope in which your service principal/enterprise app client secret is stored.
- storage_resource_name - (Required) (String) The name of the ADLS Gen1 storage resource in which the data is stored. This is what you are trying to mount. (Can be omitted if resource_id is provided.)
- spark_conf_prefix - (Optional) (String) This is the Spark configuration prefix for the ADLS Gen1 mount. The options are fs.adl and dfs.adls. Use fs.adl for clusters on runtime 6.0 and above; otherwise use dfs.adls. The default value is fs.adl.
- directory - (Computed) (String) Optional directory inside the storage resource to mount. If provided, it must start with "/".
Example mounting ADLS Gen1
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
const mount = new databricks.Mount("mount", {
    name: "{var.RANDOM}",
    adl: {
        storageResourceName: "{env.TEST_STORAGE_ACCOUNT_NAME}",
        tenantId: current.tenantId,
        clientId: current.clientId,
        clientSecretScope: terraform.name,
        clientSecretKey: servicePrincipalKey.key,
        sparkConfPrefix: "fs.adl",
    },
});
import pulumi
import pulumi_databricks as databricks
mount = databricks.Mount("mount",
    name="{var.RANDOM}",
    adl=databricks.MountAdlArgs(
        storage_resource_name="{env.TEST_STORAGE_ACCOUNT_NAME}",
        tenant_id=current["tenantId"],
        client_id=current["clientId"],
        client_secret_scope=terraform["name"],
        client_secret_key=service_principal_key["key"],
        spark_conf_prefix="fs.adl",
    ))
package main
import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := databricks.NewMount(ctx, "mount", &databricks.MountArgs{
			Name: pulumi.String("{var.RANDOM}"),
			Adl: &databricks.MountAdlArgs{
				StorageResourceName: pulumi.String("{env.TEST_STORAGE_ACCOUNT_NAME}"),
				TenantId:            pulumi.Any(current.TenantId),
				ClientId:            pulumi.Any(current.ClientId),
				ClientSecretScope:   pulumi.Any(terraform.Name),
				ClientSecretKey:     pulumi.Any(servicePrincipalKey.Key),
				SparkConfPrefix:     pulumi.String("fs.adl"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;
return await Deployment.RunAsync(() => 
{
    var mount = new Databricks.Mount("mount", new()
    {
        Name = "{var.RANDOM}",
        Adl = new Databricks.Inputs.MountAdlArgs
        {
            StorageResourceName = "{env.TEST_STORAGE_ACCOUNT_NAME}",
            TenantId = current.TenantId,
            ClientId = current.ClientId,
            ClientSecretScope = terraform.Name,
            ClientSecretKey = servicePrincipalKey.Key,
            SparkConfPrefix = "fs.adl",
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Mount;
import com.pulumi.databricks.MountArgs;
import com.pulumi.databricks.inputs.MountAdlArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var mount = new Mount("mount", MountArgs.builder()
            .name("{var.RANDOM}")
            .adl(MountAdlArgs.builder()
                .storageResourceName("{env.TEST_STORAGE_ACCOUNT_NAME}")
                .tenantId(current.tenantId())
                .clientId(current.clientId())
                .clientSecretScope(terraform.name())
                .clientSecretKey(servicePrincipalKey.key())
                .sparkConfPrefix("fs.adl")
                .build())
            .build());
    }
}
resources:
  mount:
    type: databricks:Mount
    properties:
      name: '{var.RANDOM}'
      adl:
        storageResourceName: '{env.TEST_STORAGE_ACCOUNT_NAME}'
        tenantId: ${current.tenantId}
        clientId: ${current.clientId}
        clientSecretScope: ${terraform.name}
        clientSecretKey: ${servicePrincipalKey.key}
        sparkConfPrefix: fs.adl
wasb block
This block allows specifying parameters for mounting of Azure Blob Storage. The following arguments are supported inside the wasb block:
- auth_type - (Required) (String) This is the auth type for blob storage. This can either be SAS tokens (SAS) or account access keys (ACCESS_KEY).
- token_secret_scope - (Required) (String) This is the secret scope in which your auth type token is stored.
- token_secret_key - (Required) (String) This is the secret key under which your auth type token is stored.
- container_name - (Required) (String) The container in which the data is stored. This is what you are trying to mount. (Can be omitted if resource_id is provided.)
- storage_account_name - (Required) (String) The name of the storage account in which the data is stored. (Can be omitted if resource_id is provided.)
- directory - (Computed) (String) Optional directory inside the container to mount. If provided, it must start with "/".
Example mounting Azure Blob Storage
import * as pulumi from "@pulumi/pulumi";
import * as azure from "@pulumi/azure";
import * as databricks from "@pulumi/databricks";
const blobaccount = new azure.storage.Account("blobaccount", {
    name: `${prefix}blob`,
    resourceGroupName: resourceGroupName,
    location: resourceGroupLocation,
    accountTier: "Standard",
    accountReplicationType: "LRS",
    accountKind: "StorageV2",
});
const marketing = new azure.storage.Container("marketing", {
    name: "marketing",
    storageAccountName: blobaccount.name,
    containerAccessType: "private",
});
const terraform = new databricks.SecretScope("terraform", {
    name: "application",
    initialManagePrincipal: "users",
});
const storageKey = new databricks.Secret("storage_key", {
    key: "blob_storage_key",
    stringValue: blobaccount.primaryAccessKey,
    scope: terraform.name,
});
const marketingMount = new databricks.Mount("marketing", {
    name: "marketing",
    wasb: {
        containerName: marketing.name,
        storageAccountName: blobaccount.name,
        authType: "ACCESS_KEY",
        tokenSecretScope: terraform.name,
        tokenSecretKey: storageKey.key,
    },
});
import pulumi
import pulumi_azure as azure
import pulumi_databricks as databricks
blobaccount = azure.storage.Account("blobaccount",
    name=f"{prefix}blob",
    resource_group_name=resource_group_name,
    location=resource_group_location,
    account_tier="Standard",
    account_replication_type="LRS",
    account_kind="StorageV2")
marketing = azure.storage.Container("marketing",
    name="marketing",
    storage_account_name=blobaccount.name,
    container_access_type="private")
terraform = databricks.SecretScope("terraform",
    name="application",
    initial_manage_principal="users")
storage_key = databricks.Secret("storage_key",
    key="blob_storage_key",
    string_value=blobaccount.primary_access_key,
    scope=terraform.name)
marketing_mount = databricks.Mount("marketing",
    name="marketing",
    wasb=databricks.MountWasbArgs(
        container_name=marketing.name,
        storage_account_name=blobaccount.name,
        auth_type="ACCESS_KEY",
        token_secret_scope=terraform.name,
        token_secret_key=storage_key.key,
    ))
package main
import (
	"fmt"
	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/storage"
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		blobaccount, err := storage.NewAccount(ctx, "blobaccount", &storage.AccountArgs{
			Name:                   pulumi.String(fmt.Sprintf("%vblob", prefix)),
			ResourceGroupName:      pulumi.Any(resourceGroupName),
			Location:               pulumi.Any(resourceGroupLocation),
			AccountTier:            pulumi.String("Standard"),
			AccountReplicationType: pulumi.String("LRS"),
			AccountKind:            pulumi.String("StorageV2"),
		})
		if err != nil {
			return err
		}
		marketing, err := storage.NewContainer(ctx, "marketing", &storage.ContainerArgs{
			Name:                pulumi.String("marketing"),
			StorageAccountName:  blobaccount.Name,
			ContainerAccessType: pulumi.String("private"),
		})
		if err != nil {
			return err
		}
		terraform, err := databricks.NewSecretScope(ctx, "terraform", &databricks.SecretScopeArgs{
			Name:                   pulumi.String("application"),
			InitialManagePrincipal: pulumi.String("users"),
		})
		if err != nil {
			return err
		}
		storageKey, err := databricks.NewSecret(ctx, "storage_key", &databricks.SecretArgs{
			Key:         pulumi.String("blob_storage_key"),
			StringValue: blobaccount.PrimaryAccessKey,
			Scope:       terraform.Name,
		})
		if err != nil {
			return err
		}
		_, err = databricks.NewMount(ctx, "marketing", &databricks.MountArgs{
			Name: pulumi.String("marketing"),
			Wasb: &databricks.MountWasbArgs{
				ContainerName:      marketing.Name,
				StorageAccountName: blobaccount.Name,
				AuthType:           pulumi.String("ACCESS_KEY"),
				TokenSecretScope:   terraform.Name,
				TokenSecretKey:     storageKey.Key,
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Azure = Pulumi.Azure;
using Databricks = Pulumi.Databricks;
return await Deployment.RunAsync(() => 
{
    var blobaccount = new Azure.Storage.Account("blobaccount", new()
    {
        Name = $"{prefix}blob",
        ResourceGroupName = resourceGroupName,
        Location = resourceGroupLocation,
        AccountTier = "Standard",
        AccountReplicationType = "LRS",
        AccountKind = "StorageV2",
    });
    var marketing = new Azure.Storage.Container("marketing", new()
    {
        Name = "marketing",
        StorageAccountName = blobaccount.Name,
        ContainerAccessType = "private",
    });
    var terraform = new Databricks.SecretScope("terraform", new()
    {
        Name = "application",
        InitialManagePrincipal = "users",
    });
    var storageKey = new Databricks.Secret("storage_key", new()
    {
        Key = "blob_storage_key",
        StringValue = blobaccount.PrimaryAccessKey,
        Scope = terraform.Name,
    });
    var marketingMount = new Databricks.Mount("marketing", new()
    {
        Name = "marketing",
        Wasb = new Databricks.Inputs.MountWasbArgs
        {
            ContainerName = marketing.Name,
            StorageAccountName = blobaccount.Name,
            AuthType = "ACCESS_KEY",
            TokenSecretScope = terraform.Name,
            TokenSecretKey = storageKey.Key,
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azure.storage.Account;
import com.pulumi.azure.storage.AccountArgs;
import com.pulumi.azure.storage.Container;
import com.pulumi.azure.storage.ContainerArgs;
import com.pulumi.databricks.SecretScope;
import com.pulumi.databricks.SecretScopeArgs;
import com.pulumi.databricks.Secret;
import com.pulumi.databricks.SecretArgs;
import com.pulumi.databricks.Mount;
import com.pulumi.databricks.MountArgs;
import com.pulumi.databricks.inputs.MountWasbArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var blobaccount = new Account("blobaccount", AccountArgs.builder()
            .name(String.format("%sblob", prefix))
            .resourceGroupName(resourceGroupName)
            .location(resourceGroupLocation)
            .accountTier("Standard")
            .accountReplicationType("LRS")
            .accountKind("StorageV2")
            .build());
        var marketing = new Container("marketing", ContainerArgs.builder()
            .name("marketing")
            .storageAccountName(blobaccount.name())
            .containerAccessType("private")
            .build());
        var terraform = new SecretScope("terraform", SecretScopeArgs.builder()
            .name("application")
            .initialManagePrincipal("users")
            .build());
        var storageKey = new Secret("storageKey", SecretArgs.builder()
            .key("blob_storage_key")
            .stringValue(blobaccount.primaryAccessKey())
            .scope(terraform.name())
            .build());
        var marketingMount = new Mount("marketingMount", MountArgs.builder()
            .name("marketing")
            .wasb(MountWasbArgs.builder()
                .containerName(marketing.name())
                .storageAccountName(blobaccount.name())
                .authType("ACCESS_KEY")
                .tokenSecretScope(terraform.name())
                .tokenSecretKey(storageKey.key())
                .build())
            .build());
    }
}
resources:
  blobaccount:
    type: azure:storage:Account
    properties:
      name: ${prefix}blob
      resourceGroupName: ${resourceGroupName}
      location: ${resourceGroupLocation}
      accountTier: Standard
      accountReplicationType: LRS
      accountKind: StorageV2
  marketing:
    type: azure:storage:Container
    properties:
      name: marketing
      storageAccountName: ${blobaccount.name}
      containerAccessType: private
  terraform:
    type: databricks:SecretScope
    properties:
      name: application
      initialManagePrincipal: users
  storageKey:
    type: databricks:Secret
    name: storage_key
    properties:
      key: blob_storage_key
      stringValue: ${blobaccount.primaryAccessKey}
      scope: ${terraform.name}
  marketingMount:
    type: databricks:Mount
    name: marketing
    properties:
      name: marketing
      wasb:
        containerName: ${marketing.name}
        storageAccountName: ${blobaccount.name}
        authType: ACCESS_KEY
        tokenSecretScope: ${terraform.name}
        tokenSecretKey: ${storageKey.key}
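The example above authenticates with an account access key; with auth_type set to SAS, the secret holds a SAS token instead. A minimal Python sketch of that variant (the storage account, scope, and key names are illustrative, and the SAS token is assumed to already exist in the scope):
import pulumi_databricks as databricks

# Assumes a SAS token is already stored at secrets/application/sas_token.
sas_mount = databricks.Mount("sas_mount",
    name="marketing-sas",
    wasb=databricks.MountWasbArgs(
        container_name="marketing",
        storage_account_name="blobaccountname",
        auth_type="SAS",
        token_secret_scope="application",
        token_secret_key="sas_token",
    ))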
Migration from other mount resources
Migration from a storage-specific mount resource is straightforward:
- rename mount_name to name
- wrap storage-specific settings (container_name, ...) into the corresponding block (adl, abfs, s3, wasb)
- for S3 mounts, rename s3_bucket_name to bucket_name (see the sketch below)
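As a rough Python sketch of those renames for an S3 mount (the "before" shape shows the legacy argument names for illustration only, and the instance profile ARN is a placeholder):
import pulumi_databricks as databricks

# Before (legacy storage-specific mount resource, for illustration):
#   mount_name     = "experiments"
#   s3_bucket_name = "my-bucket"
#
# After, with databricks.Mount:
mount = databricks.Mount("experiments",
    name="experiments",            # was: mount_name
    s3=databricks.MountS3Args(
        bucket_name="my-bucket",   # was: s3_bucket_name
        instance_profile="arn:aws:iam::123456789012:instance-profile/databricks-shared",  # placeholder ARN
    ))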
Related Resources
The following resources are often used in the same context:
- End to end workspace management guide.
- databricks.getAwsBucketPolicy data to configure a simple access policy for AWS S3 buckets, so that Databricks can access data in them.
- databricks.Cluster to create Databricks Clusters.
- databricks.DbfsFile data to get file content from Databricks File System (DBFS).
- databricks.getDbfsFilePaths data to get the list of file names from Databricks File System (DBFS).
- databricks.DbfsFile to manage relatively small files on Databricks File System (DBFS).
- databricks.InstanceProfile to manage AWS EC2 instance profiles that users can use to launch a databricks.Cluster and access data, such as a databricks.Mount.
- databricks.Library to install a library on a databricks.Cluster.
Create Mount Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new Mount(name: string, args?: MountArgs, opts?: CustomResourceOptions);
@overload
def Mount(resource_name: str,
          args: Optional[MountArgs] = None,
          opts: Optional[ResourceOptions] = None)
@overload
def Mount(resource_name: str,
          opts: Optional[ResourceOptions] = None,
          abfs: Optional[MountAbfsArgs] = None,
          adl: Optional[MountAdlArgs] = None,
          cluster_id: Optional[str] = None,
          encryption_type: Optional[str] = None,
          extra_configs: Optional[Mapping[str, Any]] = None,
          gs: Optional[MountGsArgs] = None,
          name: Optional[str] = None,
          resource_id: Optional[str] = None,
          s3: Optional[MountS3Args] = None,
          uri: Optional[str] = None,
          wasb: Optional[MountWasbArgs] = None)
func NewMount(ctx *Context, name string, args *MountArgs, opts ...ResourceOption) (*Mount, error)
public Mount(string name, MountArgs? args = null, CustomResourceOptions? opts = null)
public Mount(String name, MountArgs args)
public Mount(String name, MountArgs args, CustomResourceOptions options)
type: databricks:Mount
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args MountArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args MountArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args MountArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args MountArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args MountArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var mountResource = new Databricks.Mount("mountResource", new()
{
    Abfs = new Databricks.Inputs.MountAbfsArgs
    {
        ClientId = "string",
        ClientSecretKey = "string",
        ClientSecretScope = "string",
        InitializeFileSystem = false,
        ContainerName = "string",
        Directory = "string",
        StorageAccountName = "string",
        TenantId = "string",
    },
    Adl = new Databricks.Inputs.MountAdlArgs
    {
        ClientId = "string",
        ClientSecretKey = "string",
        ClientSecretScope = "string",
        Directory = "string",
        SparkConfPrefix = "string",
        StorageResourceName = "string",
        TenantId = "string",
    },
    ClusterId = "string",
    EncryptionType = "string",
    ExtraConfigs = 
    {
        { "string", "any" },
    },
    Gs = new Databricks.Inputs.MountGsArgs
    {
        BucketName = "string",
        ServiceAccount = "string",
    },
    Name = "string",
    ResourceId = "string",
    S3 = new Databricks.Inputs.MountS3Args
    {
        BucketName = "string",
        InstanceProfile = "string",
    },
    Uri = "string",
    Wasb = new Databricks.Inputs.MountWasbArgs
    {
        AuthType = "string",
        TokenSecretKey = "string",
        TokenSecretScope = "string",
        ContainerName = "string",
        Directory = "string",
        StorageAccountName = "string",
    },
});
example, err := databricks.NewMount(ctx, "mountResource", &databricks.MountArgs{
	Abfs: &databricks.MountAbfsArgs{
		ClientId:             pulumi.String("string"),
		ClientSecretKey:      pulumi.String("string"),
		ClientSecretScope:    pulumi.String("string"),
		InitializeFileSystem: pulumi.Bool(false),
		ContainerName:        pulumi.String("string"),
		Directory:            pulumi.String("string"),
		StorageAccountName:   pulumi.String("string"),
		TenantId:             pulumi.String("string"),
	},
	Adl: &databricks.MountAdlArgs{
		ClientId:            pulumi.String("string"),
		ClientSecretKey:     pulumi.String("string"),
		ClientSecretScope:   pulumi.String("string"),
		Directory:           pulumi.String("string"),
		SparkConfPrefix:     pulumi.String("string"),
		StorageResourceName: pulumi.String("string"),
		TenantId:            pulumi.String("string"),
	},
	ClusterId:      pulumi.String("string"),
	EncryptionType: pulumi.String("string"),
	ExtraConfigs: pulumi.Map{
		"string": pulumi.Any("any"),
	},
	Gs: &databricks.MountGsArgs{
		BucketName:     pulumi.String("string"),
		ServiceAccount: pulumi.String("string"),
	},
	Name:       pulumi.String("string"),
	ResourceId: pulumi.String("string"),
	S3: &databricks.MountS3Args{
		BucketName:      pulumi.String("string"),
		InstanceProfile: pulumi.String("string"),
	},
	Uri: pulumi.String("string"),
	Wasb: &databricks.MountWasbArgs{
		AuthType:           pulumi.String("string"),
		TokenSecretKey:     pulumi.String("string"),
		TokenSecretScope:   pulumi.String("string"),
		ContainerName:      pulumi.String("string"),
		Directory:          pulumi.String("string"),
		StorageAccountName: pulumi.String("string"),
	},
})
var mountResource = new Mount("mountResource", MountArgs.builder()
    .abfs(MountAbfsArgs.builder()
        .clientId("string")
        .clientSecretKey("string")
        .clientSecretScope("string")
        .initializeFileSystem(false)
        .containerName("string")
        .directory("string")
        .storageAccountName("string")
        .tenantId("string")
        .build())
    .adl(MountAdlArgs.builder()
        .clientId("string")
        .clientSecretKey("string")
        .clientSecretScope("string")
        .directory("string")
        .sparkConfPrefix("string")
        .storageResourceName("string")
        .tenantId("string")
        .build())
    .clusterId("string")
    .encryptionType("string")
    .extraConfigs(Map.of("string", "any"))
    .gs(MountGsArgs.builder()
        .bucketName("string")
        .serviceAccount("string")
        .build())
    .name("string")
    .resourceId("string")
    .s3(MountS3Args.builder()
        .bucketName("string")
        .instanceProfile("string")
        .build())
    .uri("string")
    .wasb(MountWasbArgs.builder()
        .authType("string")
        .tokenSecretKey("string")
        .tokenSecretScope("string")
        .containerName("string")
        .directory("string")
        .storageAccountName("string")
        .build())
    .build());
mount_resource = databricks.Mount("mountResource",
    abfs=databricks.MountAbfsArgs(
        client_id="string",
        client_secret_key="string",
        client_secret_scope="string",
        initialize_file_system=False,
        container_name="string",
        directory="string",
        storage_account_name="string",
        tenant_id="string",
    ),
    adl=databricks.MountAdlArgs(
        client_id="string",
        client_secret_key="string",
        client_secret_scope="string",
        directory="string",
        spark_conf_prefix="string",
        storage_resource_name="string",
        tenant_id="string",
    ),
    cluster_id="string",
    encryption_type="string",
    extra_configs={
        "string": "any",
    },
    gs=databricks.MountGsArgs(
        bucket_name="string",
        service_account="string",
    ),
    name="string",
    resource_id="string",
    s3=databricks.MountS3Args(
        bucket_name="string",
        instance_profile="string",
    ),
    uri="string",
    wasb=databricks.MountWasbArgs(
        auth_type="string",
        token_secret_key="string",
        token_secret_scope="string",
        container_name="string",
        directory="string",
        storage_account_name="string",
    ))
const mountResource = new databricks.Mount("mountResource", {
    abfs: {
        clientId: "string",
        clientSecretKey: "string",
        clientSecretScope: "string",
        initializeFileSystem: false,
        containerName: "string",
        directory: "string",
        storageAccountName: "string",
        tenantId: "string",
    },
    adl: {
        clientId: "string",
        clientSecretKey: "string",
        clientSecretScope: "string",
        directory: "string",
        sparkConfPrefix: "string",
        storageResourceName: "string",
        tenantId: "string",
    },
    clusterId: "string",
    encryptionType: "string",
    extraConfigs: {
        string: "any",
    },
    gs: {
        bucketName: "string",
        serviceAccount: "string",
    },
    name: "string",
    resourceId: "string",
    s3: {
        bucketName: "string",
        instanceProfile: "string",
    },
    uri: "string",
    wasb: {
        authType: "string",
        tokenSecretKey: "string",
        tokenSecretScope: "string",
        containerName: "string",
        directory: "string",
        storageAccountName: "string",
    },
});
type: databricks:Mount
properties:
    abfs:
        clientId: string
        clientSecretKey: string
        clientSecretScope: string
        containerName: string
        directory: string
        initializeFileSystem: false
        storageAccountName: string
        tenantId: string
    adl:
        clientId: string
        clientSecretKey: string
        clientSecretScope: string
        directory: string
        sparkConfPrefix: string
        storageResourceName: string
        tenantId: string
    clusterId: string
    encryptionType: string
    extraConfigs:
        string: any
    gs:
        bucketName: string
        serviceAccount: string
    name: string
    resourceId: string
    s3:
        bucketName: string
        instanceProfile: string
    uri: string
    wasb:
        authType: string
        containerName: string
        directory: string
        storageAccountName: string
        tokenSecretKey: string
        tokenSecretScope: string
Mount Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
The Mount resource accepts the following input properties:
- Abfs MountAbfsArgs
- Adl MountAdlArgs
- ClusterId string
- EncryptionType string
- ExtraConfigs map[string]interface{}
- Gs MountGsArgs
- Name string
- ResourceId string
- S3 MountS3Args
- Uri string
- Wasb MountWasbArgs
- abfs MountAbfsArgs
- adl MountAdlArgs
- cluster_id str
- encryption_type str
- extra_configs Mapping[str, Any]
- gs MountGsArgs
- name str
- resource_id str
- s3 MountS3Args
- uri str
- wasb MountWasbArgs
- abfs Property Map
- adl Property Map
- clusterId String
- encryptionType String
- extraConfigs Map<Any>
- gs Property Map
- name String
- resourceId String
- s3 Property Map
- uri String
- wasb Property Map
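To make the shape of these inputs concrete, here is a minimal TypeScript sketch that mounts a Google Cloud Storage bucket via the gs block; the bucket name and service account e-mail are placeholder values:
import * as databricks from "@pulumi/databricks";

// Minimal sketch: only the `gs` block is supplied; every other input is optional.
const gcsMount = new databricks.Mount("gcsMount", {
    name: "gcs",
    gs: {
        bucketName: "my-bucket",
        serviceAccount: "mounting-sa@my-project.iam.gserviceaccount.com",
    },
});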
Outputs
All input properties are implicitly available as output properties. Additionally, the Mount resource produces the following output properties:
- Id string
- The provider-assigned unique ID for this managed resource.
- Source string
- (String) HDFS-compatible url
Look up Existing Mount Resource
Get an existing Mount resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: MountState, opts?: CustomResourceOptions): Mount
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        abfs: Optional[MountAbfsArgs] = None,
        adl: Optional[MountAdlArgs] = None,
        cluster_id: Optional[str] = None,
        encryption_type: Optional[str] = None,
        extra_configs: Optional[Mapping[str, Any]] = None,
        gs: Optional[MountGsArgs] = None,
        name: Optional[str] = None,
        resource_id: Optional[str] = None,
        s3: Optional[MountS3Args] = None,
        source: Optional[str] = None,
        uri: Optional[str] = None,
        wasb: Optional[MountWasbArgs] = None) -> Mount
func GetMount(ctx *Context, name string, id IDInput, state *MountState, opts ...ResourceOption) (*Mount, error)
public static Mount Get(string name, Input<string> id, MountState? state, CustomResourceOptions? opts = null)
public static Mount get(String name, Output<String> id, MountState state, CustomResourceOptions options)
Resource lookup is not supported in YAML.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Abfs MountAbfsArgs
- Adl MountAdlArgs
- ClusterId string
- EncryptionType string
- ExtraConfigs map[string]interface{}
- Gs MountGsArgs
- Name string
- ResourceId string
- S3 MountS3Args
- Source string
- (String) HDFS-compatible url
- Uri string
- Wasb MountWasbArgs
- abfs MountAbfsArgs
- adl MountAdlArgs
- cluster_id str
- encryption_type str
- extra_configs Mapping[str, Any]
- gs MountGsArgs
- name str
- resource_id str
- s3 MountS3Args
- source str
- (String) HDFS-compatible url
- uri str
- wasb MountWasbArgs
- abfs Property Map
- adl Property Map
- clusterId String
- encryptionType String
- extraConfigs Map<Any>
- gs Property Map
- name String
- resourceId String
- s3 Property Map
- source String
- (String) HDFS-compatible url
- uri String
- wasb Property Map
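As a usage sketch, the lookup in TypeScript can retrieve a previously created mount and export its HDFS-compatible source URL; "existing-mount-id" below is a placeholder for the ID recorded in your stack's state:
import * as databricks from "@pulumi/databricks";

// Look up an existing Mount by logical name and provider ID (placeholder).
const existing = databricks.Mount.get("existing", "existing-mount-id");

// `source` is the (String) HDFS-compatible url output described above.
export const mountSource = existing.source;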
Supporting Types
MountAbfs, MountAbfsArgs    
- ClientId string
- ClientSecretKey string
- ClientSecretScope string
- InitializeFileSystem bool
- ContainerName string
- Directory string
- StorageAccountName string
- TenantId string
- ClientId string
- ClientSecretKey string
- ClientSecretScope string
- InitializeFileSystem bool
- ContainerName string
- Directory string
- StorageAccountName string
- TenantId string
- clientId String
- clientSecretKey String
- clientSecretScope String
- initializeFileSystem Boolean
- containerName String
- directory String
- storageAccountName String
- tenantId String
- clientId string
- clientSecretKey string
- clientSecretScope string
- initializeFileSystem boolean
- containerName string
- directory string
- storageAccountName string
- tenantId string
- client_id str
- client_secret_key str
- client_secret_scope str
- initialize_file_system bool
- container_name str
- directory str
- storage_account_name str
- tenant_id str
- clientId String
- clientSecretKey String
- clientSecretScope String
- initializeFileSystem Boolean
- containerName String
- directory String
- storageAccountName String
- tenantId String
MountAdl, MountAdlArgs    
- ClientId string
- ClientSecretKey string
- ClientSecretScope string
- Directory string
- SparkConfPrefix string
- StorageResourceName string
- TenantId string
- ClientId string
- ClientSecretKey string
- ClientSecretScope string
- Directory string
- SparkConfPrefix string
- StorageResourceName string
- TenantId string
- clientId String
- clientSecretKey String
- clientSecretScope String
- directory String
- sparkConfPrefix String
- storageResourceName String
- tenantId String
- clientId string
- clientSecretKey string
- clientSecretScope string
- directory string
- sparkConfPrefix string
- storageResourceName string
- tenantId string
- client_id str
- client_secret_key str
- client_secret_scope str
- directory str
- spark_conf_prefix str
- storage_resource_name str
- tenant_id str
- clientId String
- clientSecretKey String
- clientSecretScope String
- directory String
- sparkConfPrefix String
- storageResourceName String
- tenantId String
MountGs, MountGsArgs    
- BucketName string
- ServiceAccount string
- BucketName string
- ServiceAccount string
- bucketName String
- serviceAccount String
- bucketName string
- serviceAccount string
- bucket_name str
- service_account str
- bucketName String
- serviceAccount String
MountS3, MountS3Args    
- BucketName string
- InstanceProfile string
- BucketName string
- InstanceProfile string
- bucketName String
- instanceProfile String
- bucketName string
- instanceProfile string
- bucket_name str
- instance_profile str
- bucketName String
- instanceProfile String
MountWasb, MountWasbArgs    
- AuthType string
- TokenSecretKey string
- TokenSecretScope string
- ContainerName string
- Directory string
- StorageAccountName string
- AuthType string
- TokenSecretKey string
- TokenSecretScope string
- ContainerName string
- Directory string
- StorageAccountName string
- authType String
- tokenSecretKey String
- tokenSecretScope String
- containerName String
- directory String
- storageAccountName String
- authType string
- tokenSecretKey string
- tokenSecretScope string
- containerName string
- directory string
- storageAccountName string
- auth_type str
- token_secret_key str
- token_secret_scope str
- container_name str
- directory str
- storage_account_name str
- authType String
- tokenSecretKey String
- tokenSecretScope String
- containerName String
- directory String
- storageAccountName String
Import
Note: Importing this resource is not currently supported.
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- databricks pulumi/pulumi-databricks
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the databricks Terraform Provider.