alicloud.log.OssExport
Explore with Pulumi AI
Log service data delivery management. This service provides the function of delivering data in a logstore to OSS product storage. Refer to details.
NOTE: Available in 1.187.0+
Example Usage
Basic Usage
import * as pulumi from "@pulumi/pulumi";
import * as alicloud from "@pulumi/alicloud";
import * as random from "@pulumi/random";
const _default = new random.index.Integer("default", {
max: 99999,
min: 10000,
});
const example = new alicloud.log.Project("example", {
name: `terraform-example-${_default.result}`,
description: "terraform-example",
tags: {
Created: "TF",
For: "example",
},
});
const exampleStore = new alicloud.log.Store("example", {
project: example.name,
name: "example-store",
retentionPeriod: 3650,
shardCount: 3,
autoSplit: true,
maxSplitShardCount: 60,
appendMeta: true,
});
const exampleOssExport = new alicloud.log.OssExport("example", {
projectName: example.name,
logstoreName: exampleStore.name,
exportName: "terraform-example",
displayName: "terraform-example",
bucket: "example-bucket",
prefix: "root",
suffix: "",
bufferInterval: 300,
bufferSize: 250,
compressType: "none",
pathFormat: "%Y/%m/%d/%H/%M",
contentType: "json",
jsonEnableTag: true,
roleArn: "role_arn_for_oss_write",
logReadRoleArn: "role_arn_for_sls_read",
timeZone: "+0800",
});
import pulumi
import pulumi_alicloud as alicloud
import pulumi_random as random
default = random.index.Integer("default",
max=99999,
min=10000)
example = alicloud.log.Project("example",
name=f"terraform-example-{default['result']}",
description="terraform-example",
tags={
"Created": "TF",
"For": "example",
})
example_store = alicloud.log.Store("example",
project=example.name,
name="example-store",
retention_period=3650,
shard_count=3,
auto_split=True,
max_split_shard_count=60,
append_meta=True)
example_oss_export = alicloud.log.OssExport("example",
project_name=example.name,
logstore_name=example_store.name,
export_name="terraform-example",
display_name="terraform-example",
bucket="example-bucket",
prefix="root",
suffix="",
buffer_interval=300,
buffer_size=250,
compress_type="none",
path_format="%Y/%m/%d/%H/%M",
content_type="json",
json_enable_tag=True,
role_arn="role_arn_for_oss_write",
log_read_role_arn="role_arn_for_sls_read",
time_zone="+0800")
package main
import (
"fmt"
"github.com/pulumi/pulumi-alicloud/sdk/v3/go/alicloud/log"
"github.com/pulumi/pulumi-random/sdk/v4/go/random"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_default, err := random.NewInteger(ctx, "default", &random.IntegerArgs{
Max: 99999,
Min: 10000,
})
if err != nil {
return err
}
example, err := log.NewProject(ctx, "example", &log.ProjectArgs{
Name: pulumi.String(fmt.Sprintf("terraform-example-%v", _default.Result)),
Description: pulumi.String("terraform-example"),
Tags: pulumi.Map{
"Created": pulumi.Any("TF"),
"For": pulumi.Any("example"),
},
})
if err != nil {
return err
}
exampleStore, err := log.NewStore(ctx, "example", &log.StoreArgs{
Project: example.Name,
Name: pulumi.String("example-store"),
RetentionPeriod: pulumi.Int(3650),
ShardCount: pulumi.Int(3),
AutoSplit: pulumi.Bool(true),
MaxSplitShardCount: pulumi.Int(60),
AppendMeta: pulumi.Bool(true),
})
if err != nil {
return err
}
_, err = log.NewOssExport(ctx, "example", &log.OssExportArgs{
ProjectName: example.Name,
LogstoreName: exampleStore.Name,
ExportName: pulumi.String("terraform-example"),
DisplayName: pulumi.String("terraform-example"),
Bucket: pulumi.String("example-bucket"),
Prefix: pulumi.String("root"),
Suffix: pulumi.String(""),
BufferInterval: pulumi.Int(300),
BufferSize: pulumi.Int(250),
CompressType: pulumi.String("none"),
PathFormat: pulumi.String("%Y/%m/%d/%H/%M"),
ContentType: pulumi.String("json"),
JsonEnableTag: pulumi.Bool(true),
RoleArn: pulumi.String("role_arn_for_oss_write"),
LogReadRoleArn: pulumi.String("role_arn_for_sls_read"),
TimeZone: pulumi.String("+0800"),
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using AliCloud = Pulumi.AliCloud;
using Random = Pulumi.Random;
return await Deployment.RunAsync(() =>
{
var @default = new Random.Index.Integer("default", new()
{
Max = 99999,
Min = 10000,
});
var example = new AliCloud.Log.Project("example", new()
{
Name = $"terraform-example-{@default.Result}",
Description = "terraform-example",
Tags =
{
{ "Created", "TF" },
{ "For", "example" },
},
});
var exampleStore = new AliCloud.Log.Store("example", new()
{
Project = example.Name,
Name = "example-store",
RetentionPeriod = 3650,
ShardCount = 3,
AutoSplit = true,
MaxSplitShardCount = 60,
AppendMeta = true,
});
var exampleOssExport = new AliCloud.Log.OssExport("example", new()
{
ProjectName = example.Name,
LogstoreName = exampleStore.Name,
ExportName = "terraform-example",
DisplayName = "terraform-example",
Bucket = "example-bucket",
Prefix = "root",
Suffix = "",
BufferInterval = 300,
BufferSize = 250,
CompressType = "none",
PathFormat = "%Y/%m/%d/%H/%M",
ContentType = "json",
JsonEnableTag = true,
RoleArn = "role_arn_for_oss_write",
LogReadRoleArn = "role_arn_for_sls_read",
TimeZone = "+0800",
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.random.Integer;
import com.pulumi.random.IntegerArgs;
import com.pulumi.alicloud.log.Project;
import com.pulumi.alicloud.log.ProjectArgs;
import com.pulumi.alicloud.log.Store;
import com.pulumi.alicloud.log.StoreArgs;
import com.pulumi.alicloud.log.OssExport;
import com.pulumi.alicloud.log.OssExportArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var default_ = new Integer("default", IntegerArgs.builder()
.max(99999)
.min(10000)
.build());
var example = new Project("example", ProjectArgs.builder()
.name(String.format("terraform-example-%s", default_.result()))
.description("terraform-example")
.tags(Map.ofEntries(
Map.entry("Created", "TF"),
Map.entry("For", "example")
))
.build());
var exampleStore = new Store("exampleStore", StoreArgs.builder()
.project(example.name())
.name("example-store")
.retentionPeriod(3650)
.shardCount(3)
.autoSplit(true)
.maxSplitShardCount(60)
.appendMeta(true)
.build());
var exampleOssExport = new OssExport("exampleOssExport", OssExportArgs.builder()
.projectName(example.name())
.logstoreName(exampleStore.name())
.exportName("terraform-example")
.displayName("terraform-example")
.bucket("example-bucket")
.prefix("root")
.suffix("")
.bufferInterval(300)
.bufferSize(250)
.compressType("none")
.pathFormat("%Y/%m/%d/%H/%M")
.contentType("json")
.jsonEnableTag(true)
.roleArn("role_arn_for_oss_write")
.logReadRoleArn("role_arn_for_sls_read")
.timeZone("+0800")
.build());
}
}
resources:
default:
type: random:integer
properties:
max: 99999
min: 10000
example:
type: alicloud:log:Project
properties:
name: terraform-example-${default.result}
description: terraform-example
tags:
Created: TF
For: example
exampleStore:
type: alicloud:log:Store
name: example
properties:
project: ${example.name}
name: example-store
retentionPeriod: 3650
shardCount: 3
autoSplit: true
maxSplitShardCount: 60
appendMeta: true
exampleOssExport:
type: alicloud:log:OssExport
name: example
properties:
projectName: ${example.name}
logstoreName: ${exampleStore.name}
exportName: terraform-example
displayName: terraform-example
bucket: example-bucket
prefix: root
suffix: ""
bufferInterval: 300
bufferSize: 250
compressType: none
pathFormat: '%Y/%m/%d/%H/%M'
contentType: json
jsonEnableTag: true
roleArn: role_arn_for_oss_write
logReadRoleArn: role_arn_for_sls_read
timeZone: '+0800'
Create OssExport Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new OssExport(name: string, args: OssExportArgs, opts?: CustomResourceOptions);
@overload
def OssExport(resource_name: str,
args: OssExportArgs,
opts: Optional[ResourceOptions] = None)
@overload
def OssExport(resource_name: str,
opts: Optional[ResourceOptions] = None,
export_name: Optional[str] = None,
buffer_interval: Optional[int] = None,
buffer_size: Optional[int] = None,
time_zone: Optional[str] = None,
project_name: Optional[str] = None,
content_type: Optional[str] = None,
bucket: Optional[str] = None,
path_format: Optional[str] = None,
logstore_name: Optional[str] = None,
csv_config_columns: Optional[Sequence[str]] = None,
json_enable_tag: Optional[bool] = None,
csv_config_null: Optional[str] = None,
csv_config_quote: Optional[str] = None,
display_name: Optional[str] = None,
csv_config_header: Optional[bool] = None,
from_time: Optional[int] = None,
csv_config_linefeed: Optional[str] = None,
log_read_role_arn: Optional[str] = None,
csv_config_escape: Optional[str] = None,
csv_config_delimiter: Optional[str] = None,
prefix: Optional[str] = None,
config_columns: Optional[Sequence[OssExportConfigColumnArgs]] = None,
role_arn: Optional[str] = None,
suffix: Optional[str] = None,
compress_type: Optional[str] = None)
func NewOssExport(ctx *Context, name string, args OssExportArgs, opts ...ResourceOption) (*OssExport, error)
public OssExport(string name, OssExportArgs args, CustomResourceOptions? opts = null)
public OssExport(String name, OssExportArgs args)
public OssExport(String name, OssExportArgs args, CustomResourceOptions options)
type: alicloud:log:OssExport
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args OssExportArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args OssExportArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args OssExportArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args OssExportArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args OssExportArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var ossExportResource = new AliCloud.Log.OssExport("ossExportResource", new()
{
ExportName = "string",
BufferInterval = 0,
BufferSize = 0,
TimeZone = "string",
ProjectName = "string",
ContentType = "string",
Bucket = "string",
PathFormat = "string",
LogstoreName = "string",
CsvConfigColumns = new[]
{
"string",
},
JsonEnableTag = false,
CsvConfigNull = "string",
CsvConfigQuote = "string",
DisplayName = "string",
CsvConfigHeader = false,
FromTime = 0,
CsvConfigLinefeed = "string",
LogReadRoleArn = "string",
CsvConfigEscape = "string",
CsvConfigDelimiter = "string",
Prefix = "string",
ConfigColumns = new[]
{
new AliCloud.Log.Inputs.OssExportConfigColumnArgs
{
Name = "string",
Type = "string",
},
},
RoleArn = "string",
Suffix = "string",
CompressType = "string",
});
example, err := log.NewOssExport(ctx, "ossExportResource", &log.OssExportArgs{
ExportName: pulumi.String("string"),
BufferInterval: pulumi.Int(0),
BufferSize: pulumi.Int(0),
TimeZone: pulumi.String("string"),
ProjectName: pulumi.String("string"),
ContentType: pulumi.String("string"),
Bucket: pulumi.String("string"),
PathFormat: pulumi.String("string"),
LogstoreName: pulumi.String("string"),
CsvConfigColumns: pulumi.StringArray{
pulumi.String("string"),
},
JsonEnableTag: pulumi.Bool(false),
CsvConfigNull: pulumi.String("string"),
CsvConfigQuote: pulumi.String("string"),
DisplayName: pulumi.String("string"),
CsvConfigHeader: pulumi.Bool(false),
FromTime: pulumi.Int(0),
CsvConfigLinefeed: pulumi.String("string"),
LogReadRoleArn: pulumi.String("string"),
CsvConfigEscape: pulumi.String("string"),
CsvConfigDelimiter: pulumi.String("string"),
Prefix: pulumi.String("string"),
ConfigColumns: log.OssExportConfigColumnArray{
&log.OssExportConfigColumnArgs{
Name: pulumi.String("string"),
Type: pulumi.String("string"),
},
},
RoleArn: pulumi.String("string"),
Suffix: pulumi.String("string"),
CompressType: pulumi.String("string"),
})
var ossExportResource = new OssExport("ossExportResource", OssExportArgs.builder()
.exportName("string")
.bufferInterval(0)
.bufferSize(0)
.timeZone("string")
.projectName("string")
.contentType("string")
.bucket("string")
.pathFormat("string")
.logstoreName("string")
.csvConfigColumns("string")
.jsonEnableTag(false)
.csvConfigNull("string")
.csvConfigQuote("string")
.displayName("string")
.csvConfigHeader(false)
.fromTime(0)
.csvConfigLinefeed("string")
.logReadRoleArn("string")
.csvConfigEscape("string")
.csvConfigDelimiter("string")
.prefix("string")
.configColumns(OssExportConfigColumnArgs.builder()
.name("string")
.type("string")
.build())
.roleArn("string")
.suffix("string")
.compressType("string")
.build());
oss_export_resource = alicloud.log.OssExport("ossExportResource",
export_name="string",
buffer_interval=0,
buffer_size=0,
time_zone="string",
project_name="string",
content_type="string",
bucket="string",
path_format="string",
logstore_name="string",
csv_config_columns=["string"],
json_enable_tag=False,
csv_config_null="string",
csv_config_quote="string",
display_name="string",
csv_config_header=False,
from_time=0,
csv_config_linefeed="string",
log_read_role_arn="string",
csv_config_escape="string",
csv_config_delimiter="string",
prefix="string",
config_columns=[alicloud.log.OssExportConfigColumnArgs(
name="string",
type="string",
)],
role_arn="string",
suffix="string",
compress_type="string")
const ossExportResource = new alicloud.log.OssExport("ossExportResource", {
exportName: "string",
bufferInterval: 0,
bufferSize: 0,
timeZone: "string",
projectName: "string",
contentType: "string",
bucket: "string",
pathFormat: "string",
logstoreName: "string",
csvConfigColumns: ["string"],
jsonEnableTag: false,
csvConfigNull: "string",
csvConfigQuote: "string",
displayName: "string",
csvConfigHeader: false,
fromTime: 0,
csvConfigLinefeed: "string",
logReadRoleArn: "string",
csvConfigEscape: "string",
csvConfigDelimiter: "string",
prefix: "string",
configColumns: [{
name: "string",
type: "string",
}],
roleArn: "string",
suffix: "string",
compressType: "string",
});
type: alicloud:log:OssExport
properties:
bucket: string
bufferInterval: 0
bufferSize: 0
compressType: string
configColumns:
- name: string
type: string
contentType: string
csvConfigColumns:
- string
csvConfigDelimiter: string
csvConfigEscape: string
csvConfigHeader: false
csvConfigLinefeed: string
csvConfigNull: string
csvConfigQuote: string
displayName: string
exportName: string
fromTime: 0
jsonEnableTag: false
logReadRoleArn: string
logstoreName: string
pathFormat: string
prefix: string
projectName: string
roleArn: string
suffix: string
timeZone: string
OssExport Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
The OssExport resource accepts the following input properties:
- Bucket string
- The name of the oss bucket.
- Buffer
Interval int - How often is it delivered every interval.
- Buffer
Size int - Automatically control the creation interval of delivery tasks and set the upper limit of an OSS object size (calculated in uncompressed), unit:
MB
. - Content
Type string - Storage format, only supports four types:
json
,parquet
,orc
,csv
. According to the different format, please select the following parameters - Export
Name string - Delivery configuration name, it can only contain lowercase letters, numbers, dashes
-
and underscores_
. It must start and end with lowercase letters or numbers, and the name must be 2 to 128 characters long. - Logstore
Name string - The name of the log logstore.
- Path
Format string - The OSS Bucket directory is dynamically generated according to the creation time of the export task, it cannot start with a forward slash
/
, the default value is%Y/%m/%d/%H/%M
. - Project
Name string - The name of the log project. It is unique within one Alicloud account.
- Time
Zone string - This time zone that is used to format the time,
+0800
e.g. - Compress
Type string - OSS data storage compression method, support:
none
,snappy
,zstd
,gzip
. Among them, none means that the original data is not compressed, and snappy means that the data is compressed using the snappy algorithm, which can reduce the storage space usage of theOSS Bucket
. - Config
Columns List<Pulumi.Ali Cloud. Log. Inputs. Oss Export Config Column> - Configure columns when
content_type
isparquet
ororc
. - Csv
Config List<string>Columns - Field configuration in csv content_type.
- Csv
Config stringDelimiter - Separator configuration in csv content_type.
- Csv
Config stringEscape - escape in csv content_type.
- Csv
Config boolHeader - Indicates whether to write the field name to the CSV file, the default value is
false
. - Csv
Config stringLinefeed - lineFeed in csv content_type.
- Csv
Config stringNull - Invalid field content in csv content_type.
- Csv
Config stringQuote - Escape character in csv content_type.
- Display
Name string - The display name for oss export.
- From
Time int - The log from when to export to oss.
- Json
Enable boolTag - Whether to deliver the label when
content_type
=json
. - Log
Read stringRole Arn - Used for logstore reading, the role should have log read policy, such as
acs:ram::13234:role/logrole
, iflog_read_role_arn
is not set,role_arn
is used to read logstore. - Prefix string
- The data synchronized from Log Service to OSS will be stored in this directory of Bucket.
- Role
Arn string - Used to write to oss bucket, the OSS Bucket owner creates the role mark which has the oss bucket write policy, such as
acs:ram::13234:role/logrole
. - Suffix string
- The suffix for the objects in which the shipped data is stored.
- Bucket string
- The name of the oss bucket.
- Buffer
Interval int - How often is it delivered every interval.
- Buffer
Size int - Automatically control the creation interval of delivery tasks and set the upper limit of an OSS object size (calculated in uncompressed), unit:
MB
. - Content
Type string - Storage format, only supports four types:
json
,parquet
,orc
,csv
. According to the different format, please select the following parameters - Export
Name string - Delivery configuration name, it can only contain lowercase letters, numbers, dashes
-
and underscores_
. It must start and end with lowercase letters or numbers, and the name must be 2 to 128 characters long. - Logstore
Name string - The name of the log logstore.
- Path
Format string - The OSS Bucket directory is dynamically generated according to the creation time of the export task, it cannot start with a forward slash
/
, the default value is%Y/%m/%d/%H/%M
. - Project
Name string - The name of the log project. It is unique within one Alicloud account.
- Time
Zone string - This time zone that is used to format the time,
+0800
e.g. - Compress
Type string - OSS data storage compression method, support:
none
,snappy
,zstd
,gzip
. Among them, none means that the original data is not compressed, and snappy means that the data is compressed using the snappy algorithm, which can reduce the storage space usage of theOSS Bucket
. - Config
Columns []OssExport Config Column Args - Configure columns when
content_type
isparquet
ororc
. - Csv
Config []stringColumns - Field configuration in csv content_type.
- Csv
Config stringDelimiter - Separator configuration in csv content_type.
- Csv
Config stringEscape - escape in csv content_type.
- Csv
Config boolHeader - Indicates whether to write the field name to the CSV file, the default value is
false
. - Csv
Config stringLinefeed - lineFeed in csv content_type.
- Csv
Config stringNull - Invalid field content in csv content_type.
- Csv
Config stringQuote - Escape character in csv content_type.
- Display
Name string - The display name for oss export.
- From
Time int - The log from when to export to oss.
- Json
Enable boolTag - Whether to deliver the label when
content_type
=json
. - Log
Read stringRole Arn - Used for logstore reading, the role should have log read policy, such as
acs:ram::13234:role/logrole
, iflog_read_role_arn
is not set,role_arn
is used to read logstore. - Prefix string
- The data synchronized from Log Service to OSS will be stored in this directory of Bucket.
- Role
Arn string - Used to write to oss bucket, the OSS Bucket owner creates the role mark which has the oss bucket write policy, such as
acs:ram::13234:role/logrole
. - Suffix string
- The suffix for the objects in which the shipped data is stored.
- bucket String
- The name of the oss bucket.
- buffer
Interval Integer - How often is it delivered every interval.
- buffer
Size Integer - Automatically control the creation interval of delivery tasks and set the upper limit of an OSS object size (calculated in uncompressed), unit:
MB
. - content
Type String - Storage format, only supports four types:
json
,parquet
,orc
,csv
. According to the different format, please select the following parameters - export
Name String - Delivery configuration name, it can only contain lowercase letters, numbers, dashes
-
and underscores_
. It must start and end with lowercase letters or numbers, and the name must be 2 to 128 characters long. - logstore
Name String - The name of the log logstore.
- path
Format String - The OSS Bucket directory is dynamically generated according to the creation time of the export task, it cannot start with a forward slash
/
, the default value is%Y/%m/%d/%H/%M
. - project
Name String - The name of the log project. It is unique within one Alicloud account.
- time
Zone String - This time zone that is used to format the time,
+0800
e.g. - compress
Type String - OSS data storage compression method, support:
none
,snappy
,zstd
,gzip
. Among them, none means that the original data is not compressed, and snappy means that the data is compressed using the snappy algorithm, which can reduce the storage space usage of theOSS Bucket
. - config
Columns List<OssExport Config Column> - Configure columns when
content_type
isparquet
ororc
. - csv
Config List<String>Columns - Field configuration in csv content_type.
- csv
Config StringDelimiter - Separator configuration in csv content_type.
- csv
Config StringEscape - escape in csv content_type.
- csv
Config BooleanHeader - Indicates whether to write the field name to the CSV file, the default value is
false
. - csv
Config StringLinefeed - lineFeed in csv content_type.
- csv
Config StringNull - Invalid field content in csv content_type.
- csv
Config StringQuote - Escape character in csv content_type.
- display
Name String - The display name for oss export.
- from
Time Integer - The log from when to export to oss.
- json
Enable BooleanTag - Whether to deliver the label when
content_type
=json
. - log
Read StringRole Arn - Used for logstore reading, the role should have log read policy, such as
acs:ram::13234:role/logrole
, iflog_read_role_arn
is not set,role_arn
is used to read logstore. - prefix String
- The data synchronized from Log Service to OSS will be stored in this directory of Bucket.
- role
Arn String - Used to write to oss bucket, the OSS Bucket owner creates the role mark which has the oss bucket write policy, such as
acs:ram::13234:role/logrole
. - suffix String
- The suffix for the objects in which the shipped data is stored.
- bucket string
- The name of the oss bucket.
- buffer
Interval number - How often is it delivered every interval.
- buffer
Size number - Automatically control the creation interval of delivery tasks and set the upper limit of an OSS object size (calculated in uncompressed), unit:
MB
. - content
Type string - Storage format, only supports four types:
json
,parquet
,orc
,csv
. According to the different format, please select the following parameters - export
Name string - Delivery configuration name, it can only contain lowercase letters, numbers, dashes
-
and underscores_
. It must start and end with lowercase letters or numbers, and the name must be 2 to 128 characters long. - logstore
Name string - The name of the log logstore.
- path
Format string - The OSS Bucket directory is dynamically generated according to the creation time of the export task, it cannot start with a forward slash
/
, the default value is%Y/%m/%d/%H/%M
. - project
Name string - The name of the log project. It is unique within one Alicloud account.
- time
Zone string - This time zone that is used to format the time,
+0800
e.g. - compress
Type string - OSS data storage compression method, support:
none
,snappy
,zstd
,gzip
. Among them, none means that the original data is not compressed, and snappy means that the data is compressed using the snappy algorithm, which can reduce the storage space usage of theOSS Bucket
. - config
Columns OssExport Config Column[] - Configure columns when
content_type
isparquet
ororc
. - csv
Config string[]Columns - Field configuration in csv content_type.
- csv
Config stringDelimiter - Separator configuration in csv content_type.
- csv
Config stringEscape - escape in csv content_type.
- csv
Config booleanHeader - Indicates whether to write the field name to the CSV file, the default value is
false
. - csv
Config stringLinefeed - lineFeed in csv content_type.
- csv
Config stringNull - Invalid field content in csv content_type.
- csv
Config stringQuote - Escape character in csv content_type.
- display
Name string - The display name for oss export.
- from
Time number - The log from when to export to oss.
- json
Enable booleanTag - Whether to deliver the label when
content_type
=json
. - log
Read stringRole Arn - Used for logstore reading, the role should have log read policy, such as
acs:ram::13234:role/logrole
, iflog_read_role_arn
is not set,role_arn
is used to read logstore. - prefix string
- The data synchronized from Log Service to OSS will be stored in this directory of Bucket.
- role
Arn string - Used to write to oss bucket, the OSS Bucket owner creates the role mark which has the oss bucket write policy, such as
acs:ram::13234:role/logrole
. - suffix string
- The suffix for the objects in which the shipped data is stored.
- bucket str
- The name of the oss bucket.
- buffer_
interval int - How often is it delivered every interval.
- buffer_
size int - Automatically control the creation interval of delivery tasks and set the upper limit of an OSS object size (calculated in uncompressed), unit:
MB
. - content_
type str - Storage format, only supports four types:
json
,parquet
,orc
,csv
. According to the different format, please select the following parameters - export_
name str - Delivery configuration name, it can only contain lowercase letters, numbers, dashes
-
and underscores_
. It must start and end with lowercase letters or numbers, and the name must be 2 to 128 characters long. - logstore_
name str - The name of the log logstore.
- path_
format str - The OSS Bucket directory is dynamically generated according to the creation time of the export task, it cannot start with a forward slash
/
, the default value is%Y/%m/%d/%H/%M
. - project_
name str - The name of the log project. It is unique within one Alicloud account.
- time_
zone str - This time zone that is used to format the time,
+0800
e.g. - compress_
type str - OSS data storage compression method, support:
none
,snappy
,zstd
,gzip
. Among them, none means that the original data is not compressed, and snappy means that the data is compressed using the snappy algorithm, which can reduce the storage space usage of theOSS Bucket
. - config_
columns Sequence[OssExport Config Column Args] - Configure columns when
content_type
isparquet
ororc
. - csv_
config_ Sequence[str]columns - Field configuration in csv content_type.
- csv_
config_ strdelimiter - Separator configuration in csv content_type.
- csv_
config_ strescape - escape in csv content_type.
- csv_
config_ boolheader - Indicates whether to write the field name to the CSV file, the default value is
false
. - csv_
config_ strlinefeed - lineFeed in csv content_type.
- csv_
config_ strnull - Invalid field content in csv content_type.
- csv_
config_ strquote - Escape character in csv content_type.
- display_
name str - The display name for oss export.
- from_
time int - The log from when to export to oss.
- json_
enable_ booltag - Whether to deliver the label when
content_type
=json
. - log_
read_ strrole_ arn - Used for logstore reading, the role should have log read policy, such as
acs:ram::13234:role/logrole
, iflog_read_role_arn
is not set,role_arn
is used to read logstore. - prefix str
- The data synchronized from Log Service to OSS will be stored in this directory of Bucket.
- role_
arn str - Used to write to oss bucket, the OSS Bucket owner creates the role mark which has the oss bucket write policy, such as
acs:ram::13234:role/logrole
. - suffix str
- The suffix for the objects in which the shipped data is stored.
- bucket String
- The name of the oss bucket.
- buffer
Interval Number - How often is it delivered every interval.
- buffer
Size Number - Automatically control the creation interval of delivery tasks and set the upper limit of an OSS object size (calculated in uncompressed), unit:
MB
. - content
Type String - Storage format, only supports four types:
json
,parquet
,orc
,csv
. According to the different format, please select the following parameters - export
Name String - Delivery configuration name, it can only contain lowercase letters, numbers, dashes
-
and underscores_
. It must start and end with lowercase letters or numbers, and the name must be 2 to 128 characters long. - logstore
Name String - The name of the log logstore.
- path
Format String - The OSS Bucket directory is dynamically generated according to the creation time of the export task, it cannot start with a forward slash
/
, the default value is%Y/%m/%d/%H/%M
. - project
Name String - The name of the log project. It is unique within one Alicloud account.
- time
Zone String - This time zone that is used to format the time,
+0800
e.g. - compress
Type String - OSS data storage compression method, support:
none
,snappy
,zstd
,gzip
. Among them, none means that the original data is not compressed, and snappy means that the data is compressed using the snappy algorithm, which can reduce the storage space usage of theOSS Bucket
. - config
Columns List<Property Map> - Configure columns when
content_type
isparquet
ororc
. - csv
Config List<String>Columns - Field configuration in csv content_type.
- csv
Config StringDelimiter - Separator configuration in csv content_type.
- csv
Config StringEscape - escape in csv content_type.
- csv
Config BooleanHeader - Indicates whether to write the field name to the CSV file, the default value is
false
. - csv
Config StringLinefeed - lineFeed in csv content_type.
- csv
Config StringNull - Invalid field content in csv content_type.
- csv
Config StringQuote - Escape character in csv content_type.
- display
Name String - The display name for oss export.
- from
Time Number - The log from when to export to oss.
- json
Enable BooleanTag - Whether to deliver the label when
content_type
=json
. - log
Read StringRole Arn - Used for logstore reading, the role should have log read policy, such as
acs:ram::13234:role/logrole
, iflog_read_role_arn
is not set,role_arn
is used to read logstore. - prefix String
- The data synchronized from Log Service to OSS will be stored in this directory of Bucket.
- role
Arn String - Used to write to oss bucket, the OSS Bucket owner creates the role mark which has the oss bucket write policy, such as
acs:ram::13234:role/logrole
. - suffix String
- The suffix for the objects in which the shipped data is stored.
Outputs
All input properties are implicitly available as output properties. Additionally, the OssExport resource produces the following output properties:
- Id string
- The provider-assigned unique ID for this managed resource.
- Id string
- The provider-assigned unique ID for this managed resource.
- id String
- The provider-assigned unique ID for this managed resource.
- id string
- The provider-assigned unique ID for this managed resource.
- id str
- The provider-assigned unique ID for this managed resource.
- id String
- The provider-assigned unique ID for this managed resource.
Look up Existing OssExport Resource
Get an existing OssExport resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: OssExportState, opts?: CustomResourceOptions): OssExport
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
bucket: Optional[str] = None,
buffer_interval: Optional[int] = None,
buffer_size: Optional[int] = None,
compress_type: Optional[str] = None,
config_columns: Optional[Sequence[OssExportConfigColumnArgs]] = None,
content_type: Optional[str] = None,
csv_config_columns: Optional[Sequence[str]] = None,
csv_config_delimiter: Optional[str] = None,
csv_config_escape: Optional[str] = None,
csv_config_header: Optional[bool] = None,
csv_config_linefeed: Optional[str] = None,
csv_config_null: Optional[str] = None,
csv_config_quote: Optional[str] = None,
display_name: Optional[str] = None,
export_name: Optional[str] = None,
from_time: Optional[int] = None,
json_enable_tag: Optional[bool] = None,
log_read_role_arn: Optional[str] = None,
logstore_name: Optional[str] = None,
path_format: Optional[str] = None,
prefix: Optional[str] = None,
project_name: Optional[str] = None,
role_arn: Optional[str] = None,
suffix: Optional[str] = None,
time_zone: Optional[str] = None) -> OssExport
func GetOssExport(ctx *Context, name string, id IDInput, state *OssExportState, opts ...ResourceOption) (*OssExport, error)
public static OssExport Get(string name, Input<string> id, OssExportState? state, CustomResourceOptions? opts = null)
public static OssExport get(String name, Output<String> id, OssExportState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Bucket string
- The name of the oss bucket.
- Buffer
Interval int - How often is it delivered every interval.
- Buffer
Size int - Automatically control the creation interval of delivery tasks and set the upper limit of an OSS object size (calculated in uncompressed), unit:
MB
. - Compress
Type string - OSS data storage compression method, support:
none
,snappy
,zstd
,gzip
. Among them, none means that the original data is not compressed, and snappy means that the data is compressed using the snappy algorithm, which can reduce the storage space usage of theOSS Bucket
. - Config
Columns List<Pulumi.Ali Cloud. Log. Inputs. Oss Export Config Column> - Configure columns when
content_type
isparquet
ororc
. - Content
Type string - Storage format, only supports four types:
json
,parquet
,orc
,csv
. According to the different format, please select the following parameters - Csv
Config List<string>Columns - Field configuration in csv content_type.
- Csv
Config stringDelimiter - Separator configuration in csv content_type.
- Csv
Config stringEscape - escape in csv content_type.
- Csv
Config boolHeader - Indicates whether to write the field name to the CSV file, the default value is
false
. - Csv
Config stringLinefeed - lineFeed in csv content_type.
- Csv
Config stringNull - Invalid field content in csv content_type.
- Csv
Config stringQuote - Escape character in csv content_type.
- Display
Name string - The display name for oss export.
- Export
Name string - Delivery configuration name, it can only contain lowercase letters, numbers, dashes
-
and underscores_
. It must start and end with lowercase letters or numbers, and the name must be 2 to 128 characters long. - From
Time int - The log from when to export to oss.
- Json
Enable boolTag - Whether to deliver the label when
content_type
=json
. - Log
Read stringRole Arn - Used for logstore reading, the role should have log read policy, such as
acs:ram::13234:role/logrole
, iflog_read_role_arn
is not set,role_arn
is used to read logstore. - Logstore
Name string - The name of the log logstore.
- Path
Format string - The OSS Bucket directory is dynamically generated according to the creation time of the export task, it cannot start with a forward slash
/
, the default value is%Y/%m/%d/%H/%M
. - Prefix string
- The data synchronized from Log Service to OSS will be stored in this directory of Bucket.
- Project
Name string - The name of the log project. It is unique within one Alicloud account.
- Role
Arn string - Used to write to oss bucket, the OSS Bucket owner creates the role mark which has the oss bucket write policy, such as
acs:ram::13234:role/logrole
. - Suffix string
- The suffix for the objects in which the shipped data is stored.
- Time
Zone string - The time zone that is used to format the time, e.g. +0800.
- Bucket string
- The name of the oss bucket.
- Buffer
Interval int - How often is it delivered every interval.
- Buffer
Size int - Automatically control the creation interval of delivery tasks and set the upper limit of an OSS object size (calculated in uncompressed), unit:
MB
. - Compress
Type string - OSS data storage compression method, support:
none
,snappy
,zstd
,gzip
. Among them, none means that the original data is not compressed, and snappy means that the data is compressed using the snappy algorithm, which can reduce the storage space usage of theOSS Bucket
. - Config
Columns []OssExport Config Column Args - Configure columns when
content_type
isparquet
ororc
. - Content
Type string - Storage format, only supports four types:
json
,parquet
,orc
,csv
. According to the different format, please select the following parameters - Csv
Config []stringColumns - Field configuration in csv content_type.
- Csv
Config stringDelimiter - Separator configuration in csv content_type.
- Csv
Config stringEscape - escape in csv content_type.
- Csv
Config boolHeader - Indicates whether to write the field name to the CSV file, the default value is
false
. - Csv
Config stringLinefeed - lineFeed in csv content_type.
- Csv
Config stringNull - Invalid field content in csv content_type.
- Csv
Config stringQuote - Escape character in csv content_type.
- Display
Name string - The display name for oss export.
- Export
Name string - Delivery configuration name, it can only contain lowercase letters, numbers, dashes
-
and underscores_
. It must start and end with lowercase letters or numbers, and the name must be 2 to 128 characters long. - From
Time int - The log from when to export to oss.
- Json
Enable boolTag - Whether to deliver the label when
content_type
=json
. - Log
Read stringRole Arn - Used for logstore reading, the role should have log read policy, such as
acs:ram::13234:role/logrole
, iflog_read_role_arn
is not set,role_arn
is used to read logstore. - Logstore
Name string - The name of the log logstore.
- Path
Format string - The OSS Bucket directory is dynamically generated according to the creation time of the export task, it cannot start with a forward slash
/
, the default value is%Y/%m/%d/%H/%M
. - Prefix string
- The data synchronized from Log Service to OSS will be stored in this directory of Bucket.
- Project
Name string - The name of the log project. It is unique within one Alicloud account.
- Role
Arn string - Used to write to oss bucket, the OSS Bucket owner creates the role mark which has the oss bucket write policy, such as
acs:ram::13234:role/logrole
. - Suffix string
- The suffix for the objects in which the shipped data is stored.
- Time
Zone string - The time zone that is used to format the time, e.g. +0800.
- bucket String
- The name of the oss bucket.
- buffer
Interval Integer - How often is it delivered every interval.
- buffer
Size Integer - Automatically control the creation interval of delivery tasks and set the upper limit of an OSS object size (calculated in uncompressed), unit:
MB
. - compress
Type String - OSS data storage compression method, support:
none
,snappy
,zstd
,gzip
. Among them, none means that the original data is not compressed, and snappy means that the data is compressed using the snappy algorithm, which can reduce the storage space usage of theOSS Bucket
. - config
Columns List<OssExport Config Column> - Configure columns when
content_type
isparquet
ororc
. - content
Type String - Storage format, only supports four types:
json
,parquet
,orc
,csv
. According to the different format, please select the following parameters - csv
Config List<String>Columns - Field configuration in csv content_type.
- csv
Config StringDelimiter - Separator configuration in csv content_type.
- csv
Config StringEscape - escape in csv content_type.
- csv
Config BooleanHeader - Indicates whether to write the field name to the CSV file, the default value is
false
. - csv
Config StringLinefeed - lineFeed in csv content_type.
- csv
Config StringNull - Invalid field content in csv content_type.
- csv
Config StringQuote - Escape character in csv content_type.
- display
Name String - The display name for oss export.
- export
Name String - Delivery configuration name, it can only contain lowercase letters, numbers, dashes
-
and underscores_
. It must start and end with lowercase letters or numbers, and the name must be 2 to 128 characters long. - from
Time Integer - The log from when to export to oss.
- json
Enable BooleanTag - Whether to deliver the label when
content_type
=json
. - log
Read StringRole Arn - Used for logstore reading, the role should have log read policy, such as
acs:ram::13234:role/logrole
, iflog_read_role_arn
is not set,role_arn
is used to read logstore. - logstore
Name String - The name of the log logstore.
- path
Format String - The OSS Bucket directory is dynamically generated according to the creation time of the export task, it cannot start with a forward slash
/
, the default value is%Y/%m/%d/%H/%M
. - prefix String
- The data synchronized from Log Service to OSS will be stored in this directory of Bucket.
- project
Name String - The name of the log project. It is unique within one Alicloud account.
- role
Arn String - Used to write to oss bucket, the OSS Bucket owner creates the role mark which has the oss bucket write policy, such as
acs:ram::13234:role/logrole
. - suffix String
- The suffix for the objects in which the shipped data is stored.
- time
Zone String - The time zone that is used to format the time, e.g. +0800.
- bucket string
- The name of the oss bucket.
- buffer
Interval number - How often is it delivered every interval.
- buffer
Size number - Automatically control the creation interval of delivery tasks and set the upper limit of an OSS object size (calculated in uncompressed), unit:
MB
. - compress
Type string - OSS data storage compression method, support:
none
,snappy
,zstd
,gzip
. Among them, none means that the original data is not compressed, and snappy means that the data is compressed using the snappy algorithm, which can reduce the storage space usage of theOSS Bucket
. - config
Columns OssExport Config Column[] - Configure columns when
content_type
isparquet
ororc
. - content
Type string - Storage format, only supports four types:
json
,parquet
,orc
,csv
. According to the different format, please select the following parameters - csv
Config string[]Columns - Field configuration in csv content_type.
- csv
Config stringDelimiter - Separator configuration in csv content_type.
- csv
Config stringEscape - escape in csv content_type.
- csv
Config booleanHeader - Indicates whether to write the field name to the CSV file, the default value is
false
. - csv
Config stringLinefeed - lineFeed in csv content_type.
- csv
Config stringNull - Invalid field content in csv content_type.
- csv
Config stringQuote - Escape character in csv content_type.
- display
Name string - The display name for oss export.
- export
Name string - Delivery configuration name, it can only contain lowercase letters, numbers, dashes
-
and underscores_
. It must start and end with lowercase letters or numbers, and the name must be 2 to 128 characters long. - from
Time number - The log from when to export to oss.
- json
Enable booleanTag - Whether to deliver the label when
content_type
=json
. - log
Read stringRole Arn - Used for logstore reading, the role should have log read policy, such as
acs:ram::13234:role/logrole
, iflog_read_role_arn
is not set,role_arn
is used to read logstore. - logstore
Name string - The name of the log logstore.
- path
Format string - The OSS Bucket directory is dynamically generated according to the creation time of the export task, it cannot start with a forward slash
/
, the default value is%Y/%m/%d/%H/%M
. - prefix string
- The data synchronized from Log Service to OSS will be stored in this directory of Bucket.
- project
Name string - The name of the log project. It is unique within one Alicloud account.
- role
Arn string - Used to write to oss bucket, the OSS Bucket owner creates the role mark which has the oss bucket write policy, such as
acs:ram::13234:role/logrole
. - suffix string
- The suffix for the objects in which the shipped data is stored.
- time
Zone string - The time zone that is used to format the time, e.g. +0800.
- bucket str
- The name of the oss bucket.
- buffer_
interval int - How often is it delivered every interval.
- buffer_
size int - Automatically control the creation interval of delivery tasks and set the upper limit of an OSS object size (calculated in uncompressed), unit:
MB
. - compress_
type str - OSS data storage compression method, support:
none
,snappy
,zstd
,gzip
. Among them, none means that the original data is not compressed, and snappy means that the data is compressed using the snappy algorithm, which can reduce the storage space usage of theOSS Bucket
. - config_
columns Sequence[OssExport Config Column Args] - Configure columns when
content_type
isparquet
ororc
. - content_
type str - Storage format, only supports four types:
json
,parquet
,orc
,csv
. According to the different format, please select the following parameters - csv_
config_ Sequence[str]columns - Field configuration in csv content_type.
- csv_
config_ strdelimiter - Separator configuration in csv content_type.
- csv_
config_ strescape - escape in csv content_type.
- csv_
config_ boolheader - Indicates whether to write the field name to the CSV file, the default value is
false
. - csv_
config_ strlinefeed - lineFeed in csv content_type.
- csv_
config_ strnull - Invalid field content in csv content_type.
- csv_
config_ strquote - Escape character in csv content_type.
- display_
name str - The display name for oss export.
- export_
name str - Delivery configuration name, it can only contain lowercase letters, numbers, dashes
-
and underscores_
. It must start and end with lowercase letters or numbers, and the name must be 2 to 128 characters long. - from_
time int - The log from when to export to oss.
- json_
enable_ booltag - Whether to deliver the label when
content_type
=json
. - log_
read_ strrole_ arn - Used for logstore reading, the role should have log read policy, such as
acs:ram::13234:role/logrole
, iflog_read_role_arn
is not set,role_arn
is used to read logstore. - logstore_
name str - The name of the log logstore.
- path_
format str - The OSS Bucket directory is dynamically generated according to the creation time of the export task, it cannot start with a forward slash
/
, the default value is%Y/%m/%d/%H/%M
. - prefix str
- The data synchronized from Log Service to OSS will be stored in this directory of Bucket.
- project_
name str - The name of the log project. It is unique within one Alicloud account.
- role_
arn str - Used to write to oss bucket, the OSS Bucket owner creates the role mark which has the oss bucket write policy, such as
acs:ram::13234:role/logrole
. - suffix str
- The suffix for the objects in which the shipped data is stored.
- time_
zone str - The time zone that is used to format the time, e.g. +0800.
- bucket String
- The name of the oss bucket.
- buffer
Interval Number - How often is it delivered every interval.
- buffer
Size Number - Automatically control the creation interval of delivery tasks and set the upper limit of an OSS object size (calculated in uncompressed), unit:
MB
. - compress
Type String - OSS data storage compression method, support:
none
,snappy
,zstd
,gzip
. Among them, none means that the original data is not compressed, and snappy means that the data is compressed using the snappy algorithm, which can reduce the storage space usage of theOSS Bucket
. - config
Columns List<Property Map> - Configure columns when
content_type
isparquet
ororc
. - content
Type String - Storage format, only supports four types:
json
,parquet
,orc
,csv
. According to the different format, please select the following parameters - csv
Config List<String>Columns - Field configuration in csv content_type.
- csv
Config StringDelimiter - Separator configuration in csv content_type.
- csv
Config StringEscape - escape in csv content_type.
- csv
Config BooleanHeader - Indicates whether to write the field name to the CSV file, the default value is
false
. - csv
Config StringLinefeed - lineFeed in csv content_type.
- csv
Config StringNull - Invalid field content in csv content_type.
- csv
Config StringQuote - Escape character in csv content_type.
- display
Name String - The display name for oss export.
- export
Name String - Delivery configuration name, it can only contain lowercase letters, numbers, dashes
-
and underscores_
. It must start and end with lowercase letters or numbers, and the name must be 2 to 128 characters long. - from
Time Number - The log from when to export to oss.
- json
Enable BooleanTag - Whether to deliver the label when
content_type
=json
. - log
Read StringRole Arn - Used for logstore reading, the role should have log read policy, such as
acs:ram::13234:role/logrole
, iflog_read_role_arn
is not set,role_arn
is used to read logstore. - logstore
Name String - The name of the log logstore.
- path
Format String - The OSS Bucket directory is dynamically generated according to the creation time of the export task, it cannot start with a forward slash
/
, the default value is%Y/%m/%d/%H/%M
. - prefix String
- The data synchronized from Log Service to OSS will be stored in this directory of Bucket.
- project
Name String - The name of the log project. It is unique within one Alicloud account.
- role
Arn String - Used to write to oss bucket, the OSS Bucket owner creates the role mark which has the oss bucket write policy, such as
acs:ram::13234:role/logrole
. - suffix String
- The suffix for the objects in which the shipped data is stored.
- time
Zone String - The time zone that is used to format the time, e.g. +0800.
Supporting Types
OssExportConfigColumn, OssExportConfigColumnArgs
Import
Log oss export can be imported using the id or name, e.g.
$ pulumi import alicloud:log/ossExport:OssExport example tf-log-project:tf-log-logstore:tf-log-export
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Alibaba Cloud pulumi/pulumi-alicloud
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the
alicloud
Terraform Provider.