I'm on AWS and use Terraform for most things. I want to set up Amazon AppFlow so I can move data from Salesforce into an S3 bucket. AppFlow is a wizard that has to be configured step by step.
I assume that means you can't build it with Terraform, right? Is that assumption correct?
Yes, you can deploy AppFlow resources with Terraform. There are two providers you can use for it: the AWS provider or the AWS Cloud Control (awscc) provider. So far I've had better luck with AWS Cloud Control, since it's designed to support new resources faster. It covers connectors, connector profiles, and flows, and it supports custom connectors. The AWS provider originally only covered connectors and connector profiles (no flows), although it now has an aws_appflow_flow resource as well (example below); I've also found that it doesn't yet have good support for custom connectors.
Right now I'd recommend Cloud Control.
There's a good introduction here:
https://www.hashicorp.com/resources/using-the-terraform-aws-cloud-control-provider
And here are the AWS Cloud Control provider docs for the AppFlow resources:
https://registry.terraform.io/providers/hashicorp/awscc/latest/docs/resources/appflow_connector
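For the Salesforce-to-S3 case specifically, a flow with the awscc provider would look roughly like the sketch below. This assumes a Salesforce connector profile already exists (the name "my-salesforce-profile" is a placeholder; Salesforce profiles need OAuth credentials, so many people create them in the console first) and that the destination bucket already has a bucket policy allowing appflow.amazonaws.com, like the one further down. The attribute names mirror the AWS::AppFlow::Flow schema that Cloud Control exposes, so double-check them against the awscc docs linked above.

# Rough sketch only - a Salesforce -> S3 flow via the awscc provider.
# The connector profile name, bucket name, and Salesforce object are placeholders.
resource "awscc_appflow_flow" "salesforce_to_s3" {
  flow_name = "salesforce-accounts-to-s3"

  source_flow_config = {
    connector_type         = "Salesforce"
    connector_profile_name = "my-salesforce-profile" # existing profile (hypothetical)
    source_connector_properties = {
      salesforce = {
        object = "Account" # Salesforce object to pull
      }
    }
  }

  destination_flow_config_list = [{
    connector_type = "S3"
    destination_connector_properties = {
      s3 = {
        bucket_name = "my-appflow-destination-bucket" # must allow appflow.amazonaws.com
      }
    }
  }]

  # Minimal single-field mapping; real flows map more fields or use Map_all.
  tasks = [{
    task_type         = "Map"
    source_fields     = ["Id"]
    destination_field = "Id"
    connector_operator = {
      salesforce = "NO_OP"
    }
  }]

  trigger_config = {
    trigger_type = "OnDemand"
  }
}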
And here is an example using the AWS provider's AppFlow resources. It moves data between two S3 buckets, but the source side can be swapped for Salesforce (see the note after the example).
resource "aws_s3_bucket" "example_source" {
bucket = "example_source"
}
resource "aws_s3_bucket_policy" "example_source" {
bucket = aws_s3_bucket.example_source.id
policy = <<EOF
{
"Statement": [
{
"Effect": "Allow",
"Sid": "AllowAppFlowSourceActions",
"Principal": {
"Service": "appflow.amazonaws.com"
},
"Action": [
"s3:ListBucket",
"s3:GetObject"
],
"Resource": [
"arn:aws:s3:::example_source",
"arn:aws:s3:::example_source/*"
]
}
],
"Version": "2012-10-17"
}
EOF
}
resource "aws_s3_object" "example" {
bucket = aws_s3_bucket.example_source.id
key = "example_source.csv"
source = "example_source.csv"
}
resource "aws_s3_bucket" "example_destination" {
bucket = "example_destination"
}
resource "aws_s3_bucket_policy" "example_destination" {
bucket = aws_s3_bucket.example_destination.id
policy = <<EOF
{
"Statement": [
{
"Effect": "Allow",
"Sid": "AllowAppFlowDestinationActions",
"Principal": {
"Service": "appflow.amazonaws.com"
},
"Action": [
"s3:PutObject",
"s3:AbortMultipartUpload",
"s3:ListMultipartUploadParts",
"s3:ListBucketMultipartUploads",
"s3:GetBucketAcl",
"s3:PutObjectAcl"
],
"Resource": [
"arn:aws:s3:::example_destination",
"arn:aws:s3:::example_destination/*"
]
}
],
"Version": "2012-10-17"
}
EOF
}
resource "aws_appflow_flow" "example" {
name = "example"
source_flow_config {
connector_type = "S3"
source_connector_properties {
s3 {
bucket_name = aws_s3_bucket_policy.example_source.bucket
bucket_prefix = "example"
}
}
}
destination_flow_config {
connector_type = "S3"
destination_connector_properties {
s3 {
bucket_name = aws_s3_bucket_policy.example_destination.bucket
s3_output_format_config {
prefix_config {
prefix_type = "PATH"
}
}
}
}
}
task {
source_fields = ["exampleField"]
destination_field = "exampleField"
task_type = "Map"
connector_operator {
s3 = "NO_OP"
}
}
trigger_config {
trigger_type = "OnDemand"
}
}
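To use Salesforce as the source with the AWS provider instead, the source_flow_config above would change to something roughly like the sketch below (again assuming an existing Salesforce connector profile; the profile name and object are placeholders), and the task's connector_operator would use salesforce = "NO_OP" rather than s3.

  # Sketch only - Salesforce source block for aws_appflow_flow.
  source_flow_config {
    connector_type         = "Salesforce"
    connector_profile_name = "my-salesforce-profile" # existing connector profile (hypothetical)

    source_connector_properties {
      salesforce {
        object = "Account" # Salesforce object to pull
      }
    }
  }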