Created June 23, 2015 15:20
ellucian terraform
# service iam role
resource "aws_iam_role" "services" {
  name = "${var.target_env}-services"
  assume_role_policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Action": "sts:AssumeRole",
      "Principal": {
        "Service": "ec2.amazonaws.com"
      },
      "Effect": "Allow",
      "Sid": ""
    }
  ]
}
EOF
}

resource "aws_iam_role_policy" "services" {
  name = "${var.target_env}-services"
  role = "${aws_iam_role.services.name}"
  policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Action": "s3:GetObject",
      "Effect": "Allow",
      "Resource": "arn:aws:s3:::${var.docker_image_bucket}"
    },
    {
      "Action": [
        "s3:GetObject"
      ],
      "Effect": "Allow",
      "Resource": "arn:aws:s3:::${var.docker_image_bucket}/*"
    },
    {
      "Action": [
        "s3:GetObject"
      ],
      "Effect": "Allow",
      "Resource": "arn:aws:s3:::${var.deployable_artifact_bucket}"
    },
    {
      "Action": [
        "s3:GetObject"
      ],
      "Effect": "Allow",
      "Resource": "arn:aws:s3:::${var.deployable_artifact_bucket}/*"
    },
    {
      "Action": [
        "s3:GetObject"
      ],
      "Effect": "Allow",
      "Resource": "arn:aws:s3:::${var.identification_bucket}"
    },
    {
      "Action": [
        "s3:GetObject"
      ],
      "Effect": "Allow",
      "Resource": "arn:aws:s3:::${var.identification_bucket}/*"
    },
    {
      "Action": [
        "ec2:DescribeTags"
      ],
      "Effect": "Allow",
      "Resource": "*"
    }
  ]
}
EOF
}

resource "aws_iam_instance_profile" "services" {
  name = "${var.target_env}-services"
  roles = ["${aws_iam_role.services.name}"]
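  # brief pause, likely a workaround for IAM eventual consistency so the new
  # instance profile is visible before the launch configurations below use it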
  provisioner "local-exec" {
    command = "sleep 10"
  }
}

# default service security group
resource "aws_security_group" "service_default" {
  depends_on = ["aws_instance.consul_1", "aws_instance.consul_2", "aws_instance.consul_3", "aws_db_instance.db_admin", "aws_db_instance.db_queue"]
  name = "ServiceSecurityGroup"
  description = "Secure service network traffic"
  vpc_id = "${aws_vpc.default.id}"
  tags = {
    Name = "ServiceSecurityGroup"
    Environment = "${var.target_env}"
  }

  # allow ssh from bastion
  ingress {
    from_port = 22
    to_port = 22
    protocol = "tcp"
    security_groups = ["${aws_security_group.bastion.id}"]
  }

  # allow dns
  ingress {
    from_port = 53
    to_port = 53
    protocol = "tcp"
    cidr_blocks = ["10.0.0.0/8"]
  }
  ingress {
    from_port = 53
    to_port = 53
    protocol = "udp"
    cidr_blocks = ["10.0.0.0/8"]
  }

  # allow web & service traffic
  ingress {
    from_port = 80
    to_port = 80
    protocol = "tcp"
    cidr_blocks = ["10.0.0.0/8"]
  }
  ingress {
    from_port = 3000
    to_port = 3000
    protocol = "tcp"
    cidr_blocks = ["10.0.0.0/8"]
  }

  # allow ntp traffic
  ingress {
    from_port = 123
    to_port = 123
    protocol = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }
  ingress {
    from_port = 123
    to_port = 123
    protocol = "udp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  # consul traffic
  ingress {
    from_port = 8300
    to_port = 8302
    protocol = "tcp"
    cidr_blocks = ["10.0.0.0/8"]
  }
  ingress {
    from_port = 8300
    to_port = 8302
    protocol = "udp"
    cidr_blocks = ["10.0.0.0/8"]
  }
  ingress {
    from_port = 8400
    to_port = 8400
    protocol = "tcp"
    cidr_blocks = ["10.0.0.0/8"]
  }
  ingress {
    from_port = 8400
    to_port = 8400
    protocol = "udp"
    cidr_blocks = ["10.0.0.0/8"]
  }
  ingress {
    from_port = 8500
    to_port = 8500
    protocol = "tcp"
    cidr_blocks = ["10.0.0.0/8"]
  }
  ingress {
    from_port = 8500
    to_port = 8500
    protocol = "udp"
    cidr_blocks = ["10.0.0.0/8"]
  }
  ingress {
    from_port = 8600
    to_port = 8600
    protocol = "tcp"
    cidr_blocks = ["10.0.0.0/8"]
  }
  ingress {
    from_port = 8600
    to_port = 8600
    protocol = "udp"
    cidr_blocks = ["10.0.0.0/8"]
  }

  # allow ping traffic between boxes
  ingress {
    from_port = 0
    to_port = 0
    protocol = "icmp"
    cidr_blocks = ["10.0.0.0/8"]
  }
  # allow internal communication
  # @todo: investigate using service-centric security groups for more explicit communication controls (a rough sketch follows after this resource)
  ingress {
    from_port = 0
    to_port = 0
    protocol = "-1"
    cidr_blocks = ["10.0.0.0/8"]
  }
  # allow all outbound traffic
  egress {
    from_port = 0
    to_port = 0
    protocol = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }
}
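# A rough sketch toward the @todo above: instead of the blanket 10.0.0.0/8
# rule, each service could get its own pair of security groups so only
# designated clients reach its port. The "example" service name and port 3000
# below are placeholders for illustration and are not referenced anywhere else
# in this configuration.
resource "aws_security_group" "example_service_clients" {
  name = "ExampleServiceClients"
  description = "Attach to instances permitted to call the example service"
  vpc_id = "${aws_vpc.default.id}"
  tags = {
    Name = "ExampleServiceClients"
    Environment = "${var.target_env}"
  }
}

resource "aws_security_group" "example_service" {
  name = "ExampleServiceSecurityGroup"
  description = "Only accept the example service port from designated clients"
  vpc_id = "${aws_vpc.default.id}"
  tags = {
    Name = "ExampleServiceSecurityGroup"
    Environment = "${var.target_env}"
  }

  ingress {
    from_port = 3000
    to_port = 3000
    protocol = "tcp"
    security_groups = ["${aws_security_group.example_service_clients.id}"]
  }

  egress {
    from_port = 0
    to_port = 0
    protocol = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }
}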
# private service security group
resource "aws_security_group" "private_service_elb" {
  name = "PrivateServiceSecurityGroup"
  description = "Enable internal HTTP access on port 80 between services"
  vpc_id = "${aws_vpc.default.id}"
  tags = {
    Name = "PrivateServiceSecurityGroup"
    Environment = "${var.target_env}"
  }

  ingress {
    from_port = 80
    to_port = 80
    protocol = "tcp"
    cidr_blocks = ["10.0.0.0/8"]
  }

  egress {
    from_port = 80
    to_port = 80
    protocol = "tcp"
    cidr_blocks = ["10.0.0.0/8"]
  }
}

# public service security group
resource "aws_security_group" "public_service_elb" {
  name = "PublicServiceSecurityGroup"
  description = "Enable HTTP access on port 80 from ELBs"
  vpc_id = "${aws_vpc.default.id}"
  tags = {
    Name = "PublicServiceSecurityGroup"
    Environment = "${var.target_env}"
  }

  ingress {
    from_port = 443
    to_port = 443
    protocol = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  egress {
    from_port = 80
    to_port = 80
    protocol = "tcp"
    security_groups = ["${aws_security_group.service_default.id}"]
  }
}

# @todo: implement services using iteration
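# One possible iteration approach for this version of Terraform is a count
# over a comma-separated list, mirroring how var.zones is handled below. The
# var.services and var.service_count variables here are illustrative (not
# defined in this configuration), and services that need an ELB, Route53
# record, or extra template vars would still need their own resources, so this
# untested sketch is left commented out as a starting point only.
# resource "template_file" "iterated_service" {
#   count = "${var.service_count}"
#   filename = "templates/${element(split(",", var.services), count.index)}.sh.tpl"
#   vars {
#     env = "${var.target_env}"
#   }
# }
# resource "aws_launch_configuration" "iterated_service" {
#   count = "${var.service_count}"
#   image_id = "${lookup(var.docker_amis, var.region)}"
#   instance_type = "${var.docker_instance_type}"
#   security_groups = ["${aws_security_group.service_default.id}"]
#   iam_instance_profile = "${aws_iam_instance_profile.services.name}"
#   user_data = "${element(template_file.iterated_service.*.rendered, count.index)}"
#   depends_on = ["aws_iam_instance_profile.services"]
# }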
# document persistence service
resource "template_file" "document-persistence-service" {
  filename = "templates/document-persistence-service.sh.tpl"
  vars {
    env = "${var.target_env}"
    version = "${var.service_version.document-persistence-service}"
  }
}

resource "aws_launch_configuration" "document-persistence-service" {
  # name = "${var.target_env}-document-persistence-service"
  image_id = "${lookup(var.docker_amis, var.region)}"
  instance_type = "${var.docker_instance_type}"
  security_groups = ["${aws_security_group.service_default.id}"]
  depends_on = ["aws_iam_instance_profile.services"]
  iam_instance_profile = "${aws_iam_instance_profile.services.name}"
  user_data = "${template_file.document-persistence-service.rendered}"
}

resource "aws_autoscaling_group" "document-persistence-service" {
  depends_on = ["aws_db_instance.db_queue", "aws_db_instance.db_admin"]
  name = "${var.target_env}-document-persistence-service-asg"
  launch_configuration = "${aws_launch_configuration.document-persistence-service.name}"
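  # @note: concat() on strings in this version of Terraform joins them, so
  #        each entry presumably becomes an AZ name like "<region>a" built
  #        from the var.zones list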
  availability_zones = ["${concat(var.region, "${element(split(",", var.zones), 0)}")}", "${concat(var.region, "${element(split(",", var.zones), 1)}")}", "${concat(var.region, "${element(split(",", var.zones), 2)}")}"]
  max_size = "${var.asg_max_size}"
  min_size = "${var.asg_min_size}"
  vpc_zone_identifier = ["${aws_subnet.private_1.id}", "${aws_subnet.private_2.id}", "${aws_subnet.private_3.id}"]

  # @note: is this redundant on healthchecks from ELB?
  health_check_grace_period = 3600
  health_check_type = "EC2"

  tag {
    key = "Environment"
    value = "${var.target_env}"
    propagate_at_launch = true
  }
  tag {
    key = "Name"
    value = "document-persistence-service"
    propagate_at_launch = true
  }
}
# mock auth
resource "template_file" "mock-auth" {
  filename = "templates/mock-auth.sh.tpl"
  vars {
    version = "${var.service_version.mock-auth}"
  }
}

resource "aws_launch_configuration" "mock-auth" {
  # name = "${var.target_env}-mock-auth"
  image_id = "${lookup(var.docker_amis, var.region)}"
  instance_type = "${var.docker_instance_type}"
  security_groups = ["${aws_security_group.service_default.id}"]
  depends_on = ["aws_iam_instance_profile.services"]
  iam_instance_profile = "${aws_iam_instance_profile.services.name}"
  user_data = "${template_file.mock-auth.rendered}"
}

resource "aws_elb" "mock-auth" {
  name = "${var.target_env}-ma"
  subnets = ["${aws_subnet.public_1.id}", "${aws_subnet.public_2.id}", "${aws_subnet.public_3.id}"]
  security_groups = ["${aws_security_group.public_service_elb.id}"]

  listener {
    instance_port = 80
    instance_protocol = "http"
    lb_port = 80
    lb_protocol = "http"
  }

  # @note: tcp/ssl for websocket support
  listener {
    instance_port = 80
    instance_protocol = "tcp"
    lb_port = 443
    lb_protocol = "ssl"
    ssl_certificate_id = "${var.ssl_cert_name}"
  }

  health_check {
    healthy_threshold = 3
    unhealthy_threshold = 5
    timeout = 5
    target = "TCP:80"
    interval = 30
  }
}

resource "aws_route53_record" "mock-auth" {
  zone_id = "${var.hosted_zone_id}"
  name = "${var.target_env}mock-auth.2020ar.com"
  type = "CNAME"
  ttl = 300
  records = ["${aws_elb.mock-auth.dns_name}"]
}

resource "aws_autoscaling_group" "mock-auth" {
  depends_on = ["aws_db_instance.db_queue", "aws_db_instance.db_admin"]
  name = "${var.target_env}-mock-auth-asg"
  launch_configuration = "${aws_launch_configuration.mock-auth.name}"
  availability_zones = ["${concat(var.region, "${element(split(",", var.zones), 0)}")}", "${concat(var.region, "${element(split(",", var.zones), 1)}")}", "${concat(var.region, "${element(split(",", var.zones), 2)}")}"]
  max_size = "${var.asg_max_size}"
  min_size = "${var.asg_min_size}"
  vpc_zone_identifier = ["${aws_subnet.private_1.id}", "${aws_subnet.private_2.id}", "${aws_subnet.private_3.id}"]
  load_balancers = ["${aws_elb.mock-auth.name}"]

  # @note: is this redundant on healthchecks from ELB?
  health_check_grace_period = 3600
  health_check_type = "EC2"

  tag {
    key = "Environment"
    value = "${var.target_env}"
    propagate_at_launch = true
  }
  tag {
    key = "Name"
    value = "mock-auth"
    propagate_at_launch = true
  }
}
# hub www auth
resource "template_file" "hub-www-auth" {
  filename = "templates/hub-www-auth.sh.tpl"
  vars {
    version = "${var.service_version.hub-www-auth}"
  }
}

resource "aws_launch_configuration" "hub-www-auth" {
  # name = "${var.target_env}-hub-www-auth"
  image_id = "${lookup(var.docker_amis, var.region)}"
  instance_type = "${var.docker_instance_type}"
  security_groups = ["${aws_security_group.service_default.id}"]
  depends_on = ["aws_iam_instance_profile.services"]
  iam_instance_profile = "${aws_iam_instance_profile.services.name}"
  user_data = "${template_file.hub-www-auth.rendered}"
}

resource "aws_elb" "hub-www-auth" {
  name = "${var.target_env}-hwa"
  subnets = ["${aws_subnet.public_1.id}", "${aws_subnet.public_2.id}", "${aws_subnet.public_3.id}"]
  security_groups = ["${aws_security_group.public_service_elb.id}"]

  listener {
    instance_port = 80
    instance_protocol = "http"
    lb_port = 80
    lb_protocol = "http"
  }

  # @note: tcp/ssl for websocket support
  listener {
    instance_port = 80
    instance_protocol = "tcp"
    lb_port = 443
    lb_protocol = "ssl"
    ssl_certificate_id = "${var.ssl_cert_name}"
  }

  health_check {
    healthy_threshold = 3
    unhealthy_threshold = 5
    timeout = 5
    target = "TCP:80"
    interval = 30
  }
}

resource "aws_route53_record" "hub-www-auth" {
  zone_id = "${var.hosted_zone_id}"
  name = "${var.target_env}hub-www-auth.2020ar.com"
  type = "CNAME"
  ttl = 300
  records = ["${aws_elb.hub-www-auth.dns_name}"]
}

resource "aws_autoscaling_group" "hub-www-auth" {
  depends_on = ["aws_db_instance.db_queue", "aws_db_instance.db_admin"]
  name = "${var.target_env}-hub-www-auth-asg"
  launch_configuration = "${aws_launch_configuration.hub-www-auth.name}"
  availability_zones = ["${concat(var.region, "${element(split(",", var.zones), 0)}")}", "${concat(var.region, "${element(split(",", var.zones), 1)}")}", "${concat(var.region, "${element(split(",", var.zones), 2)}")}"]
  max_size = "${var.asg_max_size}"
  min_size = "${var.asg_min_size}"
  vpc_zone_identifier = ["${aws_subnet.private_1.id}", "${aws_subnet.private_2.id}", "${aws_subnet.private_3.id}"]
  load_balancers = ["${aws_elb.hub-www-auth.name}"]

  # @note: is this redundant on healthchecks from ELB?
  health_check_grace_period = 3600
  health_check_type = "EC2"

  tag {
    key = "Environment"
    value = "${var.target_env}"
    propagate_at_launch = true
  }
  tag {
    key = "Name"
    value = "hub-www-auth"
    propagate_at_launch = true
  }
}
# hub configuration api
resource "template_file" "hub-configuration-api" {
  filename = "templates/hub-configuration-api.sh.tpl"
  vars {
    version = "${var.service_version.hub-configuration-api}"
  }
}

resource "aws_launch_configuration" "hub-configuration-api" {
  # name = "${var.target_env}-hub-configuration-api"
  image_id = "${lookup(var.docker_amis, var.region)}"
  instance_type = "${var.docker_instance_type}"
  security_groups = ["${aws_security_group.service_default.id}"]
  depends_on = ["aws_iam_instance_profile.services"]
  iam_instance_profile = "${aws_iam_instance_profile.services.name}"
  user_data = "${template_file.hub-configuration-api.rendered}"
}

resource "aws_elb" "hub-configuration-api" {
  name = "${var.target_env}-hca"
  subnets = ["${aws_subnet.public_1.id}", "${aws_subnet.public_2.id}", "${aws_subnet.public_3.id}"]
  security_groups = ["${aws_security_group.public_service_elb.id}"]

  listener {
    instance_port = 80
    instance_protocol = "http"
    lb_port = 80
    lb_protocol = "http"
  }

  # @note: tcp/ssl for websocket support
  listener {
    instance_port = 80
    instance_protocol = "tcp"
    lb_port = 443
    lb_protocol = "ssl"
    ssl_certificate_id = "${var.ssl_cert_name}"
  }

  health_check {
    healthy_threshold = 3
    unhealthy_threshold = 5
    timeout = 5
    target = "TCP:80"
    interval = 30
  }
}

resource "aws_route53_record" "hub-configuration-api" {
  zone_id = "${var.hosted_zone_id}"
  name = "${var.target_env}hub-configuration-api.2020ar.com"
  type = "CNAME"
  ttl = 300
  records = ["${aws_elb.hub-configuration-api.dns_name}"]
}

resource "aws_autoscaling_group" "hub-configuration-api" {
  depends_on = ["aws_db_instance.db_queue", "aws_db_instance.db_admin"]
  name = "${var.target_env}-hub-configuration-api-asg"
  launch_configuration = "${aws_launch_configuration.hub-configuration-api.name}"
  availability_zones = ["${concat(var.region, "${element(split(",", var.zones), 0)}")}", "${concat(var.region, "${element(split(",", var.zones), 1)}")}", "${concat(var.region, "${element(split(",", var.zones), 2)}")}"]
  max_size = "${var.asg_max_size}"
  min_size = "${var.asg_min_size}"
  vpc_zone_identifier = ["${aws_subnet.private_1.id}", "${aws_subnet.private_2.id}", "${aws_subnet.private_3.id}"]
  load_balancers = ["${aws_elb.hub-configuration-api.name}"]

  # @note: is this redundant on healthchecks from ELB?
  health_check_grace_period = 3600
  health_check_type = "EC2"

  tag {
    key = "Environment"
    value = "${var.target_env}"
    propagate_at_launch = true
  }
  tag {
    key = "Name"
    value = "hub-configuration-api"
    propagate_at_launch = true
  }
}
# hub configuration ui
resource "template_file" "hub-configuration-ui" {
  filename = "templates/hub-configuration-ui.sh.tpl"
  vars {
    env = "${var.target_env}"
    version = "${var.service_version.hub-configuration-ui}"
  }
}

resource "aws_launch_configuration" "hub-configuration-ui" {
  # name = "${var.target_env}-hub-configuration-ui"
  image_id = "${lookup(var.docker_amis, var.region)}"
  instance_type = "${var.docker_instance_type}"
  security_groups = ["${aws_security_group.service_default.id}"]
  depends_on = ["aws_iam_instance_profile.services"]
  iam_instance_profile = "${aws_iam_instance_profile.services.name}"
  user_data = "${template_file.hub-configuration-ui.rendered}"
}

resource "aws_elb" "hub-configuration-ui" {
  name = "${var.target_env}-hcu"
  subnets = ["${aws_subnet.public_1.id}", "${aws_subnet.public_2.id}", "${aws_subnet.public_3.id}"]
  security_groups = ["${aws_security_group.public_service_elb.id}"]

  listener {
    instance_port = 80
    instance_protocol = "http"
    lb_port = 80
    lb_protocol = "http"
  }

  # @note: tcp/ssl for websocket support
  listener {
    instance_port = 80
    instance_protocol = "tcp"
    lb_port = 443
    lb_protocol = "ssl"
    ssl_certificate_id = "${var.ssl_cert_name}"
  }

  health_check {
    healthy_threshold = 3
    unhealthy_threshold = 5
    timeout = 5
    target = "TCP:80"
    interval = 30
  }
}

resource "aws_route53_record" "hub-configuration-ui" {
  zone_id = "${var.hosted_zone_id}"
  name = "${var.target_env}hub-configuration-ui.2020ar.com"
  type = "CNAME"
  ttl = 300
  records = ["${aws_elb.hub-configuration-ui.dns_name}"]
}

resource "aws_autoscaling_group" "hub-configuration-ui" {
  depends_on = ["aws_db_instance.db_queue", "aws_db_instance.db_admin"]
  name = "${var.target_env}-hub-configuration-ui-asg"
  launch_configuration = "${aws_launch_configuration.hub-configuration-ui.name}"
  availability_zones = ["${concat(var.region, "${element(split(",", var.zones), 0)}")}", "${concat(var.region, "${element(split(",", var.zones), 1)}")}", "${concat(var.region, "${element(split(",", var.zones), 2)}")}"]
  max_size = "${var.asg_max_size}"
  min_size = "${var.asg_min_size}"
  vpc_zone_identifier = ["${aws_subnet.private_1.id}", "${aws_subnet.private_2.id}", "${aws_subnet.private_3.id}"]
  load_balancers = ["${aws_elb.hub-configuration-ui.name}"]

  # @note: is this redundant on healthchecks from ELB?
  health_check_grace_period = 3600
  health_check_type = "EC2"

  tag {
    key = "Environment"
    value = "${var.target_env}"
    propagate_at_launch = true
  }
  tag {
    key = "Name"
    value = "hub-configuration-ui"
    propagate_at_launch = true
  }
}
# hub message queue service
resource "template_file" "hub-message-queue-service" {
  filename = "templates/hub-message-queue-service.sh.tpl"
  vars {
    env = "${var.target_env}"
    version = "${var.service_version.hub-message-queue-service}"
  }
}

resource "aws_launch_configuration" "hub-message-queue-service" {
  # name = "${var.target_env}-hub-message-queue-service"
  image_id = "${lookup(var.docker_amis, var.region)}"
  instance_type = "${var.docker_instance_type}"
  security_groups = ["${aws_security_group.service_default.id}"]
  depends_on = ["aws_iam_instance_profile.services"]
  iam_instance_profile = "${aws_iam_instance_profile.services.name}"
  user_data = "${template_file.hub-message-queue-service.rendered}"
}

resource "aws_elb" "hub-message-queue-service" {
  name = "${var.target_env}-hmqs"
  subnets = ["${aws_subnet.public_1.id}", "${aws_subnet.public_2.id}", "${aws_subnet.public_3.id}"]
  security_groups = ["${aws_security_group.public_service_elb.id}"]

  listener {
    instance_port = 80
    instance_protocol = "http"
    lb_port = 80
    lb_protocol = "http"
  }

  # @note: tcp/ssl for websocket support
  listener {
    instance_port = 80
    instance_protocol = "tcp"
    lb_port = 443
    lb_protocol = "ssl"
    ssl_certificate_id = "${var.ssl_cert_name}"
  }

  health_check {
    healthy_threshold = 3
    unhealthy_threshold = 5
    timeout = 5
    target = "TCP:80"
    interval = 30
  }
}

resource "aws_route53_record" "hub-message-queue-service" {
  zone_id = "${var.hosted_zone_id}"
  name = "${var.target_env}hub-message-queue-service.2020ar.com"
  type = "CNAME"
  ttl = 300
  records = ["${aws_elb.hub-message-queue-service.dns_name}"]
}

resource "aws_autoscaling_group" "hub-message-queue-service" {
  depends_on = ["aws_db_instance.db_queue", "aws_db_instance.db_admin"]
  name = "${var.target_env}-hub-message-queue-service-asg"
  launch_configuration = "${aws_launch_configuration.hub-message-queue-service.name}"
  availability_zones = ["${concat(var.region, "${element(split(",", var.zones), 0)}")}", "${concat(var.region, "${element(split(",", var.zones), 1)}")}", "${concat(var.region, "${element(split(",", var.zones), 2)}")}"]
  max_size = "${var.asg_max_size}"
  min_size = "${var.asg_min_size}"
  vpc_zone_identifier = ["${aws_subnet.private_1.id}", "${aws_subnet.private_2.id}", "${aws_subnet.private_3.id}"]
  load_balancers = ["${aws_elb.hub-message-queue-service.name}"]

  # @note: is this redundant on healthchecks from ELB?
  health_check_grace_period = 3600
  health_check_type = "EC2"

  tag {
    key = "Environment"
    value = "${var.target_env}"
    propagate_at_launch = true
  }
  tag {
    key = "Name"
    value = "hub-message-queue-service"
    propagate_at_launch = true
  }
}
# hub publish api
resource "template_file" "hub-publish-api" {
  filename = "templates/hub-publish-api.sh.tpl"
  vars {
    env = "${var.target_env}"
    version = "${var.service_version.hub-publish-api}"
  }
}

resource "aws_launch_configuration" "hub-publish-api" {
  # name = "${var.target_env}-hub-publish-api"
  image_id = "${lookup(var.docker_amis, var.region)}"
  instance_type = "${var.docker_instance_type}"
  security_groups = ["${aws_security_group.service_default.id}"]
  depends_on = ["aws_iam_instance_profile.services"]
  iam_instance_profile = "${aws_iam_instance_profile.services.name}"
  user_data = "${template_file.hub-publish-api.rendered}"
}

resource "aws_elb" "hub-publish-api" {
  name = "${var.target_env}-hpa"
  subnets = ["${aws_subnet.public_1.id}", "${aws_subnet.public_2.id}", "${aws_subnet.public_3.id}"]
  security_groups = ["${aws_security_group.public_service_elb.id}"]

  listener {
    instance_port = 80
    instance_protocol = "http"
    lb_port = 80
    lb_protocol = "http"
  }

  # @note: tcp/ssl for websocket support
  listener {
    instance_port = 80
    instance_protocol = "tcp"
    lb_port = 443
    lb_protocol = "ssl"
    ssl_certificate_id = "${var.ssl_cert_name}"
  }

  health_check {
    healthy_threshold = 3
    unhealthy_threshold = 5
    timeout = 5
    target = "TCP:80"
    interval = 30
  }
}

resource "aws_route53_record" "hub-publish-api" {
  zone_id = "${var.hosted_zone_id}"
  name = "${var.target_env}hub-publish-api.2020ar.com"
  type = "CNAME"
  ttl = 300
  records = ["${aws_elb.hub-publish-api.dns_name}"]
}

resource "aws_autoscaling_group" "hub-publish-api" {
  depends_on = ["aws_db_instance.db_queue", "aws_db_instance.db_admin"]
  name = "${var.target_env}-hub-publish-api-asg"
  launch_configuration = "${aws_launch_configuration.hub-publish-api.name}"
  availability_zones = ["${concat(var.region, "${element(split(",", var.zones), 0)}")}", "${concat(var.region, "${element(split(",", var.zones), 1)}")}", "${concat(var.region, "${element(split(",", var.zones), 2)}")}"]
  max_size = "${var.asg_max_size}"
  min_size = "${var.asg_min_size}"
  vpc_zone_identifier = ["${aws_subnet.private_1.id}", "${aws_subnet.private_2.id}", "${aws_subnet.private_3.id}"]
  load_balancers = ["${aws_elb.hub-publish-api.name}"]

  # @note: is this redundant on healthchecks from ELB?
  health_check_grace_period = 3600
  health_check_type = "EC2"

  tag {
    key = "Environment"
    value = "${var.target_env}"
    propagate_at_launch = true
  }
  tag {
    key = "Name"
    value = "hub-publish-api"
    propagate_at_launch = true
  }
}
# hub proxy api
resource "template_file" "hub-proxy-api" {
  filename = "templates/hub-proxy-api.sh.tpl"
  vars {
    version = "${var.service_version.hub-proxy-api}"
  }
}

resource "aws_launch_configuration" "hub-proxy-api" {
  # name = "${var.target_env}-hub-proxy-api"
  image_id = "${lookup(var.docker_amis, var.region)}"
  instance_type = "${var.docker_instance_type}"
  security_groups = ["${aws_security_group.service_default.id}"]
  depends_on = ["aws_iam_instance_profile.services"]
  iam_instance_profile = "${aws_iam_instance_profile.services.name}"
  user_data = "${template_file.hub-proxy-api.rendered}"
}

resource "aws_elb" "hub-proxy-api" {
  name = "${var.target_env}-hpra"
  subnets = ["${aws_subnet.public_1.id}", "${aws_subnet.public_2.id}", "${aws_subnet.public_3.id}"]
  security_groups = ["${aws_security_group.public_service_elb.id}"]

  listener {
    instance_port = 80
    instance_protocol = "http"
    lb_port = 80
    lb_protocol = "http"
  }

  # @note: tcp/ssl for websocket support
  listener {
    instance_port = 80
    instance_protocol = "tcp"
    lb_port = 443
    lb_protocol = "ssl"
    ssl_certificate_id = "${var.ssl_cert_name}"
  }

  health_check {
    healthy_threshold = 3
    unhealthy_threshold = 5
    timeout = 5
    target = "TCP:80"
    interval = 30
  }
}

resource "aws_route53_record" "hub-proxy-api" {
  zone_id = "${var.hosted_zone_id}"
  name = "${var.target_env}hub-proxy-api.2020ar.com"
  type = "CNAME"
  ttl = 300
  records = ["${aws_elb.hub-proxy-api.dns_name}"]
}

resource "aws_autoscaling_group" "hub-proxy-api" {
  depends_on = ["aws_db_instance.db_queue", "aws_db_instance.db_admin"]
  name = "${var.target_env}-hub-proxy-api-asg"
  launch_configuration = "${aws_launch_configuration.hub-proxy-api.name}"
  availability_zones = ["${concat(var.region, "${element(split(",", var.zones), 0)}")}", "${concat(var.region, "${element(split(",", var.zones), 1)}")}", "${concat(var.region, "${element(split(",", var.zones), 2)}")}"]
  max_size = "${var.asg_max_size}"
  min_size = "${var.asg_min_size}"
  vpc_zone_identifier = ["${aws_subnet.private_1.id}", "${aws_subnet.private_2.id}", "${aws_subnet.private_3.id}"]
  load_balancers = ["${aws_elb.hub-proxy-api.name}"]

  # @note: is this redundant on healthchecks from ELB?
  health_check_grace_period = 3600
  health_check_type = "EC2"

  tag {
    key = "Environment"
    value = "${var.target_env}"
    propagate_at_launch = true
  }
  tag {
    key = "Name"
    value = "hub-proxy-api"
    propagate_at_launch = true
  }
}
# TODO: terraform scaling policy
# TODO: terraform CloudWatch metrics
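# A possible starting point for the two TODOs above, sketched against the
# mock-auth group only: a scale-up policy plus a CloudWatch CPU alarm that
# triggers it. This assumes a Terraform/provider version that supports the
# aws_autoscaling_policy and aws_cloudwatch_metric_alarm resources, and the
# threshold, period, and adjustment values are illustrative, so it is left
# commented out.
# resource "aws_autoscaling_policy" "mock-auth-scale-up" {
#   name = "${var.target_env}-mock-auth-scale-up"
#   autoscaling_group_name = "${aws_autoscaling_group.mock-auth.name}"
#   adjustment_type = "ChangeInCapacity"
#   scaling_adjustment = 1
#   cooldown = 300
# }
# resource "aws_cloudwatch_metric_alarm" "mock-auth-cpu-high" {
#   alarm_name = "${var.target_env}-mock-auth-cpu-high"
#   namespace = "AWS/EC2"
#   metric_name = "CPUUtilization"
#   statistic = "Average"
#   comparison_operator = "GreaterThanThreshold"
#   threshold = 75
#   period = 300
#   evaluation_periods = 2
#   dimensions {
#     AutoScalingGroupName = "${aws_autoscaling_group.mock-auth.name}"
#   }
#   alarm_actions = ["${aws_autoscaling_policy.mock-auth-scale-up.arn}"]
# }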