Creating Infrastructure Using Terraform with EFS Instead of EBS
Vinit Sharma
Computer science Engineer || Data Science || DevOps practitioner || AWS CSA ||
Terraform is one of the best Infrastructure as Code (IaC) tools on the market today. In my previous post I created a complete web page setup using Terraform. The best part of Terraform is that it needs minimal human interference, which means far fewer errors.
Amazon Elastic File System (Amazon EFS) provides a simple, scalable, fully managed, elastic NFS file system for use with AWS Cloud services and on-premises resources. It is a cloud-based file storage service for applications and workloads that run in the AWS public cloud.
TASK AIM:-
Perform the task using the EFS service instead of EBS on AWS, i.e., create/launch an application using Terraform:
1. Create a security group which allows port 80.
2. Launch an EC2 instance.
3. In this EC2 instance, use the existing key (or a provided key) and the security group created in step 1.
4. Launch one volume using the EFS service, attach it in your VPC, then mount that volume onto /var/www/html.
5. The developer has uploaded the code into a GitHub repo; the repo also contains some images.
6. Copy the GitHub repo code into /var/www/html.
7. Create an S3 bucket, copy/deploy the images from the GitHub repo into the S3 bucket, and change the permission to public readable.
8. Create a CloudFront distribution using the S3 bucket (which contains the images) and use the CloudFront URL to update the code in /var/www/html.
Let's Start:-
1. Configuring the AWS provider: specify the profile and region, then run terraform init to install the required provider plugins:-
provider "aws" { region = "ap-south-1" profile = "myprofile"
}
2. Creating a key pair for the AWS instance:-
resource "tls_private_key" "tk2pkey" { algorithm = "RSA" rsa_bits = "2048" } resource "aws_key_pair" "tk2key" { depends_on = [ tls_private_key.tk2pkey, ] key_name = "tk2key" public_key = tls_private_key.tk2pkey.public_key_openssh
}
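If you also want to log in to the instance manually later, the generated private key can be written to a local file. This is an optional addition not shown in the original setup; the filename below is just an example:

resource "local_file" "tk2key_file" {
  # Save the generated private key locally so it can be used for manual SSH access (optional)
  content         = tls_private_key.tk2pkey.private_key_pem
  filename        = "tk2key.pem"
  file_permission = "0400"
}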
3. Creating a security group that allows SSH (22), HTTP (80), and NFS (2049) inbound traffic:-
resource "aws_security_group" "tk2_sg" { depends_on = [ aws_key_pair.tk2key,] name = "tk2_sg" description = "Allow SSH AND HTTP and NFS inbound traffic" vpc_id = "vpc-00ac92e8e060045b6" ingress { description = "SSH" from_port = 22 to_port = 22 protocol = "tcp" cidr_blocks = [ "0.0.0.0/0" ] } ingress { description = "HTTP" from_port = 80 to_port = 80 protocol = "tcp" cidr_blocks = [ "0.0.0.0/0" ] } ingress { description = "NFS" from_port = 2049 to_port = 2049 protocol = "tcp" cidr_blocks = [ "0.0.0.0/0" ] } egress { from_port = 0 to_port = 0 protocol = "-1" cidr_blocks = ["0.0.0.0/0"] } tags = { Name = "tk2_sg" }
}
4. Launching the EC2 instance for Task 2:-
resource "aws_instance" "tk2_os" { depends_on = [ aws_key_pair.tk2key, aws_security_group.tk2_sg, ] ami = "ami-0447a12f28fddb066" instance_type = "t2.micro" key_name = "tk2key" security_groups = [ "tk2_sg" ] connection { type = "ssh" user = "ec2-user" private_key = tls_private_key.tk2pkey.private_key_pem host = aws_instance.tk2_os.public_ip } provisioner "remote-exec" { inline = [ "sudo yum update -y", "sudo yum install httpd php git -y ", "sudo systemctl restart httpd", "sudo systemctl enable httpd", ] } tags = { Name = "tk2_os" }
}
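A small output like the one below (not part of the original code) makes it easier to grab the instance's public IP after terraform apply:

output "instance_public_ip" {
  # Handy for SSH-ing in or opening the web page in a browser
  value = aws_instance.tk2_os.public_ip
}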
5. Creating the EFS file system, because (unlike EBS) EFS can be mounted by multiple instances at the same time:-
resource "aws_efs_file_system" "allow_nfs" { depends_on = [ aws_security_group.tk2_sg, aws_instance.tk2_os, ] creation_token = "allow_nfs" tags = { Name = "allow_nfs" }
}
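If you want to confirm the file system's DNS name (which is used for the mount in the next step), an output such as this can be added; it is not part of the original article:

output "efs_dns_name" {
  # DNS name used when mounting the EFS file system over NFS
  value = aws_efs_file_system.allow_nfs.dns_name
}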
6. Now we must configure our EC2 instance for the EFS mount:-
resource "aws_efs_mount_target" "alpha" { depends_on = [ aws_efs_file_system.allow_nfs, ] file_system_id = aws_efs_file_system.allow_nfs.id subnet_id = aws_instance.tk2_os.subnet_id security_groups = ["${aws_security_group.tk2_sg.id}"] } resource "null_resource" "null-remote-1" { depends_on = [ aws_efs_mount_target.alpha, ] connection { type = "ssh" user = "ec2-user" private_key = tls_private_key.tk2pkey.private_key_pem host = aws_instance.tk2_os.public_ip } provisioner "remote-exec" { inline = [ "sudo echo ${aws_efs_file_system.allow_nfs.dns_name}:/var/www/html efs defaults,_netdev 0 0 >> sudo /etc/fstab", "sudo mount ${aws_efs_file_system.allow_nfs.dns_name}:/ /var/www/html", "sudo curl https://github.com/vinitsharma16/hybrid-task-2.git > index.html", "sudo cp index.html /var/www/html/", ]
}
7. Creating an S3 bucket to hold the static images:-
resource "aws_s3_bucket" "tk2-s3bucket" { depends_on = [ null_resource.null-remote-1, ] bucket = "tk2-s3bucket" force_destroy = true acl = "public-read" policy = <<POLICY { "Version": "2012-10-17", "Id": "MYBUCKETPOLICY", "Statement": [ { "Sid": "PublicReadGetObject", "Effect": "Allow", "Principal": "*", "Action": "s3:*", "Resource": "arn:aws:s3:::tk2-s3bucket/*" } ] } POLICY
}
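Note that on accounts where S3 Block Public Access is enabled by default, the public-read ACL and policy above will be rejected unless the block is relaxed for this bucket. A sketch of how that could be done (this resource is not in the original article):

resource "aws_s3_bucket_public_access_block" "tk2-s3bucket-pab" {
  bucket = aws_s3_bucket.tk2-s3bucket.id

  # Allow the public-read ACL and bucket policy used above
  block_public_acls       = false
  block_public_policy     = false
  ignore_public_acls      = false
  restrict_public_buckets = false
}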
8. Creating an object (the image) in the S3 bucket we just created:-
resource "aws_s3_bucket_object" "tk2-object" { depends_on = [ aws_s3_bucket.tk2-s3bucket, null_resource.null-remote-1, ] bucket = aws_s3_bucket.tk2-s3bucket.id key = "one" source = "C:/Users/vinit/OneDrive/Desktop/pic.jpg" etag = "C:/Users/vinit/OneDrive/Desktop/pic.jpg" acl = "public-read" content_type = "image/jpg" } locals { s3_origin_id = "aws_s3_bucket.tk2-s3bucket.id" }
9. Creating a CloudFront distribution for fast access to and delivery of the stored content:-
resource "aws_cloudfront_origin_access_identity" "o" { comment = "Bingo this is done" } resource "aws_cloudfront_distribution" "tk2-s3_distribution" { origin { domain_name = aws_s3_bucket.tk2-s3bucket.bucket_regional_domain_name origin_id = local.s3_origin_id s3_origin_config { origin_access_identity = aws_cloudfront_origin_access_identity.o.cloudfront_access_identity_path } } enabled = true is_ipv6_enabled = true comment = "Some comment" default_root_object = "pic.png" logging_config { include_cookies = false bucket = aws_s3_bucket.tk2-s3bucket.bucket_domain_name } default_cache_behavior { allowed_methods = ["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"] cached_methods = ["GET", "HEAD"] target_origin_id = local.s3_origin_id forwarded_values { query_string = false cookies { forward = "none" } } viewer_protocol_policy = "allow-all" min_ttl = 0 default_ttl = 3600 max_ttl = 86400 } # Cache behavior with precedence 0 ordered_cache_behavior { path_pattern = "/content/*" allowed_methods = ["GET", "HEAD", "OPTIONS"] cached_methods = ["GET", "HEAD", "OPTIONS"] target_origin_id = local.s3_origin_id forwarded_values { query_string = false cookies { forward = "none" } } min_ttl = 0 default_ttl = 86400 max_ttl = 31536000 compress = true viewer_protocol_policy = "redirect-to-https" } price_class = "PriceClass_200" restrictions { geo_restriction { restriction_type = "whitelist" locations = ["US", "IN","CA", "GB", "DE"] } } tags = { Environment = "production" } viewer_certificate { cloudfront_default_certificate = true } } output "out3" { value = aws_cloudfront_distribution.tk2-s3_distribution.domain_name
}
10. Integrating the CloudFront URL into the web page on the EC2 instance:-
resource "null_resource" "null-remote2" { depends_on = [ aws_cloudfront_distribution.tk2-s3_distribution, ] connection { type = "ssh" user = "ec2-user" private_key = tls_private_key.tk2pkey.private_key_pem host = aws_instance.tk2_os.public_ip } provisioner "remote-exec" { inline = [ "sudo su << EOF", "echo \"<img src='https://${aws_cloudfront_distribution.tk2-s3_distribution.domain_name}/${aws_s3_bucket_object.tk2-object.key }'>\" >> /var/www/html/index.html", "EOF" ] }
}
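Finally, once terraform apply completes, the site can be reached over the instance's public IP. An output like the one below (an addition, not in the original code) prints that URL directly:

output "website_url" {
  # URL of the Apache web page served from the EFS-backed document root
  value = "http://${aws_instance.tk2_os.public_ip}"
}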