AWS Cloud Services: Complete Guide for Developers
Amazon Web Services (AWS) is the world's most comprehensive cloud platform. This guide covers essential AWS services and best practices for building scalable, reliable applications.
Why AWS?
- ☁️ Comprehensive - 200+ services for any use case
- 🌍 Global - Data centers in 30+ regions worldwide
- 💰 Cost-Effective - Pay only for what you use
- 🔒 Secure - Industry-leading security and compliance
- 📈 Scalable - From startup to enterprise scale
Getting Started
AWS Account Setup
# Install AWS CLI
# macOS
brew install awscli
# Windows
choco install awscli
# Configure AWS CLI
aws configure
# Enter: Access Key ID, Secret Access Key, Region, Output format
AWS CLI Basics
# List S3 buckets
aws s3 ls
# List EC2 instances
aws ec2 describe-instances
# Get caller identity
aws sts get-caller-identity
Amazon EC2 (Elastic Compute Cloud)
Launching an EC2 Instance
# Create key pair
# The private key material is printed exactly once; save it immediately —
# AWS does not keep a copy for later download.
aws ec2 create-key-pair \
  --key-name MyKeyPair \
  --query 'KeyMaterial' \
  --output text > MyKeyPair.pem
# Set permissions
# SSH refuses to use a key file readable by other users.
chmod 400 MyKeyPair.pem
# Launch instance
# NOTE(review): the AMI, security-group and subnet IDs below are placeholders
# tied to a specific account/region — replace them with your own values.
aws ec2 run-instances \
  --image-id ami-0c55b159cbfafe1f0 \
  --instance-type t2.micro \
  --key-name MyKeyPair \
  --security-group-ids sg-903004f8 \
  --subnet-id subnet-6e7f829e
Managing EC2 with Terraform
# main.tf
# Deploy every resource in this configuration to us-east-1.
provider "aws" {
  region = "us-east-1"
}
# Minimal Amazon Linux web server, bootstrapped with Apache at first boot.
resource "aws_instance" "web_server" {
  # NOTE(review): AMI IDs are region-specific; this one assumes us-east-1 — confirm.
  ami           = "ami-0c55b159cbfafe1f0"
  instance_type = "t2.micro"

  # Attach the security group defined below. Without this the instance falls
  # back to the VPC default group and the HTTP/SSH rules never take effect.
  vpc_security_group_ids = [aws_security_group.web_sg.id]

  tags = {
    Name        = "WebServer"
    Environment = "Production"
  }

  # Installs and starts Apache, then publishes a placeholder index page.
  user_data = <<-EOF
    #!/bin/bash
    yum update -y
    yum install -y httpd
    systemctl start httpd
    systemctl enable httpd
    echo "Hello from EC2" > /var/www/html/index.html
  EOF
}
# Security group for the web server: public HTTP, SSH, and open egress.
resource "aws_security_group" "web_sg" {
  name = "web-security-group"
  description = "Allow HTTP and SSH"
  # Public HTTP — intended for an internet-facing web server.
  ingress {
    from_port = 80
    to_port = 80
    protocol = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }
  # NOTE(review): SSH is open to the entire internet (0.0.0.0/0). Restrict
  # this to a bastion host or trusted CIDR in any real deployment.
  ingress {
    from_port = 22
    to_port = 22
    protocol = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }
  # Allow all outbound traffic (protocol "-1" = any protocol).
  egress {
    from_port = 0
    to_port = 0
    protocol = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }
}
Amazon S3 (Simple Storage Service)
S3 Operations with AWS CLI
# Create bucket
# Bucket names are globally unique across every AWS account.
aws s3 mb s3://my-unique-bucket-name
# Upload file
aws s3 cp myfile.txt s3://my-bucket/
# Upload directory
aws s3 sync ./local-folder s3://my-bucket/remote-folder
# Download file
aws s3 cp s3://my-bucket/myfile.txt ./
# List objects
aws s3 ls s3://my-bucket/
# Delete object
aws s3 rm s3://my-bucket/myfile.txt
# Delete bucket
# --force first deletes every object in the bucket — this is irreversible.
aws s3 rb s3://my-bucket --force
S3 with Node.js SDK
const AWS = require('aws-sdk');
const s3 = new AWS.S3();
// Upload file
/**
 * Upload a plain-text object to S3.
 * @param {string} bucketName - Destination bucket.
 * @param {string} key - Object key to write.
 * @param {Buffer|string} body - Object contents.
 * @returns {Promise<object>} Upload result (includes `Location`).
 * @throws Re-throws any S3 error after logging it.
 */
async function uploadFile(bucketName, key, body) {
  try {
    const result = await s3
      .upload({
        Bucket: bucketName,
        Key: key,
        Body: body,
        ContentType: 'text/plain',
      })
      .promise();
    console.log('File uploaded successfully:', result.Location);
    return result;
  } catch (error) {
    console.error('Error uploading file:', error);
    throw error;
  }
}
// Download file
/**
 * Fetch an object from S3 and decode it as UTF-8 text.
 * @param {string} bucketName - Bucket to read from.
 * @param {string} key - Object key.
 * @returns {Promise<string>} Object contents as a string.
 * @throws Re-throws any S3 error after logging it.
 */
async function downloadFile(bucketName, key) {
  try {
    const { Body } = await s3
      .getObject({ Bucket: bucketName, Key: key })
      .promise();
    return Body.toString('utf-8');
  } catch (error) {
    console.error('Error downloading file:', error);
    throw error;
  }
}
// Generate presigned URL
/**
 * Create a time-limited download URL for an S3 object. The URL is signed
 * locally with the SDK's credentials; no network call is made here.
 * @param {string} bucketName
 * @param {string} key
 * @param {number} [expiresIn=3600] - Link lifetime in seconds.
 * @returns {string} Presigned GET URL.
 */
function getPresignedUrl(bucketName, key, expiresIn = 3600) {
  return s3.getSignedUrl('getObject', {
    Bucket: bucketName,
    Key: key,
    Expires: expiresIn,
  });
}
// List objects
/**
 * List every object under a prefix, following pagination.
 * Fix over the original: listObjectsV2 returns at most 1000 keys per call;
 * the original ignored IsTruncated/NextContinuationToken and silently
 * dropped everything past the first page.
 * @param {string} bucketName
 * @param {string} [prefix=''] - Key prefix filter.
 * @returns {Promise<object[]>} All matching object descriptors.
 * @throws Re-throws any S3 error after logging it.
 */
async function listObjects(bucketName, prefix = '') {
  const objects = [];
  let continuationToken;
  try {
    do {
      const page = await s3
        .listObjectsV2({
          Bucket: bucketName,
          Prefix: prefix,
          ContinuationToken: continuationToken,
        })
        .promise();
      objects.push(...(page.Contents ?? []));
      continuationToken = page.IsTruncated ? page.NextContinuationToken : undefined;
    } while (continuationToken);
    return objects;
  } catch (error) {
    console.error('Error listing objects:', error);
    throw error;
  }
}
S3 Bucket Policy (warning: the policy below grants public, anonymous read access to every object in the bucket — apply it only when that is intentional, and note it requires the bucket's public-access block to be disabled)
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "PublicReadGetObject",
"Effect": "Allow",
"Principal": "*",
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::my-bucket/*"
}
]
}
AWS Lambda (Serverless Functions)
Creating a Lambda Function
// index.js
exports.handler = async (event) => {
console.log('Event:', JSON.stringify(event, null, 2));
const response = {
statusCode: 200,
headers: {
'Content-Type': 'application/json',
'Access-Control-Allow-Origin': '*'
},
body: JSON.stringify({
message: 'Hello from Lambda!',
input: event
})
};
return response;
};
Lambda with API Gateway
// API Gateway + Lambda
exports.handler = async (event) => {
const { httpMethod, path, body } = event;
// Parse request body
const requestBody = body ? JSON.parse(body) : {};
// Route handling
if (httpMethod === 'GET' && path === '/users') {
return {
statusCode: 200,
body: JSON.stringify({ users: ['Alice', 'Bob', 'Charlie'] })
};
}
if (httpMethod === 'POST' && path === '/users') {
const { name } = requestBody;
return {
statusCode: 201,
body: JSON.stringify({ message: `User ${name} created` })
};
}
return {
statusCode: 404,
body: JSON.stringify({ error: 'Not Found' })
};
};
Lambda with DynamoDB
const AWS = require('aws-sdk');
const dynamodb = new AWS.DynamoDB.DocumentClient();
exports.handler = async (event) => {
const { httpMethod, body } = event;
const tableName = process.env.TABLE_NAME;
if (httpMethod === 'POST') {
const item = JSON.parse(body);
const params = {
TableName: tableName,
Item: {
id: Date.now().toString(),
...item,
createdAt: new Date().toISOString()
}
};
await dynamodb.put(params).promise();
return {
statusCode: 201,
body: JSON.stringify({ message: 'Item created' })
};
}
if (httpMethod === 'GET') {
const params = {
TableName: tableName
};
const result = await dynamodb.scan(params).promise();
return {
statusCode: 200,
body: JSON.stringify(result.Items)
};
}
};
Amazon RDS (Relational Database Service)
Creating RDS Instance with Terraform
# Single PostgreSQL 14 instance for the application database.
resource "aws_db_instance" "postgres" {
  identifier = "myapp-db"
  engine = "postgres"
  engine_version = "14.7"
  instance_class = "db.t3.micro"
  allocated_storage = 20
  storage_type = "gp2"
  db_name = "myappdb"
  username = "admin"
  # Password is injected via a variable rather than hard-coded — keep the
  # value in a secrets store or a tfvars file excluded from version control.
  password = var.db_password
  vpc_security_group_ids = [aws_security_group.db_sg.id]
  db_subnet_group_name = aws_db_subnet_group.main.name
  # Retain 7 days of automated backups.
  backup_retention_period = 7
  # NOTE(review): skipping the final snapshot means `terraform destroy`
  # deletes the data irrecoverably — reconsider for production.
  skip_final_snapshot = true
  tags = {
    Name = "MyApp Database"
  }
}
# Database security group: PostgreSQL access from inside the VPC only.
resource "aws_security_group" "db_sg" {
  name = "database-security-group"
  description = "Allow PostgreSQL access"
  ingress {
    from_port = 5432
    to_port = 5432
    protocol = "tcp"
    # Restricted to the VPC CIDR — the database is not internet-reachable.
    cidr_blocks = ["10.0.0.0/16"]
  }
  # NOTE(review): with no egress block, Terraform removes AWS's default
  # allow-all egress rule, so all outbound traffic is denied — confirm
  # that is intended.
}
Connecting to RDS from Lambda
const { Pool } = require('pg');

// Created at module scope so the pool survives across warm Lambda
// invocations instead of reconnecting on every request. Connection details
// come from environment variables set on the function configuration.
const pool = new Pool({
  host: process.env.DB_HOST,
  port: 5432,
  database: process.env.DB_NAME,
  user: process.env.DB_USER,
  password: process.env.DB_PASSWORD,
  max: 1 // Lambda connections should be minimal
});
exports.handler = async (event) => {
const client = await pool.connect();
try {
const result = await client.query('SELECT * FROM users LIMIT 10');
return {
statusCode: 200,
body: JSON.stringify(result.rows)
};
} finally {
client.release();
}
};
Amazon DynamoDB
DynamoDB Operations
const AWS = require('aws-sdk');
const dynamodb = new AWS.DynamoDB.DocumentClient();
// Put item
/**
 * Persist a new user record with a server-side creation timestamp.
 * @param {{id: string, name: string, email: string}} user
 * @returns {Promise<void>}
 */
async function createUser(user) {
  const item = {
    userId: user.id,
    name: user.name,
    email: user.email,
    createdAt: new Date().toISOString(),
  };
  await dynamodb.put({ TableName: 'Users', Item: item }).promise();
}
// Get item
/**
 * Fetch a single user by primary key.
 * @param {string} userId
 * @returns {Promise<object|undefined>} The item, or undefined if absent.
 */
async function getUser(userId) {
  const { Item } = await dynamodb
    .get({ TableName: 'Users', Key: { userId } })
    .promise();
  return Item;
}
// Query with index
/**
 * Look up users by email via the EmailIndex global secondary index.
 * @param {string} email - Exact email to match on the index key.
 * @returns {Promise<object[]>} Matching items (first result page).
 */
async function getUsersByEmail(email) {
  const query = {
    TableName: 'Users',
    IndexName: 'EmailIndex',
    KeyConditionExpression: 'email = :email',
    ExpressionAttributeValues: { ':email': email },
  };
  const { Items } = await dynamodb.query(query).promise();
  return Items;
}
// Update item
/**
 * Update attributes on a user record and return the item after the write.
 * Fix over the original: the UpdateExpression is built dynamically from the
 * keys actually present in `updates`. The original hard-coded name + email
 * and failed whenever either was missing, because the DocumentClient
 * rejects `undefined` attribute values. Behavior is unchanged for callers
 * passing both fields.
 * @param {string} userId
 * @param {object} updates - Map of attribute name -> new value.
 * @returns {Promise<object>} All attributes of the item post-update.
 */
async function updateUser(userId, updates) {
  const entries = Object.entries(updates).filter(([, value]) => value !== undefined);
  if (entries.length === 0) {
    // Nothing to change; return the current item unchanged.
    const current = await dynamodb
      .get({ TableName: 'Users', Key: { userId } })
      .promise();
    return current.Item;
  }

  // Placeholders (#a0/:v0, …) keep reserved words like `name` legal.
  const names = {};
  const values = {};
  const assignments = entries.map(([attr, value], i) => {
    names[`#a${i}`] = attr;
    values[`:v${i}`] = value;
    return `#a${i} = :v${i}`;
  });

  const result = await dynamodb
    .update({
      TableName: 'Users',
      Key: { userId },
      UpdateExpression: `set ${assignments.join(', ')}`,
      ExpressionAttributeNames: names,
      ExpressionAttributeValues: values,
      ReturnValues: 'ALL_NEW',
    })
    .promise();
  return result.Attributes;
}
// Delete item
/**
 * Remove a user record by primary key. Succeeds even if the key is absent
 * (DynamoDB deletes are idempotent at the API level).
 * @param {string} userId
 * @returns {Promise<void>}
 */
async function deleteUser(userId) {
  await dynamodb
    .delete({ TableName: 'Users', Key: { userId } })
    .promise();
}
AWS CloudFormation
Infrastructure as Code
AWSTemplateFormatVersion: '2010-09-09'
Description: 'Web Application Stack'

Resources:
  # Apache web server bootstrapped at first boot via UserData.
  WebServerInstance:
    Type: AWS::EC2::Instance
    Properties:
      # NOTE(review): AMI IDs are region-specific — confirm for your region.
      ImageId: ami-0c55b159cbfafe1f0
      InstanceType: t2.micro
      KeyName: MyKeyPair
      SecurityGroupIds:
        - !Ref WebServerSecurityGroup
      UserData:
        Fn::Base64: !Sub |
          #!/bin/bash
          yum update -y
          yum install -y httpd
          systemctl start httpd
          systemctl enable httpd

  WebServerSecurityGroup:
    Type: AWS::EC2::SecurityGroup
    Properties:
      GroupDescription: Allow HTTP and SSH
      SecurityGroupIngress:
        # Public HTTP — intended for an internet-facing server.
        - IpProtocol: tcp
          FromPort: 80
          ToPort: 80
          CidrIp: 0.0.0.0/0
        # NOTE(review): SSH open to the world — restrict to a trusted CIDR.
        - IpProtocol: tcp
          FromPort: 22
          ToPort: 22
          CidrIp: 0.0.0.0/0

  S3Bucket:
    Type: AWS::S3::Bucket
    Properties:
      # NOTE(review): bucket names are globally unique; this fixed name will
      # collide across accounts — consider omitting BucketName so
      # CloudFormation generates one.
      BucketName: my-app-bucket
      # All four public-access guards enabled: the bucket stays private.
      PublicAccessBlockConfiguration:
        BlockPublicAcls: true
        BlockPublicPolicy: true
        IgnorePublicAcls: true
        RestrictPublicBuckets: true

Outputs:
  InstanceId:
    Description: Instance ID
    Value: !Ref WebServerInstance
  BucketName:
    Description: S3 Bucket Name
    Value: !Ref S3Bucket
Best Practices
1. Use IAM Roles Instead of Access Keys — attach a least-privilege policy such as the following to the role, rather than embedding long-lived credentials in code:
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:GetObject",
"s3:PutObject"
],
"Resource": "arn:aws:s3:::my-bucket/*"
}
]
}
2. Enable CloudWatch Monitoring
const AWS = require('aws-sdk');
const cloudwatch = new AWS.CloudWatch();
/**
 * Publish one custom metric data point to CloudWatch under the "MyApp"
 * namespace.
 * @param {string} metricName - Metric to record.
 * @param {number} value - Data point value (unit: Count).
 * @returns {Promise<void>}
 */
async function putMetric(metricName, value) {
  const datum = {
    MetricName: metricName,
    Value: value,
    Unit: 'Count',
    Timestamp: new Date(),
  };
  await cloudwatch
    .putMetricData({ Namespace: 'MyApp', MetricData: [datum] })
    .promise();
}
3. Use VPC for Security
# Dedicated VPC so application workloads are isolated from the default VPC.
resource "aws_vpc" "main" {
  cidr_block = "10.0.0.0/16"
  tags = {
    Name = "main-vpc"
  }
}
# Subnet intended to be private — no internet gateway route is defined here.
resource "aws_subnet" "private" {
  vpc_id = aws_vpc.main.id
  cidr_block = "10.0.1.0/24"
  tags = {
    Name = "private-subnet"
  }
}
4. Implement Auto Scaling
# Keep between 2 and 10 web instances; ELB health checks mean instances
# failing load-balancer checks are replaced, not just EC2 status failures.
resource "aws_autoscaling_group" "web" {
  name = "web-asg"
  min_size = 2
  max_size = 10
  desired_capacity = 2
  health_check_type = "ELB"
  launch_template {
    id = aws_launch_template.web.id
    # "$Latest" always launches from the newest template version.
    version = "$Latest"
  }
  # NOTE(review): an ASG also requires vpc_zone_identifier (subnets) or
  # availability_zones — this snippet defines neither and will not apply
  # as-is; confirm against the full configuration.
}
Cost Optimization
- 💰 Use Reserved Instances for predictable workloads
- ⏰ Stop unused resources during off-hours
- 📊 Monitor with Cost Explorer
- 🔄 Use S3 lifecycle policies
- 🎯 Right-size your instances
Conclusion
AWS provides a comprehensive suite of cloud services for building scalable applications. Master these core services to leverage the full power of cloud computing.
Resources
Enjoyed this article?
Explore more deep dives into cloud architecture, performance, and scalable application design.