Przejdź do głównej zawartości

Wzorce infrastruktury jako kod

Opanuj infrastrukturę jako kod dzięki Cursor i Claude Code. Ten przewodnik obejmuje moduły Terraform, szablony CloudFormation, programy Pulumi, playbooki Ansible, przepływy pracy GitOps i wzorce produkcyjne IaC z pomocą AI.

  1. Inicjalizacja projektu IaC

    Okno terminala
    # Generuj konfigurację IaC
    Agent: "Stwórz konfigurację infrastruktury z:
    - Modułami Terraform dla AWS
    - Zarządzaniem stanem z S3
    - Separacją środowisk
    - Najlepszymi praktykami bezpieczeństwa
    - Optymalizacją kosztów"
  2. Instalacja serwerów MCP IaC (opcjonalne)

    Okno terminala
    # AWS
    claude mcp add aws -- docker run -e AWS_ACCESS_KEY_ID=... ghcr.io/aws/mcp-server
    # Azure (brak endpointu URL - użyj pakietu npm)
    claude mcp add azure -- npx -y @azure/mcp@latest
    # Kubernetes
    claude mcp add k8s -- npx -y kubernetes-mcp-server
  3. Konfiguracja reguł AI

    # .cursorrules lub CLAUDE.md
    Najlepsze praktyki IaC:
    - Używaj zdalnego zarządzania stanem
    - Implementuj właściwe tagowanie
    - Stosuj zasadę najmniejszych uprawnień
    - Używaj modułów dla ponownego wykorzystania
    - Kontroluj wersje wszystkiego
    - Testuj zmiany infrastruktury
    - Dokumentuj wszystkie zasoby
# Prompt AI
Agent: "Stwórz strukturę modułu Terraform dla:
- VPC z publicznymi/prywatnymi podsieciami
- Klaster EKS z grupami węzłów
- RDS z replikami do odczytu
- Application load balancer
- Grupy bezpieczeństwa i IAM"
# modules/vpc/main.tf
# Pin the Terraform core and AWS provider versions so module runs are reproducible.
terraform {
required_version = ">= 1.5.0"
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 5.0"
}
}
}
locals {
  # Tags applied to every resource created by this module.
  # NOTE(review): the original also set LastModified = timestamp(); timestamp()
  # is re-evaluated on every plan/apply, so every tagged resource showed a
  # perpetual diff. The tag was removed to keep plans stable.
  common_tags = merge(
    var.tags,
    {
      Module      = "vpc"
      ManagedBy   = "terraform"
      Environment = var.environment
    }
  )

  # Longest of the three subnet lists; drives per-AZ resource counts below.
  max_subnet_length = max(
    length(var.private_subnets),
    length(var.public_subnets),
    length(var.database_subnets)
  )

  # One shared NAT gateway (cheapest), one per AZ (HA), or one per subnet.
  nat_gateway_count = var.single_nat_gateway ? 1 : var.one_nat_gateway_per_az ? length(var.azs) : local.max_subnet_length
}
# The VPC itself. EKS and interface endpoints require DNS support and
# DNS hostnames, both controlled by module variables here.
resource "aws_vpc" "this" {
cidr_block = var.cidr
enable_dns_hostnames = var.enable_dns_hostnames
enable_dns_support = var.enable_dns_support
tags = merge(
local.common_tags,
{
Name = format("%s-vpc", var.name)
}
)
}
# Internet Gateway — created only when at least one public subnet exists.
resource "aws_internet_gateway" "this" {
count = length(var.public_subnets) > 0 ? 1 : 0
vpc_id = aws_vpc.this.id
tags = merge(
local.common_tags,
{
Name = format("%s-igw", var.name)
}
)
}
# Public subnets. The kubernetes.io/* tags let EKS discover these subnets
# for internet-facing load balancers.
resource "aws_subnet" "public" {
count = length(var.public_subnets)
vpc_id = aws_vpc.this.id
cidr_block = element(var.public_subnets, count.index)
availability_zone = element(var.azs, count.index)
map_public_ip_on_launch = var.map_public_ip_on_launch
tags = merge(
local.common_tags,
{
Name = format("%s-public-%s", var.name, element(var.azs, count.index))
Type = "public"
"kubernetes.io/role/elb" = "1"
"kubernetes.io/cluster/${var.cluster_name}" = "shared"
}
)
}
# Private subnets — tagged for EKS internal (private) load balancer discovery.
resource "aws_subnet" "private" {
count = length(var.private_subnets)
vpc_id = aws_vpc.this.id
cidr_block = element(var.private_subnets, count.index)
availability_zone = element(var.azs, count.index)
tags = merge(
local.common_tags,
{
Name = format("%s-private-%s", var.name, element(var.azs, count.index))
Type = "private"
"kubernetes.io/role/internal-elb" = "1"
"kubernetes.io/cluster/${var.cluster_name}" = "shared"
}
)
}
# Database subnets — isolated tier, no Kubernetes discovery tags.
resource "aws_subnet" "database" {
count = length(var.database_subnets)
vpc_id = aws_vpc.this.id
cidr_block = element(var.database_subnets, count.index)
availability_zone = element(var.azs, count.index)
tags = merge(
local.common_tags,
{
Name = format("%s-database-%s", var.name, element(var.azs, count.index))
Type = "database"
}
)
}
# Elastic IPs for the NAT gateways — one per gateway (local.nat_gateway_count).
# The IGW must exist before an EIP can be bound to a NAT gateway, hence the
# explicit depends_on.
resource "aws_eip" "nat" {
count = var.enable_nat_gateway ? local.nat_gateway_count : 0
domain = "vpc"
tags = merge(
local.common_tags,
{
Name = format("%s-eip-%s", var.name, element(var.azs, count.index))
}
)
depends_on = [aws_internet_gateway.this]
}
# NAT gateways live in the public subnets to give private subnets egress-only
# internet access.
resource "aws_nat_gateway" "this" {
count = var.enable_nat_gateway ? local.nat_gateway_count : 0
allocation_id = element(aws_eip.nat[*].id, count.index)
subnet_id = element(aws_subnet.public[*].id, count.index)
tags = merge(
local.common_tags,
{
Name = format("%s-nat-%s", var.name, element(var.azs, count.index))
}
)
depends_on = [aws_internet_gateway.this]
}
# Route tables: one shared table for all public subnets, and one table per
# NAT gateway for the private subnets.
resource "aws_route_table" "public" {
count = length(var.public_subnets) > 0 ? 1 : 0
vpc_id = aws_vpc.this.id
tags = merge(
local.common_tags,
{
Name = format("%s-public-rt", var.name)
Type = "public"
}
)
}
resource "aws_route_table" "private" {
count = var.enable_nat_gateway ? local.nat_gateway_count : 0
vpc_id = aws_vpc.this.id
tags = merge(
local.common_tags,
{
Name = format("%s-private-rt-%s", var.name, element(var.azs, count.index))
Type = "private"
}
)
}
# Default routes: public traffic exits via the IGW, private traffic via the
# corresponding NAT gateway.
resource "aws_route" "public_internet_gateway" {
count = length(var.public_subnets) > 0 ? 1 : 0
route_table_id = aws_route_table.public[0].id
destination_cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.this[0].id
timeouts {
create = "5m"
}
}
resource "aws_route" "private_nat_gateway" {
count = var.enable_nat_gateway ? local.nat_gateway_count : 0
route_table_id = element(aws_route_table.private[*].id, count.index)
destination_cidr_block = "0.0.0.0/0"
nat_gateway_id = element(aws_nat_gateway.this[*].id, count.index)
timeouts {
create = "5m"
}
}
# Route table associations. With single_nat_gateway every private subnet
# shares private route table 0.
resource "aws_route_table_association" "public" {
count = length(var.public_subnets)
subnet_id = element(aws_subnet.public[*].id, count.index)
route_table_id = aws_route_table.public[0].id
}
resource "aws_route_table_association" "private" {
count = length(var.private_subnets)
subnet_id = element(aws_subnet.private[*].id, count.index)
route_table_id = element(
aws_route_table.private[*].id,
var.single_nat_gateway ? 0 : count.index
)
}
# S3 gateway endpoint — keeps S3 traffic on the AWS network and avoids NAT
# data-processing charges.
# NOTE(review): data.aws_vpc_endpoint_service.s3 is not declared in this
# file — confirm the data source exists elsewhere in the module.
resource "aws_vpc_endpoint" "s3" {
count = var.enable_s3_endpoint ? 1 : 0
vpc_id = aws_vpc.this.id
service_name = data.aws_vpc_endpoint_service.s3.service_name
tags = merge(
local.common_tags,
{
Name = format("%s-s3-endpoint", var.name)
}
)
}
resource "aws_vpc_endpoint_route_table_association" "s3_public" {
count = var.enable_s3_endpoint && length(var.public_subnets) > 0 ? 1 : 0
vpc_endpoint_id = aws_vpc_endpoint.s3[0].id
route_table_id = aws_route_table.public[0].id
}
# NOTE(review): gated on nat_gateway_count, so the endpoint is not attached
# to any private route table when NAT gateways are disabled.
resource "aws_vpc_endpoint_route_table_association" "s3_private" {
count = var.enable_s3_endpoint ? local.nat_gateway_count : 0
vpc_endpoint_id = aws_vpc_endpoint.s3[0].id
route_table_id = element(aws_route_table.private[*].id, count.index)
}
# VPC Flow Logs shipped to a CloudWatch log group via a dedicated IAM role.
resource "aws_flow_log" "this" {
count = var.enable_flow_log ? 1 : 0
iam_role_arn = aws_iam_role.flow_log[0].arn
log_destination = aws_cloudwatch_log_group.flow_log[0].arn
traffic_type = var.flow_log_traffic_type
vpc_id = aws_vpc.this.id
tags = merge(
local.common_tags,
{
Name = format("%s-flow-log", var.name)
}
)
}
resource "aws_cloudwatch_log_group" "flow_log" {
count = var.enable_flow_log ? 1 : 0
name = "/aws/vpc/${var.name}"
retention_in_days = var.flow_log_retention_in_days
kms_key_id = var.flow_log_kms_key_id
tags = local.common_tags
}
# Role assumed by the VPC Flow Logs service principal.
resource "aws_iam_role" "flow_log" {
count = var.enable_flow_log ? 1 : 0
name = format("%s-flow-log-role", var.name)
assume_role_policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Effect = "Allow"
Principal = {
Service = "vpc-flow-logs.amazonaws.com"
}
Action = "sts:AssumeRole"
}
]
})
tags = local.common_tags
}
# NOTE(review): Resource = "*" is broader than needed; consider scoping the
# policy to the flow-log log group ARN (least privilege).
resource "aws_iam_role_policy" "flow_log" {
count = var.enable_flow_log ? 1 : 0
name = format("%s-flow-log-policy", var.name)
role = aws_iam_role.flow_log[0].id
policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Effect = "Allow"
Action = [
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:PutLogEvents",
"logs:DescribeLogGroups",
"logs:DescribeLogStreams"
]
Resource = "*"
}
]
})
}
# Prompt AI
Agent: "Stwórz szablon CloudFormation dla:
- Architektury trójwarstwowej
- Grup auto-scaling
- RDS Multi-AZ
- Klastra ElastiCache
- Dystrybucji CloudFront"
# templates/infrastructure.yaml
# NOTE(review): YAML indentation was lost when this template was captured;
# nesting must be restored before the template will parse.
# NOTE(review): a second top-level "Parameters:" key (declaring LatestAmiId)
# appears near the end of this template; duplicate top-level keys are invalid
# in YAML/CloudFormation — merge it into this section.
AWSTemplateFormatVersion: '2010-09-09'
Description: 'Infrastruktura aplikacji trójwarstwowej z HA i auto-scaling'
Parameters:
EnvironmentName:
Description: Nazwa prefiksu środowiska
Type: String
Default: production
VPCCidr:
Description: Blok CIDR dla VPC
Type: String
Default: 10.0.0.0/16
PublicSubnetCidrs:
Description: Bloki CIDR dla publicznych podsieci
Type: CommaDelimitedList
Default: "10.0.1.0/24,10.0.2.0/24,10.0.3.0/24"
PrivateSubnetCidrs:
Description: Bloki CIDR dla prywatnych podsieci
Type: CommaDelimitedList
Default: "10.0.11.0/24,10.0.12.0/24,10.0.13.0/24"
DatabaseSubnetCidrs:
Description: Bloki CIDR dla podsieci bazy danych
Type: CommaDelimitedList
Default: "10.0.21.0/24,10.0.22.0/24,10.0.23.0/24"
InstanceType:
Description: Typ instancji EC2 dla serwerów aplikacji
Type: String
Default: t3.medium
AllowedValues:
- t3.small
- t3.medium
- t3.large
- m5.large
- m5.xlarge
DatabaseInstanceType:
Description: Typ instancji RDS
Type: String
Default: db.t3.medium
# NOTE(review): prefer resolving the password from Secrets Manager or SSM
# (dynamic references) instead of a NoEcho string parameter.
DatabasePassword:
Description: Hasło master RDS
Type: String
NoEcho: true
MinLength: 8
MaxLength: 41
KeyPairName:
Description: Nazwa pary kluczy EC2
Type: AWS::EC2::KeyPair::KeyName
MinSize:
Description: Minimalna liczba instancji
Type: Number
Default: 2
MinValue: 1
MaxSize:
Description: Maksymalna liczba instancji
Type: Number
Default: 10
MinValue: 1
DesiredCapacity:
Description: Pożądana liczba instancji
Type: Number
Default: 4
MinValue: 1
# Console grouping of parameters — cosmetic only.
Metadata:
AWS::CloudFormation::Interface:
ParameterGroups:
- Label:
default: "Konfiguracja sieci"
Parameters:
- VPCCidr
- PublicSubnetCidrs
- PrivateSubnetCidrs
- DatabaseSubnetCidrs
- Label:
default: "Konfiguracja serwera"
Parameters:
- InstanceType
- KeyPairName
- MinSize
- MaxSize
- DesiredCapacity
- Label:
default: "Konfiguracja bazy danych"
Parameters:
- DatabaseInstanceType
- DatabasePassword
Resources:
# VPC with DNS support/hostnames enabled (required for private hosted zones
# and instance DNS names).
VPC:
Type: AWS::EC2::VPC
Properties:
CidrBlock: !Ref VPCCidr
EnableDnsHostnames: true
EnableDnsSupport: true
Tags:
- Key: Name
Value: !Sub ${EnvironmentName}-vpc
InternetGateway:
Type: AWS::EC2::InternetGateway
Properties:
Tags:
- Key: Name
Value: !Sub ${EnvironmentName}-igw
InternetGatewayAttachment:
Type: AWS::EC2::VPCGatewayAttachment
Properties:
InternetGatewayId: !Ref InternetGateway
VpcId: !Ref VPC
# Public subnets, one per AZ. kubernetes.io/role/elb marks them for
# internet-facing EKS load balancers.
# NOTE(review): !GetAZs indexing assumes the region has at least 3 AZs.
PublicSubnet1:
Type: AWS::EC2::Subnet
Properties:
VpcId: !Ref VPC
AvailabilityZone: !Select [0, !GetAZs '']
CidrBlock: !Select [0, !Ref PublicSubnetCidrs]
MapPublicIpOnLaunch: true
Tags:
- Key: Name
Value: !Sub ${EnvironmentName}-public-subnet-1
- Key: kubernetes.io/role/elb
Value: 1
PublicSubnet2:
Type: AWS::EC2::Subnet
Properties:
VpcId: !Ref VPC
AvailabilityZone: !Select [1, !GetAZs '']
CidrBlock: !Select [1, !Ref PublicSubnetCidrs]
MapPublicIpOnLaunch: true
Tags:
- Key: Name
Value: !Sub ${EnvironmentName}-public-subnet-2
- Key: kubernetes.io/role/elb
Value: 1
PublicSubnet3:
Type: AWS::EC2::Subnet
Properties:
VpcId: !Ref VPC
AvailabilityZone: !Select [2, !GetAZs '']
CidrBlock: !Select [2, !Ref PublicSubnetCidrs]
MapPublicIpOnLaunch: true
Tags:
- Key: Name
Value: !Sub ${EnvironmentName}-public-subnet-3
- Key: kubernetes.io/role/elb
Value: 1
# NAT gateways. Only two are declared for the three AZs — subnet 3 will
# share one of them (see the private route tables below).
NatGateway1EIP:
Type: AWS::EC2::EIP
DependsOn: InternetGatewayAttachment
Properties:
Domain: vpc
NatGateway2EIP:
Type: AWS::EC2::EIP
DependsOn: InternetGatewayAttachment
Properties:
Domain: vpc
NatGateway1:
Type: AWS::EC2::NatGateway
Properties:
AllocationId: !GetAtt NatGateway1EIP.AllocationId
SubnetId: !Ref PublicSubnet1
NatGateway2:
Type: AWS::EC2::NatGateway
Properties:
AllocationId: !GetAtt NatGateway2EIP.AllocationId
SubnetId: !Ref PublicSubnet2
# Public routing: one shared route table with a default route to the IGW.
PublicRouteTable:
Type: AWS::EC2::RouteTable
Properties:
VpcId: !Ref VPC
Tags:
- Key: Name
Value: !Sub ${EnvironmentName}-public-routes
DefaultPublicRoute:
Type: AWS::EC2::Route
DependsOn: InternetGatewayAttachment
Properties:
RouteTableId: !Ref PublicRouteTable
DestinationCidrBlock: 0.0.0.0/0
GatewayId: !Ref InternetGateway
PublicSubnet1RouteTableAssociation:
Type: AWS::EC2::SubnetRouteTableAssociation
Properties:
RouteTableId: !Ref PublicRouteTable
SubnetId: !Ref PublicSubnet1
PublicSubnet2RouteTableAssociation:
Type: AWS::EC2::SubnetRouteTableAssociation
Properties:
RouteTableId: !Ref PublicRouteTable
SubnetId: !Ref PublicSubnet2
PublicSubnet3RouteTableAssociation:
Type: AWS::EC2::SubnetRouteTableAssociation
Properties:
RouteTableId: !Ref PublicRouteTable
SubnetId: !Ref PublicSubnet3
# Private subnets — tagged for internal EKS load balancer discovery.
PrivateSubnet1:
Type: AWS::EC2::Subnet
Properties:
VpcId: !Ref VPC
AvailabilityZone: !Select [0, !GetAZs '']
CidrBlock: !Select [0, !Ref PrivateSubnetCidrs]
Tags:
- Key: Name
Value: !Sub ${EnvironmentName}-private-subnet-1
- Key: kubernetes.io/role/internal-elb
Value: 1
PrivateSubnet2:
Type: AWS::EC2::Subnet
Properties:
VpcId: !Ref VPC
AvailabilityZone: !Select [1, !GetAZs '']
CidrBlock: !Select [1, !Ref PrivateSubnetCidrs]
Tags:
- Key: Name
Value: !Sub ${EnvironmentName}-private-subnet-2
- Key: kubernetes.io/role/internal-elb
Value: 1
PrivateSubnet3:
Type: AWS::EC2::Subnet
Properties:
VpcId: !Ref VPC
AvailabilityZone: !Select [2, !GetAZs '']
CidrBlock: !Select [2, !Ref PrivateSubnetCidrs]
Tags:
- Key: Name
Value: !Sub ${EnvironmentName}-private-subnet-3
- Key: kubernetes.io/role/internal-elb
Value: 1
# Private routing: one route table per NAT gateway. PrivateSubnet3 is
# associated with PrivateRouteTable2, so its egress crosses AZs through
# NatGateway2 — acceptable for cost, not for full AZ isolation.
PrivateRouteTable1:
Type: AWS::EC2::RouteTable
Properties:
VpcId: !Ref VPC
Tags:
- Key: Name
Value: !Sub ${EnvironmentName}-private-routes-1
DefaultPrivateRoute1:
Type: AWS::EC2::Route
Properties:
RouteTableId: !Ref PrivateRouteTable1
DestinationCidrBlock: 0.0.0.0/0
NatGatewayId: !Ref NatGateway1
PrivateSubnet1RouteTableAssociation:
Type: AWS::EC2::SubnetRouteTableAssociation
Properties:
RouteTableId: !Ref PrivateRouteTable1
SubnetId: !Ref PrivateSubnet1
PrivateRouteTable2:
Type: AWS::EC2::RouteTable
Properties:
VpcId: !Ref VPC
Tags:
- Key: Name
Value: !Sub ${EnvironmentName}-private-routes-2
DefaultPrivateRoute2:
Type: AWS::EC2::Route
Properties:
RouteTableId: !Ref PrivateRouteTable2
DestinationCidrBlock: 0.0.0.0/0
NatGatewayId: !Ref NatGateway2
PrivateSubnet2RouteTableAssociation:
Type: AWS::EC2::SubnetRouteTableAssociation
Properties:
RouteTableId: !Ref PrivateRouteTable2
SubnetId: !Ref PrivateSubnet2
PrivateSubnet3RouteTableAssociation:
Type: AWS::EC2::SubnetRouteTableAssociation
Properties:
RouteTableId: !Ref PrivateRouteTable2
SubnetId: !Ref PrivateSubnet3
# Database subnets — isolated tier, no internet route tables attached here.
DatabaseSubnet1:
Type: AWS::EC2::Subnet
Properties:
VpcId: !Ref VPC
AvailabilityZone: !Select [0, !GetAZs '']
CidrBlock: !Select [0, !Ref DatabaseSubnetCidrs]
Tags:
- Key: Name
Value: !Sub ${EnvironmentName}-database-subnet-1
DatabaseSubnet2:
Type: AWS::EC2::Subnet
Properties:
VpcId: !Ref VPC
AvailabilityZone: !Select [1, !GetAZs '']
CidrBlock: !Select [1, !Ref DatabaseSubnetCidrs]
Tags:
- Key: Name
Value: !Sub ${EnvironmentName}-database-subnet-2
DatabaseSubnet3:
Type: AWS::EC2::Subnet
Properties:
VpcId: !Ref VPC
AvailabilityZone: !Select [2, !GetAZs '']
CidrBlock: !Select [2, !Ref DatabaseSubnetCidrs]
Tags:
- Key: Name
Value: !Sub ${EnvironmentName}-database-subnet-3
# Security groups — layered: internet -> ALB -> web servers -> DB/cache.
# ALB is intentionally open to 0.0.0.0/0 on 80/443 (public entry point).
ALBSecurityGroup:
Type: AWS::EC2::SecurityGroup
Properties:
GroupName: !Sub ${EnvironmentName}-alb-sg
GroupDescription: Grupa bezpieczeństwa dla Application Load Balancer
VpcId: !Ref VPC
SecurityGroupIngress:
- IpProtocol: tcp
FromPort: 80
ToPort: 80
CidrIp: 0.0.0.0/0
- IpProtocol: tcp
FromPort: 443
ToPort: 443
CidrIp: 0.0.0.0/0
Tags:
- Key: Name
Value: !Sub ${EnvironmentName}-alb-sg
# Web servers accept traffic only from the ALB security group.
WebServerSecurityGroup:
Type: AWS::EC2::SecurityGroup
Properties:
GroupName: !Sub ${EnvironmentName}-webserver-sg
GroupDescription: Grupa bezpieczeństwa dla serwerów web
VpcId: !Ref VPC
SecurityGroupIngress:
- IpProtocol: tcp
FromPort: 80
ToPort: 80
SourceSecurityGroupId: !Ref ALBSecurityGroup
- IpProtocol: tcp
FromPort: 443
ToPort: 443
SourceSecurityGroupId: !Ref ALBSecurityGroup
Tags:
- Key: Name
Value: !Sub ${EnvironmentName}-webserver-sg
# MySQL (3306) reachable only from the web tier.
DatabaseSecurityGroup:
Type: AWS::EC2::SecurityGroup
Properties:
GroupName: !Sub ${EnvironmentName}-database-sg
GroupDescription: Grupa bezpieczeństwa dla bazy danych RDS
VpcId: !Ref VPC
SecurityGroupIngress:
- IpProtocol: tcp
FromPort: 3306
ToPort: 3306
SourceSecurityGroupId: !Ref WebServerSecurityGroup
Tags:
- Key: Name
Value: !Sub ${EnvironmentName}-database-sg
# Redis (6379) reachable only from the web tier.
CacheSecurityGroup:
Type: AWS::EC2::SecurityGroup
Properties:
GroupName: !Sub ${EnvironmentName}-cache-sg
GroupDescription: Grupa bezpieczeństwa dla ElastiCache
VpcId: !Ref VPC
SecurityGroupIngress:
- IpProtocol: tcp
FromPort: 6379
ToPort: 6379
SourceSecurityGroupId: !Ref WebServerSecurityGroup
Tags:
- Key: Name
Value: !Sub ${EnvironmentName}-cache-sg
# Application Load Balancer spanning the three public subnets.
ApplicationLoadBalancer:
Type: AWS::ElasticLoadBalancingV2::LoadBalancer
Properties:
Name: !Sub ${EnvironmentName}-alb
Subnets:
- !Ref PublicSubnet1
- !Ref PublicSubnet2
- !Ref PublicSubnet3
SecurityGroups:
- !Ref ALBSecurityGroup
Tags:
- Key: Name
Value: !Sub ${EnvironmentName}-alb
# Target group with an HTTP /health check.
ALBTargetGroup:
Type: AWS::ElasticLoadBalancingV2::TargetGroup
Properties:
Name: !Sub ${EnvironmentName}-tg
Port: 80
Protocol: HTTP
VpcId: !Ref VPC
HealthCheckEnabled: true
HealthCheckPath: /health
HealthCheckProtocol: HTTP
HealthCheckIntervalSeconds: 30
HealthCheckTimeoutSeconds: 5
HealthyThresholdCount: 2
UnhealthyThresholdCount: 3
TargetType: instance
Tags:
- Key: Name
Value: !Sub ${EnvironmentName}-tg
# NOTE(review): HTTP-only listener; add an HTTPS (443) listener with an ACM
# certificate for production traffic.
ALBListener:
Type: AWS::ElasticLoadBalancingV2::Listener
Properties:
DefaultActions:
- Type: forward
TargetGroupArn: !Ref ALBTargetGroup
LoadBalancerArn: !Ref ApplicationLoadBalancer
Port: 80
Protocol: HTTP
# Launch template for the web tier.
# NOTE(review): LatestAmiId is declared under a second, duplicate top-level
# "Parameters:" key at the end of this template; InstanceProfile is not
# defined in the visible template — confirm both.
LaunchTemplate:
Type: AWS::EC2::LaunchTemplate
Properties:
LaunchTemplateName: !Sub ${EnvironmentName}-lt
LaunchTemplateData:
ImageId: !Ref LatestAmiId
InstanceType: !Ref InstanceType
KeyName: !Ref KeyPairName
SecurityGroupIds:
- !Ref WebServerSecurityGroup
IamInstanceProfile:
Arn: !GetAtt InstanceProfile.Arn
UserData:
Fn::Base64: !Sub |
#!/bin/bash
yum update -y
yum install -y httpd
systemctl start httpd
systemctl enable httpd
# Instaluj agenta CloudWatch
wget https://s3.amazonaws.com/amazoncloudwatch-agent/amazon_linux/amd64/latest/amazon-cloudwatch-agent.rpm
rpm -U ./amazon-cloudwatch-agent.rpm
# Konfiguruj aplikację
cat > /var/www/html/index.html <<EOF
<h1>Witaj z ${EnvironmentName}</h1>
<p>ID instancji: $(ec2-metadata --instance-id | cut -d " " -f 2)</p>
<p>Strefa dostępności: $(ec2-metadata --availability-zone | cut -d " " -f 2)</p>
EOF
# Konfiguruj logi CloudWatch
/opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl \
-a fetch-config \
-m ec2 \
-s \
-c file:/opt/aws/amazon-cloudwatch-agent/etc/config.json
TagSpecifications:
- ResourceType: instance
Tags:
- Key: Name
Value: !Sub ${EnvironmentName}-instance
- ResourceType: volume
Tags:
- Key: Name
Value: !Sub ${EnvironmentName}-volume
# Auto Scaling group over the private subnets, health-checked via the ALB.
AutoScalingGroup:
Type: AWS::AutoScaling::AutoScalingGroup
Properties:
AutoScalingGroupName: !Sub ${EnvironmentName}-asg
VPCZoneIdentifier:
- !Ref PrivateSubnet1
- !Ref PrivateSubnet2
- !Ref PrivateSubnet3
LaunchTemplate:
LaunchTemplateId: !Ref LaunchTemplate
Version: !GetAtt LaunchTemplate.LatestVersionNumber
MinSize: !Ref MinSize
MaxSize: !Ref MaxSize
DesiredCapacity: !Ref DesiredCapacity
HealthCheckType: ELB
HealthCheckGracePeriod: 300
TargetGroupARNs:
- !Ref ALBTargetGroup
Tags:
- Key: Name
Value: !Sub ${EnvironmentName}-asg-instance
PropagateAtLaunch: true
# SSM parameter lookup for the latest Amazon Linux 2 AMI.
# NOTE(review): this is a second top-level "Parameters:" key — duplicate
# top-level keys are invalid; move LatestAmiId into the main Parameters
# section at the top of the template.
Parameters:
LatestAmiId:
Type: AWS::SSM::Parameter::Value<AWS::EC2::Image::Id>
Default: /aws/service/ami-amazon-linux-latest/amzn2-ami-hvm-x86_64-gp2
# Cross-stack exports.
# NOTE(review): CloudFrontDistribution, DatabaseInstance and CacheCluster are
# referenced below but not defined in the visible template — confirm they
# exist in the full version or remove these outputs.
Outputs:
VPCId:
Description: ID VPC
Value: !Ref VPC
Export:
Name: !Sub ${EnvironmentName}-VPC-ID
PublicSubnets:
Description: ID publicznych podsieci
Value: !Join [',', [!Ref PublicSubnet1, !Ref PublicSubnet2, !Ref PublicSubnet3]]
Export:
Name: !Sub ${EnvironmentName}-PUBLIC-SUBNETS
PrivateSubnets:
Description: ID prywatnych podsieci
Value: !Join [',', [!Ref PrivateSubnet1, !Ref PrivateSubnet2, !Ref PrivateSubnet3]]
Export:
Name: !Sub ${EnvironmentName}-PRIVATE-SUBNETS
ALBDNSName:
Description: Nazwa DNS Application Load Balancer
Value: !GetAtt ApplicationLoadBalancer.DNSName
Export:
Name: !Sub ${EnvironmentName}-ALB-DNS
CloudFrontURL:
Description: URL dystrybucji CloudFront
Value: !GetAtt CloudFrontDistribution.DomainName
Export:
Name: !Sub ${EnvironmentName}-CLOUDFRONT-URL
DatabaseEndpoint:
Description: Endpoint bazy danych RDS
Value: !GetAtt DatabaseInstance.Endpoint.Address
Export:
Name: !Sub ${EnvironmentName}-DATABASE-ENDPOINT
CacheEndpoint:
Description: Endpoint ElastiCache
Value: !GetAtt CacheCluster.RedisEndpoint.Address
Export:
Name: !Sub ${EnvironmentName}-CACHE-ENDPOINT
// Prompt AI
Agent: "Stwórz program Pulumi dla:
- Klastra Kubernetes z GitOps
- Wdrożenia mikrousług
- Konfiguracji service mesh
- Stosu obserwowalności
- Wdrożenia multi-region"
// index.ts
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
import * as awsx from "@pulumi/awsx";
import * as eks from "@pulumi/eks";
import * as k8s from "@pulumi/kubernetes";
import * as cloudflare from "@pulumi/cloudflare";
// Stack configuration with defaults sized for a small three-AZ cluster.
const config = new pulumi.Config();
const environment = config.require("environment");
const region = config.get("region") || aws.config.region;
const azCount = config.getNumber("azCount") || 3;
const instanceType = config.get("instanceType") || "t3.medium";
const minSize = config.getNumber("minSize") || 2;
const maxSize = config.getNumber("maxSize") || 10;
const desiredCapacity = config.getNumber("desiredCapacity") || 4;
// Tags applied to every resource created by this program.
const tags = {
Environment: environment,
ManagedBy: "pulumi",
Project: pulumi.getProject(),
Stack: pulumi.getStack(),
};
// VPC with public / private / isolated (database) subnet tiers.
// Kubernetes discovery tags are set on the public and private tiers.
const vpc = new awsx.ec2.Vpc(`${environment}-vpc`, {
numberOfAvailabilityZones: azCount,
natGateways: {
strategy: "Single", // or "HighlyAvailable" for production
},
tags: {
...tags,
Name: `${environment}-vpc`,
},
subnets: [
{
type: "public",
tags: {
...tags,
"kubernetes.io/role/elb": "1",
Type: "public",
},
},
{
type: "private",
tags: {
...tags,
"kubernetes.io/role/internal-elb": "1",
Type: "private",
},
},
{
type: "isolated",
name: "database",
tags: {
...tags,
Type: "database",
},
},
],
});
// EKS cluster in the private subnets with control-plane logging enabled.
const cluster = new eks.Cluster(`${environment}-cluster`, {
vpcId: vpc.id,
subnetIds: vpc.privateSubnetIds,
instanceType: instanceType,
desiredCapacity: desiredCapacity,
minSize: minSize,
maxSize: maxSize,
nodeAssociatePublicIpAddress: false,
version: "1.28",
enabledClusterLogTypes: [
"api",
"audit",
"authenticator",
"controllerManager",
"scheduler",
],
tags: tags,
// Enable the OIDC provider for IRSA (IAM Roles for Service Accounts).
createOidcProvider: true,
// Default node group configuration.
nodeGroupOptions: {
amiType: "AL2_x86_64",
diskSize: 100,
instanceTypes: [instanceType],
labels: {
"node.kubernetes.io/lifecycle": "normal",
},
taints: [],
tags: {
...tags,
Name: `${environment}-node`,
},
},
// Fargate profiles for system workloads.
fargateProfiles: [
{
name: "system",
selectors: [
{
namespace: "kube-system",
labels: {
"fargate": "true",
},
},
{
namespace: "cert-manager",
},
],
},
],
});
// Kubernetes provider bound to the new cluster's kubeconfig; all in-cluster
// resources below are created through it.
const k8sProvider = new k8s.Provider(`${environment}-k8s`, {
kubeconfig: cluster.kubeconfig,
});
// Install Metrics Server (required by HPA and `kubectl top`).
const metricsServer = new k8s.helm.v3.Release(
"metrics-server",
{
chart: "metrics-server",
namespace: "kube-system",
repositoryOpts: {
repo: "https://kubernetes-sigs.github.io/metrics-server/",
},
values: {
args: [
"--cert-dir=/tmp",
"--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname",
"--kubelet-use-node-status-port",
"--metric-resolution=15s",
],
},
},
{ provider: k8sProvider }
);
// Install Cluster Autoscaler with auto-discovery of the cluster's ASGs.
// NOTE(review): clusterAutoscalerRole is referenced below but is not defined
// in this excerpt — it must be an IRSA role created elsewhere; confirm.
const clusterAutoscaler = new k8s.helm.v3.Release(
"cluster-autoscaler",
{
chart: "cluster-autoscaler",
namespace: "kube-system",
repositoryOpts: {
repo: "https://kubernetes.github.io/autoscaler",
},
values: {
autoDiscovery: {
clusterName: cluster.eksCluster.name,
},
awsRegion: region,
rbac: {
serviceAccount: {
annotations: {
"eks.amazonaws.com/role-arn": clusterAutoscalerRole.arn,
},
},
},
},
},
{ provider: k8sProvider, dependsOn: [cluster] }
);
// Stack exports.
// NOTE(review): database, appBucket, cdn and istioIngress are referenced
// below but are not defined in this excerpt — they presumably come from
// omitted sections of the program; confirm before use.
export const vpcId = vpc.id;
export const clusterName = cluster.eksCluster.name;
export const kubeconfig = cluster.kubeconfig;
export const databaseEndpoint = database.endpoint;
export const databasePort = database.port;
export const appBucketName = appBucket.bucket;
export const cdnDomainName = cdn.domainName;
export const istioIngressEndpoint = istioIngress.status.apply(
s => s.loadBalancer?.ingress?.[0]?.hostname || ""
);
// Structured outputs for StackReference consumers.
export const stackOutputs = {
vpc: {
id: vpc.id,
publicSubnets: vpc.publicSubnetIds,
privateSubnets: vpc.privateSubnetIds,
databaseSubnets: vpc.isolatedSubnetIds,
},
eks: {
clusterName: cluster.eksCluster.name,
clusterEndpoint: cluster.eksCluster.endpoint,
nodeSecurityGroup: cluster.nodeSecurityGroup.id,
},
database: {
endpoint: database.endpoint,
port: database.port,
},
storage: {
appBucket: appBucket.bucket,
},
cdn: {
domainName: cdn.domainName,
distributionId: cdn.id,
},
};
# Prompt AI
Agent: "Stwórz konfigurację GitOps z:
- Manifestami aplikacji ArgoCD
- Konfiguracją multi-environment
- Progressive deployment
- Zarządzaniem sekretami
- Integracją monitoringu"
# argocd/applications/production.yaml
# ArgoCD Application for the production environment.
# NOTE(review): YAML indentation was lost when this manifest was captured;
# nesting must be restored before it will parse.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: production-app
namespace: argocd
# Finalizer makes ArgoCD cascade-delete managed resources on app deletion.
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: production
source:
repoURL: https://github.com/company/infrastructure
targetRevision: main
path: environments/production
helm:
valueFiles:
- values.yaml
- values-production.yaml
parameters:
- name: image.tag
value: "$ARGOCD_APP_REVISION"
destination:
server: https://kubernetes.default.svc
namespace: production
# Fully automated sync: prune removed resources and self-heal drift.
syncPolicy:
automated:
prune: true
selfHeal: true
allowEmpty: false
syncOptions:
- CreateNamespace=true
- PrunePropagationPolicy=foreground
- PruneLast=true
retry:
limit: 5
backoff:
duration: 5s
factor: 2
maxDuration: 3m
revisionHistoryLimit: 3
# Ignore replica counts managed by the HPA so ArgoCD does not fight it.
ignoreDifferences:
- group: apps
kind: Deployment
jsonPointers:
- /spec/replicas
- group: autoscaling
kind: HorizontalPodAutoscaler
jsonPointers:
- /spec/minReplicas
- /spec/maxReplicas
# environments/production/kustomization.yaml
# Production overlay on the shared base.
# NOTE(review): YAML indentation was lost when this file was captured.
# NOTE(review): `bases` and `patchesStrategicMerge` are deprecated in
# Kustomize v4+ (use `resources` and `patches`) — confirm the target version.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
bases:
- ../base
namespace: production
commonLabels:
environment: production
patchesStrategicMerge:
- deployment-patch.yaml
- service-patch.yaml
- ingress-patch.yaml
configMapGenerator:
- name: app-config
behavior: merge
files:
- config-production.yaml
# Secrets file is encrypted at rest (".enc" suffix — presumably SOPS; verify).
secretGenerator:
- name: app-secrets
behavior: merge
files:
- secrets-production.enc.yaml
images:
- name: app
newName: registry.company.com/app
newTag: v1.2.3
replicas:
- name: app
count: 5
resources:
- hpa.yaml
- pdb.yaml
- network-policy.yaml
# .github/workflows/gitops.yml
# CI workflow: build/push the image, then open a PR against the
# infrastructure repo so ArgoCD picks up the new tag.
# NOTE(review): YAML indentation was lost when this file was captured.
name: GitOps Deploy
on:
push:
branches:
- main
- develop
paths:
- 'src/**'
- 'Dockerfile'
- '.github/workflows/**'
env:
REGISTRY: ghcr.io
IMAGE_NAME: ${{ github.repository }}
jobs:
build-and-push:
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
# NOTE(review): steps.meta.outputs.tags can be a multi-line list of tags;
# the yq update below then writes a multi-line value. The metadata action's
# `version` output is the usual single-tag choice — confirm and adjust.
outputs:
image-tag: ${{ steps.meta.outputs.tags }}
image-digest: ${{ steps.build.outputs.digest }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Log in to Container Registry
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract metadata
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
tags: |
type=ref,event=branch
type=ref,event=pr
type=semver,pattern={{version}}
type=sha,prefix={{branch}}-
- name: Build and push Docker image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha
cache-to: type=gha,mode=max
platforms: linux/amd64,linux/arm64
update-manifests:
needs: build-and-push
runs-on: ubuntu-latest
permissions:
contents: write
pull-requests: write
steps:
- name: Checkout infrastructure repo
uses: actions/checkout@v4
with:
repository: company/infrastructure
token: ${{ secrets.INFRASTRUCTURE_PAT }}
# main deploys to production; any other branch deploys to staging.
- name: Update image tag
run: |
ENV_DIR="environments/${{ github.ref_name == 'main' && 'production' || 'staging' }}"
cd $ENV_DIR
yq e -i '.images[0].newTag = "${{ needs.build-and-push.outputs.image-tag }}"' kustomization.yaml
- name: Create Pull Request
uses: peter-evans/create-pull-request@v5
with:
token: ${{ secrets.INFRASTRUCTURE_PAT }}
commit-message: "chore: update ${{ github.ref_name }} image to ${{ needs.build-and-push.outputs.image-tag }}"
title: "Deploy ${{ needs.build-and-push.outputs.image-tag }} to ${{ github.ref_name }}"
body: |
## Aktualizacja wdrożenia
**Image**: `${{ needs.build-and-push.outputs.image-tag }}`
**Digest**: `${{ needs.build-and-push.outputs.image-digest }}`
**Environment**: `${{ github.ref_name == 'main' && 'production' || 'staging' }}`
### Zmiany
${{ github.event.head_commit.message }}
### Commit
${{ github.sha }}
branch: deploy/${{ github.ref_name }}-${{ github.sha }}
delete-branch: true
# Prompt AI
Agent: "Stwórz kompleksowe testy Terraform z:
- Testami jednostkowymi dla modułów
- Testami integracyjnymi
- Testowaniem zgodności
- Walidacją kosztów
- Skanowaniem bezpieczeństwa"
# test/terraform_test.go
package test
import (
"testing"
"time"
"github.com/gruntwork-io/terratest/modules/terraform"
"github.com/gruntwork-io/terratest/modules/aws"
"github.com/gruntwork-io/terratest/modules/retry"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestVPCModule(t *testing.T) {
t.Parallel()
// Region AWS
awsRegion := "us-east-1"
// Opcje Terraform
terraformOptions := &terraform.Options{
TerraformDir: "../modules/vpc",
Vars: map[string]interface{}{
"name": "test-vpc",
"cidr": "10.0.0.0/16",
"azs": []string{"us-east-1a", "us-east-1b", "us-east-1c"},
"private_subnets": []string{"10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"},
"public_subnets": []string{"10.0.101.0/24", "10.0.102.0/24", "10.0.103.0/24"},
"enable_nat_gateway": true,
"single_nat_gateway": true,
"environment": "test",
},
EnvVars: map[string]string{
"AWS_DEFAULT_REGION": awsRegion,
},
}
// Oczyść zasoby
defer terraform.Destroy(t, terraformOptions)
// Wdróż infrastrukturę
terraform.InitAndApply(t, terraformOptions)
// Pobierz wyjścia
vpcID := terraform.Output(t, terraformOptions, "vpc_id")
privateSubnetIDs := terraform.OutputList(t, terraformOptions, "private_subnet_ids")
publicSubnetIDs := terraform.OutputList(t, terraformOptions, "public_subnet_ids")
// Waliduj VPC
vpc := aws.GetVpcById(t, vpcID, awsRegion)
require.Equal(t, "10.0.0.0/16", vpc.CidrBlock)
// Waliduj podsieci
assert.Equal(t, 3, len(privateSubnetIDs))
assert.Equal(t, 3, len(publicSubnetIDs))
// Testuj łączność
for _, subnetID := range publicSubnetIDs {
subnet := aws.GetSubnetById(t, subnetID, awsRegion)
assert.True(t, subnet.MapPublicIpOnLaunch)
}
// Waliduj NAT Gateway
natGateways := aws.GetNatGatewaysInVpc(t, vpcID, awsRegion)
assert.Equal(t, 1, len(natGateways))
}
# policies/terraform.rego
# OPA policy evaluated against `terraform show -json` plan output.
# NOTE(review): rules use the legacy partial-set style (deny[msg] { ... })
# while importing future.keywords — consider `deny contains msg if { ... }`
# for consistency with Rego v1 syntax.
package terraform.analysis
import future.keywords.contains
import future.keywords.if
import future.keywords.in
# Deny publicly readable S3 buckets.
deny[msg] {
resource := input.resource_changes[_]
resource.type == "aws_s3_bucket"
resource.change.after.acl == "public-read"
msg := sprintf("Bucket S3 %v ma publiczny dostęp do odczytu", [resource.address])
}
# Require storage encryption on RDS instances.
deny[msg] {
resource := input.resource_changes[_]
resource.type == "aws_db_instance"
not resource.change.after.storage_encrypted
msg := sprintf("Instancja RDS %v nie jest zaszyfrowana", [resource.address])
}
# Enforce the tagging standard: every changed resource must carry all four tags.
deny[msg] {
resource := input.resource_changes[_]
required_tags := {"Environment", "Owner", "Project", "CostCenter"}
resource_tags := object.get(resource.change.after, "tags", {})
missing_tags := required_tags - {tag | resource_tags[tag]}
count(missing_tags) > 0
msg := sprintf("Zasób %v nie ma wymaganych tagów: %v", [resource.address, missing_tags])
}
# Instance types that only warn (not block) for cost control.
expensive_instance_types := {
"m5.24xlarge",
"c5.24xlarge",
"r5.24xlarge",
"x1e.32xlarge",
}
warn[msg] {
resource := input.resource_changes[_]
resource.type == "aws_instance"
resource.change.after.instance_type in expensive_instance_types
msg := sprintf("Instancja EC2 %v używa drogiego typu instancji: %v", [resource.address, resource.change.after.instance_type])
}
# Prompt AI
Agent: "Stwórz strategie optymalizacji kosztów z:
- Right-sizing zasobów
- Użyciem instancji spot
- Planowaniem instancji zarezerwowanych
- Politykami auto-scaling
- Tagami alokacji kosztów"
# modules/cost-optimized-compute/main.tf
# Kind of workload this module provisions for; selects instance sizing
# and spot configuration below.
variable "workload_type" {
description = "Typ workload: web, batch, ml"
type = string
default = "web"
}
locals {
# Instance-type recommendations keyed by workload type, then by a
# generic T-shirt size (small/medium/large).
instance_recommendations = {
web = {
small = "t3.small"
medium = "t3.medium"
large = "t3.large"
}
batch = {
small = "m5.large"
medium = "m5.xlarge"
large = "m5.2xlarge"
}
ml = {
small = "g4dn.xlarge"
medium = "g4dn.2xlarge"
large = "g4dn.4xlarge"
}
}
# Spot-instance configuration per workload. Web stays on on-demand
# capacity; batch and ML tolerate interruptions and use spot.
# NOTE(review): max_price values look like USD/hour caps — confirm
# against current regional spot pricing.
spot_config = {
web = { enabled = false, max_price = "" }
batch = { enabled = true, max_price = "0.50" }
ml = { enabled = true, max_price = "1.00" }
}
}
# Auto Scaling Group with a mixed-instances policy (spot + on-demand)
# for cost optimization. Instance types come from
# local.instance_recommendations, weighted by relative capacity.
resource "aws_autoscaling_group" "optimized" {
  name                = "${var.name}-asg"
  vpc_zone_identifier = var.subnet_ids
  min_size            = var.min_size
  max_size            = var.max_size
  desired_capacity    = var.desired_capacity

  # Mixed-instances policy for cost optimization.
  mixed_instances_policy {
    launch_template {
      launch_template_specification {
        launch_template_id = aws_launch_template.optimized.id
        version            = "$Latest"
      }
      override {
        instance_type     = local.instance_recommendations[var.workload_type]["small"]
        weighted_capacity = 1
      }
      override {
        instance_type     = local.instance_recommendations[var.workload_type]["medium"]
        weighted_capacity = 2
      }
      override {
        instance_type     = local.instance_recommendations[var.workload_type]["large"]
        weighted_capacity = 4
      }
    }
    instances_distribution {
      on_demand_base_capacity                  = var.on_demand_base
      on_demand_percentage_above_base_capacity = var.on_demand_percentage
      # BUG FIX: spot_instance_pools is only valid with the
      # "lowest-price" allocation strategy, so it is omitted here.
      spot_allocation_strategy = "capacity-optimized-prioritized"
      spot_max_price           = local.spot_config[var.workload_type]["max_price"]
    }
  }

  enabled_metrics = [
    "GroupMinSize",
    "GroupMaxSize",
    "GroupDesiredCapacity",
    "GroupInServiceInstances",
    "GroupTotalInstances"
  ]

  tag {
    key                 = "Name"
    value               = "${var.name}-instance"
    propagate_at_launch = true
  }
  tag {
    key                 = "Workload"
    value               = var.workload_type
    propagate_at_launch = true
  }
  tag {
    key                 = "CostCenter"
    value               = var.cost_center
    propagate_at_launch = true
  }
}

# BUG FIX: predictive scaling is configured on an aws_autoscaling_policy
# with policy_type = "PredictiveScaling" — aws_autoscaling_group has no
# predictive_scaling_configuration block, so the original dynamic block
# was invalid. count replaces the original dynamic/for_each toggle.
resource "aws_autoscaling_policy" "predictive" {
  count = var.enable_predictive_scaling ? 1 : 0

  name                   = "${var.name}-predictive-scaling"
  policy_type            = "PredictiveScaling"
  autoscaling_group_name = aws_autoscaling_group.optimized.name

  predictive_scaling_configuration {
    metric_specification {
      target_value = 50
      predefined_metric_pair_specification {
        predefined_metric_type = "ASGCPUUtilization"
      }
    }
    mode                         = "ForecastAndScale"
    scheduling_buffer_time       = 10
    max_capacity_breach_behavior = "IncreaseMaxCapacity"
    max_capacity_buffer          = 10
  }
}
# Scheduled scaling for predictable workloads: shrink the fleet outside
# business hours and restore capacity each morning.
resource "aws_autoscaling_schedule" "scale_down_nights" {
scheduled_action_name = "${var.name}-scale-down-nights"
min_size = 1
max_size = var.max_size
desired_capacity = 1
recurrence = "0 20 * * MON-FRI" # 8 PM on weekdays — NOTE(review): times are UTC unless a time_zone is configured; confirm intent
autoscaling_group_name = aws_autoscaling_group.optimized.name
}
resource "aws_autoscaling_schedule" "scale_up_mornings" {
scheduled_action_name = "${var.name}-scale-up-mornings"
min_size = var.min_size
max_size = var.max_size
desired_capacity = var.desired_capacity
recurrence = "0 7 * * MON-FRI" # 7 AM on weekdays (UTC by default)
autoscaling_group_name = aws_autoscaling_group.optimized.name
}
# Prompt AI
Agent: "Stwórz pipeline skanowania bezpieczeństwa z:
- Skanowaniem SAST/DAST
- Skanowaniem luk w kontenerach
- Sprawdzaniem zgodności infrastruktury
- Wykrywaniem sekretów
- Raportowaniem bezpieczeństwa"
# .github/workflows/security-scan.yml
# Security pipeline: IaC static analysis, cloud posture scanning, secret
# detection, filesystem/container vulnerability scanning, and a separate
# compliance-validation job. Runs on pushes, PRs to main, and nightly.
name: Skanowanie bezpieczeństwa
on:
  push:
    branches: [main, develop]
  pull_request:
    branches: [main]
  schedule:
    - cron: '0 2 * * *' # daily at 02:00 UTC
jobs:
  infrastructure-scan:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      # Terraform static security analysis
      - name: Terraform Security Scan
        uses: aquasecurity/tfsec-action@v1.0.0
        with:
          soft_fail: false
      - name: Checkov Policy Scan
        uses: bridgecrewio/checkov-action@master
        with:
          directory: .
          framework: terraform
          output_format: sarif
          output_file_path: checkov.sarif
      - name: Terrascan
        run: |
          docker run --rm -v "$(pwd):/src" \
          accurics/terrascan scan -t aws -f terraform \
          --config-path /src/.terrascan.yaml
      # Cloud security posture scan
      - name: Cloud Security Scan
        env:
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
        run: |
          # Prowler security scan
          docker run --rm \
          -e AWS_ACCESS_KEY_ID \
          -e AWS_SECRET_ACCESS_KEY \
          toniblyx/prowler:latest \
          -g cis_level2 -f json -o /tmp/prowler-report.json
      # Secret detection
      - name: Secret Detection
        # NOTE(review): trufflehog is commonly pinned to @main — confirm the v3 tag exists
        uses: trufflesecurity/trufflehog@v3
        with:
          path: ./
          base: ${{ github.event.repository.default_branch }}
          head: HEAD
      - name: GitLeaks
        uses: gitleaks/gitleaks-action@v2
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      # Filesystem / container vulnerability scan
      - name: Container Scan
        uses: aquasecurity/trivy-action@master
        with:
          scan-type: 'fs'
          scan-ref: '.'
          format: 'sarif'
          output: 'trivy-results.sarif'
          severity: 'CRITICAL,HIGH'
      # BUG FIX: upload-sarif accepts a single file (or directory) per
      # invocation, not a multi-line list of files. Upload each SARIF
      # file in its own step with a distinct category, and run the
      # uploads even when an earlier scan step failed.
      - name: Upload Checkov SARIF
        if: always()
        uses: github/codeql-action/upload-sarif@v3
        with:
          sarif_file: checkov.sarif
          category: checkov
      - name: Upload Trivy SARIF
        if: always()
        uses: github/codeql-action/upload-sarif@v3
        with:
          sarif_file: trivy-results.sarif
          category: trivy
  compliance-validation:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Setup InSpec
        run: |
          curl https://omnitruck.chef.io/install.sh | sudo bash -s -- -P inspec
      - name: Run Compliance Tests
        run: |
          # CIS benchmarks
          inspec exec https://github.com/dev-sec/cis-dil-benchmark \
          --reporter json:/tmp/cis-results.json
          # AWS Foundations Benchmark
          inspec exec https://github.com/inspec/inspec-aws \
          -t aws:// \
          --controls cis-aws-foundations-1.2.0 \
          --reporter json:/tmp/aws-foundations.json
          # Company-specific compliance profile
          inspec exec compliance/profiles/company-baseline \
          -t aws:// \
          --reporter json:/tmp/company-baseline.json html:/tmp/compliance-report.html
      - name: Generate Compliance Report
        run: |
          python scripts/generate_compliance_report.py \
          --cis-results /tmp/cis-results.json \
          --aws-results /tmp/aws-foundations.json \
          --company-results /tmp/company-baseline.json \
          --output compliance-report.pdf
# Prompt AI
Agent: "Stwórz infrastrukturę multi-cloud z:
- Modułami niezależnymi od chmury
- Abstrakcją dostawców
- Zunifikowaną siecią
- Łącznością cross-cloud
- Przenośnymi workloadami"
# modules/cloud-agnostic-compute/variables.tf
# Target cloud provider; selects which provider-specific submodule is
# instantiated in main.tf.
variable "cloud_provider" {
description = "Dostawca chmury: aws, azure, gcp"
type = string
# Fail fast at plan time on unsupported providers.
validation {
condition = contains(["aws", "azure", "gcp"], var.cloud_provider)
error_message = "Dostawca chmury musi być aws, azure lub gcp."
}
}
# Provider-agnostic T-shirt size; mapped to a concrete instance type in
# local.instance_mapping in main.tf.
variable "instance_size" {
description = "Ogólny rozmiar instancji: small, medium, large"
type = string
default = "medium"
}
# modules/cloud-agnostic-compute/main.tf
locals {
# Map generic T-shirt sizes to provider-specific instance types.
instance_mapping = {
aws = {
small = "t3.small"
medium = "t3.medium"
large = "t3.large"
}
azure = {
small = "Standard_B2s"
medium = "Standard_B2ms"
large = "Standard_B4ms"
}
gcp = {
small = "e2-small"
medium = "e2-medium"
large = "e2-standard-4"
}
}
# Map generic OS names to provider-specific images.
# NOTE(review): the hard-coded AWS AMI IDs are region-specific
# (presumably us-east-1) and will not resolve in other regions —
# consider a data "aws_ami" lookup instead; confirm before reuse.
image_mapping = {
aws = {
ubuntu = "ami-0c55b159cbfafe1f0" # Ubuntu 22.04
centos = "ami-0f2b4fc905b0bd1f1" # CentOS 8
}
azure = {
ubuntu = {
publisher = "Canonical"
offer = "0001-com-ubuntu-server-jammy"
sku = "22_04-lts"
}
centos = {
publisher = "OpenLogic"
offer = "CentOS"
sku = "8_5"
}
}
gcp = {
ubuntu = "ubuntu-os-cloud/ubuntu-2204-lts"
centos = "centos-cloud/centos-8"
}
}
}
# AWS implementation — created only when cloud_provider == "aws".
module "aws_compute" {
  count  = var.cloud_provider == "aws" ? 1 : 0
  source = "./aws"

  instance_type = local.instance_mapping["aws"][var.instance_size]
  ami           = local.image_mapping["aws"][var.os_type]
  subnet_id     = var.subnet_id
  user_data = templatefile("${path.module}/templates/cloud-init.yaml", {
    hostname = var.hostname
    packages = var.packages
  })
  tags = var.tags
}

# Azure implementation — created only when cloud_provider == "azure".
# BUG FIX: module calls accept only argument assignments, not nested
# configuration blocks; the image reference is passed as an object value
# and unpacked inside the child module.
module "azure_compute" {
  count  = var.cloud_provider == "azure" ? 1 : 0
  source = "./azure"

  vm_size = local.instance_mapping["azure"][var.instance_size]
  source_image_reference = {
    publisher = local.image_mapping["azure"][var.os_type]["publisher"]
    offer     = local.image_mapping["azure"][var.os_type]["offer"]
    sku       = local.image_mapping["azure"][var.os_type]["sku"]
    version   = "latest"
  }
  custom_data = base64encode(templatefile("${path.module}/templates/cloud-init.yaml", {
    hostname = var.hostname
    packages = var.packages
  }))
  tags = var.tags
}

# GCP implementation — created only when cloud_provider == "gcp".
# BUG FIX: boot_disk is likewise passed as an object argument rather
# than an (invalid) nested block on the module call.
module "gcp_compute" {
  count  = var.cloud_provider == "gcp" ? 1 : 0
  source = "./gcp"

  machine_type = local.instance_mapping["gcp"][var.instance_size]
  boot_disk = {
    initialize_params = {
      image = local.image_mapping["gcp"][var.os_type]
    }
  }
  metadata_startup_script = templatefile("${path.module}/templates/cloud-init.yaml", {
    hostname = var.hostname
    packages = var.packages
  })
  labels = var.tags
}
# Outputs — provider-agnostic values. try() yields "" for the two
# providers whose modules were not created (count = 0), and coalesce()
# returns the first non-null, non-empty value, i.e. the active provider's.
output "instance_id" {
value = coalesce(
try(module.aws_compute[0].instance_id, ""),
try(module.azure_compute[0].vm_id, ""),
try(module.gcp_compute[0].instance_id, "")
)
}
output "private_ip" {
value = coalesce(
try(module.aws_compute[0].private_ip, ""),
try(module.azure_compute[0].private_ip, ""),
try(module.gcp_compute[0].private_ip, "")
)
}

Wytyczne infrastruktury jako kod

  • Przechowuj cały kod infrastruktury w Git
  • Używaj znaczących komunikatów commit
  • Taguj wydania dla wdrożeń produkcyjnych
  • Implementuj reguły ochrony branchy
  • Przeglądaj wszystkie zmiany przez pull requesty
  • Twórz moduły wielokrotnego użytku
  • Utrzymuj moduły skupione i jednozadaniowe
  • Wersjonuj swoje moduły
  • Dokumentuj interfejsy modułów
  • Testuj moduły niezależnie
  • Używaj zdalnych backendów stanu
  • Włącz blokowanie stanu
  • Implementuj szyfrowanie plików stanu
  • Regularne kopie zapasowe stanu
  • Separuj stany według środowisk
  • Nigdy nie commituj sekretów
  • Używaj narzędzi do zarządzania sekretami
  • Implementuj najmniejsze uprawnienia
  • Włącz logowanie audytowe
  • Regularne skanowanie bezpieczeństwa
  • Testy jednostkowe modułów
  • Testy integracyjne wdrożeń
  • Walidacja zgodności
  • Szacowanie kosztów
  • Testowanie wydajności
  • Dokumentuj decyzje architektoniczne
  • Utrzymuj runbook
  • Twórz przewodniki wdrażania
  • Dokumentuj kroki rozwiązywania problemów
  • Utrzymuj aktualne przykłady
# Prompt AI
Agent: "Stwórz kompleksowy przepływ pracy rozwoju IaC z:
- Hookami pre-commit
- Testowaniem lokalnym
- Pipeline CI/CD
- Strategiami wdrażania
- Procedurami rollback"
# .pre-commit-config.yaml
# Local quality gates run before every commit: Terraform formatting,
# docs generation, validation, linting, security scanning, cost checks,
# plus general file-hygiene and credential-detection hooks.
repos:
# Terraform hooks
- repo: https://github.com/antonbabenko/pre-commit-terraform
rev: v1.83.5
hooks:
- id: terraform_fmt
- id: terraform_docs
args:
- --hook-config=--path-to-file=README.md
- --hook-config=--add-to-existing-file=true
- id: terraform_validate
- id: terraform_tflint
args:
- --args=--config=__GIT_WORKING_DIR__/.tflint.hcl
- id: terraform_tfsec
args:
- --args=--exclude-downloaded-modules
- id: checkov
args:
- --args=--quiet
- --args=--framework=terraform
# Cost guard: flags changes whose estimated hourly cost exceeds $1.
- id: infracost_breakdown
args:
- --args=--path=.
- --hook-config='.totalHourlyCost|tonumber > 1'
verbose: true
# General-purpose hygiene hooks
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.5.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: check-yaml
- id: check-json
- id: check-merge-conflict
- id: detect-private-key
- id: detect-aws-credentials
args: ['--allow-missing-credentials']
# Makefile — local development workflow for Terraform
# (help / init / validate / plan / apply / destroy / test / docs / clean)
.PHONY: help init validate plan apply destroy test docs clean

help: ## Pokaż tę pomoc
	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'

init: ## Inicjalizuj Terraform
	@echo "🚀 Inicjalizacja Terraform..."
	terraform init -upgrade
	@echo "📦 Instalacja hooków pre-commit..."
	pre-commit install
	@echo "✅ Inicjalizacja zakończona!"

validate: ## Waliduj konfigurację Terraform
	@echo "🔍 Walidacja plików Terraform..."
	terraform fmt -check -recursive
	terraform validate
	tflint --init && tflint
	@echo "🔒 Uruchamianie sprawdzeń bezpieczeństwa..."
	tfsec . --minimum-severity HIGH
	checkov -d . --quiet --framework terraform
	@echo "✅ Walidacja zakończona!"

plan: validate ## Stwórz plan Terraform
	@echo "📋 Tworzenie planu Terraform..."
	terraform plan -out=tfplan
	@echo "💰 Sprawdzanie kosztów..."
	infracost breakdown --path . --terraform-plan-flags "-out=tfplan"
	@echo "✅ Plan zakończony!"

apply: ## Zastosuj zmiany Terraform
	@echo "🚀 Stosowanie zmian Terraform..."
	terraform apply tfplan
	@echo "✅ Zastosowanie zakończone!"

destroy: ## Zniszcz infrastrukturę
	@echo "💥 Niszczenie infrastruktury..."
	@read -p "Czy jesteś pewien? [y/N] " confirm && \
	if [ "$$confirm" = "y" ]; then \
	terraform destroy -auto-approve; \
	fi

test: ## Uruchom testy
	@echo "🧪 Uruchamianie testów..."
	cd test && go test -v -timeout 30m ./...
	@echo "✅ Testy zakończone!"

docs: ## Generuj dokumentację
	@echo "📚 Generowanie dokumentacji..."
	terraform-docs markdown table --output-file README.md --output-mode inject .
	@echo "✅ Dokumentacja zakończona!"

# BUG FIX: the original clean target also ran
# `find . -type f -name "*.tfvars" -delete`, which irreversibly deletes
# version-controlled environment configuration. Only transient artifacts
# are removed now.
clean: ## Oczyść pliki
	@echo "🧹 Czyszczenie..."
	rm -rf .terraform tfplan* *.tfstate*
	@echo "✅ Czyszczenie zakończone!"
Okno terminala
# AI prompt: "Debug Terraform state issues"
# Typical state-lock error message:
Error: Error acquiring the state lock
ConflictException: Unable to acquire lock
# Solution 1: Force unlock (use with caution — only when no apply is running)
terraform force-unlock <LOCK_ID>
# Solution 2: Inspect the DynamoDB lock table for stale locks
aws dynamodb scan \
--table-name terraform-state-lock \
--filter-expression "attribute_exists(LockID)"
# Solution 3: Manually delete the stale lock item
aws dynamodb delete-item \
--table-name terraform-state-lock \
--key '{"LockID": {"S": "<LOCK_ID>"}}'
# Detect state drift without proposing changes
terraform plan -refresh-only
# Import existing resources into state
terraform import aws_instance.example i-1234567890abcdef0
# Move resources between state addresses
terraform state mv aws_instance.old aws_instance.new
# Remove resources from state (leaves real infrastructure untouched)
terraform state rm aws_instance.obsolete
# Back up and restore state
terraform state pull > backup.tfstate
terraform state push backup.tfstate
Okno terminala
# Enable verbose provider/core logging to a file
export TF_LOG=DEBUG
export TF_LOG_PATH=terraform.log
# Terraform console for interactively testing expressions
terraform console
> aws_instance.web.private_ip
> [for instance in aws_instance.web : instance.private_ip]
# Render the dependency graph as an image (requires graphviz `dot`)
terraform graph | dot -Tpng > graph.png
# Validate and filter only error-severity diagnostics
terraform validate -json | jq '.diagnostics[] | select(.severity=="error")'
# Inspect a resource's recorded attributes
terraform state show aws_instance.web
# List every resource tracked in state
terraform state list
# Refresh a single resource without touching the rest
terraform apply -refresh-only -target=aws_instance.web
# Debug provider issues: lock provider checksums for multiple platforms
terraform providers lock -platform=linux_amd64 -platform=darwin_amd64
# Check the active workspace
terraform workspace show
terraform workspace list
# Summarize the plan: each resource address and its planned actions
terraform show -json tfplan | jq '.resource_changes[] | {address: .address, actions: .change.actions}'