From 8288d5ea271d73bfbb23750a075a3721e76bb037 Mon Sep 17 00:00:00 2001 From: Emika Hammond Date: Wed, 4 Feb 2026 10:39:58 -0500 Subject: [PATCH] update example to use port 8080 --- .../vpc-proxied-explicit-classic/README.md | 75 +++++ .../assets/Caddyfile.tpl | 26 ++ .../assets/mitmproxy-sysctl.conf | 3 + .../assets/mitmproxy.service.tpl | 13 + .../assets/userdata.yaml.tpl | 60 ++++ .../vpc-proxied-explicit-classic/main.tf | 266 ++++++++++++++++++ .../terraform.tfvars.example | 68 +++++ .../vpc-proxied-explicit-classic/variables.tf | 75 +++++ 8 files changed, 586 insertions(+) create mode 100644 examples/aws/terraform/vpc-proxied-explicit-classic/README.md create mode 100644 examples/aws/terraform/vpc-proxied-explicit-classic/assets/Caddyfile.tpl create mode 100644 examples/aws/terraform/vpc-proxied-explicit-classic/assets/mitmproxy-sysctl.conf create mode 100644 examples/aws/terraform/vpc-proxied-explicit-classic/assets/mitmproxy.service.tpl create mode 100644 examples/aws/terraform/vpc-proxied-explicit-classic/assets/userdata.yaml.tpl create mode 100644 examples/aws/terraform/vpc-proxied-explicit-classic/main.tf create mode 100644 examples/aws/terraform/vpc-proxied-explicit-classic/terraform.tfvars.example create mode 100644 examples/aws/terraform/vpc-proxied-explicit-classic/variables.tf diff --git a/examples/aws/terraform/vpc-proxied-explicit-classic/README.md b/examples/aws/terraform/vpc-proxied-explicit-classic/README.md new file mode 100644 index 00000000..640dd022 --- /dev/null +++ b/examples/aws/terraform/vpc-proxied-explicit-classic/README.md @@ -0,0 +1,75 @@ +# Setting up an explicitly (non-transparently) proxied VPC +The Terraform scripts in this directory will deploy an AWS VPC with a [mitmproxy](https://mitmproxy.org/)-based explicit (non-transparent) HTTP(S) proxy. 
By definition, explicit proxies require client applications to be explicitly configured to use the proxy, usually through the use of environmental variables such as `HTTP_PROXY` and `https_proxy`. If you're looking for the kind of proxy that doesn't require clients to know the proxy address, see [../vpc-proxied-transparent](../vpc-proxied-transparent/). + +## Prerequisites + * Terraform or OpenTofu + * An AWS account with a credentials profile saved to "~/.aws/credentials" + +## Setup + +> [!IMPORTANT] +> The proxied subnet set up by this script will be configured to allow only HTTP[S] (i.e., TCP ports 80 & 443) egress via the generated proxy server. IOW, **anything you launch in the proxied subnet will not be able to connect to the internet unless the application is configured to use the proxy server**. Same goes for any application using ports other than 80 or 443 (e.g., Splunk inputs). You can partially bypass this behavior by adding CIDR blocks covering the destination IPs you'd like to access unproxied to `proxied_subnet_escape_routes` in your terraform.tfvars, which will add a NAT gateway to the proxied subnet and adjust its routing table accordingly. + +1. Clone this repo and `cd` into this directory +2. Run `terraform init` +3. Copy/rename "terraform.tfvars.example" to "terraform.tfvars" and fill in the values according to the comments +4. Run `terraform apply` +5. Once you see "Apply complete!", wait an additional 3-5 minutes for the proxy server to initialize +6. Download the proxy's CA cert using the following command +```bash +curl --insecure $(terraform output -raw proxy_machine_cert_url) -o ./cacert.pem +``` + +And that's it! You may now launch EC2 instances in the "proxied" subnet (`terraform output proxied_subnet_id`) and configure applications on that instance to use your shiny new proxy server using the `http[s]_proxy_var` outputs that were printed at the end of the `terraform apply`. 
For example, assuming you spun up a RHEL instance with SSH access in your proxied subnet, you might do something like the following. _Note: if your only goal is to test the network verifier against a proxy, you can skip this conceptual example._ +```bash +# On your local machine, in the same dir where you ran `terraform apply` +localhost$ echo "http_proxy='$(terraform output -raw http_proxy_var)' HTTPS_PROXY='$(terraform output -raw https_proxy_var)'" +http_proxy='http://123.0.0.10:8080' HTTPS_PROXY='https://123.0.0.10:8080' # Copy this output line to your clipboard +# Upload the CA cert into the RHEL instance you launched in the proxied subnet +localhost$ scp ./cacert.pem ec2-user@my-proxied-instance-public-hostname.com:/home/ec2-user/ +# SSH into that RHEL instance +localhost$ ssh ec2-user@my-proxied-instance-public-hostname.com +# Now run something like curl, prepending the command with the line you copied earlier +# Don't forget to tell whatever you're running about the CA cert +my-proxied-instance$ http_proxy='http://123.0.0.10:8080' HTTPS_PROXY='https://123.0.0.10:8080' curl -vv --proxy-cacert ~/cacert.pem https://example.com +[verbose curl output showing connection to the proxy] +``` + +Regardless of what application you're running, be sure to add the CA cert you downloaded to your proxied clients' trust store to avoid certificate errors, and be sure to set the necessary `HTTP[S]_PROXY` environmental variables or CLI flags. You [might](https://superuser.com/q/944958) need to set both lowercase and uppercase versions of said environmental variables. + +> [!TIP] +> Run `terraform apply` again after making any changes to the files in this repo. Your proxy EC2 instance will probably be destroyed and recreated in the process, resulting in new IP addresses, CA certs, and passwords. 
+ +## Usage +### Launch the network verifier in the proxied subnet +Run the following command on your workstation to launch an EC2 VM that will make a series of HTTPS requests that will be explicitly proxied. Be sure to replace `default` with the name of your AWS credentials profile (see `profile` in "terraform.tfvars"). +```bash +osd-network-verifier egress --profile=default --subnet-id=$(terraform output -raw proxied_subnet_id) --region=$(terraform output -raw region) --cacert=cacert.pem --http-proxy="$(terraform output -raw http_proxy_var)" --https-proxy="$(terraform output -raw https_proxy_var)" +``` +Remember that non-HTTP(S) connections are expected to fail, so you can safely ignore the verifier reporting that the Splunk input endpoints (which use port 9997) are blocked. + +### View/manipulate traffic flowing through the proxy +> [!NOTE] +> The proxy webUI is HTTPS-secured but uses a runtime-generated self-signed certificate. As a result, you'll probably have to click-past some scary browser warnings (usually under "Advanced > Proceed to [...] (unsafe)"). This is also why we have to use curl's `--insecure` flag when downloading the proxy CA cert (which is unrelated to the webUI's self-signed cert). + +Run the following command to print credentials you can use to access the mitmproxy's webUI in your browser. +```bash +for V in url username password; do echo "$V: $(terraform output -raw proxy_webui_${V})"; done +``` +If you're having trouble connecting to the webUI (other than certificate warnings; see above note), try disabling any VPNs or browser proxy extensions/configurations. Also ensure that your workstation's IP address is covered by the value you set for `developer_cidr_block` in "terraform.tfvars". As an insecure last resort, you can set `developer_cidr_block` to "0.0.0.0/0" to allow the entire internet to access your proxy machine. + +### SSH into the proxy machine +Run the following command to log into the RHEL 9 machine hosting the proxy server. 
Add `-i [path to your private key]` to the command if the `proxy_machine_ssh_pubkey` you provided in "terraform.tfvars" does not correspond to your default private key (usually "~/.ssh/id_rsa"). See the paragraph above if you encounter connection issues. +```bash +ssh $(terraform output -raw proxy_machine_ssh_url) +``` +Once logged in, you can see the status of the proxy server using `sudo systemctl status mitmproxy`. The proxy's webUI is running on port 8081, but traffic from the outside world is reverse-proxied through [Caddy](https://caddyserver.com/) (via port 8443) first; you can check its status using `sudo systemctl status caddy`. + +Remember that the proxy machine (and therefore changes you make to it via SSH) will likely be destroyed next time you run `terraform apply`. To make your changes more durable, add commands or [cloud-init](https://cloudinit.readthedocs.io/en/latest/reference/modules.html) directives to [assets/userdata.yaml.tpl](assets/userdata.yaml.tpl). + +## Cleanup +To delete the proxy server, the surrounding subnets/VPC, and all other AWS resources created by this script, simply run `terraform destroy`. 
+ + + + diff --git a/examples/aws/terraform/vpc-proxied-explicit-classic/assets/Caddyfile.tpl b/examples/aws/terraform/vpc-proxied-explicit-classic/assets/Caddyfile.tpl new file mode 100644 index 00000000..660410ee --- /dev/null +++ b/examples/aws/terraform/vpc-proxied-explicit-classic/assets/Caddyfile.tpl @@ -0,0 +1,26 @@ +{ + skip_install_trust + servers { + protocols h1 h2 + } +} +:8443 { + tls internal { + on_demand + } + basic_auth { + ${proxy_webui_username} ${proxy_webui_password_hash} + } + handle /mitmproxy-ca-cert.pem { + root * /usr/share/caddy + file_server + } + handle { + reverse_proxy 127.0.0.1:8081 { + header_up Host "127.0.0.1:8081" + header_up Origin "http://127.0.0.1:8081" + header_up -X-Frame-Options + header_up Authorization "Bearer ${proxy_webui_password}" + } + } +} diff --git a/examples/aws/terraform/vpc-proxied-explicit-classic/assets/mitmproxy-sysctl.conf b/examples/aws/terraform/vpc-proxied-explicit-classic/assets/mitmproxy-sysctl.conf new file mode 100644 index 00000000..b2a9edb0 --- /dev/null +++ b/examples/aws/terraform/vpc-proxied-explicit-classic/assets/mitmproxy-sysctl.conf @@ -0,0 +1,3 @@ +net.ipv4.ip_forward = 1 +net.ipv6.conf.all.forwarding = 1 +net.ipv4.conf.all.send_redirects = 0 \ No newline at end of file diff --git a/examples/aws/terraform/vpc-proxied-explicit-classic/assets/mitmproxy.service.tpl b/examples/aws/terraform/vpc-proxied-explicit-classic/assets/mitmproxy.service.tpl new file mode 100644 index 00000000..90589172 --- /dev/null +++ b/examples/aws/terraform/vpc-proxied-explicit-classic/assets/mitmproxy.service.tpl @@ -0,0 +1,13 @@ +[Unit] +Description=mitmproxy web daemon +After=network.target +Wants=network.target iptables.service + +[Service] +Type=simple +User=root +ExecStart=/usr/bin/mitmweb --web-port=8081 --web-host=0.0.0.0 --no-web-open-browser --showhost --set web_password="${proxy_webui_password}" +Restart=always + +[Install] +WantedBy=multi-user.target diff --git 
a/examples/aws/terraform/vpc-proxied-explicit-classic/assets/userdata.yaml.tpl b/examples/aws/terraform/vpc-proxied-explicit-classic/assets/userdata.yaml.tpl new file mode 100644 index 00000000..377aba5e --- /dev/null +++ b/examples/aws/terraform/vpc-proxied-explicit-classic/assets/userdata.yaml.tpl @@ -0,0 +1,60 @@ +#cloud-config +bootcmd: +- touch /var/run/reboot-required +yum_repos: + caddy: + name: Copr repo for caddy owned by @caddy + baseurl: https://download.copr.fedorainfracloud.org/results/@caddy/caddy/epel-9-$basearch/ + type: rpm-md + skip_if_unavailable: true + gpgcheck: 1 + gpgkey: https://download.copr.fedorainfracloud.org/results/@caddy/caddy/pubkey.gpg + repo_gpgcheck: 0 + enabled: true + enabled_metadata: 1 +packages: + - iptables + - iptables-nft-services + - caddy +package_reboot_if_required: true +write_files: +- encoding: b64 + content: ${mitmproxy_sysctl_b64} + owner: root:root + path: /etc/sysctl.d/mitmproxy.conf + permissions: '0644' +- encoding: b64 + content: ${mitmproxy_service_b64} + owner: root:root + path: /etc/systemd/system/mitmproxy.service + permissions: '0644' +- encoding: b64 + content: ${caddyfile_b64} + owner: root:root + path: /etc/caddy/Caddyfile + permissions: '0644' +runcmd: +- sysctl -p /etc/sysctl.d/mitmproxy.conf +- curl -s https://downloads.mitmproxy.org/12.1.2/mitmproxy-12.1.2-linux-x86_64.tar.gz -o - | tar -C /usr/bin/ -xzf - +- iptables -F +- iptables -X +- iptables -t nat -F +- iptables -t nat -X +- iptables -t mangle -F +- iptables -t mangle -X +- iptables -t raw -F +- iptables -t raw -X +- iptables -t security -F +- iptables -t security -X +- iptables -P INPUT ACCEPT +- iptables -P FORWARD ACCEPT +- iptables -P OUTPUT ACCEPT +- iptables -t nat -A PREROUTING -i ens5 -p tcp --dport 80 -j REDIRECT --to-port 8080 +- iptables -t nat -A PREROUTING -i ens5 -p tcp --dport 443 -j REDIRECT --to-port 8080 +- ip6tables -t nat -A PREROUTING -i ens5 -p tcp --dport 80 -j REDIRECT --to-port 8080 +- ip6tables -t nat -A PREROUTING 
-i ens5 -p tcp --dport 443 -j REDIRECT --to-port 8080 +- /sbin/iptables-save > /etc/sysconfig/iptables +- /sbin/ip6tables-save > /etc/sysconfig/ip6tables +- systemctl daemon-reload +- systemctl enable --now iptables.service mitmproxy.service caddy.service +- sleep 10 && cp ~/.mitmproxy/mitmproxy-ca-cert.pem /usr/share/caddy/ && chmod -R 755 /usr/share/caddy/mitmproxy-ca-cert.pem diff --git a/examples/aws/terraform/vpc-proxied-explicit-classic/main.tf b/examples/aws/terraform/vpc-proxied-explicit-classic/main.tf new file mode 100644 index 00000000..1959a9bd --- /dev/null +++ b/examples/aws/terraform/vpc-proxied-explicit-classic/main.tf @@ -0,0 +1,266 @@ +# AWS provider configuration +provider "aws" { + profile = var.profile # AWS profile + region = var.region # AWS region + ignore_tags { + key_prefixes = ["kubernetes.io", "openshift"] + } +} + +## RESOURCES +# Create a VPC +resource "aws_vpc" "main" { + cidr_block = var.vpc_cidr_block + enable_dns_support = true + enable_dns_hostnames = true + tags = { Name = "${var.name_prefix}-vpc" } +} + +# Create an Internet Gateway and attach it to the VPC +resource "aws_internet_gateway" "igw" { + vpc_id = aws_vpc.main.id + tags = { Name = "${var.name_prefix}-igw" } +} + +# Create a public subnet within the VPC (where the proxy machine will live) +resource "aws_subnet" "public" { + vpc_id = aws_vpc.main.id + cidr_block = var.public_subnet_cidr_block # CIDR block for public subnet + availability_zone = var.availability_zone + map_public_ip_on_launch = true + tags = { Name = "${var.name_prefix}-public" } +} + +# Create a route table for the public subnet +resource "aws_route_table" "public" { + vpc_id = aws_vpc.main.id + tags = { Name = "${var.name_prefix}-public-rtb" } + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.igw.id + } +} + +# Associate the public subnet with its route table (skipping this because default RTB is fine for public sub) +resource "aws_route_table_association" "public" { + 
subnet_id = aws_subnet.public.id + route_table_id = aws_route_table.public.id +} + +# Create a security group for the proxy machine (rules below) +resource "aws_security_group" "proxy_machine_sg" { + name_prefix = var.name_prefix + description = "Allow all outbound traffic and inbound traffic from proxied subnet or developer SSH client" + vpc_id = aws_vpc.main.id +} +# proxy_machine_sg: Allow ONLY web proxy traffic from the proxied subnet + +resource "aws_vpc_security_group_ingress_rule" "allow_proxy_explicit" { + security_group_id = aws_security_group.proxy_machine_sg.id + cidr_ipv4 = aws_subnet.proxied.cidr_block + from_port = 8080 + to_port = 8080 + ip_protocol = "tcp" +} + +# proxy_machine_sg: Allow SSH traffic from the developer's IP +resource "aws_vpc_security_group_ingress_rule" "allow_developer_ssh" { + security_group_id = aws_security_group.proxy_machine_sg.id + cidr_ipv4 = var.developer_cidr_block + from_port = 22 + to_port = 22 + ip_protocol = "tcp" +} +# proxy_machine_sg: Allow webUI traffic from the developer's IP +resource "aws_vpc_security_group_ingress_rule" "allow_developer_webui" { + security_group_id = aws_security_group.proxy_machine_sg.id + cidr_ipv4 = var.developer_cidr_block + from_port = 8443 + to_port = 8443 + ip_protocol = "tcp" +} +# proxy_machine_sg: Allow all IPv4 egress traffic from the proxy +resource "aws_vpc_security_group_egress_rule" "allow_all_traffic_ipv4" { + security_group_id = aws_security_group.proxy_machine_sg.id + cidr_ipv4 = "0.0.0.0/0" + ip_protocol = "-1" # semantically equivalent to all ports +} +# proxy_machine_sg: Allow all IPv6 egress traffic from the proxy +resource "aws_vpc_security_group_egress_rule" "allow_all_traffic_ipv6" { + security_group_id = aws_security_group.proxy_machine_sg.id + cidr_ipv6 = "::/0" + ip_protocol = "-1" # semantically equivalent to all ports +} +# End proxy_machine_sg rules + +# Create an SSH keypair that the user can use for debugging the proxy_machine +resource "aws_key_pair" 
"proxy_machine_key" { + key_name_prefix = var.name_prefix + public_key = var.proxy_machine_ssh_pubkey +} + +# Generate a random password for the proxy web UI +resource "random_password" "proxy_webui_password" { + length = 16 + special = false +} + +# Create the proxy EC2 instance inside the public subnet +resource "aws_instance" "proxy_machine" { + ami = data.aws_ami.rhel10.id + instance_type = "t3.micro" + key_name = aws_key_pair.proxy_machine_key.key_name # SSH key for debugging + availability_zone = var.availability_zone + tags = { Name = "${var.name_prefix}-proxy-machine" } + + user_data = templatefile( + "assets/userdata.yaml.tpl", + { + mitmproxy_sysctl_b64 = filebase64("assets/mitmproxy-sysctl.conf") + mitmproxy_service_b64 = base64encode(templatefile( + "assets/mitmproxy.service.tpl", + { + proxy_webui_password = random_password.proxy_webui_password.result + } + )) + caddyfile_b64 = base64encode(templatefile( + "assets/Caddyfile.tpl", + { + proxy_webui_password = random_password.proxy_webui_password.result + proxy_webui_password_hash = random_password.proxy_webui_password.bcrypt_hash + proxy_webui_username = var.proxy_webui_username + } + )) + } + ) + user_data_replace_on_change = true # Destroy and re-create this instance if user-data.yaml changes + + subnet_id = aws_subnet.public.id + vpc_security_group_ids = [aws_security_group.proxy_machine_sg.id] + associate_public_ip_address = true # Necessary b/c we're not using a NAT gateway + source_dest_check = false # Critical for correct routing +} + +# Create a proxied subnet (where the test/"captive" machines will live) +resource "aws_subnet" "proxied" { + vpc_id = aws_vpc.main.id + availability_zone = var.availability_zone + cidr_block = var.proxied_subnet_cidr_block + tags = { Name = "${var.name_prefix}-proxied" } +} + +# If any escape routes specified, create an unproxied NAT gateway +resource "aws_eip" "escape_nat_eip" { + count = min(1, length(var.proxied_subnet_escape_routes)) + domain = "vpc" + tags = { 
Name = "${var.name_prefix}-escape-nat-eip" } + depends_on = [aws_internet_gateway.igw] +} +resource "aws_nat_gateway" "escape_nat_gw" { + count = min(1, length(var.proxied_subnet_escape_routes)) + allocation_id = aws_eip.escape_nat_eip[0].id + subnet_id = aws_subnet.public.id + tags = { Name = "${var.name_prefix}-escape-nat-gw" } + depends_on = [aws_internet_gateway.igw] +} + +# Create a route table for the proxied subnet that routes all traffic into the proxy_machine, +# except for traffic destined for an IP covered by var.proxied_subnet_escape_routes +resource "aws_route_table" "proxied" { + vpc_id = aws_vpc.main.id + tags = { Name = "${var.name_prefix}-proxied-rtb" } + + dynamic "route" { + for_each = toset(var.proxied_subnet_escape_routes) + content { + cidr_block = route.value + gateway_id = aws_nat_gateway.escape_nat_gw[0].id + } + } +} + +# Associate the proxied subnet with its route table +resource "aws_route_table_association" "proxied" { + subnet_id = aws_subnet.proxied.id # ID of proxied subnet + route_table_id = aws_route_table.proxied.id # ID of proxied route table +} + +## OUTPUTS +output "region" { + description = "VPC region" + value = data.aws_region.current.id +} +output "proxy_machine_instance_id" { + description = "Proxy machine instance ID" + value = aws_instance.proxy_machine.id +} +output "proxy_machine_ssh_url" { + description = "SSH URL for logging into the proxy machine" + value = "ssh://ec2-user@${aws_instance.proxy_machine.public_ip}" +} +output "proxy_webui_url" { + description = "URL for accessing the proxy webUI (available in 2-5 minutes)" + value = "https://${aws_instance.proxy_machine.public_ip}:8443/" +} +output "proxy_webui_username" { + description = "Proxy webUI username" + value = var.proxy_webui_username +} +output "proxy_webui_password" { + description = "Proxy webUI password" + value = random_password.proxy_webui_password.result + sensitive = true +} +output "proxy_machine_cert_url" { + description = "Credentialed URL for 
downloading the proxy CA cert (available in 2-5 minutes)" + value = "https://${var.proxy_webui_username}:${random_password.proxy_webui_password.result}@${aws_instance.proxy_machine.public_ip}:8443/mitmproxy-ca-cert.pem" + sensitive = true +} +output "public_subnet_id" { + description = "Public subnet ID" + value = aws_subnet.public.id +} +output "proxied_subnet_id" { + description = "Proxied subnet ID (launch your test/'captive' instances here)" + value = aws_subnet.proxied.id +} +output "http_proxy_var" { + description = "value for http_proxy environmental variable" + value = "http://${aws_instance.proxy_machine.private_ip}:8080" +} +output "https_proxy_var" { + description = "value for HTTPS_PROXY environmental variable" + value = "https://${aws_instance.proxy_machine.private_ip}:8080" +} + +## DATA +# Get the current AWS region +data "aws_region" "current" {} + +# Automatic lookup of the latest official RHEL 10 AMI +data "aws_ami" "rhel10" { + most_recent = true + + filter { + name = "platform-details" + values = ["Red Hat Enterprise Linux"] + } + + filter { + name = "architecture" + values = ["x86_64"] + } + + filter { + name = "root-device-type" + values = ["ebs"] + } + + filter { + name = "manifest-location" + values = ["amazon/RHEL-10.*_HVM-*-x86_64-*-Hourly2-GP3"] + } + + owners = ["309956199498"] # Amazon's "Official Red Hat" account +} \ No newline at end of file diff --git a/examples/aws/terraform/vpc-proxied-explicit-classic/terraform.tfvars.example b/examples/aws/terraform/vpc-proxied-explicit-classic/terraform.tfvars.example new file mode 100644 index 00000000..3debbe88 --- /dev/null +++ b/examples/aws/terraform/vpc-proxied-explicit-classic/terraform.tfvars.example @@ -0,0 +1,68 @@ +## Sample Variables +## Fill in the values below and rename me to "terraform.tfvars" + +# Your AWS Profile +# This profile should be configured in your AWS credentials file, typically located at ~/.aws/credentials on Unix-based systems and 
C:\Users\USERNAME\.aws\credentials on Windows. +# The profile configuration should look something like this: +# +# [default] +# aws_access_key_id = YOUR_ACCESS_KEY +# aws_secret_access_key = YOUR_SECRET_KEY +# +profile = "default" + +# The AWS region where you want to create your resources +region = "us-east-1" + +# The availability zone within the region where you want to create your subnets +availability_zone = "us-east-1a" + +# The CIDR block for your VPC +vpc_cidr_block = "10.0.0.0/16" + +# The CIDR block for your public subnet within your VPC +public_subnet_cidr_block = "10.0.0.0/24" + +# The CIDR block for your proxied subnet within your VPC +proxied_subnet_cidr_block = "10.0.1.0/24" + +# (optional) A list of CIDR blocks for which you'd like to create "proxy escape routes", +# i.e., routes that allow clients in your proxied subnet to connect to certain websites +# directly without going through the proxy (e.g., via NO_PROXY env var). If you're interested +# in NO_PROXY-ing a list of numbered domain names, for example, try using the following bash +# one-liner to generate a list of CIDRs that cover all of the domain names +# echo -n \[; for IP in $(dig +short $(seq -f"inputs%g.osdsecuritylogs.splunkcloud.com" -s" " 1 15) | sort -u); do echo -n "\"$IP/32\", "; done; echo -e "\b\b]" +# You can also just allow all traffic to escape the proxy by setting this to ["0.0.0.0/0"]. +# +# Note that any escape routes listed here bypass the proxy machine entirely: +# traffic to these destinations leaves the proxied subnet through a NAT +# gateway instead, so it is neither inspected nor logged by mitmproxy. +# +# If you do add escape routes, destinations worth considering include:
+#   - your VPC's CIDR block (e.g. "10.0.0.0/16"), so that intra-VPC +#     traffic is never routed through the NAT gateway +#   - your developer_cidr_block (see below), so that debugging traffic +#     to/from your workstation is unaffected +# +# Example: +# proxied_subnet_escape_routes = [ +#   "10.0.0.0/16", +#   "123.123.123.123/32", +# ] +# +# Remember that clients in the proxied subnet must still be configured +# (e.g., via the NO_PROXY environment variable) to connect directly to +# these destinations instead of asking the proxy to fetch them. +# +# The default below sends everything through the proxy: +proxied_subnet_escape_routes = [] + +# A prefix to add to the Name tag associated with most of the resources created by these scripts +name_prefix = "explicit-proxy-classic" + +# SSH public key to use for ec2-user@proxy-machine +proxy_machine_ssh_pubkey = "ssh-rsa AAAAB3N...SrbX8ZbabVohBK41 replaceme@example.com" + +# A CIDR block containing your workstation's IP, for SSH/webUI access to the proxy machine for debugging. +# Running "echo $(curl -s ipv4.icanhazip.com)/32" should produce a sane default value +developer_cidr_block = "123.123.123.123/32" \ No newline at end of file diff --git a/examples/aws/terraform/vpc-proxied-explicit-classic/variables.tf b/examples/aws/terraform/vpc-proxied-explicit-classic/variables.tf new file mode 100644 index 00000000..e0b0594a --- /dev/null +++ b/examples/aws/terraform/vpc-proxied-explicit-classic/variables.tf @@ -0,0 +1,75 @@ +# AWS Profile to use +variable "profile" { + description = "AWS Profile to use" + type = string + default = "default" +} + +# The AWS Region where resources will be created +variable "region" { + description = "AWS Region" + type = string + default = "us-east-1" # Default to US East (N. Virginia) +} + +# The availability zone where the subnets will be created +variable "availability_zone" { + description = "The availability zone where the subnets will be created" + type = string + default = "us-east-1a" # Default to US East (N.
Virginia) AZ a +} + +# CIDR block for the VPC +variable "vpc_cidr_block" { + description = "CIDR block for the VPC" + type = string + default = "10.0.0.0/16" # Default to a /16 block within the 10.0.0.0 private network +} + +# CIDR block for the public subnet +variable "public_subnet_cidr_block" { + description = "CIDR block for the public subnet" + type = string + default = "10.0.0.0/24" # Default to a /24 block within the 10.0.0.0 private network +} + +# CIDR block for the private subnet +variable "proxied_subnet_cidr_block" { + description = "CIDR block for the proxied subnet" + type = string + default = "10.0.1.0/24" # Default to a /24 block within the 10.0.0.0 private network +} + +# Optional destination CIDR blocks for which proxy will be bypassed +variable "proxied_subnet_escape_routes" { + description = "CIDR blocks that proxied clients can connect to directly" + type = list(string) + default = [] +} + +# User/developer's CIDR block for SSH/webUI access to the proxy machine for debugging +variable "developer_cidr_block" { + description = "A CIDR block containing your workstation's IP, for SSH/webUI access to the proxy machine for debugging. The output of 'echo $(curl -s ipv4.icanhazip.com)/32' is a sane default" + type = string +} + +# Username to use for the proxy machine web UI +variable "proxy_webui_username" { + description = "Username to use for the proxy machine web UI" + type = string + default = "developer" +} + +# Prefix to add to name tags +variable "name_prefix" { + description = "prefix to add to the Name tag associated with most of the resources created by these scripts" + type = string + default = "explicit-proxy-" +} + +# SSH public key to use for ec2-user@proxy-machine +variable "proxy_machine_ssh_pubkey" { + description = "SSH public key to use for ec2-user@proxy-machine" + type = string +} +