Manually provisioning CAPTCHA workers doesn't scale. Terraform lets you define your entire CAPTCHA solving infrastructure as code — version it, review it, replicate it across environments, and tear it down when you're done.
Architecture
terraform/
├── main.tf # Provider config
├── variables.tf # Input variables
├── outputs.tf # Output values
├── modules/
│ └── captcha-worker/
│ ├── main.tf # ECS/EC2 resources
│ ├── variables.tf # Module inputs
│ └── outputs.tf # Module outputs
├── environments/
│ ├── dev.tfvars
│ ├── staging.tfvars
│ └── production.tfvars
Core Terraform Configuration
Provider and Backend
# main.tf
# Root configuration: pins the Terraform and AWS provider versions, and keeps
# state remotely in S3 with DynamoDB locking so concurrent applies are safe.
terraform {
# Require Terraform 1.5 or newer.
required_version = ">= 1.5"
required_providers {
aws = {
source = "hashicorp/aws"
# Track the 5.x major release; minor/patch upgrades are allowed.
version = "~> 5.0"
}
}
# Remote state backend. NOTE(review): the S3 bucket and DynamoDB table must
# exist before `terraform init` — the backend cannot create them itself.
backend "s3" {
bucket = "my-terraform-state"
key = "captcha-workers/terraform.tfstate"
region = "us-east-1"
# DynamoDB table used for state locking (prevents concurrent state writes).
dynamodb_table = "terraform-locks"
# Encrypt the state file at rest (it can contain sensitive values).
encrypt = true
}
}
provider "aws" {
# Deployment region, supplied via var.aws_region (defaults to us-east-1).
region = var.aws_region
}
Variables
# variables.tf
# Input variables for the CAPTCHA worker stack. Per-environment values are
# supplied via environments/*.tfvars.

variable "aws_region" {
  description = "AWS region for deployment"
  type        = string
  default     = "us-east-1"
}

variable "environment" {
  description = "Environment name (dev, staging, production)"
  type        = string
}

variable "worker_count" {
  description = "Number of CAPTCHA solving workers"
  type        = number
  default     = 3
}

variable "worker_cpu" {
  description = "CPU units for each worker (1024 = 1 vCPU)"
  type        = number
  default     = 512
}

variable "worker_memory" {
  description = "Memory in MB for each worker"
  type        = number
  default     = 1024
}

variable "max_workers" {
  description = "Maximum workers for auto-scaling"
  type        = number
  default     = 10
}

variable "captchaai_concurrency" {
  description = "Concurrent CAPTCHA tasks per worker"
  type        = number
  default     = 10
}

# Fix: declared here because the ECS service's network_configuration
# references var.private_subnets, but no declaration previously existed —
# `terraform plan` would fail with "Reference to undeclared input variable".
variable "private_subnets" {
  description = "Private subnet IDs for the Fargate worker ENIs"
  type        = list(string)
  default     = []
}
Secrets Management
# secrets.tf — Store API key in AWS Secrets Manager
# Creates the secret container only; the secret VALUE must be written
# out-of-band (console/CLI) before the first apply, otherwise the ECS tasks
# fail to start with "secret not found" (see Troubleshooting).
resource "aws_secretsmanager_secret" "captchaai_api_key" {
name = "${var.environment}/captchaai-api-key"
description = "CaptchaAI API key for CAPTCHA solving workers"
}
# Reference secret in ECS task (never in plain text)
# NOTE(review): this data source is not referenced by the task definition
# shown here (which uses the resource ARN directly) — confirm it is needed
# elsewhere or remove it.
data "aws_secretsmanager_secret_version" "captchaai_api_key" {
secret_id = aws_secretsmanager_secret.captchaai_api_key.id
}
ECS Worker Cluster
# ecs.tf — Fargate-based CAPTCHA workers
# Cluster with Container Insights enabled for per-container CPU/memory metrics.
resource "aws_ecs_cluster" "captcha" {
name = "captcha-workers-${var.environment}"
setting {
name = "containerInsights"
value = "enabled"
}
}
# Task definition: one container per task, sized by var.worker_cpu/memory.
# NOTE(review): Fargate only accepts specific cpu/memory pairings — confirm
# the values in each tfvars file form a valid combination.
resource "aws_ecs_task_definition" "captcha_worker" {
family = "captcha-worker-${var.environment}"
network_mode = "awsvpc"
requires_compatibilities = ["FARGATE"]
cpu = var.worker_cpu
memory = var.worker_memory
execution_role_arn = aws_iam_role.ecs_execution.arn
task_role_arn = aws_iam_role.ecs_task.arn
container_definitions = jsonencode([
{
name = "captcha-worker"
# NOTE(review): ":latest" makes deploys non-deterministic — new tasks pick up
# whatever was last pushed. Prefer an immutable tag (git SHA) per release.
image = "${aws_ecr_repository.captcha_worker.repository_url}:latest"
environment = [
{ name = "CAPTCHAAI_CONCURRENCY", value = tostring(var.captchaai_concurrency) },
{ name = "CAPTCHAAI_POLL_INTERVAL", value = "5" },
{ name = "ENVIRONMENT", value = var.environment },
]
# API key is injected at runtime from Secrets Manager — never appears in the
# task definition JSON or in Terraform state as plain text.
secrets = [
{
name = "CAPTCHAAI_API_KEY"
valueFrom = aws_secretsmanager_secret.captchaai_api_key.arn
}
]
# Ship container stdout/stderr to CloudWatch Logs.
logConfiguration = {
logDriver = "awslogs"
options = {
"awslogs-group" = aws_cloudwatch_log_group.captcha.name
"awslogs-region" = var.aws_region
"awslogs-stream-prefix" = "worker"
}
}
}
])
}
# Service keeps var.worker_count tasks running in private subnets
# (auto-scaling below adjusts the desired count at runtime).
resource "aws_ecs_service" "captcha_worker" {
name = "captcha-workers"
cluster = aws_ecs_cluster.captcha.id
task_definition = aws_ecs_task_definition.captcha_worker.arn
desired_count = var.worker_count
launch_type = "FARGATE"
network_configuration {
subnets = var.private_subnets
security_groups = [aws_security_group.captcha_worker.id]
}
}
Auto-Scaling
# autoscaling.tf
# Registers the ECS service's DesiredCount as a scalable target, bounded by
# worker_count (floor) and max_workers (ceiling).
resource "aws_appautoscaling_target" "captcha" {
max_capacity = var.max_workers
min_capacity = var.worker_count
resource_id = "service/${aws_ecs_cluster.captcha.name}/${aws_ecs_service.captcha_worker.name}"
scalable_dimension = "ecs:service:DesiredCount"
service_namespace = "ecs"
}
# Scale up when queue is deep
# NOTE(review): StepScaling policies only execute when a CloudWatch alarm
# references them — no alarm is defined in this file, so these policies will
# never fire as-is (matches the Troubleshooting row below). Wire an alarm on
# your queue-depth metric to each policy ARN.
resource "aws_appautoscaling_policy" "scale_up" {
name = "captcha-scale-up"
policy_type = "StepScaling"
resource_id = aws_appautoscaling_target.captcha.resource_id
scalable_dimension = aws_appautoscaling_target.captcha.scalable_dimension
service_namespace = aws_appautoscaling_target.captcha.service_namespace
step_scaling_policy_configuration {
adjustment_type = "ChangeInCapacity"
# Wait 120s between scale-up actions to let new tasks absorb load.
cooldown = 120
step_adjustment {
# Add 2 workers whenever the alarm breaches above its threshold.
scaling_adjustment = 2
metric_interval_lower_bound = 0
}
}
}
# Scale down when idle
resource "aws_appautoscaling_policy" "scale_down" {
name = "captcha-scale-down"
policy_type = "StepScaling"
resource_id = aws_appautoscaling_target.captcha.resource_id
scalable_dimension = aws_appautoscaling_target.captcha.scalable_dimension
service_namespace = aws_appautoscaling_target.captcha.service_namespace
step_scaling_policy_configuration {
adjustment_type = "ChangeInCapacity"
# Longer cooldown (300s) so scale-in is more conservative than scale-out.
cooldown = 300
step_adjustment {
# Remove 1 worker at a time when below the alarm threshold.
scaling_adjustment = -1
metric_interval_upper_bound = 0
}
}
}
Per-Environment Variables
# environments/dev.tfvars
# Dev profile: one minimally-sized worker, small scaling ceiling — cheap to
# leave running and quick to tear down.
environment = "dev"
worker_count = 1
max_workers = 3
worker_cpu = 256
worker_memory = 512
captchaai_concurrency = 3
# environments/production.tfvars
# Production profile: 5 full-vCPU workers scaling up to 20, each handling
# 20 concurrent CAPTCHA tasks.
environment = "production"
worker_count = 5
max_workers = 20
worker_cpu = 1024
worker_memory = 2048
captchaai_concurrency = 20
Worker Application Code
"""captcha_worker.py — The container runs this."""
import os
import time
import signal
import requests
API_KEY = os.environ["CAPTCHAAI_API_KEY"]
CONCURRENCY = int(os.environ.get("CAPTCHAAI_CONCURRENCY", "10"))
POLL_INTERVAL = int(os.environ.get("CAPTCHAAI_POLL_INTERVAL", "5"))
running = True
def shutdown_handler(signum, frame):
global running
print("Graceful shutdown initiated")
running = False
signal.signal(signal.SIGTERM, shutdown_handler)
signal.signal(signal.SIGINT, shutdown_handler)
session = requests.Session()
def solve_captcha(sitekey, pageurl):
    """Solve a reCAPTCHA through the CaptchaAI API.

    Submits the sitekey/page URL to in.php, then polls res.php every
    POLL_INTERVAL seconds (up to 60 attempts) until a solution is ready.

    Args:
        sitekey: The target site's reCAPTCHA site key.
        pageurl: URL of the page hosting the CAPTCHA.

    Returns:
        dict: {"solution": token} on success, or {"error": reason} on any
        API error, network failure, or {"error": "TIMEOUT"} after 60 polls.
    """
    # Fix: both HTTP calls previously had no timeout — a hung connection
    # would stall the worker forever — and network/JSON errors were
    # unhandled, crashing the process instead of failing one task.
    try:
        resp = session.post("https://ocr.captchaai.com/in.php", data={
            "key": API_KEY,
            "method": "userrecaptcha",
            "googlekey": sitekey,
            "pageurl": pageurl,
            "json": 1
        }, timeout=30)
        data = resp.json()
    except (requests.RequestException, ValueError) as exc:
        return {"error": f"SUBMIT_FAILED: {exc}"}
    if data.get("status") != 1:
        return {"error": data.get("request")}
    captcha_id = data["request"]
    # Poll until solved, terminally failed, or out of attempts.
    for _ in range(60):
        time.sleep(POLL_INTERVAL)
        try:
            result = session.get("https://ocr.captchaai.com/res.php", params={
                "key": API_KEY, "action": "get", "id": captcha_id, "json": 1
            }, timeout=30).json()
        except (requests.RequestException, ValueError) as exc:
            return {"error": f"POLL_FAILED: {exc}"}
        if result.get("status") == 1:
            return {"solution": result["request"]}
        # Any response other than "not ready yet" is a terminal API error.
        if result.get("request") != "CAPCHA_NOT_READY":
            return {"error": result.get("request")}
    return {"error": "TIMEOUT"}
# Main loop — pull tasks from SQS or Redis
# Runs until the signal handler flips `running`; the 1s sleep is a
# placeholder for your queue-polling logic.
print(f"Worker started: concurrency={CONCURRENCY}")
while running:
    # Pull tasks from your queue here
    time.sleep(1)
# Reached only after a graceful SIGTERM/SIGINT shutdown.
print("Worker shutdown complete")
Deployment Commands
# Initialize
terraform init
# Plan for production
terraform plan -var-file=environments/production.tfvars
# Apply
terraform apply -var-file=environments/production.tfvars
# Destroy (dev cleanup)
terraform destroy -var-file=environments/dev.tfvars
Troubleshooting
| Issue | Cause | Fix |
|---|---|---|
| Secret not found at deploy | Secret not yet populated | Create secret value before terraform apply |
| Workers crash on start | Missing env vars or wrong image | Check CloudWatch logs; verify ECR image tag |
| Auto-scaling not triggering | Missing CloudWatch alarm or wrong metric | Verify alarm ARN in scaling policy |
| State lock error | Previous apply interrupted | Remove lock: terraform force-unlock <lock-id> |
FAQ
Should I use Fargate or EC2 for CAPTCHA workers?
Fargate for simplicity and auto-scaling. EC2 for cost optimization at steady-state (reserved instances). Most teams start with Fargate and move high-volume workloads to EC2.
How do I handle multiple environments?
Use .tfvars files per environment and separate state files. Use workspaces or separate state backends to isolate dev/staging/production.
Can I use Terraform with GCP or Azure instead?
Yes. Replace the AWS provider and resources with their GCP (Cloud Run, GKE) or Azure (Container Instances, AKS) equivalents. The module structure stays the same.
Next Steps
Codify your CAPTCHA infrastructure — get your CaptchaAI API key and deploy with Terraform.
Related guides:
Discussions (0)
Join the conversation
Sign in to share your opinion.
Sign In. No comments yet.