API Tutorials

Building a CaptchaAI Usage Dashboard and Monitoring

Tracking your CAPTCHA solving metrics helps optimize costs, detect issues early, and plan capacity. This guide builds a monitoring system that logs every solve and generates usage reports.


What to Monitor

Metric Why It Matters
Solve count Track usage volume
Success rate Detect quality issues
Response time Identify slowdowns
Spending rate Budget management
Error distribution Debug failing patterns
Balance Prevent outages
Method breakdown Understand usage patterns

Metrics Collector

import time
import csv
import datetime
import threading
from collections import defaultdict


class MetricsCollector:
    """Collect and store CaptchaAI solve metrics.

    Keeps running per-method counters for the current session and
    appends one CSV row per recorded attempt. A single lock guards both
    the counters and the file append, so one collector can be shared
    across threads.
    """

    def __init__(self, log_file="captchaai_metrics.csv"):
        self.log_file = log_file
        self.lock = threading.Lock()
        # Per-method running totals; live for this process lifetime only.
        self.session_stats = defaultdict(
            lambda: dict.fromkeys(
                ("count", "success", "error", "timeout", "total_time"), 0
            )
        )
        self._init_log()

    def _init_log(self):
        """Create the CSV log with a header row if it does not exist yet."""
        try:
            open(self.log_file, "r").close()
            return  # log already present -- keep its contents
        except FileNotFoundError:
            pass
        with open(self.log_file, "w", newline="") as f:
            csv.writer(f).writerow([
                "timestamp", "method", "duration_s",
                "status", "error_code", "task_id",
            ])

    def record(self, method, duration, status, error_code="", task_id=""):
        """Record one solve attempt in memory and append it to the CSV log.

        Args:
            method: CaptchaAI method name (e.g. "userrecaptcha").
            duration: Wall-clock seconds the attempt took.
            status: "success", "timeout", or anything else (counted as error).
            error_code: Short error identifier, if any.
            task_id: CaptchaAI task id, if the submit succeeded.
        """
        bucket = status if status in ("success", "timeout") else "error"
        with self.lock:
            stats = self.session_stats[method]
            stats["count"] += 1
            stats["total_time"] += duration
            stats[bucket] += 1

            # Append the raw attempt so offline reports can re-aggregate it.
            with open(self.log_file, "a", newline="") as f:
                csv.writer(f).writerow([
                    datetime.datetime.utcnow().isoformat(),
                    method, f"{duration:.2f}",
                    status, error_code, task_id,
                ])

    def get_session_summary(self):
        """Return {method: formatted stats dict} for this session so far."""
        summary = {}
        for method, stats in self.session_stats.items():
            n = stats["count"]
            avg_time = stats["total_time"] / n if n > 0 else 0
            success_rate = stats["success"] / n * 100 if n > 0 else 0
            summary[method] = {
                "total": n,
                "success": stats["success"],
                "errors": stats["error"],
                "timeouts": stats["timeout"],
                "success_rate": f"{success_rate:.1f}%",
                "avg_time": f"{avg_time:.1f}s",
            }
        return summary

Instrumented Solver

Wrap your solver to automatically collect metrics:

import requests
import time


class MonitoredSolver:
    """CaptchaAI solver that records a metric row for every solve attempt."""

    def __init__(self, api_key, metrics=None):
        self.api_key = api_key
        self.base = "https://ocr.captchaai.com"
        # Fall back to a fresh collector so instrumentation is always on.
        self.metrics = metrics or MetricsCollector()

    def solve(self, method, **params):
        """Submit a CAPTCHA, poll until solved, and record the outcome.

        Returns the solve token. Raises on submit errors, solve errors,
        or poll timeout; the metric is recorded in every case via the
        finally block.
        """
        started = time.time()
        outcome = "error"
        code = ""
        task_id = ""

        try:
            # Submit the task; extra params may override the defaults.
            payload = {"key": self.api_key, "method": method, "json": 1, **params}
            submit = requests.post(
                f"{self.base}/in.php", data=payload, timeout=30,
            )
            body = submit.json()

            if body.get("status") != 1:
                code = body.get("request", "UNKNOWN")
                raise RuntimeError(f"Submit error: {code}")

            task_id = body["request"]

            # Poll for the result.
            token = self._poll(task_id)
            outcome = "success"
            return token

        except TimeoutError:
            outcome = "timeout"
            raise
        except Exception as e:
            # Keep a short identifier for the error-breakdown report.
            code = str(e)[:50]
            raise
        finally:
            self.metrics.record(
                method, time.time() - started, outcome, code, task_id,
            )

    def _poll(self, task_id, timeout=120):
        """Poll res.php every 5s until a token is ready or *timeout* elapses."""
        deadline = time.time() + timeout
        while time.time() < deadline:
            time.sleep(5)
            answer = requests.get(f"{self.base}/res.php", params={
                "key": self.api_key, "action": "get",
                "id": task_id, "json": 1,
            }, timeout=15).json()
            if answer["request"] == "CAPCHA_NOT_READY":
                continue
            if answer.get("status") == 1:
                return answer["request"]
            raise RuntimeError(f"Solve error: {answer['request']}")
        raise TimeoutError("Poll timeout")

    def print_summary(self):
        """Pretty-print the per-method stats collected this session."""
        print("\n=== CaptchaAI Usage Summary ===")
        for method, stats in self.metrics.get_session_summary().items():
            print(f"\n{method}:")
            for key, value in stats.items():
                print(f"  {key}: {value}")


# Example usage: solve a batch of reCAPTCHAs, then report session metrics.
collector = MetricsCollector()
monitored = MonitoredSolver("YOUR_API_KEY", collector)

# Every attempt -- success or failure -- is logged by the solver itself.
for attempt in range(10):
    try:
        monitored.solve(
            "userrecaptcha",
            googlekey="SITE_KEY",
            pageurl="https://example.com",
        )
    except Exception as e:
        print(f"Failed: {e}")

# Dump the aggregated session statistics.
monitored.print_summary()

Usage Report Generator

Generate daily/weekly reports from collected metrics:

import csv
import datetime
from collections import defaultdict


class UsageReport:
    """Generate usage reports from the metrics CSV written by MetricsCollector."""

    def __init__(self, log_file="captchaai_metrics.csv"):
        self.log_file = log_file

    def _load_data(self, days=None):
        """Load metric rows, optionally keeping only the most recent *days*.

        Returns a list of CSV row dicts augmented with parsed "_ts"
        (datetime) and "_duration" (float) fields. A missing log file
        yields an empty list (no metrics collected yet, matching
        BalanceDashboard.get_spending), and malformed rows are skipped
        so one bad line cannot abort an entire report.
        """
        cutoff = None
        if days:
            cutoff = datetime.datetime.utcnow() - datetime.timedelta(days=days)

        records = []
        try:
            with open(self.log_file, "r", newline="") as f:
                reader = csv.DictReader(f)
                for row in reader:
                    try:
                        ts = datetime.datetime.fromisoformat(row["timestamp"])
                        duration = float(row["duration_s"])
                    except (KeyError, TypeError, ValueError):
                        continue  # skip malformed rows rather than crash
                    if cutoff and ts < cutoff:
                        continue
                    row["_ts"] = ts
                    row["_duration"] = duration
                    records.append(row)
        except FileNotFoundError:
            return []
        return records

    def daily_summary(self, days=7):
        """Print per-day solve counts, success rate, and average solve time."""
        records = self._load_data(days=days)
        by_day = defaultdict(lambda: {"count": 0, "success": 0, "total_time": 0})

        for rec in records:
            day = rec["_ts"].date().isoformat()
            by_day[day]["count"] += 1
            if rec["status"] == "success":
                by_day[day]["success"] += 1
            by_day[day]["total_time"] += rec["_duration"]

        print(f"=== Daily Summary (last {days} days) ===")
        print(f"{'Date':<12} {'Total':>6} {'Success':>8} {'Rate':>7} {'Avg Time':>9}")
        for day in sorted(by_day.keys()):
            stats = by_day[day]
            rate = stats["success"] / stats["count"] * 100 if stats["count"] > 0 else 0
            avg = stats["total_time"] / stats["count"] if stats["count"] > 0 else 0
            print(f"{day:<12} {stats['count']:>6} {stats['success']:>8} {rate:>6.1f}% {avg:>8.1f}s")

    def method_breakdown(self, days=30):
        """Print per-CAPTCHA-type solve counts, success rate, and avg time."""
        records = self._load_data(days=days)
        by_method = defaultdict(lambda: {"count": 0, "success": 0, "total_time": 0})

        for rec in records:
            method = rec["method"]
            by_method[method]["count"] += 1
            if rec["status"] == "success":
                by_method[method]["success"] += 1
            by_method[method]["total_time"] += rec["_duration"]

        print(f"\n=== Method Breakdown (last {days} days) ===")
        print(f"{'Method':<25} {'Total':>6} {'Success':>8} {'Rate':>7} {'Avg Time':>9}")
        for method in sorted(by_method.keys()):
            stats = by_method[method]
            # counts are always >= 1 here; the guard keeps this method
            # consistent with daily_summary.
            rate = stats["success"] / stats["count"] * 100 if stats["count"] > 0 else 0
            avg = stats["total_time"] / stats["count"] if stats["count"] > 0 else 0
            print(f"{method:<25} {stats['count']:>6} {stats['success']:>8} {rate:>6.1f}% {avg:>8.1f}s")

    def error_breakdown(self, days=7):
        """Print error-code counts, most frequent first."""
        records = self._load_data(days=days)
        errors = defaultdict(int)

        for rec in records:
            if rec["status"] != "success" and rec["error_code"]:
                errors[rec["error_code"]] += 1

        if errors:
            print(f"\n=== Error Breakdown (last {days} days) ===")
            for error, count in sorted(errors.items(), key=lambda x: -x[1]):
                print(f"  {error}: {count}")


# Example: generate the three standard reports from the metrics log.
usage = UsageReport()
usage.daily_summary(days=7)
usage.method_breakdown(days=30)
usage.error_breakdown(days=7)

Balance History Tracking

import requests
import time
import csv
import datetime


class BalanceDashboard:
    """Track balance over time for spending analysis."""

    def __init__(self, api_key, log_file="balance_history.csv"):
        self.api_key = api_key
        self.log_file = log_file

    def record(self):
        """Fetch the current balance, append it to the history CSV, return it.

        Raises:
            RuntimeError: if the API returns an error instead of a balance.
        """
        resp = requests.get("https://ocr.captchaai.com/res.php", params={
            "key": self.api_key,
            "action": "getbalance",
            "json": 1,
        }, timeout=15)  # never hang forever on a stalled connection
        data = resp.json()
        if data.get("status") != 1:
            # e.g. ERROR_KEY_DOES_NOT_EXIST -- fail loudly instead of letting
            # float() raise an opaque ValueError on the error string.
            raise RuntimeError(f"Balance error: {data.get('request')}")
        balance = float(data["request"])

        with open(self.log_file, "a", newline="") as f:
            writer = csv.writer(f)
            writer.writerow([
                datetime.datetime.utcnow().isoformat(),
                f"{balance:.4f}",
            ])
        return balance

    def get_spending(self, hours=24):
        """Return the balance drop (oldest - newest) over the last *hours*.

        Top-ups inside the window reduce the figure and can make it
        negative. Returns 0 when the history file is missing or holds
        fewer than two samples in the window.
        """
        cutoff = datetime.datetime.utcnow() - datetime.timedelta(hours=hours)
        balances = []

        try:
            with open(self.log_file, "r", newline="") as f:
                reader = csv.reader(f)
                for row in reader:
                    if len(row) < 2:
                        continue  # tolerate blank or truncated lines
                    try:
                        ts = datetime.datetime.fromisoformat(row[0])
                        value = float(row[1])
                    except ValueError:
                        continue  # skip malformed rows
                    if ts > cutoff:
                        balances.append(value)
        except FileNotFoundError:
            return 0

        if len(balances) < 2:
            return 0
        return balances[0] - balances[-1]

Troubleshooting

Issue Cause Fix
CSV file growing too large Long-running monitoring Rotate files daily/weekly
Missing solve records Solver not instrumented Wrap all solvers with MonitoredSolver
Stats don't match billing Missing error records Ensure finally block always logs
High error rate in dashboard Incorrect API parameters Check error breakdown report

FAQ

How much data should I keep?

Keep detailed metrics for 30 days and summarized data for 90 days. Archive older data to reduce file sizes.

Can I export metrics to Prometheus/Grafana?

Yes. The MetricsCollector can be extended to push metrics to Prometheus using the prometheus_client library. See the Prometheus monitoring guide.

Should I monitor in production?

Yes. The overhead of logging each solve to CSV is negligible (<1ms per write). The visibility it provides is essential for production pipelines.



Know your numbers — start monitoring CaptchaAI today.

Discussions (0)

No comments yet.

Related Posts

DevOps & Scaling CaptchaAI Monitoring with Datadog: Metrics and Alerts
Monitor Captcha AI performance with Datadog — custom metrics, dashboards, anomaly detection alerts, and solve rate tracking for CAPTCHA solving pipelines.

Monitor Captcha AI performance with Datadog — custom metrics, dashboards, anomaly detection alerts, and solve...

Automation Python All CAPTCHA Types
Feb 19, 2026
Tutorials Webhook Endpoint Monitoring for CAPTCHA Solve Callbacks
Monitor your Captcha AI callback endpoints — track uptime, response latency, error rates, and set up alerts before missed results impact your pipeline.

Monitor your Captcha AI callback endpoints — track uptime, response latency, error rates, and set up alerts be...

Automation Python All CAPTCHA Types
Mar 12, 2026
DevOps & Scaling CaptchaAI Monitoring with New Relic: APM Integration
Integrate Captcha AI with New Relic APM — custom events, transaction tracing, dashboards, and alert policies for CAPTCHA solving performance.

Integrate Captcha AI with New Relic APM — custom events, transaction tracing, dashboards, and alert policies f...

Automation Python All CAPTCHA Types
Jan 31, 2026
DevOps & Scaling Building Custom CaptchaAI Alerts with PagerDuty
Integrate Captcha AI with Pager Duty for incident management — trigger alerts on low balance, high error rates, and pipeline failures with escalation policies.

Integrate Captcha AI with Pager Duty for incident management — trigger alerts on low balance, high error rates...

Automation Python All CAPTCHA Types
Jan 15, 2026
DevOps & Scaling Grafana Dashboard Templates for CaptchaAI Metrics
Ready-to-import Grafana dashboard templates for Captcha AI — solve rate panels, latency histograms, balance gauges, and queue depth monitors.

Ready-to-import Grafana dashboard templates for Captcha AI — solve rate panels, latency histograms, balance ga...

Automation Python All CAPTCHA Types
Feb 21, 2026
Tutorials Batch CAPTCHA Solving Cost Estimation and Budget Alerts
Estimate costs for batch CAPTCHA solving, set budget limits, track per-task spending, and configure alerts to prevent unexpected charges with Captcha AI.

Estimate costs for batch CAPTCHA solving, set budget limits, track per-task spending, and configure alerts to...

Automation Python All CAPTCHA Types
Mar 28, 2026
Reference CAPTCHA Solve Rate SLI/SLO: How to Define and Monitor
Define SLIs and SLOs for CAPTCHA solving — success rate, latency percentiles, availability targets, error budgets, and burn rate alerting with Captcha AI.

Define SLIs and SLOs for CAPTCHA solving — success rate, latency percentiles, availability targets, error budg...

Automation Python All CAPTCHA Types
Mar 05, 2026
Tutorials Discord Webhook Alerts for CAPTCHA Pipeline Status
Send CAPTCHA pipeline alerts to Discord — webhook integration for balance warnings, error spikes, queue status, and daily summary reports with Captcha AI.

Send CAPTCHA pipeline alerts to Discord — webhook integration for balance warnings, error spikes, queue status...

Automation Python All CAPTCHA Types
Tutorials Structured Logging for CAPTCHA Operations
Add structured JSON logging to your CAPTCHA solving workflows — track task IDs, solve times, errors, and costs with Python and Node.js.

Add structured JSON logging to your CAPTCHA solving workflows — track task IDs, solve times, errors, and costs...

Automation Python All CAPTCHA Types
Jan 30, 2026
API Tutorials How to Solve reCAPTCHA v2 Callback Using API
Learn how to solve reCAPTCHA v2 callback implementations using the CaptchaAI API.

Learn how to solve reCAPTCHA v2 callback implementations using the CaptchaAI API. Detect the callback function,...

Automation reCAPTCHA v2 Webhooks
Mar 01, 2026
API Tutorials Solve GeeTest v3 CAPTCHA with Python and CaptchaAI
Step-by-step Python tutorial for solving GeeTest v3 slide puzzle CAPTCHAs using the CaptchaAI API.

Step-by-step Python tutorial for solving GeeTest v3 slide puzzle CAPTCHAs using the CaptchaAI API. Includes...

Automation Python Testing
Mar 23, 2026
API Tutorials Case-Sensitive CAPTCHA API Parameter Guide
How to use the regsense parameter for case-sensitive CAPTCHA solving with Captcha AI.

How to use the regsense parameter for case-sensitive CAPTCHA solving with Captcha AI. Covers when to use, comm...

Python Web Scraping Image OCR
Apr 09, 2026