From 1eba2eb205b40b9e4d94381a6bb05668191fa8b6 Mon Sep 17 00:00:00 2001 From: Azwan Ngali Date: Wed, 17 Dec 2025 16:51:09 +0000 Subject: [PATCH] Initial commit --- gitea/.env | 11 + gitea/.env.backup.20251217_091025 | 11 + gitea/.env.backup.20251217_092048 | 11 + gitea/.env.backup.20251217_160100 | 11 + gitea/README.md | 251 +++++++ gitea/deploy.py | 202 ++++++ .../ascidiia-bridoon_20251217_160155.json | 23 + gitea/destroy.py | 529 +++++++++++++++ gitea/docker-compose.yml | 57 ++ gitea/gitea_deployer/__init__.py | 8 + .../__pycache__/__init__.cpython-39.pyc | Bin 0 -> 318 bytes .../__pycache__/config.cpython-39.pyc | Bin 0 -> 4496 bytes .../deployment_config_manager.cpython-39.pyc | Bin 0 -> 5397 bytes .../deployment_logger.cpython-39.pyc | Bin 0 -> 6720 bytes .../__pycache__/dns_manager.cpython-39.pyc | Bin 0 -> 6756 bytes .../__pycache__/docker_manager.cpython-39.pyc | Bin 0 -> 6966 bytes .../__pycache__/env_generator.cpython-39.pyc | Bin 0 -> 11153 bytes .../__pycache__/health.cpython-39.pyc | Bin 0 -> 3191 bytes .../__pycache__/orchestrator.cpython-39.pyc | Bin 0 -> 15883 bytes .../__pycache__/webhooks.cpython-39.pyc | Bin 0 -> 5587 bytes gitea/gitea_deployer/config.py | 187 ++++++ .../deployment_config_manager.py | 153 +++++ gitea/gitea_deployer/deployment_logger.py | 218 ++++++ gitea/gitea_deployer/dns_manager.py | 286 ++++++++ gitea/gitea_deployer/docker_manager.py | 276 ++++++++ gitea/gitea_deployer/env_generator.py | 390 +++++++++++ gitea/gitea_deployer/health.py | 128 ++++ gitea/gitea_deployer/orchestrator.py | 626 ++++++++++++++++++ gitea/gitea_deployer/webhooks.py | 199 ++++++ ...lly-copious.merakit.my_20251217_092143.txt | 14 + ...iia-bridoon.merakit.my_20251217_160155.txt | 14 + gitea/requirements.txt | 4 + scripts/.claude/settings.local.json | 18 + scripts/cloudflare-add.sh | 229 +++++++ scripts/cloudflare-remove.sh | 327 +++++++++ wordpress/.claude/settings.local.json | 37 ++ wordpress/.env | 14 + wordpress/.env.backup | 22 + 
wordpress/.env.backup.20251216_163858 | 22 + wordpress/.env.backup.20251216_164443 | 14 + wordpress/.env.backup.20251216_164618 | 14 + wordpress/.env.backup.20251216_164814 | 14 + wordpress/.env.backup.20251216_165109 | 14 + wordpress/.env.backup.20251216_170611 | 14 + wordpress/.env.backup.20251216_184629 | 14 + wordpress/.env.backup.20251217_061213 | 14 + wordpress/.env.backup.20251217_061237 | 14 + wordpress/.env.backup.20251217_061526 | 14 + wordpress/.env.backup.20251217_065205 | 14 + wordpress/.env.backup.20251217_070700 | 14 + wordpress/.env.backup.20251217_071039 | 14 + wordpress/DESTROY.md | 354 ++++++++++ wordpress/deploy.py | 202 ++++++ wordpress/destroy.py | 529 +++++++++++++++ wordpress/docker-compose.yml | 56 ++ ...o-hedgeless.merakit.my_20251217_070805.txt | 18 + ...g-refocuses.merakit.my_20251217_061237.txt | 18 + ...d-doodlebug.merakit.my_20251217_061213.txt | 18 + ...c-unactable.merakit.my_20251217_061635.txt | 14 + ...-allotrylic.merakit.my_20251217_071135.txt | 14 + ...ic-fuglemen.merakit.my_20251216_170709.txt | 14 + ...-calcinator.merakit.my_20251216_184725.txt | 14 + ...y-spareable.merakit.my_20251217_065302.txt | 14 + wordpress/requirements.txt | 4 + wordpress/wordpress_deployer/__init__.py | 0 .../__pycache__/__init__.cpython-39.pyc | Bin 0 -> 121 bytes .../__pycache__/config.cpython-39.pyc | Bin 0 -> 4504 bytes .../deployment_config_manager.cpython-39.pyc | Bin 0 -> 5405 bytes .../deployment_logger.cpython-39.pyc | Bin 0 -> 6728 bytes .../__pycache__/dns_manager.cpython-39.pyc | Bin 0 -> 6764 bytes .../__pycache__/docker_manager.cpython-39.pyc | Bin 0 -> 6947 bytes .../__pycache__/env_generator.cpython-39.pyc | Bin 0 -> 11227 bytes .../__pycache__/health.cpython-39.pyc | Bin 0 -> 3199 bytes .../__pycache__/orchestrator.cpython-39.pyc | Bin 0 -> 15888 bytes .../__pycache__/webhooks.cpython-39.pyc | Bin 0 -> 5568 bytes wordpress/wordpress_deployer/config.py | 187 ++++++ .../deployment_config_manager.py | 153 +++++ 
.../wordpress_deployer/deployment_logger.py | 218 ++++++ wordpress/wordpress_deployer/dns_manager.py | 286 ++++++++ .../wordpress_deployer/docker_manager.py | 276 ++++++++ wordpress/wordpress_deployer/env_generator.py | 394 +++++++++++ wordpress/wordpress_deployer/health.py | 128 ++++ wordpress/wordpress_deployer/orchestrator.py | 626 ++++++++++++++++++ wordpress/wordpress_deployer/webhooks.py | 199 ++++++ 84 files changed, 8182 insertions(+) create mode 100644 gitea/.env create mode 100644 gitea/.env.backup.20251217_091025 create mode 100644 gitea/.env.backup.20251217_092048 create mode 100644 gitea/.env.backup.20251217_160100 create mode 100644 gitea/README.md create mode 100755 gitea/deploy.py create mode 100644 gitea/deployments/ascidiia-bridoon_20251217_160155.json create mode 100755 gitea/destroy.py create mode 100644 gitea/docker-compose.yml create mode 100644 gitea/gitea_deployer/__init__.py create mode 100644 gitea/gitea_deployer/__pycache__/__init__.cpython-39.pyc create mode 100644 gitea/gitea_deployer/__pycache__/config.cpython-39.pyc create mode 100644 gitea/gitea_deployer/__pycache__/deployment_config_manager.cpython-39.pyc create mode 100644 gitea/gitea_deployer/__pycache__/deployment_logger.cpython-39.pyc create mode 100644 gitea/gitea_deployer/__pycache__/dns_manager.cpython-39.pyc create mode 100644 gitea/gitea_deployer/__pycache__/docker_manager.cpython-39.pyc create mode 100644 gitea/gitea_deployer/__pycache__/env_generator.cpython-39.pyc create mode 100644 gitea/gitea_deployer/__pycache__/health.cpython-39.pyc create mode 100644 gitea/gitea_deployer/__pycache__/orchestrator.cpython-39.pyc create mode 100644 gitea/gitea_deployer/__pycache__/webhooks.cpython-39.pyc create mode 100644 gitea/gitea_deployer/config.py create mode 100644 gitea/gitea_deployer/deployment_config_manager.py create mode 100644 gitea/gitea_deployer/deployment_logger.py create mode 100644 gitea/gitea_deployer/dns_manager.py create mode 100644 
gitea/gitea_deployer/docker_manager.py create mode 100644 gitea/gitea_deployer/env_generator.py create mode 100644 gitea/gitea_deployer/health.py create mode 100644 gitea/gitea_deployer/orchestrator.py create mode 100644 gitea/gitea_deployer/webhooks.py create mode 100644 gitea/logs/success/success_artfully-copious.merakit.my_20251217_092143.txt create mode 100644 gitea/logs/success/success_ascidiia-bridoon.merakit.my_20251217_160155.txt create mode 100644 gitea/requirements.txt create mode 100644 scripts/.claude/settings.local.json create mode 100755 scripts/cloudflare-add.sh create mode 100755 scripts/cloudflare-remove.sh create mode 100644 wordpress/.claude/settings.local.json create mode 100644 wordpress/.env create mode 100644 wordpress/.env.backup create mode 100644 wordpress/.env.backup.20251216_163858 create mode 100644 wordpress/.env.backup.20251216_164443 create mode 100644 wordpress/.env.backup.20251216_164618 create mode 100644 wordpress/.env.backup.20251216_164814 create mode 100644 wordpress/.env.backup.20251216_165109 create mode 100644 wordpress/.env.backup.20251216_170611 create mode 100644 wordpress/.env.backup.20251216_184629 create mode 100644 wordpress/.env.backup.20251217_061213 create mode 100644 wordpress/.env.backup.20251217_061237 create mode 100644 wordpress/.env.backup.20251217_061526 create mode 100644 wordpress/.env.backup.20251217_065205 create mode 100644 wordpress/.env.backup.20251217_070700 create mode 100644 wordpress/.env.backup.20251217_071039 create mode 100644 wordpress/DESTROY.md create mode 100755 wordpress/deploy.py create mode 100755 wordpress/destroy.py create mode 100644 wordpress/docker-compose.yml create mode 100644 wordpress/logs/failed/failed_caimito-hedgeless.merakit.my_20251217_070805.txt create mode 100644 wordpress/logs/failed/failed_insuring-refocuses.merakit.my_20251217_061237.txt create mode 100644 wordpress/logs/failed/failed_juslted-doodlebug.merakit.my_20251217_061213.txt create mode 100644 
wordpress/logs/success/success_ankylotic-unactable.merakit.my_20251217_061635.txt create mode 100644 wordpress/logs/success/success_daidle-allotrylic.merakit.my_20251217_071135.txt create mode 100644 wordpress/logs/success/success_emetic-fuglemen.merakit.my_20251216_170709.txt create mode 100644 wordpress/logs/success/success_exing-calcinator.merakit.my_20251216_184725.txt create mode 100644 wordpress/logs/success/success_slenderly-spareable.merakit.my_20251217_065302.txt create mode 100644 wordpress/requirements.txt create mode 100644 wordpress/wordpress_deployer/__init__.py create mode 100644 wordpress/wordpress_deployer/__pycache__/__init__.cpython-39.pyc create mode 100644 wordpress/wordpress_deployer/__pycache__/config.cpython-39.pyc create mode 100644 wordpress/wordpress_deployer/__pycache__/deployment_config_manager.cpython-39.pyc create mode 100644 wordpress/wordpress_deployer/__pycache__/deployment_logger.cpython-39.pyc create mode 100644 wordpress/wordpress_deployer/__pycache__/dns_manager.cpython-39.pyc create mode 100644 wordpress/wordpress_deployer/__pycache__/docker_manager.cpython-39.pyc create mode 100644 wordpress/wordpress_deployer/__pycache__/env_generator.cpython-39.pyc create mode 100644 wordpress/wordpress_deployer/__pycache__/health.cpython-39.pyc create mode 100644 wordpress/wordpress_deployer/__pycache__/orchestrator.cpython-39.pyc create mode 100644 wordpress/wordpress_deployer/__pycache__/webhooks.cpython-39.pyc create mode 100644 wordpress/wordpress_deployer/config.py create mode 100644 wordpress/wordpress_deployer/deployment_config_manager.py create mode 100644 wordpress/wordpress_deployer/deployment_logger.py create mode 100644 wordpress/wordpress_deployer/dns_manager.py create mode 100644 wordpress/wordpress_deployer/docker_manager.py create mode 100644 wordpress/wordpress_deployer/env_generator.py create mode 100644 wordpress/wordpress_deployer/health.py create mode 100644 wordpress/wordpress_deployer/orchestrator.py create mode 
100644 wordpress/wordpress_deployer/webhooks.py diff --git a/gitea/.env b/gitea/.env new file mode 100644 index 0000000..50334a5 --- /dev/null +++ b/gitea/.env @@ -0,0 +1,11 @@ +COMPOSE_PROJECT_NAME=ascidiia-bridoon +APP_NAME=gitea +SUBDOMAIN=ascidiia-bridoon +DOMAIN=merakit.my +URL=ascidiia-bridoon.merakit.my +GITEA_VERSION=1.21 +POSTGRES_VERSION=16-alpine +DB_NAME=angali_27658dcb_gitea_ascidiia_bridoon +DB_USER=angali_27658dcb_gitea_ascidiia_bridoon +DB_PASSWORD=diapason-dukkha-munchausen +DISABLE_REGISTRATION=false diff --git a/gitea/.env.backup.20251217_091025 b/gitea/.env.backup.20251217_091025 new file mode 100644 index 0000000..7707ce2 --- /dev/null +++ b/gitea/.env.backup.20251217_091025 @@ -0,0 +1,11 @@ +COMPOSE_PROJECT_NAME=gitea-template +APP_NAME=gitea +SUBDOMAIN=gitea-template +DOMAIN=merakit.my +URL=gitea-template.merakit.my +GITEA_VERSION=1.21 +POSTGRES_VERSION=16-alpine +DB_NAME=gitea_db +DB_USER=gitea_user +DB_PASSWORD=change-me +DISABLE_REGISTRATION=false diff --git a/gitea/.env.backup.20251217_092048 b/gitea/.env.backup.20251217_092048 new file mode 100644 index 0000000..9c7da79 --- /dev/null +++ b/gitea/.env.backup.20251217_092048 @@ -0,0 +1,11 @@ +COMPOSE_PROJECT_NAME=dodman-kuichua +APP_NAME=gitea +SUBDOMAIN=dodman-kuichua +DOMAIN=merakit.my +URL=dodman-kuichua.merakit.my +GITEA_VERSION=1.21 +POSTGRES_VERSION=16-alpine +DB_NAME=angali_7675e8e6_gitea_dodman_kuichua +DB_USER=angali_7675e8e6_gitea_dodman_kuichua +DB_PASSWORD=viva-overheats-chusite +DISABLE_REGISTRATION=false diff --git a/gitea/.env.backup.20251217_160100 b/gitea/.env.backup.20251217_160100 new file mode 100644 index 0000000..01b8b91 --- /dev/null +++ b/gitea/.env.backup.20251217_160100 @@ -0,0 +1,11 @@ +COMPOSE_PROJECT_NAME=artfully-copious +APP_NAME=gitea +SUBDOMAIN=artfully-copious +DOMAIN=merakit.my +URL=artfully-copious.merakit.my +GITEA_VERSION=1.21 +POSTGRES_VERSION=16-alpine +DB_NAME=angali_2f2ec2eb_gitea_artfully_copious +DB_USER=angali_2f2ec2eb_gitea_artfully_copious 
+DB_PASSWORD=bannerer-tetchy-polyaxone +DISABLE_REGISTRATION=false diff --git a/gitea/README.md b/gitea/README.md new file mode 100644 index 0000000..caa4943 --- /dev/null +++ b/gitea/README.md @@ -0,0 +1,251 @@ +# Gitea Deployment Template + +Production-ready Gitea deployment with automated DNS, environment generation, and health checking. + +## Features + +- **Automated Environment Generation**: Random subdomain and secure password generation +- **DNS Management**: Automatic Cloudflare DNS record creation +- **Health Checking**: Automated deployment verification +- **Rollback Support**: Automatic rollback on deployment failure +- **Webhook Notifications**: Optional webhook notifications for deployment events +- **Deployment Tracking**: Track and manage all deployments +- **Dry-Run Mode**: Preview changes before applying + +## Architecture + +``` +gitea/ +├── docker-compose.yml # Docker Compose configuration +├── .env # Environment variables (generated) +├── deploy.py # Main deployment script +├── destroy.py # Deployment destruction script +├── requirements.txt # Python dependencies +├── deployments/ # Deployment configuration tracking +├── logs/ # Deployment logs +│ ├── success/ # Successful deployment logs +│ └── failed/ # Failed deployment logs +└── gitea_deployer/ # Python deployment module + ├── config.py # Configuration management + ├── orchestrator.py # Deployment orchestration + ├── env_generator.py # Environment generation + ├── dns_manager.py # DNS management (Cloudflare) + ├── docker_manager.py # Docker operations + ├── health.py # Health checking + ├── webhooks.py # Webhook notifications + ├── deployment_logger.py # File logging + └── deployment_config_manager.py # Deployment tracking +``` + +## Prerequisites + +- Docker and Docker Compose +- Python 3.9+ +- Cloudflare account with API token +- Traefik reverse proxy running on `proxy` network +- `/usr/share/dict/words` file (install `words` package) + +## Installation + +1. 
Install Python dependencies: +```bash +pip3 install -r requirements.txt +``` + +2. Set environment variables: +```bash +export CLOUDFLARE_API_TOKEN="your-token" +export CLOUDFLARE_ZONE_ID="your-zone-id" +``` + +3. Ensure Docker proxy network exists: +```bash +docker network create proxy +``` + +## Usage + +### Deploy Gitea + +Basic deployment: +```bash +./deploy.py +``` + +With options: +```bash +# Dry-run mode (preview only) +./deploy.py --dry-run + +# Debug mode +./deploy.py --log-level DEBUG + +# With webhook notifications +./deploy.py --webhook-url https://hooks.slack.com/your-webhook + +# Custom retry count for DNS conflicts +./deploy.py --max-retries 5 +``` + +### List Deployments + +```bash +./destroy.py --list +``` + +### Destroy Deployment + +By subdomain: +```bash +./destroy.py --subdomain my-gitea-site +``` + +By URL: +```bash +./destroy.py --url my-gitea-site.merakit.my +``` + +With options: +```bash +# Skip confirmation +./destroy.py --subdomain my-gitea-site --yes + +# Dry-run mode +./destroy.py --subdomain my-gitea-site --dry-run + +# Keep deployment config file +./destroy.py --subdomain my-gitea-site --keep-config +``` + +## Environment Variables + +### Required + +- `CLOUDFLARE_API_TOKEN`: Cloudflare API token with DNS edit permissions +- `CLOUDFLARE_ZONE_ID`: Cloudflare zone ID for your domain + +### Optional + +- `DEPLOYMENT_WEBHOOK_URL`: Webhook URL for deployment notifications +- `DEPLOYMENT_MAX_RETRIES`: Max retries for DNS conflicts (default: 3) +- `DEPLOYMENT_HEALTHCHECK_TIMEOUT`: Health check timeout in seconds (default: 60) +- `DEPLOYMENT_HEALTHCHECK_INTERVAL`: Health check interval in seconds (default: 10) + +## Configuration + +### Docker Compose Services + +- **postgres**: PostgreSQL 16 database +- **gitea**: Gitea 1.21 Git service + +### Generated Values + +The deployment automatically generates: + +- Random subdomain (e.g., `awesome-robot.merakit.my`) +- Database name with prefix `angali_{random}_{app}_{subdomain}` +- Database user 
with same pattern +- Secure memorable passwords (3-word format) + +### Customization + +Edit `.env` file to customize: + +- `GITEA_VERSION`: Gitea version (default: 1.21) +- `POSTGRES_VERSION`: PostgreSQL version (default: 16-alpine) +- `DISABLE_REGISTRATION`: Disable user registration (default: false) +- `DOMAIN`: Base domain (default: merakit.my) + +## Deployment Workflow + +1. **Validation**: Check dependencies and configuration +2. **Environment Generation**: Generate random subdomain and credentials +3. **DNS Setup**: Create Cloudflare DNS record +4. **Container Deployment**: Pull images and start services +5. **Health Check**: Verify deployment is accessible +6. **Logging**: Record deployment success/failure + +## Rollback + +If deployment fails at any stage, automatic rollback occurs: + +1. Stop and remove containers +2. Remove DNS records +3. Restore previous `.env` file + +## Troubleshooting + +### DNS Conflicts + +If subdomain is already taken, the script automatically retries with a new random subdomain (up to `max_retries` times). + +### Health Check Failures + +Health checks wait up to 60 seconds by default. 
Increase timeout if needed: + +```bash +export DEPLOYMENT_HEALTHCHECK_TIMEOUT=120 +./deploy.py +``` + +### Missing Dictionary File + +Install the words package: + +```bash +# Ubuntu/Debian +sudo apt-get install wamerican + +# RHEL/CentOS +sudo yum install words +``` + +## Logs + +- Success logs: `logs/success/success_{url}_{timestamp}.txt` +- Failure logs: `logs/failed/failed_{url}_{timestamp}.txt` + +## Deployment Tracking + +Deployment configurations are saved in `deployments/` directory: + +- Format: `{subdomain}_{timestamp}.json` +- Contains: containers, volumes, networks, DNS records +- Used by `destroy.py` for cleanup + +## Security Notes + +- Passwords are generated using cryptographically secure random generation +- API tokens are never logged or displayed +- SSL verification is enabled by default (use `--no-verify-ssl` only for testing) +- Database credentials are automatically generated per deployment + +## Integration + +### Webhook Notifications + +The script can send webhook notifications for: + +- `deployment_started`: When deployment begins +- `dns_added`: When DNS record is created +- `health_check_passed`: When health check succeeds +- `deployment_success`: When deployment completes +- `deployment_failed`: When deployment fails + +Example webhook payload: +```json +{ + "event_type": "deployment_success", + "timestamp": "2024-01-01T12:00:00Z", + "subdomain": "awesome-robot", + "url": "awesome-robot.merakit.my", + "message": "Deployment successful for awesome-robot.merakit.my", + "metadata": { + "duration": 45.2 + } +} +``` + +## License + +This deployment template is part of the infrastructure management system. 
diff --git a/gitea/deploy.py b/gitea/deploy.py new file mode 100755 index 0000000..6f6f427 --- /dev/null +++ b/gitea/deploy.py @@ -0,0 +1,202 @@ +#!/usr/bin/env python3 +""" +Production-ready Gitea deployment script + +Combines environment generation and deployment with: +- Configuration validation +- Rollback capability +- Dry-run mode +- Monitoring hooks +""" + +import argparse +import logging +import sys +from pathlib import Path +from typing import NoReturn + +from rich.console import Console +from rich.logging import RichHandler + +from gitea_deployer.config import ConfigurationError, DeploymentConfig +from gitea_deployer.orchestrator import DeploymentError, DeploymentOrchestrator + + +console = Console() + + +def setup_logging(log_level: str) -> None: + """ + Setup rich logging with colored output + + Args: + log_level: Logging level (DEBUG, INFO, WARNING, ERROR) + """ + logging.basicConfig( + level=log_level.upper(), + format="%(message)s", + datefmt="[%X]", + handlers=[RichHandler(console=console, rich_tracebacks=True, show_path=False)] + ) + + # Reduce noise from urllib3/requests + logging.getLogger("urllib3").setLevel(logging.WARNING) + logging.getLogger("requests").setLevel(logging.WARNING) + + +def parse_args() -> argparse.Namespace: + """ + Parse CLI arguments + + Returns: + argparse.Namespace with parsed arguments + """ + parser = argparse.ArgumentParser( + description="Deploy Gitea with automatic environment generation", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + # Normal deployment + ./deploy.py + + # Dry-run mode (preview only) + ./deploy.py --dry-run + + # With webhook notifications + ./deploy.py --webhook-url https://hooks.slack.com/xxx + + # Debug mode + ./deploy.py --log-level DEBUG + + # Custom retry count + ./deploy.py --max-retries 5 + +Environment Variables: + CLOUDFLARE_API_TOKEN Cloudflare API token (required) + CLOUDFLARE_ZONE_ID Cloudflare zone ID (required) + DEPLOYMENT_WEBHOOK_URL Webhook URL for 
notifications (optional) + DEPLOYMENT_MAX_RETRIES Max retries for DNS conflicts (default: 3) + +For more information, see the documentation at: + /infra/templates/gitea/README.md + """ + ) + + parser.add_argument( + "--dry-run", + action="store_true", + help="Preview deployment without making changes" + ) + + parser.add_argument( + "--env-file", + type=Path, + default=Path(".env"), + help="Path to .env file (default: .env)" + ) + + parser.add_argument( + "--compose-file", + type=Path, + default=Path("docker-compose.yml"), + help="Path to docker-compose.yml (default: docker-compose.yml)" + ) + + parser.add_argument( + "--max-retries", + type=int, + default=3, + help="Max retries for DNS conflicts (default: 3)" + ) + + parser.add_argument( + "--webhook-url", + type=str, + help="Webhook URL for deployment notifications" + ) + + parser.add_argument( + "--log-level", + choices=["DEBUG", "INFO", "WARNING", "ERROR"], + default="INFO", + help="Logging level (default: INFO)" + ) + + parser.add_argument( + "--no-verify-ssl", + action="store_true", + help="Skip SSL verification for health checks (not recommended for production)" + ) + + return parser.parse_args() + + +def print_banner() -> None: + """Print deployment banner""" + console.print("\n[bold cyan]╔══════════════════════════════════════════════╗[/bold cyan]") + console.print("[bold cyan]║[/bold cyan] [bold white]Gitea Production Deployment[/bold white] [bold cyan]║[/bold cyan]") + console.print("[bold cyan]╚══════════════════════════════════════════════╝[/bold cyan]\n") + + +def main() -> NoReturn: + """ + Main entry point + + Exit codes: + 0: Success + 1: Deployment failure + 130: User interrupt (Ctrl+C) + """ + args = parse_args() + setup_logging(args.log_level) + + logger = logging.getLogger(__name__) + + print_banner() + + try: + # Load configuration + logger.debug("Loading configuration...") + config = DeploymentConfig.from_env_and_args(args) + config.validate() + logger.debug("Configuration loaded 
successfully") + + if config.dry_run: + console.print("[bold yellow]━━━ DRY-RUN MODE: No changes will be made ━━━[/bold yellow]\n") + + # Create orchestrator and deploy + orchestrator = DeploymentOrchestrator(config) + orchestrator.deploy() + + console.print("\n[bold green]╔══════════════════════════════════════════════╗[/bold green]") + console.print("[bold green]║[/bold green] [bold white]✓ Deployment Successful![/bold white] [bold green]║[/bold green]") + console.print("[bold green]╚══════════════════════════════════════════════╝[/bold green]\n") + + sys.exit(0) + + except ConfigurationError as e: + logger.error(f"Configuration error: {e}") + console.print(f"\n[bold red]✗ Configuration error: {e}[/bold red]\n") + console.print("[yellow]Please check your environment variables and configuration.[/yellow]") + console.print("[yellow]Required: CLOUDFLARE_API_TOKEN, CLOUDFLARE_ZONE_ID[/yellow]\n") + sys.exit(1) + + except DeploymentError as e: + logger.error(f"Deployment failed: {e}") + console.print(f"\n[bold red]✗ Deployment failed: {e}[/bold red]\n") + sys.exit(1) + + except KeyboardInterrupt: + logger.warning("Deployment interrupted by user") + console.print("\n[bold yellow]✗ Deployment interrupted by user[/bold yellow]\n") + sys.exit(130) + + except Exception as e: + logger.exception("Unexpected error") + console.print(f"\n[bold red]✗ Unexpected error: {e}[/bold red]\n") + console.print("[yellow]Please check the logs above for more details.[/yellow]\n") + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/gitea/deployments/ascidiia-bridoon_20251217_160155.json b/gitea/deployments/ascidiia-bridoon_20251217_160155.json new file mode 100644 index 0000000..8119db6 --- /dev/null +++ b/gitea/deployments/ascidiia-bridoon_20251217_160155.json @@ -0,0 +1,23 @@ +{ + "subdomain": "ascidiia-bridoon", + "url": "ascidiia-bridoon.merakit.my", + "domain": "merakit.my", + "compose_project_name": "ascidiia-bridoon", + "db_name": 
"angali_27658dcb_gitea_ascidiia_bridoon", + "db_user": "angali_27658dcb_gitea_ascidiia_bridoon", + "deployment_timestamp": "2025-12-17T16:01:55.543308", + "dns_record_id": "0e5fef38bac853f3e3c65b6bdbc62f2e", + "dns_ip": "64.120.92.151", + "containers": [ + "ascidiia-bridoon_db", + "ascidiia-bridoon_gitea" + ], + "volumes": [ + "ascidiia-bridoon_db_data", + "ascidiia-bridoon_gitea_data" + ], + "networks": [ + "ascidiia-bridoon_internal" + ], + "env_file_path": "/infra/templates/gitea/.env" +} \ No newline at end of file diff --git a/gitea/destroy.py b/gitea/destroy.py new file mode 100755 index 0000000..8ca8917 --- /dev/null +++ b/gitea/destroy.py @@ -0,0 +1,529 @@ +#!/usr/bin/env python3 +""" +Gitea Deployment Destroyer + +Destroys Gitea deployments based on saved deployment configurations +""" + +import argparse +import logging +import subprocess +import sys +from pathlib import Path +from typing import List, NoReturn, Optional + +from rich.console import Console +from rich.logging import RichHandler +from rich.prompt import Confirm +from rich.table import Table + +from gitea_deployer.deployment_config_manager import ( + DeploymentConfigManager, + DeploymentMetadata +) +from gitea_deployer.dns_manager import DNSError, DNSManager + + +console = Console() + + +def setup_logging(log_level: str) -> None: + """ + Setup rich logging with colored output + + Args: + log_level: Logging level (DEBUG, INFO, WARNING, ERROR) + """ + logging.basicConfig( + level=log_level.upper(), + format="%(message)s", + datefmt="[%X]", + handlers=[RichHandler(console=console, rich_tracebacks=True, show_path=False)] + ) + + +def parse_args() -> argparse.Namespace: + """ + Parse CLI arguments + + Returns: + argparse.Namespace with parsed arguments + """ + parser = argparse.ArgumentParser( + description="Destroy Gitea deployments", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + # List all deployments + ./destroy.py --list + + # Destroy by subdomain + 
./destroy.py --subdomain my-site + + # Destroy by URL + ./destroy.py --url my-site.example.com + + # Destroy by config file + ./destroy.py --config deployments/my-site_20231215_120000.json + + # Destroy without confirmation + ./destroy.py --subdomain my-site --yes + + # Dry-run mode (preview only) + ./destroy.py --subdomain my-site --dry-run + +Environment Variables: + CLOUDFLARE_API_TOKEN Cloudflare API token (required) + CLOUDFLARE_ZONE_ID Cloudflare zone ID (required) + """ + ) + + # Action group - mutually exclusive + action_group = parser.add_mutually_exclusive_group(required=True) + action_group.add_argument( + "--list", + action="store_true", + help="List all deployments" + ) + action_group.add_argument( + "--subdomain", + type=str, + help="Subdomain to destroy" + ) + action_group.add_argument( + "--url", + type=str, + help="Full URL to destroy" + ) + action_group.add_argument( + "--config", + type=Path, + help="Path to deployment config file" + ) + + # Options + parser.add_argument( + "--yes", "-y", + action="store_true", + help="Skip confirmation prompts" + ) + + parser.add_argument( + "--dry-run", + action="store_true", + help="Preview destruction without making changes" + ) + + parser.add_argument( + "--keep-config", + action="store_true", + help="Keep deployment config file after destruction" + ) + + parser.add_argument( + "--log-level", + choices=["DEBUG", "INFO", "WARNING", "ERROR"], + default="INFO", + help="Logging level (default: INFO)" + ) + + return parser.parse_args() + + +def print_banner() -> None: + """Print destruction banner""" + console.print("\n[bold red]╔══════════════════════════════════════════════╗[/bold red]") + console.print("[bold red]║[/bold red] [bold white]Gitea Deployment Destroyer[/bold white] [bold red]║[/bold red]") + console.print("[bold red]╚══════════════════════════════════════════════╝[/bold red]\n") + + +def list_deployments(config_manager: DeploymentConfigManager) -> None: + """ + List all deployments + + Args: + 
config_manager: DeploymentConfigManager instance + """ + deployments = config_manager.list_deployments() + + if not deployments: + console.print("[yellow]No deployments found[/yellow]") + return + + table = Table(title="Active Deployments") + table.add_column("Subdomain", style="cyan") + table.add_column("URL", style="green") + table.add_column("Deployed", style="yellow") + table.add_column("Config File", style="blue") + + for config_file in deployments: + try: + metadata = config_manager.load_deployment(config_file) + table.add_row( + metadata.subdomain, + metadata.url, + metadata.deployment_timestamp, + config_file.name + ) + except Exception as e: + console.print(f"[red]Error loading {config_file}: {e}[/red]") + + console.print(table) + console.print(f"\n[bold]Total deployments: {len(deployments)}[/bold]\n") + + +def find_config( + args: argparse.Namespace, + config_manager: DeploymentConfigManager +) -> Optional[Path]: + """ + Find deployment config based on arguments + + Args: + args: CLI arguments + config_manager: DeploymentConfigManager instance + + Returns: + Path to config file or None + """ + if args.config: + return args.config if args.config.exists() else None + + if args.subdomain: + return config_manager.find_deployment_by_subdomain(args.subdomain) + + if args.url: + return config_manager.find_deployment_by_url(args.url) + + return None + + +def run_command(cmd: List[str], dry_run: bool = False) -> bool: + """ + Run a shell command + + Args: + cmd: Command and arguments + dry_run: If True, only print command + + Returns: + True if successful, False otherwise + """ + cmd_str = " ".join(cmd) + + if dry_run: + console.print(f"[dim]Would run: {cmd_str}[/dim]") + return True + + try: + result = subprocess.run( + cmd, + capture_output=True, + text=True, + timeout=30 + ) + if result.returncode != 0: + logging.warning(f"Command failed: {cmd_str}") + logging.debug(f"Error: {result.stderr}") + return False + return True + except subprocess.TimeoutExpired: + 
logging.error(f"Command timed out: {cmd_str}") + return False + except Exception as e: + logging.error(f"Failed to run command: {e}") + return False + + +def destroy_containers(metadata: DeploymentMetadata, dry_run: bool = False) -> bool: + """ + Stop and remove containers + + Args: + metadata: Deployment metadata + dry_run: If True, only preview + + Returns: + True if successful + """ + console.print("\n[bold yellow]═══ Destroying Containers ═══[/bold yellow]") + + success = True + + if metadata.containers: + for container in metadata.containers: + console.print(f"Stopping container: [cyan]{container}[/cyan]") + if not run_command(["docker", "stop", container], dry_run): + success = False + + console.print(f"Removing container: [cyan]{container}[/cyan]") + if not run_command(["docker", "rm", "-f", container], dry_run): + success = False + else: + # Try to stop by project name + console.print(f"Stopping docker-compose project: [cyan]{metadata.compose_project_name}[/cyan]") + if not run_command( + ["docker", "compose", "-p", metadata.compose_project_name, "down"], + dry_run + ): + success = False + + return success + + +def destroy_volumes(metadata: DeploymentMetadata, dry_run: bool = False) -> bool: + """ + Remove Docker volumes + + Args: + metadata: Deployment metadata + dry_run: If True, only preview + + Returns: + True if successful + """ + console.print("\n[bold yellow]═══ Destroying Volumes ═══[/bold yellow]") + + success = True + + if metadata.volumes: + for volume in metadata.volumes: + console.print(f"Removing volume: [cyan]{volume}[/cyan]") + if not run_command(["docker", "volume", "rm", "-f", volume], dry_run): + success = False + else: + # Try with project name + volumes = [ + f"{metadata.compose_project_name}_db_data", + f"{metadata.compose_project_name}_gitea_data" + ] + for volume in volumes: + console.print(f"Removing volume: [cyan]{volume}[/cyan]") + run_command(["docker", "volume", "rm", "-f", volume], dry_run) + + return success + + +def 
destroy_networks(metadata: DeploymentMetadata, dry_run: bool = False) -> bool: + """ + Remove Docker networks (except external ones) + + Args: + metadata: Deployment metadata + dry_run: If True, only preview + + Returns: + True if successful + """ + console.print("\n[bold yellow]═══ Destroying Networks ═══[/bold yellow]") + + success = True + + if metadata.networks: + for network in metadata.networks: + # Skip external networks + if network == "proxy": + console.print(f"Skipping external network: [cyan]{network}[/cyan]") + continue + + console.print(f"Removing network: [cyan]{network}[/cyan]") + if not run_command(["docker", "network", "rm", network], dry_run): + # Networks might not exist or be in use, don't fail + pass + + return success + + +def destroy_dns( + metadata: DeploymentMetadata, + dns_manager: DNSManager, + dry_run: bool = False +) -> bool: + """ + Remove DNS record + + Args: + metadata: Deployment metadata + dns_manager: DNSManager instance + dry_run: If True, only preview + + Returns: + True if successful + """ + console.print("\n[bold yellow]═══ Destroying DNS Record ═══[/bold yellow]") + + if not metadata.url: + console.print("[yellow]No URL found in metadata, skipping DNS cleanup[/yellow]") + return True + + console.print(f"Looking up DNS record: [cyan]{metadata.url}[/cyan]") + + if dry_run: + console.print("[dim]Would remove DNS record[/dim]") + return True + + try: + # Look up and remove by hostname to get the real record ID from Cloudflare + # This ensures we don't rely on potentially stale/fake IDs from the config + dns_manager.remove_record(metadata.url, dry_run=False) + console.print("[green]✓ DNS record removed[/green]") + return True + except DNSError as e: + console.print(f"[red]✗ Failed to remove DNS record: {e}[/red]") + return False + + +def destroy_deployment( + metadata: DeploymentMetadata, + config_path: Path, + args: argparse.Namespace, + dns_manager: DNSManager +) -> bool: + """ + Destroy a deployment + + Args: + metadata: 
Deployment metadata + config_path: Path to config file + args: CLI arguments + dns_manager: DNSManager instance + + Returns: + True if successful + """ + # Show deployment info + console.print("\n[bold]Deployment Information:[/bold]") + console.print(f" Subdomain: [cyan]{metadata.subdomain}[/cyan]") + console.print(f" URL: [cyan]{metadata.url}[/cyan]") + console.print(f" Project: [cyan]{metadata.compose_project_name}[/cyan]") + console.print(f" Deployed: [cyan]{metadata.deployment_timestamp}[/cyan]") + console.print(f" Containers: [cyan]{len(metadata.containers or [])}[/cyan]") + console.print(f" DNS Record ID: [cyan]{metadata.dns_record_id or 'N/A'}[/cyan]") + + if args.dry_run: + console.print("\n[bold yellow]━━━ DRY-RUN MODE: No changes will be made ━━━[/bold yellow]") + + # Confirm destruction + if not args.yes and not args.dry_run: + console.print() + if not Confirm.ask( + f"[bold red]Are you sure you want to destroy {metadata.url}?[/bold red]", + default=False + ): + console.print("\n[yellow]Destruction cancelled[/yellow]\n") + return False + + # Execute destruction + success = True + + # 1. Destroy containers + if not destroy_containers(metadata, args.dry_run): + success = False + + # 2. Destroy volumes + if not destroy_volumes(metadata, args.dry_run): + success = False + + # 3. Destroy networks + if not destroy_networks(metadata, args.dry_run): + success = False + + # 4. Destroy DNS + if not destroy_dns(metadata, dns_manager, args.dry_run): + success = False + + # 5. 
Delete config file + if not args.keep_config and not args.dry_run: + console.print("\n[bold yellow]═══ Deleting Config File ═══[/bold yellow]") + console.print(f"Deleting: [cyan]{config_path}[/cyan]") + try: + config_path.unlink() + console.print("[green]✓ Config file deleted[/green]") + except Exception as e: + console.print(f"[red]✗ Failed to delete config: {e}[/red]") + success = False + + return success + + +def main() -> NoReturn: + """ + Main entry point + + Exit codes: + 0: Success + 1: Failure + 2: Not found + """ + args = parse_args() + setup_logging(args.log_level) + + print_banner() + + config_manager = DeploymentConfigManager() + + # Handle list command + if args.list: + list_deployments(config_manager) + sys.exit(0) + + # Find deployment config + config_path = find_config(args, config_manager) + + if not config_path: + console.print("[red]✗ Deployment not found[/red]") + console.print("\nUse --list to see all deployments\n") + sys.exit(2) + + # Load deployment metadata + try: + metadata = config_manager.load_deployment(config_path) + except Exception as e: + console.print(f"[red]✗ Failed to load deployment config: {e}[/red]\n") + sys.exit(1) + + # Initialize DNS manager + import os + cloudflare_token = os.getenv("CLOUDFLARE_API_TOKEN") + cloudflare_zone = os.getenv("CLOUDFLARE_ZONE_ID") + + if not cloudflare_token or not cloudflare_zone: + console.print("[yellow]⚠ Cloudflare credentials not found[/yellow]") + console.print("[yellow] DNS record will not be removed[/yellow]") + console.print("[yellow] Set CLOUDFLARE_API_TOKEN and CLOUDFLARE_ZONE_ID to enable DNS cleanup[/yellow]\n") + dns_manager = None + else: + dns_manager = DNSManager(cloudflare_token, cloudflare_zone) + + # Destroy deployment + try: + success = destroy_deployment(metadata, config_path, args, dns_manager) + + if success or args.dry_run: + console.print("\n[bold green]╔══════════════════════════════════════════════╗[/bold green]") + if args.dry_run: + console.print("[bold green]║[/bold 
green] [bold white]✓ Dry-Run Complete![/bold white] [bold green]║[/bold green]") + else: + console.print("[bold green]║[/bold green] [bold white]✓ Destruction Successful![/bold white] [bold green]║[/bold green]") + console.print("[bold green]╚══════════════════════════════════════════════╝[/bold green]\n") + sys.exit(0) + else: + console.print("\n[bold yellow]╔══════════════════════════════════════════════╗[/bold yellow]") + console.print("[bold yellow]║[/bold yellow] [bold white]⚠ Destruction Partially Failed[/bold white] [bold yellow]║[/bold yellow]") + console.print("[bold yellow]╚══════════════════════════════════════════════╝[/bold yellow]\n") + console.print("[yellow]Some resources may not have been cleaned up.[/yellow]") + console.print("[yellow]Check the logs above for details.[/yellow]\n") + sys.exit(1) + + except KeyboardInterrupt: + console.print("\n[bold yellow]✗ Destruction interrupted by user[/bold yellow]\n") + sys.exit(130) + + except Exception as e: + console.print(f"\n[bold red]✗ Unexpected error: {e}[/bold red]\n") + logging.exception("Unexpected error") + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/gitea/docker-compose.yml b/gitea/docker-compose.yml new file mode 100644 index 0000000..73cac52 --- /dev/null +++ b/gitea/docker-compose.yml @@ -0,0 +1,57 @@ +services: + postgres: + image: postgres:${POSTGRES_VERSION} + container_name: ${SUBDOMAIN}_db + restart: unless-stopped + environment: + POSTGRES_DB: ${DB_NAME} + POSTGRES_USER: ${DB_USER} + POSTGRES_PASSWORD: ${DB_PASSWORD} + volumes: + - db_data:/var/lib/postgresql/data + networks: + - internal + + gitea: + image: gitea/gitea:${GITEA_VERSION} + container_name: ${SUBDOMAIN}_gitea + restart: unless-stopped + depends_on: + - postgres + environment: + USER_UID: 1000 + USER_GID: 1000 + GITEA__database__DB_TYPE: postgres + GITEA__database__HOST: postgres:5432 + GITEA__database__NAME: ${DB_NAME} + GITEA__database__USER: ${DB_USER} + GITEA__database__PASSWD: ${DB_PASSWORD} + 
GITEA__server__DOMAIN: ${URL} + GITEA__server__SSH_DOMAIN: ${URL} + GITEA__server__ROOT_URL: https://${URL}/ + GITEA__security__INSTALL_LOCK: true + GITEA__service__DISABLE_REGISTRATION: ${DISABLE_REGISTRATION} + volumes: + - gitea_data:/data + - /etc/timezone:/etc/timezone:ro + - /etc/localtime:/etc/localtime:ro + labels: + - "traefik.enable=true" + - "traefik.http.routers.${SUBDOMAIN}.rule=Host(`${URL}`)" + - "traefik.http.routers.${SUBDOMAIN}.entrypoints=https" + - "traefik.http.routers.${SUBDOMAIN}.tls=true" + - "traefik.http.routers.${SUBDOMAIN}.tls.certresolver=letsencrypt" + - "traefik.http.services.${SUBDOMAIN}.loadbalancer.server.port=3000" + networks: + - proxy + - internal + +volumes: + db_data: + gitea_data: + +networks: + proxy: + external: true + internal: + internal: true diff --git a/gitea/gitea_deployer/__init__.py b/gitea/gitea_deployer/__init__.py new file mode 100644 index 0000000..b27051e --- /dev/null +++ b/gitea/gitea_deployer/__init__.py @@ -0,0 +1,8 @@ +""" +Gitea Deployment Automation + +Production-ready deployment system for Gitea with automated DNS, +environment generation, and health checking. 
+""" + +__version__ = "1.0.0" diff --git a/gitea/gitea_deployer/__pycache__/__init__.cpython-39.pyc b/gitea/gitea_deployer/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3175ff2595cd94a11e635b9a4d5552141b9f008c GIT binary patch literal 318 zcmYjN!AiqG5KU60vf6*xYoR9XxgshBZ$;>>mto0HlcmXSnB7*=Z}3O-lXCShJUK}Y zIxuf$VCM1OsM$1vl>c2hT`hc}OIFcjBy9uRK<27oz+3pqs?b(pC195>C zX&_!mWwk6(ReX_`m>-QTQgOLnrOR}=ohRKCK)W2Ey9GESc~$`M8PS9-4Dhqcj6IOb nSVL2(fIcgFCnLhB6Db2Q)&zjGX}g<;n(b8h*nh+)3|G@%eh^=) literal 0 HcmV?d00001 diff --git a/gitea/gitea_deployer/__pycache__/config.cpython-39.pyc b/gitea/gitea_deployer/__pycache__/config.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c81782bc3f534d954303624ae9a236dbf208458b GIT binary patch literal 4496 zcmbVPOLN=E5yqP!NJ-Sg@^ja)*A<&CmE_$do0MX^R+ea+YngJ<@@|3)3gQeYSRlZi z0c8tCUt(8I*;FdmjjODjl52iO4*3DN=Hy$>PGyzu0ZBoRWUE3JdV0EhFg=5wuY0_S z2}^?C6L!!4?O93s2X^v54LV=I!vi2hawH-XC6FCiw2GrZs|IRUbF{AR=rZcHz~~m7 zg5dSQ>=vD(EK&KLMN9j#Q+^;3gB0FKq(CQLtIj0VKw^?2NQ$qO*RnH3l&4bFN`7Ol zMq%6U^qCj?QD}D~(hsQJj+jkoFNpTKG>mOd7+_F}o%e=r(D6v~dM!xXlZF?6~ zBzsevMO~YQyFQCT@seGS`QBDQx$T9-UR|%-9_#c`#jW?Sa;Z^O(jtuZT7k!Ts<(X_ zkgA+&PrP^=T63d^K6*h5t@A$_I$yxUD?q3OJ160;IjUHtn$8?qWR0?h+aKV|uKP{j3x=A4>yoJDx_^;a30x55!_tx; zwwbpS({3;DV#=2~eoVcLy4m(pwv=t-VsDQbFceq7F(OP^lZE8OYRBlmZ^DO0^14b zYt96n1b!0uDQAjSfUf{Q?Mw@P2KZTLM(}gOepc9@0lw_ajo&pd?9K?gv%pU|^MXGI z{CVfBnEwL!c8*Bo;vZ#liG1`%124~$vgm(IF2B*73)B=nn_Pk3MN)=HzM59n#_^hc z?d0BL&kyrqd^BK#$RqImPsC|1I(ceB1iNiNpy@1p;2p}`R@Ci9oC@K6j7~;-ES+lw zQJ=H}k5Sj_`EDHT&@i3JNs}m~u1`LkS?Y7P#J9n6iTJH}=|#i{hrkD3y!3Uu9-Mq}nv{{A*fX1A#q#M`ZH z+S+krze}Tj{2pN_oy%+dFs2M5J+*cz^V@qa=RsO}LASP}XvgidAe|a@hA%l34ChO= zdgJ~^S`4C&8_-=Ez@8YO>$9+>z=We;5ttcxR^j1)0}@N`0O9s!S&Ef+a8mc>SBl_N z;M9S_Z^jydum5X#pz_aw*O8C)fsEMpTJ3!)tWB`iVngu7f&9F%5BT;G7bC$nfbQz$`oGD4$SYQ zx8SiX4NQo>Iqw=AMav%k?t85`VtaO%#@i9G)O4C z8yk<@XU+8^^2aN`bepx!X1(^*`8a3)u(q8=8#My!E?vvkwE<+v2#c+Ai0F(Vq2(fs 
z1F*aJo}Yo3H-Si#mTJkCqREP^9BA6GHOjL2y<*IO)H={qK{W-^u_ha6DgMG1xRFHr zr!iZRldH!M*5ZljTUf1(eu$6-cmyg|Ky!$S1L-;7;=aVM0rn}_L(CC)C_UGO<}PSd z(C9ghAv6wRCuofCB%;4nhocI_*pXNaI>5i@h1@7pXnqG86Ep(dhGU9C^JmZ$K~v0) z!M?epv%iDR%5YKDMJn&g1IPpL zW`Xu%2c{8Q$qZoYZ1RUOdVH0Ymm?q@6|ERg0R|rBS#jBl7^AHi`~Mf79W4|N%fYcD z$+@8`Cy*?etA)EZRt$C+#ZXB1L$Yiqm)At~0H`Obhm!^u*C796bqjgp#ORepyN>uA z1om}NE?l<(_IJDvwV_a_A@4J4pITSg^rg>ZzW5%4F1ehm6YOrE$MzPr@7}RLU9dY* z3~pY}7YDbO?%^mT&?PU>;d}({7(yC zuQVI=#)DL^HJcmF)Ld=WH|wh_>&a|AE7$>_hGht>W@gcTJX|CcUv|_6nX+IfpB#lW zQR!!w7T#~QAZXhCpi!M**I`a}0}0|*YGivUz%7TcM`1|7R;u@VJwQh7dE|$1tI;?O z+D=7GY$$Dzs`dO{PJj(-V6Dj-5t6bq0But>wUe4rWm{z#5(8SP~4zyKRlu85v_ zpdDy>a{dG|jb=LnNEjzrstG{)5ikS~uLK7bAqWns033iK0WT`F`r9HLQzQB8Db$~k z*s&g_9IkcQmWtA(>+u~Z4?xQO*x#jiQS4KmTsZF3jZwXQC$SdnVUc?$nOv}Qp?fE} zxnSo?>v6$-=O+FZ>`~1M1?(O687!izWqI*1^Vuks*)lXIK7$LjbdS02UqQ(+i%O6L zU=tPXdrj5;t?AmojDu4)bTqDdi6N#l6A5l6!^&G^x^&8gJOs!fHiyFq6;+s}Q-lNM}_jyM=>(fdnrIxiyOmE2c^|0R-y_eiI0! zMb(r|bq1b_T9)w{{?!U{YFSa_%Du^-i*ZnS;3Crk76lLntctzT_&QHH!(1npn-+R- zqYwP8Y)g!IuZJUWt!WX8hILU_WP6BbFfG@$DE0$W(75P7#UC7!1 literal 0 HcmV?d00001 diff --git a/gitea/gitea_deployer/__pycache__/deployment_config_manager.cpython-39.pyc b/gitea/gitea_deployer/__pycache__/deployment_config_manager.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bbdd7e46f44826f780ff17ec00a3571b6ef79902 GIT binary patch literal 5397 zcmcIo&2QYs73Yv#F2&WyT9IYvqbZR_jW-Bnr!CM{Fj6N`m5pi#a>ar8h^t`s$YqT4^+4d|mgn2wC=AW?kf~dV?@H%%N8QeWGk662j z`V99__m0f=C2npRtw!;{TNnK}J?M!f_uo#FZnQIyVIHN4eu4zsMq zl#7bUqn<$7{vgcnw=89^(`WYWD9e?5w@-@;;|>ioeOaj7!kzsCLKx7e(KbJZ_S%-P zMU9&`4gIu6PkZ#Njh@cvSsy*!o6uL=5p`idYoNcU>uyzV==u!m9(|#|nbmEL6K!+4 zZT3Xlyl$JT##*2{?9o(<6LZ`Vc}S`${BcPuHS{y6G!|33S2NjQo2R`nN|ZH_v2sS0 zrB2%Grv5?oNwq8Ow=#{`FQIK7<6hPyE%Pg=FYwDi+f{XCg0|B4 zM?PP?Hum|6cKj0#?)PDRIHX8AqTQH$MLtaU0FMMbk{uHpA74}^jl7LJdlh2HhDKlv zja_3`v)^OJjNCl7a{EhzTYoj5(EA#v_s?TuJOIa1^UnVhf)xe$4kw#erw|J?ujZ 
zwz7xT^Twf_*M{tf9oBMZXmk6>{K7wU^7^hjtPLGr#-z$A@h5P5&V;rDhF&<@m-)8FZ?qTREQdMjkCEz*W zcQU|-T1BVeLQzPg1B>Bfm=V8K%Pi(rK3VCl@L=WLl{+h2iUmb&bvH{B#U6e`s0VDE zDB*~)o5i!VDL%#CqaoM)Rzs3|C@V=HDi=K1B`%hfPL#l)p_HBWMWSpyyv+x_zFg7G zAiK0xmp`VjuTjfP#pKGR-!!56MjmQ{UU$Z|kz|0O;87O^3DgJFF zG-quxMp{>b+~NVC0f_$-7iS|oJ1ADOZr=&@261sKEBCxiaqbL zc_8qbc`^P@^=+{davhQFQ-90^NFLF+#+o{Y!6x#diRqgmLx;3gsXlfe5TUaiHo_>?4s7EgQH%H_da(-igy~ z<&+9&nHnYS*(v|ZPA{){dd@#!g&DDlQE#&1+V@>5Cew6#hL|jjRH0-Ql`%?)lsKdK z1*{-aW-rpo3YY<6xDCwI9@9z7DgIemX#=6a&+iM^6(&UfL1(XtzAD$n2`cof%zlES zvl&hG(VD+CJ_+cMB|_P`PsfHy?{mrL9J=x`{brg7Kk80gz1l-j4@#(QeqXG~ThN+( zlf*iSlWs0I&~`xW??4nA7a6Pz-A!RD*ggoV&7Z{?{QX@F2cjX?D1+#4P*PSe{Iq42 z^g{-3&-cQ-b00aHKDnI1*9+e^?=A+&C~6trGP9X&6+io5mJjSTFvi)SNVuannxP({ zP&)9s@%#Nlvv>(LRtCVKwTr;GXUf-x<~{g4gU{bH9=JB}nA^Lxu6ZzI$laJx+yKWp z$8`i%%I!>@gV~gW-7`Kx7(KKzyj1^vF?O@UT=O?uuDnCK{}l<+welvRQOB+J!uwly zH`j3(=4HSv)2{l%P$qy)t5$+ZTTlnIYcvWIr*N*8&p{Hj@JD=|(Mi+rW|_%6{N{16 zyvklSi;IpKtLG23c_O1C<|iK1;GyKO%&6o`cp}x;X*iBtmnBU zIf?kGwC1dNrzrdUl!$T=x{Bw^InK$!$U3&XiIpp7ki=24r!_Bsik4AqkXqp<2}NDy zdezfpQ7(th;#ql&qSJhF<05ChXniEAMc=cjmp{ z`@J_~e|FYU@Z1s?yEjiO%Act)`RFKoh&NhA!jzW6RHg-LOU17qXg#f^sZ?hKde3MX zGG7TQJ+o!13Rib6ZttnB>Q#lA%zCUa>l3ZzFl|F=*rS$nnePPsVULHg8}v6fyWytW z>$5??9p{=KvVceK$D$ikJv-1fa^t=m4ccuUMavz(8*t{buHfysFSTd4D-G1l?ZR$1oSSWmmH~J$Iu3(o+OJizF=LXY0Q9CMr^=H^E zt30l>Dzqn*Y*u}2v`lVsi#g9UHp6Nt+f?RN)XhFK*c>~8I)|N>wKKfNtEUvqHvg=` zjLtc-r)xcPHEgp&QBJK~c|qxDE!aN(3) zFNn>kS(a#gvCgJ@(T^}e4uhfF=?mHDo@2b@W0y@?TyB)ZQ!B?%YGxoBy|FY+Y- zRrQJwr8O@fM&e0lylkv2Sm;DJPOF9aHdlAi}na(ZxgRui%yH{~R2eMS?RMOFiw|Aa4;N#XKkP>F zZAix2@daoiN^M!?^|wc-SELBXhCj@@M@@IFQ4v+FPSB>)N^cut7IQMs_`ShqLlg5T zwW@{qhf>;Ea*PHZL2?#vM9NUKd38=5otrMQshZRWA`r(h^ySub4y}p0D;af9!>`V? 
zJ&i<&JQzmSne+>5u|~TkYBzi`7ud?3^;HS-0bt(^U9uMf+J7?^EUPKU5r5|MjQg)k zjUTeF;&V)ni~J6kA1%L@+pxQDZ^iLW)LdRpm61hxWcStORQ2}NCIDp#aJb**x%9;; z2;h+1(^;=rm4~I0h$5mJW#+gxIy+tdlLHz?YDTdi4- zS7P~HWI#_!0+c>75`7mKMy^80Wm;m$G$S*jSHnLEvnVxkm4{puXf6d)HeczBo)7jZ zSOYur$S*C$4`UZ3mGj4B^;jn_5?;2!rn{KWw@7m=W|J(IA0@R&ZI7i!3OvwWN~h^g z^eSt|i>fWC>Ef+a^}%&{cgQiXI}kU{YHI5yam~g4B9L-@1l^}7}1el*_4JT zzKz-p4`g($*+Qp`P|%W2+{2i|XgTI>=92dcss9M$Gozg&rW-AeH%9BtD0%6{abOI> zV_J;{_gKH@cSFPu11J@df}llZ03z}$9vhhwKtDqvF_=0~#C)vms(T7xr0i+{kD3^J zI-mqHOe$xTN8|?$)M|;27UPkXn7ft4B6>t0bJqf4ngwAd8Qc~p;f75VSn3Rd370um z*o&z7GAxw)Zn_(V2wX4=EA>iMSpGSKbUkNr2O)WYl6bK0crF3v)^DdGD?*FD42|S*mN7VAXk>^_@H(%4AuU(VZdu^qX{#eKr>C956@vNXhms-3(*%ZK`;5o z61@zC7tpJUHxo4kf(g4oGSDlL4EzGgCXj3$Lb9D$Kr$l!63Iqll}NTI>ho_LMsh}6 zUmwNpxt>3C?{OC)Qaq#pBA=1_A{dT?J%aB`LH@r?BBk1NS4g-n+)Lzdj?qV7DUtpE z!Fy^A!fgOK6vRj+@UP%lgb;Ho@Wmo}X26#rQVDz#9mS=9lxR>Nikbv@$42!q;FqaM z@-;=h#MI)uRP`Pu=P98OHcKd|%4{J2!Zs?R6G$Y^^%?@KuZZ+~g7zd8Q=R}VeK{E- z1L-1+f;XoFi1gw{q9I>NbY?y^nDto|aJ2mslW@!&b-}Gta=FQlN$GWB*72&RQfao` z7-whb&nH&)heyBl{dU0T`L za93|$9k){s12E_hugn%8%+DO>b`h60i`DKQt=~Og@13tRw|=czztOC3j6QHSWH4KH zLg##=4q~~COmONbVN*0|X51E#BUa+#NqcM6=|4 zu8Qwq*YK7vbK-}%OVCl|S9l|0GG#$C?0*@iwr^P4KMY&@`^yf8FUAuGc#9&HlD96R zkf>q~+$GP;x$D)$=|n?be`GR4vRXy5nwijf1i>RxJJFF=C8s&QJB8D7);wV8WytiE zIPIW|{91nX;Gl2nN52uf3EXxnqqA$v7u<6s8=&eQhqkg)b$+Zs?B5va=<|AU;gt*5 zSMRK^yuMs0zE8?|ZwmX-)>bZ0AwTg03>o|$iSw#a*vk6)&Gn`;_|vPnxbg8g51W_V z4USk^2DgmIFb$CEz5WB95p8?R4>!THTmAzMi{YUw`hm0_zrDTWh!3#IhIN=$$D!g3 z!bIG9Mz{$YwvLwZcu{qjcE5+|zQCJ@ipSKNc7S?o+TXr}dUN8Vh_<2PUURK+RLo+= z)bYIRKO1l{Tl2h+27XXp(|WAm_B^4ZpP<`Qc^{bSbOqs3kq(bCd`>H!pzp_mE|$a+ zCGS&0-a^pYC-x(QVe%e=qE+%c5i!248rq2?bLRY!<0qAWYMQ3fpQ&p5wZ%i_cd|C5 z`?dNwFXwB_2)d3JGnCNs=*l$S3cB~?E>bfd?!YHz3$0NnDFhQ@!G*M(V%l0$`zM)@ TehQzWL^RMaZM8;!x}*OG$kK1J literal 0 HcmV?d00001 diff --git a/gitea/gitea_deployer/__pycache__/dns_manager.cpython-39.pyc b/gitea/gitea_deployer/__pycache__/dns_manager.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..db2cdb23a05f8a7de9a174b0a8034fbf1bc93f11 GIT binary patch literal 6756 zcmb_hU2ojR73J5GT<%JiW!Z6@G-jGK%{I2=v~CcnM(Phmse@KERnx>IwLeT z8{NjH*)`?87Fu14t2;Jt?y6nqErm6h`H{j*-nyfA+gwMD#cb5rceFcdcLMiK=HTAB zqje{lc2()LvR^q1OIO`ZKl0c3CXZ5gGiHO3yElV$-MtjX1GXCag1Z;4Ty}#fR#DO*W*Yw_xvzST*3RH-wUEOw>NtFw9nXVvhMm3tJey?8Q^iQSC_Wwg$>i* z6e1Sc$z?xCICF2V^T;KG<32A9N!(RG2$woWVS8TWZ*tEo9M3Cf0oQHM`(WUQRg2|$ zEbe*Ur;4COmBqueb3wE!{JE5G!t|6Ub8A7${j&5J<|E$XVva?LSI)DT?Qe+&oO6JMQ4B4w>^7KoPh&bW zJ~Fxu*l)51?oDRl-eSj4gF0@r<~@Tsto2c&+hkA4er+~!PiK?t5b7LuysDc*-C;J3 zx)wLsk$WneVMp<* zRu)GbliZ_K9UgQ|K5z-YDsI+I<~&usBygR_<%eAbU}xr^Zer zHPOSoukF%v7PBy7JNvccR>$QiNCQ6%GA>uVazSU*enG4y^P@X@Xf)vYx?#is(u_TQ zD~tB@q&oNV!l+;N%6gjilli$hym@v^3$wj=bFLQ#@W;6uXXog#WG-t9H7Z_Qe><3(HQ*j>jP+10hZ zYTQNBTk#X_4MYe6Sm!?DA}OpeURwi&70orC{uI|-NY5`Uk6O6s=!KE+a8=A;y~I%} zMtmU1)rn04OuXlP8+XYh3dJ^6Q=QaoRr{xHWJkuMn;rJ;Xo7kcrr_y7L_Lijs}6!1 zeM7p*UvZHu8_Etjh;)fvRaLTkOv}|k-PTfV0}iuc;Md3{Q0^Mrdam!7yNasdZ0#!B zMsDor6f}0VWL;icYs#*+-AHX_=M8eSJL-q3Y-!3CGi~PP9hEuPjcqHp#2>0Lw8mqI zrmMR4xGuM@DOcg+7?rd27ggD|G1h1Cm5JOgzmnS@YS)za6gIhS-?k&CJfBg{pa$-I z$j7Lakw0Bp=e_H0uv%jdW zB_-_ybz9<48l}Uq?I1K*T{J{dxrR~vYf>{aS;Q`DL~^(i-@h5%QSX-a?NU; z<%033$LHOpPD`A?7m5bsD}%Md7W{(&$8r`r@Fyr@h^MH1O3>7KU|cUjq!}cIK};Yc zXQ78+*s106(Km1PxRec@rg(;)CtL}_BE3E7$5FxygT#jT4pmwe-(W_hBNZy&kLV-P z1h4GqmEC@TXOmMXl!<9gLohSspQdV$Ow-nEt*y2Jd6WotGupIfYEx?VTn+jMymYZ+ zl*2tFN5;gKCkB!D7Vdf!D24~|%!mpxP0WLu3xNBS; zvWH11t@q15VFm(_M$!@Cv$1WYdc`BkhMCK73NC5n4dM`RNn^(bmzcS^(`0RCgG)Y< z*AtR4tQ1(ztr3?{uR{T`jGf!Kp4wHnn|U+&Gx{BtY*Vt@bl#+A%8}D!xI~^Mm&|}mj`|ntTylXiNh233CmzG}@woV( zkp)H2;Sux!=qoPdFPsii6H;OHpDsw*{o<*xygUl(y0uK2y{V>6{PeiAlvf3dLq-s$Ccm)>`;$>G7u8Y2V# zhI{%AH*;XzX*lp{h)eYXe0u_ZJ9cnE;ypWpbs%CJ6U4mxfLLd-(P zFZ47GyH;Psw?Nk7B-%Q?bPx4vl`NK9$!GGr;JC4_xd!IMc&r8GeOL0C&L&De6Q85ElQ+SklXo>$ z`QYOdxW$q3WkVpYY+*O}IMyXb zNUZFK_^b3Xh%(x=e>K!6_I%`1Jd?bJLTMT5v}zDyu?hJKR70mstEO(N4thYbsM+xv zp7*>Mh?Z1-Frq0xFj6NEar0jx`nSLjZG3@i;86#%Nrlj!ra?QFyE>tI0~`Juiby3g 
zfoG{@n)h|$gbJGmsS~IaW2Xs~LdbV0ApqomCa>ES@>|%8jFDezkjXmIqt}35eWShg zH(=M2nxh5mR>~%~D$Sv7TemG}4tcI&_wQeiU4)^RH)EvP1P=p$1^G9U3d3OC`8r6C z;`^R0c+JCW8}a`|YCI{RJKqRsBCpE}1nXy(iP-kSdf56@I# zeoRA|kg>jG1jz$L3x&z#_vH4a+bTCLUN1(L;XImaG_f!8+hyb*;j7S*q1ev`2ZVWX zfxe1gaux+4-c)VGcZqs+3YgaD_Yc!@(Bh!Q#4csHPdT=Kcd`fXo@6T!q}+*M%0~z7 zN=8`!C2k}lk(-dw`E`oQtjt1a+t``x7!s4}MuXz=HWFDflYvadQ1j==22>dXYbyid zkOG$(12fkvm9JNbv$plyIuIw%5^-e$>`x=ay~R_vKUhKXXpa^rwa0IzTp+dVCS2SI zdfz0iuRQh^KXD=D;IU(fexsnqh5(vV1pEd*Oxfq!7XMjf7^sCX@YtBM6T>`O(7$nZ z7Rj(^-OqwP<XA&ooR42ko9|}nxi90%S}Y?`3gwL%DjH;B zUxq0uPNCtYE|vOI>e2%|v6#T#-79VIf_@YJl1$^BiqnQ-oWx$)L3EM(;d+GG^H4`! zto}_LlzA4EKNru+fv&`HD9Gnao&5$~U7~^jA>O8fW~f$>hb`~!lbHg$OF zjPhtCfWK_NJ@e|!ob1;*B=g&F)epMaRyeS!&S{WywmmE zh=)t9cEsJ3I*fO{V4I62bi`6R=~9Zt?zSj^#X*vScDtk+s;t>3td6f2sM)h1xv+0w z*Bd%Fc!6mvihSzX(^yeh0sChrGZzCta@#%^SN!!LK1)<|vYl89dXeqgqUZbA2D_Oz zO{&A9(`Jc5q9m6rriPBPqnpZ)N z;tr{^r5S1oKUup^ZW`zUHT^9}E_Wui!L*MxrZeLMqfuZdxXFtr6kfuwylFA>V}li0 z>4QR}!phA0SY;JfMawFi=RiHHq3;1Ui|;e+B>I@Vc0!T02iY8I53p0ic8Azu)XcIc zhBZgnQPdpFdJ*?y>^Qj3u`*WubTX5I?{%p&B7QP`wWn!EszY^+)?Dh2v)=Z2QsVxW zEH!EG^NY%al#95-Ga!AnuQ*Cy163n!SC5R3fQnDFdsGixU^aUE8_UkFD}Fm_LltA5 zYe9xZYuMx^(F&J`Z`5yeUduKqr5y!!isgmu)R2wc;?5p@s@V9_e=* zE8sX!;w_}kO374BH9k5vkHx&%4cG*|*m!@|Jm8#k5f z*k^n%ZvRSE1_iWUVTFESv($YDwA_6svigM`$!lQB5h{I?xJK2!xufG8Ld*sAYe&QR z$WCK>GuAuXHamY_q*Iyb zmI6t7on`yVdcG_+xtLJ;J_)54ZiwEV%yAhDN+x*OPE;GH69@4dFG^{qOX71;*@&;l5Q&wwb0k82t{ri@e~n4oy4l3s6s3ep}4B9k3emaYA4*m5jhmXwK1RbCR&EXNhwx!L$&ZTzA%a! 
zp0ip_ji0(Cg?~Nu?Tb@+T!ud{GOVsyI9juIo8ybO|Od6 zlR_FZGFsn-7527mxLicjZe-sHdLFaa2#5aHw2fVwku;3MNycY>dgGz zIm)@+@c#jzX-=6=f_4C}Dmc`Z!4qbOn@}D?Mc{dBZu8dO zbRNe=!>vQI2^^lC#9`Vh16(e5d|WQe2o&=#)4-T$y>6^6u=v{(Y!H}W3KNK2w(nO) z1gw`*llNo7-xQOV97b3SLzXRw1C}_gCA!Cb#1qoz4xyV^!*=1MQLM?iIJq3ZAS6>D zqRfmVnQWq^mo!tYeQ6l+sc}S)GZ_Q*H);L;FaqD8`%R*k;?+llhGB#$O~CHy{jl4c zcfKd91iPTyeO%ZXEV$#xPwxjEcuas!JWufY36Tl&Hbc`L!s2&m@l^)y<~=YR{WiZp^46R z`5LB6${r3X7`_o;Aa}%^w2zaq_}jy9Ce4o;k0%foAH{AgzNeG`-4c2aP{?2=WA;bI z_jz5(D5aDLd*wZz(v->z9;we>p%ZhE@6nR%J7-!oZAIWq+L5 z2gQCd?Kvp1TEDcVh(GpA;8J4;?rHC6gYv-Ymv_r-7H!LYW7mqnr(f<{?BIt|A#N(S z-@=Kay8U-mpih*z(8RagT|ixAGpH{Ac~cT z-qK=drdE4kn#eihC`U&+3%Tc6%|a4T<`4c`EmRTGq~%%jxEh~%^gh+K8#n6e<-NZ{ z(=z0O0v~ab2mvikmKbY6;EA(%Pi85o9?dwVC)poS+hrn@Hy0O(yiA15mXviwe(AGi zl6?+eDQpXE5d337n>_&A@P(EMXMA3**N)B2A3ZxaKQ}L^b$wPQ^2ITf(-~EU|Bd0H zAO%T^U1Wc~_L`I>F{5pYK~iy!Q#YB9HPdu#Qhw!blXvMq0VxvX{Ar|V6HZDi=|5b~ Y6I4UW-Wn$TW1vbu-O^_ZnmSwjAEImle*gdg literal 0 HcmV?d00001 diff --git a/gitea/gitea_deployer/__pycache__/env_generator.cpython-39.pyc b/gitea/gitea_deployer/__pycache__/env_generator.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5af4e72eae756999c45d64e9773f1169b0fe3ac4 GIT binary patch literal 11153 zcmdT~%X1q?dY>0w0}zBLntH9ZV_I4ZUXhY)m#t;VyL$T(31UdgDq+`)3^79z6fgtz z3@C97=3qIMN5!>1$I=#fNyWLQQmM)@$DDGW%55tLRqY>;)g}3T-7|v$2uhnOUjo$h zbocc0dw*ZIIW?tA_338}|1=aXZ!2 z>KcD%>KXpd*0Xr4?OZ2c&vy#-f=vCjw%*a}I?rd?#ZIYS;`wZQs#C6)JJa>)PNiOv zC0iaGvS;?>`eD29k!0np{MV9|f1*5*>$4~qEFES2iBf+JrJ_|rsf5xTN>f%Dr7}uK zEM--yP6z*~-*C2D%yl}p;~N{cV>8olxsK6stzO$U77S*0+h)`D;*a)%?QAc4o4UTt z-0hZyFWnxqjpZGG6a8A9Zri58_%NQa?y@C)!B{aJ%k3DCTxJ=b-L{*2l68hUtHlc@ z+d;eAcE@Gr8fNaAp2wRH7lrTX)vmYDZ1~l{6h_B%THRioPp&@)cBEQW3A3h0vxEi9 z^vz})BUj}xkAm&DIyUm!vgvPD)lj=m&9qxB&kyr=y0l2M-K1gCKNW?`c)U-M*b>k# z)s?$IVqLX0J7X!AB>q;Tw-&uK(K{QxbJ07031|a4^ss>aeQvgUwimp8%>@u!KnTGw z-luID0Dpq8ZQeF4c)c}%7p=o=^j3SU9po_>_(Tl zTR;hKTdQWne1m5jjZkki1SKF}YBV19%r*^(Qm)ak+$J)r=d*CS(J&pyjj-Nmd@r#h 
z01i)Hynn9ctTXeRZ-Y1hbno0o%ePIDHY^aByJNF+*jOV%r^^<*JFI{ajuMOixUxNc9DB|kIBTI!y>D{n}^JIf-rrUG>UHT0V^`~IgZ!Sf zo9WAgLO-)7vz5Lw&?zTN4_EF%l54>G6IGU+$_2^$tMih~|M!vh74Pr;qLq*5O|7Ni zm-<*6NJ4*xRZv?HL^1fE1SPjz(@JBLZ4nqZpErMB!p2$R&=~R423L*<*cfg4S!Uf79(b)(ys>8jMdyc!U$0<=UR}mT%aP zF-5Wq5UIgM z2@qA7<@@o%L;Ik>eoWspmc7>72o(mlAi<0!hnm~9oiGCl+vR;k=QRTv&Mk6JVzYhDb0q3W$Wi?s zi8dwJO>p{=?HeAD0A#Y1LBX*nRuAa6iE5HA6KOYXjhsmsz&kr+fs{b2YK#EE-bVuY z!RHOL%}uw}gz3OxJ`U7rNVA4$$-zm=NXVzWF6&D0`Y7DST0`b#nxJK$y@`GY>xNVK zIvIRh9DIEkrQeV+P=kL+;6Whl^xA$4W?CAEo?)-Ax0)?jZta~R`1WfGzOp5w*6XZ6 zfIt9nDkDh((aKu(wms}U5%{s-NMX=RmRAF>$q&*XXCDyUa{}I8RS^JRLghHrD`Rl< z8fYZI4k8BFPrelFUaeZ<%7@L+;D;%hr6fbi8G*$vCmHFipt#&(!nQG_Q$`l}x?B zjg?AxXgRW?hX%_#QPws(`)qBP>!TLAKrV1nCM*xFvxA9}SVb5tzC2KT6?#dIOcm%& zPK;kC*iM-$u~8Zux-J`%(S9NvUCoj)f>_96&o`Z>%?)R2I&?Xbse_rteXyxjGcaUF z$7Zi%L+m6aWSDSK9TVYYsu*IOBb?~Oqlb`5=16dS-#m#y@|;8=*j?vy9>DW75Ro7k znTyB}&ky9j43h)q$w2AD5`>u%n@3+07CkGBUETwdUakM~*4uqyx^XVg%m4RTDfGpK~LG3Xls zn33WO^KrK^?1m>cgyC;D66Q(Vsa^E+$c~islB~bIsZdc?Z>)Uv(SrXi{iznYxTTRg3KnVgDZ5Oy>mV!Md)lg? 
zcG}i>>5w&pQpGv}hxrhk>)EHO^_n$@*=Eo}wT?hX9kq_3&0&jV(T~CkA{=A@M??t^ z`cm41UPKT;SnKIXj&UZ5LamwJCg>Dqp1aX%R$u6#X7fSIU+nCV_=cUn&0T6n;0b49 zMjooJu!tdS#4dQaBoAYn$2ywaeMBGnQJ%yu7rE~tBQ#nceqTfGL)12uzJ^roXOL>- z0s6U=rppUWXFdkCCiI(9c35N*!hzkIM6q73^x~}zUGxN6$t2_@jViE%l;cR3{a~U= zL;x}7tfts6#5I1I_-iZI&-U~ELLadbtMp9{Vq|K>C*(0}8``ZsAvQWxViWBR zfSkM0o{@xxjYlm6z-Y8sXISLUBPN^Beein36r?mkPYE-NcJ=&~<>f~0%IzEV3@1x= z9T*DbMXoQn?g)%%1V}Q`iStVm$3=)D1Lnc2aQ!MDD$3qny|J=nRCA0R3PxnWh>+M6 zC1pycDXAc-PK&^T2D8ZxE5tL6W{)wrc*F$k9@>N|oF-u!kvGFWr{pdrq^80{F;hTb zMxo@z-@qLUkPYB0aV7noJ z_Alt}okt?&N@`x7L6}09%W6d}sb#!N2wCvIqR5J@;fel#2I4 z4|8C@iDHwR^)w-Vq$cR$Ovo*BbwEcs3Rn%=uTPdZtaF|cdu55eC^Q%uX<74RT0z`& z86rGOILFvPRIBl&fXc<)#+Q&kNE71NJ!|-^=Rh>t)Jh*hcqu-Mxwyci zeq&bF%SCX)0n{v6&yukkpFarZ+2=M)RV5vK#n6XiGS(8dZ*2A1iBV36TDS!EZI z=cftmvxgU?yy3U?CwgfJwh6BPD%{GVD{0#<(Osyoi&8{K!edZ%0lN zXk~B#{6b$Arv|hl2q!IGBhC)C6Fy%y85opDu1VJ=1}AJmNFF-Yd4+;a1>xZ&j_OL* zqLbdCT75AK-rE0ANvf=*iR_= zF_JL%$Yc(d&wff(XQ@g@goybbsd|LhVQ2b06u?NAMD&C+LHH81MfNWBBR?QiAJ{u# zhHHp%!>*3+MX<6*!6GTADHH=hz>k9FNEPU(ic*o!DktUOjeSC9Tsx0Yn~DKR@o;J* zx+a`WKB4k)KD8%-GAJm-&#tz}hU4~}P)X0!6Ld}mtB&dsJW{xQrVomsHz=#fVxw4H zCiv%s60Tu|q;GIw4%l_N6d*F24|-j4{u8=*1vSKHi&0PVeNsLC=o6!4I`|rs&WeIJ zuJP08c!dsPN1TS~k^@Z&V`}R3{nMS(R^#-?r*EHL4f2a(O^ZQA>@?mUjIC0Npxy%X z8Q~&S9rsa~hx%M6n1tCbjzk??9v~qWX1&dx-)d7Jzq|8}h&A0uuc|7Hqe22l@<$1b z@Iw&v85LFGbo7lVk=9)$`XZSo={S~wjdL87HKR|+ zYH%J3<0vbFl@BlP`;y~c371R{FI)LltY=W*XQ&x?BJx%dPYKTyo-&?kJQX~Ltin@B zM&WmC&Co8k4r3<$nF89+27Q0FpS6mR!WZGya6brZJt@}O&kbJVJ`tbYDhbK>@b@Ef zb>@Qft?bRGC96L-I08=vb446Qu$iJVW-g08-jke zXFo*tLtfq=F%-nNM0WS*+?yiPl@Fh+o{Y+fHu=HuXRK$RHC(5?L(v}N(WdDmxY28a zH7UyAq~=@s!DI*900RU^etCW6{=&-L+7~!mpc#7tvO$)Ebcx-hR*C`t?8CRCA8Vnm zR?XFCuid%5d}sAWV|nGyr#G&BCS1Y7>fNi?@7%ufNv)oZ-YQ(YdMomSuq&*CST!jdNiC*2`O2eJrm)Cit90( z%7q(`n>N2sg+U!0M?VNlD>&kWg-%t;uuH$0bP2ld`7fRSo`N>i>s)aSmQ$Nv_Bjxm>M4f9K^1 zl67nvM4caFj>+MAG84Hly^0J1F%mA7%=x$`NWP)CQo;|rNl|N&*nl|V7fk4u5BDL3 z0J1c$kpC&+mKEq>7Z>k@GNf}mA@g6xgeg@sj3SCkPA$W}e>_8_x*w#Dk6aa&0;#Z( 
zYZ!%S6QJmQQlQ%?~c0(8wQ9Ql;b;B?MGl1dOjj+8!4Jj0^!`+0YG=&30>zpL~Z|f2T{vGhcp= z8u4wCmCFHx11oWHi|=^|5n9WnO@;YO;!5Gqffva$kZ6VU&Atk_uq4kax~j;PV)<72 Gr~eB$Z9 literal 0 HcmV?d00001 diff --git a/gitea/gitea_deployer/__pycache__/health.cpython-39.pyc b/gitea/gitea_deployer/__pycache__/health.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5556aee33c052d47d9278a91c11e5f9d512d7bdd GIT binary patch literal 3191 zcma)8&u`qu6`mnyNiMaL61Vq4_Z`}()JKwQ525Ts7~MthNM8q76n0bW>>T* zay`SfB!Vgm*w+HNI|k%qi=KMzKhS$`bM49hLJ`1yZ@64(4|#GZlOujyh-oIn@WKB6BYt>gB+bYC zBn3SsOLfMxVw@&C&IO;SVyvXrJk3R##ATs$_y&9=I|<6Gg+k*Ve0mjxB;Y$izlW~% z#WkyqE1#k#pD9(S>h5lwYAN{1q0ATD;iEXs^iH3dAd2$%SVoZvqo{Eo>b)p>I*qg0 zh#y6wNTTR3MEPLaivMhFrTIw3TctdnWN|6=);KL?+{j4CNmiUmwbdNL=HyH{Fcm%5 zcozX>)b_ly%MOg*L$z%XS9d8H{5rz<#UUY^5_K5B`jtIrl z3kLVpgL_&r%U8}us_MgcF6f=x>AS`+(_>joOB1AdDb=$$GvPC-($QI@b(Y{h+Jbfb zK7~)OgQ#guB2qgbY3+h^O80`5otF@KubkI7>NWRP{VU|N@_C+?X`H2%T(~W%!nw*P zYODwI7S5YFIpF(68E0)1;)!dS6uHm~+U-sT+{Ov0H9PxX`j`71@Pk8H9!kZ_f;ZiK zZ|@;bq$<-9#;&|HRjq6;ocK=Phfr(Jvm3Q$%~Et}!T5lSF+jz1$Fh6~b*YpKhiUu> zN2G#)Wm;w(_nel}> zi_|nX9w2q9@&xU-1y@3&_Le!|v3As*nmi#Fz#}z%N$_SV!F!$cy4Wiiv>f>FyaHyS z1R}wGP0n56owJ$=|6_8_n!eiofSh;g4(cB<^(VUu6G06z{gR2WX0XaqvkFFbO)fe$ zeNEM0XZDuAQ?v2MsXMT`S9`)cVqXLNN)KlFHSpQFZ|4VM#ag&g`_|s-Y_<1b6#c*X zsyp+)Tl-jhdf(1N57Dz*uUxx-@a*Y*u?EPy9fKB`N8lr=Muj?Fi0(_|r4T@K)pr9O zu4ll%+M$@{?kk3I14eQRKkhI0ehDjeu)#!}&ZNEpA~FYv9=pz8z;;@&ZwE0?a#Ksp7x? 
z&F)t}+x>Fq5B$IyYc|lZBQLXrp0CW_16YKg;n@#zHjTm>zn6{dnWD5@Vf@jN>a z1eoAJSv(#Au6gBd{(Mx0?L58?^|(aJE_Hvqy0b93*+e@t_@vR})oq(CaC& zRZsJEnZ*;>_yAiQE%iXl(fz7Bx3tp_)lG2LFi07NTDDbm)pP*hg)+;LhE+2e*;6s@ zahfa0;>2`>d^{bSj?Sc<^aFJZI@N6y@8fW&%UG3`R*XN}m+@y#nXZz@G4Mb>9(GS+ zpaV2{oho|PWYiq_PhnC01qial=uOHTkKTOC7$hqL2s)-?O*d#9g+x>>GTB?Dt)p zXpGn7D03suvf?CCQlzj%so(q)w9>Buh}GQ%DCudEz@u(71;(;|v|%vADL%gtHg;^g zy1g*D9ju%I-|2Tvpybo3)TK5qq(yZDY?>vA{Bo+Jq!7|#OWnop)!jz*v(rRQ%CyK0 zE6*mH#jow}1d=mENV* z4;$~>2w%YRPM>ZA`Z0+rjB17FsyqDe%Y!9oCB)m$VIv;=DN5=2o*ingSb1v8{%#T_f7;dBESa5lc~ zA%)I>aS4i(N?9q{uEZ`&On^%4L@6I~$|03oQmM)%r<}TSQgZRxha`$6=KKHmdKmzu zWmg91H~p4={g!|E{{Mf|tX9h!KL0E(wYy)@wEv(?_G2P*0hj-bu4%63Xs+%W9o^CS z+i(p2&N(@J=Q?IL@8r9dW9ih+>=e2Mr@;65PO)2ZN_=m1%H4`n=}tHk-KtYXS-~xK zYTZd^lJ85MdUwj1>P|b;oG*7~y0gwK-&Z<^x^vE4cix%rE;tKZHqklUJ>nec9(9g# zzS=p~J?ER@W;hfs28+t}8f$B>_M=aHY^Hk`+iTW}8} zcNn=P*SMxF9tqzlU-kx_{$|(f1$JMw);&KE&7j@y+1X~ySR@M?yq{SzHsrT z%H@9Rh9_jJs?4Z{i)K{+V(%99_=4B-uzcvKgp#i|J44TprmkSJST#4Tt-hAcqWk)b zUb7RdKfms^Zm4%;N-#)0ntaW>zTWTOSm_7t)wU;Ew6fU`tr`4S2^_`y#v19<7MK@_`u@8;xGG>opotxzUhxf%}O@eR9ZMz~#S)Bwja3t#0mZ&7rk3-TeE8Yq^EDO~=COlc16hQ9jY?}?AmtM8 zO;l)5jVC^Ufb_l2YG&bS&51SJcy*)k6pfUItQnRb9@+Q4rMThB2PBLo#aO}%hN9Op z=sESF`()~8kQr$k+KxWbcXgfajU6b|k-lNRr_mxnU9orxH_Ep9NTF#r_pf+4X>(Dm z`O8U;9+X2`w$qVc-u12z*96Vt z14BH4FYz=bc9QTSR%QQcH$IO_xsrtmJ$xt~kD{lgvDP2x;u$=!k9uesnR` zpin(*C-Zp03+!g6qo$Y4#!r{&D(}ypl_ITZ_gD9?RR8*h*9!ck|3yQtiP`Eys-DL{ z_RhQla*K)ZBIIF_*sB3}tz; z1hy()Sv2Jo@-2FfcU63jCUU@R#IvaQ4P1&3%6gaq2=VT5M~mfyM4xz)dOl6b7bq!G z@~f0k&zZHeDNCdy&LN?pu*`$1GK%?ndAh{qEtHRaNOE4l<^MO5tjzNuMR&lnmdWMWv{|d5>7Oo39sT+-D%X$xUI+@r`%do#!%L+x?*1oE><{vp>!{!o%X>7GJHxCXt8_lKE^_2v3_C@$KW6dlYuv>i~rFIW$h@d5S#Z8f@ zA9w>okyeZG;65ov%gt2zx40RRxIRFD2YJXcYny5dK@qfD;QLZgMtOzuxJtO6z*XJO zqg^eS#C;vt)V6{AbTEVaSzHy)9}4DhKaXnx*Wv9P+8hav;{F(}EH~1KddC`&4iH;)-KtFN=3$g$tss8D_JFgw%a`bgkH-_-V@Ge6jgR>Ru&(D zDv}+E$FQ8@aY`6IkfarJNPz8u+8e6?R?cLD5N~5G2E^hJ8aOkJfx^O>ni+bu==E;# zlTqb5P${rv7j{QfgfwUD8PyupBJFRIo|m>q7zNV8A4e7VUQL; 
zn~F9e{zlvc2#UeFSl~CVGs=gR)wNrN8* zEGMA@&9Bgfs8V)X!D+JO6lM?=s+d&U7Ec~ZffeC%sLY5omS!aXuHo`8AkhkzKAoG; zCv*c4T>bs8Rp8twmO1vVn;%r=V=K31 zqFoW~z`#2tZlUX{_mRG!ZCU($Ieva*?dDj4Rdyy^gW3c2Z#0pigNTjhiLtRIkzN7 zf;Sf4GG5cZ27J3!xKrqr<@XcX6DWb?E{B~&RNT)kkP4!!&9?9Dfi9`wXNzpverYu; z(a9)F;mgYKEJ)$Q_rIG#I<`M-wLITn9d=IIuU`G~vb`Pz1AqDa`LJN04PYPJALW*w zT>Z%K7l-rw`1i7pOS5m;jBns4~cTM)I0Mpcpm&WqS5 z5arnm0CbQxl1nIPZ)AT|Fce-?ZNv-NfTm`feX+>GCMxr+S>{D$669Dx@{LqUDl0q< zDQ!` z#>Zyf(#yJe&$?$?Nm;>!sN=t%6$^C;Ka1O={$tCURXJ0)kh8d*^|1*Xp=f0B@|dWT z;~Nukgs>A4_bxJEVTdwU*;gdYSn%XF%$@wM=06o!5I_)a(&F0I-_pYx@&yuSEMT62 zfGKST5HApE;W4z&jdT(%U}#-2w7I3<(ZSICO$-e&SaxGEM(?*6rIQg|2yZ29eue1F zesbBqBD}{_BCr()kc%&Adl18ab=(ER44*#B9F{Wt@KG$shkx>}{I`>Lr=8eIZ8!@L zn@u{~hLVLPKkT{7c6jWi9(Qj`HU79#?n|59&a%A{F4sP#goKM6j1 zUO*1jSSjD2j_o92rTjHy{~eb{;>2BE$r z-axOf{3MSQ{+U{;&Q< z((j?cf8p}Ufz(}V~Lm<#uyqN_zA^{ zY;8!F4*Npr#rV+1B<%7QGKyV{a2eo>nFh?EubJeobc-PT+$gtUFyUVaa)4Nn$S!P3 zkhhs(0%OAr@?e4ym;i7QOptRcBlfL9dYcIon0HKO0(>V-FcJPkLX{jjfG;2BL$BG1 zw^+>y!b(Xn~2 zXwDK%-veg(Bs~A~DR)eDj7fDOQ!)qHe)=Q4ISVczUlovWjd^Vw^8H=?^($L&eQe}+ ztX*kC5fsRQq!ZgD+(CVjZKg7`z~8GL$ z7O4Ge+RYPyKXb6JdWwaxNdQ!b+#0cwJ9!tnivd-B=l^Yl7THMkEij`TtGp-1ym=y+ zcc+q3PT$4s-qPJVbkyCqK{A}z10qUD)10P zRb&|OAh$5^5L6*hwT#ThCW%W}4W*f@}|gUO{ifec8ia`{oduHa1<72@RhH z)g|CeK{w0~gG#2Q<_gT1!Z=aFbK#+Q8Qiq%WqU}z7c&2@#jmrQx1h^tRLeFZA_Z4$ zHb^y9uzHQ*fJ`!1tl}y1BDHS7jv64S{DH|40P!#?1aYDuoH-poJp@PE!k(A0R=Gw`y>7QNBUwo%}_r0BceY zo=oa(B|Mi3q5H0_-CTt=RWs86q^vH5U%$=|sYRpJlwLf!rfk(xfq0vGxk||wC^=1ui6qMP`nLrsOtuT7 z!tJIYNFr)fL{R}HHJyT*3~!V8CRHCKIUQ8cxelLhwDN?pV3dtn0=-DfFvSer`oyvz zKcW50dKm^Al|r`O10Dc2fFGV}?wLRZs`?!_p`*?&wG9)_Vfe4`jI~j4a||P-dB`=8XQ(9z zp=285a!`g`g_mjDE1o)^zzQ$5jBMB_)cS$0nc4Vv)k}iiICQK})($mZK zMb~AvkTjA?j{UfS)96a+*qZ!+JIjP&xcb+ zH_9O*h$p3~c|W!j-=ZepriA=V;*XIaV#e@<$&>sGPC0e5?6oalM?IjY<`cxxsuQ^~ zdp*bxLSBv|xtL{&KPwjNM)>H1*(LE^je!yp3kgfm5cjYIK@N^2qukga2w}ox%)KMk zHr2-DPOjFZmVpb^=7YjW-zI!*CCXkhMnN)$TjV1HnCWGWz(yNzLctAkYfglJw}%Cu 
zS+?0Z7~|;F9hD)*gJ?Jo%_a}P;)6=*@-d3cvj-6}qq@{Bns(tP*8N_xeiDnhm#i`N z=VMP{*7KRP3m2{oI~`I|3RkzgI5Xu#FEP1?M|tX*dLJQ)HtayO7}8l0<^^eo##J_( zoXipxQhc$aayq?_9iO6&F80v}iOp-c*vUbMEnc74(gW<+w*GD7CcIKixr;jM7+A>0 z(i4&1+C$D!`PmK(9dhM7_m80JpQ1x8eEz{SD_1w;U0r&%({D99{&TpAdy~vt%`h+f zdMX2~WJ9iB%BfC`SECUxPL%6-y@RwSvv2JBp=H@KAMraV`(eVKWpZFrLB1kOr*l1~lOPZ3n5%_DvPG^d57`lPu@(z(lVw?05oE@~M-&(wdm)wu zDrdcw-+=q^9mBs66kL>0J3_#+@XU{3EF+py$TBU$v?ggNw+O~8g+JcI zn4ev?U*s5?bY-L*wO=#AEwfA!g2aBc&yoU%M$$P1%jmGI8en0CM~`h#VkgN- zx@#c$7ou2FnuOcRaF56Mt7c5-bGh)`gBdbAjxjkwGNOWuiSkFd2@uZ*F)^gthNOT- z(EUeH0?)bhr&AY#A+&fWck>e5llW!>@RE8To2MMfFNTj_jgQIF5e6Bzjn6LmP!WI?i1)%j zh?qH!i_NX{)xjp8s5g$Ptb|qSha@u}xeY6ED@f+>NNQ5V52$K8Gyqkmk};&N?#Ltw z7Zug(T`8pa#7jy<-^WTLIu&=SjthzFDCR?`Y+NbS5f#*lHhIqKAY||2x&I^zA*(<$ zrVSGjW)t|HBpF=VpqBc*V`g~fepHeU1VJV`_O|cf2N4Pr2^kMSy>QmuFvJ|T83aPe zxp#p}4U|5HQphtdJ;$XvlwuQi;Q8iKioOy-5dzm@w9Y44tW2OJSzUEJQ?3IxintUO zFCEH!khpr_@i#?1i7;fKC%%K(iQh*O9(uKR0|rZ>8?8{r7OLw37%|;AKG-+5ZM0=${#9CRU^{F9 zg(AcqYyzD;%ot4J8QPdJb4fazs2w9l1R6fa+)Un8;^q_F3vn~T81nQYbFoF-3;!yA zvwxM3;)XYp6;HNT+WlmDXFMwVwa&yAQW)R zbHn2q0l-BW6OM?X%x;e;2ZJM{BBV<|_7O*7uW#bWOh!r2CNrzYw#Cy_z-Grm9DWzI zC7=nZ0}voJ^=U>aY_pqnaQUa;ZcyqNh2*jnLaGOQsVD?Hdx~~8u^o(z?ZECoMG4lX z3rYZ$FeO0h#}I`Wohc!|W!y193H;5JKu6ZB@OH9K*Mk0lEn2x|8MOOLu{YC0!iET5 z@loyMZQ+S=f{?H~Y41x5^(dC(U{v16X;?Sw^Tu(@K@9w3fJ)R+%+Lg5+bXq8Fl>ez zrtmUJ4LNCPGBq4T2;}}aMuaed8$>Qa1k-@3*#6J_T;ZNhaf}KGlqPb5-7DjA;s*gF z1dsrtFXU(ue@HR{!W&0&VvY}wq0EBDj~&BQuc+ka4_IEj4Jx5%49N>~2S>|gL?%Ne zaQ)u=0jNa9V|vrv5)veR537hm7$kdn73cB$h$t>WzRJk;s_1uFmk$TOJn=}UolTz3 z1 z>(teIQnzA?B}n7MsFUc*nN&VWnjTC@aetTLpQHJ?aj`?k&`9)5Fx8ziK84u9-r)W0 zJXu4BVx1DU&DcnyNyQ(+qIFyhHU14Z;rA&j_%=e@Es;mA<-$QJk%zr|8Vbl6^@ANmNJj#>8}Ox6$Z9ZE};gCXQVN#1VblX|nD zrQqyHj(}$Lo&D0~eb2N9@DW}i*B$83w}%AG0KK7O$C{>sXW4$RJBJ2CpD`l^1jv^2 zHGC((9C>|KIx}2E-2~xC<4!Vy`FNU~Ppnn{FZ>{DM=YI}qbc6%}svJ45_XDJrV?tM62aE-2c-!JE|M zlzPD=&Dk_la>AZUfe#h!r%uvuv*Isl5lIuJa}s|+*&k9u_N4rg6Zur5QrquK#~lYW 
zr24x^HO1z2A7jP>y@ISWxo5pI!j7dH3$#kcB|E+V&SXMwynMd+D;mgO^P9Trr;AQS z&X^bEz;Hz)xdDxQpM&2^ z_c^btzoO{S9UWZ~Jxb0}Lg8>RO9>ffQ67$W#FWSfEq`i3?k@Hq2$$|o zQ$m)8AVeuh+Y1u%f@YiH4x6&fBEN>aE4ch~Na)uXwR5#b?P%>}{aEeA`eN-&{b=n_ zyGYp|NG(Kfcs)T8C?4+gq=@*~xb}A(H;^qv<=>)tW z{o~YJc_JXLW`s6PHOxyYh}9Qjh92P!px`GT+IKAF6Jk%$0zFhzr`LT!0oe4Ue`Kiv zF=3PWE>A}AKGBNvIv%t!QSn*%v(e{tG~?4eCSrH;_knc6d={ReI(;KupgY9)>iIfI Jw_dEB{6CDaZKePK literal 0 HcmV?d00001 diff --git a/gitea/gitea_deployer/__pycache__/webhooks.cpython-39.pyc b/gitea/gitea_deployer/__pycache__/webhooks.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf4f0eaa71f3c0b342d73523181ee2edda635422 GIT binary patch literal 5587 zcmbVQ%ahzj8P{8y$3DDv>~#{LlR)7CYBzyYfWd@KVmprtODayvFjbV$PWS9;t&zqp zO`KKDa&fL)IB-c^wLWn0g)0{h{2e+`1(y_8ibH@hzpo|DV?A~rn$o9HcYobqf8XzW zw1SnDriSYs`D%FUf~NhQUgj?gFFUx?9E58 zx02Y31wh|r)Vt!KOso%Bp$37Z?2HT5*Br&T@e>gnL=V1zp` zE@~L>UWO*H`$o~OHh-0Nm2cFlC^tzm7ftow&yGf-Xpqop77Ry4BOUMYWEg~TVUA@~ zxadfOeNlKrkp;xL+wls|_v2tFe7|V=eyLIXZ~OkOaS&BKuJ7}tk7hH?WU=P^K@9Dd zR`7l4V9U}5xv$A5=s%IqUf2%ffef}YF&stEX1cu}W+Eu5Uz$tE?c1ek)2-2w^uTu( zce)DFG%Vel{#4&XTBntFh~W<)wbpB$LywMdxbca}O>TW;^<3d`n>(Kv+~pqL8^VUB zZQj6$CU2poNiCvGH^1^iqtjzux@Vsl>qh4pCr!P*lL+8ZhXo0hu5Z%iE-DK7E zW*lZ=5QVv5i-HT;oDOBzlP0@O3H;Dyx^Dqte zBGFlxuiAZ=y(?~w0d!V1urS7a{Uqk;!uUBQb=l8@55wVj$l~#EPe_&ws)!I|1k!BA znXhkEvQBOG{aW}%fh!?7yA&sk_5~3J;r>_(-XZsD`*RwAv(?sX`*V^NE@IMt81EO2 zeUZJ5|3b<&Sgvq=>JhTAx!4=;cdWunMKq9`=#&?UJV|8gb@DuM!KXu*%lDtbTiOQE zRvn#w8*r+8^VHt9>fsa)Z52!k2gn-BxNi`1b-8%bWC~cDI)J4eYH5?}*R@Qa=y&vE zKfXs zz&~_uYVvQqH9)8ISee0$2`MmbLcIzd@ zwZ>PgRo1HATEtDVb=Kz`=aIh7W;y9`9;ECs^_{CKw^hzFV*^aZqT^i)!U!l#Ss(?a zynVsh-Z*1CiNBpO8HA}|;={gB2`wdm3qD9fzTi9q%mGx+<<^w(v2xA|)htX?xZ#v* zH$OxBC8labmwgJPXh4>Am?p6TZT?h+KnejFOUL~_Qqy1@MMqthH`%4iA=zhoCx50M z%S9A^AdtXGEbL}()OK!mun>VYZ@)`fo+Lm>$gOm1Yb$4@4X{vDDQBt0W5FOpD9Br^ z4gy`)S(i^?CP~vtMuhxGHb4qn1~L5cYxLifWeiFSYm^`}K2445O11tVkv`IJHcpH6 zDpXyqwOzQk0~wRMTNDloJB@@G!Skd{WXF}y(rVwN$=p$J6eR&K+)6rHGaaG$5rr-a 
logger = logging.getLogger(__name__)


class ConfigurationError(Exception):
    """Raised when configuration is invalid"""
    pass


@dataclass
class DeploymentConfig:
    """Main deployment configuration loaded from environment and CLI args"""

    # File paths (required - no defaults)
    env_file: Path
    docker_compose_file: Path

    # Cloudflare credentials (required - no defaults)
    cloudflare_api_token: str = field(repr=False)  # Hide in logs
    cloudflare_zone_id: str

    # File paths (with defaults)
    dict_file: Path = Path("/usr/share/dict/words")

    # Domain settings
    base_domain: str = "merakit.my"
    app_name: Optional[str] = None

    # Deployment options
    dry_run: bool = False
    max_retries: int = 3
    healthcheck_timeout: int = 60  # seconds
    healthcheck_interval: int = 10  # seconds
    verify_ssl: bool = False

    # Webhook settings (optional)
    webhook_url: Optional[str] = None
    webhook_timeout: int = 10  # seconds
    webhook_retries: int = 3

    # Logging
    log_level: str = "INFO"

    @classmethod
    def from_env_and_args(cls, args) -> "DeploymentConfig":
        """
        Factory method to create config from environment and CLI args

        Args:
            args: argparse.Namespace with CLI arguments

        Returns:
            DeploymentConfig instance

        Raises:
            ConfigurationError: If required configuration is missing
        """
        logger.debug("Loading configuration from environment and arguments")

        # Cloudflare credentials are mandatory and come only from the environment.
        token = os.getenv('CLOUDFLARE_API_TOKEN')
        if not token:
            raise ConfigurationError(
                "CLOUDFLARE_API_TOKEN environment variable is required"
            )

        zone = os.getenv('CLOUDFLARE_ZONE_ID')
        if not zone:
            raise ConfigurationError(
                "CLOUDFLARE_ZONE_ID environment variable is required"
            )

        # CLI argument wins over the environment variable for the webhook URL.
        webhook_url = (
            getattr(args, 'webhook_url', None)
            or os.getenv('DEPLOYMENT_WEBHOOK_URL')
        )

        # Tunables: environment overrides, falling back to CLI/default values.
        retries = int(os.getenv('DEPLOYMENT_MAX_RETRIES', args.max_retries))
        hc_timeout = int(os.getenv('DEPLOYMENT_HEALTHCHECK_TIMEOUT', '60'))
        hc_interval = int(os.getenv('DEPLOYMENT_HEALTHCHECK_INTERVAL', '10'))

        config = cls(
            env_file=args.env_file,
            docker_compose_file=args.compose_file,
            dict_file=Path("/usr/share/dict/words"),
            cloudflare_api_token=token,
            cloudflare_zone_id=zone,
            base_domain="merakit.my",
            app_name=None,
            dry_run=args.dry_run,
            max_retries=retries,
            healthcheck_timeout=hc_timeout,
            healthcheck_interval=hc_interval,
            verify_ssl=not args.no_verify_ssl,
            webhook_url=webhook_url,
            webhook_timeout=10,
            webhook_retries=3,
            log_level=args.log_level
        )

        logger.debug(f"Configuration loaded: {config}")
        return config

    def validate(self) -> None:
        """
        Validate configuration completeness and correctness

        Raises:
            ConfigurationError: If configuration is invalid
        """
        logger.debug("Validating configuration")

        # Required files must exist on disk before deployment starts.
        if not self.env_file.exists():
            raise ConfigurationError(f"Env file not found: {self.env_file}")

        if not self.docker_compose_file.exists():
            raise ConfigurationError(
                f"Docker compose file not found: {self.docker_compose_file}"
            )

        if not self.dict_file.exists():
            raise ConfigurationError(
                f"Dictionary file not found: {self.dict_file}. "
                "Install 'words' package or ensure /usr/share/dict/words exists."
            )

        # All retry/timing knobs must be positive integers.
        for name, value in (
            ("max_retries", self.max_retries),
            ("healthcheck_timeout", self.healthcheck_timeout),
            ("healthcheck_interval", self.healthcheck_interval),
        ):
            if value < 1:
                raise ConfigurationError(
                    f"{name} must be >= 1, got: {value}"
                )

        # Polling faster than the overall timeout would never complete a cycle.
        if self.healthcheck_interval >= self.healthcheck_timeout:
            raise ConfigurationError(
                f"healthcheck_interval ({self.healthcheck_interval}) must be < "
                f"healthcheck_timeout ({self.healthcheck_timeout})"
            )

        valid_log_levels = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]
        if self.log_level.upper() not in valid_log_levels:
            raise ConfigurationError(
                f"Invalid log_level: {self.log_level}. "
                f"Must be one of: {', '.join(valid_log_levels)}"
            )

        logger.debug("Configuration validation successful")

    def __repr__(self) -> str:
        """String representation with masked sensitive values"""
        return (
            f"DeploymentConfig("
            f"env_file={self.env_file}, "
            f"dry_run={self.dry_run}, "
            f"max_retries={self.max_retries}, "
            f"cloudflare_api_token=*****, "
            f"webhook_url={self.webhook_url})"
        )
logger = logging.getLogger(__name__)


@dataclass
class DeploymentMetadata:
    """Metadata for a single deployment, persisted as JSON for later cleanup."""
    subdomain: str
    url: str
    domain: str
    compose_project_name: str
    db_name: str
    db_user: str
    deployment_timestamp: str
    dns_record_id: Optional[str] = None
    dns_ip: Optional[str] = None
    containers: Optional[List[str]] = None
    volumes: Optional[List[str]] = None
    networks: Optional[List[str]] = None
    env_file_path: Optional[str] = None


class DeploymentConfigManager:
    """Manages deployment configuration persistence"""

    def __init__(self, config_dir: Path = Path("deployments")):
        """
        Initialize deployment config manager

        Args:
            config_dir: Directory to store deployment configs
        """
        self.config_dir = config_dir
        # parents=True: a nested config_dir (e.g. "var/deployments") previously
        # raised FileNotFoundError because only the leaf was created.
        self.config_dir.mkdir(parents=True, exist_ok=True)
        self._logger = logging.getLogger(f"{__name__}.DeploymentConfigManager")

    def save_deployment(self, metadata: DeploymentMetadata) -> Path:
        """
        Save deployment configuration to disk

        Args:
            metadata: DeploymentMetadata instance

        Returns:
            Path to saved config file
        """
        # Create filename based on subdomain and timestamp
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f"{metadata.subdomain}_{timestamp}.json"
        config_path = self.config_dir / filename

        # Convert to dict and save as JSON
        config_data = asdict(metadata)

        with open(config_path, 'w') as f:
            json.dump(config_data, f, indent=2)

        self._logger.info(f"Saved deployment config: {config_path}")
        return config_path

    def load_deployment(self, config_file: Path) -> DeploymentMetadata:
        """
        Load deployment configuration from disk

        Args:
            config_file: Path to config file

        Returns:
            DeploymentMetadata instance

        Raises:
            FileNotFoundError: If config file doesn't exist
            ValueError: If config file is invalid (bad JSON or wrong schema)
        """
        if not config_file.exists():
            raise FileNotFoundError(f"Config file not found: {config_file}")

        with open(config_file, 'r') as f:
            config_data = json.load(f)

        # A file with missing/unexpected keys raises TypeError from the
        # dataclass constructor; normalize that to the documented ValueError
        # so callers (and find_deployment_by_url) can handle it uniformly.
        try:
            return DeploymentMetadata(**config_data)
        except TypeError as e:
            raise ValueError(
                f"Invalid deployment config {config_file}: {e}"
            ) from e

    def list_deployments(self) -> List[Path]:
        """
        List all deployment config files

        Returns:
            List of config file paths sorted by modification time (newest first)
        """
        config_files = list(self.config_dir.glob("*.json"))
        return sorted(config_files, key=lambda p: p.stat().st_mtime, reverse=True)

    def find_deployment_by_subdomain(self, subdomain: str) -> Optional[Path]:
        """
        Find the most recent deployment config for a subdomain

        Args:
            subdomain: Subdomain to search for

        Returns:
            Path to config file or None if not found
        """
        matching_files = list(self.config_dir.glob(f"{subdomain}_*.json"))
        if not matching_files:
            return None

        # Return most recent
        return max(matching_files, key=lambda p: p.stat().st_mtime)

    def find_deployment_by_url(self, url: str) -> Optional[Path]:
        """
        Find deployment config by URL

        Args:
            url: Full URL to search for

        Returns:
            Path to config file or None if not found
        """
        for config_file in self.list_deployments():
            try:
                metadata = self.load_deployment(config_file)
            except ValueError as e:
                # Covers bad JSON (JSONDecodeError is a ValueError subclass)
                # and schema mismatches normalized by load_deployment; a single
                # corrupt file must not break the whole lookup.
                self._logger.warning(f"Failed to load config {config_file}: {e}")
                continue
            if metadata.url == url:
                return config_file

        return None

    def delete_deployment_config(self, config_file: Path) -> None:
        """
        Delete deployment config file

        Args:
            config_file: Path to config file
        """
        if config_file.exists():
            config_file.unlink()
            self._logger.info(f"Deleted deployment config: {config_file}")
logger = logging.getLogger(__name__)


class DeploymentFileLogger:
    """Logs deployment results to files"""

    def __init__(self, logs_dir: Path = Path("logs")):
        """
        Initialize deployment file logger

        Args:
            logs_dir: Base directory for logs (default: logs/)
        """
        self._logs_dir = logs_dir
        self._success_dir = logs_dir / "success"
        self._failed_dir = logs_dir / "failed"
        self._logger = logging.getLogger(f"{__name__}.DeploymentFileLogger")
        self._ensure_directories()

    def _ensure_directories(self) -> None:
        """Create log directories if they don't exist"""
        for directory in (self._success_dir, self._failed_dir):
            directory.mkdir(parents=True, exist_ok=True)
            self._logger.debug(f"Ensured directory exists: {directory}")

    def _sanitize_url(self, url: str) -> str:
        """
        Sanitize URL for use in filename

        Args:
            url: URL to sanitize

        Returns:
            Sanitized URL safe for filename
        """
        # Strip the scheme, then replace path/port separators that are
        # illegal or confusing in filenames.
        stripped = url.replace("https://", "").replace("http://", "")
        return stripped.replace("/", "_").replace(":", "_")

    def _generate_filename(self, status: str, url: str, timestamp: datetime) -> str:
        """
        Generate log filename

        Format: success_url_date.txt or failed_url_date.txt

        Args:
            status: 'success' or 'failed'
            url: Deployment URL
            timestamp: Deployment timestamp

        Returns:
            Filename string
        """
        stamp = timestamp.strftime("%Y%m%d_%H%M%S")
        return f"{status}_{self._sanitize_url(url)}_{stamp}.txt"

    def log_success(
        self,
        url: str,
        subdomain: str,
        duration: float,
        timestamp: Optional[datetime] = None
    ) -> Path:
        """
        Log successful deployment

        Args:
            url: Deployment URL
            subdomain: Subdomain used
            duration: Deployment duration in seconds
            timestamp: Deployment timestamp (default: now)

        Returns:
            Path to created log file
        """
        timestamp = timestamp or datetime.now()

        log_file = self._success_dir / self._generate_filename(
            "success", url, timestamp
        )
        log_file.write_text(
            self._format_success_log(url, subdomain, duration, timestamp)
        )
        self._logger.info(f"✓ Success log written: {log_file}")
        return log_file

    def log_failure(
        self,
        url: str,
        subdomain: str,
        error: str,
        timestamp: Optional[datetime] = None
    ) -> Path:
        """
        Log failed deployment

        Args:
            url: Deployment URL (may be empty if failed early)
            subdomain: Subdomain used (may be empty if failed early)
            error: Error message
            timestamp: Deployment timestamp (default: now)

        Returns:
            Path to created log file
        """
        timestamp = timestamp or datetime.now()

        # Failures can occur before a URL was ever generated.
        log_file = self._failed_dir / self._generate_filename(
            "failed", url if url else "unknown", timestamp
        )
        log_file.write_text(
            self._format_failure_log(url, subdomain, error, timestamp)
        )
        self._logger.info(f"✓ Failure log written: {log_file}")
        return log_file

    def _format_success_log(
        self,
        url: str,
        subdomain: str,
        duration: float,
        timestamp: datetime
    ) -> str:
        """
        Format success log content

        Args:
            url: Deployment URL
            subdomain: Subdomain used
            duration: Deployment duration in seconds
            timestamp: Deployment timestamp

        Returns:
            Formatted log content
        """
        return f"""╔══════════════════════════════════════════════╗
║          DEPLOYMENT SUCCESS LOG              ║
╚══════════════════════════════════════════════╝

Timestamp: {timestamp.strftime("%Y-%m-%d %H:%M:%S")}
Status: SUCCESS
URL: https://{url}
Subdomain: {subdomain}
Duration: {duration:.2f} seconds

═══════════════════════════════════════════════

Deployment completed successfully.
All services are running and health checks passed.
"""

    def _format_failure_log(
        self,
        url: str,
        subdomain: str,
        error: str,
        timestamp: datetime
    ) -> str:
        """
        Format failure log content

        Args:
            url: Deployment URL (may be empty)
            subdomain: Subdomain used (may be empty)
            error: Error message
            timestamp: Deployment timestamp

        Returns:
            Formatted log content
        """
        url_display = f"https://{url}" if url else "N/A (failed before URL generation)"
        subdomain_display = subdomain if subdomain else "N/A"

        return f"""╔══════════════════════════════════════════════╗
║          DEPLOYMENT FAILURE LOG              ║
╚══════════════════════════════════════════════╝

Timestamp: {timestamp.strftime("%Y-%m-%d %H:%M:%S")}
Status: FAILED
URL: {url_display}
Subdomain: {subdomain_display}

═══════════════════════════════════════════════

ERROR:
{error}

═══════════════════════════════════════════════

Deployment failed. See error details above.
All changes have been rolled back.
"""
logger = logging.getLogger(__name__)


class DNSError(Exception):
    """Raised when DNS operations fail"""
    pass


@dataclass
class DNSRecord:
    """Represents a DNS record"""
    record_id: str
    hostname: str
    ip: str
    record_type: str


class DNSManager:
    """Python wrapper for Cloudflare DNS operations"""

    def __init__(self, api_token: str, zone_id: str):
        """
        Initialize DNS manager

        Args:
            api_token: Cloudflare API token
            zone_id: Cloudflare zone ID
        """
        self._api_token = api_token
        self._zone_id = zone_id
        self._base_url = f"https://api.cloudflare.com/client/v4/zones/{zone_id}/dns_records"
        self._headers = {
            "Authorization": f"Bearer {api_token}",
            "Content-Type": "application/json"
        }
        self._logger = logging.getLogger(f"{__name__}.DNSManager")

    @staticmethod
    def _check_success(payload: Dict) -> None:
        """Raise DNSError if a Cloudflare response body reports failure."""
        if not payload.get("success", False):
            errors = payload.get("errors", [])
            raise DNSError(f"Cloudflare API error: {errors}")

    def check_record_exists(self, hostname: str) -> bool:
        """
        Check if DNS record exists using Cloudflare API

        Args:
            hostname: Fully qualified domain name

        Returns:
            True if record exists, False otherwise

        Raises:
            DNSError: If API call fails
        """
        self._logger.debug(f"Checking if DNS record exists: {hostname}")

        try:
            resp = requests.get(
                self._base_url,
                headers=self._headers,
                params={"name": hostname},
                timeout=30
            )
            resp.raise_for_status()
        except requests.RequestException as e:
            raise DNSError(f"Failed to check DNS record existence: {e}") from e

        payload = resp.json()
        self._check_success(payload)

        found = bool(payload.get("result", []))
        if found:
            self._logger.debug(f"DNS record exists: {hostname}")
        else:
            self._logger.debug(f"DNS record does not exist: {hostname}")
        return found

    def add_record(
        self,
        hostname: str,
        ip: str,
        dry_run: bool = False
    ) -> DNSRecord:
        """
        Add DNS A record

        Args:
            hostname: Fully qualified domain name
            ip: IP address for A record
            dry_run: If True, only log what would be done

        Returns:
            DNSRecord with record_id for rollback

        Raises:
            DNSError: If API call fails
        """
        if dry_run:
            self._logger.info(
                f"[DRY-RUN] Would add DNS record: {hostname} -> {ip}"
            )
            # Sentinel record so callers can treat dry-run uniformly.
            return DNSRecord("dry-run-id", hostname, ip, "A")

        self._logger.info(f"Adding DNS record: {hostname} -> {ip}")

        body = {
            "type": "A",
            "name": hostname,
            "content": ip,
            "ttl": 1,  # Automatic TTL
            "proxied": False  # DNS only, not proxied through Cloudflare
        }

        try:
            resp = requests.post(
                self._base_url,
                headers=self._headers,
                json=body,
                timeout=30
            )
            resp.raise_for_status()
        except requests.RequestException as e:
            raise DNSError(f"Failed to add DNS record: {e}") from e

        payload = resp.json()
        self._check_success(payload)

        record_id = payload.get("result", {}).get("id")
        if not record_id:
            raise DNSError("No record ID returned from Cloudflare API")

        self._logger.info(f"DNS record added successfully: {record_id}")
        return DNSRecord(record_id, hostname, ip, "A")

    def remove_record(self, hostname: str, dry_run: bool = False) -> None:
        """
        Remove DNS record by hostname

        Args:
            hostname: Fully qualified domain name
            dry_run: If True, only log what would be done

        Raises:
            DNSError: If API call fails
        """
        if dry_run:
            self._logger.info(f"[DRY-RUN] Would remove DNS record: {hostname}")
            return

        self._logger.info(f"Removing DNS record: {hostname}")

        # Look up matching record IDs first, then delete each by ID.
        try:
            resp = requests.get(
                self._base_url,
                headers=self._headers,
                params={"name": hostname},
                timeout=30
            )
            resp.raise_for_status()
        except requests.RequestException as e:
            raise DNSError(f"Failed to remove DNS record: {e}") from e

        payload = resp.json()
        self._check_success(payload)

        matches = payload.get("result", [])
        if not matches:
            self._logger.warning(f"No DNS record found for: {hostname}")
            return

        # Remove all matching records (typically just one)
        for record in matches:
            record_id = record.get("id")
            if record_id:
                self.remove_record_by_id(record_id, dry_run=False)

    def remove_record_by_id(self, record_id: str, dry_run: bool = False) -> None:
        """
        Remove DNS record by ID (more reliable for rollback)

        Args:
            record_id: Cloudflare DNS record ID
            dry_run: If True, only log what would be done

        Raises:
            DNSError: If API call fails
        """
        if dry_run:
            self._logger.info(
                f"[DRY-RUN] Would remove DNS record by ID: {record_id}"
            )
            return

        self._logger.info(f"Removing DNS record by ID: {record_id}")

        try:
            resp = requests.delete(
                f"{self._base_url}/{record_id}",
                headers=self._headers,
                timeout=30
            )

            # Handle 404/405 gracefully - record doesn't exist or can't be deleted
            if resp.status_code in [404, 405]:
                self._logger.warning(
                    f"DNS record {record_id} not found or cannot be deleted (may already be removed)"
                )
                return

            resp.raise_for_status()
        except requests.RequestException as e:
            raise DNSError(f"Failed to remove DNS record: {e}") from e

        self._check_success(resp.json())
        self._logger.info(f"DNS record removed successfully: {record_id}")

    def get_public_ip(self) -> str:
        """
        Get public IP address from external service

        Returns:
            Public IP address as string

        Raises:
            DNSError: If IP retrieval fails
        """
        self._logger.debug("Retrieving public IP address")

        try:
            resp = requests.get("https://ipv4.icanhazip.com", timeout=10)
            resp.raise_for_status()
        except requests.RequestException as e:
            raise DNSError(f"Failed to retrieve public IP: {e}") from e

        public_ip = resp.text.strip()
        self._logger.debug(f"Public IP: {public_ip}")
        return public_ip
logger = logging.getLogger(__name__)


class DockerError(Exception):
    """Raised when Docker operations fail"""
    pass


@dataclass
class ContainerInfo:
    """Information about a running container"""
    container_id: str
    name: str
    status: str


class DockerManager:
    """Docker Compose operations wrapper"""

    def __init__(self, compose_file: Path, env_file: Path):
        """
        Initialize Docker manager

        Args:
            compose_file: Path to docker-compose.yml
            env_file: Path to .env file
        """
        self._compose_file = compose_file
        self._env_file = env_file
        self._logger = logging.getLogger(f"{__name__}.DockerManager")

    def _compose(self, *args: str) -> List[str]:
        """Build a `docker compose` command pinned to this project's files."""
        return [
            "docker", "compose",
            "-f", str(self._compose_file),
            "--env-file", str(self._env_file),
            *args,
        ]

    def _run_command(
        self,
        cmd: List[str],
        check: bool = True,
        capture_output: bool = True
    ) -> subprocess.CompletedProcess:
        """
        Run docker compose command

        Args:
            cmd: Command list to execute
            check: Whether to raise on non-zero exit
            capture_output: Whether to capture stdout/stderr

        Returns:
            CompletedProcess instance

        Raises:
            DockerError: If command fails and check=True
        """
        self._logger.debug(f"Running: {' '.join(cmd)}")

        try:
            # Run from the compose file's directory so relative paths resolve.
            return subprocess.run(
                cmd,
                check=check,
                capture_output=capture_output,
                text=True,
                cwd=self._compose_file.parent
            )
        except subprocess.CalledProcessError as e:
            error_msg = f"Docker command failed: {e.stderr or e.stdout or str(e)}"
            self._logger.error(error_msg)
            raise DockerError(error_msg) from e
        except FileNotFoundError as e:
            raise DockerError(
                f"Docker command not found. Is Docker installed? {e}"
            ) from e

    def validate_compose_file(self) -> None:
        """
        Validate docker-compose.yml syntax

        Raises:
            DockerError: If compose file is invalid
        """
        self._logger.debug("Validating docker-compose.yml")
        try:
            self._run_command(self._compose("config", "--quiet"))
        except DockerError as e:
            raise DockerError(f"Invalid docker-compose.yml: {e}") from e
        self._logger.debug("docker-compose.yml is valid")

    def pull_images(self, dry_run: bool = False) -> None:
        """
        Pull required Docker images

        Args:
            dry_run: If True, only log what would be done

        Raises:
            DockerError: If pull fails
        """
        if dry_run:
            self._logger.info("[DRY-RUN] Would pull Docker images")
            return

        self._logger.info("Pulling Docker images")
        self._run_command(self._compose("pull"))
        self._logger.info("Docker images pulled successfully")

    def start_services(self, dry_run: bool = False) -> List[ContainerInfo]:
        """
        Start Docker Compose services

        Args:
            dry_run: If True, only log what would be done

        Returns:
            List of created containers for rollback

        Raises:
            DockerError: If start fails
        """
        if dry_run:
            self._logger.info("[DRY-RUN] Would start Docker services")
            return []

        self._logger.info("Starting Docker services")
        self._run_command(self._compose("up", "-d"))

        # Snapshot the containers so a failed deployment can be rolled back.
        containers = self.get_container_status()
        self._logger.info(
            f"Docker services started successfully: {len(containers)} containers"
        )
        return containers

    def stop_services(self, dry_run: bool = False) -> None:
        """
        Stop Docker Compose services

        Args:
            dry_run: If True, only log what would be done

        Raises:
            DockerError: If stop fails
        """
        if dry_run:
            self._logger.info("[DRY-RUN] Would stop Docker services")
            return

        self._logger.info("Stopping Docker services")
        self._run_command(self._compose("down"))
        self._logger.info("Docker services stopped successfully")

    def stop_services_and_remove_volumes(self, dry_run: bool = False) -> None:
        """
        Stop services and remove volumes (full cleanup)

        Args:
            dry_run: If True, only log what would be done

        Raises:
            DockerError: If stop fails
        """
        if dry_run:
            self._logger.info("[DRY-RUN] Would stop Docker services and remove volumes")
            return

        self._logger.info("Stopping Docker services and removing volumes")
        self._run_command(self._compose("down", "-v"))
        self._logger.info("Docker services stopped and volumes removed")

    def get_container_status(self) -> List[ContainerInfo]:
        """
        Get status of containers for this project

        Returns:
            List of ContainerInfo objects

        Raises:
            DockerError: If status check fails
        """
        self._logger.debug("Getting container status")

        listing = self._run_command(self._compose("ps", "-q"))
        ids = [line.strip() for line in listing.stdout.strip().split('\n') if line.strip()]

        containers: List[ContainerInfo] = []
        for cid in ids:
            try:
                inspected = self._run_command(
                    ["docker", "inspect", cid, "--format", "{{.Name}}:{{.State.Status}}"]
                )
                name_status = inspected.stdout.strip()
                if ':' in name_status:
                    name, status = name_status.split(':', 1)
                    # Docker prefixes container names with a slash; drop it.
                    containers.append(
                        ContainerInfo(container_id=cid, name=name.lstrip('/'), status=status)
                    )
            except DockerError:
                # If inspect fails, just record the ID
                containers.append(
                    ContainerInfo(container_id=cid, name="unknown", status="unknown")
                )

        self._logger.debug(f"Found {len(containers)} containers")
        return containers
container_id=container_id, + name=name, + status=status + )) + except DockerError: + # If inspect fails, just record the ID + containers.append(ContainerInfo( + container_id=container_id, + name="unknown", + status="unknown" + )) + + self._logger.debug(f"Found {len(containers)} containers") + return containers diff --git a/gitea/gitea_deployer/env_generator.py b/gitea/gitea_deployer/env_generator.py new file mode 100644 index 0000000..846e025 --- /dev/null +++ b/gitea/gitea_deployer/env_generator.py @@ -0,0 +1,390 @@ +""" +Environment generation module - replaces generate-env.sh + +Provides pure Python implementations for: +- Random word selection from dictionary +- Memorable password generation +- Environment file generation and manipulation +""" + +import logging +import os +import random +import re +import secrets +import shutil +from dataclasses import asdict, dataclass +from datetime import datetime +from pathlib import Path +from typing import Dict, List, Optional + +logger = logging.getLogger(__name__) + + +@dataclass +class EnvValues: + """Container for generated environment values""" + subdomain: str + domain: str + url: str + db_name: str + db_user: str + db_password: str + compose_project_name: str + + +class WordGenerator: + """Pure Python implementation of dictionary word selection""" + + def __init__(self, dict_file: Path): + """ + Initialize word generator + + Args: + dict_file: Path to dictionary file (e.g., /usr/share/dict/words) + """ + self._dict_file = dict_file + self._words_cache: Optional[List[str]] = None + self._logger = logging.getLogger(f"{__name__}.WordGenerator") + + def _load_and_filter_words(self) -> List[str]: + """ + Load dictionary and filter to 4-10 char lowercase words + + Returns: + List of filtered words + + Raises: + FileNotFoundError: If dictionary file doesn't exist + ValueError: If no valid words found + """ + if not self._dict_file.exists(): + raise FileNotFoundError(f"Dictionary file not found: {self._dict_file}") + + 
class PasswordGenerator:
    """Build human-memorable passwords out of dictionary words."""

    def __init__(self, word_generator: "WordGenerator"):
        """
        Store the word source used for password generation.

        Args:
            word_generator: Provider of random dictionary words.
        """
        self._word_generator = word_generator
        self._logger = logging.getLogger(f"{__name__}.PasswordGenerator")

    def generate_memorable_password(self, word_count: int = 3) -> str:
        """
        Join *word_count* random words with hyphens.

        Args:
            word_count: Number of words to use (default: 3)

        Returns:
            Password string like "templon-infantly-yielding"
        """
        parts = self._word_generator.get_random_words(word_count)
        self._logger.debug(f"Generated {word_count}-word password")
        return '-'.join(parts)

    def generate_random_string(self, length: int = 8) -> str:
        """
        Return a *length*-character lowercase hex string.

        Uses the secrets module, so the result is cryptographically secure.

        Args:
            length: Length of string to generate (default: 8)

        Returns:
            Random hex string of exactly *length* characters.
        """
        # token_hex yields 2 chars per byte: over-generate, then trim.
        return secrets.token_hex(length // 2 + 1)[:length]
Generate DB identifiers with truncation logic + db_name = self._generate_db_name(random_str, app_name, subdomain) + db_user = self._generate_db_user(random_str, app_name, subdomain) + + # 5. Generate password + db_password = self._password_generator.generate_memorable_password(3) + + self._logger.info(f"Generated values for subdomain: {subdomain}") + self._logger.debug(f"URL: {url}") + self._logger.debug(f"DB_NAME: {db_name}") + self._logger.debug(f"DB_USER: {db_user}") + + return EnvValues( + subdomain=subdomain, + domain=self._base_domain, + url=url, + db_name=db_name, + db_user=db_user, + db_password=db_password, + compose_project_name=subdomain + ) + + def _generate_db_name(self, random_str: str, app_name: str, subdomain: str) -> str: + """ + Format: angali_{random8}_{app}_{subdomain}, truncate to 64 chars + + Args: + random_str: Random 8-char string + app_name: Application name + subdomain: Subdomain with hyphens + + Returns: + Database name (max 64 chars) + """ + # Replace hyphens with underscores for DB compatibility + subdomain_safe = subdomain.replace('-', '_') + db_name = f"angali_{random_str}_{app_name}_{subdomain_safe}" + + # Truncate to PostgreSQL limit of 63 chars (64 - 1 for null terminator) + return db_name[:63] + + def _generate_db_user(self, random_str: str, app_name: str, subdomain: str) -> str: + """ + Format: angali_{random8}_{app}_{subdomain}, truncate to 63 chars + + Args: + random_str: Random 8-char string + app_name: Application name + subdomain: Subdomain with hyphens + + Returns: + Database username (max 63 chars) + """ + # Replace hyphens with underscores for DB compatibility + subdomain_safe = subdomain.replace('-', '_') + db_user = f"angali_{random_str}_{app_name}_{subdomain_safe}" + + # Truncate to PostgreSQL limit of 63 chars + return db_user[:63] + + def read_current_env(self) -> Dict[str, str]: + """ + Parse existing .env file into dict + + Returns: + Dictionary of environment variables + """ + env_dict = {} + + if not 
self._env_file.exists(): + self._logger.warning(f"Env file not found: {self._env_file}") + return env_dict + + with open(self._env_file, 'r') as f: + for line in f: + line = line.strip() + # Skip empty lines and comments + if not line or line.startswith('#'): + continue + + # Parse KEY=VALUE format + if '=' in line: + key, value = line.split('=', 1) + # Remove quotes if present + value = value.strip('"').strip("'") + env_dict[key.strip()] = value + + self._logger.debug(f"Read {len(env_dict)} variables from {self._env_file}") + return env_dict + + def backup_env_file(self) -> Path: + """ + Create timestamped backup of .env file + + Returns: + Path to backup file + + Raises: + FileNotFoundError: If .env file doesn't exist + """ + if not self._env_file.exists(): + raise FileNotFoundError(f"Cannot backup non-existent file: {self._env_file}") + + # Create backup with timestamp + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + backup_path = self._env_file.parent / f"{self._env_file.name}.backup.{timestamp}" + + shutil.copy2(self._env_file, backup_path) + self._logger.info(f"Created backup: {backup_path}") + + return backup_path + + def update_env_file(self, values: EnvValues, dry_run: bool = False) -> None: + """ + Update .env file with new values (Python dict manipulation) + + Uses atomic write pattern: write to temp file, then rename + + Args: + values: EnvValues to write + dry_run: If True, only log what would be done + + Raises: + FileNotFoundError: If .env file doesn't exist + """ + if not self._env_file.exists(): + raise FileNotFoundError(f"Env file not found: {self._env_file}") + + if dry_run: + self._logger.info(f"[DRY-RUN] Would update {self._env_file} with:") + for key, value in asdict(values).items(): + if 'password' in key.lower(): + self._logger.info(f" {key.upper()}=********") + else: + self._logger.info(f" {key.upper()}={value}") + return + + # Read current env + current_env = self.read_current_env() + + # Update with new values + 
current_env.update({ + 'COMPOSE_PROJECT_NAME': values.compose_project_name, + 'SUBDOMAIN': values.subdomain, + 'DOMAIN': values.domain, + 'URL': values.url, + 'DB_NAME': values.db_name, + 'DB_USER': values.db_user, + 'DB_PASSWORD': values.db_password + }) + + # Write atomically: write to temp file, then rename + temp_file = self._env_file.parent / f"{self._env_file.name}.tmp" + + try: + with open(temp_file, 'w') as f: + for key, value in current_env.items(): + f.write(f"{key}={value}\n") + + # Atomic rename + os.replace(temp_file, self._env_file) + self._logger.info(f"Updated {self._env_file} successfully") + + except Exception as e: + # Cleanup temp file on error + if temp_file.exists(): + temp_file.unlink() + raise RuntimeError(f"Failed to update env file: {e}") from e + + def restore_env_file(self, backup_path: Path) -> None: + """ + Restore .env from backup + + Args: + backup_path: Path to backup file + + Raises: + FileNotFoundError: If backup file doesn't exist + """ + if not backup_path.exists(): + raise FileNotFoundError(f"Backup file not found: {backup_path}") + + shutil.copy2(backup_path, self._env_file) + self._logger.info(f"Restored {self._env_file} from {backup_path}") diff --git a/gitea/gitea_deployer/health.py b/gitea/gitea_deployer/health.py new file mode 100644 index 0000000..7b4ad68 --- /dev/null +++ b/gitea/gitea_deployer/health.py @@ -0,0 +1,128 @@ +""" +Health check module + +HTTP health checking with retry logic and progress indicators +""" + +import logging +import time + +import requests + + +logger = logging.getLogger(__name__) + + +class HealthCheckError(Exception): + """Raised when health check fails""" + pass + + +class HealthChecker: + """HTTP health check with retry logic""" + + def __init__( + self, + timeout: int, + interval: int, + verify_ssl: bool + ): + """ + Initialize health checker + + Args: + timeout: Total timeout in seconds + interval: Check interval in seconds + verify_ssl: Whether to verify SSL certificates + """ + 
self._timeout = timeout + self._interval = interval + self._verify_ssl = verify_ssl + self._logger = logging.getLogger(f"{__name__}.HealthChecker") + + def check_health(self, url: str, dry_run: bool = False) -> bool: + """ + Perform health check with retries + + Args: + url: URL to check (e.g., https://example.com) + dry_run: If True, only log what would be done + + Returns: + True if health check passed, False otherwise + """ + if dry_run: + self._logger.info(f"[DRY-RUN] Would check health of {url}") + return True + + self._logger.info( + f"Checking health of {url} for up to {self._timeout} seconds" + ) + + start_time = time.time() + attempt = 0 + + while True: + attempt += 1 + elapsed = time.time() - start_time + + if elapsed > self._timeout: + self._logger.error( + f"Health check timed out after {elapsed:.1f} seconds " + f"({attempt} attempts)" + ) + return False + + # Perform single check + if self._single_check(url): + self._logger.info( + f"Health check passed after {elapsed:.1f} seconds " + f"({attempt} attempts)" + ) + return True + + # Wait before next attempt + remaining = self._timeout - elapsed + if remaining > 0: + wait_time = min(self._interval, remaining) + self._logger.debug( + f"Attempt {attempt} failed, retrying in {wait_time:.1f}s " + f"(elapsed: {elapsed:.1f}s, timeout: {self._timeout}s)" + ) + time.sleep(wait_time) + else: + # No time remaining + self._logger.error(f"Health check timed out after {attempt} attempts") + return False + + def _single_check(self, url: str) -> bool: + """ + Single health check attempt + + Args: + url: URL to check + + Returns: + True if valid HTTP response (2xx or 3xx) received, False otherwise + """ + try: + response = requests.get( + url, + timeout=5, + verify=self._verify_ssl, + allow_redirects=True + ) + + # Accept any 2xx or 3xx status code as valid + if 200 <= response.status_code < 400: + self._logger.debug(f"Health check successful: HTTP {response.status_code}") + return True + else: + self._logger.debug( + 
f"Health check failed: HTTP {response.status_code}" + ) + return False + + except requests.RequestException as e: + self._logger.debug(f"Health check failed: {type(e).__name__}: {e}") + return False diff --git a/gitea/gitea_deployer/orchestrator.py b/gitea/gitea_deployer/orchestrator.py new file mode 100644 index 0000000..2276cca --- /dev/null +++ b/gitea/gitea_deployer/orchestrator.py @@ -0,0 +1,626 @@ +""" +Deployment orchestration module + +Main deployment workflow with rollback tracking and execution +""" + +import logging +import shutil +import time +from dataclasses import asdict, dataclass +from datetime import datetime +from pathlib import Path +from typing import Any, Dict, List + +from .config import DeploymentConfig +from .deployment_config_manager import DeploymentConfigManager, DeploymentMetadata +from .deployment_logger import DeploymentFileLogger +from .dns_manager import DNSError, DNSManager, DNSRecord +from .docker_manager import DockerError, DockerManager +from .env_generator import EnvFileGenerator, EnvValues, PasswordGenerator, WordGenerator +from .health import HealthCheckError, HealthChecker +from .webhooks import WebhookNotifier + + +logger = logging.getLogger(__name__) + + +class DeploymentError(Exception): + """Base exception for deployment errors""" + pass + + +class ValidationError(DeploymentError): + """Validation failed""" + pass + + +@dataclass +class DeploymentAction: + """Represents a single deployment action""" + action_type: str # 'dns_added', 'containers_started', 'env_updated' + timestamp: datetime + details: Dict[str, Any] + rollback_data: Dict[str, Any] + + +class DeploymentTracker: + """Track deployment actions for rollback""" + + def __init__(self): + """Initialize deployment tracker""" + self._actions: List[DeploymentAction] = [] + self._logger = logging.getLogger(f"{__name__}.DeploymentTracker") + + def record_action(self, action: DeploymentAction) -> None: + """ + Record a deployment action + + Args: + action: 
DeploymentAction to record + """ + self._actions.append(action) + self._logger.debug(f"Recorded action: {action.action_type}") + + def get_actions(self) -> List[DeploymentAction]: + """ + Get all recorded actions + + Returns: + List of DeploymentAction objects + """ + return self._actions.copy() + + def clear(self) -> None: + """Clear tracking history""" + self._actions.clear() + self._logger.debug("Cleared action history") + + +class DeploymentOrchestrator: + """Main orchestrator coordinating all deployment steps""" + + def __init__(self, config: DeploymentConfig): + """ + Initialize deployment orchestrator + + Args: + config: DeploymentConfig instance + """ + self._config = config + self._logger = logging.getLogger(f"{__name__}.DeploymentOrchestrator") + + # Initialize components + self._word_generator = WordGenerator(config.dict_file) + self._password_generator = PasswordGenerator(self._word_generator) + self._env_generator = EnvFileGenerator( + config.env_file, + self._word_generator, + self._password_generator, + config.base_domain, + config.app_name + ) + self._dns_manager = DNSManager( + config.cloudflare_api_token, + config.cloudflare_zone_id + ) + self._docker_manager = DockerManager( + config.docker_compose_file, + config.env_file + ) + self._webhook_notifier = WebhookNotifier( + config.webhook_url, + config.webhook_timeout, + config.webhook_retries + ) + self._health_checker = HealthChecker( + config.healthcheck_timeout, + config.healthcheck_interval, + config.verify_ssl + ) + self._tracker = DeploymentTracker() + self._deployment_logger = DeploymentFileLogger() + self._config_manager = DeploymentConfigManager() + + def deploy(self) -> None: + """ + Main deployment workflow + + Raises: + DeploymentError: If deployment fails + """ + start_time = time.time() + env_values = None + dns_record_id = None + dns_ip = None + containers = [] + + try: + # Phase 1: Validation + self._phase_validate() + + # Phase 2: Environment Generation (with retry on DNS 
conflicts) + env_values = self._phase_generate_env_with_retries() + + # Send deployment_started webhook + self._webhook_notifier.deployment_started( + env_values.subdomain, + env_values.url + ) + + # Phase 3: DNS Setup + dns_record_id, dns_ip = self._phase_setup_dns(env_values) + + # Phase 4: Container Deployment + containers = self._phase_deploy_containers() + + # Phase 5: Health Check + self._phase_health_check(env_values.url) + + # Success + duration = time.time() - start_time + self._webhook_notifier.deployment_success( + env_values.subdomain, + env_values.url, + duration + ) + self._logger.info( + f"✓ Deployment successful! URL: https://{env_values.url} " + f"(took {duration:.1f}s)" + ) + + # Log success to file + self._deployment_logger.log_success( + env_values.url, + env_values.subdomain, + duration + ) + + # Save deployment configuration + self._save_deployment_config( + env_values, + dns_record_id, + dns_ip, + containers + ) + + except Exception as e: + self._logger.error(f"✗ Deployment failed: {e}") + + # Send failure webhook + if env_values: + self._webhook_notifier.deployment_failed( + env_values.subdomain, + str(e), + env_values.url + ) + else: + self._webhook_notifier.deployment_failed("", str(e), "") + + # Log failure to file + if env_values: + self._deployment_logger.log_failure( + env_values.url, + env_values.subdomain, + str(e) + ) + else: + self._deployment_logger.log_failure( + "", + "", + str(e) + ) + + # Rollback + self._logger.info("Starting rollback...") + self._rollback_all() + + raise DeploymentError(f"Deployment failed: {e}") from e + + def _phase_validate(self) -> None: + """ + Phase 1: Pre-deployment validation + + Raises: + ValidationError: If validation fails + """ + self._logger.info("═══ Phase 1: Validation ═══") + + # Check system dependencies + self._validate_dependencies() + + # Validate environment file + if not self._config.env_file.exists(): + raise ValidationError(f"Env file not found: {self._config.env_file}") + + # 
Validate Docker Compose file + try: + self._docker_manager.validate_compose_file() + except DockerError as e: + raise ValidationError(f"Invalid docker-compose.yml: {e}") from e + + # Check external Docker network exists + self._validate_docker_network("proxy") + + self._logger.info("✓ Validation complete") + + def _validate_dependencies(self) -> None: + """ + Validate system dependencies + + Raises: + ValidationError: If dependencies are missing + """ + import shutil as sh + + required_commands = ["docker", "curl"] + + for cmd in required_commands: + if not sh.which(cmd): + raise ValidationError( + f"Required command not found: {cmd}. " + f"Please install {cmd} and try again." + ) + + # Check Docker daemon is running + try: + import subprocess + result = subprocess.run( + ["docker", "info"], + capture_output=True, + timeout=5 + ) + if result.returncode != 0: + raise ValidationError( + "Docker daemon is not running. Please start Docker." + ) + except (subprocess.TimeoutExpired, FileNotFoundError) as e: + raise ValidationError(f"Failed to check Docker daemon: {e}") from e + + def _validate_docker_network(self, network_name: str) -> None: + """ + Check external Docker network exists + + Args: + network_name: Network name to check + + Raises: + ValidationError: If network doesn't exist + """ + import subprocess + + try: + result = subprocess.run( + ["docker", "network", "inspect", network_name], + capture_output=True, + timeout=5 + ) + if result.returncode != 0: + raise ValidationError( + f"Docker network '{network_name}' not found. 
" + f"Please create it with: docker network create {network_name}" + ) + except (subprocess.TimeoutExpired, FileNotFoundError) as e: + raise ValidationError( + f"Failed to check Docker network: {e}" + ) from e + + def _phase_generate_env_with_retries(self) -> EnvValues: + """ + Phase 2: Generate environment with DNS conflict retry + + Returns: + EnvValues with generated values + + Raises: + DeploymentError: If unable to generate unique subdomain + """ + self._logger.info("═══ Phase 2: Environment Generation ═══") + + for attempt in range(1, self._config.max_retries + 1): + # Generate new values + env_values = self._env_generator.generate_values() + + self._logger.info(f"Generated subdomain: {env_values.subdomain}") + + # Check DNS conflict + try: + if not self._dns_manager.check_record_exists(env_values.url): + # No conflict, proceed + self._logger.info(f"✓ Subdomain available: {env_values.subdomain}") + + # Create backup + backup_path = self._env_generator.backup_env_file() + + # Update .env file + self._env_generator.update_env_file( + env_values, + dry_run=self._config.dry_run + ) + + # Track for rollback + self._tracker.record_action(DeploymentAction( + action_type="env_updated", + timestamp=datetime.now(), + details={"env_values": asdict(env_values)}, + rollback_data={"backup_path": str(backup_path)} + )) + + return env_values + + else: + self._logger.warning( + f"✗ DNS conflict for {env_values.url}, " + f"regenerating... (attempt {attempt}/{self._config.max_retries})" + ) + + except DNSError as e: + self._logger.warning( + f"DNS check failed: {e}. " + f"Assuming no conflict and proceeding..." 
+ ) + # If DNS check fails, proceed anyway (fail open) + backup_path = self._env_generator.backup_env_file() + self._env_generator.update_env_file( + env_values, + dry_run=self._config.dry_run + ) + self._tracker.record_action(DeploymentAction( + action_type="env_updated", + timestamp=datetime.now(), + details={"env_values": asdict(env_values)}, + rollback_data={"backup_path": str(backup_path)} + )) + return env_values + + raise DeploymentError( + f"Failed to generate unique subdomain after {self._config.max_retries} attempts" + ) + + def _phase_setup_dns(self, env_values: EnvValues) -> tuple: + """ + Phase 3: Add DNS record + + Args: + env_values: EnvValues with subdomain and URL + + Returns: + Tuple of (record_id, ip) + + Raises: + DNSError: If DNS setup fails + """ + self._logger.info("═══ Phase 3: DNS Setup ═══") + + # Get public IP + ip = self._dns_manager.get_public_ip() + self._logger.info(f"Public IP: {ip}") + + # Add DNS record + dns_record = self._dns_manager.add_record( + env_values.url, + ip, + dry_run=self._config.dry_run + ) + + self._logger.info(f"✓ DNS record added: {env_values.url} -> {ip}") + + # Track for rollback + self._tracker.record_action(DeploymentAction( + action_type="dns_added", + timestamp=datetime.now(), + details={"hostname": env_values.url, "ip": ip}, + rollback_data={"record_id": dns_record.record_id} + )) + + # Send webhook notification + self._webhook_notifier.dns_added(env_values.url, ip) + + return dns_record.record_id, ip + + def _phase_deploy_containers(self) -> List: + """ + Phase 4: Start Docker containers + + Returns: + List of container information + + Raises: + DockerError: If container deployment fails + """ + self._logger.info("═══ Phase 4: Container Deployment ═══") + + # Pull images + self._logger.info("Pulling Docker images...") + self._docker_manager.pull_images(dry_run=self._config.dry_run) + + # Start services + self._logger.info("Starting Docker services...") + containers = self._docker_manager.start_services( + 
dry_run=self._config.dry_run + ) + + self._logger.info( + f"✓ Docker services started: {len(containers)} containers" + ) + + # Track for rollback + self._tracker.record_action(DeploymentAction( + action_type="containers_started", + timestamp=datetime.now(), + details={"containers": [asdict(c) for c in containers]}, + rollback_data={} + )) + + return containers + + def _phase_health_check(self, url: str) -> None: + """ + Phase 5: Health check + + Args: + url: URL to check (without https://) + + Raises: + HealthCheckError: If health check fails + """ + self._logger.info("═══ Phase 5: Health Check ═══") + + health_url = f"https://{url}" + start_time = time.time() + + if not self._health_checker.check_health( + health_url, + dry_run=self._config.dry_run + ): + raise HealthCheckError(f"Health check failed for {health_url}") + + duration = time.time() - start_time + self._logger.info(f"✓ Health check passed (took {duration:.1f}s)") + + # Send webhook notification + self._webhook_notifier.health_check_passed(url, duration) + + def _rollback_all(self) -> None: + """Rollback all tracked actions in reverse order""" + actions = list(reversed(self._tracker.get_actions())) + + if not actions: + self._logger.info("No actions to rollback") + return + + self._logger.info(f"Rolling back {len(actions)} actions...") + + for action in actions: + try: + self._rollback_action(action) + except Exception as e: + # Log but don't fail rollback + self._logger.error( + f"Failed to rollback action {action.action_type}: {e}" + ) + + self._logger.info("Rollback complete") + + def _rollback_action(self, action: DeploymentAction) -> None: + """ + Rollback single action based on type + + Args: + action: DeploymentAction to rollback + """ + if action.action_type == "dns_added": + self._rollback_dns(action) + elif action.action_type == "containers_started": + self._rollback_containers(action) + elif action.action_type == "env_updated": + self._rollback_env(action) + else: + 
self._logger.warning(f"Unknown action type: {action.action_type}") + + def _rollback_dns(self, action: DeploymentAction) -> None: + """ + Rollback DNS changes + + Args: + action: DeploymentAction with DNS details + """ + record_id = action.rollback_data.get("record_id") + if record_id: + self._logger.info(f"Rolling back DNS record: {record_id}") + try: + self._dns_manager.remove_record_by_id( + record_id, + dry_run=self._config.dry_run + ) + self._logger.info("✓ DNS record removed") + except DNSError as e: + self._logger.error(f"Failed to remove DNS record: {e}") + + def _rollback_containers(self, action: DeploymentAction) -> None: + """ + Stop and remove containers + + Args: + action: DeploymentAction with container details + """ + self._logger.info("Rolling back Docker containers") + try: + self._docker_manager.stop_services(dry_run=self._config.dry_run) + self._logger.info("✓ Docker services stopped") + except DockerError as e: + self._logger.error(f"Failed to stop Docker services: {e}") + + def _rollback_env(self, action: DeploymentAction) -> None: + """ + Restore .env file from backup + + Args: + action: DeploymentAction with backup path + """ + backup_path_str = action.rollback_data.get("backup_path") + if backup_path_str: + backup_path = Path(backup_path_str) + if backup_path.exists(): + self._logger.info(f"Rolling back .env file from {backup_path}") + try: + self._env_generator.restore_env_file(backup_path) + self._logger.info("✓ .env file restored") + except Exception as e: + self._logger.error(f"Failed to restore .env file: {e}") + else: + self._logger.warning(f"Backup file not found: {backup_path}") + + def _save_deployment_config( + self, + env_values: EnvValues, + dns_record_id: str, + dns_ip: str, + containers: List + ) -> None: + """ + Save deployment configuration for later cleanup + + Args: + env_values: EnvValues with deployment info + dns_record_id: Cloudflare DNS record ID + dns_ip: IP address used in DNS + containers: List of container 
information + """ + try: + # Extract container names, volumes, and networks + container_names = [c.name for c in containers if hasattr(c, 'name')] + + # Get volumes and networks from docker-compose + volumes = [ + f"{env_values.compose_project_name}_db_data", + f"{env_values.compose_project_name}_gitea_data" + ] + + networks = [ + f"{env_values.compose_project_name}_internal" + ] + + # Create metadata + metadata = DeploymentMetadata( + subdomain=env_values.subdomain, + url=env_values.url, + domain=env_values.domain, + compose_project_name=env_values.compose_project_name, + db_name=env_values.db_name, + db_user=env_values.db_user, + deployment_timestamp=datetime.now().isoformat(), + dns_record_id=dns_record_id, + dns_ip=dns_ip, + containers=container_names, + volumes=volumes, + networks=networks, + env_file_path=str(self._config.env_file.absolute()) + ) + + # Save configuration + config_path = self._config_manager.save_deployment(metadata) + self._logger.info(f"✓ Deployment config saved: {config_path}") + + except Exception as e: + self._logger.warning(f"Failed to save deployment config: {e}") diff --git a/gitea/gitea_deployer/webhooks.py b/gitea/gitea_deployer/webhooks.py new file mode 100644 index 0000000..3616c2e --- /dev/null +++ b/gitea/gitea_deployer/webhooks.py @@ -0,0 +1,199 @@ +""" +Webhook notifications module + +Send deployment event notifications with retry logic +""" + +import logging +import time +from dataclasses import asdict, dataclass +from datetime import datetime +from typing import Any, Dict, Optional + +import requests + + +logger = logging.getLogger(__name__) + + +@dataclass +class WebhookEvent: + """Webhook event data""" + event_type: str # deployment_started, deployment_success, etc. 
@dataclass
class WebhookEvent:
    """Payload for a single webhook notification."""
    event_type: str   # e.g. deployment_started, deployment_success
    timestamp: str    # ISO-8601 UTC timestamp with trailing "Z"
    subdomain: str
    url: str
    message: str
    metadata: Dict[str, Any]


class WebhookNotifier:
    """Delivers deployment events to a webhook URL with retry/backoff."""

    def __init__(
        self,
        webhook_url: Optional[str],
        timeout: int,
        max_retries: int
    ):
        """
        Initialize webhook notifier.

        Args:
            webhook_url: Webhook URL to send notifications to (None to disable)
            timeout: Request timeout in seconds
            max_retries: Maximum number of retry attempts
        """
        self._webhook_url = webhook_url
        self._timeout = timeout
        self._max_retries = max_retries
        self._logger = logging.getLogger(f"{__name__}.WebhookNotifier")

        if not webhook_url:
            self._logger.debug("Webhook notifications disabled (no URL configured)")

    def notify(self, event: WebhookEvent) -> None:
        """
        Send *event*, retrying with exponential backoff.

        Delivery failures are logged, never raised, so webhook outages
        cannot fail a deployment.

        Args:
            event: WebhookEvent to send
        """
        if not self._webhook_url:
            return

        body = asdict(event)
        self._logger.debug(f"Sending webhook: {event.event_type}")

        for attempt in range(1, self._max_retries + 1):
            try:
                response = requests.post(
                    self._webhook_url,
                    json=body,
                    timeout=self._timeout
                )
                response.raise_for_status()
                self._logger.debug(
                    f"Webhook sent successfully: {event.event_type} "
                    f"(attempt {attempt})"
                )
                return
            except requests.RequestException as e:
                self._logger.warning(
                    f"Webhook delivery failed (attempt {attempt}/{self._max_retries}): {e}"
                )
                if attempt < self._max_retries:
                    delay = 2 ** (attempt - 1)  # exponential backoff: 1s, 2s, 4s, ...
                    self._logger.debug(f"Retrying in {delay}s...")
                    time.sleep(delay)

        self._logger.error(
            f"Failed to deliver webhook after {self._max_retries} attempts: "
            f"{event.event_type}"
        )

    def _emit(
        self,
        event_type: str,
        subdomain: str,
        url: str,
        message: str,
        metadata: Dict[str, Any]
    ) -> None:
        """Build an event stamped with the current UTC time and send it."""
        self.notify(WebhookEvent(
            event_type=event_type,
            timestamp=datetime.utcnow().isoformat() + "Z",
            subdomain=subdomain,
            url=url,
            message=message,
            metadata=metadata
        ))

    def deployment_started(self, subdomain: str, url: str) -> None:
        """Announce that a deployment has begun."""
        self._emit("deployment_started", subdomain, url,
                   f"Deployment started for {url}", {})

    def deployment_success(self, subdomain: str, url: str, duration: float) -> None:
        """Announce a successful deployment with its duration in seconds."""
        self._emit("deployment_success", subdomain, url,
                   f"Deployment successful for {url}",
                   {"duration": round(duration, 2)})

    def deployment_failed(self, subdomain: str, error: str, url: str = "") -> None:
        """Announce a failed deployment (url may be empty if it failed early)."""
        self._emit("deployment_failed", subdomain, url,
                   f"Deployment failed: {error}", {"error": error})

    def dns_added(self, hostname: str, ip: str) -> None:
        """Announce that a DNS record now points *hostname* at *ip*."""
        # The leading label of the hostname is the subdomain.
        self._emit("dns_added", hostname.split('.')[0], hostname,
                   f"DNS record added for {hostname}", {"ip": ip})

    def health_check_passed(self, url: str, duration: float) -> None:
        """Announce that the post-deploy health check succeeded."""
        self._emit(
            "health_check_passed",
            url.split('.')[0].replace('https://', '').replace('http://', ''),
            url,
            f"Health check passed for {url}",
            {"duration": round(duration, 2)}
        )
diff --git a/gitea/logs/success/success_ascidiia-bridoon.merakit.my_20251217_160155.txt b/gitea/logs/success/success_ascidiia-bridoon.merakit.my_20251217_160155.txt new file mode 100644 index 0000000..1e17b6e --- /dev/null +++ b/gitea/logs/success/success_ascidiia-bridoon.merakit.my_20251217_160155.txt @@ -0,0 +1,14 @@ +╔══════════════════════════════════════════════╗ +║ DEPLOYMENT SUCCESS LOG ║ +╚══════════════════════════════════════════════╝ + +Timestamp: 2025-12-17 16:01:55 +Status: SUCCESS +URL: https://ascidiia-bridoon.merakit.my +Subdomain: ascidiia-bridoon +Duration: 56.02 seconds + +═══════════════════════════════════════════════ + +Deployment completed successfully. +All services are running and health checks passed. diff --git a/gitea/requirements.txt b/gitea/requirements.txt new file mode 100644 index 0000000..8839043 --- /dev/null +++ b/gitea/requirements.txt @@ -0,0 +1,4 @@ +# Core dependencies +requests>=2.31.0 +rich>=13.7.0 +python-dotenv>=1.0.0 diff --git a/scripts/.claude/settings.local.json b/scripts/.claude/settings.local.json new file mode 100644 index 0000000..c098ddb --- /dev/null +++ b/scripts/.claude/settings.local.json @@ -0,0 +1,18 @@ +{ + "permissions": { + "allow": [ + "Bash(chmod:*)", + "Bash(echo:*)", + "Bash(curl:*)", + "Bash(if [ -z \"$CLOUDFLARE_API_TOKEN\" ])", + "Bash(then echo \"Token is empty\")", + "Bash(else echo \"Token exists with length: $#CLOUDFLARE_API_TOKEN\")", + "Bash(fi)", + "Bash(tee:*)", + "Bash(printf:*)", + "Bash(env)", + "Bash(./cloudflare-remove.sh:*)", + "Bash(bash:*)" + ] + } +} diff --git a/scripts/cloudflare-add.sh b/scripts/cloudflare-add.sh new file mode 100755 index 0000000..bcf7a77 --- /dev/null +++ b/scripts/cloudflare-add.sh @@ -0,0 +1,229 @@ +#!/bin/bash + +set -euo pipefail + +# Cloudflare API credentials +CF_API_TOKEN="${CLOUDFLARE_API_TOKEN:-}" +CF_ZONE_ID="${CLOUDFLARE_ZONE_ID:-}" + +# Dictionary files +DICT_FILE="/usr/share/dict/words" + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' 
+YELLOW='\033[1;33m' +NC='\033[0m' + +usage() { + echo "Usage: $0 --hostname --ip " + echo " $0 --random --domain --ip " + echo "" + echo "Options:" + echo " --hostname Specific hostname to add (e.g., test.example.com)" + echo " --random Generate random hostname" + echo " --domain Base domain for random hostname (e.g., example.org)" + echo " --ip IP address for A record" + echo "" + echo "Environment variables required:" + echo " CLOUDFLARE_API_TOKEN" + echo " CLOUDFLARE_ZONE_ID" + exit 1 +} + +log_error() { + echo -e "${RED}[ERROR]${NC} $1" >&2 +} + +log_success() { + echo -e "${GREEN}[SUCCESS]${NC} $1" >&2 +} + +log_info() { + echo -e "${YELLOW}[INFO]${NC} $1" >&2 +} + +check_requirements() { + if [[ -z "$CF_API_TOKEN" ]]; then + log_error "CLOUDFLARE_API_TOKEN environment variable not set" + exit 1 + fi + + if [[ -z "$CF_ZONE_ID" ]]; then + log_error "CLOUDFLARE_ZONE_ID environment variable not set" + exit 1 + fi + + if ! command -v curl &> /dev/null; then + log_error "curl is required but not installed" + exit 1 + fi + + if ! command -v jq &> /dev/null; then + log_error "jq is required but not installed" + exit 1 + fi +} + +get_random_word() { + if [[ ! 
-f "$DICT_FILE" ]]; then + log_error "Dictionary file not found: $DICT_FILE" + exit 1 + fi + + # Get random word: lowercase, letters only, 3-10 characters + grep -E '^[a-z]{3,10}$' "$DICT_FILE" | shuf -n 1 +} + +generate_random_hostname() { + local domain=$1 + local word1=$(get_random_word) + local word2=$(get_random_word) + echo "${word1}-${word2}.${domain}" +} + +check_dns_exists() { + local hostname=$1 + + log_info "Checking if DNS record exists for: $hostname" + + local response=$(curl -s -X GET \ + "https://api.cloudflare.com/client/v4/zones/${CF_ZONE_ID}/dns_records?name=${hostname}" \ + -H "Authorization: Bearer ${CF_API_TOKEN}" \ + -H "Content-Type: application/json") + + local success=$(echo "$response" | jq -r '.success') + + if [[ "$success" != "true" ]]; then + log_error "Cloudflare API request failed" + echo "$response" | jq '.' + exit 1 + fi + + local count=$(echo "$response" | jq -r '.result | length') + + if [[ "$count" -gt 0 ]]; then + return 0 # Record exists + else + return 1 # Record does not exist + fi +} + +add_dns_record() { + local hostname=$1 + local ip=$2 + + log_info "Adding DNS record: $hostname -> $ip" + + local response=$(curl -s -X POST \ + "https://api.cloudflare.com/client/v4/zones/${CF_ZONE_ID}/dns_records" \ + -H "Authorization: Bearer ${CF_API_TOKEN}" \ + -H "Content-Type: application/json" \ + --data "{ + \"type\": \"A\", + \"name\": \"${hostname}\", + \"content\": \"${ip}\", + \"ttl\": 1, + \"proxied\": false + }") + + local success=$(echo "$response" | jq -r '.success') + + if [[ "$success" == "true" ]]; then + log_success "DNS record added successfully: $hostname -> $ip" + echo "$response" | jq -r '.result | "Record ID: \(.id)"' + return 0 + else + log_error "Failed to add DNS record" + echo "$response" | jq '.' 
+ return 1 + fi +} + +# Parse arguments +HOSTNAME="" +IP="" +RANDOM_MODE=false +DOMAIN="" + +while [[ $# -gt 0 ]]; do + case $1 in + --hostname) + HOSTNAME="$2" + shift 2 + ;; + --ip) + IP="$2" + shift 2 + ;; + --random) + RANDOM_MODE=true + shift + ;; + --domain) + DOMAIN="$2" + shift 2 + ;; + -h|--help) + usage + ;; + *) + log_error "Unknown option: $1" + usage + ;; + esac +done + +# Validate arguments +if [[ -z "$IP" ]]; then + log_error "IP address is required" + usage +fi + +if [[ "$RANDOM_MODE" == true ]]; then + if [[ -z "$DOMAIN" ]]; then + log_error "Domain is required when using --random mode" + usage + fi +else + if [[ -z "$HOSTNAME" ]]; then + log_error "Hostname is required" + usage + fi +fi + +# Check requirements +check_requirements + +# Generate or use provided hostname +if [[ "$RANDOM_MODE" == true ]]; then + MAX_ATTEMPTS=50 + attempt=1 + + while [[ $attempt -le $MAX_ATTEMPTS ]]; do + HOSTNAME=$(generate_random_hostname "$DOMAIN") + log_info "Generated hostname (attempt $attempt): $HOSTNAME" + + if ! check_dns_exists "$HOSTNAME"; then + log_success "Hostname is available: $HOSTNAME" + break + else + log_info "Hostname already exists, generating new one..." 
+ attempt=$((attempt + 1)) + fi + done + + if [[ $attempt -gt $MAX_ATTEMPTS ]]; then + log_error "Failed to generate unique hostname after $MAX_ATTEMPTS attempts" + exit 1 + fi +else + if check_dns_exists "$HOSTNAME"; then + log_error "DNS record already exists for: $HOSTNAME" + exit 1 + fi +fi + +# Add the DNS record +add_dns_record "$HOSTNAME" "$IP" + diff --git a/scripts/cloudflare-remove.sh b/scripts/cloudflare-remove.sh new file mode 100755 index 0000000..dcc98aa --- /dev/null +++ b/scripts/cloudflare-remove.sh @@ -0,0 +1,327 @@ +#!/bin/bash + +set -euo pipefail + +# Cloudflare API credentials +CF_API_TOKEN="${CLOUDFLARE_API_TOKEN:-}" +CF_ZONE_ID="${CLOUDFLARE_ZONE_ID:-}" + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' + +usage() { + echo "Usage: $0 --hostname " + echo " $0 --record-id " + echo " $0 --all-matching " + echo "" + echo "Options:" + echo " --hostname Remove DNS record by hostname (e.g., test.example.com)" + echo " --record-id Remove DNS record by Cloudflare record ID" + echo " --all-matching Remove all DNS records matching pattern (e.g., '*.example.com')" + echo "" + echo "Environment variables required:" + echo " CLOUDFLARE_API_TOKEN" + echo " CLOUDFLARE_ZONE_ID" + exit 1 +} + +log_error() { + echo -e "${RED}[ERROR]${NC} $1" >&2 +} + +log_success() { + echo -e "${GREEN}[SUCCESS]${NC} $1" >&2 +} + +log_info() { + echo -e "${YELLOW}[INFO]${NC} $1" >&2 +} + +check_requirements() { + if [[ -z "$CF_API_TOKEN" ]]; then + log_error "CLOUDFLARE_API_TOKEN environment variable not set" + exit 1 + fi + + if [[ -z "$CF_ZONE_ID" ]]; then + log_error "CLOUDFLARE_ZONE_ID environment variable not set" + exit 1 + fi + + if ! command -v curl &> /dev/null; then + log_error "curl is required but not installed" + exit 1 + fi + + if ! 
command -v jq &> /dev/null; then + log_error "jq is required but not installed" + exit 1 + fi +} + +get_dns_records_by_hostname() { + local hostname=$1 + + log_info "Looking up DNS records for: $hostname" + + local response=$(curl -s -X GET \ + "https://api.cloudflare.com/client/v4/zones/${CF_ZONE_ID}/dns_records?name=${hostname}" \ + -H "Authorization: Bearer ${CF_API_TOKEN}" \ + -H "Content-Type: application/json") + + local success=$(echo "$response" | jq -r '.success') + + if [[ "$success" != "true" ]]; then + log_error "Cloudflare API request failed" + echo "$response" | jq '.' + exit 1 + fi + + echo "$response" +} + +get_all_dns_records() { + log_info "Fetching all DNS records in zone" + + local response=$(curl -s -X GET \ + "https://api.cloudflare.com/client/v4/zones/${CF_ZONE_ID}/dns_records?per_page=1000" \ + -H "Authorization: Bearer ${CF_API_TOKEN}" \ + -H "Content-Type: application/json") + + local success=$(echo "$response" | jq -r '.success') + + if [[ "$success" != "true" ]]; then + log_error "Cloudflare API request failed" + echo "$response" | jq '.' + exit 1 + fi + + echo "$response" +} + +delete_dns_record() { + local record_id=$1 + local hostname=$2 + + log_info "Deleting DNS record: $hostname (ID: $record_id)" + + local response=$(curl -s -X DELETE \ + "https://api.cloudflare.com/client/v4/zones/${CF_ZONE_ID}/dns_records/${record_id}" \ + -H "Authorization: Bearer ${CF_API_TOKEN}" \ + -H "Content-Type: application/json") + + local success=$(echo "$response" | jq -r '.success') + + if [[ "$success" == "true" ]]; then + log_success "DNS record deleted successfully: $hostname (ID: $record_id)" + return 0 + else + log_error "Failed to delete DNS record: $hostname (ID: $record_id)" + echo "$response" | jq '.' 
+ return 1 + fi +} + +delete_by_hostname() { + local hostname=$1 + + local response=$(get_dns_records_by_hostname "$hostname") + local count=$(echo "$response" | jq -r '.result | length') + + if [[ "$count" -eq 0 ]]; then + log_error "No DNS records found for: $hostname" + exit 1 + fi + + log_info "Found $count record(s) for: $hostname" + + local deleted=0 + local failed=0 + + while IFS= read -r record; do + local record_id=$(echo "$record" | jq -r '.id') + local record_name=$(echo "$record" | jq -r '.name') + local record_type=$(echo "$record" | jq -r '.type') + local record_content=$(echo "$record" | jq -r '.content') + + log_info "Found: $record_name ($record_type) -> $record_content" + + if delete_dns_record "$record_id" "$record_name"; then + deleted=$((deleted + 1)) + else + failed=$((failed + 1)) + fi + done < <(echo "$response" | jq -c '.result[]') + + log_info "Summary: $deleted deleted, $failed failed" + + if [[ $failed -gt 0 ]]; then + exit 1 + fi +} + +delete_by_record_id() { + local record_id=$1 + + # First, get the record details + log_info "Fetching record details for ID: $record_id" + + local response=$(curl -s -X GET \ + "https://api.cloudflare.com/client/v4/zones/${CF_ZONE_ID}/dns_records/${record_id}" \ + -H "Authorization: Bearer ${CF_API_TOKEN}" \ + -H "Content-Type: application/json") + + local success=$(echo "$response" | jq -r '.success') + + if [[ "$success" != "true" ]]; then + log_error "Record not found or API request failed" + echo "$response" | jq '.' 
+ exit 1 + fi + + local hostname=$(echo "$response" | jq -r '.result.name') + local record_type=$(echo "$response" | jq -r '.result.type') + local content=$(echo "$response" | jq -r '.result.content') + + log_info "Record found: $hostname ($record_type) -> $content" + + delete_dns_record "$record_id" "$hostname" +} + +delete_all_matching() { + local pattern=$1 + + log_info "Searching for records matching pattern: $pattern" + + local response=$(get_all_dns_records) + local all_records=$(echo "$response" | jq -c '.result[]') + + local matching_records=() + + while IFS= read -r record; do + local record_name=$(echo "$record" | jq -r '.name') + + # Simple pattern matching (supports * wildcard) + if [[ "$pattern" == *"*"* ]]; then + # Convert pattern to regex + local regex="${pattern//\*/.*}" + if [[ "$record_name" =~ ^${regex}$ ]]; then + matching_records+=("$record") + fi + else + # Exact match + if [[ "$record_name" == "$pattern" ]]; then + matching_records+=("$record") + fi + fi + done < <(echo "$all_records") + + local count=${#matching_records[@]} + + if [[ $count -eq 0 ]]; then + log_error "No DNS records found matching pattern: $pattern" + exit 1 + fi + + log_info "Found $count record(s) matching pattern: $pattern" + + # List matching records + for record in "${matching_records[@]}"; do + local record_name=$(echo "$record" | jq -r '.name') + local record_type=$(echo "$record" | jq -r '.type') + local content=$(echo "$record" | jq -r '.content') + log_info " - $record_name ($record_type) -> $content" + done + + # Confirm deletion + echo "" + read -p "Delete all $count record(s)? [y/N] " -n 1 -r + echo "" + + if [[ ! 
$REPLY =~ ^[Yy]$ ]]; then + log_info "Deletion cancelled" + exit 0 + fi + + local deleted=0 + local failed=0 + + for record in "${matching_records[@]}"; do + local record_id=$(echo "$record" | jq -r '.id') + local record_name=$(echo "$record" | jq -r '.name') + + if delete_dns_record "$record_id" "$record_name"; then + deleted=$((deleted + 1)) + else + failed=$((failed + 1)) + fi + done + + log_info "Summary: $deleted deleted, $failed failed" + + if [[ $failed -gt 0 ]]; then + exit 1 + fi +} + +# Parse arguments +HOSTNAME="" +RECORD_ID="" +PATTERN="" +MODE="" + +while [[ $# -gt 0 ]]; do + case $1 in + --hostname) + HOSTNAME="$2" + MODE="hostname" + shift 2 + ;; + --record-id) + RECORD_ID="$2" + MODE="record-id" + shift 2 + ;; + --all-matching) + PATTERN="$2" + MODE="pattern" + shift 2 + ;; + -h|--help) + usage + ;; + *) + log_error "Unknown option: $1" + usage + ;; + esac +done + +# Validate arguments +if [[ -z "$MODE" ]]; then + log_error "No deletion mode specified" + usage +fi + +# Check requirements +check_requirements + +# Execute based on mode +case $MODE in + hostname) + delete_by_hostname "$HOSTNAME" + ;; + record-id) + delete_by_record_id "$RECORD_ID" + ;; + pattern) + delete_all_matching "$PATTERN" + ;; + *) + log_error "Invalid mode: $MODE" + exit 1 + ;; +esac diff --git a/wordpress/.claude/settings.local.json b/wordpress/.claude/settings.local.json new file mode 100644 index 0000000..5bf99dc --- /dev/null +++ b/wordpress/.claude/settings.local.json @@ -0,0 +1,37 @@ +{ + "permissions": { + "allow": [ + "Bash(chmod:*)", + "Bash(test:*)", + "Bash(python3:*)", + "Bash(docker network create:*)", + "Bash(bash:*)", + "Bash(cat:*)", + "Bash(docker compose config:*)", + "Bash(docker compose:*)", + "Bash(docker ps:*)", + "Bash(docker volume:*)", + "Bash(docker network:*)", + "Bash(docker exec:*)", + "Bash(docker inspect:*)", + "Bash(curl:*)", + "Bash(nslookup:*)", + "Bash(dig:*)", + "Bash(tree:*)", + "Bash(ls:*)", + "Bash(pip3 install:*)", + "Bash(find:*)", + 
"Bash(pip install:*)", + "Bash(python -m json.tool:*)", + "Bash(pkill:*)", + "Bash(python test_integration.py:*)", + "Bash(docker run:*)", + "Bash(redis-cli ping:*)", + "Bash(mkdir:*)", + "Bash(./destroy.py:*)", + "Bash(lsof:*)", + "Bash(netstat:*)", + "Bash(kill:*)" + ] + } +} diff --git a/wordpress/.env b/wordpress/.env new file mode 100644 index 0000000..91d83bd --- /dev/null +++ b/wordpress/.env @@ -0,0 +1,14 @@ +COMPOSE_PROJECT_NAME=daidle-allotrylic +APP_NAME=wordpress +SUBDOMAIN=daidle-allotrylic +DOMAIN=merakit.my +URL=daidle-allotrylic.merakit.my +WORDPRESS_VERSION=6.5-php8.2-apache +MARIADB_VERSION=11.3 +DB_NAME=angali_ddc6c26a_wordpress_daidle_allotrylic +DB_USER=angali_ddc6c26a_wordpress_daidle +DB_PASSWORD=emblazer-stairway-sweety +DB_ROOT_PASSWORD=idaein-silkgrower-tariffism +WP_TABLE_PREFIX=wp_ +WP_MEMORY_LIMIT=256M +WP_MAX_MEMORY_LIMIT=256M diff --git a/wordpress/.env.backup b/wordpress/.env.backup new file mode 100644 index 0000000..c585a79 --- /dev/null +++ b/wordpress/.env.backup @@ -0,0 +1,22 @@ +# App +COMPOSE_PROJECT_NAME=emyd-tartarian +APP_NAME=wordpress +SUBDOMAIN=emyd-tartarian +DOMAIN=merakit.my +URL=emyd-tartarian.merakit.my + +# Versions +WORDPRESS_VERSION=6.5-php8.2-apache +MARIADB_VERSION=11.3 + +# Database +DB_NAME=angali_guzagmpc_wordpress_emyd_tartarian +DB_USER=angali_guzagmpc_wordpress_emyd_t +DB_PASSWORD=creditrix-lutein-discolors +DB_ROOT_PASSWORD=sixtieths-murines-rabbling + +# WordPress +WP_TABLE_PREFIX=wp_ +WP_MEMORY_LIMIT=256M +WP_MAX_MEMORY_LIMIT=256M + diff --git a/wordpress/.env.backup.20251216_163858 b/wordpress/.env.backup.20251216_163858 new file mode 100644 index 0000000..00d61fd --- /dev/null +++ b/wordpress/.env.backup.20251216_163858 @@ -0,0 +1,22 @@ +# App +COMPOSE_PROJECT_NAME=litterers-apotropaic +APP_NAME=wordpress +SUBDOMAIN=litterers-apotropaic +DOMAIN=merakit.my +URL=litterers-apotropaic.merakit.my + +# Versions +WORDPRESS_VERSION=6.5-php8.2-apache +MARIADB_VERSION=11.3 + +# Database 
+DB_NAME=angali_xewzeu15_wordpress_litterers_apotropaic +DB_USER=angali_xewzeu15_wordpress_litter +DB_PASSWORD=templon-infantly-yielding +DB_ROOT_PASSWORD=beplumed-falus-tendry + +# WordPress +WP_TABLE_PREFIX=wp_ +WP_MEMORY_LIMIT=256M +WP_MAX_MEMORY_LIMIT=256M + diff --git a/wordpress/.env.backup.20251216_164443 b/wordpress/.env.backup.20251216_164443 new file mode 100644 index 0000000..8ef1d0a --- /dev/null +++ b/wordpress/.env.backup.20251216_164443 @@ -0,0 +1,14 @@ +COMPOSE_PROJECT_NAME=modif-sporidial +APP_NAME=wordpress +SUBDOMAIN=modif-sporidial +DOMAIN=merakit.my +URL=modif-sporidial.merakit.my +WORDPRESS_VERSION=6.5-php8.2-apache +MARIADB_VERSION=11.3 +DB_NAME=angali_a08f84d9_wordpress_modif_sporidial +DB_USER=angali_a08f84d9_wordpress_modif_ +DB_PASSWORD=fumeroot-rummest-tiltboard +DB_ROOT_PASSWORD=unalike-prologizer-axonic +WP_TABLE_PREFIX=wp_ +WP_MEMORY_LIMIT=256M +WP_MAX_MEMORY_LIMIT=256M diff --git a/wordpress/.env.backup.20251216_164618 b/wordpress/.env.backup.20251216_164618 new file mode 100644 index 0000000..8ef1d0a --- /dev/null +++ b/wordpress/.env.backup.20251216_164618 @@ -0,0 +1,14 @@ +COMPOSE_PROJECT_NAME=modif-sporidial +APP_NAME=wordpress +SUBDOMAIN=modif-sporidial +DOMAIN=merakit.my +URL=modif-sporidial.merakit.my +WORDPRESS_VERSION=6.5-php8.2-apache +MARIADB_VERSION=11.3 +DB_NAME=angali_a08f84d9_wordpress_modif_sporidial +DB_USER=angali_a08f84d9_wordpress_modif_ +DB_PASSWORD=fumeroot-rummest-tiltboard +DB_ROOT_PASSWORD=unalike-prologizer-axonic +WP_TABLE_PREFIX=wp_ +WP_MEMORY_LIMIT=256M +WP_MAX_MEMORY_LIMIT=256M diff --git a/wordpress/.env.backup.20251216_164814 b/wordpress/.env.backup.20251216_164814 new file mode 100644 index 0000000..8ef1d0a --- /dev/null +++ b/wordpress/.env.backup.20251216_164814 @@ -0,0 +1,14 @@ +COMPOSE_PROJECT_NAME=modif-sporidial +APP_NAME=wordpress +SUBDOMAIN=modif-sporidial +DOMAIN=merakit.my +URL=modif-sporidial.merakit.my +WORDPRESS_VERSION=6.5-php8.2-apache +MARIADB_VERSION=11.3 
+DB_NAME=angali_a08f84d9_wordpress_modif_sporidial +DB_USER=angali_a08f84d9_wordpress_modif_ +DB_PASSWORD=fumeroot-rummest-tiltboard +DB_ROOT_PASSWORD=unalike-prologizer-axonic +WP_TABLE_PREFIX=wp_ +WP_MEMORY_LIMIT=256M +WP_MAX_MEMORY_LIMIT=256M diff --git a/wordpress/.env.backup.20251216_165109 b/wordpress/.env.backup.20251216_165109 new file mode 100644 index 0000000..b1e1170 --- /dev/null +++ b/wordpress/.env.backup.20251216_165109 @@ -0,0 +1,14 @@ +COMPOSE_PROJECT_NAME=dtente-yali +APP_NAME=wordpress +SUBDOMAIN=dtente-yali +DOMAIN=merakit.my +URL=dtente-yali.merakit.my +WORDPRESS_VERSION=6.5-php8.2-apache +MARIADB_VERSION=11.3 +DB_NAME=angali_1fc30955_wordpress_dtente_yali +DB_USER=angali_1fc30955_wordpress_dtente +DB_PASSWORD=chronic-urophanic-subminimal +DB_ROOT_PASSWORD=determiner-reaks-cochleated +WP_TABLE_PREFIX=wp_ +WP_MEMORY_LIMIT=256M +WP_MAX_MEMORY_LIMIT=256M diff --git a/wordpress/.env.backup.20251216_170611 b/wordpress/.env.backup.20251216_170611 new file mode 100644 index 0000000..15ac362 --- /dev/null +++ b/wordpress/.env.backup.20251216_170611 @@ -0,0 +1,14 @@ +COMPOSE_PROJECT_NAME=rappini-misseated +APP_NAME=wordpress +SUBDOMAIN=rappini-misseated +DOMAIN=merakit.my +URL=rappini-misseated.merakit.my +WORDPRESS_VERSION=6.5-php8.2-apache +MARIADB_VERSION=11.3 +DB_NAME=angali_d6646fab_wordpress_rappini_misseated +DB_USER=angali_d6646fab_wordpress_rappin +DB_PASSWORD=painterish-tayir-mentalist +DB_ROOT_PASSWORD=venemous-haymow-overbend +WP_TABLE_PREFIX=wp_ +WP_MEMORY_LIMIT=256M +WP_MAX_MEMORY_LIMIT=256M diff --git a/wordpress/.env.backup.20251216_184629 b/wordpress/.env.backup.20251216_184629 new file mode 100644 index 0000000..f8f9728 --- /dev/null +++ b/wordpress/.env.backup.20251216_184629 @@ -0,0 +1,14 @@ +COMPOSE_PROJECT_NAME=emetic-fuglemen +APP_NAME=wordpress +SUBDOMAIN=emetic-fuglemen +DOMAIN=merakit.my +URL=emetic-fuglemen.merakit.my +WORDPRESS_VERSION=6.5-php8.2-apache +MARIADB_VERSION=11.3 +DB_NAME=angali_a8c12895_wordpress_emetic_fuglemen 
+DB_USER=angali_a8c12895_wordpress_emetic +DB_PASSWORD=heteroside-budder-chipyard +DB_ROOT_PASSWORD=overkeen-gangliated-describer +WP_TABLE_PREFIX=wp_ +WP_MEMORY_LIMIT=256M +WP_MAX_MEMORY_LIMIT=256M diff --git a/wordpress/.env.backup.20251217_061213 b/wordpress/.env.backup.20251217_061213 new file mode 100644 index 0000000..2a5d35e --- /dev/null +++ b/wordpress/.env.backup.20251217_061213 @@ -0,0 +1,14 @@ +COMPOSE_PROJECT_NAME=exing-calcinator +APP_NAME=wordpress +SUBDOMAIN=exing-calcinator +DOMAIN=merakit.my +URL=exing-calcinator.merakit.my +WORDPRESS_VERSION=6.5-php8.2-apache +MARIADB_VERSION=11.3 +DB_NAME=angali_f9404c19_wordpress_exing_calcinator +DB_USER=angali_f9404c19_wordpress_exing_ +DB_PASSWORD=blencorn-raniform-sectism +DB_ROOT_PASSWORD=florilege-haya-thin +WP_TABLE_PREFIX=wp_ +WP_MEMORY_LIMIT=256M +WP_MAX_MEMORY_LIMIT=256M diff --git a/wordpress/.env.backup.20251217_061237 b/wordpress/.env.backup.20251217_061237 new file mode 100644 index 0000000..2a5d35e --- /dev/null +++ b/wordpress/.env.backup.20251217_061237 @@ -0,0 +1,14 @@ +COMPOSE_PROJECT_NAME=exing-calcinator +APP_NAME=wordpress +SUBDOMAIN=exing-calcinator +DOMAIN=merakit.my +URL=exing-calcinator.merakit.my +WORDPRESS_VERSION=6.5-php8.2-apache +MARIADB_VERSION=11.3 +DB_NAME=angali_f9404c19_wordpress_exing_calcinator +DB_USER=angali_f9404c19_wordpress_exing_ +DB_PASSWORD=blencorn-raniform-sectism +DB_ROOT_PASSWORD=florilege-haya-thin +WP_TABLE_PREFIX=wp_ +WP_MEMORY_LIMIT=256M +WP_MAX_MEMORY_LIMIT=256M diff --git a/wordpress/.env.backup.20251217_061526 b/wordpress/.env.backup.20251217_061526 new file mode 100644 index 0000000..2a5d35e --- /dev/null +++ b/wordpress/.env.backup.20251217_061526 @@ -0,0 +1,14 @@ +COMPOSE_PROJECT_NAME=exing-calcinator +APP_NAME=wordpress +SUBDOMAIN=exing-calcinator +DOMAIN=merakit.my +URL=exing-calcinator.merakit.my +WORDPRESS_VERSION=6.5-php8.2-apache +MARIADB_VERSION=11.3 +DB_NAME=angali_f9404c19_wordpress_exing_calcinator +DB_USER=angali_f9404c19_wordpress_exing_ 
+DB_PASSWORD=blencorn-raniform-sectism +DB_ROOT_PASSWORD=florilege-haya-thin +WP_TABLE_PREFIX=wp_ +WP_MEMORY_LIMIT=256M +WP_MAX_MEMORY_LIMIT=256M diff --git a/wordpress/.env.backup.20251217_065205 b/wordpress/.env.backup.20251217_065205 new file mode 100644 index 0000000..ea356ab --- /dev/null +++ b/wordpress/.env.backup.20251217_065205 @@ -0,0 +1,14 @@ +COMPOSE_PROJECT_NAME=ankylotic-unactable +APP_NAME=wordpress +SUBDOMAIN=ankylotic-unactable +DOMAIN=merakit.my +URL=ankylotic-unactable.merakit.my +WORDPRESS_VERSION=6.5-php8.2-apache +MARIADB_VERSION=11.3 +DB_NAME=angali_6aa981f6_wordpress_ankylotic_unactable +DB_USER=angali_6aa981f6_wordpress_ankylo +DB_PASSWORD=mesoskelic-leopard-libertines +DB_ROOT_PASSWORD=lavature-barmkin-slipsoles +WP_TABLE_PREFIX=wp_ +WP_MEMORY_LIMIT=256M +WP_MAX_MEMORY_LIMIT=256M diff --git a/wordpress/.env.backup.20251217_070700 b/wordpress/.env.backup.20251217_070700 new file mode 100644 index 0000000..fecf94a --- /dev/null +++ b/wordpress/.env.backup.20251217_070700 @@ -0,0 +1,14 @@ +COMPOSE_PROJECT_NAME=slenderly-spareable +APP_NAME=wordpress +SUBDOMAIN=slenderly-spareable +DOMAIN=merakit.my +URL=slenderly-spareable.merakit.my +WORDPRESS_VERSION=6.5-php8.2-apache +MARIADB_VERSION=11.3 +DB_NAME=angali_94934db7_wordpress_slenderly_spareable +DB_USER=angali_94934db7_wordpress_slende +DB_PASSWORD=chaped-toothwort-transform +DB_ROOT_PASSWORD=outearn-testar-platinise +WP_TABLE_PREFIX=wp_ +WP_MEMORY_LIMIT=256M +WP_MAX_MEMORY_LIMIT=256M diff --git a/wordpress/.env.backup.20251217_071039 b/wordpress/.env.backup.20251217_071039 new file mode 100644 index 0000000..fecf94a --- /dev/null +++ b/wordpress/.env.backup.20251217_071039 @@ -0,0 +1,14 @@ +COMPOSE_PROJECT_NAME=slenderly-spareable +APP_NAME=wordpress +SUBDOMAIN=slenderly-spareable +DOMAIN=merakit.my +URL=slenderly-spareable.merakit.my +WORDPRESS_VERSION=6.5-php8.2-apache +MARIADB_VERSION=11.3 +DB_NAME=angali_94934db7_wordpress_slenderly_spareable +DB_USER=angali_94934db7_wordpress_slende 
+DB_PASSWORD=chaped-toothwort-transform +DB_ROOT_PASSWORD=outearn-testar-platinise +WP_TABLE_PREFIX=wp_ +WP_MEMORY_LIMIT=256M +WP_MAX_MEMORY_LIMIT=256M diff --git a/wordpress/DESTROY.md b/wordpress/DESTROY.md new file mode 100644 index 0000000..77eb711 --- /dev/null +++ b/wordpress/DESTROY.md @@ -0,0 +1,354 @@ +# WordPress Deployment Destruction Guide + +This document explains how to destroy WordPress deployments using the config-based destruction system. + +## Overview + +The WordPress deployment system now automatically saves configuration for each successful deployment in the `deployments/` directory. These configurations can be used to cleanly destroy environments, removing all associated resources. + +## Deployment Config Repository + +Each successful deployment creates a JSON config file in `deployments/` containing: + +- **Subdomain and URL**: Deployment identifiers +- **Docker Resources**: Container names, volumes, networks +- **DNS Information**: Cloudflare record ID and IP address +- **Database Details**: Database name and user +- **Timestamps**: When the deployment was created + +Example config file: `deployments/my-site_20251217_120000.json` + +```json +{ + "subdomain": "my-site", + "url": "my-site.example.com", + "domain": "example.com", + "compose_project_name": "my-site", + "db_name": "wp_db_my_site", + "db_user": "wp_user_my_site", + "deployment_timestamp": "2025-12-17T12:00:00", + "dns_record_id": "abc123xyz", + "dns_ip": "203.0.113.1", + "containers": ["my-site_wp", "my-site_db"], + "volumes": ["my-site_db_data", "my-site_wp_data"], + "networks": ["my-site_internal"], + "env_file_path": "/path/to/.env" +} +``` + +## Using the Destroy Script + +### Prerequisites + +Set the following environment variables (required for DNS cleanup): + +```bash +export CLOUDFLARE_API_TOKEN="your_token" +export CLOUDFLARE_ZONE_ID="your_zone_id" +``` + +If these are not set, the script will still work but DNS records won't be removed. 
+ +### List All Deployments + +View all tracked deployments: + +```bash +./destroy.py --list +``` + +This displays a table with: +- Subdomain +- URL +- Deployment timestamp +- Config file name + +### Destroy a Deployment + +#### By Subdomain (Recommended) + +```bash +./destroy.py --subdomain my-site +``` + +#### By URL + +```bash +./destroy.py --url my-site.example.com +``` + +#### By Config File + +```bash +./destroy.py --config deployments/my-site_20251217_120000.json +``` + +### Options + +#### Skip Confirmation + +Use `-y` or `--yes` to skip the confirmation prompt: + +```bash +./destroy.py --subdomain my-site --yes +``` + +#### Dry Run + +Preview what would be destroyed without making changes: + +```bash +./destroy.py --subdomain my-site --dry-run +``` + +#### Keep Config File + +By default, the config file is deleted after destruction. To keep it: + +```bash +./destroy.py --subdomain my-site --keep-config +``` + +#### Debug Mode + +Enable verbose logging: + +```bash +./destroy.py --subdomain my-site --log-level DEBUG +``` + +## What Gets Destroyed + +The destroy script removes the following resources in order: + +1. **Docker Containers** + - Stops all containers + - Removes containers forcefully + +2. **Docker Volumes** + - Removes database volume (e.g., `project_db_data`) + - Removes WordPress volume (e.g., `project_wp_data`) + +3. **Docker Networks** + - Removes internal networks + - Skips external networks like `proxy` + +4. **DNS Records** + - Removes the Cloudflare DNS record using the saved record ID + - Requires Cloudflare credentials + +5. **Config File** + - Deletes the deployment config file (unless `--keep-config` is used) + +## Safety Features + +### Confirmation Prompt + +By default, the script asks for confirmation before destroying: + +``` +Are you sure you want to destroy my-site.example.com? 
[y/N] +``` + +### Dry-Run Mode + +Test the destruction process without making changes: + +```bash +./destroy.py --subdomain my-site --dry-run +``` + +This shows exactly what commands would be executed. + +### Graceful Failures + +- If DNS credentials are missing, the script continues and skips DNS cleanup +- If a resource doesn't exist, the script logs a warning and continues +- Partial failures are reported, allowing manual cleanup of remaining resources + +## Exit Codes + +- `0`: Success +- `1`: Failure (partial or complete) +- `2`: Deployment not found +- `130`: User cancelled (Ctrl+C) + +## Examples + +### Example 1: Clean Destruction + +```bash +# List deployments +./destroy.py --list + +# Destroy with confirmation +./destroy.py --subdomain test-site + +# Output: +# Deployment Information: +# Subdomain: test-site +# URL: test-site.example.com +# Project: test-site +# Deployed: 2025-12-17T12:00:00 +# Containers: 2 +# DNS Record ID: abc123 +# +# Are you sure you want to destroy test-site.example.com? [y/N]: y +# +# ═══ Destroying Containers ═══ +# Stopping container: test-site_wp +# Removing container: test-site_wp +# ... +# +# ✓ Destruction Successful! +``` + +### Example 2: Batch Destruction + +Destroy multiple deployments in one command: + +```bash +#!/bin/bash +# destroy_all.sh - Destroy all test deployments + +for subdomain in test-1 test-2 test-3; do + ./destroy.py --subdomain "$subdomain" --yes +done +``` + +### Example 3: Conditional Destruction + +Destroy deployments older than 7 days: + +```bash +#!/bin/bash +# cleanup_old.sh + +for config in deployments/*.json; do + age=$(( ($(date +%s) - $(stat -c %Y "$config")) / 86400 )) + if [ $age -gt 7 ]; then + echo "Destroying $config (age: $age days)" + ./destroy.py --config "$config" --yes + fi +done +``` + +## Troubleshooting + +### "Deployment not found" + +The deployment config doesn't exist. 
Check available deployments: + +```bash +./destroy.py --list +``` + +### "Failed to remove DNS record" + +Possible causes: +- Cloudflare credentials not set +- DNS record already deleted +- Invalid record ID in config + +The script will continue and clean up other resources. + +### "Command failed: docker stop" + +Container might already be stopped. The script continues with removal. + +### Containers Still Running + +If containers aren't removed, manually stop them: + +```bash +docker ps | grep my-site +docker stop my-site_wp my-site_db +docker rm my-site_wp my-site_db +``` + +### Volumes Not Removed + +Volumes may be in use by other containers: + +```bash +docker volume ls | grep my-site +docker volume rm my-site_db_data my-site_wp_data +``` + +## Integration with Deployment + +The deployment orchestrator automatically saves configs after successful deployments. The config is saved in `deployments/` with the format: + +``` +deployments/{subdomain}_{timestamp}.json +``` + +This happens automatically in `wordpress_deployer/orchestrator.py` after Phase 5 (Health Check) completes successfully. 
+ +## Advanced Usage + +### Manual Config Creation + +If you need to create a config manually for an existing deployment: + +```python +from wordpress_deployer.deployment_config_manager import ( + DeploymentConfigManager, + DeploymentMetadata +) + +manager = DeploymentConfigManager() + +metadata = DeploymentMetadata( + subdomain="my-site", + url="my-site.example.com", + domain="example.com", + compose_project_name="my-site", + db_name="wp_db", + db_user="wp_user", + deployment_timestamp="2025-12-17T12:00:00", + dns_record_id="abc123", + dns_ip="203.0.113.1", + containers=["my-site_wp", "my-site_db"], + volumes=["my-site_db_data", "my-site_wp_data"], + networks=["my-site_internal"], + env_file_path="/path/to/.env" +) + +manager.save_deployment(metadata) +``` + +### Programmatic Destruction + +Use the destroy script in Python: + +```python +import subprocess +import sys + +result = subprocess.run( + ["./destroy.py", "--subdomain", "my-site", "--yes"], + capture_output=True, + text=True +) + +if result.returncode == 0: + print("Destruction successful") +else: + print(f"Destruction failed: {result.stderr}") + sys.exit(1) +``` + +## Best Practices + +1. **Always Test with Dry-Run**: Use `--dry-run` first to preview destruction +2. **Keep Config Backups**: Use `--keep-config` for audit trails +3. **Verify Before Batch Operations**: List deployments before bulk destruction +4. **Monitor Partial Failures**: Check logs for resources that weren't cleaned up +5. 
**Set Cloudflare Credentials**: Always configure DNS credentials to ensure complete cleanup + +## See Also + +- [Main README](README.md) - Deployment documentation +- [deploy.py](deploy.py) - Deployment script +- [wordpress_deployer/](wordpress_deployer/) - Core deployment modules diff --git a/wordpress/deploy.py b/wordpress/deploy.py new file mode 100755 index 0000000..8eedba0 --- /dev/null +++ b/wordpress/deploy.py @@ -0,0 +1,202 @@ +#!/usr/bin/env python3 +""" +Production-ready WordPress deployment script + +Combines environment generation and deployment with: +- Configuration validation +- Rollback capability +- Dry-run mode +- Monitoring hooks +""" + +import argparse +import logging +import sys +from pathlib import Path +from typing import NoReturn + +from rich.console import Console +from rich.logging import RichHandler + +from wordpress_deployer.config import ConfigurationError, DeploymentConfig +from wordpress_deployer.orchestrator import DeploymentError, DeploymentOrchestrator + + +console = Console() + + +def setup_logging(log_level: str) -> None: + """ + Setup rich logging with colored output + + Args: + log_level: Logging level (DEBUG, INFO, WARNING, ERROR) + """ + logging.basicConfig( + level=log_level.upper(), + format="%(message)s", + datefmt="[%X]", + handlers=[RichHandler(console=console, rich_tracebacks=True, show_path=False)] + ) + + # Reduce noise from urllib3/requests + logging.getLogger("urllib3").setLevel(logging.WARNING) + logging.getLogger("requests").setLevel(logging.WARNING) + + +def parse_args() -> argparse.Namespace: + """ + Parse CLI arguments + + Returns: + argparse.Namespace with parsed arguments + """ + parser = argparse.ArgumentParser( + description="Deploy WordPress with automatic environment generation", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + # Normal deployment + ./deploy.py + + # Dry-run mode (preview only) + ./deploy.py --dry-run + + # With webhook notifications + ./deploy.py 
--webhook-url https://hooks.slack.com/xxx + + # Debug mode + ./deploy.py --log-level DEBUG + + # Custom retry count + ./deploy.py --max-retries 5 + +Environment Variables: + CLOUDFLARE_API_TOKEN Cloudflare API token (required) + CLOUDFLARE_ZONE_ID Cloudflare zone ID (required) + DEPLOYMENT_WEBHOOK_URL Webhook URL for notifications (optional) + DEPLOYMENT_MAX_RETRIES Max retries for DNS conflicts (default: 3) + +For more information, see the documentation at: + /infra/templates/wordpress/README.md + """ + ) + + parser.add_argument( + "--dry-run", + action="store_true", + help="Preview deployment without making changes" + ) + + parser.add_argument( + "--env-file", + type=Path, + default=Path(".env"), + help="Path to .env file (default: .env)" + ) + + parser.add_argument( + "--compose-file", + type=Path, + default=Path("docker-compose.yml"), + help="Path to docker-compose.yml (default: docker-compose.yml)" + ) + + parser.add_argument( + "--max-retries", + type=int, + default=3, + help="Max retries for DNS conflicts (default: 3)" + ) + + parser.add_argument( + "--webhook-url", + type=str, + help="Webhook URL for deployment notifications" + ) + + parser.add_argument( + "--log-level", + choices=["DEBUG", "INFO", "WARNING", "ERROR"], + default="INFO", + help="Logging level (default: INFO)" + ) + + parser.add_argument( + "--no-verify-ssl", + action="store_true", + help="Skip SSL verification for health checks (not recommended for production)" + ) + + return parser.parse_args() + + +def print_banner() -> None: + """Print deployment banner""" + console.print("\n[bold cyan]╔══════════════════════════════════════════════╗[/bold cyan]") + console.print("[bold cyan]║[/bold cyan] [bold white]WordPress Production Deployment[/bold white] [bold cyan]║[/bold cyan]") + console.print("[bold cyan]╚══════════════════════════════════════════════╝[/bold cyan]\n") + + +def main() -> NoReturn: + """ + Main entry point + + Exit codes: + 0: Success + 1: Deployment failure + 130: User interrupt 
(Ctrl+C) + """ + args = parse_args() + setup_logging(args.log_level) + + logger = logging.getLogger(__name__) + + print_banner() + + try: + # Load configuration + logger.debug("Loading configuration...") + config = DeploymentConfig.from_env_and_args(args) + config.validate() + logger.debug("Configuration loaded successfully") + + if config.dry_run: + console.print("[bold yellow]━━━ DRY-RUN MODE: No changes will be made ━━━[/bold yellow]\n") + + # Create orchestrator and deploy + orchestrator = DeploymentOrchestrator(config) + orchestrator.deploy() + + console.print("\n[bold green]╔══════════════════════════════════════════════╗[/bold green]") + console.print("[bold green]║[/bold green] [bold white]✓ Deployment Successful![/bold white] [bold green]║[/bold green]") + console.print("[bold green]╚══════════════════════════════════════════════╝[/bold green]\n") + + sys.exit(0) + + except ConfigurationError as e: + logger.error(f"Configuration error: {e}") + console.print(f"\n[bold red]✗ Configuration error: {e}[/bold red]\n") + console.print("[yellow]Please check your environment variables and configuration.[/yellow]") + console.print("[yellow]Required: CLOUDFLARE_API_TOKEN, CLOUDFLARE_ZONE_ID[/yellow]\n") + sys.exit(1) + + except DeploymentError as e: + logger.error(f"Deployment failed: {e}") + console.print(f"\n[bold red]✗ Deployment failed: {e}[/bold red]\n") + sys.exit(1) + + except KeyboardInterrupt: + logger.warning("Deployment interrupted by user") + console.print("\n[bold yellow]✗ Deployment interrupted by user[/bold yellow]\n") + sys.exit(130) + + except Exception as e: + logger.exception("Unexpected error") + console.print(f"\n[bold red]✗ Unexpected error: {e}[/bold red]\n") + console.print("[yellow]Please check the logs above for more details.[/yellow]\n") + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/wordpress/destroy.py b/wordpress/destroy.py new file mode 100755 index 0000000..4679cb6 --- /dev/null +++ b/wordpress/destroy.py @@ -0,0 
+1,529 @@ +#!/usr/bin/env python3 +""" +WordPress Deployment Destroyer + +Destroys WordPress deployments based on saved deployment configurations +""" + +import argparse +import logging +import subprocess +import sys +from pathlib import Path +from typing import List, NoReturn, Optional + +from rich.console import Console +from rich.logging import RichHandler +from rich.prompt import Confirm +from rich.table import Table + +from wordpress_deployer.deployment_config_manager import ( + DeploymentConfigManager, + DeploymentMetadata +) +from wordpress_deployer.dns_manager import DNSError, DNSManager + + +console = Console() + + +def setup_logging(log_level: str) -> None: + """ + Setup rich logging with colored output + + Args: + log_level: Logging level (DEBUG, INFO, WARNING, ERROR) + """ + logging.basicConfig( + level=log_level.upper(), + format="%(message)s", + datefmt="[%X]", + handlers=[RichHandler(console=console, rich_tracebacks=True, show_path=False)] + ) + + +def parse_args() -> argparse.Namespace: + """ + Parse CLI arguments + + Returns: + argparse.Namespace with parsed arguments + """ + parser = argparse.ArgumentParser( + description="Destroy WordPress deployments", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + # List all deployments + ./destroy.py --list + + # Destroy by subdomain + ./destroy.py --subdomain my-site + + # Destroy by URL + ./destroy.py --url my-site.example.com + + # Destroy by config file + ./destroy.py --config deployments/my-site_20231215_120000.json + + # Destroy without confirmation + ./destroy.py --subdomain my-site --yes + + # Dry-run mode (preview only) + ./destroy.py --subdomain my-site --dry-run + +Environment Variables: + CLOUDFLARE_API_TOKEN Cloudflare API token (required) + CLOUDFLARE_ZONE_ID Cloudflare zone ID (required) + """ + ) + + # Action group - mutually exclusive + action_group = parser.add_mutually_exclusive_group(required=True) + action_group.add_argument( + "--list", + 
action="store_true", + help="List all deployments" + ) + action_group.add_argument( + "--subdomain", + type=str, + help="Subdomain to destroy" + ) + action_group.add_argument( + "--url", + type=str, + help="Full URL to destroy" + ) + action_group.add_argument( + "--config", + type=Path, + help="Path to deployment config file" + ) + + # Options + parser.add_argument( + "--yes", "-y", + action="store_true", + help="Skip confirmation prompts" + ) + + parser.add_argument( + "--dry-run", + action="store_true", + help="Preview destruction without making changes" + ) + + parser.add_argument( + "--keep-config", + action="store_true", + help="Keep deployment config file after destruction" + ) + + parser.add_argument( + "--log-level", + choices=["DEBUG", "INFO", "WARNING", "ERROR"], + default="INFO", + help="Logging level (default: INFO)" + ) + + return parser.parse_args() + + +def print_banner() -> None: + """Print destruction banner""" + console.print("\n[bold red]╔══════════════════════════════════════════════╗[/bold red]") + console.print("[bold red]║[/bold red] [bold white]WordPress Deployment Destroyer[/bold white] [bold red]║[/bold red]") + console.print("[bold red]╚══════════════════════════════════════════════╝[/bold red]\n") + + +def list_deployments(config_manager: DeploymentConfigManager) -> None: + """ + List all deployments + + Args: + config_manager: DeploymentConfigManager instance + """ + deployments = config_manager.list_deployments() + + if not deployments: + console.print("[yellow]No deployments found[/yellow]") + return + + table = Table(title="Active Deployments") + table.add_column("Subdomain", style="cyan") + table.add_column("URL", style="green") + table.add_column("Deployed", style="yellow") + table.add_column("Config File", style="blue") + + for config_file in deployments: + try: + metadata = config_manager.load_deployment(config_file) + table.add_row( + metadata.subdomain, + metadata.url, + metadata.deployment_timestamp, + config_file.name + ) + 
except Exception as e: + console.print(f"[red]Error loading {config_file}: {e}[/red]") + + console.print(table) + console.print(f"\n[bold]Total deployments: {len(deployments)}[/bold]\n") + + +def find_config( + args: argparse.Namespace, + config_manager: DeploymentConfigManager +) -> Optional[Path]: + """ + Find deployment config based on arguments + + Args: + args: CLI arguments + config_manager: DeploymentConfigManager instance + + Returns: + Path to config file or None + """ + if args.config: + return args.config if args.config.exists() else None + + if args.subdomain: + return config_manager.find_deployment_by_subdomain(args.subdomain) + + if args.url: + return config_manager.find_deployment_by_url(args.url) + + return None + + +def run_command(cmd: List[str], dry_run: bool = False) -> bool: + """ + Run a shell command + + Args: + cmd: Command and arguments + dry_run: If True, only print command + + Returns: + True if successful, False otherwise + """ + cmd_str = " ".join(cmd) + + if dry_run: + console.print(f"[dim]Would run: {cmd_str}[/dim]") + return True + + try: + result = subprocess.run( + cmd, + capture_output=True, + text=True, + timeout=30 + ) + if result.returncode != 0: + logging.warning(f"Command failed: {cmd_str}") + logging.debug(f"Error: {result.stderr}") + return False + return True + except subprocess.TimeoutExpired: + logging.error(f"Command timed out: {cmd_str}") + return False + except Exception as e: + logging.error(f"Failed to run command: {e}") + return False + + +def destroy_containers(metadata: DeploymentMetadata, dry_run: bool = False) -> bool: + """ + Stop and remove containers + + Args: + metadata: Deployment metadata + dry_run: If True, only preview + + Returns: + True if successful + """ + console.print("\n[bold yellow]═══ Destroying Containers ═══[/bold yellow]") + + success = True + + if metadata.containers: + for container in metadata.containers: + console.print(f"Stopping container: [cyan]{container}[/cyan]") + if not 
run_command(["docker", "stop", container], dry_run): + success = False + + console.print(f"Removing container: [cyan]{container}[/cyan]") + if not run_command(["docker", "rm", "-f", container], dry_run): + success = False + else: + # Try to stop by project name + console.print(f"Stopping docker-compose project: [cyan]{metadata.compose_project_name}[/cyan]") + if not run_command( + ["docker", "compose", "-p", metadata.compose_project_name, "down"], + dry_run + ): + success = False + + return success + + +def destroy_volumes(metadata: DeploymentMetadata, dry_run: bool = False) -> bool: + """ + Remove Docker volumes + + Args: + metadata: Deployment metadata + dry_run: If True, only preview + + Returns: + True if successful + """ + console.print("\n[bold yellow]═══ Destroying Volumes ═══[/bold yellow]") + + success = True + + if metadata.volumes: + for volume in metadata.volumes: + console.print(f"Removing volume: [cyan]{volume}[/cyan]") + if not run_command(["docker", "volume", "rm", "-f", volume], dry_run): + success = False + else: + # Try with project name + volumes = [ + f"{metadata.compose_project_name}_db_data", + f"{metadata.compose_project_name}_wp_data" + ] + for volume in volumes: + console.print(f"Removing volume: [cyan]{volume}[/cyan]") + run_command(["docker", "volume", "rm", "-f", volume], dry_run) + + return success + + +def destroy_networks(metadata: DeploymentMetadata, dry_run: bool = False) -> bool: + """ + Remove Docker networks (except external ones) + + Args: + metadata: Deployment metadata + dry_run: If True, only preview + + Returns: + True if successful + """ + console.print("\n[bold yellow]═══ Destroying Networks ═══[/bold yellow]") + + success = True + + if metadata.networks: + for network in metadata.networks: + # Skip external networks + if network == "proxy": + console.print(f"Skipping external network: [cyan]{network}[/cyan]") + continue + + console.print(f"Removing network: [cyan]{network}[/cyan]") + if not run_command(["docker", 
"network", "rm", network], dry_run): + # Networks might not exist or be in use, don't fail + pass + + return success + + +def destroy_dns( + metadata: DeploymentMetadata, + dns_manager: DNSManager, + dry_run: bool = False +) -> bool: + """ + Remove DNS record + + Args: + metadata: Deployment metadata + dns_manager: DNSManager instance + dry_run: If True, only preview + + Returns: + True if successful + """ + console.print("\n[bold yellow]═══ Destroying DNS Record ═══[/bold yellow]") + + if not metadata.url: + console.print("[yellow]No URL found in metadata, skipping DNS cleanup[/yellow]") + return True + + console.print(f"Looking up DNS record: [cyan]{metadata.url}[/cyan]") + + if dry_run: + console.print("[dim]Would remove DNS record[/dim]") + return True + + try: + # Look up and remove by hostname to get the real record ID from Cloudflare + # This ensures we don't rely on potentially stale/fake IDs from the config + dns_manager.remove_record(metadata.url, dry_run=False) + console.print("[green]✓ DNS record removed[/green]") + return True + except DNSError as e: + console.print(f"[red]✗ Failed to remove DNS record: {e}[/red]") + return False + + +def destroy_deployment( + metadata: DeploymentMetadata, + config_path: Path, + args: argparse.Namespace, + dns_manager: DNSManager +) -> bool: + """ + Destroy a deployment + + Args: + metadata: Deployment metadata + config_path: Path to config file + args: CLI arguments + dns_manager: DNSManager instance + + Returns: + True if successful + """ + # Show deployment info + console.print("\n[bold]Deployment Information:[/bold]") + console.print(f" Subdomain: [cyan]{metadata.subdomain}[/cyan]") + console.print(f" URL: [cyan]{metadata.url}[/cyan]") + console.print(f" Project: [cyan]{metadata.compose_project_name}[/cyan]") + console.print(f" Deployed: [cyan]{metadata.deployment_timestamp}[/cyan]") + console.print(f" Containers: [cyan]{len(metadata.containers or [])}[/cyan]") + console.print(f" DNS Record ID: 
[cyan]{metadata.dns_record_id or 'N/A'}[/cyan]") + + if args.dry_run: + console.print("\n[bold yellow]━━━ DRY-RUN MODE: No changes will be made ━━━[/bold yellow]") + + # Confirm destruction + if not args.yes and not args.dry_run: + console.print() + if not Confirm.ask( + f"[bold red]Are you sure you want to destroy {metadata.url}?[/bold red]", + default=False + ): + console.print("\n[yellow]Destruction cancelled[/yellow]\n") + return False + + # Execute destruction + success = True + + # 1. Destroy containers + if not destroy_containers(metadata, args.dry_run): + success = False + + # 2. Destroy volumes + if not destroy_volumes(metadata, args.dry_run): + success = False + + # 3. Destroy networks + if not destroy_networks(metadata, args.dry_run): + success = False + + # 4. Destroy DNS + if not destroy_dns(metadata, dns_manager, args.dry_run): + success = False + + # 5. Delete config file + if not args.keep_config and not args.dry_run: + console.print("\n[bold yellow]═══ Deleting Config File ═══[/bold yellow]") + console.print(f"Deleting: [cyan]{config_path}[/cyan]") + try: + config_path.unlink() + console.print("[green]✓ Config file deleted[/green]") + except Exception as e: + console.print(f"[red]✗ Failed to delete config: {e}[/red]") + success = False + + return success + + +def main() -> NoReturn: + """ + Main entry point + + Exit codes: + 0: Success + 1: Failure + 2: Not found + """ + args = parse_args() + setup_logging(args.log_level) + + print_banner() + + config_manager = DeploymentConfigManager() + + # Handle list command + if args.list: + list_deployments(config_manager) + sys.exit(0) + + # Find deployment config + config_path = find_config(args, config_manager) + + if not config_path: + console.print("[red]✗ Deployment not found[/red]") + console.print("\nUse --list to see all deployments\n") + sys.exit(2) + + # Load deployment metadata + try: + metadata = config_manager.load_deployment(config_path) + except Exception as e: + console.print(f"[red]✗ Failed 
to load deployment config: {e}[/red]\n") + sys.exit(1) + + # Initialize DNS manager + import os + cloudflare_token = os.getenv("CLOUDFLARE_API_TOKEN") + cloudflare_zone = os.getenv("CLOUDFLARE_ZONE_ID") + + if not cloudflare_token or not cloudflare_zone: + console.print("[yellow]⚠ Cloudflare credentials not found[/yellow]") + console.print("[yellow] DNS record will not be removed[/yellow]") + console.print("[yellow] Set CLOUDFLARE_API_TOKEN and CLOUDFLARE_ZONE_ID to enable DNS cleanup[/yellow]\n") + dns_manager = None + else: + dns_manager = DNSManager(cloudflare_token, cloudflare_zone) + + # Destroy deployment + try: + success = destroy_deployment(metadata, config_path, args, dns_manager) + + if success or args.dry_run: + console.print("\n[bold green]╔══════════════════════════════════════════════╗[/bold green]") + if args.dry_run: + console.print("[bold green]║[/bold green] [bold white]✓ Dry-Run Complete![/bold white] [bold green]║[/bold green]") + else: + console.print("[bold green]║[/bold green] [bold white]✓ Destruction Successful![/bold white] [bold green]║[/bold green]") + console.print("[bold green]╚══════════════════════════════════════════════╝[/bold green]\n") + sys.exit(0) + else: + console.print("\n[bold yellow]╔══════════════════════════════════════════════╗[/bold yellow]") + console.print("[bold yellow]║[/bold yellow] [bold white]⚠ Destruction Partially Failed[/bold white] [bold yellow]║[/bold yellow]") + console.print("[bold yellow]╚══════════════════════════════════════════════╝[/bold yellow]\n") + console.print("[yellow]Some resources may not have been cleaned up.[/yellow]") + console.print("[yellow]Check the logs above for details.[/yellow]\n") + sys.exit(1) + + except KeyboardInterrupt: + console.print("\n[bold yellow]✗ Destruction interrupted by user[/bold yellow]\n") + sys.exit(130) + + except Exception as e: + console.print(f"\n[bold red]✗ Unexpected error: {e}[/bold red]\n") + logging.exception("Unexpected error") + sys.exit(1) + + +if 
__name__ == "__main__": + main() diff --git a/wordpress/docker-compose.yml b/wordpress/docker-compose.yml new file mode 100644 index 0000000..635809a --- /dev/null +++ b/wordpress/docker-compose.yml @@ -0,0 +1,56 @@ +services: + mariadb: + image: mariadb:${MARIADB_VERSION} + container_name: ${SUBDOMAIN}_db + restart: unless-stopped + environment: + MYSQL_DATABASE: ${DB_NAME} + MYSQL_USER: ${DB_USER} + MYSQL_PASSWORD: ${DB_PASSWORD} + MYSQL_ROOT_PASSWORD: ${DB_ROOT_PASSWORD} + volumes: + - db_data:/var/lib/mysql + networks: + - internal + + wordpress: + image: wordpress:${WORDPRESS_VERSION} + container_name: ${SUBDOMAIN}_wp + restart: unless-stopped + depends_on: + - mariadb + environment: + WORDPRESS_DB_HOST: mariadb:3306 + WORDPRESS_DB_NAME: ${DB_NAME} + WORDPRESS_DB_USER: ${DB_USER} + WORDPRESS_DB_PASSWORD: ${DB_PASSWORD} + WORDPRESS_TABLE_PREFIX: ${WP_TABLE_PREFIX} + WORDPRESS_CONFIG_EXTRA: | + define('WP_MEMORY_LIMIT', '${WP_MEMORY_LIMIT}'); + define('WP_MAX_MEMORY_LIMIT', '${WP_MAX_MEMORY_LIMIT}'); + define('DISALLOW_FILE_EDIT', true); + define('AUTOMATIC_UPDATER_DISABLED', true); + define('FS_METHOD', 'direct'); + volumes: + - wp_data:/var/www/html + labels: + - "traefik.enable=true" + - "traefik.http.routers.${SUBDOMAIN}.rule=Host(`${URL}`)" + - "traefik.http.routers.${SUBDOMAIN}.entrypoints=https" + - "traefik.http.routers.${SUBDOMAIN}.tls=true" + - "traefik.http.routers.${SUBDOMAIN}.tls.certresolver=letsencrypt" + - "traefik.http.services.${SUBDOMAIN}.loadbalancer.server.port=80" + networks: + - proxy + - internal + +volumes: + db_data: + wp_data: + +networks: + proxy: + external: true + internal: + internal: true + diff --git a/wordpress/logs/failed/failed_caimito-hedgeless.merakit.my_20251217_070805.txt b/wordpress/logs/failed/failed_caimito-hedgeless.merakit.my_20251217_070805.txt new file mode 100644 index 0000000..eb29a5f --- /dev/null +++ b/wordpress/logs/failed/failed_caimito-hedgeless.merakit.my_20251217_070805.txt @@ -0,0 +1,18 @@ 
+╔══════════════════════════════════════════════╗ +║ DEPLOYMENT FAILURE LOG ║ +╚══════════════════════════════════════════════╝ + +Timestamp: 2025-12-17 07:08:05 +Status: FAILED +URL: https://caimito-hedgeless.merakit.my +Subdomain: caimito-hedgeless + +═══════════════════════════════════════════════ + +ERROR: +Health check failed for https://caimito-hedgeless.merakit.my + +═══════════════════════════════════════════════ + +Deployment failed. See error details above. +All changes have been rolled back. diff --git a/wordpress/logs/failed/failed_insuring-refocuses.merakit.my_20251217_061237.txt b/wordpress/logs/failed/failed_insuring-refocuses.merakit.my_20251217_061237.txt new file mode 100644 index 0000000..3ab1770 --- /dev/null +++ b/wordpress/logs/failed/failed_insuring-refocuses.merakit.my_20251217_061237.txt @@ -0,0 +1,18 @@ +╔══════════════════════════════════════════════╗ +║ DEPLOYMENT FAILURE LOG ║ +╚══════════════════════════════════════════════╝ + +Timestamp: 2025-12-17 06:12:37 +Status: FAILED +URL: https://insuring-refocuses.merakit.my +Subdomain: insuring-refocuses + +═══════════════════════════════════════════════ + +ERROR: +Failed to add DNS record: 401 Client Error: Unauthorized for url: https://api.cloudflare.com/client/v4/zones/7eb0d48b7e396e0cc8b06ac1a7fe667a/dns_records + +═══════════════════════════════════════════════ + +Deployment failed. See error details above. +All changes have been rolled back. 
diff --git a/wordpress/logs/failed/failed_juslted-doodlebug.merakit.my_20251217_061213.txt b/wordpress/logs/failed/failed_juslted-doodlebug.merakit.my_20251217_061213.txt new file mode 100644 index 0000000..e31ad78 --- /dev/null +++ b/wordpress/logs/failed/failed_juslted-doodlebug.merakit.my_20251217_061213.txt @@ -0,0 +1,18 @@ +╔══════════════════════════════════════════════╗ +║ DEPLOYMENT FAILURE LOG ║ +╚══════════════════════════════════════════════╝ + +Timestamp: 2025-12-17 06:12:13 +Status: FAILED +URL: https://juslted-doodlebug.merakit.my +Subdomain: juslted-doodlebug + +═══════════════════════════════════════════════ + +ERROR: +Failed to add DNS record: 401 Client Error: Unauthorized for url: https://api.cloudflare.com/client/v4/zones/7eb0d48b7e396e0cc8b06ac1a7fe667a/dns_records + +═══════════════════════════════════════════════ + +Deployment failed. See error details above. +All changes have been rolled back. diff --git a/wordpress/logs/success/success_ankylotic-unactable.merakit.my_20251217_061635.txt b/wordpress/logs/success/success_ankylotic-unactable.merakit.my_20251217_061635.txt new file mode 100644 index 0000000..4ed4366 --- /dev/null +++ b/wordpress/logs/success/success_ankylotic-unactable.merakit.my_20251217_061635.txt @@ -0,0 +1,14 @@ +╔══════════════════════════════════════════════╗ +║ DEPLOYMENT SUCCESS LOG ║ +╚══════════════════════════════════════════════╝ + +Timestamp: 2025-12-17 06:16:35 +Status: SUCCESS +URL: https://ankylotic-unactable.merakit.my +Subdomain: ankylotic-unactable +Duration: 70.30 seconds + +═══════════════════════════════════════════════ + +Deployment completed successfully. +All services are running and health checks passed. 
diff --git a/wordpress/logs/success/success_daidle-allotrylic.merakit.my_20251217_071135.txt b/wordpress/logs/success/success_daidle-allotrylic.merakit.my_20251217_071135.txt new file mode 100644 index 0000000..9845bdc --- /dev/null +++ b/wordpress/logs/success/success_daidle-allotrylic.merakit.my_20251217_071135.txt @@ -0,0 +1,14 @@ +╔══════════════════════════════════════════════╗ +║ DEPLOYMENT SUCCESS LOG ║ +╚══════════════════════════════════════════════╝ + +Timestamp: 2025-12-17 07:11:35 +Status: SUCCESS +URL: https://daidle-allotrylic.merakit.my +Subdomain: daidle-allotrylic +Duration: 57.28 seconds + +═══════════════════════════════════════════════ + +Deployment completed successfully. +All services are running and health checks passed. diff --git a/wordpress/logs/success/success_emetic-fuglemen.merakit.my_20251216_170709.txt b/wordpress/logs/success/success_emetic-fuglemen.merakit.my_20251216_170709.txt new file mode 100644 index 0000000..eeb4e46 --- /dev/null +++ b/wordpress/logs/success/success_emetic-fuglemen.merakit.my_20251216_170709.txt @@ -0,0 +1,14 @@ +╔══════════════════════════════════════════════╗ +║ DEPLOYMENT SUCCESS LOG ║ +╚══════════════════════════════════════════════╝ + +Timestamp: 2025-12-16 17:07:09 +Status: SUCCESS +URL: https://emetic-fuglemen.merakit.my +Subdomain: emetic-fuglemen +Duration: 58.80 seconds + +═══════════════════════════════════════════════ + +Deployment completed successfully. +All services are running and health checks passed. 
diff --git a/wordpress/logs/success/success_exing-calcinator.merakit.my_20251216_184725.txt b/wordpress/logs/success/success_exing-calcinator.merakit.my_20251216_184725.txt new file mode 100644 index 0000000..81b89d5 --- /dev/null +++ b/wordpress/logs/success/success_exing-calcinator.merakit.my_20251216_184725.txt @@ -0,0 +1,14 @@ +╔══════════════════════════════════════════════╗ +║ DEPLOYMENT SUCCESS LOG ║ +╚══════════════════════════════════════════════╝ + +Timestamp: 2025-12-16 18:47:25 +Status: SUCCESS +URL: https://exing-calcinator.merakit.my +Subdomain: exing-calcinator +Duration: 57.69 seconds + +═══════════════════════════════════════════════ + +Deployment completed successfully. +All services are running and health checks passed. diff --git a/wordpress/logs/success/success_slenderly-spareable.merakit.my_20251217_065302.txt b/wordpress/logs/success/success_slenderly-spareable.merakit.my_20251217_065302.txt new file mode 100644 index 0000000..abaf146 --- /dev/null +++ b/wordpress/logs/success/success_slenderly-spareable.merakit.my_20251217_065302.txt @@ -0,0 +1,14 @@ +╔══════════════════════════════════════════════╗ +║ DEPLOYMENT SUCCESS LOG ║ +╚══════════════════════════════════════════════╝ + +Timestamp: 2025-12-17 06:53:02 +Status: SUCCESS +URL: https://slenderly-spareable.merakit.my +Subdomain: slenderly-spareable +Duration: 58.05 seconds + +═══════════════════════════════════════════════ + +Deployment completed successfully. +All services are running and health checks passed. 
diff --git a/wordpress/requirements.txt b/wordpress/requirements.txt new file mode 100644 index 0000000..8839043 --- /dev/null +++ b/wordpress/requirements.txt @@ -0,0 +1,4 @@ +# Core dependencies +requests>=2.31.0 +rich>=13.7.0 +python-dotenv>=1.0.0 diff --git a/wordpress/wordpress_deployer/__init__.py b/wordpress/wordpress_deployer/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/wordpress/wordpress_deployer/__pycache__/__init__.cpython-39.pyc b/wordpress/wordpress_deployer/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6607d0c9e73b97aef478f5bd83e8ba0a278a84d8 GIT binary patch literal 121 zcmYe~<>g`kf@4jNnIQTxh(HF6K#l_t7qb9~6oz01O-8?!3`HPe1o2C*JijQVpeVJt qI6ft{ASb^vwMaicJ~J<~BtBlRpz;=nO>TZlX-=vgNZ)55W&i-9o*7gC literal 0 HcmV?d00001 diff --git a/wordpress/wordpress_deployer/__pycache__/config.cpython-39.pyc b/wordpress/wordpress_deployer/__pycache__/config.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a09435a20cd62d9dafc71b9940ccd83216e15e3 GIT binary patch literal 4504 zcmbVQOLN=E5yp!k2vQRDu>4$m>~+PaOC@gZvl|jY9Km?6vNUGXo?REr6u(T6?WJ6I6SuRP5xp z_DU4C{H-qcVm}H=JEGlykygYBWt||}YqKyWg2k~PZVB67fiCv~KVg(KkBofb$J=BV zRJ1=+i$`t3!d;(7p`2vbQsnxo5I>gm*xO;$ONcyhhr`2u$F zVw;7>w&DxohjJ_Riji8b8+vW#x~c8D{qq67lH zXAm8Z&J32L+8srxm(C1#yh zz;+D!hBL;-fgcBc!kJ)Y;LE^II+K#00)E<=lKhOcpO*G#fG;^Sqq%0K-5F_j7WfHg zR`Tb7KkuBC>t6uh&QXP4{G&=Q(U0C3;N^K*lKso{${WMEz%1D#^eXf&(h@}UwY0oC zirfCLp4@xv`C&Gkj|OfKc@*N{M7(CARHPO}vfJ_lmQKSDy~DWMjM|+@Fe$u`QOa3| zr!&nU>e5!=aprm*-;JXk7N%1fX%dCZ_34K*3thn%#5PziP`??=;1F;DfR~egu-lA# zJAORh-b;&{9;`*9Hf*JyT3)9kgEGxizUT68_+E!lNsDdorOR2&eJ0-DLCN$s^MZK0 zxy_n8ZtS;N)Q#UG9Hlc^jUUF0L&T@{F5`Y{&lMs_i!a#bb`9p5F(YdzBkVY+96k zbI>o*G1<@e6tK16EyOk*f35bko=zud9wfzHu4nd;E0vU{_}an!@P z0XW4X(vHUko38^riH_G~M1t^9XdMk~$lv1FOvf(^0H27xu*nXWmpFNB*hxHHB()ad z?3=F7PoxIG)3z@JWRQ_F$-DNzTTOnRk&P@*%|`^>8#e5cME-dBS8k)a(Wq6QI+ru{539>-8xL0=R#zUm8@0#P^=BK7_StRc%73w` 
z)iUWCqD361;bO775fL3O|SB0+LHeE+e^u zgdn+!U)Z9^c3_8HS=lRSiOKN@x8VfWM-3DU{QuAlf6hKqR zjKRLOljDB}o!!^bVt+I(y_IOarO>fIsjpO!+V6_{fVsQs?^UkBh;fvGcN4VyuGE7( z05eOpmnSfZ*h;4WTc?vhjL_q&ewjH0(qZw6@f2X-L4_rY%x1(nYsT3Bzwqp6p>S9h zjvPtO4O}^aWXVi5+$C5skT8m&pzelrktA1EW%U54C##2(1{YT$|6`R4dE>!OlX_<&2zKR}l8n7qOUsk3pAQ$B{GXANCH?xMrD&kbj*;6fVCRZyX^E)JKGhFuO2enekrMcDWy=M>j$KZdO z|9ZJmuhk!f|*ZPG!;M1TCf!$2algEQiLh(hS7Ra7(V6fds=7Zo80E~x|@fFcPmI<&dB1-PbW^668k zKOwPWJ_B+{QsKw`F2fJTE)&Ux<4)Zi*4s;oJx2yb?ou*7M>3(i zl-!ylnbLYxa4+4$XO0YORw!VX_@}UmiqX%DhnbI{ROO4%ocI}BsHHpHb^i)Vfmu|6 zBmjq~89x}h@oyt%{L4HzzUw~>*RLR zd}8{I8+ZN*3T|`_&bS$~F4Ir5YvO6eR$_PUq|t3K zdT+;0;&dHdZ^X^S?Yg?|#B+()^^#V%mCSeNnIX*M1+n;)b(cirO@lYN``F;_k$J?r zZM5gOhqiZQb}w=Bp3!NQ54}5L5N8L8NDKes`KgK-t6Gq?`P64WZ2uA4C%!pOos#S3vAoi>X^z2a~|~yHOIpu?8RZ8 zcbIa~5Ji*-)b01e;z7q!_8t1nz8mF*a^D@$;=;H`VrDN3joY~MzoQTa^l5a>523xT zC2Y~)=1oICt?|U?8(y(JO*9OhmWIx2Kh1E~HwS+A$#I=S7$dw$uVo^FWGTCV3EXNJTRm zC=aGCFqe?IYVK$85CfE(isDfw_j0u$(*2+x#h8`sx2W7ePZtEr3xXu$L%a&qd=NYw zhB2)Ot!5DLtcQw~7g8++L71jlp@EnOfn0)3rAtMd3iy>Fn^Y`Q@f;N^D4rSe1*%i= zpSQ0^XX067#xm317AOCG;)Seaq;3j<29^5ppkUNvr${+DrU0Pt@ySJX(#Y$$^H))f z*vJTsk+Ey+Y8HILm{FL=R$+f`aO=kH%M`D5dG z_18a_JNZW4A`#Q&ZTNR0;JnD>fnQ{Pj>^sQz%B$-asBf#|$ZunR zs*&tL391=qJ3DY*HMb**yQm8(FVp*GKyQQuE84k;`+C$A%92n=ZWLk1LGTJ%xr4%J zI&6`Z{sq&oPX<7&n~d2+IZ_lunVzr+xe|Gow#cWb6vi=oY8)dLJ!OaHi0xXiggG*` zJ?ujZwz5ao3&x>cG)C-*9X1MQWOMt-{LDXeisr66YK$D-;La0^8TS#1Y?%BA^|bvx zBj3JZF#Y=oC1lMVd)ytt+GK0=kfrC!zt#G_7w(^9k4d0{#dwtOeVM5zyN99ch*h&{ zl!EVo->HE2wTe!`g`$y02NuI8Fe9F;msu{XeYBRW@nG$jYwxVxQ!FSO>$`cDD)#6L zLOo#PL@7tC-724_P4O8PAB)`ZJ1t4>p{z7}q+IY}pEy}kT2TRmL@7HPh*a5Hyv>Kn zK)$TIL3&xIDc9(0LW-KJ8C|)wTw+|+8iSuGx^7UkQLKL&cuNTUVoa2jCww1LbK+s6 zwaAv4XSwD)TQ`?k`N~wguWR2Q^T_%s2>|3|0kk4{4YE!ElJ;K%^4DmL*s<}{$X5#x z#E214j@YNH05n0M=4anHGz;t49sxdw7T{)`fZOd^xD}1Q5g8-vFYNaWFtfvT21*}O zihmc0%~_jFkk*%3@|9Q{YnC>);ZHzQb^01E^nk(gl8 z4PP+{&Qp$_AXk^pEJQ(7Ed9{Bn6_1duVFsBO zoq8>$^Qz{Z-s}dcPMf@kTPtp4kWcNx3!bs9XB2vna1r4I2?>A36dRnjf_pO#lWiW} 
zdH^Z^#%+`vQ^cNh`k3M)ulY7#zZqw}FwSqGQVID0L}2ZS1J#sbABlwM*uVvbRbkWK ziL-6xR0`;r8YSKN8UM;pFRy)i&fj8%IkAb+Y_sy(*IgK41SLdD zoKgG&RuC!kZ_x=0m;qwA4a_v2(22__|500M6QRH_9thYKCPe;0XRxWhYS+a{D)j3N ze~P2C8BP7shJSBzBG4gAgtGU5jt|q(=aSDkbme3Ett=IO)StR~y@#?HR8ZUcx>%Dx zgVyA2D#+DNSx~-#z60vtL{V;DWUxMTH-oKU`yi+{e->x(_ctL9MDrGjrkqR?e<&*n zKkir+{gA=Ci%D4Y9w0~4Czvz%dhyHV-GzXhqL$&UGMm{}`Gfyu`M_QaGR_7?!X3TQ z4D}e5%7O0~zu7-D%a_n%RRA1Wy9kVXrhIK=-iOaK`22n2p=$$=g}vM8n+GF?+>IIK z4RD-u+(b~N+|JZFm`yp@ed8m9(L?(a`=L8quljBnHz1D_2ioOdYcyy?pG7dJ>%;f& zPN8=exHzkbaZyb73A+SPm%$`r8aG%7G@3+jNTb1)`O;ashrgCuC-cbGG$lcwR#Gn0Av zE#hE#mA!117acR!&lk0IBBLVaCmz(``7SC|MkQau6REyV!*S%AWLE?M{TGo{?$zeK zBVtjUlZc;6YtEW?hO*C3i6{r5t9+rFRGa=mP2Rp@iB@{^W}|;ob@6Bowh!`$yErFSE#s31-Ypt4eR7a{+MbSPjt$v z;PpJ!h$-Y274K8=0Tt7=>l&T=C7E7YR1g4ElhTSf+SUtIPH`{*ZY7;3tG`tAy1qkAeKvKfqiMAnTFZHh fRPN2{kDXhTW#sfXbnxJIwR0KM#GT# zQc&uc4O3OPx@U2FUu~4HDa>TnV})5CX$^;I8%oU{Hk>PbH|PyIyc@YeZ)>aF-EuoU z)(^PjT=%;y;Gz3Kv?Ho#hq{Jt)N{jrv&qA7x#hP5&Ro_OyczXGn}^O{uv%rUrY1J? zBObLo9C_mdKia0cTf0%a*Y$%Y^%{RH6yC=h{t*dRuuG+(F}0y{gJ~bBEfv4|Gwha? 
z9+w&=+7n7PD?c_GCbzi7oM#%FVHK2ZD)TbxW}g{sjvYar!_LUs8D8P#(+Xyre^z2g z*)hyE%g)L+t7uz5+i~^=+UBxRG#iaL!A@efBUyi{rM{=wY1Ga$6ViP%IX)8ZRmf)* z^5tUa;x`-@rr=>eh@?DQ=oU5%Xd)yj(XR)gHZ=$%e<*L@y^J^f36fZi6%Q#>&nn2N z$Z0aCMf#o*m7Xe0|5V$jUS_Q9S^EkwVqk@(;V&IGdv0{0WIt#R`S{XkJrWmOI7Qcs zVk@i{B^sZvv+iEbR8E^U6WnC7RYsK)y$}p6eDTsz|EKL*JTFq{h zz5GK-#mk0~c#;_}9V-hKIuVYO@)nO)rOXrC^SXYAdtPGE98g2D;PGzQ7o5iApsi_% z5%QoVOo;U}MI6P?!KLMPw0`hFV% z_PhmBnzVPzR2}>s(}&-fw%?_E1euU2c+?l&ri#(|Log`el`!}bGO==xUbUrNQGW5( zBf#Ju42Q_#S0D(q01Qlz)u#%9K~+AiWFUCwvf$VkX{JyeEZA+g+-RE*T-NI@MlOHY z4x`&Otzqr@0(223wyg4cJHs<8Qj{YC7^L09y1P~@i859vXwyllvjcI9IhkjCv%gi- z#5_uka;}DflzN&Rqk%_|oWmQEQWR}ool}SBr;BW=M)kf3#BmILxfPv9Ypm``X5H8D zt21q1BM~AGrjd0f{laRj;a-8>4WG;gHgjivRf4?___w<**$n~izZngdRhQ$4N3(fG z{nw?&C+sZ$9FyZhzs2Q8i?3z2>>k+LQM4P@mzNV|Xi*;719dr3y#uulKv@D@?l*ZR zeQ^o`n2>ui>*cHRuv8LJNVKEO9M^{DrptePK+Q~qkFWq3>UCicNq&XJTNfwKbl3Jv;M^YmN9%?V7 zQ+LOD6}6*9Oa;c(3~z222~b}#^KiC$x7w*PuX??Dv$`SPglvZfZ5EpUCJKp-5iRMR zO=*b2MQw_QZz0#1&2`EM1r6!UJ&c(|%Q0^&lf0Kp{l^%e8toi0-EeWVF#lXBSKWWA2x?jnZhL#c=u1T88B5RqU0*vJ$B`Z)@*!PLGY<|AcK-B$o3WlsZm z)Y#b90VR-OTso^fB7bP0R*QAC7>}&j+$+Tv(Ifhpdlm@O%n372;kGyqH*6xuQmY?~ zxz3ToUPR3oVWHS}-QCDV;DT9Lp;tmwE|Nol$t#J3Lf-6kS$Jqr0jNWxirO(h-R(W7 zjW#hAbrj4|6xfvhgGmQM$}hgSCMcZipGNZh*>A8J87<0w6jVmMTX$==AXk>?_@MYU za?)MEJtKC2sTO`7-YIct6!#+jFiL%h0oUBa4CF#m(7r{vDkX%J#LmGegDDBkWCl}t z5Dh*bO~P2VWyX916NKbAlm%P0wK>(%4AuU(VZdu^!!a`*Lo-dC4bNaKXhlhh3(*rV zK`;5o0=*1{7tpJUQ?c3wf(g4oGSDlL4EzGgCXj4RAlZ&BAQ_Q=fn+1L3M5+;{rNX1 zk(?6OS4XjXzT*$vP3|I8iUt%wWHWMK1cOn~NAP_q$p4o~q*Uwf3JKSRdkOum5&GyW z1+xD?cu%Z;cLzZ33Sy)J_*Zc}LZ~?v_~LE!Oo1;$qyqRPI*LyLDM8*>B3{&xtCc4K zzeMc>_y$G4#MFY^sQ3;g?@~gcY?@F|mD)h|g&kCcCy+>->lFl8UlQrr1kG_OraS>! 
z`f}PE1L-1+f;Xpwi1gw{tRY{Db!I*_nDuEHaJ2ms<8VwJb{_l}EuNsA|tgqi%uRHxey^4z)9gnlH zdCA@2h^1w4%XkFS0I6>F?(>vro7;YO3p~5+-{-Ix-gQMUkk;cjca|LS18lNpP15Qp zRGdMWh}+K)H$j8O(IOttt0rmpyO{2CyjiGtOs!~#sJEj1?F*Tmfr(C65Ojed505f@PD-ty=SPAr zmc#{0zDEgp3qfli+m8%}$$JQjR>|*##Q3&qXeW-$ne#`EpH%*-X_`uZrm7uO7AMMY zr)@|ND%DY5&R3feboC)-D3NDdT$x7OL3>l~qG3jZUHHUwp%v;xLTH!}3ofSR6w}t4 X*gs8$^fUMrC8U9dX{#0b(;fXk)(LT) literal 0 HcmV?d00001 diff --git a/wordpress/wordpress_deployer/__pycache__/dns_manager.cpython-39.pyc b/wordpress/wordpress_deployer/__pycache__/dns_manager.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b99eeebf376d279add84618eca696a42c86b009a GIT binary patch literal 6764 zcmb_hTW{RP73OV8E_Wr%vg|m{#Y~f?*~XTfHVp#RNPSV1I%rixPGDq!7W7F)K@?HzAuEo_In>TmWuJe|{8qEAiVJ2_g*1K)4qsC%3YV6zEZM8dr`zCX6 z@7&h9lT5p!bXwUjorR?8&TB%6DF7$&aZ{m}0P(VE*EJ$>3|Y&KbU{fO0T1>cOXb2K>j zVY;%^QHv(?Q@SS$#8w@x=cwEFPz-H-7k<&zxxpJuJFCdEUY(8V+&Bx5z)D+16Lv52 z9)^}ZvCL_BSnkC2<$Pc*(NHwYvlp!U`u$)1q?+HB&k&L-I*)H&>URX2sY z!)zLLEpD(QcU3mSj^bTy*2Ww@SrAx$DzPH6Q1r)iOE1r^TD8U7}_OZgWdpho5ajt%>+|!sr_M%2l zjh#knqKA1;+ok6$W?{s3_AAG&j!RLH27VZ1T&{TKg3hS@yjV--M|bqlXu$Jz!-xr_ z8GHIx7VYUtb?&8wQNQfv^)&4#^K)}}^X!-&W_$7GTrUjZk8{^woukK+xvVXbQu6HEYd|7j3CycO7qJ zSJ(EcaTiT*#ZS055FvOvRWXD0 z5=W^R@q!>%CsqkC@t*fB+$EDJ6x&oybyBlc?Vq-h9T|^qcG$P03F=vxf~Nx!^)z~{ zItXg?4e2I-#YL`cC_CgJ(j|6PRmtu$Ems3|TT8VKILwBDUn7@5xnpeWxxQoWDyo9B zwX19!xv`^D@YvOob$M;ADZAQsBej{GH^|X$s~@Ver72s?w3(Z?RpwkXwyoR}zpuv7 z8jm5GuIk$3y4<>|T!D{cRL<64RAt-7Sf9pMCUU#{N^XCsT~*#w*yOf-%Z{A#d`3Bg z8o2WzAEQ=A{&aDj_pZ6YYK_5MeltkGYJ-F#(BAm`IY_*5^fp*Nd-U-+Bw>NMEo@gOa%WYFs&N-Y*9 z96J`tgJXMV%$3!6AsjIogsGs2D?N6vk@If$6!BeUIg5GXA_kOGfW{XW5o0lzY3%mo zn$;eV&$~;VmN0-wyhkHnljEOBz3?lIi?s^m`h6nM?h!I}E66Lx$RiRI^0@wh-kjmfzEq5)$FFv2CP!#UsjwnagkrE@|Wq;t+62W5))Un7O&rWNl`H zOFog;6Ou8k6j;u!5tmS}Ljkdjo!hvc+Eurkc{BMF`W=>RQ?lB0-lS*Bk>^c0B02Jk zN%W1@uhp$*!6lC55{Dhhor+7Gic4m;om&pLM4ly=%z#Ud`WNb4a-K0sBj+n89>es} zxcHxu1x3)|5%dA*%PwRvsX{3gMjsmXp>wsy68%Icr`$LK8zI<0y85YmBOZjzUEyG* 
zsLmt@(Gi?$S*p5H%UAvyF!^;6hvACfyEZmcD&)suBlqVE%kQ0Des}48_o^HotgJCI z;BUC6-*7Vr#+`-(pN6yZ4E87CVi?AoVGT)(Wdv zhWJ8H)39sxMSK%vEl#4Xql@Qo5(G!_JQXicLGTtYQt=%W5Vb7pEX700y0m~RinU;= zRS|FQ3mw_X@qlO*(W~OlRSFld@x++o_S?VIX_vHSV4o06kt;eFL)Yj-VvlJ}yiB8$ z!~QN^(au23q5v(9xr@j^`~XE^_5H0d_L-QYsxwqbBgwvUa*3Aq)0czc4ap15$vY^N z8G|IrzYHCWRsKCRt>YSw;r*AU{Z*VM1w1%y)c2unO{>|_8oQ0ZxrF7B984yZTt>oo zc6}vc7*h8Tcj?4$;HnSh zb+b}8Hgj@_=}PxduU5%oxs`k-zYC5V>zb=zPK?J|P~LYXpXqF(MG>2WpA^ZQw)OcvB^g!n)y$2*WLolrI|sab*j;!AG$! zF+yTxKg3_5mqC=#rv0m-KC$N`pW>P1H55wAP^VRc5Q|O7SD+d?Wm+|LTXoO_ibc(i z*YLdO#Xz*A@`Djg`GJu-d4QY$3emp-erV$hTmz3fkWDIt_B0LJvE0!K)f?FGUspsb zkqJCYEz`WG6DL&IG)SF5r5HO+s1!oJLkR&O|1){ru8`ltUSy2?QiDv^ksiGU^y(Y! zt-k@gmed?AV7F2>xm9TnZQHtKL37A+4ZDB;dh8+$y|fu4-6nV#_$$c6QC49XtUF%^ z=}~;&vjwktcx@y8zetV81$5^d0Zrs}X@Ow<)G`snAgfMPZ7SO1;0hC z2!4kmYt?MdUYGlj65N={p3F^20mVa}B82JAyD6r$-qWaRaS!X|KWY5)JTXou} z0mP4JC=)W)my95JfM}sGnf#vIzI0pVrp4>U$TFNmbB!kUMSi=C{3Co7Ix-ad+2DXM zFV53f@k?GsL5Me18}VJDUY!D_HTwO-v>dcJXfd%%8SYb#?cbg3!Mi8f3Ir*4BAD{g zLA#O>)_;K;iAdxoWORO=qB1M95ZX3&COd}2q`J|dxV()-mds=zQ!&*1IkEv&#=zRj zfHRI{gw{I$+JXUnE?Ca2yt)m)a?&ekUZL>#Yye)n<*DaF}n#D z*Mr_SN$bmxyv0vkNI7`y7^2@OsIeh{<`ea+DoCST6Sucpm+v-I4T@MJBPktl`o#tan= zGO;hiloY4X@KTpb{V8?n0iIY)VDIjgHh4k534ckZ@lM5QLorTbuk0YY$o+6V!t8md zqb^qeCJxFx3(B91r{zFb;y4uK^QF#ygRU-8L4Xi%Q$aISD@byRBPeL|kWl^sK~bAJ zJatBSI1<2Lw%?w4WoAzH>l~8#ZgCu4i`M8rEO;U)Pgj`OrC`|v$yz~(C@1qtx}scm zVV1eWqB%-Vlv1Cv;_?Hv!O5}G2n+jc`F|2`5a8qt8m5h`!jxv~jy~1U)G7Plf31z@ literal 0 HcmV?d00001 diff --git a/wordpress/wordpress_deployer/__pycache__/docker_manager.cpython-39.pyc b/wordpress/wordpress_deployer/__pycache__/docker_manager.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..42ee583eb19799a2c9385da158b33b9b3f79f7c3 GIT binary patch literal 6947 zcmcgx&2JmW72hu|$t6Y0^0(}`8z(NpHj>>oO6Ye?6fn|D>DIkAa(u_ywCFOlc@gWtyutRC(1J8m_vl zca4Uj(lf&?bj^k-+Xc7SEj3E2!quHJw|3P=BzA!tOS`W}A1p7usE)^<8dRH$}78!=p7{*csO) 
zzuWTzZu{tOh8^Dv>^q%s-QH}v9VR>MrpIh91jekR>2|!f^%+*Etkl({44!7oZ3Y2u zj5nI$I$dAu1YwI~j??r!AB?omj-hFZc` z_U>CZz33vb{uU%(ceZMSY4S!MGa5N9=vJ-}x1e1@IG7?amdD6;n;n?vsbc529Xh#f}HEPGQ)Fz}{ z!Y?=j605P|D6s~rhT4uE8Xp4{pJ=<(4_sh2dh{F1&OTSYPS}CnMLdUmhDB@0^0H_L zOT#-FH#)B+n=H@{eLKbSLiTWRtLu&p%BR0KU=dw5t!>B`)fUI}1ZX8Q&gkosvXcu+ zimu;o1Exv2&BNEwhQ}a$Bt?gwa8cJ2BjE0uID}qtn8?r^1tE`&xdRP2&Xc$cXs}W; zRa1?Qj?H5+XS)Fhptrg#DYVvk>sC^0HG5%Sa0hnN>xYRcIBvZbZAtziv!K_?E&3}O zv9h7;NDsZE#p+7bkG%8VqXcQE?XiSAU@lL z2PYiyyS&v8_dp>LN7`PpZ?5xj9l-=#f}FMOd$#9$3lSGIt`iRDBL|J`&DiK{+w34@ z*tv7K06Tj#y(V{Y0`-_@&*dR!ZwS8yFK%}{I6$w3AV+VbkBlG_!%`s05VK@oUCY&C zQ#=V|pp#H~@rLN{tvM<~q2z*>?E9(>)Coj?MVtmnjF8_AfxT&_D%h^kIy(UhO@B-tj{dQt(_#H2?g8phRU2LKwCWs@%V)mK> z5xy~ZsYzoqZO8>uNFYthD-${S@l-suMIN%O5bm=*a40?+~k1kZpdhmUI-BBz#p&CkuEsm-eMYE?b0*3et} zQq!~t(fovwj|fAeA-)AsElMHs4>{YRCI1XJxh0dGF&TP7INl9?TV(~=GSEUs)h-Rx zXg*eU3S{qUKCVRVn~L`wtkB$wV1*j&`c!DLa;#-msAYC*4YUt5&tmc_tCqBgy+rbuM{@t$)RGFrchqgNecbT9K-xocRy^%5nCKH)a=bxBqUsA;Wkf8h8 z!rFanVFCVofn+vJ

Y2c2rzgxZUsYFgm$k3cXmqHac}RegBkK0q{T(;a5CGgit53 zDk!TEi$o}|s_P?ABe+cB>4H98NMi^V7@M-dVWz=E{5aO(|J2jm>r?Iznq9 z9FFyib5Nx?Pvm<oK=NYa2LON~y9V`yY#YZNH*PykU9C>|P?96Tt_*Pbx})P#EhA9OpG*a#*6&5y0*k&qAqI&FrZ9m>naA(;{pWo8`7ZE5%~JOZxX$fuRbC*3_?t40(MXDhuz-1^F3K1*!jKQW5O;m zn19EMp56~S$d~|~_#wgT1tJsfZHA^hgvIZ`@l_7)AU?d5A0_sEZ8lVH@$wZzUNWC z(Fi#WA}>4+M5e50C0ckGYWBDZQke#(c`$JSI|LWD8GZL*Xri-SuESKN?BSq-;~N16 zYDdgT$2ggYzda0R(*3B>cmZMYQR2q(dnyUgPeShj8X3%F&i<(UKF{t4T3RuUwGDHp zxU1}x7yx%!*;WU7TftdgzpT9X%K=I;tP&SkmCdvZ12bBY<8fji6yst#a!_KmxU{K= zKgK0Usj&mQ+Pm7IJh0;OPMOVuw;UTgRtPz9IkworkEB7|RBpeG6Ge6F@2W!IoQKWf zoAJB@p7Z!+D?Aj=#K;N|?GI-Ytf-n9XT_YM!#Slv-}6@fwxU4O0JhN`4*L>an(nrEknTwhx}-_TnfHt962 zWh!&fiRi+=Nidb1Oq^Cr52EMCJ><0Dy}0c9t)?4XKqD8Es8&+MNxlaKMx}*?w3-{8 zeElgC(@CW9PV}{a`S-uP%K?=i2+~IUI>*&=NHw2SAcy-@D~ecrfJ|-4;Ja zZ&n_7`0=Vv<6oxng`nqlPziITDiV{*{~nXoTzLo+KOuRQk(1c7=xEFgHwp#jKrE9y znpvZgt<6E%tx?Nw(&|8?A`9**F89_jp>(kXIsd>fxCo+HdFm}ahGuHD2d0UdGmdg} zq_a?ap4BWA0cHK*ztut&DNWj*HIJ*&nMdzaUAu9ku3p*uJ2b6AE-3L4Cy5Zy(qf6R z>ie!Zi~GhbCDo%9r}QNI1LFM=5vrSu7m2(?&W_L%$7y=IXs}WEwDlGj|FY^ z0DQw2TDCaj^J=|zY;OMO*}3_-c|qLuSy{*z$Iwn^R2ly3g$IHRBq{b#{dGI5axIA& zZcz@BF6TH6BYZc^v>cn1U%A`jJ^D{TiUc`-degKCC#B`|zbh9AssUAR4U_&cP^GVK L>9Yk*oh|+k?w|Lm literal 0 HcmV?d00001 diff --git a/wordpress/wordpress_deployer/__pycache__/env_generator.cpython-39.pyc b/wordpress/wordpress_deployer/__pycache__/env_generator.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69b59179968ffc7a8fe4808f9ec7e7a0b36d7af1 GIT binary patch literal 11227 zcmdT~O>o;*e#aL{5TYnrR^+e5i#YbiOfA_?(yXd+w*JmXZCaJ=xMnuXg$YQaO#<`- zXhjIM(^+TRUNW2e+FNR7XQ%14)0s|>J@(X@-Y~uGrM~sJqaNDd|2+T%DcVVAaw$1@ zc<u){y(FzrMhIxw$hgCGQTTzh2Pb> z%I{iT+(dnyJ?dX=IffwC2&8w##Uh z(K=)+Yf^P4{14-r`?$qCx8t~hx#74Dvx1iAnjO#XwHx5pfFWjol!yjG{%c4#rajBl=cY}r^aS1s4}I_8$gY}0qzPLsc6ouSWe z@rK2=G47_*@tAcVZ|+*Y&xa2;h3DDbuD393_|(KJ%#P)@y1h1kx$zv>k!n>X%3D6Y zB`VrhU^UxVxhh9G8cxvaIH+qYRu)QWKsvodu 
zrqQrm*Nf?)(fCGUhXFQT?7cx4?lQ;s8#d_7+jiJloU0LI)nkj@ZC1o^n&Qu*FceMJ zNB;czcf*mukgYrg``Ox;T0Q6Fox%~x&Y>r77to_S1>Vx_B3ebqh({Pt70ygreafB& zj>^&WUF`h>!7&~Sf3)&KE-=0I43&>D0jRG?#MvNnZYnrp@}oLQnk%^c6Day}UuuBN zP|ATaPy_APlC3;bcBu!l1=~`?zc$SH_mSHQT2{LiI^vZnP>poRCAQ%&r!AV7Ze`gd zo-hM1yLHZTr=7)(#WUvFp3lzun-+7<(xkJ5O23*eRtw9EBM<`h7>#s!&aM&aZ%8{zP@reFX6NkuSL(0c+fn=KRp~QxN9$_?eOKDa_2oga zpWBt$YF`-`RFkDgt9K#NHQ@b;DobwVyyXAQIZ5XKdno&g|GQwy)|2;U_EHQ=ee4Y+ zVLZbwXe`N+@PAU2-0&Xx1U}8Sx8<;=5xhMdH*>G<&2Rgfo1a<)yIbWBJiF=p8e4-!y7cw*Nk*GS>?Pd~L5 z!q2{V_sscoJ0~NVg}Gj^zOY1E!*QD)O{j&3YZG2;wp=qT(4v4uSjz6JI?Y}MQAK%v z9zQy;4+_kr=Q-QC-`j{32DTtMj4el+*LB<|2Z7q^M!Aj^G&iBuy6ChcV+f5%Z98r? z&t9i>3SBDzZZVBxB$C%7tqny|*QhIbb={ufJ&CcBs~5Sk zQi%?%#OCzSV0k;PI%a2|(G9zO)HD~!1y0I@<)MA{VxlCOAqndy(Gt`3iKu? z#xGNBXH1pEDore1j}6IaKM{`ZX4xD;EM&1CSZ>qdmNN|<`X1@4!`a1su&GsZuv}L!ii1-Ts|R_%#rZuzW1aC$#W8gV0VMR^94Ll3lRx& zk-3Np@%%vU%P=`$o(z;eEJ2tViD^QnfEt?uDgds9zfVt{;ywj8M|drf1TMsmZ$9v# zE^PDuwz;|8-E`cHDewYRyjUo}?J!T<&bkGevuwUo9b$E&a7bWflFEa6Znqvf=83?8 zd+E6gEqC1_`LwXza@wSgPoyW?lNUlGIUZDSd4VH7IcGJ;E&_C%Q6r5G$T7*P=!CQ( z#mR{4*gGg9J()I!-RRhcF#MfH$~>v}^mEMg$&Qqal5EIxa=4J3Cf*nPnwQLfPIJj? zg)ZL+HoryqlVAxr8QAhsxC4ivEp4?g|57<3aisrS3Yr8erq%9lT5hl7uvRm{cns$r zsa3E^6GGgNqX0ux?Z>*<fkam%a_}u|^)*p;$3J#&ZQneoh7i z`Syi)uk~W!J#^g0rt^8imLFosIO67$O(kksEdCXS`n0Z83QxZTTd^(|lc1y6B}~EX z1G*U#wFV(bXpQ%&#ch%VC5zekbP z6!uXHL6A0Z;L^gy)t=_a6@iw3Q)?IC@Cp}q3M2U6HlAv+i(4A$qo|QqpLWXByAHDA zvuErIdS@Jsw+`5|XjSZEaF`Fkxt@Ef+OOF2c-t&SsP-Y~sKfRVj5%nNEP6GnAjCli za7>i&pf6-S=tYD9gteXx>6oX}aMXRv-vph)%=0!{&FY>3YPKG>g2m1@iEr5HkGV_D zNH|Bc2_p|xS2Tqs9E2)(yhMjF%|jba?ml7;{isf2my6tYQ4t!g55KP=_aSN5gQ*P9kV&bL zIT@xT`hl(%^Y#0WJ0!RY|(FPu|=$=H(0oE6y6v__8ELsKDo zfpy6iq}`!s+Aj9~M~Sf_INk#H4FSG?LZ?pxTq=}QU7kf4!;s5rMJ=gi+)Hu^K@I*_6j_lqTtP;8IF+_ouk@VO=CYRL~ly__U&mKJzqaLf{WxtLCQf`+C0SvJ>_B!yt7eXhksZoG=j4as3sD1eUj zd3uzg+K3qmDTo*>wdWTB@^9p*5ZeuLY-|i$Q90dS!?)HQL1CjL#XiMK|A|Wk5~fS? 
zyc(X^M;QB<;d>^66W<3Byz}<|b0Q#-G9-lLIAessegJFs3Ku z4dHLl5gBP2_Qy0&edugQIj$kb4ZnMMUIr`s$5D|AnnG~`1O+Luj#7b+swfrtjB;EK zU*9KW#?|x)wW(Mz^;y=@Ae>FkVg<-y?Mk2w3LNqL)&sKfxV8c z*?3|;ZtXXA(H8VEx zI=+O+3&RFq46yA6><1BrBHBuRl$Iw0lS{SV;$1JMv%bQ<#?ws3Gzm-wkVa2r@{y_gc7QdUVl-F94sU$n&f@cq;OP zDO@F7)40mGW^h$-9l$kf7oX}}rV;fYq?0_D!`qB!3g|x{_JddYd3y?ycnO{l_m!~U z<6^)4!eE~JO?(G-Nl3>>|1~08XU|Jt%l;3u(lt0VI1CR4?~1vK;4@8aytypSc~^2j z)TH3Z*YfsnWNAm2rEsyICvN^4SxMB6?jkE39J6PHzw|`z>%UO$O7|ewcl4j@t|9K{ zq;u#2msYGBKvl}9ce?Zy%#bC8N#;6RiQ92H{;r2a+sM~7nLyqh{*a&dM-~MEE|cy33HPzceC5lhyC=gkrca(gzRh~h z8Pjvy+Y}Wtw>GT+L6BY>>`BoGH}k&L?@fNN4KP7?=+mpK_ZC*~)ILKFg5KB@kPY)3 zq|59YjZ#c_Z12An|5yuswW?m9yK?L1%B{6)jg{3~A78t2J9ZG)?p(fl>*l48YV~}4 zSK;*43$e?`E9CIir_i%IN9t<|gb3L2}oZrx6MLTxeVbR&5yl8sOo7>6#LhE3kY zhh6@9uif4bk6pJQGi(BKymiQtSpHRC9T#dQcFtuKqo8S&1JXNwl;d9xM7bUk(~Oah zi}GSI?reS+r}Pmm%%~v!i_|sIQ>1u4v#DLY_IcCc-(_KC7uoEGQE3$!QV2-?IY6~2 zLik&>!sk@DRCrW;h61t_geU@oq^Kj=*(XNDj?>W4WfG$le3hmt1Fp&QP}L;})8n$H z>Z%53apeC8y)+LevLsjJB`$O;Q0}@sLF|sb2x03U-ZDAfpSq5hF$7T@316km2l(KS z97pj@3eT347T02V0y)LMP@&I+xPvJqldW-W{SPS{twKqA_=HbrN=p0*h43=&%;=$E z7*cd{q8ZNp?wppH{vus{=&~>)NWYDI$5D(A0m9A;)2>!62(ylBVV@uyP{HjzlWJVY z#(888`7woxa0mxXN(vOljWavCT!uECXq%1B0aj~x8FB^Jt8)_*1eDX`d0-ym7YPD# z7evy4)GTXIK@KP*e}oY+aQ0%4QH!+up4Vo-q|U#hVt5Q*Y#|Bwthg#y&Z zp2CA26zItBm5!9p%^jROIDfo6Kex!pc!($i5!%b7Lq+-p i@d@IOffva)QE0{N$AJ}aVM(4-3{{aUrSgsPAN?!6ZH+CK|s))*%d8{ zT+eVViJ*!C_OS(WcMQnK7CrUaf1vl?cdS1C7+?P>W9P;>3#z*`(7n4*5!TtUH&-me`l_VSS zlLYvbEYulK^HCD>C=-0F@{y8S^CS~V92L3J!7H$l?8HbfHwuHh@aa_`l7Q_5?H<_1 z7Q0?;O!){c`B4rR7r4j)EIs&{&<_QNoXj%66uK^QjXL%$n_k0((& zUGc(D*f+F-BkJy->(;nx9Fv)f~p=_)J+K z0zJ@pCq8A=%)H^P1FLtDZ5zmCZzY4DBdl+1b3bh0ie>3Z2j*Ou^MwD*8TdFOI^S3# z5KGS(+*cRwYo%VkbVgE@@4azRZ`@MPuDv2TmieTp{Uj@-dJ?5|@Iw;&4m-+>3I-x?Rj>>R&Bi$&Nb8?;9~@ksXHTCJb=Db%7(+N zy%1NVg1}{3rbC6OS4&8`NM_-wd}N2boFXeO#b5dMf?1m_<2GEkJyB`x z33V2#Nmjdn*on$w)ZaFggi6gVbHHU~saq9!L@pqoRP;H)o23NrbvkR~tY9#*;NN)x z8HN&w1p6g9w}pGoDki);W{GFVQJyzv_bhmPad&Is5_!Tbf@+!z{=bqW`ixs2cTICsit5a!j!7AE+ 
z{Y7VLf2Z=W_xMAz4=qH?ZoIIK|6tkU55*cF?`8yCWEO#+Nj1#X@j`UZk(WRK%~;Zw&ZC9IZ1BD`EhS1heMm*X`g>G)d-GSrGHCblb?8S0rtU)bw#!=!n;R$Eo&LRskC!)1)j;=55km_h zxs+Ya*JT=w!SOzhHrmt!BTM(n&P>xzFHqOP*qTA<$knpxqN%k50MAvu9BPo7%Fvuj zZ67C@f@)4`N63ehQSInd%5l$EH(*lTMDjK+2fBzRv}KsFk5 zP9jJLsPby6=o*z_bL77Oq54xGWQo!1lvysl{+cl;R<=*ORsabkpw?>_=9qnT6wofL zuTu7!!G}CudyTedY%W)-&CBX~R^!V%pfJYr*fy*rnBh#G`6qPBVL={2hJh>tNEDW^ z|H{rS$Sc+(n-QBFQMDnQBceXd7$q6zBz6IH5!c(SbkOD{T=A}~_HKDShMv#)AqaGQ{}-`2w=hx&0?w!2YK9K6xK{fc$RxuUYu-ncHpS z+8jX}9CNh&2*A&oe-N5bs1w0WiHB#&IL`o+>%Trd1t0sYqXZ_zdC0xp8rfBKCk8T6{v4|>+YK7;jTp+a#Z-AOT%=1*OnXxXE`Z+G60he%UiaVBG z)d7O&*O(jtl??5&zU|&vyJOZs6N*;&%v86K^pL!T1Wl;j<{43UW(Q;*p=R@UX{`O< XHgDq(FxhH+YcSWRzUAA2z3TiQU0p#a literal 0 HcmV?d00001 diff --git a/wordpress/wordpress_deployer/__pycache__/orchestrator.cpython-39.pyc b/wordpress/wordpress_deployer/__pycache__/orchestrator.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00c705b5e385b3df5f22df7764ea95854a67b8e2 GIT binary patch literal 15888 zcmb_jNpKw3dG7A%>De%Y!9oCB)m$VIv;=C4?I5y3QnV$dESM%GEAChs4W}ExfV1&+ z4=Hp8j7w0QRLV-pb|rRMVggiRCrbH{Qx2)zl1nZ*<9_RjxBScZ|Non2wOZEjnfmIb_QqE_=-k2puVN1dab zuXc`gk2}Y^C!7=Ahn$DHwqtX7t#h(_$~o0N?VRTPWamuxVdr68^YooZyhnF+=j;ob zTX(14*4!y?aXaUnLw?$wL4L+Nhx}(e3njDeA(R~2Hnw%=G34jmdF1D}4d-#>7Tm+g z9Y$`+HLhukN5VJCm%Tx!zuEPAf!!CabEE_*w}W+C^gEsF&DISYkF{>Jduw*H=i1(zUTa9blz&P)wz6nMR?~OetspA7&7j%p zG<|GS>GYI_Zx&hIUI zx!G&3c_NxlD_-`3CcP6~%%-JZYInRZ_t(%CuPk0(x%NdN`XVahF7Bg*`>S57FI>E- za=G8S;R)HQDl@9#q8Zh{*tvRE^Uq|9;-~)|#0Q66L zor@;Z+4C#HGnbmahrMcf1K#n~zQ}I0M=Rp5d|*b!Mx)p4dW}X@ZZsra;C`afxH)Wg zR867LaQm%B;{#3Dct0l}ec^n&wf~Hlhs@06ofyPo)L+Y7_hBu85=Ypwr=tlT>yJ3EiZFppt!q z$f588HM@i)Uj0`gYn+^CdU@A)R`V=;=kQ%XNpaOUtRZji$>*QdTnqbE$P)a;77adp z_^L;12VV8eR9ZMz~#S)Bwja3wQlZh&7rk3-TeE8Yq^EDO~=COJKXgwsejb-ki?pVVw1yVwJ4TRu z54(HU*ws;DN*)W3ztn37ZID|ywlORmF@ou(eWO}g%!9w=j`>lc1DS+(Dm z`O8U;9+X2`w$qVc-u12z*96Vt z14BH4FYz=bc9QTSR%QQcH$I0-xsrtmJ$xt~kD{lgvDP2x;u$=!k9uesnR` 
zpin(*C-Zp03+!g6qo$Y4#!r{&D(}ypl_KqE_gD9?RR8*h*9!ck|3yQtiP`Eys-DL{ z_RhQla*K)ZBIR&lnmdWMWv{|d5?(Z&39sT+-D%X$xUI+@r`%do#!%L+x?*1oE><{vp>!{!o%X>7GJHxCXt8_lKE^_2vB_C@$KW6mrZuv>iqhjtHYh#)3-#Z3{Z zA9w>olU9py;XWxw%gt2zceojmxIRFD2YJXcYny5dK@qfD;QLZgMtOzuxJtO6z*XJO zqg^eS#C;vt)V6{AbTEVaSzHy)9}4DhKaXnx*Wv9P+8hav;{F(}EH~1KddC`&4iH;)-KtFN=3$g$tss8D_JFww%a`bgkH-_-V@Ge6jgR>Ru&(D zDv}+E$FQ8@aY`6IkfarJNPzKy-W#g`R?cLD5MN_02FBtL8aOkJfx^U@ni+bu==E;# zlTqb5&?zuw7xqU~gfwRi8r2%qBJFRIo|)wNrN8* zJSU+8&9Bgfs8V)X!D+JS6lM?=s+d&U7Ec~ZffeC%sLY5omS!aXuHo`8AkhkzKAoG; zCv*c4T>bs8Rp8twmO1vVn;%qtV=K31 zqFoW~z`#2tZlUX{_mRG!ZCU($Ieva*?dDj4Rdyy^gW3c2Z#0pigNTjhiLtRIkzN7 zf;Sf4GG5cZ4t%>+xKrqr<@XcX6DWb?E{B~&RNT)!kP4!!&9?9Dfi9`wXNzpverYu; z(a9)F;mgYKEJ)$Q_rI4xI<`M-wO~fB4m&69SFe6~*- zu6|_ri^F+-{QKF*rP;S^M_M?0EochXE3pZ%w6qk?q(f9T;#>1Ex#XWcWcq^w{<)bZcXi-kIbpT+G_|FLDws+_4?$XVRZ`q+exP&Bf5c}&#F z@r{W%LfDCjdlwn7FhrTF>?;yxEO>Go=1zWB^PdVV2p|YIX>o1qZ|PwT`2vYE7BJ61 zz?3!vh!=>o@EF?XMmmWWFtn~1+T7Cb=wN96CWeL>EW5E7qxV~k(#eP}gtrnlze03o zKe=pQ5#Hk|5!i|Y$kms$J&57IHtvF9hEJbm4oew+_$Zd+!#{mj{@cmB(@yN9Hk^g0 z%_f~~L&?IDANJg3J3RJMkGr>}8h>0V_odBlXW3qf@&nO-b5jY1@7bA-sbR+pyv2$% zGTy*YqPc|AiA!Ksdc77Dy!cI23bL0X%XfW=%xyhFbV$LUw8sJ=@=eMbIUElI+wkPn_AOZQx3~+xE%hmb^{? 
zm1rvvJ-mUA*@Ey6k6ra{4%-6uE9eN&(H$eosU`ag%vUfHBW_qR;UxRZ1F>m0*C5oF z#2e`Km7nI3!aq}MH3x9acn!eI0T5f1iQM+|#C3^92N3tL+@ zT?!xi667;R20sb|NL8WwfOacJmcI_SeJ9G_UT?S7nV2E6hu6U^q->)c-Vl|Qv#9TS zQSB9peZTnTfMy*{krx~u>Ms!+vT9!}NXc*b>&}!=t3xb7tAid}U00CEj^Kmxhn+wi zMEX56_%B>OnH&)A2L2$n4WLT{qT90Q4g#5kF;FMf825^~Sv`FGek>6)!x%%u6F;Fi zk*y8s(qUf+y%-i3?}>wK@JcL64`}K z3Gy~GOkix7K^{yn0uul(f(deNWyHQUNN+P?0`rc^On~o%2`0jSOsJA02=M8{eCRbh z@fNE&p`3(N$_)E$#O&-LD5X@g$;Y42P5St!74-4z-M;7d9u4F$k|9e*5;w7!Q9(70 z3gD0doLH|2fpXy^3G0gJ3hH|pM>XsG3Ab_jxniDWJ=~B+fRRtH)p{GaG<`?M&|?whB81;H0g)#b9=n|bDz6EBKW0m*Bm^V)Z z^X^nK%IUk9-CMd_ht8S_qlES)w0UaTenC1&5RU8JYKwl41F{@pMHCXsq_^6EPX!)g zsEP~&9^@7V9)cz$@sDxG#IyYu_9~)5fMtGNIwq zpt=N{Dd>jzVNl7m)LelXQy3>ocrH8?FN2$Qy=)K3_d@31wfJ>*^A>a&jcVCOM5N$~ z%?7Ea3RbT%9FR%oid8&CUZmCy*ii!nmp?E$0w5kng&9AHiA z!IMe7t%T=NA#~rhwVSJu=5|=J)w})0g$y>$i)IgjA%db&rQ3Wnu{bZI#F>n{RhE|c z9h8a}sA@*~pOn?5@as4DA+>0MUjMcrg~@hd zRJh#~1W81ViYO|eq^46)li_U=-=gY+B&UN4I^W^bjaHs87L2knOQ08N8K#(_Tc21K z#SK(#hAtt}g}xOOl6-2J4jG_aFZq$SDP^#JTd zXZwKytp<6qSOWh!Zde*6<{gF4GLjZ3JMjS_w6YRC;>Z zzUaEl7LrC%$*~_d(7H(Z!n~dpK#7oGs0&|B+RxR4CM9dhTFnN-H!6~Osmc6#+>!=6x)O>2-3T9jFuNqat1(bQVj*D(8sZ+7AjrXyWRx2l1R+eAjJbED z+NRo=+{x9N)G~0P+I&zL>Dz>_twh;N#wbX}aEp9o05iR;5!h%0PAIrRZq14C@At64 zGs`wR2V)$ax}!40cn}T8q1ogCSbR_^T|P#UdG;VeW>lBDMbj?a#Jb;0)=y$F_mVZn z{(S5y%z8ePcHzR6VW&e%O5y5u7iXt@=p`oi@F-6`Q|}`r(S{v}7DGBK!n`2u(74KG zlapDZLW(bTR8FV&vEx&;(ZxRcAhCH37dtuVu*K^WTY7*U+t$Bh+=N$(DR)t49Rmy5 zSb8GTTYJbkDnHwSp+l~m=l%&){WEl^h0i~jX65Q;ysJykcKWSm$A1ntac`1&s~P5H zUr%L#m2AlMOF7l4@oF^U#ffqquXm8vWcH0+KeQ}+<|BR=Wj{*TvrG<5ipan%dCxK} z=r0qI7Se!q-GBzXzwIEk*}kdb-7IAI1Ou^Y{2@DnF4kfJZn7*3D}u~e_=p07V=u(A zK;^8r@*8kJzGL_of`W?@YDWlIwjAXUX+b209iI6SjAcYq3R$K_nARi>95 z%S9|y!e$Bsrz4godQu-2d26Xcoj{=*O08> zWWy3=-k{{SDfucAXF`HLIr@}T1%*gtjABOC$si-gX+1YKbr~I&RRb)n@aVA(O6(*# zNp}q-|3VZ?N|SI~8Se2If7Of$eJ&TCdoV+0$1x@+NJdm}F;V^)Hv!`LASQ-X+mIB{ z2)h3WO5iz{{&b3@t9&l7l$J0R!9g3mes#+rZ~6`g3fY{z1aq>u`CS;3Irw8_Z#ZV5 z%usfHO8CWrGK3cIWAl_F`Ni<@tMPGJI>I31w(;2|A1VT{0`Xq> 
z2N5&Jak06TzB<_C6ZOVXm6fnc{g7nlqqkutZUxC49!X7#_yJXIhX$a^R5FIt)g756 z;i95?y(@(jpLj`$=m%J7M5p3T)o~$l9mRYYm5nQfI--I)(I(GX9fa&%JoleOA!HS3 z#c7qfgs34$KLiG{2)SMA|c}es29$<8-|#}HiJM2 zIrlDbse#hRPzrg*rRTUbhf-|f4m{snO3_y$C_>;`jMn)ii81T4le%`+zm<{qmW#dLP+&sFBOGgXHU`2Cbol-u^rg`rzpYN zbU_KA5~c)5{TQMUqcbJsw~RXmD1pD366na972Zzv=~~bquth7^EQ5A`CH7`|NZ1g; zD?X~7ye&L2P7o4yC+&S{p&rF@9E{5QI1TG&ecm{3If#LO3{Z(WiW!<J^pT{2|MWw?QQojUjnq?%-&-jL2lD z1g_tkKLnMicua4aTS9`Q?_m{D2!muVui`v@9}&eR$X6M;UKRZ=>+<2?S0^6nw6n?6 znY@Y}Yluo_g~^`RCT@+i9FaI0;2u%3XV4&)aQ+f|SNDbA5j-c?s@S1~wo3FVNwLIb zWSzQtPwG}ou>@(H7Z(!oBRw2eKTNib9VtS2s*gtT?3(Y z;84!xk;6eNU|4;FS^#jc1CSsKpGpn*jK@w1u01+ZmLK8B7|x~Q_}Iq8PIVXWaIJT$ zTXbrb+`Sy^28K5_G>{!!O@GRR8cIVJw=rd-dfB@NY zzJ~AQmm{ywN@s?PsGA@hshk=(ylj%I5N^W)4%)yqF2pV#6yKx7PLh*!_h(41pnLIs z+=Q3zCqWNB({+$j!zni0>-9+B=qjB`{}xYh3r@hhdMsDiJ)QzfFHPy780+R%1(=^Egdn!deRJ5NoNx996zoHc+HI&Xt{3T_7Lo7$zc0OZ z9LAb-Pe>Z;!_ zIu$u%UXTN0B~7${jszb`D6MW7H4_mL9D1YZhdPrB3`Tz(dxk)yzuM2NDcIu%)bV{z zeJ|bTfZ*cc1Aje1dRhL2qCe zx;sq?nHhrMq##uxSHYFG}SOKsR&kIh#7i>H-I9ad}80RltYN!KnwIxQ5{|P1w~)eWB!q) z2E>F-w!1tQ!TUrj&g*#4#ze(uF8Q?=e(x)g1hb(;CR&0At;FG+5*ILV@Z)C;pHiBmRA_;?_i&6^_TjEm7AIT(sKW8!_H zi(~Ib*&dT3lLu^&>_)xjCs>uX-7yL$OnKDHiUtp}ur~NH1#yAr&T@e>gnL=V1(N@ zE@&90rsy26>>#v-F?Hr_bI^Bp*r48I-mW^77r%j_KCuKzbN-8h5%1(ljjHoBq_h zN4A)zxk?Pb2dTHX-e7E+BOGpgY;u!ZA6mXEJZ^L6V}rZgL%ShtY^%*17}4Y{^fakQ zw9wc7*y1aE6@6`fLG{web@ZO%8z0*KO2vB`yl41Xj01EP=Q;G8=bPwR<2KCoRIyeW zXj?&5$ovHb&uZnLfl8pQRgDak$;g_?I4fGi@PmNh8;P_>o>^b?pYGRjr{4vc=o2l_ zCI+ZJF+q*YJhn3XBVh5Kai88g6Fq%pqU}4!E=LbM(}e0){_iHMuD9YSi^4&a3%01Z zkj?3!YjQX3PFut|r+Sy&s(kjnciv`M!cub5MW00PrR^jZ^qNO$xHAx)1$ou#yX+lt zXAD%cs)I!_g8h#KB$3qs6hdV;Dq+hvzm}O!(%4VGT`c|du)MhW& z$}cKhDaqNTIAOFdsL+pg$5QYP`BFQW+W~l1ZM}9dr&-}50PRNcZqe8k+1vOpq+EmL z3OArPLKZd`JLBDsRamJQ^yMaA$_qrEA~JO`d5*Z?z!A(91ka(Bwn4O2N2lKgoGRZu zv9_&xIK@6&MUKJ&vc@v*8N^&&E}kSz0c%qSu(W+GZF2plmgy7yNIwL`_Kjnc8xwus z`bZ;i=~`xUfNtO6=6UVNxUT)`uZL!SHFNhp;wM<{H@Nj%^U#`D$IbhiPI5_#T}t83 
z!~|5`dp1eCh4(b0r9ndL;9t6Sh(weBTJf~Gr^YtWa=_D^0Oa=^;v?YiJGV9YH{O~c zB5Wek96HrZ?Fo%uxo0YUSB_UFiszR0OU!j>|Hi)aYWaLodr`ads^VJXtJN%P)oLx` zCfz#gbB^;^UuUxf^dt{b_L%z44VB0$=b5nqree|Y-Uy=sP?)k%3d(Q0g0r1*#&{Bc zBV#g*Qo+OrJ)yE!O8yqUpG17YdHRq6RL&*Rl<~20&I;8mN>jMulxsUbMe8M|YDAZP z0;Fgi!?*U+Bvzo!pRN!{As}PvxYt8Y=#K}3gD%UP>{8{B>{GpyKUa_CVi3JAkgZ8A ztY&W1<=pIGA_8mPeuq*zX@HQBTj|!;R?cWQz(P@_lv1n5!hVKOkhfSJ1iGxVE}w=Z zN#Z0ULVhG0AcZZ%82q@QgFY8Qst={6PQb*q-6&P2G6tJhUv&SVw#~hks3@t)I~^ zs(02y>tTMbvOpaRw@UFHQ??*mQb>?r2kBA8Q(u&UD4(lf@e2NAaE(kqHtuW3=6#JD z=P5542n_}Wibz8Ua1P<5$JRu-p-P@k{$Pr&*OM6Fk3`%P4CTsR!WnjX4xF^%GJ~0w z%gmhP(X2ErgSzZyT@kSzF~ho8bOa>cE_(y`R3ZK`1B;DW!C9+P=zf0w`m7GEH6+C! zI0{mdYVNt6T%jfvK-&*&RrN^P%EB3Ey*RmBG@>*C_b|-li|FrIC0u5fAmE$;gnS8o zd$`jL5Y01S|5e-u$}8hj$ICCCwD6-V!9>$JPz#Txs`W~{lY;d{MIWt5*`2`mWV>@K zgT1+ZWJEf5fB*{!uqS0GbrDLv{2yNg0c9xpk_gE5!i?Pw5#*G`o(O^`kFEEo%jS$< zcP^_{7Qs`lLcWac%b%K)FpHE5`jR|cc8GinWV_?c03*Lm-LHZmW+mg8mvFI9!bOQ# zh8e0H>6}+$W_2Dj?a#zaxqyP;sZg^;%FF`ANx)141x>mXFv!?tpy1Yopkh~0LCv7D zt^=sJ`9Hq^RH`iUP(6w?u|)G=#ez+z0fGzX4P{|F8J((daXh#g86?fn$p!F>vbw5(m|Wlr~Ts5*pkp zb=XzvP{-u_kJJ5rTWQE_<_D37bZMQUqXdef#EgO2R-i>aw?f&hVbj zykHQlmcvF#6lYX-O*!znq1Hzkve36Al}`Ct$i+#ehr=_;g1VGGk-gY3eFXv{jl=roQk8S18 zH}G`n33Gduxf>Y_&YLP*v?`;!mC@S>0W0}mru+FAqwk4uknIJ%J<+=zjPPye^O#>b zt2(w)im13sNlZza&PsN#1AUjhi~5db;cXEso6U19%>t*FXs5s+e@K>7aPaoBY?O8{ zUzT0;byP?>nI^ntX+mCww8E80ePK_?@6mko6~@fI%b;@&!~cwX z0Xokp=%{S*{7L96S!hRBD?<5AXJ5Hao0Abp*V-%f7+YpPH$|e zv7J+;CNx}r6U1*#KM;zvJT6cPTor8i_=bW~eFZDOhp833Km)!?gcj*L<@vr`2`RKp g)55z_eujIE0Fa&mu^jp~wyvX8H$1ax8v1(k|8dTS)Bpeg literal 0 HcmV?d00001 diff --git a/wordpress/wordpress_deployer/config.py b/wordpress/wordpress_deployer/config.py new file mode 100644 index 0000000..0a7690c --- /dev/null +++ b/wordpress/wordpress_deployer/config.py @@ -0,0 +1,187 @@ +""" +Configuration module for deployment settings + +Centralized configuration with validation from environment variables and CLI arguments +""" + +import logging +import os +from dataclasses import 
dataclass, field +from pathlib import Path +from typing import Optional + +logger = logging.getLogger(__name__) + + +class ConfigurationError(Exception): + """Raised when configuration is invalid""" + pass + + +@dataclass +class DeploymentConfig: + """Main deployment configuration loaded from environment and CLI args""" + + # File paths (required - no defaults) + env_file: Path + docker_compose_file: Path + + # Cloudflare credentials (required - no defaults) + cloudflare_api_token: str = field(repr=False) # Hide in logs + cloudflare_zone_id: str + + # File paths (with defaults) + dict_file: Path = Path("/usr/share/dict/words") + + # Domain settings + base_domain: str = "merakit.my" + app_name: Optional[str] = None + + # Deployment options + dry_run: bool = False + max_retries: int = 3 + healthcheck_timeout: int = 60 # seconds + healthcheck_interval: int = 10 # seconds + verify_ssl: bool = False + + # Webhook settings (optional) + webhook_url: Optional[str] = None + webhook_timeout: int = 10 # seconds + webhook_retries: int = 3 + + # Logging + log_level: str = "INFO" + + @classmethod + def from_env_and_args(cls, args) -> "DeploymentConfig": + """ + Factory method to create config from environment and CLI args + + Args: + args: argparse.Namespace with CLI arguments + + Returns: + DeploymentConfig instance + + Raises: + ConfigurationError: If required configuration is missing + """ + logger.debug("Loading configuration from environment and arguments") + + # Get Cloudflare credentials from environment + cloudflare_api_token = os.getenv('CLOUDFLARE_API_TOKEN') + cloudflare_zone_id = os.getenv('CLOUDFLARE_ZONE_ID') + + if not cloudflare_api_token: + raise ConfigurationError( + "CLOUDFLARE_API_TOKEN environment variable is required" + ) + + if not cloudflare_zone_id: + raise ConfigurationError( + "CLOUDFLARE_ZONE_ID environment variable is required" + ) + + # Get optional webhook URL from environment or args + webhook_url = ( + getattr(args, 'webhook_url', None) + or 
os.getenv('DEPLOYMENT_WEBHOOK_URL') + ) + + # Get optional settings from environment with defaults + max_retries = int(os.getenv('DEPLOYMENT_MAX_RETRIES', args.max_retries)) + healthcheck_timeout = int( + os.getenv('DEPLOYMENT_HEALTHCHECK_TIMEOUT', '60') + ) + healthcheck_interval = int( + os.getenv('DEPLOYMENT_HEALTHCHECK_INTERVAL', '10') + ) + + config = cls( + env_file=args.env_file, + docker_compose_file=args.compose_file, + dict_file=Path("/usr/share/dict/words"), + cloudflare_api_token=cloudflare_api_token, + cloudflare_zone_id=cloudflare_zone_id, + base_domain="merakit.my", + app_name=None, + dry_run=args.dry_run, + max_retries=max_retries, + healthcheck_timeout=healthcheck_timeout, + healthcheck_interval=healthcheck_interval, + verify_ssl=not args.no_verify_ssl, + webhook_url=webhook_url, + webhook_timeout=10, + webhook_retries=3, + log_level=args.log_level + ) + + logger.debug(f"Configuration loaded: {config}") + return config + + def validate(self) -> None: + """ + Validate configuration completeness and correctness + + Raises: + ConfigurationError: If configuration is invalid + """ + logger.debug("Validating configuration") + + # Validate file paths exist + if not self.env_file.exists(): + raise ConfigurationError(f"Env file not found: {self.env_file}") + + if not self.docker_compose_file.exists(): + raise ConfigurationError( + f"Docker compose file not found: {self.docker_compose_file}" + ) + + if not self.dict_file.exists(): + raise ConfigurationError( + f"Dictionary file not found: {self.dict_file}. " + "Install 'words' package or ensure /usr/share/dict/words exists." 
+ ) + + # Validate numeric ranges + if self.max_retries < 1: + raise ConfigurationError( + f"max_retries must be >= 1, got: {self.max_retries}" + ) + + if self.healthcheck_timeout < 1: + raise ConfigurationError( + f"healthcheck_timeout must be >= 1, got: {self.healthcheck_timeout}" + ) + + if self.healthcheck_interval < 1: + raise ConfigurationError( + f"healthcheck_interval must be >= 1, got: {self.healthcheck_interval}" + ) + + if self.healthcheck_interval >= self.healthcheck_timeout: + raise ConfigurationError( + f"healthcheck_interval ({self.healthcheck_interval}) must be < " + f"healthcheck_timeout ({self.healthcheck_timeout})" + ) + + # Validate log level + valid_log_levels = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"] + if self.log_level.upper() not in valid_log_levels: + raise ConfigurationError( + f"Invalid log_level: {self.log_level}. " + f"Must be one of: {', '.join(valid_log_levels)}" + ) + + logger.debug("Configuration validation successful") + + def __repr__(self) -> str: + """String representation with masked sensitive values""" + return ( + f"DeploymentConfig(" + f"env_file={self.env_file}, " + f"dry_run={self.dry_run}, " + f"max_retries={self.max_retries}, " + f"cloudflare_api_token=*****, " + f"webhook_url={self.webhook_url})" + ) diff --git a/wordpress/wordpress_deployer/deployment_config_manager.py b/wordpress/wordpress_deployer/deployment_config_manager.py new file mode 100644 index 0000000..3d3b009 --- /dev/null +++ b/wordpress/wordpress_deployer/deployment_config_manager.py @@ -0,0 +1,153 @@ +""" +Deployment Configuration Manager + +Manages saving and loading deployment configurations for tracking and cleanup +""" + +import json +import logging +from dataclasses import asdict, dataclass +from datetime import datetime +from pathlib import Path +from typing import Dict, List, Optional + + +logger = logging.getLogger(__name__) + + +@dataclass +class DeploymentMetadata: + """Metadata for a single deployment""" + subdomain: str + url: str + 
class DeploymentConfigManager:
    """Manages deployment configuration persistence.

    Serializes DeploymentMetadata records to JSON files under ``config_dir``
    so a later destroy/rollback run can discover what a deployment created.
    """

    def __init__(self, config_dir: Path = Path("deployments")):
        """
        Initialize deployment config manager

        Args:
            config_dir: Directory to store deployment configs
        """
        self.config_dir = config_dir
        # parents=True so a nested path like "var/deployments" also works;
        # a bare mkdir(exist_ok=True) raises FileNotFoundError when the
        # parent directory is missing.
        self.config_dir.mkdir(parents=True, exist_ok=True)
        self._logger = logging.getLogger(f"{__name__}.DeploymentConfigManager")

    def save_deployment(self, metadata: DeploymentMetadata) -> Path:
        """
        Save deployment configuration to disk

        Args:
            metadata: DeploymentMetadata instance

        Returns:
            Path to saved config file
        """
        # Filename encodes subdomain + save time so repeated deployments of
        # the same subdomain never overwrite each other.
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f"{metadata.subdomain}_{timestamp}.json"
        config_path = self.config_dir / filename

        # Dataclass -> plain dict -> pretty-printed JSON
        config_data = asdict(metadata)

        with open(config_path, 'w') as f:
            json.dump(config_data, f, indent=2)

        self._logger.info(f"Saved deployment config: {config_path}")
        return config_path

    def load_deployment(self, config_file: Path) -> DeploymentMetadata:
        """
        Load deployment configuration from disk

        Args:
            config_file: Path to config file

        Returns:
            DeploymentMetadata instance

        Raises:
            FileNotFoundError: If config file doesn't exist
            ValueError: If the file is not valid JSON (json.JSONDecodeError)
            TypeError: If the JSON keys do not match DeploymentMetadata fields
        """
        if not config_file.exists():
            raise FileNotFoundError(f"Config file not found: {config_file}")

        with open(config_file, 'r') as f:
            config_data = json.load(f)

        return DeploymentMetadata(**config_data)

    def list_deployments(self) -> List[Path]:
        """
        List all deployment config files

        Returns:
            List of config file paths sorted by modification time (newest first)
        """
        config_files = list(self.config_dir.glob("*.json"))
        return sorted(config_files, key=lambda p: p.stat().st_mtime, reverse=True)

    def find_deployment_by_subdomain(self, subdomain: str) -> Optional[Path]:
        """
        Find the most recent deployment config for a subdomain

        Args:
            subdomain: Subdomain to search for

        Returns:
            Path to config file or None if not found
        """
        matching_files = list(self.config_dir.glob(f"{subdomain}_*.json"))
        if not matching_files:
            return None

        # Most recent by modification time
        return max(matching_files, key=lambda p: p.stat().st_mtime)

    def find_deployment_by_url(self, url: str) -> Optional[Path]:
        """
        Find deployment config by URL

        Args:
            url: Full URL to search for

        Returns:
            Path to config file or None if not found
        """
        for config_file in self.list_deployments():
            try:
                metadata = self.load_deployment(config_file)
                if metadata.url == url:
                    return config_file
            # TypeError covers configs whose keys no longer match the
            # dataclass; OSError covers a file removed between list and
            # load. (json.JSONDecodeError is already a ValueError subclass,
            # so listing it separately was redundant.)
            except (OSError, ValueError, TypeError) as e:
                self._logger.warning(f"Failed to load config {config_file}: {e}")
                continue

        return None

    def delete_deployment_config(self, config_file: Path) -> None:
        """
        Delete deployment config file

        Args:
            config_file: Path to config file
        """
        if config_file.exists():
            config_file.unlink()
            self._logger.info(f"Deleted deployment config: {config_file}")
class DeploymentFileLogger:
    """Writes one plain-text log file per deployment outcome.

    Successful deployments land in ``<logs_dir>/success/`` and failed ones
    in ``<logs_dir>/failed/``; each file name embeds the sanitized URL and
    a timestamp so runs never overwrite each other.
    """

    def __init__(self, logs_dir: Path = Path("logs")):
        """
        Initialize deployment file logger

        Args:
            logs_dir: Base directory for logs (default: logs/)
        """
        self._logs_dir = logs_dir
        self._success_dir = logs_dir / "success"
        self._failed_dir = logs_dir / "failed"
        self._logger = logging.getLogger(f"{__name__}.DeploymentFileLogger")
        self._ensure_directories()

    def _ensure_directories(self) -> None:
        """Create the success/ and failed/ directories if missing."""
        for target in (self._success_dir, self._failed_dir):
            target.mkdir(parents=True, exist_ok=True)
            self._logger.debug(f"Ensured directory exists: {target}")

    def _sanitize_url(self, url: str) -> str:
        """Strip the protocol and replace characters unsafe in file names."""
        stripped = url.replace("https://", "").replace("http://", "")
        return stripped.replace("/", "_").replace(":", "_")

    def _generate_filename(self, status: str, url: str, timestamp: datetime) -> str:
        """Build ``<status>_<sanitized-url>_<YYYYmmdd_HHMMSS>.txt``."""
        pieces = (
            status,
            self._sanitize_url(url),
            timestamp.strftime("%Y%m%d_%H%M%S"),
        )
        return "_".join(pieces) + ".txt"

    def log_success(
        self,
        url: str,
        subdomain: str,
        duration: float,
        timestamp: Optional[datetime] = None
    ) -> Path:
        """
        Log successful deployment

        Args:
            url: Deployment URL
            subdomain: Subdomain used
            duration: Deployment duration in seconds
            timestamp: Deployment timestamp (default: now)

        Returns:
            Path to created log file
        """
        stamp = timestamp if timestamp is not None else datetime.now()
        target = self._success_dir / self._generate_filename("success", url, stamp)
        target.write_text(self._format_success_log(url, subdomain, duration, stamp))
        self._logger.info(f"✓ Success log written: {target}")
        return target

    def log_failure(
        self,
        url: str,
        subdomain: str,
        error: str,
        timestamp: Optional[datetime] = None
    ) -> Path:
        """
        Log failed deployment

        Args:
            url: Deployment URL (may be empty if failed early)
            subdomain: Subdomain used (may be empty if failed early)
            error: Error message
            timestamp: Deployment timestamp (default: now)

        Returns:
            Path to created log file
        """
        stamp = timestamp if timestamp is not None else datetime.now()
        # Deployment may fail before a URL was ever generated
        name = self._generate_filename("failed", url if url else "unknown", stamp)
        target = self._failed_dir / name
        target.write_text(self._format_failure_log(url, subdomain, error, stamp))
        self._logger.info(f"✓ Failure log written: {target}")
        return target

    def _format_success_log(
        self,
        url: str,
        subdomain: str,
        duration: float,
        timestamp: datetime
    ) -> str:
        """Render the body of a success log file."""
        return f"""╔══════════════════════════════════════════════╗
║ DEPLOYMENT SUCCESS LOG ║
╚══════════════════════════════════════════════╝

Timestamp: {timestamp.strftime("%Y-%m-%d %H:%M:%S")}
Status: SUCCESS
URL: https://{url}
Subdomain: {subdomain}
Duration: {duration:.2f} seconds

═══════════════════════════════════════════════

Deployment completed successfully.
All services are running and health checks passed.
"""

    def _format_failure_log(
        self,
        url: str,
        subdomain: str,
        error: str,
        timestamp: datetime
    ) -> str:
        """Render the body of a failure log file."""
        url_display = f"https://{url}" if url else "N/A (failed before URL generation)"
        subdomain_display = subdomain if subdomain else "N/A"

        return f"""╔══════════════════════════════════════════════╗
║ DEPLOYMENT FAILURE LOG ║
╚══════════════════════════════════════════════╝

Timestamp: {timestamp.strftime("%Y-%m-%d %H:%M:%S")}
Status: FAILED
URL: {url_display}
Subdomain: {subdomain_display}

═══════════════════════════════════════════════

ERROR:
{error}

═══════════════════════════════════════════════

Deployment failed. See error details above.
All changes have been rolled back.
"""
class DNSManager:
    """Python wrapper for Cloudflare DNS operations.

    Talks directly to the Cloudflare v4 REST API (replacing the old
    cloudflare-add.sh / cloudflare-remove.sh scripts). All methods raise
    DNSError on API or network failure.
    """

    def __init__(self, api_token: str, zone_id: str):
        """
        Initialize DNS manager

        Args:
            api_token: Cloudflare API token (used as a Bearer token)
            zone_id: Cloudflare zone ID
        """
        self._api_token = api_token
        self._zone_id = zone_id
        # All record operations go through the zone-scoped dns_records endpoint
        self._base_url = f"https://api.cloudflare.com/client/v4/zones/{zone_id}/dns_records"
        self._headers = {
            "Authorization": f"Bearer {api_token}",
            "Content-Type": "application/json"
        }
        self._logger = logging.getLogger(f"{__name__}.DNSManager")

    def check_record_exists(self, hostname: str) -> bool:
        """
        Check if DNS record exists using Cloudflare API

        Args:
            hostname: Fully qualified domain name

        Returns:
            True if record exists, False otherwise

        Raises:
            DNSError: If API call fails
        """
        self._logger.debug(f"Checking if DNS record exists: {hostname}")

        try:
            # ?name=<hostname> filters to records with that exact name
            params = {"name": hostname}
            response = requests.get(
                self._base_url,
                headers=self._headers,
                params=params,
                timeout=30
            )
            response.raise_for_status()

            data = response.json()

            # Cloudflare wraps results in an envelope: an HTTP 200 with
            # success=false still signals an API-level error.
            if not data.get("success", False):
                errors = data.get("errors", [])
                raise DNSError(f"Cloudflare API error: {errors}")

            # NOTE(review): the list endpoint is paginated; this assumes a
            # hostname matches within the first page — confirm for zones
            # with many records.
            records = data.get("result", [])
            exists = len(records) > 0

            if exists:
                self._logger.debug(f"DNS record exists: {hostname}")
            else:
                self._logger.debug(f"DNS record does not exist: {hostname}")

            return exists

        except requests.RequestException as e:
            raise DNSError(f"Failed to check DNS record existence: {e}") from e

    def add_record(
        self,
        hostname: str,
        ip: str,
        dry_run: bool = False
    ) -> DNSRecord:
        """
        Add DNS A record

        Args:
            hostname: Fully qualified domain name
            ip: IP address for A record
            dry_run: If True, only log what would be done

        Returns:
            DNSRecord with record_id for rollback

        Raises:
            DNSError: If API call fails
        """
        if dry_run:
            self._logger.info(
                f"[DRY-RUN] Would add DNS record: {hostname} -> {ip}"
            )
            # Placeholder record so callers still get something to track
            return DNSRecord(
                record_id="dry-run-id",
                hostname=hostname,
                ip=ip,
                record_type="A"
            )

        self._logger.info(f"Adding DNS record: {hostname} -> {ip}")

        try:
            payload = {
                "type": "A",
                "name": hostname,
                "content": ip,
                "ttl": 1,  # Automatic TTL (Cloudflare uses 1 to mean "automatic")
                "proxied": False  # DNS only, not proxied through Cloudflare
            }

            response = requests.post(
                self._base_url,
                headers=self._headers,
                json=payload,
                timeout=30
            )
            response.raise_for_status()

            data = response.json()

            if not data.get("success", False):
                errors = data.get("errors", [])
                raise DNSError(f"Cloudflare API error: {errors}")

            result = data.get("result", {})
            record_id = result.get("id")

            # The record ID is required for rollback via remove_record_by_id
            if not record_id:
                raise DNSError("No record ID returned from Cloudflare API")

            self._logger.info(f"DNS record added successfully: {record_id}")

            return DNSRecord(
                record_id=record_id,
                hostname=hostname,
                ip=ip,
                record_type="A"
            )

        except requests.RequestException as e:
            raise DNSError(f"Failed to add DNS record: {e}") from e

    def remove_record(self, hostname: str, dry_run: bool = False) -> None:
        """
        Remove DNS record by hostname

        Looks up the record ID(s) by name first, then deletes each match.
        A missing record is logged and ignored, not treated as an error.

        Args:
            hostname: Fully qualified domain name
            dry_run: If True, only log what would be done

        Raises:
            DNSError: If API call fails
        """
        if dry_run:
            self._logger.info(f"[DRY-RUN] Would remove DNS record: {hostname}")
            return

        self._logger.info(f"Removing DNS record: {hostname}")

        try:
            # First, get the record ID
            params = {"name": hostname}
            response = requests.get(
                self._base_url,
                headers=self._headers,
                params=params,
                timeout=30
            )
            response.raise_for_status()

            data = response.json()

            if not data.get("success", False):
                errors = data.get("errors", [])
                raise DNSError(f"Cloudflare API error: {errors}")

            records = data.get("result", [])

            if not records:
                # Best effort: nothing to delete is not a failure
                self._logger.warning(f"No DNS record found for: {hostname}")
                return

            # Remove all matching records (typically just one)
            for record in records:
                record_id = record.get("id")
                if record_id:
                    self.remove_record_by_id(record_id, dry_run=False)

        except requests.RequestException as e:
            raise DNSError(f"Failed to remove DNS record: {e}") from e

    def remove_record_by_id(self, record_id: str, dry_run: bool = False) -> None:
        """
        Remove DNS record by ID (more reliable for rollback)

        Args:
            record_id: Cloudflare DNS record ID
            dry_run: If True, only log what would be done

        Raises:
            DNSError: If API call fails
        """
        if dry_run:
            self._logger.info(
                f"[DRY-RUN] Would remove DNS record by ID: {record_id}"
            )
            return

        self._logger.info(f"Removing DNS record by ID: {record_id}")

        try:
            url = f"{self._base_url}/{record_id}"
            response = requests.delete(
                url,
                headers=self._headers,
                timeout=30
            )

            # Handle 404/405 gracefully - record doesn't exist or can't be
            # deleted; during rollback the record may already be gone.
            if response.status_code in [404, 405]:
                self._logger.warning(
                    f"DNS record {record_id} not found or cannot be deleted (may already be removed)"
                )
                return

            response.raise_for_status()

            data = response.json()

            if not data.get("success", False):
                errors = data.get("errors", [])
                raise DNSError(f"Cloudflare API error: {errors}")

            self._logger.info(f"DNS record removed successfully: {record_id}")

        except requests.RequestException as e:
            raise DNSError(f"Failed to remove DNS record: {e}") from e

    def get_public_ip(self) -> str:
        """
        Get public IP address from external service

        Uses icanhazip.com, so this requires outbound internet access.

        Returns:
            Public IP address as string

        Raises:
            DNSError: If IP retrieval fails
        """
        self._logger.debug("Retrieving public IP address")

        try:
            response = requests.get("https://ipv4.icanhazip.com", timeout=10)
            response.raise_for_status()
            # Service returns the address followed by a newline
            ip = response.text.strip()

            self._logger.debug(f"Public IP: {ip}")
            return ip

        except requests.RequestException as e:
            raise DNSError(f"Failed to retrieve public IP: {e}") from e
class DockerError(Exception):
    """Raised when Docker operations fail"""
    pass


@dataclass
class ContainerInfo:
    """Information about a running container"""
    # Container ID as printed by `docker compose ps -q`
    container_id: str
    # Container name without the leading slash ("unknown" if inspect failed)
    name: str
    # Docker state string, e.g. "running" ("unknown" if inspect failed)
    status: str


class DockerManager:
    """Docker Compose operations wrapper.

    Shells out to the `docker compose` v2 CLI; every command is pinned to
    one compose file and one env file so multiple deployments on the same
    host stay isolated.
    """

    def __init__(self, compose_file: Path, env_file: Path):
        """
        Initialize Docker manager

        Args:
            compose_file: Path to docker-compose.yml
            env_file: Path to .env file
        """
        self._compose_file = compose_file
        self._env_file = env_file
        self._logger = logging.getLogger(f"{__name__}.DockerManager")

    def _run_command(
        self,
        cmd: List[str],
        check: bool = True,
        capture_output: bool = True
    ) -> subprocess.CompletedProcess:
        """
        Run docker compose command

        Args:
            cmd: Command list to execute
            check: Whether to raise on non-zero exit
            capture_output: Whether to capture stdout/stderr

        Returns:
            CompletedProcess instance

        Raises:
            DockerError: If command fails and check=True
        """
        self._logger.debug(f"Running: {' '.join(cmd)}")

        try:
            # cwd is the compose file's directory so relative paths inside
            # the compose file resolve correctly. List-form argv (shell=False
            # default) avoids shell injection.
            result = subprocess.run(
                cmd,
                check=check,
                capture_output=capture_output,
                text=True,
                cwd=self._compose_file.parent
            )
            return result

        except subprocess.CalledProcessError as e:
            # Prefer stderr, fall back to stdout, then the exception itself
            error_msg = f"Docker command failed: {e.stderr or e.stdout or str(e)}"
            self._logger.error(error_msg)
            raise DockerError(error_msg) from e
        except FileNotFoundError as e:
            # The `docker` binary itself is missing from PATH
            raise DockerError(
                f"Docker command not found. Is Docker installed? {e}"
            ) from e

    def validate_compose_file(self) -> None:
        """
        Validate docker-compose.yml syntax

        Uses `docker compose config --quiet`, which parses the file and
        interpolates the env file without starting anything.

        Raises:
            DockerError: If compose file is invalid
        """
        self._logger.debug("Validating docker-compose.yml")

        cmd = [
            "docker", "compose",
            "-f", str(self._compose_file),
            "--env-file", str(self._env_file),
            "config", "--quiet"
        ]

        try:
            self._run_command(cmd)
            self._logger.debug("docker-compose.yml is valid")

        except DockerError as e:
            raise DockerError(f"Invalid docker-compose.yml: {e}") from e

    def pull_images(self, dry_run: bool = False) -> None:
        """
        Pull required Docker images

        Args:
            dry_run: If True, only log what would be done

        Raises:
            DockerError: If pull fails
        """
        if dry_run:
            self._logger.info("[DRY-RUN] Would pull Docker images")
            return

        self._logger.info("Pulling Docker images")

        cmd = [
            "docker", "compose",
            "-f", str(self._compose_file),
            "--env-file", str(self._env_file),
            "pull"
        ]

        self._run_command(cmd)
        self._logger.info("Docker images pulled successfully")

    def start_services(self, dry_run: bool = False) -> List[ContainerInfo]:
        """
        Start Docker Compose services

        Args:
            dry_run: If True, only log what would be done

        Returns:
            List of created containers for rollback

        Raises:
            DockerError: If start fails
        """
        if dry_run:
            self._logger.info("[DRY-RUN] Would start Docker services")
            return []

        self._logger.info("Starting Docker services")

        cmd = [
            "docker", "compose",
            "-f", str(self._compose_file),
            "--env-file", str(self._env_file),
            "up", "-d"
        ]

        self._run_command(cmd)

        # Capture what was created so a failed deployment can be rolled back
        containers = self.get_container_status()

        self._logger.info(
            f"Docker services started successfully: {len(containers)} containers"
        )

        return containers

    def stop_services(self, dry_run: bool = False) -> None:
        """
        Stop Docker Compose services

        `down` removes containers and networks but keeps volumes.

        Args:
            dry_run: If True, only log what would be done

        Raises:
            DockerError: If stop fails
        """
        if dry_run:
            self._logger.info("[DRY-RUN] Would stop Docker services")
            return

        self._logger.info("Stopping Docker services")

        cmd = [
            "docker", "compose",
            "-f", str(self._compose_file),
            "--env-file", str(self._env_file),
            "down"
        ]

        self._run_command(cmd)
        self._logger.info("Docker services stopped successfully")

    def stop_services_and_remove_volumes(self, dry_run: bool = False) -> None:
        """
        Stop services and remove volumes (full cleanup)

        `down -v` additionally deletes named volumes — data is lost.

        Args:
            dry_run: If True, only log what would be done

        Raises:
            DockerError: If stop fails
        """
        if dry_run:
            self._logger.info("[DRY-RUN] Would stop Docker services and remove volumes")
            return

        self._logger.info("Stopping Docker services and removing volumes")

        cmd = [
            "docker", "compose",
            "-f", str(self._compose_file),
            "--env-file", str(self._env_file),
            "down", "-v"
        ]

        self._run_command(cmd)
        self._logger.info("Docker services stopped and volumes removed")

    def get_container_status(self) -> List[ContainerInfo]:
        """
        Get status of containers for this project

        Returns:
            List of ContainerInfo objects

        Raises:
            DockerError: If status check fails
        """
        self._logger.debug("Getting container status")

        # `ps -q` prints one container ID per line for this project only
        cmd = [
            "docker", "compose",
            "-f", str(self._compose_file),
            "--env-file", str(self._env_file),
            "ps", "-q"
        ]

        result = self._run_command(cmd)

        container_ids = [
            cid.strip()
            for cid in result.stdout.strip().split('\n')
            if cid.strip()
        ]

        containers = []
        for container_id in container_ids:
            # Get container details; the Go template yields "name:status"
            inspect_cmd = ["docker", "inspect", container_id, "--format", "{{.Name}}:{{.State.Status}}"]
            try:
                inspect_result = self._run_command(inspect_cmd)
                name_status = inspect_result.stdout.strip()
                if ':' in name_status:
                    name, status = name_status.split(':', 1)
                    # Remove leading slash from container name
                    name = name.lstrip('/')
                    containers.append(ContainerInfo(
                        container_id=container_id,
                        name=name,
                        status=status
                    ))
            except DockerError:
                # If inspect fails, just record the ID so rollback can
                # still target the container
                containers.append(ContainerInfo(
                    container_id=container_id,
                    name="unknown",
                    status="unknown"
                ))

        self._logger.debug(f"Found {len(containers)} containers")
        return containers
@dataclass
class EnvValues:
    """Container for generated environment values"""
    subdomain: str
    domain: str
    url: str
    db_name: str
    db_user: str
    db_password: str
    db_root_password: str
    compose_project_name: str


class WordGenerator:
    """Random dictionary-word selection (pure Python, no shell tools).

    Words are loaded lazily from ``dict_file``, filtered, and cached for
    the lifetime of the instance.
    """

    def __init__(self, dict_file: Path):
        """
        Initialize word generator

        Args:
            dict_file: Path to dictionary file (e.g., /usr/share/dict/words)
        """
        self._dict_file = dict_file
        self._words_cache: Optional[List[str]] = None
        self._logger = logging.getLogger(f"{__name__}.WordGenerator")

    def _load_and_filter_words(self) -> List[str]:
        """
        Load dictionary and filter to 4-10 char lowercase words

        Returns:
            List of filtered words

        Raises:
            FileNotFoundError: If dictionary file doesn't exist
            ValueError: If no valid words found
        """
        if not self._dict_file.exists():
            raise FileNotFoundError(f"Dictionary file not found: {self._dict_file}")

        self._logger.debug(f"Loading words from {self._dict_file}")

        # Keep only simple lowercase ASCII words (no apostrophes, no proper
        # nouns) short enough to be usable in subdomains and passwords.
        pattern = re.compile(r'^[a-z]{4,10}$')
        words = []

        with open(self._dict_file, 'r', encoding='utf-8') as f:
            for line in f:
                word = line.strip()
                if pattern.match(word):
                    words.append(word)

        if not words:
            raise ValueError(f"No valid words found in {self._dict_file}")

        self._logger.debug(f"Loaded {len(words)} valid words")
        return words

    def _words(self) -> List[str]:
        """Return the cached word list, loading it on first use."""
        if self._words_cache is None:
            self._words_cache = self._load_and_filter_words()
        return self._words_cache

    def get_random_word(self) -> str:
        """
        Get single random word from filtered list

        Returns:
            Random word (4-10 chars, lowercase)
        """
        # secrets.choice instead of random.choice: these words feed DB
        # passwords downstream, so use a cryptographically secure source.
        return secrets.choice(self._words())

    def get_random_words(self, count: int) -> List[str]:
        """
        Get multiple random words (sampled with replacement)

        Args:
            count: Number of words to retrieve

        Returns:
            List of random words
        """
        words = self._words()
        # With replacement (like random.choices), but drawn from the
        # secrets module's CSPRNG since results feed password generation.
        return [secrets.choice(words) for _ in range(count)]


class PasswordGenerator:
    """Generate memorable passwords from dictionary words"""

    def __init__(self, word_generator: WordGenerator):
        """
        Initialize password generator

        Args:
            word_generator: WordGenerator instance for word selection
        """
        self._word_generator = word_generator
        self._logger = logging.getLogger(f"{__name__}.PasswordGenerator")

    def generate_memorable_password(self, word_count: int = 3) -> str:
        """
        Generate password from N random words joined by hyphens

        Args:
            word_count: Number of words to use (default: 3)

        Returns:
            Password string like "templon-infantly-yielding"
        """
        words = self._word_generator.get_random_words(word_count)
        password = '-'.join(words)
        self._logger.debug(f"Generated {word_count}-word password")
        return password

    def generate_random_string(self, length: int = 8) -> str:
        """
        Generate a random lowercase-hex string using the secrets module

        Note: output contains only [0-9a-f] (token_hex), not the full
        alphanumeric range.

        Args:
            length: Length of string to generate (default: 8)

        Returns:
            Random lowercase hex string of exactly `length` characters
        """
        # token_hex(n) yields 2n hex chars; over-generate by one byte so
        # odd lengths can be sliced down to the requested size.
        return secrets.token_hex(length // 2 + 1)[:length]
class EnvFileGenerator:
    """Pure Python .env file manipulation (replaces bash sed logic).

    Generates the per-deployment values (subdomain, URL, DB identifiers,
    passwords) and rewrites the .env file atomically, with backup/restore
    support for rollback.
    """

    def __init__(
        self,
        env_file: Path,
        word_generator: WordGenerator,
        password_generator: PasswordGenerator,
        base_domain: str = "merakit.my",
        app_name: Optional[str] = None
    ):
        """
        Initialize environment file generator

        Args:
            env_file: Path to .env file
            word_generator: WordGenerator instance
            password_generator: PasswordGenerator instance
            base_domain: Base domain for URL generation (default: "merakit.my")
            app_name: Application name (default: read from .env or "wordpress")
        """
        self._env_file = env_file
        self._word_generator = word_generator
        self._password_generator = password_generator
        self._base_domain = base_domain
        self._app_name = app_name
        self._logger = logging.getLogger(f"{__name__}.EnvFileGenerator")

    def generate_values(self) -> EnvValues:
        """
        Generate all environment values

        Returns:
            EnvValues dataclass with all generated values
        """
        self._logger.info("Generating environment values")

        # Read current .env to get app_name if not provided
        current_env = self.read_current_env()
        app_name = self._app_name or current_env.get('APP_NAME', 'wordpress')

        # 1. Generate subdomain: two random words, e.g. "ascidiia-bridoon"
        word1 = self._word_generator.get_random_word()
        word2 = self._word_generator.get_random_word()
        subdomain = f"{word1}-{word2}"

        # 2. Construct URL (no scheme; callers prepend https://)
        url = f"{subdomain}.{self._base_domain}"

        # 3. Generate random string for DB identifiers
        random_str = self._password_generator.generate_random_string(8)

        # 4. Generate DB identifiers with truncation logic
        db_name = self._generate_db_name(random_str, app_name, subdomain)
        db_user = self._generate_db_user(random_str, app_name, subdomain)

        # 5. Generate passwords (word-based, human-memorable)
        db_password = self._password_generator.generate_memorable_password(3)
        db_root_password = self._password_generator.generate_memorable_password(3)

        self._logger.info(f"Generated values for subdomain: {subdomain}")
        self._logger.debug(f"URL: {url}")
        self._logger.debug(f"DB_NAME: {db_name}")
        self._logger.debug(f"DB_USER: {db_user}")

        return EnvValues(
            subdomain=subdomain,
            domain=self._base_domain,
            url=url,
            db_name=db_name,
            db_user=db_user,
            db_password=db_password,
            db_root_password=db_root_password,
            # Subdomain doubles as the compose project name, isolating
            # each deployment's containers/volumes/networks
            compose_project_name=subdomain
        )

    def _generate_db_name(self, random_str: str, app_name: str, subdomain: str) -> str:
        """
        Format: angali_{random8}_{app}_{subdomain}, truncate to 64 chars

        Args:
            random_str: Random 8-char string
            app_name: Application name
            subdomain: Subdomain with hyphens

        Returns:
            Database name (max 64 chars)
        """
        # Replace hyphens with underscores for DB compatibility
        subdomain_safe = subdomain.replace('-', '_')
        db_name = f"angali_{random_str}_{app_name}_{subdomain_safe}"

        # Truncate to MySQL limit of 64 chars
        return db_name[:64]

    def _generate_db_user(self, random_str: str, app_name: str, subdomain: str) -> str:
        """
        Format: angali_{random8}_{app}_{subdomain}, truncate to 32 chars

        Args:
            random_str: Random 8-char string
            app_name: Application name
            subdomain: Subdomain with hyphens

        Returns:
            Database username (max 32 chars)
        """
        # Replace hyphens with underscores for DB compatibility
        subdomain_safe = subdomain.replace('-', '_')
        db_user = f"angali_{random_str}_{app_name}_{subdomain_safe}"

        # Truncate to MySQL limit of 32 chars for usernames
        return db_user[:32]

    def read_current_env(self) -> Dict[str, str]:
        """
        Parse existing .env file into dict

        Comments and blank lines are skipped; surrounding single/double
        quotes around values are stripped.

        Returns:
            Dictionary of environment variables (empty if file missing)
        """
        env_dict = {}

        if not self._env_file.exists():
            self._logger.warning(f"Env file not found: {self._env_file}")
            return env_dict

        with open(self._env_file, 'r') as f:
            for line in f:
                line = line.strip()
                # Skip empty lines and comments
                if not line or line.startswith('#'):
                    continue

                # Parse KEY=VALUE format; split only on the first '='
                if '=' in line:
                    key, value = line.split('=', 1)
                    # Remove quotes if present
                    value = value.strip('"').strip("'")
                    env_dict[key.strip()] = value

        self._logger.debug(f"Read {len(env_dict)} variables from {self._env_file}")
        return env_dict

    def backup_env_file(self) -> Path:
        """
        Create timestamped backup of .env file

        Returns:
            Path to backup file

        Raises:
            FileNotFoundError: If .env file doesn't exist
        """
        if not self._env_file.exists():
            raise FileNotFoundError(f"Cannot backup non-existent file: {self._env_file}")

        # Create backup with timestamp, e.g. ".env.backup.20251217_160100"
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        backup_path = self._env_file.parent / f"{self._env_file.name}.backup.{timestamp}"

        # copy2 preserves file metadata (mtime, permissions)
        shutil.copy2(self._env_file, backup_path)
        self._logger.info(f"Created backup: {backup_path}")

        return backup_path

    def update_env_file(self, values: EnvValues, dry_run: bool = False) -> None:
        """
        Update .env file with new values (Python dict manipulation)

        Uses atomic write pattern: write to temp file, then rename.
        NOTE: because the file is round-tripped through read_current_env(),
        comments and blank lines in the original .env are not preserved.

        Args:
            values: EnvValues to write
            dry_run: If True, only log what would be done

        Raises:
            FileNotFoundError: If .env file doesn't exist
        """
        if not self._env_file.exists():
            raise FileNotFoundError(f"Env file not found: {self._env_file}")

        if dry_run:
            self._logger.info(f"[DRY-RUN] Would update {self._env_file} with:")
            for key, value in asdict(values).items():
                # Never print secrets, even in dry-run output
                if 'password' in key.lower():
                    self._logger.info(f"  {key.upper()}=********")
                else:
                    self._logger.info(f"  {key.upper()}={value}")
            return

        # Read current env
        current_env = self.read_current_env()

        # Update with new values
        current_env.update({
            'COMPOSE_PROJECT_NAME': values.compose_project_name,
            'SUBDOMAIN': values.subdomain,
            'DOMAIN': values.domain,
            'URL': values.url,
            'DB_NAME': values.db_name,
            'DB_USER': values.db_user,
            'DB_PASSWORD': values.db_password,
            'DB_ROOT_PASSWORD': values.db_root_password
        })

        # Write atomically: write to temp file, then rename, so a crash
        # mid-write never leaves a half-written .env behind
        temp_file = self._env_file.parent / f"{self._env_file.name}.tmp"

        try:
            with open(temp_file, 'w') as f:
                for key, value in current_env.items():
                    f.write(f"{key}={value}\n")

            # Atomic rename (os.replace overwrites the destination)
            os.replace(temp_file, self._env_file)
            self._logger.info(f"Updated {self._env_file} successfully")

        except Exception as e:
            # Cleanup temp file on error
            if temp_file.exists():
                temp_file.unlink()
            raise RuntimeError(f"Failed to update env file: {e}") from e

    def restore_env_file(self, backup_path: Path) -> None:
        """
        Restore .env from backup (used during rollback)

        Args:
            backup_path: Path to backup file

        Raises:
            FileNotFoundError: If backup file doesn't exist
        """
        if not backup_path.exists():
            raise FileNotFoundError(f"Backup file not found: {backup_path}")

        shutil.copy2(backup_path, self._env_file)
        self._logger.info(f"Restored {self._env_file} from {backup_path}")
__init__( + self, + timeout: int, + interval: int, + verify_ssl: bool + ): + """ + Initialize health checker + + Args: + timeout: Total timeout in seconds + interval: Check interval in seconds + verify_ssl: Whether to verify SSL certificates + """ + self._timeout = timeout + self._interval = interval + self._verify_ssl = verify_ssl + self._logger = logging.getLogger(f"{__name__}.HealthChecker") + + def check_health(self, url: str, dry_run: bool = False) -> bool: + """ + Perform health check with retries + + Args: + url: URL to check (e.g., https://example.com) + dry_run: If True, only log what would be done + + Returns: + True if health check passed, False otherwise + """ + if dry_run: + self._logger.info(f"[DRY-RUN] Would check health of {url}") + return True + + self._logger.info( + f"Checking health of {url} for up to {self._timeout} seconds" + ) + + start_time = time.time() + attempt = 0 + + while True: + attempt += 1 + elapsed = time.time() - start_time + + if elapsed > self._timeout: + self._logger.error( + f"Health check timed out after {elapsed:.1f} seconds " + f"({attempt} attempts)" + ) + return False + + # Perform single check + if self._single_check(url): + self._logger.info( + f"Health check passed after {elapsed:.1f} seconds " + f"({attempt} attempts)" + ) + return True + + # Wait before next attempt + remaining = self._timeout - elapsed + if remaining > 0: + wait_time = min(self._interval, remaining) + self._logger.debug( + f"Attempt {attempt} failed, retrying in {wait_time:.1f}s " + f"(elapsed: {elapsed:.1f}s, timeout: {self._timeout}s)" + ) + time.sleep(wait_time) + else: + # No time remaining + self._logger.error(f"Health check timed out after {attempt} attempts") + return False + + def _single_check(self, url: str) -> bool: + """ + Single health check attempt + + Args: + url: URL to check + + Returns: + True if valid HTTP response (2xx or 3xx) received, False otherwise + """ + try: + response = requests.get( + url, + timeout=5, + 
verify=self._verify_ssl, + allow_redirects=True + ) + + # Accept any 2xx or 3xx status code as valid + if 200 <= response.status_code < 400: + self._logger.debug(f"Health check successful: HTTP {response.status_code}") + return True + else: + self._logger.debug( + f"Health check failed: HTTP {response.status_code}" + ) + return False + + except requests.RequestException as e: + self._logger.debug(f"Health check failed: {type(e).__name__}: {e}") + return False diff --git a/wordpress/wordpress_deployer/orchestrator.py b/wordpress/wordpress_deployer/orchestrator.py new file mode 100644 index 0000000..899802c --- /dev/null +++ b/wordpress/wordpress_deployer/orchestrator.py @@ -0,0 +1,626 @@ +""" +Deployment orchestration module + +Main deployment workflow with rollback tracking and execution +""" + +import logging +import shutil +import time +from dataclasses import asdict, dataclass +from datetime import datetime +from pathlib import Path +from typing import Any, Dict, List + +from .config import DeploymentConfig +from .deployment_config_manager import DeploymentConfigManager, DeploymentMetadata +from .deployment_logger import DeploymentFileLogger +from .dns_manager import DNSError, DNSManager, DNSRecord +from .docker_manager import DockerError, DockerManager +from .env_generator import EnvFileGenerator, EnvValues, PasswordGenerator, WordGenerator +from .health import HealthCheckError, HealthChecker +from .webhooks import WebhookNotifier + + +logger = logging.getLogger(__name__) + + +class DeploymentError(Exception): + """Base exception for deployment errors""" + pass + + +class ValidationError(DeploymentError): + """Validation failed""" + pass + + +@dataclass +class DeploymentAction: + """Represents a single deployment action""" + action_type: str # 'dns_added', 'containers_started', 'env_updated' + timestamp: datetime + details: Dict[str, Any] + rollback_data: Dict[str, Any] + + +class DeploymentTracker: + """Track deployment actions for rollback""" + + def 
@dataclass
class DeploymentAction:
    """Represents a single deployment action"""
    action_type: str  # 'dns_added', 'containers_started', 'env_updated'
    timestamp: datetime
    details: Dict[str, Any]
    rollback_data: Dict[str, Any]


class DeploymentTracker:
    """Track deployment actions for rollback"""

    def __init__(self):
        """Initialize deployment tracker"""
        self._actions: List["DeploymentAction"] = []
        self._logger = logging.getLogger(f"{__name__}.DeploymentTracker")

    def record_action(self, action: DeploymentAction) -> None:
        """
        Record a deployment action

        Args:
            action: DeploymentAction to record
        """
        self._actions.append(action)
        self._logger.debug(f"Recorded action: {action.action_type}")

    def get_actions(self) -> List[DeploymentAction]:
        """
        Get all recorded actions

        Returns:
            List of DeploymentAction objects (a copy, so callers cannot
            mutate the internal history)
        """
        return self._actions.copy()

    def clear(self) -> None:
        """Clear tracking history"""
        self._actions.clear()
        self._logger.debug("Cleared action history")


class DeploymentOrchestrator:
    """Main orchestrator coordinating all deployment steps"""

    def __init__(self, config: "DeploymentConfig"):
        """
        Initialize deployment orchestrator

        Args:
            config: DeploymentConfig instance
        """
        self._config = config
        self._logger = logging.getLogger(f"{__name__}.DeploymentOrchestrator")

        # Initialize components
        self._word_generator = WordGenerator(config.dict_file)
        self._password_generator = PasswordGenerator(self._word_generator)
        self._env_generator = EnvFileGenerator(
            config.env_file,
            self._word_generator,
            self._password_generator,
            config.base_domain,
            config.app_name
        )
        self._dns_manager = DNSManager(
            config.cloudflare_api_token,
            config.cloudflare_zone_id
        )
        self._docker_manager = DockerManager(
            config.docker_compose_file,
            config.env_file
        )
        self._webhook_notifier = WebhookNotifier(
            config.webhook_url,
            config.webhook_timeout,
            config.webhook_retries
        )
        self._health_checker = HealthChecker(
            config.healthcheck_timeout,
            config.healthcheck_interval,
            config.verify_ssl
        )
        self._tracker = DeploymentTracker()
        self._deployment_logger = DeploymentFileLogger()
        self._config_manager = DeploymentConfigManager()

    def deploy(self) -> None:
        """
        Main deployment workflow

        Runs validation, env generation, DNS setup, container deployment and
        health check in order; on any failure it notifies, logs, rolls back
        every tracked action, and re-raises as DeploymentError.

        Raises:
            DeploymentError: If deployment fails
        """
        # Monotonic clock: immune to wall-clock adjustments while measuring
        # the deployment duration.
        start_time = time.monotonic()
        env_values = None
        dns_record_id = None
        dns_ip = None
        containers = []

        try:
            # Phase 1: Validation
            self._phase_validate()

            # Phase 2: Environment Generation (with retry on DNS conflicts)
            env_values = self._phase_generate_env_with_retries()

            # Send deployment_started webhook
            self._webhook_notifier.deployment_started(
                env_values.subdomain,
                env_values.url
            )

            # Phase 3: DNS Setup
            dns_record_id, dns_ip = self._phase_setup_dns(env_values)

            # Phase 4: Container Deployment
            containers = self._phase_deploy_containers()

            # Phase 5: Health Check
            self._phase_health_check(env_values.url)

            # Success
            duration = time.monotonic() - start_time
            self._webhook_notifier.deployment_success(
                env_values.subdomain,
                env_values.url,
                duration
            )
            self._logger.info(
                f"✓ Deployment successful! URL: https://{env_values.url} "
                f"(took {duration:.1f}s)"
            )

            # Log success to file
            self._deployment_logger.log_success(
                env_values.url,
                env_values.subdomain,
                duration
            )

            # Save deployment configuration
            self._save_deployment_config(
                env_values,
                dns_record_id,
                dns_ip,
                containers
            )

        except Exception as e:
            self._logger.error(f"✗ Deployment failed: {e}")

            # env_values is None if we failed before Phase 2 completed;
            # compute the identifiers once instead of branching separately
            # for the webhook and the file logger (the original duplicated
            # this if/else twice).
            failed_subdomain = env_values.subdomain if env_values else ""
            failed_url = env_values.url if env_values else ""

            # Send failure webhook
            self._webhook_notifier.deployment_failed(
                failed_subdomain,
                str(e),
                failed_url
            )

            # Log failure to file
            self._deployment_logger.log_failure(
                failed_url,
                failed_subdomain,
                str(e)
            )

            # Rollback
            self._logger.info("Starting rollback...")
            self._rollback_all()

            raise DeploymentError(f"Deployment failed: {e}") from e
""" + self._logger.info("═══ Phase 1: Validation ═══") + + # Check system dependencies + self._validate_dependencies() + + # Validate environment file + if not self._config.env_file.exists(): + raise ValidationError(f"Env file not found: {self._config.env_file}") + + # Validate Docker Compose file + try: + self._docker_manager.validate_compose_file() + except DockerError as e: + raise ValidationError(f"Invalid docker-compose.yml: {e}") from e + + # Check external Docker network exists + self._validate_docker_network("proxy") + + self._logger.info("✓ Validation complete") + + def _validate_dependencies(self) -> None: + """ + Validate system dependencies + + Raises: + ValidationError: If dependencies are missing + """ + import shutil as sh + + required_commands = ["docker", "curl"] + + for cmd in required_commands: + if not sh.which(cmd): + raise ValidationError( + f"Required command not found: {cmd}. " + f"Please install {cmd} and try again." + ) + + # Check Docker daemon is running + try: + import subprocess + result = subprocess.run( + ["docker", "info"], + capture_output=True, + timeout=5 + ) + if result.returncode != 0: + raise ValidationError( + "Docker daemon is not running. Please start Docker." + ) + except (subprocess.TimeoutExpired, FileNotFoundError) as e: + raise ValidationError(f"Failed to check Docker daemon: {e}") from e + + def _validate_docker_network(self, network_name: str) -> None: + """ + Check external Docker network exists + + Args: + network_name: Network name to check + + Raises: + ValidationError: If network doesn't exist + """ + import subprocess + + try: + result = subprocess.run( + ["docker", "network", "inspect", network_name], + capture_output=True, + timeout=5 + ) + if result.returncode != 0: + raise ValidationError( + f"Docker network '{network_name}' not found. 
" + f"Please create it with: docker network create {network_name}" + ) + except (subprocess.TimeoutExpired, FileNotFoundError) as e: + raise ValidationError( + f"Failed to check Docker network: {e}" + ) from e + + def _phase_generate_env_with_retries(self) -> EnvValues: + """ + Phase 2: Generate environment with DNS conflict retry + + Returns: + EnvValues with generated values + + Raises: + DeploymentError: If unable to generate unique subdomain + """ + self._logger.info("═══ Phase 2: Environment Generation ═══") + + for attempt in range(1, self._config.max_retries + 1): + # Generate new values + env_values = self._env_generator.generate_values() + + self._logger.info(f"Generated subdomain: {env_values.subdomain}") + + # Check DNS conflict + try: + if not self._dns_manager.check_record_exists(env_values.url): + # No conflict, proceed + self._logger.info(f"✓ Subdomain available: {env_values.subdomain}") + + # Create backup + backup_path = self._env_generator.backup_env_file() + + # Update .env file + self._env_generator.update_env_file( + env_values, + dry_run=self._config.dry_run + ) + + # Track for rollback + self._tracker.record_action(DeploymentAction( + action_type="env_updated", + timestamp=datetime.now(), + details={"env_values": asdict(env_values)}, + rollback_data={"backup_path": str(backup_path)} + )) + + return env_values + + else: + self._logger.warning( + f"✗ DNS conflict for {env_values.url}, " + f"regenerating... (attempt {attempt}/{self._config.max_retries})" + ) + + except DNSError as e: + self._logger.warning( + f"DNS check failed: {e}. " + f"Assuming no conflict and proceeding..." 
+ ) + # If DNS check fails, proceed anyway (fail open) + backup_path = self._env_generator.backup_env_file() + self._env_generator.update_env_file( + env_values, + dry_run=self._config.dry_run + ) + self._tracker.record_action(DeploymentAction( + action_type="env_updated", + timestamp=datetime.now(), + details={"env_values": asdict(env_values)}, + rollback_data={"backup_path": str(backup_path)} + )) + return env_values + + raise DeploymentError( + f"Failed to generate unique subdomain after {self._config.max_retries} attempts" + ) + + def _phase_setup_dns(self, env_values: EnvValues) -> tuple: + """ + Phase 3: Add DNS record + + Args: + env_values: EnvValues with subdomain and URL + + Returns: + Tuple of (record_id, ip) + + Raises: + DNSError: If DNS setup fails + """ + self._logger.info("═══ Phase 3: DNS Setup ═══") + + # Get public IP + ip = self._dns_manager.get_public_ip() + self._logger.info(f"Public IP: {ip}") + + # Add DNS record + dns_record = self._dns_manager.add_record( + env_values.url, + ip, + dry_run=self._config.dry_run + ) + + self._logger.info(f"✓ DNS record added: {env_values.url} -> {ip}") + + # Track for rollback + self._tracker.record_action(DeploymentAction( + action_type="dns_added", + timestamp=datetime.now(), + details={"hostname": env_values.url, "ip": ip}, + rollback_data={"record_id": dns_record.record_id} + )) + + # Send webhook notification + self._webhook_notifier.dns_added(env_values.url, ip) + + return dns_record.record_id, ip + + def _phase_deploy_containers(self) -> List: + """ + Phase 4: Start Docker containers + + Returns: + List of container information + + Raises: + DockerError: If container deployment fails + """ + self._logger.info("═══ Phase 4: Container Deployment ═══") + + # Pull images + self._logger.info("Pulling Docker images...") + self._docker_manager.pull_images(dry_run=self._config.dry_run) + + # Start services + self._logger.info("Starting Docker services...") + containers = self._docker_manager.start_services( + 
dry_run=self._config.dry_run + ) + + self._logger.info( + f"✓ Docker services started: {len(containers)} containers" + ) + + # Track for rollback + self._tracker.record_action(DeploymentAction( + action_type="containers_started", + timestamp=datetime.now(), + details={"containers": [asdict(c) for c in containers]}, + rollback_data={} + )) + + return containers + + def _phase_health_check(self, url: str) -> None: + """ + Phase 5: Health check + + Args: + url: URL to check (without https://) + + Raises: + HealthCheckError: If health check fails + """ + self._logger.info("═══ Phase 5: Health Check ═══") + + health_url = f"https://{url}" + start_time = time.time() + + if not self._health_checker.check_health( + health_url, + dry_run=self._config.dry_run + ): + raise HealthCheckError(f"Health check failed for {health_url}") + + duration = time.time() - start_time + self._logger.info(f"✓ Health check passed (took {duration:.1f}s)") + + # Send webhook notification + self._webhook_notifier.health_check_passed(url, duration) + + def _rollback_all(self) -> None: + """Rollback all tracked actions in reverse order""" + actions = list(reversed(self._tracker.get_actions())) + + if not actions: + self._logger.info("No actions to rollback") + return + + self._logger.info(f"Rolling back {len(actions)} actions...") + + for action in actions: + try: + self._rollback_action(action) + except Exception as e: + # Log but don't fail rollback + self._logger.error( + f"Failed to rollback action {action.action_type}: {e}" + ) + + self._logger.info("Rollback complete") + + def _rollback_action(self, action: DeploymentAction) -> None: + """ + Rollback single action based on type + + Args: + action: DeploymentAction to rollback + """ + if action.action_type == "dns_added": + self._rollback_dns(action) + elif action.action_type == "containers_started": + self._rollback_containers(action) + elif action.action_type == "env_updated": + self._rollback_env(action) + else: + 
self._logger.warning(f"Unknown action type: {action.action_type}") + + def _rollback_dns(self, action: DeploymentAction) -> None: + """ + Rollback DNS changes + + Args: + action: DeploymentAction with DNS details + """ + record_id = action.rollback_data.get("record_id") + if record_id: + self._logger.info(f"Rolling back DNS record: {record_id}") + try: + self._dns_manager.remove_record_by_id( + record_id, + dry_run=self._config.dry_run + ) + self._logger.info("✓ DNS record removed") + except DNSError as e: + self._logger.error(f"Failed to remove DNS record: {e}") + + def _rollback_containers(self, action: DeploymentAction) -> None: + """ + Stop and remove containers + + Args: + action: DeploymentAction with container details + """ + self._logger.info("Rolling back Docker containers") + try: + self._docker_manager.stop_services(dry_run=self._config.dry_run) + self._logger.info("✓ Docker services stopped") + except DockerError as e: + self._logger.error(f"Failed to stop Docker services: {e}") + + def _rollback_env(self, action: DeploymentAction) -> None: + """ + Restore .env file from backup + + Args: + action: DeploymentAction with backup path + """ + backup_path_str = action.rollback_data.get("backup_path") + if backup_path_str: + backup_path = Path(backup_path_str) + if backup_path.exists(): + self._logger.info(f"Rolling back .env file from {backup_path}") + try: + self._env_generator.restore_env_file(backup_path) + self._logger.info("✓ .env file restored") + except Exception as e: + self._logger.error(f"Failed to restore .env file: {e}") + else: + self._logger.warning(f"Backup file not found: {backup_path}") + + def _save_deployment_config( + self, + env_values: EnvValues, + dns_record_id: str, + dns_ip: str, + containers: List + ) -> None: + """ + Save deployment configuration for later cleanup + + Args: + env_values: EnvValues with deployment info + dns_record_id: Cloudflare DNS record ID + dns_ip: IP address used in DNS + containers: List of container 
information + """ + try: + # Extract container names, volumes, and networks + container_names = [c.name for c in containers if hasattr(c, 'name')] + + # Get volumes and networks from docker-compose + volumes = [ + f"{env_values.compose_project_name}_db_data", + f"{env_values.compose_project_name}_wp_data" + ] + + networks = [ + f"{env_values.compose_project_name}_internal" + ] + + # Create metadata + metadata = DeploymentMetadata( + subdomain=env_values.subdomain, + url=env_values.url, + domain=env_values.domain, + compose_project_name=env_values.compose_project_name, + db_name=env_values.db_name, + db_user=env_values.db_user, + deployment_timestamp=datetime.now().isoformat(), + dns_record_id=dns_record_id, + dns_ip=dns_ip, + containers=container_names, + volumes=volumes, + networks=networks, + env_file_path=str(self._config.env_file.absolute()) + ) + + # Save configuration + config_path = self._config_manager.save_deployment(metadata) + self._logger.info(f"✓ Deployment config saved: {config_path}") + + except Exception as e: + self._logger.warning(f"Failed to save deployment config: {e}") diff --git a/wordpress/wordpress_deployer/webhooks.py b/wordpress/wordpress_deployer/webhooks.py new file mode 100644 index 0000000..3616c2e --- /dev/null +++ b/wordpress/wordpress_deployer/webhooks.py @@ -0,0 +1,199 @@ +""" +Webhook notifications module + +Send deployment event notifications with retry logic +""" + +import logging +import time +from dataclasses import asdict, dataclass +from datetime import datetime +from typing import Any, Dict, Optional + +import requests + + +logger = logging.getLogger(__name__) + + +@dataclass +class WebhookEvent: + """Webhook event data""" + event_type: str # deployment_started, deployment_success, etc. 
@dataclass
class WebhookEvent:
    """Webhook event data"""
    event_type: str  # deployment_started, deployment_success, etc.
    timestamp: str
    subdomain: str
    url: str
    message: str
    metadata: Dict[str, Any]


class WebhookNotifier:
    """Send webhook notifications with retry logic"""

    def __init__(
        self,
        webhook_url: Optional[str],
        timeout: int,
        max_retries: int
    ):
        """
        Initialize webhook notifier

        Args:
            webhook_url: Webhook URL to send notifications to (None to disable)
            timeout: Request timeout in seconds
            max_retries: Maximum number of retry attempts
        """
        self._webhook_url = webhook_url
        self._timeout = timeout
        self._max_retries = max_retries
        self._logger = logging.getLogger(f"{__name__}.WebhookNotifier")

        if not webhook_url:
            self._logger.debug("Webhook notifications disabled (no URL configured)")

    @staticmethod
    def _utc_timestamp() -> str:
        """
        Current UTC time as an ISO-8601 string with a 'Z' suffix.

        Replaces the five copies of `datetime.utcnow().isoformat() + "Z"`:
        utcnow() is naive and deprecated since Python 3.12; this produces
        the identical string shape from an aware datetime.
        """
        from datetime import timezone  # local import: keep module deps unchanged
        return datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")

    def notify(self, event: WebhookEvent) -> None:
        """
        Send webhook notification with retry

        Args:
            event: WebhookEvent to send

        Note:
            Failures are logged but don't raise exceptions to avoid
            failing deployments due to webhook issues
        """
        if not self._webhook_url:
            return

        payload = asdict(event)

        self._logger.debug(f"Sending webhook: {event.event_type}")

        for attempt in range(1, self._max_retries + 1):
            try:
                response = requests.post(
                    self._webhook_url,
                    json=payload,
                    timeout=self._timeout
                )
                response.raise_for_status()

                self._logger.debug(
                    f"Webhook sent successfully: {event.event_type} "
                    f"(attempt {attempt})"
                )
                return

            except requests.RequestException as e:
                self._logger.warning(
                    f"Webhook delivery failed (attempt {attempt}/{self._max_retries}): {e}"
                )

                if attempt < self._max_retries:
                    # Exponential backoff: 1s, 2s, 4s, etc.
                    backoff = 2 ** (attempt - 1)
                    self._logger.debug(f"Retrying in {backoff}s...")
                    time.sleep(backoff)

        # Reached only when every attempt failed (success returns above)
        self._logger.error(
            f"Failed to deliver webhook after {self._max_retries} attempts: "
            f"{event.event_type}"
        )

    def deployment_started(self, subdomain: str, url: str) -> None:
        """
        Convenience method for deployment_started event

        Args:
            subdomain: Subdomain being deployed
            url: Full URL being deployed
        """
        event = WebhookEvent(
            event_type="deployment_started",
            timestamp=self._utc_timestamp(),
            subdomain=subdomain,
            url=url,
            message=f"Deployment started for {url}",
            metadata={}
        )
        self.notify(event)

    def deployment_success(
        self,
        subdomain: str,
        url: str,
        duration: float
    ) -> None:
        """
        Convenience method for deployment_success event

        Args:
            subdomain: Subdomain that was deployed
            url: Full URL that was deployed
            duration: Deployment duration in seconds
        """
        event = WebhookEvent(
            event_type="deployment_success",
            timestamp=self._utc_timestamp(),
            subdomain=subdomain,
            url=url,
            message=f"Deployment successful for {url}",
            metadata={"duration": round(duration, 2)}
        )
        self.notify(event)

    def deployment_failed(self, subdomain: str, error: str, url: str = "") -> None:
        """
        Convenience method for deployment_failed event

        Args:
            subdomain: Subdomain that failed to deploy
            error: Error message
            url: Full URL (may be empty if deployment failed early)
        """
        event = WebhookEvent(
            event_type="deployment_failed",
            timestamp=self._utc_timestamp(),
            subdomain=subdomain,
            url=url,
            message=f"Deployment failed: {error}",
            metadata={"error": error}
        )
        self.notify(event)

    def dns_added(self, hostname: str, ip: str) -> None:
        """
        Convenience method for dns_added event

        Args:
            hostname: Hostname that was added to DNS
            ip: IP address the hostname points to
        """
        event = WebhookEvent(
            event_type="dns_added",
            timestamp=self._utc_timestamp(),
            subdomain=hostname.split('.')[0],  # Extract subdomain
            url=hostname,
            message=f"DNS record added for {hostname}",
            metadata={"ip": ip}
        )
        self.notify(event)

    def health_check_passed(self, url: str, duration: float) -> None:
        """
        Convenience method for health_check_passed event

        Args:
            url: URL that passed health check
            duration: Time taken for health check in seconds
        """
        event = WebhookEvent(
            event_type="health_check_passed",
            timestamp=self._utc_timestamp(),
            subdomain=url.split('.')[0].replace('https://', '').replace('http://', ''),
            url=url,
            message=f"Health check passed for {url}",
            metadata={"duration": round(duration, 2)}
        )
        self.notify(event)