import asyncio
import os
import shutil
import subprocess
import uuid
from datetime import datetime
from pathlib import Path
from typing import Dict, Optional

import boto3
from fastapi import FastAPI, File, Form, HTTPException, UploadFile, Header
from fastapi.responses import JSONResponse
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from fastapi import Depends
import uvicorn

import firebase_admin
from firebase_admin import credentials
from firebase_admin import messaging

from botocore.exceptions import ClientError

cred = credentials.Certificate("durlabhdarshan-eff42-firebase-adminsdk-jo207-dd9fa8b9d1.json")
firebase_admin.initialize_app(cred)


app = FastAPI(title="Video Processing Server")
security = HTTPBearer()

# Authentication configuration
API_PASSKEY = "asdlkbcalwb232h3u2bdib29"  # Change this to your desired passkey

# Configuration
AWS_ACCESS_KEY_ID = "AKIA4ALHXKCHCESTHJHD"
AWS_SECRET_ACCESS_KEY = "3biTOwuU1u89IiyriSlB34kvDJxtq6f5V4Oldm9p"
AWS_BUCKET_NAME = "co.techxr.system.backend.upload.dev"
AWS_REGION = "ap-south-1"

# Local directories
UPLOAD_DIR = Path("uploads")
PROCESSING_DIR = Path("processing")
HLS_OUTPUT_DIR = Path("hls_output")

# Ensure directories exist
UPLOAD_DIR.mkdir(exist_ok=True)
PROCESSING_DIR.mkdir(exist_ok=True)
HLS_OUTPUT_DIR.mkdir(exist_ok=True)

# S3 client
s3_client = boto3.client(
    's3',
    aws_access_key_id=AWS_ACCESS_KEY_ID,
    aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
    region_name=AWS_REGION
)

# Task tracking
conversion_tasks: Dict[str, Dict] = {}
server_busy = False

def verify_passkey(credentials: HTTPAuthorizationCredentials = Depends(security)):
    """Verify the passkey from Authorization header"""
    if credentials.credentials != API_PASSKEY:
        raise HTTPException(
            status_code=401,
            detail="Invalid passkey",
            headers={"WWW-Authenticate": "Bearer"},
        )
    return credentials.credentials

from botocore.exceptions import ClientError

def _s3_object_exists(bucket: str, key: str) -> bool:
    try:
        s3_client.head_object(Bucket=bucket, Key=key)
        return True
    except ClientError as e:
        code = e.response.get("Error", {}).get("Code")
        if code in ("404", "NoSuchKey", "NotFound"):
            return False
        raise

def _s3_move_object(bucket: str, src_key: str, dst_key: str) -> dict:
    """Copy then delete, and verify deletion. Returns detailed status."""
    out = {"from": src_key, "to": dst_key, "copied": False, "deleted": False, "error": None}
    try:
        # Copy (preserve ContentType if possible)
        # If you want to force the JSON ContentType, set ExtraArgs={"ContentType": "application/json"}
        s3_client.copy({"Bucket": bucket, "Key": src_key}, bucket, dst_key)
        out["copied"] = True

        # Delete source
        s3_client.delete_object(Bucket=bucket, Key=src_key)

        # Verify deletion (for non-versioned buckets this should 404; for versioned, you’ll get a DeleteMarker)
        try:
            s3_client.head_object(Bucket=bucket, Key=src_key)
            # If head still succeeds, deletion didn’t “hide” the latest version (Object Lock/Legal Hold/permission).
            out["error"] = "delete_failed_or_blocked (ObjectLock/LegalHold/permission?)"
        except ClientError as e:
            code = e.response.get("Error", {}).get("Code")
            if code in ("404", "NoSuchKey", "NotFound"):
                out["deleted"] = True
            else:
                out["error"] = f"head_after_delete_error: {code}"
    except ClientError as e:
        out["error"] = f"{e.response.get('Error',{}).get('Code')}: {e.response.get('Error',{}).get('Message')}"
    except Exception as e:
        out["error"] = str(e)
    return out

def _s3_move_prefix(bucket: str, src_prefix: str, dst_prefix: str) -> dict:
    """Move all objects under a prefix; returns per-object results."""
    moved, skipped = [], []
    paginator = s3_client.get_paginator("list_objects_v2")
    for page in paginator.paginate(Bucket=bucket, Prefix=src_prefix):
        for item in page.get("Contents", []):
            key = item["Key"]
            dst_key = dst_prefix + key[len(src_prefix):]
            res = _s3_move_object(bucket, key, dst_key)
            if res.get("deleted"):
                moved.append(res)
            else:
                skipped.append(res)
    return {"moved": moved, "skipped": skipped}


@app.post("/promote-unapproved-slot")
async def promote_unapproved_slot(
    temple: str = Form(...),
    date: str = Form(...),   # "yyyy-MM-dd"
    slot: str = Form(...),   # "morning"/"evening"/etc.
    move_jpg: bool = Form(True),
    move_json: bool = Form(True),
    move_hls: bool = Form(True),
    dry_run: bool = Form(False),
    passkey: str = Depends(verify_passkey),
):
    """
    Move objects from:
      DurlabhDarshanAssets/App/aaj-ke-darshan/{temple}/{date}/unapproved/{slot}[.jpg|.json|/*]
    to:
      DurlabhDarshanAssets/App/aaj-ke-darshan/{temple}/{date}/{slot}[.jpg|.json|/*]
    """
    base = f"DurlabhDarshanAssets/App/aaj-ke-darshan/{temple}/{date}"
    results = {
        "moved": [],
        "skipped": [],
        "hls": {"moved": [], "skipped": []},
        "dry_run": dry_run,
        "bucket": AWS_BUCKET_NAME,
    }

    # Image (.jpg)
    if move_jpg:
        src_jpg = f"{base}/unapproved/{slot}.jpg"
        dst_jpg = f"{base}/{slot}.jpg"
        if _s3_object_exists(AWS_BUCKET_NAME, src_jpg):
            if dry_run:
                results["moved"].append({"from": src_jpg, "to": dst_jpg, "note": "dry_run"})
            else:
                try:
                    _s3_move_object(AWS_BUCKET_NAME, src_jpg, dst_jpg)
                    results["moved"].append({"from": src_jpg, "to": dst_jpg})
                except Exception as e:
                    results["skipped"].append({"from": src_jpg, "error": str(e)})
        else:
            results["skipped"].append({"from": src_jpg, "error": "not_found"})

    # JSON (.json)
    if move_json:
        src_json = f"{base}/unapproved/{slot}.json"
        dst_json = f"{base}/{slot}.json"
        if _s3_object_exists(AWS_BUCKET_NAME, src_json):
            if dry_run:
                results["moved"].append({"from": src_json, "to": dst_json, "note": "dry_run"})
            else:
                try:
                    _s3_move_object(AWS_BUCKET_NAME, src_json, dst_json)
                    results["moved"].append({"from": src_json, "to": dst_json})
                except Exception as e:
                    results["skipped"].append({"from": src_json, "error": str(e)})
        else:
            results["skipped"].append({"from": src_json, "error": "not_found"})

    # HLS folder (prefix)
    if move_hls:
        src_prefix = f"{base}/unapproved/{slot}/"
        dst_prefix = f"{base}/{slot}/"
        # Check if prefix has any objects
        try:
            head = s3_client.list_objects_v2(Bucket=AWS_BUCKET_NAME, Prefix=src_prefix, MaxKeys=1)
            has_objects = head.get("KeyCount", 0) > 0
        except Exception as e:
            return JSONResponse(status_code=500, content={"success": False, "message": f"Failed to list prefix: {str(e)}"})

        if has_objects:
            if dry_run:
                # list and report what would move
                collate = _s3_move_prefix if False else None  # no-op to keep linters calm
                items = []
                paginator = s3_client.get_paginator("list_objects_v2")
                for page in paginator.paginate(Bucket=AWS_BUCKET_NAME, Prefix=src_prefix):
                    for item in page.get("Contents", []):
                        key = item["Key"]
                        dst_key = dst_prefix + key[len(src_prefix):]
                        items.append({"from": key, "to": dst_key, "note": "dry_run"})
                results["hls"]["moved"] = items
            else:
                out = _s3_move_prefix(AWS_BUCKET_NAME, src_prefix, dst_prefix)
                results["hls"]["moved"] = out["moved"]
                results["hls"]["skipped"] = out["skipped"]
        else:
            results["hls"]["skipped"].append({"from_prefix": src_prefix, "error": "not_found"})

    return JSONResponse(content={"success": True, "message": "Promotion attempted", "result": results})

class ConversionStatus:
    PROCESSING = "processing"
    UPLOADING = "uploading"
    COMPLETED = "completed"
    FAILED = "failed"
    SERVER_BUSY = "server_busy"

@app.post("/notify-image-upload")
async def notify_image_upload(
    temple: str = Form(...),
    slot: str = Form(...),
    url: str = Form(...),
    authorization: str = Header(None)
):
    if authorization != f"Bearer {API_PASSKEY}":
        return {"success": False, "message": "Unauthorized"}

    send_push_notification(temple, slot, url)
    return {"success": True, "message": "Notification sent"}

@app.post("/upload-video")
async def upload_video(
    video: UploadFile = File(...),
    temple: str = Form(...),
    date: str = Form(...),
    slot: str = Form(...),
    passkey: str = Depends(verify_passkey)
):
    global server_busy
    
    if server_busy:
        return JSONResponse(
            content={
                "success": False,
                "message": "Server is busy processing another video. Please try again later.",
                "task_id": None
            },
            status_code=202
        )
    
    try:
        # Generate unique task ID
        task_id = str(uuid.uuid4())
        
        # Save uploaded file
        file_path = UPLOAD_DIR / f"{task_id}_{video.filename}"
        with open(file_path, "wb") as buffer:
            content = await video.read()
            buffer.write(content)
        
        # Initialize task tracking
        conversion_tasks[task_id] = {
            "status": ConversionStatus.PROCESSING,
            "temple": temple,
            "date": date,
            "slot": slot,
            "file_path": str(file_path),
            "error": None,
            "s3_path": None
        }
        
        # Start background processing
        asyncio.create_task(process_video(task_id))
        
        return JSONResponse(content={
            "success": True,
            "message": "Video uploaded successfully. Processing started.",
            "task_id": task_id
        })
        
    except Exception as e:
        return JSONResponse(
            content={
                "success": False,
                "message": f"Upload failed: {str(e)}",
                "task_id": None
            },
            status_code=500
        )

@app.post("/upload-unapproved-video")
async def upload_unapproved_video(
    video: UploadFile = File(...),
    temple: str = Form(...),
    date: str = Form(...),
    slot: str = Form(...),
    passkey: str = Depends(verify_passkey)
):
    global server_busy
    
    if server_busy:
        return JSONResponse(
            content={
                "success": False,
                "message": "Server is busy processing another video. Please try again later.",
                "task_id": None
            },
            status_code=202
        )
    
    try:
        # Generate unique task ID
        task_id = str(uuid.uuid4())
        
        # Save uploaded file
        file_path = UPLOAD_DIR / f"{task_id}_{video.filename}"
        with open(file_path, "wb") as buffer:
            content = await video.read()
            buffer.write(content)
        
        # Initialize task tracking
        conversion_tasks[task_id] = {
            "status": ConversionStatus.PROCESSING,
            "temple": temple,
            "date": date,
            "slot": slot,
            "file_path": str(file_path),
            "error": None,
            "s3_path": None
        }
        
        # Start background processing
        asyncio.create_task(process_unapproved_video(task_id))
        
        return JSONResponse(content={
            "success": True,
            "message": "Video uploaded successfully. Processing started.",
            "task_id": task_id
        })
        
    except Exception as e:
        return JSONResponse(
            content={
                "success": False,
                "message": f"Upload failed: {str(e)}",
                "task_id": None
            },
            status_code=500
        )

@app.get("/conversion-status/{task_id}")
async def get_conversion_status(task_id: str, passkey: str = Depends(verify_passkey)):
    if task_id not in conversion_tasks:
        raise HTTPException(status_code=404, detail="Task not found")
    
    task = conversion_tasks[task_id]
    
    response = {
        "status": task["status"],
        "error": task.get("error"),
        "s3_path": task.get("s3_path")
    }
    
    # Clean up completed or failed tasks after returning status
    if task["status"] in [ConversionStatus.COMPLETED, ConversionStatus.FAILED]:
        # Keep task for a bit longer in case client polls again
        pass
    
    return JSONResponse(content=response)
def send_push_notification(temple, slot, s3_url):
    slot_mapping = {
        "morning": "प्रातः",
        "evening": "सायंकाल"
    }
    slot_hindi = slot_mapping.get(slot.lower(), slot)
    print(slot_hindi)
    temple_mapping = {
        "mahakaleshwar": "महाकालेश्वर",
        "kashivishwanath": "काशी विश्वनाथ",
        "omkareshwar" : "ओंकारेश्वर",
        "somnath": "सोमनाथ",
        "vaishnodevi" : "वैष्णोदेवी",
        "dwarkadhishtemple" : "द्वारकाधीश",
        "siddhivinayak" : "सिद्धि विनायक",
        "grishneshwar" : "घृष्णेश्वर",
        "trimbkeshwar" : "त्र्यंबकेश्वर"
    }
    temple_hindi = temple_mapping.get(temple.lower(), temple)
    print(temple_hindi)

    title = f"आज के दर्शन – श्री {temple_hindi}"
    body = f"आज {slot_hindi} के दर्शन उपलब्ध हैं।"
    print(s3_url)
    message = messaging.Message(
        notification=messaging.Notification(
            title=title,
            body=body,
            image=s3_url
        ),
        topic="all_users"
    )

    try:
        response = messaging.send(message)
        print("✅ Notification sent:", response)
    except Exception as e:
        print("❌ Error sending notification:", e)

async def process_video(task_id: str):
    global server_busy
    server_busy = True
    
    try:
        task = conversion_tasks[task_id]
        file_path = Path(task["file_path"])
        temple = task["temple"]
        date = task["date"]
        slot = task["slot"]
        
        # Create processing directory for this task
        task_processing_dir = PROCESSING_DIR / task_id
        task_processing_dir.mkdir(exist_ok=True)
        
        # Move file to processing directory
        processing_file_path = task_processing_dir / file_path.name
        shutil.move(str(file_path), str(processing_file_path))
        
        # Create HLS output directory
        hls_output_path = HLS_OUTPUT_DIR / task_id
        hls_output_path.mkdir(exist_ok=True)
        
        # Convert to HLS using FFmpeg
        m3u8_file = hls_output_path / "playlist.m3u8"
        
        ffmpeg_command = [
            "ffmpeg",
            "-i", str(processing_file_path),
            "-codec:", "copy",
            "-start_number", "0",
            "-hls_time", "10",
            "-hls_list_size", "0",
            "-f", "hls",
            str(m3u8_file)
        ]
        
        # Run FFmpeg conversion
        process = await asyncio.create_subprocess_exec(
            *ffmpeg_command,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE
        )
        
        stdout, stderr = await process.communicate()
        
        if process.returncode != 0:
            raise Exception(f"FFmpeg conversion failed: {stderr.decode()}")
        
        # Update status to uploading
        conversion_tasks[task_id]["status"] = ConversionStatus.UPLOADING
        
        # Upload HLS files to S3
        s3_base_path = f"DurlabhDarshanAssets/App/aaj-ke-darshan/{temple}/{date}/{slot}"
        
        # Upload all files in the HLS output directory
        for file_item in hls_output_path.iterdir():
            if file_item.is_file():
                s3_key = f"{s3_base_path}/{file_item.name}"
                
                # Determine content type
                content_type = "application/vnd.apple.mpegurl" if file_item.suffix == ".m3u8" else "video/MP2T"
                
                with open(file_item, 'rb') as f:
                    s3_client.upload_fileobj(
                        f,
                        AWS_BUCKET_NAME,
                        s3_key,
                        ExtraArgs={'ContentType': content_type}
                    )
        
        # Update task status to completed
        conversion_tasks[task_id]["status"] = ConversionStatus.COMPLETED
        conversion_tasks[task_id]["s3_path"] = s3_base_path
        
        # Cleanup local files
        cleanup_task_files(task_id, task_processing_dir, hls_output_path)
    except Exception as e:
        conversion_tasks[task_id]["status"] = ConversionStatus.FAILED
        conversion_tasks[task_id]["error"] = str(e)
        cleanup_task_files(task_id, task_processing_dir, hls_output_path)
    finally:
        server_busy = False

async def process_unapproved_video(task_id: str):
    global server_busy
    server_busy = True
    
    try:
        task = conversion_tasks[task_id]
        file_path = Path(task["file_path"])
        temple = task["temple"]
        date = task["date"]
        slot = task["slot"]
        
        # Create processing directory for this task
        task_processing_dir = PROCESSING_DIR / task_id
        task_processing_dir.mkdir(exist_ok=True)
        
        # Move file to processing directory
        processing_file_path = task_processing_dir / file_path.name
        shutil.move(str(file_path), str(processing_file_path))
        
        # Create HLS output directory
        hls_output_path = HLS_OUTPUT_DIR / task_id
        hls_output_path.mkdir(exist_ok=True)
        
        # Convert to HLS using FFmpeg
        m3u8_file = hls_output_path / "playlist.m3u8"
        
        ffmpeg_command = [
            "ffmpeg",
            "-i", str(processing_file_path),
            "-codec:", "copy",
            "-start_number", "0",
            "-hls_time", "10",
            "-hls_list_size", "0",
            "-f", "hls",
            str(m3u8_file)
        ]
        
        # Run FFmpeg conversion
        process = await asyncio.create_subprocess_exec(
            *ffmpeg_command,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE
        )
        
        stdout, stderr = await process.communicate()
        
        if process.returncode != 0:
            raise Exception(f"FFmpeg conversion failed: {stderr.decode()}")
        
        # Update status to uploading
        conversion_tasks[task_id]["status"] = ConversionStatus.UPLOADING
        
        # Upload HLS files to S3
        s3_base_path = f"DurlabhDarshanAssets/App/aaj-ke-darshan/{temple}/{date}/unapproved/{slot}"
        
        # Upload all files in the HLS output directory
        for file_item in hls_output_path.iterdir():
            if file_item.is_file():
                s3_key = f"{s3_base_path}/{file_item.name}"
                
                # Determine content type
                content_type = "application/vnd.apple.mpegurl" if file_item.suffix == ".m3u8" else "video/MP2T"
                
                with open(file_item, 'rb') as f:
                    s3_client.upload_fileobj(
                        f,
                        AWS_BUCKET_NAME,
                        s3_key,
                        ExtraArgs={'ContentType': content_type}
                    )
        
        # Update task status to completed
        conversion_tasks[task_id]["status"] = ConversionStatus.COMPLETED
        conversion_tasks[task_id]["s3_path"] = s3_base_path
        
        # Cleanup local files
        cleanup_task_files(task_id, task_processing_dir, hls_output_path)

        
    except Exception as e:
        conversion_tasks[task_id]["status"] = ConversionStatus.FAILED
        conversion_tasks[task_id]["error"] = str(e)
        
        # Cleanup on error
        cleanup_task_files(task_id, task_processing_dir, hls_output_path)
        
    finally:
        server_busy = False

def cleanup_task_files(task_id: str, processing_dir: Path, hls_dir: Path):
    """Clean up all files related to a task"""
    try:
        # Remove processing directory
        if processing_dir.exists():
            shutil.rmtree(processing_dir)
        
        # Remove HLS output directory
        if hls_dir.exists():
            shutil.rmtree(hls_dir)
            
        # Remove original upload file if it exists
        original_file = UPLOAD_DIR / f"{task_id}_*.mp4"
        for file_path in UPLOAD_DIR.glob(f"{task_id}_*"):
            if file_path.is_file():
                file_path.unlink()
                
    except Exception as e:
        print(f"Error cleaning up files for task {task_id}: {e}")

@app.get("/")
async def root():
    return {"message": "Video Processing Server is running"}

@app.get("/health")
async def health_check():
    return {
        "status": "healthy",
        "server_busy": server_busy,
        "active_tasks": len([t for t in conversion_tasks.values() if t["status"] not in [ConversionStatus.COMPLETED, ConversionStatus.FAILED]])
    }

if __name__ == "__main__":
    # Check if FFmpeg is available
    try:
        subprocess.run(["ffmpeg", "-version"], capture_output=True, check=True)
        print("✓ FFmpeg is available")
    except (subprocess.CalledProcessError, FileNotFoundError):
        print("✗ FFmpeg is not available. Please install FFmpeg first.")
        exit(1)
    
    print("Starting Video Processing Server...")
    print("Make sure to update the serverBaseUrl in Unity script to match this server's address")
    
    uvicorn.run(app, host="0.0.0.0", port=3019)
