from fastapi import APIRouter, HTTPException, Depends
from typing import Dict, Any
import csv
from pathlib import Path
from datetime import datetime

from app.models import User
from app.routers.auth import get_current_user

router = APIRouter()

# Base directory containing the BI CSV exports
DATA_DIR = Path(__file__).parent.parent.parent / "data" / "bi"

@router.get("/api/bi/quote-performance")
async def get_quote_performance(
    branch: str = "bunbury",
    department: str = "residential",
    data_type: str = "monthly_rolling_quote_data",
    user: User = Depends(get_current_user),
) -> Dict[str, Any]:
    """
    Get quote performance data from the BI CSV exports for the given branch,
    department, and data type.
    """
    try:
        # Map branch names to file prefixes
        branch_mapping = {
            "bunbury": "bunbury",
            "busselton": "busselton", 
            "mandurah": "mandurah"
        }
        
        # Map department names to file prefixes
        department_mapping = {
            "residential": "residential",
            "air": "air",
            "commercial": "commerical",  # Note: matches the actual filename
            "solar": "solar",
            "electrical": "electrical"
        }
        
        # Validate inputs
        if branch not in branch_mapping:
            raise HTTPException(status_code=400, detail=f"Invalid branch: {branch}")
        if department not in department_mapping:
            raise HTTPException(status_code=400, detail=f"Invalid department: {department}")
        if data_type not in ["monthly_rolling_quote_data", "rolling_job_data", "month_to_month_sale_conversions"]:
            raise HTTPException(status_code=400, detail=f"Invalid data_type: {data_type}")
        
        data = []
        
        if data_type == "monthly_rolling_quote_data":
            # For monthly rolling quote data, load both count and value data for the selected department
            # Try new naming pattern first (with branch), then fall back to old pattern
            count_filename_new = f"export_{department_mapping[department]}_count_{branch}.csv"
            value_filename_new = f"export_{department_mapping[department]}_value_{branch}.csv"
            count_filename_old = f"export_{department_mapping[department]}_count.csv"
            value_filename_old = f"export_{department_mapping[department]}_value.csv"
            
            count_path = DATA_DIR / count_filename_new
            value_path = DATA_DIR / value_filename_new

            # Fall back to the old naming pattern if the new files don't exist
            if not count_path.exists():
                count_path = DATA_DIR / count_filename_old
            if not value_path.exists():
                value_path = DATA_DIR / value_filename_old
            
            if not count_path.exists() and not value_path.exists():
                return {
                    "success": False,
                    "error": f"No data available for {branch.title()} {department.title()} monthly rolling quote data",
                    "data": [],
                    "chart_title": f"Monthly Rolling Quote Data - {department.title()}",
                    "description": f"No data files found for {department} department",
                    "branch": branch,
                    "department": department,
                    "data_type": data_type
                }
            
            # Load count data
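            # Columns read from the count export: Date Created, Total Quoted,
            # Converted Quotes, Lost Quotes, Open Quotes, Conversion %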
            count_data = {}
            if count_path.exists():
                with open(count_path, 'r', encoding='utf-8') as file:
                    reader = csv.DictReader(file)
                    for row in reader:
                        if row.get('Date Created'):
                            count_data[row["Date Created"]] = {
                                "total_quoted_count": int(row["Total Quoted"]),
                                "converted_quotes_count": int(row["Converted Quotes"]),
                                "lost_quotes_count": int(row["Lost Quotes"]),
                                "open_quotes_count": int(row["Open Quotes"]),
                                "conversion_percentage": round(float(row["Conversion %"]) * 100, 1)
                            }
            
            # Load value data
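            # Columns read from the value export: Date Created, Total Quoted,
            # Converted Quotes, Lost Quotes, Open Quotes, Closing Rate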
            value_data = {}
            if value_path.exists():
                with open(value_path, 'r', encoding='utf-8') as file:
                    reader = csv.DictReader(file)
                    for row in reader:
                        if row.get('Date Created'):
                            value_data[row["Date Created"]] = {
                                "total_quoted_value": round(float(row["Total Quoted"]), 0),
                                "converted_quotes_value": round(float(row["Converted Quotes"]), 0),
                                "lost_quotes_value": round(float(row["Lost Quotes"]), 0),
                                "open_quotes_value": round(float(row["Open Quotes"]) if row["Open Quotes"] else 0, 0),
                                "closing_rate": round(float(row["Closing Rate"]) * 100, 2)
                            }
            
            # Combine the data
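            # Each combined row merges counts and values by date, e.g. (illustrative values):
            # {"date_created": "...", "total_quoted_count": 42, "conversion_percentage": 45.0,
            #  "total_quoted_value": 125000.0, "closing_rate": 35.5, ...}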
            all_dates = set(count_data.keys()) | set(value_data.keys())
            for date in sorted(all_dates):
                combined_row = {"date_created": date}
                combined_row.update(count_data.get(date, {}))
                combined_row.update(value_data.get(date, {}))
                data.append(combined_row)
            
            chart_title = f"Monthly Rolling Quote Data - {department.title()}"
            
        elif data_type == "rolling_job_data":
            # For rolling job data, we need to load multiple CSV files for commercial department
            if department != "commercial":
                return {
                    "success": False,
                    "error": f"Rolling job data is only available for Commercial department",
                    "data": [],
                    "chart_title": f"Rolling Job Data - {department.title()}",
                    "description": f"Rolling job data not available for {department} department",
                    "branch": branch,
                    "department": department,
                    "data_type": data_type
                }
            
            # Resolve the commercial rolling job exports
            # (note the "commerical" spelling in the jobs completed filename)
            jobs_completed_path = DATA_DIR / "jobs_completed_commerical.csv"
            leadtimes_path = DATA_DIR / "leadtimes_commercial.csv"
            first_attendance_path = DATA_DIR / "first_attendance_commercial.csv"
            
            if not jobs_completed_path.exists() or not leadtimes_path.exists() or not first_attendance_path.exists():
                return {
                    "success": False,
                    "error": f"No rolling job data available for {branch.title()} Commercial",
                    "data": [],
                    "chart_title": f"Rolling Job Data - Commercial",
                    "description": f"Missing rolling job data files",
                    "branch": branch,
                    "department": department,
                    "data_type": data_type
                }
            
            # Load jobs completed data
            jobs_completed_data = []
            with open(jobs_completed_path, 'r', encoding='utf-8') as file:
                reader = csv.DictReader(file)
                for row in reader:
                    if row.get('Completed Date'):
                        # Convert ISO timestamps (e.g. "2024-01-01T00:00:00+10:00") to "Month, Year";
                        # other values pass through unchanged
                        date_str = row["Completed Date"]
                        if 'T' in date_str:
                            date_obj = datetime.fromisoformat(date_str)
                            formatted_date = date_obj.strftime('%B, %Y')
                        else:
                            formatted_date = date_str
                        
                        jobs_completed_data.append({
                            "date": formatted_date,
                            "count": int(row["Count"])
                        })
            
            # Load lead times data
            leadtimes_data = []
            with open(leadtimes_path, 'r', encoding='utf-8') as file:
                reader = csv.DictReader(file)
                for row in reader:
                    if row.get('Cost Centre'):
                        leadtimes_data.append({
                            "cost_centre": row["Cost Centre"],
                            "time_to_site": round(float(row["Time to Site"]), 2)
                        })
            
            # Load first attendance data
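            # Only the first populated "Time To Site" value is used; the export is
            # assumed to contain a single summary row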
            first_attendance_value = None
            with open(first_attendance_path, 'r', encoding='utf-8') as file:
                reader = csv.DictReader(file)
                for row in reader:
                    if row.get('Time To Site'):
                        first_attendance_value = round(float(row["Time To Site"]), 2)
                        break
            
            # Combine all data
            data = {
                "jobs_completed": jobs_completed_data,
                "leadtimes": leadtimes_data,
                "first_attendance": first_attendance_value
            }
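            # For rolling_job_data the "data" payload is a dict rather than a list,
            # e.g. (illustrative values):
            # {"jobs_completed": [{"date": "January, 2024", "count": 12}, ...],
            #  "leadtimes": [{"cost_centre": "...", "time_to_site": 3.5}, ...],
            #  "first_attendance": 2.4}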
            
            chart_title = f"Rolling Job Data - Commercial"
            
        else:
            # For other data types, use the original logic
            filename = f"export_{department_mapping[department]}_{data_type}.csv"
            csv_path = DATA_DIR / filename
            
            if not csv_path.exists():
                return {
                    "success": False,
                    "error": f"No data available for {branch.title()} {department.title()} {data_type}",
                    "data": [],
                    "chart_title": f"{data_type.replace('_', ' ').title()} - {department.title()}",
                    "description": f"No data file found for {department} department",
                    "branch": branch,
                    "department": department,
                    "data_type": data_type
                }
            
            with open(csv_path, 'r', encoding='utf-8') as file:
                reader = csv.DictReader(file)
                for row in reader:
                    if not row.get('Date Created'):
                        continue
                    # Minimal parsing for other data types: the date plus a single numeric value
                    data.append({
                        "date_created": row["Date Created"],
                        "value": float(row.get("Value") or 0)
                    })
            
            chart_title = f"{data_type.replace('_', ' ').title()} - {department.title()}"
        
        return {
            "success": True,
            "data": data,
            "chart_title": chart_title,
            "description": f"Data visualization for {department} department",
            "branch": branch,
            "department": department,
            "data_type": data_type
        }
        
    except HTTPException:
        # Re-raise validation errors (400s) without converting them to 500s
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error reading BI data: {str(e)}") from e

@router.get("/api/bi/health")
async def bi_health_check(user: User = Depends(get_current_user)):
    """
    Health check for BI endpoints (authenticated).
    """
    csv_path1 = DATA_DIR / "export_air_count.csv"
    csv_path2 = DATA_DIR / "export_commerical_count.csv"
    return {
        "status": "healthy" if csv_path1.exists() and csv_path2.exists() else "data_file_missing",
        "air_count_file_exists": csv_path1.exists(),
        "commercial_count_file_exists": csv_path2.exists(),
        "available_files": [f.name for f in (Path(__file__).parent.parent.parent / "data" / "bi").glob("*.csv")]
    }
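
# Registration sketch (module path "app.routers.bi" is an assumption; adjust to the project layout):
#   from app.routers import bi
#   app.include_router(bi.router)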
