#!/usr/bin/env python3
"""
Script to fetch APR data from collegefootballnews.com and map it to team names
using the cfb_mastersheet.csv mapping.
"""
import csv
import re
import subprocess
import html.parser
from html.parser import HTMLParser

def load_team_mappings():
    """Load team-name -> logo-filename mappings from cfb_mastersheet.csv.

    Reads the 'flair_name' and 'image_source' columns and keeps only rows
    whose image_source starts with 'cfb/'.  For multi-word flair names the
    school portion (everything before the final word, presumed to be the
    mascot) is also mapped to the same filename, unless that shorter key
    was already claimed by an earlier row.

    Returns:
        dict: {team name (full or school-only): filename without 'cfb/' prefix}
    """
    mappings = {}
    with open('cfb_mastersheet.csv', 'r', encoding='utf-8') as f:
        for row in csv.DictReader(f):
            flair_name = row.get('flair_name', '').strip()
            image_source = row.get('image_source', '').strip()

            if not (flair_name and image_source and image_source.startswith('cfb/')):
                continue

            # Strip only the leading 'cfb/' prefix.  (str.replace would also
            # remove any later occurrences of the substring.)
            filename = image_source[len('cfb/'):]

            # Store mapping: full name -> filename
            mappings[flair_name] = filename

            # Also map "School Name" (flair name minus its last word, the
            # presumed mascot) to the same file, first writer wins.
            parts = flair_name.split()
            if len(parts) >= 2:
                school_name = ' '.join(parts[:-1])
                mappings.setdefault(school_name, filename)

    return mappings

def fetch_apr_data():
    """Fetch APR rankings from collegefootballnews.com using curl.

    Shells out to ``curl`` (so no third-party HTTP library is required),
    strips the page down to plain text, and scrapes team/score pairs two
    ways: free-text regex patterns over the de-tagged text, then a second
    pass over any ``<tr>`` table rows in the raw HTML.  Results from both
    passes are merged into one dict; later matches overwrite earlier ones
    for the same team-name string.

    Returns:
        dict: {team name as it appears on the page: APR score (int)}.
              Empty dict on any fetch or parse failure.
    """
    url = "https://collegefootballnews.com/rankings/ncaa-apr-rankings-2025-academic-progress-rate-136-football-schools"
    
    try:
        # Use curl to fetch the page; '-s' silences progress output and the
        # browser User-Agent avoids trivial bot blocking.
        result = subprocess.run(
            ['curl', '-s', '-A', 'Mozilla/5.0', url],
            capture_output=True,
            text=True,
            timeout=30
        )
        
        if result.returncode != 0:
            print(f"Curl error: {result.stderr}")
            return {}
        
        html_content = result.stdout
        
        # Extract text content (simple approach - remove HTML tags).
        # Drop script/style bodies first so their contents don't pollute
        # the de-tagged text.
        html_content = re.sub(r'<script[^>]*>.*?</script>', '', html_content, flags=re.DOTALL | re.IGNORECASE)
        html_content = re.sub(r'<style[^>]*>.*?</style>', '', html_content, flags=re.DOTALL | re.IGNORECASE)
        
        # Replace every remaining tag with a space, then collapse whitespace
        # to a single space so the regexes below see one flat line of text.
        text_content = re.sub(r'<[^>]+>', ' ', html_content)
        text_content = ' '.join(text_content.split())
        
        apr_data = {}
        
        # Pass 1: free-text patterns.
        # Pattern 1: "Rank. Team Name APR Score" or "Team Name - APR Score"
        # Pattern 2: Look for 3-digit numbers (900-1000) near team names
        
        # Try to find numbered list patterns: "1. Team Name 990" or "1) Team Name 990"
        patterns = [
            r'(\d+)\.\s+([A-Z][^0-9]+?)\s+(\d{3,4})',  # "1. Notre Dame 990"
            r'(\d+)\)\s+([A-Z][^0-9]+?)\s+(\d{3,4})',  # "1) Notre Dame 990"
            r'([A-Z][A-Za-z\s&]+?)\s+[-–]\s+(\d{3,4})',  # "Notre Dame - 990"
            r'([A-Z][A-Za-z\s&]+?)\s+\((\d{3,4})\)',  # "Notre Dame (990)"
        ]
        
        for pattern in patterns:
            matches = re.finditer(pattern, text_content)
            for match in matches:
                groups = match.groups()
                if len(groups) >= 2:
                    # Classify each captured group rather than relying on
                    # position: a 900-1000 integer is the APR score, any
                    # non-numeric group longer than 3 chars is the team name.
                    apr_score = None
                    team_name = None
                    
                    for group in groups:
                        if group.isdigit() and 900 <= int(group) <= 1000:
                            apr_score = int(group)
                        elif not group.isdigit() and len(group) > 3:
                            team_name = group.strip()
                    
                    if team_name and apr_score:
                        # Clean up team name: collapse whitespace and drop a
                        # leading "12." / "12)" rank if it got captured.
                        team_name = re.sub(r'\s+', ' ', team_name).strip()
                        # Remove ranking number if present
                        team_name = re.sub(r'^\d+[\.\)]\s*', '', team_name)
                        apr_data[team_name] = apr_score
        
        # Pass 2: table rows.  Re-scan the raw (tag-bearing) HTML for
        # <tr> rows and classify each cell the same way as above.
        tr_matches = re.finditer(r'<tr[^>]*>(.*?)</tr>', html_content, re.DOTALL | re.IGNORECASE)
        for tr_match in tr_matches:
            tr_content = tr_match.group(1)
            # Extract text from both <td> and <th> cells.
            cells = re.findall(r'<t[dh][^>]*>(.*?)</t[dh]>', tr_content, re.DOTALL | re.IGNORECASE)
            if len(cells) >= 2:
                # Remove HTML tags from cells
                cell_texts = []
                for cell in cells:
                    text = re.sub(r'<[^>]+>', '', cell).strip()
                    cell_texts.append(text)
                
                # Look for APR score (3-4 digit number 900-1000)
                apr_score = None
                team_name = None
                
                for text in cell_texts:
                    # Check if it's an APR score (9xx or exactly 1000)
                    apr_match = re.search(r'\b(9\d{2}|1000)\b', text)
                    if apr_match:
                        apr_score = int(apr_match.group(1))
                    # Check if it looks like a team name (starts with capital, has letters)
                    elif re.match(r'^[A-Z][A-Za-z\s&]+$', text) and len(text) > 3:
                        team_name = text.strip()
                
                if team_name and apr_score:
                    apr_data[team_name] = apr_score
        
        return apr_data
        
    except Exception as e:
        # Broad catch is deliberate: this is a best-effort scraper and the
        # caller treats an empty dict as "no data".
        print(f"Error fetching APR data: {e}")
        import traceback
        traceback.print_exc()
        return {}

# Mascot suffixes to strip from team names.  Deduplicated from the original
# list with first-occurrence order preserved, so the regex alternation
# matches exactly as before; entries contain no regex metacharacters.
_MASCOT_SUFFIXES = (
    'Eagles', 'Bears', 'Tigers', 'Wildcats', 'Bulldogs', 'Seminoles',
    'Cardinals', 'Panthers', 'Devils', 'Jackets', 'Hurricanes', 'Wolfpack',
    'Tar Heels', 'Mustangs', 'Orange', 'Cavaliers', 'Hokies', 'Demon Deacons',
    'Black Knights', 'Midshipmen', 'Mean Green', 'Owls', 'Green Wave',
    'Golden Hurricane', 'Blazers', 'Bulls', 'Roadrunners', 'Cyclones',
    'Jayhawks', 'Buffaloes', 'Cougars', 'Bearcats', 'Longhorns', 'Sooners',
    'Cowboys', 'Red Raiders', 'Mountaineers', 'Horned Frogs', 'Aggies',
    'Rebels', 'Razorbacks', 'Gamecocks', 'Gators', 'Volunteers', 'Commodores',
    'Crimson Tide', 'War Eagles', 'Sun Devils', 'Ducks', 'Beavers', 'Bruins',
    'Trojans', 'Utes', 'Cardinal', 'Golden Bears', 'Huskies', 'Wolf Pack',
    'Spartans', 'Wolverines', 'Buckeyes', 'Nittany Lions', 'Badgers',
    'Hawkeyes', 'Gophers', 'Boilermakers', 'Hoosiers', 'Fighting Illini',
    'Cornhuskers', 'Terrapins', 'Scarlet Knights',
)

# Compiled once at import time instead of rebuilding the (very large)
# pattern on every call.
_MASCOT_SUFFIX_RE = re.compile(
    r'\s+(' + '|'.join(_MASCOT_SUFFIXES) + r')\s*$', re.IGNORECASE
)

# Common alternate spellings / abbreviations -> canonical mapping-sheet name.
_NAME_ALIASES = {
    'Army West Point': 'Army',
    'Army': 'Army',
    'Navy': 'Navy',
    'Notre Dame': 'Notre Dame',
    'BYU': 'BYU',
    'UConn': 'Connecticut',
    'UMass': 'Massachusetts',
    'NC State': 'NC State',
    'North Carolina State': 'NC State',
    'Virginia Tech': 'Virginia Tech',
    'Georgia Tech': 'Georgia Tech',
    'Miami': 'Miami',
    'Miami (FL)': 'Miami',
    'USF': 'USF',
    'South Florida': 'USF',
    'UCF': 'UCF',
    'Central Florida': 'UCF',
    'FAU': 'FAU',
    'Florida Atlantic': 'FAU',
    'FIU': 'FIU',
    'Florida International': 'FIU',
    'ECU': 'ECU',
    'East Carolina': 'ECU',
}

def normalize_team_name(name):
    """Normalize a scraped team name for matching against the mapping sheet.

    Strips a trailing mascot suffix (e.g. "Georgia Bulldogs" -> "Georgia"),
    then resolves known aliases (e.g. "South Florida" -> "USF").

    Args:
        name: Team name as scraped from the APR page.

    Returns:
        str: Normalized (stripped) team name.
    """
    name = _MASCOT_SUFFIX_RE.sub('', name)
    return _NAME_ALIASES.get(name, name).strip()

def match_apr_to_teams(apr_data, team_mappings):
    """Match APR data to team names using mappings"""
    matched = {}

    # Reverse index: filename -> every team-name key that maps to it.
    teams_by_file = {}
    for name, fname in team_mappings.items():
        teams_by_file.setdefault(fname, []).append(name)

    for raw_name, score in apr_data.items():
        normalized = normalize_team_name(raw_name)

        # First attempt: exact match against the mapping keys; record the
        # score under the first team name that shares the same filename.
        fname = team_mappings.get(normalized)
        if fname is not None and fname in teams_by_file:
            matched[teams_by_file[fname][0]] = score
            continue

        # Fallback: bidirectional substring match on normalized names.
        needle = normalized.lower()
        for name, fname in team_mappings.items():
            candidate = normalize_team_name(name).lower()
            if needle in candidate or candidate in needle:
                matched[name] = score
                break

    return matched

if __name__ == '__main__':
    print("Loading team mappings from CSV...")
    team_mappings = load_team_mappings()
    print(f"Loaded {len(team_mappings)} team mappings")
    
    print("\nFetching APR data from collegefootballnews.com...")
    apr_data = fetch_apr_data()
    print(f"Fetched {len(apr_data)} APR entries")
    
    if apr_data:
        print("\nSample APR data:")
        for i, (team, score) in enumerate(list(apr_data.items())[:5]):
            print(f"  {team}: {score}")
    
    print("\nMatching APR data to team names...")
    matched_apr = match_apr_to_teams(apr_data, team_mappings)
    print(f"Matched {len(matched_apr)} teams")
    
    # Generate apr_data.py content
    print("\nGenerating apr_data.py...")
    with open('apr_data.py', 'w') as f:
        f.write('# APR Rankings for 5-7 teams\n')
        f.write('# Format: {team_name: apr_score}\n')
        f.write('# Higher APR scores are better\n')
        f.write('# Data sourced from collegefootballnews.com\n\n')
        f.write('APR_RANKINGS = {\n')
        
        # Sort by APR score (descending) for readability
        sorted_apr = sorted(matched_apr.items(), key=lambda x: x[1], reverse=True)
        for team_name, apr_score in sorted_apr:
            # !r produces a valid Python string literal even when the team
            # name contains a quote (e.g. "Hawai'i"); a hand-built
            # '{team_name}' would generate a syntax error in apr_data.py.
            f.write(f"    {team_name!r}: {apr_score},\n")
        
        f.write('}\n')
    
    print(f"\n✓ Successfully created apr_data.py with {len(matched_apr)} teams")
    print("\nNote: You may need to manually verify and adjust team name mappings")
    print("if some teams weren't matched correctly.")

