#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
NCKU 爬蟲程式 - 取得 HTML 頁面和 PDF 文件的 URL
功能：
  - 從 seed_sites 資料表取得起始 URL
  - 遵循指定深度進行爬取（depth ≤ 3）
  - 過濾與正規化 URL
  - 收集 HTML 和 PDF URL
  - 存儲到 crawl_items 資料表
"""

import os
import time
import hashlib
import logging
from urllib.parse import urljoin, urlparse, urldefrag
from collections import defaultdict
from datetime import datetime

import requests
from bs4 import BeautifulSoup
from dotenv import load_dotenv
import mysql.connector
from mysql.connector import Error as MySQLError
import urllib3

# Suppress SSL warnings (requests are made with verify=False below)
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# Lower the urllib3 log level (silences connection-pool debug logs)
logging.getLogger('urllib3').setLevel(logging.WARNING)

# Configure logging
logging.basicConfig(
    # INFO keeps the output to the essentials; switch to DEBUG for per-URL detail
    level=logging.INFO,
    #level=logging.DEBUG,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Load .env
load_dotenv()

DB_CONFIG = {
    'host': os.getenv('DB_HOST'),
    'user': os.getenv('DB_USER'),
    'password': os.getenv('DB_PWD'),
    'database': os.getenv('DATABASE'),
    'charset': 'utf8mb4'
}
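
# Example .env (illustrative values only; the variable names are the ones read above):
#   DB_HOST=localhost
#   DB_USER=crawler
#   DB_PWD=change-me
#   DATABASE=ncku_crawl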

# Crawler configuration
MAX_DEPTH = 5
MAX_PAGES_PER_SEED = 500
TIMEOUT = 10
RATE_LIMIT = 0.5  # seconds between requests
MAX_REDIRECTS = 3
RECRAWL_DAYS = 7  # re-crawl a URL only after more than N days have passed
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'

# Language markers to exclude (compared against the lowercased URL)
EXCLUDE_LANGUAGES = {'en', 'ja', 'zh_cn', 'zh-cn', 'eng'}
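# For example, paths containing '/en/' or '_eng' (illustrative markers from the set
# above) are treated as non-Chinese language variants and skipped.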


class URLFilter:
    """URL filtering and normalization"""
    
    def __init__(self, skip_url_file='skip_url.txt',
                 skip_webpage_file='skip_webpage.txt',
                 skip_redirect_file='skip_redirect.txt'):
        self.skip_url_prefixes = self._load_file(skip_url_file)
        self.skip_webpages = self._load_file(skip_webpage_file)
        self.skip_redirects = self._load_file(skip_redirect_file)
        logger.info(f"Loaded {len(self.skip_url_prefixes)} excluded URL prefixes")
        logger.info(f"Loaded {len(self.skip_webpages)} excluded web pages")
        logger.info(f"Loaded {len(self.skip_redirects)} excluded redirect targets")
    
    @staticmethod
    def _load_file(filename):
        """Load an exclusion list from a file (one entry per line)"""
        if not os.path.exists(filename):
            logger.warning(f"File not found: {filename}")
            return set()
        
        with open(filename, 'r', encoding='utf-8') as f:
            return set(line.strip() for line in f if line.strip())
    
    @staticmethod
    def normalize_url(url):
        """
        Normalize a URL:
        - drop the fragment (#...)
        - drop the query string (?...)
        - force the scheme to https
        - strip any trailing slash from the path
        """
        # Drop the fragment
        url = urldefrag(url)[0]
        
        # Parse the URL
        parsed = urlparse(url)
        
        # Force HTTPS
        scheme = 'https'
        
        # Keep only the path (the query string is discarded)
        netloc = parsed.netloc
        path = parsed.path if parsed.path else '/'
        
        # Strip the trailing /
        path = path.rstrip('/')
        
        normalized = f"{scheme}://{netloc}{path}"
        return normalized
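
    # Worked example (illustrative input):
    #   normalize_url('http://web.ncku.edu.tw/p/index.php?Lang=zh-tw#news')
    #   -> 'https://web.ncku.edu.tw/p/index.php'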
    
    def should_skip_url(self, url):
        """Return (True, reason) if this URL should be skipped, otherwise (False, None)"""
        
        # Check the URL format (reject @ in the host unless it is basic-auth user:pass@)
        parsed = urlparse(url)
        if '@' in parsed.netloc and not (':' in parsed.netloc.split('@')[0]):
            # No user:password before the @, so treat the URL as malformed
            return True, "invalid URL format with @"
        
        # Check the excluded URL prefixes
        for skip_prefix in self.skip_url_prefixes:
            if url.startswith(skip_prefix):
                return True, f"matches skip_url prefix: {skip_prefix}"
        
        # Check the excluded web pages
        if url in self.skip_webpages:
            return True, "in skip_webpage list"
        
        # Keep only the ncku.edu.tw domain
        if 'ncku.edu.tw' not in url:
            return True, "not ncku.edu.tw domain"
        
        # Check the excluded language markers (EXCLUDE_LANGUAGES entries are lowercase)
        for lang in EXCLUDE_LANGUAGES:
            if f"/{lang}/" in url.lower() or f"_{lang}" in url.lower():
                return True, f"contains excluded language: {lang}"
        
        # Skip URLs that end with sso.php
        if url.endswith('sso.php'):
            return True, "ends with sso.php"
        
        return False, None
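
    # For instance (illustrative URLs): 'https://www.example.com/' fails the domain
    # check and 'https://web.ncku.edu.tw/sso.php' the sso.php check, while
    # 'https://web.ncku.edu.tw/p/index.php' passes, assuming it is not listed in any
    # of the skip files.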
    
    def is_pdf_url(self, url):
        """Check whether the URL points to a PDF"""
        path = urlparse(url).path.lower()
        return path.endswith('.pdf')
    
    def is_html_url(self, url):
        """Check whether the URL points to an HTML-like page"""
        path = urlparse(url).path.lower()
        # .html / .htm / .php pages, or extension-less paths (directory-style URLs)
        return path.endswith(('.html', '.htm', '.php')) or \
               (not path.endswith('.pdf') and '.' not in path.split('/')[-1])
    
    def should_follow_url(self, url):
        """Check whether this URL should be followed"""
        skip, reason = self.should_skip_url(url)
        return not skip, reason


class CrawlDatabase:
    """Database operations"""
    
    def __init__(self):
        self.conn = None
        self.connect()
    
    def connect(self):
        """Connect to the database"""
        try:
            self.conn = mysql.connector.connect(**DB_CONFIG)
            logger.info("Database connection established")
        except MySQLError as e:
            logger.error(f"Database connection failed: {e}")
            raise
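
    # The tables touched by this class are assumed to look roughly as follows
    # (a sketch inferred from the queries below; the real DDL may differ):
    #
    #   CREATE TABLE seed_sites (
    #       id        INT PRIMARY KEY,
    #       site_url  VARCHAR(255),
    #       is_active TINYINT
    #   );
    #
    #   CREATE TABLE crawl_items (
    #       id           INT AUTO_INCREMENT PRIMARY KEY,
    #       url          VARCHAR(768),
    #       url_hash     CHAR(64),      -- SHA-256 hex digest, see WebCrawler._get_url_hash
    #       ext          VARCHAR(8),    -- 'php' or 'pdf'
    #       seed_site_id INT,
    #       is_active    TINYINT,       -- 1 = active, 0 = failed
    #       first_seen   DATETIME,
    #       last_crawled DATETIME
    #   );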
    
    def get_seed_sites(self):
        """Fetch the active URLs from seed_sites"""
        cursor = self.conn.cursor(dictionary=True)
        try:
            cursor.execute("""
                SELECT id, site_url FROM seed_sites
                WHERE is_active = 1
            """)
            sites = cursor.fetchall()
            logger.info(f"Fetched {len(sites)} active seed URLs")
            return sites
        finally:
            cursor.close()
    
    def load_existing_urls(self):
        """Load the url_hash, last crawl time and is_active flag of every URL already in the database"""
        cursor = self.conn.cursor()
        try:
            cursor.execute("""
                SELECT url_hash, last_crawled, is_active FROM crawl_items
            """)
            results = cursor.fetchall()
            # Format: {url_hash: (last_crawled, is_active)}
            url_data = {row[0]: (row[1], row[2]) for row in results}
            logger.info(f"Loaded {len(url_data)} existing URLs from the database")
            return url_data
        finally:
            cursor.close()
    
    def check_url_exists(self, url_hash):
        """檢查 URL 是否已存在"""
        cursor = self.conn.cursor()
        try:
            cursor.execute("""
                SELECT id FROM crawl_items WHERE url_hash = %s
            """, (url_hash,))
            return cursor.fetchone() is not None
        finally:
            cursor.close()
    
    def insert_or_update_url(self, url, url_hash, seed_site_id, ext=None, is_active=1):
        """Insert a URL into the database, or update it if it already exists
        
        Args:
            url: the URL
            url_hash: hash of the URL
            seed_site_id: ID of the seed site
            ext: file extension (php, pdf) - used only on the first insert; left unchanged on updates
            is_active: active flag (1 = active, 0 = failed)
        """
        cursor = self.conn.cursor()
        try:
            # Check whether the URL already exists
            cursor.execute("""
                SELECT id FROM crawl_items WHERE url_hash = %s
            """, (url_hash,))
            
            result = cursor.fetchone()
            now = datetime.now()
            
            if result:
                # Update: only last_crawled and is_active change; ext stays as-is
                cursor.execute("""
                    UPDATE crawl_items
                    SET last_crawled = %s, is_active = %s
                    WHERE url_hash = %s
                """, (now, is_active, url_hash))
                
                # Choose the log label according to is_active
                if is_active == 0:
                    status = "[FAILED]"
                else:
                    status = "[UPDATED]"
                action = "UPDATE"
            else:
                # Insert (a row is only created on the first successful crawl)
                cursor.execute("""
                    INSERT INTO crawl_items
                    (url, url_hash, ext, seed_site_id, is_active, first_seen, last_crawled)
                    VALUES (%s, %s, %s, %s, %s, %s, %s)
                """, (url, url_hash, ext, seed_site_id, is_active, now, now))
                status = "[NEW]"
                action = "INSERT"
            
            self.conn.commit()
            
            # Logging
            if ext:
                logger.info(f"{status} [{ext.upper()}] {url}")
            else:
                logger.info(f"{status} {url}")
            
            return action
        except MySQLError as e:
            logger.error(f"Database operation failed: {e}")
            self.conn.rollback()
            return None
        finally:
            cursor.close()
    
    def close(self):
        """關閉資料庫連接"""
        if self.conn:
            self.conn.close()


class WebCrawler:
    """Web crawler"""
    
    def __init__(self):
        self.filter = URLFilter()
        self.db = CrawlDatabase()
        self.session = requests.Session()
        self.session.headers.update({'User-Agent': USER_AGENT})
        self.visited_urls = set()  # de-duplication within this run (prevents infinite recursion over the same page tree)
        self.html_urls = set()  # collected HTML URLs
        self.pdf_urls = set()  # collected PDF URLs
        self.stats = defaultdict(int)
        # Load the already-crawled URLs and their timestamps from the database (for incremental crawling)
        self.url_timestamps = self.db.load_existing_urls()
    
    def _extract_domain(self, url):
        """Extract a coarse domain key from the URL (the last two labels of the host)"""
        parsed = urlparse(url)
        parts = parsed.netloc.split('.')
        if len(parts) >= 2:
            return '.'.join(parts[-2:])  # e.g. 'www.ncku.edu.tw' -> 'edu.tw'
        return parsed.netloc
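
    # Note: for *.edu.tw hosts the last two labels are just 'edu.tw', so
    # _should_follow_domain on its own is a loose check; the 'ncku.edu.tw'
    # substring test in URLFilter.should_skip_url is what actually keeps the
    # crawl inside NCKU.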
    
    def _should_follow_domain(self, current_url, base_url):
        """檢查是否應該跟隨（限制在同一 domain）"""
        current_domain = self._extract_domain(current_url)
        base_domain = self._extract_domain(base_url)
        return current_domain == base_domain
    
    def _absolute_http_link(self, link, current_url):
        """將相對 URL 轉換為絕對 URL"""
        return urljoin(current_url, link) if link else None
    
    def _get_url_hash(self, url):
        """生成 URL 的 SHA256 hash"""
        return hashlib.sha256(url.encode('utf-8')).hexdigest()
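
    # hashlib.sha256(...).hexdigest() is always 64 hex characters; the url_hash
    # column is assumed to be wide enough to store it (see the schema sketch above).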
    
    def _should_process_url(self, url, seed_url):
        """Check whether the URL should be processed (the URL is expected to have been normalized in crawl_page)"""
        # Already visited in this crawler run? - prevents infinite recursion over the same page tree
        if url in self.visited_urls:
            logger.debug(f"[SKIP] {url} - already visited in this run")
            return False, "already visited in this crawl"
        
        # Apply the filtering rules
        should_follow, reason = self.filter.should_follow_url(url)
        if not should_follow:
            logger.debug(f"[SKIP] {url} - {reason}")
            return False, reason
        
        # Apply the domain restriction
        if not self._should_follow_domain(url, seed_url):
            logger.debug(f"[SKIP] {url} - different domain")
            return False, "different domain"
        
        # Has the URL been crawled before? (database lookup)
        url_hash = self._get_url_hash(url)
        
        # The seed URL itself is always crawled (re-crawling the seed home page lets us discover new links)
        is_seed_url = (self.filter.normalize_url(url) == self.filter.normalize_url(seed_url))
        
        if url_hash in self.url_timestamps:
            last_crawled, is_active = self.url_timestamps[url_hash]
            
            # Marked inactive: skip permanently
            if is_active == 0:
                logger.info(f"[SKIP] {url} - marked as failed (is_active=0)")
                return False, "is_active=0"
            
            # Seed URLs are not subject to the time limit and may always be re-crawled
            if is_seed_url:
                logger.debug(f"[RECRAWL] {url} - seed URL, always re-crawled")
                return True, None
            
            # Check whether a re-crawl is due (based on the time interval)
            days_since = (datetime.now() - last_crawled).days
            
            if days_since < RECRAWL_DAYS:
                logger.debug(f"[SKIP] {url} - crawled {days_since} days ago (< {RECRAWL_DAYS} days)")
                return False, f"crawled {days_since} days ago"
            else:
                logger.debug(f"[RECRAWL] {url} - crawled {days_since} days ago (>= {RECRAWL_DAYS} days)")
        
        return True, None

    def _fetch_page(self, url):
        """Fetch the page content"""
        try:
            response = self.session.get(
                url,
                timeout=TIMEOUT,
                allow_redirects=True,
                verify=False  # skip SSL certificate verification
            )
            response.raise_for_status()
            return response
        except requests.exceptions.Timeout:
            logger.warning(f"[TIMEOUT] {url}")
            self.stats['timeout'] += 1
            return None
        except requests.exceptions.SSLError:
            # SSLError subclasses ConnectionError in requests, so it must be caught first
            logger.warning(f"[SSL ERROR] {url}")
            self.stats['ssl_error'] += 1
            return None
        except requests.exceptions.ConnectionError:
            logger.warning(f"[CONNECTION ERROR] {url}")
            self.stats['connection_error'] += 1
            return None
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 404:
                logger.warning(f"[404] {url}")
                self.stats['http_404'] += 1
            elif e.response.status_code == 500:
                logger.warning(f"[500] {url}")
                self.stats['http_500'] += 1
            else:
                logger.warning(f"[HTTP ERROR {e.response.status_code}] {url}")
                self.stats['http_error'] += 1
            return None
        except Exception as e:
            logger.warning(f"[ERROR] {url}: {e}")
            self.stats['other_error'] += 1
            return None
    
    def _extract_links(self, html, current_url):
        """Extract links from the HTML"""
        try:
            soup = BeautifulSoup(html, 'html.parser')
            links = []
            
            for link in soup.find_all('a', href=True):
                href = link.get('href', '').strip()
                if href:
                    absolute_url = self._absolute_http_link(href, current_url)
                    if absolute_url:
                        links.append(absolute_url)
            
            return links
        except Exception as e:
            logger.warning(f"HTML parse error {current_url}: {e}")
            return []

    def _check_pdf_exists(self, url):
        """Check that a PDF actually exists (try a HEAD request first, fall back to a streaming GET)"""
        try:
            # Try a HEAD request first (faster)
            response = self.session.head(
                url,
                timeout=TIMEOUT,
                allow_redirects=True,
                verify=False
            )
            if response.status_code < 400:
                return True
            
            # HEAD failed, fall back to GET in stream mode (reads headers only, does not download the body)
            response = self.session.get(
                url,
                timeout=TIMEOUT,
                allow_redirects=True,
                verify=False,
                stream=True
            )
            return response.status_code < 400
        except requests.exceptions.RequestException:
            return False
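
    # Design note: some servers reject or mishandle HEAD requests, hence the GET
    # fallback. With stream=True requests does not download the response body until
    # it is explicitly read, so the fallback costs little more than the headers.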

    def _extract_pdfs(self, html, current_url):
        """Extract PDF links from the HTML"""
        try:
            soup = BeautifulSoup(html, 'html.parser')
            pdfs = []
            
            # Look for <a> tags whose href contains .pdf
            for link in soup.find_all('a', href=True):
                href = link.get('href', '').strip()
                if '.pdf' in href.lower():
                    # Convert to an absolute URL (keep the original case of the href)
                    absolute_url = self._absolute_http_link(href, current_url)
                    if absolute_url:
                        pdfs.append(absolute_url)
            
            # Look for <embed> and <object> tags
            for tag in soup.find_all(['embed', 'object']):
                src = tag.get('src') or tag.get('data')
                if src and '.pdf' in src.lower():
                    absolute_url = self._absolute_http_link(src, current_url)
                    if absolute_url:
                        pdfs.append(absolute_url)
            
            return pdfs
        except Exception as e:
            logger.warning(f"PDF extraction error {current_url}: {e}")
            return []
    
    def crawl_page(self, url, seed_url, seed_site_id, depth=0):
        """Crawl a single page"""
        if depth > MAX_DEPTH:
            return 0
        
        # Normalize the URL (drop fragment and query string)
        url = self.filter.normalize_url(url)
        
        # Should this URL be processed at all?
        should_process, reason = self._should_process_url(url, seed_url)
        if not should_process:
            #logger.debug(f"[DEBUG] {url} - not processed: {reason}")
            return 0
        
        # Mark as visited (within this crawler run)
        self.visited_urls.add(url)
        url_hash = self._get_url_hash(url)
        
        # Is this a re-crawl of a known URL?
        is_recrawl = url_hash in self.url_timestamps
        
        # Fetch the page
        response = self._fetch_page(url)
        if not response:
            # Fetch failed: on a re-crawl mark the URL as is_active=0, on a first crawl just skip it
            if is_recrawl:
                logger.warning(f"[RECRAWL FAILED] {url} - marking is_active=0")
                self.db.insert_or_update_url(url, url_hash, seed_site_id, is_active=0)
                # Keep url_timestamps in sync
                self.url_timestamps[url_hash] = (datetime.now(), 0)
            return 0
        
        # Handle redirects (requests already followed them)
        final_url = response.url
        final_url = self.filter.normalize_url(final_url)
        
        # Check whether the redirect target is excluded
        if final_url != url:
            # Was the redirected-to URL already visited?
            if final_url in self.visited_urls:
                logger.debug(f"[REDIRECT ALREADY VISITED] {url} -> {final_url}")
                return 0
            
            if final_url in self.filter.skip_redirects:
                logger.info(f"[REDIRECT EXCLUDED] {url} -> {final_url}")
                self.stats['skip_redirect'] += 1
                return 0
            
            # Re-apply the filtering rules to the redirected-to URL
            should_follow, reason = self.filter.should_follow_url(final_url)
            if not should_follow:
                logger.info(f"[REDIRECT FILTERED OUT] {url} -> {final_url}")
                return 0
            
            # Continue with the final URL
            url = final_url
            url_hash = self._get_url_hash(url)
            self.visited_urls.add(url)  # record the final URL after the redirect
        
        # Determine the content type
        content_type = response.headers.get('content-type', '').lower()
        is_html = 'text/html' in content_type
        is_pdf = 'application/pdf' in content_type or url.endswith('.pdf')
        
        if not is_html and not is_pdf:
            logger.debug(f"[IGNORED] {url} - unsupported content type")
            return 0
        
        # HTML page
        if is_html:
            # Store the HTML URL (always recorded with ext='php')
            self.html_urls.add(url)
            self.db.insert_or_update_url(url, url_hash, seed_site_id, ext='php')
            # Keep url_timestamps in sync
            self.url_timestamps[url_hash] = (datetime.now(), 1)
            
            # Extract PDF links
            try:
                pdfs = self._extract_pdfs(response.content, url)
                for pdf_url in pdfs:
                    pdf_url = self.filter.normalize_url(pdf_url)
                    
                    # If the PDF was crawled recently, do not re-check it and do not touch the DB
                    should_proc, _ = self._should_process_url(pdf_url, seed_url)
                    if not should_proc:
                        continue
                    
                    # Apply the filtering rules
                    should_follow, _ = self.filter.should_follow_url(pdf_url)
                    if should_follow and self._should_follow_domain(pdf_url, seed_url):
                        # Already visited this PDF? (de-duplication across seeds)
                        if pdf_url in self.visited_urls:
                            logger.debug(f"[PDF ALREADY VISITED] {pdf_url}")
                            continue
                        
                        pdf_hash = self._get_url_hash(pdf_url)
                        # Verify that the PDF actually exists
                        if self._check_pdf_exists(pdf_url):
                            # Add to the visited set
                            self.visited_urls.add(pdf_url)
                            self.pdf_urls.add(pdf_url)
                            self.db.insert_or_update_url(pdf_url, pdf_hash, seed_site_id, ext='pdf')
                            # Keep url_timestamps in sync
                            self.url_timestamps[pdf_hash] = (datetime.now(), 1)
                            self.stats['pdf_found'] += 1
                        else:
                            logger.debug(f"[PDF NOT FOUND] {pdf_url}")
                            self.stats['pdf_notfound'] += 1
            except Exception as e:
                logger.warning(f"PDF extraction failed {url}: {e}")
            
            # Extract and crawl further HTML links (only while the depth limit allows it)
            if depth < MAX_DEPTH:
                try:
                    links = self._extract_links(response.content, url)
                    count = 0
                    for link in links:
                        if count >= MAX_PAGES_PER_SEED:
                            break
                        
                        link = self.filter.normalize_url(link)
                        
                        # Only recurse into HTML links
                        if self.filter.is_html_url(link):
                            time.sleep(RATE_LIMIT)  # rate limit between requests
                            count += self.crawl_page(
                                link, seed_url, seed_site_id, depth + 1
                            )
                            if count >= MAX_PAGES_PER_SEED:
                                break
                except Exception as e:
                    logger.warning(f"Link extraction failed {url}: {e}")
            
            self.stats['html_crawled'] += 1
            return 1
        
        # PDF document
        elif is_pdf:
            # Double-check: make sure the URL is in visited_urls
            if url not in self.visited_urls:
                self.visited_urls.add(url)
            self.pdf_urls.add(url)
            self.db.insert_or_update_url(url, url_hash, seed_site_id, ext='pdf')
            # Keep url_timestamps in sync
            self.url_timestamps[url_hash] = (datetime.now(), 1)
            self.stats['pdf_crawled'] += 1
            return 1
    
    def crawl_seed(self, seed_site):
        """Crawl a single seed site"""
        seed_id = seed_site['id']
        seed_url = seed_site['site_url']
        
        logger.info(f"\n{'='*80}")
        logger.info(f"Seed Site: {seed_url} (ID: {seed_id})")
        logger.info(f"{'='*80}")
        
        # Start crawling (visited_urls is kept for the whole run, across all seed sites)
        self.crawl_page(seed_url, seed_url, seed_id, depth=0)
    
    def crawl_all(self):
        """Crawl every seed URL"""
        seed_sites = self.db.get_seed_sites()
        # DEBUG: crawl only https://donate.ncku.edu.tw
        #seed_sites = [{'id': 999, 'site_url': 'https://donate.ncku.edu.tw'}]
        #logger.info(f"DEBUG MODE: crawling only the test URL - https://donate.ncku.edu.tw")
        
        total_stats = defaultdict(int)
        
        for seed_site in seed_sites:
            self.crawl_seed(seed_site)
            
            # Accumulate statistics (self.stats is cumulative across seeds; total_stats is currently not reported)
            for key, value in self.stats.items():
                total_stats[key] += value
            
            # Rate limit between seeds
            time.sleep(1)
        
        # Report overall totals
        logger.info(f"\n{'='*60}")
        cursor = self.db.conn.cursor()
        try:
            cursor.execute("SELECT COUNT(*) FROM crawl_items WHERE ext = 'php'")
            html_count = cursor.fetchone()[0]
            cursor.execute("SELECT COUNT(*) FROM crawl_items WHERE ext = 'pdf'")
            pdf_count = cursor.fetchone()[0]
            logger.info(f"Crawl finished - HTML: {html_count}, PDF: {pdf_count}")
        finally:
            cursor.close()
        logger.info(f"{'='*60}")
    
    def close(self):
        """關閉爬蟲"""
        self.db.close()
        self.session.close()


def main():
    """Main entry point"""
    crawler = WebCrawler()
    try:
        crawler.crawl_all()
    except KeyboardInterrupt:
        logger.info("\nCrawl interrupted by user")
    finally:
        crawler.close()


if __name__ == '__main__':
    main()
