#!/usr/bin/env python3
"""
Reprocess all PDFs with chunking ONLY (no enrichment).
This creates raw chunks that can then be enriched via the UI button.
"""

import json
import time
from pathlib import Path
from pdf_processor import PDFProcessor
from vector_store import VectorStore
from config import (
    PDF_DIR,
    DOCUMENTS_DIR,
    EMBEDDINGS_DIR,
    METADATA_DIR,
)

# Status tracking file
STATUS_FILE = METADATA_DIR / "reprocessing_status.json"

def save_status(status, progress, message, current_file="", files_processed=0, total_files=0, chunks_created=0):
    """Save reprocessing status to file"""
    STATUS_FILE.parent.mkdir(parents=True, exist_ok=True)
    status_data = {
        'status': status,  # 'running', 'completed', 'failed'
        'progress': progress,  # 0-100
        'message': message,
        'current_file': current_file,
        'files_processed': files_processed,
        'total_files': total_files,
        'chunks_created': chunks_created,
        'updated': time.time()
    }
    with open(STATUS_FILE, 'w') as f:
        json.dump(status_data, f, indent=2)

def reprocess_all_pdfs_chunks_only():
    """Reprocess all PDFs with new chunking system (no enrichment)"""
    print("=" * 60)
    print("📄 REPROCESSING ALL PDFs - CHUNKING ONLY")
    print("=" * 60)
    print()
    
    processor = PDFProcessor()
    
    # Get all PDF files
    pdf_files = list(PDF_DIR.glob("*.pdf"))
    
    if not pdf_files:
        print("⚠️  No PDF files found in pdf_directory/")
        save_status('failed', 0, 'No PDF files found', "", 0, 0, 0)
        return []
    
    total_files = len(pdf_files)
    print(f"Found {total_files} PDF files\n")
    
    save_status('running', 0, f'Starting reprocessing of {total_files} files...', "", 0, total_files, 0)
    
    processed_chunks = []
    successful_files = 0
    failed_files = 0
    start_time = time.time()
    
    for i, pdf_file in enumerate(pdf_files, 1):
        progress = int((i / total_files) * 90)  # 90% for file processing, 10% for vector store
        current_file = pdf_file.name
        save_status('running', progress, f'Processing {current_file}...', current_file, i, total_files, len(processed_chunks))
        
        print(f"[{i}/{total_files}] ({progress}%) Processing: {current_file}")
        try:
            chunks = processor.process_pdf(pdf_file)
            if chunks:
                processed_chunks.extend(chunks)
                successful_files += 1
                save_status('running', progress, f'✅ {current_file}: {len(chunks)} chunks', current_file, i, total_files, len(processed_chunks))
                print(f"  ✅ Created {len(chunks)} chunks (Total: {len(processed_chunks)})\n")
            else:
                failed_files += 1
                save_status('running', progress, f'⚠️  {current_file}: No chunks created', current_file, i, total_files, len(processed_chunks))
                print(f"  ⚠️  No chunks created (may have been skipped)\n")
        except Exception as e:
            failed_files += 1
            save_status('running', progress, f'❌ {current_file}: Error - {str(e)[:50]}', current_file, i, total_files, len(processed_chunks))
            print(f"  ❌ Error processing {current_file}: {e}\n")
            import traceback
            traceback.print_exc()
    
    duration = time.time() - start_time
    print(f"✅ Processed {successful_files} PDFs successfully, {failed_files} failed/skipped")
    print(f"✅ Total chunks created: {len(processed_chunks)}")
    print(f"✅ Time taken: {duration:.1f} seconds")
    
    save_status('running', 90, f'File processing complete. {successful_files} successful, {failed_files} failed. {len(processed_chunks)} total chunks.', "", total_files, total_files, len(processed_chunks))
    
    return processed_chunks


def rebuild_vector_store():
    """Rebuild the vector store with all processed chunks"""
    print("\n🔨 Rebuilding vector store...")
    save_status('running', 90, 'Rebuilding vector store...', "", 0, 0, 0)
    
    # Load all chunks from processed documents
    all_chunks = []
    
    if DOCUMENTS_DIR.exists():
        chunk_files = list(DOCUMENTS_DIR.glob("*_chunks.json"))
        print(f"Found {len(chunk_files)} chunk files")
        
        for chunk_file in chunk_files:
            try:
                with open(chunk_file, 'r') as f:
                    chunks = json.load(f)
                    if chunks and isinstance(chunks, list):
                        if chunks and isinstance(chunks[0], dict):
                            # New format: list of dicts with content and metadata
                            all_chunks.extend(chunks)
                        else:
                            # Old format: list of strings
                            for i, chunk_content in enumerate(chunks):
                                all_chunks.append({
                                    'content': chunk_content,
                                    'metadata': {'source': chunk_file.stem.replace('_chunks', ''), 'chunk_id': i}
                                })
            except Exception as e:
                print(f"  ⚠️  Error loading {chunk_file.name}: {e}")
    
    if not all_chunks:
        print("⚠️  No chunks found to rebuild vector store")
        save_status('failed', 0, 'No chunks found to rebuild vector store', "", 0, 0, 0)
        return
    
    print(f"Total chunks to index: {len(all_chunks)}")
    
    # Initialize vector store
    try:
        vector_store = VectorStore()
        
        # Add all chunks to vector store in batches
        print("Adding chunks to vector store...")
        batch_size = 100
        total_batches = (len(all_chunks) + batch_size - 1) // batch_size
        
        for batch_num, i in enumerate(range(0, len(all_chunks), batch_size), 1):
            batch = all_chunks[i:i+batch_size]
            # Format chunks for VectorStore (expects list of dicts with 'content' and 'metadata')
            formatted_chunks = []
            for chunk_data in batch:
                content = chunk_data.get('content', '')
                metadata = chunk_data.get('metadata', {})
                if content:
                    formatted_chunks.append({
                        'content': content,
                        'metadata': metadata
                    })
            
            if formatted_chunks:
                vector_store.add_documents(formatted_chunks)
                progress = 90 + int((batch_num / total_batches) * 10)
                save_status('running', progress, f'Indexing chunks: batch {batch_num}/{total_batches}', "", 0, 0, len(all_chunks))
                print(f"  Progress: {min(i+batch_size, len(all_chunks))}/{len(all_chunks)} chunks indexed ({progress}%)...")
        
        # Save the vector store
        print("Saving vector store...")
        save_status('running', 99, 'Saving vector store...', "", 0, 0, len(all_chunks))
        vector_store.save_index()
        vector_store.save_metadata()
        
        print(f"✅ Vector store rebuilt with {len(all_chunks)} chunks")
        
    except Exception as e:
        print(f"❌ Error rebuilding vector store: {e}")
        save_status('failed', 0, f'Error rebuilding vector store: {str(e)}', "", 0, 0, 0)
        import traceback
        traceback.print_exc()


def main():
    """Main reprocessing workflow - chunks only"""
    print("=" * 60)
    print("🔄 REPROCESSING ALL PDFs - CHUNKING ONLY")
    print("=" * 60)
    print("This will:")
    print("  1. Process all PDFs with new optimized chunking")
    print("  2. Create raw chunks (no enrichment)")
    print("  3. Rebuild vector store")
    print("  4. Ready for enrichment via UI button")
    print("=" * 60)
    print()
    
    # Step 1: Reprocess all PDFs
    processed_chunks = reprocess_all_pdfs_chunks_only()
    
    if not processed_chunks:
        print("⚠️  No chunks were processed. Exiting.")
        return
    
    # Step 2: Rebuild vector store
    rebuild_vector_store()
    
    # Final status
    total_chunks = len(processed_chunks)
    pdf_files = list(PDF_DIR.glob("*.pdf"))
    total_files = len(pdf_files)
    if total_chunks > 0:
        save_status('completed', 100, f'Reprocessing complete! Created {total_chunks} chunks from {total_files} files.', "", total_files, total_files, total_chunks)
    else:
        save_status('completed', 100, 'Reprocessing complete but no chunks were created.', "", total_files, total_files, 0)
    
    print("\n" + "=" * 60)
    print("✅ CHUNKING COMPLETE!")
    print("=" * 60)
    print(f"\nSummary:")
    print(f"  • Total chunks created: {len(processed_chunks)}")
    print(f"  • Vector store rebuilt")
    print(f"  • Raw chunks ready for search")
    print(f"\n📝 Next Step:")
    print(f"  • Use the 'Enrich Documents with Grok AI' button in the UI")
    print(f"  • This will enhance chunks with AI-generated summaries, key points, and themes")
    print(f"\n💡 Status saved to: {STATUS_FILE}")


if __name__ == "__main__":
    main()

