"""
Memory-Enhanced RAG System
Integrates conversation memory with vector search for personalized responses
"""

import numpy as np
from typing import Any, Dict, List, Optional
from datetime import datetime, timedelta
import logging

logger = logging.getLogger(__name__)


class MemoryEnhancedRAG:
    """RAG system enhanced with user memory and personalization"""

    def __init__(self, vector_store, memory_store, entity_extractor=None):
        self.vector_store = vector_store
        self.memory_store = memory_store
        self.entity_extractor = entity_extractor
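
        # Expected duck-typed collaborator interfaces, as exercised by this class
        # (documented assumptions only; nothing here enforces them):
        #   vector_store.search(query, k) -> List[Dict] with 'content' and 'score'
        #   vector_store.embedder.encode(texts) -> array-like of embeddings
        #   memory_store.get_user_memories(user_id=..., since=..., limit=...) -> List[Dict]
        #   memory_store.get_user_profile(user_id) -> Dict
        #   memory_store.get_recent_conversation(user_id, limit=...) -> List[Dict]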

    def query_with_memory(self, query: str, user_id: str, session_id: Optional[str] = None,
                          user_profile: Optional[Dict] = None,
                          conversation_context: Optional[List[Dict]] = None) -> Dict[str, Any]:
        """Enhanced query that incorporates user memory"""

        # Get relevant memories
        relevant_memories = self._get_relevant_memories(user_id, query, session_id)

        # Enhance query with memory context
        enhanced_query = self._enhance_query_with_memory(query, relevant_memories, user_profile)

        # Get RAG context
        rag_context = self.vector_store.search(enhanced_query, k=8)

        # Personalize results based on user profile
        personalized_context = self._personalize_context(rag_context, user_profile, relevant_memories)

        return {
            'enhanced_query': enhanced_query,
            'rag_context': personalized_context,
            'relevant_memories': relevant_memories,
            'user_profile': user_profile,
            'personalization_applied': self._get_personalization_summary(user_profile, relevant_memories)
        }

    def _get_relevant_memories(self, user_id: str, query: str, session_id: Optional[str] = None,
                               k: int = 3) -> List[Dict]:
        """Retrieve relevant conversation memories"""
        try:
            # Get memories from the last 30 days
            cutoff_date = datetime.now() - timedelta(days=30)

            memories = self.memory_store.get_user_memories(
                user_id=user_id,
                since=cutoff_date,
                limit=20
            )

            if not memories:
                return []

            # Find most relevant memories using semantic similarity
            query_embedding = self.vector_store.embedder.encode([query])[0]

            memory_texts = [m['content'] for m in memories]
            memory_embeddings = self.vector_store.embedder.encode(memory_texts)

            # Cosine similarity between the query and each memory embedding
            # (guard against zero-norm vectors to avoid division by zero)
            norms = np.linalg.norm(memory_embeddings, axis=1) * np.linalg.norm(query_embedding)
            similarities = np.dot(memory_embeddings, query_embedding) / np.maximum(norms, 1e-10)

            # Get top-k most similar
            top_indices = np.argsort(similarities)[-k:][::-1]
            relevant_memories = [memories[i] for i in top_indices]

            return relevant_memories

        except Exception as e:
            logger.error(f"Error retrieving memories: {e}")
            return []

    def _enhance_query_with_memory(self, original_query: str, memories: List[Dict],
                                   user_profile: Optional[Dict] = None) -> str:
        """Enhance the query with memory context"""

        enhancements = []

        # Add user profile context
        if user_profile:
            expertise = user_profile.get('expertise_level')
            interests = user_profile.get('interests', [])

            if expertise:
                enhancements.append(f"User expertise level: {expertise}")

            if interests:
                enhancements.append(f"User interests: {', '.join(interests)}")

        # Add relevant conversation context
        if memories:
            recent_topics = []
            user_questions = []

            for memory in memories[:3]:  # Use top 3 memories
                content = memory.get('content', '')

                # Extract topics and questions
                if '?' in content:
                    user_questions.append(content)
                else:
                    recent_topics.append(content[:100])  # First 100 chars

            if recent_topics:
                enhancements.append(f"Recent conversation topics: {'; '.join(recent_topics)}")

            if user_questions:
                enhancements.append(f"Previous user questions: {'; '.join(user_questions)}")

        # Combine enhancements with original query
        if enhancements:
            enhanced = (
                f"Original Query: {original_query}\n\n"
                f"User Context:\n{'; '.join(enhancements)}\n\n"
                "Please provide a response that takes the user's background "
                "and previous interactions into account."
            )
            return enhanced
        else:
            return original_query

    def _personalize_context(self, rag_context: List[Dict], user_profile: Optional[Dict] = None,
                             memories: Optional[List[Dict]] = None) -> List[Dict]:
        """Personalize RAG results based on user profile and history"""

        if not rag_context:
            return rag_context

        personalized_results = []

        for result in rag_context:
            personalized_result = result.copy()
            score_modifier = 0.0

            # Expertise-based adjustments
            expertise_level = user_profile.get('expertise_level') if user_profile else None
            if expertise_level:
                content_expertise = self._assess_content_expertise(result.get('content', ''))

                if expertise_level == 'beginner' and content_expertise == 'expert':
                    # Reduce score for advanced content shown to beginners
                    score_modifier -= 0.2
                elif expertise_level == 'expert' and content_expertise == 'beginner':
                    # Reduce score for basic content shown to experts
                    score_modifier -= 0.1
                elif expertise_level == content_expertise:
                    # Boost score for matching expertise level
                    score_modifier += 0.1

            # Interest-based adjustments
            user_interests = user_profile.get('interests', []) if user_profile else []
            if user_interests:
                content_interests = self._extract_content_interests(result.get('content', ''))

                # Boost score for matching interests
                matching_interests = set(user_interests) & set(content_interests)
                if matching_interests:
                    score_modifier += len(matching_interests) * 0.05

            # Memory-based adjustments
            if memories:
                memory_relevance = self._calculate_memory_relevance(result, memories)
                score_modifier += memory_relevance * 0.1

            # Apply score modification
            original_score = result.get('score', 0.5)
            personalized_result['score'] = max(0.0, min(1.0, original_score + score_modifier))
            personalized_result['personalization_modifier'] = score_modifier

            personalized_results.append(personalized_result)

        # Re-sort by personalized scores
        personalized_results.sort(key=lambda x: x['score'], reverse=True)

        return personalized_results

    def _assess_content_expertise(self, content: str) -> str:
        """Assess the expertise level required for content"""
        content_lower = content.lower()

        # Advanced indicators
        advanced_terms = ['optimization', 'algorithm', 'statistical', 'methodology',
                         'research', 'analysis', 'complex', 'sophisticated']

        # Basic indicators
        basic_terms = ['introduction', 'basics', 'beginner', 'simple', 'easy',
                      'getting started', 'fundamentals']

        advanced_count = sum(1 for term in advanced_terms if term in content_lower)
        basic_count = sum(1 for term in basic_terms if term in content_lower)

        if advanced_count > basic_count:
            return 'expert'
        elif basic_count > advanced_count:
            return 'beginner'
        else:
            return 'intermediate'

    def _extract_content_interests(self, content: str) -> List[str]:
        """Extract interest categories from content"""
        interests = []
        content_lower = content.lower()

        interest_keywords = {
            'business': ['business', 'entrepreneur', 'startup', 'revenue', 'profit'],
            'finance': ['finance', 'investment', 'funding', 'capital', 'roi'],
            'health': ['health', 'medical', 'therapy', 'treatment', 'wellness'],
            'legal': ['legal', 'law', 'regulation', 'compliance', 'license'],
            'psychology': ['psychology', 'mental health', 'anxiety', 'depression'],
            'history': ['history', 'historical', 'civilization', 'ancient'],
            'hydroponics': ['hydroponics', 'hydro', 'soilless', 'dwc', 'nft'],
            'cannabis': ['cannabis', 'marijuana', 'cbd', 'thc', 'cultivation']
        }

        for interest, keywords in interest_keywords.items():
            if any(keyword in content_lower for keyword in keywords):
                interests.append(interest)

        return interests

    def _calculate_memory_relevance(self, result: Dict, memories: List[Dict]) -> float:
        """Calculate how relevant this result is based on conversation memory"""
        relevance_score = 0.0
        content = result.get('content', '').lower()

        for memory in memories:
            memory_content = memory.get('content', '').lower()

            # Simple overlap calculation
            memory_words = set(memory_content.split())
            content_words = set(content.split())

            overlap = len(memory_words & content_words)
            total_words = len(memory_words | content_words)

            if total_words > 0:
                similarity = overlap / total_words
                relevance_score = max(relevance_score, similarity)

        return min(relevance_score, 0.5)  # Cap at 0.5

    def _get_personalization_summary(self, user_profile: Optional[Dict], memories: List[Dict]) -> Dict[str, Any]:
        """Get summary of personalization applied"""
        summary = {
            'expertise_level': user_profile.get('expertise_level') if user_profile else None,
            'interests': user_profile.get('interests', []) if user_profile else [],
            'memory_count': len(memories),
            'has_recent_context': len(memories) > 0
        }

        return summary

    def build_memory_context_prompt(self, user_id: str, current_query: str) -> str:
        """Build a comprehensive memory context prompt for the LLM"""

        # Get user profile
        user_profile = self.memory_store.get_user_profile(user_id)

        # Get recent conversation
        recent_conversation = self.memory_store.get_recent_conversation(user_id, limit=5)

        # Get relevant memories
        relevant_memories = self._get_relevant_memories(user_id, current_query, k=3)

        prompt_parts = [
            f"Current user query: {current_query}",
            ""
        ]

        # Add user profile context
        if user_profile:
            profile_info = []

            if user_profile.get('expertise_level'):
                profile_info.append(f"Expertise level: {user_profile['expertise_level']}")

            if user_profile.get('interests'):
                profile_info.append(f"Interests: {', '.join(user_profile['interests'])}")

            if user_profile.get('demographics'):
                demo = user_profile['demographics']
                if 'profession' in demo:
                    profile_info.append(f"Profession: {demo['profession']}")

            if profile_info:
                prompt_parts.extend([
                    "User Profile:",
                    " ".join(profile_info),
                    ""
                ])

        # Add recent conversation context
        if recent_conversation:
            prompt_parts.extend([
                "Recent Conversation:",
                "\n".join([f"- {turn.get('role', 'Unknown')}: {turn.get('content', '')[:200]}..."
                           for turn in recent_conversation[-3:]]),  # Last 3 turns
                ""
            ])

        # Add relevant memories
        if relevant_memories:
            prompt_parts.extend([
                "Relevant Previous Context:",
                "\n".join([f"- {memory.get('content', '')[:150]}..."
                           for memory in relevant_memories]),
                ""
            ])

        prompt_parts.append(
            "Please provide a personalized response that takes into account the user's background, "
            "interests, and previous interactions. Adjust your communication style and content depth "
            "appropriately for their expertise level."
        )

        return "\n".join(prompt_parts)
