"""
Grok Fast API Integration
Switches RAG system from local LLM to Grok Fast Reasoning Model API
"""

import json
import os
import time
from typing import Any, Dict, List, Optional

import requests

from config import MAX_NEW_TOKENS, TEMPERATURE

# Grok Fast API Configuration
GROK_API_KEY = "xai-bYupNvIKS2S5I41aDUOKj7XSzoIitscspt9HvhesX0wK58Bnggck8gPAbK1jXvgc7PzKZJntt8HIheqE"
GROK_API_URL = "https://api.x.ai/v1/chat/completions"
GROK_MODEL = "grok-4-fast-reasoning"



def call_grok_api(
    prompt: str,
    system_prompt: Optional[str] = None,
    max_tokens: int = MAX_NEW_TOKENS,
    temperature: float = TEMPERATURE,
    conversation_history: Optional[List[Dict[str, str]]] = None
) -> Optional[str]:
    """
    Send a single chat-completion request to the Grok Fast API.

    Args:
        prompt: The user prompt/question
        system_prompt: Optional system prompt for context
        max_tokens: Maximum tokens to generate
        temperature: Sampling temperature
        conversation_history: Optional prior messages, prepended before the
            current user turn

    Returns:
        Generated response text, or None on any error (HTTP failure,
        timeout, malformed response)
    """
    # Assemble the OpenAI-style message list: system prompt first (if any),
    # then history, then the current user turn.
    messages: List[Dict[str, str]] = []
    if system_prompt:
        messages.append({"role": "system", "content": system_prompt})
    if conversation_history:
        messages.extend(conversation_history)
    messages.append({"role": "user", "content": prompt})

    request_body = {
        "model": GROK_MODEL,
        "messages": messages,
        "max_tokens": max_tokens,
        "temperature": temperature,
        "stream": False
    }
    auth_headers = {
        "Authorization": f"Bearer {GROK_API_KEY}",
        "Content-Type": "application/json"
    }

    started = time.time()
    try:
        # print(f"Calling Grok API... (Model: {GROK_MODEL})") # Debug log
        response = requests.post(
            GROK_API_URL,
            headers=auth_headers,
            json=request_body,
            timeout=45  # Increased timeout slightly
        )
        duration = time.time() - started

        # Any non-200 status is reported and swallowed; callers get None.
        if response.status_code != 200:
            print(f"Grok API Error: {response.status_code} - {response.text}")
            return None

        data = response.json()
        choices = data.get('choices')
        if not choices:
            print(f"Grok API: No choices in response: {data}")
            return None
        return choices[0]['message']['content']

    except requests.exceptions.Timeout:
        print(f"Grok API Timeout after {time.time() - started:.2f}s")
        return None
    except requests.exceptions.RequestException as e:
        print(f"Grok API Request Error: {e}")
        return None
    except Exception as e:
        # Last-resort boundary catch (e.g. JSON decode errors on a 200 body);
        # logged rather than propagated so the caller can degrade gracefully.
        print(f"Grok API Unexpected Error: {e}")
        return None


def generate_answer_with_grok(
    context: str,
    question: str,
    system_instructions: Optional[str] = None
) -> str:
    """
    Produce a context-grounded answer to a question via the Grok Fast API.

    Args:
        context: Formatted RAG context from vector search
        question: User's question
        system_instructions: Optional override for the default system prompt

    Returns:
        Generated answer, or a fixed fallback message when context is
        missing or the API call fails
    """
    # Without retrieved context there is nothing to ground an answer in.
    if not context:
        return "I don't have enough information to answer that question based on the available documents."

    default_instructions = """You are a helpful AI assistant that answers questions based on provided context.
    - Use only the information provided in the context
    - If the context doesn't contain relevant information, say so explicitly
    - Be concise and accurate
    - For technical questions (pH, temperature, measurements), extract specific numbers/ranges
    - Do NOT provide generic information if the specific answer isn't in the context
    - Cite sources when mentioned in the context"""
    system_prompt = system_instructions if system_instructions else default_instructions

    # Embed the retrieved context and the question into a single user turn.
    user_prompt = f"""Based on the following context, answer the user's question.

CONTEXT:
{context}

QUESTION: {question}

ANSWER:"""

    response_text = call_grok_api(
        prompt=user_prompt,
        system_prompt=system_prompt,
        max_tokens=800,  # Reasonable limit for answers
        temperature=0.7
    )
    # call_grok_api returns None (or could return an empty string) on failure.
    if response_text:
        return response_text
    return "I encountered an error generating a response. Please try again."