import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from ..config import Config
import logging
import json
from .gemini_service import gemini_service
from .chatgpt_service import chatgpt_service

logger = logging.getLogger(__name__)

class ChatService:
    """Routes chat messages to Gemini, ChatGPT, or a local Ollama server.

    The model name selects the backend: names starting with ``gemini`` go to
    ``gemini_service``, names starting with ``gpt`` (or containing
    ``chatgpt``) go to ``chatgpt_service``; anything else is sent to the
    Ollama HTTP chat endpoint through a session with automatic retries.
    """

    # Sampling defaults shared by the Gemini and ChatGPT branches.
    # NOTE(review): unlike the Ollama branch, these are not read from Config —
    # confirm whether they should be configurable.
    DEFAULT_TEMPERATURE = 0.7
    DEFAULT_MAX_TOKENS = 2000
    DEFAULT_TOP_P = 0.9

    def __init__(self):
        self.config = Config()                 # endpoint/retry settings
        self.session = self._create_session()  # pooled session with retries

    def _create_session(self):
        """Build a requests session that retries transient HTTP failures.

        Retry counts, backoff, retryable status codes and methods all come
        from ``Config``; the adapter is mounted for both http and https.
        """
        session = requests.Session()
        retry_strategy = Retry(
            total=self.config.RETRY_TOTAL,
            backoff_factor=self.config.RETRY_BACKOFF_FACTOR,
            status_forcelist=self.config.RETRY_STATUS_FORCELIST,
            allowed_methods=self.config.RETRY_ALLOWED_METHODS
        )
        adapter = HTTPAdapter(max_retries=retry_strategy)
        session.mount("http://", adapter)
        session.mount("https://", adapter)
        return session

    def send_message(self, message, model=None, system_prompt=None, context=None, stream=False):
        """Send *message* to the backend selected by *model*.

        Args:
            message: User prompt text.
            model: Backend/model name; ``None`` falls back to the configured
                Ollama model.
            system_prompt: Optional system instruction (the Ollama branch
                defaults to a generic assistant prompt).
            context: Optional Ollama conversation context (ignored by the
                Gemini/ChatGPT branches).
            stream: When true, the Ollama response is streamed server-side
                but still aggregated here into one complete reply.

        Returns:
            For the Ollama branch, a dict with ``response``, ``context`` and
            ``done`` keys; otherwise whatever the Gemini/ChatGPT service
            returns.

        Raises:
            Exception: with a user-facing message on timeout, connection
                failure, HTTP/request errors, or a malformed response.
        """
        try:
            if model and model.startswith('gemini'):
                return self._send_gemini(message, system_prompt, model)
            if model and (model.startswith('gpt') or 'chatgpt' in model.lower()):
                return self._send_chatgpt(message, system_prompt)
            return self._send_ollama(message, model, system_prompt, context, stream)

        except requests.exceptions.Timeout as e:
            logger.error("Request to API timed out")
            raise Exception("The request timed out. Please try again.") from e

        except requests.exceptions.ConnectionError as e:
            logger.error("Failed to connect to API")
            raise Exception("Could not connect to the AI service. Please check your connection.") from e

        except requests.exceptions.RequestException as e:
            logger.error("Error in API request: %s", e)
            raise Exception(f"Error communicating with the AI service: {str(e)}") from e

        except Exception as e:
            # Also wraps the "Invalid response format" error raised below,
            # matching the original behavior of a single user-facing message.
            logger.error("Unexpected error: %s", e)
            raise Exception(f"An unexpected error occurred: {str(e)}") from e

    def _send_gemini(self, message, system_prompt, model):
        """Delegate to the Gemini service using the shared sampling defaults."""
        logger.info("Using Gemini model: %s", model)
        return gemini_service.generate_response(
            message=message,
            system_prompt=system_prompt,
            temperature=self.DEFAULT_TEMPERATURE,
            max_tokens=self.DEFAULT_MAX_TOKENS,
            top_p=self.DEFAULT_TOP_P,
            model=model
        )

    def _send_chatgpt(self, message, system_prompt):
        """Delegate to the ChatGPT service using the shared sampling defaults.

        NOTE(review): the model name is not forwarded here, unlike the Gemini
        branch — confirm whether ``chatgpt_service`` accepts a ``model`` kwarg.
        """
        return chatgpt_service.generate_response(
            message=message,
            system_prompt=system_prompt,
            temperature=self.DEFAULT_TEMPERATURE,
            max_tokens=self.DEFAULT_MAX_TOKENS,
            top_p=self.DEFAULT_TOP_P
        )

    def _send_ollama(self, message, model, system_prompt, context, stream):
        """POST the chat request to the Ollama API and normalize the reply."""
        url = f"{self.config.OLLAMA_URL}{self.config.CHAT_ENDPOINT}"

        payload = {
            "model": model or self.config.OLLAMA_MODEL,
            "messages": [
                {"role": "system", "content": system_prompt or "You are a helpful AI assistant."},
                {"role": "user", "content": message}
            ],
            "context": context or [],
            "stream": stream,
            "options": {
                "temperature": self.config.OLLAMA_TEMPERATURE,
                "top_p": self.config.OLLAMA_TOP_P,
                "top_k": self.config.OLLAMA_TOP_K
            }
        }

        logger.info("Sending request to Ollama API: %s", url)
        logger.debug("Request payload: %s", payload)

        response = self.session.post(
            url,
            json=payload,
            timeout=self.config.OLLAMA_TIMEOUT,
            stream=stream
        )
        response.raise_for_status()

        if stream:
            return self._consume_stream(response)

        result = response.json()
        logger.info("Successfully received response from Ollama API")
        logger.debug("Response: %s", result)

        if 'message' not in result or 'content' not in result['message']:
            raise Exception("Invalid response format from Ollama API")

        return {
            "response": result['message']['content'],
            "context": result.get('context', []),
            "done": True
        }

    def _consume_stream(self, response):
        """Aggregate an NDJSON streamed Ollama reply into a single string."""
        full_response = ""
        for line in response.iter_lines():
            if not line:
                continue
            try:
                chunk = json.loads(line)
            except json.JSONDecodeError:
                continue  # best-effort: skip malformed stream lines
            if 'message' in chunk and 'content' in chunk['message']:
                full_response += chunk['message']['content']

        logger.info("Successfully received streaming response from Ollama API")
        logger.debug("Response: %s", full_response)

        # The streaming chat endpoint does not yield a context list here.
        return {
            "response": full_response,
            "context": [],
            "done": True
        }

    @staticmethod
    def get_user_chats():
        """Return the current user's chats, newest first."""
        # Imported lazily to avoid circular imports at module load time.
        from src.models import Chat
        from flask_login import current_user
        return Chat.query.filter_by(user_id=current_user.id).order_by(Chat.created_at.desc()).all()

# Module-level singleton: importers share one ChatService (and thus one
# pooled, retry-configured HTTP session). Instantiation runs at import time.
chat_service = ChatService() 