from langchain_ollama import OllamaLLM


def load_llm():
    """Return an LLM handle backed by a locally running Ollama server."""
    return OllamaLLM(
        model="llama3.2",                   # model must already be pulled (`ollama pull llama3.2`)
        base_url="http://localhost:11434",  # default Ollama endpoint
        temperature=0,                      # deterministic, greedy-style decoding
    )
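
# A minimal sanity check, assuming an Ollama server is running on the default
# port with llama3.2 available; the prompt text here is illustrative only.
if __name__ == "__main__":
    llm = load_llm()
    print(llm.invoke("Reply with one short sentence: what model are you?"))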