Azure model
litellm_config.yaml
model_list:
  - model_name: azure-gpt-4o-mini
    litellm_params:
      model: azure/gpt-4o-mini
      api_base: os.environ/AZURE_API_BASE
      api_key: os.environ/AZURE_API_KEY
      api_version: os.environ/AZURE_API_VERSION
AZURE_API_KEY
AZURE_API_BASE
AZURE_API_VERSION
docker run -v $(pwd)/litellm_config.yaml:/app/config.yaml \
  -e AZURE_API_KEY="your_api_key" \
  -e AZURE_API_BASE="your_api_base_url" \
  -e AZURE_API_VERSION="your_api_version" \
  -p 4000:4000 ghcr.io/berriai/litellm:main-latest \
  --config /app/config.yaml --detailed_debug
http://0.0.0.0:4000
config.yaml
...
13:49:43 - LiteLLM Proxy:DEBUG: proxy_server.py:1507 - loaded config={
  "model_list": [
    {
      "model_name": "azure-gpt-4o-mini",
      "litellm_params": {
        "model": "azure/gpt-4o-mini",
        "api_base": "os.environ/AZURE_API_BASE",
        "api_key": "os.environ/AZURE_API_KEY",
        "api_version": "os.environ/AZURE_API_VERSION"
      }
    }
  ]
}
...
config_list
from autogen import AssistantAgent, UserProxyAgent, LLMConfig

# Point AG2 at the locally running LiteLLM proxy. The model name must match
# the `model_name` entry in litellm_config.yaml served by the proxy.
config = LLMConfig(
    model="azure-gpt-4o-mini",
    base_url="http://0.0.0.0:4000",
)

# Driver agent that never asks a human for input, so the chat runs
# fully automatically.
user_proxy = UserProxyAgent(
    name="user_proxy",
    human_input_mode="NEVER",
)

# Agents constructed inside this context inherit the LLM configuration.
with config:
    assistant = AssistantAgent(name="assistant")

# Kick off a short, bounded conversation with the assistant.
user_proxy.initiate_chat(
    recipient=assistant,
    message="Solve the following equation: 2x + 3 = 7",
    max_turns=3,
)