OpenAI model
litellm_config.yaml
# litellm_config.yaml — LiteLLM proxy model registry.
# Each entry maps a public model_name (what clients request) to the
# underlying provider model and credentials.
model_list:
  - model_name: openai-gpt-4o-mini
    litellm_params:
      model: openai/gpt-4o-mini
      # "os.environ/..." tells LiteLLM to read the key from the
      # OPENAI_API_KEY environment variable at runtime.
      api_key: os.environ/OPENAI_API_KEY
OPENAI_API_KEY
# Start the LiteLLM proxy in Docker:
#   -v  mounts the local config into the container at /app/config.yaml
#   -e  passes the OpenAI API key through to the container environment
#   -p  exposes the proxy on port 4000
# --detailed_debug enables verbose startup/request logging.
docker run -v $(pwd)/litellm_config.yaml:/app/config.yaml \
  -e OPENAI_API_KEY="your_api_key" \
  -p 4000:4000 ghcr.io/berriai/litellm:main-latest --config /app/config.yaml --detailed_debug
http://0.0.0.0:4000
config.yaml
...
14:15:59 - LiteLLM Proxy:DEBUG: proxy_server.py:1507 - loaded config={
  "model_list": [
    {
      "model_name": "openai-gpt-4o-mini",
      "litellm_params": {
        "model": "openai/gpt-4o-mini",
        "api_key": "os.environ/OPENAI_API_KEY"
      }
    }
  ]
}
...
config_list
from autogen import AssistantAgent, UserProxyAgent, LLMConfig

# Point the agents at the local LiteLLM proxy rather than the provider
# directly; the proxy resolves "openai-gpt-4o-mini" from its model_list.
proxy_llm_config = LLMConfig(
    model="openai-gpt-4o-mini",
    base_url="http://0.0.0.0:4000",
)

# Stand-in for the human user; "NEVER" means it runs without prompting.
user_proxy = UserProxyAgent(
    name="user_proxy",
    human_input_mode="NEVER",
)

# Agents created inside the context manager pick up proxy_llm_config.
with proxy_llm_config:
    assistant = AssistantAgent(name="assistant")
    user_proxy.initiate_chat(
        recipient=assistant,
        message="Solve the following equation: 2x + 3 = 7",
        max_turns=3,
    )