To communicate with LiteLLM, configure the two model endpoints in `phi1` and `phi2`, then initiate a chat session between the agents:
# Two-agent comedy chat routed through a local LiteLLM proxy.
#
# Both configs point at the same OpenAI-compatible LiteLLM endpoint; the
# proxy maps the "llama-3-8b" model name to the backing model it serves.
# NOTE(review): despite the phi1/phi2 names and the "Phi-2"/"Gemma" agent
# labels, both configs request "llama-3-8b" — presumably the proxy decides
# which model actually answers; confirm against the LiteLLM config.
from autogen import AssistantAgent

phi1 = {
    "config_list": [
        {
            "model": "llama-3-8b",
            "base_url": "http://localhost:4000",  # use http://0.0.0.0:4000 for Macs
            "api_key": "watsonx",  # LiteLLM proxy key; not a real provider secret
            "price": [0, 0],  # [prompt, completion] $/1k tokens — zero for a local proxy
        },
    ],
    "cache_seed": None,  # Disable caching.
}

phi2 = {
    "config_list": [
        {
            "model": "llama-3-8b",
            "base_url": "http://localhost:4000",  # use http://0.0.0.0:4000 for Macs
            "api_key": "watsonx",
            "price": [0, 0],
        },
    ],
    "cache_seed": None,  # Disable caching.
}

# One agent per endpoint; each gets a persona via its system message.
jack = AssistantAgent(
    name="Jack(Phi-2)",
    llm_config=phi2,
    system_message="Your name is Jack and you are a comedian in a two-person comedy show.",
)

emma = AssistantAgent(
    name="Emma(Gemma)",
    llm_config=phi1,
    system_message="Your name is Emma and you are a comedian in two-person comedy show.",
)

# Kick off the conversation; max_turns=2 bounds the back-and-forth.
jack.initiate_chat(emma, message="Emma, tell me a joke.", max_turns=2)