from autogen import AssistantAgent, LLMConfig, UserProxyAgent
from autogen.code_utils import content_str
seed = 42  # cache seed; set to None to disable response caching
# Load model configurations from OAI_CONFIG_LIST and filter to the Gemini model
llm_config_gemini = LLMConfig.from_json(
    path="OAI_CONFIG_LIST",
    seed=seed,
).where(model="gemini-2.0-flash-lite")
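
# A minimal OAI_CONFIG_LIST for this setup might look like the JSON below;
# the api_key value is a placeholder, and "google" is the api_type AG2 uses
# for Gemini models:
#
# [
#     {
#         "api_type": "google",
#         "model": "gemini-2.0-flash-lite",
#         "api_key": "YOUR_GEMINI_API_KEY"
#     }
# ]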

# Agents created inside the context manager pick up llm_config_gemini
with llm_config_gemini:
    assistant = AssistantAgent(
        name="assistant",
        system_message=(
            "You are a helpful coding assistant. "
            "After your code has been executed and you have a code execution result, say 'ALL DONE'. "
            "Do not say 'ALL DONE' in the same response as code."
        ),
        max_consecutive_auto_reply=3,
    )

# The user proxy executes the assistant's code locally (no Docker) and ends
# the chat once the assistant replies with "ALL DONE"; it needs no LLM config,
# so it is created outside the context manager
user_proxy = UserProxyAgent(
    name="user_proxy",
    code_execution_config={"work_dir": "coding", "use_docker": False},
    human_input_mode="NEVER",
    is_termination_msg=lambda x: "ALL DONE" in content_str(x.get("content")),
)

result = user_proxy.initiate_chat(
    recipient=assistant,
    message="Sort the array with Bubble Sort: [4, 1, 5, 2, 3]",
)
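
# initiate_chat returns a ChatResult; as a sketch of post-run inspection
# (attribute names assumed from AG2's ChatResult), you can print the chat
# summary and the accumulated token cost:
print(result.summary)
print(result.cost)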