from ollama import Client, ChatResponse

client = Client(host='http://localhost:11434')


def askGPT(systemprompt, userprompt, format="json"):
    messages = [
        {'role': 'system', 'content': systemprompt},
        {'role': 'user', 'content': userprompt},
    ]
    # Single call, no tools; request structured output via `format`
    resp: ChatResponse = client.chat(
        model='gpt-oss',  # be explicit about the tag
        format=format,    # pass the parameter through instead of hardcoding "json"
        messages=messages,
    )
    print("############### content ###############")
    print(resp.message.content)
    print("############### Message ###############")
    print(resp['message'])  # or print(resp.message)
    return resp['message']


def howToSayHello() -> str:
    """Return the customary greeting."""
    print("function called")
    return "Hallo Welt"


def askGPTwithTools(systemprompt, userprompt):
    messages = [
        {'role': 'system', 'content': systemprompt},
        {'role': 'user', 'content': userprompt},
    ]
    # 1) First call: let the model decide whether to call the tool
    resp: ChatResponse = client.chat(
        model='gpt-oss',  # be explicit about the tag
        messages=messages,
        tools=[howToSayHello],
    )

    tcalls = resp.message.tool_calls or []
    if tcalls:
        # 2) Execute the requested tool(s)
        tool_msgs = []
        for tc in tcalls:
            if tc.function.name == 'howToSayHello':
                result = howToSayHello()
            else:
                result = f"Unknown tool: {tc.function.name}"
            tool_msgs.append({
                "role": "tool",
                "content": result,
                # include id if present; Ollama doesn't always require it
                **({"tool_call_id": tc.id} if getattr(tc, "id", None) else {}),
            })

        # 3) Send tool outputs back, then ask the model to finish
        messages = messages + [resp.message] + tool_msgs
        final = client.chat(model='gpt-oss', messages=messages)
        print("############### content ###############")
        print(final.message.content)
        # Return the model's final answer, not the intermediate tool-call message
        return final['message']

    # Model chose not to call the tool; just print what it said
    print("############### content ###############")
    print(resp.message.content)
    print("############### Message ###############")
    print(resp['message'])  # or print(resp.message)
    return resp['message']
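

# --- Minimal usage sketch. Assumptions not in the original: an Ollama server
# --- is running at localhost:11434 with the 'gpt-oss' model already pulled,
# --- and the prompt strings below are illustrative placeholders.
if __name__ == "__main__":
    # Plain chat with JSON-formatted output (exercises askGPT)
    askGPT(
        "You are a helpful assistant. Answer in JSON.",
        "List three primary colors.",
    )

    # Chat with the howToSayHello tool available; the model may choose
    # to call it, in which case the tool result is fed back for a final answer
    askGPTwithTools(
        "You are a helpful assistant. Use the provided tools when appropriate.",
        "How do I say hello?",
    )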