# Basic example of running an Ollama model from Python.
# Requires the `ollama` package (`pip install ollama`) and a running Ollama server.

import ollama

prompt = "Please tell a short joke in ten words or less."
# Request a single completion; the model must already be available locally
# (e.g. pulled with `ollama pull gemma3:4b`).
response = ollama.generate(model='gemma3:4b', prompt=prompt)

print("Reply generated in %f seconds:" % (1e-9*response.total_duration))
print(response.response)
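
# Optional variant (a minimal sketch, assuming the same gemma3:4b model is
# available locally): with stream=True, ollama.generate() yields partial
# response chunks as they arrive, so the reply can be printed incrementally
# instead of waiting for the whole generation to finish.
for chunk in ollama.generate(model='gemma3:4b', prompt=prompt, stream=True):
    print(chunk.response, end='', flush=True)
print()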
