"""Demo: call several LLM providers through a local proxy using the
Anthropic SDK's unified Messages API.

Each provider is addressed by a prefixed model string (``openai/``,
``vertex/``, ``azure/``, ``ollama/``); bare model names default to
Anthropic. The proxy at ``base_url`` routes the request accordingly.
"""
import anthropic

# Point the SDK at the local proxy. Real credentials are handled by the
# proxy itself, so a placeholder API key is sufficient here.
client = anthropic.Anthropic(
    base_url="http://localhost:8080/anthropic",
    api_key="dummy-key"
)


def _greet(model, text):
    """Send a single user message *text* to *model* and return the response."""
    return client.messages.create(
        model=model,
        max_tokens=1000,
        messages=[{"role": "user", "content": text}]
    )


# Anthropic models (default)
anthropic_response = _greet("claude-3-sonnet-20240229", "Hello from Claude!")

# OpenAI models via Anthropic SDK format
openai_response = _greet("openai/gpt-4o-mini", "Hello from OpenAI!")

# Google Vertex models via Anthropic SDK format
vertex_response = _greet("vertex/gemini-pro", "Hello from Gemini!")

# Azure OpenAI models
azure_response = _greet("azure/gpt-4o", "Hello from Azure!")

# Local Ollama models
ollama_response = _greet("ollama/llama3.1:8b", "Hello from Ollama!")