## Wrapping the Client
# Wrap a synchronous Anthropic client so every API call is traced by Muxx.
from muxx import Muxx
from anthropic import Anthropic

# Create the tracer, then wrap the SDK client. The wrapped object exposes
# the same interface as the original Anthropic client.
muxx = Muxx()
client = muxx.wrap(Anthropic())

# All calls are now automatically traced
response = client.messages.create(
    model="claude-3-5-sonnet-20241022",
    max_tokens=1024,
    messages=[{"role": "user", "content": "Hello!"}]
)
## Async Client
# Wrap an asynchronous Anthropic client; tracing works the same way.
import asyncio

from muxx import Muxx
from anthropic import AsyncAnthropic

muxx = Muxx()
client = muxx.wrap(AsyncAnthropic())


async def main():
    """Send one traced message through the wrapped async client.

    Returns:
        The Messages API response object.
    """
    # `await` is only valid inside a coroutine: the original snippet used a
    # bare top-level await, which is a SyntaxError in a regular script.
    return await client.messages.create(
        model="claude-3-5-sonnet-20241022",
        max_tokens=1024,
        messages=[{"role": "user", "content": "Hello!"}]
    )


response = asyncio.run(main())
## Supported Methods

| Method | Traced |
|---|---|
| `messages.create` | Yes |
| `completions.create` (legacy) | Yes |
## Streaming

Streaming is fully supported:
# Stream a response through the wrapped client; tracing still applies.
with client.messages.stream(
    model="claude-3-5-sonnet-20241022",
    max_tokens=1024,
    messages=[{"role": "user", "content": "Write a poem"}]
) as stream:
    # Print text deltas as they arrive.
    for text in stream.text_stream:
        print(text, end="")
# Complete response is logged when streaming finishes
## System Prompts

System prompts are captured in logs:
# The `system` parameter sets the assistant's role; Muxx records it in the log.
response = client.messages.create(
    model="claude-3-5-sonnet-20241022",
    max_tokens=1024,
    system="You are a helpful coding assistant.",
    messages=[{"role": "user", "content": "Write a Python function to sort a list"}]
)
## Tool Use

Tool definitions and calls are automatically logged:
# Define one tool; `input_schema` is a JSON Schema describing its arguments.
tools = [
    {
        "name": "get_weather",
        "description": "Get current weather for a location",
        "input_schema": {
            "type": "object",
            "properties": {
                "location": {
                    "type": "string",
                    "description": "City and country"
                }
            },
            "required": ["location"]
        }
    }
]

# Pass the tool definitions alongside the user message.
response = client.messages.create(
    model="claude-3-5-sonnet-20241022",
    max_tokens=1024,
    tools=tools,
    messages=[{"role": "user", "content": "What's the weather in Tokyo?"}]
)
# Tool use is captured in the trace
# Tool use is captured in the trace
## Multi-turn Conversations
from muxx import trace

@trace("conversation")
def have_conversation():
    """Run a two-turn traced conversation and return the final reply text.

    Each user turn is appended to the running history, and the previous
    assistant reply is folded back in before the next turn is sent.
    """
    history = []
    prompts = ["What is Python?", "What are its main uses?"]
    reply = None
    for turn, prompt in enumerate(prompts):
        # From the second turn onward, record the prior assistant answer
        # so the model sees the full conversation so far.
        if turn:
            history.append({"role": "assistant", "content": reply.content[0].text})
        history.append({"role": "user", "content": prompt})
        reply = client.messages.create(
            model="claude-3-5-sonnet-20241022",
            max_tokens=1024,
            messages=history
        )
    return reply.content[0].text
# Both turns are grouped in one trace
# Both turns are grouped in one trace
## Vision

Image inputs are logged (without the actual image data, for efficiency):
# Read a local image and base64-encode it for the Messages API.
import base64

with open("image.png", "rb") as f:
    image_data = base64.standard_b64encode(f.read()).decode("utf-8")

response = client.messages.create(
    model="claude-3-5-sonnet-20241022",
    max_tokens=1024,
    messages=[
        {
            "role": "user",
            # Content is a list of blocks: the image first, then the text prompt.
            "content": [
                {
                    "type": "image",
                    "source": {
                        "type": "base64",
                        "media_type": "image/png",
                        "data": image_data
                    }
                },
                {"type": "text", "text": "Describe this image"}
            ]
        }
    ]
)
# Log shows image was included, token count reflects image tokens
# Log shows image was included, token count reflects image tokens
## Model Selection

All Claude models are supported:
# Illustrative snippets only: `...` stands in for the remaining required
# arguments (max_tokens, messages) shown in the earlier examples.

# Latest and most capable
client.messages.create(model="claude-3-5-sonnet-20241022", ...)
# Fast and cost-effective
client.messages.create(model="claude-3-5-haiku-20241022", ...)
# Most powerful (when available)
client.messages.create(model="claude-3-opus-20240229", ...)