Use Muxx with the OpenAI Python SDK.

## Wrapping the Client
```python
from muxx import Muxx
from openai import OpenAI

muxx = Muxx()
client = muxx.wrap(OpenAI())

# All calls are now automatically traced
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Hello!"}],
)
```
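The wrapped client returns the SDK's normal response objects, so downstream code needs no changes:

```python
# Access the response exactly as with an unwrapped client
print(response.choices[0].message.content)
```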
## Async Client

```python
from muxx import Muxx
from openai import AsyncOpenAI

muxx = Muxx()
client = muxx.wrap(AsyncOpenAI())

response = await client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Hello!"}],
)
```
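Top-level `await` only works inside a coroutine. A minimal runnable harness (the `main` function is illustrative):

```python
import asyncio

from muxx import Muxx
from openai import AsyncOpenAI

muxx = Muxx()
client = muxx.wrap(AsyncOpenAI())

async def main() -> None:
    # The wrapped client is awaited like a plain AsyncOpenAI client
    response = await client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Hello!"}],
    )
    print(response.choices[0].message.content)

asyncio.run(main())
```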
## Supported Methods

The wrapper automatically traces these methods:

| Method | Traced |
|---|---|
| `chat.completions.create` | Yes |
| `completions.create` | Yes |
| `embeddings.create` | Yes |
| `images.generate` | Yes |
| `audio.transcriptions.create` | Yes |
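For intuition, a tracing wrapper can be built as a thin proxy that intercepts each supported method and records latency around the call. This is an illustrative sketch only, not Muxx's actual implementation:

```python
import time
from typing import Any, Callable

def traced(method: Callable[..., Any], name: str) -> Callable[..., Any]:
    """Wrap one SDK method so every call logs its latency."""
    def wrapper(*args: Any, **kwargs: Any) -> Any:
        start = time.perf_counter()
        try:
            return method(*args, **kwargs)
        finally:
            elapsed_ms = (time.perf_counter() - start) * 1000
            # Stand-in for a real trace exporter
            print(f"[trace] {name}: {elapsed_ms:.1f} ms")
    return wrapper

# Hypothetical usage against a raw OpenAI client:
# client.chat.completions.create = traced(
#     client.chat.completions.create, "chat.completions.create"
# )
```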
## Streaming

Streaming responses are traced as well; the full response is assembled and logged once the stream finishes. An async variant follows this example.

```python
stream = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Write a story"}],
    stream=True,
)

for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="")

# The complete response is logged when streaming finishes
```
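The same pattern works with the async client, assuming the wrapper preserves the SDK's streaming semantics:

```python
# Inside an async function, with the wrapped AsyncOpenAI client from above
stream = await client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Write a story"}],
    stream=True,
)

async for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="")
```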
## Tool Calls

```python
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get weather for a location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {"type": "string"}
                },
            },
        },
    }
]

response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "What's the weather in Paris?"}],
    tools=tools,
)

# Tool calls are logged with the response
```
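A typical follow-up is to execute the requested tool and send its result back to the model; the local `get_weather` result below is a hypothetical stand-in:

```python
import json

message = response.choices[0].message

if message.tool_calls:
    call = message.tool_calls[0]
    args = json.loads(call.function.arguments)

    # Hypothetical local result for the declared get_weather tool
    weather = {"location": args["location"], "forecast": "sunny"}

    followup = client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {"role": "user", "content": "What's the weather in Paris?"},
            message,  # the assistant message containing the tool call
            {"role": "tool", "tool_call_id": call.id, "content": json.dumps(weather)},
        ],
        tools=tools,
    )
    print(followup.choices[0].message.content)
```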
## Tracing Workflows

The `trace` decorator wraps a function so the calls made inside it are traced together under one name:

```python
from muxx import Muxx, trace
from openai import OpenAI

muxx = Muxx()
client = muxx.wrap(OpenAI())

@trace("customer-support")
def handle_support_request(user_message: str):
    # Classify intent
    intent = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {"role": "system", "content": "Classify the intent: billing, technical, general"},
            {"role": "user", "content": user_message},
        ],
    )

    # Generate response
    response = client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {"role": "system", "content": f"You are a {intent.choices[0].message.content} support agent"},
            {"role": "user", "content": user_message},
        ],
    )
    return response.choices[0].message.content
```
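Calling the decorated function records both model calls, presumably grouped under the `customer-support` trace:

```python
answer = handle_support_request("I was charged twice this month.")
print(answer)
```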
## Embeddings

```python
embeddings = client.embeddings.create(
    model="text-embedding-3-small",
    input=["Hello world", "Goodbye world"],
)

# Token usage and latency are logged
```
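The return value is the SDK's standard embeddings response. For example, comparing the two vectors with cosine similarity:

```python
import math

a = embeddings.data[0].embedding
b = embeddings.data[1].embedding

# Cosine similarity between the two returned embedding vectors
dot = sum(x * y for x, y in zip(a, b))
norm_a = math.sqrt(sum(x * x for x in a))
norm_b = math.sqrt(sum(x * x for x in b))
print(dot / (norm_a * norm_b))
```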
## Gateway

Point the client at the Muxx gateway for caching and rate limiting; the SDK wrapper still adds tracing on top:

```python
from muxx import Muxx
from openai import OpenAI

muxx = Muxx()

client = muxx.wrap(OpenAI(
    base_url="https://gateway.muxx.dev/v1",
    default_headers={"X-Muxx-Api-Key": muxx.api_key},
))
```
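Calls through the gateway client have the same shape as direct ones; only the routing changes:

```python
# Routed via the gateway, traced by the SDK wrapper as before
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Hello!"}],
)
```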