Get started with the Muxx Python SDK in minutes.

Prerequisites
- Muxx Python SDK installed (`pip install muxx`)
- Your Muxx API key
- An LLM provider SDK (OpenAI, Anthropic, etc.)
from muxx import Muxx
from openai import OpenAI

# Initialize Muxx with your API key
muxx = Muxx(api_key="muxx_sk_live_xxxxxxxxxxxx")

# Wrap the OpenAI client so every call made through it is traced
client = muxx.wrap(OpenAI())

# Use as normal - all calls are automatically traced
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[
        {"role": "user", "content": "What is the capital of France?"}
    ]
)
print(response.choices[0].message.content)
from muxx import Muxx, trace
from openai import OpenAI

muxx = Muxx()
client = muxx.wrap(OpenAI())


@trace("document-summary")
def summarize_document(document: str) -> str:
    """Summarize *document* in two LLM steps: extract key points, then
    condense them. Both calls are grouped under one "document-summary" trace.
    """
    # Extract key points
    points = client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {"role": "user", "content": f"Extract key points from: {document}"}
        ]
    )
    # Generate summary from the extracted points
    summary = client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {"role": "user", "content": f"Summarize these points: {points.choices[0].message.content}"}
        ]
    )
    return summary.choices[0].message.content


# Both LLM calls are grouped under one trace
result = summarize_document("Your long document here...")
from muxx import Muxx, trace, span
from openai import OpenAI

muxx = Muxx()
client = muxx.wrap(OpenAI())


@trace("document-summary")
def summarize_document(document: str) -> str:
    """Summarize *document* with each LLM step recorded as a named span
    inside the "document-summary" trace.
    """

    @span("extract-points")
    def extract_points():
        # First step: pull the key points out of the raw document
        return client.chat.completions.create(
            model="gpt-4o",
            messages=[{"role": "user", "content": f"Extract key points: {document}"}]
        )

    @span("generate-summary")
    def generate_summary(points: str):
        # Second step: condense the extracted points into a summary
        return client.chat.completions.create(
            model="gpt-4o",
            messages=[{"role": "user", "content": f"Summarize: {points}"}]
        )

    points = extract_points()
    summary = generate_summary(points.choices[0].message.content)
    return summary.choices[0].message.content
import asyncio

from muxx import Muxx, trace
from openai import AsyncOpenAI

muxx = Muxx()
client = muxx.wrap(AsyncOpenAI())


@trace("async-chat")
async def chat(message: str) -> str:
    """Send one user message and return the assistant's reply text."""
    response = await client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": message}]
    )
    return response.choices[0].message.content


# Run async
result = asyncio.run(chat("Hello!"))
from muxx import Muxx, trace

muxx = Muxx()


# Attach custom metadata (user id, feature flag) to every trace this
# function emits, so traces can be filtered in the Muxx dashboard.
@trace("user-chat", metadata={"user_id": "user_123", "feature": "support"})
def handle_support_request(message: str):
    # Your code here
    pass