Documentation index: fetch the complete documentation index at https://docs.muxx.dev/llms.txt.
Use this file to discover all available pages before exploring further.
This guide shows you how to add observability to your Python LLM application.
Prerequisites
Muxx Python SDK installed (pip install muxx)
Your Muxx API key
An LLM provider SDK (OpenAI, Anthropic, etc.)
Basic Usage
Wrap Your Client
The simplest way to use Muxx is to wrap your LLM client:
from muxx import Muxx
from openai import OpenAI

# Initialize Muxx with your API key
muxx = Muxx(api_key="muxx_sk_live_xxxxxxxxxxxx")

# Wrap the OpenAI client so every call is traced
client = muxx.wrap(OpenAI())

# Use as normal - all calls are automatically traced
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[
        {"role": "user", "content": "What is the capital of France?"}
    ],
)

print(response.choices[0].message.content)
That’s it! All requests are now logged to Muxx.
Adding Context
Traces
Group related operations into traces:
from muxx import Muxx, trace
from openai import OpenAI

muxx = Muxx()
client = muxx.wrap(OpenAI())


@trace("document-summary")
def summarize_document(document: str) -> str:
    """Summarize *document* with two LLM calls grouped under one trace."""
    # Extract key points
    points = client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {"role": "user", "content": f"Extract key points from: {document}"}
        ],
    )
    # Generate summary from the extracted points
    summary = client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {
                "role": "user",
                "content": f"Summarize these points: {points.choices[0].message.content}",
            }
        ],
    )
    return summary.choices[0].message.content


# Both LLM calls are grouped under one trace
result = summarize_document("Your long document here...")
Spans
Add more granular tracking with spans:
from muxx import Muxx, trace, span
from openai import OpenAI

muxx = Muxx()
client = muxx.wrap(OpenAI())


@trace("document-summary")
def summarize_document(document: str) -> str:
    """Summarize *document*, tracking each LLM call as its own span."""

    @span("extract-points")
    def extract_points():
        # First span: pull the key points out of the raw document
        return client.chat.completions.create(
            model="gpt-4o",
            messages=[{"role": "user", "content": f"Extract key points: {document}"}],
        )

    @span("generate-summary")
    def generate_summary(points: str):
        # Second span: condense the extracted points into a summary
        return client.chat.completions.create(
            model="gpt-4o",
            messages=[{"role": "user", "content": f"Summarize: {points}"}],
        )

    points = extract_points()
    summary = generate_summary(points.choices[0].message.content)
    return summary.choices[0].message.content
Async Support
The SDK fully supports async operations:
import asyncio

from muxx import Muxx, trace
from openai import AsyncOpenAI

muxx = Muxx()
client = muxx.wrap(AsyncOpenAI())


@trace("async-chat")
async def chat(message: str) -> str:
    """Send *message* to the model and return the reply text."""
    response = await client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": message}],
    )
    return response.choices[0].message.content


# Run async
result = asyncio.run(chat("Hello!"))
Metadata
Add custom metadata to traces for filtering:
from muxx import Muxx, trace

muxx = Muxx()


# Metadata key-value pairs are attached to the trace and can be
# used as filters in the Muxx dashboard.
@trace("user-chat", metadata={"user_id": "user_123", "feature": "support"})
def handle_support_request(message: str):
    # Your code here
    pass
Next Steps
Tracing — learn more about traces and spans.
Decorators — use @observe and other decorators.