from muxx import Muxx, trace  # `span` dropped: spans are opened via muxx.span()
from openai import OpenAI

muxx = Muxx()
client = muxx.wrap(OpenAI())  # wrapped client auto-records LLM calls


@trace("rag-query")
def answer_question(question: str) -> str:
    # Get embedding
    with muxx.span("embed-query"):
        embedding = client.embeddings.create(
            model="text-embedding-3-small",
            input=question,
        )

    # Search (your vector DB)
    with muxx.span("search"):
        documents = search_similar(embedding.data[0].embedding)

    # Generate answer
    with muxx.span("generate"):
        response = client.chat.completions.create(
            model="gpt-4o",
            messages=[
                {"role": "system", "content": f"Context: {documents}"},
                {"role": "user", "content": question},
            ],
        )
    return response.choices[0].message.content
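

# ---------------------------------------------------------------------------
# `search_similar` above is left to your vector database. What follows is a
# minimal in-memory sketch so the example runs end to end; DOCUMENT_STORE,
# the `k` parameter, and the brute-force cosine ranking are illustrative
# assumptions, not part of muxx. In production, replace the body with a
# query against your vector store.
# ---------------------------------------------------------------------------
import math

# Hypothetical store of (text, embedding) pairs, precomputed at indexing
# time with the same embedding model used for queries.
DOCUMENT_STORE: list[tuple[str, list[float]]] = []


def cosine_similarity(a: list[float], b: list[float]) -> float:
    # Cosine similarity between two equal-length vectors.
    dot = sum(x * y for x, y in zip(a, b))
    norm = math.sqrt(sum(x * x for x in a)) * math.sqrt(sum(y * y for y in b))
    return dot / norm if norm else 0.0


def search_similar(query_embedding: list[float], k: int = 3) -> list[str]:
    # Return the k document texts most similar to the query embedding.
    ranked = sorted(
        DOCUMENT_STORE,
        key=lambda item: cosine_similarity(query_embedding, item[1]),
        reverse=True,
    )
    return [text for text, _ in ranked[:k]]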