# Introduction

Autoblocks is a developer-centric tool that helps you monitor and improve AI features powered by LLMs and other foundation models.

import os
import uuid
import traceback
import time

import openai
from autoblocks.tracer import AutoblocksTracer

# Authenticate the OpenAI client from the environment.
# Raises KeyError at startup if OPENAI_API_KEY is unset (fail fast).
openai.api_key = os.environ["OPENAI_API_KEY"]

# One tracer for the whole script: every event sent through it shares the
# same randomly generated trace ID and the base properties below.
# Raises KeyError at startup if AUTOBLOCKS_INGESTION_KEY is unset.
trace_identifier = str(uuid.uuid4())
tracer = AutoblocksTracer(
  os.environ["AUTOBLOCKS_INGESTION_KEY"],
  trace_id=trace_identifier,
  # Merged into the properties of every send_event call below.
  properties={"provider": "openai"},
)

# Chat-completion request parameters; the same dict is also sent verbatim
# as the payload of the "ai.request" event below.
system_message = {
  "role": "system",
  "content": "You are a helpful assistant. You answer questions about a software product named Acme.",
}
user_message = {"role": "user", "content": "How do I sign up?"}
params = {
  "model": "gpt-3.5-turbo",
  "messages": [system_message, user_message],
  "temperature": 0.7,
  "top_p": 1,
  "frequency_penalty": 0,
  "presence_penalty": 0,
  "n": 1,
}

# A span ID ties the request event to its matching response/error event
# within the trace.
span_id = str(uuid.uuid4())

# Record the outbound model call before it is made; span_id links this
# request event to the response (or error) event emitted below.
tracer.send_event(
  "ai.request",
  span_id=span_id,
  properties=params,
)

try:
  # Use perf_counter (monotonic, high resolution) for the latency
  # measurement: time.time() is a wall clock that can jump (e.g. NTP
  # adjustments) and produce wrong or even negative durations.
  start_time = time.perf_counter()
  response = openai.ChatCompletion.create(**params)
  tracer.send_event(
    "ai.response",
    span_id=span_id,
    properties=dict(
      response=response,
      latency_ms=(time.perf_counter() - start_time) * 1000,
    ),
  )
except Exception as error:
  # Report the failure with type, message, and stack trace for debugging,
  # then re-raise so the caller still sees the original exception.
  tracer.send_event(
    "ai.error",
    span_id=span_id,
    properties=dict(
      error=dict(
        type=type(error).__name__,
        message=str(error),
        stacktrace=traceback.format_exc(),
      ),
    ),
  )
  raise

# Simulate a user rating the answer. No span_id here: this event is tied to
# the request/response pair only through the tracer's shared trace ID.
tracer.send_event(
  "user.feedback",
  properties={"feedback": "good"},
)