Add span IDs to examples (#47)
Nicole White authored Nov 6, 2023
1 parent aab43ed commit 3ee859f
Showing 8 changed files with 83 additions and 58 deletions.
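The span-ID changes below all reduce to one pattern: generate a random span ID per LLM call, then attach it to the `ai.request` event and to the matching `ai.response` or `ai.error` event so the events can be grouped within a trace. Here is a minimal TypeScript sketch of that pattern, using the `AutoblocksTracer.sendEvent` API exactly as it appears in these diffs; the prompt, model settings, and single-argument tracer constructor are illustrative assumptions (the diffs truncate the constructor options), and a few files in this commit only rename `openAIRequest` to `params`:

```ts
import crypto from 'crypto';
import OpenAI from 'openai';
import { AutoblocksTracer } from '@autoblocks/client';

// Assumes OPENAI_API_KEY and AUTOBLOCKS_INGESTION_KEY are set in the environment;
// tracer constructor options beyond the ingestion key are not shown in these diffs.
const openai = new OpenAI();
const tracer = new AutoblocksTracer(process.env.AUTOBLOCKS_INGESTION_KEY);

async function run() {
  // One span ID groups the request event with its response or error event
  const spanId = crypto.randomUUID();

  const params = {
    model: 'gpt-3.5-turbo',
    messages: [{ role: 'user' as const, content: 'Hello!' }], // illustrative prompt
  };

  await tracer.sendEvent('ai.request', { spanId, properties: params });

  try {
    const now = Date.now();
    const response = await openai.chat.completions.create(params);
    await tracer.sendEvent('ai.response', {
      spanId,
      properties: { response, latencyMs: Date.now() - now },
    });
  } catch (error) {
    await tracer.sendEvent('ai.error', { spanId, properties: { error } });
    throw error;
  }
}

run();
```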
13 changes: 10 additions & 3 deletions JavaScript/chatbot-nextjs/src/pages/api/chat.ts
@@ -1,3 +1,4 @@
+import crypto from 'crypto';
 import type { NextApiRequest, NextApiResponse } from 'next';
 import OpenAI from 'openai';
 import { AutoblocksTracer } from '@autoblocks/client';
@@ -41,7 +42,10 @@ export default async function handler(
     },
   });

-  const requestParams = {
+  // Use a span ID to group together the request + response/error events
+  const spanId = crypto.randomUUID();
+
+  const params = {
     model: 'gpt-3.5-turbo',
     messages: [
       { role: 'system', content: systemPrompt },
@@ -52,13 +56,15 @@
   };

   await tracer.sendEvent('ai.request', {
-    properties: requestParams,
+    spanId,
+    properties: params,
   });

   try {
     const now = Date.now();
-    const response = await openai.chat.completions.create(requestParams);
+    const response = await openai.chat.completions.create(params);
     await tracer.sendEvent('ai.response', {
+      spanId,
       properties: {
         response,
         latencyMs: Date.now() - now,
@@ -69,6 +75,7 @@
     });
   } catch (error) {
     await tracer.sendEvent('ai.error', {
+      spanId,
       properties: {
         error,
       },
12 changes: 9 additions & 3 deletions JavaScript/jest-replays/src/run.js
@@ -20,7 +20,10 @@ const run = async ({ input, traceId }) => {
   // a random traceId while in production.
   tracer.setTraceId(traceId || crypto.randomUUID());

-  const request = {
+  // Use a span ID to group together the request + response/error events
+  const spanId = crypto.randomUUID();
+
+  const params = {
     model: 'gpt-3.5-turbo',
     messages: [
       {
@@ -40,13 +43,15 @@ const run = async ({ input, traceId }) => {
   };

   await tracer.sendEvent('ai.request', {
-    properties: request,
+    spanId,
+    properties: params,
   });

   try {
     const now = Date.now();
-    const response = await openai.chat.completions.create(request);
+    const response = await openai.chat.completions.create(params);
     await tracer.sendEvent('ai.response', {
+      spanId,
       properties: {
         response,
         latencyMs: Date.now() - now,
@@ -55,6 +60,7 @@ const run = async ({ input, traceId }) => {
     return response.choices[0].message.content;
   } catch (error) {
     await tracer.sendEvent('ai.error', {
+      spanId,
       properties: {
         error,
       },
53 changes: 26 additions & 27 deletions JavaScript/novel-ai-text-editor/src/app/api/generate/route.ts
@@ -24,39 +24,38 @@ export async function POST(req: Request): Promise<Response> {

   let { prompt } = await req.json();

-  const openAIRequest: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming =
-    {
-      model: 'gpt-3.5-turbo',
-      messages: [
-        {
-          role: 'system',
-          content:
-            'You are an AI writing assistant that continues existing text based on context from prior text. ' +
-            'Give more weight/priority to the later characters than the beginning ones. ' +
-            'Limit your response to no more than 200 characters, but make sure to construct complete sentences.',
-          // we're disabling markdown for now until we can figure out a way to stream markdown text with proper formatting: https://github.com/steven-tey/novel/discussions/7
-          // "Use Markdown formatting when appropriate.",
-        },
-        {
-          role: 'user',
-          content: prompt,
-        },
-      ],
-      temperature: 0.7,
-      top_p: 1,
-      frequency_penalty: 0,
-      presence_penalty: 0,
-      stream: true,
-      n: 1,
-    };
+  const params: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
+    model: 'gpt-3.5-turbo',
+    messages: [
+      {
+        role: 'system',
+        content:
+          'You are an AI writing assistant that continues existing text based on context from prior text. ' +
+          'Give more weight/priority to the later characters than the beginning ones. ' +
+          'Limit your response to no more than 200 characters, but make sure to construct complete sentences.',
+        // we're disabling markdown for now until we can figure out a way to stream markdown text with proper formatting: https://github.com/steven-tey/novel/discussions/7
+        // "Use Markdown formatting when appropriate.",
+      },
+      {
+        role: 'user',
+        content: prompt,
+      },
+    ],
+    temperature: 0.7,
+    top_p: 1,
+    frequency_penalty: 0,
+    presence_penalty: 0,
+    stream: true,
+    n: 1,
+  };

   const { traceId } = await sendEventToAutoblocks({
     eventName: 'ai.request',
-    properties: { ...openAIRequest, provider: 'openai' },
+    properties: { ...params, provider: 'openai' },
   });

   try {
-    const response = await openai.chat.completions.create(openAIRequest);
+    const response = await openai.chat.completions.create(params);

     // Convert the response into a friendly text-stream
     const stream = OpenAIStream(response, {
4 changes: 2 additions & 2 deletions JavaScript/openai-automated-ts/src/index.ts
@@ -9,7 +9,7 @@ async function main() {

   const openai = new OpenAI();

-  const openAIRequest: ChatCompletionCreateParamsNonStreaming = {
+  const params: ChatCompletionCreateParamsNonStreaming = {
     model: 'gpt-3.5-turbo',
     messages: [
       {
@@ -32,7 +32,7 @@ async function main() {
   };

   console.log('Calling OpenAI...');
-  await openai.chat.completions.create(openAIRequest);
+  await openai.chat.completions.create(params);
   console.log('Finished calling OpenAI!');

   console.log('View the trace at https://app.autoblocks.ai/explore');
4 changes: 2 additions & 2 deletions JavaScript/openai-automated/src/index.js
@@ -7,7 +7,7 @@ async function main() {

   const openai = new OpenAI();

-  const openAIRequest = {
+  const params = {
     model: 'gpt-3.5-turbo',
     messages: [
       {
@@ -30,7 +30,7 @@ async function main() {
   };

   console.log('Calling OpenAI...');
-  await openai.chat.completions.create(openAIRequest);
+  await openai.chat.completions.create(params);
   console.log('Finished calling OpenAI');

   console.log('View the trace at https://app.autoblocks.ai/explore');
14 changes: 8 additions & 6 deletions JavaScript/openai-manual/src/index.js
@@ -18,10 +18,10 @@ const tracer = new AutoblocksTracer(process.env.AUTOBLOCKS_INGESTION_KEY, {
 async function run() {
   console.log('Running example...');

-  // Use a spanId to group together the request + response/error events
+  // Use a span ID to group together the request + response/error events
   const spanId = crypto.randomUUID();

-  const openAIRequest = {
+  const params = {
     model: 'gpt-3.5-turbo',
     messages: [
       {
@@ -44,26 +44,28 @@
   };

   await tracer.sendEvent('ai.request', {
-    properties: { ...openAIRequest, spanId },
+    spanId,
+    properties: params,
   });

   try {
     const now = Date.now();
-    const response = await openai.chat.completions.create(openAIRequest);
+    const response = await openai.chat.completions.create(params);
     await tracer.sendEvent('ai.response', {
+      spanId,
       properties: {
         response,
         latency: Date.now() - now,
-        spanId,
       },
     });
   } catch (error) {
     await tracer.sendEvent('ai.error', {
+      spanId,
       properties: {
         error,
-        spanId,
       },
     });
     throw error;
   }

   console.log(
27 changes: 16 additions & 11 deletions Python/openai-manual/main.py
@@ -11,16 +11,15 @@

 openai.api_key = os.environ["OPENAI_API_KEY"]

-messages = [
-    {
-        "role": "system",
-        "content": "You are a helpful assistant. You answer questions about a software product named Acme.",
-    },
-    {"role": "user", "content": "How do I sign up?"},
-]
-request_params = dict(
+params = dict(
     model="gpt-3.5-turbo",
-    messages=messages,
+    messages=[
+        {
+            "role": "system",
+            "content": "You are a helpful assistant. You answer questions about a software product named Acme.",
+        },
+        {"role": "user", "content": "How do I sign up?"},
+    ],
     temperature=0.7,
     top_p=1,
     frequency_penalty=0,
@@ -36,12 +35,16 @@


 def main():
-    tracer.send_event("ai.request", properties=request_params)
+    # Use a span ID to group together the request + response/error events
+    span_id = str(uuid.uuid4())
+
+    tracer.send_event("ai.request", span_id=span_id, properties=params)
     try:
         start_time = time.time()
-        openai_response = openai.ChatCompletion.create(**request_params)
+        openai_response = openai.ChatCompletion.create(**params)
         tracer.send_event(
             "ai.response",
+            span_id=span_id,
             properties=dict(
                 response=openai_response,
                 latency=(time.time() - start_time) * 1000,
@@ -50,6 +53,7 @@ def main():
     except Exception as error:
         tracer.send_event(
             "ai.error",
+            span_id=span_id,
             properties=dict(
                 error=dict(
                     type=type(error).__name__,
@@ -58,6 +62,7 @@ def main():
                 ),
             ),
         )
+    raise

     print(f"View your trace: https://app.autoblocks.ai/explore/trace/{tracer.trace_id}")
14 changes: 10 additions & 4 deletions Python/pytest-replays/main.py
@@ -19,14 +19,17 @@


 def run(content: str, trace_id: Optional[str] = None):
-    # Set the traceId to the one given, or fall back to a random UUID.
+    # Set the trace ID to the one given, or fall back to a random UUID.
     # When we call this function from the test suite we will pass in a
     # trace_id so that it is stable across replay runs, but in production
     # we'll only pass in the content, like run(content), so that we generate
     # a random trace_id while in production.
     tracer.set_trace_id(trace_id or str(uuid.uuid4()))

-    request = dict(
+    # Use a span ID to group together the request + response/error events
+    span_id = str(uuid.uuid4())
+
+    params = dict(
         model="gpt-3.5-turbo",
         messages=[
             {
@@ -41,13 +44,14 @@ def run(content: str, trace_id: Optional[str] = None):
         temperature=0.3,
     )

-    tracer.send_event("ai.request", properties=request)
+    tracer.send_event("ai.request", span_id=span_id, properties=params)

     try:
         start_time = time.time()
-        response = openai.ChatCompletion.create(**request)
+        response = openai.ChatCompletion.create(**params)
         tracer.send_event(
             "ai.response",
+            span_id=span_id,
             properties=dict(
                 response=response,
                 latency_ms=(time.time() - start_time) * 1000,
@@ -57,6 +61,7 @@ def run(content: str, trace_id: Optional[str] = None):
     except Exception as error:
         tracer.send_event(
             "ai.error",
+            span_id=span_id,
             properties=dict(
                 error=dict(
                     type=type(error).__name__,
@@ -65,6 +70,7 @@ def run(content: str, trace_id: Optional[str] = None):
                 ),
             ),
         )
+    raise


 if __name__ == "__main__":
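Both Python files now share the same shape. A condensed, hypothetical helper distilling it follows; `tracer` is assumed to be an Autoblocks tracer configured as in these examples, the legacy `openai.ChatCompletion` API matches the diffs, and the `message` field in the error dict is an assumption since the diffs truncate the error properties:

```python
import time
import uuid

import openai


def traced_completion(tracer, params: dict):
    """Send ai.request / ai.response / ai.error events that share one span ID."""
    # One span ID groups the request event with its response or error event
    span_id = str(uuid.uuid4())

    tracer.send_event("ai.request", span_id=span_id, properties=params)
    try:
        start_time = time.time()
        response = openai.ChatCompletion.create(**params)
        tracer.send_event(
            "ai.response",
            span_id=span_id,
            properties=dict(
                response=response,
                latency_ms=(time.time() - start_time) * 1000,
            ),
        )
        return response
    except Exception as error:
        tracer.send_event(
            "ai.error",
            span_id=span_id,
            properties=dict(
                error=dict(type=type(error).__name__, message=str(error)),
            ),
        )
        raise
```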
