Skip to content

Commit 027b9a8

Browse files
committed
docs for currently supported spans
1 parent 274ffe1 commit 027b9a8

File tree

14 files changed

+2419
-737
lines changed

14 files changed

+2419
-737
lines changed

docs/docs.json

Lines changed: 37 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -13,12 +13,7 @@
1313
"dark": "/logo/dark.svg"
1414
},
1515
"contextual": {
16-
"options": [
17-
"copy",
18-
"view",
19-
"chatgpt",
20-
"claude"
21-
]
16+
"options": ["copy", "view", "chatgpt", "claude"]
2217
},
2318
"navbar": {
2419
"primary": {
@@ -119,9 +114,7 @@
119114
},
120115
{
121116
"group": "Experiments",
122-
"pages": [
123-
"guides/experiments/overview"
124-
]
117+
"pages": ["guides/experiments/overview"]
125118
},
126119
{
127120
"group": "Deployment",
@@ -130,10 +123,7 @@
130123
"guides/integration/publishing-deployment",
131124
{
132125
"group": "Latitude SDK",
133-
"pages": [
134-
"guides/sdk/typescript",
135-
"guides/sdk/python"
136-
]
126+
"pages": ["guides/sdk/typescript", "guides/sdk/python"]
137127
},
138128
{
139129
"group": "HTTP API",
@@ -162,15 +152,11 @@
162152
},
163153
{
164154
"group": "Changelog",
165-
"pages": [
166-
"changelog/overview"
167-
]
155+
"pages": ["changelog/overview"]
168156
},
169157
{
170158
"group": "Support",
171-
"pages": [
172-
"guides/integration/community-support"
173-
]
159+
"pages": ["guides/integration/community-support"]
174160
}
175161
]
176162
},
@@ -179,9 +165,7 @@
179165
"groups": [
180166
{
181167
"group": "Getting Started",
182-
"pages": [
183-
"promptl/getting-started/introduction"
184-
]
168+
"pages": ["promptl/getting-started/introduction"]
185169
},
186170
{
187171
"group": "Syntax",
@@ -218,6 +202,37 @@
218202
}
219203
]
220204
},
205+
{
206+
"tab": "Integrations",
207+
"hidden": true,
208+
"groups": [
209+
{
210+
"group": "Overview",
211+
"pages": ["integrations/overview"]
212+
},
213+
{
214+
"group": "Providers",
215+
"pages": [
216+
"integrations/providers/openai",
217+
"integrations/providers/anthropic",
218+
"integrations/providers/azure",
219+
"integrations/providers/google-ai-platform",
220+
"integrations/providers/amazon-bedrock",
221+
"integrations/providers/cohere",
222+
"integrations/providers/together-ai",
223+
"integrations/providers/vertex-ai"
224+
]
225+
},
226+
{
227+
"group": "Frameworks",
228+
"pages": [
229+
"integrations/frameworks/langchain",
230+
"integrations/frameworks/llamaindex",
231+
"integrations/frameworks/vercel-ai-sdk"
232+
]
233+
}
234+
]
235+
},
221236
{
222237
"tab": "Examples",
223238
"groups": [
Lines changed: 123 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,123 @@
1+
---
2+
title: LangChain
3+
description: Connect your LangChain-based application to Latitude Telemetry to observe chains per feature and run evaluations.
4+
---
5+
6+
## Overview
7+
8+
This guide shows you how to integrate **Latitude Telemetry** into an existing application that uses **LangChain**.
9+
10+
After completing these steps:
11+
12+
- Each LangChain run can be captured as a log in Latitude.
13+
- Logs are attached to a specific **prompt** and **version** in Latitude.
14+
- You can annotate, evaluate, and debug your chains from the Latitude dashboard.
15+
16+
> You keep using LangChain as usual — Telemetry observes runs via the LangChain callback system.
17+
18+
---
19+
20+
## Requirements
21+
22+
Before you start, make sure you have:
23+
24+
- A **Latitude account** and **API key**.
25+
- At least one **prompt** created in Latitude.
26+
- A Node.js-based project that uses LangChain.
27+
28+
---
29+
30+
## Steps
31+
32+
<Steps>
33+
34+
<Step title="Install requirements">
35+
Add the Latitude Telemetry package to your project:
36+
37+
<CodeGroup>
38+
```bash npm
39+
npm add @latitude-data/telemetry @opentelemetry/api
40+
```
41+
42+
```bash pnpm
43+
pnpm add @latitude-data/telemetry @opentelemetry/api
44+
```
45+
46+
```bash yarn
47+
yarn add @latitude-data/telemetry @opentelemetry/api
48+
```
49+
50+
```bash bun
51+
bun add @latitude-data/telemetry @opentelemetry/api
52+
```
53+
</CodeGroup>
54+
55+
</Step>
56+
57+
<Step title="Initialize Latitude Telemetry with LangChain">
58+
Create a <code>LatitudeTelemetry</code> instance and pass the LangChain callback manager module as an instrumentation.
59+
60+
```ts
61+
import { LatitudeTelemetry } from '@latitude-data/telemetry'
62+
import * as LangchainCallbacks from '@langchain/core/callbacks/manager'
63+
64+
export const telemetry = new LatitudeTelemetry('your-latitude-api-key', {
65+
instrumentations: {
66+
langchain: {
67+
callbackManagerModule: LangchainCallbacks, // Enables tracing via LangChain callbacks
68+
},
69+
},
70+
})
71+
```
72+
73+
</Step>
74+
75+
<Step title="Wrap your LangChain-powered feature">
76+
Wrap the code that runs your LangChain chain with a Telemetry prompt span, and execute the chain inside that span.
77+
78+
```ts
79+
import { context } from '@opentelemetry/api'
80+
import { BACKGROUND } from '@latitude-data/telemetry'
81+
import { createAgent } from "langchain";
82+
83+
export async function generateSupportReply(input: string) {
84+
const $prompt = telemetry.prompt(BACKGROUND(), {
85+
promptUuid: 'your-prompt-uuid',
86+
versionUuid: 'your-version-uuid',
87+
})
88+
89+
await context
90+
.with($prompt.context, async () => {
91+
const agent = createAgent({ model: 'claude-sonnet-4-5' });
92+
const result = await agent.invoke({
93+
messages: [
94+
{
95+
role: "user",
96+
content: input,
97+
},
98+
],
99+
});
100+
101+
// Use result here...
102+
})
103+
.then(() => $prompt.end())
104+
.catch((error) => $prompt.fail(error as Error))
105+
.finally(() => telemetry.flush())
106+
}
107+
```
108+
109+
</Step>
110+
111+
</Steps>
112+
113+
---
114+
115+
## Seeing your logs in Latitude
116+
117+
Once you've wrapped your LangChain-powered feature, you can see your logs in Latitude.
118+
119+
1. Go to the **Traces** section of your prompt in Latitude.
120+
2. You should see new entries every time your chain is executed, including:
121+
- Chain input/output
122+
- Provider calls made within the chain (when instrumented)
123+
- Latency and error information
Lines changed: 123 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,123 @@
1+
---
2+
title: LlamaIndex
3+
description: Connect your LlamaIndex-based application to Latitude Telemetry to observe queries per feature and run evaluations.
4+
---
5+
6+
## Overview
7+
8+
This guide shows you how to integrate **Latitude Telemetry** into an existing application that uses **LlamaIndex**.
9+
10+
After completing these steps:
11+
12+
- Each LlamaIndex query or pipeline execution can be captured as a log in Latitude.
13+
- Logs are attached to a specific **prompt** and **version** in Latitude.
14+
- You can annotate, evaluate, and debug your LlamaIndex-powered features from the Latitude dashboard.
15+
16+
> You keep using LlamaIndex as usual — Telemetry observes calls made through the LlamaIndex library.
17+
18+
---
19+
20+
## Requirements
21+
22+
Before you start, make sure you have:
23+
24+
- A **Latitude account** and **API key**.
25+
- At least one **prompt** created in Latitude.
26+
- A Node.js-based project that uses `llamaindex`.
27+
28+
---
29+
30+
## Steps
31+
32+
<Steps>
33+
34+
<Step title="Install requirements">
35+
Add the Latitude Telemetry package to your project:
36+
37+
<CodeGroup>
38+
```bash npm
39+
npm add @latitude-data/telemetry @opentelemetry/api
40+
```
41+
42+
```bash pnpm
43+
pnpm add @latitude-data/telemetry @opentelemetry/api
44+
```
45+
46+
```bash yarn
47+
yarn add @latitude-data/telemetry @opentelemetry/api
48+
```
49+
50+
```bash bun
51+
bun add @latitude-data/telemetry @opentelemetry/api
52+
```
53+
</CodeGroup>
54+
55+
</Step>
56+
57+
<Step title="Initialize Latitude Telemetry with LlamaIndex">
58+
Create a <code>LatitudeTelemetry</code> instance and pass the LlamaIndex module as an instrumentation.
59+
60+
```ts
61+
import { LatitudeTelemetry } from '@latitude-data/telemetry'
62+
import * as LlamaIndex from 'llamaindex'
63+
64+
export const telemetry = new LatitudeTelemetry('your-latitude-api-key', {
65+
instrumentations: {
66+
llamaindex: LlamaIndex, // Enables automatic tracing for LlamaIndex
67+
},
68+
})
69+
```
70+
71+
</Step>
72+
73+
<Step title="Wrap your LlamaIndex-powered feature">
74+
Wrap the code that calls LlamaIndex with a Telemetry prompt span, and execute your query or pipeline inside that span.
75+
76+
```ts
77+
import { context } from '@opentelemetry/api'
78+
import { BACKGROUND } from '@latitude-data/telemetry'
79+
80+
import { agent } from "@llamaindex/workflow";
81+
import { Settings } from "llamaindex";
82+
import { openai } from "@llamaindex/openai";
83+
84+
export async function answerQuestion(input: string) {
85+
const $prompt = telemetry.prompt(BACKGROUND(), {
86+
promptUuid: 'your-prompt-uuid',
87+
versionUuid: 'your-version-uuid',
88+
})
89+
90+
await context
91+
.with($prompt.context, async () => {
92+
93+
Settings.llm = openai({
94+
apiKey: process.env.OPENAI_API_KEY,
95+
model: 'gpt-4o',
96+
});
97+
98+
const myAgent = agent({});
99+
const response = await myAgent.run(input);
100+
101+
// Use response here...
102+
})
103+
.then(() => $prompt.end())
104+
.catch((error) => $prompt.fail(error as Error))
105+
.finally(() => telemetry.flush())
106+
}
107+
```
108+
109+
</Step>
110+
111+
</Steps>
112+
113+
---
114+
115+
## Seeing your logs in Latitude
116+
117+
Once you've wrapped your LlamaIndex-powered feature, you can see your logs in Latitude.
118+
119+
1. Go to the **Traces** section of your prompt in Latitude.
120+
2. You should see new entries every time your query runs, including:
121+
- Query input and generated answer
122+
- Underlying provider calls (when instrumented)
123+
- Latency and error information

0 commit comments

Comments
 (0)