docs/docs.json (59 changes: 37 additions & 22 deletions)
@@ -13,12 +13,7 @@
     "dark": "/logo/dark.svg"
   },
   "contextual": {
-    "options": [
-      "copy",
-      "view",
-      "chatgpt",
-      "claude"
-    ]
+    "options": ["copy", "view", "chatgpt", "claude"]
   },
   "navbar": {
     "primary": {
@@ -119,9 +114,7 @@
           },
           {
             "group": "Experiments",
-            "pages": [
-              "guides/experiments/overview"
-            ]
+            "pages": ["guides/experiments/overview"]
           },
           {
             "group": "Deployment",
@@ -130,10 +123,7 @@
               "guides/integration/publishing-deployment",
               {
                 "group": "Latitude SDK",
-                "pages": [
-                  "guides/sdk/typescript",
-                  "guides/sdk/python"
-                ]
+                "pages": ["guides/sdk/typescript", "guides/sdk/python"]
               },
               {
                 "group": "HTTP API",
@@ -162,15 +152,11 @@
           },
           {
             "group": "Changelog",
-            "pages": [
-              "changelog/overview"
-            ]
+            "pages": ["changelog/overview"]
           },
           {
             "group": "Support",
-            "pages": [
-              "guides/integration/community-support"
-            ]
+            "pages": ["guides/integration/community-support"]
           }
         ]
       },
@@ -179,9 +165,7 @@
         "groups": [
           {
             "group": "Getting Started",
-            "pages": [
-              "promptl/getting-started/introduction"
-            ]
+            "pages": ["promptl/getting-started/introduction"]
           },
           {
             "group": "Syntax",
@@ -218,6 +202,37 @@
           }
         ]
       },
+      {
+        "tab": "Integrations",
+        "hidden": true,
+        "groups": [
+          {
+            "group": "Overview",
+            "pages": ["integrations/overview"]
+          },
+          {
+            "group": "Providers",
+            "pages": [
+              "integrations/providers/openai",
+              "integrations/providers/anthropic",
+              "integrations/providers/azure",
+              "integrations/providers/google-ai-platform",
+              "integrations/providers/amazon-bedrock",
+              "integrations/providers/cohere",
+              "integrations/providers/together-ai",
+              "integrations/providers/vertex-ai"
+            ]
+          },
+          {
+            "group": "Frameworks",
+            "pages": [
+              "integrations/frameworks/langchain",
+              "integrations/frameworks/llamaindex",
+              "integrations/frameworks/vercel-ai-sdk"
+            ]
+          }
+        ]
+      },
       {
         "tab": "Examples",
         "groups": [
docs/integrations/frameworks/langchain.mdx (123 changes: 123 additions & 0 deletions)
@@ -0,0 +1,123 @@
---
title: LangChain
description: Connect your LangChain-based application to Latitude Telemetry to observe chains per feature and run evaluations.
---

## Overview

This guide shows you how to integrate **Latitude Telemetry** into an existing application that uses **LangChain**.

After completing these steps:

- Each LangChain run can be captured as a log in Latitude.
- Logs are attached to a specific **prompt** and **version** in Latitude.
- You can annotate, evaluate, and debug your chains from the Latitude dashboard.

> You keep using LangChain as usual — Telemetry observes runs via the LangChain callback system.

---

## Requirements

Before you start, make sure you have:

- A **Latitude account** and **API key**.
- At least one **prompt** created in Latitude.
- A Node.js-based project that uses LangChain.

---

## Steps

<Steps>

<Step title="Install requirements">
Add the Latitude Telemetry package to your project:

<CodeGroup>
```bash npm
npm add @latitude-data/telemetry @opentelemetry/api
```

```bash pnpm
pnpm add @latitude-data/telemetry @opentelemetry/api
```

```bash yarn
yarn add @latitude-data/telemetry @opentelemetry/api
```

```bash bun
bun add @latitude-data/telemetry @opentelemetry/api
```
</CodeGroup>

</Step>

<Step title="Initialize Latitude Telemetry with LangChain">
Create a `LatitudeTelemetry` instance and pass the LangChain callback manager module as an instrumentation.

```ts
import { LatitudeTelemetry } from '@latitude-data/telemetry'
import * as LangchainCallbacks from '@langchain/core/callbacks/manager'

export const telemetry = new LatitudeTelemetry('your-latitude-api-key', {
instrumentations: {
langchain: {
callbackManagerModule: LangchainCallbacks, // Enables tracing via LangChain callbacks
},
},
})
```
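
The API key is hardcoded here for clarity. In a real application you'll likely read it from the environment instead (for example, `process.env.LATITUDE_API_KEY`; the variable name is a convention, not something the SDK requires).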

</Step>

<Step title="Wrap your LangChain-powered feature">
Wrap the code that runs your LangChain chain with a Telemetry prompt span, and execute the chain inside that span.

```ts
import { context } from '@opentelemetry/api'
import { BACKGROUND } from '@latitude-data/telemetry'
import { createAgent } from 'langchain'

import { telemetry } from './telemetry' // the LatitudeTelemetry instance created in the previous step

export async function generateSupportReply(input: string) {
  // Open a prompt span tied to a specific Latitude prompt and version
  const $prompt = telemetry.prompt(BACKGROUND(), {
    promptUuid: 'your-prompt-uuid',
    versionUuid: 'your-version-uuid',
  })

  await context
    .with($prompt.context, async () => {
      const agent = createAgent({ model: 'claude-sonnet-4-5' })
      const result = await agent.invoke({
        messages: [
          {
            role: 'user',
            content: input,
          },
        ],
      })

      // Use result here...
    })
    .then(() => $prompt.end())
    .catch((error) => $prompt.fail(error as Error))
    .finally(() => telemetry.flush())
}
```
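
For reference, a minimal usage sketch; the input string is illustrative:

```ts
await generateSupportReply('My order arrived damaged. What are my options?')
```

Note the `telemetry.flush()` in the `finally` block: in short-lived processes such as serverless functions, it ensures buffered spans are exported before the runtime shuts down.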

</Step>

</Steps>

---

## Seeing your logs in Latitude

Once you've wrapped your LangChain-powered feature, you can see your logs in Latitude.

1. Go to the **Traces** section of your prompt in Latitude.
2. You should see new entries every time your chain is executed, including:
- Chain input/output
- Provider calls made within the chain (when instrumented)
- Latency and error information
docs/integrations/frameworks/llamaindex.mdx (123 changes: 123 additions & 0 deletions)
@@ -0,0 +1,123 @@
---
title: LlamaIndex
description: Connect your LlamaIndex-based application to Latitude Telemetry to observe queries per feature and run evaluations.
---

## Overview

This guide shows you how to integrate **Latitude Telemetry** into an existing application that uses **LlamaIndex**.

After completing these steps:

- Each LlamaIndex query or pipeline execution can be captured as a log in Latitude.
- Logs are attached to a specific **prompt** and **version** in Latitude.
- You can annotate, evaluate, and debug your LlamaIndex-powered features from the Latitude dashboard.

> You keep using LlamaIndex as usual — Telemetry observes calls made through the LlamaIndex library.

---

## Requirements

Before you start, make sure you have:

- A **Latitude account** and **API key**.
- At least one **prompt** created in Latitude.
- A Node.js-based project that uses `llamaindex`.

---

## Steps

<Steps>

<Step title="Install requirements">
Add the Latitude Telemetry package to your project:

<CodeGroup>
```bash npm
npm add @latitude-data/telemetry @opentelemetry/api
```

```bash pnpm
pnpm add @latitude-data/telemetry @opentelemetry/api
```

```bash yarn
yarn add @latitude-data/telemetry @opentelemetry/api
```

```bash bun
bun add @latitude-data/telemetry @opentelemetry/api
```
</CodeGroup>

</Step>

<Step title="Initialize Latitude Telemetry with LlamaIndex">
Create a `LatitudeTelemetry` instance and pass the LlamaIndex module as an instrumentation.

```ts
import { LatitudeTelemetry } from '@latitude-data/telemetry'
import * as LlamaIndex from 'llamaindex'

export const telemetry = new LatitudeTelemetry('your-latitude-api-key', {
instrumentations: {
llamaindex: LlamaIndex, // Enables automatic tracing for LlamaIndex
},
})
```
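
As in the previous snippet, the API key is hardcoded for clarity; in practice, load it from the environment (for example, `process.env.LATITUDE_API_KEY`; the variable name is illustrative) rather than committing it to source.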

</Step>

<Step title="Wrap your LlamaIndex-powered feature">
Wrap the code that calls LlamaIndex with a Telemetry prompt span, and execute your query or pipeline inside that span.

```ts
import { context } from '@opentelemetry/api'
import { BACKGROUND } from '@latitude-data/telemetry'

import { agent } from '@llamaindex/workflow'
import { Settings } from 'llamaindex'
import { openai } from '@llamaindex/openai'

import { telemetry } from './telemetry' // the LatitudeTelemetry instance created in the previous step

export async function answerQuestion(input: string) {
  // Open a prompt span tied to a specific Latitude prompt and version
  const $prompt = telemetry.prompt(BACKGROUND(), {
    promptUuid: 'your-prompt-uuid',
    versionUuid: 'your-version-uuid',
  })

  await context
    .with($prompt.context, async () => {
      // Configure the LLM used by LlamaIndex
      Settings.llm = openai({
        apiKey: process.env.OPENAI_API_KEY,
        model: 'gpt-4o',
      })

      const myAgent = agent({})
      const response = await myAgent.run(input)

      // Use response here...
    })
    .then(() => $prompt.end())
    .catch((error) => $prompt.fail(error as Error))
    .finally(() => telemetry.flush())
}
```
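
A minimal usage sketch; the question is illustrative:

```ts
await answerQuestion('What does the refund policy say about damaged items?')
```

This example reads `OPENAI_API_KEY` from the environment for the underlying model call, and the `telemetry.flush()` in the `finally` block ensures buffered spans are exported before a short-lived process exits.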

</Step>

</Steps>

---

## Seeing your logs in Latitude

Once you've wrapped your LlamaIndex-powered feature, you can see your logs in Latitude.

1. Go to the **Traces** section of your prompt in Latitude.
2. You should see new entries every time your query runs, including:
- Query input and generated answer
- Underlying provider calls (when instrumented)
- Latency and error information