Structured outputs
When using the `.create` method in the Python SDK, we can generate structured outputs by providing a Pydantic model along with our prompt.
When using the `.create` method in the TypeScript SDK, we can generate structured outputs by providing a Zod schema along with our prompt.
# Route a coding question across several candidate LLMs and have the reply
# parsed into a typed Pydantic object via the `response_model` argument.
from notdiamond import NotDiamond
from pydantic import BaseModel, Field


class LanguageChoice(BaseModel):
    # Fields define the schema the model's answer must conform to.
    language: str = Field(description="The programming language of choice.")
    reason: str = Field(description="The reason to pick the programming language.")


# Candidate models the router may choose between, as "provider/model" strings.
candidate_models = [
    'openai/gpt-3.5-turbo',
    'openai/gpt-4-turbo-2024-04-09',
    'openai/gpt-4o-2024-05-13',
    'anthropic/claude-3-opus-20240229',
]

chat_messages = [
    {"role": "system", "content": "You are a world class software developer."},
    {"role": "user", "content": "What language would you suggest for developing a web application?"}
]

nd_client = NotDiamond()

# `create` returns the parsed LanguageChoice instance plus a session id.
structured_result, session_id, _ = nd_client.chat.completions.create(
    messages=chat_messages,
    model=candidate_models,
    response_model=LanguageChoice,
)
print(structured_result)
// Define the expected response shape with Zod and pass it via
// `responseModel` so the selected LLM returns structured output.
import { z } from "zod";
// Fix: the original snippet used `new NotDiamond()` without importing it.
import { NotDiamond } from "notdiamond";

const structuredOutput = z.object({
  name: z.string(),
  age: z.union([z.number(), z.string()]),
});

const notDiamond = new NotDiamond();

const result = await notDiamond.create({
  messages: [{ content: 'What is 12x12?', role: 'user' }],
  // Candidate providers/models the router may choose between.
  llmProviders: [
    { provider: 'openai', model: 'gpt-4o-2024-05-13' },
    { provider: 'anthropic', model: 'claude-3-5-sonnet-20240620' },
    { provider: 'google', model: 'gemini-1.5-pro-latest' },
  ],
  tradeoff: 'latency',
  responseModel: structuredOutput,
});
We can also do this with a streamed response. Response model fields will start out as `None` and be gradually filled in as the response is streamed. Example:
# Stream a structured response: the router picks a model and the
# `LanguageChoice` fields are populated incrementally as chunks arrive.
from notdiamond import NotDiamond
from pydantic import BaseModel, Field


class LanguageChoice(BaseModel):
    # Fields define the schema the streamed answer is parsed into.
    language: str = Field(description="The programming language of choice.")
    reason: str = Field(description="The reason to pick the programming language.")


candidate_models = [
    'openai/gpt-3.5-turbo',
    'openai/gpt-4-turbo-2024-04-09',
    'openai/gpt-4o-2024-05-13',
    'anthropic/claude-3-opus-20240229',
]

chat_messages = [
    {"role": "system", "content": "You are a world class software developer."},
    {"role": "user", "content": "What language would you suggest for developing a web application?"}
]

# Unlike the non-streamed example, the candidate models are configured
# on the client here rather than passed per call.
nd_client = NotDiamond(llm_configs=candidate_models)

stream = nd_client.chat.completions.stream(
    messages=chat_messages,
    response_model=LanguageChoice,
)

# Each chunk is a partially-populated response model instance.
for partial in stream:
    print(partial)
// Stream a structured response; `result.stream` yields chunks as they arrive.
import { z } from "zod";
// Fix: the original snippet used `new NotDiamond()` without importing it.
import { NotDiamond } from "notdiamond";

const structuredOutput = z.object({
  name: z.string(),
  age: z.union([z.number(), z.string()]),
});

const notDiamond = new NotDiamond();

const result = await notDiamond.stream({
  messages: [{ content: 'What is 12x12?', role: 'user' }],
  llmProviders: [
    { provider: 'openai', model: 'gpt-4o-2024-05-13' },
    { provider: 'anthropic', model: 'claude-3-5-sonnet-20240620' },
    { provider: 'google', model: 'gemini-1.5-pro-latest' },
  ],
  tradeoff: 'latency',
  responseModel: structuredOutput,
});

// NOTE(review): `setMessage` is assumed to be a React state setter provided
// by the surrounding component — confirm when embedding this snippet.
for await (const chunk of result.stream) {
  setMessage((prevMessage) => prevMessage + chunk);
}
Note: not all models support structured outputs. Visit the supported LLM models page to see which models do.
Updated about 2 months ago