LangChain integration

Installation

Requires Python 3.9+

pip install notdiamond
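
The examples below assume API credentials are available to the client. A minimal sketch of one way to provide them, assuming the SDK and the underlying provider clients read standard environment variables (the variable names here are illustrative; check the SDK's configuration docs for the exact ones):

import os

# Illustrative variable names; set only the keys for the providers you route to.
os.environ["NOTDIAMOND_API_KEY"] = "YOUR_NOTDIAMOND_API_KEY"
os.environ["OPENAI_API_KEY"] = "YOUR_OPENAI_API_KEY"
os.environ["ANTHROPIC_API_KEY"] = "YOUR_ANTHROPIC_API_KEY"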

Integration

If you already have a LangChain project up and running, integrating Not Diamond into your code takes only a few line changes. Not Diamond inherits many LangChain functionalities, making the switch seamless. For more information on LangChain, check out their docs.

PromptTemplate use case

from langchain_core.prompts import PromptTemplate
- from langchain_openai import ChatOpenAI
+ from notdiamond.llms.llm import NDLLM

user_input = "Write merge sort in Python."

prompt_template = PromptTemplate.from_template(
    "You are a world class software developer. {user_input}"
)

- model = ChatOpenAI(model_name='gpt-3.5-turbo')
- chain = prompt_template | model
- result = chain.invoke({"user_input": user_input})

+ nd_llm = NDLLM(llm_providers=['openai/gpt-3.5-turbo', 'anthropic/claude-3-opus-20240229'])
+ result, session_id, provider = nd_llm.invoke(prompt_template=prompt_template,
+                                                 input={"user_input": user_input})

+ print(provider.model)
print(result.content)

Full example without Not Diamond:

from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI

user_input = "Write merge sort in Python."

prompt_template = PromptTemplate.from_template(
    "You are a world class software developer. {user_input}"
)

model = ChatOpenAI(model_name='gpt-3.5-turbo')
chain = prompt_template | model
result = chain.invoke({"user_input": user_input})

print(result.content)

Full example with Not Diamond:

from langchain_core.prompts import PromptTemplate
from notdiamond.llms.llm import NDLLM

user_input = "Write merge sort in Python."

prompt_template = PromptTemplate.from_template(
    "You are a world class software developer. {user_input}"
)

nd_llm = NDLLM(llm_providers=['openai/gpt-3.5-turbo', 'openai/gpt-4', 'anthropic/claude-2.1', 'google/gemini-pro'])
result, session_id, provider = nd_llm.invoke(prompt_template=prompt_template,
                                                input={"user_input": user_input})

print(provider.model)
print(result.content)
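
The invoke call returns the model output together with a session ID and the provider Not Diamond routed to. Because routing is decided per request, the same NDLLM instance can send different prompts to different models. A small sketch that reuses the objects defined above to make this visible (no new API, just repeated invoke calls):

# Reuse the same router for several tasks; Not Diamond may pick a different
# provider for each one depending on the prompt.
tasks = ["Write merge sort in Python.", "Explain the time complexity of merge sort."]

for task in tasks:
    result, session_id, provider = nd_llm.invoke(prompt_template=prompt_template,
                                                 input={"user_input": task})
    print(f"{provider.model} -> {result.content[:60]}...")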

ChatPromptTemplate use case

from langchain_core.prompts import ChatPromptTemplate
- from langchain_openai import ChatOpenAI
+ from notdiamond.llms.llm import NDLLM


chat_template = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a world class software developer. Your name is {name}."),
        ("human", "Hello, how are you doing?"),
        ("ai", "I'm doing well, thanks!"),
        ("human", "{user_input}")
    ]
)

- model = ChatOpenAI(model_name='gpt-3.5-turbo')
- chain = chat_template | model
- result = chain.invoke({"user_input": "Can you tell me the difference between systemd-boot and grub?", 
-                        "name": "Linus Torvalds"})

+ nd_llm = NDLLM(llm_providers=['openai/gpt-3.5-turbo', 'anthropic/claude-3-opus-20240229'])
+ result, session_id, provider = nd_llm.invoke(prompt_template=chat_template,
+                                                 input={"user_input": "Can you tell me the difference between systemd-boot and grub?", 
+                                                        "name": "Linus Torvalds"})

+ print(provider.model)
print(result.content)

Full example without Not Diamond:

from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI


chat_template = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a world class software developer. Your name is {name}."),
        ("human", "Hello, how are you doing?"),
        ("ai", "I'm doing well, thanks!"),
        ("human", "{user_input}")
    ]
)

model = ChatOpenAI(model_name='gpt-3.5-turbo')
chain = chat_template | model
result = chain.invoke({"user_input": "Can you tell me the difference between systemd-boot and grub?", 
                       "name": "Linus Torvalds"})

print(result.content)

Full example with Not Diamond:

from langchain_core.prompts import ChatPromptTemplate
from notdiamond.llms.llm import NDLLM


chat_template = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a world class software developer. Your name is {name}."),
        ("human", "Hello, how are you doing?"),
        ("ai", "I'm doing well, thanks!"),
        ("human", "{user_input}")
    ]
)

nd_llm = NDLLM(llm_providers=['openai/gpt-3.5-turbo', 'openai/gpt-4', 'anthropic/claude-2.1', 'google/gemini-pro'])
result, session_id, provider = nd_llm.invoke(prompt_template=chat_template,
                                                input={"user_input": "Can you tell me the difference between systemd-boot and grub?", 
                                                       "name": "Linus Torvalds"})

print(provider.model)
print(result.content)
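
To continue the conversation, one option is to rebuild the chat template with the previous answer appended as an ai message and invoke again. This is only a sketch using the ChatPromptTemplate and invoke patterns shown above; the follow-up question and the {previous_answer} variable are illustrative:

# Extend the conversation with the previous answer and a follow-up question.
followup_template = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a world class software developer. Your name is {name}."),
        ("human", "Can you tell me the difference between systemd-boot and grub?"),
        ("ai", "{previous_answer}"),
        ("human", "Which one would you recommend for a desktop system?")
    ]
)

result, session_id, provider = nd_llm.invoke(prompt_template=followup_template,
                                             input={"name": "Linus Torvalds",
                                                    "previous_answer": result.content})

print(provider.model)
print(result.content)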

Streaming

from langchain_core.prompts import PromptTemplate
- from langchain_openai import ChatOpenAI
+ from notdiamond.llms.llm import NDLLM


prompt_template = PromptTemplate.from_template(
    "You are a world class software developer. {user_input}"
)

- chat = ChatOpenAI(model_name="gpt-3.5-turbo")
+ chat = NDLLM(llm_providers=['openai/gpt-3.5-turbo', 'anthropic/claude-3-opus-20240229'])

- for chunk in chat.stream(prompt_template.format(user_input="Write merge sort in Python.")):
+ for chunk in chat.stream(prompt_template=prompt_template, input={"user_input": "Write merge sort in Python."}):
    print(chunk.content, end="", flush=True)

Full example without Not Diamond:

from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI


prompt_template = PromptTemplate.from_template(
    "You are a world class software developer. {user_input}"
)

chat = ChatOpenAI(model_name="gpt-3.5-turbo")

for chunk in chat.stream(prompt_template.format(user_input="Write merge sort in Python.")):
    print(chunk.content, end="", flush=True)

Full example with Not Diamond:

from langchain_core.prompts import PromptTemplate
from notdiamond.llms.llm import NDLLM


prompt_template = PromptTemplate.from_template(
    "You are a world class software developer. {user_input}"
)

chat = NDLLM(llm_providers=['openai/gpt-3.5-turbo', 'openai/gpt-4', 'anthropic/claude-2.1', 'google/gemini-pro'])

for chunk in chat.stream(prompt_template=prompt_template, input={"user_input": "Write merge sort in Python."}):
    print(chunk.content, end="", flush=True)
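
If you need the complete response after streaming finishes (for logging or further processing), you can accumulate the chunks as they arrive. This sketch relies only on the chunk.content attribute used above:

# Print the stream as it arrives while also collecting the full response.
chunks = []
for chunk in chat.stream(prompt_template=prompt_template, input={"user_input": "Write merge sort in Python."}):
    print(chunk.content, end="", flush=True)
    chunks.append(chunk.content)

full_response = "".join(chunks)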