Australia/Sydney
Blog — July 7, 2023

LLM Caching Using LangChain - Step by Step Tutorial

Fahd Mirza

 



! pip install openai

! pip install langchain

! pip install yt_dlp

! pip install pydub


import langchain

import time

import os

import openai

import sys

sys.path.append('../..')


from dotenv import load_dotenv, find_dotenv

_ = load_dotenv(find_dotenv()) # read local .env file


openai.api_key = ('sk-6kPZK2gbUUEsv2NY')


from langchain.document_loaders import WebBaseLoader

from langchain.llms import OpenAI

from langchain.callbacks import get_openai_callback

from langchain.cache import InMemoryCache


langchain.llm_cache=InMemoryCache()


loader = WebBaseLoader("https://www.fahdmirza.com/2023/07/installing-draggan-on-linux-locally.html")


print("------------------------------------------------")

start_time=time.time()

docs = loader.load()

print(docs[0].page_content[:100])

print(docs[0].metadata)

end_time=time.time()

print(f"Time Taken:{end_time-start_time:0.2f} seconds")

print("------------------------------------------------")


Share this post:
On this page

Let's Partner

If you are looking to build, deploy or scale AI solutions — whether you're just starting or facing production-scale challenges — let's chat.

Subscribe to Fahd's Newsletter

Weekly updates on AI, cloud engineering, and tech innovations