我获取了几个代码示例,添加了一些 Streamlit 标记,然后毁掉了我的项目。一切都处理得很好,直到你向这个应用程序提出问题 - 然后我收到循环引用错误。有人可以看一下并让我知道我做错了什么吗?
错误:
An error occurred: Circular reference detected on line <traceback object at 0x0000027FDC728440>
Error in tracing queue
Traceback (most recent call last):
File "\venv\Lib\site-packages\langsmith\client.py", line 4112, in _tracing_thread_handle_batch
client.batch_ingest_runs(create=create, update=update, pre_sampled=True)
File "\venv\Lib\site-packages\langsmith\client.py", line 1220, in batch_ingest_runs
"post": [_dumps_json(run) for run in raw_body["post"]],
^^^^^^^^^^^^^^^^
File "\venv\Lib\site-packages\langsmith\client.py", line 236, in _dumps_json
return _dumps_json_single(obj, functools.partial(_serialize_json, depth=depth))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "\venv\Lib\site-packages\langsmith\client.py", line 211, in _dumps_json_single
return orjson.dumps(
^^^^^^^^^^^^^
更新 我重新编写了代码,将其分解为菜单选项,以防它以某种方式递归地重新运行某些内容。这没有帮助...
import os
import streamlit as st # used to create our UI frontend
from langchain_openai import ChatOpenAI # used for GPT3.5/4 model
from langchain_community.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores import Chroma
from langchain.chains import ConversationalRetrievalChain
from langchain_community.document_loaders import YoutubeLoader
from langchain_community.callbacks import get_openai_callback
def init_page():
st.set_page_config(
page_title="The Moss Masher",
page_icon=":dog:"
)
st.sidebar.title("Menu")
st.sidebar.image("./FullLogo.png", width=300, use_column_width="always")
st.session_state.costs = []
def select_model(model, aiTemp):
if model == "GPT-3.5":
st.session_state.model_name = "gpt-3.5-turbo"
st.session_state.max_token = 16385
elif model == "GPT-3.5-16k":
st.session_state.model_name = "gpt-3.5-turbo-16k"
st.session_state.max_token = 16385
else:
st.session_state.model_name = "gpt-4"
st.session_state.max_token = 8192
# 300: The number of tokens for instructions outside the main text
return ChatOpenAI(temperature=aiTemp, model_name=st.session_state.model_name)
def init_messages():
clear_button = st.sidebar.button("Clear Conversation", key="clear")
if clear_button or "messages" not in st.session_state:
st.session_state.messages = []
st.session_state.costs = []
def clear_history():
if'history' in st.session_state:
del st.session_state['history']
def create_login_page():
st.title(":football: :green[The Moss Masher] :dog:")
st.header("Mash your long documents and videos into easy answers!")
container = st.container()
with container:
with st.form(key="frmOptions", clear_on_submit=True):
st.markdown("To use this app, please provided an OpenAI key")
st.markdown(
"First, create an OpenAI account or sign in: "
"https://platform.openai.com/signup and then go to the API key page, "
"https://platform.openai.com/account/api-keys, and create new secret key.")
userEnteredCode = st.text_input("Please enter your API Key:")
submit_button = st.form_submit_button(label='Authenticate My Key')
st.session_state["holdMe"] = os.environ["OPENAI_API_KEY"]
if submit_button and userEnteredCode and userEnteredCode != "":
# We need to set our environmental variable
if os.environ["MOSS_MASHER"] != userEnteredCode:
os.environ["OPENAI_API_KEY"] = userEnteredCode
else:
os.environ["OPENAI_API_KEY"] = st.session_state["holdMe"]
st.markdown("Got your code! Now click on Upload a PDF or Enter Youtube Video!")
def create_pdf_page():
st.title(":football: :green[The Moss Masher] :dog:")
st.header("Mash your long documents and videos into easy answers!")
container = st.container()
with container:
uploaded_file = st.file_uploader('Select your file and click Add File:', type=['pdf', 'docx', 'txt'])
add_file = st.button('Add File', on_click=clear_history)
if uploaded_file and add_file:
with st.spinner('Mashing your file into vectors, this may take a while...'):
bytes_data = uploaded_file.read()
file_name = os.path.join('./', uploaded_file.name)
with open(file_name, 'wb') as f:
f.write(bytes_data)
name, extension = os.path.splitext(file_name)
if extension == '.pdf':
from langchain_community.document_loaders import PyPDFLoader
loader = PyPDFLoader(file_name)
elif extension == '.docx':
from langchain_community.document_loaders import Docx2txtLoader
loader = Docx2txtLoader(file_name)
elif extension == '.txt':
from langchain_community.document_loaders import TextLoader
loader = TextLoader(file_name)
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
chunks = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
vector_store = Chroma.from_documents(chunks, embeddings)
os.remove(file_name)
st.session_state["retriever"] = vector_store.as_retriever()
st.write("PDF mashed! Click on Ask Questions!")
def create_youtube_page():
st.title(":football: :green[The Moss Masher] :dog:")
st.header("Mash your long documents and videos into easy answers!")
container = st.container()
with container:
youtube_url = st.text_input('Or enter your Youtube URL')
if youtube_url:
with st.spinner('Mashing your video in vectors...'):
loader = YoutubeLoader.from_youtube_url(youtube_url)
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
chunks = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
vector_store = Chroma.from_documents(chunks, embeddings)
st.session_state["retriever"] = vector_store.as_retriever()
st.write("Video mashed! Click on Ask Questions!")
def ask_questions():
st.title(":football: :green[The Moss Masher] :dog:")
st.header("Mash your long documents and videos into easy answers!")
question = st.text_input('Enter your question here!')
model = st.sidebar.radio("Choose a model:", ("GPT-3.5", "GPT-3.5-16k", "GPT-4"))
aiTemp = st.sidebar.slider("How Strict (0) to Creative(10) do you want your responses:", min_value=0.0,
max_value=2.0, value=0.0, step=0.01)
if question:
with st.spinner('Looking for your answer now....'):
llm = select_model(model, aiTemp)
crc = ConversationalRetrievalChain.from_llm(llm, st.session_state["retriever"])
if "crc" in st.session_state:
crc = st.session_state.crc
if 'history' not in st.session_state:
st.session_state['history'] = []
if 'costs' not in st.session_state:
st.session_state['costs'] = []
with get_openai_callback() as cb:
response = crc.invoke({'question': question, 'chat_history': st.session_state['history']})
st.session_state['costs'].append(cb.total_cost)
st.session_state['history'].append((question, response))
st.write(response)
# for prompts in st.session_state['history']:
# st.write("Question: " + prompts[0])
# st.write("Answer: " + prompts[1])
costs = st.session_state.get('costs', [])
st.sidebar.markdown("## Costs")
st.sidebar.markdown(f"**Total cost: ${sum(costs):.5f}**")
for cost in costs:
st.sidebar.markdown(f"- ${cost:.5f}")
def main():
try:
init_page()
selection = st.sidebar.radio("Go to",
["Enter OpenAI Key", "Upload A PDF", "Enter YouTube Video", "Ask Questions"])
if selection == "Enter OpenAI Key":
create_login_page()
elif selection == "Upload A PDF":
create_pdf_page()
elif selection == "Enter YouTube Video":
create_youtube_page()
elif selection == "Ask Questions":
ask_questions()
except Exception as error:
print(f"An error occurred: {error} on line {error.__traceback__}")
st.error("Your API key was not valid, please close the site and then try again.")
finally:
print("Done")
if __name__ == "__main__":
main()
事实证明,循环引用是一个非常具有误导性的错误,与代码无关。如果您使用 ChatGPT API 并且查询后出现错误,则会出现该错误。在这种情况下,我试图写出主要响应对象而不是response.content。
找到重构的代码