How to access the current user's auth_state from a notebook

Question (votes: 0, answers: 3)

I'm using JupyterHub with a custom authenticator. It sets auth_state with an access token, which can then be copied into the single-user environment inside the pre_spawn_start method, as in this example:

from tornado import gen

from jupyterhub.auth import Authenticator


class MyAuthenticator(Authenticator):
    @gen.coroutine
    def authenticate(self, handler, data=None):
        # identify_user and token_for_user are this authenticator's own helpers
        username = yield identify_user(handler, data)
        upstream_token = yield token_for_user(username)
        return {
            'name': username,
            'auth_state': {
                'upstream_token': upstream_token,
            },
        }

    @gen.coroutine
    def pre_spawn_start(self, user, spawner):
        """Pass upstream_token to spawner via environment variable"""
        auth_state = yield user.get_auth_state()
        if not auth_state:
            # auth_state not enabled
            return
        spawner.environment['UPSTREAM_TOKEN'] = auth_state['upstream_token']

However, this only happens once per user. If I log out and log back in, pre_spawn_start is not called again, and the old token is still present in the environment variable.

Is it possible to call user.get_auth_state() directly from the notebook, so that I can be sure I'm using the current token rather than the one that was set earlier and stored in the environment?

Failing that, is it possible to force the spawner to stop on logout, so that a subsequent login triggers pre_spawn_start again?
jupyter-notebook jupyterhub
3 Answers
0 votes

You can force the server to stop and require a fresh login when the token expires. This can be achieved as follows:

  1. Implement a refresh_user method that validates/obtains the token (sample code below).
  2. Set how often refresh_user runs:
    • c.MyAuthenticator.auth_refresh_age = 30  # e.g. the token's validity period, in seconds
import asyncio
import concurrent.futures
import inspect


async def refresh_user(self, user, handler=None):
    """
    1. Check whether the token is still valid; if not, call _shutdown_servers
       and redirect to the login page.
    2. If auth_refresh_age is set to the token expiry, this directly shuts the
       servers down and redirects to login.
    This stops the single-user servers; once the user is redirected to login,
    the auth flow runs again and fresh tokens are passed to the spawner.
    """
    auth_state = await user.get_auth_state()
    if self._is_invalid_sessionid(auth_state):  # the answerer's own validity check
        await self._shutdown_servers(user, handler=handler)
        if handler is not None:
            handler.clear_login_cookie()
            handler.redirect('/login')
    return True

async def maybe_future(obj):
    """Return an asyncio Future
    Use instead of gen.maybe_future
    For our compatibility, this must accept:
    - asyncio coroutine (gen.maybe_future doesn't work in tornado < 5)
    - tornado coroutine (asyncio.ensure_future doesn't work)
    - scalar (asyncio.ensure_future doesn't work)
    - concurrent.futures.Future (asyncio.ensure_future doesn't work)
    - tornado Future (works both ways)
    - asyncio Future (works both ways)
    """
    if inspect.isawaitable(obj):
        # already awaitable, use ensure_future
        return asyncio.ensure_future(obj)
    elif isinstance(obj, concurrent.futures.Future):
        return asyncio.wrap_future(obj)
    else:
        # could also check for tornado.concurrent.Future
        # but with tornado >= 5.1 tornado.Future is asyncio.Future
        f = asyncio.Future()
        f.set_result(obj)
        return f

async def _shutdown_servers(self, user, handler):
    """Shutdown servers for logout

    Get all active servers for the provided user and stop them.
    """
    active_servers = [
        name
        for (name, spawner) in user.spawners.items()
        if spawner.active and not spawner.pending
    ]
    if active_servers:
        self.log.info("Shutting down %s's servers", user.name)
        futures = []
        for server_name in active_servers:
            # maybe_future is the module-level helper defined above
            futures.append(maybe_future(handler.stop_single_user(user, server_name)))
        await asyncio.gather(*futures)
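
For auth_state to be readable at all, it must be enabled and persisted, and the refresh age must be configured. A minimal jupyterhub_config.py sketch, assuming the methods above are added to the MyAuthenticator class from the question:

# jupyterhub_config.py -- minimal sketch; MyAuthenticator is assumed to be the
# question's custom authenticator with refresh_user/_shutdown_servers added
c.JupyterHub.authenticator_class = MyAuthenticator

# Persist auth_state in the Hub database (JUPYTERHUB_CRYPT_KEY must be set in
# the environment so the Hub can encrypt it)
c.Authenticator.enable_auth_state = True

# Treat cached auth info as stale after this many seconds, triggering
# refresh_user; match it to the upstream token lifetime
c.MyAuthenticator.auth_refresh_age = 30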

References:
https://jupyterhub.readthedocs.io/en/stable/api/auth.html#jupyterhub.auth.Authenticator.auth_refresh_age
https://jupyterhub.readthedocs.io/en/stable/api/auth.html#jupyterhub.auth.Authenticator.refresh_user


0 votes
from langchain.chains import LLMChain, RetrievalQA
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from langchain.agents import AgentType, Tool, initialize_agent
from langchain.chat_models import ChatOpenAI
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings

# Step 1: Initialize the LLM with GPT-4 (a chat model, so use ChatOpenAI)
llm = ChatOpenAI(model_name="gpt-4")

# Step 2: Define memory for conversational context
memory = ConversationBufferMemory(memory_key="chat_history")

# Step 3: Define a Tool for querying the Vector Store
def query_vector_store(query):
    # Assuming `documents` is the list of CER documents
    embeddings = OpenAIEmbeddings(model="text-embedding-ada-002")
    vector_store = FAISS.from_documents(documents, embeddings)
    # RetrievalQA is built via its from_chain_type factory
    qa = RetrievalQA.from_chain_type(llm=llm, retriever=vector_store.as_retriever())
    return qa.run(query)

# Step 4: Define the tools
tools = [
    Tool(
        name="Query Vector Store",
        func=query_vector_store,
        description="Use this tool to query the Country Engagement Rules (CER) database after gathering all necessary inputs."
    )
]

# Step 5: Create the ReAct-style prompt template
prompt_template = """
You are an assistant that helps with Country Engagement Rules (CER) in a bank. You need to gather all required variables before using tools to fetch information.
Your task is to check the user's input and determine if the required variables (business_line, employment_location) have been provided.
If some information is missing, ask the user to clarify. Once all required information is collected, you may use a tool to query the database.

Example:
User: "We are looking to onboard a client in Guatemala. What are the rules?"
Assistant: "Can you advise the business line and the employment location?"

If the user provides all the required information, use the "Query Vector Store" tool to retrieve the relevant rules.
Chat History:
{chat_history}

User Input:
{input}

If all required variables are present, respond with 'All variables collected' and proceed to query the database. If any variable is missing, ask the user for the missing information.
"""

# Steps 6-7: Create the conversational agent executor; the custom instructions
# above are supplied as the agent prompt prefix via agent_kwargs
agent_executor = initialize_agent(
    tools,
    llm,
    agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,
    memory=memory,
    agent_kwargs={"prefix": prompt_template},
)

# Step 8: Define the chatbot interaction function
def chat_with_user(user_input):
    # Run the agent executor, which will determine whether to prompt or query the vector store
    response = agent_executor.run(user_input)
    return response

# Example conversation loop
def chatbot_interaction():
    while True:
        user_input = input("User: ")
        bot_response = chat_with_user(user_input)
        print(f"Assistant: {bot_response}")

# Start the chatbot
chatbot_interaction()


# NB: this second variant is a pseudocode sketch of the same
# gather-variables-then-query flow -- Action, ChatGPT, ConversationSummary and
# VariableExtractor are illustrative names, not actual LangChain classes
from langchain import LLMChain, PromptTemplate, Tool, Action
from langchain.llms import ChatGPT
from langchain.chat_models import ChatMessage
from langchain.tools import ConversationSummary, VariableExtractor

# Initialize ChatGPT-4 model
llm = ChatGPT(model_name="gpt-4")

# Define required variables for CER
required_variables = ["business_line", "employment_location", "country"]

# Define ReAct prompt template
react_prompt_template = PromptTemplate(
    template="Given the conversation history and the following requirements: {requirements}. Please respond with the next question to ask the user to fulfill the requirements.",
    input_variables=["requirements"]
)

# Define variable extraction prompt template
variable_extraction_prompt_template = PromptTemplate(
    template="Identify the {variable} from the user's response: {user_response}.",
    input_variables=["variable", "user_response"]
)

# Define tools
conversation_summary = ConversationSummary()
variable_extractor = VariableExtractor(llm=llm, prompt_template=variable_extraction_prompt_template)

# Define actions
ask_user_action = Action(
    name="ask_user",
    prompt_template=react_prompt_template,
    tool=conversation_summary,
    llm=llm
)

extract_variable_action = Action(
    name="extract_variable",
    prompt_template=variable_extraction_prompt_template,
    tool=variable_extractor,
    llm=llm
)

# Define LLM chain
cer_chain = LLMChain(
    llm=llm,
    actions=[ask_user_action, extract_variable_action],
    variable_storage="cer_variables"
)

# Define the chatbot function
def cer_chatbot(prompt: str) -> str:
    # Initialize the conversation
    conversation = [ChatMessage(prompt=prompt, role="user")]

    # Run the LLM chain
    output = cer_chain.run(
        conversation=conversation,
        variables=required_variables,
        max_iterations=len(required_variables)
    )

    # Frame the final prompt with collected variables
    final_prompt = f"Based on the following information: business line - {output['variables']['business_line']}, employment location - {output['variables']['employment_location']}, country - {output['variables']['country']}. What are the CER rules for onboarding a client?"
    # Retrieve information from vector store using LLM
    answer = llm(final_prompt)
    return answer

# Test the chatbot
prompt = "We are looking to onboard a client located in Guatemala, what are the rules for this?"
print(cer_chatbot(prompt))

import ast
import os
from typing import List, Union
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA, LLMChain
from langchain.prompts import PromptTemplate, StringPromptTemplate
from langchain.memory import ConversationBufferMemory
from langchain.agents import AgentExecutor, AgentOutputParser, LLMSingleActionAgent, Tool
from langchain.schema import AgentAction, AgentFinish

# Set your OpenAI API key
os.environ["OPENAI_API_KEY"] = "your-api-key-here"

# Initialize embeddings and vector store
embeddings = OpenAIEmbeddings()
vector_store = Chroma(embedding_function=embeddings, persist_directory="./chroma_db")

# Initialize ChatGPT-4 model
llm = ChatOpenAI(model_name="gpt-4", temperature=0)

# Define required variables and their descriptions
REQUIRED_VARIABLES = {
    "business_line": "The specific area of banking or financial services the client is interested in",
    "employment_location": "The location where the client's employees are based",
    "client_location": "The country or region where the client's business is registered or primarily operates"
}

# Create a retriever from the vector store
retriever = vector_store.as_retriever()

# Define the ReAct prompt template
react_template = """Answer the following questions as best you can. You have access to the following tools:

{tools}

Use the following format:

Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question

Begin!

Question: {input}
Thought: To answer this question about Country Engagement Rules (CER), I need to gather all the required information and then query the vector store for relevant rules. Let's check what information we have and what we need to ask for.

{agent_scratchpad}
"""

# Create the prompt template. A plain PromptTemplate cannot fill in
# tools/tool_names/agent_scratchpad, so use the standard custom-agent pattern:
# a StringPromptTemplate that builds those values at format time.
class CERPromptTemplate(StringPromptTemplate):
    template: str
    tools: List[Tool]

    def format(self, **kwargs) -> str:
        # Build the agent scratchpad from the intermediate (action, observation) steps
        intermediate_steps = kwargs.pop("intermediate_steps")
        thoughts = ""
        for action, observation in intermediate_steps:
            thoughts += action.log
            thoughts += f"\nObservation: {observation}\nThought: "
        kwargs["agent_scratchpad"] = thoughts
        kwargs["tools"] = "\n".join(f"{t.name}: {t.description}" for t in self.tools)
        kwargs["tool_names"] = ", ".join(t.name for t in self.tools)
        return self.template.format(**kwargs)

# Define a function to check for missing variables using LLM
def check_missing_variables(query: str) -> List[str]:
    context_check_template = """
    Given the following user query about Country Engagement Rules (CER):
    "{query}"
    
    Determine if the following required information is present or can be inferred:
    {required_vars}
    
    For each piece of required information, respond with either:
    - The specific information provided or inferred
    - "MISSING" if the information is not present and cannot be reasonably inferred
    
    Format your response as a Python dictionary, like this:
    {{"business_line": "Transaction Banking", "employment_location": "MISSING", "client_location": "Guatemala"}}
    """
    
    context_check_prompt = PromptTemplate(
        input_variables=["query", "required_vars"],
        template=context_check_template
    )
    
    context_check_chain = LLMChain(llm=llm, prompt=context_check_prompt)
    
    result = context_check_chain.run(query=query, required_vars=str(REQUIRED_VARIABLES))
    
    # Parse the result (assuming it's formatted as a Python dict);
    # ast.literal_eval is safer than eval on LLM output
    parsed_result = ast.literal_eval(result.strip())
    
    missing = [var for var, value in parsed_result.items() if value == "MISSING"]
    return missing

# Define a function to ask for missing information
def ask_for_missing_info(missing_vars: List[str]) -> str:
    if not missing_vars:
        return "All required information is provided."
    
    missing_descriptions = [f"{var} ({REQUIRED_VARIABLES[var]})" for var in missing_vars]
    return f"To proceed with your Country Engagement Rules inquiry, I need some additional information. Could you please provide details about: {', '.join(missing_descriptions)}?"

# Define the tool for querying the vector store
query_tool = Tool(
    name="Query CER Database",
    func=RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever).run,
    description="Useful for querying the Country Engagement Rules database"
)

tools = [query_tool]

# Define the output parser
class CEROutputParser(AgentOutputParser):
    def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
        if "Final Answer:" in llm_output:
            return AgentFinish(
                return_values={"output": llm_output.split("Final Answer:")[-1].strip()},
                log=llm_output,
            )
        
        action = None
        action_prefix = "Action:"
        action_input_prefix = "Action Input:"

        for line in llm_output.split('\n'):
            if line.startswith(action_prefix):
                action = line[len(action_prefix):].strip()
            elif line.startswith(action_input_prefix) and action is not None:
                action_input = line[len(action_input_prefix):].strip()
                return AgentAction(tool=action, tool_input=action_input, log=llm_output)
        
        return AgentFinish(
            return_values={"output": "I couldn't determine the next action. Please provide more information."},
            log=llm_output,
        )

output_parser = CEROutputParser()

# Instantiate the prompt now that the tools are defined
prompt = CERPromptTemplate(
    template=react_template,
    tools=tools,
    input_variables=["input", "intermediate_steps"],
)

# Initialize the agent (LLMSingleActionAgent expects an LLMChain, not a bare LLM)
llm_chain = LLMChain(llm=llm, prompt=prompt)
agent = LLMSingleActionAgent(
    llm_chain=llm_chain,
    output_parser=output_parser,
    stop=["\nObservation:"],
    allowed_tools=[tool.name for tool in tools],
)

# Set up the agent executor
agent_executor = AgentExecutor.from_agent_and_tools(
    agent=agent,
    tools=tools,
    verbose=True,
    memory=ConversationBufferMemory(memory_key="chat_history", return_messages=True)
)

# Main chat loop
def chat_loop():
    print("Welcome to the CER Chatbot. Type 'exit' to end the conversation.")
    context = ""
    while True:
        user_input = input("You: ")
        if user_input.lower() == 'exit':
            print("Thank you for using the CER Chatbot. Goodbye!")
            break
        
        context += f" {user_input}"
        missing_vars = check_missing_variables(context)
        while missing_vars:
            print("Chatbot:", ask_for_missing_info(missing_vars))
            additional_info = input("You: ")
            context += f" {additional_info}"
            missing_vars = check_missing_variables(context)
        
        response = agent_executor.run(context)
        print("Chatbot:", response)

if __name__ == "__main__":
    chat_loop()

-1 votes

It seems that c.Authenticator.refresh_pre_spawn = True, as suggested by Eos Antigen, solves the problem: each time you log in again, the pre-spawn function is executed again.
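
For reference, a minimal sketch of that configuration (these are standard JupyterHub Authenticator options; the refresh age value here is an assumption to be matched to the upstream token lifetime):

# jupyterhub_config.py
c.Authenticator.enable_auth_state = True   # required for user.get_auth_state()
c.Authenticator.refresh_pre_spawn = True   # run refresh_user before every spawn
c.Authenticator.auth_refresh_age = 300     # assumed: treat auth info as stale after 5 minutes

With refresh_pre_spawn enabled, the Hub refreshes auth before each spawn and forces a fresh login if the refresh fails, so pre_spawn_start sees current auth_state whenever a server starts.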
