litellm.BadRequestError: GetLLMProvider Exception - 'Ollama' object has no attribute 'split'

Problem description

litellm.exceptions.BadRequestError: litellm.BadRequestError: GetLLMProvider Exception - 'Ollama' object has no attribute 'split'

I am building an AI agent based system. My three files, crew.py, task.py, and agents.py, are shown below. Running them raises the error shown at the end; please suggest a way to fix it.

crew.py

from crewai import Crew
from agents import TextGenerationAgents
from task import TextGenerationTasks

class TextGenerationCrew:
    def __init__(self, topic):
        self.topic = topic

    def run(self):
        researcher = TextGenerationAgents().researcher()
        
        research_task = TextGenerationTasks().research_task(researcher, self.topic)
    
        # Create a crew with the agents and tasks
        crew = Crew(
            agents=[researcher],
            tasks=[research_task],
            verbose=True
        )

        # Start the crew's work
        result = crew.kickoff()
        return result

if __name__ == '__main__':
    topic = input("Enter the topic: ")
    crew = TextGenerationCrew(topic)
    results = crew.run()
    print("\n\n########################")
    print(results)
    print("########################\n")

task.py

from crewai import Task

class TextGenerationTasks:
    def research_task(self, agent, topic):
        return Task(
            description=f"Conduct research on the following topic: {topic}. Summarize your findings",
            agent=agent,
            expected_output=f"A comprehensive research report on {topic} based on web search results"
        )

agents.py

from crewai import Agent
from langchain.llms import Ollama
from tools.search_tool import WebSearchTool

# Initialize Ollama LLM
ollama_llm = Ollama(model="llama2")

search_tool = WebSearchTool()

class TextGenerationAgents:
    def researcher(self):
        return Agent(
            role='Researcher',
            goal='Find comprehensive and accurate information on given topics',
            backstory="You are an expert researcher with a keen eye for detail and the ability to find relevant information quickly.",
            tools=[search_tool],
            verbose=True,
            allow_delegation=False,
            llm=ollama_llm
        )

Error:

Traceback (most recent call last):
  File "D:\ML\FW 2.0\Agents\crew.py", line 28, in <module>
    results = crew.run()
              ^^^^^^^^^^
  File "D:\ML\FW 2.0\Agents\crew.py", line 22, in run
    result = crew.kickoff()
             ^^^^^^^^^^^^^^
  File "C:\Users\PMLS\.pyenv\pyenv-win\versions\3.11.0\Lib\site-packages\crewai\crew.py", line 465, in kickoff
    result = self._run_sequential_process()
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\PMLS\.pyenv\pyenv-win\versions\3.11.0\Lib\site-packages\crewai\crew.py", line 573, in _run_sequential_process
    return self._execute_tasks(self.tasks)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\PMLS\.pyenv\pyenv-win\versions\3.11.0\Lib\site-packages\crewai\crew.py", line 671, in _execute_tasks
    task_output = task.execute_sync(
                  ^^^^^^^^^^^^^^^^^^
  File "C:\Users\PMLS\.pyenv\pyenv-win\versions\3.11.0\Lib\site-packages\crewai\task.py", line 191, in execute_sync
    return self._execute_core(agent, context, tools)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\PMLS\.pyenv\pyenv-win\versions\3.11.0\Lib\site-packages\crewai\task.py", line 247, in _execute_core
    result = agent.execute_task(
             ^^^^^^^^^^^^^^^^^^^
  File "C:\Users\PMLS\.pyenv\pyenv-win\versions\3.11.0\Lib\site-packages\crewai\agent.py", line 198, in execute_task
    result = self.execute_task(task, context, tools)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\PMLS\.pyenv\pyenv-win\versions\3.11.0\Lib\site-packages\crewai\agent.py", line 198, in execute_task
    result = self.execute_task(task, context, tools)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\PMLS\.pyenv\pyenv-win\versions\3.11.0\Lib\site-packages\crewai\agent.py", line 197, in execute_task
    raise e
  File "C:\Users\PMLS\.pyenv\pyenv-win\versions\3.11.0\Lib\site-packages\crewai\agent.py", line 186, in execute_task
    result = self.agent_executor.invoke(
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\PMLS\.pyenv\pyenv-win\versions\3.11.0\Lib\site-packages\crewai\agents\crew_agent_executor.py", line 89, in invoke
    formatted_answer = self._invoke_loop()
                       ^^^^^^^^^^^^^^^^^^^
  File "C:\Users\PMLS\.pyenv\pyenv-win\versions\3.11.0\Lib\site-packages\crewai\agents\crew_agent_executor.py", line 162, in _invoke_loop
    raise e
  File "C:\Users\PMLS\.pyenv\pyenv-win\versions\3.11.0\Lib\site-packages\crewai\agents\crew_agent_executor.py", line 111, in _invoke_loop
    ).call(self.messages)
      ^^^^^^^^^^^^^^^^^^^
  File "C:\Users\PMLS\.pyenv\pyenv-win\versions\3.11.0\Lib\site-packages\crewai\llm.py", line 13, in call
    response = completion(
               ^^^^^^^^^^^
  File "C:\Users\PMLS\.pyenv\pyenv-win\versions\3.11.0\Lib\site-packages\litellm\utils.py", line 1057, in wrapper
    return litellm.completion_with_retries(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\PMLS\.pyenv\pyenv-win\versions\3.11.0\Lib\site-packages\litellm\main.py", line 2880, in completion_with_retries
    return retryer(original_function, *args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\PMLS\.pyenv\pyenv-win\versions\3.11.0\Lib\site-packages\tenacity\__init__.py", line 379, in __call__
    do = self.iter(retry_state=retry_state)
         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\PMLS\.pyenv\pyenv-win\versions\3.11.0\Lib\site-packages\tenacity\__init__.py", line 325, in iter
    raise retry_exc.reraise()
          ^^^^^^^^^^^^^^^^^^^
  File "C:\Users\PMLS\.pyenv\pyenv-win\versions\3.11.0\Lib\site-packages\tenacity\__init__.py", line 158, in reraise
    raise self.last_attempt.result()
          ^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\PMLS\.pyenv\pyenv-win\versions\3.11.0\Lib\concurrent\futures\_base.py", line 449, in result
    return self.__get_result()
           ^^^^^^^^^^^^^^^^^^^
  File "C:\Users\PMLS\.pyenv\pyenv-win\versions\3.11.0\Lib\concurrent\futures\_base.py", line 401, in __get_result
    raise self._exception
  File "C:\Users\PMLS\.pyenv\pyenv-win\versions\3.11.0\Lib\site-packages\tenacity\__init__.py", line 382, in __call__
    result = fn(*args, **kwargs)
             ^^^^^^^^^^^^^^^^^^^
  File "C:\Users\PMLS\.pyenv\pyenv-win\versions\3.11.0\Lib\site-packages\litellm\utils.py", line 1082, in wrapper
    raise e
  File "C:\Users\PMLS\.pyenv\pyenv-win\versions\3.11.0\Lib\site-packages\litellm\utils.py", line 970, in wrapper
    result = original_function(*args, **kwargs)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\PMLS\.pyenv\pyenv-win\versions\3.11.0\Lib\site-packages\litellm\main.py", line 2847, in completion
    raise exception_type(
  File "C:\Users\PMLS\.pyenv\pyenv-win\versions\3.11.0\Lib\site-packages\litellm\main.py", line 838, in completion
    model, custom_llm_provider, dynamic_api_key, api_base = get_llm_provider(
                                                            ^^^^^^^^^^^^^^^^^
  File "C:\Users\PMLS\.pyenv\pyenv-win\versions\3.11.0\Lib\site-packages\litellm\litellm_core_utils\get_llm_provider_logic.py", line 507, in get_llm_provider
    raise litellm.exceptions.BadRequestError(  # type: ignore
litellm.exceptions.BadRequestError: litellm.BadRequestError: GetLLMProvider Exception - 'Ollama' object has no attribute 'split'

original model: Ollama
Params: {'model': 'llama2', 'format': None, 'options': {'mirostat': None, 'mirostat_eta': None, 'mirostat_tau': None, 'num_ctx': None, 'num_gpu': None, 'num_thread': None, 'num_predict': None, 'repeat_last_n': None, 'repeat_penalty': None, 'temperature': None, 'stop': None, 'tfs_z': None, 'top_k': None, 'top_p': None}, 'system': None, 'template': None, 'keep_alive': None, 'raw': None}
langchain large-language-model agent ollama crewai
1 Answer

I see you are using Ollama from langchain.llms; you may want to try crewAI's own LLM class, or LiteLLM's completion, instead. Recent crewAI versions route every completion through LiteLLM, whose get_llm_provider expects the model as a string such as "ollama/llama2" and calls .split() on it. Passing a LangChain Ollama object therefore fails with 'Ollama' object has no attribute 'split', which is exactly the error in your traceback. A sketch of the fix follows.
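
As a concrete example, here is a minimal sketch of agents.py rewritten around crewAI's LLM wrapper. It assumes a crewAI version recent enough to ship crewai.LLM (roughly 0.60 and later) and an Ollama server listening on the default local port; the base_url value is an assumption, so point it at your own server.

from crewai import Agent, LLM
from tools.search_tool import WebSearchTool

# LiteLLM derives the provider from a "provider/model" string, so the model
# is given as "ollama/llama2" instead of a LangChain object.
# base_url is an assumption; adjust it to match your Ollama server.
ollama_llm = LLM(model="ollama/llama2", base_url="http://localhost:11434")

search_tool = WebSearchTool()

class TextGenerationAgents:
    def researcher(self):
        return Agent(
            role='Researcher',
            goal='Find comprehensive and accurate information on given topics',
            backstory="You are an expert researcher with a keen eye for detail and the ability to find relevant information quickly.",
            tools=[search_tool],
            verbose=True,
            allow_delegation=False,
            llm=ollama_llm
        )

Recent crewAI versions should also accept the model string directly, e.g. Agent(..., llm="ollama/llama2"); either way, what reaches LiteLLM's get_llm_provider is a string it can split into provider and model name.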
