Skip to content
This repository has been archived by the owner on Jun 9, 2024. It is now read-only.

Add gradio-tools plugin #14

Open
wants to merge 8 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,11 @@ requests-mock
setuptools
tweepy==4.13.0
twine
tweepy
pandas
auto_gpt_plugin_template
python-dotenv
gradio_tools>=0.0.5
validators
wheel
wolframalpha==5.0.0
35 changes: 35 additions & 0 deletions src/autogpt_plugins/autogpt_gradio_tools/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
# freddyaboulton/autogpt-gradio-tools 🤝

A plugin giving AutoGPT access to [Gradio](https://github.com/gradio-app/gradio) spaces running on
the [huggingface hub](https://huggingface.co/spaces) and elsewhere!

Integration powered by [gradio-tools](https://github.com/freddyaboulton/gradio-tools)

gradio-tools comes with a set of pre-built tools but it is easy to add new tools.

All contributions to `gradio-tools` and this plugin are welcome!

## Features

Each tool specified via the env file will add a command that gives AutoGPT
the ability to call that gradio app programmatically and get its prediction.

For example, an LLM could use a Gradio tool to transcribe a voice recording it finds online and then summarize it for you. Or it could use a different Gradio tool to apply OCR to a document on your Google Drive and then answer questions about it.

## Installation

1. Download this repository, and save it as `autogpt-gradio-tools.zip`
2. Place the `.zip` file in the plugins directory of your AutoGPT install
3. Add your gradio-tools configuration to the `.env` file within AutoGPT:
Comment on lines +21 to +23
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Remove this section


```
################################################################################
### GRADIO-TOOLS
################################################################################

# Comma-separated list of gradio-tools tool names to enable

AUTOGPT_GRADIO_TOOLS=StableDiffusion,ImageCaptioner
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

List all tools that function here

GRADIO_TOOLS_HF_TOKEN=<Optional HuggingFace token to clone spaces and avoid rate limits>
```

240 changes: 240 additions & 0 deletions src/autogpt_plugins/autogpt_gradio_tools/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,240 @@
"""Integration with Gradio Spaces On HuggingFace via gradio_tools."""
from typing import Any, Dict, List, Optional, Tuple, TypedDict, TypeVar
from dotenv import load_dotenv
from auto_gpt_plugin_template import AutoGPTPluginTemplate
from pathlib import Path
import os
from .tools import (AutoGPTClipInterrogatorTool,
AutoGPTStableDiffusion,
AutoGPTWhisperTool,
AutoGPTTextToVideoTool,
AutoGPTCaptioner,
AutoGPTPromptGeneratorTool,
AutoGPTImageToMusicTool,
AutoGPTDocumentAnsweringTool)
from gradio_tools import GradioTool

PromptGenerator = TypeVar("PromptGenerator")

# Load environment variables (AUTOGPT_GRADIO_TOOLS, GRADIO_TOOLS_HF_TOKEN)
# from a .env file in the current working directory, if present.
# NOTE(review): AutoGPT itself already loads .env — this is likely redundant
# and should only run when the plugin is actually enabled.
if (Path(os.getcwd()) / ".env").exists():
    with open(str(Path(os.getcwd()) / ".env"), 'r') as fp:
        load_dotenv(stream=fp)

Comment on lines +19 to +22
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This shouldn't be needed


# Optional HuggingFace token used by gradio-tools to clone spaces and avoid
# rate limits; may be None. Read once instead of once per tool.
_HF_TOKEN = os.getenv("GRADIO_TOOLS_HF_TOKEN")

# Every tool this plugin can expose; each entry wraps one hosted Gradio space.
# NOTE(review): constructing these at import time contacts every space even
# when the plugin is disabled — consider building them lazily inside the
# plugin class instead (scoped to the instance, not a module global).
TOOLS = [
    tool_cls(hf_token=_HF_TOKEN)
    for tool_cls in (
        AutoGPTStableDiffusion,
        AutoGPTCaptioner,
        AutoGPTWhisperTool,
        AutoGPTTextToVideoTool,
        AutoGPTPromptGeneratorTool,
        AutoGPTDocumentAnsweringTool,
        AutoGPTImageToMusicTool,
        AutoGPTClipInterrogatorTool,
    )
]
Comment on lines +24 to +33
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Only run this if your plugin is loaded. Take a look at the email plugin to see how to do this

Comment on lines +24 to +33
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

don't use this like a global variable, initialize it within the init and scope it to the plugin's class instance


def get_tool(tool: str) -> GradioTool:
    """Return the entry in ``TOOLS`` whose ``name`` matches *tool*.

    Args:
        tool: The tool name, e.g. ``"StableDiffusion"``.

    Returns:
        GradioTool: The matching tool wrapper.

    Raises:
        ValueError: If no tool with that name exists. (The original
            ``next(...)`` without a default leaked a bare ``StopIteration``,
            crashing plugin scan when AUTOGPT_GRADIO_TOOLS contained an
            unknown name — see the traceback in the PR review.)
    """
    match = next((t for t in TOOLS if t.name == tool), None)
    if match is None:
        valid = ", ".join(t.name for t in TOOLS)
        raise ValueError(f"Unknown gradio tool {tool!r}. Valid tools: {valid}")
    return match


# A chat message exchanged with the model: a role (e.g. "user", "assistant")
# plus its text content. Functional TypedDict form — equivalent to the
# class-based declaration.
Message = TypedDict("Message", {"role": str, "content": str})


class AutoGPTGradioTools(AutoGPTPluginTemplate):
    """AutoGPT plugin exposing Gradio spaces as commands via gradio-tools."""
    # (Original docstring said "Twitter API integrations using Tweepy" —
    # a copy-paste leftover from another plugin.)

    def __init__(self):
        super().__init__()
        self._name = "autogpt-gradio-tools"
        self._version = "0.1.0"
        self._description = "Calling Gradio Apps ."
        # Tool names requested via the environment, e.g.
        # AUTOGPT_GRADIO_TOOLS=StableDiffusion,ImageCaptioner
        requested = [
            t.strip()
            for t in os.getenv("AUTOGPT_GRADIO_TOOLS", "").split(",")
            if t.strip()
        ]
        # Only look up names that actually exist; an unknown name previously
        # escaped as StopIteration and crashed the whole plugin scan.
        known = {t.name for t in TOOLS}
        self.tools = [get_tool(name) for name in requested if name in known]
Comment on lines +54 to +55
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Filter attempts to load this to names matching a tool. If you attempt to load an invalid name it will crash

@ntindle ➜ /workspaces/Auto-GPT (master) $ AUTOGPT_GRADIO_TOOLS=StableDiffusion,AutoGPTCaptioner ./run.sh
All packages are installed.
NEWS:  Welcome to Auto-GPT! We'll keep you informed of the latest news and features by printing messages here. If you don't wish to see this message, you can run Auto-GPT with the --skip-news flag # INCLUDED COMMAND 'send_tweet' IS DEPRICATED, AND WILL BE REMOVED IN THE NEXT STABLE RELEASE Base Twitter functionality (and more) is now covered by plugins: https://github.com/Significant-Gravitas/Auto-GPT-Plugins ## Changes to Docker configuration The workdir has been changed from /home/appuser to /app. Be sure to update any volume mounts accordingly.
WARNING:  You are running on `master` branch - this is not a supported branch.
Loaded as API: https://gradio-client-demos-stable-diffusion.hf.space ✔
Loaded as API: https://taesiri-blip-2.hf.space ✔
Loaded as API: https://abidlabs-whisper.hf.space ✔
Loaded as API: https://damo-vilab-modelscope-text-to-video-synthesis.hf.space ✔
Loaded as API: https://microsoft-promptist.hf.space ✔
Loaded as API: https://abidlabs-docquery.hf.space ✔
Loaded as API: https://fffiloni-img-to-music.hf.space ✔
Loaded as API: https://pharma-clip-interrogator.hf.space ✔
WARNING: Plugin AutoGPTGradioTools found. But not in the allowlist... Load? (y/n): y
Traceback (most recent call last):
  File "/usr/local/lib/python3.10/runpy.py", line 196, in _run_module_as_main
    return _run_code(code, main_globals, None,
  File "/usr/local/lib/python3.10/runpy.py", line 86, in _run_code
    exec(code, run_globals)
  File "/workspaces/Auto-GPT/autogpt/__main__.py", line 5, in <module>
    autogpt.cli.main()
  File "/home/vscode/.local/lib/python3.10/site-packages/click/core.py", line 1130, in __call__
    return self.main(*args, **kwargs)
  File "/home/vscode/.local/lib/python3.10/site-packages/click/core.py", line 1055, in main
    rv = self.invoke(ctx)
  File "/home/vscode/.local/lib/python3.10/site-packages/click/core.py", line 1635, in invoke
    rv = super().invoke(ctx)
  File "/home/vscode/.local/lib/python3.10/site-packages/click/core.py", line 1404, in invoke
    return ctx.invoke(self.callback, **ctx.params)
  File "/home/vscode/.local/lib/python3.10/site-packages/click/core.py", line 760, in invoke
    return __callback(*args, **kwargs)
  File "/home/vscode/.local/lib/python3.10/site-packages/click/decorators.py", line 26, in new_func
    return f(get_current_context(), *args, **kwargs)
  File "/workspaces/Auto-GPT/autogpt/cli.py", line 90, in main
    run_auto_gpt(
  File "/workspaces/Auto-GPT/autogpt/main.py", line 104, in run_auto_gpt
    cfg.set_plugins(scan_plugins(cfg, cfg.debug_mode))
  File "/workspaces/Auto-GPT/autogpt/plugins.py", line 229, in scan_plugins
    loaded_plugins.append(a_module())
  File "/home/vscode/.local/lib/python3.10/site-packages/abstract_singleton/__init__.py", line 14, in __call__
    cls._instances[cls] = super().__call__(*args, **kwargs)
  File "plugins/Auto-GPT-Plugins.zip/Auto-GPT-Plugins-add-gradio-tools/src/autogpt_plugins/autogpt_gradio_tools/__init__.py", line 55, in __init__
  File "plugins/Auto-GPT-Plugins.zip/Auto-GPT-Plugins-add-gradio-tools/src/autogpt_plugins/autogpt_gradio_tools/__init__.py", line 55, in <listcomp>
  File "plugins/Auto-GPT-Plugins.zip/Auto-GPT-Plugins-add-gradio-tools/src/autogpt_plugins/autogpt_gradio_tools/__init__.py", line 36, in get_tool
StopIteration
Press any key to continue...


def can_handle_on_response(self) -> bool:
    """This method is called to check that the plugin can
    handle the on_response method.
    Returns:
        bool: True if the plugin can handle the on_response method."""
    # This plugin does not post-process model responses.
    return False

def on_response(self, response: str, *args, **kwargs) -> str:
    """This method is called when a response is received from the model.
    Not implemented by this plugin (can_handle_on_response returns False)."""
    pass

def can_handle_post_prompt(self) -> bool:
    """This method is called to check that the plugin can
    handle the post_prompt method.
    Returns:
        bool: True if the plugin can handle the post_prompt method."""
    # The only hook this plugin implements: post_prompt registers one
    # command per configured Gradio tool.
    return True

def can_handle_on_planning(self) -> bool:
    """This method is called to check that the plugin can
    handle the on_planning method.
    Returns:
        bool: True if the plugin can handle the on_planning method."""
    return False

def on_planning(
    self, prompt: PromptGenerator, messages: List[str]
) -> Optional[str]:
    """This method is called before the planning chat completion is done.
    Not implemented by this plugin.
    Args:
        prompt (PromptGenerator): The prompt generator.
        messages (List[str]): The list of messages.
    """
    pass

def can_handle_post_planning(self) -> bool:
    """This method is called to check that the plugin can
    handle the post_planning method.
    Returns:
        bool: True if the plugin can handle the post_planning method."""
    return False

def post_planning(self, response: str) -> str:
    """This method is called after the planning chat completion is done.
    Not implemented by this plugin.
    Args:
        response (str): The response.
    Returns:
        str: The resulting response.
    """
    pass

def can_handle_pre_instruction(self) -> bool:
    """This method is called to check that the plugin can
    handle the pre_instruction method.
    Returns:
        bool: True if the plugin can handle the pre_instruction method."""
    return False

def pre_instruction(self, messages: List[str]) -> List[str]:
    """This method is called before the instruction chat is done.
    Not implemented by this plugin.
    Args:
        messages (List[str]): The list of context messages.
    Returns:
        List[str]: The resulting list of messages.
    """
    pass

def can_handle_on_instruction(self) -> bool:
    """This method is called to check that the plugin can
    handle the on_instruction method.
    Returns:
        bool: True if the plugin can handle the on_instruction method."""
    return False

def on_instruction(self, messages: List[str]) -> Optional[str]:
    """This method is called when the instruction chat is done.
    Not implemented by this plugin.
    Args:
        messages (List[str]): The list of context messages.
    Returns:
        Optional[str]: The resulting message.
    """
    pass

def can_handle_post_instruction(self) -> bool:
    """This method is called to check that the plugin can
    handle the post_instruction method.
    Returns:
        bool: True if the plugin can handle the post_instruction method."""
    return False

def post_instruction(self, response: str) -> str:
    """This method is called after the instruction chat is done.
    Not implemented by this plugin.
    Args:
        response (str): The response.
    Returns:
        str: The resulting response.
    """
    pass

def can_handle_pre_command(self) -> bool:
    """This method is called to check that the plugin can
    handle the pre_command method.
    Returns:
        bool: True if the plugin can handle the pre_command method."""
    return False

def pre_command(
    self, command_name: str, arguments: Dict[str, Any]
) -> Tuple[str, Dict[str, Any]]:
    """This method is called before the command is executed.
    Not implemented by this plugin.
    Args:
        command_name (str): The command name.
        arguments (Dict[str, Any]): The arguments.
    Returns:
        Tuple[str, Dict[str, Any]]: The command name and the arguments.
    """
    pass

def can_handle_post_command(self) -> bool:
    """This method is called to check that the plugin can
    handle the post_command method.
    Returns:
        bool: True if the plugin can handle the post_command method."""
    return False

def post_command(self, command_name: str, response: str) -> str:
    """This method is called after the command is executed.
    Not implemented by this plugin.
    Args:
        command_name (str): The command name.
        response (str): The response.
    Returns:
        str: The resulting response.
    """
    pass

def can_handle_chat_completion(
    self,
    messages: list[Dict[Any, Any]],
    model: str,
    temperature: float,
    max_tokens: int,
) -> bool:
    """This method is called to check that the plugin can
    handle the chat_completion method.
    Args:
        messages (Dict[Any, Any]): The messages.
        model (str): The model name.
        temperature (float): The temperature.
        max_tokens (int): The max tokens.
    Returns:
        bool: True if the plugin can handle the chat_completion method."""
    return False

def handle_chat_completion(
    self,
    messages: list[Dict[Any, Any]],
    model: str,
    temperature: float,
    max_tokens: int,
) -> str:
    """This method is called when the chat completion is done.
    Not implemented by this plugin.
    Args:
        messages (Dict[Any, Any]): The messages.
        model (str): The model name.
        temperature (float): The temperature.
        max_tokens (int): The max tokens.
    Returns:
        str: The resulting response.
    """
    return None

def post_prompt(self, prompt: PromptGenerator) -> PromptGenerator:
    """This method is called just after the generate_prompt is called,
    but actually before the prompt is generated. Registers one AutoGPT
    command per configured Gradio tool.
    Args:
        prompt (PromptGenerator): The prompt generator.
    Returns:
        PromptGenerator: The same generator, with the tool commands added.
    """
    # Looping directly is equivalent to the original guarded loop: an
    # empty self.tools simply adds nothing.
    for gradio_tool in self.tools:
        prompt.add_command(
            gradio_tool.description,
            gradio_tool.name.lower(),
            gradio_tool.args,
            gradio_tool.run,
        )
    return prompt
60 changes: 60 additions & 0 deletions src/autogpt_plugins/autogpt_gradio_tools/test_gradio_tools.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
import os
from unittest.mock import patch, MagicMock
import unittest
import gradio_tools
from .tools import (AutoGPTClipInterrogatorTool,
AutoGPTStableDiffusion,
AutoGPTWhisperTool,
AutoGPTTextToVideoTool,
AutoGPTCaptioner,
AutoGPTPromptGeneratorTool,
AutoGPTImageToMusicTool,
AutoGPTDocumentAnsweringTool)


class TestGradioTools(unittest.TestCase):
    """Tests for the gradio-tools plugin and its tool wrappers.

    NOTE(review): importing the plugin module instantiates every tool and
    contacts the hosted Gradio spaces, so these tests need network access
    — TODO confirm and consider mocking the space clients.
    """

    @patch.dict(
        os.environ,
        {
            "AUTOGPT_GRADIO_TOOLS": "WhisperAudioTranscription,TextToVideo",
        },
    )
    def test_right_tools_loaded(self):
        # Only the two tools named in the env var should be configured,
        # in the order they were listed.
        from . import AutoGPTGradioTools

        plugin = AutoGPTGradioTools()
        assert plugin.tools[0].name == "WhisperAudioTranscription"
        assert plugin.tools[1].name == "TextToVideo"

    @patch.dict(
        os.environ,
        {
            "AUTOGPT_GRADIO_TOOLS": "WhisperAudioTranscription,TextToVideo",
        },
    )
    def test_commands_added_to_prompt(self):
        from . import AutoGPTGradioTools

        mock_prompt = MagicMock()
        plugin = AutoGPTGradioTools()
        plugin.post_prompt(mock_prompt)
        # Two tools added to prompt
        assert mock_prompt.add_command.call_count == 2

    def test_tools_configured_correctly(self):
        # Each AutoGPT wrapper must mirror the underlying gradio_tools tool:
        # same name, description, and source space, plus a non-empty args spec.
        all_tools = [
            (AutoGPTClipInterrogatorTool(), gradio_tools.ClipInterrogatorTool()),
            (AutoGPTStableDiffusion(), gradio_tools.StableDiffusionTool()),
            (AutoGPTWhisperTool(), gradio_tools.WhisperAudioTranscriptionTool()),
            (AutoGPTTextToVideoTool(), gradio_tools.TextToVideoTool()),
            (AutoGPTCaptioner(), gradio_tools.ImageCaptioningTool()),
            (AutoGPTPromptGeneratorTool(), gradio_tools.StableDiffusionPromptGeneratorTool()),
            (AutoGPTImageToMusicTool(), gradio_tools.ImageToMusicTool()),
            (AutoGPTDocumentAnsweringTool(), gradio_tools.DocQueryDocumentAnsweringTool())
        ]
        for tool_1, tool_2 in all_tools:
            assert tool_1.name == tool_2.name
            assert tool_1.description == tool_2.description
            assert tool_1.src == tool_2.src
            assert tool_1.args
Loading