In [ ]:
%pip install markdownify smolagents requests duckduckgo_search pandas pytz pyyaml gradio
Let's define tools
In [12]:
from typing import Any, Optional
from smolagents.tools import Tool
import requests
import markdownify
import smolagents
import duckduckgo_search
import datetime
import re  # needed by VisitWebpageTool.forward below


class FinalAnswerTool(Tool):
    name = "final_answer"
    description = "Provides a final answer to the given problem."
    inputs = {'answer': {'type': 'any', 'description': 'The final answer to the problem'}}
    output_type = "any"

    def __init__(self, *args, **kwargs):
        self.is_initialized = False

    def forward(self, answer: Any) -> Any:
        return answer
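# A quick sanity check (a sketch): the agent calls this tool to terminate a run, e.g.
# FinalAnswerTool().forward("Paris")  # -> "Paris"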
class VisitWebpageTool(Tool):
    name = "visit_webpage"
    description = "Visits a webpage at the given url and reads its content as a markdown string. Use this to browse webpages."
    inputs = {'url': {'type': 'string', 'description': 'The url of the webpage to visit.'}}
    output_type = "string"

    def __init__(self, *args, **kwargs):
        self.is_initialized = False

    def forward(self, url: str) -> str:
        try:
            import requests
            from markdownify import markdownify
            from requests.exceptions import RequestException
            from smolagents.utils import truncate_content
        except ImportError as e:
            raise ImportError(
                "You must install packages `markdownify` and `requests` to run this tool: for instance run `pip install markdownify requests`."
            ) from e
        try:
            # Send a GET request to the URL with a 20-second timeout
            response = requests.get(url, timeout=20)
            response.raise_for_status()  # Raise an exception for bad status codes

            # Convert the HTML content to Markdown
            markdown_content = markdownify(response.text).strip()

            # Remove multiple line breaks (`re` is imported at the top of this cell)
            markdown_content = re.sub(r"\n{3,}", "\n\n", markdown_content)

            return truncate_content(markdown_content, 10000)
        except requests.exceptions.Timeout:
            return "The request timed out. Please try again later or check the URL."
        except RequestException as e:
            return f"Error fetching the webpage: {str(e)}"
        except Exception as e:
            return f"An unexpected error occurred: {str(e)}"
class DuckDuckGoSearchTool(Tool):
    name = "web_search"
    description = "Performs a duckduckgo web search based on your query (think a Google search) then returns the top search results."
    inputs = {'query': {'type': 'string', 'description': 'The search query to perform.'}}
    output_type = "string"

    def __init__(self, max_results=10, **kwargs):
        super().__init__()
        self.max_results = max_results
        try:
            from duckduckgo_search import DDGS
        except ImportError as e:
            raise ImportError(
                "You must install package `duckduckgo_search` to run this tool: for instance run `pip install duckduckgo-search`."
            ) from e
        self.ddgs = DDGS(**kwargs)

    def forward(self, query: str) -> str:
        results = self.ddgs.text(query, max_results=self.max_results)
        if len(results) == 0:
            raise Exception("No results found! Try a less restrictive/shorter query.")
        postprocessed_results = [f"[{result['title']}]({result['href']})\n{result['body']}" for result in results]
        return "## Search Results\n\n" + "\n\n".join(postprocessed_results)
Gradio UI
In [17]:
#!/usr/bin/env python
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mimetypes
import os
import re
import shutil
from typing import Optional
from smolagents.agent_types import AgentAudio, AgentImage, AgentText, handle_agent_output_types
from smolagents.agents import ActionStep, MultiStepAgent
from smolagents.memory import MemoryStep
from smolagents.utils import _is_package_available
def pull_messages_from_step(
    step_log: MemoryStep,
):
    """Extract ChatMessage objects from agent steps with proper nesting"""
    import gradio as gr

    if isinstance(step_log, ActionStep):
        # Output the step number
        step_number = f"Step {step_log.step_number}" if step_log.step_number is not None else ""
        yield gr.ChatMessage(role="assistant", content=f"**{step_number}**")

        # First yield the thought/reasoning from the LLM
        if hasattr(step_log, "model_output") and step_log.model_output is not None:
            # Clean up the LLM output
            model_output = step_log.model_output.strip()
            # Remove any trailing <end_code> and extra backticks, handling multiple possible formats
            model_output = re.sub(r"```\s*<end_code>", "```", model_output)  # handles ```<end_code>
            model_output = re.sub(r"<end_code>\s*```", "```", model_output)  # handles <end_code>```
            model_output = re.sub(r"```\s*\n\s*<end_code>", "```", model_output)  # handles ```\n<end_code>
            model_output = model_output.strip()
            yield gr.ChatMessage(role="assistant", content=model_output)

        # For tool calls, create a parent message
        if hasattr(step_log, "tool_calls") and step_log.tool_calls is not None:
            first_tool_call = step_log.tool_calls[0]
            used_code = first_tool_call.name == "python_interpreter"
            parent_id = f"call_{len(step_log.tool_calls)}"

            # Tool call becomes the parent message with timing info
            # First we will handle arguments based on type
            args = first_tool_call.arguments
            if isinstance(args, dict):
                content = str(args.get("answer", str(args)))
            else:
                content = str(args).strip()

            if used_code:
                # Clean up the content by removing any end code tags
                content = re.sub(r"```.*?\n", "", content)  # Remove existing code blocks
                content = re.sub(r"\s*<end_code>\s*", "", content)  # Remove end_code tags
                content = content.strip()
                if not content.startswith("```python"):
                    content = f"```python\n{content}\n```"

            parent_message_tool = gr.ChatMessage(
                role="assistant",
                content=content,
                metadata={
                    "title": f"🛠️ Used tool {first_tool_call.name}",
                    "id": parent_id,
                    "status": "pending",
                },
            )
            yield parent_message_tool

            # Nesting execution logs under the tool call if they exist
            if hasattr(step_log, "observations") and (
                step_log.observations is not None and step_log.observations.strip()
            ):  # Only yield execution logs if there's actual content
                log_content = step_log.observations.strip()
                if log_content:
                    log_content = re.sub(r"^Execution logs:\s*", "", log_content)
                    yield gr.ChatMessage(
                        role="assistant",
                        content=f"{log_content}",
                        metadata={"title": "📝 Execution Logs", "parent_id": parent_id, "status": "done"},
                    )

            # Nesting any errors under the tool call
            if hasattr(step_log, "error") and step_log.error is not None:
                yield gr.ChatMessage(
                    role="assistant",
                    content=str(step_log.error),
                    metadata={"title": "💥 Error", "parent_id": parent_id, "status": "done"},
                )

            # Update parent message metadata to done status without yielding a new message
            parent_message_tool.metadata["status"] = "done"

        # Handle standalone errors but not from tool calls
        elif hasattr(step_log, "error") and step_log.error is not None:
            yield gr.ChatMessage(role="assistant", content=str(step_log.error), metadata={"title": "💥 Error"})

        # Calculate duration and token information
        step_footnote = f"{step_number}"
        if hasattr(step_log, "input_token_count") and hasattr(step_log, "output_token_count"):
            token_str = (
                f" | Input-tokens:{step_log.input_token_count:,} | Output-tokens:{step_log.output_token_count:,}"
            )
            step_footnote += token_str
        if hasattr(step_log, "duration"):
            # Fall back to an empty string so the `+=` below cannot fail on a None duration
            step_duration = f" | Duration: {round(float(step_log.duration), 2)}" if step_log.duration else ""
            step_footnote += step_duration
        step_footnote = f"""<span style="color: #bbbbc2; font-size: 12px;">{step_footnote}</span> """
        yield gr.ChatMessage(role="assistant", content=f"{step_footnote}")
        yield gr.ChatMessage(role="assistant", content="-----")
def stream_to_gradio(
    agent,
    task: str,
    reset_agent_memory: bool = False,
    additional_args: Optional[dict] = None,
):
    """Runs an agent with the given task and streams the messages from the agent as gradio ChatMessages."""
    if not _is_package_available("gradio"):
        raise ModuleNotFoundError(
            "Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`"
        )
    import gradio as gr

    total_input_tokens = 0
    total_output_tokens = 0

    for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
        # Track tokens if model provides them
        if hasattr(agent.model, "last_input_token_count"):
            total_input_tokens += agent.model.last_input_token_count
            total_output_tokens += agent.model.last_output_token_count
            if isinstance(step_log, ActionStep):
                step_log.input_token_count = agent.model.last_input_token_count
                step_log.output_token_count = agent.model.last_output_token_count

        for message in pull_messages_from_step(
            step_log,
        ):
            yield message

    final_answer = step_log  # Last log is the run's final_answer
    final_answer = handle_agent_output_types(final_answer)

    if isinstance(final_answer, AgentText):
        yield gr.ChatMessage(
            role="assistant",
            content=f"**Final answer:**\n{final_answer.to_string()}\n",
        )
    elif isinstance(final_answer, AgentImage):
        yield gr.ChatMessage(
            role="assistant",
            content={"path": final_answer.to_string(), "mime_type": "image/png"},
        )
    elif isinstance(final_answer, AgentAudio):
        yield gr.ChatMessage(
            role="assistant",
            content={"path": final_answer.to_string(), "mime_type": "audio/wav"},
        )
    else:
        yield gr.ChatMessage(role="assistant", content=f"**Final answer:** {str(final_answer)}")
class GradioUI:
    """A one-line interface to launch your agent in Gradio"""

    def __init__(self, agent: MultiStepAgent, file_upload_folder: str | None = None):
        if not _is_package_available("gradio"):
            raise ModuleNotFoundError(
                "Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`"
            )
        self.agent = agent
        self.file_upload_folder = file_upload_folder
        if self.file_upload_folder is not None:
            if not os.path.exists(file_upload_folder):
                os.mkdir(file_upload_folder)

    def interact_with_agent(self, prompt, messages):
        import gradio as gr

        messages.append(gr.ChatMessage(role="user", content=prompt))
        yield messages
        for msg in stream_to_gradio(self.agent, task=prompt, reset_agent_memory=False):
            messages.append(msg)
            yield messages
        yield messages

    def upload_file(
        self,
        file,
        file_uploads_log,
        allowed_file_types=[
            "application/pdf",
            "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
            "text/plain",
        ],
    ):
        """
        Handle file uploads, default allowed types are .pdf, .docx, and .txt
        """
        import gradio as gr

        if file is None:
            return gr.Textbox("No file uploaded", visible=True), file_uploads_log

        try:
            mime_type, _ = mimetypes.guess_type(file.name)
        except Exception as e:
            return gr.Textbox(f"Error: {e}", visible=True), file_uploads_log

        if mime_type not in allowed_file_types:
            return gr.Textbox("File type disallowed", visible=True), file_uploads_log

        # Sanitize file name
        original_name = os.path.basename(file.name)
        sanitized_name = re.sub(
            r"[^\w\-.]", "_", original_name
        )  # Replace any non-alphanumeric, non-dash, or non-dot characters with underscores

        type_to_ext = {}
        for ext, t in mimetypes.types_map.items():
            if t not in type_to_ext:
                type_to_ext[t] = ext

        # Ensure the extension correlates to the mime type
        sanitized_name = sanitized_name.split(".")[:-1]
        sanitized_name.append("" + type_to_ext[mime_type])
        sanitized_name = "".join(sanitized_name)

        # Save the uploaded file to the specified folder
        file_path = os.path.join(self.file_upload_folder, os.path.basename(sanitized_name))
        shutil.copy(file.name, file_path)

        return gr.Textbox(f"File uploaded: {file_path}", visible=True), file_uploads_log + [file_path]

    def log_user_message(self, text_input, file_uploads_log):
        return (
            text_input
            + (
                f"\nYou have been provided with these files, which might be helpful or not: {file_uploads_log}"
                if len(file_uploads_log) > 0
                else ""
            ),
            "",
        )

    def launch(self, **kwargs):
        import gradio as gr

        with gr.Blocks(fill_height=True) as demo:
            stored_messages = gr.State([])
            file_uploads_log = gr.State([])
            chatbot = gr.Chatbot(
                label="Agent",
                type="messages",
                avatar_images=(
                    None,
                    "https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/Alfred.png",
                ),
                scale=1,
            )
            # If an upload folder is provided, enable the upload feature
            if self.file_upload_folder is not None:
                upload_file = gr.File(label="Upload a file")
                upload_status = gr.Textbox(label="Upload Status", interactive=False, visible=False)
                upload_file.change(
                    self.upload_file,
                    [upload_file, file_uploads_log],
                    [upload_status, file_uploads_log],
                )
            text_input = gr.Textbox(lines=1, label="Chat Message")
            text_input.submit(
                self.log_user_message,
                [text_input, file_uploads_log],
                [stored_messages, text_input],
            ).then(self.interact_with_agent, [stored_messages, chatbot], [chatbot])

        demo.launch(debug=True, share=True, **kwargs)


__all__ = ["stream_to_gradio", "GradioUI"]
Agent
In [19]:
from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
import datetime
import requests
import pytz
import yaml


# Below is an example of a tool that does nothing. Amaze us with your creativity!
@tool
def my_custom_tool(arg1: str, arg2: int) -> str:  # it's important to specify the return type
    # Keep this format for the description / args / args description but feel free to modify the tool
    """A tool that does nothing yet
    Args:
        arg1: the first argument
        arg2: the second argument
    """
    return "What magic will you build?"


@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """A tool that fetches the current local time in a specified timezone.
    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').
    """
    try:
        # Create timezone object
        tz = pytz.timezone(timezone)
        # Get current time in that timezone
        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is: {local_time}"
    except Exception as e:
        return f"Error fetching time for timezone '{timezone}': {str(e)}"
final_answer = FinalAnswerTool()

# If the agent does not answer, the model is overloaded; use another model or the following
# Hugging Face Endpoint, which also serves Qwen2.5 Coder:
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'

model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',  # it is possible that this model may be overloaded
    custom_role_conversions=None,
)

# Import tool from Hub
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)

with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)

agent = CodeAgent(
    model=model,
    tools=[
        final_answer,
        DuckDuckGoSearchTool(),
        my_custom_tool,
        get_current_time_in_timezone,
        image_generation_tool,
        VisitWebpageTool(),
    ],  # add your tools here (don't remove final_answer)
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates,
)

GradioUI(agent).launch()
* Running on local URL:  http://127.0.0.1:7860
* Running on public URL: https://03cc413e0c5b254d86.gradio.live

This share link expires in 72 hours. For free permanent hosting and GPU upgrades, run `gradio deploy` from the terminal in the working directory to deploy to Hugging Face Spaces (https://huggingface.co/spaces)
╭──────────────────────────────────────────────────── New run ────────────────────────────────────────────────────╮
│                                                                                                                  │
│ What's the time in Paris?                                                                                        │
│                                                                                                                  │
╰─ HfApiModel - Qwen/Qwen2.5-Coder-32B-Instruct ──────────────────────────────────────────────────────────────────╯
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ Step 1 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
─ Executing parsed code: ────────────────────────────────────────────────────────────────────────────────────────
  paris_time = get_current_time_in_timezone(timezone="Europe/Paris")
  print(paris_time)
─────────────────────────────────────────────────────────────────────────────────────────────────────────────────
Execution logs:
The current local time in Europe/Paris is: 2025-02-22 13:35:51
Out: None
[Step 0: Duration 4.50 seconds| Input tokens: 2,345 | Output tokens: 55]
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ Step 2 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
─ Executing parsed code: ────────────────────────────────────────────────────────────────────────────────────────
  paris_time = get_current_time_in_timezone(timezone="Europe/Paris")
  final_answer(paris_time)
─────────────────────────────────────────────────────────────────────────────────────────────────────────────────
Out - Final answer: The current local time in Europe/Paris is: 2025-02-22 13:35:56
[Step 1: Duration 4.83 seconds| Input tokens: 4,864 | Output tokens: 131]
╭──────────────────────────────────────────────────── New run ────────────────────────────────────────────────────╮
│                                                                                                                  │
│ Generate an image with a dog swimming                                                                            │
│                                                                                                                  │
╰─ HfApiModel - Qwen/Qwen2.5-Coder-32B-Instruct ──────────────────────────────────────────────────────────────────╯
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ Step 1 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
─ Executing parsed code: ────────────────────────────────────────────────────────────────────────────────────────
  image = image_generator(prompt="A high-res, photorealistic image of a dog swimming in a lake.")
  final_answer(image)
─────────────────────────────────────────────────────────────────────────────────────────────────────────────────
Out - Final answer: <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=1024x1024 at 0x168C145F0>
[Step 2: Duration 18.79 seconds| Input tokens: 7,587 | Output tokens: 187]
Keyboard interruption in main thread... closing server.
Killing tunnel 127.0.0.1:7860 <> https://03cc413e0c5b254d86.gradio.live
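The same agent can also run headlessly; `agent.run` is the call that `stream_to_gradio` wraps above. A minimal sketch (the question is illustrative):

In [ ]:
answer = agent.run("What's the time in Tokyo?")
print(answer)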
In [ ]: