15 changes: 14 additions & 1 deletion src/google/adk/tools/google_search_tool.py
@@ -35,17 +35,26 @@ class GoogleSearchTool(BaseTool):
local code execution.
"""

def __init__(self, *, bypass_multi_tools_limit: bool = False):
def __init__(
self,
*,
bypass_multi_tools_limit: bool = False,
Contributor

medium

The bypass_multi_tools_limit parameter appears to be unused within the GoogleSearchTool class. The logic in process_llm_request for Gemini 1.x models unconditionally raises a ValueError if other tools are present, and this check does not consult bypass_multi_tools_limit. For Gemini 2.x+ models, multiple tools are supported by default, making the flag seem redundant there as well.

If this parameter is obsolete, consider removing it and the corresponding instance attribute self.bypass_multi_tools_limit to improve code clarity. If it has a purpose that is not immediately apparent, adding a more detailed explanation in the docstring would be helpful.
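For context, a hypothetical sketch of what consulting the flag in the Gemini 1.x branch could look like (the existing check is not part of this diff, so the surrounding code below is assumed):

```python
# Hypothetical sketch only -- the actual Gemini 1.x check is not shown in this diff.
if is_gemini_1_model(llm_request.model):
  if llm_request.config.tools and not self.bypass_multi_tools_limit:
    raise ValueError(
        'Google search tool cannot be used with other tools in Gemini 1.x models.'
    )
  llm_request.config.tools.append(
      types.Tool(google_search_retrieval=types.GoogleSearchRetrieval())
  )
```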

Contributor Author

I will let the ADK team decide if this can be removed.

model: str | None = None,
):
"""Initializes the Google search tool.

Args:
bypass_multi_tools_limit: Whether to bypass the multi tools limitation,
so that the tool can be used with other tools in the same agent.
model: Optional model name to use for processing the LLM request. If
provided, this model will be used instead of the model from the
incoming llm_request.
"""

# Name and description are not used because this is a model built-in tool.
super().__init__(name='google_search', description='google_search')
self.bypass_multi_tools_limit = bypass_multi_tools_limit
self.model = model

@override
async def process_llm_request(
@@ -54,6 +63,10 @@ async def process_llm_request(
tool_context: ToolContext,
llm_request: LlmRequest,
) -> None:
# If a custom model is specified, use it instead of the original model
if self.model is not None:
Collaborator

This will actually override the model that the Agent talks to.
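A minimal illustration of that point, reusing LlmRequest and the _create_tool_context helper from the new unit test below (hypothetical usage, not part of this PR):

```python
# The tool mutates the request in place, so this turn's LLM call now targets
# the tool's model rather than the model the agent was configured with.
llm_request = LlmRequest(
    model='gemini-2.5-flash', config=types.GenerateContentConfig()
)
tool_context = await _create_tool_context()  # helper defined in the test module
await GoogleSearchTool(model='gemini-2.5-flash-lite').process_llm_request(
    tool_context=tool_context, llm_request=llm_request
)
assert llm_request.model == 'gemini-2.5-flash-lite'
```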

llm_request.model = self.model

llm_request.config = llm_request.config or types.GenerateContentConfig()
llm_request.config.tools = llm_request.config.tools or []
if is_gemini_1_model(llm_request.model):
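For reference, a construction-side sketch of how the new parameter might be wired into an agent (the Agent import and setup are assumptions, not part of this diff):

```python
from google.adk.agents import Agent  # assumed import, not shown in this PR
from google.adk.tools.google_search_tool import GoogleSearchTool

# Assumed wiring: the agent keeps its own default model, while the built-in
# Google search request is routed to a lighter model via the new parameter.
search_agent = Agent(
    name='search_agent',
    model='gemini-2.5-flash',
    tools=[GoogleSearchTool(model='gemini-2.5-flash-lite')],
)
```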
43 changes: 43 additions & 0 deletions tests/unittests/tools/test_google_search_tool.py
@@ -432,3 +432,46 @@ async def test_process_llm_request_gemini_version_specifics(self):
assert len(llm_request.config.tools) == 1
assert llm_request.config.tools[0].google_search is not None
assert llm_request.config.tools[0].google_search_retrieval is None

@pytest.mark.asyncio
@pytest.mark.parametrize(
(
'tool_model',
'request_model',
'expected_model',
),
[
(
'gemini-2.5-flash-lite',
'gemini-2.5-flash',
'gemini-2.5-flash-lite',
),
(
None,
'gemini-2.5-flash',
'gemini-2.5-flash',
),
],
ids=['with_custom_model', 'without_custom_model'],
)
async def test_process_llm_request_custom_model_behavior(
self,
tool_model,
request_model,
expected_model,
):
"""Tests custom model parameter behavior in process_llm_request."""
tool = GoogleSearchTool(model=tool_model)
tool_context = await _create_tool_context()

llm_request = LlmRequest(
model=request_model, config=types.GenerateContentConfig()
)

await tool.process_llm_request(
tool_context=tool_context, llm_request=llm_request
)

assert llm_request.model == expected_model
assert llm_request.config.tools is not None
assert len(llm_request.config.tools) == 1