https://github.com/googleapis/python-aiplatform/commit/c71c3ddbfeaa577dfce683b3299d94e77d1c4895

feat: Add system_instruction to LangchainAgent template. · googleapis/python-aiplatform@c71c3dd · GitHub

Commit c71c3dd

yeesian authored and copybara-github committed
feat: Add system_instruction to LangchainAgent template.
PiperOrigin-RevId: 656647049
1 parent a02d82f commit c71c3dd

File tree

2 files changed: +49 −9 lines
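
From the caller's perspective, the change is a single new keyword argument on the `LangchainAgent` constructor. A minimal usage sketch, assuming the Vertex AI SDK with the LangChain extras is installed and the project has model access; the project ID, model name, instruction text, and query below are illustrative placeholders rather than values from this commit:

```python
import vertexai
from vertexai.preview import reasoning_engines

# Placeholder project/location; replace with real values.
vertexai.init(project="my-project", location="us-central1")

# The new keyword argument added by this commit.
agent = reasoning_engines.LangchainAgent(
    model="gemini-1.0-pro",
    system_instruction="You are a helpful bot.",
)

# Querying requires model access; `query(input=...)` is the agent's existing
# entry point and is unchanged by this commit.
response = agent.query(input="Hi, how are you?")
print(response)
```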

tests/unit/vertex_langchain/test_reasoning_engine_templates_langchain.py

Lines changed: 19 additions & 5 deletions

@@ -38,6 +38,7 @@
 _TEST_LOCATION = "us-central1"
 _TEST_PROJECT = "test-project"
 _TEST_MODEL = "gemini-1.0-pro"
+_TEST_SYSTEM_INSTRUCTION = "You are a helpful bot."


 def place_tool_query(
@@ -173,6 +174,7 @@ def test_initialization_with_tools(self, mock_chatvertexai):
         ]
         agent = reasoning_engines.LangchainAgent(
             model=_TEST_MODEL,
+            system_instruction=_TEST_SYSTEM_INSTRUCTION,
             tools=tools,
         )
         for tool, agent_tool in zip(tools, agent._tools):
@@ -255,11 +257,6 @@ def test_enable_tracing_warning(self, caplog, langchain_instrumentor_none_mock):
         assert "enable_tracing=True but proceeding with tracing disabled" in caplog.text


-class TestConvertToolsOrRaise:
-    def test_convert_tools_or_raise(self, vertexai_init_mock):
-        pass
-
-
 def _return_input_no_typing(input_):
     """Returns input back to user."""
     return input_
@@ -272,3 +269,20 @@ def test_raise_untyped_input_args(self, vertexai_init_mock):
                 model=_TEST_MODEL,
                 tools=[_return_input_no_typing],
             )
+
+
+class TestSystemInstructionAndPromptRaisesErrors:
+    def test_raise_both_system_instruction_and_prompt_error(self, vertexai_init_mock):
+        with pytest.raises(
+            ValueError,
+            match=r"Only one of `prompt` or `system_instruction` should be specified.",
+        ):
+            reasoning_engines.LangchainAgent(
+                model=_TEST_MODEL,
+                system_instruction=_TEST_SYSTEM_INSTRUCTION,
+                prompt=prompts.ChatPromptTemplate.from_messages(
+                    [
+                        ("user", "{input}"),
+                    ]
+                ),
+            )
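
The new test pins down the mutual-exclusion guard between `prompt` and `system_instruction`. A hedged sketch of how that surfaces to callers, plus the workaround the error message recommends (folding the system text into a custom prompt); it assumes `vertexai.init(...)` has been called as in the earlier sketch, the message layout mirrors the default prompt in the implementation diff below, and the literal strings are placeholders:

```python
from langchain_core import prompts
from vertexai.preview import reasoning_engines

# Passing both arguments now raises ValueError.
try:
    reasoning_engines.LangchainAgent(
        model="gemini-1.0-pro",
        system_instruction="You are a helpful bot.",
        prompt=prompts.ChatPromptTemplate.from_messages([("user", "{input}")]),
    )
except ValueError as exc:
    print(exc)  # "Only one of `prompt` or `system_instruction` should be specified. ..."

# The recommended workaround: embed the system message in the custom prompt.
agent = reasoning_engines.LangchainAgent(
    model="gemini-1.0-pro",
    prompt=prompts.ChatPromptTemplate.from_messages(
        [
            ("system", "You are a helpful bot."),
            ("user", "{input}"),
            prompts.MessagesPlaceholder(variable_name="agent_scratchpad"),
        ]
    ),
)
```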

vertexai/preview/reasoning_engines/templates/langchain.py

Lines changed: 30 additions & 4 deletions

@@ -114,6 +114,7 @@ def _default_model_builder(
 def _default_runnable_builder(
     model: "BaseLanguageModel",
     *,
+    system_instruction: Optional[str] = None,
     tools: Optional[Sequence["_ToolLike"]] = None,
     prompt: Optional["RunnableSerializable"] = None,
     output_parser: Optional["RunnableSerializable"] = None,
@@ -131,7 +132,10 @@ def _default_runnable_builder(
     # user would reflect that is by setting chat_history (which defaults to
     # None).
     has_history: bool = chat_history is not None
-    prompt = prompt or _default_prompt(has_history)
+    prompt = prompt or _default_prompt(
+        has_history=has_history,
+        system_instruction=system_instruction,
+    )
     output_parser = output_parser or _default_output_parser()
     model_tool_kwargs = model_tool_kwargs or {}
     agent_executor_kwargs = agent_executor_kwargs or {}
@@ -162,7 +166,10 @@ def _default_runnable_builder(
     return agent_executor


-def _default_prompt(has_history: bool) -> "RunnableSerializable":
+def _default_prompt(
+    has_history: bool,
+    system_instruction: Optional[str] = None,
+) -> "RunnableSerializable":
     from langchain_core import prompts

     try:
@@ -173,6 +180,10 @@ def _default_prompt(has_history: bool) -> "RunnableSerializable":
             format_to_openai_tool_messages as format_to_tool_messages,
         )

+    system_instructions = []
+    if system_instruction:
+        system_instructions = [("system", system_instruction)]
+
     if has_history:
         return {
             "history": lambda x: x["history"],
@@ -181,7 +192,8 @@ def _default_prompt(has_history: bool) -> "RunnableSerializable":
                 lambda x: format_to_tool_messages(x["intermediate_steps"])
             ),
         } | prompts.ChatPromptTemplate.from_messages(
-            [
+            system_instructions
+            + [
                 prompts.MessagesPlaceholder(variable_name="history"),
                 ("user", "{input}"),
                 prompts.MessagesPlaceholder(variable_name="agent_scratchpad"),
@@ -194,7 +206,8 @@ def _default_prompt(has_history: bool) -> "RunnableSerializable":
                 lambda x: format_to_tool_messages(x["intermediate_steps"])
             ),
         } | prompts.ChatPromptTemplate.from_messages(
-            [
+            system_instructions
+            + [
                 ("user", "{input}"),
                 prompts.MessagesPlaceholder(variable_name="agent_scratchpad"),
             ]
@@ -265,6 +278,7 @@ def __init__(
         self,
         model: str,
         *,
+        system_instruction: Optional[str] = None,
         prompt: Optional["RunnableSerializable"] = None,
         tools: Optional[Sequence["_ToolLike"]] = None,
         output_parser: Optional["RunnableSerializable"] = None,
@@ -319,6 +333,9 @@ def __init__(
         Args:
             model (str):
                 Optional. The name of the model (e.g. "gemini-1.0-pro").
+            system_instruction (str):
+                Optional. The system instruction to use for the agent. This
+                argument should not be specified if `prompt` is specified.
             prompt (langchain_core.runnables.RunnableSerializable):
                 Optional. The prompt template for the model. Defaults to a
                 ChatPromptTemplate.
@@ -394,6 +411,7 @@ def __init__(
                 False.

         Raises:
+            ValueError: If both `prompt` and `system_instruction` are specified.
             TypeError: If there is an invalid tool (e.g. function with an input
                 that did not specify its type).
         """
@@ -407,7 +425,14 @@ def __init__(
         # they are deployed.
         _validate_tools(tools)
         self._tools = tools
+        if prompt and system_instruction:
+            raise ValueError(
+                "Only one of `prompt` or `system_instruction` should be specified. "
+                "Consider incorporating the system instruction into the prompt "
+                "rather than passing it separately as an argument."
+            )
         self._model_name = model
+        self._system_instruction = system_instruction
         self._prompt = prompt
         self._output_parser = output_parser
         self._chat_history = chat_history
@@ -528,6 +553,7 @@ def set_up(self):
             prompt=self._prompt,
             model=self._model,
             tools=self._tools,
+            system_instruction=self._system_instruction,
             output_parser=self._output_parser,
             chat_history=self._chat_history,
             model_tool_kwargs=self._model_tool_kwargs,
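
The prompt change itself is just list concatenation: an optional ("system", ...) tuple is prepended to the messages that were already there. A standalone sketch of the no-history branch using only `langchain_core`; the helper name `build_default_messages` is illustrative and not part of the SDK:

```python
from typing import Optional

from langchain_core import prompts


def build_default_messages(system_instruction: Optional[str] = None):
    """Mirrors the no-history branch of _default_prompt after this commit."""
    system_instructions = []
    if system_instruction:
        system_instructions = [("system", system_instruction)]
    return prompts.ChatPromptTemplate.from_messages(
        system_instructions
        + [
            ("user", "{input}"),
            prompts.MessagesPlaceholder(variable_name="agent_scratchpad"),
        ]
    )


# Without a system instruction the template is unchanged; with one, a system
# message is prepended ahead of the user turn and the scratchpad placeholder.
print(build_default_messages().messages)
print(build_default_messages("You are a helpful bot.").messages)
```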

0 commit comments
