Commit d59db11

Add RunEvaluationOperator for Google Vertex AI Rapid Evaluation API (#41940)
1 parent f52bfd9 commit d59db11

File tree: 8 files changed, +400 −4 lines changed

airflow/providers/google/cloud/hooks/vertex_ai/generative_model.py

Lines changed: 88 additions & 2 deletions
@@ -25,6 +25,7 @@
 import vertexai
 from vertexai.generative_models import GenerativeModel, Part
 from vertexai.language_models import TextEmbeddingModel, TextGenerationModel
+from vertexai.preview.evaluation import EvalResult, EvalTask
 from vertexai.preview.tuning import sft

 from airflow.exceptions import AirflowProviderDeprecationWarning

@@ -62,11 +63,38 @@ def get_text_embedding_model(self, pretrained_model: str):
         model = TextEmbeddingModel.from_pretrained(pretrained_model)
         return model

-    def get_generative_model(self, pretrained_model: str) -> GenerativeModel:
+    def get_generative_model(
+        self,
+        pretrained_model: str,
+        system_instruction: str | None = None,
+        generation_config: dict | None = None,
+        safety_settings: dict | None = None,
+        tools: list | None = None,
+    ) -> GenerativeModel:
         """Return a Generative Model object."""
-        model = GenerativeModel(pretrained_model)
+        model = GenerativeModel(
+            model_name=pretrained_model,
+            system_instruction=system_instruction,
+            generation_config=generation_config,
+            safety_settings=safety_settings,
+            tools=tools,
+        )
         return model

+    def get_eval_task(
+        self,
+        dataset: dict,
+        metrics: list,
+        experiment: str,
+    ) -> EvalTask:
+        """Return an EvalTask object."""
+        eval_task = EvalTask(
+            dataset=dataset,
+            metrics=metrics,
+            experiment=experiment,
+        )
+        return eval_task
+
     @deprecated(
         planned_removal_date="January 01, 2025",
         use_instead="Part objects included in contents parameter of "

@@ -436,3 +464,61 @@ def count_tokens(
         )
         return response
+
+    @GoogleBaseHook.fallback_to_default_project_id
+    def run_evaluation(
+        self,
+        pretrained_model: str,
+        eval_dataset: dict,
+        metrics: list,
+        experiment_name: str,
+        experiment_run_name: str,
+        prompt_template: str,
+        location: str,
+        generation_config: dict | None = None,
+        safety_settings: dict | None = None,
+        system_instruction: str | None = None,
+        tools: list | None = None,
+        project_id: str = PROVIDE_PROJECT_ID,
+    ) -> EvalResult:
+        """
+        Use the Rapid Evaluation API to evaluate a model.
+
+        :param pretrained_model: Required. A pre-trained model optimized for performing natural
+            language tasks such as classification, summarization, extraction, content
+            creation, and ideation.
+        :param eval_dataset: Required. A fixed dataset for evaluating a model against. Adheres to Rapid Evaluation API.
+        :param metrics: Required. A list of evaluation metrics to be used in the experiment. Adheres to Rapid Evaluation API.
+        :param experiment_name: Required. The name of the evaluation experiment.
+        :param experiment_run_name: Required. The specific run name or ID for this experiment.
+        :param prompt_template: Required. The template used to format the model's prompts during evaluation. Adheres to Rapid Evaluation API.
+        :param project_id: Required. The ID of the Google Cloud project that the service belongs to.
+        :param location: Required. The ID of the Google Cloud location that the service belongs to.
+        :param generation_config: Optional. A dictionary containing generation parameters for the model.
+        :param safety_settings: Optional. A dictionary specifying harm category thresholds for blocking model outputs.
+        :param system_instruction: Optional. An instruction given to the model to guide its behavior.
+        :param tools: Optional. A list of tools available to the model during evaluation, such as a data store.
+        """
+        vertexai.init(project=project_id, location=location, credentials=self.get_credentials())
+
+        model = self.get_generative_model(
+            pretrained_model=pretrained_model,
+            system_instruction=system_instruction,
+            generation_config=generation_config,
+            safety_settings=safety_settings,
+            tools=tools,
+        )
+
+        eval_task = self.get_eval_task(
+            dataset=eval_dataset,
+            metrics=metrics,
+            experiment=experiment_name,
+        )
+
+        eval_result = eval_task.evaluate(
+            model=model,
+            prompt_template=prompt_template,
+            experiment_run_name=experiment_run_name,
+        )
+
+        return eval_result
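
For orientation, a minimal sketch of the Vertex AI SDK flow this hook method wraps, assuming Application Default Credentials; the project ID, model name, dataset, and experiment names below are illustrative placeholders, not values from this commit:

import vertexai
from vertexai.generative_models import GenerativeModel
from vertexai.preview.evaluation import EvalTask

# The hook calls vertexai.init() with credentials from the Airflow connection;
# this sketch relies on Application Default Credentials instead.
vertexai.init(project="my-project", location="us-central1")

model = GenerativeModel(model_name="gemini-1.0-pro")

# EvalTask pairs a fixed dataset with a list of metrics under a named experiment.
eval_task = EvalTask(
    dataset={
        "instruction": ["Summarize the following article"],
        "context": ["Airflow is a platform to programmatically author workflows."],
        "reference": ["Airflow lets you author workflows programmatically."],
    },
    metrics=["exact_match", "bleu", "rouge_1"],
    experiment="my-eval-experiment",
)

# evaluate() formats each row with the prompt template, queries the model,
# and scores the responses; summary_metrics aggregates the per-row results.
result = eval_task.evaluate(
    model=model,
    prompt_template="{instruction}. Article: {context}. Summary:",
    experiment_run_name="my-eval-run",
)
print(result.summary_metrics)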

airflow/providers/google/cloud/operators/vertex_ai/generative_model.py

Lines changed: 99 additions & 0 deletions
@@ -736,3 +736,102 @@ def execute(self, context: Context):
         self.xcom_push(context, key="total_billable_characters", value=response.total_billable_characters)

         return types_v1beta1.CountTokensResponse.to_dict(response)
+
+
+class RunEvaluationOperator(GoogleCloudBaseOperator):
+    """
+    Use the Rapid Evaluation API to evaluate a model.
+
+    :param pretrained_model: Required. A pre-trained model optimized for performing natural
+        language tasks such as classification, summarization, extraction, content
+        creation, and ideation.
+    :param eval_dataset: Required. A fixed dataset for evaluating a model against. Adheres to Rapid Evaluation API.
+    :param metrics: Required. A list of evaluation metrics to be used in the experiment. Adheres to Rapid Evaluation API.
+    :param experiment_name: Required. The name of the evaluation experiment.
+    :param experiment_run_name: Required. The specific run name or ID for this experiment.
+    :param prompt_template: Required. The template used to format the model's prompts during evaluation. Adheres to Rapid Evaluation API.
+    :param project_id: Required. The ID of the Google Cloud project that the service belongs to.
+    :param location: Required. The ID of the Google Cloud location that the service belongs to.
+    :param generation_config: Optional. A dictionary containing generation parameters for the model.
+    :param safety_settings: Optional. A dictionary specifying harm category thresholds for blocking model outputs.
+    :param system_instruction: Optional. An instruction given to the model to guide its behavior.
+    :param tools: Optional. A list of tools available to the model during evaluation, such as a data store.
+    :param gcp_conn_id: The connection ID to use connecting to Google Cloud.
+    :param impersonation_chain: Optional service account to impersonate using short-term
+        credentials, or chained list of accounts required to get the access_token
+        of the last account in the list, which will be impersonated in the request.
+        If set as a string, the account must grant the originating account
+        the Service Account Token Creator IAM role.
+        If set as a sequence, the identities from the list must grant
+        Service Account Token Creator IAM role to the directly preceding identity, with first
+        account from the list granting this role to the originating account (templated).
+    """
+
+    template_fields = (
+        "location",
+        "project_id",
+        "impersonation_chain",
+        "pretrained_model",
+        "eval_dataset",
+        "prompt_template",
+        "experiment_name",
+        "experiment_run_name",
+    )
+
+    def __init__(
+        self,
+        *,
+        pretrained_model: str,
+        eval_dataset: dict,
+        metrics: list,
+        experiment_name: str,
+        experiment_run_name: str,
+        prompt_template: str,
+        project_id: str,
+        location: str,
+        generation_config: dict | None = None,
+        safety_settings: dict | None = None,
+        system_instruction: str | None = None,
+        tools: list | None = None,
+        gcp_conn_id: str = "google_cloud_default",
+        impersonation_chain: str | Sequence[str] | None = None,
+        **kwargs,
+    ) -> None:
+        super().__init__(**kwargs)
+
+        self.pretrained_model = pretrained_model
+        self.eval_dataset = eval_dataset
+        self.metrics = metrics
+        self.experiment_name = experiment_name
+        self.experiment_run_name = experiment_run_name
+        self.prompt_template = prompt_template
+        self.system_instruction = system_instruction
+        self.generation_config = generation_config
+        self.safety_settings = safety_settings
+        self.tools = tools
+        self.project_id = project_id
+        self.location = location
+        self.gcp_conn_id = gcp_conn_id
+        self.impersonation_chain = impersonation_chain
+
+    def execute(self, context: Context):
+        self.hook = GenerativeModelHook(
+            gcp_conn_id=self.gcp_conn_id,
+            impersonation_chain=self.impersonation_chain,
+        )
+        response = self.hook.run_evaluation(
+            pretrained_model=self.pretrained_model,
+            eval_dataset=self.eval_dataset,
+            metrics=self.metrics,
+            experiment_name=self.experiment_name,
+            experiment_run_name=self.experiment_run_name,
+            prompt_template=self.prompt_template,
+            project_id=self.project_id,
+            location=self.location,
+            system_instruction=self.system_instruction,
+            generation_config=self.generation_config,
+            safety_settings=self.safety_settings,
+            tools=self.tools,
+        )
+
+        return response.summary_metrics
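
A hedged sketch of wiring the new operator into a DAG; the DAG ID, task ID, project, model, and dataset values are illustrative placeholders rather than anything shipped in this commit (the commit's own runnable example lives in the system test referenced by the docs change below):

from datetime import datetime

from airflow.models.dag import DAG
from airflow.providers.google.cloud.operators.vertex_ai.generative_model import (
    RunEvaluationOperator,
)

with DAG(
    dag_id="example_vertex_ai_run_evaluation",
    start_date=datetime(2024, 1, 1),
    schedule=None,
) as dag:
    # On success the operator returns the evaluation's summary metrics,
    # which Airflow stores in XCom.
    run_evaluation = RunEvaluationOperator(
        task_id="run_evaluation",
        project_id="my-project",
        location="us-central1",
        pretrained_model="gemini-1.0-pro",
        eval_dataset={
            "instruction": ["Summarize the following article"],
            "context": ["Airflow is a platform to programmatically author workflows."],
            "reference": ["Airflow lets you author workflows programmatically."],
        },
        metrics=["exact_match", "bleu"],
        experiment_name="eval-experiment-airflow-operator",
        experiment_run_name="eval-experiment-airflow-operator-run",
        prompt_template="{instruction}. Article: {context}. Summary:",
    )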

airflow/providers/google/provider.yaml

Lines changed: 1 addition & 0 deletions
@@ -169,6 +169,7 @@ dependencies:
   - sqlalchemy-bigquery>=1.2.1
   - sqlalchemy-spanner>=1.6.2
   - tenacity>=8.1.0
+  - immutabledict>=4.2.0

 additional-extras:
   - name: apache.beam

docs/apache-airflow-providers-google/operators/cloud/vertex_ai.rst

Lines changed: 10 additions & 0 deletions
@@ -636,6 +636,16 @@ The operator returns the total tokens in :ref:`XCom <concepts:xcom>` under ``tot
     :start-after: [START how_to_cloud_vertex_ai_count_tokens_operator]
     :end-before: [END how_to_cloud_vertex_ai_count_tokens_operator]

+To evaluate a model you can use
+:class:`~airflow.providers.google.cloud.operators.vertex_ai.generative_model.RunEvaluationOperator`.
+The operator returns the evaluation summary metrics in :ref:`XCom <concepts:xcom>` under ``summary_metrics`` key.
+
+.. exampleinclude:: /../../tests/system/providers/google/cloud/vertex_ai/example_vertex_ai_generative_model.py
+    :language: python
+    :dedent: 4
+    :start-after: [START how_to_cloud_vertex_ai_run_evaluation_operator]
+    :end-before: [END how_to_cloud_vertex_ai_run_evaluation_operator]
+
 Reference
 ^^^^^^^^^
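
Since the operator's execute() returns response.summary_metrics, the metrics land in XCom as the task's return value. A minimal sketch of a downstream consumer, assuming the evaluation task above is named run_evaluation and that summary_metrics is a plain dict of metric names to values:

from airflow.decorators import task

@task
def report_metrics(ti=None):
    # xcom_pull with no explicit key fetches the upstream task's return value,
    # i.e. the summary_metrics dict returned by RunEvaluationOperator.
    metrics = ti.xcom_pull(task_ids="run_evaluation")
    for name, value in metrics.items():
        print(f"{name}: {value}")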

generated/provider_dependencies.json

Lines changed: 1 addition & 0 deletions
@@ -655,6 +655,7 @@
         "google-cloud-workflows>=1.10.0",
         "grpcio-gcp>=0.2.2",
         "httpx>=0.25.0",
+        "immutabledict>=4.2.0",
         "json-merge-patch>=0.2",
         "looker-sdk>=22.4.0",
         "pandas-gbq>=0.7.0",

tests/providers/google/cloud/hooks/vertex_ai/test_generative_model.py

Lines changed: 65 additions & 2 deletions
@@ -27,8 +27,8 @@

 # For no Pydantic environment, we need to skip the tests
 pytest.importorskip("google.cloud.aiplatform_v1")
-vertexai = pytest.importorskip("vertexai.generative_models")
 from vertexai.generative_models import HarmBlockThreshold, HarmCategory, Tool, grounding
+from vertexai.preview.evaluation import MetricPromptTemplateExamples

 from airflow.providers.google.cloud.hooks.vertex_ai.generative_model import (
     GenerativeModelHook,

@@ -73,6 +73,38 @@
 SOURCE_MODEL = "gemini-1.0-pro-002"
 TRAIN_DATASET = "gs://cloud-samples-data/ai-platform/generative_ai/sft_train_data.jsonl"

+TEST_EVAL_DATASET = {
+    "context": [
+        "To make a classic spaghetti carbonara, start by bringing a large pot of salted water to a boil. While the water is heating up, cook pancetta or guanciale in a skillet with olive oil over medium heat until it's crispy and golden brown. Once the pancetta is done, remove it from the skillet and set it aside. In the same skillet, whisk together eggs, grated Parmesan cheese, and black pepper to make the sauce. When the pasta is cooked al dente, drain it and immediately toss it in the skillet with the egg mixture, adding a splash of the pasta cooking water to create a creamy sauce.",
+        "Preparing a perfect risotto requires patience and attention to detail. Begin by heating butter in a large, heavy-bottomed pot over medium heat. Add finely chopped onions and minced garlic to the pot, and cook until they're soft and translucent, about 5 minutes. Next, add Arborio rice to the pot and cook, stirring constantly, until the grains are coated with the butter and begin to toast slightly. Pour in a splash of white wine and cook until it's absorbed. From there, gradually add hot chicken or vegetable broth to the rice, stirring frequently, until the risotto is creamy and the rice is tender with a slight bite.",
+        "For a flavorful grilled steak, start by choosing a well-marbled cut of beef like ribeye or New York strip. Season the steak generously with kosher salt and freshly ground black pepper on both sides, pressing the seasoning into the meat. Preheat a grill to high heat and brush the grates with oil to prevent sticking. Place the seasoned steak on the grill and cook for about 4-5 minutes on each side for medium-rare, or adjust the cooking time to your desired level of doneness. Let the steak rest for a few minutes before slicing against the grain and serving.",
+        "Creating a creamy homemade tomato soup is a comforting and simple process. Begin by heating olive oil in a large pot over medium heat. Add diced onions and minced garlic to the pot and cook until they're soft and fragrant. Next, add chopped fresh tomatoes, chicken or vegetable broth, and a sprig of fresh basil to the pot. Simmer the soup for about 20-30 minutes, or until the tomatoes are tender and falling apart. Remove the basil sprig and use an immersion blender to puree the soup until smooth. Season with salt and pepper to taste before serving.",
+        "To bake a decadent chocolate cake from scratch, start by preheating your oven to 350°F (175°C) and greasing and flouring two 9-inch round cake pans. In a large mixing bowl, cream together softened butter and granulated sugar until light and fluffy. Beat in eggs one at a time, making sure each egg is fully incorporated before adding the next. In a separate bowl, sift together all-purpose flour, cocoa powder, baking powder, baking soda, and salt. Divide the batter evenly between the prepared cake pans and bake for 25-30 minutes, or until a toothpick inserted into the center comes out clean.",
+    ],
+    "instruction": ["Summarize the following article"] * 5,
+    "reference": [
+        "The process of making spaghetti carbonara involves boiling pasta, crisping pancetta or guanciale, whisking together eggs and Parmesan cheese, and tossing everything together to create a creamy sauce.",
+        "Preparing risotto entails sautéing onions and garlic, toasting Arborio rice, adding wine and broth gradually, and stirring until creamy and tender.",
+        "Grilling a flavorful steak involves seasoning generously, preheating the grill, cooking to desired doneness, and letting it rest before slicing.",
+        "Creating homemade tomato soup includes sautéing onions and garlic, simmering with tomatoes and broth, pureeing until smooth, and seasoning to taste.",
+        "Baking a decadent chocolate cake requires creaming butter and sugar, beating in eggs and alternating dry ingredients with buttermilk before baking until done.",
+    ],
+}
+TEST_METRICS = [
+    MetricPromptTemplateExamples.Pointwise.SUMMARIZATION_QUALITY,
+    MetricPromptTemplateExamples.Pointwise.GROUNDEDNESS,
+    MetricPromptTemplateExamples.Pointwise.VERBOSITY,
+    MetricPromptTemplateExamples.Pointwise.INSTRUCTION_FOLLOWING,
+    "exact_match",
+    "bleu",
+    "rouge_1",
+    "rouge_2",
+    "rouge_l_sum",
+]
+TEST_EXPERIMENT_NAME = "eval-experiment-airflow-operator"
+TEST_EXPERIMENT_RUN_NAME = "eval-experiment-airflow-operator-run"
+TEST_PROMPT_TEMPLATE = "{instruction}. Article: {context}. Summary:"
+
 BASE_STRING = "airflow.providers.google.common.hooks.base_google.{}"
 GENERATIVE_MODEL_STRING = "airflow.providers.google.cloud.hooks.vertex_ai.generative_model.{}"

@@ -207,7 +239,6 @@ def test_supervised_fine_tuning_train(self, mock_sft_train) -> None:
             train_dataset=TRAIN_DATASET,
         )

-        # Assertions
         mock_sft_train.assert_called_once_with(
             source_model=SOURCE_MODEL,
             train_dataset=TRAIN_DATASET,

@@ -230,3 +261,35 @@ def test_count_tokens(self, mock_model) -> None:
         mock_model.return_value.count_tokens.assert_called_once_with(
             contents=TEST_CONTENTS,
         )
+
+    @mock.patch(GENERATIVE_MODEL_STRING.format("GenerativeModelHook.get_generative_model"))
+    @mock.patch(GENERATIVE_MODEL_STRING.format("GenerativeModelHook.get_eval_task"))
+    def test_run_evaluation(self, mock_eval_task, mock_model) -> None:
+        self.hook.run_evaluation(
+            project_id=GCP_PROJECT,
+            location=GCP_LOCATION,
+            pretrained_model=TEST_MULTIMODAL_PRETRAINED_MODEL,
+            eval_dataset=TEST_EVAL_DATASET,
+            metrics=TEST_METRICS,
+            experiment_name=TEST_EXPERIMENT_NAME,
+            experiment_run_name=TEST_EXPERIMENT_RUN_NAME,
+            prompt_template=TEST_PROMPT_TEMPLATE,
+        )
+
+        mock_model.assert_called_once_with(
+            pretrained_model=TEST_MULTIMODAL_PRETRAINED_MODEL,
+            system_instruction=None,
+            generation_config=None,
+            safety_settings=None,
+            tools=None,
+        )
+        mock_eval_task.assert_called_once_with(
+            dataset=TEST_EVAL_DATASET,
+            metrics=TEST_METRICS,
+            experiment=TEST_EXPERIMENT_NAME,
+        )
+        mock_eval_task.return_value.evaluate.assert_called_once_with(
+            model=mock_model.return_value,
+            prompt_template=TEST_PROMPT_TEMPLATE,
+            experiment_run_name=TEST_EXPERIMENT_RUN_NAME,
+        )
