1 change: 0 additions & 1 deletion contributing/samples/gepa/experiment.py
@@ -43,7 +43,6 @@
from tau_bench.types import EnvRunResult
from tau_bench.types import RunConfig
import tau_bench_agent as tau_bench_agent_lib

import utils


1 change: 0 additions & 1 deletion contributing/samples/gepa/run_experiment.py
@@ -25,7 +25,6 @@
from absl import flags
import experiment
from google.genai import types

import utils

_OUTPUT_DIR = flags.DEFINE_string(
6 changes: 6 additions & 0 deletions src/google/adk/flows/llm_flows/base_llm_flow.py
@@ -324,6 +324,9 @@ def get_author_for_event(llm_response):
id=Event.new_id(),
invocation_id=invocation_context.invocation_id,
author=get_author_for_event(llm_response),
custom_metadata=getattr(
invocation_context.run_config, 'custom_metadata', None
),
)

async with Aclosing(
@@ -438,6 +441,9 @@ async def _run_one_step_async(
invocation_id=invocation_context.invocation_id,
author=invocation_context.agent.name,
branch=invocation_context.branch,
custom_metadata=getattr(
invocation_context.run_config, 'custom_metadata', None
),
)
async with Aclosing(
self._call_llm_async(
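In both hunks the new custom_metadata field is read off the run config with getattr, so run configs that do not carry the attribute still produce events with None. A minimal, self-contained sketch of that fallback behavior (the SimpleNamespace objects are hypothetical stand-ins, not the actual ADK RunConfig or Event classes):

from types import SimpleNamespace

# Hypothetical stand-ins for run configs: one carrying custom_metadata,
# one predating the field.
cfg_with = SimpleNamespace(custom_metadata={'experiment': 'gepa'})
cfg_without = SimpleNamespace()

# Mirrors the Event construction above: fall back to None when the
# run config has no custom_metadata attribute.
for cfg in (cfg_with, cfg_without):
    print(getattr(cfg, 'custom_metadata', None))
# -> {'experiment': 'gepa'}
# -> None
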
4 changes: 4 additions & 0 deletions src/google/adk/models/lite_llm.py
@@ -533,6 +533,10 @@ async def _get_content(

content_objects = []
for part in parts:
# Skip thought parts: they carry model reasoning and should not be
# fed back as input on subsequent turns.
if getattr(part, "thought", False):
continue
if part.text:
if len(parts) == 1:
return part.text
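The guard above drops parts flagged as model thoughts before they are converted into outgoing message content, so reasoning traces are not replayed as input on later turns. A minimal sketch of that filter using hypothetical stand-in part objects (not the real genai Part type):

from types import SimpleNamespace

# Hypothetical stand-ins for content parts; only `thought` and `text`
# matter for this filter.
parts = [
    SimpleNamespace(text="internal reasoning trace", thought=True),
    SimpleNamespace(text="final answer", thought=False),
]

# Mirrors the loop in _get_content: thought parts are skipped, everything
# else is kept for the outgoing message.
kept = [p.text for p in parts if not getattr(p, "thought", False)]
print(kept)  # ['final answer']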