trace_feedback

langsmith.testing._internal.trace_feedback(*, name: str = 'Feedback') → Generator[RunTree | None, None, None][source]

Trace the computation of feedback for a pytest run as its own run.

Warning

This API is in beta and may change in future versions.

Parameters:

name (str) – Name of the feedback run. Defaults to "Feedback".

Return type:

Generator[RunTree | None, None, None]

Example

import openai
import pytest

from langsmith import testing as t
from langsmith import wrappers

oai_client = wrappers.wrap_openai(openai.Client())


@pytest.mark.langsmith
def test_openai_says_hello():
    # Traced code will be included in the test case
    text = "Say hello!"
    response = oai_client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": text},
        ],
    )
    t.log_inputs({"text": text})
    t.log_outputs({"response": response.choices[0].message.content})
    t.log_reference_outputs({"response": "hello!"})

    # Use this context manager to trace any steps used for generating evaluation
    # feedback separately from the main application logic
    with t.trace_feedback():
        grade = oai_client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[
                {
                    "role": "system",
                    "content": "Return 1 if 'hello' is in the user message and 0 otherwise.",
                },
                {
                    "role": "user",
                    "content": response.choices[0].message.content,
                },
            ],
        )
        # Make sure to log relevant feedback within the context for the
        # trace to be associated with this feedback.
        t.log_feedback(
            key="llm_judge", score=float(grade.choices[0].message.content)
        )

    assert "hello" in response.choices[0].message.content.lower()
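For a smaller, self-contained sketch of the name argument, the hypothetical test below uses a deterministic string check instead of an LLM judge. The test name, the run name "string_grader", and the feedback key "contains_hello" are illustrative choices, not part of the library:

import pytest

from langsmith import testing as t


@pytest.mark.langsmith
def test_greeting_contains_hello():
    # Hypothetical application logic standing in for a real model call
    text = "Say hello!"
    response = "Hello there!"
    t.log_inputs({"text": text})
    t.log_outputs({"response": response})

    # The feedback computation is traced as its own run named "string_grader"
    with t.trace_feedback(name="string_grader"):
        score = 1.0 if "hello" in response.lower() else 0.0
        # Log the feedback inside the context so the trace is associated with it
        t.log_feedback(key="contains_hello", score=score)

    assert score == 1.0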