Asynchronous support

run_async flag

Each synchronous method has an asynchronous equivalent. Asynchronous methods share the name of their synchronous counterparts, prefixed with an “a”, and are coroutines that must be awaited.

To use these methods, create your SDK client with the run_async=True argument; the default is False.

This puts the SDK in an asynchronous-ready mode, allowing you to benefit from concurrent, non-blocking execution.
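
As a minimal sketch (reusing the Politeness evaluator from the examples below), the same evaluation looks like this in each mode:

import asyncio

from root import RootSignals

# Synchronous client: the call blocks and returns the result directly
client = RootSignals()
result = client.evaluators.Politeness("This is polite and clear.")

# Asynchronous client: the same call is a coroutine that must be awaited
aclient = RootSignals(run_async=True)
result = asyncio.run(aclient.evaluators.Politeness("This is polite and clear."))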

Note

Synchronous methods are not available on an asynchronous client and vice versa. Attempting to use them interchangeably will result in an error.
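
This follows from what the asynchronous methods are: on an asynchronous client, a call produces a coroutine rather than a result, so synchronous code that treats it as a result breaks. A small sketch illustrating this, assuming (as stated above) that the asynchronous methods are plain coroutines; Clarity is reused from the examples below:

import asyncio

from root import RootSignals

aclient = RootSignals(run_async=True)


async def main():
    # The call itself only creates a coroutine; no request has run yet
    pending = aclient.evaluators.Clarity("An example response.")
    print(asyncio.iscoroutine(pending))  # True if the SDK returns plain coroutines
    # Awaiting the coroutine actually performs the evaluation
    result = await pending
    print(result)


asyncio.run(main())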

Examples

Evaluator with ThreadPoolExecutor (sync)

from concurrent.futures import ThreadPoolExecutor

from root import RootSignals

# Connect to the Root Signals API
client = RootSignals()


def main():
    response = "This is polite and clear."

    # Pair each evaluator with the text it should score
    tasks = [
        (client.evaluators.Politeness, response),
        (client.evaluators.Clarity, response),
    ]

    # Run the blocking evaluator calls concurrently in worker threads
    with ThreadPoolExecutor() as executor:
        future_to_eval = {
            func.__name__: executor.submit(func, text) for func, text in tasks
        }
        for name, future in future_to_eval.items():
            print(f"Evaluation result for {name}: {future.result()}")


main()
// print(f"Evaluation result for {future}: {result}")

"Politeness": {
    "score": "0.7", 
    "cost": "None",
    "justification": "The response \"This is polite and clear.\" is neutral and 
    lacks any negative or aggressive language. It is concise and straightforward, 
    which can be seen as polite in its simplicity..."
}

"Clarity": {
    "score": "0.95", 
    "cost": "None",
    "justification": "The response \"This is polite and clear.\" is straightforward 
    and easy to understand at first read. The ideas are presented in a logical and concise manner,
    with sufficient detail..."
}
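
If you would rather handle results as soon as each evaluation finishes, instead of in submission order, the standard concurrent.futures.as_completed helper fits the same pattern (a sketch reusing the evaluators above):

from concurrent.futures import ThreadPoolExecutor, as_completed

from root import RootSignals

client = RootSignals()

response = "This is polite and clear."
evaluators = [client.evaluators.Politeness, client.evaluators.Clarity]

with ThreadPoolExecutor() as executor:
    # Map each future back to the name of the evaluator that produced it
    futures = {executor.submit(func, response): func.__name__ for func in evaluators}
    for future in as_completed(futures):
        print(f"Evaluation result for {futures[future]}: {future.result()}")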

Evaluator with Asyncio

import asyncio

from root import RootSignals

# Connect to the Root Signals API
aclient = RootSignals(run_async=True)


async def main():
    response = "This is polite and clear."

    # Each call returns a coroutine; nothing runs until it is awaited
    tasks = [
        aclient.evaluators.Politeness(response),
        aclient.evaluators.Clarity(response),
    ]

    # Run both evaluations concurrently and collect the results in order
    results = await asyncio.gather(*tasks)

    for result in results:
        print(f"Evaluation result for {result.evaluator_name}: {result}")


asyncio.run(main())
// print(f"Evaluation result for {eval}: {future.get(eval)}")

"Politeness": {
    "score": "0.7",
    "cost": "None",
    "justification": "The response \"This is polite and clear.\" is neutral and
    lacks any negative or aggressive language. It is concise and straightforward,
    which can be seen as polite in its simplicity..."
}

"Clarity": {
    "score": "0.95",
    "cost": "None",
    "justification": "The response \"This is polite and clear.\" is straightforward
    and easy to understand at first read. The ideas are presented in a logical and concise manner,
    with sufficient detail..."
}
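
When gathering many evaluations, you may not want one failure to abort the whole batch. asyncio.gather supports this directly with return_exceptions=True (a sketch reusing the asynchronous client above):

import asyncio

from root import RootSignals

aclient = RootSignals(run_async=True)


async def main():
    response = "This is polite and clear."
    tasks = [
        aclient.evaluators.Politeness(response),
        aclient.evaluators.Clarity(response),
    ]

    # Failed calls are returned as exception objects instead of raising
    results = await asyncio.gather(*tasks, return_exceptions=True)
    for result in results:
        if isinstance(result, Exception):
            print(f"Evaluation failed: {result}")
        else:
            print(f"Evaluation result for {result.evaluator_name}: {result}")


asyncio.run(main())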

Asynchronous Evaluator Skill

The asynchronous version of an Evaluator Skill example looks like this:

import asyncio

from root import RootSignals
from root.validators import AValidator

# Connect to the Root Signals API
aclient = RootSignals(run_async=True)


async def main():
    # Create a custom evaluator to judge whether an answer is a recipe
    evaluator_skill = await aclient.evaluators.acreate(
        name="Cooking recipe",
        intent="This skill will evaluate if the answer is a cooking recipe.",
        predicate="Is the following a cooking recipe: {{output}}",
        model="gpt-4o",
    )

    # Attach the custom evaluator and a built-in one as validators
    cooking_skill = await aclient.skills.acreate(
        name="Cooking skill with a custom evaluator",
        prompt="Find me a good recipe for Italian food.",
        validators=[
            AValidator(evaluator_id=evaluator_skill.id, threshold=0.1),
            AValidator(
                evaluator_name="Truthfulness",
                threshold=0.5,
            ),
        ],
    )
    response = await cooking_skill.arun()

    # Check if the recipe was about cooking
    print(response.validation)


asyncio.run(main())

{
  "validator_results": [
    {
      "evaluator_name": "Cooking recipe evaluator",
      "evaluator_id": "...",
      "threshold": "0.1",
      "is_valid": "True",
      "result": "0.9",
      "status": "finished"
    }
  ],
  "is_valid": "True"}