pydantic_ai.usage

RequestUsage dataclass

Bases: UsageBase

LLM usage associated with a single request.

This is an implementation of genai_prices.types.AbstractUsage so it can be used to calculate the price of the request.

Prices for LLM requests are calculated using genai-prices.
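
For example, a RequestUsage can be populated from a provider response and then priced. A minimal sketch; the pricing call is left commented out because the exact genai-prices signature used here is an assumption, not taken from this page:

from pydantic_ai.usage import RequestUsage

usage = RequestUsage(input_tokens=120, output_tokens=45, cache_read_tokens=30)

# Hypothetical pricing call -- check the genai-prices docs for the real API:
# from genai_prices import calc_price
# price = calc_price(usage, model_ref='gpt-4o', provider_id='openai')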

Source code in pydantic_ai_slim/pydantic_ai/usage.py
@dataclass(repr=False)
class RequestUsage(UsageBase):
    """LLM usage associated with a single request.

    This is an implementation of `genai_prices.types.AbstractUsage` so it can be used to calculate the price of the
    request.

    Prices for LLM requests are calculated using [genai-prices](https://github.com/pydantic/genai-prices).
    """

    input_tokens: int | None = None
    """Number of text input/prompt tokens."""

    cache_write_tokens: int | None = None
    """Number of tokens written to the cache."""
    cache_read_tokens: int | None = None
    """Number of tokens read from the cache."""

    output_tokens: int | None = None
    """Number of text output/completion tokens."""

    input_audio_tokens: int | None = None
    """Number of audio input tokens."""
    cache_audio_read_tokens: int | None = None
    """Number of audio tokens read from the cache."""

    details: dict[str, int] | None = None
    """Any extra details returned by the model."""

    @property
    def output_audio_tokens(self) -> int | None:
        """Audio output tokens are not tracked per request, so this is always `None`."""
        return None

    @property
    def requests(self) -> int:
        """A single `RequestUsage` always represents exactly one request."""
        return 1

    @property
    @deprecated('`request_tokens` is deprecated, use `input_tokens` instead')
    def request_tokens(self) -> int | None:
        return self.input_tokens

    @property
    @deprecated('`response_tokens` is deprecated, use `output_tokens` instead')
    def response_tokens(self) -> int | None:
        return self.output_tokens

    @property
    @deprecated('`total_tokens` is deprecated, sum the specific fields you need instead')
    def total_tokens(self) -> int | None:
        return sum(v for k, v in dataclasses.asdict(self).items() if k.endswith('_tokens') and v is not None)

    def incr(self, incr_usage: RequestUsage) -> None:
        """Increment the usage in place.

        Args:
            incr_usage: The usage to increment by.
        """
        return _incr_usage_tokens(self, incr_usage)

    def __add__(self, other: RequestUsage) -> RequestUsage:
        """Add two RequestUsages together.

        This is provided so it's trivial to sum usage information from multiple parts of a response.

        **WARNING:** this CANNOT be used to sum multiple requests without breaking some pricing calculations.
        """
        new_usage = copy(self)
        new_usage.incr(other)
        return new_usage

input_tokens class-attribute instance-attribute

input_tokens: int | None = None

Number of text input/prompt tokens.

cache_write_tokens class-attribute instance-attribute

cache_write_tokens: int | None = None

Number of tokens written to the cache.

cache_read_tokens class-attribute instance-attribute

cache_read_tokens: int | None = None

Number of tokens read from the cache.

output_tokens class-attribute instance-attribute

output_tokens: int | None = None

Number of text output/completion tokens.

input_audio_tokens class-attribute instance-attribute

input_audio_tokens: int | None = None

Number of audio input tokens.

cache_audio_read_tokens class-attribute instance-attribute

cache_audio_read_tokens: int | None = None

Number of audio tokens read from the cache.

details class-attribute instance-attribute

details: dict[str, int] | None = None

Any extra details returned by the model.

incr

incr(incr_usage: RequestUsage) -> None

Increment the usage in place.

Parameters:

Name        Type          Description                 Default
incr_usage  RequestUsage  The usage to increment by.  required
Source code in pydantic_ai_slim/pydantic_ai/usage.py
def incr(self, incr_usage: RequestUsage) -> None:
    """Increment the usage in place.

    Args:
        incr_usage: The usage to increment by.
    """
    return _incr_usage_tokens(self, incr_usage)

__add__

__add__(other: RequestUsage) -> RequestUsage

Add two RequestUsages together.

This is provided so it's trivial to sum usage information from multiple parts of a response.

WARNING: this CANNOT be used to sum multiple requests without breaking some pricing calculations.
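
For example, it is safe to combine usage reported for different parts of the same response (token counts here are made up for illustration):

part1 = RequestUsage(input_tokens=100, output_tokens=10)
part2 = RequestUsage(output_tokens=25)
combined = part1 + part2
print(combined.output_tokens)  # 35
# Do NOT sum usage across separate requests this way; accumulate into a RunUsage instead.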

Source code in pydantic_ai_slim/pydantic_ai/usage.py
def __add__(self, other: RequestUsage) -> RequestUsage:
    """Add two RequestUsages together.

    This is provided so it's trivial to sum usage information from multiple parts of a response.

    **WARNING:** this CANNOT be used to sum multiple requests without breaking some pricing calculations.
    """
    new_usage = copy(self)
    new_usage.incr(other)
    return new_usage

RunUsage dataclass

Bases: UsageBase

LLM usage associated with an agent run.

Responsibility for calculating request usage is on the model; Pydantic AI simply sums the usage information across requests.
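
In practice you rarely construct a RunUsage by hand; it is accumulated during an agent run and exposed on the result. A minimal sketch, assuming a configured OpenAI model:

from pydantic_ai import Agent

agent = Agent('openai:gpt-4o')
result = agent.run_sync('What is the capital of France?')
print(result.usage())  # e.g. RunUsage(requests=1, input_tokens=..., output_tokens=...)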

Source code in pydantic_ai_slim/pydantic_ai/usage.py
@dataclass(repr=False)
class RunUsage(UsageBase):
    """LLM usage associated with an agent run.

    Responsibility for calculating request usage is on the model; Pydantic AI simply sums the usage information across requests.
    """

    requests: int = 0
    """Number of requests made to the LLM API."""

    input_tokens: int | None = None
    """Total number of text input/prompt tokens."""

    cache_write_tokens: int | None = None
    """Total number of tokens written to the cache."""
    cache_read_tokens: int | None = None
    """Total number of tokens read from the cache."""

    input_audio_tokens: int | None = None
    """Total number of audio input tokens."""
    cache_audio_read_tokens: int | None = None
    """Total number of audio tokens read from the cache."""

    output_tokens: int | None = None
    """Total number of text output/completion tokens."""

    details: dict[str, int] | None = None
    """Any extra details returned by the model."""

    def input_output_tokens(self) -> int | None:
        """Sum of `input_tokens + output_tokens`."""
        if self.input_tokens is None and self.output_tokens is None:
            return None
        else:
            return (self.input_tokens or 0) + (self.output_tokens or 0)

    @property
    @deprecated('`request_tokens` is deprecated, use `input_tokens` instead')
    def request_tokens(self) -> int | None:
        return self.input_tokens

    @property
    @deprecated('`response_tokens` is deprecated, use `output_tokens` instead')
    def response_tokens(self) -> int | None:
        return self.output_tokens

    @property
    @deprecated('`total_tokens` is deprecated, sum the specific fields you need or use `input_output_tokens` instead')
    def total_tokens(self) -> int | None:
        return sum(v for k, v in dataclasses.asdict(self).items() if k.endswith('_tokens') and v is not None)

    def incr(self, incr_usage: RunUsage | RequestUsage) -> None:
        """Increment the usage in place.

        Args:
            incr_usage: The usage to increment by.
        """
        if isinstance(incr_usage, RunUsage):
            self.requests += incr_usage.requests
        return _incr_usage_tokens(self, incr_usage)

    def __add__(self, other: RunUsage | RequestUsage) -> RunUsage:
        """Add two RunUsages together.

        This is provided so it's trivial to sum usage information from multiple runs.
        """
        new_usage = copy(self)
        new_usage.incr(other)
        return new_usage

requests class-attribute instance-attribute

requests: int = 0

Number of requests made to the LLM API.

input_tokens class-attribute instance-attribute

input_tokens: int | None = None

Total number of text input/prompt tokens.

cache_write_tokens class-attribute instance-attribute

cache_write_tokens: int | None = None

Total number of tokens written to the cache.

cache_read_tokens class-attribute instance-attribute

cache_read_tokens: int | None = None

Total number of tokens read from the cache.

input_audio_tokens class-attribute instance-attribute

input_audio_tokens: int | None = None

Total number of audio input tokens.

cache_audio_read_tokens class-attribute instance-attribute

cache_audio_read_tokens: int | None = None

Total number of audio tokens read from the cache.

output_tokens class-attribute instance-attribute

output_tokens: int | None = None

Total number of text output/completion tokens.

details class-attribute instance-attribute

details: dict[str, int] | None = None

Any extra details returned by the model.

input_output_tokens

input_output_tokens() -> int | None

Sum of input_tokens + output_tokens.
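
For example:

print(RunUsage(input_tokens=100, output_tokens=50).input_output_tokens())  # 150
print(RunUsage().input_output_tokens())  # None: no token counts recorded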

Source code in pydantic_ai_slim/pydantic_ai/usage.py
def input_output_tokens(self) -> int | None:
    """Sum of `input_tokens + output_tokens`."""
    if self.input_tokens is None and self.output_tokens is None:
        return None
    else:
        return (self.input_tokens or 0) + (self.output_tokens or 0)

incr

incr(incr_usage: RunUsage | RequestUsage) -> None

Increment the usage in place.

Parameters:

Name        Type                     Description                 Default
incr_usage  RunUsage | RequestUsage  The usage to increment by.  required
Source code in pydantic_ai_slim/pydantic_ai/usage.py
def incr(self, incr_usage: RunUsage | RequestUsage) -> None:
    """Increment the usage in place.

    Args:
        incr_usage: The usage to increment by.
    """
    if isinstance(incr_usage, RunUsage):
        self.requests += incr_usage.requests
    return _incr_usage_tokens(self, incr_usage)

__add__

__add__(other: RunUsage | RequestUsage) -> RunUsage

Add two RunUsages together.

This is provided so it's trivial to sum usage information from multiple runs.

Source code in pydantic_ai_slim/pydantic_ai/usage.py
def __add__(self, other: RunUsage | RequestUsage) -> RunUsage:
    """Add two RunUsages together.

    This is provided so it's trivial to sum usage information from multiple runs.
    """
    new_usage = copy(self)
    new_usage.incr(other)
    return new_usage

UsageLimits dataclass

Limits on model usage.

The request count is tracked by pydantic_ai, and the request limit is checked before each request to the model. Token counts are provided in responses from the model, and the token limits are checked after each response.

Each of the limits can be set to None to disable that limit.
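
A typical use is to cap an agent run. A minimal sketch, assuming limits are passed via the usage_limits argument of a run method:

from pydantic_ai import Agent
from pydantic_ai.exceptions import UsageLimitExceeded
from pydantic_ai.usage import UsageLimits

agent = Agent('openai:gpt-4o')
try:
    result = agent.run_sync(
        'Summarise the history of Rome.',
        usage_limits=UsageLimits(request_limit=5, total_tokens_limit=10_000),
    )
except UsageLimitExceeded as e:
    print(f'Run aborted: {e}')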

Source code in pydantic_ai_slim/pydantic_ai/usage.py
@dataclass(repr=False)
class UsageLimits:
    """Limits on model usage.

    The request count is tracked by pydantic_ai, and the request limit is checked before each request to the model.
    Token counts are provided in responses from the model, and the token limits are checked after each response.

    Each of the limits can be set to `None` to disable that limit.
    """

    request_limit: int | None = 50
    """The maximum number of requests allowed to the model."""
    input_tokens_limit: int | None = None
    """The maximum number of input/prompt tokens allowed."""
    output_tokens_limit: int | None = None
    """The maximum number of output/response tokens allowed."""
    total_tokens_limit: int | None = None
    """The maximum number of combined input and output tokens allowed."""

    @overload
    def __init__(
        self,
        *,
        request_limit: int | None = 50,
        input_tokens_limit: int | None = None,
        output_tokens_limit: int | None = None,
        total_tokens_limit: int | None = None,
    ) -> None:
        self.request_limit = request_limit
        self.input_tokens_limit = input_tokens_limit
        self.output_tokens_limit = output_tokens_limit
        self.total_tokens_limit = total_tokens_limit

    @overload
    @deprecated(
        'Use `input_tokens_limit` instead of `request_tokens_limit` and `output_tokens_limit` instead of `response_tokens_limit`'
    )
    def __init__(
        self,
        *,
        request_limit: int | None = 50,
        request_tokens_limit: int | None = None,
        response_tokens_limit: int | None = None,
        total_tokens_limit: int | None = None,
    ) -> None:
        self.request_limit = request_limit
        self.input_tokens_limit = request_tokens_limit
        self.output_tokens_limit = response_tokens_limit
        self.total_tokens_limit = total_tokens_limit

    def __init__(
        self,
        *,
        request_limit: int | None = 50,
        input_tokens_limit: int | None = None,
        output_tokens_limit: int | None = None,
        total_tokens_limit: int | None = None,
        # deprecated:
        request_tokens_limit: int | None = None,
        response_tokens_limit: int | None = None,
    ):
        self.request_limit = request_limit
        self.input_tokens_limit = input_tokens_limit or request_tokens_limit
        self.output_tokens_limit = output_tokens_limit or response_tokens_limit
        self.total_tokens_limit = total_tokens_limit

    def has_token_limits(self) -> bool:
        """Returns `True` if this instance places any limits on token counts.

        If this returns `False`, the `check_tokens` method will never raise an error.

        This is useful because if we have token limits, we need to check them after receiving each streamed message.
        If there are no limits, we can skip that processing in the streaming response iterator.
        """
        return any(
            limit is not None for limit in (self.input_tokens_limit, self.output_tokens_limit, self.total_tokens_limit)
        )

    def check_before_request(self, usage: RunUsage) -> None:
        """Raises a `UsageLimitExceeded` exception if the next request would exceed the request_limit."""
        request_limit = self.request_limit
        if request_limit is not None and usage.requests >= request_limit:
            raise UsageLimitExceeded(f'The next request would exceed the request_limit of {request_limit}')

    def check_tokens(self, usage: RunUsage) -> None:
        """Raises a `UsageLimitExceeded` exception if the usage exceeds any of the token limits."""
        input_tokens = usage.input_tokens or 0
        if self.input_tokens_limit is not None and input_tokens > self.input_tokens_limit:
            raise UsageLimitExceeded(f'Exceeded the input_tokens_limit of {self.input_tokens_limit} ({input_tokens=})')

        output_tokens = usage.output_tokens or 0
        if self.output_tokens_limit is not None and output_tokens > self.output_tokens_limit:
            raise UsageLimitExceeded(
                f'Exceeded the output_tokens_limit of {self.output_tokens_limit} ({output_tokens=})'
            )

        total_tokens = usage.input_output_tokens() or 0
        if self.total_tokens_limit is not None and total_tokens > self.total_tokens_limit:
            raise UsageLimitExceeded(f'Exceeded the total_tokens_limit of {self.total_tokens_limit} ({total_tokens=})')

    __repr__ = _utils.dataclasses_no_defaults_repr

request_limit class-attribute instance-attribute

request_limit: int | None = 50

The maximum number of requests allowed to the model.

input_tokens_limit class-attribute instance-attribute

input_tokens_limit: int | None = None

The maximum number of input/prompt tokens allowed.

output_tokens_limit class-attribute instance-attribute

output_tokens_limit: int | None = None

The maximum number of output/response tokens allowed.

total_tokens_limit class-attribute instance-attribute

total_tokens_limit: int | None = None

The maximum number of combined input and output tokens allowed.

has_token_limits

has_token_limits() -> bool

Returns True if this instance places any limits on token counts.

If this returns False, the check_tokens method will never raise an error.

This is useful because if we have token limits, we need to check them after receiving each streamed message. If there are no limits, we can skip that processing in the streaming response iterator.
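
For example:

print(UsageLimits(request_limit=10).has_token_limits())  # False: only the request count is limited
print(UsageLimits(output_tokens_limit=500).has_token_limits())  # True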

Source code in pydantic_ai_slim/pydantic_ai/usage.py
def has_token_limits(self) -> bool:
    """Returns `True` if this instance places any limits on token counts.

    If this returns `False`, the `check_tokens` method will never raise an error.

    This is useful because if we have token limits, we need to check them after receiving each streamed message.
    If there are no limits, we can skip that processing in the streaming response iterator.
    """
    return any(
        limit is not None for limit in (self.input_tokens_limit, self.output_tokens_limit, self.total_tokens_limit)
    )

check_before_request

check_before_request(usage: RunUsage) -> None

Raises a UsageLimitExceeded exception if the next request would exceed the request_limit.

Source code in pydantic_ai_slim/pydantic_ai/usage.py
def check_before_request(self, usage: RunUsage) -> None:
    """Raises a `UsageLimitExceeded` exception if the next request would exceed the request_limit."""
    request_limit = self.request_limit
    if request_limit is not None and usage.requests >= request_limit:
        raise UsageLimitExceeded(f'The next request would exceed the request_limit of {request_limit}')

check_tokens

check_tokens(usage: RunUsage) -> None

Raises a UsageLimitExceeded exception if the usage exceeds any of the token limits.
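
The check can also be run directly against accumulated usage, for example:

from pydantic_ai.exceptions import UsageLimitExceeded
from pydantic_ai.usage import RunUsage, UsageLimits

limits = UsageLimits(output_tokens_limit=100)
usage = RunUsage(requests=1, input_tokens=50, output_tokens=150)
try:
    limits.check_tokens(usage)
except UsageLimitExceeded as e:
    print(e)  # Exceeded the output_tokens_limit of 100 (output_tokens=150)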

Source code in pydantic_ai_slim/pydantic_ai/usage.py
def check_tokens(self, usage: RunUsage) -> None:
    """Raises a `UsageLimitExceeded` exception if the usage exceeds any of the token limits."""
    input_tokens = usage.input_tokens or 0
    if self.input_tokens_limit is not None and input_tokens > self.input_tokens_limit:
        raise UsageLimitExceeded(f'Exceeded the input_tokens_limit of {self.input_tokens_limit} ({input_tokens=})')

    output_tokens = usage.output_tokens or 0
    if self.output_tokens_limit is not None and output_tokens > self.output_tokens_limit:
        raise UsageLimitExceeded(
            f'Exceeded the output_tokens_limit of {self.output_tokens_limit} ({output_tokens=})'
        )

    total_tokens = usage.input_output_tokens() or 0
    if self.total_tokens_limit is not None and total_tokens > self.total_tokens_limit:
        raise UsageLimitExceeded(f'Exceeded the total_tokens_limit of {self.total_tokens_limit} ({total_tokens=})')