@@ -583,10 +583,10 @@ pub struct CreateResponse {
     /// - `web_search_call.action.sources`: Include the sources of the web search tool call.
     ///
     /// - `code_interpreter_call.outputs`: Includes the outputs of python code execution in code
-    /// interpreter tool call items.
+    /// interpreter tool call items.
     ///
     /// - `computer_call_output.output.image_url`: Include image urls from the computer call
-    /// output.
+    /// output.
     ///
     /// - `file_search_call.results`: Include the search results of the file search tool call.
     ///
@@ -595,9 +595,9 @@ pub struct CreateResponse {
     /// - `message.output_text.logprobs`: Include logprobs with assistant messages.
     ///
     /// - `reasoning.encrypted_content`: Includes an encrypted version of reasoning tokens in
-    /// reasoning item outputs. This enables reasoning items to be used in multi-turn
-    /// conversations when using the Responses API statelessly (like when the `store` parameter is
-    /// set to `false`, or when an organization is enrolled in the zero data retention program).
+    /// reasoning item outputs. This enables reasoning items to be used in multi-turn
+    /// conversations when using the Responses API statelessly (like when the `store` parameter is
+    /// set to `false`, or when an organization is enrolled in the zero data retention program).
     #[serde(skip_serializing_if = "Option::is_none")]
     pub include: Option<Vec<IncludeEnum>>,
 
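
As a reading aid for the `include` field touched above: a minimal sketch of requesting encrypted reasoning content for stateless multi-turn use. The `ReasoningEncryptedContent` variant name and a `Default` impl on `CreateResponse` are assumptions, not shown in this diff.

```rust
// Sketch only: the variant name and the `Default` impl are assumed, not
// confirmed by this diff.
let request = CreateResponse {
    // Return encrypted reasoning tokens so reasoning items can be replayed in
    // later turns when the Responses API is used statelessly (`store` set to
    // `false`, or zero data retention).
    include: Some(vec![IncludeEnum::ReasoningEncryptedContent]),
    ..Default::default()
};
```
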
@@ -760,10 +760,10 @@ pub struct CreateResponse {
 
     ///The truncation strategy to use for the model response.
     /// - `auto`: If the input to this Response exceeds
-    /// the model's context window size, the model will truncate the
+    /// the model's context window size, the model will truncate the
     /// response to fit the context window by dropping items from the beginning of the conversation.
     /// - `disabled` (default): If the input size will exceed the context window
-    /// size for a model, the request will fail with a 400 error.
+    /// size for a model, the request will fail with a 400 error.
     #[serde(skip_serializing_if = "Option::is_none")]
     pub truncation: Option<Truncation>,
 }
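
A small sketch of how a caller might pick between the two documented truncation strategies; the `Truncation::Auto` variant name is assumed from the `auto` wire value and is not shown in this diff.

```rust
// Sketch only: `Truncation::Auto` is assumed from the documented `auto` value.
fn truncation_for(long_conversation: bool) -> Option<Truncation> {
    if long_conversation {
        // `auto`: drop items from the start of the conversation rather than
        // failing when the context window is exceeded.
        Some(Truncation::Auto)
    } else {
        // `None` is skipped by serde, so the API default (`disabled`) applies
        // and an oversized request fails with a 400 error.
        None
    }
}
```
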
@@ -2333,10 +2333,10 @@ pub struct Response {
 
     ///The truncation strategy to use for the model response.
     /// - `auto`: If the input to this Response exceeds
-    /// the model's context window size, the model will truncate the
-    /// response to fit the context window by dropping items from the beginning of the conversation.
+    /// the model's context window size, the model will truncate the
+    /// response to fit the context window by dropping items from the beginning of the conversation.
     /// - `disabled` (default): If the input size will exceed the context window
-    /// size for a model, the request will fail with a 400 error.
+    /// size for a model, the request will fail with a 400 error.
     #[serde(skip_serializing_if = "Option::is_none")]
     pub truncation: Option<Truncation>,
 
@@ -2539,10 +2539,10 @@ pub struct TokenCountsBody {
 
     ///The truncation strategy to use for the model response.
     /// - `auto`: If the input to this Response exceeds
-    /// the model's context window size, the model will truncate the
-    /// response to fit the context window by dropping items from the beginning of the conversation.
+    /// the model's context window size, the model will truncate the
+    /// response to fit the context window by dropping items from the beginning of the conversation.
     /// - `disabled` (default): If the input size will exceed the context window
-    /// size for a model, the request will fail with a 400 error.
+    /// size for a model, the request will fail with a 400 error.
     #[serde(skip_serializing_if = "Option::is_none")]
     pub truncation: Option<Truncation>,
 }