Skip to content

Commit 12c997f

Browse files
committed
fix errors reported by clippy
1 parent ed7a4c4 commit 12c997f

File tree

4 files changed

+17
-18
lines changed

4 files changed

+17
-18
lines changed

async-openai/src/client.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -381,7 +381,7 @@ impl<C: Config> Client<C> {
381381
// Convert response body to EventSource stream
382382
let stream = response
383383
.bytes_stream()
384-
.map(|result| result.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e)));
384+
.map(|result| result.map_err(|e| std::io::Error::other(e)));
385385
let event_stream = eventsource_stream::EventStream::new(stream);
386386

387387
// Convert EventSource stream to our expected format

async-openai/src/transcriptions.rs

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -58,10 +58,9 @@ impl<'c, C: Config> Transcriptions<'c, C> {
5858
request.stream = Some(true);
5959
}
6060

61-
Ok(self
62-
.client
61+
self.client
6362
.post_form_stream("/audio/transcriptions", request)
64-
.await?)
63+
.await
6564
}
6665

6766
/// Transcribes audio into the input language.

async-openai/src/types/impls.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -942,7 +942,7 @@ impl AsyncTryFrom<CreateTranscriptionRequest> for reqwest::multipart::Form {
942942
TranscriptionChunkingStrategy::ServerVad(vad_config) => {
943943
form = form.text(
944944
"chunking_strategy",
945-
format!("{}", serde_json::to_string(&vad_config).unwrap()),
945+
serde_json::to_string(&vad_config).unwrap().to_string(),
946946
);
947947
}
948948
}

async-openai/src/types/responses/response.rs

Lines changed: 13 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -583,10 +583,10 @@ pub struct CreateResponse {
583583
/// - `web_search_call.action.sources`: Include the sources of the web search tool call.
584584
///
585585
/// - `code_interpreter_call.outputs`: Includes the outputs of python code execution in code
586-
/// interpreter tool call items.
586+
/// interpreter tool call items.
587587
///
588588
/// - `computer_call_output.output.image_url`: Include image urls from the computer call
589-
/// output.
589+
/// output.
590590
///
591591
/// - `file_search_call.results`: Include the search results of the file search tool call.
592592
///
@@ -595,9 +595,9 @@ pub struct CreateResponse {
595595
/// - `message.output_text.logprobs`: Include logprobs with assistant messages.
596596
///
597597
/// - `reasoning.encrypted_content`: Includes an encrypted version of reasoning tokens in
598-
/// reasoning item outputs. This enables reasoning items to be used in multi-turn
599-
/// conversations when using the Responses API statelessly (like when the `store` parameter is
600-
/// set to `false`, or when an organization is enrolled in the zero data retention program).
598+
/// reasoning item outputs. This enables reasoning items to be used in multi-turn
599+
/// conversations when using the Responses API statelessly (like when the `store` parameter is
600+
/// set to `false`, or when an organization is enrolled in the zero data retention program).
601601
#[serde(skip_serializing_if = "Option::is_none")]
602602
pub include: Option<Vec<IncludeEnum>>,
603603

@@ -760,10 +760,10 @@ pub struct CreateResponse {
760760

761761
///The truncation strategy to use for the model response.
762762
/// - `auto`: If the input to this Response exceeds
763-
/// the model's context window size, the model will truncate the
763+
/// the model's context window size, the model will truncate the
764764
/// response to fit the context window by dropping items from the beginning of the conversation.
765765
/// - `disabled` (default): If the input size will exceed the context window
766-
/// size for a model, the request will fail with a 400 error.
766+
/// size for a model, the request will fail with a 400 error.
767767
#[serde(skip_serializing_if = "Option::is_none")]
768768
pub truncation: Option<Truncation>,
769769
}
@@ -2333,10 +2333,10 @@ pub struct Response {
23332333

23342334
///The truncation strategy to use for the model response.
23352335
/// - `auto`: If the input to this Response exceeds
2336-
/// the model's context window size, the model will truncate the
2337-
/// response to fit the context window by dropping items from the beginning of the conversation.
2336+
/// the model's context window size, the model will truncate the
2337+
/// response to fit the context window by dropping items from the beginning of the conversation.
23382338
/// - `disabled` (default): If the input size will exceed the context window
2339-
/// size for a model, the request will fail with a 400 error.
2339+
/// size for a model, the request will fail with a 400 error.
23402340
#[serde(skip_serializing_if = "Option::is_none")]
23412341
pub truncation: Option<Truncation>,
23422342

@@ -2539,10 +2539,10 @@ pub struct TokenCountsBody {
25392539

25402540
///The truncation strategy to use for the model response.
25412541
/// - `auto`: If the input to this Response exceeds
2542-
/// the model's context window size, the model will truncate the
2543-
/// response to fit the context window by dropping items from the beginning of the conversation.
2542+
/// the model's context window size, the model will truncate the
2543+
/// response to fit the context window by dropping items from the beginning of the conversation.
25442544
/// - `disabled` (default): If the input size will exceed the context window
2545-
/// size for a model, the request will fail with a 400 error.
2545+
/// size for a model, the request will fail with a 400 error.
25462546
#[serde(skip_serializing_if = "Option::is_none")]
25472547
pub truncation: Option<Truncation>,
25482548
}

0 commit comments

Comments (0)