@@ -92,6 +92,7 @@ type RunRequest struct {
9292 // Sampling temperature between 0 and 2. Higher values like 0.8 are more random.
9393 // lower values are more focused and deterministic.
9494 Temperature * float32 `json:"temperature,omitempty"`
95+ TopP * float32 `json:"top_p,omitempty"`
9596
9697 // The maximum number of prompt tokens that may be used over the course of the run.
9798 // If the run exceeds the number of prompt tokens specified, the run will end with status 'complete'.
@@ -103,6 +104,11 @@ type RunRequest struct {
103104
104105 // ThreadTruncationStrategy defines the truncation strategy to use for the thread.
105106 TruncationStrategy * ThreadTruncationStrategy `json:"truncation_strategy,omitempty"`
107+
108+ // This can be either a string or a ToolChoice object.
109+ ToolChoice any `json:"tool_choice,omitempty"`
110+ // This can be either a string or a ResponseFormat object.
111+ ResponseFormat any `json:"response_format,omitempty"`
106112}
107113
108114// ThreadTruncationStrategy defines the truncation strategy to use for the thread.
@@ -124,6 +130,13 @@ const (
124130 TruncationStrategyLastMessages = TruncationStrategy ("last_messages" )
125131)
126132
// ReponseFormat specifies the format the model must output.
// https://platform.openai.com/docs/api-reference/runs/createRun#runs-createrun-response_format.
// Type can be either "text" or "json_object".
//
// NOTE(review): the name is missing an 's' ("ReponseFormat" rather than
// "ResponseFormat"). It is exported, so renaming it now would break callers;
// kept as-is for backward compatibility — consider adding a correctly
// spelled alias and deprecating this one.
type ReponseFormat struct {
	Type string `json:"type"`
}
139+
// RunModifyRequest is the request body for modifying an existing run.
type RunModifyRequest struct {
	// Metadata is a set of key-value pairs to attach to the run;
	// omitted from the JSON payload when empty.
	Metadata map[string]any `json:"metadata,omitempty"`
}
0 commit comments