Skip to content

Commit b87d4c0

Browse files
committed
Add configurable verbosity for OpenAI Responses API
- Add ai:verbosity config option to AIModeConfigType schema
- Support low/medium/high verbosity levels (defaults to medium)
- Use medium verbosity by default for better model compatibility
- Change rate limit fallback from low to medium thinking level
- Remove hardcoded model-specific constraints in favor of user config
- Document that verbosity is OpenAI Responses API specific

Fixes issue where models like gpt-5.2-codex only support medium verbosity/reasoning levels, causing 400 Bad Request errors with unsupported 'low' values.

Users can now configure both ai:thinkinglevel and ai:verbosity per model in their waveai.json config files.
1 parent 31a8714 commit b87d4c0

File tree

6 files changed

+30
-5
lines changed

6 files changed

+30
-5
lines changed

frontend/types/gotypes.d.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@ declare global {
2323
"ai:apitype"?: string;
2424
"ai:model"?: string;
2525
"ai:thinkinglevel"?: string;
26+
"ai:verbosity"?: string;
2627
"ai:endpoint"?: string;
2728
"ai:azureapiversion"?: string;
2829
"ai:apitoken"?: string;

pkg/aiusechat/openai/openai-convertmessage.go

Lines changed: 13 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,8 @@ import (
2222
const (
2323
OpenAIDefaultAPIVersion = "2024-12-31"
2424
OpenAIDefaultMaxTokens = 4096
25+
// "medium" verbosity is more widely supported across models than "low"
26+
OpenAIDefaultVerbosity = "medium"
2527
)
2628

2729
// convertContentBlockToParts converts a single content block to UIMessageParts
@@ -190,10 +192,11 @@ func debugPrintReq(req *OpenAIRequest, endpoint string) {
190192
func buildOpenAIHTTPRequest(ctx context.Context, inputs []any, chatOpts uctypes.WaveChatOpts, cont *uctypes.WaveContinueResponse) (*http.Request, error) {
191193
opts := chatOpts.Config
192194

193-
// If continuing from premium rate limit, downgrade to default model and low thinking
195+
// If continuing from premium rate limit, downgrade to default model and medium thinking
196+
// (medium is more widely supported than low across different models)
194197
if cont != nil && cont.ContinueFromKind == uctypes.StopKindPremiumRateLimit {
195198
opts.Model = uctypes.DefaultOpenAIModel
196-
opts.ThinkingLevel = uctypes.ThinkingLevelLow
199+
opts.ThinkingLevel = uctypes.ThinkingLevelMedium
197200
}
198201

199202
if opts.Model == "" {
@@ -229,13 +232,18 @@ func buildOpenAIHTTPRequest(ctx context.Context, inputs []any, chatOpts uctypes.
229232
}
230233

231234
// Build request body
235+
// Use configured verbosity, or fall back to default constant
236+
verbosity := opts.Verbosity
237+
if verbosity == "" {
238+
verbosity = OpenAIDefaultVerbosity
239+
}
232240
reqBody := &OpenAIRequest{
233241
Model: opts.Model,
234242
Input: inputs,
235243
Stream: true,
236244
StreamOptions: &StreamOptionsType{IncludeObfuscation: false},
237245
MaxOutputTokens: maxTokens,
238-
Text: &TextType{Verbosity: "low"},
246+
Text: &TextType{Verbosity: verbosity},
239247
}
240248

241249
// Add system prompt as instructions if provided
@@ -264,10 +272,10 @@ func buildOpenAIHTTPRequest(ctx context.Context, inputs []any, chatOpts uctypes.
264272
reqBody.Tools = append(reqBody.Tools, webSearchTool)
265273
}
266274

267-
// Set reasoning based on thinking level
275+
// Set reasoning based on thinking level from config
268276
if opts.ThinkingLevel != "" {
269277
reqBody.Reasoning = &ReasoningType{
270-
Effort: opts.ThinkingLevel, // low, medium, high map directly
278+
Effort: opts.ThinkingLevel,
271279
}
272280
if opts.Model == "gpt-5" || opts.Model == "gpt-5.1" {
273281
reqBody.Reasoning.Summary = "auto"

pkg/aiusechat/uctypes/uctypes.go

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -278,6 +278,7 @@ type AIOptsType struct {
278278
MaxTokens int `json:"maxtokens,omitempty"`
279279
TimeoutMs int `json:"timeoutms,omitempty"`
280280
ThinkingLevel string `json:"thinkinglevel,omitempty"` // ThinkingLevelLow, ThinkingLevelMedium, or ThinkingLevelHigh
281+
Verbosity string `json:"verbosity,omitempty"` // Text verbosity level (OpenAI Responses API only, ignored by other backends)
281282
AIMode string `json:"aimode,omitempty"`
282283
Capabilities []string `json:"capabilities,omitempty"`
283284
WaveAIPremium bool `json:"waveaipremium,omitempty"`

pkg/aiusechat/usechat.go

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -110,12 +110,17 @@ func getWaveAISettings(premium bool, builderMode bool, rtInfo waveobj.ObjRTInfo,
110110
if thinkingLevel == "" {
111111
thinkingLevel = uctypes.ThinkingLevelMedium
112112
}
113+
verbosity := config.Verbosity
114+
if verbosity == "" {
115+
verbosity = uctypes.ThinkingLevelMedium // default to medium
116+
}
113117
opts := &uctypes.AIOptsType{
114118
Provider: config.Provider,
115119
APIType: config.APIType,
116120
Model: config.Model,
117121
MaxTokens: maxTokens,
118122
ThinkingLevel: thinkingLevel,
123+
Verbosity: verbosity,
119124
AIMode: aiMode,
120125
Endpoint: baseUrl,
121126
Capabilities: config.Capabilities,

pkg/wconfig/settingsconfig.go

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -273,6 +273,7 @@ type AIModeConfigType struct {
273273
APIType string `json:"ai:apitype,omitempty" jsonschema:"enum=google-gemini,enum=openai-responses,enum=openai-chat"`
274274
Model string `json:"ai:model,omitempty"`
275275
ThinkingLevel string `json:"ai:thinkinglevel,omitempty" jsonschema:"enum=low,enum=medium,enum=high"`
276+
Verbosity string `json:"ai:verbosity,omitempty" jsonschema:"enum=low,enum=medium,enum=high,description=Text verbosity level (OpenAI Responses API only)"`
276277
Endpoint string `json:"ai:endpoint,omitempty"`
277278
AzureAPIVersion string `json:"ai:azureapiversion,omitempty"`
278279
APIToken string `json:"ai:apitoken,omitempty"`

schema/waveai.json

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,15 @@
4646
"high"
4747
]
4848
},
49+
"ai:verbosity": {
50+
"type": "string",
51+
"enum": [
52+
"low",
53+
"medium",
54+
"high"
55+
],
56+
"description": "Text verbosity level (OpenAI Responses API only)"
57+
},
4958
"ai:endpoint": {
5059
"type": "string"
5160
},

0 commit comments

Comments
 (0)