6 changes: 5 additions & 1 deletion app/actions.tsx
@@ -12,7 +12,11 @@ import type { FeatureCollection } from 'geojson'
import { Spinner } from '@/components/ui/spinner'
import { Section } from '@/components/section'
import { FollowupPanel } from '@/components/followup-panel'
-import { inquire, researcher, taskManager, querySuggestor, resolutionSearch, type DrawnFeature } from '@/lib/agents'
+import { inquire } from '@/lib/agents/inquire'
+import { researcher } from '@/lib/agents/researcher'
+import { taskManager } from '@/lib/agents/task-manager'
+import { querySuggestor } from '@/lib/agents/query-suggestor'
+import { resolutionSearch, type DrawnFeature } from '@/lib/agents/resolution-search'
import { writer } from '@/lib/agents/writer'
import { saveChat, getSystemPrompt } from '@/lib/actions/chat'
import { Chat, AIMessage } from '@/lib/types'
2 changes: 1 addition & 1 deletion lib/actions/suggest.ts
@@ -3,7 +3,7 @@
import { createStreamableUI, createStreamableValue } from 'ai/rsc'
import { CoreMessage, LanguageModel, streamObject } from 'ai'
import { PartialRelated, relatedSchema } from '@/lib/schema/related'
-import { getModel } from '../utils'
+import { getModel } from '../utils/ai-model'
import { MapData } from '@/components/map/map-data-context'

export async function getSuggestions(
2 changes: 1 addition & 1 deletion lib/agents/inquire.tsx
@@ -2,7 +2,7 @@ import { Copilot } from '@/components/copilot';
import { createStreamableUI, createStreamableValue } from 'ai/rsc';
import { CoreMessage, LanguageModel, streamObject } from 'ai';
import { PartialInquiry, inquirySchema } from '@/lib/schema/inquiry';
-import { getModel } from '../utils'
+import { getModel } from '../utils/ai-model'

// Define a plain object type for the inquiry prop
interface InquiryProp {
2 changes: 1 addition & 1 deletion lib/agents/query-suggestor.tsx
@@ -3,7 +3,7 @@ import { CoreMessage, LanguageModel, streamObject } from 'ai'
import { PartialRelated, relatedSchema } from '@/lib/schema/related'
import { Section } from '@/components/section'
import SearchRelated from '@/components/search-related'
-import { getModel } from '../utils'
+import { getModel } from '../utils/ai-model'

export async function querySuggestor(
uiStream: ReturnType<typeof createStreamableUI>,
2 changes: 1 addition & 1 deletion lib/agents/researcher.tsx
@@ -10,7 +10,7 @@ import {
import { Section } from '@/components/section'
import { BotMessage } from '@/components/message'
import { getTools } from './tools'
-import { getModel } from '../utils'
+import { getModel } from '../utils/ai-model'
import { MapProvider } from '@/lib/store/settings'
import { DrawnFeature } from './resolution-search'

2 changes: 1 addition & 1 deletion lib/agents/resolution-search.tsx
@@ -1,5 +1,5 @@
import { CoreMessage, streamObject } from 'ai'
-import { getModel } from '@/lib/utils'
+import { getModel } from '@/lib/utils/ai-model'
import { z } from 'zod'

// This agent is now a pure data-processing module, with no UI dependencies.
2 changes: 1 addition & 1 deletion lib/agents/task-manager.tsx
@@ -1,6 +1,6 @@
import { CoreMessage, generateObject, LanguageModel } from 'ai'
import { nextActionSchema } from '../schema/next-action'
-import { getModel } from '../utils'
+import { getModel } from '../utils/ai-model'

// Decide whether inquiry is required for the user input
export async function taskManager(messages: CoreMessage[]) {
2 changes: 1 addition & 1 deletion lib/agents/writer.tsx
@@ -2,7 +2,7 @@ import { createStreamableUI, createStreamableValue } from 'ai/rsc'
import { CoreMessage, LanguageModel, streamText as nonexperimental_streamText } from 'ai'
import { Section } from '@/components/section'
import { BotMessage } from '@/components/message'
-import { getModel } from '../utils'
+import { getModel } from '../utils/ai-model'

export async function writer(
dynamicSystemPrompt: string, // New parameter
103 changes: 103 additions & 0 deletions lib/utils/ai-model.ts
@@ -0,0 +1,103 @@
import { getSelectedModel } from '@/lib/actions/users'
import { createOpenAI } from '@ai-sdk/openai'
import { createGoogleGenerativeAI } from '@ai-sdk/google'
import { createAmazonBedrock } from '@ai-sdk/amazon-bedrock'
import { createXai } from '@ai-sdk/xai';

export async function getModel(requireVision: boolean = false) {
  // Check for specific API model override
  if (process.env.SPECIFIC_API_MODEL) {
    const provider = process.env.SPECIFIC_API_MODEL.split(':')[0];
    const modelId = process.env.SPECIFIC_API_MODEL.split(':').slice(1).join(':');

    if (provider === 'openai') {
      return createOpenAI({ apiKey: process.env.OPENAI_API_KEY })(modelId);
    } else if (provider === 'google') {
      return createGoogleGenerativeAI({ apiKey: process.env.GEMINI_3_PRO_API_KEY })(modelId);
    } else if (provider === 'xai') {
      return createXai({ apiKey: process.env.XAI_API_KEY })(modelId);
    }
Comment on lines +7 to +20

⚠️ Potential issue | 🟠 Major

Add API key validation for the SPECIFIC_API_MODEL override path.

Lines 11–19 instantiate providers without validating required API keys. Unlike the fallback paths below (lines 33–110), this creates a consistency issue and allows opaque SDK auth failures. Add explicit checks before provider instantiation.

🛡️ Suggested guardrails for override path
   if (process.env.SPECIFIC_API_MODEL) {
     const provider = process.env.SPECIFIC_API_MODEL.split(':')[0];
     const modelId = process.env.SPECIFIC_API_MODEL.split(':').slice(1).join(':');

     if (provider === 'openai') {
+      if (!process.env.OPENAI_API_KEY) {
+        throw new Error('SPECIFIC_API_MODEL=openai requires OPENAI_API_KEY');
+      }
       const { createOpenAI } = await import('@ai-sdk/openai');
       return createOpenAI({ apiKey: process.env.OPENAI_API_KEY })(modelId);
     } else if (provider === 'google') {
+      if (!process.env.GEMINI_3_PRO_API_KEY) {
+        throw new Error('SPECIFIC_API_MODEL=google requires GEMINI_3_PRO_API_KEY');
+      }
       const { createGoogleGenerativeAI } = await import('@ai-sdk/google');
       return createGoogleGenerativeAI({ apiKey: process.env.GEMINI_3_PRO_API_KEY })(modelId);
     } else if (provider === 'xai') {
+      if (!process.env.XAI_API_KEY) {
+        throw new Error('SPECIFIC_API_MODEL=xai requires XAI_API_KEY');
+      }
       const { createXai } = await import('@ai-sdk/xai');
       return createXai({ apiKey: process.env.XAI_API_KEY })(modelId);
     }
   }
🤖 Prompt for AI Agents
In `@lib/utils/ai-model.ts` around lines 7 - 20, The SPECIFIC_API_MODEL override
path currently constructs provider clients without validating API keys; before
calling createOpenAI/createGoogleGenerativeAI/createXai, add explicit checks
that the corresponding env vars (OPENAI_API_KEY for provider 'openai',
GEMINI_3_PRO_API_KEY for 'google', XAI_API_KEY for 'xai') are present and
non-empty, and if missing throw or return a clear error (or log and exit)
indicating the missing key and referencing SPECIFIC_API_MODEL so auth failures
are descriptive.

  }
Comment on lines 5 to 21

SPECIFIC_API_MODEL parsing is performed multiple times (split(':') called twice), and it silently ignores unknown providers or malformed values. If the env var is misconfigured (e.g., missing :), modelId becomes '' and you’ll pass an empty model name into a provider factory—hard to diagnose in production.

Also, the override path bypasses requireVision handling entirely, which can produce surprising results when callers explicitly request vision support.

Suggestion

Centralize parsing/validation once, fail fast with a clear error, and optionally support a simple vision suffix or separate env var.

export async function getModel(requireVision = false) {
  const specific = process.env.SPECIFIC_API_MODEL;
  if (specific) {
    const idx = specific.indexOf(':');
    if (idx <= 0 || idx === specific.length - 1) {
      throw new Error(
        `Invalid SPECIFIC_API_MODEL format. Expected "provider:modelId", got "${specific}".`
      );
    }
    const provider = specific.slice(0, idx);
    const modelId = specific.slice(idx + 1);

    switch (provider) {
      case 'openai':
        return createOpenAI({ apiKey: process.env.OPENAI_API_KEY })(modelId);
      case 'google':
        return createGoogleGenerativeAI({ apiKey: process.env.GEMINI_3_PRO_API_KEY })(modelId);
      case 'xai':
        return createXai({ apiKey: process.env.XAI_API_KEY })(modelId);
      default:
        throw new Error(`Unsupported SPECIFIC_API_MODEL provider: ${provider}`);
    }
  }

  // ... existing selectedModel logic
}

Reply with "@CharlieHelps yes please" if you'd like me to add a commit with this suggestion.


  const selectedModel = await getSelectedModel();

  const xaiApiKey = process.env.XAI_API_KEY;
  const gemini3ProApiKey = process.env.GEMINI_3_PRO_API_KEY;
  const awsAccessKeyId = process.env.AWS_ACCESS_KEY_ID;
  const awsSecretAccessKey = process.env.AWS_SECRET_ACCESS_KEY;
  const awsRegion = process.env.AWS_REGION;
  const bedrockModelId = process.env.BEDROCK_MODEL_ID || (requireVision ? 'anthropic.claude-3-5-sonnet-20241022-v2:0' : 'anthropic.claude-3-5-sonnet-20241022-v2:0');
  const openaiApiKey = process.env.OPENAI_API_KEY;

Comment on lines 25 to 32

bedrockModelId uses a conditional with identical branches:

requireVision ? 'anthropic.claude-3-5-sonnet-20241022-v2:0' : 'anthropic.claude-3-5-sonnet-20241022-v2:0'

This is dead logic and makes it look like vision selection is implemented when it isn’t.

Suggestion

Either remove requireVision from bedrockModelId or implement a real mapping (if you have distinct Bedrock model IDs for vision vs non-vision).

const bedrockModelId =
  process.env.BEDROCK_MODEL_ID ?? 'anthropic.claude-3-5-sonnet-20241022-v2:0';

(or)

const bedrockModelId = process.env.BEDROCK_MODEL_ID ??
  (requireVision
    ? 'anthropic.claude-3-5-sonnet-20241022-v2:0' // vision-capable
    : 'anthropic.claude-3-5-haiku-20241022-v1:0');

Reply with "@CharlieHelps yes please" if you'd like me to add a commit with this suggestion.

  if (selectedModel) {
    switch (selectedModel) {
      case 'Grok 4.2':
        if (xaiApiKey) {
          const xai = createXai({
            apiKey: xaiApiKey,
            baseURL: 'https://api.x.ai/v1',
          });
          return xai(requireVision ? 'grok-vision-beta' : 'grok-beta');
        }
        break;
      case 'Gemini 3':
        if (gemini3ProApiKey) {
          const google = createGoogleGenerativeAI({
            apiKey: gemini3ProApiKey,
          });
          return google(requireVision ? 'gemini-1.5-pro' : 'gemini-1.5-pro');
        }
        break;
      case 'GPT-5.1':
        if (openaiApiKey) {
          const openai = createOpenAI({
            apiKey: openaiApiKey,
          });
          return openai('gpt-4o');
        }
        break;
Comment on lines 34 to 62

⚠️ Potential issue | 🟠 Major

🌐 Web query:

xAI Grok current model ID 2026 ai-sdk grok-beta grok-vision-beta

💡 Result:

As of February 2026, the Vercel AI SDK xAI provider (@ai-sdk/xai) accepts these current Grok model IDs (you pass them as strings to xai(modelId) or xai.responses(modelId)):

  • Still valid (the ones you mentioned): grok-beta and grok-vision-beta

  • Recommended “current”/maintained IDs (use *-latest when you want the newest without changing code):

    • Text/chat: grok-4-latest, grok-3-latest, grok-2-latest (ai-sdk.dev)
    • Vision: grok-2-vision-latest (or pinned grok-2-vision-1212) (ai-sdk.dev)

Example:

import { xai } from "@ai-sdk/xai";

// text
const model = xai("grok-4-latest");

// vision (for image input in AI SDK docs)
const visionModel = xai.responses("grok-2-vision-latest");
([ai-sdk.dev](https://ai-sdk.dev/providers/ai-sdk-providers/xai))

Citations: https://ai-sdk.dev/providers/ai-sdk-providers/xai

---

🌐 Web query:

`Google Gemini 3 Pro current model ID 2026 ai-sdk gemini-1.5-pro`

💡 Result:

As of **February 4, 2026**, the **Gemini API** “Gemini 3 Pro” model ID you’d pass to an AI SDK is:

- **`gemini-3-pro-preview`** (Gemini 3 Pro Preview) [1][2]

(For the image-capable variant: **`gemini-3-pro-image-preview`**.) [1][2]

**Sources:** [1] [2]

---

🌐 Web query:

`OpenAI GPT-5.1 model ID 2026 gpt-4o current version`

💡 Result:

- **GPT‑5.1 (API) model ID (alias):** `gpt-5.1`  
  **Snapshot (versioned model ID):** `gpt-5.1-2025-11-13` (this is the only listed snapshot as of Feb 4, 2026). [1]

- **GPT‑4o (API) current model ID (alias):** `gpt-4o`  
  **Latest listed snapshot (versioned model ID):** `gpt-4o-2024-11-20` (newest date among the snapshots shown). [2]

**Sources:**  
[1] https://platform.openai.com/docs/models/gpt-5.1/  
[2] https://platform.openai.com/docs/models/gpt-4o

---




**Fix model ID mismatches in the user-facing labels and address structured-output safety.**

The labels "Grok 4.2", "Gemini 3", and "GPT-5.1" do not match the actual model IDs returned:

- **"Gemini 3"** (line 43) returns `gemini-1.5-pro`, which is outdated. Use `gemini-3-pro-preview` for the current Gemini 3 Pro model.
- **"GPT-5.1"** (line 51) returns `gpt-4o` instead of `gpt-5.1`. Users selecting "GPT-5.1" should receive the GPT-5.1 model, not GPT-4o.
- **"Grok 4.2"** (line 34) returns `grok-beta`, a generic beta version. Consider using `grok-4-latest` for consistency.

Additionally, the default fallback order (lines 62–84) prioritizes xAI (Grok) first. Per established guidance, structured-output flows using `generateObject` and `streamObject` must use OpenAI (`gpt-4o`), not xAI models which lack reliable structured-output support. Review whether this function is used in structured-output contexts and adjust the fallback order accordingly.
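For concreteness, a minimal sketch of a corrected label-to-ID mapping, assuming the IDs surfaced by the queries above (grok-4-latest, grok-2-vision-latest, gemini-3-pro-preview, gpt-5.1) are still current when this lands; resolveSelectedModel is a hypothetical helper, not part of the PR:

import { createOpenAI } from '@ai-sdk/openai';
import { createGoogleGenerativeAI } from '@ai-sdk/google';
import { createXai } from '@ai-sdk/xai';

// Sketch only: aligns each user-facing label with a current model ID.
// All model IDs here are assumptions from provider docs at review time; verify before merging.
function resolveSelectedModel(selectedModel: string, requireVision: boolean) {
  const { XAI_API_KEY, GEMINI_3_PRO_API_KEY, OPENAI_API_KEY } = process.env;
  switch (selectedModel) {
    case 'Grok 4.2':
      if (XAI_API_KEY) {
        const xai = createXai({ apiKey: XAI_API_KEY, baseURL: 'https://api.x.ai/v1' });
        // grok-2-vision-latest is the documented vision ID; grok-4-latest for text
        return xai(requireVision ? 'grok-2-vision-latest' : 'grok-4-latest');
      }
      break;
    case 'Gemini 3':
      if (GEMINI_3_PRO_API_KEY) {
        // gemini-3-pro-preview is multimodal, so one ID can cover both paths
        return createGoogleGenerativeAI({ apiKey: GEMINI_3_PRO_API_KEY })('gemini-3-pro-preview');
      }
      break;
    case 'GPT-5.1':
      if (OPENAI_API_KEY) {
        // match the label instead of silently serving gpt-4o
        return createOpenAI({ apiKey: OPENAI_API_KEY })('gpt-5.1');
      }
      break;
  }
  return undefined; // caller falls through to the default provider chain
}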

🤖 Prompt for AI Agents

In @lib/utils/ai-model.ts around lines 33 - 58, The selectedModel switch returns
incorrect model IDs and misorders fallbacks for structured-output: update the
'Grok 4.2' case in the switch (where createXai(...) is used) to return
'grok-4-latest' (or vision variant when requireVision), change the 'Gemini 3'
case (createGoogleGenerativeAI(...)) to return 'gemini-3-pro-preview' for Gemini
3 Pro, and change the 'GPT-5.1' case (createOpenAI(...)) to return the actual
GPT-5.1 model ID instead of 'gpt-4o'; also revise the function’s fallback
ordering so OpenAI (createOpenAI / gpt-5.1 or gpt-4o for compatibility) is
preferred for structured-output flows (generateObject/streamObject) because
xAI/grok lacks reliable structured-output support, ensuring vision variants
still honor requireVision where applicable.



    }
  }

  // Default behavior: Grok -> Gemini -> Bedrock -> OpenAI
  if (xaiApiKey) {
    const xai = createXai({
      apiKey: xaiApiKey,
      baseURL: 'https://api.x.ai/v1',
    });
    try {
      return xai(requireVision ? 'grok-vision-beta' : 'grok-beta');
    } catch (error) {
      console.warn('xAI API unavailable, falling back to next provider');
    }
  }

  if (gemini3ProApiKey) {
    const google = createGoogleGenerativeAI({
      apiKey: gemini3ProApiKey,
    });
    try {
      return google(requireVision ? 'gemini-1.5-pro' : 'gemini-1.5-pro');
    } catch (error) {
      console.warn('Gemini 3 Pro API unavailable, falling back to next provider:', error);
    }
  }
Comment on lines 66 to 90

⚠️ Potential issue | 🟠 Major

🌐 Web query:

xAI grok-vision-beta grok-beta structured output JSON schema support

💡 Result:

Yes—both grok-beta and grok-vision-beta support “Structured Outputs” with a JSON Schema via the response_format mechanism (i.e., OpenAI-style response_format: { type: "json_schema", json_schema: ... }). xAI’s Structured Outputs guide states the feature is supported by all language models, and the API will guarantee the response matches your schema. [1]

Notes / constraints from xAI’s docs:

  • Supported schema constructs include: string, number (integer/float), object, array, boolean, enum, anyOf; allOf is not supported (and some array/string length constraints aren’t supported). [1]
  • grok-beta / grok-vision-beta are older/preview models and are now deprecated in xAI’s model lineup (xAI recommends the grok-2-1212 family instead). [2][3]

🌐 Web query:

grok-4-fast-non-reasoning vs grok-beta vs grok-vision-beta structured outputs AI SDK

💡 Result:

Model comparison (xAI API)

| Model | What it is | Vision (image-in) | Status | Structured outputs | Structured outputs + tools |
| --- | --- | --- | --- | --- | --- |
| grok-4-1-fast-non-reasoning | Grok 4.1 Fast variant optimized for instant responses (skips "thinking" phase). | Yes (multimodal) | Current | Yes (xAI says structured outputs work for all language models) | Yes, but only for Grok 4 family models (includes grok-4-1-fast-non-reasoning). [1][2] |
| grok-beta | Original public-beta Grok text model (128k context at launch). | No | Deprecated | Yes (as a language model) | Not called out as supported (Grok 4 family only). [3][1] |
| grok-vision-beta | Original public-beta Grok vision model. | Yes | Deprecated | Yes (as a language model) | Not called out as supported (Grok 4 family only). [3][1] |

Using Structured Outputs from the Vercel AI SDK

  • Vercel AI SDK’s “structured outputs” feature is implemented via the OpenAI provider option structuredOutputs: true (i.e., OpenAI-style JSON-schema structured outputs). [4]
  • However, “structured outputs with tools” in the AI SDK is currently only available with OpenAI models (per AI SDK 4.1 notes). [5]
  • xAI recommends using the Vercel AI SDK for JavaScript, and their OpenAI-compatible base URL is https://api.x.ai/v1. [6]

Practical implication:

  • If you just need JSON/object output from Grok in an AI SDK app, try AI SDK generateObject with structuredOutputs: true against https://api.x.ai/v1 (OpenAI-compatible). [4][6]
  • If you need Grok tool-calling + guaranteed structured output, xAI documents that capability as supported for Grok 4 family models—use xAI’s structured-output flow/tooling (per their docs/SDK) rather than relying on AI SDK’s “structured outputs with tools” feature. [1]
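A minimal sketch of that check, assuming the AI SDK's generateObject accepts the xAI provider as described above; probeGrokStructuredOutput, the model ID, the schema, and the prompt are all illustrative, not part of the PR:

import { generateObject } from 'ai';
import { createXai } from '@ai-sdk/xai';
import { z } from 'zod';

// Hypothetical smoke test: does the selected Grok model honor a JSON schema?
export async function probeGrokStructuredOutput() {
  const xai = createXai({ apiKey: process.env.XAI_API_KEY });
  const { object } = await generateObject({
    model: xai('grok-4-latest'), // assumed current ID; grok-beta is deprecated
    schema: z.object({
      nextAction: z.enum(['proceed', 'inquire']),
      reason: z.string(),
    }),
    prompt: 'Decide whether a clarifying question is needed for: "map nearby trailheads".',
  });
  return object; // generateObject throws if the provider cannot satisfy the schema
}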

Sources:
[1] xAI Structured Outputs guide (supported models; Grok 4 family requirement for structured outputs + tools) (docs.x.ai)
[2] xAI Grok 4.1 Fast announcement (model variants) (x.ai)
[3] xAI Models & Pricing (grok-beta and grok-vision-beta deprecated) (grok.cadn.net.cn)
[4] AI SDK OpenAI provider guide (how structuredOutputs: true is used with generateObject) (ai-sdk.dev)
[5] Vercel AI SDK 4.1 blog (structured outputs with tools currently only for OpenAI models) (vercel.com)
[6] xAI Migration guide (recommends Vercel AI SDK; base URL https://api.x.ai/v1) (docs.x.ai)


Add requireStructuredOutput flag to getModel() to bypass xAI for structured output tasks.

Lines 62–84 prioritize xAI (grok-beta/grok-vision-beta) when available, but multiple call sites use streamObject and generateObject for structured outputs. The Vercel AI SDK's structured output integration with xAI is unreliable; in production, structured output tasks should route to OpenAI gpt-4o. Additionally, grok-beta and grok-vision-beta are deprecated models.

Add a requireStructuredOutput parameter to getModel() and short-circuit to OpenAI when true. Update structured output call sites to pass this flag: resolution-search.tsx, task-manager.tsx, query-suggestor.tsx, suggest.ts, and inquire.tsx.

🔧 Suggested safeguard for structured output
-export async function getModel(requireVision: boolean = false) {
+export async function getModel(
+  requireVision: boolean = false,
+  requireStructuredOutput: boolean = false
+) {
   // Check for specific API model override
   if (process.env.SPECIFIC_API_MODEL) {
     const provider = process.env.SPECIFIC_API_MODEL.split(':')[0];
     const modelId = process.env.SPECIFIC_API_MODEL.split(':').slice(1).join(':');
@@
-  const openaiApiKey = process.env.OPENAI_API_KEY;
+  const openaiApiKey = process.env.OPENAI_API_KEY;
+
+  if (requireStructuredOutput && openaiApiKey) {
+    const openai = createOpenAI({ apiKey: openaiApiKey });
+    return openai('gpt-4o');
+  }
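At the call sites, the update would look roughly like this sketch against lib/agents/task-manager.tsx, assuming the two-flag getModel signature above; the system prompt is a placeholder:

// lib/agents/task-manager.tsx (hypothetical update)
import { CoreMessage, generateObject } from 'ai';
import { nextActionSchema } from '../schema/next-action';
import { getModel } from '../utils/ai-model';

export async function taskManager(messages: CoreMessage[]) {
  // generateObject needs reliable structured output, so bypass xAI here
  const model = await getModel(false /* requireVision */, true /* requireStructuredOutput */);
  return generateObject({
    model,
    system: 'Decide whether the user input needs a follow-up inquiry.',
    messages,
    schema: nextActionSchema,
  });
}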
🤖 Prompt for AI Agents
In `@lib/utils/ai-model.ts` around lines 62 - 84, Add a new boolean parameter
requireStructuredOutput to getModel(requireVision?: boolean,
requireStructuredOutput?: boolean) and, at the top of the provider-selection
logic in getModel, short-circuit to the OpenAI provider (e.g., return
openai('gpt-4o' or whatever OpenAI identifier is used in this file) when
requireStructuredOutput is true so xAI (grok-beta/grok-vision-beta) is not
chosen; update the provider-selection branches to treat grok models as
deprecated and only used when requireStructuredOutput is false, and propagate
this new flag from the structured-output call sites by updating
resolution-search.tsx, task-manager.tsx, query-suggestor.tsx, suggest.ts, and
inquire.tsx to pass requireStructuredOutput:true when calling getModel or its
wrappers.


  if (awsAccessKeyId && awsSecretAccessKey) {
    const bedrock = createAmazonBedrock({
      bedrockOptions: {
        region: awsRegion,
        credentials: {
          accessKeyId: awsAccessKeyId,
          secretAccessKey: awsSecretAccessKey,
        },
      },
    });
    return bedrock(bedrockModelId);
Comment on lines +92 to +103

⚠️ Potential issue | 🟡 Minor

Add AWS region check to Bedrock initialization guard.

Lines 92–103 check AWS credentials but not AWS_REGION. Unlike xAI and Gemini providers which have error handling, Bedrock lacks both a region guard and try-catch. If AWS credentials are set without a region, initialization will fail without fallback. Add awsRegion to the condition:

-  if (awsAccessKeyId && awsSecretAccessKey) {
+  if (awsAccessKeyId && awsSecretAccessKey && awsRegion) {
     const { createAmazonBedrock } = await import('@ai-sdk/amazon-bedrock');
     const bedrock = createAmazonBedrock({
       bedrockOptions: {
         region: awsRegion,
         credentials: {
           accessKeyId: awsAccessKeyId,
           secretAccessKey: awsSecretAccessKey,
         },
       },
     });
     return bedrock(bedrockModelId);
   }
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
-  if (awsAccessKeyId && awsSecretAccessKey) {
+  if (awsAccessKeyId && awsSecretAccessKey && awsRegion) {
     const { createAmazonBedrock } = await import('@ai-sdk/amazon-bedrock');
     const bedrock = createAmazonBedrock({
       bedrockOptions: {
         region: awsRegion,
         credentials: {
           accessKeyId: awsAccessKeyId,
           secretAccessKey: awsSecretAccessKey,
         },
       },
     });
     return bedrock(bedrockModelId);
   }
🤖 Prompt for AI Agents
In `@lib/utils/ai-model.ts` around lines 92 - 103, The Bedrock initialization
currently checks only awsAccessKeyId and awsSecretAccessKey; update the guard to
require awsRegion as well and wrap the import/creation in a try-catch so it
gracefully falls back on failure. Specifically, modify the conditional that
gates createAmazonBedrock(...) to include awsRegion and add error handling
around the dynamic import and bedrock(bedrockModelId) call (references:
createAmazonBedrock, bedrock, bedrockModelId, awsAccessKeyId,
awsSecretAccessKey, awsRegion) so failures are logged/handled and do not crash
when region is missing or initialization fails.

  }

  const openai = createOpenAI({
    apiKey: openaiApiKey,
  });
  return openai('gpt-4o');
}
Comment on lines +107 to +111

🧹 Nitpick | 🔵 Trivial

Guard against missing provider keys before OpenAI fallback.

Lines 99–103 instantiate OpenAI even if OPENAI_API_KEY is undefined, which leads to a late 401/invalid-auth error. Add a clear configuration error when no provider keys are available.

🛡️ Explicit guard for missing keys
-  const openai = createOpenAI({
-    apiKey: openaiApiKey,
-  });
-  return openai('gpt-4o');
+  if (!openaiApiKey) {
+    throw new Error('No AI provider API key configured');
+  }
+  const openai = createOpenAI({ apiKey: openaiApiKey });
+  return openai('gpt-4o');
🤖 Prompt for AI Agents
In `@lib/utils/ai-model.ts` around lines 99 - 103, The code currently calls
createOpenAI({ apiKey: openaiApiKey }) and returns openai('gpt-4o') even when
OPENAI_API_KEY (openaiApiKey) is undefined; add an explicit guard in the
function that builds the provider client (check the openaiApiKey variable /
process.env.OPENAI_API_KEY) and throw a clear configuration Error (e.g., "No
OpenAI API key configured; set OPENAI_API_KEY or provide another provider key")
before attempting to instantiate createOpenAI or call openai('gpt-4o'); update
any surrounding logic that falls back to OpenAI (the code around createOpenAI
and the return of openai('gpt-4o')) to only attempt instantiation when the key
exists.

108 changes: 0 additions & 108 deletions lib/utils/index.ts
@@ -1,10 +1,5 @@
import { type ClassValue, clsx } from 'clsx'
import { twMerge } from 'tailwind-merge'
-import { getSelectedModel } from '@/lib/actions/users'
-import { createOpenAI } from '@ai-sdk/openai'
-import { createGoogleGenerativeAI } from '@ai-sdk/google'
-import { createAmazonBedrock } from '@ai-sdk/amazon-bedrock'
-import { createXai } from '@ai-sdk/xai';
import { v4 as uuidv4 } from 'uuid';

export function cn(...inputs: ClassValue[]) {
@@ -14,106 +9,3 @@ export function cn(...inputs: ClassValue[]) {
export function generateUUID(): string {
  return uuidv4();
}

export async function getModel(requireVision: boolean = false) {
  const selectedModel = await getSelectedModel();

  const xaiApiKey = process.env.XAI_API_KEY;
  const gemini3ProApiKey = process.env.GEMINI_3_PRO_API_KEY;
  const awsAccessKeyId = process.env.AWS_ACCESS_KEY_ID;
  const awsSecretAccessKey = process.env.AWS_SECRET_ACCESS_KEY;
  const awsRegion = process.env.AWS_REGION;
  const bedrockModelId = process.env.BEDROCK_MODEL_ID || 'anthropic.claude-3-5-sonnet-20241022-v2:0';
  const openaiApiKey = process.env.OPENAI_API_KEY;

  if (selectedModel) {
    switch (selectedModel) {
      case 'Grok 4.2':
        if (xaiApiKey) {
          const xai = createXai({
            apiKey: xaiApiKey,
            baseURL: 'https://api.x.ai/v1',
          });
          try {
            return xai('grok-4-fast-non-reasoning');
          } catch (error) {
            console.error('Selected model "Grok 4.2" is configured but failed to initialize.', error);
            throw new Error('Failed to initialize selected model.');
          }
        } else {
          console.error('User selected "Grok 4.2" but XAI_API_KEY is not set.');
          throw new Error('Selected model is not configured.');
        }
      case 'Gemini 3':
        if (gemini3ProApiKey) {
          const google = createGoogleGenerativeAI({
            apiKey: gemini3ProApiKey,
          });
          try {
            return google('gemini-3-pro-preview');
          } catch (error) {
            console.error('Selected model "Gemini 3" is configured but failed to initialize.', error);
            throw new Error('Failed to initialize selected model.');
          }
        } else {
          console.error('User selected "Gemini 3" but GEMINI_3_PRO_API_KEY is not set.');
          throw new Error('Selected model is not configured.');
        }
      case 'GPT-5.1':
        if (openaiApiKey) {
          const openai = createOpenAI({
            apiKey: openaiApiKey,
          });
          return openai('gpt-4o');
        } else {
          console.error('User selected "GPT-5.1" but OPENAI_API_KEY is not set.');
          throw new Error('Selected model is not configured.');
        }
    }
  }

  // Default behavior: Grok -> Gemini -> Bedrock -> OpenAI
  if (xaiApiKey) {
    const xai = createXai({
      apiKey: xaiApiKey,
      baseURL: 'https://api.x.ai/v1',
    });
    try {
      return xai('grok-4-fast-non-reasoning');
    } catch (error) {
      console.warn('xAI API unavailable, falling back to next provider:');
    }
  }

  if (gemini3ProApiKey) {
    const google = createGoogleGenerativeAI({
      apiKey: gemini3ProApiKey,
    });
    try {
      return google('gemini-3-pro-preview');
    } catch (error) {
      console.warn('Gemini 3 Pro API unavailable, falling back to next provider:', error);
    }
  }

  if (awsAccessKeyId && awsSecretAccessKey) {
    const bedrock = createAmazonBedrock({
      bedrockOptions: {
        region: awsRegion,
        credentials: {
          accessKeyId: awsAccessKeyId,
          secretAccessKey: awsSecretAccessKey,
        },
      },
    });
    const model = bedrock(bedrockModelId, {
      additionalModelRequestFields: { top_k: 350 },
    });
    return model;
  }

  const openai = createOpenAI({
    apiKey: openaiApiKey,
  });
  return openai('gpt-4o');
}
2 changes: 1 addition & 1 deletion mapbox_mcp/hooks.ts
@@ -1,6 +1,6 @@
import { useState, useCallback, useRef, useEffect } from 'react';
import { generateText } from 'ai';
-import { getModel } from '@/lib/utils';
+import { getModel } from '@/lib/utils/ai-model';

// Define Tool type locally if needed
type Tool = {
2 changes: 1 addition & 1 deletion next.config.mjs
@@ -9,7 +9,7 @@ const nextConfig = {
      bodySizeLimit: '200mb',
    },
  },
-  transpilePackages: ['QCX', 'mapbox_mcp'], // Added to transpile local packages
+  transpilePackages: ['mapbox_mcp'], // Removed 'QCX' as it's the app itself
};

export default nextConfig