Add GPT 5 support (#902)
@@ -183,7 +183,7 @@ function getRegularModelClient(
   const provider = createOpenAI({ apiKey });
   return {
     modelClient: {
-      model: provider(model.name),
+      model: provider.responses(model.name),
       builtinProviderId: providerId,
     },
     backupModelClients: [],
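The only functional change in this hunk is the model constructor: provider(model.name) builds a Chat Completions model, while provider.responses(model.name) targets OpenAI's Responses API, which this PR switches to for GPT-5 support. A minimal sketch of the difference, assuming the Vercel AI SDK (createOpenAI from @ai-sdk/openai) that the surrounding code appears to use; the "gpt-5" model id and the prompt are illustrative only.

import { createOpenAI } from "@ai-sdk/openai";
import { generateText } from "ai";

// Same construction as in the diff: an explicit apiKey rather than the env default.
const provider = createOpenAI({ apiKey: process.env.OPENAI_API_KEY! });

// Before: Chat Completions model, e.g. provider("gpt-4o").
// After: Responses API model, as the diff now does via provider.responses(model.name).
const responsesModel = provider.responses("gpt-5");

const { text } = await generateText({
  model: responsesModel,
  prompt: "Reply with one short sentence.",
});
console.log(text);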
@@ -23,6 +23,11 @@ export function getExtraProviderOptions(
   if (!providerId) {
     return {};
   }
+  if (providerId === "openai") {
+    return {
+      reasoning_effort: "medium",
+    };
+  }
   if (PROVIDERS_THAT_SUPPORT_THINKING.includes(providerId)) {
     const budgetTokens = getThinkingBudgetTokens(settings?.thinkingBudget);
     return {
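The new branch returns { reasoning_effort: "medium" } for the "openai" provider; reasoning_effort is the knob OpenAI's reasoning-capable models (o-series, GPT-5) expose for trading latency against reasoning depth. How the returned bag is consumed is not shown in this hunk, so the sketch below only illustrates the intended shape: a hypothetical buildRequestOptions caller merges the extras into the provider request. Everything except the { reasoning_effort: "medium" } value itself is an assumption.

type ExtraProviderOptions = Record<string, unknown>;

// Hypothetical caller: merge the provider-specific extras into a request payload.
function buildRequestOptions(
  base: Record<string, unknown>,
  extras: ExtraProviderOptions,
): Record<string, unknown> {
  // For providerId === "openai" the extras are { reasoning_effort: "medium" }.
  return { ...base, ...extras };
}

const request = buildRequestOptions(
  { model: "gpt-5", messages: [{ role: "user", content: "hi" }] },
  { reasoning_effort: "medium" },
);
// request now carries reasoning_effort alongside the normal request fields.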
@@ -24,10 +24,16 @@ export async function getContextWindow() {
   return modelOption?.contextWindow || DEFAULT_CONTEXT_WINDOW;
 }

-// Most models support at least 8000 output tokens so we use it as a default value.
-const DEFAULT_MAX_TOKENS = 8_000;
-
-export async function getMaxTokens(model: LargeLanguageModel) {
+export async function getMaxTokens(
+  model: LargeLanguageModel,
+): Promise<number | undefined> {
   const modelOption = await findLanguageModel(model);
-  return modelOption?.maxOutputTokens || DEFAULT_MAX_TOKENS;
+  return modelOption?.maxOutputTokens ?? undefined;
 }
+
+export async function getTemperature(
+  model: LargeLanguageModel,
+): Promise<number> {
+  const modelOption = await findLanguageModel(model);
+  return modelOption?.temperature ?? 0;
+}
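Two behavioral changes here: getMaxTokens no longer falls back to a hard-coded 8,000-token cap (the DEFAULT_MAX_TOKENS constant and its comment go away), and a getTemperature helper is added. Returning undefined when a model option sets no maxOutputTokens lets the provider apply its own per-model limit instead of capping everything at 8_000, which matters for newer models with much larger output budgets. Below is a self-contained sketch of the ??-based behavior; the ModelOption type and the synchronous signatures are trimmed stand-ins for the async helpers and real model-option shape in the diff.

interface ModelOption {
  maxOutputTokens?: number;
  temperature?: number;
}

// ?? only falls back on null/undefined, so an explicit value always wins,
// and a missing value now propagates as undefined (provider default) rather
// than being replaced by a fixed 8_000.
function getMaxTokens(option?: ModelOption): number | undefined {
  return option?.maxOutputTokens ?? undefined;
}

function getTemperature(option?: ModelOption): number {
  return option?.temperature ?? 0;
}

console.log(getMaxTokens({ maxOutputTokens: 128_000 })); // 128000
console.log(getMaxTokens({}));                           // undefined -> use provider default
console.log(getTemperature({ temperature: 1 }));         // 1
console.log(getTemperature({}));                         // 0 when unset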