
Commit 9370e5b

Authored by samchon and Copilot

Publish v6.0 (#222)
* Unified to `ILlmSchema`, no more separation. (#213)
  * Unified to `ILlmSchema`
  * Complete the new `ILlmSchema` type
  * detailed description comments
  * Update src/structures/ILlmSchema.ts (review suggestions, co-authored by Copilot)
  * prettier
* Unify `ILlmApplication` and `ILlmFunction` too. (#214)
* Universal `LlmSchemaComposer` (#215)
  * Universal LlmSchemaComposer
  * complete `LlmSchemaComposer.schema()` function
  * fix logics
* Universal `LlmTypeChecker` (#216)
  * Universal `LlmTypeChecker`
  * Fix `LlmSchemaComposer` to utilize `LlmTypeCheckeer`
* JSDoc comments on universal LLM types. (#217)
* Universal `HttpLlm` (#218)
  * Universal `HttpLlm`
  * Update src/HttpLlm.ts (review suggestions, co-authored by Copilot)
  * fix configuration comments
  * fix more thing
* Remove individual LLM schemas (#219)
  * Remove individual LLM schemas
  * fix
* Unify test functions about LLM schemas (#220)
  * Unify test functions about LLM schemas
  * Update test/src/utils/LlmApplicationFactory.ts (co-authored by Copilot)
  * fixed most of test functions
  * completed everything
* Publish v6
* Re-write README for universal LLM schemas (#221)

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
1 parent cc2aab4 · commit 9370e5b

File tree

140 files changed (+3,639 / −8,652 lines)


README.md

Lines changed: 234 additions & 189 deletions
Large diffs are not rendered by default.

package.json

Lines changed: 3 additions & 3 deletions
```diff
@@ -1,7 +1,7 @@
 {
   "name": "@samchon/openapi",
-  "version": "5.1.0",
-  "description": "OpenAPI definitions and converters for 'typia' and 'nestia'.",
+  "version": "6.0.0",
+  "description": "Universal OpenAPI to LLM function calling schemas. Transform any Swagger/OpenAPI document into type-safe schemas for OpenAI, Claude, Qwen, and more.",
   "main": "./lib/index.js",
   "module": "./lib/index.mjs",
   "typings": "./lib/index.d.ts",
@@ -26,7 +26,7 @@
     "openai",
     "chatgpt",
     "claude",
-    "gemini",
+    "qwen",
     "llama"
   ],
   "repository": {
```

src/HttpLlm.ts

Lines changed: 38 additions & 72 deletions
```diff
@@ -4,15 +4,13 @@ import { OpenApiV3 } from "./OpenApiV3";
 import { OpenApiV3_1 } from "./OpenApiV3_1";
 import { SwaggerV2 } from "./SwaggerV2";
 import { HttpLlmComposer } from "./composers/HttpLlmApplicationComposer";
-import { LlmSchemaComposer } from "./composers/LlmSchemaComposer";
 import { HttpLlmFunctionFetcher } from "./http/HttpLlmFunctionFetcher";
 import { IHttpConnection } from "./structures/IHttpConnection";
 import { IHttpLlmApplication } from "./structures/IHttpLlmApplication";
 import { IHttpLlmFunction } from "./structures/IHttpLlmFunction";
 import { IHttpMigrateApplication } from "./structures/IHttpMigrateApplication";
 import { IHttpResponse } from "./structures/IHttpResponse";
 import { ILlmFunction } from "./structures/ILlmFunction";
-import { ILlmSchema } from "./structures/ILlmSchema";
 import { LlmDataMerger } from "./utils/LlmDataMerger";
 
 /**
@@ -30,36 +28,28 @@ import { LlmDataMerger } from "./utils/LlmDataMerger";
  * {@link HttpLlm.propagate HttpLlm.propagate()}.
  *
  * By the way, if you have configured the
- * {@link IHttpLlmApplication.IOptions.separate} option to separate the
- * parameters into human and LLM sides, you can merge these human and LLM sides'
- * parameters into one through
- * {@link HttpLlm.mergeParameters HttpLlm.mergeParameters()} before the actual
- * LLM function call execution.
+ * {@link IHttpLlmApplication.IConfig.separate} option to separate the parameters
+ * into human and LLM sides, you can merge these human and LLM sides' parameters
+ * into one through {@link HttpLlm.mergeParameters HttpLlm.mergeParameters()}
+ * before the actual LLM function call execution.
  *
  * @author Jeongho Nam - https://github.com/samchon
  */
 export namespace HttpLlm {
   /* -----------------------------------------------------------
     COMPOSERS
   ----------------------------------------------------------- */
-  /**
-   * Properties for the LLM function calling application composer.
-   *
-   * @template Model Target LLM model
-   */
-  export interface IApplicationProps<Model extends ILlmSchema.Model> {
-    /** Target LLM model. */
-    model: Model;
-
+  /** Properties for the LLM function calling application composer. */
+  export interface IApplicationProps {
     /** OpenAPI document to convert. */
     document:
       | OpenApi.IDocument
       | SwaggerV2.IDocument
       | OpenApiV3.IDocument
       | OpenApiV3_1.IDocument;
 
-    /** Options for the LLM function calling schema conversion. */
-    options?: Partial<IHttpLlmApplication.IOptions<Model>>;
+    /** Configuration for the LLM function calling schema conversion. */
+    config?: Partial<IHttpLlmApplication.IConfig>;
   }
 
   /**
@@ -72,57 +62,44 @@ export namespace HttpLlm {
   * converted to the {@link IHttpLlmFunction LLM function} type, and they would
   * be used for the LLM function calling.
   *
-  * If you have configured the {@link IHttpLlmApplication.IOptions.separate}
+  * If you have configured the {@link IHttpLlmApplication.IConfig.separate}
   * option, every parameters in the {@link IHttpLlmFunction} would be separated
   * into both human and LLM sides. In that case, you can merge these human and
   * LLM sides' parameters into one through {@link HttpLlm.mergeParameters}
   * before the actual LLM function call execution.
   *
-  * Additionally, if you have configured the
-  * {@link IHttpLlmApplication.IOptions.keyword} as `true`, the number of
-  * {@link IHttpLlmFunction.parameters} are always 1 and the first parameter
-  * type is always {@link ILlmSchemaV3.IObject}. I recommend this option because
-  * LLM can understand the keyword arguments more easily.
-  *
   * @param props Properties for composition
   * @returns LLM function calling application
   */
-  export const application = <Model extends ILlmSchema.Model>(
-    props: IApplicationProps<Model>,
-  ): IHttpLlmApplication<Model> => {
+  export const application = (
+    props: IApplicationProps,
+  ): IHttpLlmApplication => {
     // MIGRATE
     const migrate: IHttpMigrateApplication = HttpMigration.application(
       props.document,
     );
-    const defaultConfig: ILlmSchema.IConfig<Model> =
-      LlmSchemaComposer.defaultConfig(props.model);
-    return HttpLlmComposer.application<Model>({
+    return HttpLlmComposer.application({
      migrate,
-      model: props.model,
-      options: {
-        ...Object.fromEntries(
-          Object.entries(defaultConfig).map(
-            ([key, value]) =>
-              [key, (props.options as any)?.[key] ?? value] as const,
-          ),
-        ),
-        separate: props.options?.separate ?? null,
-        maxLength: props.options?.maxLength ?? 64,
-        equals: props.options?.equals ?? false,
-      } as any as IHttpLlmApplication.IOptions<Model>,
+      config: {
+        reference: props.config?.reference ?? true,
+        strict: props.config?.strict ?? false,
+        separate: props.config?.separate ?? null,
+        maxLength: props.config?.maxLength ?? 64,
+        equals: props.config?.equals ?? false,
+      },
     });
   };
 
   /* -----------------------------------------------------------
     FETCHERS
   ----------------------------------------------------------- */
   /** Properties for the LLM function call. */
-  export interface IFetchProps<Model extends ILlmSchema.Model> {
+  export interface IFetchProps {
     /** Application of the LLM function calling. */
-    application: IHttpLlmApplication<Model>;
+    application: IHttpLlmApplication;
 
     /** LLM function schema to call. */
-    function: IHttpLlmFunction<ILlmSchema.Model>;
+    function: IHttpLlmFunction;
 
     /** Connection info to the HTTP server. */
     connection: IHttpConnection;
@@ -140,16 +117,12 @@ export namespace HttpLlm {
   * sometimes).
   *
   * By the way, if you've configured the
-  * {@link IHttpLlmApplication.IOptions.separate}, so that the parameters are
-  * separated to human and LLM sides, you have to merge these humand and LLM
+  * {@link IHttpLlmApplication.IConfig.separate}, so that the parameters are
+  * separated to human and LLM sides, you have to merge these human and LLM
   * sides' parameters into one through {@link HttpLlm.mergeParameters}
   * function.
   *
-  * About the {@link IHttpLlmApplication.IOptions.keyword} option, don't worry
-  * anything. This `HttmLlm.execute()` function will automatically recognize
-  * the keyword arguments and convert them to the proper sequence.
-  *
-  * For reference, if the target API endpoinnt responds none 200/201 status,
+  * For reference, if the target API endpoint responds none 200/201 status,
   * this would be considered as an error and the {@link HttpError} would be
   * thrown. Otherwise you don't want such rule, you can use the
   * {@link HttpLlm.propagate} function instead.
@@ -158,9 +131,8 @@ export namespace HttpLlm {
   * @returns Return value (response body) from the API endpoint
   * @throws HttpError when the API endpoint responds none 200/201 status
   */
-  export const execute = <Model extends ILlmSchema.Model>(
-    props: IFetchProps<Model>,
-  ): Promise<unknown> => HttpLlmFunctionFetcher.execute<Model>(props);
+  export const execute = (props: IFetchProps): Promise<unknown> =>
+    HttpLlmFunctionFetcher.execute(props);
 
   /**
   * Propagate the LLM function call.
@@ -171,15 +143,11 @@ export namespace HttpLlm {
   * sometimes).
   *
   * By the way, if you've configured the
-  * {@link IHttpLlmApplication.IOptions.separate}, so that the parameters are
+  * {@link IHttpLlmApplication.IConfig.separate}, so that the parameters are
   * separated to human and LLM sides, you have to merge these humand and LLM
   * sides' parameters into one through {@link HttpLlm.mergeParameters}
   * function.
   *
-  * About the {@link IHttpLlmApplication.IOptions.keyword} option, don't worry
-  * anything. This `HttmLlm.propagate()` function will automatically recognize
-  * the keyword arguments and convert them to the proper sequence.
-  *
   * For reference, the propagation means always returning the response from the
   * API endpoint, even if the status is not 200/201. This is useful when you
   * want to handle the response by yourself.
@@ -188,17 +156,16 @@ export namespace HttpLlm {
   * @returns Response from the API endpoint
   * @throws Error only when the connection is failed
   */
-  export const propagate = <Model extends ILlmSchema.Model>(
-    props: IFetchProps<Model>,
-  ): Promise<IHttpResponse> => HttpLlmFunctionFetcher.propagate<Model>(props);
+  export const propagate = (props: IFetchProps): Promise<IHttpResponse> =>
+    HttpLlmFunctionFetcher.propagate(props);
 
   /* -----------------------------------------------------------
     MERGERS
   ----------------------------------------------------------- */
   /** Properties for the parameters' merging. */
-  export interface IMergeProps<Model extends ILlmSchema.Model> {
+  export interface IMergeProps {
     /** Metadata of the target function. */
-    function: ILlmFunction<Model>;
+    function: ILlmFunction;
 
     /** Arguments composed by the LLM. */
     llm: object | null;
@@ -210,22 +177,21 @@ export namespace HttpLlm {
   /**
   * Merge the parameters.
   *
-  * If you've configured the {@link IHttpLlmApplication.IOptions.separate}
+  * If you've configured the {@link IHttpLlmApplication.IConfig.separate}
   * option, so that the parameters are separated to human and LLM sides, you
   * can merge these humand and LLM sides' parameters into one through this
   * `HttpLlm.mergeParameters()` function before the actual LLM function call
-  * wexecution.
+  * execution.
   *
   * On contrary, if you've not configured the
-  * {@link IHttpLlmApplication.IOptions.separate} option, this function would
+  * {@link IHttpLlmApplication.IConfig.separate} option, this function would
   * throw an error.
   *
   * @param props Properties for the parameters' merging
   * @returns Merged parameter values
   */
-  export const mergeParameters = <Model extends ILlmSchema.Model>(
-    props: IMergeProps<Model>,
-  ): object => LlmDataMerger.parameters(props);
+  export const mergeParameters = (props: IMergeProps): object =>
+    LlmDataMerger.parameters(props);
 
   /**
   * Merge two values.
```
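Taken together, the v6 call site drops the `model` type parameter and passes `config` where v5 took `options`. Below is a minimal migration sketch, not an excerpt from the repository: the URLs and the `"create_order"` function name are placeholders, and the `functions` array plus the `input` fetch field are assumed to keep their v5 shapes, since neither appears in the hunks above.

```typescript
import {
  HttpLlm,
  IHttpLlmApplication,
  IHttpLlmFunction,
  OpenApi,
} from "@samchon/openapi";

const main = async (): Promise<void> => {
  // Any Swagger/OpenAPI document may be passed; the URL is a placeholder.
  const document: OpenApi.IDocument = await fetch(
    "https://example.com/swagger.json",
  ).then((r) => r.json());

  // v6: no `model` generic; `options` has been renamed to `config`.
  // The values below just restate the defaults from the diff above.
  const application: IHttpLlmApplication = HttpLlm.application({
    document,
    config: {
      reference: true,
      strict: false,
      separate: null, // keep every parameter on the LLM side
      maxLength: 64,
      equals: false,
    },
  });

  // Assumed v5-style `functions` array; "create_order" is a hypothetical name.
  const func: IHttpLlmFunction | undefined = application.functions.find(
    (f) => f.name === "create_order",
  );
  if (func === undefined) throw new Error("function not found");

  // Execute against the live server. `input` (the LLM-composed arguments)
  // is assumed from v5; only `application`, `function`, and `connection`
  // are visible in the IFetchProps hunk above.
  const result: unknown = await HttpLlm.execute({
    application,
    function: func,
    connection: { host: "https://example.com" }, // placeholder host
    input: {},
  });
  console.log(result);
};

main().catch(console.error);
```

If `separate` is configured instead of left `null`, the same flow adds one step: recombine both sides via `HttpLlm.mergeParameters({ function, llm, human })` before `execute()`. The `human` field is assumed from v5 here, as the IMergeProps hunk above is cut off after `llm`.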

src/HttpMigration.ts

Lines changed: 1 addition & 1 deletion
```diff
@@ -2,7 +2,7 @@ import { OpenApi } from "./OpenApi";
 import { OpenApiV3 } from "./OpenApiV3";
 import { OpenApiV3_1 } from "./OpenApiV3_1";
 import { SwaggerV2 } from "./SwaggerV2";
-import { HttpMigrateApplicationComposer } from "./composers/migrate/HttpMigrateApplicationComposer";
+import { HttpMigrateApplicationComposer } from "./composers/HttpMigrateApplicationComposer";
 import { HttpMigrateRouteFetcher } from "./http/HttpMigrateRouteFetcher";
 import { IHttpConnection } from "./structures/IHttpConnection";
 import { IHttpMigrateApplication } from "./structures/IHttpMigrateApplication";
```

src/McpLlm.ts

Lines changed: 23 additions & 38 deletions
```diff
@@ -29,15 +29,8 @@ import { OpenApiValidator } from "./utils/OpenApiValidator";
  * @author Jeongho Nam - https://github.com/samchon
  */
 export namespace McpLlm {
-  /**
-   * Properties for the LLM function calling application composer.
-   *
-   * @template Model Target LLM model
-   */
-  export interface IApplicationProps<Model extends ILlmSchema.Model> {
-    /** Target LLM model. */
-    model: Model;
-
+  /** Properties for the LLM function calling application composer. */
+  export interface IApplicationProps {
     /**
      * List of tools.
      *
@@ -49,8 +42,8 @@ export namespace McpLlm {
      */
     tools: Array<IMcpTool>;
 
-    /** Options for the LLM function calling schema conversion. */
-    options?: Partial<IMcpLlmApplication.IOptions<Model>>;
+    /** Configuration for the LLM function calling schema conversion. */
+    config?: Partial<IMcpLlmApplication.IConfig>;
   }
 
   /**
@@ -72,19 +65,14 @@ export namespace McpLlm {
   * @param props Properties for composition
   * @returns LLM function calling application
   */
-  export const application = <Model extends ILlmSchema.Model>(
-    props: IApplicationProps<Model>,
-  ): IMcpLlmApplication<Model> => {
-    const options: IMcpLlmApplication.IOptions<Model> = {
-      ...Object.fromEntries(
-        Object.entries(LlmSchemaComposer.defaultConfig(props.model)).map(
-          ([key, value]) =>
-            [key, (props.options as any)?.[key] ?? value] as const,
-        ),
-      ),
-      maxLength: props.options?.maxLength ?? 64,
-    } as IMcpLlmApplication.IOptions<Model>;
-    const functions: IMcpLlmFunction<Model>[] = [];
+  export const application = (props: IApplicationProps): IMcpLlmApplication => {
+    const config: IMcpLlmApplication.IConfig = {
+      reference: props.config?.reference ?? true,
+      strict: props.config?.strict ?? false,
+      maxLength: props.config?.maxLength ?? 64,
+      equals: props.config?.equals ?? false,
+    };
+    const functions: IMcpLlmFunction[] = [];
     const errors: IMcpLlmApplication.IError[] = [];
 
     props.tools.forEach((tool, i) => {
@@ -114,17 +102,15 @@ export namespace McpLlm {
       }
 
       // CONVERT TO LLM PARAMETERS
-      const parameters: IResult<
-        ILlmSchema.IParameters<Model>,
-        IOpenApiSchemaError
-      > = LlmSchemaComposer.parameters(props.model)({
-        config: options as any,
-        components,
-        schema: schema as
-          | OpenApi.IJsonSchema.IObject
-          | OpenApi.IJsonSchema.IReference,
-        accessor: `$input.tools[${i}].inputSchema`,
-      }) as IResult<ILlmSchema.IParameters<Model>, IOpenApiSchemaError>;
+      const parameters: IResult<ILlmSchema.IParameters, IOpenApiSchemaError> =
+        LlmSchemaComposer.parameters({
+          config,
+          components,
+          schema: schema as
+            | OpenApi.IJsonSchema.IObject
+            | OpenApi.IJsonSchema.IReference,
+          accessor: `$input.tools[${i}].inputSchema`,
+        });
       if (parameters.success)
         functions.push({
           name: tool.name,
@@ -134,7 +120,7 @@ export namespace McpLlm {
           components,
           schema,
           required: true,
-          equals: options.equals,
+          equals: config.equals,
         }),
       });
       else
@@ -149,9 +135,8 @@ export namespace McpLlm {
       });
     });
     return {
-      model: props.model,
       functions,
-      options,
+      config,
       errors,
     };
   };
```
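The MCP composer follows the same pattern: tools in, a `config` object with the same defaults, and per-tool conversion failures collected into `errors` rather than thrown. A short sketch, assuming `IMcpTool` keeps the standard MCP `{ name, description, inputSchema }` layout (only `inputSchema` is referenced in the hunks above) and that `get_weather` is a hypothetical tool:

```typescript
import { IMcpLlmApplication, McpLlm } from "@samchon/openapi";

// v6: composed directly from MCP tool definitions, no `model` generic.
const application: IMcpLlmApplication = McpLlm.application({
  tools: [
    {
      name: "get_weather", // hypothetical tool
      description: "Get the current weather of a city.",
      inputSchema: {
        type: "object",
        properties: {
          city: { type: "string" },
        },
        required: ["city"],
      },
    },
  ],
  config: {
    reference: true, // restating the defaults from the diff above
    strict: false,
  },
});

// Tools whose schemas could not be converted land in `errors`.
console.log(application.functions.length, application.errors.length);
```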
