From 7960ae45bac62d43cb536688d2896d2cb04399b5 Mon Sep 17 00:00:00 2001
From: AssemblyAI
Date: Tue, 2 Sep 2025 14:34:02 -0600
Subject: [PATCH] Project import generated by Copybara.

GitOrigin-RevId: 2a553450940d7cb88913b0ebed9ca310eb254c8b
---
 src/types/openapi.generated.ts |  70 ++++++++++++++++-
 tests/unit/lemur.test.ts       | 134 +++++++++++++++++++++++++++++++++
 2 files changed, 202 insertions(+), 2 deletions(-)

diff --git a/src/types/openapi.generated.ts b/src/types/openapi.generated.ts
index 5225fd5..400ff8a 100644
--- a/src/types/openapi.generated.ts
+++ b/src/types/openapi.generated.ts
@@ -11,8 +11,8 @@ type XOR<T, U> = T | U extends object
 type OneOf<T extends unknown[]> = T extends [infer Only]
   ? Only
   : T extends [infer A, infer B, ...infer Rest]
-  ? OneOf<[XOR<A, B>, ...Rest]>
-  : never;
+    ? OneOf<[XOR<A, B>, ...Rest]>
+    : never;
 
 /* eslint-enable */
 
@@ -672,6 +672,71 @@ export type LemurBaseParams = {
  * }
  * ```
  */
+
+export type LemurRequestDetails = {
+  /**
+   * The endpoint used for the LeMUR request
+   */
+  request_endpoint: string;
+  /**
+   * The temperature to use for the model.
+   * Higher values result in answers that are more creative, lower values are more conservative.
+   * Can be any value between 0.0 and 1.0 inclusive.
+   *
+   * @defaultValue 0
+   */
+  temperature: number;
+  /**
+   * The model that was used for the final prompt after compression is performed.
+   *
+   * @defaultValue "default"
+   */
+  final_model: LiteralUnion<LemurModel, string>;
+  /**
+   * Max output size in tokens, up to 4000
+   * @defaultValue 2000
+   */
+  max_output_size: number;
+  /**
+   * The date when the request was created
+   */
+  created_at: Date;
+  /**
+   * A list of completed transcripts with text.
+   * Use either transcript_ids or input_text as input into LeMUR.
+   */
+  transcript_ids?: string[];
+  /**
+   * Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.
+   * Use either transcript_ids or input_text as input into LeMUR.
+   */
+  input_text?: string;
+  /**
+   * A list of questions asked in the request.
+   * Each question can have its own context and answer format.
+   */
+  questions?: LemurQuestion[];
+  /**
+   * The prompt used for the model.
+   */
+  prompt?: string;
+  /**
+   * Context provided to the model. This can be a string or a free-form JSON value.
+   */
+  context?: OneOf<
+    [
+      string,
+      {
+        [key: string]: unknown;
+      },
+    ]
+  >;
+  /**
+   * The format to use for the model's answers.
+   */
+  answer_format?: string;
+};
+
 export type LemurBaseResponse = {
   /**
    * The ID of the LeMUR request
@@ -681,6 +746,7 @@
    * The usage numbers for the LeMUR request
    */
   usage: LemurUsage;
+  request?: LemurRequestDetails;
 };
 
 /**
diff --git a/tests/unit/lemur.test.ts b/tests/unit/lemur.test.ts
index 16ad809..4e6ba8a 100644
--- a/tests/unit/lemur.test.ts
+++ b/tests/unit/lemur.test.ts
@@ -149,6 +149,140 @@ describe("lemur", () => {
     expect(response.response).toBe("some response");
   });
 
+  it("should return response with request details", async () => {
+    const responseWithDetails = {
+      request_id: knownLemurRequestId,
+      response: "detailed response",
+      usage: {
+        input_tokens: 250,
+        output_tokens: 75
+      },
+      request: {
+        request_endpoint: "/lemur/v3/generate/task",
+        temperature: 0.7,
+        final_model: "anthropic/claude-3-5-sonnet",
+        max_output_size: 1500,
+        created_at: "2024-01-01T10:30:00Z",
+        transcript_ids: knownTranscriptIds,
+        prompt: "Analyze the key themes in this conversation",
+        context: "Focus on business decisions and action items"
+      }
+    };
+
+    fetchMock.doMockOnceIf(
+      requestMatches({
+        method: "GET",
+        url: `/lemur/v3/${knownLemurRequestId}`,
+      }),
+      JSON.stringify(responseWithDetails),
+    );
+
+    const response = await assembly.lemur.getResponse(knownLemurRequestId);
+    expect(response.request_id).toBe(knownLemurRequestId);
+    expect(response.request).toBeDefined();
+    expect(response.request?.request_endpoint).toBe("/lemur/v3/generate/task");
+    expect(response.request?.temperature).toBe(0.7);
+    expect(response.request?.final_model).toBe("anthropic/claude-3-5-sonnet");
+    expect(response.request?.max_output_size).toBe(1500);
+    expect(response.request?.prompt).toBe("Analyze the key themes in this conversation");
+    expect(response.usage.input_tokens).toBe(250);
+    expect(response.usage.output_tokens).toBe(75);
+  });
+
+  it("should return response with question-answer request details", async () => {
+    const qaResponseWithDetails = {
+      request_id: knownLemurRequestId,
+      response: [{ question: "What was discussed?", answer: "Project updates" }],
+      usage: {
+        input_tokens: 300,
+        output_tokens: 100
+      },
+      request: {
+        request_endpoint: "/lemur/v3/generate/question-answer",
+        temperature: 0.3,
+        final_model: "anthropic/claude-3-opus",
+        max_output_size: 2500,
+        created_at: "2024-01-01T14:15:00Z",
+        input_text: "Custom transcript content...",
+        questions: [
+          {
+            question: "What was discussed?",
+            answer_format: "concise summary",
+            context: "Meeting notes"
+          },
+          {
+            question: "Was the date of the next meeting called out?",
+            answer_options: [
+              "Yes",
+              "No",
+              "Not mentioned"
+            ],
+          }
+        ]
+      }
+    };
+
+    fetchMock.doMockOnceIf(
+      requestMatches({
+        method: "GET",
+        url: `/lemur/v3/${knownLemurRequestId}`,
+      }),
+      JSON.stringify(qaResponseWithDetails),
+    );
+
+    const response = await assembly.lemur.getResponse(knownLemurRequestId);
+    expect(response.request?.request_endpoint).toBe("/lemur/v3/generate/question-answer");
+    expect(response.request?.input_text).toBe("Custom transcript content...");
+    expect(response.request?.questions).toHaveLength(2);
+    expect(response.request?.questions?.[0].question).toBe("What was discussed?");
+    expect(response.request?.questions?.[0].context).toBe("Meeting notes");
+    expect(response.request?.questions?.[1].answer_options).toEqual([
+      "Yes",
+      "No",
+      "Not mentioned"
+    ]);
+  });
+
+  it("should return response with context as object in request details", async () => {
+    const responseWithObjectContext = {
+      request_id: knownLemurRequestId,
+      response: "context-aware response",
+      usage: {
+        input_tokens: 180,
+        output_tokens: 60
+      },
+      request: {
+        request_endpoint: "/lemur/v3/generate/summary",
+        temperature: 0.5,
+        final_model: "default",
+        max_output_size: 2000,
+        created_at: "2024-01-01T16:45:00Z",
+        transcript_ids: knownTranscriptIds,
+        context: {
+          meeting_type: "standup",
+          team: "engineering",
+          date: "2024-01-01"
+        },
+        answer_format: "bullet points"
+      }
+    };
+
+    fetchMock.doMockOnceIf(
+      requestMatches({
+        method: "GET",
+        url: `/lemur/v3/${knownLemurRequestId}`,
+      }),
+      JSON.stringify(responseWithObjectContext),
+    );
+
+    const response = await assembly.lemur.getResponse(knownLemurRequestId);
+    expect(response.request?.context).toEqual({
+      meeting_type: "standup",
+      team: "engineering",
+      date: "2024-01-01"
+    });
+  });
+
   it("should purge request data", async () => {
     fetchMock.doMockOnceIf(
       requestMatches({
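
For reviewers, a minimal usage sketch (not part of the patch) of how a consumer might read the new optional `request` details off a LeMUR response once this change ships. It uses the SDK's existing `AssemblyAI` client and `lemur.getResponse` shown in the tests above; the API-key handling and the request ID are illustrative assumptions.

```ts
import { AssemblyAI } from "assemblyai";

// Assumes ASSEMBLYAI_API_KEY is set in the environment.
const client = new AssemblyAI({ apiKey: process.env.ASSEMBLYAI_API_KEY! });

async function inspectLemurRequest(lemurRequestId: string): Promise<void> {
  const response = await client.lemur.getResponse(lemurRequestId);

  // `request` is optional on LemurBaseResponse, so guard before reading it.
  if (response.request) {
    console.log(response.request.request_endpoint); // e.g. "/lemur/v3/generate/task"
    console.log(response.request.final_model, response.request.temperature);
    console.log(response.request.max_output_size);
  }
}

// Hypothetical ID returned by an earlier client.lemur.task(...) call.
inspectLemurRequest("some-lemur-request-id").catch(console.error);
```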