70 changes: 68 additions & 2 deletions src/types/openapi.generated.ts
@@ -11,8 +11,8 @@
type OneOf<T extends any[]> = T extends [infer Only]
? Only
: T extends [infer A, infer B, ...infer Rest]
? OneOf<[XOR<A, B>, ...Rest]>
: never;

/* eslint-enable */
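For context, `OneOf` folds a tuple of types pairwise into a union of mutually exclusive alternatives. A minimal sketch of how it resolves for the `context` field added further down, assuming the conventional `XOR` helper (which this diff does not show):

```typescript
// Conventional XOR helper: an assumption, since the diff elides its definition.
type Without<T, U> = { [P in Exclude<keyof T, keyof U>]?: never };
type XOR<T, U> = T | U extends object ? (Without<T, U> & U) | (Without<U, T> & T) : T | U;

// OneOf<[A, B, C]> folds to OneOf<[XOR<A, B>, C]>, and so on, until one type remains.
// For the `context` field below, that yields string | { [key: string]: unknown }:
type LemurContext = OneOf<[string, { [key: string]: unknown }]>;

const asString: LemurContext = "Focus on business decisions and action items"; // OK
const asObject: LemurContext = { meeting_type: "standup", team: "engineering" }; // OK
```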

@@ -672,6 +672,71 @@
* }
* ```
*/

export type LemurRequestDetails = {
  /**
   * The endpoint used for the LeMUR request
   */
  request_endpoint: string;
  /**
   * The temperature to use for the model.
   * Higher values result in answers that are more creative, lower values are more conservative.
   * Can be any value between 0.0 and 1.0 inclusive.
   *
   * @defaultValue 0
   */
  temperature: number;
  /**
   * The model that was used for the final prompt after compression is performed.
   *
   * @defaultValue "default"
   */
  final_model: LiteralUnion<LemurModel, string>;
  /**
   * Max output size in tokens, up to 4000
   * @defaultValue 2000
   */
  max_output_size: number;
  /**
   * The date when the request was created
   */
  created_at: Date;
  /**
   * A list of completed transcripts with text.
   * Use either transcript_ids or input_text as input into LeMUR.
   */
  transcript_ids?: string[];
  /**
   * Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.
   * Use either transcript_ids or input_text as input into LeMUR.
   */
  input_text?: string;
  /**
   * A list of questions asked in the request.
   * Each question can have its own context and answer format.
   */
  questions?: LemurQuestion[];
  /**
   * The prompt used for the model.
   */
  prompt?: string;
  /**
   * Context provided to the model. This can be a string or a free-form JSON value.
   */
  context?: OneOf<
    [
      string,
      {
        [key: string]: unknown;
      },
    ]
  >;
  /**
   * The format to use for the model's answers.
   */
  answer_format?: string;
};
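A brief usage sketch for the new details object, assuming an `AssemblyAI` client instance and a placeholder request ID (neither appears in this diff); `getResponse` is the same method the tests below exercise:

```typescript
import { AssemblyAI } from "assemblyai";

const client = new AssemblyAI({ apiKey: "<YOUR_API_KEY>" });

// `request` is optional on LemurBaseResponse, so responses that omit the
// details still type-check; guard before reading its fields.
const res = await client.lemur.getResponse("<lemur-request-id>");
if (res.request) {
  console.log(res.request.request_endpoint); // e.g. "/lemur/v3/generate/task"
  console.log(res.request.final_model);      // e.g. "default"
  console.log(res.request.temperature);      // 0.0 to 1.0, defaults to 0
}
```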

export type LemurBaseResponse = {
/**
* The ID of the LeMUR request
@@ -681,6 +746,7 @@
* The usage numbers for the LeMUR request
*/
usage: LemurUsage;
request?: LemurRequestDetails;
};

/**
@@ -971,7 +1037,7 @@
/**
* Only get throttled transcripts, overrides the status filter
* @defaultValue false
* @deprecated
*/
throttled_only?: boolean;
};
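Both Node.js CI jobs (18 and 20) annotate the `@deprecated` line above with `tsdoc-missing-deprecation-message: The @deprecated block must include a deprecation message, e.g. describing the recommended alternative`. A minimal sketch of one way to satisfy the rule; the suggested alternative is illustrative wording, not taken from the PR:

```typescript
/**
 * Only get throttled transcripts, overrides the status filter
 * @defaultValue false
 * @deprecated Filter on `status` instead. (Suggested wording only; the PR does not specify an alternative.)
 */
throttled_only?: boolean;
```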
134 changes: 134 additions & 0 deletions tests/unit/lemur.test.ts
@@ -149,6 +149,140 @@ describe("lemur", () => {
expect(response.response).toBe("some response");
});

it("should return response with request details", async () => {
const responseWithDetails = {
request_id: knownLemurRequestId,
response: "detailed response",
usage: {
input_tokens: 250,
output_tokens: 75
},
request: {
request_endpoint: "/lemur/v3/generate/task",
temperature: 0.7,
final_model: "anthropic/claude-3-5-sonnet",
max_output_size: 1500,
created_at: "2024-01-01T10:30:00Z",
transcript_ids: knownTranscriptIds,
prompt: "Analyze the key themes in this conversation",
context: "Focus on business decisions and action items"
}
};

fetchMock.doMockOnceIf(
requestMatches({
method: "GET",
url: `/lemur/v3/${knownLemurRequestId}`,
}),
JSON.stringify(responseWithDetails),
);

const response = await assembly.lemur.getResponse(knownLemurRequestId);
expect(response.request_id).toBe(knownLemurRequestId);
expect(response.request).toBeDefined();
expect(response.request?.request_endpoint).toBe("/lemur/v3/generate/task");
expect(response.request?.temperature).toBe(0.7);
expect(response.request?.final_model).toBe("anthropic/claude-3-5-sonnet");
expect(response.request?.max_output_size).toBe(1500);
expect(response.request?.prompt).toBe("Analyze the key themes in this conversation");
expect(response.usage.input_tokens).toBe(250);
expect(response.usage.output_tokens).toBe(75);
});

it("should return response with question-answer request details", async () => {
const qaResponseWithDetails = {
request_id: knownLemurRequestId,
response: [{ question: "What was discussed?", answer: "Project updates" }],
usage: {
input_tokens: 300,
output_tokens: 100
},
request: {
request_endpoint: "/lemur/v3/generate/question-answer",
temperature: 0.3,
final_model: "anthropic/claude-3-opus",
max_output_size: 2500,
created_at: "2024-01-01T14:15:00Z",
input_text: "Custom transcript content...",
questions: [
{
question: "What was discussed?",
answer_format: "concise summary",
context: "Meeting notes"
},
{
question: "Was the date of the next meeting called out?",
answer_options: [
"Yes",
"No",
"Not mentioned"
],
}
]
}
};

fetchMock.doMockOnceIf(
requestMatches({
method: "GET",
url: `/lemur/v3/${knownLemurRequestId}`,
}),
JSON.stringify(qaResponseWithDetails),
);

const response = await assembly.lemur.getResponse(knownLemurRequestId);
expect(response.request?.request_endpoint).toBe("/lemur/v3/generate/question-answer");
expect(response.request?.input_text).toBe("Custom transcript content...");
expect(response.request?.questions).toHaveLength(2);
expect(response.request?.questions?.[0].question).toBe("What was discussed?");
expect(response.request?.questions?.[0].context).toBe("Meeting notes");
expect(response.request?.questions?.[1].answer_options).toEqual([
"Yes",
"No",
"Not mentioned"
]);
});

it("should return response with context as object in request details", async () => {
const responseWithObjectContext = {
request_id: knownLemurRequestId,
response: "context-aware response",
usage: {
input_tokens: 180,
output_tokens: 60
},
request: {
request_endpoint: "/lemur/v3/generate/summary",
temperature: 0.5,
final_model: "default",
max_output_size: 2000,
created_at: "2024-01-01T16:45:00Z",
transcript_ids: knownTranscriptIds,
context: {
meeting_type: "standup",
team: "engineering",
date: "2024-01-01"
},
answer_format: "bullet points"
}
};

fetchMock.doMockOnceIf(
requestMatches({
method: "GET",
url: `/lemur/v3/${knownLemurRequestId}`,
}),
JSON.stringify(responseWithObjectContext),
);

const response = await assembly.lemur.getResponse(knownLemurRequestId);
expect(response.request?.context).toEqual({
meeting_type: "standup",
team: "engineering",
date: "2024-01-01"
});
});

it("should purge request data", async () => {
fetchMock.doMockOnceIf(
requestMatches({