Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: invoke newly created callLLM() function from A4D extension #2

Merged
merged 28 commits into from
Jan 2, 2025
Merged
Changes from 1 commit
Commits
Show all changes
28 commits
Select commit Hold shift + click to select a range
e701b2c
feat: scaffolding for 'SFDX: Send Prompt in Current File to LLM' command
daphne-sfdc Nov 19, 2024
688ef27
feat: use @salesforce/vscode-service-provider to integrate with XGen LLM
daphne-sfdc Nov 22, 2024
f8e1ceb
feat: use current file in editor view as context for prompt
daphne-sfdc Nov 23, 2024
8cde3b2
fix: use userPrompt from spike + input reformatting
daphne-sfdc Nov 23, 2024
eee0a45
feat: use getChatStream() instead of naturalLanguageQuery()
daphne-sfdc Nov 24, 2024
58437ca
refactor: move utility code from Prompt Engineering Playground to vsc…
daphne-sfdc Dec 10, 2024
d5c83be
refactor: move prompt building and calling getChatStream() to XGen bl…
daphne-sfdc Dec 11, 2024
dc70d79
refactor: move llm from parameter to environment variable
daphne-sfdc Dec 11, 2024
257a129
feat: prompt accepts additional context after Apex class
daphne-sfdc Dec 11, 2024
5f4e571
feat: add timestamp to documentContents YAML file name
daphne-sfdc Dec 11, 2024
21e3cb5
feat: user prompt is generated in constructUserPrompt() function + ad…
daphne-sfdc Dec 12, 2024
ff44d6a
fix: strip out markdown triple backticks from resulting OpenAPI v3 sp…
daphne-sfdc Dec 12, 2024
c46dea6
fix: display error message when an OpenAPI v3 specification cannot be…
daphne-sfdc Dec 12, 2024
205b7e2
try: write a script to read prompt components from a YAML file and se…
daphne-sfdc Dec 16, 2024
089c135
feat: can read in a YAML format prompt in a Salesforce project, gener…
daphne-sfdc Dec 17, 2024
c863df4
chore: add JSDoc comments to all functions + cleanup
daphne-sfdc Dec 17, 2024
8b2770f
chore: delete prompt1.yaml because it should belong in a Salesforce p…
daphne-sfdc Dec 17, 2024
25b00e2
feat: new command to generate sample YAML prompt
daphne-sfdc Dec 17, 2024
08c0ba3
feat: add progress notifications while the 'SFDX: Send Prompt in Curr…
daphne-sfdc Dec 17, 2024
795d59d
Apply suggestions from code review
daphne-sfdc Dec 18, 2024
d8efd4a
fix: fix error in reduce block
daphne-sfdc Dec 18, 2024
e3154f2
chore: add more JSDoc comments
daphne-sfdc Dec 18, 2024
918c959
feat: use input YAML filename in result YAML filename + put result YA…
daphne-sfdc Dec 18, 2024
a3369e0
revert: revert using ISO 8601 for datetime formatting because it was …
daphne-sfdc Dec 18, 2024
7c4dd54
chore: remove unused variable
daphne-sfdc Dec 19, 2024
f07ae81
feat: invoke new callLLM() function in salesforcedx-vscode-einstein-g…
daphne-sfdc Dec 22, 2024
774b052
feat: invoke new callLLM() function in salesforcedx-vscode-einstein-g…
daphne-sfdc Dec 22, 2024
2fbe852
chore: rename function + add comment
daphne-sfdc Dec 22, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
refactor: move prompt building and calling getChatStream() to XGen block of callLLM()
  • Loading branch information
daphne-sfdc committed Dec 11, 2024
commit d5c83be9d2599e94e527c67a2f8070fa5b809ef4
95 changes: 50 additions & 45 deletions src/sendPromptToLLM.ts
Original file line number Diff line number Diff line change
@@ -18,51 +18,7 @@ export const sendPromptToLLM = async (): Promise<void> => {

const userPrompt = 'Generate an OpenAPI v3 specification for my current Apex class. The OpenAPI v3 specification should be in YAML. The paths should be in the format of /{ClassName}/{MethodName} for the @AuraEnabled methods. When you return Id in a SOQL query, it has `type: Id`. For every `type: object`, generate a `#/components/schemas` entry for that object. The method should have a $ref entry pointing to the generated `#/components/schemas` entry. Only include methods that have the @AuraEnabled annotation in the paths of the OpenAPI v3 specification.'

const systemTag = '<|system|>';
const endOfPromptTag = '<|endofprompt|>';
const userTag = '<|user|>';
const assistantTag = '<|assistant|>';

const input =
`${systemTag}\n${systemPrompt}\n\n${endOfPromptTag}\n${userTag}\n` +
userPrompt +
`\n\n***Code Context***\n\`\`\`\n` +
editorText +
`\n\`\`\`\n${endOfPromptTag}\n${assistantTag}`;
console.log('input = ' + input);

const apiClient = await getAiApiClient();
// const result = await apiClient.naturalLanguageQuery({
// prefix: '',
// suffix: '',
// input: input,
// commandSource: CommandSource.NLtoCodeGen,
// promptId: 'generateOpenAPIv3Specifications'
// });

// const documentContents = result[0].completion;

const chatRequestBody = {
prompt: input,
stop_sequences: ['<|endofprompt|>'],
max_tokens: 2048, // Adjust the max_tokens as needed
parameters: {
command_source: CommandSource.Chat
}
};
console.log('chatRequestBody = ' + JSON.stringify(chatRequestBody));
const apiClientStream = await apiClient.getChatStream(chatRequestBody, 'generateOpenAPIv3Specifications');
console.log('apiClientStream = ' + JSON.stringify(apiClientStream));

let documentContents = '';
for await (const chunk of apiClientStream) {
const { done, text } = processGeneration(chunk);
documentContents += text;

if (done) {
break;
}
}
const documentContents = await callLLM(systemPrompt, userPrompt, [editorText], "XGen");

console.log('documentContents = ~' + documentContents + '~');
fs.writeFileSync("documentContents.yaml", documentContents);
@@ -71,3 +27,52 @@ export const sendPromptToLLM = async (): Promise<void> => {
/**
 * Resolves the shared AI API client from the VS Code service provider.
 *
 * @returns A promise resolving to the registered {@link AiApiClient} service.
 */
export const getAiApiClient = async (): Promise<AiApiClient> =>
  ServiceProvider.getService(ServiceType.AiApiClient);

/**
 * Sends a prompt to the selected LLM backend and returns the raw completion text.
 *
 * @param systemPrompt - System-level instructions prepended to the prompt.
 * @param userPrompt - The user's request text.
 * @param context - Supporting context documents. Only `context[0]` (the Apex
 *   class source) is used today; the type is widened from the original
 *   one-element tuple `[string]` to `string[]` so callers may pass more
 *   context later without an interface break.
 * @param llm - Backend selector; only 'XGen' is implemented ('OpenAI' is a stub).
 * @returns The accumulated completion text streamed from the LLM.
 * @throws Error when an unsupported `llm` value is supplied, instead of
 *   silently returning a sentinel string that the caller would write to disk
 *   as if it were a generated specification.
 */
const callLLM = async (systemPrompt: string, userPrompt: string, context: string[], llm: string): Promise<string> => {
  if (llm === 'XGen') {
    // XGen chat-markup delimiters expected by the model.
    const systemTag = '<|system|>';
    const endOfPromptTag = '<|endofprompt|>';
    const userTag = '<|user|>';
    const assistantTag = '<|assistant|>';

    const input =
      `${systemTag}\n${systemPrompt}\n\n${endOfPromptTag}\n${userTag}\n` +
      userPrompt +
      `\n\nThis is the Apex class the OpenAPI v3 specification should be generated for:\n\`\`\`\n` +
      context[0] +
      `\n\`\`\`\n${endOfPromptTag}\n${assistantTag}`;
    // NOTE(review): this logs the full Apex class source to the console —
    // confirm that is acceptable for potentially sensitive code.
    console.log('input = ' + input);

    const apiClient = await getAiApiClient();

    const chatRequestBody = {
      prompt: input,
      // Reuse the tag constant so the stop sequence can never drift from the
      // prompt markup (the original repeated the literal '<|endofprompt|>').
      stop_sequences: [endOfPromptTag],
      max_tokens: 2048, // Adjust the max_tokens as needed
      parameters: {
        command_source: CommandSource.Chat
      }
    };
    console.log('chatRequestBody = ' + JSON.stringify(chatRequestBody));
    const apiClientStream = await apiClient.getChatStream(chatRequestBody, 'generateOpenAPIv3Specifications');
    console.log('apiClientStream = ' + JSON.stringify(apiClientStream));

    // Accumulate streamed chunks until the model signals completion.
    let documentContents = '';
    for await (const chunk of apiClientStream) {
      const { done, text } = processGeneration(chunk);
      documentContents += text;

      if (done) {
        break;
      }
    }

    return documentContents;
  }

  if (llm === 'OpenAI') {
    // Placeholder backend: preserved verbatim from the original for compatibility.
    return 'OpenAI case';
  }

  // Fail loudly on an unknown backend. The original returned the sentinel
  // string 'This shouldn\'t be reached', which the caller would have written
  // out as if it were real LLM output.
  throw new Error(`callLLM received an unsupported llm value: ${llm}`);
};