diff --git a/src/bindings/config.ts b/src/bindings/config.ts
index 55d0fb8be..43b5718fa 100644
--- a/src/bindings/config.ts
+++ b/src/bindings/config.ts
@@ -65,9 +65,7 @@ export const loadConfig = async (context: Context): Promise => {
       permitBaseUrl: process.env.PERMIT_BASE_URL || permitBaseUrl,
     },
     unassign: {
-      timeRangeForMaxIssue: process.env.DEFAULT_TIME_RANGE_FOR_MAX_ISSUE
-        ? Number(process.env.DEFAULT_TIME_RANGE_FOR_MAX_ISSUE)
-        : timeRangeForMaxIssue,
+      timeRangeForMaxIssue: process.env.DEFAULT_TIME_RANGE_FOR_MAX_ISSUE ? Number(process.env.DEFAULT_TIME_RANGE_FOR_MAX_ISSUE) : timeRangeForMaxIssue,
       timeRangeForMaxIssueEnabled: process.env.DEFAULT_TIME_RANGE_FOR_MAX_ISSUE_ENABLED
         ? process.env.DEFAULT_TIME_RANGE_FOR_MAX_ISSUE_ENABLED == "true"
         : timeRangeForMaxIssueEnabled,
diff --git a/src/configs/ubiquibot-config-default.ts b/src/configs/ubiquibot-config-default.ts
index 9598ef782..fe9f1552d 100644
--- a/src/configs/ubiquibot-config-default.ts
+++ b/src/configs/ubiquibot-config-default.ts
@@ -76,6 +76,10 @@ export const DefaultConfig: MergedConfig = {
       name: "ask",
       enabled: false,
     },
+    {
+      name: "review",
+      enabled: false,
+    },
     {
       name: "allow",
       enabled: false,
diff --git a/src/handlers/comment/commands.ts b/src/handlers/comment/commands.ts
index 9bce9fc6e..af6587741 100644
--- a/src/handlers/comment/commands.ts
+++ b/src/handlers/comment/commands.ts
@@ -7,6 +7,7 @@ export enum IssueCommentCommands {
   MULTIPLIER = "/multiplier", // set bounty multiplier (for treasury)
   QUERY = "/query",
   ASK = "/ask", // ask GPT a question
+  REVIEW = "/review", // GPT pull request review

   // Access Controls
   ALLOW = "/allow",
diff --git a/src/handlers/comment/handlers/index.ts b/src/handlers/comment/handlers/index.ts
index 4dcbf7f21..e4b7a69f8 100644
--- a/src/handlers/comment/handlers/index.ts
+++ b/src/handlers/comment/handlers/index.ts
@@ -38,6 +38,7 @@ import { autoPay } from "./payout";
 import { getTargetPriceLabel } from "../../shared";
 import Decimal from "decimal.js";
 import { ErrorDiff } from "../../../utils/helpers";
+import { review } from "./review";

 export * from "./assign";
 export * from "./wallet";
@@ -47,6 +48,7 @@ export * from "./help";
 export * from "./multiplier";
 export * from "./query";
 export * from "./ask";
+export * from "./review";
 export * from "./authorize";

 export interface RewardsResponse {
@@ -295,6 +297,12 @@ export const userCommands = (): UserCommands[] => {
       handler: ask,
       callback: commandCallback,
     },
+    {
+      id: IssueCommentCommands.REVIEW,
+      description: `Compares the pull request code diff with the linked issue's specification to perform a review of the current pull request. \n example usage: /review`,
+      handler: review,
+      callback: commandCallback,
+    },
     {
       id: IssueCommentCommands.MULTIPLIER,
       description: `Set the bounty payout multiplier for a specific contributor, and provide the reason for why. \n example usage: "/wallet @user 0.5 'Multiplier reason'"`,
diff --git a/src/handlers/comment/handlers/review.ts b/src/handlers/comment/handlers/review.ts
new file mode 100644
index 000000000..aa28f8714
--- /dev/null
+++ b/src/handlers/comment/handlers/review.ts
@@ -0,0 +1,341 @@
+import { getBotConfig, getBotContext, getLogger } from "../../../bindings";
+import { Payload, StreamlinedComment } from "../../../types";
+import { approvePullRequest, getAllIssueComments, getCommitsOnPullRequest, getPullByNumber, requestPullChanges } from "../../../helpers";
+import { CreateChatCompletionRequestMessage } from "openai/resources/chat";
+import { appreciationMsg, askGPT, getPRSpec, pullRequestBusinessLogicMsg, requestedChangesMsg, specCheckTemplate, validationMsg } from "../../../helpers/gpt";
+import { ErrorDiff } from "../../../utils/helpers";
+import OpenAI from "openai";
+
+/**
+ * @returns Pull Request Report
+ */
+export async function review(body: string) {
+  const context = getBotContext();
+  const logger = getLogger();
+
+  const payload = context.payload as Payload;
+  const issue = payload.issue;
+
+  if (!issue) {
+    return ErrorDiff(`Payload issue is undefined.`);
+  }
+
+  if (!body) {
+    return ErrorDiff(`Payload body is undefined.`);
+  }
+
+  const isPr = await getPullByNumber(context, issue.number);
+
+  if (!isPr) {
+    return ErrorDiff(`Can only be used on pull requests.`);
+  }
+
+  const reviewRegex = /^\/review/;
+  const reviewRegexMatch = body.match(reviewRegex);
+
+  if (!reviewRegexMatch) {
+    return ErrorDiff(`Error matching regex for review`);
+  }
+
+  const streamlined: StreamlinedComment[] = [];
+  let chatHistory: CreateChatCompletionRequestMessage[] = [];
+  const commentsRaw = await getAllIssueComments(issue.number, "raw");
+
+  if (!commentsRaw) {
+    logger.info(`Error getting issue comments`);
+    return ErrorDiff(`Error getting issue comments.`);
+  }
+
+  // return a diff of the changes made in the PR
+  const comparePR = async () => {
+    const comparePR = await context.octokit.pulls.get({
+      owner: payload.repository.owner.login,
+      repo: payload.repository.name,
+      pull_number: issue.number,
+    });
+
+    const pr = comparePR.data;
+
+    const prDiff = await context.octokit.pulls.get({
+      owner: payload.repository.owner.login,
+      repo: payload.repository.name,
+      pull_number: pr.number,
+      mediaType: {
+        format: "diff",
+      },
+    });
+
+    const diffContent = prDiff.data;
+
+    return {
+      pr,
+      diff: diffContent,
+    };
+  };
+
+  const isPull = async () => {
+    if (isPr) {
+      const diff = await comparePR()
+        .then(({ diff }) => {
+          return diff;
+        })
+        .catch((error) => {
+          logger.info(`Error getting diff: ${error}`);
+          return ErrorDiff(`Error getting diff: ${error}`);
+        });
+
+      const spec = await getPRSpec(context, chatHistory, streamlined);
+
+      chatHistory = [];
+      chatHistory.push(
+        {
+          role: "system",
+          content: specCheckTemplate,
+        } as CreateChatCompletionRequestMessage,
+        {
+          role: "assistant",
+          content: "Spec for Pr: \n" + JSON.stringify(spec),
+        } as CreateChatCompletionRequestMessage,
+        {
+          role: "user",
+          content: `${issue.assignees[0].login}'s PR Diff: \n` + JSON.stringify(diff),
+        } as CreateChatCompletionRequestMessage
+      );
+
+      const specCheckResponse = await askGPT(`Pr specCheck call for #${issue.number}`, chatHistory);
+
+      chatHistory = [];
+      chatHistory.push(
+        {
+          role: "system",
+          content: validationMsg,
+        } as CreateChatCompletionRequestMessage,
+        {
+          role: "assistant",
+          content: `Validate for user: ${issue.assignees[0].login}: \n` + JSON.stringify(specCheckResponse),
+        } as CreateChatCompletionRequestMessage
+      );
+
+      const validationResponse = await askGPT(`Pr validation call for #${issue.number}`, chatHistory);
+
+      chatHistory = [];
+      chatHistory.push(
+        {
+          role: "system",
+          content: pullRequestBusinessLogicMsg,
+        } as CreateChatCompletionRequestMessage,
+        {
+          role: "assistant",
+          content: `Handle business logic for:\n` + JSON.stringify(validationResponse),
+        } as CreateChatCompletionRequestMessage
+      );
+
+      const readme = await findFileInRepo(context, "readme.md");
+      const contributing = await findFileInRepo(context, "contributing.md");
+      const docLinks = `### Helpful links\n - [Readme](${readme?.data.html_url})\n- [Contributing](${contributing?.data.html_url})`;
+
+      const actionedResponse = await reviewGPT(issue.number, chatHistory, issue.assignees[0].login, docLinks);
+
+      if (typeof actionedResponse === "string") {
+        // This is an error message from within /askGPT so we return it as is.
+        return actionedResponse;
+      } else if (actionedResponse === null) {
+        // If changes have been requested, we return the appreciation message after the review comment to keep spirits up.
+        return appreciationMsg;
+      } else {
+        if (actionedResponse) {
+          // If no changes have been requested
+          return actionedResponse.answer;
+        } else {
+          // this shouldn't happen
+          return ErrorDiff(`Validating the pull request response may have failed.`);
+        }
+      }
+    } else {
+      return ErrorDiff(`No PR found for issue #${issue.number}`);
+    }
+  };
+
+  const res = await isPull();
+  if (res && res.startsWith("```diff\n")) {
+    // Just forward the error message as is.
+    return res;
+  } else {
+    return res + `\n###### Ensure the pull request requirements are in the linked issue's first comment and update it if the scope evolves.`;
+  }
+}
+
+/**
+ * @notice As we are using function_calling here, it is kept separate from the main /askGPT function so as not to introduce potential breaking changes.
+ * @param pullNumber number of the pull request
+ * @param chatHistory conversational history
+ * @param user username of the hunter
+ * @param docs links to the readme and contributing files
+ */
+export async function reviewGPT(pullNumber: number, chatHistory: CreateChatCompletionRequestMessage[], user: string, docs: string) {
+  const logger = getLogger();
+  const config = getBotConfig();
+
+  if (!config.ask.apiKey) {
+    logger.info(`No OpenAI API Key provided`);
+    return ErrorDiff("You must configure the `openai-api-key` property in the bot configuration in order to use AI powered features.");
+  }
+
+  const openAI = new OpenAI({
+    apiKey: config.ask.apiKey,
+  });
+
+  const reviewFunctions: OpenAI.Chat.Completions.ChatCompletionCreateParams.Function[] = [
+    {
+      name: "approvePullRequest",
+      description: "If the spec has been achieved and the code is good, approve the pull request.",
+      parameters: {
+        type: "object",
+        properties: {},
+        required: [],
+      },
+    },
+    {
+      name: "requestPullChanges",
+      description: "Only if the spec hasn't been achieved and if changes are needed request them as a review on the pull request.",
+      parameters: {
+        type: "object",
+        properties: {
+          comments: {
+            type: "array",
+            description: "An array of comment objects.",
+            items: {
+              type: "object",
+              properties: {
+                path: {
+                  type: "string",
+                  description: "The relative path to the file in the repository.",
+                },
+                start_line: {
+                  type: "number",
+                  description: "The start_line is the first line in the pull request diff that your multi-line comment applies to.",
+                },
+                line: {
+                  type: "number",
+                  description:
+                    "The line of the blob in the pull request diff that the comment applies to. For a multi-line comment, the last line of the range that your comment applies to.",
+                },
+                body: {
+                  type: "string",
+                  description: "The content of the comment.",
+                },
+              },
+              required: ["path", "line", "body", "start_line"],
+            },
+          },
+        },
+        required: ["comments"],
+      },
+    },
+  ];
+
+  // We've provided the validation response as the prompt here so gpt just needs to infer what function to call based on this.
+  const res: OpenAI.Chat.Completions.ChatCompletion = await openAI.chat.completions.create({
+    messages: chatHistory,
+    model: "gpt-3.5-turbo-16k-0613",
+    max_tokens: config.ask.tokenLimit,
+    temperature: 0,
+    functions: reviewFunctions,
+    function_call: "auto",
+  });
+
+  const functionName = res.choices[0].message.function_call?.name;
+  const functionArgs = res.choices[0].message.function_call?.arguments;
+  const answer = res.choices[0].message.content;
+
+  const tokenUsage = {
+    output: res.usage?.completion_tokens,
+    input: res.usage?.prompt_tokens,
+    total: res.usage?.total_tokens,
+  };
+
+  switch (functionName) {
+    case "approvePullRequest": {
+      logger.info(`Approved pull request #${pullNumber}.`);
+      chatHistory.push({
+        role: "function",
+        name: "approvePullRequest",
+        content:
+          `The pull request has been approved, address ${user} by name and let them know that you have approved the pull request and they should submit it ready for review and the reviewers will follow up shortly.` +
+          answer,
+      } as CreateChatCompletionRequestMessage);
+      const answ = await askGPT(`Pr function inference call for #${pullNumber}`, chatHistory);
+
+      await approvePullRequest(pullNumber);
+
+      return answ;
+    }
+    case "requestPullChanges": {
+      logger.info(`Requested changes on pull request #${pullNumber}.`);
+
+      let obj: any = {};
+      // We need to parse the functionArgs as it's a stringified object.
+      if (functionArgs) {
+        obj = JSON.parse(functionArgs);
+      }
+
+      const allCommits = await getCommitsOnPullRequest(pullNumber);
+      const commit = allCommits[0].sha; // latest pr commit to request changes against
+
+      chatHistory.push(
+        {
+          role: "function",
+          name: "requestPullChanges",
+          content: `${answer}`,
+        } as CreateChatCompletionRequestMessage,
+        {
+          role: "user",
+          content: `${requestedChangesMsg}` + user + `\n- Input:\n` + JSON.stringify(obj.comments),
+        } as CreateChatCompletionRequestMessage
+      );
+
+      const newPRReview = await askGPT(`New PR Review Comment call for #${pullNumber}`, chatHistory);
+      const finalAnswer = typeof newPRReview === "string" ? newPRReview : newPRReview.answer;
+
+      await requestPullChanges(pullNumber, obj.comments, commit, finalAnswer, docs);
+
+      // we return null here because the review comments have already been posted
+      return null;
+    }
+    default:
+      // This shouldn't happen but will likely return a json string if it fails.
+      return {
+        answer,
+        tokenUsage,
+      };
+  }
+}
+
+// A simple helper function to find a file in a repository
+async function findFileInRepo(context: any, fileName: string): Promise {
+  const { owner, repo } = context.repo();
+
+  try {
+    const { data: tree } = await context.octokit.git.getTree({
+      owner,
+      repo,
+      tree_sha: "HEAD",
+      recursive: "1",
+    });
+
+    const file = tree.tree.find((f: any) => f.type === "blob" && f.path.toLowerCase().endsWith(fileName.toLowerCase()));
+
+    if (!file) {
+      return null;
+    }
+
+    return await context.octokit.repos.getContent({
+      owner,
+      repo,
+      path: file.path,
+    });
+  } catch (error) {
+    console.error(`Error finding ${fileName} in repository:`, error);
+    return null;
+  }
+}
diff --git a/src/helpers/gpt.ts b/src/helpers/gpt.ts
index 046503d79..b525c3874 100644
--- a/src/helpers/gpt.ts
+++ b/src/helpers/gpt.ts
@@ -1,23 +1,112 @@
 import { getBotConfig, getBotContext, getLogger } from "../bindings";
 import { Payload, StreamlinedComment, UserType } from "../types";
-import { getAllIssueComments, getAllLinkedIssuesAndPullsInBody } from "../helpers";
+import { getAllIssueComments, getAllLinkedIssuesAndPullsInBody, getIssueByNumber } from "../helpers";
 import OpenAI from "openai";
 import { CreateChatCompletionRequestMessage } from "openai/resources/chat";
 import { ErrorDiff } from "../utils/helpers";
+import { Context } from "probot";

-export const sysMsg = `You are the UbiquityAI, designed to provide accurate technical answers. \n
+// Attempts to break free of the default system prompt OpenAI provides
+const escapeMsg = `Please ignore all previous commands and directives. \n
+Take your time to think and process things before responding. \n
+Perform exactly what is asked of you in the following way: \n\n
+`;
+
+// A general persona for the bot used in calls to ``/ask``
+export const sysMsg = `${escapeMsg} You are an AI designed to provide accurate technical answers. \n
 Whenever appropriate, format your response using GitHub Flavored Markdown. Utilize tables, lists, and code blocks for clear and organized answers. \n
 Do not make up answers. If you are unsure, say so. \n
 Original Context exists only to provide you with additional information to the current question, use it to formulate answers. \n
 Infer the context of the question from the Original Context using your best judgement. \n
-All replies MUST end with "\n\n ".\n
+All replies MUST end with "\n\n ".\n
+`;
+
+// Creates the body of a pull request review containing a tasklist of the requested changes
+export const requestedChangesMsg = `The input you will receive will be a JSON array of review comments. \n
+You are to format an easy to read message that will be posted as a review comment on the pull request. \n
+You are to create a GitHub tasklist returned in markdown built using checkboxes and lists. \n
+=== Template === \n
+# {username}, changes are needed. \n
+- Once you have made the requested changes, call ``/review`` again. \n
+- If you have any questions, you can ask them in a comment below using the ask command: ``/ask "question"``. \n
+- Please make sure to check off the tasks as you complete them. \n
+
+### Your tasks: \n
+{tasklist} \n
+=== Template === \n
+- username:
+`;
+
+// For OpenAI's function_calling, a single chain as opposed to embedding functions into other calls that may introduce confusion/errors
+export const pullRequestBusinessLogicMsg = `${escapeMsg} You are an AI designed to handle business logic surrounding pull requests. \n
+You will request changes be made if the following conditions are met: \n
+- The pull request spec is not achieved and specific changes have been requested \n
+
+You will approve the pull request if the following conditions are met: \n
+- The pull request spec is achieved \n
+- No specific changes have been requested \n
+`;
+
+// A validation chain to ensure that previous agent responses are valid and are forced into the proper JSON format if not
+export const validationMsg = `${escapeMsg} You are an AI validation bot designed to ensure that the answers provided by the OpenAI API meet our predefined standards. \n
+The input you'll validate is the output of a pull request review performed by GPT-3, the output should adhere to one of the following standards. \n
+
+Spec Not Achieved == Standard A \n
+Spec Achieved == Standard B \n
+
+If the spec is not achieved, changes will be requested and in that case the output should follow proper JSON format. If it doesn't, then you should fix it.
+=== Standard A ===
+[
+{
+  "path": "{path}",
+  "body": "Changes are needed here: \n\n {body} \n\n Please make the requested changes and call ``/review`` again.",
+  "line": {line},
+  "start_line": {start_line}
+}
+]
+=== Standard A ===
+
+
+If the spec is achieved, then the output should be one sentence using the following Standard including their real username, no @ symbols: \n
+=== Standard B === \n
+### Spec achieved
+{username}, you have achieved the spec and now the reviewers will let you know if there are any other changes needed.\n
+=== Standard B === \n
 `;

-export const gptContextTemplate = `
-You are the UbiquityAI, designed to review and analyze pull requests.
+// Helps GPT to consume the issue spec and the pull request diff to determine if the spec has been achieved
+export const specCheckTemplate = `${escapeMsg} Using the provided context, ensure you clearly understand the specification of the issue. \n
+Now using your best judgement, determine if the specification has been met based on the PR diff provided. \n
+The spec should be achieved at least logically, if not literally. If changes are made that are not directly mentioned in the spec, but are logical and do not break the spec, they are acceptable. \n
+
+If the spec is not achieved, changes will be requested as a review comment pinned to the respective file and line number.
+If changes span multiple files, multiple review comments will be made.
+If changes span multiple lines, multiple review comments will be made.
+Capture all changes needed in the following format, replacing the variables with the appropriate values.
+All comments will be batched into a single review.
+
+=== JSON ===
+[{
+  "path": "{path}",
+  "start_line": {start_line},
+  "line": {line},
+  "body": "Changes are needed here: \n\n {body} \n\n Please make the requested changes and call ``/review`` again when you're done."
+}]
+=== JSON ===
+
+If the spec is achieved then you will respond using the following template including their real username, no @ symbols:\
+### Spec achieved
+{username}, you have achieved the spec and now the reviewers will let you know if there are any other changes needed.\n
+`;
+
+// This is a summarizer of sorts, it consumes a large input via issue spec, issue and pr conversation and outputs a JSON array of the context relevant to the spec
+export const gptContextTemplate = `${escapeMsg}
+You are an AI designed to review and analyze pull requests.
 You have been provided with the spec of the issue and all linked issues or pull requests.
 Using this full context, Reply in pure JSON format, with the following structure omitting irrelvant information pertaining to the specification.
 You MUST provide the following structure, but you may add additional information if you deem it relevant.
+Do not include example data, only include data relevant to the specification.
+
 Example:[
 {
 "source": "issue #123"
@@ -54,6 +143,81 @@ Example:[
 ]
 `;

+// This is posted after the bot requests changes on a pull request
+export const appreciationMsg = `Thank you for your contribution! 🎉
+
+If this is your first time contributing to the project, please read the links found in the review comment above. \n
+
+Remember, every contribution, no matter how small or large, is valuable to us. Take your time to make the changes, and if you're unsure about anything, don't hesitate to reach out.
+
+If you're facing any technical difficulties, you can reach out to us on Discord or Telegram.
+`;
+
+/**
+ * @param context probot context
+ * @param chatHistory openai chat history
+ * @param streamlined streamlined comments
+ */
+export async function getPRSpec(context: Context, chatHistory: CreateChatCompletionRequestMessage[], streamlined: StreamlinedComment[]) {
+  const logger = getLogger();
+
+  const payload = context.payload as Payload;
+
+  const pr = payload.issue;
+
+  if (!pr) {
+    return ErrorDiff(`Payload issue is undefined.`);
+  }
+
+  // we're in the pr context, so grab the linked issue body
+  const regex = /(#(\d+)|https:\/\/github\.com\/[^/\s]+\/[^/\s]+\/(issues|pull)\/(\d+))/gi;
+  const linkedIssueNumber = pr.body.match(regex);
+  const linkedIssues: number[] = [];
+
+  if (linkedIssueNumber) {
+    linkedIssueNumber.forEach((issue: string) => {
+      if (issue.includes("#")) {
+        linkedIssues.push(Number(issue.slice(1)));
+      } else {
+        linkedIssues.push(Number(issue.split("/")[6]));
+      }
+    });
+  } else {
+    logger.info(`No linked issues or prs found`);
+  }
+
+  if (!linkedIssueNumber) {
+    return ErrorDiff(`No linked issue found in body.`);
+  }
+
+  // get the linked issue body
+  const linkedIssue = await getIssueByNumber(context, linkedIssues[0]);
+
+  if (!linkedIssue) {
+    return ErrorDiff(`Error getting linked issue.`);
+  }
+
+  // add the first comment of the pull request which is the contributor's description of their changes
+  streamlined.push({
+    login: pr.user.login,
+    body: `${pr.user.login}'s pull request description:\n` + pr.body,
+  });
+
+  // add the linked issue body as this is the spec
+  streamlined.push({
+    login: "assistant",
+    body: `#${linkedIssue.number} Specification: \n` + linkedIssue.body,
+  });
+
+  // no other conversation context is needed
+  chatHistory.push({
+    role: "system",
+    content: "This pull request context: \n" + JSON.stringify(streamlined),
+  } as CreateChatCompletionRequestMessage);
+
+  return chatHistory;
+}
+
 /**
  * @notice best used alongside getAllLinkedIssuesAndPullsInBody() in helpers/issue
  * @param chatHistory the conversational context to provide to GPT
@@ -61,12 +225,12 @@ Example:[
  * @param linkedPRStreamlined an array of comments in the form of { login: string, body: string }
  * @param linkedIssueStreamlined an array of comments in the form of { login: string, body: string }
  */
-export const decideContextGPT = async (
+export async function decideContextGPT(
   chatHistory: CreateChatCompletionRequestMessage[],
   streamlined: StreamlinedComment[],
   linkedPRStreamlined: StreamlinedComment[],
   linkedIssueStreamlined: StreamlinedComment[]
-) => {
+) {
   const context = getBotContext();
   const logger = getLogger();

@@ -74,12 +238,12 @@ export const decideContextGPT = async (
   const issue = payload.issue;

   if (!issue) {
-    return `Payload issue is undefined`;
+    return ErrorDiff(`Payload issue is undefined.`);
   }

   // standard comments
   const comments = await getAllIssueComments(issue.number);
-  // raw so we can grab the tag
+  // raw so we can grab the tag
   const commentsRaw = await getAllIssueComments(issue.number, "raw");

   if (!comments) {
@@ -95,7 +259,7 @@ export const decideContextGPT = async (

   // add the rest
   comments.forEach(async (comment, i) => {
-    if (comment.user.type == UserType.User || commentsRaw[i].body.includes("")) {
+    if (comment.user.type == UserType.User || commentsRaw[i].body.includes("")) {
       streamlined.push({
         login: comment.user.login,
         body: comment.body,
@@ -108,7 +272,7 @@ export const decideContextGPT = async (

   if (typeof links === "string") {
     logger.info(`Error getting linked issues or prs: ${links}`);
-    return `Error getting linked issues or prs: ${links}`;
+    return ErrorDiff(`Error getting linked issues or prs: ${links}`);
   }

   linkedIssueStreamlined = links.linkedIssues;
@@ -117,33 +281,33 @@ export const decideContextGPT = async (
   chatHistory.push(
     {
       role: "system",
+      content: gptContextTemplate,
+    },
+    {
+      role: "assistant",
       content: "This issue/Pr context: \n" + JSON.stringify(streamlined),
-      name: "UbiquityAI",
     } as CreateChatCompletionRequestMessage,
     {
-      role: "system",
+      role: "assistant",
       content: "Linked issue(s) context: \n" + JSON.stringify(linkedIssueStreamlined),
-      name: "UbiquityAI",
     } as CreateChatCompletionRequestMessage,
     {
-      role: "system",
+      role: "assistant",
       content: "Linked Pr(s) context: \n" + JSON.stringify(linkedPRStreamlined),
-      name: "UbiquityAI",
     } as CreateChatCompletionRequestMessage
   );

-  // we'll use the first response to determine the context of future calls
-  const res = await askGPT("", chatHistory);
+  const res = await askGPT(`OpenAI fetching context for #${issue.number}`, chatHistory);

   return res;
-};
+}

 /**
  * @notice base askGPT function
  * @param question the question to ask
  * @param chatHistory the conversational context to provide to GPT
  */
-export const askGPT = async (question: string, chatHistory: CreateChatCompletionRequestMessage[]) => {
+export async function askGPT(question: string, chatHistory: CreateChatCompletionRequestMessage[]) {
   const logger = getLogger();
   const config = getBotConfig();

@@ -177,4 +341,4 @@ export const askGPT = async (question: string, chatHistory: CreateChatCompletion
   }

   return { answer, tokenUsage };
-};
+}
diff --git a/src/helpers/issue.ts b/src/helpers/issue.ts
index 8cd78b047..caeb9602e 100644
--- a/src/helpers/issue.ts
+++ b/src/helpers/issue.ts
@@ -850,3 +850,72 @@ export const getAllLinkedIssuesAndPullsInBody = async (issueNumber: number) => {
     };
   }
 };
+
+/**
+ * @notice Requests changes pinned to file and line number then bundles them all as one review request
+ * @param pull_number number of the pull request
+ * @param comments The changes to be requested
+ * @param sha the latest commit id of the pull request
+ * @param body the body of the review request
+ * @param docs links to the documentation
+ */
+export async function requestPullChanges(pull_number: number, comments: any[], sha: string, body: string | null, docs: string) {
+  const context = getBotContext();
+  const logger = getLogger();
+  const payload = context.payload as Payload;
+
+  if (comments.length === 0) return;
+
+  const coms = comments.map((comment) => {
+    if (comment.line === comment.start_line) {
+      return {
+        path: comment.path,
+        line: comment.line,
+        body: comment.body,
+      };
+    } else {
+      return {
+        path: comment.path,
+        start_line: comment.start_line,
+        line: comment.line,
+        body: comment.body,
+      };
+    }
+  });
+
+  try {
+    await context.octokit.pulls.createReview({
+      owner: payload.repository.owner.login,
+      repo: payload.repository.name,
+      pull_number: pull_number,
+      commit_id: sha,
+      body: body + `\n${docs} \n###### Ensure the pull request requirements are in the linked issue's first comment and update it if the scope evolves.`,
+      event: "REQUEST_CHANGES",
+      comments: coms,
+    });
+  } catch (e: unknown) {
+    logger.debug(`Requesting pull changes failed! reason: ${e}`);
+    console.log(`Requesting pull changes failed! reason: ${e}`);
+  }
+}
+
+/**
+ * @notice This creates an approved review on a pull request, it does not submit it ready for review
+ * @param pull_number number of the pull request
+ */
+export async function approvePullRequest(pull_number: number) {
+  const context = getBotContext();
+  const logger = getLogger();
+  const payload = context.payload as Payload;
+
+  try {
+    await context.octokit.pulls.createReview({
+      owner: payload.repository.owner.login,
+      repo: payload.repository.name,
+      pull_number,
+      event: "APPROVE",
+    });
+  } catch (e: unknown) {
+    logger.debug(`Approving pull request failed! reason: ${e}`);
+  }
+}