Merge pull request #3474 from github/mbg/risk-assessment-analysis

Add `csra` analysis kind
This commit is contained in:
Michael B. Gale
2026-02-17 15:39:05 +00:00
committed by GitHub
25 changed files with 694 additions and 176 deletions
+112
View File
@@ -1,15 +1,23 @@
import path from "path";
import test from "ava";
import * as sinon from "sinon";
import * as actionsUtil from "./actions-util";
import {
AnalysisKind,
CodeScanning,
compatibilityMatrix,
RiskAssessment,
getAnalysisConfig,
getAnalysisKinds,
parseAnalysisKinds,
supportedAnalysisKinds,
} from "./analyses";
import { EnvVar } from "./environment";
import { getRunnerLogger } from "./logging";
import { setupTests } from "./testing-utils";
import { AssessmentPayload } from "./upload-lib/types";
import { ConfigurationError } from "./util";
setupTests(test);
@@ -67,3 +75,107 @@ test("getAnalysisKinds - throws if `analysis-kinds` input is invalid", async (t)
requiredInputStub.withArgs("analysis-kinds").returns("no-such-thing");
await t.throwsAsync(getAnalysisKinds(getRunnerLogger(true), true));
});
// Test the compatibility matrix by looping through all pairs of distinct analysis kinds.
const analysisKinds = Object.values(AnalysisKind);
for (let i = 0; i < analysisKinds.length; i++) {
  const analysisKind = analysisKinds[i];
  // Starting the inner loop at `i + 1` visits every unordered pair exactly
  // once, and `analysisKind` can never equal `otherAnalysis` (enum values are
  // unique), so no self-comparison check is needed.
  for (let j = i + 1; j < analysisKinds.length; j++) {
    const otherAnalysis = analysisKinds[j];
    // Stubs the `analysis-kinds` input so that exactly this pair of analysis
    // kinds is enabled.
    const stubAnalysisKindsInput = () => {
      sinon
        .stub(actionsUtil, "getRequiredInput")
        .withArgs("analysis-kinds")
        .returns([analysisKind, otherAnalysis].join(","));
    };
    // NOTE(review): this only checks one direction of the matrix; it assumes
    // `compatibilityMatrix` is symmetric for compatible pairs — confirm.
    if (compatibilityMatrix[analysisKind].has(otherAnalysis)) {
      test(`getAnalysisKinds - allows ${analysisKind} with ${otherAnalysis}`, async (t) => {
        stubAnalysisKindsInput();
        const result = await getAnalysisKinds(getRunnerLogger(true), true);
        t.is(result.length, 2);
      });
    } else {
      test(`getAnalysisKinds - throws if ${analysisKind} is enabled with ${otherAnalysis}`, async (t) => {
        stubAnalysisKindsInput();
        await t.throwsAsync(getAnalysisKinds(getRunnerLogger(true), true), {
          instanceOf: ConfigurationError,
          message: `${analysisKind} and ${otherAnalysis} cannot be enabled at the same time`,
        });
      });
    }
  }
}
test("Code Scanning configuration does not accept other SARIF extensions", (t) => {
for (const analysisKind of supportedAnalysisKinds) {
if (analysisKind === AnalysisKind.CodeScanning) continue;
const analysis = getAnalysisConfig(analysisKind);
const sarifPath = path.join("path", "to", `file${analysis.sarifExtension}`);
// The Code Scanning configuration's `sarifPredicate` should not accept a path which
// ends in a different configuration's `sarifExtension`.
t.false(CodeScanning.sarifPredicate(sarifPath));
}
});
test("Risk Assessment configuration transforms SARIF upload payload", (t) => {
  process.env[EnvVar.RISK_ASSESSMENT_ID] = "1";
  const transformed = RiskAssessment.transformPayload({
    commit_oid: "abc",
    sarif: "sarif",
    ref: "ref",
    workflow_run_attempt: 1,
    workflow_run_id: 1,
    checkout_uri: "uri",
    tool_names: [],
  }) as AssessmentPayload;
  // Only the SARIF contents and the assessment ID should remain after the
  // transformation.
  const expected: AssessmentPayload = { sarif: "sarif", assessment_id: 1 };
  t.deepEqual(expected, transformed);
});
test("Risk Assessment configuration throws for negative assessment IDs", (t) => {
  process.env[EnvVar.RISK_ASSESSMENT_ID] = "-1";
  // A structurally valid upload payload; its contents are irrelevant here
  // because the transformation should fail on the negative assessment ID.
  const uploadPayload = {
    commit_oid: "abc",
    sarif: "sarif",
    ref: "ref",
    workflow_run_attempt: 1,
    workflow_run_id: 1,
    checkout_uri: "uri",
    tool_names: [],
  };
  t.throws(() => RiskAssessment.transformPayload(uploadPayload), {
    instanceOf: Error,
    message: (msg) =>
      msg.startsWith(`${EnvVar.RISK_ASSESSMENT_ID} must not be negative: `),
  });
});
test("Risk Assessment configuration throws for invalid IDs", (t) => {
  process.env[EnvVar.RISK_ASSESSMENT_ID] = "foo";
  // A structurally valid upload payload; its contents are irrelevant here
  // because the transformation should fail on the non-numeric assessment ID.
  const uploadPayload = {
    commit_oid: "abc",
    sarif: "sarif",
    ref: "ref",
    workflow_run_attempt: 1,
    workflow_run_id: 1,
    checkout_uri: "uri",
    tool_names: [],
  };
  t.throws(() => RiskAssessment.transformPayload(uploadPayload), {
    instanceOf: Error,
    message: (msg) =>
      msg.startsWith(`${EnvVar.RISK_ASSESSMENT_ID} must not be NaN: `),
  });
});
+80 -6
View File
@@ -3,14 +3,30 @@ import {
getOptionalInput,
getRequiredInput,
} from "./actions-util";
import { EnvVar } from "./environment";
import { Logger } from "./logging";
import { ConfigurationError } from "./util";
import {
AssessmentPayload,
BasePayload,
UploadPayload,
} from "./upload-lib/types";
import { ConfigurationError, getRequiredEnvParam } from "./util";
/** The kinds of analysis that the Action can perform. */
export enum AnalysisKind {
  CodeScanning = "code-scanning",
  CodeQuality = "code-quality",
  RiskAssessment = "risk-assessment",
}

/** Maps each analysis kind to the set of other kinds it may run alongside. */
export type CompatibilityMatrix = Record<AnalysisKind, Set<AnalysisKind>>;

/** A mapping from analysis kinds to other analysis kinds which can be enabled concurrently. */
export const compatibilityMatrix: CompatibilityMatrix = {
  [AnalysisKind.CodeScanning]: new Set([AnalysisKind.CodeQuality]),
  [AnalysisKind.CodeQuality]: new Set([AnalysisKind.CodeScanning]),
  // Risk Assessment cannot be combined with any other analysis kind.
  [AnalysisKind.RiskAssessment]: new Set(),
};

// Exported for testing. A set of all known analysis kinds.
export const supportedAnalysisKinds = new Set(Object.values(AnalysisKind));
@@ -67,7 +83,7 @@ export async function getAnalysisKinds(
return cachedAnalysisKinds;
}
cachedAnalysisKinds = await parseAnalysisKinds(
const analysisKinds = await parseAnalysisKinds(
getRequiredInput("analysis-kinds"),
);
@@ -85,12 +101,27 @@ export async function getAnalysisKinds(
// if an input to `quality-queries` was specified. We should remove this once
// `quality-queries` is no longer used.
if (
!cachedAnalysisKinds.includes(AnalysisKind.CodeQuality) &&
!analysisKinds.includes(AnalysisKind.CodeQuality) &&
qualityQueriesInput !== undefined
) {
cachedAnalysisKinds.push(AnalysisKind.CodeQuality);
analysisKinds.push(AnalysisKind.CodeQuality);
}
// Check that all enabled analysis kinds are compatible with each other.
for (const analysisKind of analysisKinds) {
for (const otherAnalysisKind of analysisKinds) {
if (analysisKind === otherAnalysisKind) continue;
if (!compatibilityMatrix[analysisKind].has(otherAnalysisKind)) {
throw new ConfigurationError(
`${analysisKind} and ${otherAnalysisKind} cannot be enabled at the same time`,
);
}
}
}
// Cache the analysis kinds and return them.
cachedAnalysisKinds = analysisKinds;
return cachedAnalysisKinds;
}
@@ -101,6 +132,7 @@ export const codeQualityQueries: string[] = ["code-quality"];
enum SARIF_UPLOAD_ENDPOINT {
CODE_SCANNING = "PUT /repos/:owner/:repo/code-scanning/analysis",
CODE_QUALITY = "PUT /repos/:owner/:repo/code-quality/analysis",
RISK_ASSESSMENT = "PUT /repos/:owner/:repo/code-scanning/risk-assessment",
}
// Represents configurations for different analysis kinds.
@@ -120,6 +152,8 @@ export interface AnalysisConfig {
fixCategory: (logger: Logger, category?: string) => string | undefined;
/** A prefix for environment variables used to track the uniqueness of SARIF uploads. */
sentinelPrefix: string;
/** Transforms the upload payload in an analysis-specific way. */
transformPayload: (payload: UploadPayload) => BasePayload;
}
// Represents the Code Scanning analysis configuration.
@@ -130,9 +164,11 @@ export const CodeScanning: AnalysisConfig = {
sarifExtension: ".sarif",
sarifPredicate: (name) =>
name.endsWith(CodeScanning.sarifExtension) &&
!CodeQuality.sarifPredicate(name),
!CodeQuality.sarifPredicate(name) &&
!RiskAssessment.sarifPredicate(name),
fixCategory: (_, category) => category,
sentinelPrefix: "CODEQL_UPLOAD_SARIF_",
transformPayload: (payload) => payload,
};
// Represents the Code Quality analysis configuration.
@@ -144,6 +180,38 @@ export const CodeQuality: AnalysisConfig = {
sarifPredicate: (name) => name.endsWith(CodeQuality.sarifExtension),
fixCategory: fixCodeQualityCategory,
sentinelPrefix: "CODEQL_UPLOAD_QUALITY_SARIF_",
transformPayload: (payload) => payload,
};
/**
 * Retrieves the CSRA assessment ID from the environment variable named by
 * `EnvVar.RISK_ASSESSMENT_ID` and combines it with the SARIF from `payload`.
 *
 * @param payload The full upload payload; only its `sarif` field is retained.
 * @returns The payload expected by the Risk Assessment upload endpoint.
 * @throws {Error} If the environment variable's value is empty, not a number,
 * not an integer, or negative.
 */
function addAssessmentId(payload: UploadPayload): AssessmentPayload {
  const rawAssessmentId = getRequiredEnvParam(EnvVar.RISK_ASSESSMENT_ID);
  // Use `Number` rather than `parseInt`: `parseInt` silently truncates
  // malformed values such as "12abc" or "1.5" to an integer prefix. Treat an
  // empty/whitespace-only value as invalid, since `Number("")` is 0.
  const assessmentId =
    rawAssessmentId.trim() === "" ? NaN : Number(rawAssessmentId);
  if (Number.isNaN(assessmentId)) {
    throw new Error(
      `${EnvVar.RISK_ASSESSMENT_ID} must not be NaN: ${rawAssessmentId}`,
    );
  }
  if (assessmentId < 0) {
    throw new Error(
      `${EnvVar.RISK_ASSESSMENT_ID} must not be negative: ${rawAssessmentId}`,
    );
  }
  if (!Number.isInteger(assessmentId)) {
    throw new Error(
      `${EnvVar.RISK_ASSESSMENT_ID} must be an integer: ${rawAssessmentId}`,
    );
  }
  return { sarif: payload.sarif, assessment_id: assessmentId };
}
// Represents the Code Scanning Risk Assessment (CSRA) analysis configuration.
export const RiskAssessment: AnalysisConfig = {
  kind: AnalysisKind.RiskAssessment,
  name: "code scanning risk assessment",
  target: SARIF_UPLOAD_ENDPOINT.RISK_ASSESSMENT,
  // More specific than the plain `.sarif` extension used by Code Scanning.
  sarifExtension: ".csra.sarif",
  sarifPredicate: (name) => name.endsWith(RiskAssessment.sarifExtension),
  fixCategory: (_, category) => category,
  sentinelPrefix: "CODEQL_UPLOAD_CSRA_SARIF_",
  // Adds the assessment ID from the environment to the upload payload.
  transformPayload: addAssessmentId,
};
/**
@@ -160,6 +228,8 @@ export function getAnalysisConfig(kind: AnalysisKind): AnalysisConfig {
return CodeScanning;
case AnalysisKind.CodeQuality:
return CodeQuality;
case AnalysisKind.RiskAssessment:
return RiskAssessment;
}
}
@@ -167,4 +237,8 @@ export function getAnalysisConfig(kind: AnalysisKind): AnalysisConfig {
// we want to scan a folder containing SARIF files in an order that finds the more
// specific extensions first. This constant defines an array in the order of analysis
// configurations with more specific extensions to less specific extensions.
export const SarifScanOrder = [CodeQuality, CodeScanning];
// Ordered from the most specific SARIF extension to the least specific one.
export const SarifScanOrder: AnalysisConfig[] = [
  RiskAssessment,
  CodeQuality,
  CodeScanning,
];
+2 -1
View File
@@ -4,7 +4,7 @@ import * as path from "path";
import test from "ava";
import * as sinon from "sinon";
import { CodeQuality, CodeScanning } from "./analyses";
import { CodeQuality, CodeScanning, RiskAssessment } from "./analyses";
import {
runQueries,
defaultSuites,
@@ -155,5 +155,6 @@ test("addSarifExtension", (t) => {
addSarifExtension(CodeQuality, language),
`${language}.quality.sarif`,
);
t.is(addSarifExtension(RiskAssessment, language), `${language}.csra.sarif`);
}
});
+3 -6
View File
@@ -549,12 +549,9 @@ export async function runQueries(
): Promise<{ summary: string; sarifFile: string }> {
logger.info(`Interpreting ${analysis.name} results for ${language}`);
// If this is a Code Quality analysis, correct the category to one
// accepted by the Code Quality backend.
let category = automationDetailsId;
if (analysis.kind === analyses.AnalysisKind.CodeQuality) {
category = analysis.fixCategory(logger, automationDetailsId);
}
// Apply the analysis configuration's `fixCategory` function to adjust the category if needed.
// This is a no-op for Code Scanning.
const category = analysis.fixCategory(logger, automationDetailsId);
const sarifFile = path.join(
sarifFolder,
+20 -1
View File
@@ -7,7 +7,7 @@ import * as yaml from "js-yaml";
import * as sinon from "sinon";
import * as actionsUtil from "./actions-util";
import { AnalysisKind } from "./analyses";
import { AnalysisKind, supportedAnalysisKinds } from "./analyses";
import * as api from "./api-client";
import { CachingKind } from "./caching-utils";
import { createStubCodeQL } from "./codeql";
@@ -1829,3 +1829,22 @@ test("hasActionsWorkflows doesn't throw if workflows folder doesn't exist", asyn
t.notThrows(() => configUtils.hasActionsWorkflows(tmpDir));
});
});
// Checks that `getPrimaryAnalysisConfig` returns the expected configuration
// for each supported combination of analysis kinds.
test("getPrimaryAnalysisConfig - single analysis kind", (t) => {
  // If only one analysis kind is configured, we expect to get the matching configuration.
  for (const analysisKind of supportedAnalysisKinds) {
    const singleKind = createTestConfig({ analysisKinds: [analysisKind] });
    t.is(configUtils.getPrimaryAnalysisConfig(singleKind).kind, analysisKind);
  }
});

test("getPrimaryAnalysisConfig - Code Scanning + Code Quality", (t) => {
  // For CS+CQ, we expect to get the Code Scanning configuration.
  const codeScanningAndCodeQuality = createTestConfig({
    analysisKinds: [AnalysisKind.CodeScanning, AnalysisKind.CodeQuality],
  });
  t.is(
    configUtils.getPrimaryAnalysisConfig(codeScanningAndCodeQuality).kind,
    AnalysisKind.CodeScanning,
  );
});
+11 -13
View File
@@ -12,9 +12,8 @@ import {
import {
AnalysisConfig,
AnalysisKind,
CodeQuality,
codeQualityQueries,
CodeScanning,
getAnalysisConfig,
} from "./analyses";
import * as api from "./api-client";
import { CachingKind, getCachingKind } from "./caching-utils";
@@ -1389,28 +1388,27 @@ export function isCodeQualityEnabled(config: Config): boolean {
}
/**
* Returns the primary analysis kind that the Action is initialised with. This is
* always `AnalysisKind.CodeScanning` unless `AnalysisKind.CodeScanning` is not enabled.
* Returns the primary analysis kind that the Action is initialised with. If there is only
* one analysis kind, then that is returned.
*
* @returns Returns `AnalysisKind.CodeScanning` if `AnalysisKind.CodeScanning` is enabled;
* otherwise `AnalysisKind.CodeQuality`.
* The special case is Code Scanning + Code Quality, which can be enabled at the same time.
* In that case, this function returns Code Scanning.
*/
function getPrimaryAnalysisKind(config: Config): AnalysisKind {
if (config.analysisKinds.length === 1) {
return config.analysisKinds[0];
}
return isCodeScanningEnabled(config)
? AnalysisKind.CodeScanning
: AnalysisKind.CodeQuality;
}
/**
* Returns the primary analysis configuration that the Action is initialised with. This is
* always `CodeScanning` unless `CodeScanning` is not enabled.
*
* @returns Returns `CodeScanning` if `AnalysisKind.CodeScanning` is enabled; otherwise `CodeQuality`.
* Returns the primary analysis configuration that the Action is initialised with.
*/
export function getPrimaryAnalysisConfig(config: Config): AnalysisConfig {
return getPrimaryAnalysisKind(config) === AnalysisKind.CodeScanning
? CodeScanning
: CodeQuality;
return getAnalysisConfig(getPrimaryAnalysisKind(config));
}
/** Logs the Git version as a telemetry diagnostic. */
+3
View File
@@ -141,4 +141,7 @@ export enum EnvVar {
* `getAnalysisKey`, but can also be set manually for testing and non-standard applications.
*/
ANALYSIS_KEY = "CODEQL_ACTION_ANALYSIS_KEY",
/** Used by Code Scanning Risk Assessment to communicate the assessment ID to the CodeQL Action. */
RISK_ASSESSMENT_ID = "CODEQL_ACTION_RISK_ASSESSMENT_ID",
}
+70 -5
View File
@@ -148,11 +148,64 @@ export function setupActionsVars(tempDir: string, toolsDir: string) {
process.env["GITHUB_EVENT_NAME"] = "push";
}
type LogLevel = "debug" | "info" | "warning" | "error";
export interface LoggedMessage {
type: "debug" | "info" | "warning" | "error";
type: LogLevel;
message: string | Error;
}
/**
 * A `Logger` implementation for tests which records all logged messages and
 * group operations so that assertions can be made about them.
 */
export class RecordingLogger implements Logger {
  /** All messages that have been logged, in order. */
  messages: LoggedMessage[] = [];

  /** The names of all groups that have been started, in order. */
  groups: string[] = [];

  /** The names of groups that have been started but not yet ended. */
  unfinishedGroups: Set<string> = new Set();

  /** Stack of currently open groups; the innermost group is last. */
  private openGroups: string[] = [];

  constructor(private readonly logToConsole: boolean = true) {}

  /** Records `message` at `level`, optionally echoing it to the console. */
  private addMessage(level: LogLevel, message: string | Error): void {
    this.messages.push({ type: level, message });

    if (this.logToConsole) {
      // eslint-disable-next-line no-console
      console.debug(message);
    }
  }

  isDebug() {
    return true;
  }

  debug(message: string) {
    this.addMessage("debug", message);
  }

  info(message: string) {
    this.addMessage("info", message);
  }

  warning(message: string | Error) {
    this.addMessage("warning", message);
  }

  error(message: string | Error) {
    this.addMessage("error", message);
  }

  startGroup(name: string) {
    this.groups.push(name);
    this.openGroups.push(name);
    this.unfinishedGroups.add(name);
  }

  endGroup() {
    // Pop the innermost open group so that nested groups are tracked
    // correctly. The previous implementation only remembered the most
    // recently started group, so ending nested groups left the outer
    // group(s) permanently marked as unfinished.
    const name = this.openGroups.pop();
    if (name !== undefined) {
      this.unfinishedGroups.delete(name);
    }
  }
}
export function getRecordingLogger(
messages: LoggedMessage[],
{ logToConsole }: { logToConsole?: boolean } = { logToConsole: true },
@@ -197,14 +250,26 @@ export function checkExpectedLogMessages(
messages: LoggedMessage[],
expectedMessages: string[],
) {
const missingMessages: string[] = [];
for (const expectedMessage of expectedMessages) {
t.assert(
messages.some(
if (
!messages.some(
(msg) =>
typeof msg.message === "string" &&
msg.message.includes(expectedMessage),
),
`Expected '${expectedMessage}' in the logger output, but didn't find it in:\n ${messages.map((m) => ` - '${m.message}'`).join("\n")}`,
)
) {
missingMessages.push(expectedMessage);
}
}
if (missingMessages.length > 0) {
const listify = (lines: string[]) =>
lines.map((m) => ` - '${m}'`).join("\n");
t.fail(
`Expected\n\n${listify(missingMessages)}\n\nin the logger output, but didn't find it in:\n\n${messages.map((m) => ` - '${m.message}'`).join("\n")}`,
);
}
}
+70 -25
View File
@@ -12,6 +12,7 @@ import * as api from "./api-client";
import { getRunnerLogger, Logger } from "./logging";
import { setupTests } from "./testing-utils";
import * as uploadLib from "./upload-lib";
import { UploadPayload } from "./upload-lib/types";
import { GitHubVariant, initializeEnvironment, withTmpDir } from "./util";
setupTests(test);
@@ -128,11 +129,21 @@ test("finding SARIF files", async (t) => {
"file",
);
// add some `.quality.sarif` files that should be ignored, unless we look for them specifically
fs.writeFileSync(path.join(tmpDir, "a.quality.sarif"), "");
fs.writeFileSync(path.join(tmpDir, "dir1", "b.quality.sarif"), "");
// add some non-Code Scanning files that should be ignored, unless we look for them specifically
for (const analysisKind of analyses.supportedAnalysisKinds) {
if (analysisKind === AnalysisKind.CodeScanning) continue;
const expectedSarifFiles = [
const analysis = analyses.getAnalysisConfig(analysisKind);
fs.writeFileSync(path.join(tmpDir, `a${analysis.sarifExtension}`), "");
fs.writeFileSync(
path.join(tmpDir, "dir1", `b${analysis.sarifExtension}`),
"",
);
}
const expectedSarifFiles: Partial<Record<AnalysisKind, string[]>> = {};
expectedSarifFiles[AnalysisKind.CodeScanning] = [
path.join(tmpDir, "a.sarif"),
path.join(tmpDir, "b.sarif"),
path.join(tmpDir, "dir1", "d.sarif"),
@@ -143,18 +154,24 @@ test("finding SARIF files", async (t) => {
CodeScanning.sarifPredicate,
);
t.deepEqual(sarifFiles, expectedSarifFiles);
t.deepEqual(sarifFiles, expectedSarifFiles[AnalysisKind.CodeScanning]);
const expectedQualitySarifFiles = [
path.join(tmpDir, "a.quality.sarif"),
path.join(tmpDir, "dir1", "b.quality.sarif"),
];
const qualitySarifFiles = uploadLib.findSarifFilesInDir(
tmpDir,
CodeQuality.sarifPredicate,
);
for (const analysisKind of analyses.supportedAnalysisKinds) {
if (analysisKind === AnalysisKind.CodeScanning) continue;
t.deepEqual(qualitySarifFiles, expectedQualitySarifFiles);
const analysis = analyses.getAnalysisConfig(analysisKind);
expectedSarifFiles[analysisKind] = [
path.join(tmpDir, `a${analysis.sarifExtension}`),
path.join(tmpDir, "dir1", `b${analysis.sarifExtension}`),
];
const foundSarifFiles = uploadLib.findSarifFilesInDir(
tmpDir,
analysis.sarifPredicate,
);
t.deepEqual(foundSarifFiles, expectedSarifFiles[analysisKind]);
}
const groupedSarifFiles = await uploadLib.getGroupedSarifFilePaths(
getRunnerLogger(true),
@@ -162,16 +179,31 @@ test("finding SARIF files", async (t) => {
);
t.not(groupedSarifFiles, undefined);
t.not(groupedSarifFiles[AnalysisKind.CodeScanning], undefined);
t.not(groupedSarifFiles[AnalysisKind.CodeQuality], undefined);
t.deepEqual(
groupedSarifFiles[AnalysisKind.CodeScanning],
expectedSarifFiles,
);
t.deepEqual(
groupedSarifFiles[AnalysisKind.CodeQuality],
expectedQualitySarifFiles,
for (const analysisKind of analyses.supportedAnalysisKinds) {
t.not(groupedSarifFiles[analysisKind], undefined);
t.deepEqual(
groupedSarifFiles[analysisKind],
expectedSarifFiles[analysisKind],
);
}
});
});
test("getGroupedSarifFilePaths - Risk Assessment files", async (t) => {
  await withTmpDir(async (tmpDir) => {
    const sarifPath = path.join(tmpDir, "a.csra.sarif");
    fs.writeFileSync(sarifPath, "");

    const grouped = await uploadLib.getGroupedSarifFilePaths(
      getRunnerLogger(true),
      sarifPath,
    );

    t.not(grouped, undefined);
    // Only the Risk Assessment group should be populated for a `.csra.sarif`
    // file; the other analysis kinds must not claim it.
    t.is(grouped[AnalysisKind.CodeScanning], undefined);
    t.is(grouped[AnalysisKind.CodeQuality], undefined);
    t.not(grouped[AnalysisKind.RiskAssessment], undefined);
    t.deepEqual(grouped[AnalysisKind.RiskAssessment], [sarifPath]);
  });
});
@@ -188,6 +220,7 @@ test("getGroupedSarifFilePaths - Code Quality file", async (t) => {
t.not(groupedSarifFiles, undefined);
t.is(groupedSarifFiles[AnalysisKind.CodeScanning], undefined);
t.not(groupedSarifFiles[AnalysisKind.CodeQuality], undefined);
t.is(groupedSarifFiles[AnalysisKind.RiskAssessment], undefined);
t.deepEqual(groupedSarifFiles[AnalysisKind.CodeQuality], [sarifPath]);
});
});
@@ -205,6 +238,7 @@ test("getGroupedSarifFilePaths - Code Scanning file", async (t) => {
t.not(groupedSarifFiles, undefined);
t.not(groupedSarifFiles[AnalysisKind.CodeScanning], undefined);
t.is(groupedSarifFiles[AnalysisKind.CodeQuality], undefined);
t.is(groupedSarifFiles[AnalysisKind.RiskAssessment], undefined);
t.deepEqual(groupedSarifFiles[AnalysisKind.CodeScanning], [sarifPath]);
});
});
@@ -222,6 +256,7 @@ test("getGroupedSarifFilePaths - Other file", async (t) => {
t.not(groupedSarifFiles, undefined);
t.not(groupedSarifFiles[AnalysisKind.CodeScanning], undefined);
t.is(groupedSarifFiles[AnalysisKind.CodeQuality], undefined);
t.is(groupedSarifFiles[AnalysisKind.RiskAssessment], undefined);
t.deepEqual(groupedSarifFiles[AnalysisKind.CodeScanning], [sarifPath]);
});
});
@@ -875,7 +910,15 @@ function createMockSarif(id?: string, tool?: string) {
function uploadPayloadFixtures(analysis: analyses.AnalysisConfig) {
const mockData = {
payload: { sarif: "base64data", commit_sha: "abc123" },
payload: {
commit_oid: "abc123",
ref: "ref",
sarif: "base64data",
workflow_run_id: 1,
workflow_run_attempt: 1,
checkout_uri: "uri",
tool_names: ["codeql"],
} satisfies UploadPayload,
owner: "test-owner",
repo: "test-repo",
response: {
@@ -907,7 +950,9 @@ function uploadPayloadFixtures(analysis: analyses.AnalysisConfig) {
};
}
for (const analysis of [CodeScanning, CodeQuality]) {
for (const analysisKind of analyses.supportedAnalysisKinds) {
const analysis = analyses.getAnalysisConfig(analysisKind);
test(`uploadPayload on ${analysis.name} uploads successfully`, async (t) => {
const { upload, requestStub, mockData } = uploadPayloadFixtures(analysis);
requestStub
+18 -15
View File
@@ -21,6 +21,7 @@ import * as gitUtils from "./git-utils";
import { initCodeQL } from "./init";
import { Logger } from "./logging";
import { getRepositoryNwo, RepositoryNwo } from "./repository";
import { BasePayload, UploadPayload } from "./upload-lib/types";
import * as util from "./util";
import {
ConfigurationError,
@@ -326,7 +327,7 @@ function getAutomationID(
* This is exported for testing purposes only.
*/
export async function uploadPayload(
payload: any,
payload: BasePayload,
repositoryNwo: RepositoryNwo,
logger: Logger,
analysis: analyses.AnalysisConfig,
@@ -618,8 +619,8 @@ export function buildPayload(
environment: string | undefined,
toolNames: string[],
mergeBaseCommitOid: string | undefined,
) {
const payloadObj = {
): UploadPayload {
const payloadObj: UploadPayload = {
commit_oid: commitOid,
ref,
analysis_key: analysisKey,
@@ -847,18 +848,20 @@ export async function uploadPostProcessedFiles(
const zippedSarif = zlib.gzipSync(sarifPayload).toString("base64");
const checkoutURI = url.pathToFileURL(checkoutPath).href;
const payload = buildPayload(
await gitUtils.getCommitOid(checkoutPath),
await gitUtils.getRef(),
postProcessingResults.analysisKey,
util.getRequiredEnvParam("GITHUB_WORKFLOW"),
zippedSarif,
actionsUtil.getWorkflowRunID(),
actionsUtil.getWorkflowRunAttempt(),
checkoutURI,
postProcessingResults.environment,
toolNames,
await gitUtils.determineBaseBranchHeadCommitOid(),
const payload = uploadTarget.transformPayload(
buildPayload(
await gitUtils.getCommitOid(checkoutPath),
await gitUtils.getRef(),
postProcessingResults.analysisKey,
util.getRequiredEnvParam("GITHUB_WORKFLOW"),
zippedSarif,
actionsUtil.getWorkflowRunID(),
actionsUtil.getWorkflowRunAttempt(),
checkoutURI,
postProcessingResults.environment,
toolNames,
await gitUtils.determineBaseBranchHeadCommitOid(),
),
);
// Log some useful debug info about the info
+45
View File
@@ -0,0 +1,45 @@
/**
 * Represents the minimum, common payload for SARIF upload endpoints that we support.
 */
export interface BasePayload {
  /** The gzipped contents of a SARIF file. */
  sarif: string;
}

/**
 * Represents the payload expected for Code Scanning and Code Quality SARIF uploads.
 */
export interface UploadPayload extends BasePayload {
  /** The SHA of the commit that was analysed. */
  commit_oid: string;
  /** The ref that was analysed. */
  ref: string;
  /** The analysis key that identifies the analysis. */
  analysis_key?: string;
  /** The name of the analysis. */
  analysis_name?: string;
  /** The ID of the workflow run that performed the analysis. */
  workflow_run_id: number;
  /** The attempt number of the workflow run that performed the analysis. */
  workflow_run_attempt: number;
  /** The URI where the repository was checked out. */
  checkout_uri: string;
  /** The matrix value. */
  environment?: string;
  /** A string representation of when the analysis was started. */
  started_at?: string;
  /** The names of the tools that performed the analysis. */
  tool_names: string[];
  /** For a pull request, the ref of the base the PR is targeting. */
  base_ref?: string;
  /** For a pull request, the commit SHA of the merge base. */
  base_sha?: string;
}

/**
 * Represents the payload expected for Code Scanning Risk Assessment SARIF uploads.
 */
export interface AssessmentPayload extends BasePayload {
  /** The ID of the assessment that the SARIF belongs to. */
  assessment_id: number;
}