diff --git a/.github/actions/systemtests/generate-matrix/pipelines.yaml b/.github/actions/systemtests/generate-matrix/pipelines.yaml index cbbcc3ca29c..93cbba7c4e3 100644 --- a/.github/actions/systemtests/generate-matrix/pipelines.yaml +++ b/.github/actions/systemtests/generate-matrix/pipelines.yaml @@ -392,3 +392,26 @@ pipelines: strimzi_rbac_scope: "CLUSTER" cluster_operator_install_type: "yaml" parallel: 4 + ################### + ### Performance ### + ################### + # x86-64 + - agent: "oracle-vm-8cpu-32gb-x86-64" + arch: "amd64" + pipeline: "performance" + profile: "performance" + timeout: 180 + strimzi_feature_gates: "" + strimzi_rbac_scope: "CLUSTER" + cluster_operator_install_type: "yaml" + parallel: 1 + # arm64 + - agent: "oracle-vm-8cpu-32gb-arm64" + arch: "arm64" + pipeline: "performance" + profile: "performance" + timeout: 180 + strimzi_feature_gates: "" + strimzi_rbac_scope: "CLUSTER" + cluster_operator_install_type: "yaml" + parallel: 1 diff --git a/.github/actions/systemtests/run-perf-report/action.yml b/.github/actions/systemtests/run-perf-report/action.yml new file mode 100644 index 00000000000..6e6eb80e9d4 --- /dev/null +++ b/.github/actions/systemtests/run-perf-report/action.yml @@ -0,0 +1,64 @@ +name: Run Performance Report +description: Downloads artifacts, generates performance report, and posts results +inputs: + artifacts-pattern: + description: 'Pattern for performance artifacts to download' + required: false + default: 'performance-results-*' + artifacts-path: + description: 'Path where artifacts should be downloaded' + required: false + default: 'systemtest/target/performance-artifacts' +runs: + using: composite + steps: + - name: Download performance results + uses: actions/download-artifact@v4 + with: + pattern: ${{ inputs.artifacts-pattern }} + path: ${{ inputs.artifacts-path }} + # Each artifact gets its own subdirectory (not merged) to preserve arch-specific results + + - name: List downloaded artifacts + shell: bash + run: | + echo "Downloaded artifacts:" + find "${{ inputs.artifacts-path }}" -type f | head -n 20 + + - name: Generate performance report + id: generate_report + uses: actions/github-script@v7 + env: + PERF_DIR: ${{ inputs.artifacts-path }} + with: + github-token: ${{ github.token }} + script: | + const { generatePerformanceReport } = require('./.github/actions/systemtests/run-perf-report/generate-report.js'); + const perfDir = process.env.PERF_DIR || 'systemtest/target/performance'; + const result = generatePerformanceReport(perfDir, core); + core.setOutput('has_results', result.has_results); + core.setOutput('summary', result.summary); + core.setOutput('timestamp', result.timestamp); + + - name: Add performance report comment + if: ${{ steps.generate_report.outputs.has_results == 'true' }} + uses: ./.github/actions/utils/add-comment + with: + commentMessage: | + ${{ steps.generate_report.outputs.summary }} + + - name: Add performance report to job summary + if: ${{ steps.generate_report.outputs.has_results == 'true' }} + shell: bash + env: + SUMMARY_CONTENT: ${{ steps.generate_report.outputs.summary }} + run: | + echo "$SUMMARY_CONTENT" >> "$GITHUB_STEP_SUMMARY" + + - name: No results warning + if: ${{ steps.generate_report.outputs.has_results == 'false' }} + shell: bash + env: + WARNING_MESSAGE: "No performance results found in artifacts" + run: | + echo "::warning::$WARNING_MESSAGE" \ No newline at end of file diff --git a/.github/actions/systemtests/run-perf-report/generate-report.js 
b/.github/actions/systemtests/run-perf-report/generate-report.js new file mode 100644 index 00000000000..97434a1a933 --- /dev/null +++ b/.github/actions/systemtests/run-perf-report/generate-report.js @@ -0,0 +1,455 @@ +const fs = require('fs'); +const path = require('path'); + +/** + * Map agent name to architecture + */ +function getArchFromAgent(agentName) { + if (agentName.includes('arm')) { + return 'arm64'; + } + return 'amd64'; +} + +/** + * Detect artifact directories (performance-results-*) + */ +function findArtifactDirs(baseDir) { + if (!fs.existsSync(baseDir)) { + console.warn(`Performance directory not found: ${baseDir}`); + return []; + } + + const entries = fs.readdirSync(baseDir, { withFileTypes: true }); + const artifactDirs = entries + .filter(entry => entry.isDirectory() && entry.name.startsWith('performance-results-')) + .map(entry => ({ + name: entry.name, + path: path.join(baseDir, entry.name), + arch: getArchFromAgent(entry.name) + })); + + return artifactDirs; +} + +/** + * Find the timestamped results directory within an artifact + */ +function findTimestampedResultsDir(baseDir) { + if (!fs.existsSync(baseDir)) { + console.warn(`Performance directory not found: ${baseDir}`); + return null; + } + + const entries = fs.readdirSync(baseDir, { withFileTypes: true }); + const timestampDirs = entries + .filter(entry => entry.isDirectory()) + .map(entry => entry.name) + .sort() + .reverse(); + + if (timestampDirs.length === 0) { + console.warn(`No timestamp directories found in ${baseDir}`); + return null; + } + + return path.join(baseDir, timestampDirs[0]); +} + +/** + * Read markdown content from results-table.md file + */ +function readResultsMarkdown(componentDir) { + const mdPath = path.join(componentDir, 'results-table.md'); + if (!fs.existsSync(mdPath)) { + return null; + } + return fs.readFileSync(mdPath, 'utf8'); +} + +/** + * Parse markdown content to extract use cases with their tables + * Returns array of { useCase, config, header, rows } + */ +function parseMarkdownContent(content) { + if (!content) return []; + + const lines = content.trim().split('\n'); + const useCases = []; + let currentUseCase = null; + let inConfig = false; + let inResults = false; + let configLines = []; + let tableLines = []; + + for (let i = 0; i < lines.length; i++) { + const line = lines[i]; + + if (line.startsWith('**Use Case:**')) { + // Save previous use case if exists + if (currentUseCase) { + useCases.push(buildUseCaseData(currentUseCase, configLines, tableLines)); + } + currentUseCase = line.replace('**Use Case:**', '').trim(); + inConfig = false; + inResults = false; + configLines = []; + tableLines = []; + } else if (line.startsWith('**Configuration:**')) { + inConfig = true; + inResults = false; + } else if (line.startsWith('**Results:**')) { + inConfig = false; + inResults = true; + } else if (inConfig && line.startsWith('- ')) { + configLines.push(line.substring(2)); + } else if (inResults && line.startsWith('|')) { + tableLines.push(line); + } + } + + // Save last use case + if (currentUseCase) { + useCases.push(buildUseCaseData(currentUseCase, configLines, tableLines)); + } + + return useCases; +} + +/** + * Build use case data object from parsed lines + */ +function buildUseCaseData(useCase, configLines, tableLines) { + // Filter out separator lines and parse table + const dataLines = tableLines.filter(line => !line.match(/^\|[-:]+\|/)); + + let header = []; + let rows = []; + + if (dataLines.length > 0) { + header = dataLines[0].split('|').slice(1, -1).map(col => 
col.trim()); + for (let i = 1; i < dataLines.length; i++) { + const values = dataLines[i].split('|').slice(1, -1).map(col => col.trim()); + if (values.length === header.length) { + rows.push(values); + } + } + } + + return { + useCase, + config: configLines, + header, + rows + }; +} + +/** + * Format timestamp from directory name (yyyy-MM-dd-HH-mm-ss) to readable format + */ +function formatTimestamp(timestamp) { + const parts = timestamp.split('-'); + if (parts.length === 6) { + const [year, month, day, hour, minute] = parts; + return `${year}-${month}-${day} ${hour}:${minute}`; + } + return timestamp; +} + +/** + * Merge tables from multiple architectures + * For multi-arch, combines columns with architecture suffixes + */ +function mergeArchTables(archResults) { + const archList = Object.keys(archResults).sort(); + + if (archList.length === 1) { + // Single architecture - return as-is with proper formatting + const data = archResults[archList[0]]; + return { + header: data.header, + rows: data.rows, + merged: false + }; + } + + // Multi-architecture merge + const firstArch = archList[0]; + const baseData = archResults[firstArch]; + + // Identify identifier columns (# or columns that don't have metrics) + // Heuristic: first column is usually row number, columns with "IN:" are identifiers + const identifierIndices = []; + const metricIndices = []; + + baseData.header.forEach((col, idx) => { + if (col === '#' || col.startsWith('IN:')) { + identifierIndices.push(idx); + } else { + metricIndices.push(idx); + } + }); + + // Build merged header + const mergedHeader = identifierIndices.map(idx => baseData.header[idx]); + metricIndices.forEach(idx => { + archList.forEach(arch => { + mergedHeader.push(`${baseData.header[idx]} [${arch.toUpperCase()}]`); + }); + }); + + // Build merged rows + const mergedRows = []; + for (let rowIdx = 0; rowIdx < baseData.rows.length; rowIdx++) { + const newRow = identifierIndices.map(idx => baseData.rows[rowIdx][idx]); + + metricIndices.forEach(idx => { + archList.forEach(arch => { + const archData = archResults[arch]; + const value = archData.rows[rowIdx]?.[idx] || 'N/A'; + newRow.push(value); + }); + }); + + mergedRows.push(newRow); + } + + return { + header: mergedHeader, + rows: mergedRows, + merged: true + }; +} + +/** + * Generate markdown table from header and rows + */ +function generateMarkdownTable(header, rows) { + const lines = []; + + // Header row + lines.push('| ' + header.join(' | ') + ' |'); + + // Separator row + lines.push('|' + header.map(() => '---').join('|') + '|'); + + // Data rows + rows.forEach(row => { + lines.push('| ' + row.join(' | ') + ' |'); + }); + + return lines.join('\n'); +} + +/** + * Generate markdown summary for multiple architectures + */ +function generateMarkdownSummary(allResults) { + const lines = []; + + lines.push('## Performance Test Results'); + lines.push(''); + + // Get timestamp + const allArchs = Object.keys(allResults).sort(); + const timestamps = new Set(); + allArchs.forEach(arch => { + if (allResults[arch].timestamp) { + timestamps.add(allResults[arch].timestamp); + } + }); + + if (timestamps.size > 0) { + const timestamp = Array.from(timestamps)[0]; + lines.push(`**Test Run:** \`${formatTimestamp(timestamp)}\``); + lines.push(''); + } + + // Collect all operators + const operators = new Set(); + allArchs.forEach(arch => { + Object.keys(allResults[arch].operators || {}).forEach(op => operators.add(op)); + }); + + // Generate report for each operator + for (const operatorName of operators) { + const title = 
operatorName.replace(/-/g, ' ').replace(/\b\w/g, c => c.toUpperCase()); + lines.push(`## ${title}`); + lines.push(''); + + // Group results by use case across architectures + const useCasesByArch = new Map(); + + for (const arch of allArchs) { + const operatorData = allResults[arch].operators?.[operatorName]; + if (operatorData?.useCases) { + operatorData.useCases.forEach(uc => { + if (!useCasesByArch.has(uc.useCase)) { + useCasesByArch.set(uc.useCase, {}); + } + useCasesByArch.get(uc.useCase)[arch] = uc; + }); + } + } + + // For multi-arch, skip use cases that don't have data from all architectures + for (const [useCase, archData] of useCasesByArch) { + const numArchsWithData = Object.keys(archData).length; + if (allArchs.length > 1 && numArchsWithData < allArchs.length) { + console.warn(`Skipping ${operatorName}/${useCase}: only ${numArchsWithData} of ${allArchs.length} architectures have data`); + continue; + } + + lines.push(`**Use Case:** ${useCase}`); + lines.push(''); + + // Add configuration from first architecture + const firstArchData = Object.values(archData)[0]; + if (firstArchData.config && firstArchData.config.length > 0) { + lines.push('**Configuration:**'); + firstArchData.config.forEach(cfg => lines.push(`- ${cfg}`)); + lines.push(''); + } + + lines.push('**Results:**'); + lines.push(''); + + // Merge tables if multi-arch + const tableData = mergeArchTables(archData); + lines.push(generateMarkdownTable(tableData.header, tableData.rows)); + lines.push(''); + } + + if (useCasesByArch.size === 0) { + lines.push('_No results available_'); + lines.push(''); + } + } + + return lines.join('\n'); +} + +/** + * Main function to generate performance report + * @param {string} perfDir - Directory containing performance results + * @param {object} core - GitHub Actions core object for logging + * @returns {object} - Object with has_results, summary, and timestamp + */ +function generatePerformanceReport(perfDir, core) { + try { + const artifactDirs = findArtifactDirs(perfDir); + + let allResults = {}; + let hasResults = false; + let commonTimestamp = ''; + + if (artifactDirs.length > 0) { + core.info(`Found ${artifactDirs.length} artifact directories`); + + for (const artifactDir of artifactDirs) { + core.info(`Processing artifact: ${artifactDir.name} (${artifactDir.arch})`); + + const timestampedDir = findTimestampedResultsDir(artifactDir.path); + if (!timestampedDir) { + core.warning(`No results found in ${artifactDir.name}`); + continue; + } + + const timestamp = path.basename(timestampedDir); + if (!commonTimestamp) { + commonTimestamp = timestamp; + } + + const results = { + timestamp, + operators: {} + }; + + // Parse topic-operator results + const topicOpDir = path.join(timestampedDir, 'topic-operator'); + const topicOpMd = readResultsMarkdown(topicOpDir); + if (topicOpMd) { + results.operators['topic-operator'] = { + useCases: parseMarkdownContent(topicOpMd) + }; + hasResults = true; + } + + // Parse user-operator results + const userOpDir = path.join(timestampedDir, 'user-operator'); + const userOpMd = readResultsMarkdown(userOpDir); + if (userOpMd) { + results.operators['user-operator'] = { + useCases: parseMarkdownContent(userOpMd) + }; + hasResults = true; + } + + allResults[artifactDir.arch] = results; + } + } else { + // Fallback for single directory (backward compatibility) + core.info('No artifact directories found, checking for direct results'); + const timestampedDir = findTimestampedResultsDir(perfDir); + + if (timestampedDir) { + const timestamp = 
path.basename(timestampedDir);
+        commonTimestamp = timestamp;
+        core.info(`Found performance results: ${timestamp}`);
+
+        const results = {
+          timestamp,
+          operators: {}
+        };
+
+        const topicOpDir = path.join(timestampedDir, 'topic-operator');
+        const topicOpMd = readResultsMarkdown(topicOpDir);
+        if (topicOpMd) {
+          results.operators['topic-operator'] = {
+            useCases: parseMarkdownContent(topicOpMd)
+          };
+          hasResults = true;
+        }
+
+        const userOpDir = path.join(timestampedDir, 'user-operator');
+        const userOpMd = readResultsMarkdown(userOpDir);
+        if (userOpMd) {
+          results.operators['user-operator'] = {
+            useCases: parseMarkdownContent(userOpMd)
+          };
+          hasResults = true;
+        }
+
+        allResults['amd64'] = results;
+      }
+    }
+
+    if (!hasResults) {
+      return {
+        has_results: 'false',
+        summary: '_No performance results found_',
+        timestamp: ''
+      };
+    }
+
+    const summary = generateMarkdownSummary(allResults);
+
+    core.info('Performance report generated successfully');
+
+    return {
+      has_results: 'true',
+      summary: summary,
+      timestamp: commonTimestamp
+    };
+
+  } catch (error) {
+    core.error(`Error generating performance report: ${error.message}`);
+    return {
+      has_results: 'false',
+      summary: `_Error generating performance report: ${error.message}_`,
+      timestamp: ''
+    };
+  }
+}
+
+module.exports = { generatePerformanceReport };
\ No newline at end of file
diff --git a/.github/docs/README.md b/.github/docs/README.md
index 7d03c6bb8b7..aaaf86e2ce1 100644
--- a/.github/docs/README.md
+++ b/.github/docs/README.md
@@ -144,4 +144,26 @@ Every generated `GITHUB_TOKEN` has only read access to the repo/org without acce
 ## Testing workflows and actions
 Unit and integration tests invoked via [actions-tests.yml](../workflows/actions-tests.yml) workflow. It uses files specified within [tests](../tests) folder and via [act](https://github.com/nektos/act) it tries to execute the actions and check the outputs.
-Currently, we tests `check-permissions`, `generate-matrix`, and `parse-comment` actions.
\ No newline at end of file
+Currently, we test `check-permissions`, `generate-matrix`, and `parse-comment` actions.
+
+### Performance Report Tests
+The performance report generation workflow has test scenarios defined in [tests/scenarios/perf-report.yaml](../tests/scenarios/perf-report.yaml).
+These tests validate the performance report generation for different operator configurations.
+
+> [!IMPORTANT]
+> The test input directories in `.github/tests/inputs/perf-report/` must match the parser type constants defined in `systemtest/src/main/java/io/strimzi/systemtest/performance/PerformanceConstants.java`.
+
+When adding a new operator or component parser type to `PerformanceConstants.java`:
+1. Add the corresponding constant (e.g., `NEW_OPERATOR_PARSER = "new-operator"`)
+2. Create matching test input directories in `.github/tests/inputs/perf-report/` with subdirectories named after the parser type (e.g., `new-operator/`)
+3. Update test scenarios in `.github/tests/scenarios/perf-report.yaml` to include the new operator in test cases
+4. Generate corresponding expected output files in `.github/tests/expected/perf-report/` (see the sketch below for wiring the new component into the report generator)
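+
+Note that `generate-report.js` currently looks up each component directory by name, so adding a new parser type also means extending the report generator. A minimal sketch of the extra lookup block, mirroring the existing `topic-operator`/`user-operator` handling (the `new-operator` name is the hypothetical example from step 1):
+
+```js
+// Sketch only: parse results for the hypothetical "new-operator" parser type
+const newOpDir = path.join(timestampedDir, 'new-operator');
+const newOpMd = readResultsMarkdown(newOpDir);
+if (newOpMd) {
+  results.operators['new-operator'] = {
+    useCases: parseMarkdownContent(newOpMd)
+  };
+  hasResults = true;
+}
+```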
+
+Example directory structure for operators:
+```
+.github/tests/inputs/perf-report/
+└── single-arch-both-operators/
+    └── 2025-11-18-10-30-00/
+        ├── topic-operator/    # matches PerformanceConstants.TOPIC_OPERATOR_PARSER = "topic-operator"
+        └── user-operator/     # matches PerformanceConstants.USER_OPERATOR_PARSER = "user-operator"
+```
\ No newline at end of file
diff --git a/.github/tests/expected/perf-report/multi-arch-both-operators-expected.md b/.github/tests/expected/perf-report/multi-arch-both-operators-expected.md
new file mode 100644
index 00000000000..227a6d204b1
--- /dev/null
+++ b/.github/tests/expected/perf-report/multi-arch-both-operators-expected.md
@@ -0,0 +1,55 @@
+## Performance Test Results
+
+**Test Run:** `2025-11-18-10-30-00`
+
+## Topic Operator
+
+**Use Case:** scalabilityUseCase
+
+**Configuration:**
+- IN: MAX QUEUE SIZE: 2147483647
+- IN: MAX BATCH SIZE (ms): 100
+- IN: MAX BATCH LINGER (ms): 100
+- IN: PROCESS TYPE: TOPIC-CONCURRENT
+
+**Results:**
+
+| # | IN: NUMBER OF TOPICS | IN: NUMBER OF EVENTS | OUT: Reconciliation interval (ms) [AMD64] | OUT: Reconciliation interval (ms) [ARM64] |
+|---|---|---|---|---|
+| 1 | 2 | 8 | 10229 | 10229 |
+| 2 | 32 | 98 | 11505 | 11505 |
+| 3 | 125 | 375 | 42367 | 42367 |
+| 4 | 250 | 750 | 74596 | 74596 |
+
+## User Operator
+
+**Use Case:** scalabilityUseCase
+
+**Configuration:**
+- IN: WORK_QUEUE_SIZE: 1024
+- IN: BATCH_MAXIMUM_BLOCK_SIZE: 100
+- IN: BATCH_MAXIMUM_BLOCK_TIME_MS: 100
+
+**Results:**
+
+| # | IN: NUMBER OF KAFKA USERS | OUT: Reconciliation interval (ms) [AMD64] | OUT: Reconciliation interval (ms) [ARM64] |
+|---|---|---|---|
+| 1 | 10 | 10472 | 10472 |
+| 2 | 100 | 33036 | 33036 |
+| 3 | 200 | 54940 | 54940 |
+| 4 | 500 | 133782 | 133782 |
+
+**Use Case:** latencyUseCase
+
+**Configuration:**
+- IN: WORK_QUEUE_SIZE: 2048
+- IN: BATCH_MAXIMUM_BLOCK_SIZE: 100
+- IN: BATCH_MAXIMUM_BLOCK_TIME_MS: 100
+
+**Results:**
+
+| # | IN: NUMBER OF KAFKA USERS | OUT: Min Latency (ms) [AMD64] | OUT: Min Latency (ms) [ARM64] | OUT: Max Latency (ms) [AMD64] | OUT: Max Latency (ms) [ARM64] | OUT: Average Latency (ms) [AMD64] | OUT: Average Latency (ms) [ARM64] | OUT: P50 Latency (ms) [AMD64] | OUT: P50 Latency (ms) [ARM64] | OUT: P95 Latency (ms) [AMD64] | OUT: P95 Latency (ms) [ARM64] | OUT: P99 Latency (ms) [AMD64] | OUT: P99 Latency (ms) [ARM64] |
+|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
+| 1 | 110 | 12 | 12 | 69 | 69 | 27.78 | 27.78 | 26 | 26 | 39 | 39 | 54 | 54 |
+| 2 | 200 | 11 | 11 | 75 | 75 | 29.93 | 29.93 | 28 | 28 | 48 | 48 | 75 | 75 |
+| 3 | 300 | 10 | 10 | 61 | 61 | 26.0 | 26.0 | 26 | 26 | 41 | 41 | 50 | 50 |
\ No newline at end of file
diff --git a/.github/tests/expected/perf-report/multi-arch-topic-operator-expected.md b/.github/tests/expected/perf-report/multi-arch-topic-operator-expected.md
new file mode 100644
index 00000000000..54269e54a7a
--- /dev/null
+++ b/.github/tests/expected/perf-report/multi-arch-topic-operator-expected.md
@@ -0,0 +1,22 @@
+## Performance Test Results
+
+**Test Run:** `2025-11-18-10-30-00`
+
+## Topic Operator
+
+**Use Case:** scalabilityUseCase
+
+**Configuration:**
+- IN: MAX QUEUE SIZE: 2147483647
+- IN: MAX BATCH SIZE (ms): 100
+- IN: MAX BATCH LINGER (ms): 100
+- IN: PROCESS TYPE: TOPIC-CONCURRENT
+
+**Results:**
+
+| # | IN: NUMBER OF TOPICS | IN: NUMBER OF EVENTS | OUT: Reconciliation interval (ms) [AMD64] | OUT: Reconciliation interval (ms) [ARM64] |
+|---|---|---|---|---|
+| 
1 | 2 | 8 | 10229 | 10229 | +| 2 | 32 | 98 | 11505 | 11505 | +| 3 | 125 | 375 | 42367 | 42367 | +| 4 | 250 | 750 | 74596 | 74596 | diff --git a/.github/tests/expected/perf-report/no-results-expected.md b/.github/tests/expected/perf-report/no-results-expected.md new file mode 100644 index 00000000000..d68bc3ae035 --- /dev/null +++ b/.github/tests/expected/perf-report/no-results-expected.md @@ -0,0 +1 @@ +_No performance results found_ \ No newline at end of file diff --git a/.github/tests/expected/perf-report/single-arch-both-operators-expected.md b/.github/tests/expected/perf-report/single-arch-both-operators-expected.md new file mode 100644 index 00000000000..901b6fe01ba --- /dev/null +++ b/.github/tests/expected/perf-report/single-arch-both-operators-expected.md @@ -0,0 +1,55 @@ +## Performance Test Results + +**Test Run:** `2025-11-18-10-30-00` + +## Topic Operator + +**Use Case:** scalabilityUseCase + +**Configuration:** +- IN: MAX QUEUE SIZE: 2147483647 +- IN: MAX BATCH SIZE (ms): 100 +- IN: MAX BATCH LINGER (ms): 100 +- IN: PROCESS TYPE: TOPIC-CONCURRENT + +**Results:** + +| # | IN: NUMBER OF TOPICS | IN: NUMBER OF EVENTS | OUT: Reconciliation interval (ms) | +|---|---|---|---| +| 1 | 2 | 8 | 10229 | +| 2 | 32 | 98 | 11505 | +| 3 | 125 | 375 | 42367 | +| 4 | 250 | 750 | 74596 | + +## User Operator + +**Use Case:** scalabilityUseCase + +**Configuration:** +- IN: WORK_QUEUE_SIZE: 1024 +- IN: BATCH_MAXIMUM_BLOCK_SIZE: 100 +- IN: BATCH_MAXIMUM_BLOCK_TIME_MS: 100 + +**Results:** + +| # | IN: NUMBER OF KAFKA USERS | OUT: Reconciliation interval (ms) | +|---|---|---| +| 1 | 10 | 10472 | +| 2 | 100 | 33036 | +| 3 | 200 | 54940 | +| 4 | 500 | 133782 | + +**Use Case:** latencyUseCase + +**Configuration:** +- IN: WORK_QUEUE_SIZE: 2048 +- IN: BATCH_MAXIMUM_BLOCK_SIZE: 100 +- IN: BATCH_MAXIMUM_BLOCK_TIME_MS: 100 + +**Results:** + +| # | IN: NUMBER OF KAFKA USERS | OUT: Min Latency (ms) | OUT: Max Latency (ms) | OUT: Average Latency (ms) | OUT: P50 Latency (ms) | OUT: P95 Latency (ms) | OUT: P99 Latency (ms) | +|---|---|---|---|---|---|---|---| +| 1 | 110 | 12 | 69 | 27.78 | 26 | 39 | 54 | +| 2 | 200 | 11 | 75 | 29.93 | 28 | 48 | 75 | +| 3 | 300 | 10 | 61 | 26.0 | 26 | 41 | 50 | diff --git a/.github/tests/inputs/perf-report/multi-arch-both-operators/performance-results-amd64/2025-11-18-10-30-00/topic-operator/results-table.md b/.github/tests/inputs/perf-report/multi-arch-both-operators/performance-results-amd64/2025-11-18-10-30-00/topic-operator/results-table.md new file mode 100644 index 00000000000..737a9afb0a2 --- /dev/null +++ b/.github/tests/inputs/perf-report/multi-arch-both-operators/performance-results-amd64/2025-11-18-10-30-00/topic-operator/results-table.md @@ -0,0 +1,16 @@ +**Use Case:** scalabilityUseCase + +**Configuration:** +- IN: MAX QUEUE SIZE: 2147483647 +- IN: MAX BATCH SIZE (ms): 100 +- IN: MAX BATCH LINGER (ms): 100 +- IN: PROCESS TYPE: TOPIC-CONCURRENT + +**Results:** + +| # | IN: NUMBER OF TOPICS | IN: NUMBER OF EVENTS | OUT: Reconciliation interval (ms) | +|---|---|---|---| +| 1 | 2 | 8 | 10229 | +| 2 | 32 | 98 | 11505 | +| 3 | 125 | 375 | 42367 | +| 4 | 250 | 750 | 74596 | \ No newline at end of file diff --git a/.github/tests/inputs/perf-report/multi-arch-both-operators/performance-results-amd64/2025-11-18-10-30-00/user-operator/results-table.md b/.github/tests/inputs/perf-report/multi-arch-both-operators/performance-results-amd64/2025-11-18-10-30-00/user-operator/results-table.md new file mode 100644 index 00000000000..ff4139f1c33 --- /dev/null +++ 
b/.github/tests/inputs/perf-report/multi-arch-both-operators/performance-results-amd64/2025-11-18-10-30-00/user-operator/results-table.md @@ -0,0 +1,30 @@ +**Use Case:** scalabilityUseCase + +**Configuration:** +- IN: WORK_QUEUE_SIZE: 1024 +- IN: BATCH_MAXIMUM_BLOCK_SIZE: 100 +- IN: BATCH_MAXIMUM_BLOCK_TIME_MS: 100 + +**Results:** + +| # | IN: NUMBER OF KAFKA USERS | OUT: Reconciliation interval (ms) | +|---|---|---| +| 1 | 10 | 10472 | +| 2 | 100 | 33036 | +| 3 | 200 | 54940 | +| 4 | 500 | 133782 | + +**Use Case:** latencyUseCase + +**Configuration:** +- IN: WORK_QUEUE_SIZE: 2048 +- IN: BATCH_MAXIMUM_BLOCK_SIZE: 100 +- IN: BATCH_MAXIMUM_BLOCK_TIME_MS: 100 + +**Results:** + +| # | IN: NUMBER OF KAFKA USERS | OUT: Min Latency (ms) | OUT: Max Latency (ms) | OUT: Average Latency (ms) | OUT: P50 Latency (ms) | OUT: P95 Latency (ms) | OUT: P99 Latency (ms) | +|---|---|---|---|---|---|---|---| +| 1 | 110 | 12 | 69 | 27.78 | 26 | 39 | 54 | +| 2 | 200 | 11 | 75 | 29.93 | 28 | 48 | 75 | +| 3 | 300 | 10 | 61 | 26.0 | 26 | 41 | 50 | \ No newline at end of file diff --git a/.github/tests/inputs/perf-report/multi-arch-both-operators/performance-results-arm64/2025-11-18-10-30-00/topic-operator/results-table.md b/.github/tests/inputs/perf-report/multi-arch-both-operators/performance-results-arm64/2025-11-18-10-30-00/topic-operator/results-table.md new file mode 100644 index 00000000000..737a9afb0a2 --- /dev/null +++ b/.github/tests/inputs/perf-report/multi-arch-both-operators/performance-results-arm64/2025-11-18-10-30-00/topic-operator/results-table.md @@ -0,0 +1,16 @@ +**Use Case:** scalabilityUseCase + +**Configuration:** +- IN: MAX QUEUE SIZE: 2147483647 +- IN: MAX BATCH SIZE (ms): 100 +- IN: MAX BATCH LINGER (ms): 100 +- IN: PROCESS TYPE: TOPIC-CONCURRENT + +**Results:** + +| # | IN: NUMBER OF TOPICS | IN: NUMBER OF EVENTS | OUT: Reconciliation interval (ms) | +|---|---|---|---| +| 1 | 2 | 8 | 10229 | +| 2 | 32 | 98 | 11505 | +| 3 | 125 | 375 | 42367 | +| 4 | 250 | 750 | 74596 | \ No newline at end of file diff --git a/.github/tests/inputs/perf-report/multi-arch-both-operators/performance-results-arm64/2025-11-18-10-30-00/user-operator/results-table.md b/.github/tests/inputs/perf-report/multi-arch-both-operators/performance-results-arm64/2025-11-18-10-30-00/user-operator/results-table.md new file mode 100644 index 00000000000..ff4139f1c33 --- /dev/null +++ b/.github/tests/inputs/perf-report/multi-arch-both-operators/performance-results-arm64/2025-11-18-10-30-00/user-operator/results-table.md @@ -0,0 +1,30 @@ +**Use Case:** scalabilityUseCase + +**Configuration:** +- IN: WORK_QUEUE_SIZE: 1024 +- IN: BATCH_MAXIMUM_BLOCK_SIZE: 100 +- IN: BATCH_MAXIMUM_BLOCK_TIME_MS: 100 + +**Results:** + +| # | IN: NUMBER OF KAFKA USERS | OUT: Reconciliation interval (ms) | +|---|---|---| +| 1 | 10 | 10472 | +| 2 | 100 | 33036 | +| 3 | 200 | 54940 | +| 4 | 500 | 133782 | + +**Use Case:** latencyUseCase + +**Configuration:** +- IN: WORK_QUEUE_SIZE: 2048 +- IN: BATCH_MAXIMUM_BLOCK_SIZE: 100 +- IN: BATCH_MAXIMUM_BLOCK_TIME_MS: 100 + +**Results:** + +| # | IN: NUMBER OF KAFKA USERS | OUT: Min Latency (ms) | OUT: Max Latency (ms) | OUT: Average Latency (ms) | OUT: P50 Latency (ms) | OUT: P95 Latency (ms) | OUT: P99 Latency (ms) | +|---|---|---|---|---|---|---|---| +| 1 | 110 | 12 | 69 | 27.78 | 26 | 39 | 54 | +| 2 | 200 | 11 | 75 | 29.93 | 28 | 48 | 75 | +| 3 | 300 | 10 | 61 | 26.0 | 26 | 41 | 50 | \ No newline at end of file diff --git 
a/.github/tests/inputs/perf-report/multi-arch-topic-operator/performance-results-amd64/2025-11-18-10-30-00/topic-operator/results-table.md b/.github/tests/inputs/perf-report/multi-arch-topic-operator/performance-results-amd64/2025-11-18-10-30-00/topic-operator/results-table.md new file mode 100644 index 00000000000..737a9afb0a2 --- /dev/null +++ b/.github/tests/inputs/perf-report/multi-arch-topic-operator/performance-results-amd64/2025-11-18-10-30-00/topic-operator/results-table.md @@ -0,0 +1,16 @@ +**Use Case:** scalabilityUseCase + +**Configuration:** +- IN: MAX QUEUE SIZE: 2147483647 +- IN: MAX BATCH SIZE (ms): 100 +- IN: MAX BATCH LINGER (ms): 100 +- IN: PROCESS TYPE: TOPIC-CONCURRENT + +**Results:** + +| # | IN: NUMBER OF TOPICS | IN: NUMBER OF EVENTS | OUT: Reconciliation interval (ms) | +|---|---|---|---| +| 1 | 2 | 8 | 10229 | +| 2 | 32 | 98 | 11505 | +| 3 | 125 | 375 | 42367 | +| 4 | 250 | 750 | 74596 | \ No newline at end of file diff --git a/.github/tests/inputs/perf-report/multi-arch-topic-operator/performance-results-arm64/2025-11-18-10-30-00/topic-operator/results-table.md b/.github/tests/inputs/perf-report/multi-arch-topic-operator/performance-results-arm64/2025-11-18-10-30-00/topic-operator/results-table.md new file mode 100644 index 00000000000..737a9afb0a2 --- /dev/null +++ b/.github/tests/inputs/perf-report/multi-arch-topic-operator/performance-results-arm64/2025-11-18-10-30-00/topic-operator/results-table.md @@ -0,0 +1,16 @@ +**Use Case:** scalabilityUseCase + +**Configuration:** +- IN: MAX QUEUE SIZE: 2147483647 +- IN: MAX BATCH SIZE (ms): 100 +- IN: MAX BATCH LINGER (ms): 100 +- IN: PROCESS TYPE: TOPIC-CONCURRENT + +**Results:** + +| # | IN: NUMBER OF TOPICS | IN: NUMBER OF EVENTS | OUT: Reconciliation interval (ms) | +|---|---|---|---| +| 1 | 2 | 8 | 10229 | +| 2 | 32 | 98 | 11505 | +| 3 | 125 | 375 | 42367 | +| 4 | 250 | 750 | 74596 | \ No newline at end of file diff --git a/.github/tests/inputs/perf-report/single-arch-both-operators/2025-11-18-10-30-00/topic-operator/results-table.md b/.github/tests/inputs/perf-report/single-arch-both-operators/2025-11-18-10-30-00/topic-operator/results-table.md new file mode 100644 index 00000000000..737a9afb0a2 --- /dev/null +++ b/.github/tests/inputs/perf-report/single-arch-both-operators/2025-11-18-10-30-00/topic-operator/results-table.md @@ -0,0 +1,16 @@ +**Use Case:** scalabilityUseCase + +**Configuration:** +- IN: MAX QUEUE SIZE: 2147483647 +- IN: MAX BATCH SIZE (ms): 100 +- IN: MAX BATCH LINGER (ms): 100 +- IN: PROCESS TYPE: TOPIC-CONCURRENT + +**Results:** + +| # | IN: NUMBER OF TOPICS | IN: NUMBER OF EVENTS | OUT: Reconciliation interval (ms) | +|---|---|---|---| +| 1 | 2 | 8 | 10229 | +| 2 | 32 | 98 | 11505 | +| 3 | 125 | 375 | 42367 | +| 4 | 250 | 750 | 74596 | \ No newline at end of file diff --git a/.github/tests/inputs/perf-report/single-arch-both-operators/2025-11-18-10-30-00/user-operator/results-table.md b/.github/tests/inputs/perf-report/single-arch-both-operators/2025-11-18-10-30-00/user-operator/results-table.md new file mode 100644 index 00000000000..ff4139f1c33 --- /dev/null +++ b/.github/tests/inputs/perf-report/single-arch-both-operators/2025-11-18-10-30-00/user-operator/results-table.md @@ -0,0 +1,30 @@ +**Use Case:** scalabilityUseCase + +**Configuration:** +- IN: WORK_QUEUE_SIZE: 1024 +- IN: BATCH_MAXIMUM_BLOCK_SIZE: 100 +- IN: BATCH_MAXIMUM_BLOCK_TIME_MS: 100 + +**Results:** + +| # | IN: NUMBER OF KAFKA USERS | OUT: Reconciliation interval (ms) | +|---|---|---| +| 1 | 10 | 10472 | +| 2 | 100 | 33036 | 
+| 3 | 200 | 54940 | +| 4 | 500 | 133782 | + +**Use Case:** latencyUseCase + +**Configuration:** +- IN: WORK_QUEUE_SIZE: 2048 +- IN: BATCH_MAXIMUM_BLOCK_SIZE: 100 +- IN: BATCH_MAXIMUM_BLOCK_TIME_MS: 100 + +**Results:** + +| # | IN: NUMBER OF KAFKA USERS | OUT: Min Latency (ms) | OUT: Max Latency (ms) | OUT: Average Latency (ms) | OUT: P50 Latency (ms) | OUT: P95 Latency (ms) | OUT: P99 Latency (ms) | +|---|---|---|---|---|---|---|---| +| 1 | 110 | 12 | 69 | 27.78 | 26 | 39 | 54 | +| 2 | 200 | 11 | 75 | 29.93 | 28 | 48 | 75 | +| 3 | 300 | 10 | 61 | 26.0 | 26 | 41 | 50 | \ No newline at end of file diff --git a/.github/tests/scenarios/perf-report.yaml b/.github/tests/scenarios/perf-report.yaml new file mode 100644 index 00000000000..ceab01527f6 --- /dev/null +++ b/.github/tests/scenarios/perf-report.yaml @@ -0,0 +1,36 @@ +scenarios: + - id: single-arch-both-operators + description: "Generate performance report for single architecture with both operators" + event: workflow_dispatch + inputs: + performance_dir: ".github/tests/inputs/perf-report/single-arch-both-operators" + expectations: + expected_output_file: ".github/tests/expected/perf-report/single-arch-both-operators-expected.md" + has_results: "true" + + - id: multi-arch-topic-operator + description: "Generate performance report for multiple architectures with topic operator" + event: workflow_dispatch + inputs: + performance_dir: ".github/tests/inputs/perf-report/multi-arch-topic-operator" + expectations: + expected_output_file: ".github/tests/expected/perf-report/multi-arch-topic-operator-expected.md" + has_results: "true" + + - id: multi-arch-both-operators + description: "Generate performance report for multiple architectures with both operators" + event: workflow_dispatch + inputs: + performance_dir: ".github/tests/inputs/perf-report/multi-arch-both-operators" + expectations: + expected_output_file: ".github/tests/expected/perf-report/multi-arch-both-operators-expected.md" + has_results: "true" + + - id: no-results + description: "Handle case when no performance results are found" + event: workflow_dispatch + inputs: + performance_dir: ".github/tests/inputs/perf-report/no-results" + expectations: + expected_output_file: ".github/tests/expected/perf-report/no-results-expected.md" + has_results: "false" diff --git a/.github/tests/workflows/perf-report-template.yaml b/.github/tests/workflows/perf-report-template.yaml new file mode 100644 index 00000000000..53f1ca42fa3 --- /dev/null +++ b/.github/tests/workflows/perf-report-template.yaml @@ -0,0 +1,74 @@ +name: Perf-report template + +on: + workflow_dispatch: + inputs: + performance_dir: + description: "Path to performance results directory" + required: false + default: ".github/tests/inputs/perf-report/single-arch-both-operators" + +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + + # Test the performance report generation step only (mocking download/upload steps) + - id: perf-report + uses: actions/github-script@v7 + env: + PERF_DIR: ${{ github.event.inputs.performance_dir }} + with: + github-token: ${{ github.token }} + script: | + const { generatePerformanceReport } = require('./.github/actions/systemtests/run-perf-report/generate-report.js'); + const perfDir = process.env.PERF_DIR || 'systemtest/target/performance'; + const result = generatePerformanceReport(perfDir, core); + core.setOutput('has_results', result.has_results); + core.setOutput('summary', result.summary); + core.setOutput('timestamp', result.timestamp); + + # Validate the generated 
report
+      - name: Validate performance report
+        shell: bash
+        env:
+          SUMMARY: ${{ steps.perf-report.outputs.summary }}
+          HAS_RESULTS: ${{ steps.perf-report.outputs.has_results }}
+          EXPECTED_OUTPUT_FILE: ${{ env.EXPECTED_OUTPUT_FILE }}
+          EXPECTED_HAS_RESULTS: ${{ env.EXPECTED_HAS_RESULTS }}
+        run: |
+          # SUMMARY and HAS_RESULTS are passed through env (same pattern as SUMMARY_CONTENT in
+          # run-perf-report/action.yml) so a multi-line markdown summary cannot break shell quoting
+          echo "::group::Generated Summary"
+          echo "$SUMMARY"
+          echo "::endgroup::"
+
+          echo "Has Results: $HAS_RESULTS"
+
+          # Validate has_results flag if expectation is set
+          if [[ -n "$EXPECTED_HAS_RESULTS" && "$HAS_RESULTS" != "$EXPECTED_HAS_RESULTS" ]]; then
+            echo "❌ has_results mismatch: expected '$EXPECTED_HAS_RESULTS', got '$HAS_RESULTS'"
+            exit 1
+          fi
+
+          # Compare with expected output file if provided
+          if [[ -n "$EXPECTED_OUTPUT_FILE" ]]; then
+            if [[ ! -f "$EXPECTED_OUTPUT_FILE" ]]; then
+              echo "❌ Expected output file not found: $EXPECTED_OUTPUT_FILE"
+              exit 1
+            fi
+
+            # Remove timestamp line (it varies between runs)
+            ACTUAL_NO_TIMESTAMP=$(echo "$SUMMARY" | grep -v "^\*\*Test Run:\*\*")
+            EXPECTED_NO_TIMESTAMP=$(grep -v "^\*\*Test Run:\*\*" "$EXPECTED_OUTPUT_FILE")
+
+            if ! diff -u <(echo "$EXPECTED_NO_TIMESTAMP") <(echo "$ACTUAL_NO_TIMESTAMP"); then
+              echo "❌ Output does not match expected file: $EXPECTED_OUTPUT_FILE"
+              exit 1
+            fi
+          elif [[ "$HAS_RESULTS" == "true" && -z "$SUMMARY" ]]; then
+            echo "❌ Summary is empty but has_results is true"
+            exit 1
+          fi
+
+          echo "✅ Validation passed"
diff --git a/.github/workflows/actions-tests.yml b/.github/workflows/actions-tests.yml
index e16c6a4ba7e..9fd0821f6ee 100644
--- a/.github/workflows/actions-tests.yml
+++ b/.github/workflows/actions-tests.yml
@@ -377,3 +377,96 @@
           echo "───────────────────────────────────────"
           echo "🎉 All $TOTAL validate-matrix scenarios passed!"
           echo "   Matrix validation logic validated ✅"
+
+  test-perf-report:
+    needs:
+      - lint
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v5
+
+      # Install dependencies
+      - name: Install act
+        run: |
+          # Install act for workflow testing
+          curl -s https://raw.githubusercontent.com/nektos/act/master/install.sh | sudo bash
+          sudo install -m 0755 ./bin/act /usr/local/bin/act
+
+      - name: Install yq
+        uses: ./.github/actions/dependencies/install-yq
+
+      # Test all perf-report scenarios
+      - name: Test perf-report action scenarios
+        run: |
+          set -e
+
+          SCENARIOS_FILE=".github/tests/scenarios/perf-report.yaml"
+          WORKFLOW=".github/tests/workflows/perf-report-template.yaml"
+
+          echo "📊 Running perf-report action tests..."
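+          # Scenario schema (defined in .github/tests/scenarios/perf-report.yaml), read via yq below:
+          #   .scenarios[i].id / .description / .event
+          #   .scenarios[i].inputs.performance_dir            -> input for the template workflow
+          #   .scenarios[i].expectations.expected_output_file -> golden report to diff against
+          #   .scenarios[i].expectations.has_results          -> "true" | "false"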
+ echo "📁 Loading scenarios from: $SCENARIOS_FILE" + + # Get total number of scenarios + TOTAL=$(yq eval '.scenarios | length' "$SCENARIOS_FILE") + echo "📊 Found $TOTAL test scenarios" + echo + + # Initialize overall result tracker + overall_result=true + + # Loop through each scenario + for i in $(seq 0 $((TOTAL - 1))); do + # Extract scenario details using yq + id=$(yq eval ".scenarios[$i].id" "$SCENARIOS_FILE") + description=$(yq eval ".scenarios[$i].description" "$SCENARIOS_FILE") + event=$(yq eval ".scenarios[$i].event" "$SCENARIOS_FILE") + perf_dir=$(yq eval ".scenarios[$i].inputs.performance_dir" "$SCENARIOS_FILE") + expected_file=$(yq eval ".scenarios[$i].expectations.expected_output_file" "$SCENARIOS_FILE") + expected_has_results=$(yq eval ".scenarios[$i].expectations.has_results" "$SCENARIOS_FILE") + + echo "───────────────────────────────────────" + echo "🔍 Scenario $((i + 1))/$TOTAL: $id" + echo " Description: $description" + echo " Event: $event" + echo " Performance dir: $perf_dir" + + # Create a temporary event file with the performance_dir input + EVENT_FILE=$(mktemp) + cat > "$EVENT_FILE" < [!IMPORTANT] +> When modifying performance attributes in test code (e.g., changing the `performanceAttributes` map in `TopicOperatorPerformance.java`, TopicOperatorScalabilityPerformance` and other performance test suites), you must also update the corresponding GitHub Actions (GHA) tests. + +See the [Performance Report Tests section in .github/docs/README.md](../.github/docs/README.md#performance-report-tests) for details on maintaining test input directories and expected outputs that match the constants defined in `PerformanceConstants.java`. + #### 2. Scalability Tests Scalability tests assess how well Strimzi scales as the workload increases and help identify potential bottlenecks in the system. diff --git a/development-docs/systemtests/io.strimzi.systemtest.performance.UserOperatorScalabilityPerformance.md b/development-docs/systemtests/io.strimzi.systemtest.performance.UserOperatorScalabilityPerformance.md index f68f47dc4fd..060b230e977 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.performance.UserOperatorScalabilityPerformance.md +++ b/development-docs/systemtests/io.strimzi.systemtest.performance.UserOperatorScalabilityPerformance.md @@ -22,8 +22,8 @@ | Step | Action | Result | | - | - | - | -| 1. | Deploy Kafka cluster with User Operator configured with more resources to handle load and also non-default `STRIMZI_WORK_QUEUE_SIZE` set to 2048. | Kafka cluster with User Operator is deployed and ready. | -| 2. | For each configured load level (1000 existing users), create N KafkaUsers to establish the load. | N KafkaUsers are created and ready, establishing baseline load on the User Operator. | +| 1. | Deploy Kafka cluster with User Operator configured with more resources to handle load and also non-default `STRIMZI_WORK_QUEUE_SIZE` set to 4096. | Kafka cluster with User Operator is deployed and ready. | +| 2. | For each configured load level (1000, 2000, 3000 existing users), create N KafkaUsers to establish the load. | N KafkaUsers are created and ready, establishing baseline load on the User Operator. | | 3. | Perform 100 individual user modifications sequentially, measuring the latency of each modification. | Each modification latency is recorded independently. | | 4. | Calculate latency statistics: min, max, average, P50, P95, and P99 percentiles from the 100 measurements. 
diff --git a/systemtest/src/main/java/io/strimzi/systemtest/listeners/ExecutionListener.java b/systemtest/src/main/java/io/strimzi/systemtest/listeners/ExecutionListener.java
index deb0582cf62..19d4ff2237b 100644
--- a/systemtest/src/main/java/io/strimzi/systemtest/listeners/ExecutionListener.java
+++ b/systemtest/src/main/java/io/strimzi/systemtest/listeners/ExecutionListener.java
@@ -6,6 +6,7 @@
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import io.strimzi.systemtest.TestTags;
+import io.strimzi.systemtest.performance.TimeHolder;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.junit.jupiter.api.extension.ExtensionContext;
@@ -32,6 +33,7 @@ public void testPlanExecutionStarted(TestPlan plan) {
         LOGGER.info("=======================================================================");
         testPlan = plan;
         printSelectedTestClasses(testPlan);
+        TimeHolder.resetTimestamp();
     }
 
     public void testPlanExecutionFinished(TestPlan testPlan) {
diff --git a/systemtest/src/main/java/io/strimzi/systemtest/performance/TimeHolder.java b/systemtest/src/main/java/io/strimzi/systemtest/performance/TimeHolder.java
new file mode 100644
index 00000000000..8b629d52435
--- /dev/null
+++ b/systemtest/src/main/java/io/strimzi/systemtest/performance/TimeHolder.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright Strimzi authors.
+ * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
+ */
+package io.strimzi.systemtest.performance;
+
+import java.time.LocalDateTime;
+import java.time.temporal.TemporalAccessor;
+
+/**
+ * Shared timestamp for all performance tests to ensure unified logging across systemtest/target/performance/*.
+ * The timestamp is reset at the start of each test run (including Maven reruns) so each run gets a unique timestamp,
+ * but all tests within that run share the same timestamp.
+ */
+public final class TimeHolder {
+    private TimeHolder() {}
+
+    private static TemporalAccessor actualTime = LocalDateTime.now();
+
+    /**
+     * @return the shared timestamp captured at the start of the current test run
+     */
+    public static TemporalAccessor getActualTime() {
+        return actualTime;
+    }
+
+    /**
+     * Resets the timestamp to the current time. Called by the test execution listener at the start of each test run.
+     */
+    public static void resetTimestamp() {
+        actualTime = LocalDateTime.now();
+    }
+}
\ No newline at end of file
diff --git a/systemtest/src/main/java/io/strimzi/systemtest/performance/gather/schedulers/BaseMetricsCollectionScheduler.java b/systemtest/src/main/java/io/strimzi/systemtest/performance/gather/schedulers/BaseMetricsCollectionScheduler.java
index f70ea4fecd9..56beb1d0d85 100644
--- a/systemtest/src/main/java/io/strimzi/systemtest/performance/gather/schedulers/BaseMetricsCollectionScheduler.java
+++ b/systemtest/src/main/java/io/strimzi/systemtest/performance/gather/schedulers/BaseMetricsCollectionScheduler.java
@@ -109,6 +109,14 @@ public void executeMetricsCollection() {
      * @param unit the time unit of the initial delay and interval.
*/ public void startCollecting(long initialDelay, long interval, TimeUnit unit) { + // Recreate scheduler if it has been shut down + if (this.scheduler.isShutdown()) { + LOGGER.debug("Scheduler was shut down, creating a new one."); + this.scheduler = Executors.newSingleThreadScheduledExecutor(); + // Clear metrics from previous collection cycle when restarting + this.metricsStore.clear(); + } + // Capture the context in the thread where startCollecting is called final ExtensionContext currentContext = KubeResourceManager.get().getTestContext(); diff --git a/systemtest/src/main/java/io/strimzi/systemtest/performance/report/parser/BasePerformanceMetricsParser.java b/systemtest/src/main/java/io/strimzi/systemtest/performance/report/parser/BasePerformanceMetricsParser.java index d4f1a05b7ac..b973db1580d 100644 --- a/systemtest/src/main/java/io/strimzi/systemtest/performance/report/parser/BasePerformanceMetricsParser.java +++ b/systemtest/src/main/java/io/strimzi/systemtest/performance/report/parser/BasePerformanceMetricsParser.java @@ -23,7 +23,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Comparator; -import java.util.HashMap; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Optional; @@ -44,7 +44,7 @@ public abstract class BasePerformanceMetricsParser { public BasePerformanceMetricsParser() { this.parentPath = determineBasePathBasedOnEnvironment(); - this.useCaseExperiments = new HashMap<>(); + this.useCaseExperiments = new LinkedHashMap<>(); } /** @@ -79,6 +79,16 @@ public BasePerformanceMetricsParser() { */ protected abstract String[] getHeadersForUseCase(ExperimentMetrics experimentMetrics); + /** + * Abstract method to get the metric key used for sorting experiments. + * This allows different parsers to specify which metric should be used to sort + * their experiments in ascending order. + * + * @return The metric key to use for sorting (e.g., "IN: NUMBER OF TOPICS"). + * Return null if no sorting is desired. + */ + protected abstract String getSortKey(); + /** * Checks if the current execution context is a test environment. * @@ -140,6 +150,9 @@ private Path findLatestDirectory() throws IOException { protected void parseComponentMetrics(File componentDir) throws IOException { File[] useCaseDirs = componentDir.listFiles(File::isDirectory); if (useCaseDirs != null) { + // Sort use case directories alphabetically + Arrays.sort(useCaseDirs, Comparator.comparing(File::getName)); + for (File useCaseDir : useCaseDirs) { String useCaseName = useCaseDir.getName(); List experimentsList = new ArrayList<>(); @@ -256,9 +269,32 @@ private void processFile(File file, ExperimentMetrics experimentMetrics) throws } } + /** + * Sorts a list of experiments based on the specified sort key. + * + * @param experimentsList The list of experiments to sort. + * @param sortKey The metric key to use for sorting. + */ + private void sortExperiments(List experimentsList, String sortKey) { + if (sortKey != null && !experimentsList.isEmpty()) { + experimentsList.sort(Comparator.comparingDouble(experiment -> { + String value = experiment.getTestMetrics().get(sortKey); + if (value != null) { + try { + return Double.parseDouble(value); + } catch (NumberFormatException e) { + LOGGER.warn("Failed to parse sort key value '{}' for key '{}', using 0 as default", value, sortKey); + return 0.0; + } + } + return 0.0; + })); + } + } + /** * Construct the values of parsed experiments in a formatted table. 
- * This method organizes metrics into rows and columns based on headers and formats the output. + * This method organizes metrics into a clean Markdown table format, showing only varying parameters. * @return A string representation of the formatted table. */ protected String buildResultTable() { @@ -266,39 +302,89 @@ protected String buildResultTable() { // Populate data for each experiment this.useCaseExperiments.forEach((useCaseName, experimentsList) -> { - output.append("Use Case: ") - .append(useCaseName) - .append("\n"); + if (experimentsList.isEmpty()) { + return; + } - final String[] headers = getHeadersForUseCase(experimentsList.get(0)); - final List allRows = new ArrayList<>(); + // Sort experiments by the specified sort key if provided + sortExperiments(experimentsList, getSortKey()); - allRows.add(headers); + output.append("**Use Case:** ").append(useCaseName).append("\n\n"); - // Determine max width for each column - final int[] columnWidths = new int[headers.length]; - for (final String[] row : allRows) { - for (int i = 0; i < row.length; i++) { - columnWidths[i] = Math.max(columnWidths[i], row[i].length()); - } - } + final String[] headers = getHeadersForUseCase(experimentsList.get(0)); + final List allRowsData = new ArrayList<>(); + // Collect all row data int experimentCounter = 1; - output.append(generateColumn(columnWidths)); - for (final ExperimentMetrics experimentMetrics : experimentsList) { - // Assume methods to extract and format metrics correctly are implemented final String[] rowData = extractAndFormatRowData(experimentCounter, experimentMetrics); - allRows.add(rowData); + allRowsData.add(rowData); experimentCounter++; } - allRows.forEach(row -> output.append(generateRow(row, columnWidths))); + // Find which columns have varying values vs fixed values + final List varyingColumns = new ArrayList<>(); + final List fixedColumns = new ArrayList<>(); + + for (int col = 1; col < headers.length; col++) { // Skip "Experiment" column + String firstValue = allRowsData.get(0)[col]; + boolean isVarying = false; + + for (String[] rowData : allRowsData) { + if (!rowData[col].equals(firstValue)) { + isVarying = true; + break; + } + } + + if (isVarying || headers[col].startsWith("OUT:")) { + varyingColumns.add(col); + } else { + fixedColumns.add(col); + } + } + + // Display fixed parameters first (if any) + if (!fixedColumns.isEmpty()) { + output.append("**Configuration:**\n"); + for (int col : fixedColumns) { + String label = headers[col]; + String value = allRowsData.get(0)[col]; + output.append("- ").append(label).append(": ").append(value).append("\n"); + } + output.append("\n"); + } - output.append(generateColumn(columnWidths)); + // Build markdown table for varying parameters + output.append("**Results:**\n\n"); + + // Table header + output.append("| # |"); + for (int col : varyingColumns) { + String label = headers[col]; + output.append(" ").append(label).append(" |"); + } + output.append("\n"); + + // Table separator + output.append("|---|"); + for (int ignored : varyingColumns) { + output.append("---|"); + } + output.append("\n"); + + // Table rows + for (int row = 0; row < allRowsData.size(); row++) { + String[] rowData = allRowsData.get(row); + output.append("| ").append(row + 1).append(" |"); + for (int col : varyingColumns) { + output.append(" ").append(rowData[col]).append(" |"); + } + output.append("\n"); + } + output.append("\n"); }); - // return result table return output.toString(); } @@ -384,7 +470,7 @@ protected double getMaxValueFromList(List values) { /** 
* Writes formatted experiment data to a specific file determined by the parser type. * This method finds the directory named after the {@link ParserType} provided and writes the data - * into a text file named according to the constant {@link PerformanceConstants#RESULTS_TABLE}. + * into a markdown file named according to the constant {@link PerformanceConstants#RESULTS_TABLE}. * This operation is performed in the latest directory returned by {@code findLatestDirectory()}. * * @param parserType The type of parser which determines the directory in which to write the file. @@ -403,7 +489,7 @@ protected void writeToFile(ParserType parserType) throws IOException { .orElseThrow(() -> new IOException("No directory found for parser type: " + parserType.getParserName())); final Path typeSpecificDir = componentDir.toPath(); - final Path file = typeSpecificDir.resolve(PerformanceConstants.RESULTS_TABLE + ".txt"); + final Path file = typeSpecificDir.resolve(PerformanceConstants.RESULTS_TABLE + ".md"); try (final BufferedWriter writer = Files.newBufferedWriter(file, StandardCharsets.UTF_8)) { writer.write(formattedData); diff --git a/systemtest/src/main/java/io/strimzi/systemtest/performance/report/parser/TopicOperatorMetricsParser.java b/systemtest/src/main/java/io/strimzi/systemtest/performance/report/parser/TopicOperatorMetricsParser.java index 357c697acd3..546994a798f 100644 --- a/systemtest/src/main/java/io/strimzi/systemtest/performance/report/parser/TopicOperatorMetricsParser.java +++ b/systemtest/src/main/java/io/strimzi/systemtest/performance/report/parser/TopicOperatorMetricsParser.java @@ -151,4 +151,9 @@ public void parseMetrics() throws IOException { protected void showMetrics() { System.out.println(this.buildResultTable()); } + + @Override + protected String getSortKey() { + return PerformanceConstants.TOPIC_OPERATOR_IN_NUMBER_OF_TOPICS; + } } diff --git a/systemtest/src/main/java/io/strimzi/systemtest/performance/report/parser/UserOperatorMetricsParser.java b/systemtest/src/main/java/io/strimzi/systemtest/performance/report/parser/UserOperatorMetricsParser.java index e2a317bb92d..ddf21d08550 100644 --- a/systemtest/src/main/java/io/strimzi/systemtest/performance/report/parser/UserOperatorMetricsParser.java +++ b/systemtest/src/main/java/io/strimzi/systemtest/performance/report/parser/UserOperatorMetricsParser.java @@ -143,4 +143,9 @@ public void parseMetrics() throws IOException { protected void showMetrics() { System.out.println(this.buildResultTable()); } + + @Override + protected String getSortKey() { + return PerformanceConstants.USER_OPERATOR_IN_NUMBER_OF_KAFKA_USERS; + } } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/performance/TopicOperatorPerformance.java b/systemtest/src/test/java/io/strimzi/systemtest/performance/TopicOperatorPerformance.java index d8f546a97e9..4cf26ec75a5 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/performance/TopicOperatorPerformance.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/performance/TopicOperatorPerformance.java @@ -38,8 +38,6 @@ import org.junit.jupiter.params.provider.MethodSource; import java.io.IOException; -import java.time.LocalDateTime; -import java.time.temporal.TemporalAccessor; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -56,7 +54,6 @@ public class TopicOperatorPerformance extends AbstractST { private TopicOperatorMetricsCollectionScheduler topicOperatorMetricsGatherer; private TestLogCollector logCollector; - protected static final TemporalAccessor ACTUAL_TIME 
= LocalDateTime.now(); protected static final String REPORT_DIRECTORY = "topic-operator"; protected TopicOperatorPerformanceReporter topicOperatorPerformanceReporter = new TopicOperatorPerformanceReporter(); @@ -213,7 +210,7 @@ void testCapacity(String maxBatchSize, String maxBatchLingerMs) throws IOExcepti performanceAttributes.put(PerformanceConstants.METRICS_HISTORY, this.topicOperatorMetricsGatherer.getMetricsStore()); // Map of metrics history - this.topicOperatorPerformanceReporter.logPerformanceData(this.testStorage, performanceAttributes, REPORT_DIRECTORY + "/" + PerformanceConstants.GENERAL_CAPACITY_USE_CASE, ACTUAL_TIME, Environment.PERFORMANCE_DIR); + this.topicOperatorPerformanceReporter.logPerformanceData(this.testStorage, performanceAttributes, REPORT_DIRECTORY + "/" + PerformanceConstants.GENERAL_CAPACITY_USE_CASE, TimeHolder.getActualTime(), Environment.PERFORMANCE_DIR); } } } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/performance/TopicOperatorScalabilityPerformance.java b/systemtest/src/test/java/io/strimzi/systemtest/performance/TopicOperatorScalabilityPerformance.java index f643f3f92a2..751f8efca67 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/performance/TopicOperatorScalabilityPerformance.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/performance/TopicOperatorScalabilityPerformance.java @@ -28,8 +28,6 @@ import org.junit.jupiter.api.Tag; import java.io.IOException; -import java.time.LocalDateTime; -import java.time.temporal.TemporalAccessor; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -41,7 +39,6 @@ @Tag(SCALABILITY) public class TopicOperatorScalabilityPerformance extends AbstractST { - protected static final TemporalAccessor ACTUAL_TIME = LocalDateTime.now(); protected static final String REPORT_DIRECTORY = "topic-operator"; protected TopicOperatorPerformanceReporter topicOperatorPerformanceReporter = new TopicOperatorPerformanceReporter(); @@ -84,7 +81,7 @@ void testScalability() { performanceAttributes.put(PerformanceConstants.OPERATOR_OUT_RECONCILIATION_INTERVAL, reconciliationTimeMs); try { - this.topicOperatorPerformanceReporter.logPerformanceData(this.suiteTestStorage, performanceAttributes, REPORT_DIRECTORY + "/" + PerformanceConstants.GENERAL_SCALABILITY_USE_CASE, ACTUAL_TIME, Environment.PERFORMANCE_DIR); + this.topicOperatorPerformanceReporter.logPerformanceData(this.suiteTestStorage, performanceAttributes, REPORT_DIRECTORY + "/" + PerformanceConstants.GENERAL_SCALABILITY_USE_CASE, TimeHolder.getActualTime(), Environment.PERFORMANCE_DIR); } catch (IOException e) { throw new RuntimeException(e); } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/performance/UserOperatorPerformance.java b/systemtest/src/test/java/io/strimzi/systemtest/performance/UserOperatorPerformance.java index 45d799ed475..1bb022151bf 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/performance/UserOperatorPerformance.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/performance/UserOperatorPerformance.java @@ -37,8 +37,6 @@ import org.junit.jupiter.params.provider.MethodSource; import java.io.IOException; -import java.time.LocalDateTime; -import java.time.temporal.TemporalAccessor; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -49,7 +47,6 @@ public class UserOperatorPerformance extends AbstractST { private static final Logger LOGGER = LogManager.getLogger(UserOperatorPerformance.class); - private static final TemporalAccessor ACTUAL_TIME = 
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/performance/UserOperatorPerformance.java b/systemtest/src/test/java/io/strimzi/systemtest/performance/UserOperatorPerformance.java
index 45d799ed475..1bb022151bf 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/performance/UserOperatorPerformance.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/performance/UserOperatorPerformance.java
@@ -37,8 +37,6 @@
 import org.junit.jupiter.params.provider.MethodSource;
 
 import java.io.IOException;
-import java.time.LocalDateTime;
-import java.time.temporal.TemporalAccessor;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
@@ -49,7 +47,6 @@ public class UserOperatorPerformance extends AbstractST {
 
     private static final Logger LOGGER = LogManager.getLogger(UserOperatorPerformance.class);
-    private static final TemporalAccessor ACTUAL_TIME = LocalDateTime.now();
     private static final String REPORT_DIRECTORY = "user-operator";
@@ -228,7 +225,7 @@ void testCapacity(String controllerThreadPoolSize, String cacheRefreshIntervalMs
             performanceAttributes.put(PerformanceConstants.METRICS_HISTORY, this.userOperatorMetricsGatherer.getMetricsStore()); // Map of metrics history
 
-            this.userOperatorPerformanceReporter.logPerformanceData(this.testStorage, performanceAttributes, REPORT_DIRECTORY + "/" + PerformanceConstants.GENERAL_CAPACITY_USE_CASE, ACTUAL_TIME, Environment.PERFORMANCE_DIR);
+            this.userOperatorPerformanceReporter.logPerformanceData(this.testStorage, performanceAttributes, REPORT_DIRECTORY + "/" + PerformanceConstants.GENERAL_CAPACITY_USE_CASE, TimeHolder.getActualTime(), Environment.PERFORMANCE_DIR);
         }
     }
 }
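Every logPerformanceData(...) call in these suites takes the same argument shape: test storage, attributes map, a "<component>/<use-case>" subpath, the shared run timestamp, and the base performance directory. The reporter itself is outside this excerpt; under those assumptions, and with a hypothetical timestamp format, the target directory would resolve along these lines:

import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.format.DateTimeFormatter;
import java.time.temporal.TemporalAccessor;

// Hypothetical helper, not the project's reporter: derives the per-use-case
// report directory, e.g. <baseDir>/2025-01-01-12-00-00/user-operator/latency.
final class ReportPathSketch {
    static Path useCaseDir(String baseDir, TemporalAccessor runTime, String componentAndUseCase) {
        final String stamp = DateTimeFormatter.ofPattern("yyyy-MM-dd-HH-mm-ss").format(runTime);
        return Paths.get(baseDir, stamp, componentAndUseCase);
    }
}

Because all suites now share one timestamp via TimeHolder, a single run produces one timestamped tree containing every component's results, which is the layout the report generator walks.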
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/performance/UserOperatorScalabilityPerformance.java b/systemtest/src/test/java/io/strimzi/systemtest/performance/UserOperatorScalabilityPerformance.java
index d87f7efcd85..d1b663ce24b 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/performance/UserOperatorScalabilityPerformance.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/performance/UserOperatorScalabilityPerformance.java
@@ -15,8 +15,12 @@
 import io.strimzi.api.kafka.model.user.KafkaUser;
 import io.strimzi.systemtest.AbstractST;
 import io.strimzi.systemtest.Environment;
+import io.strimzi.systemtest.TestConstants;
 import io.strimzi.systemtest.annotations.IsolatedTest;
 import io.strimzi.systemtest.docs.TestDocsLabels;
+import io.strimzi.systemtest.metrics.UserOperatorMetricsComponent;
+import io.strimzi.systemtest.performance.gather.collectors.UserOperatorMetricsCollector;
+import io.strimzi.systemtest.performance.gather.schedulers.UserOperatorMetricsCollectionScheduler;
 import io.strimzi.systemtest.performance.report.UserOperatorPerformanceReporter;
 import io.strimzi.systemtest.performance.report.parser.UserOperatorMetricsParser;
 import io.strimzi.systemtest.performance.utils.UserOperatorPerformanceUtils;
@@ -26,6 +30,7 @@
 import io.strimzi.systemtest.storage.TestStorage;
 import io.strimzi.systemtest.templates.crd.KafkaNodePoolTemplates;
 import io.strimzi.systemtest.templates.crd.KafkaTemplates;
+import io.strimzi.systemtest.templates.specific.ScraperTemplates;
 import io.strimzi.systemtest.utils.kafkaUtils.KafkaUserUtils;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
@@ -34,8 +39,6 @@
 import org.junit.jupiter.api.Tag;
 
 import java.io.IOException;
-import java.time.LocalDateTime;
-import java.time.temporal.TemporalAccessor;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
@@ -56,7 +59,6 @@
 @Tag(SCALABILITY)
 public class UserOperatorScalabilityPerformance extends AbstractST {
 
-    protected static final TemporalAccessor ACTUAL_TIME = LocalDateTime.now();
     protected static final String REPORT_DIRECTORY = "user-operator";
 
     protected UserOperatorPerformanceReporter userOperatorPerformanceReporter = new UserOperatorPerformanceReporter();
@@ -151,7 +153,7 @@ void testScalability() {
         performanceAttributes.put(PerformanceConstants.OPERATOR_OUT_RECONCILIATION_INTERVAL, reconciliationTimeMs);
 
         try {
-            this.userOperatorPerformanceReporter.logPerformanceData(testStorage, performanceAttributes, REPORT_DIRECTORY + "/" + PerformanceConstants.GENERAL_SCALABILITY_USE_CASE, ACTUAL_TIME, Environment.PERFORMANCE_DIR);
+            this.userOperatorPerformanceReporter.logPerformanceData(testStorage, performanceAttributes, REPORT_DIRECTORY + "/" + PerformanceConstants.GENERAL_SCALABILITY_USE_CASE, TimeHolder.getActualTime(), Environment.PERFORMANCE_DIR);
         } catch (IOException e) {
             throw new RuntimeException(e);
         }
@@ -162,8 +164,8 @@ void testScalability() {
     @TestDoc(
         description = @Desc("This test measures user modification latency statistics under different load levels by performing multiple user modifications to understand how response time scales with system load."),
         steps = {
-            @Step(value = "Deploy Kafka cluster with User Operator configured with more resources to handle load and also non-default `STRIMZI_WORK_QUEUE_SIZE` set to 2048.", expected = "Kafka cluster with User Operator is deployed and ready."),
-            @Step(value = "For each configured load level (1000 existing users), create N KafkaUsers to establish the load.", expected = "N KafkaUsers are created and ready, establishing baseline load on the User Operator."),
+            @Step(value = "Deploy Kafka cluster with User Operator configured with more resources to handle the load and a non-default `STRIMZI_WORK_QUEUE_SIZE` set to 4096.", expected = "Kafka cluster with User Operator is deployed and ready."),
+            @Step(value = "For each configured load level (1000, 2000, 3000 existing users), create N KafkaUsers to establish the load.", expected = "N KafkaUsers are created and ready, establishing baseline load on the User Operator."),
             @Step(value = "Perform 100 individual user modifications sequentially, measuring the latency of each modification.", expected = "Each modification latency is recorded independently."),
             @Step(value = "Calculate latency statistics: min, max, average, P50, P95, and P99 percentiles from the 100 measurements.", expected = "Statistical analysis shows how single-user modification latency degrades as system load (number of existing users) increases."),
             @Step(value = "Clean up all users and persist latency metrics to user-operator report directory.", expected = "Namespace is cleaned, latency data is saved showing how responsiveness changes at different load levels.")
@@ -176,14 +178,13 @@ void testScalability() {
     @Tag(SCALABILITY)
     void testLatencyUnderLoad() {
         final TestStorage testStorage = new TestStorage(KubeResourceManager.get().getTestContext());
-        // TODO: after we switch to GHA we are limited to just 1k users (after switch we can add more 2k users ... etc.)
-        final List<Integer> loadLevels = List.of(1000);
+        final List<Integer> loadLevels = List.of(1000, 2000, 3000);
         final int numberOfModifications = 100;
         // default configuration of UO
         final int maxBatchSize = 100;
         final int maxBatchLingerMs = 100;
         // but maxWorkQueueSize must be a bit higher than the default, because otherwise the work queue would be `FULL`
-        final int maxWorkQueueSize = 2048;
+        final int maxWorkQueueSize = 4096;
 
         KubeResourceManager.get().createResourceWithWait(
             KafkaNodePoolTemplates.brokerPoolPersistentStorage(testStorage.getNamespaceName(), testStorage.getBrokerPoolName(), testStorage.getClusterName(), 3).build(),
@@ -191,7 +192,8 @@
         );
 
         KubeResourceManager.get().createResourceWithWait(
-            KafkaTemplates.kafka(testStorage.getNamespaceName(), testStorage.getClusterName(), 3)
+            KafkaTemplates.kafkaMetricsConfigMap(testStorage.getNamespaceName(), testStorage.getClusterName()),
+            KafkaTemplates.kafkaWithMetrics(testStorage.getNamespaceName(), testStorage.getClusterName(), 3)
                 .editSpec()
                     .editKafka()
                         .withNewKafkaAuthorizationSimple()
@@ -200,11 +202,14 @@
                     .editEntityOperator()
                         .editUserOperator()
                             .withReconciliationIntervalMs(10_000L)
+                            // CPU set to 2 cores: ARM64 GHA runners are slower than AMD64, causing timeouts
+                            // with lower limits (750m, 1 CPU) when creating 1k+ users. 2 CPUs work reliably
+                            // on both architectures (~30% slower than unlimited, but ensures consistent results).
                             .withResources(new ResourceRequirementsBuilder()
-                                .addToLimits("memory", new Quantity("768Mi"))
-                                .addToLimits("cpu", new Quantity("750m"))
-                                .addToRequests("memory", new Quantity("768Mi"))
-                                .addToRequests("cpu", new Quantity("750m"))
+                                .addToLimits("memory", new Quantity("1Gi"))
+                                .addToLimits("cpu", new Quantity("2"))
+                                .addToRequests("memory", new Quantity("1Gi"))
+                                .addToRequests("cpu", new Quantity("2"))
                                 .build())
                         .endUserOperator()
                         .editOrNewTemplate()
@@ -225,11 +230,23 @@
                         .endTemplate()
                     .endEntityOperator()
                 .endSpec()
-                .build()
+                .build(),
+            ScraperTemplates.scraperPod(testStorage.getNamespaceName(), testStorage.getScraperName()).build()
         );
 
+        testStorage.addToTestStorage(TestConstants.SCRAPER_POD_KEY,
+            KubeResourceManager.get().kubeClient().listPodsByPrefixInName(testStorage.getNamespaceName(), testStorage.getScraperName()).get(0).getMetadata().getName());
+
         loadLevels.forEach(numberOfExistingUsers -> {
             LatencyMetrics latencyMetrics = null;
+            final UserOperatorMetricsCollector userOperatorCollector = new UserOperatorMetricsCollector.Builder()
+                .withScraperPodName(testStorage.getScraperPodName())
+                .withNamespaceName(testStorage.getNamespaceName())
+                .withComponent(UserOperatorMetricsComponent.create(testStorage.getNamespaceName(), testStorage.getClusterName()))
+                .build();
+
+            final UserOperatorMetricsCollectionScheduler userOperatorMetricsGatherer = UserOperatorMetricsCollectionScheduler.getInstance(userOperatorCollector, "strimzi.io/cluster=" + testStorage.getClusterName());
+            userOperatorMetricsGatherer.startCollecting();
             try {
                 LOGGER.info("Measuring single-user modification latency with {} existing users in the system", numberOfExistingUsers);
                 latencyMetrics = UserOperatorPerformanceUtils.measureLatencyUnderLoad(testStorage, numberOfExistingUsers, numberOfModifications);
@@ -256,8 +273,12 @@
             performanceAttributes.put(PerformanceConstants.OPERATOR_OUT_P95_LATENCY, latencyMetrics.p95());
             performanceAttributes.put(PerformanceConstants.OPERATOR_OUT_P99_LATENCY, latencyMetrics.p99());
+            userOperatorMetricsGatherer.stopCollecting();
+
+            performanceAttributes.put(PerformanceConstants.METRICS_HISTORY, userOperatorMetricsGatherer.getMetricsStore()); // Map of metrics history
+
             try {
-                this.userOperatorPerformanceReporter.logPerformanceData(testStorage, performanceAttributes, REPORT_DIRECTORY + "/" + PerformanceConstants.GENERAL_LATENCY_USE_CASE, ACTUAL_TIME, Environment.PERFORMANCE_DIR);
+                this.userOperatorPerformanceReporter.logPerformanceData(testStorage, performanceAttributes, REPORT_DIRECTORY + "/" + PerformanceConstants.GENERAL_LATENCY_USE_CASE, TimeHolder.getActualTime(), Environment.PERFORMANCE_DIR);
             } catch (IOException e) {
                 throw new RuntimeException(e);
             }
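The TestDoc above promises min, max, average, and P50/P95/P99 percentiles over the 100 recorded modifications per load level. The LatencyMetrics record backing latencyMetrics.p95() and friends is not part of this diff; a self-contained sketch of the statistics it exposes (record shape assumed, nearest-rank percentiles, non-empty input) could look like:

import java.util.Arrays;

// Hypothetical stand-in for the LatencyMetrics record used above.
record LatencyStatsSketch(long min, long max, double avg, long p50, long p95, long p99) {

    static LatencyStatsSketch of(final long[] latenciesMs) {
        final long[] sorted = latenciesMs.clone();
        Arrays.sort(sorted);
        return new LatencyStatsSketch(
            sorted[0],
            sorted[sorted.length - 1],
            Arrays.stream(sorted).average().orElse(0.0),
            percentile(sorted, 50),
            percentile(sorted, 95),
            percentile(sorted, 99));
    }

    // Nearest-rank percentile over an already-sorted sample: with exactly 100
    // measurements, P99 is simply the 99th smallest value.
    private static long percentile(final long[] sorted, final int p) {
        final int rank = (int) Math.ceil(p / 100.0 * sorted.length);
        return sorted[Math.max(0, rank - 1)];
    }
}

Reporting percentiles alongside the average matters for this test: a handful of slow reconciliations at high load inflate P95/P99 long before they move the mean, which is exactly the degradation the test is designed to surface.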