diff --git a/README.md b/README.md index da0b05d..027dcb1 100644 --- a/README.md +++ b/README.md @@ -140,11 +140,8 @@ Current checks focus on patterns that often show up in unreviewed generated code - [stringified unknown errors](src/rules/stringified-unknown-errors/README.md) - [async wrapper / `return await` noise](src/rules/async-noise/README.md) - [pass-through wrappers](src/rules/pass-through-wrappers/README.md) -- [barrel density](src/rules/barrel-density/README.md) - [duplicate helper/function signatures across source files](src/rules/duplicate-function-signatures/README.md) -- [over-fragmentation](src/rules/over-fragmentation/README.md) - [directory fan-out hotspots](src/rules/directory-fanout-hotspot/README.md) -- [placeholder comments](src/rules/placeholder-comments/README.md) - [duplicated test mock/setup patterns](src/rules/duplicate-mock-setup/README.md) `scan` reports raw + normalized scores, hotspot tables, and grouped findings. Use `--json` when you want the full evidence payload. 
diff --git a/src/default-registry.ts b/src/default-registry.ts index de0f43a..d3e6586 100644 --- a/src/default-registry.ts +++ b/src/default-registry.ts @@ -12,7 +12,6 @@ import { javascriptLikeLanguage } from "./languages/javascript-like"; import { jsonReporter } from "./reporters/json"; import { lintReporter } from "./reporters/lint"; import { textReporter } from "./reporters/text"; -import { placeholderCommentsRule } from "./rules/placeholder-comments"; import { asyncNoiseRule } from "./rules/async-noise"; import { emptyCatchRule } from "./rules/empty-catch"; import { errorObscuringRule } from "./rules/error-obscuring"; @@ -21,10 +20,8 @@ import { promiseDefaultFallbacksRule } from "./rules/promise-default-fallbacks"; import { genericStatusEnvelopesRule } from "./rules/generic-status-envelopes"; import { genericRecordCastsRule } from "./rules/generic-record-casts"; import { stringifiedUnknownErrorsRule } from "./rules/stringified-unknown-errors"; -import { barrelDensityRule } from "./rules/barrel-density"; import { directoryFanoutHotspotRule } from "./rules/directory-fanout-hotspot"; import { duplicateFunctionSignaturesRule } from "./rules/duplicate-function-signatures"; -import { overFragmentationRule } from "./rules/over-fragmentation"; import { passThroughWrappersRule } from "./rules/pass-through-wrappers"; import { duplicateMockSetupRule } from "./rules/duplicate-mock-setup"; @@ -42,7 +39,6 @@ export function createDefaultRegistry(): Registry { registry.registerFactProvider(directoryMetricsFactProvider); registry.registerFactProvider(testDuplicationFactProvider); - registry.registerRule(placeholderCommentsRule); registry.registerRule(asyncNoiseRule); registry.registerRule(errorSwallowingRule); registry.registerRule(errorObscuringRule); @@ -51,10 +47,8 @@ export function createDefaultRegistry(): Registry { registry.registerRule(genericStatusEnvelopesRule); registry.registerRule(genericRecordCastsRule); registry.registerRule(stringifiedUnknownErrorsRule); - 
registry.registerRule(barrelDensityRule); registry.registerRule(passThroughWrappersRule); registry.registerRule(duplicateFunctionSignaturesRule); - registry.registerRule(overFragmentationRule); registry.registerRule(directoryFanoutHotspotRule); registry.registerRule(duplicateMockSetupRule); diff --git a/src/rules/async-noise/README.md b/src/rules/async-noise/README.md index 133a477..cdb4a5e 100644 --- a/src/rules/async-noise/README.md +++ b/src/rules/async-noise/README.md @@ -41,6 +41,27 @@ async function getJson(url: string) { } ``` +## How to fix / do this better + +Prefer one of these instead: + +- remove `async` entirely when the function is just forwarding a promise +- remove redundant `await` when you are immediately returning the awaited value +- keep the wrapper only if it adds real behavior such as validation, normalization, retries, metrics, or error context + +```ts +function getUser(id: string) { + return fetchUser(id); +} + +async function loadUser(id: string) { + const user = await fetchUser(id); + return normalizeUser(user); +} +``` + +The goal is not "never use async". It is to avoid wrapper ceremony that makes the call graph larger without making behavior clearer. + ## Scoring Redundant `return await` sites add `1.5` each. diff --git a/src/rules/barrel-density/README.md b/src/rules/barrel-density/README.md index adefaa9..2b22e1a 100644 --- a/src/rules/barrel-density/README.md +++ b/src/rules/barrel-density/README.md @@ -36,6 +36,23 @@ export function createStore() { export { type Store } from "./types"; ``` +## How to fix / do this better + +Prefer barrels only when they improve discoverability without hiding module boundaries. 
+ +Better options: + +- keep a barrel small and intentional +- export a stable public surface from one place, but avoid creating layers of barrel-to-barrel indirection +- import directly from the implementation module when a barrel adds little value + +```ts +export { createStore } from "./store"; +export { type Store } from "./types"; +``` + +If a file is just a wide list of re-exports, ask whether it is actually helping API design or only adding another place to chase symbols through. + ## Scoring The score starts at `1` and adds `0.5` per re-export statement, capped at `3`. diff --git a/src/rules/directory-fanout-hotspot/README.md b/src/rules/directory-fanout-hotspot/README.md index d1637eb..9da94df 100644 --- a/src/rules/directory-fanout-hotspot/README.md +++ b/src/rules/directory-fanout-hotspot/README.md @@ -47,6 +47,30 @@ src/icons/ Asset-like buckets and test-matrix directories are intentionally suppressed because wide directory shapes are expected there. +## How to fix / do this better + +A wide directory is usually a sign that one of these is missing: + +- a stronger domain split +- a deeper subdirectory boundary +- a more cohesive module with fewer one-file-per-concept fragments + +Better patterns: + +- group related files into subdomains once a folder becomes a grab bag +- merge ultra-thin files when the split adds naming overhead but not conceptual clarity +- separate generated output from hand-written source when possible + +```text +src/ +└── billing/ + ├── invoices/ + ├── subscriptions/ + └── shared/ +``` + +The goal is not tiny directories everywhere. It is to avoid a single hotspot folder becoming the dumping ground for too many loosely related files. + ## Scoring The rule starts at `2` and adds a bounded amount based on how far the directory is above the computed threshold. 
diff --git a/src/rules/duplicate-function-signatures/README.md b/src/rules/duplicate-function-signatures/README.md index bfd1e5b..70638db 100644 --- a/src/rules/duplicate-function-signatures/README.md +++ b/src/rules/duplicate-function-signatures/README.md @@ -50,6 +50,26 @@ export function getUser(id: string) { Pass-through wrappers are excluded, and a duplicate that only appears in 2 files is below the reporting threshold. +## How to fix / do this better + +When the same helper shape appears across multiple files, prefer one of these: + +- extract the shared logic into a single reusable helper +- create a small configurable normalizer instead of copy-pasting near-identical functions +- keep duplication only when the domain concepts are truly diverging and deserve separate behavior + +```ts +function normalizePersonLike(input: { name?: string; email?: string; active?: boolean }) { + return { + name: input.name?.trim() ?? "", + email: input.email?.toLowerCase() ?? "", + active: Boolean(input.active), + }; +} +``` + +The point is not to eliminate all repetition. It is to avoid silent copy-paste drift when several files are maintaining the same logic independently. + ## Scoring Each duplicate cluster adds `1.25 + 0.5 * (fileCount - 3)` for the current file, capped at `6`. diff --git a/src/rules/duplicate-mock-setup/README.md b/src/rules/duplicate-mock-setup/README.md index d2ca57f..01d605c 100644 --- a/src/rules/duplicate-mock-setup/README.md +++ b/src/rules/duplicate-mock-setup/README.md @@ -39,6 +39,24 @@ vi.clearAllMocks(); Generic mock declarations and cleanup-only statements do not contribute to this rule. +## How to fix / do this better + +When the same mock setup keeps reappearing, prefer shared test helpers over repeating the setup inline. 
+ +Better options: + +- move repeated mock wiring into a factory or fixture helper +- centralize common setup in `beforeEach` when it is truly shared +- expose small scenario builders so tests vary only the interesting values + +```ts +function mockUserFetch(overrides: Partial<User> = {}) { + vi.mocked(api.fetchUser).mockResolvedValue({ id: 1, name: "Ada", ...overrides }); +} +``` + +That keeps test intent focused on what changes per case instead of duplicating the same mock plumbing in every file. + ## Scoring Each duplicate setup cluster adds `1 + 0.5 * (fileCount - 2)` for the current file, capped at `5`. diff --git a/src/rules/empty-catch/README.md b/src/rules/empty-catch/README.md index c5ae4ca..1c61cd7 100644 --- a/src/rules/empty-catch/README.md +++ b/src/rules/empty-catch/README.md @@ -44,6 +44,27 @@ export function loadTheme() { } ``` +## How to fix / do this better + +An empty catch should usually become one of these instead: + +- rethrow the error +- return a deliberate typed fallback with a comment explaining the boundary behavior +- log meaningful context and then rethrow +- validate earlier so the exceptional path is narrower and more intentional + +```ts +export function parseConfig(raw: string) { + try { + return JSON.parse(raw); + } catch (error) { + throw new Error("Invalid config JSON", { cause: error }); + } +} +``` + +If swallowing the error is truly intentional, document why the fallback is safe and keep the scope local. + ## Scoring Each flagged catch uses the shared try/catch scoring helper, then the file total is capped at `8`. diff --git a/src/rules/error-obscuring/README.md b/src/rules/error-obscuring/README.md index 56c6604..0a1293e 100644 --- a/src/rules/error-obscuring/README.md +++ b/src/rules/error-obscuring/README.md @@ -50,6 +50,28 @@ export function readConfig(raw: string) { } ``` +## How to fix / do this better + +Prefer preserving failure meaning instead of replacing it with a cheap fallback. 
+ +Better patterns: + +- rethrow the original error +- wrap with context while preserving `cause` +- return a deliberate result type that makes the failure explicit instead of pretending the operation succeeded + +```ts +export function loadProfile(id: string) { + try { + return fetchProfile(id); + } catch (error) { + throw new Error(`Failed to load profile ${id}`, { cause: error }); + } +} +``` + +If you truly need a fallback value, keep it narrow, document why it is safe, and avoid erasing the original failure in code paths that still need diagnosis. + ## Scoring Each flagged catch uses the shared try/catch scoring helper, then the file total is capped at `8`. diff --git a/src/rules/error-swallowing/README.md b/src/rules/error-swallowing/README.md index fc29261..33e9c97 100644 --- a/src/rules/error-swallowing/README.md +++ b/src/rules/error-swallowing/README.md @@ -39,6 +39,28 @@ export async function syncUser(id: string) { } ``` +## How to fix / do this better + +Logging is not a substitute for control flow. +If the caller still needs to know the operation failed, prefer one of these: + +- log and rethrow +- return an explicit result type such as `{ ok: false, error }` +- handle the failure completely at this layer only when you can prove continuing is safe + +```ts +export async function syncUser(id: string) { + try { + await pushUser(id); + } catch (error) { + logger.error({ error, id }, "failed to sync user"); + throw error; + } +} +``` + +The key is to make failure visible in the API contract instead of only visible in logs. + ## Scoring Each flagged catch uses the shared try/catch scoring helper, then the file total is capped at `8`. 
diff --git a/src/rules/generic-record-casts/README.md b/src/rules/generic-record-casts/README.md index 445b310..183c7ff 100644 --- a/src/rules/generic-record-casts/README.md +++ b/src/rules/generic-record-casts/README.md @@ -40,6 +40,32 @@ const token = value as { token: string }; const metadata = input as Map<string, unknown>; +## How to fix / do this better + +Treat unknown input as unknown for longer, then validate or narrow it at the boundary. + +Better options: + +- parse into a real domain type with a schema or decoder +- keep the value as `unknown` until you prove the fields you need +- use a very local cast only when you immediately narrow and contain it + +```ts +const input: unknown = JSON.parse(raw); +const parsed = UserConfigSchema.parse(input); +``` + +Or, without a schema library: + +```ts +const input: unknown = JSON.parse(raw); +if (!isUserConfig(input)) { + throw new Error("Invalid user config"); +} +``` + +The goal is to avoid turning uncertain external data into a roaming generic object bag that downstream code has to keep guessing about. + ## Scoring Each generic record cast adds `2` points. diff --git a/src/rules/generic-status-envelopes/README.md b/src/rules/generic-status-envelopes/README.md index 310df06..07b6c09 100644 --- a/src/rules/generic-status-envelopes/README.md +++ b/src/rules/generic-status-envelopes/README.md @@ -35,6 +35,25 @@ return { success: true, user }; return { error: "missing" }; +## How to fix / do this better + +Prefer API shapes that express the actual domain outcome instead of wrapping everything in a shallow boolean envelope. 
+ +Better options: + +- return the domain object directly on success +- use typed result variants when callers really need success/failure branching +- model specific failure cases instead of pushing everything into generic `message` / `error` strings + +```ts +type CreateRepoResult = + | { kind: "created"; repository: Repository } + | { kind: "forbidden" } + | { kind: "conflict"; reason: string }; +``` + +A small `{ ok, data }` wrapper is sometimes fine, but if it becomes the default shape for every operation it usually means the API is describing transport status rather than domain meaning. + ## Scoring Each generic status envelope adds `2` points. diff --git a/src/rules/over-fragmentation/README.md b/src/rules/over-fragmentation/README.md index 7e0e5cb..9bcf10b 100644 --- a/src/rules/over-fragmentation/README.md +++ b/src/rules/over-fragmentation/README.md @@ -47,6 +47,25 @@ src/icons/ Asset buckets and test-heavy directories are suppressed, and a directory full of small but substantial implementation files can also avoid a finding. +## How to fix / do this better + +Prefer module boundaries that follow behavior, not just naming. + +Better options: + +- merge ultra-thin wrapper files back into a cohesive module +- split by domain or workflow only when each file has meaningful independent behavior +- keep supporting types/helpers near the implementation they actually serve + +```text +src/payments/ +├── service.ts +├── types.ts +└── gateways/ +``` + +The goal is not fewer files at all costs. It is to avoid architecture that looks modular on disk while forcing readers to jump through many tiny files to understand one behavior. + ## Scoring The score is `4 + tinyRatio * 3 + ceremonyRatio * 2`. 
diff --git a/src/rules/pass-through-wrappers/README.md b/src/rules/pass-through-wrappers/README.md index 2b1a06d..c058e69 100644 --- a/src/rules/pass-through-wrappers/README.md +++ b/src/rules/pass-through-wrappers/README.md @@ -40,6 +40,28 @@ export function getJson(url: string) { } ``` +## How to fix / do this better + +A wrapper should earn its existence. +Keep it only if it adds something real, such as: + +- validation +- normalization +- retries or metrics +- naming a stable compatibility layer +- adapting one API shape into another + +Otherwise, call the underlying function directly or merge the wrapper away. + +```ts +export async function saveUser(input: UserInput) { + const normalized = normalizeUserInput(input); + return persistUser(normalized); +} +``` + +The goal is to reduce indirection that makes the codebase feel larger without adding behavior or clearer boundaries. + ## Scoring Each wrapper adds `2` points, capped at `5` for the file. diff --git a/src/rules/placeholder-comments/README.md b/src/rules/placeholder-comments/README.md index 39840e2..706ac1d 100644 --- a/src/rules/placeholder-comments/README.md +++ b/src/rules/placeholder-comments/README.md @@ -47,6 +47,25 @@ export function legacyMode() { } ``` +## How to fix / do this better + +Comments should explain current constraints, intent, or tradeoffs — not vaguely promise future completeness. + +Better options: + +- replace the placeholder with a concrete TODO that names the missing case +- document why the current implementation is intentionally partial +- remove the comment entirely if the code already says everything useful + +```ts +// TODO(ben): validate locale-specific edge cases before enabling CSV import. +export function normalizeName(input: string) { + return input.trim(); +} +``` + +A good comment tells the next reader what is true now or what exact work remains. A weak placeholder just signals uncertainty. 
+ ## Scoring Each matching comment adds `0.75` to the file score, capped at `1.5`. diff --git a/src/rules/promise-default-fallbacks/README.md b/src/rules/promise-default-fallbacks/README.md index bd12e05..b931fef 100644 --- a/src/rules/promise-default-fallbacks/README.md +++ b/src/rules/promise-default-fallbacks/README.md @@ -52,6 +52,29 @@ export async function loadConfigResult() { } ``` +## How to fix / do this better + +A promise catch should usually preserve failure meaning instead of converting it into a cheap sentinel. + +Better options: + +- let the rejection propagate +- transform the error while preserving context +- return an explicit result type when the caller truly needs a non-throwing contract +- narrow the fallback to a boundary where a default is genuinely safe + +```ts +export async function loadConfig() { + try { + return await fetchConfig(); + } catch (error) { + throw new Error("Failed to load config", { cause: error }); + } +} +``` + +If a fallback is intentional, make it domain-shaped and explicit rather than `null`, `false`, or an empty object that hides why the operation failed. + ## Scoring Each flagged promise catch adds `2` points. diff --git a/src/rules/stringified-unknown-errors/README.md b/src/rules/stringified-unknown-errors/README.md index a082b51..1eea802 100644 --- a/src/rules/stringified-unknown-errors/README.md +++ b/src/rules/stringified-unknown-errors/README.md @@ -44,6 +44,26 @@ catch (error) { } ``` +## How to fix / do this better + +Prefer preserving structured error information over collapsing everything into a string. 
+ +Better options: + +- propagate the original error object +- log structured fields and keep the original error attached +- map errors into typed domain variants instead of generic message strings +- stringify only at the final UI or logging boundary + +```ts +catch (error) { + logger.error({ error }); + return { success: false, error }; +} +``` + +If the UI really needs a display string, derive it at the edge of the system rather than erasing the richer error earlier in the flow. + ## Scoring Each unknown-error stringification site adds `2` points. diff --git a/tests/config.test.ts b/tests/config.test.ts index 13877cc..e7c58f7 100644 --- a/tests/config.test.ts +++ b/tests/config.test.ts @@ -19,7 +19,7 @@ async function createTempRepo(): Promise<string> { await mkdir(path.join(rootDir, "src"), { recursive: true }); await writeFile( path.join(rootDir, "src", "comments.ts"), - "// Add more validation if needed\nexport const commentExample = true;\n", + "function loadValue(input: string) {\n return Promise.resolve(input);\n}\n\nexport async function fetchData(id: string) {\n return await loadValue(id);\n}\n", ); return rootDir; } @@ -52,11 +52,13 @@ describe("rule config support", () => { const rootDir = await createTempRepo(); const result = await analyzeRepository( rootDir, - withRuleConfig("comments.placeholder-comments", { enabled: false }), + withRuleConfig("defensive.async-noise", { enabled: false }), createDefaultRegistry(), ); - expect(result.findings).toHaveLength(0); + expect( + result.findings.filter((finding) => finding.ruleId === "defensive.async-noise"), + ).toHaveLength(0); }); test("can weight a rule via config", async () => { const rootDir = await createTempRepo(); const baseline = await analyzeRepository(rootDir, DEFAULT_CONFIG, createDefaultRegistry()); const weighted = await analyzeRepository( rootDir, - withRuleConfig("comments.placeholder-comments", { weight: 2 }), + withRuleConfig("defensive.async-noise", { weight: 2 }), 
createDefaultRegistry(), ); - expect(baseline.findings).toHaveLength(1); - expect(weighted.findings).toHaveLength(1); - expect(weighted.findings[0]?.score).toBeCloseTo((baseline.findings[0]?.score ?? 0) * 2, 6); + const baselineAsyncNoise = baseline.findings.find( + (finding) => finding.ruleId === "defensive.async-noise", + ); + const weightedAsyncNoise = weighted.findings.find( + (finding) => finding.ruleId === "defensive.async-noise", + ); + + expect(baselineAsyncNoise).toBeDefined(); + expect(weightedAsyncNoise).toBeDefined(); + expect(weightedAsyncNoise?.score).toBeCloseTo((baselineAsyncNoise?.score ?? 0) * 2, 6); }); test("loadConfig reads slop-scan.config.json", async () => { @@ -104,47 +113,56 @@ describe("rule config support", () => { const rootDir = await createTempRepo(); await writeFile( path.join(rootDir, "src", "nested.ts"), - "// Add more validation if needed\nexport const nested = true;\n", + "function fetchRemote(input: string) {\n return Promise.resolve(input);\n}\n\nexport async function loadValue(id: string) {\n return await fetchRemote(id);\n}\n", ); const result = await analyzeRepository( rootDir, withPathOverride(["src/comments.ts"], { - "comments.placeholder-comments": { enabled: false }, + "defensive.async-noise": { enabled: false }, }), createDefaultRegistry(), ); - expect(result.findings).toHaveLength(1); - expect(result.findings[0]?.path).toBe("src/nested.ts"); + const asyncNoiseFindings = result.findings.filter( + (finding) => finding.ruleId === "defensive.async-noise", + ); + + expect(asyncNoiseFindings).toHaveLength(1); + expect(asyncNoiseFindings[0]?.path).toBe("src/nested.ts"); }); test("can apply a path-scoped directory override", async () => { const rootDir = await createTempRepo(); - for (const dirName of ["src/rules/defensive", "src/other/defensive"]) { - for (let index = 0; index < 6; index += 1) { - await mkdir(path.join(rootDir, dirName), { recursive: true }); - await writeFile( - path.join(rootDir, dirName, 
`file-${index}.ts`), - `export const value${index} = ${index};\n`, - ); - } - } + await mkdir(path.join(rootDir, "src/rules/defensive"), { recursive: true }); + await writeFile( + path.join(rootDir, "src/rules/defensive/service.ts"), + "function fetchRule(input: string) {\n return Promise.resolve(input);\n}\n\nexport async function loadRule(id: string) {\n return await fetchRule(id);\n}\n", + ); + + await mkdir(path.join(rootDir, "src/other/defensive"), { recursive: true }); + await writeFile( + path.join(rootDir, "src/other/defensive/service.ts"), + "function fetchOther(input: string) {\n return Promise.resolve(input);\n}\n\nexport async function loadOther(id: string) {\n return await fetchOther(id);\n}\n", + ); const result = await analyzeRepository( rootDir, withPathOverride(["src/rules/**"], { - "structure.over-fragmentation": { enabled: false }, + "defensive.async-noise": { enabled: false }, }), createDefaultRegistry(), ); - const fragmentationFindings = result.findings.filter( - (finding) => finding.ruleId === "structure.over-fragmentation", + const asyncNoiseFindings = result.findings.filter( + (finding) => finding.ruleId === "defensive.async-noise", ); - expect(fragmentationFindings.map((finding) => finding.path)).toEqual(["src/other/defensive"]); + expect(asyncNoiseFindings.map((finding) => finding.path).sort()).toEqual([ + "src/comments.ts", + "src/other/defensive/service.ts", + ]); }); test("loadConfig reads path-scoped overrides", async () => { @@ -156,7 +174,7 @@ describe("rule config support", () => { { files: ["src/comments.ts"], rules: { - "comments.placeholder-comments": { enabled: false }, + "defensive.async-noise": { enabled: false }, }, }, ], @@ -169,7 +187,7 @@ describe("rule config support", () => { { files: ["src/comments.ts"], rules: { - "comments.placeholder-comments": { enabled: false }, + "defensive.async-noise": { enabled: false }, }, }, ]); diff --git a/tests/fixtures-regression.test.ts b/tests/fixtures-regression.test.ts index 
9fbe55f..25e170c 100644 --- a/tests/fixtures-regression.test.ts +++ b/tests/fixtures-regression.test.ts @@ -28,24 +28,19 @@ describe("fixture regression suite", () => { createDefaultRegistry(), ); - expect(result.repoScore).toBeCloseTo(28.2833333333, 6); - expect(result.findings).toHaveLength(8); + expect(result.repoScore).toBeCloseTo(10.8666666667, 6); + expect(result.findings).toHaveLength(4); expect([...new Set(result.findings.map((finding) => finding.ruleId))].sort()).toEqual([ - "comments.placeholder-comments", "defensive.async-noise", "defensive.error-obscuring", - "structure.barrel-density", "structure.directory-fanout-hotspot", - "structure.over-fragmentation", "structure.pass-through-wrappers", ]); expect(result.fileScores.map((score) => score.path)).toEqual([ "src/service.ts", - "src/index.ts", "src/error.ts", - "src/comments.ts", ]); - expect(result.directoryScores.map((score) => score.path)).toEqual(["src/fragments", "src"]); + expect(result.directoryScores.map((score) => score.path)).toEqual(["src/fragments"]); }); test("mixed fixture localizes hotspots to the slop subtree", async () => { @@ -55,7 +50,7 @@ describe("fixture regression suite", () => { createDefaultRegistry(), ); - expect(result.repoScore).toBeCloseTo(27.1166666667, 6); + expect(result.repoScore).toBeCloseTo(9.7, 6); expect(result.fileScores[0]?.path).toBe("src/slop/service.ts"); expect(result.directoryScores[0]?.path).toBe("src/slop"); expect(result.fileScores.every((score) => score.path.startsWith("src/slop/"))).toBe(true); @@ -73,8 +68,8 @@ describe("fixture regression suite", () => { expect(output.status).toBe(0); const report = JSON.parse(output.stdout); - expect(report.summary.repoScore).toBeCloseTo(28.2833333333, 6); - expect(report.summary.findingCount).toBe(8); + expect(report.summary.repoScore).toBeCloseTo(10.8666666667, 6); + expect(report.summary.findingCount).toBe(4); expect(report.directoryScores[0].path).toBe("src/fragments"); 
expect(report.fileScores[0].path).toBe("src/service.ts"); }); @@ -89,19 +84,15 @@ describe("fixture regression suite", () => { ); expect(output.status).toBe(0); - expect(output.stdout).toContain( - "weak Found 1 placeholder-style comments comments.placeholder-comments", - ); - expect(output.stdout).toContain(" at src/comments.ts:1:1"); expect(output.stdout).toContain( "strong Found 1 error-obscuring catch block defensive.error-obscuring", ); expect(output.stdout).toContain(" at src/error.ts:2:1"); expect(output.stdout).toContain( - "medium File is primarily a barrel with 2 re-export statements structure.barrel-density", + "medium Directory fan-out is a repo hotspot (7 files vs baseline 1.0) structure.directory-fanout-hotspot", ); - expect(output.stdout).toContain(" at src/index.ts:1:1"); - expect(output.stdout).toContain("8 findings"); + expect(output.stdout).toContain(" at src/fragments:1:1"); + expect(output.stdout).toContain("4 findings"); expect(output.stdout).not.toContain("slop-scan report"); }); diff --git a/tests/heuristics.test.ts b/tests/heuristics.test.ts index 5a6198d..519905b 100644 --- a/tests/heuristics.test.ts +++ b/tests/heuristics.test.ts @@ -91,7 +91,6 @@ describe("heuristic rule pack", () => { const result = await analyzeRepository(rootDir, DEFAULT_CONFIG, createDefaultRegistry()); const ruleIds = new Set(result.findings.map((finding) => finding.ruleId)); - expect(ruleIds.has("comments.placeholder-comments")).toBe(true); expect(ruleIds.has("defensive.error-obscuring")).toBe(true); expect(ruleIds.has("defensive.promise-default-fallbacks")).toBe(true); expect(ruleIds.has("api.generic-status-envelopes")).toBe(true); @@ -99,8 +98,6 @@ describe("heuristic rule pack", () => { expect(ruleIds.has("defensive.stringified-unknown-errors")).toBe(true); expect(ruleIds.has("defensive.async-noise")).toBe(true); expect(ruleIds.has("structure.pass-through-wrappers")).toBe(true); - expect(ruleIds.has("structure.barrel-density")).toBe(true); - 
expect(ruleIds.has("structure.over-fragmentation")).toBe(true); expect(ruleIds.has("structure.directory-fanout-hotspot")).toBe(true); expect(result.fileScores.some((score) => score.path === "src/service.ts")).toBe(true);