1 Commits

Author SHA1 Message Date
b4b0b0cb4d Add Makefile with build and test targets 2026-03-04 19:35:42 +00:00
8 changed files with 48 additions and 415 deletions

1
.gitignore vendored
View File

@@ -1 +0,0 @@
build/

View File

@@ -1,24 +1,56 @@
# bash is required: the test recipe uses `set -o pipefail` and function
# definitions inside a single backslash-continued command.
SHELL := /bin/bash
CC := cc
CFLAGS := -Wall -O2
TARGET := fa2json
SRCS := fs-watcher.c json-writer.c

.PHONY: all test clean

all: $(TARGET)

$(TARGET): $(SRCS)
	$(CC) $(CFLAGS) -o $@ $^

# Requires root (fanotify FID + mount needs CAP_SYS_ADMIN).
# Loop device and mount are cleaned up automatically on exit.
# If the test aborts unexpectedly, check: losetup -l and /proc/mounts
#
# NOTE: every guard in cleanup() must tolerate failure (|| true / if),
# because the recipe runs under `set -e` and the EXIT trap inherits it —
# otherwise an empty variable would abort cleanup early and leak the
# loop device or mount directory.
test: $(TARGET)
	@set -euo pipefail; \
	IMG=""; MNTDIR=""; LOOPDEV=""; FA2JSON_PID=""; \
	cleanup() { \
		if [ -n "$$FA2JSON_PID" ]; then kill "$$FA2JSON_PID" 2>/dev/null || true; wait "$$FA2JSON_PID" 2>/dev/null || true; fi; \
		[ -n "$$MNTDIR" ] && umount "$$MNTDIR" 2>/dev/null || true; \
		[ -n "$$LOOPDEV" ] && losetup -d "$$LOOPDEV" 2>/dev/null || true; \
		[ -n "$$IMG" ] && rm -f "$$IMG" || true; \
		[ -n "$$MNTDIR" ] && rmdir "$$MNTDIR" 2>/dev/null || true; \
	}; \
	trap cleanup EXIT; \
	IMG=$$(mktemp /tmp/fa2json-test-XXXXXX.img); \
	MNTDIR=$$(mktemp -d /tmp/fa2json-mnt-XXXXXX); \
	echo "--- Creating 10M ext4 image ---"; \
	dd if=/dev/zero of="$$IMG" bs=1M count=10 status=none; \
	mkfs.ext4 -q "$$IMG"; \
	LOOPDEV=$$(losetup --find --show "$$IMG"); \
	mount "$$LOOPDEV" "$$MNTDIR"; \
	echo "--- Starting fa2json on $$MNTDIR ---"; \
	./$(TARGET) "$$MNTDIR" & FA2JSON_PID=$$!; \
	sleep 0.3; \
	echo "--- Filesystem operations ---"; \
	mkdir "$$MNTDIR/dir_a"; \
	touch "$$MNTDIR/file_a.txt"; \
	echo "content" >> "$$MNTDIR/file_a.txt"; \
	mkdir "$$MNTDIR/dir_b"; \
	touch "$$MNTDIR/dir_b/nested.txt"; \
	mv "$$MNTDIR/file_a.txt" "$$MNTDIR/file_b.txt"; \
	mv "$$MNTDIR/dir_b" "$$MNTDIR/dir_a/dir_b_moved"; \
	chmod 600 "$$MNTDIR/file_b.txt"; \
	touch -m "$$MNTDIR/file_b.txt"; \
	chmod 755 "$$MNTDIR/dir_a"; \
	rm "$$MNTDIR/file_b.txt"; \
	rm "$$MNTDIR/dir_a/dir_b_moved/nested.txt"; \
	rm -rf "$$MNTDIR/dir_a"; \
	sleep 0.3; \
	echo "--- Done ---"

clean:
	rm -f $(TARGET)

View File

@@ -58,7 +58,7 @@ static void handle_events(int fafd, int mount_fd) {
clock_gettime(CLOCK_MONOTONIC, &mono); clock_gettime(CLOCK_MONOTONIC, &mono);
clock_gettime(CLOCK_REALTIME, &wall); clock_gettime(CLOCK_REALTIME, &wall);
fprintf(stdout, "{\"ts\": [%li, %li, %li, %li]", wall.tv_sec, wall.tv_nsec, mono.tv_sec, mono.tv_nsec); fprintf(stdout, "{\"ts\": [%i, %i, %i, %i]", wall.tv_sec, wall.tv_nsec, mono.tv_sec, mono.tv_nsec);
char *ptr = (char *)(metadata + 1); char *ptr = (char *)(metadata + 1);
char *end = (char *)metadata + metadata->event_len; char *end = (char *)metadata + metadata->event_len;
@@ -98,7 +98,7 @@ static void handle_events(int fafd, int mount_fd) {
} }
if (entry_index++) { fprintf(stdout, ", "); } if (entry_index++) { fprintf(stdout, ", "); }
fprintf(stdout, ", \"mask\": %lli}\n", metadata->mask); fprintf(stdout, ", \"mask\": %i}\n", metadata->mask);
metadata = FAN_EVENT_NEXT(metadata, size); metadata = FAN_EVENT_NEXT(metadata, size);

View File

@@ -1,82 +0,0 @@
## Manual experiment
> [!NOTE]
> This manual experiment shows how we can do the testing (teardown not included). Note that we don't need the `losetup` steps, since we already know where everything is.
### Compile
```sh
gcc fs-watcher.c json-writer.c -o fa2json
```
### Create image file
```sh
mktemp /tmp/fa2json-test-XXXXXX.img
```
```text
/tmp/fa2json-test-UrwpOb.img
```
```sh
truncate -s 10M /tmp/fa2json-test-UrwpOb.img
```
```sh
mkfs.ext4 /tmp/fa2json-test-UrwpOb.img
```
```text
mke2fs 1.47.3 (8-Jul-2025)
Discarding device blocks: done
Creating filesystem with 10240 1k blocks and 2560 inodes
Filesystem UUID: 035c508e-dec0-4a21-a4d1-1efb6fa72415
Superblock backups stored on blocks:
8193
Allocating group tables: done
Writing inode tables: done
Creating journal (1024 blocks): done
Writing superblocks and filesystem accounting information: done
```
### Create mount point
```sh
mktemp -d /tmp/fa2json-mnt-XXXXXX
```
```text
/tmp/fa2json-mnt-ts2Dik
```
### Mount loop device
```sh
sudo mount /tmp/fa2json-test-UrwpOb.img /tmp/fa2json-mnt-ts2Dik/
```
> [!NOTE]
> In a different terminal I now ran the command below — though we could instead start it after the `chown` step, or possibly after `chown` + `sync`.
> ```sh
> fa2json /tmp/fa2json-mnt-ts2Dik
> ```
### Let current user own file system
```sh
sudo chown $(id -u) /tmp/fa2json-mnt-ts2Dik/
```
#### `fa2json` output
```json
{"ts": [1772658052, 704412412, 386988, 865842867], "name": "/tmp/fa2json-mnt-ts2Dik/.", "mask": 1073741828}
```
### Touch marker
```sh
touch /tmp/fa2json-mnt-ts2Dik/MARKER
```
#### `fa2json` output
```json
{"ts": [1772658064, 151070715, 387000, 312501190], "name": "/tmp/fa2json-mnt-ts2Dik/MARKER", "mask": 256}
{"ts": [1772658064, 151099105, 387000, 312529600], "name": "/tmp/fa2json-mnt-ts2Dik/MARKER", "mask": 12}
```

View File

@@ -1,123 +0,0 @@
# fa2json Test Plan
## Overview
A Node.js test runner (`test/test.mjs`) that exercises `fa2json` against a
temporary ext4 filesystem on a loop device. The test produces a single
pass/fail result and cleans up after itself unconditionally.
Requires root (`fanotify` FID reporting and `mount` both need `CAP_SYS_ADMIN`).
---
## Files
| File | Purpose |
|---|---|
| `test/test.mjs` | Test runner |
| `Makefile` | `make test` target calls `sudo node test/test.mjs` |
---
## Setup
1. Create a temporary image file (`mktemp /tmp/fa2json-test-XXXXXX.img`)
2. `truncate -s 10M` the image (sparse file, no need for `dd`)
3. `mkfs.ext4` the image
4. Create a temporary mount directory (`mktemp -d /tmp/fa2json-mnt-XXXXXX`)
5. `sudo mount <img> <mntdir>` (no `losetup` needed — `mount` accepts image files directly)
6. `sudo chown $(id -u) <mntdir>` to hand ownership to the current user
7. `sync` to flush before fa2json starts listening
8. `sudo` spawn `fa2json <mountpoint>` as a child process (needs `CAP_SYS_ADMIN`)
9. Attach a `readline` interface to its stdout; parse each line as JSON and
push into an event buffer
Steps 6 and 7 ensure the `chown` event never enters the fa2json stream, and
all subsequent FS operations run unprivileged.
---
## Teardown
Runs unconditionally in a `finally` block:
1. Kill the `fa2json` child process
2. `sudo umount <mountpoint>`
3. `rm` the image file
4. `rmdir` the mount directory
---
## Event Collection and the Marker Pattern
`fa2json` runs continuously for the entire test. To associate events with
specific operations, a marker file is used as a synchronisation barrier:
1. Perform a filesystem operation
2. Immediately `touch <mountpoint>/.marker_N` (where N is a counter)
3. Wait until the event stream contains a CREATE event for `.marker_N`
4. Collect all events since the previous marker — this batch belongs to the
current operation
5. Assert on the batch, then advance the counter
If a marker event never arrives the test hangs, which indicates a failure at
the fa2json level itself.
---
## Path Handling
`fa2json` emits full paths including the mount prefix
(e.g. `/tmp/fa2json-mnt-XXXXX/dir_a/file.txt`). The runner strips this prefix
so assertions work against a virtual root:
```
/tmp/fa2json-mnt-XXXXX/dir_a/file.txt → /dir_a/file.txt
```
---
## Fanotify Mask Constants
Relevant flags (bitwise, check with `mask & FLAG`):
| Constant | Value | Meaning |
|---|---|---|
| `FAN_ATTRIB` | `0x4` | Metadata/attribute change |
| `FAN_CLOSE_WRITE` | `0x8` | File closed after writing |
| `FAN_CREATE` | `0x100` | File or directory created |
| `FAN_DELETE` | `0x200` | File or directory deleted |
| `FAN_RENAME` | `0x10000000` | Rename (has `old` and `new` fields) |
| `FAN_ONDIR` | `0x40000000` | Event subject is a directory |
---
## Operations and Expected Events
Each row is one `doOp()` call. Events are matched by presence (not exact list)
— extra events from ext4 internals are ignored.
| Operation | Expected event(s) |
|---|---|
| `mkdir /dir_a` | CREATE \| ONDIR, name `/dir_a` |
| `touch /file_a.txt` | CREATE, name `/file_a.txt` |
| `echo "content" >> /file_a.txt` | CLOSE_WRITE, name `/file_a.txt` |
| `mkdir /dir_b` | CREATE \| ONDIR, name `/dir_b` |
| `touch /dir_b/nested.txt` | CREATE, name `/dir_b/nested.txt` |
| `mv /file_a.txt /file_b.txt` | RENAME, old `/file_a.txt`, new `/file_b.txt` |
| `mv /dir_b /dir_a/dir_b_moved` | RENAME \| ONDIR, old `/dir_b`, new `/dir_a/dir_b_moved` |
| `chmod 600 /file_b.txt` | ATTRIB, name `/file_b.txt` |
| `touch -m /file_b.txt` | ATTRIB, name `/file_b.txt` |
| `chmod 755 /dir_a` | ATTRIB \| ONDIR, name `/dir_a` |
| `rm /file_b.txt` | DELETE, name `/file_b.txt` |
| `rm /dir_a/dir_b_moved/nested.txt` | DELETE, name `/dir_a/dir_b_moved/nested.txt` |
| `rmdir /dir_a/dir_b_moved` | DELETE \| ONDIR, name `/dir_a/dir_b_moved` |
| `rmdir /dir_a` | DELETE \| ONDIR, name `/dir_a` |
---
## Pass / Fail
- All assertions pass → print summary, `process.exit(0)`
- Any assertion throws → print the failing operation, the expected event, and
the actual batch received, then `process.exit(1)`

View File

@@ -1,43 +0,0 @@
#!/usr/bin/env node
// Developer mode: set up loop device, stream fa2json output through jq,
// tear down on exit. Optionally launch a terminal at the mount point.
//
// Usage:
// sudo node test/dev.mjs
// sudo node test/dev.mjs --terminal konsole
// sudo node test/dev.mjs --terminal "konsole -e bash"
import { spawn } from 'node:child_process';
// NOTE(review): createInterface is imported but never used in this file.
import { createInterface } from 'node:readline';
import { setup, spawnFa2json } from './lib/setup.mjs';
// Everything after "--terminal" is joined back into one command string,
// so multi-word commands like "konsole -e bash" work.
const terminalArg = (() => {
  const i = process.argv.indexOf('--terminal');
  return i !== -1 ? process.argv.slice(i + 1).join(' ') : null;
})();
const { mnt, teardown } = await setup();
console.error(`Mount point: ${mnt}`);
// Pipe fa2json stdout through jq for pretty coloured output
const fa2json = spawnFa2json(mnt);
const jq = spawn('jq', ['-C', '--unbuffered', '.'], { stdio: ['pipe', 'inherit', 'inherit'] });
fa2json.stdout.pipe(jq.stdin);
// fa2json exiting (e.g. after the SIGTERM sent from the SIGINT handler
// below) is the single teardown path: close jq's stdin, unmount/remove
// the image, then exit.
fa2json.on('exit', async () => {
  jq.stdin.end();
  await teardown();
  process.exit(0);
});
// Launch optional terminal at mount point
if (terminalArg) {
  const [cmd, ...args] = terminalArg.split(' ');
  // Detached + unref so the terminal can outlive this script.
  spawn(cmd, [...args, '--workdir', mnt], { detached: true, stdio: 'ignore' }).unref();
  console.error(`Launched: ${terminalArg} --workdir ${mnt}`);
}
// Clean teardown on Ctrl+C: killing fa2json triggers the 'exit'
// handler above, which performs the actual teardown.
process.on('SIGINT', async () => {
  fa2json.kill('SIGTERM');
});

View File

@@ -1,31 +0,0 @@
import { execFileSync, spawn } from 'node:child_process';
import { join } from 'node:path';
// Absolute path to the fa2json binary, resolved relative to this module.
const FA2JSON = new URL('../../build/fa2json', import.meta.url).pathname;
// Create a 10M ext4 image file, mount it (sudo), hand ownership of the
// mount point to the current user and sync — so the chown event never
// reaches fa2json and later FS operations run unprivileged.
// Returns { img, mnt, teardown }; teardown() unmounts and removes both
// the image and the mount directory, ignoring individual failures.
export async function setup() {
  const run = (cmd, args = []) => execFileSync(cmd, args).toString().trim();
  // Image file: create, size, format.
  const img = run('mktemp', ['/tmp/fa2json-test-XXXXXX.img']);
  run('truncate', ['-s', '10M', img]);
  run('mkfs.ext4', ['-q', img]);
  // Mount point: create and mount the image directly (no losetup needed).
  const mnt = run('mktemp', ['-d', '/tmp/fa2json-mnt-XXXXXX']);
  run('sudo', ['mount', img, mnt]);
  // Hand ownership to the current user, then flush before fa2json listens.
  run('sudo', ['chown', String(process.getuid()), mnt]);
  run('sync');
  // Best-effort teardown: each step is independent and failures are ignored.
  async function teardown() {
    try { execFileSync('sudo', ['umount', mnt]); } catch {}
    try { execFileSync('rm', ['-f', img]); } catch {}
    try { execFileSync('rmdir', [mnt]); } catch {}
  }
  return { img, mnt, teardown };
}
// Launch fa2json (via sudo — fanotify FID reporting needs CAP_SYS_ADMIN)
// watching `mnt`. stdout is piped for the caller to consume; stderr is
// inherited so diagnostics stay visible.
export function spawnFa2json(mnt) {
  const argv = [FA2JSON, mnt];
  return spawn('sudo', argv, { stdio: ['ignore', 'pipe', 'inherit'] });
}

View File

@@ -1,119 +0,0 @@
#!/usr/bin/env node
// Automated test runner. Exit 0 = pass, exit 1 = fail.
// Requires root (sudo) for mount and fa2json.
import { createInterface } from 'node:readline';
import { promises as fs } from 'node:fs';
import { join } from 'node:path';
import { setup, spawnFa2json } from './lib/setup.mjs';
// Fanotify mask flags (subset used by the assertions below); events are
// tested bitwise, e.g. (event.mask & FAN_CREATE).
const FAN_ATTRIB = 0x4;          // metadata/attribute change
const FAN_CLOSE_WRITE = 0x8;     // file closed after writing
const FAN_CREATE = 0x100;        // file or directory created
const FAN_DELETE = 0x200;        // file or directory deleted
const FAN_RENAME = 0x10000000;   // rename; event carries `old` and `new` fields
const FAN_ONDIR = 0x40000000;    // event subject is a directory
const { mnt, teardown } = await setup();
const fa2json = spawnFa2json(mnt);
// Event buffer and marker machinery: doOp() parks a resolver in
// markerResolve and the line handler below releases it when the
// marker's CREATE event arrives.
const events = [];
let markerResolve = null;
let markerName = null;
let markerCounter = 0;
// Each fa2json stdout line is one JSON event object.
const rl = createInterface({ input: fa2json.stdout });
rl.on('line', line => {
  const event = JSON.parse(line);
  // Strip mount prefix from all path fields, so assertions work against
  // a virtual root ("/dir_a/file.txt" instead of "/tmp/fa2json-mnt-…/…").
  for (const key of ['name', 'old', 'new']) {
    if (event[key]) event[key] = event[key].slice(mnt.length);
  }
  events.push(event);
  // Release the pending doOp() once the awaited marker file is created.
  if (markerResolve && event.name === `/.marker_${markerName}` && (event.mask & FAN_CREATE)) {
    markerResolve();
    markerResolve = null;
  }
});
// Run one filesystem operation, then synchronise on a marker file:
// everything buffered up to the marker's CREATE event is the batch of
// events attributed to this operation.
async function doOp(label, fn) {
  const markerId = markerCounter++;
  await fn();
  // Drop the marker and wait until its CREATE event has been observed.
  await fs.writeFile(join(mnt, `.marker_${markerId}`), '');
  const seen = new Promise(resolve => {
    markerName = markerId;
    markerResolve = resolve;
  });
  await seen;
  // Drain the buffer; the marker events themselves are not part of the batch.
  const drained = events.splice(0);
  const batch = drained.filter(ev => !ev.name?.startsWith('/.marker_'));
  return { label, batch };
}
function assert(label, batch, check) {
if (!check(batch)) {
console.error(`FAIL: ${label}`);
console.error('Batch:', JSON.stringify(batch, null, 2));
throw new Error(`Assertion failed: ${label}`);
}
}
// True when some event in the batch carries every bit in `flags` and
// matches `path` in the given field ("name" by default; pass "old" or
// "new" for rename events).
function hasEvent(batch, flags, path, field = 'name') {
  for (const ev of batch) {
    if ((ev.mask & flags) === flags && ev[field] === path) return true;
  }
  return false;
}
// Main sequence: each doOp() performs one FS operation and returns the
// events attributed to it; each assert() checks the batch by presence
// (extra events are ignored). First failing assertion aborts the run.
try {
  let op;
  op = await doOp('mkdir /dir_a', () => fs.mkdir(join(mnt, 'dir_a')));
  assert(op.label, op.batch, b => hasEvent(b, FAN_CREATE | FAN_ONDIR, '/dir_a'));
  op = await doOp('touch /file_a.txt', () => fs.writeFile(join(mnt, 'file_a.txt'), ''));
  assert(op.label, op.batch, b => hasEvent(b, FAN_CREATE, '/file_a.txt'));
  op = await doOp('write /file_a.txt', () => fs.writeFile(join(mnt, 'file_a.txt'), 'content'));
  assert(op.label, op.batch, b => hasEvent(b, FAN_CLOSE_WRITE, '/file_a.txt'));
  op = await doOp('mkdir /dir_b', () => fs.mkdir(join(mnt, 'dir_b')));
  assert(op.label, op.batch, b => hasEvent(b, FAN_CREATE | FAN_ONDIR, '/dir_b'));
  op = await doOp('touch /dir_b/nested.txt', () => fs.writeFile(join(mnt, 'dir_b', 'nested.txt'), ''));
  assert(op.label, op.batch, b => hasEvent(b, FAN_CREATE, '/dir_b/nested.txt'));
  // Renames are matched on the `old`/`new` fields rather than `name`.
  op = await doOp('mv /file_a.txt /file_b.txt', () => fs.rename(join(mnt, 'file_a.txt'), join(mnt, 'file_b.txt')));
  assert(op.label, op.batch, b => b.some(e => (e.mask & FAN_RENAME) && e.old === '/file_a.txt' && e.new === '/file_b.txt'));
  op = await doOp('mv /dir_b /dir_a/dir_b_moved', () => fs.rename(join(mnt, 'dir_b'), join(mnt, 'dir_a', 'dir_b_moved')));
  assert(op.label, op.batch, b => b.some(e => (e.mask & FAN_RENAME) && (e.mask & FAN_ONDIR) && e.old === '/dir_b' && e.new === '/dir_a/dir_b_moved'));
  op = await doOp('chmod 600 /file_b.txt', () => fs.chmod(join(mnt, 'file_b.txt'), 0o600));
  assert(op.label, op.batch, b => hasEvent(b, FAN_ATTRIB, '/file_b.txt'));
  op = await doOp('touch -m /file_b.txt', () => fs.utimes(join(mnt, 'file_b.txt'), new Date(), new Date()));
  assert(op.label, op.batch, b => hasEvent(b, FAN_ATTRIB, '/file_b.txt'));
  op = await doOp('chmod 755 /dir_a', () => fs.chmod(join(mnt, 'dir_a'), 0o755));
  assert(op.label, op.batch, b => hasEvent(b, FAN_ATTRIB | FAN_ONDIR, '/dir_a'));
  op = await doOp('rm /file_b.txt', () => fs.unlink(join(mnt, 'file_b.txt')));
  assert(op.label, op.batch, b => hasEvent(b, FAN_DELETE, '/file_b.txt'));
  op = await doOp('rm /dir_a/dir_b_moved/nested.txt', () => fs.unlink(join(mnt, 'dir_a', 'dir_b_moved', 'nested.txt')));
  assert(op.label, op.batch, b => hasEvent(b, FAN_DELETE, '/dir_a/dir_b_moved/nested.txt'));
  op = await doOp('rmdir /dir_a/dir_b_moved', () => fs.rmdir(join(mnt, 'dir_a', 'dir_b_moved')));
  assert(op.label, op.batch, b => hasEvent(b, FAN_DELETE | FAN_ONDIR, '/dir_a/dir_b_moved'));
  op = await doOp('rmdir /dir_a', () => fs.rmdir(join(mnt, 'dir_a')));
  assert(op.label, op.batch, b => hasEvent(b, FAN_DELETE | FAN_ONDIR, '/dir_a'));
  console.log('PASS');
} catch (e) {
  console.error(e.message);
  process.exitCode = 1;
} finally {
  // Teardown runs unconditionally: kill fa2json, then unmount and
  // remove the image and mount directory.
  fa2json.kill();
  await teardown();
}