Compare commits

...

4 Commits

7 changed files with 237 additions and 12 deletions

1
.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
build/

24
Makefile Normal file
View File

@@ -0,0 +1,24 @@
# Build configuration for fa2json (fanotify -> JSON event stream).
SHELL := /bin/bash
CC := cc
MKDIR := mkdir
CFLAGS := -Wall -Werror -Os
BUILD_DIR := build
TARGET := $(BUILD_DIR)/fa2json
SRCS := fs-watcher.c json-writer.c
.PHONY: all test dev clean
# Default target: build the fa2json binary.
all: $(TARGET)
# Link both translation units directly into build/fa2json.
$(TARGET): $(SRCS)
	$(MKDIR) -p $(BUILD_DIR)
	$(CC) $(CFLAGS) -o $@ $^
# Automated test suite (needs node; test harness invokes sudo for mount).
test: $(TARGET)
	node test/test.mjs
# Interactive developer mode; pass extra flags via ARGS="..."
dev: $(TARGET)
	node test/dev.mjs $(ARGS)
clean:
	rm -rf $(BUILD_DIR)

View File

@@ -58,7 +58,7 @@ static void handle_events(int fafd, int mount_fd) {
clock_gettime(CLOCK_MONOTONIC, &mono);
clock_gettime(CLOCK_REALTIME, &wall);
fprintf(stdout, "{\"ts\": [%i, %i, %i, %i]", wall.tv_sec, wall.tv_nsec, mono.tv_sec, mono.tv_nsec);
fprintf(stdout, "{\"ts\": [%li, %li, %li, %li]", wall.tv_sec, wall.tv_nsec, mono.tv_sec, mono.tv_nsec);
char *ptr = (char *)(metadata + 1);
char *end = (char *)metadata + metadata->event_len;
@@ -98,7 +98,7 @@ static void handle_events(int fafd, int mount_fd) {
}
if (entry_index++) { fprintf(stdout, ", "); }
fprintf(stdout, ", \"mask\": %i}\n", metadata->mask);
fprintf(stdout, ", \"mask\": %lli}\n", metadata->mask);
metadata = FAN_EVENT_NEXT(metadata, size);

View File

@@ -21,15 +21,20 @@ Requires root (`fanotify` FID reporting and `mount` both need `CAP_SYS_ADMIN`).
## Setup
1. Create a temporary image file (`mktemp`)
2. `dd` 10M of zeros into it
1. Create a temporary image file (`mktemp /tmp/fa2json-test-XXXXXX.img`)
2. `truncate -s 10M` the image (sparse file, no need for `dd`)
3. `mkfs.ext4` the image
4. `losetup --find --show` to attach it as a loop device
5. `mount` the loop device to a temporary directory (`mktemp -d`)
6. Spawn `fa2json <mountpoint>` as a child process
7. Attach a `readline` interface to its stdout; parse each line as JSON and
4. Create a temporary mount directory (`mktemp -d /tmp/fa2json-mnt-XXXXXX`)
5. `sudo mount <img> <mntdir>` (no `losetup` needed — `mount` accepts image files directly)
6. `sudo chown $(id -u) <mntdir>` to hand ownership to the current user
7. `sync` to flush before fa2json starts listening
8. `sudo` spawn `fa2json <mountpoint>` as a child process (needs `CAP_SYS_ADMIN`)
9. Attach a `readline` interface to its stdout; parse each line as JSON and
push into an event buffer
Because steps 6 and 7 complete before fa2json is spawned in step 8, the `chown`
event never enters the fa2json stream, and all subsequent FS operations run
unprivileged.
---
## Teardown
@@ -37,10 +42,9 @@ Requires root (`fanotify` FID reporting and `mount` both need `CAP_SYS_ADMIN`).
Runs unconditionally in a `finally` block:
1. Kill the `fa2json` child process
2. `umount <mountpoint>`
3. `losetup -d <loopdev>`
4. `rm` the image file
5. `rmdir` the mount directory
2. `sudo umount <mountpoint>`
3. `rm` the image file
4. `rmdir` the mount directory
---

43
test/dev.mjs Normal file
View File

@@ -0,0 +1,43 @@
#!/usr/bin/env node
// Developer mode: set up loop device, stream fa2json output through jq,
// tear down on exit. Optionally launch a terminal at the mount point.
//
// Usage:
// sudo node test/dev.mjs
// sudo node test/dev.mjs --terminal konsole
// sudo node test/dev.mjs --terminal "konsole -e bash"
import { spawn } from 'node:child_process';
import { createInterface } from 'node:readline';
import { setup, spawnFa2json } from './lib/setup.mjs';
const terminalArg = (() => {
const i = process.argv.indexOf('--terminal');
return i !== -1 ? process.argv.slice(i + 1).join(' ') : null;
})();
const { mnt, teardown } = await setup();
console.error(`Mount point: ${mnt}`);
// Pipe fa2json stdout through jq for pretty coloured output
const fa2json = spawnFa2json(mnt);
const jq = spawn('jq', ['-C', '--unbuffered', '.'], { stdio: ['pipe', 'inherit', 'inherit'] });
fa2json.stdout.pipe(jq.stdin);
fa2json.on('exit', async () => {
jq.stdin.end();
await teardown();
process.exit(0);
});
// Launch optional terminal at mount point
if (terminalArg) {
const [cmd, ...args] = terminalArg.split(' ');
spawn(cmd, [...args, '--workdir', mnt], { detached: true, stdio: 'ignore' }).unref();
console.error(`Launched: ${terminalArg} --workdir ${mnt}`);
}
// Clean teardown on Ctrl+C
process.on('SIGINT', async () => {
fa2json.kill('SIGTERM');
});

34
test/lib/setup.mjs Normal file
View File

@@ -0,0 +1,34 @@
import { execSync, spawn } from 'node:child_process';
import { mkdtempSync, mkdirSync } from 'node:fs';
import { tmpdir } from 'node:os';
import { join } from 'node:path';
const FA2JSON = new URL('../../build/fa2json', import.meta.url).pathname;
// Build a throwaway 10M ext4 filesystem image, mount it, and hand ownership
// of the mount point to the current user. Returns the image path, the mount
// point, and an idempotent best-effort teardown().
export async function setup() {
  const sh = (cmd) => execSync(cmd).toString().trim();

  // Scratch image: sparse 10M file formatted as ext4.
  const img = sh('mktemp /tmp/fa2json-test-XXXXXX.img');
  sh(`truncate -s 10M ${img}`);
  sh(`mkfs.ext4 -q ${img}`);

  // Fresh mount directory; mount attaches the image file directly.
  const mnt = sh('mktemp -d /tmp/fa2json-mnt-XXXXXX');
  sh(`sudo mount ${img} ${mnt}`);

  // Make the mount writable by the invoking user, then flush so the chown
  // happens entirely before fa2json starts listening.
  sh(`sudo chown ${process.getuid()} ${mnt}`);
  sh('sync');

  // Best-effort cleanup: each step tolerates failure of the earlier ones.
  const teardown = async () => {
    try { sh(`sudo umount ${mnt}`); } catch {}
    try { sh(`rm -f ${img}`); } catch {}
    try { sh(`rmdir ${mnt}`); } catch {}
  };

  return { img, mnt, teardown };
}
// Launch fa2json on the given mount point under sudo (fanotify FID reporting
// needs CAP_SYS_ADMIN); stdout is piped for the caller, stderr passes through.
export function spawnFa2json(mnt) {
  return spawn('sudo', [FA2JSON, mnt], { stdio: ['ignore', 'pipe', 'inherit'] });
}

119
test/test.mjs Normal file
View File

@@ -0,0 +1,119 @@
#!/usr/bin/env node
// Automated test runner. Exit 0 = pass, exit 1 = fail.
// Requires root (sudo) for mount and fa2json.
import { createInterface } from 'node:readline';
import { promises as fs } from 'node:fs';
import { join } from 'node:path';
import { setup, spawnFa2json } from './lib/setup.mjs';
// Fanotify mask flags (subset of <linux/fanotify.h> values checked below).
const FAN_ATTRIB = 0x4;
const FAN_CLOSE_WRITE = 0x8;
const FAN_CREATE = 0x100;
const FAN_DELETE = 0x200;
const FAN_RENAME = 0x10000000;
const FAN_ONDIR = 0x40000000;

// Mount a scratch filesystem and start fa2json watching it.
const { mnt, teardown } = await setup();
const fa2json = spawnFa2json(mnt);

// Event buffer and marker machinery: doOp() drops a `.marker_<n>` file after
// each operation and waits for its CREATE event, which guarantees all events
// for the operation itself have already been buffered.
const events = [];
let markerResolve = null;  // resolver for the marker doOp() is awaiting
let markerName = null;     // numeric id of that marker
let markerCounter = 0;     // monotonically increasing marker id

const rl = createInterface({ input: fa2json.stdout });
rl.on('line', line => {
  const event = JSON.parse(line);
  // Strip mount prefix from all path fields
  for (const key of ['name', 'old', 'new']) {
    if (event[key]) event[key] = event[key].slice(mnt.length);
  }
  events.push(event);
  // Wake the pending doOp() once the awaited marker's CREATE arrives.
  if (markerResolve && event.name === `/.marker_${markerName}` && (event.mask & FAN_CREATE)) {
    markerResolve();
    markerResolve = null;
  }
});
// Perform a FS operation, drop a marker file, then wait until the marker's
// CREATE event is observed; everything buffered up to that point (minus
// marker events themselves) is returned as the operation's event batch.
async function doOp(label, fn) {
  const id = markerCounter++;
  await fn();
  // Register the waiter BEFORE creating the marker file: fanotify emits
  // FAN_CREATE at creation time, so the readline handler can process the
  // marker event while writeFile is still being awaited. If markerResolve
  // were set only afterwards, that event would be missed and the test would
  // hang forever.
  const seen = new Promise(resolve => {
    markerName = id;
    markerResolve = resolve;
  });
  await fs.writeFile(join(mnt, `.marker_${id}`), '');
  await seen;
  // Collect all events since previous marker (excluding marker events themselves)
  const batch = events.splice(0).filter(e => !e.name?.startsWith('/.marker_'));
  return { label, batch };
}
// Run `check` against an event batch; on failure, dump the batch for
// debugging and throw so the runner's catch block marks the test failed.
function assert(label, batch, check) {
  if (check(batch)) return;
  console.error(`FAIL: ${label}`);
  console.error('Batch:', JSON.stringify(batch, null, 2));
  throw new Error(`Assertion failed: ${label}`);
}
// True when some event in the batch carries every bit of `flags` and whose
// `field` (default: the event's name) equals `path`.
function hasEvent(batch, flags, path, field = 'name') {
  for (const event of batch) {
    const hasAllFlags = (event.mask & flags) === flags;
    if (hasAllFlags && event[field] === path) return true;
  }
  return false;
}
// Main test sequence: each doOp() returns the batch of events produced by the
// operation; assertions check the expected fanotify flags and /-rooted paths.
try {
  let op;

  // Creation: directories carry FAN_ONDIR alongside FAN_CREATE.
  op = await doOp('mkdir /dir_a', () => fs.mkdir(join(mnt, 'dir_a')));
  assert(op.label, op.batch, b => hasEvent(b, FAN_CREATE | FAN_ONDIR, '/dir_a'));
  op = await doOp('touch /file_a.txt', () => fs.writeFile(join(mnt, 'file_a.txt'), ''));
  assert(op.label, op.batch, b => hasEvent(b, FAN_CREATE, '/file_a.txt'));
  op = await doOp('write /file_a.txt', () => fs.writeFile(join(mnt, 'file_a.txt'), 'content'));
  assert(op.label, op.batch, b => hasEvent(b, FAN_CLOSE_WRITE, '/file_a.txt'));
  op = await doOp('mkdir /dir_b', () => fs.mkdir(join(mnt, 'dir_b')));
  assert(op.label, op.batch, b => hasEvent(b, FAN_CREATE | FAN_ONDIR, '/dir_b'));
  op = await doOp('touch /dir_b/nested.txt', () => fs.writeFile(join(mnt, 'dir_b', 'nested.txt'), ''));
  assert(op.label, op.batch, b => hasEvent(b, FAN_CREATE, '/dir_b/nested.txt'));

  // Renames: a single FAN_RENAME event reports both `old` and `new` paths.
  op = await doOp('mv /file_a.txt /file_b.txt', () => fs.rename(join(mnt, 'file_a.txt'), join(mnt, 'file_b.txt')));
  assert(op.label, op.batch, b => b.some(e => (e.mask & FAN_RENAME) && e.old === '/file_a.txt' && e.new === '/file_b.txt'));
  op = await doOp('mv /dir_b /dir_a/dir_b_moved', () => fs.rename(join(mnt, 'dir_b'), join(mnt, 'dir_a', 'dir_b_moved')));
  assert(op.label, op.batch, b => b.some(e => (e.mask & FAN_RENAME) && (e.mask & FAN_ONDIR) && e.old === '/dir_b' && e.new === '/dir_a/dir_b_moved'));

  // Metadata changes (chmod, utimes) surface as FAN_ATTRIB.
  op = await doOp('chmod 600 /file_b.txt', () => fs.chmod(join(mnt, 'file_b.txt'), 0o600));
  assert(op.label, op.batch, b => hasEvent(b, FAN_ATTRIB, '/file_b.txt'));
  op = await doOp('touch -m /file_b.txt', () => fs.utimes(join(mnt, 'file_b.txt'), new Date(), new Date()));
  assert(op.label, op.batch, b => hasEvent(b, FAN_ATTRIB, '/file_b.txt'));
  op = await doOp('chmod 755 /dir_a', () => fs.chmod(join(mnt, 'dir_a'), 0o755));
  assert(op.label, op.batch, b => hasEvent(b, FAN_ATTRIB | FAN_ONDIR, '/dir_a'));

  // Deletion, innermost entries first so every rmdir targets an empty dir.
  op = await doOp('rm /file_b.txt', () => fs.unlink(join(mnt, 'file_b.txt')));
  assert(op.label, op.batch, b => hasEvent(b, FAN_DELETE, '/file_b.txt'));
  op = await doOp('rm /dir_a/dir_b_moved/nested.txt', () => fs.unlink(join(mnt, 'dir_a', 'dir_b_moved', 'nested.txt')));
  assert(op.label, op.batch, b => hasEvent(b, FAN_DELETE, '/dir_a/dir_b_moved/nested.txt'));
  op = await doOp('rmdir /dir_a/dir_b_moved', () => fs.rmdir(join(mnt, 'dir_a', 'dir_b_moved')));
  assert(op.label, op.batch, b => hasEvent(b, FAN_DELETE | FAN_ONDIR, '/dir_a/dir_b_moved'));
  op = await doOp('rmdir /dir_a', () => fs.rmdir(join(mnt, 'dir_a')));
  assert(op.label, op.batch, b => hasEvent(b, FAN_DELETE | FAN_ONDIR, '/dir_a'));

  console.log('PASS');
} catch (e) {
  // A failed assertion (or any setup error) lands here; non-zero exit for CI.
  console.error(e.message);
  process.exitCode = 1;
} finally {
  // Always stop the watcher and unmount/remove the scratch filesystem.
  fa2json.kill();
  await teardown();
}