- Fix uninitialized use of `io_blocksize` in `nwipe_random_pass()` by computing
  the effective I/O block size before any sync-rate logic is executed.

- Add new helper function `nwipe_compute_sync_rate_for_device()` to `pass.c`,
  converting legacy `--sync` semantics (sync * st_blksize) into a per-write
  sync interval based on the actual `io_blocksize`, and disabling periodic
  syncing when using direct I/O (a worked sketch of this conversion follows
  the list below).

- Update both `nwipe_random_pass()` and `nwipe_static_pass()` to use the new
  helper, ensuring consistent and correct sync behaviour for all cached-I/O
  passes and removing duplicated sync-calculation logic.
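For reference, here is a rough standalone sketch of the conversion the new helper performs. It is illustrative only, not the code added to `pass.c`; the name `sketch_sync_rate` and the sample values in `main()` are made up for the example, and the direct-I/O short-circuit is omitted because it needs the nwipe context.

```c
/* Illustrative sketch of the --sync conversion described above (not the
 * actual pass.c helper). Legacy meaning: fdatasync() every sync_opt *
 * st_blksize bytes; we turn that into "sync every K writes of io_blocksize". */
#include <stdio.h>
#include <limits.h>

static int sketch_sync_rate( int sync_opt, unsigned long long st_blksize, unsigned long long io_blocksize )
{
    if( sync_opt <= 0 || io_blocksize == 0 )
        return 0; /* periodic syncing disabled */

    unsigned long long bytes_between_sync = (unsigned long long) sync_opt * st_blksize;

    unsigned long long writes = bytes_between_sync / io_blocksize;
    if( writes == 0 )
        return 1; /* io_blocksize exceeds the legacy interval: sync every write */
    if( writes > (unsigned long long) INT_MAX )
        return INT_MAX;
    return (int) writes;
}

int main( void )
{
    /* Example (made-up numbers): --sync=1024 with st_blksize = 4096 and a 1 MiB
     * I/O block gives 4 MiB between syncs, i.e. a sync every 4 writes. */
    printf( "%d\n", sketch_sync_rate( 1024, 4096, 1024 * 1024 ) );
    return 0;
}
```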
Fabian Druschke
2025-11-27 23:02:28 +01:00
parent d97c8a1014
commit 259ee26273


@@ -26,6 +26,7 @@
#include <stdlib.h> /* posix_memalign, malloc, free */
#include <string.h> /* memset, memcpy, memcmp */
#include <errno.h>
#include <limits.h>
#include "nwipe.h"
#include "context.h"
@@ -153,6 +154,48 @@ static void* nwipe_alloc_io_buffer( const nwipe_context_t* c, size_t size, int c
    return ptr;
}
/*
 * Compute the per-write sync rate for a given device and I/O block size.
 *
 * Historically, --sync=N meant "fdatasync() every N * st_blksize bytes".
 * Now that we use large I/O blocks, we convert that into "sync every K writes",
 * where each write is of size io_blocksize.
 *
 * For O_DIRECT we return 0 because write() already reports I/O errors directly.
 */
static int nwipe_compute_sync_rate_for_device( const nwipe_context_t* c, size_t io_blocksize )
{
    int syncRate = nwipe_options.sync;

    /* No periodic sync in direct I/O mode. */
    if( nwipe_options.io_mode == NWIPE_IO_MODE_DIRECT )
        return 0;

    if( syncRate <= 0 )
        return 0;

    if( io_blocksize == 0 )
        return 0;

    /* Old semantics: bytes between syncs = sync * st_blksize. */
    unsigned long long bytes_between_sync =
        (unsigned long long) syncRate * (unsigned long long) c->device_stat.st_blksize;

    if( bytes_between_sync == 0 )
        return 0;

    /* Convert to "writes between syncs". */
    unsigned long long tmp = bytes_between_sync / (unsigned long long) io_blocksize;

    if( tmp == 0 )
        return 1; /* at least every write */

    if( tmp > (unsigned long long) INT_MAX )
        return INT_MAX; /* just in case */

    return (int) tmp;
}

/*
 * nwipe_random_verify
 *
@@ -350,23 +393,8 @@ int nwipe_random_pass( NWIPE_METHOD_SIGNATURE )
        syncRate = 0;
    }
    /* Preserve the original "bytes between syncs" behaviour:
     * previously: sync writes every `sync` * st_blksize bytes.
     * now that we use large io_blocksize, adjust syncRate accordingly. */
    if( syncRate > 0 )
    {
        unsigned long long bytes_between_sync =
            (unsigned long long) syncRate * (unsigned long long) c->device_stat.st_blksize;
        if( bytes_between_sync > 0 && io_blocksize > 0 )
        {
            syncRate = (int) ( bytes_between_sync / io_blocksize );
            if( syncRate < 1 )
            {
                syncRate = 1;
            }
        }
    }

    /* Compute the per-write sync rate based on io_blocksize and old semantics. */
    syncRate = nwipe_compute_sync_rate_for_device( c, io_blocksize );
    int i = 0;
    int idx;
@@ -812,6 +840,9 @@ int nwipe_static_pass( NWIPE_METHOD_SIGNATURE, nwipe_pattern_t* pattern )
    io_blocksize = nwipe_effective_io_blocksize( c );

    /* Compute per-write sync rate (same semantics as random pass). */
    syncRate = nwipe_compute_sync_rate_for_device( c, io_blocksize );

    /*
     * For static patterns we want enough buffer space to always have a
     * contiguous window of "io_blocksize" bytes available starting at any