netfs: Fix unbuffered/DIO writes to dispatch subrequests in strict sequence
Fix netfslib so that, when making an unbuffered or DIO write, it sends each
subrequest strictly sequentially, waiting until the previous one has been
'committed' before sending the next, so that we don't have pieces landing
out of order and potentially leaving a hole if an error occurs (ENOSPC, for
example).
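
To illustrate the ordering discipline (a minimal standalone sketch with
hypothetical issue_subreq()/wait_for_subreq() helpers, not the netfslib
API): each subrequest is issued and waited for before the next is started,
so a failure can only leave a clean prefix of the range written.

    #include <stddef.h>

    /* Hypothetical stand-ins for the transport; not netfslib calls. */
    struct subreq { size_t start, len, transferred; int error; };
    void issue_subreq(struct subreq *s);
    void wait_for_subreq(struct subreq *s);

    /* Issue one subrequest at a time, never starting subrequest N+1 until
     * subrequest N has completed, so an error (ENOSPC, say) cannot leave a
     * hole in the middle of the written range.
     */
    static int write_serially(size_t start, size_t len, size_t max_sub)
    {
            size_t done = 0;

            while (done < len) {
                    struct subreq s = {
                            .start = start + done,
                            .len = len - done < max_sub ? len - done : max_sub,
                    };

                    issue_subreq(&s);
                    wait_for_subreq(&s);    /* strict sequence: wait before issuing more */
                    if (s.error)
                            return s.error; /* only a clean prefix was written */
                    done += s.transferred;
            }
            return 0;
    }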
This is done by copying in just those bits of the subrequest issuing,
collection and retry machinery that are necessary to do one subrequest at a
time.  Retrying, in particular, is simpler: if the current subrequest needs
retrying, the source iterator can just be copied again and the subrequest
prepped and issued afresh, with no concern about whether it needs merging
with the previous or next subrequest in the sequence.
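
As a hedged sketch of why retry gets simpler (reusing the hypothetical
struct subreq above; none of these names are the patch's own): with only
one subrequest ever in flight, a retry is just "copy the source iterator
again and re-prep the same subrequest", with no merge logic at all.

    /* Hypothetical source iterator: a base pointer plus a position. */
    struct iter { const char *base; size_t pos; };

    /* Reset a failed subrequest for reissue.  The iterator is simply copied
     * afresh from the source and advanced past what was already committed;
     * no merging with a previous or next subrequest is ever needed because
     * nothing else is in flight.
     */
    static void reset_for_retry(struct subreq *s, struct iter *io,
                                const struct iter *source,
                                size_t start, size_t done, size_t total)
    {
            *io = *source;          /* fresh copy of the source iterator */
            io->pos += done;        /* skip the part already committed */
            s->start = start + done;
            s->len = total - done;
            s->transferred = 0;
            s->error = 0;
    }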
Note that the issuing loop waits for each subrequest to complete right after
issuing it, but this wait could be moved elsewhere, allowing preparatory
steps to be performed whilst the subrequest is in progress.  In particular,
once content encryption is available in netfslib, that could be done whilst
waiting, as could cleanup of buffers that have been completed.
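
Were the wait moved later, the overlap might look roughly like this (again
purely hypothetical, with prepare_subreq() standing in for, say, content
encryption; short writes are glossed over for brevity):

    void prepare_subreq(struct subreq *s);  /* hypothetical prep step */

    /* Prepare subrequest N+1 (e.g. encrypt its content) while subrequest N
     * is in flight, but still wait for and commit N before issuing N+1.
     */
    static int write_overlapped(size_t start, size_t len, size_t max_sub)
    {
            size_t done = 0;
            struct subreq cur = {
                    .start = start,
                    .len = len < max_sub ? len : max_sub,
            };

            prepare_subreq(&cur);
            while (done < len) {
                    struct subreq next = { 0 };
                    int have_next = done + cur.len < len;

                    issue_subreq(&cur);
                    if (have_next) {
                            next.start = cur.start + cur.len;
                            next.len = len - done - cur.len < max_sub ?
                                       len - done - cur.len : max_sub;
                            prepare_subreq(&next);  /* overlaps the in-flight write */
                    }
                    wait_for_subreq(&cur);          /* still commit strictly in order */
                    if (cur.error)
                            return cur.error;
                    done += cur.len;                /* assumes a full transfer */
                    if (!have_next)
                            break;
                    cur = next;
            }
            return 0;
    }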
Fixes: 153a9961b5 ("netfs: Implement unbuffered/DIO write support")
Signed-off-by: David Howells <dhowells@redhat.com>
Link: https://patch.msgid.link/58526.1772112753@warthog.procyon.org.uk
Tested-by: Steve French <sfrench@samba.org>
Reviewed-by: Paulo Alcantara (Red Hat) <pc@manguebit.org>
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
parent 28aaa9c399
commit a0b4c7a491
5 changed files with 221 additions and 77 deletions
@@ -9,6 +9,202 @@
 #include <linux/uio.h>
 #include "internal.h"
 
+/*
+ * Perform the cleanup rituals after an unbuffered write is complete.
+ */
+static void netfs_unbuffered_write_done(struct netfs_io_request *wreq)
+{
+	struct netfs_inode *ictx = netfs_inode(wreq->inode);
+
+	_enter("R=%x", wreq->debug_id);
+
+	/* Okay, declare that all I/O is complete. */
+	trace_netfs_rreq(wreq, netfs_rreq_trace_write_done);
+
+	if (!wreq->error)
+		netfs_update_i_size(ictx, &ictx->inode, wreq->start, wreq->transferred);
+
+	if (wreq->origin == NETFS_DIO_WRITE &&
+	    wreq->mapping->nrpages) {
+		/* mmap may have got underfoot and we may now have folios
+		 * locally covering the region we just wrote.  Attempt to
+		 * discard the folios, but leave in place any modified locally.
+		 * ->write_iter() is prevented from interfering by the DIO
+		 * counter.
+		 */
+		pgoff_t first = wreq->start >> PAGE_SHIFT;
+		pgoff_t last = (wreq->start + wreq->transferred - 1) >> PAGE_SHIFT;
+
+		invalidate_inode_pages2_range(wreq->mapping, first, last);
+	}
+
+	if (wreq->origin == NETFS_DIO_WRITE)
+		inode_dio_end(wreq->inode);
+
+	_debug("finished");
+	netfs_wake_rreq_flag(wreq, NETFS_RREQ_IN_PROGRESS, netfs_rreq_trace_wake_ip);
+	/* As we cleared NETFS_RREQ_IN_PROGRESS, we acquired its ref. */
+
+	if (wreq->iocb) {
+		size_t written = umin(wreq->transferred, wreq->len);
+
+		wreq->iocb->ki_pos += written;
+		if (wreq->iocb->ki_complete) {
+			trace_netfs_rreq(wreq, netfs_rreq_trace_ki_complete);
+			wreq->iocb->ki_complete(wreq->iocb, wreq->error ?: written);
+		}
+		wreq->iocb = VFS_PTR_POISON;
+	}
+
+	netfs_clear_subrequests(wreq);
+}
+
+/*
+ * Collect the subrequest results of unbuffered write subrequests.
+ */
+static void netfs_unbuffered_write_collect(struct netfs_io_request *wreq,
+					   struct netfs_io_stream *stream,
+					   struct netfs_io_subrequest *subreq)
+{
+	trace_netfs_collect_sreq(wreq, subreq);
+
+	spin_lock(&wreq->lock);
+	list_del_init(&subreq->rreq_link);
+	spin_unlock(&wreq->lock);
+
+	wreq->transferred += subreq->transferred;
+	iov_iter_advance(&wreq->buffer.iter, subreq->transferred);
+
+	stream->collected_to = subreq->start + subreq->transferred;
+	wreq->collected_to = stream->collected_to;
+	netfs_put_subrequest(subreq, netfs_sreq_trace_put_done);
+
+	trace_netfs_collect_stream(wreq, stream);
+	trace_netfs_collect_state(wreq, wreq->collected_to, 0);
+}
+
+/*
+ * Write data to the server without going through the pagecache and without
+ * writing it to the local cache.  We dispatch the subrequests serially and
+ * wait for each to complete before dispatching the next, lest we leave a gap
+ * in the data written due to a failure such as ENOSPC.  We could, however,
+ * attempt to do preparation such as content encryption for the next subreq
+ * whilst the current is in progress.
+ */
+static int netfs_unbuffered_write(struct netfs_io_request *wreq)
+{
+	struct netfs_io_subrequest *subreq = NULL;
+	struct netfs_io_stream *stream = &wreq->io_streams[0];
+	int ret;
+
+	_enter("%llx", wreq->len);
+
+	if (wreq->origin == NETFS_DIO_WRITE)
+		inode_dio_begin(wreq->inode);
+
+	stream->collected_to = wreq->start;
+
+	for (;;) {
+		bool retry = false;
+
+		if (!subreq) {
+			netfs_prepare_write(wreq, stream, wreq->start + wreq->transferred);
+			subreq = stream->construct;
+			stream->construct = NULL;
+			stream->front = NULL;
+		}
+
+		/* Check if (re-)preparation failed. */
+		if (unlikely(test_bit(NETFS_SREQ_FAILED, &subreq->flags))) {
+			netfs_write_subrequest_terminated(subreq, subreq->error);
+			wreq->error = subreq->error;
+			break;
+		}
+
+		iov_iter_truncate(&subreq->io_iter, wreq->len - wreq->transferred);
+		if (!iov_iter_count(&subreq->io_iter))
+			break;
+
+		subreq->len = netfs_limit_iter(&subreq->io_iter, 0,
+					       stream->sreq_max_len,
+					       stream->sreq_max_segs);
+		iov_iter_truncate(&subreq->io_iter, subreq->len);
+		stream->submit_extendable_to = subreq->len;
+
+		trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
+		stream->issue_write(subreq);
+
+		/* Async, need to wait. */
+		netfs_wait_for_in_progress_stream(wreq, stream);
+
+		if (test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
+			retry = true;
+		} else if (test_bit(NETFS_SREQ_FAILED, &subreq->flags)) {
+			ret = subreq->error;
+			wreq->error = ret;
+			netfs_see_subrequest(subreq, netfs_sreq_trace_see_failed);
+			subreq = NULL;
+			break;
+		}
+		ret = 0;
+
+		if (!retry) {
+			netfs_unbuffered_write_collect(wreq, stream, subreq);
+			subreq = NULL;
+			if (wreq->transferred >= wreq->len)
+				break;
+			if (!wreq->iocb && signal_pending(current)) {
+				ret = wreq->transferred ? -EINTR : -ERESTARTSYS;
+				trace_netfs_rreq(wreq, netfs_rreq_trace_intr);
+				break;
+			}
+			continue;
+		}
+
+		/* We need to retry the last subrequest, so first reset the
+		 * iterator, taking into account what, if anything, we managed
+		 * to transfer.
+		 */
+		subreq->error = -EAGAIN;
+		trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
+		if (subreq->transferred > 0)
+			iov_iter_advance(&wreq->buffer.iter, subreq->transferred);
+
+		if (stream->source == NETFS_UPLOAD_TO_SERVER &&
+		    wreq->netfs_ops->retry_request)
+			wreq->netfs_ops->retry_request(wreq, stream);
+
+		__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
+		__clear_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
+		__clear_bit(NETFS_SREQ_FAILED, &subreq->flags);
+		subreq->io_iter = wreq->buffer.iter;
+		subreq->start = wreq->start + wreq->transferred;
+		subreq->len = wreq->len - wreq->transferred;
+		subreq->transferred = 0;
+		subreq->retry_count += 1;
+		stream->sreq_max_len = UINT_MAX;
+		stream->sreq_max_segs = INT_MAX;
+
+		netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
+		stream->prepare_write(subreq);
+
+		__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
+		netfs_stat(&netfs_n_wh_retry_write_subreq);
+	}
+
+	netfs_unbuffered_write_done(wreq);
+	_leave(" = %d", ret);
+	return ret;
+}
+
+static void netfs_unbuffered_write_async(struct work_struct *work)
+{
+	struct netfs_io_request *wreq = container_of(work, struct netfs_io_request, work);
+
+	netfs_unbuffered_write(wreq);
+	netfs_put_request(wreq, netfs_rreq_trace_put_complete);
+}
+
 /*
  * Perform an unbuffered write where we may have to do an RMW operation on an
  * encrypted file. This can also be used for direct I/O writes.
@@ -70,35 +266,35 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
 	 */
 		wreq->buffer.iter = *iter;
 	}
 
+	wreq->len = iov_iter_count(&wreq->buffer.iter);
 	}
 
 	__set_bit(NETFS_RREQ_USE_IO_ITER, &wreq->flags);
-	if (async)
-		__set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &wreq->flags);
 
 	/* Copy the data into the bounce buffer and encrypt it. */
 	// TODO
 
 	/* Dispatch the write. */
 	__set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
-	if (async)
+	if (async) {
+		INIT_WORK(&wreq->work, netfs_unbuffered_write_async);
 		wreq->iocb = iocb;
-	wreq->len = iov_iter_count(&wreq->buffer.iter);
-	ret = netfs_unbuffered_write(wreq, is_sync_kiocb(iocb), wreq->len);
-	if (ret < 0) {
-		_debug("begin = %zd", ret);
-		goto out;
-	}
-
-	if (!async) {
-		ret = netfs_wait_for_write(wreq);
-		if (ret > 0)
-			iocb->ki_pos += ret;
-	} else {
+		queue_work(system_dfl_wq, &wreq->work);
 		ret = -EIOCBQUEUED;
+	} else {
+		ret = netfs_unbuffered_write(wreq);
+		if (ret < 0) {
+			_debug("begin = %zd", ret);
+		} else {
+			iocb->ki_pos += wreq->transferred;
+			ret = wreq->transferred ?: wreq->error;
+		}
+
+		netfs_put_request(wreq, netfs_rreq_trace_put_complete);
 	}
 
 out:
 	netfs_put_request(wreq, netfs_rreq_trace_put_return);
 	return ret;
@@ -198,6 +198,9 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
 						struct file *file,
 						loff_t start,
 						enum netfs_io_origin origin);
+void netfs_prepare_write(struct netfs_io_request *wreq,
+			 struct netfs_io_stream *stream,
+			 loff_t start);
 void netfs_reissue_write(struct netfs_io_stream *stream,
 			 struct netfs_io_subrequest *subreq,
 			 struct iov_iter *source);
@@ -212,7 +215,6 @@ int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_c
 			       struct folio **writethrough_cache);
 ssize_t netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
 			       struct folio *writethrough_cache);
-int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len);
 
 /*
  * write_retry.c
@@ -399,27 +399,6 @@ bool netfs_write_collection(struct netfs_io_request *wreq)
 		ictx->ops->invalidate_cache(wreq);
 	}
 
-	if ((wreq->origin == NETFS_UNBUFFERED_WRITE ||
-	     wreq->origin == NETFS_DIO_WRITE) &&
-	    !wreq->error)
-		netfs_update_i_size(ictx, &ictx->inode, wreq->start, wreq->transferred);
-
-	if (wreq->origin == NETFS_DIO_WRITE &&
-	    wreq->mapping->nrpages) {
-		/* mmap may have got underfoot and we may now have folios
-		 * locally covering the region we just wrote.  Attempt to
-		 * discard the folios, but leave in place any modified locally.
-		 * ->write_iter() is prevented from interfering by the DIO
-		 * counter.
-		 */
-		pgoff_t first = wreq->start >> PAGE_SHIFT;
-		pgoff_t last = (wreq->start + wreq->transferred - 1) >> PAGE_SHIFT;
-		invalidate_inode_pages2_range(wreq->mapping, first, last);
-	}
-
-	if (wreq->origin == NETFS_DIO_WRITE)
-		inode_dio_end(wreq->inode);
-
 	_debug("finished");
 	netfs_wake_rreq_flag(wreq, NETFS_RREQ_IN_PROGRESS, netfs_rreq_trace_wake_ip);
 	/* As we cleared NETFS_RREQ_IN_PROGRESS, we acquired its ref. */
@@ -154,9 +154,9 @@ EXPORT_SYMBOL(netfs_prepare_write_failed);
  * Prepare a write subrequest. We need to allocate a new subrequest
  * if we don't have one.
  */
-static void netfs_prepare_write(struct netfs_io_request *wreq,
-				struct netfs_io_stream *stream,
-				loff_t start)
+void netfs_prepare_write(struct netfs_io_request *wreq,
+			 struct netfs_io_stream *stream,
+			 loff_t start)
 {
 	struct netfs_io_subrequest *subreq;
 	struct iov_iter *wreq_iter = &wreq->buffer.iter;
@@ -698,41 +698,6 @@ ssize_t netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_c
 	return ret;
 }
 
-/*
- * Write data to the server without going through the pagecache and without
- * writing it to the local cache.
- */
-int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len)
-{
-	struct netfs_io_stream *upload = &wreq->io_streams[0];
-	ssize_t part;
-	loff_t start = wreq->start;
-	int error = 0;
-
-	_enter("%zx", len);
-
-	if (wreq->origin == NETFS_DIO_WRITE)
-		inode_dio_begin(wreq->inode);
-
-	while (len) {
-		// TODO: Prepare content encryption
-
-		_debug("unbuffered %zx", len);
-		part = netfs_advance_write(wreq, upload, start, len, false);
-		start += part;
-		len -= part;
-		rolling_buffer_advance(&wreq->buffer, part);
-		if (test_bit(NETFS_RREQ_PAUSE, &wreq->flags))
-			netfs_wait_for_paused_write(wreq);
-		if (test_bit(NETFS_RREQ_FAILED, &wreq->flags))
-			break;
-	}
-
-	netfs_end_issue_write(wreq);
-	_leave(" = %d", error);
-	return error;
-}
-
 /*
  * Write some of a pending folio data back to the server and/or the cache.
  */
@@ -57,6 +57,7 @@
 	EM(netfs_rreq_trace_done,		"DONE   ")	\
 	EM(netfs_rreq_trace_end_copy_to_cache,	"END-C2C")	\
 	EM(netfs_rreq_trace_free,		"FREE   ")	\
+	EM(netfs_rreq_trace_intr,		"INTR   ")	\
 	EM(netfs_rreq_trace_ki_complete,	"KI-CMPL")	\
 	EM(netfs_rreq_trace_recollect,		"RECLLCT")	\
 	EM(netfs_rreq_trace_redirty,		"REDIRTY")	\
@@ -169,7 +170,8 @@
 	EM(netfs_sreq_trace_put_oom,		"PUT OOM    ")	\
 	EM(netfs_sreq_trace_put_wip,		"PUT WIP    ")	\
 	EM(netfs_sreq_trace_put_work,		"PUT WORK   ")	\
-	E_(netfs_sreq_trace_put_terminated,	"PUT TERM   ")
+	EM(netfs_sreq_trace_put_terminated,	"PUT TERM   ")	\
+	E_(netfs_sreq_trace_see_failed,		"SEE FAILED ")
 
 #define netfs_folio_traces \
 	EM(netfs_folio_is_uptodate,		"mod-uptodate") \