The CPageWrite flag was originally added to prevent a scenario
where a thread doing "writepage" would realize that the cache
was too full and that some of its contents need to be written
back to the server. Before writing back it would ask the OS to
flush any dirty VM associated with the vcache entries that are
to be written, to make sure the data is not stale. This flush
could itself trigger writeback, leading to deadly recursion.
One such scenario is a process doing mmap writes to a file larger
than the cache.
With some kernel versions and some callers of writepage, this
can cause the mapping to be marked as being in an error state,
leading to EIO errors passed back to user space.
Make the recursion check more specific so that it only bails out when
the calling thread is one that is already performing writeback on the
file. A list of current writers is maintained instead of a single
state flag.
This lets other threads (like the flusher thread) go on with
writeback to the same file, and limits the WRITEPAGE_ACTIVATE
return case to call sites that can deal with it.
In testing this helps avoid EIO errors when writing large
chunks of data through mmap.
Thanks to Yadav Yadavendra for extensive analysis and testing.
Reviewed-on: http://gerrit.openafs.org/11124
Reviewed-by: Daria Brashear <shadow@your-file-system.com>
Tested-by: BuildBot <buildbot@rampaginggeek.com>
(cherry picked from commit 95b857399d71cb1f6619e625bff256f8c4c72c6a)
Change-Id: I08ced97c4f58f95375fda2ed9c707cdf7657e493
Reviewed-on: http://gerrit.openafs.org/11877
Reviewed-by: Benjamin Kaduk <kaduk@mit.edu>
Tested-by: BuildBot <buildbot@rampaginggeek.com>
Reviewed-by: Stephan Wiesand <stephan.wiesand@desy.de>
tvc->v = ip;
#endif
+ /* Initialize per-vcache tracking of threads currently in writepage
+  * (replaces the old single CPageWrite state bit). */
+ INIT_LIST_HEAD(&tvc->pagewriters);
+ spin_lock_init(&tvc->pagewriter_lock);
+
return tvc;
}
{
struct inode *ip = AFSTOV(avc);
- if (avc->f.states & CPageWrite)
+ /* NOTE(review): lockless peek at the writer list — presumably an
+  * opportunistic skip of the flush when any writeback is in flight;
+  * confirm a racy read is acceptable for all callers. */
+ if (!list_empty(&avc->pagewriters))
return; /* someone already writing */
/* filemap_fdatasync() only exported in 2.4.5 and above */
* locked */
static inline int
afs_linux_prepare_writeback(struct vcache *avc) {
- if (avc->f.states & CPageWrite) {
- return AOP_WRITEPAGE_ACTIVATE;
+ pid_t pid;
+ struct pagewriter *pw;
+
+ pid = MyPidxx2Pid(MyPidxx);
+ /* Prevent recursion into the writeback code */
+ spin_lock(&avc->pagewriter_lock);
+ list_for_each_entry(pw, &avc->pagewriters, link) {
+ if (pw->writer == pid) {
+ /* This thread is already writing back this vcache: bail so a
+  * VM flush it triggers cannot recurse into writepage. Other
+  * threads (e.g. the flusher) are allowed to proceed. */
+ spin_unlock(&avc->pagewriter_lock);
+ return AOP_WRITEPAGE_ACTIVATE;
+ }
}
- avc->f.states |= CPageWrite;
+ spin_unlock(&avc->pagewriter_lock);
+
+ /* Add ourselves to writer list */
+ /* Dropping the lock between the scan and the insert is safe: only
+  * the current thread could add an entry for this pid. */
+ pw = osi_Alloc(sizeof(struct pagewriter));
+ pw->writer = pid;
+ spin_lock(&avc->pagewriter_lock);
+ list_add_tail(&pw->link, &avc->pagewriters);
+ spin_unlock(&avc->pagewriter_lock);
+
return 0;
}
static inline void
afs_linux_complete_writeback(struct vcache *avc) {
- avc->f.states &= ~CPageWrite;
+ struct pagewriter *pw, *store;
+ pid_t pid;
+ struct list_head tofree;
+
+ INIT_LIST_HEAD(&tofree);
+ pid = MyPidxx2Pid(MyPidxx);
+ /* Remove ourselves from writer list */
+ spin_lock(&avc->pagewriter_lock);
+ list_for_each_entry_safe(pw, store, &avc->pagewriters, link) {
+ if (pw->writer == pid) {
+ list_del(&pw->link);
+ /* osi_Free may sleep so we need to defer it */
+ list_add_tail(&pw->link, &tofree);
+ }
+ }
+ spin_unlock(&avc->pagewriter_lock);
+ /* Spinlock dropped: now safe to do the (possibly sleeping) frees. */
+ list_for_each_entry_safe(pw, store, &tofree, link) {
+ list_del(&pw->link);
+ osi_Free(pw, sizeof(struct pagewriter));
+ }
}
/* Writeback a given page synchronously. Called with no AFS locks held */
#define CBulkStat 0x00020000 /* loaded by a bulk stat, and not ref'd since */
#define CUnlinkedDel 0x00040000
#define CVFlushed 0x00080000
-#ifdef AFS_LINUX22_ENV
-#define CPageWrite 0x00200000 /* to detect vm deadlock - linux */
-#elif defined(AFS_SGI_ENV)
+/* CPageWrite is gone: Linux now records individual writers in the
+ * vcache pagewriters list instead of a single state bit. */
+#if defined(AFS_SGI_ENV)
#define CWritingUFS 0x00200000 /* to detect vm deadlock - used by sgi */
#elif defined(AFS_DARWIN80_ENV)
#define CEvent 0x00200000 /* to preclude deadlock when sending events */
struct afs_q multiPage; /* list of multiPage_range structs */
#endif
afs_uint32 lastBRLWarnTime; /* last time we warned about byte-range locks */
+#ifdef AFS_LINUX26_ENV
+ spinlock_t pagewriter_lock; /* protects the pagewriters list */
+ struct list_head pagewriters; /* threads that are writing vm pages */
+#endif
};
+#ifdef AFS_LINUX26_ENV
+/* One entry per thread currently performing writepage on a vcache;
+ * chained on vcache.pagewriters under pagewriter_lock. */
+struct pagewriter {
+ struct list_head link; /* linkage on vcache->pagewriters */
+ pid_t writer; /* pid of the writing thread */
+};
+#endif
+
#define DONT_CHECK_MODE_BITS 0
#define CHECK_MODE_BITS 1
#define CMB_ALLOW_EXEC_AS_READ 2 /* For the NFS xlator */
/* remove entry from the volume hash table */
QRemove(&avc->vhashq);
+#if defined(AFS_LINUX26_ENV)
+ /* Drain any remaining pagewriter entries; presumably no thread is
+  * still writing at teardown — freed here defensively (confirm). */
+ {
+ struct pagewriter *pw, *store;
+ struct list_head tofree;
+
+ INIT_LIST_HEAD(&tofree);
+ spin_lock(&avc->pagewriter_lock);
+ list_for_each_entry_safe(pw, store, &avc->pagewriters, link) {
+ list_del(&pw->link);
+ /* afs_osi_Free may sleep so we need to defer it */
+ list_add_tail(&pw->link, &tofree);
+ }
+ spin_unlock(&avc->pagewriter_lock);
+ list_for_each_entry_safe(pw, store, &tofree, link) {
+ list_del(&pw->link);
+ afs_osi_Free(pw, sizeof(struct pagewriter));
+ }
+ }
+#endif
+
if (avc->mvid)
osi_FreeSmallSpace(avc->mvid);
avc->mvid = (struct VenusFid *)0;