From: Ben Kaduk
Date: Fri, 22 Mar 2013 17:51:02 +0000 (-0400)
Subject: Catch up to FreeBSD VM object read/write locks
X-Git-Tag: upstream/1.6.6_pre2^2~29
X-Git-Url: https://git.michaelhowe.org/gitweb/?a=commitdiff_plain;h=4dad6373c808e67d87faf14e2d011d58979dc737;p=packages%2Fo%2Fopenafs.git

Catch up to FreeBSD VM object read/write locks

Upstream r248084 changed the vm_object mutex to be a rwlock, allowing
for future optimizations. This is a KPI change, so introduce
conditionals to be compatible with both versions of the KPI.

Reviewed-on: http://gerrit.openafs.org/10295
Tested-by: BuildBot
Reviewed-by: Derrick Brashear
(cherry picked from commit 897e970dbe09d163479719b4c9befa660d99874b)

Change-Id: Ieeb30f3ad9accab37117109ccf82952812157aab
Reviewed-on: http://gerrit.openafs.org/10378
Tested-by: BuildBot
Reviewed-by: Andrew Deason
Reviewed-by: Benjamin Kaduk
Reviewed-by: Stephan Wiesand
---

diff --git a/src/afs/FBSD/osi_vm.c b/src/afs/FBSD/osi_vm.c
index 072142d79..8f4c4b5be 100644
--- a/src/afs/FBSD/osi_vm.c
+++ b/src/afs/FBSD/osi_vm.c
@@ -30,11 +30,14 @@
 #include
 #include
 #include
+#if __FreeBSD_version >= 1000030
+#include
+#endif
 
 /*
  * FreeBSD implementation notes:
  * Most of these operations require us to frob vm_objects. Most
- * functions require that the object be locked (with VM_OBJECT_LOCK)
+ * functions require that the object be locked (with VM_OBJECT_*LOCK)
  * on entry and leave it locked on exit. The locking protocol
  * requires that we access vp->v_object with the heavy vnode lock
  * held and the vnode interlock unlocked.
@@ -57,6 +60,14 @@
 #define islocked_vnode(v)   VOP_ISLOCKED((v), curthread)
 #endif
 
+#if __FreeBSD_version >= 1000030
+#define AFS_VM_OBJECT_WLOCK(o)      VM_OBJECT_WLOCK(o)
+#define AFS_VM_OBJECT_WUNLOCK(o)    VM_OBJECT_WUNLOCK(o)
+#else
+#define AFS_VM_OBJECT_WLOCK(o)      VM_OBJECT_LOCK(o)
+#define AFS_VM_OBJECT_WUNLOCK(o)    VM_OBJECT_UNLOCK(o)
+#endif
+
 /* Try to discard pages, in order to recycle a vcache entry.
  *
  * We also make some sanity checks: ref count, open count, held locks.
@@ -154,9 +165,9 @@ osi_VM_StoreAllSegments(struct vcache *avc)
 	if (!vget(vp, LK_EXCLUSIVE | LK_RETRY, curthread)) {
 	    obj = vp->v_object;
 	    if (obj != NULL) {
-		VM_OBJECT_LOCK(obj);
+		AFS_VM_OBJECT_WLOCK(obj);
 		vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
-		VM_OBJECT_UNLOCK(obj);
+		AFS_VM_OBJECT_WUNLOCK(obj);
 		anyio = 1;
 	    }
 	    vput(vp);
@@ -202,7 +213,7 @@ osi_VM_TryToSmush(struct vcache *avc, afs_ucred_t *acred, int sync)
 
     lock_vnode(vp, LK_EXCLUSIVE);
     if (vp->v_bufobj.bo_object != NULL) {
-	VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
+	AFS_VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
 	/*
 	 * Do we really want OBJPC_SYNC? OBJPC_INVAL would be
 	 * faster, if invalidation is really what we are being
@@ -218,7 +229,7 @@ osi_VM_TryToSmush(struct vcache *avc, afs_ucred_t *acred, int sync)
 	 */
 	vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0,
			     OBJPC_SYNC);
-	VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
+	AFS_VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
     }
 
     tries = 5;
@@ -248,9 +259,9 @@ osi_VM_FlushPages(struct vcache *avc, afs_ucred_t *credp)
     ASSERT_VOP_LOCKED(vp, __func__);
     obj = vp->v_object;
     if (obj != NULL) {
-	VM_OBJECT_LOCK(obj);
+	AFS_VM_OBJECT_WLOCK(obj);
 	vm_object_page_remove(obj, 0, 0, FALSE);
-	VM_OBJECT_UNLOCK(obj);
+	AFS_VM_OBJECT_WUNLOCK(obj);
     }
     osi_vinvalbuf(vp, 0, 0, 0);
 }
diff --git a/src/afs/FBSD/osi_vnodeops.c b/src/afs/FBSD/osi_vnodeops.c
index adc7fc632..32c56647b 100644
--- a/src/afs/FBSD/osi_vnodeops.c
+++ b/src/afs/FBSD/osi_vnodeops.c
@@ -57,6 +57,9 @@
 #include
 #include
 #include
+#if __FreeBSD_version >= 1000030
+#include
+#endif
 #include
 
 extern int afs_pbuf_freecnt;
@@ -266,6 +269,14 @@ static __inline void ma_vm_page_unlock(vm_page_t m) {};
 #define MA_PCPU_ADD(c, n) (c) += (n)
 #endif
 
+#if __FreeBSD_version >= 1000030
+#define AFS_VM_OBJECT_WLOCK(o)      VM_OBJECT_WLOCK(o)
+#define AFS_VM_OBJECT_WUNLOCK(o)    VM_OBJECT_WUNLOCK(o)
+#else
+#define AFS_VM_OBJECT_WLOCK(o)      VM_OBJECT_LOCK(o)
+#define AFS_VM_OBJECT_WUNLOCK(o)    VM_OBJECT_UNLOCK(o)
+#endif
+
 #ifdef AFS_FBSD70_ENV
 #ifndef AFS_FBSD80_ENV
 /* From kern_lock.c */
@@ -806,7 +817,7 @@ afs_vop_getpages(struct vop_getpages_args *ap)
     {
	vm_page_t m = ap->a_m[ap->a_reqpage];
 
-	VM_OBJECT_LOCK(object);
+	AFS_VM_OBJECT_WLOCK(object);
	ma_vm_page_lock_queues();
	if (m->valid != 0) {
	    /* handled by vm_fault now */
@@ -819,11 +830,11 @@
	    }
	}
	ma_vm_page_unlock_queues();
-	VM_OBJECT_UNLOCK(object);
+	AFS_VM_OBJECT_WUNLOCK(object);
	return (0);
	}
	ma_vm_page_unlock_queues();
-	VM_OBJECT_UNLOCK(object);
+	AFS_VM_OBJECT_WUNLOCK(object);
     }
 
     bp = getpbuf(&afs_pbuf_freecnt);
@@ -851,19 +862,19 @@
     relpbuf(bp, &afs_pbuf_freecnt);
 
     if (code && (uio.uio_resid == ap->a_count)) {
-	VM_OBJECT_LOCK(object);
+	AFS_VM_OBJECT_WLOCK(object);
	ma_vm_page_lock_queues();
	for (i = 0; i < npages; ++i) {
	    if (i != ap->a_reqpage)
		vm_page_free(ap->a_m[i]);
	}
	ma_vm_page_unlock_queues();
-	VM_OBJECT_UNLOCK(object);
+	AFS_VM_OBJECT_WUNLOCK(object);
	return VM_PAGER_ERROR;
     }
 
     size = ap->a_count - uio.uio_resid;
-    VM_OBJECT_LOCK(object);
+    AFS_VM_OBJECT_WLOCK(object);
     ma_vm_page_lock_queues();
     for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
	vm_page_t m;
@@ -929,7 +940,7 @@ afs_vop_getpages(struct vop_getpages_args *ap)
	}
     }
     ma_vm_page_unlock_queues();
-    VM_OBJECT_UNLOCK(object);
+    AFS_VM_OBJECT_WUNLOCK(object);
     return 0;
 }
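
For readers who want the locking shim in isolation: the whole compatibility trick above is a pair of wrapper macros selected at compile time from __FreeBSD_version, so the same call sites build against both the old mutex KPI and the new rwlock KPI. The fragment below restates that pattern outside the patch. It is only a sketch, not OpenAFS code: the helper afs_example_clean_object is hypothetical, and the exact set of headers a given kernel module needs may differ.

/* Illustrative sketch of the compile-time shim used by this patch. */
#include <sys/param.h>		/* __FreeBSD_version */
#include <sys/lock.h>
#if __FreeBSD_version >= 1000030
#include <sys/rwlock.h>		/* VM object lock is an rwlock on 10.x and later */
#else
#include <sys/mutex.h>		/* VM object lock is a mutex on older releases */
#endif
#include <vm/vm.h>
#include <vm/vm_object.h>	/* vm_object_page_clean(), OBJPC_SYNC */

#if __FreeBSD_version >= 1000030
#define AFS_VM_OBJECT_WLOCK(o)      VM_OBJECT_WLOCK(o)
#define AFS_VM_OBJECT_WUNLOCK(o)    VM_OBJECT_WUNLOCK(o)
#else
#define AFS_VM_OBJECT_WLOCK(o)      VM_OBJECT_LOCK(o)
#define AFS_VM_OBJECT_WUNLOCK(o)    VM_OBJECT_UNLOCK(o)
#endif

/* Hypothetical helper: write-lock a VM object, synchronously clean its
 * dirty pages, and drop the lock again.  The body is identical on both
 * KPIs; only the macro expansion differs. */
static void
afs_example_clean_object(vm_object_t obj)
{
    if (obj == NULL)
	return;
    AFS_VM_OBJECT_WLOCK(obj);
    vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
    AFS_VM_OBJECT_WUNLOCK(obj);
}

Call sites such as osi_VM_StoreAllSegments() and afs_vop_getpages() then use AFS_VM_OBJECT_WLOCK()/AFS_VM_OBJECT_WUNLOCK() unconditionally, which is exactly what the hunks above do.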