--- /dev/null
+/*
+ * Copyright 2000, International Business Machines Corporation and others.
+ * All Rights Reserved.
+ *
+ * This software has been released under the terms of the IBM Public
+ * License. For details, see the LICENSE file in the top-level source
+ * directory or online at http://www.openafs.org/dl/license10.html
+ */
+
+#include "../afs/param.h" /* Should be always first */
+#include "../afs/sysincludes.h" /* Standard vendor system headers */
+#include "../afs/afsincludes.h" /* Afs-based standard headers */
+#include "../afs/afs_stats.h" /* afs statistics */
+#include "../afs/osi_inode.h"
+
+
+int afs_osicred_initialized=0;
+struct AFS_UCRED afs_osi_cred;
+afs_lock_t afs_xosi; /* lock is for tvattr */
+extern struct osi_dev cacheDev;
+extern struct mount *afs_cacheVfsp;
+int afs_CacheFSType = -1;
+
+/* Initialize the cache operations. Called while initializing cache files. */
+void afs_InitDualFSCacheOps(struct vnode *vp)
+{
+ int code;
+ static int inited = 0;
+
+ if (inited)
+ return;
+ inited = 1;
+
+ if (vp == NULL)
+ return;
+ if (strncmp("hfs", vp->v_mount->mnt_vfc->vfc_name, 3) == 0)
+ afs_CacheFSType = AFS_APPL_HFS_CACHE;
+ else
+ if (strncmp("ufs", vp->v_mount->mnt_vfc->vfc_name, 3) == 0)
+ afs_CacheFSType = AFS_APPL_UFS_CACHE;
+ else
+ osi_Panic("Unknown cache vnode type\n");
+}
+
+ino_t VnodeToIno(vnode_t *avp)
+{
+ unsigned long ret;
+
+ if (afs_CacheFSType == AFS_APPL_UFS_CACHE) {
+ struct inode *ip = VTOI(avp);
+ ret=ip->i_number;
+ } else if (afs_CacheFSType == AFS_APPL_HFS_CACHE) {
+#ifndef VTOH
+ struct vattr va;
+ if (VOP_GETATTR(avp, &va, &afs_osi_cred, current_proc()))
+ osi_Panic("VOP_GETATTR failed in VnodeToIno\n");
+ ret=va.va_fileid;
+#else
+ struct hfsnode *hp = VTOH(avp);
+ ret=H_FILEID(hp);
+#endif
+ } else
+ osi_Panic("VnodeToIno called before cacheops initialized\n");
+ return ret;
+}
+
+
+dev_t VnodeToDev(vnode_t *avp)
+{
+
+
+ if (afs_CacheFSType == AFS_APPL_UFS_CACHE) {
+ struct inode *ip = VTOI(avp);
+ return ip->i_dev;
+ } else
+ if (afs_CacheFSType == AFS_APPL_HFS_CACHE) {
+#ifndef VTOH /* slow, but works */
+ struct vattr va;
+ if (VOP_GETATTR(avp, &va, &afs_osi_cred, current_proc()))
+ osi_Panic("VOP_GETATTR failed in VnodeToDev\n");
+ return va.va_fsid; /* XXX they say it's the dev.... */
+#else
+ struct hfsnode *hp = VTOH(avp);
+ return H_DEV(hp);
+#endif
+ } else
+ osi_Panic("VnodeToDev called before cacheops initialized\n");
+}
+
+void *osi_UFSOpen(ainode)
+ afs_int32 ainode;
+{
+ struct vnode *vp;
+ struct vattr va;
+ register struct osi_file *afile = NULL;
+ extern int cacheDiskType;
+ afs_int32 code = 0;
+ int dummy;
+ AFS_STATCNT(osi_UFSOpen);
+ if(cacheDiskType != AFS_FCACHE_TYPE_UFS) {
+ osi_Panic("UFSOpen called for non-UFS cache\n");
+ }
+ if (!afs_osicred_initialized) {
+ /* valid for alpha_osf, SunOS, Ultrix */
+ bzero((char *)&afs_osi_cred, sizeof(struct AFS_UCRED));
+ afs_osi_cred.cr_ref++;
+ afs_osi_cred.cr_ngroups=1;
+ afs_osicred_initialized = 1;
+ }
+ afile = (struct osi_file *) osi_AllocSmallSpace(sizeof(struct osi_file));
+ AFS_GUNLOCK();
+ if (afs_CacheFSType == AFS_APPL_HFS_CACHE)
+ code = igetinode(afs_cacheVfsp, (dev_t) cacheDev.dev, &ainode, &vp, &va, &dummy); /* XXX hfs is broken */
+ else
+ if (afs_CacheFSType == AFS_APPL_UFS_CACHE)
+ code = igetinode(afs_cacheVfsp, (dev_t) cacheDev.dev, (ino_t)ainode, &vp, &va, &dummy);
+ else
+ panic("osi_UFSOpen called before cacheops initialized\n");
+ AFS_GLOCK();
+ if (code) {
+ osi_FreeSmallSpace(afile);
+ osi_Panic("UFSOpen: igetinode failed");
+ }
+ afile->vnode = vp;
+ afile->size = va.va_size;
+ afile->offset = 0;
+ afile->proc = (int (*)()) 0;
+ afile->inum = ainode; /* for hint validity checking */
+ return (void *)afile;
+}
+
+afs_osi_Stat(afile, astat)
+ register struct osi_file *afile;
+ register struct osi_stat *astat; {
+ register afs_int32 code;
+ struct vattr tvattr;
+ AFS_STATCNT(osi_Stat);
+ MObtainWriteLock(&afs_xosi,320);
+ AFS_GUNLOCK();
+ code=VOP_GETATTR(afile->vnode, &tvattr, &afs_osi_cred, current_proc());
+ AFS_GLOCK();
+ if (code == 0) {
+ astat->size = tvattr.va_size;
+ astat->blksize = tvattr.va_blocksize;
+ astat->mtime = tvattr.va_mtime.tv_sec;
+ astat->atime = tvattr.va_atime.tv_sec;
+ }
+ MReleaseWriteLock(&afs_xosi);
+ return code;
+}
+
+osi_UFSClose(afile)
+ register struct osi_file *afile;
+ {
+ AFS_STATCNT(osi_Close);
+ if(afile->vnode) {
+ AFS_RELE(afile->vnode);
+ }
+
+ osi_FreeSmallSpace(afile);
+ return 0;
+ }
+
+osi_UFSTruncate(afile, asize)
+ register struct osi_file *afile;
+ afs_int32 asize; {
+ struct AFS_UCRED *oldCred;
+ struct vattr tvattr;
+ register afs_int32 code;
+ struct osi_stat tstat;
+ AFS_STATCNT(osi_Truncate);
+
+ /* This routine only shrinks files, and most systems
+ * have very slow truncates, even when the file is already
+ * small enough. Check now and save some time.
+ */
+ code = afs_osi_Stat(afile, &tstat);
+ if (code || tstat.size <= asize) return code;
+ MObtainWriteLock(&afs_xosi,321);
+ VATTR_NULL(&tvattr);
+ tvattr.va_size = asize;
+ AFS_GUNLOCK();
+ code=VOP_SETATTR(afile->vnode, &tvattr, &afs_osi_cred, current_proc());
+ AFS_GLOCK();
+ MReleaseWriteLock(&afs_xosi);
+ return code;
+}
+
+void osi_DisableAtimes(avp)
+struct vnode *avp;
+{
+
+
+ if (afs_CacheFSType == AFS_APPL_UFS_CACHE) {
+ struct inode *ip = VTOI(avp);
+ ip->i_flag &= ~IN_ACCESS;
+ }
+#ifdef VTOH /* can't do this without internals */
+ else if (afs_CacheFSType == AFS_APPL_HFS_CACHE) {
+ struct hfsnode *hp = VTOH(avp);
+ hp->h_nodeflags &= ~IN_ACCESS;
+ }
+#endif
+}
+
+
+/* Generic read interface */
+afs_osi_Read(afile, offset, aptr, asize)
+ register struct osi_file *afile;
+ int offset;
+ char *aptr;
+ afs_int32 asize; {
+ struct AFS_UCRED *oldCred;
+ unsigned int resid;
+ register afs_int32 code;
+ AFS_STATCNT(osi_Read);
+
+ /**
+ * If the osi_file passed in is NULL, panic only if AFS is not shutting
+ * down. No point in crashing when we are already shutting down
+ */
+ if ( !afile ) {
+ if ( !afs_shuttingdown )
+ osi_Panic("osi_Read called with null param");
+ else
+ return EIO;
+ }
+
+ if (offset != -1) afile->offset = offset;
+ AFS_GUNLOCK();
+ code = gop_rdwr(UIO_READ, afile->vnode, (caddr_t) aptr, asize, afile->offset,
+ AFS_UIOSYS, IO_UNIT, &afs_osi_cred, &resid);
+ AFS_GLOCK();
+ if (code == 0) {
+ code = asize - resid;
+ afile->offset += code;
+ osi_DisableAtimes(afile->vnode);
+ }
+ else {
+ afs_Trace2(afs_iclSetp, CM_TRACE_READFAILED, ICL_TYPE_INT32, resid,
+ ICL_TYPE_INT32, code);
+ code = -1;
+ }
+ return code;
+}
+
+/* Generic write interface */
+afs_osi_Write(afile, offset, aptr, asize)
+ register struct osi_file *afile;
+ char *aptr;
+ afs_int32 offset;
+ afs_int32 asize; {
+ struct AFS_UCRED *oldCred;
+ unsigned int resid;
+ register afs_int32 code;
+ AFS_STATCNT(osi_Write);
+ if ( !afile )
+ osi_Panic("afs_osi_Write called with null param");
+ if (offset != -1) afile->offset = offset;
+ {
+ AFS_GUNLOCK();
+ code = gop_rdwr(UIO_WRITE, afile->vnode, (caddr_t) aptr, asize, afile->offset,
+ AFS_UIOSYS, IO_UNIT, &afs_osi_cred, &resid);
+ AFS_GLOCK();
+ }
+ if (code == 0) {
+ code = asize - resid;
+ afile->offset += code;
+ }
+ else {
+ code = -1;
+ }
+ if (afile->proc) {
+ (*afile->proc)(afile, code);
+ }
+ return code;
+}
+
+
+
+
+
+void
+shutdown_osifile()
+{
+ extern int afs_cold_shutdown;
+
+ AFS_STATCNT(shutdown_osifile);
+ if (afs_cold_shutdown) {
+ afs_osicred_initialized = 0;
+ }
+}
+
--- /dev/null
+/*
+ * Copyright 2000, International Business Machines Corporation and others.
+ * All Rights Reserved.
+ *
+ * This software has been released under the terms of the IBM Public
+ * License. For details, see the LICENSE file in the top-level source
+ * directory or online at http://www.openafs.org/dl/license10.html
+ */
+/*
+ * osi_groups.c
+ *
+ * Implements:
+ * Afs_xsetgroups (syscall)
+ * setpag
+ *
+ */
+#include "../afs/param.h"
+#include "../afs/sysincludes.h"
+#include "../afs/afsincludes.h"
+#include "../afs/afs_stats.h" /* statistics */
+
+static int
+afs_getgroups(
+ struct ucred *cred,
+ int ngroups,
+ gid_t *gidset);
+
+static int
+afs_setgroups(
+ struct proc *proc,
+ struct ucred **cred,
+ int ngroups,
+ gid_t *gidset,
+ int change_parent);
+
+int
+Afs_xsetgroups(p, args, retval)
+ struct proc *p;
+ void *args;
+ int *retval;
+{
+ int code = 0;
+ struct vrequest treq;
+ struct ucred *cr;
+
+ pcred_readlock(p);
+ cr=crdup(p->p_cred->pc_ucred);
+ pcred_unlock(p);
+
+ AFS_STATCNT(afs_xsetgroups);
+ AFS_GLOCK();
+
+ code = afs_InitReq(&treq, cr);
+ AFS_GUNLOCK();
+ crfree(cr);
+ if (code) return setgroups(p, args, retval); /* afs has shut down */
+
+ code = setgroups(p, args, retval);
+ /* Note that if there is a pag already in the new groups we don't
+ * overwrite it with the old pag.
+ */
+ pcred_readlock(p);
+ cr=crdup(p->p_cred->pc_ucred);
+ pcred_unlock(p);
+
+ if (PagInCred(cr) == NOPAG) {
+ if (((treq.uid >> 24) & 0xff) == 'A') {
+ AFS_GLOCK();
+ /* we've already done a setpag, so now we redo it */
+ AddPag(p, treq.uid, &cr );
+ AFS_GUNLOCK();
+ }
+ }
+ crfree(cr);
+ return code;
+}
+
+
+int
+setpag(proc, cred, pagvalue, newpag, change_parent)
+ struct proc *proc;
+ struct ucred **cred;
+ afs_uint32 pagvalue;
+ afs_uint32 *newpag;
+ afs_uint32 change_parent;
+{
+ gid_t gidset[NGROUPS];
+ int ngroups, code;
+ int j;
+
+ AFS_STATCNT(setpag);
+ ngroups = afs_getgroups(*cred, NGROUPS, gidset);
+ if (afs_get_pag_from_groups(gidset[1], gidset[2]) == NOPAG) {
+ /* We will have to shift grouplist to make room for pag */
+ if (ngroups + 2 > NGROUPS) {
+ return (E2BIG);
+ }
+ for (j = ngroups -1; j >= 1; j--) {
+ gidset[j+2] = gidset[j];
+ }
+ ngroups += 2;
+ }
+ *newpag = (pagvalue == -1 ? genpag(): pagvalue);
+ afs_get_groups_from_pag(*newpag, &gidset[1], &gidset[2]);
+ code = afs_setgroups(proc, cred, ngroups, gidset, change_parent);
+ return code;
+}
+
+
+static int
+afs_getgroups(
+ struct ucred *cred,
+ int ngroups,
+ gid_t *gidset)
+{
+ int ngrps, savengrps;
+ gid_t *gp;
+
+ AFS_STATCNT(afs_getgroups);
+ savengrps = ngrps = MIN(ngroups, cred->cr_ngroups);
+ gp = cred->cr_groups;
+ while (ngrps--)
+ *gidset++ = *gp++;
+ return savengrps;
+}
+
+
+
+static int
+afs_setgroups(
+ struct proc *proc,
+ struct ucred **cred,
+ int ngroups,
+ gid_t *gidset,
+ int change_parent)
+{
+ int ngrps;
+ int i;
+ gid_t *gp;
+ struct ucred *oldcr, *cr;
+
+ AFS_STATCNT(afs_setgroups);
+ /*
+ * The real setgroups() call does this, so maybe we should too.
+ *
+ */
+ if (ngroups > NGROUPS)
+ return EINVAL;
+ cr = *cred;
+ cr->cr_ngroups = ngroups;
+ gp = cr->cr_groups;
+ while (ngroups--)
+ *gp++ = *gidset++;
+ if (change_parent) {
+ crhold(cr);
+ pcred_writelock(proc->p_pptr);
+ oldcr=proc->p_pptr->p_cred->pc_ucred;
+ proc->p_pptr->p_cred->pc_ucred=cr;
+ pcred_unlock(proc->p_pptr);
+ crfree(oldcr);
+ }
+ crhold(cr);
+ pcred_writelock(proc);
+ oldcr=proc->p_cred->pc_ucred;
+ proc->p_cred->pc_ucred=cr;
+ pcred_unlock(proc);
+ crfree(oldcr);
+ return(0);
+}
--- /dev/null
+/*
+ * Copyright 2000, International Business Machines Corporation and others.
+ * All Rights Reserved.
+ *
+ * This software has been released under the terms of the IBM Public
+ * License. For details, see the LICENSE file in the top-level source
+ * directory or online at http://www.openafs.org/dl/license10.html
+ */
+/*
+ * MACOS inode operations
+ *
+ * Implements:
+ *
+ */
+#include "../afs/param.h" /* Should be always first */
+#include "../afs/sysincludes.h" /* Standard vendor system headers */
+#include "../afs/afsincludes.h" /* Afs-based standard headers */
+#include "../afs/osi_inode.h"
+#include "../afs/afs_stats.h" /* statistics stuff */
+#include <ufs/ufs/ufsmount.h>
+extern struct ucred afs_osi_cred;
+
+getinode(fs, dev, inode, vpp, perror)
+ struct mount *fs;
+ struct vnode **vpp;
+ dev_t dev;
+ ino_t inode;
+ int *perror;
+{
+ struct vnode *vp;
+ int code;
+
+ *vpp = 0;
+ *perror = 0;
+ if (!fs) {
+ register struct ufsmount *ump;
+#ifdef VFSTOHFS
+ register struct hfsmount *hmp;
+#endif
+ register struct vnode *vp;
+ register struct mount *mp;
+ extern struct mount *rootfs;
+ if (mp = rootfs) do {
+ /*
+ * XXX Also do the test for MFS
+ */
+ if (!strcmp(mp->mnt_vfc->vfc_name, "ufs")) {
+ ump = VFSTOUFS(mp);
+ if (ump->um_fs == NULL)
+ break;
+ if (ump->um_dev == dev) {
+ fs = ump->um_mountp;
+ }
+ }
+#ifdef VFSTOHFS
+ if (!strcmp(mp->mnt_vfc->vfc_name, "hfs")) {
+ hmp = VFSTOHFS(mp);
+#if 0
+ if (hmp->hfs_mp == NULL)
+ break;
+#endif
+ if (hmp->hfs_raw_dev == dev) {
+ fs = hmp->hfs_mp;
+ }
+ }
+#endif
+
+ mp = CIRCLEQ_NEXT(mp, mnt_list);
+ } while (mp != rootfs);
+ if (!fs)
+ return(ENXIO);
+ }
+ code=VFS_VGET(fs, (void *)inode, &vp);
+ if (code) {
+ *perror = BAD_IGET;
+ return code;
+ } else {
+ *vpp = vp;
+ return(0);
+ }
+}
+extern int afs_CacheFSType;
+igetinode(vfsp, dev, inode, vpp, va, perror)
+ struct vnode **vpp;
+ struct mount *vfsp;
+ dev_t dev;
+ ino_t inode;
+ struct vattr *va;
+ int *perror;
+{
+ struct vnode *pvp, *vp;
+ extern struct osi_dev cacheDev;
+ register int code = 0;
+
+ *perror = 0;
+
+ AFS_STATCNT(igetinode);
+ if ((code = getinode(vfsp, dev, inode, &vp, perror)) != 0) {
+ return(code);
+ }
+ if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) {
+ printf("igetinode: bad type %d\n", vp->v_type);
+ iforget(vp);
+ return(ENOENT);
+ }
+ VOP_GETATTR(vp, va, &afs_osi_cred, current_proc());
+ if (va->va_mode == 0) {
+ /* Not an allocated inode */
+ iforget(vp);
+ return(ENOENT);
+ }
+ if (vfsp && afs_CacheFSType == AFS_APPL_HFS_CACHE && va->va_nlink == 0) {
+ printf("igetinode: hfs nlink 0\n");
+ }
+ if (va->va_nlink == 0) {
+ vput(vp);
+ return(ENOENT);
+ }
+
+ VOP_UNLOCK(vp, 0, current_proc());
+ *vpp = vp;
+ return(0);
+}
+
+iforget(vp)
+struct vnode *vp;
+{
+
+ AFS_STATCNT(iforget);
+ /* XXX could sleep */
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, current_proc());
+ /* this whole thing is too wierd. Why??? XXX */
+ if (vp->v_usecount == 1) {
+ vp->v_usecount=0;
+ VOP_UNLOCK(vp,0, current_proc());
+#if 0
+ simple_lock(&vnode_free_list_slock);
+ TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
+ freevnodes++;
+ simple_unlock(&vnode_free_list_slock);
+#else
+ printf("iforget: leaking vnode\n");
+#endif
+ } else {
+ vput(vp);
+ }
+}
+
+#if 0
+/*
+ * icreate system call -- create an inode
+ */
+afs_syscall_icreate(dev, near_inode, param1, param2, param3, param4, retval)
+ long *retval;
+ long dev, near_inode, param1, param2, param3, param4;
+{
+ int dummy, err=0;
+ struct inode *ip, *newip;
+ register int code;
+ struct vnode *vp;
+
+ AFS_STATCNT(afs_syscall_icreate);
+
+ if (!afs_suser())
+ return(EPERM);
+
+ code = getinode(0, (dev_t)dev, 2, &ip, &dummy);
+ if (code) {
+ return(ENOENT);
+ }
+ code = ialloc(ip, (ino_t)near_inode, 0, &newip);
+ iput(ip);
+ if (code) {
+ return(code);
+ }
+ IN_LOCK(newip);
+ newip->i_flag |= IACC|IUPD|ICHG;
+
+ newip->i_nlink = 1;
+
+ newip->i_mode = IFREG;
+
+ IN_UNLOCK(newip);
+ vp = ITOV(newip);
+ VN_LOCK(vp);
+ vp->v_type = VREG;
+ VN_UNLOCK(vp);
+
+ if ( !vp->v_object)
+ {
+ extern struct vfs_ubcops ufs_ubcops;
+ extern struct vm_ubc_object* ubc_object_allocate();
+ struct vm_ubc_object* vop;
+ vop = ubc_object_allocate(&vp, &ufs_ubcops,
+ vp->v_mount->m_funnel);
+ VN_LOCK(vp);
+ vp->v_object = vop;
+ VN_UNLOCK(vp);
+ }
+
+
+ IN_LOCK(newip);
+ newip->i_flags |= IC_XUID|IC_XGID;
+ newip->i_flags &= ~IC_PROPLIST;
+ newip->i_vicep1 = param1;
+ if (param2 == 0x1fffffff/*INODESPECIAL*/) {
+ newip->i_vicep2 = ((0x1fffffff << 3) + (param4 & 0x3));
+ newip->i_vicep3a = (u_short)(param3 >> 16);
+ newip->i_vicep3b = (u_short)param3;
+ } else {
+ newip->i_vicep2 = (((param2 >> 16) & 0x1f) << 27) +
+ (((param4 >> 16) & 0x1f) << 22) +
+ (param3 & 0x3fffff);
+ newip->i_vicep3a = (u_short)param4;
+ newip->i_vicep3b = (u_short)param2;
+ }
+ newip->i_vicemagic = VICEMAGIC;
+
+ *retval = newip->i_number;
+ IN_UNLOCK(newip);
+ iput(newip);
+ return(code);
+}
+
+
+afs_syscall_iopen(dev, inode, usrmod, retval)
+ long *retval;
+ int dev, inode, usrmod;
+{
+ struct file *fp;
+ struct inode *ip;
+ struct vnode *vp = (struct vnode *)0;
+ int dummy;
+ int fd;
+ extern struct fileops vnops;
+ register int code;
+
+ AFS_STATCNT(afs_syscall_iopen);
+
+ if (!afs_suser())
+ return(EPERM);
+
+ code = igetinode(0, (dev_t)dev, (ino_t)inode, &ip, &dummy);
+ if (code) {
+ return(code);
+ }
+ if ((code = falloc(&fp, &fd)) != 0) {
+ iput(ip);
+ return(code);
+ }
+ IN_UNLOCK(ip);
+
+ FP_LOCK(fp);
+ fp->f_flag = (usrmod-FOPEN) & FMASK;
+ fp->f_type = DTYPE_VNODE;
+ fp->f_ops = &vnops;
+ fp->f_data = (caddr_t)ITOV(ip);
+
+ FP_UNLOCK(fp);
+ U_FD_SET(fd, fp, &u.u_file_state);
+ *retval = fd;
+ return(0);
+}
+
+
+/*
+ * Support for iinc() and idec() system calls--increment or decrement
+ * count on inode.
+ * Restricted to super user.
+ * Only VICEMAGIC type inodes.
+ */
+afs_syscall_iincdec(dev, inode, inode_p1, amount)
+ int dev, inode, inode_p1, amount;
+{
+ int dummy;
+ struct inode *ip;
+ register int code;
+
+ if (!afs_suser())
+ return(EPERM);
+
+ code = igetinode(0, (dev_t)dev, (ino_t)inode, &ip, &dummy);
+ if (code) {
+ return(code);
+ }
+ if (!IS_VICEMAGIC(ip)) {
+ return(EPERM);
+ } else if (ip->i_vicep1 != inode_p1) {
+ return(ENXIO);
+ }
+ ip->i_nlink += amount;
+ if (ip->i_nlink == 0) {
+ CLEAR_VICEMAGIC(ip);
+ }
+ ip->i_flag |= ICHG;
+ iput(ip);
+ return(0);
+}
+#else
+afs_syscall_icreate(dev, near_inode, param1, param2, param3, param4, retval)
+ long *retval;
+ long dev, near_inode, param1, param2, param3, param4;
+{
+ return EOPNOTSUPP;
+}
+afs_syscall_iopen(dev, inode, usrmod, retval)
+ long *retval;
+ int dev, inode, usrmod;
+{
+ return EOPNOTSUPP;
+}
+afs_syscall_iincdec(dev, inode, inode_p1, amount)
+ int dev, inode, inode_p1, amount;
+{
+ return EOPNOTSUPP;
+}
+#endif
--- /dev/null
+/*
+ * Copyright 2000, International Business Machines Corporation and others.
+ * All Rights Reserved.
+ *
+ * This software has been released under the terms of the IBM Public
+ * License. For details, see the LICENSE file in the top-level source
+ * directory or online at http://www.openafs.org/dl/license10.html
+ */
+/*
+ * osi_inode.h
+ *
+ * Inode information required for MACOS servers and salvager.
+ */
+#ifndef _OSI_INODE_H_
+#define _OSI_INODE_H_
+
+#define BAD_IGET -1000
+
+#define VICEMAGIC 0x84fa1cb6
+
+#define DI_VICEP3(p) ((p)->di_vicep3)
+#define I_VICEP3(p) ((p)->i_vicep3)
+
+#define i_vicemagic i_din.di_flags
+#define i_vicep1 i_din.di_gen
+#define i_vicep2 i_din.di_uid
+#define i_vicep3 i_din.di_gid
+#define i_vicep4 i_din.di_spare[0] /* not used */
+
+#define di_vicemagic di_flags
+#define di_vicep1 di_gen
+#define di_vicep2 di_uid
+#define di_vicep3 di_gid
+#define di_vicep4 di_spare[0] /* not used */
+
+#define IS_VICEMAGIC(ip) ((ip)->i_vicemagic == VICEMAGIC)
+#define IS_DVICEMAGIC(dp) ((dp)->di_vicemagic == VICEMAGIC)
+
+#define CLEAR_VICEMAGIC(ip) (ip)->i_vicemagic = 0
+#define CLEAR_DVICEMAGIC(dp) (dp)->di_vicemagic = 0
+
+#endif /* _OSI_INODE_H_ */
--- /dev/null
+/*
+ * Copyright 2000, International Business Machines Corporation and others.
+ * All Rights Reserved.
+ *
+ * This software has been released under the terms of the IBM Public
+ * License. For details, see the LICENSE file in the top-level source
+ * directory or online at http://www.openafs.org/dl/license10.html
+ */
+/*
+ *
+ * MACOS OSI header file. Extends afs_osi.h.
+ *
+ * afs_osi.h includes this file, which is the only way this file should
+ * be included in a source file. This file can redefine macros declared in
+ * afs_osi.h.
+ */
+
+#ifndef _OSI_MACHDEP_H_
+#define _OSI_MACHDEP_H_
+
+#ifdef XAFS_DARWIN_ENV
+#ifndef _MACH_ETAP_H_
+#define _MACH_ETAP_H_
+typedef unsigned short etap_event_t;
+#endif
+#endif
+
+#include <sys/lock.h>
+#include <kern/thread.h>
+#include <sys/user.h>
+
+#define getpid() current_proc()->p_pid
+#define getppid() current_proc()->p_pptr->p_pid
+#undef gop_lookupname
+#define gop_lookupname osi_lookupname
+
+#define FTRUNC 0
+
+/* vcexcl - used only by afs_create */
+enum vcexcl { EXCL, NONEXCL } ;
+
+/*
+ * Time related macros
+ */
+extern struct timeval time;
+#define osi_Time() (time.tv_sec)
+#define afs_hz hz
+
+#define PAGESIZE 8192
+
+#define AFS_UCRED ucred
+
+#define AFS_PROC struct proc
+
+#define osi_vnhold(avc,r) do { \
+ if ((avc)->vrefCount) { VN_HOLD(&((avc)->v)); } \
+ else (avc)->vrefCount = 1; } while(0)
+
+#define gop_rdwr(rw,gp,base,len,offset,segflg,unit,cred,aresid) \
+ vn_rdwr((rw),(gp),(base),(len),(offset),(segflg),(unit),(cred),(aresid),current_proc())
+
+#undef afs_suser
+
+#ifdef KERNEL
+extern thread_t afs_global_owner;
+/* simple locks cannot be used since sleep can happen at any time */
+/* Should probably use mach locks rather than bsd locks, since we use the
+ mach thread control api's elsewhere (mach locks not used for consistency
+ with rx, since rx needs lock_write_try() in order to use mach locks
+ */
+extern struct lock__bsd__ afs_global_lock;
+#define AFS_GLOCK() \
+ do { \
+ lockmgr(&afs_global_lock, LK_EXCLUSIVE, 0, current_proc()); \
+ osi_Assert(afs_global_owner == 0); \
+ afs_global_owner = current_thread(); \
+ } while (0)
+#define AFS_GUNLOCK() \
+ do { \
+ osi_Assert(afs_global_owner == current_thread()); \
+ afs_global_owner = 0; \
+ lockmgr(&afs_global_lock, LK_RELEASE, 0, current_proc()); \
+ } while(0)
+#define ISAFS_GLOCK() (afs_global_owner == current_thread())
+#define AFS_RXGLOCK()
+#define AFS_RXGUNLOCK()
+#define ISAFS_RXGLOCK() 1
+
+#define SPLVAR
+#define NETPRI
+#define USERPRI
+#if 0
+#undef SPLVAR
+#define SPLVAR int x;
+#undef NETPRI
+#define NETPRI x=splnet();
+#undef USERPRI
+#define USERPRI splx(x);
+#endif
+
+#define AFS_APPL_UFS_CACHE 1
+#define AFS_APPL_HFS_CACHE 2
+
+extern ino_t VnodeToIno(vnode_t *vp);
+extern dev_t VnodeToDev(vnode_t *vp);
+
+#endif /* KERNEL */
+
+#endif /* _OSI_MACHDEP_H_ */
--- /dev/null
+/*
+ * Copyright 2000, International Business Machines Corporation and others.
+ * All Rights Reserved.
+ *
+ * This software has been released under the terms of the IBM Public
+ * License. For details, see the LICENSE file in the top-level source
+ * directory or online at http://www.openafs.org/dl/license10.html
+ */
+
+#include "../afs/param.h"
+#include "../afs/sysincludes.h"
+#include "../afs/afsincludes.h"
+#include <sys/namei.h>
+
+int osi_lookupname(char *aname, enum uio_seg seg, int followlink,
+ struct vnode **dirvpp, struct vnode **vpp)
+{
+ struct nameidata n;
+ int flags,error;
+ flags=0;
+ flags=LOCKLEAF;
+ if (followlink)
+ flags|=FOLLOW;
+ else
+ flags|=NOFOLLOW;
+/* if (dirvpp) flags|=WANTPARENT;*/ /* XXX LOCKPARENT? */
+ NDINIT(&n, LOOKUP, flags, seg, aname, current_proc());
+ if (error=namei(&n))
+ return error;
+ *vpp=n.ni_vp;
+/*
+ if (dirvpp)
+ *dirvpp = n.ni_dvp;
+#/
+ /* should we do this? */
+ VOP_UNLOCK(n.ni_vp, 0, current_proc());
+ return 0;
+}
+
+/*
+ * afs_suser() returns true if the caller is superuser, false otherwise.
+ *
+ * Note that it must NOT set errno.
+ */
+
+afs_suser() {
+ int error;
+ struct proc *p=current_proc();
+
+ if ((error = suser(p->p_ucred, &p->p_acflag)) == 0) {
+ return(1);
+ }
+ return(0);
+}
--- /dev/null
+#include "../afs/param.h"
+#include "../afs/sysincludes.h"
+#include "../afs/afsincludes.h"
+#include "../sys/syscall.h"
+#include <mach/kmod.h>
+#define VERSION "1.0.3"
+
+
+struct vfsconf afs_vfsconf;
+extern struct vfsops afs_vfsops;
+extern struct mount *afs_globalVFS;
+extern int Afs_xsetgroups();
+extern int afs_xioctl();
+extern int afs3_syscall();
+
+extern int ioctl();
+extern int setgroups();
+kern_return_t afs_modload(struct kmod_info *ki, void *data)
+{
+ if (sysent[AFS_SYSCALL].sy_call != nosys) {
+ printf("AFS_SYSCALL in use. aborting\n");
+ return KERN_FAILURE;
+ }
+ bzero(&afs_vfsconf, sizeof(struct vfsconf));
+ strcpy(afs_vfsconf.vfc_name, "afs");
+ afs_vfsconf.vfc_vfsops=&afs_vfsops;
+ afs_vfsconf.vfc_typenum=VT_AFS;
+ afs_vfsconf.vfc_flags=MNT_NODEV;
+ if (vfsconf_add(&afs_vfsconf)) {
+ printf("AFS: vfsconf_add failed. aborting\n");
+ return KERN_FAILURE;
+ }
+ sysent[SYS_setgroups].sy_call=Afs_xsetgroups;
+ sysent[SYS_ioctl].sy_call=afs_xioctl;
+ sysent[AFS_SYSCALL].sy_call=afs3_syscall;
+ sysent[AFS_SYSCALL].sy_narg = 5;
+ sysent[AFS_SYSCALL].sy_parallel = 0;
+#ifdef KERNEL_FUNNEL
+ sysent[AFS_SYSCALL].sy_funnel=KERNEL_FUNNEL;
+#endif
+ return KERN_SUCCESS;
+}
+kern_return_t afs_modunload(struct kmod_info *ki, void *data)
+{
+ if (afs_globalVFS)
+ return KERN_FAILURE;
+ if (vfsconf_del("afs"))
+ return KERN_FAILURE;
+ /* give up syscall entries for ioctl & setgroups, which we've stolen */
+ sysent[SYS_ioctl].sy_call = ioctl;
+ sysent[SYS_setgroups].sy_call = setgroups;
+ /* give up the stolen syscall entry */
+ sysent[AFS_SYSCALL].sy_narg = 0;
+ sysent[AFS_SYSCALL].sy_call = nosys;
+ return KERN_SUCCESS;
+ }
+
+KMOD_EXPLICIT_DECL(openafs, VERSION, afs_modload, afs_modunload)
--- /dev/null
+/*
+ * Copyright 2000, International Business Machines Corporation and others.
+ * All Rights Reserved.
+ *
+ * This software has been released under the terms of the IBM Public
+ * License. For details, see the LICENSE file in the top-level source
+ * directory or online at http://www.openafs.org/dl/license10.html
+ */
+/*
+ * osi_prototypes.h
+ *
+ * Exported macos support routines.
+ */
+#ifndef _OSI_PROTO_H_
+#define _OSI_PROTO_H_
+
+/* osi_misc.c */
+extern int osi_lookupname(char *aname, enum uio_seg seg, int followlink,
+ struct vnode **dirvpp, struct vnode **vpp);
+/* osi_vm.c */
+extern void osi_VM_NukePages(struct vnode *vp, off_t offset, off_t size);
+extern int osi_VM_Setup(struct vcache *avc);
+#endif /* _OSI_PROTO_H_ */
--- /dev/null
+/*
+ * Copyright 2000, International Business Machines Corporation and others.
+ * All Rights Reserved.
+ *
+ * This software has been released under the terms of the IBM Public
+ * License. For details, see the LICENSE file in the top-level source
+ * directory or online at http://www.openafs.org/dl/license10.html
+ */
+
+#include "../afs/param.h" /* Should be always first */
+#include "../afs/sysincludes.h" /* Standard vendor system headers */
+#include "../afs/afsincludes.h" /* Afs-based standard headers */
+#include "../afs/afs_stats.h" /* afs statistics */
+
+
+static int osi_TimedSleep(char *event, afs_int32 ams, int aintok);
+void afs_osi_Wakeup(char *event);
+void afs_osi_Sleep(char *event);
+
+static char waitV;
+
+
+void afs_osi_InitWaitHandle(struct afs_osi_WaitHandle *achandle)
+{
+ AFS_STATCNT(osi_InitWaitHandle);
+ achandle->proc = (caddr_t) 0;
+}
+
+/* cancel osi_Wait */
+void afs_osi_CancelWait(struct afs_osi_WaitHandle *achandle)
+{
+ caddr_t proc;
+
+ AFS_STATCNT(osi_CancelWait);
+ proc = achandle->proc;
+ if (proc == 0) return;
+ achandle->proc = (caddr_t) 0; /* so dude can figure out he was signalled */
+ afs_osi_Wakeup(&waitV);
+}
+
+/* afs_osi_Wait
+ * Waits for data on ahandle, or ams ms later. ahandle may be null.
+ * Returns 0 if timeout and EINTR if signalled.
+ */
+int afs_osi_Wait(afs_int32 ams, struct afs_osi_WaitHandle *ahandle, int aintok)
+{
+ int code;
+ afs_int32 endTime, tid;
+ struct proc *p=current_proc();
+
+ AFS_STATCNT(osi_Wait);
+ endTime = osi_Time() + (ams/1000);
+ if (ahandle)
+ ahandle->proc = (caddr_t)p;
+ do {
+ AFS_ASSERT_GLOCK();
+ code = 0;
+ code = osi_TimedSleep(&waitV, ams, aintok);
+
+ if (code) break; /* if something happened, quit now */
+ /* if we we're cancelled, quit now */
+ if (ahandle && (ahandle->proc == (caddr_t) 0)) {
+ /* we've been signalled */
+ break;
+ }
+ } while (osi_Time() < endTime);
+ return code;
+}
+
+
+
+typedef struct afs_event {
+ struct afs_event *next; /* next in hash chain */
+ char *event; /* lwp event: an address */
+ int refcount; /* Is it in use? */
+ int seq; /* Sequence number: this is incremented
+ by wakeup calls; wait will not return until
+ it changes */
+} afs_event_t;
+
+#define HASHSIZE 128
+afs_event_t *afs_evhasht[HASHSIZE];/* Hash table for events */
+#define afs_evhash(event) (afs_uint32) ((((long)event)>>2) & (HASHSIZE-1));
+int afs_evhashcnt = 0;
+
+/* Get and initialize event structure corresponding to lwp event (i.e. address)
+ * */
+static afs_event_t *afs_getevent(char *event)
+{
+ afs_event_t *evp, *newp = 0;
+ int hashcode;
+
+ AFS_ASSERT_GLOCK();
+ hashcode = afs_evhash(event);
+ evp = afs_evhasht[hashcode];
+ while (evp) {
+ if (evp->event == event) {
+ evp->refcount++;
+ return evp;
+ }
+ if (evp->refcount == 0)
+ newp = evp;
+ evp = evp->next;
+ }
+ if (!newp) {
+ newp = (afs_event_t *) osi_AllocSmallSpace(sizeof (afs_event_t));
+ afs_evhashcnt++;
+ newp->next = afs_evhasht[hashcode];
+ afs_evhasht[hashcode] = newp;
+ newp->seq = 0;
+ }
+ newp->event = event;
+ newp->refcount = 1;
+ return newp;
+}
+
+/* Release the specified event */
+#define relevent(evp) ((evp)->refcount--)
+
+
+void afs_osi_Sleep(char *event)
+{
+ struct afs_event *evp;
+ int seq;
+
+ evp = afs_getevent(event);
+ seq = evp->seq;
+ while (seq == evp->seq) {
+ AFS_ASSERT_GLOCK();
+ assert_wait((event_t)event, 0);
+ AFS_GUNLOCK();
+ thread_block(0);
+ AFS_GLOCK();
+ }
+ relevent(evp);
+}
+
+/* osi_TimedSleep
+ *
+ * Arguments:
+ * event - event to sleep on
+ * ams --- max sleep time in milliseconds
+ * aintok - 1 if should sleep interruptibly
+ *
+ * Returns 0 if timeout and EINTR if signalled.
+ */
+static int osi_TimedSleep(char *event, afs_int32 ams, int aintok)
+{
+ int code = 0;
+ struct afs_event *evp;
+ int ticks,seq;
+
+ ticks = ( ams * afs_hz )/1000;
+
+
+ evp = afs_getevent(event);
+ seq=evp->seq;
+ assert_wait((event_t)event, aintok ? THREAD_ABORTSAFE : 0);
+ AFS_GUNLOCK();
+ thread_set_timer(ticks, NSEC_PER_SEC / hz);
+ thread_block(0);
+ AFS_GLOCK();
+#if 0 /* thread_t structure only available if MACH_KERNEL_PRIVATE */
+ if (current_thread()->wait_result != THREAD_AWAKENED)
+ code = EINTR;
+#else
+ if (seq == evp->seq)
+ code = EINTR;
+#endif
+
+ relevent(evp);
+ return code;
+}
+
+
+void afs_osi_Wakeup(char *event)
+{
+ struct afs_event *evp;
+
+ evp = afs_getevent(event);
+ if (evp->refcount > 1) {
+ evp->seq++;
+ thread_wakeup((event_t)event);
+ }
+ relevent(evp);
+}
--- /dev/null
+#include <afs/param.h> /* Should be always first */
+#include <afs/sysincludes.h> /* Standard vendor system headers */
+#include <afs/afsincludes.h> /* Afs-based standard headers */
+#include <afs/afs_stats.h> /* statistics */
+#include <sys/malloc.h>
+#include <sys/namei.h>
+#include <sys/conf.h>
+#include <sys/syscall.h>
+
+struct vcache *afs_globalVp = 0;
+struct mount *afs_globalVFS = 0;
+
+int
+afs_quotactl()
+{
+ return EOPNOTSUPP;
+}
+
+int
+afs_fhtovp(mp, fhp, vpp)
+struct mount *mp;
+struct fid *fhp;
+struct vnode **vpp;
+{
+
+ return (EINVAL);
+}
+
+int
+afs_vptofh(vp, fhp)
+struct vnode *vp;
+struct fid *fhp;
+{
+
+ return (EINVAL);
+}
+
+int
+afs_start(mp, flags, p)
+struct mount *mp;
+int flags;
+struct proc *p;
+{
+ return (0); /* nothing to do. ? */
+}
+
+int
+afs_mount(mp, path, data, ndp, p)
+register struct mount *mp;
+char *path;
+caddr_t data;
+struct nameidata *ndp;
+struct proc *p;
+{
+ /* ndp contains the mounted-from device. Just ignore it.
+ we also don't care about our proc struct. */
+ size_t size;
+ int error;
+
+ if (mp->mnt_flag & MNT_UPDATE)
+ return EINVAL;
+
+ AFS_GLOCK();
+ AFS_STATCNT(afs_mount);
+
+ if (afs_globalVFS) { /* Don't allow remounts. */
+ AFS_GUNLOCK();
+ return (EBUSY);
+ }
+
+ afs_globalVFS = mp;
+ mp->vfs_bsize = 8192;
+ vfs_getnewfsid(mp);
+ mp->mnt_stat.f_iosize=8192;
+
+ (void) copyinstr(path, mp->mnt_stat.f_mntonname, MNAMELEN-1, &size);
+ bzero(mp->mnt_stat.f_mntonname + size, MNAMELEN - size);
+ bzero(mp->mnt_stat.f_mntfromname, MNAMELEN);
+ strcpy(mp->mnt_stat.f_mntfromname, "AFS");
+ /* null terminated string "AFS" will fit, just leave it be. */
+ strcpy(mp->mnt_stat.f_fstypename, "afs");
+ AFS_GUNLOCK();
+ (void) afs_statfs(mp, &mp->mnt_stat, p);
+ return 0;
+}
+
+int
+afs_unmount(mp, flags, p)
+struct mount *mp;
+int flags;
+struct proc *p;
+{
+
+ AFS_GLOCK();
+ AFS_STATCNT(afs_unmount);
+ afs_globalVFS = 0;
+ afs_shutdown();
+ AFS_GUNLOCK();
+
+ return 0;
+}
+
+int
+afs_root(struct mount *mp,
+ struct vnode **vpp)
+{
+ int error;
+ struct vrequest treq;
+ register struct vcache *tvp=0;
+ struct proc *p=current_proc();
+ struct ucred cr;
+
+ pcred_readlock(p);
+ cr=*p->p_cred->pc_ucred;
+ pcred_unlock(p);
+ AFS_GLOCK();
+ AFS_STATCNT(afs_root);
+ if (afs_globalVp && (afs_globalVp->states & CStatd)) {
+ tvp = afs_globalVp;
+ error=0;
+ } else {
+
+ if (!(error = afs_InitReq(&treq, &cr)) &&
+ !(error = afs_CheckInit())) {
+ tvp = afs_GetVCache(&afs_rootFid, &treq, (afs_int32 *)0,
+ (struct vcache*)0, WRITE_LOCK);
+ /* we really want this to stay around */
+ if (tvp) {
+ afs_globalVp = tvp;
+ } else
+ error = ENOENT;
+ }
+ }
+ if (tvp) {
+ osi_vnhold(tvp,0);
+ AFS_GUNLOCK();
+ vn_lock((struct vnode *)tvp, LK_EXCLUSIVE | LK_RETRY, p);
+ AFS_GLOCK();
+ afs_globalVFS = mp;
+ *vpp = (struct vnode *) tvp;
+ tvp->v.v_flag |= VROOT;
+ }
+
+ afs_Trace2(afs_iclSetp, CM_TRACE_VFSROOT, ICL_TYPE_POINTER, *vpp,
+ ICL_TYPE_INT32, error);
+ AFS_GUNLOCK();
+ return error;
+}
+
+int
+afs_vget(mp, lfl, vp)
+struct mount *mp;
+struct vnode *vp;
+int lfl;
+{
+ int error;
+ printf("vget called. help!\n");
+ if (vp->v_usecount < 0) {
+ vprint("bad usecount", vp);
+ panic("afs_vget");
+ }
+ error = vget(vp, lfl, current_proc());
+ if (!error)
+ insmntque(vp, afs_globalVFS); /* take off free list */
+ return error;
+}
+
+int afs_statfs(struct mount *mp, struct statfs *abp, struct proc *p)
+{
+ AFS_GLOCK();
+ AFS_STATCNT(afs_statfs);
+
+#if 0
+ abp->f_type = MOUNT_AFS;
+#endif
+ abp->f_bsize = mp->vfs_bsize;
+ abp->f_iosize = mp->vfs_bsize;
+
+ /* Fake a high number below to satisfy programs that use the statfs call
+ * to make sure that there's enough space in the device partition before
+ * storing something there.
+ */
+ abp->f_blocks = abp->f_bfree = abp->f_bavail = abp->f_files =
+ abp->f_ffree = 2000000;
+
+ abp->f_fsid.val[0] = mp->mnt_stat.f_fsid.val[0];
+ abp->f_fsid.val[1] = mp->mnt_stat.f_fsid.val[1];
+ if (abp != &mp->mnt_stat) {
+ abp->f_type = mp->mnt_vfc->vfc_typenum;
+ bcopy((caddr_t)mp->mnt_stat.f_mntonname,
+ (caddr_t)&abp->f_mntonname[0], MNAMELEN);
+ bcopy((caddr_t)mp->mnt_stat.f_mntfromname,
+ (caddr_t)&abp->f_mntfromname[0], MNAMELEN);
+ }
+
+ AFS_GUNLOCK();
+ return 0;
+}
+
+int afs_sync(mp, waitfor, cred, p)
+struct mount *mp;
+int waitfor;
+struct ucred *cred;
+struct prioc *p;
+{
+return 0;
+}
+
+int afs_sysctl() {
+ return EOPNOTSUPP;
+}
+
+
+typedef (*PFI)();
+extern int vfs_opv_numops; /* The total number of defined vnode operations */
+extern struct vnodeopv_desc afs_vnodeop_opv_desc;
+int afs_init(struct vfsconf *vfc) {
+ int j;
+ int (**opv_desc_vector)();
+ struct vnodeopv_entry_desc *opve_descp;
+
+
+
+ MALLOC(afs_vnodeop_p, PFI *, vfs_opv_numops*sizeof(PFI), M_TEMP, M_WAITOK);
+
+ bzero (afs_vnodeop_p, vfs_opv_numops*sizeof(PFI));
+
+ opv_desc_vector = afs_vnodeop_p;
+ for (j=0; afs_vnodeop_opv_desc.opv_desc_ops[j].opve_op; j++) {
+ opve_descp = &(afs_vnodeop_opv_desc.opv_desc_ops[j]);
+
+ /*
+ * Sanity check: is this operation listed
+ * in the list of operations? We check this
+ * by seeing if its offest is zero. Since
+ * the default routine should always be listed
+ * first, it should be the only one with a zero
+ * offset. Any other operation with a zero
+ * offset is probably not listed in
+ * vfs_op_descs, and so is probably an error.
+ *
+ * A panic here means the layer programmer
+ * has committed the all-too common bug
+ * of adding a new operation to the layer's
+ * list of vnode operations but
+ * not adding the operation to the system-wide
+ * list of supported operations.
+ */
+ if (opve_descp->opve_op->vdesc_offset == 0 &&
+ opve_descp->opve_op->vdesc_offset != VOFFSET(vop_default)) {
+ printf("afs_init: operation %s not listed in %s.\n",
+ opve_descp->opve_op->vdesc_name,
+ "vfs_op_descs");
+ panic ("load_afs: bad operation");
+ }
+ /*
+ * Fill in this entry.
+ */
+ opv_desc_vector[opve_descp->opve_op->vdesc_offset] =
+ opve_descp->opve_impl;
+ }
+
+ /*
+ * Finally, go back and replace unfilled routines
+ * with their default. (Sigh, an O(n^3) algorithm. I
+ * could make it better, but that'd be work, and n is small.)
+ */
+
+ /*
+ * Force every operations vector to have a default routine.
+ */
+ opv_desc_vector = afs_vnodeop_p;
+ if (opv_desc_vector[VOFFSET(vop_default)]==NULL) {
+ panic("afs_init: operation vector without default routine.");
+ }
+ for (j = 0;j<vfs_opv_numops; j++)
+ if (opv_desc_vector[j] == NULL)
+ opv_desc_vector[j] =
+ opv_desc_vector[VOFFSET(vop_default)];
+}
+
+struct vfsops afs_vfsops = {
+ afs_mount,
+ afs_start,
+ afs_unmount,
+ afs_root,
+ afs_quotactl,
+ afs_statfs,
+ afs_sync,
+ afs_vget,
+ afs_fhtovp,
+ afs_vptofh,
+ afs_init,
+ afs_sysctl
+};
--- /dev/null
+/*
+ * Copyright 2000, International Business Machines Corporation and others.
+ * All Rights Reserved.
+ *
+ * This software has been released under the terms of the IBM Public
+ * License. For details, see the LICENSE file in the top-level source
+ * directory or online at http://www.openafs.org/dl/license10.html
+ */
+
+#include "../afs/param.h" /* Should be always first */
+#include "../afs/sysincludes.h" /* Standard vendor system headers */
+#include "../afs/afsincludes.h" /* Afs-based standard headers */
+#include "../afs/afs_stats.h" /* statistics */
+#include <sys/ubc.h>
+
+/* Try to discard pages, in order to recycle a vcache entry.
+ *
+ * We also make some sanity checks: ref count, open count, held locks.
+ *
+ * We also do some non-VM-related chores, such as releasing the cred pointer
+ * (for AIX and Solaris) and releasing the gnode (for AIX).
+ *
+ * Locking: afs_xvcache lock is held. If it is dropped and re-acquired,
+ * *slept should be set to warn the caller.
+ *
+ * Formerly, afs_xvcache was dropped and re-acquired for Solaris, but now it
+ * is not dropped and re-acquired for any platform. It may be that *slept is
+ * therefore obsolescent.
+ *
+ * OSF/1 Locking: VN_LOCK has been called.
+ */
+int
+osi_VM_FlushVCache(avc, slept)
+ struct vcache *avc;
+ int *slept;
+{
+ struct vnode *vp=(struct vnode *)avc;
+ if (avc->vrefCount)
+ return EBUSY;
+
+ if (avc->opens)
+ return EBUSY;
+
+ /* if a lock is held, give up */
+ if (CheckLock(&avc->lock) || afs_CheckBozonLock(&avc->pvnLock))
+ return EBUSY;
+
+ AFS_GUNLOCK();
+ cache_purge(vp);
+ if (UBCINFOEXISTS(vp))
+ {
+ ubc_clean(vp, 1);
+ ubc_uncache(vp);
+ ubc_release(vp);
+ ubc_info_free(vp);
+ }
+
+ AFS_GLOCK();
+
+ return 0;
+}
+
+
+/* Try to store pages to cache, in order to store a file back to the server.
+ *
+ * Locking: the vcache entry's lock is held. It will usually be dropped and
+ * re-obtained.
+ */
+void
+osi_VM_StoreAllSegments(avc)
+ struct vcache *avc;
+{
+ struct vnode *vp=(struct vnode *)avc;
+ ReleaseWriteLock(&avc->lock);
+ AFS_GUNLOCK();
+ if (UBCINFOEXISTS(vp)) {
+ ubc_pushdirty(vp);
+ }
+ AFS_GLOCK();
+ ObtainWriteLock(&avc->lock,94);
+}
+
+/* Try to invalidate pages, for "fs flush" or "fs flushv"; or
+ * try to free pages, when deleting a file.
+ *
+ * Locking: the vcache entry's lock is held. It may be dropped and
+ * re-obtained.
+ *
+ * Since we drop and re-obtain the lock, we can't guarantee that there won't
+ * be some pages around when we return, newly created by concurrent activity.
+ */
+void
+osi_VM_TryToSmush(avc, acred, sync)
+ struct vcache *avc;
+ struct AFS_UCRED *acred;
+ int sync;
+{
+ struct vnode *vp=(struct vnode *)avc;
+ void *object;
+ kern_return_t kret;
+ off_t size, lastpg;
+
+ ReleaseWriteLock(&avc->lock);
+ AFS_GUNLOCK();
+ if (UBCINFOEXISTS(vp)) {
+ size=ubc_getsize(vp);
+ kret=ubc_invalidate(vp,0,size);
+ if (kret != 1) /* should be KERN_SUCCESS */
+ printf("TryToSmush: invalidate failed (error = %d)\n", kret);
+ }
+ AFS_GLOCK();
+ ObtainWriteLock(&avc->lock,59);
+}
+
+/* Purge VM for a file when its callback is revoked.
+ *
+ * Locking: No lock is held, not even the global lock.
+ */
+/* XXX this seems to not be referenced anywhere. *somebody* ought to be calling
+ this, and also making sure that ubc's idea of the filesize is right more
+ often */
+void
+osi_VM_FlushPages(avc, credp)
+ struct vcache *avc;
+ struct AFS_UCRED *credp;
+{
+ struct vnode *vp=(struct vnode *)avc;
+ void *object;
+ kern_return_t kret;
+ off_t size;
+ if (UBCINFOEXISTS(vp)) {
+ size=ubc_getsize(vp);
+ kret=ubc_invalidate(vp,0,size);
+ if (kret != 1) /* Should be KERN_SUCCESS */
+ printf("VMFlushPages: invalidate failed (error = %d)\n", kret);
+ /* XXX what about when not CStatd */
+ if (avc->states & CStatd && size != avc->m.Length)
+ ubc_setsize(vp, avc->m.Length);
+ }
+}
+
+/* Purge pages beyond end-of-file, when truncating a file.
+ *
+ * Locking: no lock is held, not even the global lock.
+ * activeV is raised. This is supposed to block pageins, but at present
+ * it only works on Solaris.
+ */
+void
+osi_VM_Truncate(avc, alen, acred)
+ struct vcache *avc;
+ int alen;
+ struct AFS_UCRED *acred;
+{
+ struct vnode *vp=(struct vnode *)avc;
+ if (UBCINFOEXISTS(vp)) {
+ ubc_setsize(vp, alen);
+ }
+}
+
+extern struct AFS_UCRED afs_osi_cred;
+extern afs_rwlock_t afs_xvcache;
+/* vnreclaim and vinactive are probably not aggressive enough to keep
+ enough afs vcaches free, so we try to do some of it ourselves */
+/* XXX there's probably not nearly enough locking here */
+void osi_VM_TryReclaim(avc, slept)
+ struct vcache *avc;
+ int *slept;
+{
+ struct proc *p=current_proc();
+ struct vnode *vp=(struct vnode *)avc;
+ void *obj;
+
+ if (slept)
+ *slept=0;
+ VN_HOLD(vp); /* remove from inactive list */
+ if (!simple_lock_try(&vp->v_interlock)) {
+ AFS_RELE(vp);
+ return;
+ }
+ if (!UBCINFOEXISTS(vp) || vp->v_count != 2) {
+ simple_unlock(&vp->v_interlock);
+ AFS_RELE(vp);
+ return;
+ }
+ if (vp->v_ubcinfo->ui_holdcnt) {
+ simple_unlock(&vp->v_interlock);
+ AFS_RELE(vp);
+ return;
+ }
+ if (slept && ubc_issetflags(vp, UI_WASMAPPED)) {
+ /* We can't possibly release this in time for this NewVCache to get it */
+ simple_unlock(&vp->v_interlock);
+ AFS_RELE(vp);
+ return;
+ }
+
+ vp->v_usecount--; /* we want the usecount to be 1 */
+
+ if (slept) {
+ ReleaseWriteLock(&afs_xvcache);
+ *slept=1;
+ } else
+ ReleaseReadLock(&afs_xvcache);
+ AFS_GUNLOCK();
+ obj=0;
+ if (ubc_issetflags(vp, UI_WASMAPPED)) {
+ simple_unlock(&vp->v_interlock);
+ ubc_release(vp);
+ if (ubc_issetflags(vp, UI_HASOBJREF))
+ printf("ubc_release didn't release the reference?!\n");
+ } else if (!vn_lock(vp, LK_EXCLUSIVE|LK_INTERLOCK,current_proc())) {
+#ifdef UBC_NOREACTIVATE
+ obj = ubc_getobject(vp,(UBC_NOREACTIVATE|UBC_HOLDOBJECT));
+#else
+ obj = ubc_getobject(vp);
+#endif
+ (void)ubc_clean(vp, 1);
+ vinvalbuf(vp, V_SAVE, &afs_osi_cred, p, 0, 0);
+ if (vp->v_usecount == 1)
+ VOP_INACTIVE(vp, p);
+ else
+ VOP_UNLOCK(vp, 0, p);
+ if (ISSET(vp->v_flag, VTERMINATE))
+ panic("afs_vnreclaim: already teminating");
+ SET(vp->v_flag, VTERMINATE);
+ memory_object_destroy(obj, 0);
+ while (ISSET(vp->v_flag, VTERMINATE)) {
+ SET(vp->v_flag, VTERMWANT);
+ tsleep((caddr_t)&vp->v_ubcinfo, PINOD, "afs_vnreclaim", 0);
+ }
+ } else {
+ if (simple_lock_try(&vp->v_interlock))
+ panic("afs_vnreclaim: slept, but did no work :(");
+ if (UBCINFOEXISTS(vp) && vp->v_count == 1) {
+ vp->v_usecount++;
+ simple_unlock(&vp->v_interlock);
+ AFS_RELE(vp);
+ } else
+ simple_unlock(&vp->v_interlock);
+ }
+ AFS_GLOCK();
+ if (slept)
+ ObtainWriteLock(&afs_xvcache,175);
+ else
+ ObtainReadLock(&afs_xvcache);
+}
+
+void osi_VM_NukePages(struct vnode *vp, off_t offset, off_t size) {
+
+ void *object;
+ struct vcache *avc = (struct vcache *)vp;
+
+ object=NULL;
+#ifdef UBC_NOREACTIVATE
+ if (UBCINFOEXISTS(vp))
+ object = ubc_getobject(vp, UBC_NOREACTIVATE);
+#else
+ if (UBCINFOEXISTS(vp))
+ object = ubc_getobject(vp);
+#endif
+ if (!object)
+ return;
+
+ offset=trunc_page(offset);
+ size=round_page(size+1);
+
+#ifdef UBC_NOREACTIVATE
+ while (size) {
+ memory_object_page_op(object, (vm_offset_t)offset,
+ UPL_POP_SET | UPL_POP_BUSY | UPL_POP_DUMP,
+ 0, 0);
+ size-=PAGE_SIZE;
+ offset+=PAGE_SIZE;
+ }
+#else
+ ubc_setsize(vp, offset);
+ size=(offset + size > avc->m.Length) ? offset + size : avc->m.Length;
+ ubc_setsize(vp, size);
+#endif
+
+}
+int osi_VM_Setup(struct vcache *avc) {
+ int error;
+ struct vnode *vp=(struct vnode *)avc;
+
+ if (UBCISVALID(vp) && (avc->states & CStatd)) {
+ if (!UBCINFOEXISTS(vp) && !ISSET(vp->v_flag, VTERMINATE)) {
+ osi_vnhold(avc,0);
+ AFS_GUNLOCK();
+ if ((error=ubc_info_init(&avc->v))) {
+ AFS_GLOCK();
+ AFS_RELE(avc);
+ return error;
+ }
+ simple_lock(&avc->v.v_interlock);
+ if (!ubc_issetflags(&avc->v, UI_HASOBJREF))
+#ifdef UBC_NOREACTIVATE
+ if (ubc_getobject(&avc->v, (UBC_NOREACTIVATE|UBC_HOLDOBJECT)))
+ panic("VM_Setup: null object");
+#else
+ (void)_ubc_getobject(&avc->v, 1); /* return value not used */
+#endif
+ simple_unlock(&avc->v.v_interlock);
+ AFS_GLOCK();
+ AFS_RELE(avc);
+ }
+ if (UBCINFOEXISTS(&avc->v))
+ ubc_setsize(&avc->v, avc->m.Length);
+ }
+ return 0;
+}
--- /dev/null
+#include <afs/param.h> /* Should be always first */
+#include <afs/sysincludes.h> /* Standard vendor system headers */
+#include <afs/afsincludes.h> /* Afs-based standard headers */
+#include <afs/afs_stats.h> /* statistics */
+#include <sys/malloc.h>
+#include <sys/namei.h>
+#include <sys/ubc.h>
+
+int afs_vop_lookup(struct vop_lookup_args *);
+int afs_vop_create(struct vop_create_args *);
+int afs_vop_mknod(struct vop_mknod_args *);
+int afs_vop_open(struct vop_open_args *);
+int afs_vop_close(struct vop_close_args *);
+int afs_vop_access(struct vop_access_args *);
+int afs_vop_getattr(struct vop_getattr_args *);
+int afs_vop_setattr(struct vop_setattr_args *);
+int afs_vop_read(struct vop_read_args *);
+int afs_vop_write(struct vop_write_args *);
+int afs_vop_pagein(struct vop_pagein_args *);
+int afs_vop_pageout(struct vop_pageout_args *);
+int afs_vop_ioctl(struct vop_ioctl_args *);
+int afs_vop_select(struct vop_select_args *);
+int afs_vop_mmap(struct vop_mmap_args *);
+int afs_vop_fsync(struct vop_fsync_args *);
+int afs_vop_seek(struct vop_seek_args *);
+int afs_vop_remove(struct vop_remove_args *);
+int afs_vop_link(struct vop_link_args *);
+int afs_vop_rename(struct vop_rename_args *);
+int afs_vop_mkdir(struct vop_mkdir_args *);
+int afs_vop_rmdir(struct vop_rmdir_args *);
+int afs_vop_symlink(struct vop_symlink_args *);
+int afs_vop_readdir(struct vop_readdir_args *);
+int afs_vop_readlink(struct vop_readlink_args *);
+extern int ufs_abortop(struct vop_abortop_args *);
+int afs_vop_inactive(struct vop_inactive_args *);
+int afs_vop_reclaim(struct vop_reclaim_args *);
+int afs_vop_lock(struct vop_lock_args *);
+int afs_vop_unlock(struct vop_unlock_args *);
+int afs_vop_bmap(struct vop_bmap_args *);
+int afs_vop_strategy(struct vop_strategy_args *);
+int afs_vop_print(struct vop_print_args *);
+int afs_vop_islocked(struct vop_islocked_args *);
+int afs_vop_pathconf(struct vop_pathconf_args *);
+int afs_vop_advlock(struct vop_advlock_args *);
+int afs_vop_truncate(struct vop_truncate_args *);
+int afs_vop_update(struct vop_update_args *);
+int afs_vop_blktooff __P((struct vop_blktooff_args *));
+int afs_vop_offtoblk __P((struct vop_offtoblk_args *));
+int afs_vop_cmap __P((struct vop_cmap_args *));
+
+
+#define afs_vop_opnotsupp \
+ ((int (*) __P((struct vop_reallocblks_args *)))eopnotsupp)
+#define afs_vop_valloc afs_vop_opnotsupp
+#define afs_vop_vfree afs_vop_opnotsupp
+#define afs_vop_blkatoff afs_vop_opnotsupp
+#define afs_vop_reallocblks afs_vop_opnotsupp
+
+/* Global vfs data structures for AFS. */
+int (**afs_vnodeop_p)();
+struct vnodeopv_entry_desc afs_vnodeop_entries[] = {
+ { &vop_default_desc, vn_default_error },
+ { &vop_lookup_desc, afs_vop_lookup }, /* lookup */
+ { &vop_create_desc, afs_vop_create }, /* create */
+ { &vop_mknod_desc, afs_vop_mknod }, /* mknod */
+ { &vop_open_desc, afs_vop_open }, /* open */
+ { &vop_close_desc, afs_vop_close }, /* close */
+ { &vop_access_desc, afs_vop_access }, /* access */
+ { &vop_getattr_desc, afs_vop_getattr }, /* getattr */
+ { &vop_setattr_desc, afs_vop_setattr }, /* setattr */
+ { &vop_read_desc, afs_vop_read }, /* read */
+ { &vop_write_desc, afs_vop_write }, /* write */
+ { &vop_pagein_desc, afs_vop_pagein }, /* read */
+ { &vop_pageout_desc, afs_vop_pageout }, /* write */
+ { &vop_ioctl_desc, afs_vop_ioctl }, /* XXX ioctl */
+ { &vop_select_desc, afs_vop_select }, /* select */
+ { &vop_mmap_desc, afs_vop_mmap }, /* mmap */
+ { &vop_fsync_desc, afs_vop_fsync }, /* fsync */
+ { &vop_seek_desc, afs_vop_seek }, /* seek */
+ { &vop_remove_desc, afs_vop_remove }, /* remove */
+ { &vop_link_desc, afs_vop_link }, /* link */
+ { &vop_rename_desc, afs_vop_rename }, /* rename */
+ { &vop_mkdir_desc, afs_vop_mkdir }, /* mkdir */
+ { &vop_rmdir_desc, afs_vop_rmdir }, /* rmdir */
+ { &vop_symlink_desc, afs_vop_symlink }, /* symlink */
+ { &vop_readdir_desc, afs_vop_readdir }, /* readdir */
+ { &vop_readlink_desc, afs_vop_readlink }, /* readlink */
+ /* Yes, we use the ufs_abortop call. It just releases the namei
+ buffer stuff */
+ { &vop_abortop_desc, ufs_abortop }, /* abortop */
+ { &vop_inactive_desc, afs_vop_inactive }, /* inactive */
+ { &vop_reclaim_desc, afs_vop_reclaim }, /* reclaim */
+ { &vop_lock_desc, afs_vop_lock }, /* lock */
+ { &vop_unlock_desc, afs_vop_unlock }, /* unlock */
+ { &vop_bmap_desc, afs_vop_bmap }, /* bmap */
+ { &vop_strategy_desc, afs_vop_strategy }, /* strategy */
+ { &vop_print_desc, afs_vop_print }, /* print */
+ { &vop_islocked_desc, afs_vop_islocked }, /* islocked */
+ { &vop_pathconf_desc, afs_vop_pathconf }, /* pathconf */
+ { &vop_advlock_desc, afs_vop_advlock }, /* advlock */
+ { &vop_blkatoff_desc, afs_vop_blkatoff }, /* blkatoff */
+ { &vop_valloc_desc, afs_vop_valloc }, /* valloc */
+ { &vop_reallocblks_desc, afs_vop_reallocblks }, /* reallocblks */
+ { &vop_vfree_desc, afs_vop_vfree }, /* vfree */
+ { &vop_truncate_desc, afs_vop_truncate }, /* truncate */
+ { &vop_update_desc, afs_vop_update }, /* update */
+ { &vop_blktooff_desc, afs_vop_blktooff }, /* blktooff */
+ { &vop_offtoblk_desc, afs_vop_offtoblk }, /* offtoblk */
+ { &vop_cmap_desc, afs_vop_cmap }, /* cmap */
+ { &vop_bwrite_desc, vn_bwrite },
+ { (struct vnodeop_desc*)NULL, (int(*)())NULL }
+};
+struct vnodeopv_desc afs_vnodeop_opv_desc =
+ { &afs_vnodeop_p, afs_vnodeop_entries };
+
+#define GETNAME() \
+ struct componentname *cnp = ap->a_cnp; \
+ char *name; \
+ MALLOC(name, char *, cnp->cn_namelen+1, M_TEMP, M_WAITOK); \
+ bcopy(cnp->cn_nameptr, name, cnp->cn_namelen); \
+ name[cnp->cn_namelen] = '\0'
+
+#define DROPNAME() FREE(name, M_TEMP)
+
+
+
+int
+afs_vop_lookup(ap)
+struct vop_lookup_args /* {
+ struct vnodeop_desc * a_desc;
+ struct vnode *a_dvp;
+ struct vnode **a_vpp;
+ struct componentname *a_cnp;
+ } */ *ap;
+{
+ int error;
+ struct vcache *vcp;
+ struct vnode *vp, *dvp;
+ register int flags = ap->a_cnp->cn_flags;
+ int lockparent; /* 1 => lockparent flag is set */
+ int wantparent; /* 1 => wantparent or lockparent flag */
+ struct proc *p;
+ GETNAME();
+ p=cnp->cn_proc;
+ lockparent = flags & LOCKPARENT;
+ wantparent = flags & (LOCKPARENT|WANTPARENT);
+
+ if (ap->a_dvp->v_type != VDIR) {
+ *ap->a_vpp = 0;
+ DROPNAME();
+ return ENOTDIR;
+ }
+ dvp = ap->a_dvp;
+ if (flags & ISDOTDOT)
+ VOP_UNLOCK(dvp, 0, p);
+ AFS_GLOCK();
+ error = afs_lookup((struct vcache *)dvp, name, &vcp, cnp->cn_cred);
+ AFS_GUNLOCK();
+ if (error) {
+ if (flags & ISDOTDOT)
+ VOP_LOCK(dvp, LK_EXCLUSIVE | LK_RETRY, p);
+ if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
+ (flags & ISLASTCN) && error == ENOENT)
+ error = EJUSTRETURN;
+ if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
+ cnp->cn_flags |= SAVENAME;
+ DROPNAME();
+ *ap->a_vpp = 0;
+ return (error);
+ }
+ vp = (struct vnode *)vcp; /* always get a node if no error */
+
+ /* The parent directory comes in locked. We unlock it on return
+ unless the caller wants it left locked.
+ we also always return the vnode locked. */
+
+ if (flags & ISDOTDOT) {
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ /* always return the child locked */
+ if (lockparent && (flags & ISLASTCN) &&
+ (error = vn_lock(dvp, LK_EXCLUSIVE, p))) {
+ vput(vp);
+ DROPNAME();
+ return (error);
+ }
+ } else if (vp == dvp) {
+ /* they're the same; afs_lookup() already ref'ed the leaf.
+ It came in locked, so we don't need to ref OR lock it */
+ } else {
+ if (!lockparent || !(flags & ISLASTCN))
+ VOP_UNLOCK(dvp, 0, p); /* done with parent. */
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ /* always return the child locked */
+ }
+ *ap->a_vpp = vp;
+
+ if ((cnp->cn_nameiop == RENAME && wantparent && (flags & ISLASTCN) ||
+ (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))))
+ cnp->cn_flags |= SAVENAME;
+
+ DROPNAME();
+ return error;
+}
+
+int
+afs_vop_create(ap)
+ struct vop_create_args /* {
+ struct vnode *a_dvp;
+ struct vnode **a_vpp;
+ struct componentname *a_cnp;
+ struct vattr *a_vap;
+ } */ *ap;
+{
+ int error = 0;
+ struct vcache *vcp;
+ register struct vnode *dvp = ap->a_dvp;
+ struct proc *p;
+ GETNAME();
+ p=cnp->cn_proc;
+
+ /* vnode layer handles excl/nonexcl */
+ AFS_GLOCK();
+ error = afs_create((struct vcache *)dvp, name, ap->a_vap, NONEXCL,
+ ap->a_vap->va_mode, &vcp,
+ cnp->cn_cred);
+ AFS_GUNLOCK();
+ if (error) {
+ VOP_ABORTOP(dvp, cnp);
+ vput(dvp);
+ DROPNAME();
+ return(error);
+ }
+
+ if (vcp) {
+ *ap->a_vpp = (struct vnode *)vcp;
+ vn_lock((struct vnode *)vcp, LK_EXCLUSIVE| LK_RETRY, p);
+ if (UBCINFOMISSING((struct vnode *)vcp) ||
+ UBCINFORECLAIMED((struct vnode *)vcp))
+ ubc_info_init((struct vnode *)vcp);
+ }
+ else *ap->a_vpp = 0;
+
+ if ((cnp->cn_flags & SAVESTART) == 0)
+ FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI);
+ vput(dvp);
+ DROPNAME();
+ return error;
+}
+
+int
+afs_vop_mknod(ap)
+ struct vop_mknod_args /* {
+ struct vnode *a_dvp;
+ struct vnode **a_vpp;
+ struct componentname *a_cnp;
+ struct vattr *a_vap;
+ } */ *ap;
+{
+ FREE_ZONE(ap->a_cnp->cn_pnbuf, ap->a_cnp->cn_pnlen, M_NAMEI);
+ vput(ap->a_dvp);
+ return(ENODEV);
+}
+
+int
+afs_vop_open(ap)
+ struct vop_open_args /* {
+ struct vnode *a_vp;
+ int a_mode;
+ struct ucred *a_cred;
+ struct proc *a_p;
+ } */ *ap;
+{
+ int error;
+ struct vcache *vc = (struct vcache *)ap->a_vp;
+ AFS_GLOCK();
+ error = afs_open(&vc, ap->a_mode, ap->a_cred);
+#ifdef DIAGNOSTIC
+ if ((struct vnode *)vc != ap->a_vp)
+ panic("AFS open changed vnode!");
+#endif
+ afs_BozonLock(&vc->pvnLock, vc);
+ osi_FlushPages(vc);
+ afs_BozonUnlock(&vc->pvnLock, vc);
+ AFS_GUNLOCK();
+ return error;
+}
+
+int
+afs_vop_close(ap)
+ struct vop_close_args /* {
+ struct vnode *a_vp;
+ int a_fflag;
+ struct ucred *a_cred;
+ struct proc *a_p;
+ } */ *ap;
+{
+ int code;
+ struct vcache *avc=ap->a_vp;
+ AFS_GLOCK();
+ if (ap->a_cred)
+ code=afs_close(avc, ap->a_fflag, ap->a_cred, ap->a_p);
+ else
+ code=afs_close(avc, ap->a_fflag, &afs_osi_cred, ap->a_p);
+ afs_BozonLock(&avc->pvnLock, avc);
+ osi_FlushPages(avc); /* hold bozon lock, but not basic vnode lock */
+ afs_BozonUnlock(&avc->pvnLock, avc);
+ AFS_GUNLOCK();
+ return code;
+}
+
+int
+afs_vop_access(ap)
+ struct vop_access_args /* {
+ struct vnode *a_vp;
+ int a_mode;
+ struct ucred *a_cred;
+ struct proc *a_p;
+ } */ *ap;
+{
+ int code;
+ AFS_GLOCK();
+ code=afs_access((struct vcache *)ap->a_vp, ap->a_mode, ap->a_cred);
+ AFS_GUNLOCK();
+ return code;
+}
+int
+afs_vop_getattr(ap)
+ struct vop_getattr_args /* {
+ struct vnode *a_vp;
+ struct vattr *a_vap;
+ struct ucred *a_cred;
+ struct proc *a_p;
+ } */ *ap;
+{
+ int code;
+ AFS_GLOCK();
+ code=afs_getattr((struct vcache *)ap->a_vp, ap->a_vap, ap->a_cred);
+ AFS_GUNLOCK();
+ return code;
+}
+int
+afs_vop_setattr(ap)
+ struct vop_setattr_args /* {
+ struct vnode *a_vp;
+ struct vattr *a_vap;
+ struct ucred *a_cred;
+ struct proc *a_p;
+ } */ *ap;
+{
+ int code;
+ AFS_GLOCK();
+ code=afs_setattr((struct vcache *)ap->a_vp, ap->a_vap, ap->a_cred);
+ AFS_GUNLOCK();
+ return code;
+}
+int
+afs_vop_read(ap)
+ struct vop_read_args /* {
+ struct vnode *a_vp;
+ struct uio *a_uio;
+ int a_ioflag;
+ struct ucred *a_cred;
+ } */ *ap;
+{
+ int code;
+ struct vcache *avc=(struct vcache *)ap->a_vp;
+ AFS_GLOCK();
+ afs_BozonLock(&avc->pvnLock, avc);
+ osi_FlushPages(avc); /* hold bozon lock, but not basic vnode lock */
+ code=afs_read(avc, ap->a_uio, ap->a_cred, 0, 0, 0);
+ afs_BozonUnlock(&avc->pvnLock, avc);
+ AFS_GUNLOCK();
+ return code;
+}
+int
+afs_vop_pagein(ap)
+ struct vop_pagein_args /* {
+ struct vnode *a_vp;
+ upl_t a_pl;
+ vm_offset_t a_pl_offset;
+ off_t a_f_offset;
+ size_t a_size;
+ struct ucred *a_cred;
+ int a_flags;
+ } */ *ap;
+{
+ register struct vnode *vp = ap->a_vp;
+ upl_t pl = ap->a_pl;
+ size_t size= ap->a_size;
+ off_t f_offset = ap->a_f_offset;
+ vm_offset_t pl_offset = ap->a_pl_offset;
+ int flags = ap->a_flags;
+ struct ucred *cred;
+ vm_offset_t ioaddr;
+ struct uio auio;
+ struct iovec aiov;
+ struct uio * uio = &auio;
+ int nocommit = flags & UPL_NOCOMMIT;
+
+ int code;
+ struct vcache *tvc=(struct vcache *)vp;
+
+ if (UBCINVALID(vp)) {
+#if DIAGNOSTIC
+ panic("afs_vop_pagein: invalid vp");
+#endif /* DIAGNOSTIC */
+ return (EPERM);
+ }
+
+ UBCINFOCHECK("afs_vop_pagein", vp);
+ if(pl == (upl_t)NULL) {
+ panic("afs_vop_pagein: no upl");
+ }
+
+ cred = ubc_getcred(vp);
+ if (cred == NOCRED)
+ cred = ap->a_cred;
+
+ if (size == 0) {
+ if (!nocommit)
+ kernel_upl_abort_range(pl, pl_offset, size,
+ UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
+ return (0);
+ }
+ if (f_offset < 0) {
+ if (!nocommit)
+ kernel_upl_abort_range(pl, pl_offset, size,
+ UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
+ return (EINVAL);
+ }
+ if (f_offset & PAGE_MASK)
+ panic("afs_vop_pagein: offset not page aligned");
+
+ auio.uio_iov = &aiov;
+ auio.uio_iovcnt = 1;
+ auio.uio_offset = f_offset;
+ auio.uio_segflg = UIO_SYSSPACE;
+ auio.uio_rw = UIO_READ;
+ auio.uio_procp = NULL;
+ kernel_upl_map(kernel_map, pl, &ioaddr);
+ ioaddr += pl_offset;
+ auio.uio_resid = aiov.iov_len = size;
+ aiov.iov_base = (caddr_t)ioaddr;
+ AFS_GLOCK();
+ afs_BozonLock(&tvc->pvnLock, tvc);
+ osi_FlushPages(tvc); /* hold bozon lock, but not basic vnode lock */
+ code=afs_read(tvc, uio, cred, 0, 0, 0);
+ if (code == 0) {
+ ObtainWriteLock(&tvc->lock, 2);
+ tvc->states |= CMAPPED;
+ ReleaseWriteLock(&tvc->lock);
+ }
+ afs_BozonUnlock(&tvc->pvnLock, tvc);
+ AFS_GUNLOCK();
+ kernel_upl_unmap(kernel_map, pl);
+ if (!nocommit) {
+ if (code)
+ kernel_upl_abort_range(pl, pl_offset, size,
+ UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
+ else
+ kernel_upl_commit_range(pl, pl_offset, size,
+ UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY,
+ UPL_GET_INTERNAL_PAGE_LIST(pl), MAX_UPL_TRANSFER);
+ }
+ return code;
+}
+
+int
+afs_vop_write(ap)
+ struct vop_write_args /* {
+ struct vnode *a_vp;
+ struct uio *a_uio;
+ int a_ioflag;
+ struct ucred *a_cred;
+ } */ *ap;
+{
+ int code;
+ struct vcache *avc=(struct vcache *)ap->a_vp;
+ void *object;
+ AFS_GLOCK();
+ afs_BozonLock(&avc->pvnLock, avc);
+ osi_FlushPages(avc); /* hold bozon lock, but not basic vnode lock */
+ if (UBCINFOEXISTS(ap->a_vp))
+ ubc_clean(ap->a_vp, 1);
+ if (UBCINFOEXISTS(ap->a_vp))
+ osi_VM_NukePages(ap->a_vp, ap->a_uio->uio_offset, ap->a_uio->uio_resid);
+ code=afs_write((struct vcache *)ap->a_vp, ap->a_uio, ap->a_ioflag, ap->a_cred, 0);
+ afs_BozonUnlock(&avc->pvnLock, avc);
+ AFS_GUNLOCK();
+ return code;
+}
+
+int
+afs_vop_pageout(ap)
+ struct vop_pageout_args /* {
+ struct vnode *a_vp;
+ upl_t a_pl,
+ vm_offset_t a_pl_offset,
+ off_t a_f_offset,
+ size_t a_size,
+ struct ucred *a_cred,
+ int a_flags
+ } */ *ap;
+{
+ register struct vnode *vp = ap->a_vp;
+ upl_t pl = ap->a_pl;
+ size_t size= ap->a_size;
+ off_t f_offset = ap->a_f_offset;
+ vm_offset_t pl_offset = ap->a_pl_offset;
+ int flags = ap->a_flags;
+ struct ucred *cred;
+ vm_offset_t ioaddr;
+ struct uio auio;
+ struct iovec aiov;
+ struct uio * uio = &auio;
+ int nocommit = flags & UPL_NOCOMMIT;
+
+ int code;
+ struct vcache *tvc=(struct vcache *)vp;
+
+ if (UBCINVALID(vp)) {
+#if DIAGNOSTIC
+ panic("afs_vop_pageout: invalid vp");
+#endif /* DIAGNOSTIC */
+ return (EPERM);
+ }
+
+ UBCINFOCHECK("afs_vop_pageout", vp);
+ if(pl == (upl_t)NULL) {
+ panic("afs_vop_pageout: no upl");
+ }
+#if 1
+ { int lbn, iosize, s;
+ struct buf *bp;
+ int biosize = DEV_BSIZE;
+
+ lbn = f_offset / DEV_BSIZE;
+
+ for (iosize = size; iosize > 0; iosize -= biosize, lbn++) {
+
+ s = splbio();
+ if (bp = incore(vp, lbn)) {
+ if (ISSET(bp->b_flags, B_BUSY))
+ panic("nfs_pageout: found BUSY buffer incore\n")
+;
+
+ bremfree(bp);
+ SET(bp->b_flags, (B_BUSY | B_INVAL));
+ brelse(bp);
+ }
+ splx(s);
+ }
+ }
+#endif
+ cred = ubc_getcred(vp);
+ if (cred == NOCRED)
+ cred = ap->a_cred;
+
+ if (size == 0) {
+ if (!nocommit)
+ kernel_upl_abort_range(pl, pl_offset, size,
+ UPL_ABORT_FREE_ON_EMPTY);
+ return (0);
+ }
+ if (flags & (IO_APPEND | IO_SYNC))
+ panic("nfs_pageout: (IO_APPEND | IO_SYNC)");
+ if (f_offset < 0) {
+ if (!nocommit)
+ kernel_upl_abort_range(pl, pl_offset, size,
+ UPL_ABORT_FREE_ON_EMPTY);
+ return (EINVAL);
+ }
+ if (f_offset >= tvc->m.Length) {
+ if (!nocommit)
+ kernel_upl_abort_range(pl, pl_offset, size,
+ UPL_ABORT_FREE_ON_EMPTY);
+ return (EINVAL);
+ }
+
+ if (f_offset & PAGE_MASK)
+ panic("afs_vop_pageout: offset not page aligned");
+
+ auio.uio_iov = &aiov;
+ auio.uio_iovcnt = 1;
+ auio.uio_offset = f_offset;
+ auio.uio_segflg = UIO_SYSSPACE;
+ auio.uio_rw = UIO_WRITE;
+ auio.uio_procp = NULL;
+ kernel_upl_map(kernel_map, pl, &ioaddr);
+ ioaddr += pl_offset;
+ auio.uio_resid = aiov.iov_len = size;
+ aiov.iov_base = (caddr_t)ioaddr;
+#if 1 /* USV [ */
+ {
+ /*
+ * check for partial page and clear the
+ * contents past end of the file before
+ * releasing it in the VM page cache
+ */
+ if ((f_offset < tvc->m.Length) && (f_offset + size) > tvc->m.Length) {
+ size_t io = tvc->m.Length - f_offset;
+
+ bzero((caddr_t)(ioaddr + pl_offset + io), size - io);
+ }
+ }
+#endif /* ] USV */
+
+ AFS_GLOCK();
+ afs_BozonLock(&tvc->pvnLock, tvc);
+ osi_FlushPages(tvc); /* hold bozon lock, but not basic vnode lock */
+ ObtainWriteLock(&tvc->lock, 1);
+ afs_FakeOpen(tvc);
+ ReleaseWriteLock(&tvc->lock);
+
+ code=afs_write(tvc, uio, flags, cred, 0);
+
+ ObtainWriteLock(&tvc->lock, 1);
+ afs_FakeClose(tvc, cred);
+ ReleaseWriteLock(&tvc->lock);
+ afs_BozonUnlock(&tvc->pvnLock, tvc);
+ AFS_GUNLOCK();
+ kernel_upl_unmap(kernel_map, pl);
+ if (!nocommit) {
+ if(code)
+ kernel_upl_abort_range(pl, pl_offset, size,
+ UPL_ABORT_FREE_ON_EMPTY);
+ else
+ kernel_upl_commit_range(pl, pl_offset, size,
+ UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY,
+ UPL_GET_INTERNAL_PAGE_LIST(pl), MAX_UPL_TRANSFER);
+ }
+
+ return code;
+}
+int
+afs_vop_ioctl(ap)
+ struct vop_ioctl_args /* {
+ struct vnode *a_vp;
+ int a_command;
+ caddr_t a_data;
+ int a_fflag;
+ struct ucred *a_cred;
+ struct proc *a_p;
+ } */ *ap;
+{
+ struct vcache *tvc = (struct vcache *)ap->a_vp;
+ struct afs_ioctl data;
+ int error = 0;
+
+ /* in case we ever get in here... */
+
+ AFS_STATCNT(afs_ioctl);
+ if (((ap->a_command >> 8) & 0xff) == 'V') {
+ /* This is a VICEIOCTL call */
+ AFS_GLOCK();
+ error = HandleIoctl(tvc, (struct file *)0/*Not used*/,
+ ap->a_command, ap->a_data);
+ AFS_GUNLOCK();
+ return(error);
+ } else {
+ /* No-op call; just return. */
+ return(ENOTTY);
+ }
+}
+
+/* ARGSUSED */
+int
+afs_vop_select(ap)
+ struct vop_select_args /* {
+ struct vnode *a_vp;
+ int a_which;
+ int a_fflags;
+ struct ucred *a_cred;
+ struct proc *a_p;
+ } */ *ap;
+{
+ /*
+ * We should really check to see if I/O is possible.
+ */
+ return (1);
+}
+/*
+ * Mmap a file
+ *
+ * NB Currently unsupported.
+ */
+/* ARGSUSED */
+int
+afs_vop_mmap(ap)
+ struct vop_mmap_args /* {
+ struct vnode *a_vp;
+ int a_fflags;
+ struct ucred *a_cred;
+ struct proc *a_p;
+ } */ *ap;
+{
+ return (EINVAL);
+}
+
+int
+afs_vop_fsync(ap)
+ struct vop_fsync_args /* {
+ struct vnode *a_vp;
+ struct ucred *a_cred;
+ int a_waitfor;
+ struct proc *a_p;
+ } */ *ap;
+{
+ int wait = ap->a_waitfor == MNT_WAIT;
+ int error;
+ register struct vnode *vp = ap->a_vp;
+
+ AFS_GLOCK();
+ /*vflushbuf(vp, wait);*/
+ if (ap->a_cred)
+ error=afs_fsync((struct vcache *)vp, ap->a_cred);
+ else
+ error=afs_fsync((struct vcache *)vp, &afs_osi_cred);
+ AFS_GUNLOCK();
+ return error;
+}
+
+int
+afs_vop_seek(ap)
+ struct vop_seek_args /* {
+ struct vnode *a_vp;
+ off_t a_oldoff;
+ off_t a_newoff;
+ struct ucred *a_cred;
+ } */ *ap;
+{
+ if (ap->a_newoff > ULONG_MAX) /* AFS doesn't support 64-bit offsets */
+ return EINVAL;
+ return (0);
+}
+
+int
+afs_vop_remove(ap)
+ struct vop_remove_args /* {
+ struct vnode *a_dvp;
+ struct vnode *a_vp;
+ struct componentname *a_cnp;
+ } */ *ap;
+{
+ int error = 0;
+ register struct vnode *vp = ap->a_vp;
+ register struct vnode *dvp = ap->a_dvp;
+
+ GETNAME();
+ AFS_GLOCK();
+ error = afs_remove((struct vcache *)dvp, name, cnp->cn_cred);
+ AFS_GUNLOCK();
+ cache_purge(vp);
+ if (dvp == vp)
+ vrele(vp);
+ else
+ vput(vp);
+ vput(dvp);
+ if (UBCINFOEXISTS(vp)) {
+ int wasmapped=ubc_issetflags(vp, UI_WASMAPPED);
+ int hasobjref=ubc_issetflags(vp, UI_HASOBJREF);
+ if (wasmapped)
+ (void) ubc_uncache(vp);
+ if (hasobjref)
+ ubc_release(vp);
+ /* WARNING vp may not be valid after this */
+ }
+
+ FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI);
+ DROPNAME();
+ return error;
+}
+
+int
+afs_vop_link(ap)
+ struct vop_link_args /* {
+ struct vnode *a_vp;
+ struct vnode *a_tdvp;
+ struct componentname *a_cnp;
+ } */ *ap;
+{
+ int error = 0;
+ register struct vnode *dvp = ap->a_tdvp;
+ register struct vnode *vp = ap->a_vp;
+ struct proc *p;
+
+ GETNAME();
+ p=cnp->cn_proc;
+ if (dvp->v_mount != vp->v_mount) {
+ VOP_ABORTOP(vp, cnp);
+ error = EXDEV;
+ goto out;
+ }
+ if (vp->v_type == VDIR) {
+ VOP_ABORTOP(vp, cnp);
+ error = EISDIR;
+ goto out;
+ }
+ if (error = vn_lock(vp, LK_EXCLUSIVE, p)) {
+ VOP_ABORTOP(dvp, cnp);
+ goto out;
+ }
+ AFS_GLOCK();
+ error = afs_link((struct vcache *)vp, (struct vcache *)dvp, name, cnp->cn_cred);
+ AFS_GUNLOCK();
+ FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI);
+ if (dvp != vp)
+ VOP_UNLOCK(vp,0, p);
+out:
+ vput(dvp);
+ DROPNAME();
+ return error;
+}
+
+int
+afs_vop_rename(ap)
+ struct vop_rename_args /* {
+ struct vnode *a_fdvp;
+ struct vnode *a_fvp;
+ struct componentname *a_fcnp;
+ struct vnode *a_tdvp;
+ struct vnode *a_tvp;
+ struct componentname *a_tcnp;
+ } */ *ap;
+{
+ int error = 0;
+ struct componentname *fcnp = ap->a_fcnp;
+ char *fname;
+ struct componentname *tcnp = ap->a_tcnp;
+ char *tname;
+ struct vnode *tvp = ap->a_tvp;
+ register struct vnode *tdvp = ap->a_tdvp;
+ struct vnode *fvp = ap->a_fvp;
+ register struct vnode *fdvp = ap->a_fdvp;
+ struct proc *p=fcnp->cn_proc;
+
+ /*
+ * Check for cross-device rename.
+ */
+ if ((fvp->v_mount != tdvp->v_mount) ||
+ (tvp && (fvp->v_mount != tvp->v_mount))) {
+ error = EXDEV;
+abortit:
+ VOP_ABORTOP(tdvp, tcnp); /* XXX, why not in NFS? */
+ if (tdvp == tvp)
+ vrele(tdvp);
+ else
+ vput(tdvp);
+ if (tvp)
+ vput(tvp);
+ VOP_ABORTOP(fdvp, fcnp); /* XXX, why not in NFS? */
+ vrele(fdvp);
+ vrele(fvp);
+ return (error);
+ }
+ /*
+ * if fvp == tvp, we're just removing one name of a pair of
+ * directory entries for the same element. convert call into rename.
+ ( (pinched from NetBSD 1.0's ufs_rename())
+ */
+ if (fvp == tvp) {
+ if (fvp->v_type == VDIR) {
+ error = EINVAL;
+ goto abortit;
+ }
+
+ /* Release destination completely. */
+ VOP_ABORTOP(tdvp, tcnp);
+ vput(tdvp);
+ vput(tvp);
+
+ /* Delete source. */
+ vrele(fdvp);
+ vrele(fvp);
+ fcnp->cn_flags &= ~MODMASK;
+ fcnp->cn_flags |= LOCKPARENT | LOCKLEAF;
+ if ((fcnp->cn_flags & SAVESTART) == 0)
+ panic("afs_rename: lost from startdir");
+ fcnp->cn_nameiop = DELETE;
+ (void) relookup(fdvp, &fvp, fcnp);
+ return (VOP_REMOVE(fdvp, fvp, fcnp));
+ }
+ if (error = vn_lock(fvp, LK_EXCLUSIVE, p))
+ goto abortit;
+
+ MALLOC(fname, char *, fcnp->cn_namelen+1, M_TEMP, M_WAITOK);
+ bcopy(fcnp->cn_nameptr, fname, fcnp->cn_namelen);
+ fname[fcnp->cn_namelen] = '\0';
+ MALLOC(tname, char *, tcnp->cn_namelen+1, M_TEMP, M_WAITOK);
+ bcopy(tcnp->cn_nameptr, tname, tcnp->cn_namelen);
+ tname[tcnp->cn_namelen] = '\0';
+
+
+ AFS_GLOCK();
+ /* XXX use "from" or "to" creds? NFS uses "to" creds */
+ error = afs_rename((struct vcache *)fdvp, fname, (struct vcache *)tdvp, tname, tcnp->cn_cred);
+ AFS_GUNLOCK();
+
+ VOP_UNLOCK(fvp, 0, p);
+ FREE(fname, M_TEMP);
+ FREE(tname, M_TEMP);
+ if (error)
+ goto abortit; /* XXX */
+ if (tdvp == tvp)
+ vrele(tdvp);
+ else
+ vput(tdvp);
+ if (tvp)
+ vput(tvp);
+ vrele(fdvp);
+ vrele(fvp);
+ return error;
+}
+
+int
+afs_vop_mkdir(ap)
+ struct vop_mkdir_args /* {
+ struct vnode *a_dvp;
+ struct vnode **a_vpp;
+ struct componentname *a_cnp;
+ struct vattr *a_vap;
+ } */ *ap;
+{
+ register struct vnode *dvp = ap->a_dvp;
+ register struct vattr *vap = ap->a_vap;
+ int error = 0;
+ struct vcache *vcp;
+ struct proc *p;
+
+ GETNAME();
+ p=cnp->cn_proc;
+#ifdef DIAGNOSTIC
+ if ((cnp->cn_flags & HASBUF) == 0)
+ panic("afs_vop_mkdir: no name");
+#endif
+ AFS_GLOCK();
+ error = afs_mkdir((struct vcache *)dvp, name, vap, &vcp, cnp->cn_cred);
+ AFS_GUNLOCK();
+ if (error) {
+ VOP_ABORTOP(dvp, cnp);
+ vput(dvp);
+ DROPNAME();
+ return(error);
+ }
+ if (vcp) {
+ *ap->a_vpp = (struct vnode *)vcp;
+ vn_lock((struct vnode *)vcp, LK_EXCLUSIVE|LK_RETRY, p);
+ } else
+ *ap->a_vpp = 0;
+ DROPNAME();
+ FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI);
+ vput(dvp);
+ return error;
+}
+
+int
+afs_vop_rmdir(ap)
+ struct vop_rmdir_args /* {
+ struct vnode *a_dvp;
+ struct vnode *a_vp;
+ struct componentname *a_cnp;
+ } */ *ap;
+{
+ int error = 0;
+ register struct vnode *vp = ap->a_vp;
+ register struct vnode *dvp = ap->a_dvp;
+
+ GETNAME();
+ if (dvp == vp) {
+ vrele(dvp);
+ vput(vp);
+ FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI);
+ DROPNAME();
+ return (EINVAL);
+ }
+
+ AFS_GLOCK();
+ error = afs_rmdir((struct vcache *)dvp, name, cnp->cn_cred);
+ AFS_GUNLOCK();
+ DROPNAME();
+ vput(dvp);
+ vput(vp);
+ return error;
+}
+
+int
+afs_vop_symlink(ap)
+ struct vop_symlink_args /* {
+ struct vnode *a_dvp;
+ struct vnode **a_vpp;
+ struct componentname *a_cnp;
+ struct vattr *a_vap;
+ char *a_target;
+ } */ *ap;
+{
+ register struct vnode *dvp = ap->a_dvp;
+ int error = 0;
+ /* NFS ignores a_vpp; so do we. */
+
+ GETNAME();
+ AFS_GLOCK();
+ error = afs_symlink((struct vcache *)dvp, name, ap->a_vap, ap->a_target,
+ cnp->cn_cred);
+ AFS_GUNLOCK();
+ DROPNAME();
+ FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI);
+ vput(dvp);
+ return error;
+}
+
+int
+afs_vop_readdir(ap)
+ struct vop_readdir_args /* {
+ struct vnode *a_vp;
+ struct uio *a_uio;
+ struct ucred *a_cred;
+ int *a_eofflag;
+ u_long *a_cookies;
+ int ncookies;
+ } */ *ap;
+{
+ int error;
+ off_t off;
+/* printf("readdir %x cookies %x ncookies %d\n", ap->a_vp, ap->a_cookies,
+ ap->a_ncookies); */
+ off=ap->a_uio->uio_offset;
+ AFS_GLOCK();
+ error= afs_readdir((struct vcache *)ap->a_vp, ap->a_uio, ap->a_cred,
+ ap->a_eofflag);
+ AFS_GUNLOCK();
+ if (!error && ap->a_ncookies != NULL) {
+ struct uio *uio = ap->a_uio;
+ const struct dirent *dp, *dp_start, *dp_end;
+ int ncookies;
+ u_long *cookies, *cookiep;
+
+ if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1)
+ panic("afs_readdir: burned cookies");
+ dp = (const struct dirent *)
+ ((const char *)uio->uio_iov->iov_base - (uio->uio_offset - off));
+
+ dp_end = (const struct dirent *) uio->uio_iov->iov_base;
+ for (dp_start = dp, ncookies = 0;
+ dp < dp_end;
+ dp = (const struct dirent *)((const char *) dp + dp->d_reclen))
+ ncookies++;
+
+ MALLOC(cookies, u_long *, ncookies * sizeof(u_long),
+ M_TEMP, M_WAITOK);
+ for (dp = dp_start, cookiep = cookies;
+ dp < dp_end;
+ dp = (const struct dirent *)((const char *) dp + dp->d_reclen)) {
+ off += dp->d_reclen;
+ *cookiep++ = off;
+ }
+ *ap->a_cookies = cookies;
+ *ap->a_ncookies = ncookies;
+ }
+
+ return error;
+}
+
+int
+afs_vop_readlink(ap)
+ struct vop_readlink_args /* {
+ struct vnode *a_vp;
+ struct uio *a_uio;
+ struct ucred *a_cred;
+ } */ *ap;
+{
+ int error;
+/* printf("readlink %x\n", ap->a_vp);*/
+ AFS_GLOCK();
+ error= afs_readlink((struct vcache *)ap->a_vp, ap->a_uio, ap->a_cred);
+ AFS_GUNLOCK();
+ return error;
+}
+
+extern int prtactive;
+
+int
+afs_vop_inactive(ap)
+ struct vop_inactive_args /* {
+ struct vnode *a_vp;
+ struct proc *a_p;
+ } */ *ap;
+{
+ register struct vnode *vp = ap->a_vp;
+
+ if (prtactive && vp->v_usecount != 0)
+ vprint("afs_vop_inactive(): pushing active", vp);
+
+ AFS_GLOCK();
+ afs_InactiveVCache((struct vcache *)vp, 0); /* decrs ref counts */
+ AFS_GUNLOCK();
+ VOP_UNLOCK(vp, 0, ap->a_p);
+ return 0;
+}
+
+int
+afs_vop_reclaim(ap)
+ struct vop_reclaim_args /* {
+ struct vnode *a_vp;
+ } */ *ap;
+{
+ int error;
+ int sl;
+ register struct vnode *vp = ap->a_vp;
+
+ cache_purge(vp); /* just in case... */
+
+#if 0
+ AFS_GLOCK();
+ error = afs_FlushVCache((struct vcache *)vp, &sl); /* tosses our stuff from vnode */
+ AFS_GUNLOCK();
+ ubc_unlink(vp);
+ if (!error && vp->v_data)
+ panic("afs_reclaim: vnode not cleaned");
+ return error;
+#else
+ if (vp->v_usecount == 2) {
+ vprint("reclaim count==2", vp);
+ } else if (vp->v_usecount == 1) {
+ vprint("reclaim count==1", vp);
+ } else
+ vprint("reclaim bad count", vp);
+
+ return 0;
+#endif
+}
+
+int
+afs_vop_lock(ap)
+ struct vop_lock_args /* {
+ struct vnode *a_vp;
+ } */ *ap;
+{
+ register struct vnode *vp = ap->a_vp;
+ register struct vcache *avc = (struct vcache *)vp;
+
+ if (vp->v_tag == VT_NON)
+ return (ENOENT);
+ return (lockmgr(&avc->rwlock, ap->a_flags, &vp->v_interlock,
+ ap->a_p));
+}
+
+int
+afs_vop_unlock(ap)
+ struct vop_unlock_args /* {
+ struct vnode *a_vp;
+ } */ *ap;
+{
+ struct vnode *vp = ap->a_vp;
+ struct vcache *avc = (struct vcache *)vp;
+ return (lockmgr(&avc->rwlock, ap->a_flags | LK_RELEASE,
+ &vp->v_interlock, ap->a_p));
+
+}
+
+int
+afs_vop_bmap(ap)
+ struct vop_bmap_args /* {
+ struct vnode *a_vp;
+ daddr_t a_bn;
+ struct vnode **a_vpp;
+ daddr_t *a_bnp;
+ int *a_runp;
+ int *a_runb;
+ } */ *ap;
+{
+ struct vcache *vcp;
+ int error;
+ if (ap->a_bnp) {
+ *ap->a_bnp = ap->a_bn * (PAGE_SIZE / DEV_BSIZE);
+ }
+ if (ap->a_vpp) {
+ *ap->a_vpp = ap->a_vp;
+ }
+ if (ap->a_runp != NULL)
+ *ap->a_runp = 0;
+#ifdef notyet
+ if (ap->a_runb != NULL)
+ *ap->a_runb = 0;
+#endif
+
+ return 0;
+}
+int
+afs_vop_strategy(ap)
+ struct vop_strategy_args /* {
+ struct buf *a_bp;
+ } */ *ap;
+{
+ int error;
+ AFS_GLOCK();
+ error= afs_ustrategy(ap->a_bp);
+ AFS_GUNLOCK();
+ return error;
+}
+int
+afs_vop_print(ap)
+ struct vop_print_args /* {
+ struct vnode *a_vp;
+ } */ *ap;
+{
+ register struct vnode *vp = ap->a_vp;
+ register struct vcache *vc = (struct vcache *)ap->a_vp;
+ int s = vc->states;
+ char buf[20];
+ printf("tag %d, fid: %ld.%x.%x.%x, opens %d, writers %d", vp->v_tag, vc->fid.Cell,
+ vc->fid.Fid.Volume, vc->fid.Fid.Vnode, vc->fid.Fid.Unique, vc->opens,
+ vc->execsOrWriters);
+ printf("\n states%s%s%s%s%s", (s&CStatd) ? " statd" : "", (s&CRO) ? " readonly" : "",(s&CDirty) ? " dirty" : "",(s&CMAPPED) ? " mapped" : "", (s&CVFlushed) ? " flush in progress" : "");
+ if (UBCISVALID(vp))
+ printf("\n UBC: %s%s",
+ UBCINFOEXISTS(vp) ? "exists, " : "does not exist",
+ UBCINFOEXISTS(vp) ?
+ sprintf(buf, "holdcnt %d", vp->v_ubcinfo->ui_holdcnt),buf : "");
+ printf("\n");
+ return 0;
+}
+
+int
+afs_vop_islocked(ap)
+ struct vop_islocked_args /* {
+ struct vnode *a_vp;
+ } */ *ap;
+{
+ struct vcache *vc = (struct vcache *)ap->a_vp;
+ return lockstatus(&vc->rwlock);
+}
+
+/*
+ * Return POSIX pathconf information applicable to ufs filesystems.
+ */
+afs_vop_pathconf(ap)
+ struct vop_pathconf_args /* {
+ struct vnode *a_vp;
+ int a_name;
+ int *a_retval;
+ } */ *ap;
+{
+ AFS_STATCNT(afs_cntl);
+ switch (ap->a_name) {
+ case _PC_LINK_MAX:
+ *ap->a_retval = LINK_MAX;
+ break;
+ case _PC_NAME_MAX:
+ *ap->a_retval = NAME_MAX;
+ break;
+ case _PC_PATH_MAX:
+ *ap->a_retval = PATH_MAX;
+ break;
+ case _PC_CHOWN_RESTRICTED:
+ *ap->a_retval = 1;
+ break;
+ case _PC_NO_TRUNC:
+ *ap->a_retval = 1;
+ break;
+ case _PC_PIPE_BUF:
+ return EINVAL;
+ break;
+ default:
+ return EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * Advisory record locking support (fcntl() POSIX style)
+ */
+int
+afs_vop_advlock(ap)
+ struct vop_advlock_args /* {
+ struct vnode *a_vp;
+ caddr_t a_id;
+ int a_op;
+ struct flock *a_fl;
+ int a_flags;
+ } */ *ap;
+{
+ int error;
+ struct proc *p=current_proc();
+ struct ucred cr;
+ pcred_readlock(p);
+ cr=*p->p_cred->pc_ucred;
+ pcred_unlock(p);
+ AFS_GLOCK();
+ error= afs_lockctl((struct vcache *)ap->a_vp, ap->a_fl, ap->a_op, &cr,
+ (int) ap->a_id);
+ AFS_GUNLOCK();
+ return error;
+}
+
+int
+afs_vop_truncate(ap)
+ struct vop_truncate_args /* {
+ struct vnode *a_vp;
+ off_t a_length;
+ int a_flags;
+ struct ucred *a_cred;
+ struct proc *a_p;
+ } */ *ap;
+{
+ printf("stray afs_vop_truncate\n");
+ return EOPNOTSUPP;
+}
+
+int
+afs_vop_update(ap)
+ struct vop_update_args /* {
+ struct vnode *a_vp;
+ struct timeval *a_access;
+ struct timeval *a_modify;
+ int a_waitfor;
+ } */ *ap;
+{
+ printf("stray afs_vop_update\n");
+ return EOPNOTSUPP;
+}
+
+int afs_vop_blktooff(ap)
+ struct vop_blktooff_args /* {
+ struct vnode *a_vp;
+ daddr_t a_lblkno;
+ off_t *a_offset;
+ } */ *ap;
+{
+ *ap->a_offset = (off_t)(ap->a_lblkno * DEV_BSIZE);
+ return 0;
+}
+
+int afs_vop_offtoblk(ap)
+ struct vop_offtoblk_args /* {
+ struct vnode *a_vp;
+ off_t a_offset;
+ daddr_t *a_lblkno;
+ } */ *ap;
+{
+ *ap->a_lblkno = (daddr_t)(ap->a_offset / DEV_BSIZE);
+
+ return (0);
+}
+
+int afs_vop_cmap(ap)
+ struct vop_cmap_args /* {
+ struct vnode *a_vp;
+ off_t a_foffset;
+ size_t a_size;
+ daddr_t *a_bpn;
+ size_t *a_run;
+ void *a_poff;
+ } */ *ap;
+{
+ *ap->a_bpn = (daddr_t)(ap->a_foffset / DEV_BSIZE);
+ *ap->a_run= MAX(ap->a_size, AFS_CHUNKSIZE(ap->a_foffset));
+ return 0;
+}
+
--- /dev/null
+#define P1003_1B 1
+#define _KPOSIX_PRIORITY_SCHEDULING 1
--- /dev/null
+/*
+ * Copyright 2000, International Business Machines Corporation and others.
+ * All Rights Reserved.
+ *
+ * This software has been released under the terms of the IBM Public
+ * License. For details, see the LICENSE file in the top-level source
+ * directory or online at http://www.openafs.org/dl/license10.html
+ */
+
+#include "../afs/param.h" /* Should be always first */
+#include "../afs/sysincludes.h" /* Standard vendor system headers */
+#include "../afs/afsincludes.h" /* Afs-based standard headers */
+#include "../afs/afs_stats.h" /* afs statistics */
+
+
+int afs_osicred_initialized=0;
+struct AFS_UCRED afs_osi_cred;
+afs_lock_t afs_xosi; /* lock is for tvattr */
+extern struct osi_dev cacheDev;
+extern struct mount *afs_cacheVfsp;
+
+
+void *osi_UFSOpen(ainode)
+ afs_int32 ainode;
+{
+ struct inode *ip;
+ register struct osi_file *afile = NULL;
+ extern int cacheDiskType;
+ afs_int32 code = 0;
+ int dummy;
+ AFS_STATCNT(osi_UFSOpen);
+ if(cacheDiskType != AFS_FCACHE_TYPE_UFS) {
+ osi_Panic("UFSOpen called for non-UFS cache\n");
+ }
+ if (!afs_osicred_initialized) {
+ /* valid for alpha_osf, SunOS, Ultrix */
+ bzero((char *)&afs_osi_cred, sizeof(struct AFS_UCRED));
+ afs_osi_cred.cr_ref++;
+ afs_osicred_initialized = 1;
+ }
+ afile = (struct osi_file *) osi_AllocSmallSpace(sizeof(struct osi_file));
+ AFS_GUNLOCK();
+ code = igetinode(afs_cacheVfsp, (dev_t) cacheDev.dev, (ino_t)ainode, &ip, &dummy);
+ AFS_GLOCK();
+ if (code) {
+ osi_FreeSmallSpace(afile);
+ osi_Panic("UFSOpen: igetinode failed");
+ }
+ IN_UNLOCK(ip);
+ afile->vnode = ITOV(ip);
+ afile->size = VTOI(afile->vnode)->i_size;
+ afile->offset = 0;
+ afile->proc = (int (*)()) 0;
+ afile->inum = ainode; /* for hint validity checking */
+ return (void *)afile;
+}
+
+afs_osi_Stat(afile, astat)
+ register struct osi_file *afile;
+ register struct osi_stat *astat; {
+ register afs_int32 code;
+ struct vattr tvattr;
+ AFS_STATCNT(osi_Stat);
+ MObtainWriteLock(&afs_xosi,320);
+ AFS_GUNLOCK();
+ VOP_GETATTR(afile->vnode, &tvattr, &afs_osi_cred, code);
+ AFS_GLOCK();
+ if (code == 0) {
+ astat->size = tvattr.va_size;
+ astat->blksize = tvattr.va_blocksize;
+ astat->mtime = tvattr.va_mtime.tv_sec;
+ astat->atime = tvattr.va_atime.tv_sec;
+ }
+ MReleaseWriteLock(&afs_xosi);
+ return code;
+}
+
+osi_UFSClose(afile)
+ register struct osi_file *afile;
+ {
+ AFS_STATCNT(osi_Close);
+ if(afile->vnode) {
+ AFS_RELE(afile->vnode);
+ }
+
+ osi_FreeSmallSpace(afile);
+ return 0;
+ }
+
+osi_UFSTruncate(afile, asize)
+ register struct osi_file *afile;
+ afs_int32 asize; {
+ struct AFS_UCRED *oldCred;
+ struct vattr tvattr;
+ register afs_int32 code;
+ struct osi_stat tstat;
+ AFS_STATCNT(osi_Truncate);
+
+ /* This routine only shrinks files, and most systems
+ * have very slow truncates, even when the file is already
+ * small enough. Check now and save some time.
+ */
+ code = afs_osi_Stat(afile, &tstat);
+ if (code || tstat.size <= asize) return code;
+ MObtainWriteLock(&afs_xosi,321);
+ VATTR_NULL(&tvattr);
+ /* note that this credential swapping stuff is only necessary because
+ of ufs's references directly to cred instead of to
+ credentials parameter. Probably should fix ufs some day. */
+ oldCred = curproc->p_cred->pc_ucred; /* remember old credentials pointer */
+ curproc->p_cred->pc_ucred = &afs_osi_cred;
+ /* temporarily use superuser credentials */
+ tvattr.va_size = asize;
+ AFS_GUNLOCK();
+ VOP_SETATTR(afile->vnode, &tvattr, &afs_osi_cred, code);
+ AFS_GLOCK();
+ curproc->p_cred->pc_ucred = oldCred; /* restore */
+ MReleaseWriteLock(&afs_xosi);
+ return code;
+}
+
+void osi_DisableAtimes(avp)
+struct vnode *avp;
+{
+ struct inode *ip = VTOI(avp);
+ ip->i_flag &= ~IACC;
+}
+
+
+/* Generic read interface */
+afs_osi_Read(afile, offset, aptr, asize)
+ register struct osi_file *afile;
+ int offset;
+ char *aptr;
+ afs_int32 asize; {
+ struct AFS_UCRED *oldCred;
+ unsigned int resid;
+ register afs_int32 code;
+ register afs_int32 cnt1=0;
+ AFS_STATCNT(osi_Read);
+
+ /**
+ * If the osi_file passed in is NULL, panic only if AFS is not shutting
+ * down. No point in crashing when we are already shutting down
+ */
+ if ( !afile ) {
+ if ( !afs_shuttingdown )
+ osi_Panic("osi_Read called with null param");
+ else
+ return EIO;
+ }
+
+ if (offset != -1) afile->offset = offset;
+ AFS_GUNLOCK();
+ code = gop_rdwr(UIO_READ, afile->vnode, (caddr_t) aptr, asize, afile->offset,
+ AFS_UIOSYS, IO_UNIT, &afs_osi_cred, &resid);
+ AFS_GLOCK();
+ if (code == 0) {
+ code = asize - resid;
+ afile->offset += code;
+ osi_DisableAtimes(afile->vnode);
+ }
+ else {
+ afs_Trace2(afs_iclSetp, CM_TRACE_READFAILED, ICL_TYPE_INT32, resid,
+ ICL_TYPE_INT32, code);
+ code = -1;
+ }
+ return code;
+}
+
+/* Generic write interface */
+afs_osi_Write(afile, offset, aptr, asize)
+ register struct osi_file *afile;
+ char *aptr;
+ afs_int32 offset;
+ afs_int32 asize; {
+ struct AFS_UCRED *oldCred;
+ unsigned int resid;
+ register afs_int32 code;
+ AFS_STATCNT(osi_Write);
+ if ( !afile )
+ osi_Panic("afs_osi_Write called with null param");
+ if (offset != -1) afile->offset = offset;
+ {
+ struct ucred *tmpcred = curproc->p_cred->pc_ucred;
+ curproc->p_cred->pc_ucred = &afs_osi_cred;
+ AFS_GUNLOCK();
+ code = gop_rdwr(UIO_WRITE, afile->vnode, (caddr_t) aptr, asize, afile->offset,
+ AFS_UIOSYS, IO_UNIT, &afs_osi_cred, &resid);
+ AFS_GLOCK();
+ curproc->p_cred->pc_ucred = tmpcred;
+ }
+ if (code == 0) {
+ code = asize - resid;
+ afile->offset += code;
+ }
+ else {
+ code = -1;
+ }
+ if (afile->proc) {
+ (*afile->proc)(afile, code);
+ }
+ return code;
+}
+
+
+/* This work should be handled by physstrat in ca/machdep.c.
+ This routine written from the RT NFS port strategy routine.
+ It has been generalized a bit, but should still be pretty clear. */
+int afs_osi_MapStrategy(aproc, bp)
+ int (*aproc)();
+ register struct buf *bp;
+{
+ afs_int32 returnCode;
+
+ AFS_STATCNT(osi_MapStrategy);
+ returnCode = (*aproc) (bp);
+
+ return returnCode;
+}
+
+
+
+void
+shutdown_osifile()
+{
+ extern int afs_cold_shutdown;
+
+ AFS_STATCNT(shutdown_osifile);
+ if (afs_cold_shutdown) {
+ afs_osicred_initialized = 0;
+ }
+}
+
--- /dev/null
+/*
+ * Copyright 2000, International Business Machines Corporation and others.
+ * All Rights Reserved.
+ *
+ * This software has been released under the terms of the IBM Public
+ * License. For details, see the LICENSE file in the top-level source
+ * directory or online at http://www.openafs.org/dl/license10.html
+ */
+
+/*
+ * osi_groups.c
+ *
+ * Implements:
+ * Afs_xsetgroups (syscall)
+ * setpag
+ *
+ */
+#include "../afs/param.h"
+#include "../afs/sysincludes.h"
+#include "../afs/afsincludes.h"
+#include "../afs/afs_stats.h" /* statistics */
+
+#define NOCRED ((struct ucred *) -1)
+#define NOUID ((uid_t) -1)
+#define NOGID ((gid_t) -1)
+
+
+static int
+afs_getgroups(
+ struct ucred *cred,
+ int ngroups,
+ gid_t *gidset);
+
+static int
+afs_setgroups(
+ struct proc *proc,
+ struct ucred **cred,
+ int ngroups,
+ gid_t *gidset,
+ int change_parent);
+
+int
+Afs_xsetgroups(p, args, retval)
+ struct proc *p;
+ void *args;
+ int *retval;
+{
+ int code = 0;
+ struct vrequest treq;
+
+ AFS_STATCNT(afs_xsetgroups);
+ AFS_GLOCK();
+
+ /* code = afs_InitReq(&treq, u.u_cred); */
+ code = afs_InitReq(&treq, curproc->p_cred->pc_ucred);
+ AFS_GUNLOCK();
+ if (code) return code;
+
+ code = setgroups(p, args, retval);
+ /* Note that if there is a pag already in the new groups we don't
+ * overwrite it with the old pag.
+ */
+ if (PagInCred(curproc->p_cred->pc_ucred) == NOPAG) {
+ if (((treq.uid >> 24) & 0xff) == 'A') {
+ AFS_GLOCK();
+ /* we've already done a setpag, so now we redo it */
+ AddPag(p, treq.uid, &p->p_rcred);
+ AFS_GUNLOCK();
+ }
+ }
+ return code;
+}
+
+
+int
+setpag(proc, cred, pagvalue, newpag, change_parent)
+ struct proc *proc;
+ struct ucred **cred;
+ afs_uint32 pagvalue;
+ afs_uint32 *newpag;
+ afs_uint32 change_parent;
+{
+ gid_t gidset[NGROUPS];
+ int ngroups, code;
+ int j;
+
+ AFS_STATCNT(setpag);
+ ngroups = afs_getgroups(*cred, NGROUPS, gidset);
+ if (afs_get_pag_from_groups(gidset[0], gidset[1]) == NOPAG) {
+ /* We will have to shift grouplist to make room for pag */
+ if (ngroups + 2 > NGROUPS) {
+ return (E2BIG);
+ }
+ for (j = ngroups -1; j >= 0; j--) {
+ gidset[j+2] = gidset[j];
+ }
+ ngroups += 2;
+ }
+ *newpag = (pagvalue == -1 ? genpag(): pagvalue);
+ afs_get_groups_from_pag(*newpag, &gidset[0], &gidset[1]);
+ code = afs_setgroups(proc, cred, ngroups, gidset, change_parent);
+ return code;
+}
+
+
+static int
+afs_getgroups(
+ struct ucred *cred,
+ int ngroups,
+ gid_t *gidset)
+{
+ int ngrps, savengrps;
+ gid_t *gp;
+
+ AFS_STATCNT(afs_getgroups);
+ savengrps = ngrps = MIN(ngroups, cred->cr_ngroups);
+ gp = cred->cr_groups;
+ while (ngrps--)
+ *gidset++ = *gp++;
+ return savengrps;
+}
+
+
+
+static int
+afs_setgroups(
+ struct proc *proc,
+ struct ucred **cred,
+ int ngroups,
+ gid_t *gidset,
+ int change_parent)
+{
+ int ngrps;
+ int i;
+ gid_t *gp;
+ struct ucred *newcr, *cr;
+
+ AFS_STATCNT(afs_setgroups);
+ /*
+ * The real setgroups() call does this, so maybe we should too.
+ *
+ */
+ if (ngroups > NGROUPS)
+ return EINVAL;
+ cr = *cred;
+ if (!change_parent) {
+ crhold(cr);
+ newcr = crcopy(cr);
+ } else
+ newcr = cr;
+ newcr->cr_ngroups = ngroups;
+ gp = newcr->cr_groups;
+ while (ngroups--)
+ *gp++ = *gidset++;
+ if (!change_parent) {
+ substitute_real_creds(proc, NOUID, NOUID, NOGID, NOGID, newcr);
+ }
+ *cred = newcr;
+ return(0);
+}
--- /dev/null
+/*
+ * Copyright 2000, International Business Machines Corporation and others.
+ * All Rights Reserved.
+ *
+ * This software has been released under the terms of the IBM Public
+ * License. For details, see the LICENSE file in the top-level source
+ * directory or online at http://www.openafs.org/dl/license10.html
+ */
+
+/*
+ * FreeBSD inode operations
+ *
+ * Implements:
+ *
+ */
+#include "../afs/param.h" /* Should be always first */
+#include "../afs/sysincludes.h" /* Standard vendor system headers */
+#include "../afs/afsincludes.h" /* Afs-based standard headers */
+#include "../afs/osi_inode.h"
+#include "../afs/afs_stats.h" /* statistics stuff */
+#include <sys/queue.h>
+#include <sys/lock.h>
+#include <ufs/ufsmount.h>
+#include <ufs/ufs/dinode.h>
+
+getinode(fs, dev, inode, ipp, perror)
+ struct mount *fs;
+ struct inode **ipp;
+ dev_t dev;
+ ino_t inode;
+ int *perror;
+{
+ register struct vnode *vp;
+ char fake_vnode[FAKE_INODE_SIZE];
+ struct inode *ip;
+ int code;
+
+ *ipp = 0;
+ *perror = 0;
+ if (!fs) {
+ register struct ufsmount *ump;
+ register struct vnode *vp;
+ register struct mount *mp;
+
+ MOUNTLIST_LOCK();
+ if (mp = TAILQ_FIRST(&mountlist)) do {
+ /*
+ * XXX Also do the test for MFS
+ */
+#undef m_data
+#undef m_next
+ if (mp->mnt_stat.f_type == MOUNT_UFS) {
+ MOUNTLIST_UNLOCK();
+ ump = VFSTOUFS(mp);
+ if (ump->um_fs == NULL)
+ break;
+ if (ump->um_dev == dev) {
+ fs = ump->um_mountp;
+ }
+ MOUNTLIST_LOCK();
+ }
+ mp = TAILQ_NEXT(mp, mnt_list);
+ } while (mp != TAILQ_FIRST(&mountlist));
+ MOUNTLIST_UNLOCK();
+ if (!fs)
+ return(ENXIO);
+ }
+ vp = (struct vnode *) fake_vnode;
+ fake_inode_init(vp, fs);
+ code = iget(VTOI(vp), inode, &ip, 0);
+ if (code != 0) {
+ *perror = BAD_IGET;
+ return code;
+ } else {
+ *ipp = ip;
+ return(0);
+ }
+}
+
+igetinode(vfsp, dev, inode, ipp, perror)
+ struct inode **ipp;
+ struct mount *vfsp;
+ dev_t dev;
+ ino_t inode;
+ int *perror;
+{
+ struct inode *pip, *ip;
+ extern struct osi_dev cacheDev;
+ register int code = 0;
+
+ *perror = 0;
+
+ AFS_STATCNT(igetinode);
+
+ if ((code = getinode(vfsp, dev, inode, &ip, perror)) != 0) {
+ return(code);
+ }
+
+ if (ip->i_mode == 0) {
+ /* Not an allocated inode */
+ iforget(ip);
+ return(ENOENT);
+ }
+
+ if (ip->i_nlink == 0 || (ip->i_mode&IFMT) != IFREG) {
+ iput(ip);
+ return(ENOENT);
+ }
+
+ *ipp = ip;
+ return(0);
+}
+
+iforget(ip)
+struct inode *ip;
+{
+ struct vnode *vp = ITOV(ip);
+
+ AFS_STATCNT(iforget);
+
+ VN_LOCK(vp);
+ /* this whole thing is too wierd. Why??? XXX */
+ if (vp->v_usecount == 1) {
+ VN_UNLOCK(vp);
+ idrop(ip);
+ } else {
+ VN_UNLOCK(vp);
+ }
+}
+
+/*
+ * icreate system call -- create an inode
+ */
+afs_syscall_icreate(dev, near_inode, param1, param2, param3, param4, retval)
+ long *retval;
+ long dev, near_inode, param1, param2, param3, param4;
+{
+ int dummy, err=0;
+ struct inode *ip, *newip;
+ register int code;
+ struct vnode *vp;
+
+ AFS_STATCNT(afs_syscall_icreate);
+
+ if (!afs_suser())
+ return(EPERM);
+
+ code = getinode(0, (dev_t)dev, 2, &ip, &dummy);
+ if (code) {
+ return(ENOENT);
+ }
+ code = ialloc(ip, (ino_t)near_inode, 0, &newip);
+ iput(ip);
+ if (code) {
+ return(code);
+ }
+ IN_LOCK(newip);
+ newip->i_flag |= IACC|IUPD|ICHG;
+
+ newip->i_nlink = 1;
+
+ newip->i_mode = IFREG;
+
+ IN_UNLOCK(newip);
+ vp = ITOV(newip);
+ VN_LOCK(vp);
+ vp->v_type = VREG;
+ VN_UNLOCK(vp);
+
+ /*
+ if ( !vp->v_object)
+ {
+ extern struct vfs_ubcops ufs_ubcops;
+ extern struct vm_ubc_object* ubc_object_allocate();
+ struct vm_ubc_object* vop;
+ vop = ubc_object_allocate(&vp, &ufs_ubcops,
+ vp->v_mount->m_funnel);
+ VN_LOCK(vp);
+ vp->v_object = vop;
+ VN_UNLOCK(vp);
+ }
+ */
+
+ IN_LOCK(newip);
+ /* newip->i_flags |= IC_XUID|IC_XGID; */
+ /* newip->i_flags &= ~IC_PROPLIST; */
+ newip->i_vicep1 = param1;
+ if (param2 == 0x1fffffff/*INODESPECIAL*/) {
+ newip->i_vicep2 = ((0x1fffffff << 3) + (param4 & 0x3));
+ newip->i_vicep3a = (u_short)(param3 >> 16);
+ newip->i_vicep3b = (u_short)param3;
+ } else {
+ newip->i_vicep2 = (((param2 >> 16) & 0x1f) << 27) +
+ (((param4 >> 16) & 0x1f) << 22) +
+ (param3 & 0x3fffff);
+ newip->i_vicep3a = (u_short)param4;
+ newip->i_vicep3b = (u_short)param2;
+ }
+ newip->i_vicemagic = VICEMAGIC;
+
+ *retval = newip->i_number;
+ IN_UNLOCK(newip);
+ iput(newip);
+ return(code);
+}
+
+
+afs_syscall_iopen(dev, inode, usrmod, retval)
+ long *retval;
+ int dev, inode, usrmod;
+{
+ struct file *fp;
+ struct inode *ip;
+ struct vnode *vp = (struct vnode *)0;
+ int dummy;
+ int fd;
+ extern struct fileops vnops;
+ register int code;
+
+ AFS_STATCNT(afs_syscall_iopen);
+
+ if (!afs_suser())
+ return(EPERM);
+
+ code = igetinode(0, (dev_t)dev, (ino_t)inode, &ip, &dummy);
+ if (code) {
+ return(code);
+ }
+ if ((code = falloc(curproc, &fp, &fd)) != 0) {
+ iput(ip);
+ return(code);
+ }
+ IN_UNLOCK(ip);
+
+ /* FreeBSD doesn't do much mp stuff yet :( */
+ /* FP_LOCK(fp); */
+ fp->f_flag = (usrmod) & FMASK;
+ fp->f_type = DTYPE_VNODE;
+ fp->f_ops = &vnops;
+ fp->f_data = (caddr_t)ITOV(ip);
+
+ /* FP_UNLOCK(fp); */
+ return(0);
+}
+
+
+/*
+ * Support for iinc() and idec() system calls--increment or decrement
+ * count on inode.
+ * Restricted to super user.
+ * Only VICEMAGIC type inodes.
+ */
+afs_syscall_iincdec(dev, inode, inode_p1, amount)
+ int dev, inode, inode_p1, amount;
+{
+ int dummy;
+ struct inode *ip;
+ register int code;
+
+ if (!afs_suser())
+ return(EPERM);
+
+ code = igetinode(0, (dev_t)dev, (ino_t)inode, &ip, &dummy);
+ if (code) {
+ return(code);
+ }
+ if (!IS_VICEMAGIC(ip)) {
+ return(EPERM);
+ } else if (ip->i_vicep1 != inode_p1) {
+ return(ENXIO);
+ }
+ ip->i_nlink += amount;
+ if (ip->i_nlink == 0) {
+ CLEAR_VICEMAGIC(ip);
+ }
+ ip->i_flag |= ICHG;
+ iput(ip);
+ return(0);
+}
--- /dev/null
+/*
+ * Copyright 2000, International Business Machines Corporation and others.
+ * All Rights Reserved.
+ *
+ * This software has been released under the terms of the IBM Public
+ * License. For details, see the LICENSE file in the top-level source
+ * directory or online at http://www.openafs.org/dl/license10.html
+ */
+
+/*
+ * osi_inode.h
+ *
+ * Inode information required for DUX servers and salvager.
+ */
+#ifndef _OSI_INODE_H_
+#define _OSI_INODE_H_
+
+#define BAD_IGET -1000
+
+#define VICEMAGIC 0xb61cfa84
+
+#define DI_VICEP3(p) \
+ ( ((u_int)((p)->di_vicep3a)) << 16 | ((u_int)((p)->di_vicep3b)) )
+#define I_VICE3(p) \
+ ( ((u_int)((p)->i_vicep3a)) << 16 | ((u_int)((p)->i_vicep3b)) )
+
+#define FAKE_INODE_SIZE (sizeof(struct vnode)+sizeof(struct inode))
+#define MOUNTLIST_UNLOCK() simple_lock_unlock(&mountlist_slock)
+#define MOUNTLIST_LOCK() simple_lock(&mountlist_slock)
+
+/* FreeBSD doesn't actually have a di_proplb, so we use di_spare[0] */
+#define di_proplb di_spare[0]
+/* For some reason, they're called "oldids" instead of "bc_{u,g}id" */
+#define di_bcuid di_u.oldids[0]
+#define di_bcgid di_u.oldids[1]
+
+#define i_vicemagic i_din.di_spare[0]
+#define i_vicep1 i_din.di_uid
+#define i_vicep2 i_din.di_gid
+#define i_vicep3a i_din.di_u.oldids[0]
+#define i_vicep3b i_din.di_u.oldids[1]
+#define i_vicep4 i_din.di_spare[1] /* not used */
+
+#define di_vicemagic di_spare[0]
+#define di_vicep1 di_uid
+#define di_vicep2 di_gid
+#define di_vicep3a di_u.oldids[0]
+#define di_vicep3b di_u.oldids[1]
+#define di_vicep4 di_spare[1] /* not used */
+
+/*
+ * Macros for handling inode numbers:
+ * inode number to file system block offset.
+ * inode number to cylinder group number.
+ * inode number to file system block address.
+ */
+#define itoo(fs, x) ((x) % INOPB(fs))
+#define itog(fs, x) ((x) / (fs)->fs_ipg)
+#define itod(fs, x) \
+ ((daddr_t)(cgimin(fs, itog(fs, x)) + \
+ (blkstofrags((fs), (((x) % (fs)->fs_ipg) / INOPB(fs))))))
+
+
+#define IS_VICEMAGIC(ip) ((ip)->i_vicemagic == VICEMAGIC)
+#define IS_DVICEMAGIC(dp) ((dp)->di_vicemagic == VICEMAGIC)
+
+#define CLEAR_VICEMAGIC(ip) (ip)->i_vicemagic = 0
+#define CLEAR_DVICEMAGIC(dp) (dp)->di_vicemagic = 0
+
+#endif /* _OSI_INODE_H_ */
--- /dev/null
+/*
+ * Copyright 2000, International Business Machines Corporation and others.
+ * All Rights Reserved.
+ *
+ * This software has been released under the terms of the IBM Public
+ * License. For details, see the LICENSE file in the top-level source
+ * directory or online at http://www.openafs.org/dl/license10.html
+ */
+
+/*
+ *
+ * DUX OSI header file. Extends afs_osi.h.
+ *
+ * afs_osi.h includes this file, which is the only way this file should
+ * be included in a source file. This file can redefine macros declared in
+ * afs_osi.h.
+ */
+
+#ifndef _OSI_MACHDEP_H_
+#define _OSI_MACHDEP_H_
+
+#include <sys/lock.h>
+/* #include <kern/sched_prim.h> */
+/* #include <sys/unix_defs.h> */
+
+#define getpid() curproc
+extern struct simplelock afs_rxglobal_lock;
+
+/*
+ * Time related macros
+ */
+extern struct timeval time;
+#define osi_Time() (time.tv_sec)
+#define afs_hz hz
+
+#define PAGESIZE 8192
+
+#define AFS_UCRED ucred
+#define AFS_PROC struct proc
+
+#define afs_bufferpages bufpages
+
+#define osi_vnhold(avc,r) do { \
+ if ((avc)->vrefCount) { VN_HOLD((struct vnode *)(avc)); } \
+ else osi_Panic("refcnt==0"); } while(0)
+
+#define gop_rdwr(rw,gp,base,len,offset,segflg,unit,cred,aresid) \
+ vn_rdwr((rw),(gp),(base),(len),(offset),(segflg),(unit),(cred),(aresid), curproc)
+
+#undef afs_suser
+
+#ifdef KERNEL
+extern struct simplelock afs_global_lock;
+#if 0
+extern thread_t afs_global_owner;
+#define AFS_GLOCK() \
+ do { \
+ usimple_lock(&afs_global_lock); \
+ osi_Assert(afs_global_owner == (thread_t)0); \
+ afs_global_owner = current_thread(); \
+ } while (0)
+#define AFS_GUNLOCK() \
+ do { \
+ osi_Assert(afs_global_owner == current_thread()); \
+ afs_global_owner = (thread_t)0; \
+ usimple_unlock(&afs_global_lock); \
+ } while(0)
+#define ISAFS_GLOCK() (afs_global_owner == current_thread())
+#else
+#define AFS_GLOCK() \
+ do { \
+ simple_lock(&afs_global_lock); \
+ } while (0)
+#define AFS_GUNLOCK() \
+ do { \
+ simple_unlock(&afs_global_lock); \
+ } while(0)
+#endif /* 0 */
+#define AFS_RXGLOCK()
+#define AFS_RXGUNLOCK()
+#define ISAFS_RXGLOCK() 1
+
+#undef SPLVAR
+#define SPLVAR
+#undef NETPRI
+#define NETPRI
+#undef USERPRI
+#define USERPRI
+#endif /* KERNEL */
+
+#endif /* _OSI_MACHDEP_H_ */
--- /dev/null
+/*
+ * Copyright 2000, International Business Machines Corporation and others.
+ * All Rights Reserved.
+ *
+ * This software has been released under the terms of the IBM Public
+ * License. For details, see the LICENSE file in the top-level source
+ * directory or online at http://www.openafs.org/dl/license10.html
+ */
+
+/*
+ * osi_misc.c
+ *
+ * Implements:
+ * afs_suser
+ */
+
+#include "../afs/param.h" /* Should be always first */
+#include "../afs/sysincludes.h" /* Standard vendor system headers */
+#include "../afs/afsincludes.h" /* Afs-based standard headers */
+
+/*
+ * afs_suser() returns true if the caller is superuser, false otherwise.
+ *
+ * Note that it must NOT set errno.
+ */
+
+afs_suser() {
+ int error;
+
+ if (suser(curproc) == 0) {
+ return(1);
+ }
+ return(0);
+}
--- /dev/null
+/*
+ * Copyright 2000, International Business Machines Corporation and others.
+ * All Rights Reserved.
+ *
+ * This software has been released under the terms of the IBM Public
+ * License. For details, see the LICENSE file in the top-level source
+ * directory or online at http://www.openafs.org/dl/license10.html
+ */
+
+
+#include "../afs/param.h" /* Should be always first */
+#include "../afs/sysincludes.h" /* Standard vendor system headers */
+#include "../afs/afsincludes.h" /* Afs-based standard headers */
+#include "../afs/afs_stats.h" /* afs statistics */
+
+
+
+static int osi_TimedSleep(char *event, afs_int32 ams, int aintok);
+void afs_osi_Wakeup(char *event);
+void afs_osi_Sleep(char *event);
+
+static char waitV;
+
+
+void afs_osi_InitWaitHandle(struct afs_osi_WaitHandle *achandle)
+{
+ AFS_STATCNT(osi_InitWaitHandle);
+ achandle->proc = (caddr_t) 0;
+}
+
+/* cancel osi_Wait */
+void afs_osi_CancelWait(struct afs_osi_WaitHandle *achandle)
+{
+ caddr_t proc;
+
+ AFS_STATCNT(osi_CancelWait);
+ proc = achandle->proc;
+ if (proc == 0) return;
+ achandle->proc = (caddr_t) 0; /* so dude can figure out he was signalled */
+ afs_osi_Wakeup(&waitV);
+}
+
+/* afs_osi_Wait
+ * Waits for data on ahandle, or ams ms later. ahandle may be null.
+ * Returns 0 if timeout and EINTR if signalled.
+ */
+int afs_osi_Wait(afs_int32 ams, struct afs_osi_WaitHandle *ahandle, int aintok)
+{
+ int code;
+ afs_int32 endTime, tid;
+
+ AFS_STATCNT(osi_Wait);
+ endTime = osi_Time() + (ams/1000);
+ if (ahandle)
+ ahandle->proc = (caddr_t) curproc;
+ do {
+ AFS_ASSERT_GLOCK();
+ code = 0;
+ code = osi_TimedSleep(&waitV, ams, aintok);
+
+ if (code) break; /* if something happened, quit now */
+ /* if we we're cancelled, quit now */
+ if (ahandle && (ahandle->proc == (caddr_t) 0)) {
+ /* we've been signalled */
+ break;
+ }
+ } while (osi_Time() < endTime);
+ return code;
+}
+
+
+
+
+typedef struct afs_event {
+ struct afs_event *next; /* next in hash chain */
+ char *event; /* lwp event: an address */
+ int refcount; /* Is it in use? */
+ int seq; /* Sequence number: this is incremented
+ by wakeup calls; wait will not return until
+ it changes */
+ int cond;
+} afs_event_t;
+
+#define HASHSIZE 128
+afs_event_t *afs_evhasht[HASHSIZE];/* Hash table for events */
+#define afs_evhash(event) (afs_uint32) ((((long)event)>>2) & (HASHSIZE-1));
+int afs_evhashcnt = 0;
+
+/* Get and initialize event structure corresponding to lwp event (i.e. address)
+ * */
+static afs_event_t *afs_getevent(char *event)
+{
+ afs_event_t *evp, *newp = 0;
+ int hashcode;
+
+ AFS_ASSERT_GLOCK();
+ hashcode = afs_evhash(event);
+ evp = afs_evhasht[hashcode];
+ while (evp) {
+ if (evp->event == event) {
+ evp->refcount++;
+ return evp;
+ }
+ if (evp->refcount == 0)
+ newp = evp;
+ evp = evp->next;
+ }
+ if (!newp) {
+ newp = (afs_event_t *) osi_AllocSmallSpace(sizeof (afs_event_t));
+ afs_evhashcnt++;
+ newp->next = afs_evhasht[hashcode];
+ afs_evhasht[hashcode] = newp;
+ newp->seq = 0;
+ }
+ newp->event = event;
+ newp->refcount = 1;
+ return newp;
+}
+
+/* Release the specified event */
+#define relevent(evp) ((evp)->refcount--)
+
+
+void afs_osi_Sleep(char *event)
+{
+ struct afs_event *evp;
+ int seq;
+
+ evp = afs_getevent(event);
+ seq = evp->seq;
+ while (seq == evp->seq) {
+ AFS_ASSERT_GLOCK();
+ assert_wait((vm_offset_t)(&evp->cond), 0);
+ AFS_GUNLOCK();
+ thread_block();
+ AFS_GLOCK();
+ }
+ relevent(evp);
+}
+
+/* osi_TimedSleep
+ *
+ * Arguments:
+ * event - event to sleep on
+ * ams --- max sleep time in milliseconds
+ * aintok - 1 if should sleep interruptibly
+ *
+ * Returns 0 if timeout and EINTR if signalled.
+ */
+static int osi_TimedSleep(char *event, afs_int32 ams, int aintok)
+{
+ int code = 0;
+ struct afs_event *evp;
+ int ticks;
+
+ ticks = ( ams * afs_hz )/1000;
+
+
+ evp = afs_getevent(event);
+
+ assert_wait((vm_offset_t)(&evp->cond), aintok);
+ AFS_GUNLOCK();
+ thread_set_timeout(ticks);
+ thread_block();
+ AFS_GLOCK();
+ /* if (current_thread()->wait_result != THREAD_AWAKENED)
+ code = EINTR; */
+
+ relevent(evp);
+ return code;
+}
+
+
+void afs_osi_Wakeup(char *event)
+{
+ struct afs_event *evp;
+
+ evp = afs_getevent(event);
+ if (evp->refcount > 1) {
+ evp->seq++;
+ thread_wakeup((vm_offset_t)(&evp->cond));
+ }
+ relevent(evp);
+}
--- /dev/null
+/*
+ * Copyright 2000, International Business Machines Corporation and others.
+ * All Rights Reserved.
+ *
+ * This software has been released under the terms of the IBM Public
+ * License. For details, see the LICENSE file in the top-level source
+ * directory or online at http://www.openafs.org/dl/license10.html
+ */
+
+/*
+ * osi_vfsops.c for DUX
+ */
+#include "../afs/param.h" /* Should be always first */
+#include "../afs/sysincludes.h" /* Standard vendor system headers */
+#include "../afs/afsincludes.h" /* Afs-based standard headers */
+#include "../afs/afs_stats.h" /* statistics stuff */
+#include <sys/types.h>
+#include <kern/mach_param.h>
+#include <sys/sysconfig.h>
+#include <sys/systm.h>
+#include <sys/resource.h>
+#include <sys/errno.h>
+#include <sys/conf.h>
+#include <machine/machlimits.h>
+
+
+struct vcache *afs_globalVp = 0;
+struct mount *afs_globalVFS = 0;
+
+static u_char afs_mntid;
+int afs_vfsdev = 0;
+udecl_simple_lock_data(, afsmntid_lock)
+#define AFSMNTID_LOCK() usimple_lock(&afsmntid_lock)
+#define AFSMNTID_UNLOCK() usimple_unlock(&afsmntid_lock)
+#define AFSMNTID_LOCK_INIT() usimple_lock_init(&afsmntid_lock)
+
+
+int mp_afs_mount(struct mount *afsp,char * path, caddr_t data,
+ struct nameidata *ndp)
+{
+ u_int size;
+
+ fsid_t tfsid;
+ struct mount *xmp, *getvfs();
+ int code;
+
+ AFS_GLOCK();
+ AFS_STATCNT(afs_mount);
+
+ if (afs_globalVFS) { /* Don't allow remounts. */
+ AFS_GUNLOCK();
+ return (EBUSY);
+ }
+
+ afs_globalVFS = afsp;
+ afsp->vfs_bsize = 8192;
+/*
+ * Generate a unique afs mount i.d. ( see nfs_mount() ).
+ */
+ afsp->m_stat.f_fsid.val[0] = makedev(130, 0);
+ afsp->m_stat.f_fsid.val[1] = MOUNT_AFS;
+ AFSMNTID_LOCK();
+ if (++afs_mntid == 0)
+ ++afs_mntid;
+ AFSMNTID_UNLOCK();
+ BM(AFSMNTID_LOCK());
+ tfsid.val[0] = makedev(130, afs_mntid);
+ tfsid.val[1] = MOUNT_AFS;
+ BM(AFSMNTID_UNLOCK());
+
+ while (xmp = getvfs(&tfsid)) {
+ UNMOUNT_READ_UNLOCK(xmp);
+ tfsid.val[0]++;
+ AFSMNTID_LOCK();
+ afs_mntid++;
+ AFSMNTID_UNLOCK();
+ }
+ if (major(tfsid.val[0]) != 130) {
+ AFS_GUNLOCK();
+ return (ENOENT);
+ }
+ afsp->m_stat.f_fsid.val[0] = tfsid.val[0];
+
+ afsp->m_stat.f_mntonname = AFS_KALLOC(MNAMELEN);
+ afsp->m_stat.f_mntfromname = AFS_KALLOC(MNAMELEN);
+ if ( !afsp->m_stat.f_mntonname || !afsp->m_stat.f_mntfromname)
+ panic("malloc failure in afs_mount\n");
+
+ bzero(afsp->m_stat.f_mntonname, MNAMELEN);
+ bzero(afsp->m_stat.f_mntfromname, MNAMELEN);
+ AFS_COPYINSTR(path, (caddr_t)afsp->m_stat.f_mntonname, MNAMELEN, &size, code);
+ bcopy("AFS", afsp->m_stat.f_mntfromname, 4);
+ AFS_GUNLOCK();
+ (void) mp_afs_statfs(afsp);
+ AFS_GLOCK();
+ afs_vfsdev = afsp->m_stat.f_fsid.val[0];
+
+#ifndef AFS_NONFSTRANS
+ /* Set up the xlator in case it wasn't done elsewhere */
+ afs_xlatorinit_v2();
+ afs_xlatorinit_v3();
+#endif
+ AFS_GUNLOCK();
+ return 0;
+}
+
+
+int mp_afs_unmount (struct mount *afsp, int flag)
+{
+ AFS_GLOCK();
+ AFS_STATCNT(afs_unmount);
+ afs_globalVFS = 0;
+ afs_shutdown();
+ AFS_GUNLOCK();
+ return 0;
+}
+
+
+int mp_afs_start(struct mount *mp, int flags)
+{
+ return(0);
+}
+
+int mp_afs_root (struct mount *afsp, struct vnode **avpp)
+{
+ register afs_int32 code = 0;
+ struct vrequest treq;
+ register struct vcache *tvp=0;
+
+ AFS_GLOCK();
+ AFS_STATCNT(afs_root);
+ if (afs_globalVp && (afs_globalVp->states & CStatd)) {
+ tvp = afs_globalVp;
+ } else {
+ if (!(code = afs_InitReq(&treq, cred)) &&
+ !(code = afs_CheckInit())) {
+ tvp = afs_GetVCache(&afs_rootFid, &treq, (afs_int32 *)0,
+ (struct vcache*)0, WRITE_LOCK);
+ /* we really want this to stay around */
+ if (tvp) {
+ afs_globalVp = tvp;
+ } else
+ code = ENOENT;
+ }
+ }
+ if (tvp) {
+ AFS_GUNLOCK();
+ VN_HOLD((struct vnode *)tvp);
+ VN_LOCK((struct vnode *)tvp);
+ tvp->v.v_flag |= VROOT; /* No-op on Ultrix 2.2 */
+ VN_UNLOCK((struct vnode *)tvp);
+ AFS_GLOCK();
+
+ afs_globalVFS = afsp;
+ *avpp = (struct vnode *) tvp;
+ }
+
+ afs_Trace2(afs_iclSetp, CM_TRACE_VFSROOT, ICL_TYPE_POINTER, *avpp,
+ ICL_TYPE_INT32, code);
+ AFS_GUNLOCK();
+ return code;
+}
+
+
+mp_afs_quotactl(struct mount *mp, int cmd, uid_t uid, caddr_t arg)
+{
+ return EOPNOTSUPP;
+}
+
+int mp_afs_statfs(struct mount *afsp)
+{
+ struct nstatfs *abp = &afsp->m_stat;
+
+ AFS_GLOCK();
+ AFS_STATCNT(afs_statfs);
+
+ abp->f_type = MOUNT_AFS;
+ abp->f_bsize = afsp->vfs_bsize;
+
+ /* Fake a high number below to satisfy programs that use the statfs call
+ * to make sure that there's enough space in the device partition before
+ * storing something there.
+ */
+ abp->f_blocks = abp->f_bfree = abp->f_bavail = abp->f_files =
+ abp->f_ffree = 2000000;
+ abp->f_fsize = 1024;
+
+ abp->f_fsid.val[0] = afsp->m_stat.f_fsid.val[0];
+ abp->f_fsid.val[1] = afsp->m_stat.f_fsid.val[1];
+
+ AFS_GUNLOCK();
+ return 0;
+}
+
+
+int mp_afs_sync(struct mount *mp, int flags)
+{
+ AFS_STATCNT(afs_sync);
+ return 0;
+}
+
+
+int mp_afs_fhtovp(struct mount *afsp, struct fid *fidp, struct vnode **avcp)
+{
+ struct vrequest treq;
+ register code = 0;
+
+ AFS_GLOCK();
+ AFS_STATCNT(afs_vget);
+
+ *avcp = NULL;
+
+ if ((code = afs_InitReq(&treq, cred)) == 0) {
+ code = afs_osi_vget((struct vcache**)avcp, fidp, &treq);
+ }
+
+ afs_Trace3(afs_iclSetp, CM_TRACE_VGET, ICL_TYPE_POINTER, *avcp,
+ ICL_TYPE_INT32, treq.uid, ICL_TYPE_FID, fidp);
+
+ code = afs_CheckCode(code, &treq, 42);
+ AFS_GUNLOCK();
+ return code;
+}
+
+
+/*
+ * afs_vptofh
+ *
+ * afs_vptofh can return two flavors of NFS fid, depending on if submounts are
+ * allowed. The reason for this is that we can't guarantee that we found all
+ * the entry points any OS might use to get the fid for the NFS mountd.
+ * Hence we return a "magic" fid for all but /afs. If it goes through the
+ * translator code, it will get transformed into a SmallFid that we recognize.
+ * So, if submounts are disallowed, and an NFS client tries a submount, it will
+ * get a fid which we don't recognize and the mount will either fail or we
+ * will ignore subsequent requests for that mount.
+ *
+ * The Alpha fid is organized differently than for other platforms. Their
+ * intention was to have the data portion of the fid aligned on a 4 byte
+ * boundary. To do so, the fid is organized as:
+ * u_short reserved
+ * u_short len
+ * char data[8]
+ * The len field is the length of the entire fid, from reserved through data.
+ * This length is used by fid_copy to include copying the reserved field.
+ * Alpha's zero the reserved field before handing us the fid, but they use
+ * it in fid_cmp. We use the reserved field to store the 16 bits of the Vnode.
+ *
+ * Note that the SmallFid only allows for 8 bits of the cell index and
+ * 16 bits of the vnode.
+ */
+
+#define AFS_FIDDATASIZE 8
+#define AFS_SIZEOFSMALLFID 12 /* full size of fid, including len field */
+extern int afs_NFSRootOnly; /* 1 => only allow NFS mounts of /afs. */
+int afs_fid_vnodeoverflow=0, afs_fid_uniqueoverflow=0;
+
+int mp_afs_vptofh(struct vnode *avn, struct fid *fidp)
+{
+ struct SmallFid Sfid;
+ long addr[2];
+ register struct cell *tcell;
+ int rootvp = 0;
+ struct vcache *avc = (struct vcache *)avn;
+
+ AFS_GLOCK();
+ AFS_STATCNT(afs_fid);
+
+ if (afs_shuttingdown) {
+ AFS_GUNLOCK();
+ return EIO;
+ }
+
+ if (afs_NFSRootOnly && (avc == afs_globalVp)) rootvp = 1;
+ if (!afs_NFSRootOnly || rootvp) {
+ tcell = afs_GetCell(avc->fid.Cell, READ_LOCK);
+ Sfid.Volume = avc->fid.Fid.Volume;
+ fidp->fid_reserved = avc->fid.Fid.Vnode;
+ Sfid.CellAndUnique = ((tcell->cellIndex << 24) +
+ (avc->fid.Fid.Unique & 0xffffff));
+ afs_PutCell(tcell, READ_LOCK);
+ if (avc->fid.Fid.Vnode > 0xffff)
+ afs_fid_vnodeoverflow++;
+ if (avc->fid.Fid.Unique > 0xffffff)
+ afs_fid_uniqueoverflow++;
+ } else {
+ fidp->fid_reserved = AFS_XLATOR_MAGIC;
+ addr[0] = (long)avc;
+ AFS_GUNLOCK();
+ VN_HOLD((struct vnode *)avc);
+ AFS_GLOCK();
+ }
+
+ /* Use the fid pointer passed to us. */
+ fidp->fid_len = AFS_SIZEOFSMALLFID;
+ if (afs_NFSRootOnly) {
+ if (rootvp) {
+ bcopy((caddr_t)&Sfid, fidp->fid_data, AFS_FIDDATASIZE);
+ } else {
+ bcopy((caddr_t)addr, fidp->fid_data, AFS_FIDDATASIZE);
+ }
+ } else {
+ bcopy((caddr_t)&Sfid, fidp->fid_data, AFS_FIDDATASIZE);
+ }
+ AFS_GUNLOCK();
+ return 0;
+}
+
+
+int mp_Afs_init(void); /* vfs_init - defined below */
+
+
+/* This is only called by vfs_mount when afs is going to be mounted as root.
+ * Since we don't support diskless clients we shouldn't come here.
+ */
+int afsmountroot=0;
+int mp_afs_mountroot(struct mount *afsp, struct vnode **vp)
+{
+ AFS_GLOCK();
+ AFS_STATCNT(afs_mountroot);
+ afsmountroot++;
+ AFS_GUNLOCK();
+ return EINVAL;
+}
+
+
+/* It's called to setup swapping over the net for diskless clients; again
+ * not for us.
+ */
+int afsswapvp=0;
+int mp_afs_swapvp(void)
+{
+ AFS_GLOCK();
+ AFS_STATCNT(afs_swapvp);
+ afsswapvp++;
+ AFS_GUNLOCK();
+ return EINVAL;
+}
+
+
+struct vfsops afs_vfsops = {
+ mp_afs_mount,
+ mp_afs_start,
+ mp_afs_unmount,
+ mp_afs_root,
+ mp_afs_quotactl,
+ mp_afs_statfs,
+ mp_afs_sync,
+ mp_afs_fhtovp, /* afs_vget */
+ mp_afs_vptofh,
+ mp_Afs_init,
+ mp_afs_mountroot,
+ mp_afs_swapvp
+};
+
+
+/*
+ * System Call Entry Points
+ */
+#define NULL_FUNC (int (*)(int))0
+
+int (*afs_syscall_func)() = NULL_FUNC;
+int (*afs_xsetgroups_func)() = NULL_FUNC;
+int (*afs_xioctl_func)() = NULL_FUNC;
+
+afssyscall(p, args, retval)
+ struct proc *p;
+ void *args;
+ long *retval;
+{
+ int (*func)();
+ int code;
+
+ AFS_GLOCK();
+ func = afs_syscall_func;
+ if (func == NULL_FUNC) {
+ code = nosys(p, args, retval);
+ } else {
+ code = (*func)(p, args, retval);
+ }
+ AFS_GUNLOCK();
+ return code;
+}
+
+afsxsetgroups(p, args, retval)
+ struct proc *p;
+ void *args;
+ long *retval;
+{
+ int (*func)();
+ int code;
+
+ AFS_GLOCK();
+ func = afs_xsetgroups_func;
+ if (func == NULL_FUNC) {
+ code = nosys(p, args, retval);
+ } else {
+ code = (*func)(p, args, retval);
+ }
+ AFS_GUNLOCK();
+ return code;
+}
+
+afsxioctl(p, args, retval)
+ struct proc *p;
+ void *args;
+ long *retval;
+{
+ int (*func)();
+ int code;
+
+ AFS_GLOCK();
+ func = afs_xioctl_func;
+ if (func == NULL_FUNC) {
+ code = nosys(p, args, retval);
+ } else {
+ code = (*func)(p, args, retval);
+ }
+ AFS_GUNLOCK();
+ return code;
+}
+
+
+/*
+ * VFS initialization and unload
+ */
+
+afs_unconfig()
+{
+ return EBUSY;
+}
+
+
+cfg_subsys_attr_t afs_attributes[] = {
+ {"", 0, 0, 0, 0, 0, 0} /* must be the last element */
+};
+
+afs_configure(cfg_op_t op, caddr_t indata, size_t indata_size, caddr_t outdata, size_t outdata_size)
+{
+ cfg_attr_t *attributes;
+ int ret = ESUCCESS;
+ int i, j, size;
+ caddr_t p;
+
+ switch (op) {
+ case CFG_OP_CONFIGURE:
+ /*
+ * The indata parameter is a list of attributes to be configured, and
+ * indata_size is the count of attributes.
+ */
+ if ((ret = vfssw_add_fsname(MOUNT_AFS, &afs_vfsops, "afs")) != 0)
+ return(ret);
+ break;
+ case CFG_OP_UNCONFIGURE:
+ if ((ret = afs_unconfig()) != 0)
+ return(ret);
+ break;
+ default:
+ ret = EINVAL;
+ break;
+ }
+ return ret;
+}
+
+
+int mp_Afs_init(void)
+{
+ extern int Afs_xsetgroups(), afs_xioctl(), afs3_syscall();
+
+ AFS_GLOCK();
+ sysent[AFS_SYSCALL].sy_call = afs3_syscall;
+ sysent[AFS_SYSCALL].sy_parallel = 0;
+ sysent[AFS_SYSCALL].sy_narg = 6;
+ sysent[SYS_setgroups].sy_call = Afs_xsetgroups;
+ afs_xioctl_func = afsxioctl;
+ afs_xsetgroups_func = afsxsetgroups;
+ afs_syscall_func = afssyscall;
+ AFS_GUNLOCK();
+
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright 2000, International Business Machines Corporation and others.
+ * All Rights Reserved.
+ *
+ * This software has been released under the terms of the IBM Public
+ * License. For details, see the LICENSE file in the top-level source
+ * directory or online at http://www.openafs.org/dl/license10.html
+ */
+
+
+/* osi_vm.c implements:
+ *
+ * osi_VM_FlushVCache(avc, slept)
+ * osi_ubc_flush_dirty_and_wait(vp, flags)
+ * osi_VM_StoreAllSegments(avc)
+ * osi_VM_TryToSmush(avc, acred, sync)
+ * osi_VM_FlushPages(avc, credp)
+ * osi_VM_Truncate(avc, alen, acred)
+ */
+
+#include "../afs/param.h" /* Should be always first */
+#include "../afs/sysincludes.h" /* Standard vendor system headers */
+#include "../afs/afsincludes.h" /* Afs-based standard headers */
+#include "../afs/afs_stats.h" /* statistics */
+/* #include <vm/vm_ubc.h> */
+#include <limits.h>
+#include <float.h>
+
+/* Try to discard pages, in order to recycle a vcache entry.
+ *
+ * We also make some sanity checks: ref count, open count, held locks.
+ *
+ * We also do some non-VM-related chores, such as releasing the cred pointer
+ * (for AIX and Solaris) and releasing the gnode (for AIX).
+ *
+ * Locking: afs_xvcache lock is held. If it is dropped and re-acquired,
+ * *slept should be set to warn the caller.
+ *
+ * Formerly, afs_xvcache was dropped and re-acquired for Solaris, but now it
+ * is not dropped and re-acquired for any platform. It may be that *slept is
+ * therefore obsolescent.
+ *
+ * OSF/1 Locking: VN_LOCK has been called.
+ */
+int
+osi_VM_FlushVCache(avc, slept)
+ struct vcache *avc;
+ int *slept;
+{
+#ifdef SECRETLY_OSF1
+ if (avc->vrefCount > 1)
+ return EBUSY;
+
+ if (avc->opens)
+ return EBUSY;
+
+ /* if a lock is held, give up */
+ if (CheckLock(&avc->lock) || afs_CheckBozonLock(&avc->pvnLock))
+ return EBUSY;
+
+ AFS_GUNLOCK();
+ ubc_invalidate(((struct vnode *)avc)->v_object, 0, 0, B_INVAL);
+ AFS_GLOCK();
+#endif /* SECRETLY_OSF1 */
+
+ return 0;
+}
+
+/*
+ * osi_ubc_flush_dirty_and_wait -- ensure all dirty pages cleaned
+ *
+ * Alpha OSF/1 doesn't make it easy to wait for all dirty pages to be cleaned.
+ * NFS tries to do this by calling waitforio(), which waits for v_numoutput
+ * to go to zero. But that isn't good enough, because afs_putpage() doesn't
+ * increment v_numoutput until it has obtained the vcache entry lock. Suppose
+ * that Process A, trying to flush a page, is waiting for that lock, and
+ * Process B tries to close the file. Process B calls waitforio() which thinks
+ * that everything is cool because v_numoutput is still zero. Process B then
+ * proceeds to call afs_StoreAllSegments(). Finally when B is finished, A gets
+ * to proceed and flush its page. But then it's too late because the file is
+ * already closed.
+ *
+ * (I suspect that waitforio() is not adequate for NFS, just as it isn't
+ * adequate for us. But that's not my problem.)
+ *
+ * The only way we can be sure that there are no more dirty pages is if there
+ * are no more pages with pg_busy set. We look for them on the cleanpl.
+ *
+ * For some reason, ubc_flush_dirty() only looks at the dirtypl, not the
+ * dirtywpl. I don't know why this is good enough, but I assume it is. By
+ * the same token, I only look for busy pages on the cleanpl, not the cleanwpl.
+ *
+ * Called with the global lock NOT held.
+ */
+void
+osi_ubc_flush_dirty_and_wait(vp, flags)
+struct vnode *vp;
+int flags; {
+ int retry;
+ vm_page_t pp;
+ int first;
+
+#ifdef SECRETLY_OSF1
+ do {
+ struct vm_ubc_object* vop;
+ vop = (struct vm_ubc_object*)(vp->v_object);
+ ubc_flush_dirty(vop, flags);
+
+ vm_object_lock(vop);
+ if (vop->vu_dirtypl)
+ /* shouldn't happen, but who knows */
+ retry = 1;
+ else {
+ retry = 0;
+ if (vop->vu_cleanpl) {
+ for (first = 1, pp = vop->vu_cleanpl;
+ first || pp != vop->vu_cleanpl;
+ first = 0, pp = pp->pg_onext) {
+ if (pp->pg_busy) {
+ retry = 1;
+ pp->pg_wait = 1;
+ assert_wait_mesg((vm_offset_t)pp, FALSE, "pg_wait");
+ vm_object_unlock(vop);
+ thread_block();
+ break;
+ }
+ }
+ }
+ if (retry) continue;
+ }
+ vm_object_unlock(vop);
+ } while (retry);
+#endif /* SECRETLY_OSF1 */
+}
+
+/* Try to store pages to cache, in order to store a file back to the server.
+ *
+ * Locking: the vcache entry's lock is held. It will usually be dropped and
+ * re-obtained.
+ */
+void
+osi_VM_StoreAllSegments(avc)
+ struct vcache *avc;
+{
+#ifdef SECRETLY_OSF1
+ ReleaseWriteLock(&avc->lock);
+ AFS_GUNLOCK();
+ osi_ubc_flush_dirty_and_wait((struct vnode *)avc, 0);
+ AFS_GLOCK();
+ ObtainWriteLock(&avc->lock,94);
+#endif /* SECRETLY_OSF1 */
+}
+
+/* Try to invalidate pages, for "fs flush" or "fs flushv"; or
+ * try to free pages, when deleting a file.
+ *
+ * Locking: the vcache entry's lock is held. It may be dropped and
+ * re-obtained.
+ *
+ * Since we drop and re-obtain the lock, we can't guarantee that there won't
+ * be some pages around when we return, newly created by concurrent activity.
+ */
+void
+osi_VM_TryToSmush(avc, acred, sync)
+ struct vcache *avc;
+ struct AFS_UCRED *acred;
+ int sync;
+{
+#ifdef SECRETLY_OSF1
+ ReleaseWriteLock(&avc->lock);
+ AFS_GUNLOCK();
+ osi_ubc_flush_dirty_and_wait((struct vnode *)avc, 0);
+ ubc_invalidate(((struct vnode *)avc)->v_object, 0, 0, B_INVAL);
+ AFS_GLOCK();
+ ObtainWriteLock(&avc->lock,59);
+#endif /* SECRETLY_OSF1 */
+}
+
+/* Purge VM for a file when its callback is revoked.
+ *
+ * Locking: No lock is held, not even the global lock.
+ */
+void
+osi_VM_FlushPages(avc, credp)
+ struct vcache *avc;
+ struct AFS_UCRED *credp;
+{
+#ifdef SECRETLY_OSF1
+ ubc_flush_dirty(((struct vnode *)avc)->v_object, 0);
+ ubc_invalidate(((struct vnode *)avc)->v_object, 0, 0, B_INVAL);
+#endif /* SECRETLY_OSF1 */
+}
+
+/* Purge pages beyond end-of-file, when truncating a file.
+ *
+ * Locking: no lock is held, not even the global lock.
+ * activeV is raised. This is supposed to block pageins, but at present
+ * it only works on Solaris.
+ */
+void
+osi_VM_Truncate(avc, alen, acred)
+ struct vcache *avc;
+ int alen;
+ struct AFS_UCRED *acred;
+{
+#ifdef SECRETLY_OSF1
+ ubc_invalidate(((struct vnode *)avc)->v_object, alen,
+ MAXINT - alen, B_INVAL);
+#endif /* SECRETLY_OSF1 */
+}
--- /dev/null
+/*
+ * Copyright 2000, International Business Machines Corporation and others.
+ * All Rights Reserved.
+ *
+ * This software has been released under the terms of the IBM Public
+ * License. For details, see the LICENSE file in the top-level source
+ * directory or online at http://www.openafs.org/dl/license10.html
+ */
+
+/*
+ * vnodeops structure and Digital Unix specific ops and support routines.
+ */
+
+#include "../afs/param.h" /* Should be always first */
+
+#include "../afs/sysincludes.h" /* Standard vendor system headers */
+#include "../afs/afsincludes.h" /* Afs-based standard headers */
+#include "../afs/afs_stats.h" /* statistics */
+#include <vm/vm.h>
+#include <vm/vnode_pager.h>
+#include <vm/vm_map.h>
+/* #include <vm/vm_ubc.h> */
+#include "../afs/afs_cbqueue.h"
+#include "../afs/nfsclient.h"
+#include "../afs/afs_osidnlc.h"
+
+
+extern int afs_lookup(), afs_create(), afs_noop(), afs_open(), afs_close();
+extern int afs_access(), afs_getattr(), afs_setattr(), afs_badop();
+extern int afs_fsync(), afs_seek(), afs_remove(), afs_link(), afs_rename();
+extern int afs_mkdir(), afs_rmdir(), afs_symlink(), afs_readdir();
+extern int afs_readlink(), afs_lockctl();
+extern int vn_pathconf_default(), seltrue();
+
+int mp_afs_lookup(), mp_afs_create(), mp_afs_open();
+int mp_afs_access(), mp_afs_getattr(), mp_afs_setattr(), mp_afs_ubcrdwr();
+int mp_afs_ubcrdwr(), mp_afs_mmap();
+int mp_afs_fsync(), mp_afs_seek(), mp_afs_remove(), mp_afs_link();
+int mp_afs_rename(), mp_afs_mkdir(), mp_afs_rmdir(), mp_afs_symlink();
+int mp_afs_readdir(), mp_afs_readlink(), mp_afs_abortop(), mp_afs_inactive();
+int mp_afs_reclaim(), mp_afs_bmap(), mp_afs_strategy(), mp_afs_print();
+int mp_afs_page_read(), mp_afs_page_write(), mp_afs_swap(), mp_afs_bread();
+int mp_afs_brelse(), mp_afs_lockctl(), mp_afs_syncdata(), mp_afs_close();
+int mp_afs_closex();
+
+#if 0
+/* AFS vnodeops */
+struct vnodeops Afs_vnodeops = {
+ mp_afs_lookup,
+ mp_afs_create,
+ afs_noop, /* vn_mknod */
+ mp_afs_open,
+ mp_afs_close,
+ mp_afs_access,
+ mp_afs_getattr,
+ mp_afs_setattr,
+ mp_afs_ubcrdwr,
+ mp_afs_ubcrdwr,
+ afs_badop, /* vn_ioctl */
+ seltrue, /* vn_select */
+ mp_afs_mmap,
+ mp_afs_fsync,
+ mp_afs_seek,
+ mp_afs_remove,
+ mp_afs_link,
+ mp_afs_rename,
+ mp_afs_mkdir,
+ mp_afs_rmdir,
+ mp_afs_symlink,
+ mp_afs_readdir,
+ mp_afs_readlink,
+ mp_afs_abortop,
+ mp_afs_inactive,
+ mp_afs_reclaim,
+ mp_afs_bmap,
+ mp_afs_strategy,
+ mp_afs_print,
+ mp_afs_page_read,
+ mp_afs_page_write,
+ mp_afs_swap,
+ mp_afs_bread,
+ mp_afs_brelse,
+ mp_afs_lockctl,
+ mp_afs_syncdata,
+ afs_noop, /* Lock */
+ afs_noop, /* unLock */
+ afs_noop, /* get ext attrs */
+ afs_noop, /* set ext attrs */
+ afs_noop, /* del ext attrs */
+ vn_pathconf_default,
+};
+struct vnodeops *afs_ops = &Afs_vnodeops;
+#endif /* 0 */
+
+/* vnode file operations, and our own */
+extern int vn_read();
+extern int vn_write();
+extern int vn_ioctl();
+extern int vn_select();
+extern int afs_closex();
+
+struct fileops afs_fileops = {
+ vn_read,
+ vn_write,
+ vn_ioctl,
+ vn_select,
+ mp_afs_closex,
+};
+
+#if 0
+mp_afs_lookup(adp, ndp)
+ struct vcache *adp;
+ struct nameidata *ndp;
+{
+ int code;
+ AFS_GLOCK();
+ code = afs_lookup(adp, ndp);
+ AFS_GUNLOCK();
+ return code;
+}
+
+mp_afs_create(ndp, attrs)
+ struct nameidata *ndp;
+ struct vattr *attrs;
+{
+ int code;
+ AFS_GLOCK();
+ code = afs_create(ndp, attrs);
+ AFS_GUNLOCK();
+ return code;
+}
+
+mp_afs_open(avcp, aflags, acred)
+ struct vcache **avcp;
+ afs_int32 aflags;
+ struct AFS_UCRED *acred;
+{
+ int code;
+ AFS_GLOCK();
+ code = afs_open(avcp, aflags, acred);
+ AFS_GUNLOCK();
+ return code;
+}
+
+mp_afs_access(avc, amode, acred)
+ struct vcache *avc;
+ afs_int32 amode;
+ struct AFS_UCRED *acred;
+{
+ int code;
+ AFS_GLOCK();
+ code = afs_access(avc, amode, acred);
+ AFS_GUNLOCK();
+ return code;
+}
+
+mp_afs_close(avc, flags, cred)
+ struct vnode *avc;
+ int flags;
+ struct ucred *cred;
+{
+ int code;
+ AFS_GLOCK();
+ code = afs_close(avc, flags, cred);
+ AFS_GUNLOCK();
+ return code;
+}
+
+mp_afs_getattr(avc, attrs, acred)
+ struct vcache *avc;
+ struct vattr *attrs;
+ struct AFS_UCRED *acred;
+{
+ int code;
+ AFS_GLOCK();
+ code = afs_getattr(avc, attrs, acred);
+ AFS_GUNLOCK();
+ return code;
+}
+
+mp_afs_setattr(avc, attrs, acred)
+ struct vcache *avc;
+ struct vattr *attrs;
+ struct AFS_UCRED *acred;
+{
+ int code;
+ AFS_GLOCK();
+ code = afs_setattr(avc, attrs, acred);
+ AFS_GUNLOCK();
+ return code;
+}
+
+mp_afs_fsync(avc, fflags, acred, waitfor)
+ struct vcache *avc;
+ int fflags;
+ struct AFS_UCRED *acred;
+ int waitfor;
+{
+ int code;
+ AFS_GLOCK();
+ code = afs_fsync(avc, fflags, acred, waitfor);
+ AFS_GUNLOCK();
+ return code;
+}
+
+mp_afs_remove(ndp)
+ struct nameidata *ndp;
+{
+ int code;
+ AFS_GLOCK();
+ code = afs_remove(ndp);
+ AFS_GUNLOCK();
+ return code;
+}
+
+mp_afs_link(avc, ndp)
+ struct vcache *avc;
+ struct nameidata *ndp;
+{
+ int code;
+ AFS_GLOCK();
+ code = afs_link(avc, ndp);
+ AFS_GUNLOCK();
+ return code;
+}
+
+mp_afs_rename(fndp, tndp)
+ struct nameidata *fndp, *tndp;
+{
+ int code;
+ AFS_GLOCK();
+ code = afs_rename(fndp, tndp);
+ AFS_GUNLOCK();
+ return code;
+}
+
+mp_afs_mkdir(ndp, attrs)
+ struct nameidata *ndp;
+ struct vattr *attrs;
+{
+ int code;
+ AFS_GLOCK();
+ code = afs_mkdir(ndp, attrs);
+ AFS_GUNLOCK();
+ return code;
+}
+
+mp_afs_rmdir(ndp)
+ struct nameidata *ndp;
+{
+ int code;
+ AFS_GLOCK();
+ code = afs_rmdir(ndp);
+ AFS_GUNLOCK();
+ return code;
+}
+
+mp_afs_symlink(ndp, attrs, atargetName)
+ struct nameidata *ndp;
+ struct vattr *attrs;
+ register char *atargetName;
+{
+ int code;
+ AFS_GLOCK();
+ code = afs_symlink(ndp, attrs, atargetName);
+ AFS_GUNLOCK();
+ return code;
+}
+
+mp_afs_readdir(avc, auio, acred, eofp)
+ struct vcache *avc;
+ struct uio *auio;
+ struct AFS_UCRED *acred;
+ int *eofp;
+{
+ int code;
+ AFS_GLOCK();
+ code = afs_readdir(avc, auio, acred, eofp);
+ AFS_GUNLOCK();
+ return code;
+}
+
+mp_afs_readlink(avc, auio, acred)
+ struct vcache *avc;
+ struct uio *auio;
+ struct AFS_UCRED *acred;
+{
+ int code;
+ AFS_GLOCK();
+ code = afs_readlink(avc, auio, acred);
+ AFS_GUNLOCK();
+ return code;
+}
+
+mp_afs_lockctl(avc, af, flag, acred, clid, offset)
+ struct vcache *avc;
+ struct eflock *af;
+ struct AFS_UCRED *acred;
+ int flag;
+ pid_t clid;
+ off_t offset;
+{
+ int code;
+ AFS_GLOCK();
+ code = afs_lockctl(avc, af, flag, acred, clid, offset);
+ AFS_GUNLOCK();
+ return code;
+}
+
+mp_afs_closex(afd)
+ struct file *afd;
+{
+ int code;
+ AFS_GLOCK();
+ code = afs_closex(afd);
+ AFS_GUNLOCK();
+ return code;
+}
+
+mp_afs_seek(avc, oldoff, newoff, cred)
+ struct vcache *avc;
+ off_t oldoff, newoff;
+ struct ucred *cred;
+{
+ if ((int) newoff < 0)
+ return(EINVAL);
+ else
+ return(0);
+}
+
+mp_afs_abortop(ndp)
+ struct nameidata *ndp;
+{
+ return(0);
+}
+
+mp_afs_inactive(avc, acred)
+ register struct vcache *avc;
+ struct AFS_UCRED *acred;
+{
+ AFS_GLOCK();
+ afs_InactiveVCache(avc, acred);
+ AFS_GUNLOCK();
+}
+
+
+mp_afs_reclaim(avc)
+ struct vcache *avc;
+{
+ return(0);
+}
+
+mp_afs_print(avc)
+ struct vcache *avc;
+{
+ return(0);
+}
+
+mp_afs_page_read(avc, uio, acred)
+ struct vcache *avc;
+ struct uio *uio;
+ struct ucred *acred;
+{
+ int error;
+ struct vrequest treq;
+
+ AFS_GLOCK();
+ error = afs_rdwr(avc, uio, UIO_READ, 0, acred);
+ afs_Trace3(afs_iclSetp, CM_TRACE_PAGE_READ, ICL_TYPE_POINTER, avc,
+ ICL_TYPE_INT32, error, ICL_TYPE_INT32, avc->states);
+ if (error) {
+ error = EIO;
+ } else if ((avc->states) == 0) {
+ afs_InitReq(&treq, acred);
+ ObtainWriteLock(&avc->lock,161);
+ afs_Wire(avc, &treq);
+ ReleaseWriteLock(&avc->lock);
+ }
+ AFS_GUNLOCK();
+ return(error);
+}
+
+
+mp_afs_page_write(avc, uio, acred, pager, offset)
+ struct vcache *avc;
+ struct uio *uio;
+ struct ucred *acred;
+ memory_object_t pager;
+ vm_offset_t offset;
+{
+ int error;
+
+ AFS_GLOCK();
+ error = afs_rdwr(avc, uio, UIO_WRITE, 0, acred);
+ afs_Trace3(afs_iclSetp, CM_TRACE_PAGE_WRITE, ICL_TYPE_POINTER, avc,
+ ICL_TYPE_INT32, error, ICL_TYPE_INT32, avc->states);
+ if (error) {
+ error = EIO;
+ }
+ AFS_GUNLOCK();
+ return(error);
+}
+
+
+int DO_FLUSH=1;
+mp_afs_ubcrdwr(avc, uio, ioflag, cred)
+ struct vcache *avc;
+ struct uio *uio;
+ int ioflag;
+ struct ucred *cred;
+{
+ register afs_int32 code;
+ register char *data;
+ afs_int32 fileBase, size, cnt=0;
+ afs_int32 pageBase;
+ register afs_int32 tsize;
+ register afs_int32 pageOffset;
+ int eof;
+ struct vrequest treq;
+ int rw = uio->uio_rw;
+ int rv, flags;
+ int newpage=0;
+ vm_page_t page;
+ afs_int32 save_resid;
+ struct dcache *tdc;
+ int didFakeOpen=0;
+ int counter=0;
+
+ AFS_GLOCK();
+ afs_InitReq(&treq, cred);
+ if (AFS_NFSXLATORREQ(cred) && rw == UIO_READ) {
+ if (!afs_AccessOK(avc, PRSFS_READ, &treq,
+ CHECK_MODE_BITS|CMB_ALLOW_EXEC_AS_READ)) {
+ AFS_GUNLOCK();
+ return EACCES;
+ }
+ }
+ afs_Trace4(afs_iclSetp, CM_TRACE_VMRW, ICL_TYPE_POINTER, avc,
+ ICL_TYPE_INT32, (rw==UIO_WRITE? 1 : 0),
+ ICL_TYPE_LONG, uio->uio_offset,
+ ICL_TYPE_LONG, uio->uio_resid);
+ code = afs_VerifyVCache(avc, &treq);
+ if (code) {
+ code = afs_CheckCode(code, &treq, 35);
+ AFS_GUNLOCK();
+ return code;
+ }
+ if (vType(avc) != VREG) {
+ AFS_GUNLOCK();
+ return EISDIR; /* can't read or write other things */
+ }
+ afs_BozonLock(&avc->pvnLock, avc);
+ osi_FlushPages(avc); /* hold bozon lock, but not basic vnode lock */
+ ObtainWriteLock(&avc->lock,162);
+ /* adjust parameters when appending files */
+ if ((ioflag & IO_APPEND) && uio->uio_rw == UIO_WRITE)
+ uio->uio_offset = avc->m.Length; /* write at EOF position */
+ if (uio->uio_rw == UIO_WRITE) {
+ avc->states |= CDirty;
+ afs_FakeOpen(avc);
+ didFakeOpen=1;
+ /*
+ * before starting any I/O, we must ensure that the file is big enough
+ * to hold the results (since afs_putpage will be called to force
+ * the I/O.
+ */
+ size = uio->afsio_resid + uio->afsio_offset; /* new file size */
+ if (size > avc->m.Length) avc->m.Length = size; /* file grew */
+ avc->m.Date = osi_Time(); /* Set file date (for ranlib) */
+ if (uio->afsio_resid > PAGE_SIZE)
+ cnt = uio->afsio_resid / PAGE_SIZE;
+ save_resid = uio->afsio_resid;
+ }
+
+ while (1) {
+ /*
+ * compute the amount of data to move into this block,
+ * based on uio->afsio_resid.
+ */
+ size = uio->afsio_resid; /* transfer size */
+ fileBase = uio->afsio_offset; /* start file position */
+ pageBase = fileBase & ~(PAGE_SIZE-1); /* file position of the page */
+ pageOffset = fileBase & (PAGE_SIZE-1); /* start offset within page */
+ tsize = PAGE_SIZE-pageOffset; /* amount left in this page */
+ /*
+ * we'll read tsize bytes,
+ * but first must make sure tsize isn't too big
+ */
+ if (tsize > size) tsize = size; /* don't read past end of request */
+ eof = 0; /* flag telling us if we hit the EOF on the read */
+ if (uio->uio_rw == UIO_READ) { /* we're doing a read operation */
+ /* don't read past EOF */
+ if (tsize + fileBase > avc->m.Length) {
+ tsize = avc->m.Length - fileBase;
+ eof = 1; /* we did hit the EOF */
+ if (tsize < 0) tsize = 0; /* better safe than sorry */
+ }
+ }
+ if (tsize <= 0) break; /* nothing to transfer, we're done */
+
+ /* Purge dirty chunks of file if there are too many dirty chunks.
+ * Inside the write loop, we only do this at a chunk boundary.
+ * Clean up partial chunk if necessary at end of loop.
+ */
+ if (uio->uio_rw == UIO_WRITE && counter > 0
+ && AFS_CHUNKOFFSET(fileBase) == 0) {
+ code = afs_DoPartialWrite(avc, &treq);
+ avc->states |= CDirty;
+ }
+
+ if (code) {
+ break;
+ }
+
+ flags = 0;
+ ReleaseWriteLock(&avc->lock);
+ AFS_GUNLOCK();
+ code = ubc_lookup(((struct vnode *)avc)->v_object, pageBase,
+ PAGE_SIZE, PAGE_SIZE, &page, &flags);
+ AFS_GLOCK();
+ ObtainWriteLock(&avc->lock,163);
+
+ if (code) {
+ break;
+ }
+ if (flags & B_NOCACHE) {
+ /*
+ No page found. We should not read the page in if
+ 1. the write starts on a page edge (ie, pageoffset == 0)
+ and either
+ 1. we will fill the page (ie, size == PAGESIZE), or
+ 2. we are writing past eof
+ */
+ if ((uio->uio_rw == UIO_WRITE) &&
+ ((pageOffset == 0 && (size == PAGE_SIZE || fileBase >= avc->m.Length)))) {
+ struct vnode *vp = (struct vnode *)avc;
+ /* we're doing a write operation past eof; no need to read it */
+ newpage = 1;
+ AFS_GUNLOCK();
+ ubc_page_zero(page, 0, PAGE_SIZE);
+ ubc_page_release(page, B_DONE);
+ AFS_GLOCK();
+ } else {
+ /* page wasn't cached, read it in. */
+ struct buf *bp;
+
+ AFS_GUNLOCK();
+ bp = ubc_bufalloc(page, 1, PAGE_SIZE, 1, B_READ);
+ AFS_GLOCK();
+ bp->b_dev = 0;
+ bp->b_vp = (struct vnode *)avc;
+ bp->b_blkno = btodb(pageBase);
+ ReleaseWriteLock(&avc->lock);
+ code = afs_ustrategy(bp, cred); /* do the I/O */
+ ObtainWriteLock(&avc->lock,164);
+ AFS_GUNLOCK();
+ ubc_sync_iodone(bp);
+ AFS_GLOCK();
+ if (code) {
+ AFS_GUNLOCK();
+ ubc_page_release(page, 0);
+ AFS_GLOCK();
+ break;
+ }
+ }
+ }
+ AFS_GUNLOCK();
+ ubc_page_wait(page);
+ data = (char *)page->pg_addr; /* DUX 4.0D */
+ if (data == 0)
+ data = (char *)PHYS_TO_KSEG(page->pg_phys_addr); /* DUX 4.0E */
+ AFS_GLOCK();
+ ReleaseWriteLock(&avc->lock); /* uiomove may page fault */
+ AFS_GUNLOCK();
+ code = uiomove(data+pageOffset, tsize, uio);
+ ubc_unload(page, pageOffset, page_size);
+ if (uio->uio_rw == UIO_WRITE) {
+ vm_offset_t toffset;
+
+ /* Mark the page dirty and release it to avoid a deadlock
+ * in ubc_dirty_kluster when more than one process writes
+ * this page at the same time. */
+ toffset = page->pg_offset;
+ flags |= B_DIRTY;
+ ubc_page_release(page, flags);
+
+ if (cnt > 10) {
+ vm_page_t pl;
+ int kpcnt;
+ struct buf *bp;
+
+ /* We released the page, so we can get a null page
+ * list if another thread calls the strategy routine.
+ */
+ pl = ubc_dirty_kluster(((struct vnode *)avc)->v_object,
+ NULL, toffset, 0, B_WANTED, FALSE, &kpcnt);
+ if (pl) {
+ bp = ubc_bufalloc(pl, 1, PAGE_SIZE, 1, B_WRITE);
+ bp->b_dev = 0;
+ bp->b_vp = (struct vnode *)avc;
+ bp->b_blkno = btodb(pageBase);
+ AFS_GLOCK();
+ code = afs_ustrategy(bp, cred); /* do the I/O */
+ AFS_GUNLOCK();
+ ubc_sync_iodone(bp);
+ if (code) {
+ AFS_GLOCK();
+ ObtainWriteLock(&avc->lock,415);
+ break;
+ }
+ }
+ }
+ } else {
+ ubc_page_release(page, flags);
+ }
+ AFS_GLOCK();
+ ObtainWriteLock(&avc->lock,165);
+ /*
+ * If reading at a chunk boundary, start prefetch of next chunk.
+ */
+ if (uio->uio_rw == UIO_READ
+ && (counter == 0 || AFS_CHUNKOFFSET(fileBase) == 0)) {
+ tdc = afs_FindDCache(avc, fileBase);
+ if (tdc) {
+ if (!(tdc->flags & DFNextStarted))
+ afs_PrefetchChunk(avc, tdc, cred, &treq);
+ afs_PutDCache(tdc);
+ }
+ }
+ counter++;
+ if (code) break;
+ }
+ if (didFakeOpen)
+ afs_FakeClose(avc, cred);
+ if (uio->uio_rw == UIO_WRITE && code == 0 && (avc->states & CDirty)) {
+ code = afs_DoPartialWrite(avc, &treq);
+ }
+ ReleaseWriteLock(&avc->lock);
+ afs_BozonUnlock(&avc->pvnLock, avc);
+ if (DO_FLUSH || (!newpage && (cnt < 10))) {
+ AFS_GUNLOCK();
+ ubc_flush_dirty(((struct vnode *)avc)->v_object, flags);
+ AFS_GLOCK();
+ }
+
+ ObtainSharedLock(&avc->lock, 409);
+ if (!code) {
+ if (avc->vc_error) {
+ code = avc->vc_error;
+ }
+ }
+ /* This is required since we may still have dirty pages after the write.
+ * I could just let close do the right thing, but stat's before the close
+ * return the wrong length.
+ */
+ if (code == EDQUOT || code == ENOSPC) {
+ uio->uio_resid = save_resid;
+ UpgradeSToWLock(&avc->lock, 410);
+ osi_ReleaseVM(avc, cred);
+ ConvertWToSLock(&avc->lock);
+ }
+ ReleaseSharedLock(&avc->lock);
+
+ if (!code && (ioflag & IO_SYNC) && (uio->uio_rw == UIO_WRITE)
+ && !AFS_NFSXLATORREQ(cred)) {
+ code = afs_fsync(avc, 0, cred, 0);
+ }
+out:
+ code = afs_CheckCode(code, &treq, 36);
+ AFS_GUNLOCK();
+ return code;
+}
+
+
+/*
+ * Now for some bad news. Since we artificially hold on to vnodes by doing
+ * and extra VNHOLD in afs_NewVCache(), there is no way for us to know
+ * when we need to flush the pages when a program exits. Particularly
+ * if it closes the file after mapping it R/W.
+ *
+ */
+
+mp_afs_mmap(avc, offset, map, addrp, len, prot, maxprot, flags, cred)
+ register struct vcache *avc;
+ vm_offset_t offset;
+ vm_map_t map;
+ vm_offset_t *addrp;
+ vm_size_t len;
+ vm_prot_t prot;
+ vm_prot_t maxprot;
+ int flags;
+ struct ucred *cred;
+{
+ struct vp_mmap_args args;
+ register struct vp_mmap_args *ap = &args;
+ struct vnode *vp = (struct vnode *)avc;
+ int code;
+ struct vrequest treq;
+#if !defined(DYNEL)
+ extern kern_return_t u_vp_create();
+#endif
+
+ AFS_GLOCK();
+ afs_InitReq(&treq, cred);
+ code = afs_VerifyVCache(avc, &treq);
+ if (code) {
+ code = afs_CheckCode(code, &treq, 37);
+ AFS_GUNLOCK();
+ return code;
+ }
+ afs_BozonLock(&avc->pvnLock, avc);
+ osi_FlushPages(avc); /* ensure old pages are gone */
+ afs_BozonUnlock(&avc->pvnLock, avc);
+ ObtainWriteLock(&avc->lock,166);
+ avc->states |= CMAPPED;
+ ReleaseWriteLock(&avc->lock);
+ ap->a_offset = offset;
+ ap->a_vaddr = addrp;
+ ap->a_size = len;
+ ap->a_prot = prot,
+ ap->a_maxprot = maxprot;
+ ap->a_flags = flags;
+ AFS_GUNLOCK();
+ code = u_vp_create(map, vp->v_object, (vm_offset_t) ap);
+ AFS_GLOCK();
+ code = afs_CheckCode(code, &treq, 38);
+ AFS_GUNLOCK();
+ return code;
+}
+
+
+int mp_afs_getpage(vop, offset, len, protp, pl, plsz, mape, addr, rw, cred)
+ vm_ubc_object_t vop;
+ vm_offset_t offset;
+ vm_size_t len;
+ vm_prot_t *protp;
+ vm_page_t *pl;
+ int plsz;
+ vm_map_entry_t mape;
+ vm_offset_t addr;
+ int rw;
+ struct ucred *cred;
+{
+ register afs_int32 code;
+ struct vrequest treq;
+ int flags = 0;
+ int i, pages = (len + PAGE_SIZE - 1) >> page_shift;
+ vm_page_t *pagep;
+ vm_offset_t off;
+
+ struct vcache *avc = (struct vcache *)vop->vu_vp;
+
+ /* first, obtain the proper lock for the VM system */
+
+ AFS_GLOCK();
+ afs_InitReq(&treq, cred);
+ code = afs_VerifyVCache(avc, &treq);
+ if (code) {
+ *pl = VM_PAGE_NULL;
+ code = afs_CheckCode(code, &treq, 39); /* failed to get it */
+ AFS_GUNLOCK();
+ return code;
+ }
+
+ /* clean all dirty pages for this vnode */
+ AFS_GUNLOCK();
+ ubc_flush_dirty(vop,0);
+ AFS_GLOCK();
+
+ afs_BozonLock(&avc->pvnLock, avc);
+ ObtainWriteLock(&avc->lock,167);
+ afs_Trace4(afs_iclSetp, CM_TRACE_PAGEIN, ICL_TYPE_POINTER, avc,
+ ICL_TYPE_LONG, offset, ICL_TYPE_LONG, len,
+ ICL_TYPE_INT32, (int) rw);
+ for (i = 0; i < pages; i++) {
+ pagep = &pl[i];
+ off = offset + PAGE_SIZE * i;
+ if (protp) protp[i] = 0;
+ flags = 0;
+ ReleaseWriteLock(&avc->lock);
+ AFS_GUNLOCK();
+ code = ubc_lookup(((struct vnode *)avc)->v_object, off,
+ PAGE_SIZE, PAGE_SIZE, pagep, &flags);
+ AFS_GLOCK();
+ ObtainWriteLock(&avc->lock,168);
+ if (code) {
+ goto out;
+ }
+ if(flags & B_NOCACHE) { /* if (page) */
+ if ((rw & B_WRITE) && (offset+len >= avc->m.Length)) {
+ struct vnode *vp = (struct vnode *)avc;
+ /* we're doing a write operation past eof; no need to read it */
+ AFS_GUNLOCK();
+ ubc_page_zero(*pagep, 0, PAGE_SIZE);
+ ubc_page_release(*pagep, B_DONE);
+ AFS_GLOCK();
+ } else {
+ /* page wasn't cached, read it in. */
+ struct buf *bp;
+
+ AFS_GUNLOCK();
+ bp = ubc_bufalloc(*pagep, 1, PAGE_SIZE, 1, B_READ);
+ AFS_GLOCK();
+ bp->b_dev = 0;
+ bp->b_vp = (struct vnode *)avc;
+ bp->b_blkno = btodb(off);
+ ReleaseWriteLock(&avc->lock);
+ code = afs_ustrategy(bp, cred); /* do the I/O */
+ ObtainWriteLock(&avc->lock,169);
+ AFS_GUNLOCK();
+ ubc_sync_iodone(bp);
+ AFS_GLOCK();
+ if (code) {
+ AFS_GUNLOCK();
+ ubc_page_release(pl[i], 0);
+ AFS_GLOCK();
+ goto out;
+ }
+ }
+ }
+ if ((rw & B_READ) == 0) {
+ AFS_GUNLOCK();
+ ubc_page_dirty(pl[i]);
+ AFS_GLOCK();
+ } else {
+ if (protp && (flags & B_DIRTY) == 0) {
+ protp[i] = VM_PROT_WRITE;
+ }
+ }
+ }
+out:
+ pl[i] = VM_PAGE_NULL;
+ ReleaseWriteLock(&avc->lock);
+ afs_BozonUnlock(&avc->pvnLock, avc);
+ afs_Trace3(afs_iclSetp, CM_TRACE_PAGEINDONE, ICL_TYPE_INT32, code,
+ ICL_TYPE_POINTER, *pagep, ICL_TYPE_INT32, flags);
+ code = afs_CheckCode(code, &treq, 40);
+ AFS_GUNLOCK();
+ return code;
+}
+
+
+int mp_afs_putpage(vop, pl, pcnt, flags, cred)
+ vm_ubc_object_t vop;
+ vm_page_t *pl;
+ int pcnt;
+ int flags;
+ struct ucred *cred;
+{
+ register afs_int32 code=0;
+ struct vcache *avc = (struct vcache *)vop->vu_vp;
+ struct vnode *vp = (struct vnode *)avc;
+ int i;
+
+ AFS_GLOCK();
+ afs_Trace4(afs_iclSetp, CM_TRACE_PAGEOUT, ICL_TYPE_POINTER, avc,
+ ICL_TYPE_INT32, pcnt, ICL_TYPE_INT32, vp->v_flag,
+ ICL_TYPE_INT32, flags);
+ if (flags & B_UBC) {
+ AFS_GUNLOCK();
+ VN_LOCK(vp);
+ if (vp->v_flag & VXLOCK) {
+ VN_UNLOCK(vp);
+ for (i = 0; i < pcnt; i++) {
+ ubc_page_release(pl[i], B_DONE|B_DIRTY);
+ pl[i] = VM_PAGE_NULL;
+ }
+ return(0);
+ } else {
+ VN_UNLOCK(vp);
+ }
+ AFS_GLOCK();
+ }
+
+ /* first, obtain the proper lock for the VM system */
+ afs_BozonLock(&avc->pvnLock, avc);
+ ObtainWriteLock(&avc->lock,170);
+ for (i = 0; i < pcnt; i++) {
+ vm_page_t page = pl[i];
+ struct buf *bp;
+
+ /* write it out */
+ AFS_GUNLOCK();
+ bp = ubc_bufalloc(page, 1, PAGE_SIZE, 1, B_WRITE);
+ AFS_GLOCK();
+ bp->b_dev = 0;
+ bp->b_vp = (struct vnode *)avc;
+ bp->b_blkno = btodb(page->pg_offset);
+ ReleaseWriteLock(&avc->lock);
+ code = afs_ustrategy(bp, cred); /* do the I/O */
+ ObtainWriteLock(&avc->lock,171);
+ AFS_GUNLOCK();
+ ubc_sync_iodone(bp);
+ AFS_GLOCK();
+ if (code) {
+ goto done;
+ } else {
+ pl[i] = VM_PAGE_NULL;
+ }
+ }
+done:
+ ReleaseWriteLock(&avc->lock);
+ afs_BozonUnlock(&avc->pvnLock, avc);
+ afs_Trace2(afs_iclSetp, CM_TRACE_PAGEOUTDONE, ICL_TYPE_INT32, code,
+ ICL_TYPE_INT32, avc->m.Length);
+ AFS_GUNLOCK();
+ return code;
+}
+
+
+int mp_afs_swap(avc, swapop, argp)
+ struct vcache *avc;
+ vp_swap_op_t swapop;
+ vm_offset_t argp;
+{
+ return EIO;
+}
+
+int mp_afs_syncdata(avc, flag, offset, length, cred)
+ struct vcache *avc;
+ int flag;
+ vm_offset_t offset;
+ vm_size_t length;
+ struct ucred *cred;
+{
+ /* NFS V3 makes this call, ignore it. We'll sync the data in afs_fsync. */
+ if (AFS_NFSXLATORREQ(cred))
+ return 0;
+ else
+ return EINVAL;
+}
+
+/* a freelist of one */
+struct buf *afs_bread_freebp = 0;
+
+/*
+ * Only rfs_read calls this, and it only looks at bp->b_un.b_addr.
+ * Thus we can use fake bufs (ie not from the real buffer pool).
+ */
+mp_afs_bread(vp, lbn, bpp, cred)
+ struct ucred *cred;
+ struct vnode *vp;
+ daddr_t lbn;
+ struct buf **bpp;
+{
+ int offset, fsbsize, error;
+ struct buf *bp;
+ struct iovec iov;
+ struct uio uio;
+
+ AFS_GLOCK();
+ AFS_STATCNT(afs_bread);
+ fsbsize = vp->v_vfsp->vfs_bsize;
+ offset = lbn * fsbsize;
+ if (afs_bread_freebp) {
+ bp = afs_bread_freebp;
+ afs_bread_freebp = 0;
+ } else {
+ bp = (struct buf *) AFS_KALLOC(sizeof(*bp));
+ bp->b_un.b_addr = (caddr_t) AFS_KALLOC(fsbsize);
+ }
+
+ iov.iov_base = bp->b_un.b_addr;
+ iov.iov_len = fsbsize;
+ uio.afsio_iov = &iov;
+ uio.afsio_iovcnt = 1;
+ uio.afsio_seg = AFS_UIOSYS;
+ uio.afsio_offset = offset;
+ uio.afsio_resid = fsbsize;
+ *bpp = 0;
+ error = afs_read((struct vcache *)vp, &uio, cred, lbn, bpp, 0);
+ if (error) {
+ afs_bread_freebp = bp;
+ AFS_GUNLOCK();
+ return error;
+ }
+ if (*bpp) {
+ afs_bread_freebp = bp;
+ } else {
+ *(struct buf **)&bp->b_vp = bp; /* mark as fake */
+ *bpp = bp;
+ }
+ AFS_GUNLOCK();
+ return 0;
+}
+
+
+mp_afs_brelse(vp, bp)
+struct vnode *vp;
+struct buf *bp;
+{
+ AFS_GLOCK();
+ AFS_STATCNT(afs_brelse);
+ if ((struct buf *)bp->b_vp != bp) { /* not fake */
+ brelse(bp);
+ } else if (afs_bread_freebp) {
+ AFS_KFREE(bp->b_un.b_addr, vp->v_vfsp->vfs_bsize);
+ AFS_KFREE(bp, sizeof(*bp));
+ } else {
+ afs_bread_freebp = bp;
+ }
+ AFS_GUNLOCK();
+}
+
+
+mp_afs_bmap(avc, abn, anvp, anbn)
+ register struct vcache *avc;
+ afs_int32 abn, *anbn;
+ struct vcache **anvp;
+{
+ AFS_GLOCK();
+ AFS_STATCNT(afs_bmap);
+ if (anvp)
+ *anvp = avc;
+ if (anbn)
+ *anbn = abn * (8192 / DEV_BSIZE); /* in 512 byte units */
+ AFS_GUNLOCK();
+ return 0;
+}
+
+
+/* real strategy */
+mp_afs_strategy (abp)
+ register struct buf *abp;
+{
+ register afs_int32 code;
+
+ AFS_GLOCK();
+ AFS_STATCNT(afs_strategy);
+ code = afs_osi_MapStrategy(afs_ustrategy, abp);
+ AFS_GUNLOCK();
+ return code;
+}
+
+
+mp_afs_refer(vm_ubc_object_t vop)
+{
+ VREF(vop->vu_vp);
+}
+
+
+mp_afs_release(vm_ubc_object_t vop)
+{
+ vrele(vop->vu_vp);
+}
+
+
+mp_afs_write_check(vm_ubc_object_t vop, vm_page_t pp)
+{
+ return TRUE;
+}
+
+
+
+struct vfs_ubcops afs_ubcops = {
+ mp_afs_refer, /* refer vnode */
+ mp_afs_release, /* release vnode */
+ mp_afs_getpage, /* get page */
+ mp_afs_putpage, /* put page */
+ mp_afs_write_check, /* check writablity */
+};
+#endif /* 0 */
+
+/*
+ * Cover function for lookup name using OSF equivalent, namei()
+ *
+ * Note, the result vnode (ni_vp) in the namei data structure is remains
+ * locked after return.
+ */
+lookupname(namep, seg, follow, dvpp, cvpp)
+ char *namep; /* path name */
+ int seg; /* address space containing name */
+ int follow; /* follow symbolic links */
+ struct vnode **dvpp; /* result, containing parent vnode */
+ struct vnode **cvpp; /* result, containing final component vnode */
+{
+ /* Should I use free-bee in u-area? */
+ struct nameidata *ndp = &u.u_nd;
+ int error;
+
+ ndp->ni_nameiop = ((follow) ? (LOOKUP|FOLLOW) : (LOOKUP));
+ ndp->ni_segflg = seg;
+ ndp->ni_dirp = namep;
+ error = namei(ndp);
+ if (dvpp != (struct vnode **)0)
+ *dvpp = ndp->ni_dvp;
+ if (cvpp != (struct vnode **)0)
+ *cvpp = ndp->ni_vp;
+ return(error);
+}
+
--- /dev/null
+/*
+ * Copyright 2000, International Business Machines Corporation and others.
+ * All Rights Reserved.
+ *
+ * This software has been released under the terms of the IBM Public
+ * License. For details, see the LICENSE file in the top-level source
+ * directory or online at http://www.openafs.org/dl/license10.html
+ */
+
+/*
+ * This file is produced automatically.
+ * Do not modify anything in here by hand.
+ *
+ * Created from @(#)vnode_if.sh 8.1 (Berkeley) 6/10/93
+ */
+
+extern struct vnodeop_desc vop_default_desc;
+struct vop_islocked_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ struct proc *a_p;
+};
+extern struct vnodeop_desc vop_islocked_desc;
+static __inline int VOP_ISLOCKED __P((
+ struct vnode *vp,
+ struct proc *p));
+static __inline int VOP_ISLOCKED(vp, p)
+ struct vnode *vp;
+ struct proc *p;
+{
+ struct vop_islocked_args a;
+ int rc;
+ a.a_desc = VDESC(vop_islocked);
+ a.a_vp = vp;
+ a.a_p = p;
+ rc = VCALL(vp, VOFFSET(vop_islocked), &a);
+ return (rc);
+}
+struct vop_lookup_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_dvp;
+ struct vnode **a_vpp;
+ struct componentname *a_cnp;
+};
+extern struct vnodeop_desc vop_lookup_desc;
+static __inline int VOP_LOOKUP __P((
+ struct vnode *dvp,
+ struct vnode **vpp,
+ struct componentname *cnp));
+static __inline int VOP_LOOKUP(dvp, vpp, cnp)
+ struct vnode *dvp;
+ struct vnode **vpp;
+ struct componentname *cnp;
+{
+ struct vop_lookup_args a;
+ int rc;
+ a.a_desc = VDESC(vop_lookup);
+ a.a_dvp = dvp;
+ a.a_vpp = vpp;
+ a.a_cnp = cnp;
+ rc = VCALL(dvp, VOFFSET(vop_lookup), &a);
+ return (rc);
+}
+struct vop_cachedlookup_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_dvp;
+ struct vnode **a_vpp;
+ struct componentname *a_cnp;
+};
+extern struct vnodeop_desc vop_cachedlookup_desc;
+static __inline int VOP_CACHEDLOOKUP __P((
+ struct vnode *dvp,
+ struct vnode **vpp,
+ struct componentname *cnp));
+static __inline int VOP_CACHEDLOOKUP(dvp, vpp, cnp)
+ struct vnode *dvp;
+ struct vnode **vpp;
+ struct componentname *cnp;
+{
+ struct vop_cachedlookup_args a;
+ int rc;
+ a.a_desc = VDESC(vop_cachedlookup);
+ a.a_dvp = dvp;
+ a.a_vpp = vpp;
+ a.a_cnp = cnp;
+ rc = VCALL(dvp, VOFFSET(vop_cachedlookup), &a);
+ return (rc);
+}
+struct vop_create_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_dvp;
+ struct vnode **a_vpp;
+ struct componentname *a_cnp;
+ struct vattr *a_vap;
+};
+extern struct vnodeop_desc vop_create_desc;
+static __inline int VOP_CREATE __P((
+ struct vnode *dvp,
+ struct vnode **vpp,
+ struct componentname *cnp,
+ struct vattr *vap));
+static __inline int VOP_CREATE(dvp, vpp, cnp, vap)
+ struct vnode *dvp;
+ struct vnode **vpp;
+ struct componentname *cnp;
+ struct vattr *vap;
+{
+ struct vop_create_args a;
+ int rc;
+ a.a_desc = VDESC(vop_create);
+ a.a_dvp = dvp;
+ a.a_vpp = vpp;
+ a.a_cnp = cnp;
+ a.a_vap = vap;
+ rc = VCALL(dvp, VOFFSET(vop_create), &a);
+ return (rc);
+}
+struct vop_whiteout_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_dvp;
+ struct componentname *a_cnp;
+ int a_flags;
+};
+extern struct vnodeop_desc vop_whiteout_desc;
+static __inline int VOP_WHITEOUT __P((
+ struct vnode *dvp,
+ struct componentname *cnp,
+ int flags));
+static __inline int VOP_WHITEOUT(dvp, cnp, flags)
+ struct vnode *dvp;
+ struct componentname *cnp;
+ int flags;
+{
+ struct vop_whiteout_args a;
+ int rc;
+ a.a_desc = VDESC(vop_whiteout);
+ a.a_dvp = dvp;
+ a.a_cnp = cnp;
+ a.a_flags = flags;
+ rc = VCALL(dvp, VOFFSET(vop_whiteout), &a);
+ return (rc);
+}
+struct vop_mknod_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_dvp;
+ struct vnode **a_vpp;
+ struct componentname *a_cnp;
+ struct vattr *a_vap;
+};
+extern struct vnodeop_desc vop_mknod_desc;
+static __inline int VOP_MKNOD __P((
+ struct vnode *dvp,
+ struct vnode **vpp,
+ struct componentname *cnp,
+ struct vattr *vap));
+static __inline int VOP_MKNOD(dvp, vpp, cnp, vap)
+ struct vnode *dvp;
+ struct vnode **vpp;
+ struct componentname *cnp;
+ struct vattr *vap;
+{
+ struct vop_mknod_args a;
+ int rc;
+ a.a_desc = VDESC(vop_mknod);
+ a.a_dvp = dvp;
+ a.a_vpp = vpp;
+ a.a_cnp = cnp;
+ a.a_vap = vap;
+ rc = VCALL(dvp, VOFFSET(vop_mknod), &a);
+ return (rc);
+}
+struct vop_open_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ int a_mode;
+ struct ucred *a_cred;
+ struct proc *a_p;
+};
+extern struct vnodeop_desc vop_open_desc;
+static __inline int VOP_OPEN __P((
+ struct vnode *vp,
+ int mode,
+ struct ucred *cred,
+ struct proc *p));
+static __inline int VOP_OPEN(vp, mode, cred, p)
+ struct vnode *vp;
+ int mode;
+ struct ucred *cred;
+ struct proc *p;
+{
+ struct vop_open_args a;
+ int rc;
+ a.a_desc = VDESC(vop_open);
+ a.a_vp = vp;
+ a.a_mode = mode;
+ a.a_cred = cred;
+ a.a_p = p;
+ rc = VCALL(vp, VOFFSET(vop_open), &a);
+ return (rc);
+}
+struct vop_close_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ int a_fflag;
+ struct ucred *a_cred;
+ struct proc *a_p;
+};
+extern struct vnodeop_desc vop_close_desc;
+static __inline int VOP_CLOSE __P((
+ struct vnode *vp,
+ int fflag,
+ struct ucred *cred,
+ struct proc *p));
+static __inline int VOP_CLOSE(vp, fflag, cred, p)
+ struct vnode *vp;
+ int fflag;
+ struct ucred *cred;
+ struct proc *p;
+{
+ struct vop_close_args a;
+ int rc;
+ a.a_desc = VDESC(vop_close);
+ a.a_vp = vp;
+ a.a_fflag = fflag;
+ a.a_cred = cred;
+ a.a_p = p;
+ rc = VCALL(vp, VOFFSET(vop_close), &a);
+ return (rc);
+}
+struct vop_access_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ int a_mode;
+ struct ucred *a_cred;
+ struct proc *a_p;
+};
+extern struct vnodeop_desc vop_access_desc;
+static __inline int VOP_ACCESS __P((
+ struct vnode *vp,
+ int mode,
+ struct ucred *cred,
+ struct proc *p));
+static __inline int VOP_ACCESS(vp, mode, cred, p)
+ struct vnode *vp;
+ int mode;
+ struct ucred *cred;
+ struct proc *p;
+{
+ struct vop_access_args a;
+ int rc;
+ a.a_desc = VDESC(vop_access);
+ a.a_vp = vp;
+ a.a_mode = mode;
+ a.a_cred = cred;
+ a.a_p = p;
+ rc = VCALL(vp, VOFFSET(vop_access), &a);
+ return (rc);
+}
+struct vop_getattr_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ struct vattr *a_vap;
+ struct ucred *a_cred;
+ struct proc *a_p;
+};
+extern struct vnodeop_desc vop_getattr_desc;
+static __inline int VOP_GETATTR __P((
+ struct vnode *vp,
+ struct vattr *vap,
+ struct ucred *cred,
+ struct proc *p));
+static __inline int VOP_GETATTR(vp, vap, cred, p)
+ struct vnode *vp;
+ struct vattr *vap;
+ struct ucred *cred;
+ struct proc *p;
+{
+ struct vop_getattr_args a;
+ int rc;
+ a.a_desc = VDESC(vop_getattr);
+ a.a_vp = vp;
+ a.a_vap = vap;
+ a.a_cred = cred;
+ a.a_p = p;
+ rc = VCALL(vp, VOFFSET(vop_getattr), &a);
+ return (rc);
+}
+struct vop_setattr_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ struct vattr *a_vap;
+ struct ucred *a_cred;
+ struct proc *a_p;
+};
+extern struct vnodeop_desc vop_setattr_desc;
+static __inline int VOP_SETATTR __P((
+ struct vnode *vp,
+ struct vattr *vap,
+ struct ucred *cred,
+ struct proc *p));
+static __inline int VOP_SETATTR(vp, vap, cred, p)
+ struct vnode *vp;
+ struct vattr *vap;
+ struct ucred *cred;
+ struct proc *p;
+{
+ struct vop_setattr_args a;
+ int rc;
+ a.a_desc = VDESC(vop_setattr);
+ a.a_vp = vp;
+ a.a_vap = vap;
+ a.a_cred = cred;
+ a.a_p = p;
+ rc = VCALL(vp, VOFFSET(vop_setattr), &a);
+ return (rc);
+}
+struct vop_read_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ struct uio *a_uio;
+ int a_ioflag;
+ struct ucred *a_cred;
+};
+extern struct vnodeop_desc vop_read_desc;
+static __inline int VOP_READ __P((
+ struct vnode *vp,
+ struct uio *uio,
+ int ioflag,
+ struct ucred *cred));
+static __inline int VOP_READ(vp, uio, ioflag, cred)
+ struct vnode *vp;
+ struct uio *uio;
+ int ioflag;
+ struct ucred *cred;
+{
+ struct vop_read_args a;
+ int rc;
+ a.a_desc = VDESC(vop_read);
+ a.a_vp = vp;
+ a.a_uio = uio;
+ a.a_ioflag = ioflag;
+ a.a_cred = cred;
+ rc = VCALL(vp, VOFFSET(vop_read), &a);
+ return (rc);
+}
+struct vop_write_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ struct uio *a_uio;
+ int a_ioflag;
+ struct ucred *a_cred;
+};
+extern struct vnodeop_desc vop_write_desc;
+static __inline int VOP_WRITE __P((
+ struct vnode *vp,
+ struct uio *uio,
+ int ioflag,
+ struct ucred *cred));
+static __inline int VOP_WRITE(vp, uio, ioflag, cred)
+ struct vnode *vp;
+ struct uio *uio;
+ int ioflag;
+ struct ucred *cred;
+{
+ struct vop_write_args a;
+ int rc;
+ a.a_desc = VDESC(vop_write);
+ a.a_vp = vp;
+ a.a_uio = uio;
+ a.a_ioflag = ioflag;
+ a.a_cred = cred;
+ rc = VCALL(vp, VOFFSET(vop_write), &a);
+ return (rc);
+}
+struct vop_lease_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ struct proc *a_p;
+ struct ucred *a_cred;
+ int a_flag;
+};
+extern struct vnodeop_desc vop_lease_desc;
+static __inline int VOP_LEASE __P((
+ struct vnode *vp,
+ struct proc *p,
+ struct ucred *cred,
+ int flag));
+static __inline int VOP_LEASE(vp, p, cred, flag)
+ struct vnode *vp;
+ struct proc *p;
+ struct ucred *cred;
+ int flag;
+{
+ struct vop_lease_args a;
+ int rc;
+ a.a_desc = VDESC(vop_lease);
+ a.a_vp = vp;
+ a.a_p = p;
+ a.a_cred = cred;
+ a.a_flag = flag;
+ rc = VCALL(vp, VOFFSET(vop_lease), &a);
+ return (rc);
+}
+struct vop_ioctl_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ u_long a_command;
+ caddr_t a_data;
+ int a_fflag;
+ struct ucred *a_cred;
+ struct proc *a_p;
+};
+extern struct vnodeop_desc vop_ioctl_desc;
+static __inline int VOP_IOCTL __P((
+ struct vnode *vp,
+ u_long command,
+ caddr_t data,
+ int fflag,
+ struct ucred *cred,
+ struct proc *p));
+static __inline int VOP_IOCTL(vp, command, data, fflag, cred, p)
+ struct vnode *vp;
+ u_long command;
+ caddr_t data;
+ int fflag;
+ struct ucred *cred;
+ struct proc *p;
+{
+ struct vop_ioctl_args a;
+ int rc;
+ a.a_desc = VDESC(vop_ioctl);
+ a.a_vp = vp;
+ a.a_command = command;
+ a.a_data = data;
+ a.a_fflag = fflag;
+ a.a_cred = cred;
+ a.a_p = p;
+ rc = VCALL(vp, VOFFSET(vop_ioctl), &a);
+ return (rc);
+}
+struct vop_poll_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ int a_events;
+ struct ucred *a_cred;
+ struct proc *a_p;
+};
+extern struct vnodeop_desc vop_poll_desc;
+static __inline int VOP_POLL __P((
+ struct vnode *vp,
+ int events,
+ struct ucred *cred,
+ struct proc *p));
+static __inline int VOP_POLL(vp, events, cred, p)
+ struct vnode *vp;
+ int events;
+ struct ucred *cred;
+ struct proc *p;
+{
+ struct vop_poll_args a;
+ int rc;
+ a.a_desc = VDESC(vop_poll);
+ a.a_vp = vp;
+ a.a_events = events;
+ a.a_cred = cred;
+ a.a_p = p;
+ rc = VCALL(vp, VOFFSET(vop_poll), &a);
+ return (rc);
+}
+struct vop_revoke_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ int a_flags;
+};
+extern struct vnodeop_desc vop_revoke_desc;
+static __inline int VOP_REVOKE __P((
+ struct vnode *vp,
+ int flags));
+static __inline int VOP_REVOKE(vp, flags)
+ struct vnode *vp;
+ int flags;
+{
+ struct vop_revoke_args a;
+ int rc;
+ a.a_desc = VDESC(vop_revoke);
+ a.a_vp = vp;
+ a.a_flags = flags;
+ rc = VCALL(vp, VOFFSET(vop_revoke), &a);
+ return (rc);
+}
+struct vop_mmap_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ int a_fflags;
+ struct ucred *a_cred;
+ struct proc *a_p;
+};
+extern struct vnodeop_desc vop_mmap_desc;
+static __inline int VOP_MMAP __P((
+ struct vnode *vp,
+ int fflags,
+ struct ucred *cred,
+ struct proc *p));
+static __inline int VOP_MMAP(vp, fflags, cred, p)
+ struct vnode *vp;
+ int fflags;
+ struct ucred *cred;
+ struct proc *p;
+{
+ struct vop_mmap_args a;
+ int rc;
+ a.a_desc = VDESC(vop_mmap);
+ a.a_vp = vp;
+ a.a_fflags = fflags;
+ a.a_cred = cred;
+ a.a_p = p;
+ rc = VCALL(vp, VOFFSET(vop_mmap), &a);
+ return (rc);
+}
+struct vop_fsync_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ struct ucred *a_cred;
+ int a_waitfor;
+ struct proc *a_p;
+};
+extern struct vnodeop_desc vop_fsync_desc;
+static __inline int VOP_FSYNC __P((
+ struct vnode *vp,
+ struct ucred *cred,
+ int waitfor,
+ struct proc *p));
+static __inline int VOP_FSYNC(vp, cred, waitfor, p)
+ struct vnode *vp;
+ struct ucred *cred;
+ int waitfor;
+ struct proc *p;
+{
+ struct vop_fsync_args a;
+ int rc;
+ a.a_desc = VDESC(vop_fsync);
+ a.a_vp = vp;
+ a.a_cred = cred;
+ a.a_waitfor = waitfor;
+ a.a_p = p;
+ rc = VCALL(vp, VOFFSET(vop_fsync), &a);
+ return (rc);
+}
+struct vop_remove_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_dvp;
+ struct vnode *a_vp;
+ struct componentname *a_cnp;
+};
+extern struct vnodeop_desc vop_remove_desc;
+static __inline int VOP_REMOVE __P((
+ struct vnode *dvp,
+ struct vnode *vp,
+ struct componentname *cnp));
+static __inline int VOP_REMOVE(dvp, vp, cnp)
+ struct vnode *dvp;
+ struct vnode *vp;
+ struct componentname *cnp;
+{
+ struct vop_remove_args a;
+ int rc;
+ a.a_desc = VDESC(vop_remove);
+ a.a_dvp = dvp;
+ a.a_vp = vp;
+ a.a_cnp = cnp;
+ rc = VCALL(dvp, VOFFSET(vop_remove), &a);
+ return (rc);
+}
+struct vop_link_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_tdvp;
+ struct vnode *a_vp;
+ struct componentname *a_cnp;
+};
+extern struct vnodeop_desc vop_link_desc;
+static __inline int VOP_LINK __P((
+ struct vnode *tdvp,
+ struct vnode *vp,
+ struct componentname *cnp));
+static __inline int VOP_LINK(tdvp, vp, cnp)
+ struct vnode *tdvp;
+ struct vnode *vp;
+ struct componentname *cnp;
+{
+ struct vop_link_args a;
+ int rc;
+ a.a_desc = VDESC(vop_link);
+ a.a_tdvp = tdvp;
+ a.a_vp = vp;
+ a.a_cnp = cnp;
+ rc = VCALL(tdvp, VOFFSET(vop_link), &a);
+ return (rc);
+}
+struct vop_rename_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_fdvp;
+ struct vnode *a_fvp;
+ struct componentname *a_fcnp;
+ struct vnode *a_tdvp;
+ struct vnode *a_tvp;
+ struct componentname *a_tcnp;
+};
+extern struct vnodeop_desc vop_rename_desc;
+static __inline int VOP_RENAME __P((
+ struct vnode *fdvp,
+ struct vnode *fvp,
+ struct componentname *fcnp,
+ struct vnode *tdvp,
+ struct vnode *tvp,
+ struct componentname *tcnp));
+static __inline int VOP_RENAME(fdvp, fvp, fcnp, tdvp, tvp, tcnp)
+ struct vnode *fdvp;
+ struct vnode *fvp;
+ struct componentname *fcnp;
+ struct vnode *tdvp;
+ struct vnode *tvp;
+ struct componentname *tcnp;
+{
+ struct vop_rename_args a;
+ int rc;
+ a.a_desc = VDESC(vop_rename);
+ a.a_fdvp = fdvp;
+ a.a_fvp = fvp;
+ a.a_fcnp = fcnp;
+ a.a_tdvp = tdvp;
+ a.a_tvp = tvp;
+ a.a_tcnp = tcnp;
+ rc = VCALL(fdvp, VOFFSET(vop_rename), &a);
+ return (rc);
+}
+struct vop_mkdir_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_dvp;
+ struct vnode **a_vpp;
+ struct componentname *a_cnp;
+ struct vattr *a_vap;
+};
+extern struct vnodeop_desc vop_mkdir_desc;
+static __inline int VOP_MKDIR __P((
+ struct vnode *dvp,
+ struct vnode **vpp,
+ struct componentname *cnp,
+ struct vattr *vap));
+static __inline int VOP_MKDIR(dvp, vpp, cnp, vap)
+ struct vnode *dvp;
+ struct vnode **vpp;
+ struct componentname *cnp;
+ struct vattr *vap;
+{
+ struct vop_mkdir_args a;
+ int rc;
+ a.a_desc = VDESC(vop_mkdir);
+ a.a_dvp = dvp;
+ a.a_vpp = vpp;
+ a.a_cnp = cnp;
+ a.a_vap = vap;
+ rc = VCALL(dvp, VOFFSET(vop_mkdir), &a);
+ return (rc);
+}
+struct vop_rmdir_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_dvp;
+ struct vnode *a_vp;
+ struct componentname *a_cnp;
+};
+extern struct vnodeop_desc vop_rmdir_desc;
+static __inline int VOP_RMDIR __P((
+ struct vnode *dvp,
+ struct vnode *vp,
+ struct componentname *cnp));
+static __inline int VOP_RMDIR(dvp, vp, cnp)
+ struct vnode *dvp;
+ struct vnode *vp;
+ struct componentname *cnp;
+{
+ struct vop_rmdir_args a;
+ int rc;
+ a.a_desc = VDESC(vop_rmdir);
+ a.a_dvp = dvp;
+ a.a_vp = vp;
+ a.a_cnp = cnp;
+ rc = VCALL(dvp, VOFFSET(vop_rmdir), &a);
+ return (rc);
+}
+struct vop_symlink_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_dvp;
+ struct vnode **a_vpp;
+ struct componentname *a_cnp;
+ struct vattr *a_vap;
+ char *a_target;
+};
+extern struct vnodeop_desc vop_symlink_desc;
+static __inline int VOP_SYMLINK __P((
+ struct vnode *dvp,
+ struct vnode **vpp,
+ struct componentname *cnp,
+ struct vattr *vap,
+ char *target));
+static __inline int VOP_SYMLINK(dvp, vpp, cnp, vap, target)
+ struct vnode *dvp;
+ struct vnode **vpp;
+ struct componentname *cnp;
+ struct vattr *vap;
+ char *target;
+{
+ struct vop_symlink_args a;
+ int rc;
+ a.a_desc = VDESC(vop_symlink);
+ a.a_dvp = dvp;
+ a.a_vpp = vpp;
+ a.a_cnp = cnp;
+ a.a_vap = vap;
+ a.a_target = target;
+ rc = VCALL(dvp, VOFFSET(vop_symlink), &a);
+ return (rc);
+}
+struct vop_readdir_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ struct uio *a_uio;
+ struct ucred *a_cred;
+ int *a_eofflag;
+ int *a_ncookies;
+ u_long **a_cookies;
+};
+extern struct vnodeop_desc vop_readdir_desc;
+static __inline int VOP_READDIR __P((
+ struct vnode *vp,
+ struct uio *uio,
+ struct ucred *cred,
+ int *eofflag,
+ int *ncookies,
+ u_long **cookies));
+static __inline int VOP_READDIR(vp, uio, cred, eofflag, ncookies, cookies)
+ struct vnode *vp;
+ struct uio *uio;
+ struct ucred *cred;
+ int *eofflag;
+ int *ncookies;
+ u_long **cookies;
+{
+ struct vop_readdir_args a;
+ int rc;
+ a.a_desc = VDESC(vop_readdir);
+ a.a_vp = vp;
+ a.a_uio = uio;
+ a.a_cred = cred;
+ a.a_eofflag = eofflag;
+ a.a_ncookies = ncookies;
+ a.a_cookies = cookies;
+ rc = VCALL(vp, VOFFSET(vop_readdir), &a);
+ return (rc);
+}
+struct vop_readlink_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ struct uio *a_uio;
+ struct ucred *a_cred;
+};
+extern struct vnodeop_desc vop_readlink_desc;
+static __inline int VOP_READLINK __P((
+ struct vnode *vp,
+ struct uio *uio,
+ struct ucred *cred));
+static __inline int VOP_READLINK(vp, uio, cred)
+ struct vnode *vp;
+ struct uio *uio;
+ struct ucred *cred;
+{
+ struct vop_readlink_args a;
+ int rc;
+ a.a_desc = VDESC(vop_readlink);
+ a.a_vp = vp;
+ a.a_uio = uio;
+ a.a_cred = cred;
+ rc = VCALL(vp, VOFFSET(vop_readlink), &a);
+ return (rc);
+}
+struct vop_inactive_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ struct proc *a_p;
+};
+extern struct vnodeop_desc vop_inactive_desc;
+static __inline int VOP_INACTIVE __P((
+ struct vnode *vp,
+ struct proc *p));
+static __inline int VOP_INACTIVE(vp, p)
+ struct vnode *vp;
+ struct proc *p;
+{
+ struct vop_inactive_args a;
+ int rc;
+ a.a_desc = VDESC(vop_inactive);
+ a.a_vp = vp;
+ a.a_p = p;
+ rc = VCALL(vp, VOFFSET(vop_inactive), &a);
+ return (rc);
+}
+struct vop_reclaim_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ struct proc *a_p;
+};
+extern struct vnodeop_desc vop_reclaim_desc;
+static __inline int VOP_RECLAIM __P((
+ struct vnode *vp,
+ struct proc *p));
+static __inline int VOP_RECLAIM(vp, p)
+ struct vnode *vp;
+ struct proc *p;
+{
+ struct vop_reclaim_args a;
+ int rc;
+ a.a_desc = VDESC(vop_reclaim);
+ a.a_vp = vp;
+ a.a_p = p;
+ rc = VCALL(vp, VOFFSET(vop_reclaim), &a);
+ return (rc);
+}
+struct vop_lock_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ int a_flags;
+ struct proc *a_p;
+};
+extern struct vnodeop_desc vop_lock_desc;
+static __inline int VOP_LOCK __P((
+ struct vnode *vp,
+ int flags,
+ struct proc *p));
+static __inline int VOP_LOCK(vp, flags, p)
+ struct vnode *vp;
+ int flags;
+ struct proc *p;
+{
+ struct vop_lock_args a;
+ int rc;
+ a.a_desc = VDESC(vop_lock);
+ a.a_vp = vp;
+ a.a_flags = flags;
+ a.a_p = p;
+ rc = VCALL(vp, VOFFSET(vop_lock), &a);
+ return (rc);
+}
+struct vop_unlock_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ int a_flags;
+ struct proc *a_p;
+};
+extern struct vnodeop_desc vop_unlock_desc;
+static __inline int VOP_UNLOCK __P((
+ struct vnode *vp,
+ int flags,
+ struct proc *p));
+static __inline int VOP_UNLOCK(vp, flags, p)
+ struct vnode *vp;
+ int flags;
+ struct proc *p;
+{
+ struct vop_unlock_args a;
+ int rc;
+ a.a_desc = VDESC(vop_unlock);
+ a.a_vp = vp;
+ a.a_flags = flags;
+ a.a_p = p;
+ rc = VCALL(vp, VOFFSET(vop_unlock), &a);
+ return (rc);
+}
+struct vop_bmap_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ daddr_t a_bn;
+ struct vnode **a_vpp;
+ daddr_t *a_bnp;
+ int *a_runp;
+ int *a_runb;
+};
+extern struct vnodeop_desc vop_bmap_desc;
+static __inline int VOP_BMAP __P((
+ struct vnode *vp,
+ daddr_t bn,
+ struct vnode **vpp,
+ daddr_t *bnp,
+ int *runp,
+ int *runb));
+static __inline int VOP_BMAP(vp, bn, vpp, bnp, runp, runb)
+ struct vnode *vp;
+ daddr_t bn;
+ struct vnode **vpp;
+ daddr_t *bnp;
+ int *runp;
+ int *runb;
+{
+ struct vop_bmap_args a;
+ int rc;
+ a.a_desc = VDESC(vop_bmap);
+ a.a_vp = vp;
+ a.a_bn = bn;
+ a.a_vpp = vpp;
+ a.a_bnp = bnp;
+ a.a_runp = runp;
+ a.a_runb = runb;
+ rc = VCALL(vp, VOFFSET(vop_bmap), &a);
+ return (rc);
+}
+struct vop_strategy_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ struct buf *a_bp;
+};
+extern struct vnodeop_desc vop_strategy_desc;
+static __inline int VOP_STRATEGY __P((
+ struct vnode *vp,
+ struct buf *bp));
+static __inline int VOP_STRATEGY(vp, bp)
+ struct vnode *vp;
+ struct buf *bp;
+{
+ struct vop_strategy_args a;
+ int rc;
+ a.a_desc = VDESC(vop_strategy);
+ a.a_vp = vp;
+ a.a_bp = bp;
+ rc = VCALL(vp, VOFFSET(vop_strategy), &a);
+ return (rc);
+}
+struct vop_print_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+};
+extern struct vnodeop_desc vop_print_desc;
+static __inline int VOP_PRINT __P((
+ struct vnode *vp));
+static __inline int VOP_PRINT(vp)
+ struct vnode *vp;
+{
+ struct vop_print_args a;
+ int rc;
+ a.a_desc = VDESC(vop_print);
+ a.a_vp = vp;
+ rc = VCALL(vp, VOFFSET(vop_print), &a);
+ return (rc);
+}
+struct vop_pathconf_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ int a_name;
+ register_t *a_retval;
+};
+extern struct vnodeop_desc vop_pathconf_desc;
+static __inline int VOP_PATHCONF __P((
+ struct vnode *vp,
+ int name,
+ register_t *retval));
+static __inline int VOP_PATHCONF(vp, name, retval)
+ struct vnode *vp;
+ int name;
+ register_t *retval;
+{
+ struct vop_pathconf_args a;
+ int rc;
+ a.a_desc = VDESC(vop_pathconf);
+ a.a_vp = vp;
+ a.a_name = name;
+ a.a_retval = retval;
+ rc = VCALL(vp, VOFFSET(vop_pathconf), &a);
+ return (rc);
+}
+struct vop_advlock_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ caddr_t a_id;
+ int a_op;
+ struct flock *a_fl;
+ int a_flags;
+};
+extern struct vnodeop_desc vop_advlock_desc;
+static __inline int VOP_ADVLOCK __P((
+ struct vnode *vp,
+ caddr_t id,
+ int op,
+ struct flock *fl,
+ int flags));
+static __inline int VOP_ADVLOCK(vp, id, op, fl, flags)
+ struct vnode *vp;
+ caddr_t id;
+ int op;
+ struct flock *fl;
+ int flags;
+{
+ struct vop_advlock_args a;
+ int rc;
+ a.a_desc = VDESC(vop_advlock);
+ a.a_vp = vp;
+ a.a_id = id;
+ a.a_op = op;
+ a.a_fl = fl;
+ a.a_flags = flags;
+ rc = VCALL(vp, VOFFSET(vop_advlock), &a);
+ return (rc);
+}
+struct vop_balloc_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ off_t a_startoffset;
+ int a_size;
+ struct ucred *a_cred;
+ int a_flags;
+ struct buf **a_bpp;
+};
+extern struct vnodeop_desc vop_balloc_desc;
+static __inline int VOP_BALLOC __P((
+ struct vnode *vp,
+ off_t startoffset,
+ int size,
+ struct ucred *cred,
+ int flags,
+ struct buf **bpp));
+static __inline int VOP_BALLOC(vp, startoffset, size, cred, flags, bpp)
+ struct vnode *vp;
+ off_t startoffset;
+ int size;
+ struct ucred *cred;
+ int flags;
+ struct buf **bpp;
+{
+ struct vop_balloc_args a;
+ int rc;
+ a.a_desc = VDESC(vop_balloc);
+ a.a_vp = vp;
+ a.a_startoffset = startoffset;
+ a.a_size = size;
+ a.a_cred = cred;
+ a.a_flags = flags;
+ a.a_bpp = bpp;
+ rc = VCALL(vp, VOFFSET(vop_balloc), &a);
+ return (rc);
+}
+struct vop_reallocblks_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ struct cluster_save *a_buflist;
+};
+extern struct vnodeop_desc vop_reallocblks_desc;
+static __inline int VOP_REALLOCBLKS __P((
+ struct vnode *vp,
+ struct cluster_save *buflist));
+static __inline int VOP_REALLOCBLKS(vp, buflist)
+ struct vnode *vp;
+ struct cluster_save *buflist;
+{
+ struct vop_reallocblks_args a;
+ int rc;
+ a.a_desc = VDESC(vop_reallocblks);
+ a.a_vp = vp;
+ a.a_buflist = buflist;
+ rc = VCALL(vp, VOFFSET(vop_reallocblks), &a);
+ return (rc);
+}
+struct vop_getpages_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ vm_page_t *a_m;
+ int a_count;
+ int a_reqpage;
+ vm_ooffset_t a_offset;
+};
+extern struct vnodeop_desc vop_getpages_desc;
+static __inline int VOP_GETPAGES __P((
+ struct vnode *vp,
+ vm_page_t *m,
+ int count,
+ int reqpage,
+ vm_ooffset_t offset));
+static __inline int VOP_GETPAGES(vp, m, count, reqpage, offset)
+ struct vnode *vp;
+ vm_page_t *m;
+ int count;
+ int reqpage;
+ vm_ooffset_t offset;
+{
+ struct vop_getpages_args a;
+ int rc;
+ a.a_desc = VDESC(vop_getpages);
+ a.a_vp = vp;
+ a.a_m = m;
+ a.a_count = count;
+ a.a_reqpage = reqpage;
+ a.a_offset = offset;
+ rc = VCALL(vp, VOFFSET(vop_getpages), &a);
+ return (rc);
+}
+struct vop_putpages_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ vm_page_t *a_m;
+ int a_count;
+ int a_sync;
+ int *a_rtvals;
+ vm_ooffset_t a_offset;
+};
+extern struct vnodeop_desc vop_putpages_desc;
+static __inline int VOP_PUTPAGES __P((
+ struct vnode *vp,
+ vm_page_t *m,
+ int count,
+ int sync,
+ int *rtvals,
+ vm_ooffset_t offset));
+static __inline int VOP_PUTPAGES(vp, m, count, sync, rtvals, offset)
+ struct vnode *vp;
+ vm_page_t *m;
+ int count;
+ int sync;
+ int *rtvals;
+ vm_ooffset_t offset;
+{
+ struct vop_putpages_args a;
+ int rc;
+ a.a_desc = VDESC(vop_putpages);
+ a.a_vp = vp;
+ a.a_m = m;
+ a.a_count = count;
+ a.a_sync = sync;
+ a.a_rtvals = rtvals;
+ a.a_offset = offset;
+ rc = VCALL(vp, VOFFSET(vop_putpages), &a);
+ return (rc);
+}
+struct vop_freeblks_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ daddr_t a_addr;
+ daddr_t a_length;
+};
+extern struct vnodeop_desc vop_freeblks_desc;
+static __inline int VOP_FREEBLKS __P((
+ struct vnode *vp,
+ daddr_t addr,
+ daddr_t length));
+static __inline int VOP_FREEBLKS(vp, addr, length)
+ struct vnode *vp;
+ daddr_t addr;
+ daddr_t length;
+{
+ struct vop_freeblks_args a;
+ int rc;
+ a.a_desc = VDESC(vop_freeblks);
+ a.a_vp = vp;
+ a.a_addr = addr;
+ a.a_length = length;
+ rc = VCALL(vp, VOFFSET(vop_freeblks), &a);
+ return (rc);
+}
+struct vop_bwrite_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ struct buf *a_bp;
+};
+extern struct vnodeop_desc vop_bwrite_desc;
+static __inline int VOP_BWRITE __P((
+ struct vnode *vp,
+ struct buf *bp));
+static __inline int VOP_BWRITE(vp, bp)
+ struct vnode *vp;
+ struct buf *bp;
+{
+ struct vop_bwrite_args a;
+ int rc;
+ a.a_desc = VDESC(vop_bwrite);
+ a.a_vp = vp;
+ a.a_bp = bp;
+ rc = VCALL(vp, VOFFSET(vop_bwrite), &a);
+ return (rc);
+}
+struct vop_getacl_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ acl_type_t a_type;
+ struct acl *a_aclp;
+ struct ucred *a_cred;
+ struct proc *a_p;
+};
+extern struct vnodeop_desc vop_getacl_desc;
+static __inline int VOP_GETACL __P((
+ struct vnode *vp,
+ acl_type_t type,
+ struct acl *aclp,
+ struct ucred *cred,
+ struct proc *p));
+static __inline int VOP_GETACL(vp, type, aclp, cred, p)
+ struct vnode *vp;
+ acl_type_t type;
+ struct acl *aclp;
+ struct ucred *cred;
+ struct proc *p;
+{
+ struct vop_getacl_args a;
+ int rc;
+ a.a_desc = VDESC(vop_getacl);
+ a.a_vp = vp;
+ a.a_type = type;
+ a.a_aclp = aclp;
+ a.a_cred = cred;
+ a.a_p = p;
+ rc = VCALL(vp, VOFFSET(vop_getacl), &a);
+ return (rc);
+}
+struct vop_setacl_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ acl_type_t a_type;
+ struct acl *a_aclp;
+ struct ucred *a_cred;
+ struct proc *a_p;
+};
+extern struct vnodeop_desc vop_setacl_desc;
+static __inline int VOP_SETACL __P((
+ struct vnode *vp,
+ acl_type_t type,
+ struct acl *aclp,
+ struct ucred *cred,
+ struct proc *p));
+static __inline int VOP_SETACL(vp, type, aclp, cred, p)
+ struct vnode *vp;
+ acl_type_t type;
+ struct acl *aclp;
+ struct ucred *cred;
+ struct proc *p;
+{
+ struct vop_setacl_args a;
+ int rc;
+ a.a_desc = VDESC(vop_setacl);
+ a.a_vp = vp;
+ a.a_type = type;
+ a.a_aclp = aclp;
+ a.a_cred = cred;
+ a.a_p = p;
+ rc = VCALL(vp, VOFFSET(vop_setacl), &a);
+ return (rc);
+}
+struct vop_aclcheck_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ acl_type_t a_type;
+ struct acl *a_aclp;
+ struct ucred *a_cred;
+ struct proc *a_p;
+};
+extern struct vnodeop_desc vop_aclcheck_desc;
+static __inline int VOP_ACLCHECK __P((
+ struct vnode *vp,
+ acl_type_t type,
+ struct acl *aclp,
+ struct ucred *cred,
+ struct proc *p));
+static __inline int VOP_ACLCHECK(vp, type, aclp, cred, p)
+ struct vnode *vp;
+ acl_type_t type;
+ struct acl *aclp;
+ struct ucred *cred;
+ struct proc *p;
+{
+ struct vop_aclcheck_args a;
+ int rc;
+ a.a_desc = VDESC(vop_aclcheck);
+ a.a_vp = vp;
+ a.a_type = type;
+ a.a_aclp = aclp;
+ a.a_cred = cred;
+ a.a_p = p;
+ rc = VCALL(vp, VOFFSET(vop_aclcheck), &a);
+ return (rc);
+}
+struct vop_getextattr_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ char *a_name;
+ struct uio *a_uio;
+ struct ucred *a_cred;
+ struct proc *a_p;
+};
+extern struct vnodeop_desc vop_getextattr_desc;
+static __inline int VOP_GETEXTATTR __P((
+ struct vnode *vp,
+ char *name,
+ struct uio *uio,
+ struct ucred *cred,
+ struct proc *p));
+static __inline int VOP_GETEXTATTR(vp, name, uio, cred, p)
+ struct vnode *vp;
+ char *name;
+ struct uio *uio;
+ struct ucred *cred;
+ struct proc *p;
+{
+ struct vop_getextattr_args a;
+ int rc;
+ a.a_desc = VDESC(vop_getextattr);
+ a.a_vp = vp;
+ a.a_name = name;
+ a.a_uio = uio;
+ a.a_cred = cred;
+ a.a_p = p;
+ rc = VCALL(vp, VOFFSET(vop_getextattr), &a);
+ return (rc);
+}
+struct vop_setextattr_args {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ char *a_name;
+ struct uio *a_uio;
+ struct ucred *a_cred;
+ struct proc *a_p;
+};
+extern struct vnodeop_desc vop_setextattr_desc;
+static __inline int VOP_SETEXTATTR __P((
+ struct vnode *vp,
+ char *name,
+ struct uio *uio,
+ struct ucred *cred,
+ struct proc *p));
+static __inline int VOP_SETEXTATTR(vp, name, uio, cred, p)
+ struct vnode *vp;
+ char *name;
+ struct uio *uio;
+ struct ucred *cred;
+ struct proc *p;
+{
+ struct vop_setextattr_args a;
+ int rc;
+ a.a_desc = VDESC(vop_setextattr);
+ a.a_vp = vp;
+ a.a_name = name;
+ a.a_uio = uio;
+ a.a_cred = cred;
+ a.a_p = p;
+ rc = VCALL(vp, VOFFSET(vop_setextattr), &a);
+ return (rc);
+}