#include "afs/afs_bypasscache.h"
#endif
-#include "osi_pagecopy.h"
-
#ifdef pgoff2loff
#define pageoff(pp) pgoff2loff((pp)->index)
#else
#define pageoff(pp) pp->offset
#endif
-#ifndef HAVE_LINUX_PAGEVEC_LRU_ADD_FILE
-#define __pagevec_lru_add_file __pagevec_lru_add
-#endif
-
#ifndef MAX_ERRNO
#define MAX_ERRNO 1000L
#endif
#endif /* AFS_LINUX24_ENV */
#endif /* USABLE_KERNEL_PAGE_SYMLINK_CACHE */
-#if defined(AFS_CACHE_BYPASS)
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-
-static inline int
-afs_linux_can_bypass(struct inode *ip) {
- switch(cache_bypass_strategy) {
- case NEVER_BYPASS_CACHE:
- return 0;
- case ALWAYS_BYPASS_CACHE:
- return 1;
- case LARGE_FILES_BYPASS_CACHE:
- if(i_size_read(ip) > cache_bypass_threshold)
- return 1;
- default:
- break;
- }
- return 0;
-}
-
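-/* Read a run of pages straight from the fileserver, bypassing the disk
- * cache. The pages are wired into the page cache here and handed to
- * afs_ReadNoCache(); the background thread unlocks and releases each
- * page as its data arrives. */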
-static int
-afs_linux_cache_bypass_read(struct file *fp, struct address_space *mapping,
- struct list_head *page_list, unsigned num_pages)
-{
- afs_int32 page_ix;
- uio_t *auio;
- afs_offs_t offset;
- struct iovec* iovecp;
- struct nocache_read_request *ancr;
- struct page *pp, *ppt;
- struct pagevec lrupv;
- afs_int32 code = 0;
-
- cred_t *credp;
- struct inode *ip = FILE_INODE(fp);
- struct vcache *avc = VTOAFS(ip);
- afs_int32 bypasscache = 0; /* bypass for this read */
- afs_int32 base_index = 0;
- afs_int32 page_count = 0;
- afs_int32 isize;
-
- /* background thread must free: iovecp, auio, ancr */
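- /* (they are handed off with the request; if no pages survive the loop
- * below, we free all three ourselves further down) */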
- iovecp = osi_Alloc(num_pages * sizeof(struct iovec));
-
- auio = osi_Alloc(sizeof(uio_t));
- auio->uio_iov = iovecp;
- auio->uio_iovcnt = num_pages;
- auio->uio_flag = UIO_READ;
- auio->uio_seg = AFS_UIOSYS;
- auio->uio_resid = num_pages * PAGE_SIZE;
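- /* one iovec per requested page; uio_offset is filled in once the first
- * surviving page tells us where the run starts */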
-
- ancr = osi_Alloc(sizeof(struct nocache_read_request));
- ancr->auio = auio;
- /* ancr->offset is set below, once the first page fixes uio_offset */
- ancr->length = auio->uio_resid;
-
- pagevec_init(&lrupv, 0);
-
- for(page_ix = 0; page_ix < num_pages; ++page_ix) {
-
- if(list_empty(page_list))
- break;
-
- pp = list_entry(page_list->prev, struct page, lru);
- /* If we allocate a page and don't remove it from page_list,
- * the page cache gets upset. */
- list_del(&pp->lru);
- isize = (i_size_read(fp->f_mapping->host) - 1) >> PAGE_CACHE_SHIFT;
- if(pp->index > isize) {
- if(PageLocked(pp))
- UnlockPage(pp);
- continue;
- }
-
- if(page_ix == 0) {
- offset = ((loff_t) pp->index) << PAGE_CACHE_SHIFT;
- auio->uio_offset = offset;
- base_index = pp->index;
- }
- iovecp[page_ix].iov_len = PAGE_SIZE;
- /* only a contiguous run of pages can be described by one uio, so
- * skip (and don't insert) any page that breaks the run */
- if(base_index != pp->index) {
- if(PageLocked(pp))
- UnlockPage(pp);
- page_cache_release(pp);
- iovecp[page_ix].iov_base = (void *) 0;
- base_index++;
- continue;
- }
- base_index++;
- code = add_to_page_cache(pp, mapping, pp->index, GFP_KERNEL);
- if(code) {
- if(PageLocked(pp))
- UnlockPage(pp);
- page_cache_release(pp);
- iovecp[page_ix].iov_base = (void *) 0;
- } else {
- page_count++;
- if(!PageLocked(pp)) {
- LockPage(pp);
- }
-
- /* save the page for background map */
- iovecp[page_ix].iov_base = (void*) pp;
-
- /* and put it on the LRU cache */
- if (!pagevec_add(&lrupv, pp))
- __pagevec_lru_add_file(&lrupv);
- }
- }
-
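- /* at this point iov_base carries the page pointer for every page we
- * kept, and NULL for the slots the background thread must skip */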
- /* If there were useful pages in the page list, make sure all pages
- * are in the LRU cache, then schedule the read */
- if(page_count) {
- if (pagevec_count(&lrupv))
- __pagevec_lru_add_file(&lrupv);
- ancr->offset = auio->uio_offset;
- credp = crref();
- code = afs_ReadNoCache(avc, ancr, credp);
- crfree(credp);
- } else {
- /* If there is nothing for the background thread to handle,
- * it won't be freeing the things that we never gave it */
- osi_Free(iovecp, num_pages * sizeof(struct iovec));
- osi_Free(auio, sizeof(uio_t));
- osi_Free(ancr, sizeof(struct nocache_read_request));
- }
- /* we do not flush, release, or unmap pages--that will be
- * done for us by the background thread as each page comes in
- * from the fileserver */
- return afs_convert_code(code);
-}
-
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) */
-#endif /* defined(AFS_CACHE_BYPASS) */
-
-static int
-afs_linux_read_cache(struct file *cachefp, struct page *page,
- int chunk, struct pagevec *lrupv,
- struct afs_pagecopy_task *task) {
- loff_t offset = page_offset(page);
- struct page *newpage, *cachepage;
- struct address_space *cachemapping;
- int pageindex;
- int code = 0;
-
- cachemapping = cachefp->f_dentry->d_inode->i_mapping;
- newpage = NULL;
- cachepage = NULL;
-
- /* From our offset, we now need to work out which page in the disk
- * file it corresponds to. This will be fun ... */
- pageindex = (offset - AFS_CHUNKTOBASE(chunk)) >> PAGE_CACHE_SHIFT;
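- /* for instance, assuming 4K pages (PAGE_CACHE_SHIFT == 12) and 1MB
- * chunks: offset 0x123000 in chunk 1 (base 0x100000) maps to page
- * index (0x23000 >> 12) == 0x23 of the cache file */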
-
- while (cachepage == NULL) {
- cachepage = find_get_page(cachemapping, pageindex);
- if (!cachepage) {
- if (!newpage)
- newpage = page_cache_alloc_cold(cachemapping);
- if (!newpage) {
- code = -ENOMEM;
- goto out;
- }
-
- code = add_to_page_cache(newpage, cachemapping,
- pageindex, GFP_KERNEL);
- if (code == 0) {
- cachepage = newpage;
- newpage = NULL;
-
- page_cache_get(cachepage);
- if (!pagevec_add(lrupv, cachepage))
- __pagevec_lru_add_file(lrupv);
-
- } else {
- page_cache_release(newpage);
- newpage = NULL;
- if (code != -EEXIST)
- goto out;
- }
- } else {
- lock_page(cachepage);
- }
- }
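- /* cachepage is now locked and held: add_to_page_cache() returns the
- * new page locked, and pages found in the cache are locked above */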
-
- if (!PageUptodate(cachepage)) {
- ClearPageError(cachepage);
- code = cachemapping->a_ops->readpage(NULL, cachepage);
- if (!code && !task) {
- wait_on_page_locked(cachepage);
- }
- } else {
- unlock_page(cachepage);
- }
-
- if (!code) {
- if (PageUptodate(cachepage)) {
- copy_highpage(page, cachepage);
- flush_dcache_page(page);
- SetPageUptodate(page);
- UnlockPage(page);
- } else if (task) {
- afs_pagecopy_queue_page(task, cachepage, page);
- } else {
- code = -EIO;
- }
- }
-
-out:
- /* error exits must still hand the target page back unlocked */
- if (code) {
- UnlockPage(page);
- }
-
- if (cachepage)
- page_cache_release(cachepage);
-
- return code;
-}
-
-static inline int
-afs_linux_readpage_fastpath(struct file *fp, struct page *pp, int *codep)
-{
- loff_t offset = page_offset(pp);
- struct inode *ip = FILE_INODE(fp);
- struct vcache *avc = VTOAFS(ip);
- struct dcache *tdc;
- struct file *cacheFp = NULL;
- int code;
- int dcLocked = 0;
- struct pagevec lrupv;
-
- /* Not a UFS cache, don't do anything */
- if (cacheDiskType != AFS_FCACHE_TYPE_UFS)
- return 0;
-
- /* Can't do anything if the vcache isn't statd, or if the read
- * crosses a chunk boundary.
- */
- if (!(avc->f.states & CStatd) ||
- AFS_CHUNK(offset) != AFS_CHUNK(offset + PAGE_SIZE)) {
- return 0;
- }
-
- ObtainWriteLock(&avc->lock, 911);
-
- /* XXX - See if hinting actually makes things faster !!! */
-
- /* See if we have a suitable entry already cached */
- tdc = avc->dchint;
-
- if (tdc) {
- /* We need to lock xdcache, then dcache, to handle situations where
- * the hint is on the free list. However, we can't safely do this
- * according to the locking hierarchy. So, use a non blocking lock.
- */
- ObtainReadLock(&afs_xdcache);
- dcLocked = (0 == NBObtainReadLock(&tdc->lock));
-
- if (dcLocked && (tdc->index != NULLIDX)
- && !FidCmp(&tdc->f.fid, &avc->f.fid)
- && tdc->f.chunk == AFS_CHUNK(offset)
- && !(afs_indexFlags[tdc->index] & (IFFree | IFDiscarded))) {
- /* Bonus - the hint was correct */
- afs_RefDCache(tdc);
- } else {
- /* Only destroy the hint if it's actually invalid, not if there's
- * just been a locking failure */
- if (dcLocked) {
- ReleaseReadLock(&tdc->lock);
- avc->dchint = NULL;
- }
-
- tdc = NULL;
- dcLocked = 0;
- }
- ReleaseReadLock(&afs_xdcache);
- }
-
- /* No hint, or hint is no longer valid - see if we can get something
- * directly from the dcache
- */
- if (!tdc)
- tdc = afs_FindDCache(avc, offset);
-
- if (!tdc) {
- ReleaseWriteLock(&avc->lock);
- return 0;
- }
-
- if (!dcLocked)
- ObtainReadLock(&tdc->lock);
-
- /* Is the dcache we've been given currently up to date? */
- if (!hsame(avc->f.m.DataVersion, tdc->f.versionNo) ||
- (tdc->dflags & DFFetching)) {
- ReleaseWriteLock(&avc->lock);
- ReleaseReadLock(&tdc->lock);
- afs_PutDCache(tdc);
- return 0;
- }
-
- /* Update our hint for future abuse */
- avc->dchint = tdc;
-
- /* Okay, so we've now got a cache file that is up to date */
-
- /* XXX - I suspect we should be locking the inodes before we use them! */
- AFS_GUNLOCK();
- cacheFp = afs_linux_raw_open(&tdc->f.inode, NULL);
- pagevec_init(&lrupv, 0);
-
- code = afs_linux_read_cache(cacheFp, pp, tdc->f.chunk, &lrupv, NULL);
-
- if (pagevec_count(&lrupv))
- __pagevec_lru_add_file(&lrupv);
-
- filp_close(cacheFp, NULL);
- AFS_GLOCK();
-
- ReleaseReadLock(&tdc->lock);
- ReleaseWriteLock(&avc->lock);
- afs_PutDCache(tdc);
-
- *codep = code;
- return 1;
-}
-
/* afs_linux_readpage
* all reads come through here. A strategy-like read call.
*/
struct vcache *avc = VTOAFS(ip);
cred_t *credp;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- AFS_GLOCK();
- if (afs_linux_readpage_fastpath(fp, pp, &code)) {
- AFS_GUNLOCK();
- return code;
- }
- AFS_GUNLOCK();
-#endif
-
credp = crref();
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
address = kmap(pp);
atomic_add(1, &pp->count);
set_bit(PG_locked, &pp->flags); /* other bits? See mm.h */
clear_bit(PG_error, &pp->flags);
-#endif
-#if defined(AFS_CACHE_BYPASS)
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- /* If the page is past the end of the file, skip it */
- isize = (i_size_read(fp->f_mapping->host) - 1) >> PAGE_CACHE_SHIFT;
- if(pp->index > isize) {
- if(PageLocked(pp))
- UnlockPage(pp);
- goto done;
- }
-#endif
#endif
/* if bypasscache, receiver frees, else we do */
auio = osi_Alloc(sizeof(uio_t));
return afs_convert_code(code);
}
-/* Readpages reads a number of pages for a particular file. We use
- * this to optimise reading by limiting the number of times we have to
- * look up, lock and open vcaches and dcaches.
- */
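-/* A dcache entry, its read lock and the matching open cache file are
- * reused across consecutive pages in the same chunk, and dropped only
- * when a page crosses a chunk boundary. */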
-
-static int
-afs_linux_readpages(struct file *fp, struct address_space *mapping,
- struct list_head *page_list, unsigned int num_pages)
-{
- struct inode *inode = mapping->host;
- struct vcache *avc = VTOAFS(inode);
- struct dcache *tdc;
- struct file *cacheFp = NULL;
- int code;
- unsigned int page_idx;
- loff_t offset;
- struct pagevec lrupv;
- struct afs_pagecopy_task *task;
-
-#if defined(AFS_CACHE_BYPASS)
- {
- cred_t *credp = crref();
- int bypasscache = afs_linux_can_bypass(inode);
-
- /* In the new incarnation of selective caching, a file's caching policy
- * can change, eg because file size exceeds threshold, etc. */
- trydo_cache_transition(avc, credp, bypasscache);
- crfree(credp);
-
- if (bypasscache)
- return afs_linux_cache_bypass_read(fp, mapping, page_list, num_pages);
- }
-#endif
-
- AFS_GLOCK();
- if ((code = afs_linux_VerifyVCache(avc, NULL))) {
- AFS_GUNLOCK();
- return code;
- }
-
- ObtainWriteLock(&avc->lock, 912);
- AFS_GUNLOCK();
-
- task = afs_pagecopy_init_task();
-
- tdc = NULL;
- pagevec_init(&lrupv, 0);
- for (page_idx = 0; page_idx < num_pages; page_idx++) {
- struct page *page = list_entry(page_list->prev, struct page, lru);
- list_del(&page->lru);
- offset = page_offset(page);
-
- if (tdc && tdc->f.chunk != AFS_CHUNK(offset)) {
- AFS_GLOCK();
- ReleaseReadLock(&tdc->lock);
- afs_PutDCache(tdc);
- AFS_GUNLOCK();
- tdc = NULL;
- if (cacheFp) {
- filp_close(cacheFp, NULL);
- cacheFp = NULL;
- }
- }
-
- if (!tdc) {
- AFS_GLOCK();
- if ((tdc = afs_FindDCache(avc, offset))) {
- ObtainReadLock(&tdc->lock);
- if (!hsame(avc->f.m.DataVersion, tdc->f.versionNo) ||
- (tdc->dflags & DFFetching)) {
- ReleaseReadLock(&tdc->lock);
- afs_PutDCache(tdc);
- tdc = NULL;
- }
- }
- AFS_GUNLOCK();
- if (tdc)
- cacheFp = afs_linux_raw_open(&tdc->f.inode, NULL);
- }
-
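- /* add_to_page_cache() fails with -EEXIST if someone raced us in; in
- * that case, or when we have no usable chunk, the page is just
- * released below */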
- if (tdc && !add_to_page_cache(page, mapping, page->index,
- GFP_KERNEL)) {
- page_cache_get(page);
- if (!pagevec_add(&lrupv, page))
- __pagevec_lru_add_file(&lrupv);
-
- afs_linux_read_cache(cacheFp, page, tdc->f.chunk, &lrupv, task);
- }
- page_cache_release(page);
- }
- if (pagevec_count(&lrupv))
- __pagevec_lru_add_file(&lrupv);
-
- if (tdc)
- filp_close(cacheFp, NULL);
-
- afs_pagecopy_put_task(task);
-
- AFS_GLOCK();
- if (tdc) {
- ReleaseReadLock(&tdc->lock);
- afs_PutDCache(tdc);
- }
-
- ReleaseWriteLock(&avc->lock);
- AFS_GUNLOCK();
- return 0;
-}
-
#if defined(AFS_LINUX24_ENV)
static int
afs_linux_writepage_sync(struct inode *ip, struct page *pp,
#if defined(AFS_LINUX24_ENV)
static struct address_space_operations afs_file_aops = {
.readpage = afs_linux_readpage,
- .readpages = afs_linux_readpages,
.writepage = afs_linux_writepage,
#if defined (STRUCT_ADDRESS_SPACE_OPERATIONS_HAS_WRITE_BEGIN)
.write_begin = afs_linux_write_begin,