4 * Copyright (C) 1997 by Bill Hawes
6 * Routines to support directory caching using the page cache.
7 * Right now this only works for smbfs, but will be generalized
8 * for use with other filesystems.
10 * Please add a note about your changes to smbfs in the ChangeLog file.
13 #include <linux/sched.h>
14 #include <linux/errno.h>
15 #include <linux/kernel.h>
17 #include <linux/dirent.h>
18 #include <linux/smb_fs.h>
19 #include <linux/pagemap.h>
23 #include "smb_debug.h"
/*
 * Recover the address_space (page-cache mapping) that a cache_head
 * belongs to.  The cache_head is the kmap'ed contents of a page-cache
 * page, so page_cache_entry() can map the pointer back to its struct
 * page, whose ->mapping is the directory's address_space.
 * NOTE(review): this chunk is a gappy extract; braces/return type
 * context are missing from view.
 */
26 static inline struct address_space *
27 get_cache_inode(struct cache_head *cachep)
29 return page_cache_entry((unsigned long) cachep)->mapping;
33 * Try to reassemble the old dircache. If we fail - set ->valid to 0.
34 * In any case, get at least the page at offset 0 (with ->valid==0 if
35 * the old one didn't make it, indeed).
/*
 * NOTE(review): many lines of this function are missing from this
 * extract (locals, error handling, closing braces) — comments below
 * describe only what the visible lines establish.
 */
38 smb_get_dircache(struct dentry * dentry)
40 struct address_space * mapping = &dentry->d_inode->i_data;
41 struct cache_head * cachep = NULL;
/* Page 0 holds the cache_head itself; try to find and lock it. */
44 page = find_lock_page(mapping, 0);
46 /* Sorry, not even page 0 around */
/* Allocate (or re-find) page 0 so we at least have an empty head. */
47 page = grab_cache_page(mapping, 0);
/* Brand-new head page: zero it so ->valid and counters start clear. */
51 memset((char*)cachep, 0, PAGE_SIZE);
57 * OK, at least the page 0 survived and seems to be promising.
58 * Let's try to reassemble the rest.
60 struct cache_index * index = cachep->index;
/* Re-lock each recorded data page; a missing page aborts reassembly. */
64 for (offset = 0, i = 0; i < cachep->pages; i++, index++) {
66 page = find_lock_page(mapping,offset>>PAGE_CACHE_SHIFT);
68 /* Alas, poor Yorick */
/* Page survived: kmap it so index->block points at its contents. */
72 index->block = kmap(page);
80 * Unlock and release the data blocks.
/*
 * NOTE(review): gappy extract — the unlock/kunmap calls implied by the
 * comment above are not visible in this view; confirm in full source.
 */
83 smb_free_cache_blocks(struct cache_head * cachep)
85 struct cache_index * index = cachep->index;
89 VERBOSE("freeing %d blocks\n", cachep->pages);
90 for (i = 0; i < cachep->pages; i++, index++) {
/* Map the kmap'ed block pointer back to its struct page ... */
93 page = page_cache_entry((unsigned long) index->block);
/* ... and drop our reference on it. */
97 page_cache_release(page);
102 * Unlocks and releases the dircache.
/*
 * Frees the data blocks first, then releases the head page (page 0)
 * that holds the cache_head structure itself.
 */
105 smb_free_dircache(struct cache_head * cachep)
108 VERBOSE("freeing cache\n");
109 smb_free_cache_blocks(cachep);
/* cachep is the mapped contents of page 0; recover and release it. */
110 page = page_cache_entry((unsigned long) cachep);
113 page_cache_release(page);
117 * Initializes the dircache. We release any existing data blocks,
118 * and then clear the cache_head structure.
121 smb_init_dircache(struct cache_head * cachep)
123 VERBOSE("initializing cache, %d blocks\n", cachep->pages);
/* Drop any pages from a previous fill before resetting the head. */
124 smb_free_cache_blocks(cachep);
/* Only the head struct is cleared here, not the whole page. */
125 memset(cachep, 0, sizeof(struct cache_head));
129 * Add a new entry to the cache. This assumes that the
130 * entries are coming in order and are added to the end.
/*
 * NOTE(review): gappy extract — fall-through/goto structure between the
 * "space available", "block full", and "failure" paths is not fully
 * visible; comments describe only the visible lines.
 */
133 smb_add_to_cache(struct cache_head * cachep, struct cache_dirent *entry,
136 struct address_space * mapping = get_cache_inode(cachep);
137 struct cache_index * index;
138 struct cache_block * block;
140 unsigned long page_off;
141 unsigned int nent, offset, len = entry->len;
/* Each entry costs its name bytes plus one fixed cache_entry record. */
142 unsigned int needed = len + sizeof(struct cache_entry);
144 VERBOSE("cache %p, status %d, adding %.*s at %ld\n",
145 mapping, cachep->status, entry->len, entry->name, fpos);
148 * Don't do anything if we've had an error ...
/* Always append into the current (last) index slot. */
153 index = &cachep->index[cachep->idx];
157 /* space available? */
158 if (needed < index->space) {
160 nent = index->num_entries;
161 index->num_entries++;
/*
 * Names are packed downward from the end of the block while the
 * cache_entry table grows upward: shrink ->space, then compute the
 * name offset just past the enlarged table.
 */
162 index->space -= needed;
163 offset = index->space +
164 index->num_entries * sizeof(struct cache_entry);
165 block = index->block;
166 memcpy(&block->cb_data.names[offset], entry->name, len);
167 block->cb_data.table[nent].namelen = len;
168 block->cb_data.table[nent].offset = offset;
169 block->cb_data.table[nent].ino = entry->ino;
172 VERBOSE("added entry %.*s, len=%d, pos=%ld, entries=%d\n",
173 entry->len, entry->name, len, fpos, cachep->entries);
177 * This block is full ... advance the index.
/*
 * NOTE(review): 'idx > NINDEX' looks like a possible off-by-one
 * (vs 'idx >= NINDEX') — confirm against the index[] array bound
 * in the full header before changing.
 */
180 if (cachep->idx > NINDEX) /* not likely */
184 * Get the next cache block. We don't care for its contents.
/* Data blocks live after the head page: page 1 + idx. */
188 page_off = PAGE_SIZE + (cachep->idx << PAGE_SHIFT);
189 page = grab_cache_page(mapping, page_off>>PAGE_CACHE_SHIFT);
192 index->block = block;
/* Fresh block: all of it is free space. */
193 index->space = PAGE_SIZE;
197 * On failure, just set the return status ...
200 cachep->status = -ENOMEM;
/*
 * Look up the cached directory entry at position 'pos' by walking the
 * per-block index, accumulating entry counts until the block that
 * covers 'pos' is found.
 * NOTE(review): gappy extract — loop body braces and the surrounding
 * position bookkeeping (next_pos init) are not visible here.
 */
206 smb_find_in_cache(struct cache_head * cachep, off_t pos,
207 struct cache_dirent *entry)
209 struct cache_index * index = cachep->index;
210 struct cache_block * block;
211 unsigned int i, nent, offset = 0;
214 VERBOSE("smb_find_in_cache: cache %p, looking for pos=%ld\n",
216 for (i = 0; i < cachep->pages; i++, index++)
/* Index of the wanted entry within this block. */
220 nent = pos - next_pos;
221 next_pos += index->num_entries;
225 * The entry is in this block. Note: we return
226 * then name as a reference with _no_ null byte.
228 block = index->block;
/* Fill the caller's cache_dirent; name points into the block. */
229 entry->ino = block->cb_data.table[nent].ino;
230 entry->len = block->cb_data.table[nent].namelen;
231 offset = block->cb_data.table[nent].offset;
232 entry->name = &block->cb_data.names[offset];
234 VERBOSE("found %.*s, len=%d, pos=%ld\n",
235 entry->len, entry->name, entry->len, pos);
/*
 * Re-read the whole directory from the server into the dircache.
 * NOTE(review): gappy extract — the retry loop structure implied by
 * "retrying" below is not visible in this view.
 */
242 smb_refill_dircache(struct cache_head * cachep, struct dentry *dentry)
244 struct inode * inode = dentry->d_inode;
247 VERBOSE("smb_refill_dircache: cache %s/%s, blocks=%d\n",
248 DENTRY_PATH(dentry), cachep->pages);
250 * Fill the cache, starting at position 2.
/* Mark valid BEFORE the scan so a concurrent invalidation is seen. */
253 inode->u.smbfs_i.cache_valid |= SMB_F_CACHEVALID;
/* Position 2 skips the implicit "." and ".." entries. */
254 result = smb_proc_readdir(dentry, 2, cachep);
257 PARANOIA("readdir failed, result=%d\n", result);
262 * Check whether the cache was invalidated while
263 * we were doing the scan ...
265 if (!(inode->u.smbfs_i.cache_valid & SMB_F_CACHEVALID))
267 PARANOIA("cache invalidated, retrying\n");
/* Propagate any -ENOMEM etc. recorded during smb_add_to_cache. */
271 result = cachep->status;
276 VERBOSE("cache %s/%s status=%d, entries=%d\n",
277 DENTRY_PATH(dentry), cachep->status, cachep->entries);
/*
 * Invalidate the directory cache for 'dir': drop its unlocked pages,
 * clear the valid flag so an in-flight refill notices, and zero the
 * remembered mtime so the next lookup re-checks the server.
 */
283 smb_invalid_dir_cache(struct inode * dir)
286 * Get rid of any unlocked pages, and clear the
287 * 'valid' flag in case a scan is in progress.
289 invalidate_inode_pages(dir);
290 dir->u.smbfs_i.cache_valid &= ~SMB_F_CACHEVALID;
291 dir->u.smbfs_i.oldmtime = 0;