echo "/*"; \
echo " * DO NOT MODIFY."; \
echo " *"; \
- echo " * This file was generated by $(srctree)/Kbuild"; \
- echo " *"; \
echo " */"; \
echo ""; \
- sed -ne $(sed-y); \
+ sed -ne $(sed-y) $<; \
echo ""; \
echo "#endif" ) > $@
endef
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 15
--EXTRAVERSION =
++EXTRAVERSION = -git8
NAME=Sliding Snow Leopard
# *DOCUMENTATION*
-ffreestanding
AFLAGS := -D__ASSEMBLY__
-# Read KERNELRELEASE from .kernelrelease (if it exists)
-KERNELRELEASE = $(shell cat .kernelrelease 2> /dev/null)
+# Warn about unsupported modules in kernels built inside Autobuild
+ifneq ($(wildcard /.buildenv),)
+CFLAGS += -DUNSUPPORTED_MODULES=1
+endif
+
- export VERSION PATCHLEVEL SUBLEVEL EXTRAVERSION LOCALVERSION KERNELRELEASE \
++ifneq ($(wildcard $(srctree)/rpm-release),)
++RPM_RELEASE := -$(shell cat $(srctree)/rpm-release)
++endif
++
++KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)$(RPM_RELEASE)$(LOCALVERSION)
++
+
+ export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE \
ARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC \
CPP AR NM STRIP OBJCOPY OBJDUMP MAKE AWK GENKSYMS PERL UTS_MACHINE \
HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
bust_spinlocks(0);
die.lock_owner = -1;
- spin_unlock_irq(&die.lock);
+ spin_unlock_irqrestore(&die.lock, flags);
+#ifdef CONFIG_KDB
+ kdb_diemsg = str;
+ kdb(KDB_REASON_OOPS, err, regs);
+#endif /* CONFIG_KDB */
if (kexec_should_crash(current))
crash_kexec(regs);
printk(" on CPU%d, eip %08lx, registers:\n",
smp_processor_id(), regs->eip);
show_registers(regs);
- printk("console shuts up ...\n");
+ printk(KERN_EMERG "console shuts up ...\n");
+#ifdef CONFIG_KDB
+ kdb(KDB_REASON_NMI, 0, regs);
+#endif /* CONFIG_KDB */
console_silent();
spin_unlock(&nmi_print_lock);
bust_spinlocks(0);
#endif
set_trap_gate(19,&simd_coprocessor_error);
+ if (cpu_has_fxsr) {
+ /*
+ * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
+ * Generates a compile-time "error: zero width for bit-field" if
+ * the alignment is wrong.
+ */
+ struct fxsrAlignAssert {
+ int _:!(offsetof(struct task_struct,
+ thread.i387.fxsave) & 15);
+ };
+
+ printk(KERN_INFO "Enabling fast FPU save and restore... ");
+ set_in_cr4(X86_CR4_OSFXSR);
+ printk("done.\n");
+ }
+ if (cpu_has_xmm) {
+ printk(KERN_INFO "Enabling unmasked SIMD FPU exception "
+ "support... ");
+ set_in_cr4(X86_CR4_OSXMMEXCPT);
+ printk("done.\n");
+ }
+
set_system_gate(SYSCALL_VECTOR,&system_call);
+#ifdef CONFIG_KDB
+ kdb_enablehwfault();
+ /*
+ * A trap gate, used by the kernel to enter the
+ * debugger, preserving all registers.
+ */
+ set_trap_gate(KDBENTER_VECTOR, &kdb_call);
+#endif /* CONFIG_KDB */
/*
* Should be a barrier for any external CPU state.
int apic_verbosity;
int disable_apic_timer __initdata;
+/* just used to communicate with shared i386 code: */
+int enable_local_apic = 1;
+
+ /*
+ * cpu_mask that denotes the CPUs that need timer interrupt coming in as
+ * IPIs in place of local APIC timers
+ */
+ static cpumask_t timer_interrupt_broadcast_ipi_mask;
+
/* Using APIC to generate smp_local_timer_interrupt? */
int using_apic_timer = 0;
},
},
{
+ .ident = "Sharp Actius MM20",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SHARP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PC-MM20 Series"),
+ },
+ },
++ {
+ .ident = "Sony Vaio FS-115b",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FS115B"),
+ },
+ },
{ }
};
ich5_sata = 1,
piix4_pata = 2,
ich6_sata = 3,
- ich6_sata_rm = 4,
- ich7_sata = 5,
- esb2_sata = 6,
- ich8_sata = 7,
+ ich6_sata_ahci = 4,
++ ich8_sata = 5,
PIIX_AHCI_DEVICE = 6,
};
{ 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
{ 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
{ 0x8086, 0x2651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
- { 0x8086, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_rm },
- { 0x8086, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_rm },
- { 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich7_sata },
- { 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich7_sata },
- { 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, esb2_sata },
+ { 0x8086, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
+ { 0x8086, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
+ { 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
+ { 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
+ { 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
+ { 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+ { 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+ { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
{ } /* terminate list */
};
char flag;
do {
- /* The following is not allowed by the tty layer and
- unsafe. It should be fixed ASAP */
- if (unlikely(tty->flip.count >= TTY_FLIPBUF_SIZE)) {
- if (tty->low_latency) {
- spin_unlock(&up->port.lock);
- tty_flip_buffer_push(tty);
- spin_lock(&up->port.lock);
- }
- /*
- * If this failed then we will throw away the
- * bytes but must do so to clear interrupts
- */
- }
ch = serial_inp(up, UART_RX);
+#ifdef CONFIG_KDB
+ if ((up->port.line == kdb_serial_line) && kdb_on == 1) {
+ if (ch == *kdb_serial_ptr) {
+ if (!(*++kdb_serial_ptr)) {
+ kdb(KDB_REASON_KEYBOARD, 0, regs);
+ kdb_serial_ptr = kdb_serial_str;
+ break;
+ }
+ } else
+ kdb_serial_ptr = kdb_serial_str;
+ }
+#endif /* CONFIG_KDB */
+ if (arch_8250_sysrq_via_ctrl_o(ch, &up->port))
+ goto ignore_char;
flag = TTY_NORMAL;
up->port.icount.rx++;
--- /dev/null
+/*
+ * linux/fs/lockd/nsmproc.c
+ *
+ * Kernel-based status monitor. This is an alternative to
+ * the code in mon.c.
+ *
+ * When asked to monitor a host, we add it to /var/lib/nsm/sm
+ * ourselves, and that's it. In order to catch SM_NOTIFY calls
+ * we implement a minimal statd.
+ *
+ * Minimal user space requirements for this implementation:
+ * /var/lib/nfs/state
+ * must exist, and must contain the NSM state as a 32bit
+ * binary counter.
+ * /var/lib/nfs/sm
+ * must exist
+ *
+ * Copyright (C) 2004, Olaf Kirch <okir@suse.de>
+ */
+
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/slab.h>
+#include <linux/in.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfsd/nfsd.h>
+#include <linux/lockd/lockd.h>
+#include <linux/lockd/share.h>
+#include <linux/lockd/sm_inter.h>
+#include <linux/file.h>
+#include <linux/namei.h>
+#include <asm/uaccess.h>
+#include <linux/buffer_head.h>
+
+
+/* XXX make this a module parameter? */
+#define NSM_BASE_PATH "/var/lib/nfs"
+#define NSM_SM_PATH NSM_BASE_PATH "/sm"
+#define NSM_STATE_PATH NSM_BASE_PATH "/state"
+
+#define NLMDBG_FACILITY NLMDBG_CLIENT
+
+static int __nsm_monitor(struct nlm_host *host);
+static int __nsm_unmonitor(struct nlm_host *host);
+
+/*
+ * Initialize local NSM state variable
+ */
+int
+nsm_kernel_statd_init(void)
+{
+ struct file *filp;
+ char buffer[32];
+ mm_segment_t fs;
+ int res;
+
+ dprintk("lockd: nsm_init()\n");
+ filp = filp_open(NSM_STATE_PATH, O_RDONLY, 0444);
+ if (IS_ERR(filp)) {
+ res = PTR_ERR(filp);
+ printk(KERN_NOTICE "lockd: failed to open %s: err=%d\n",
+ NSM_STATE_PATH, res);
+ return res;
+ }
+
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+ res = vfs_read(filp, buffer, sizeof(buffer), &filp->f_pos);
+ set_fs(fs);
+ filp_close(filp, NULL);
+
+ if (res < 0)
+ return res;
+ if (res == 4)
+ nsm_local_state = *(u32 *) buffer;
+ else
+ nsm_local_state = simple_strtol(buffer, NULL, 10);
+
+ nsm_monitor = __nsm_monitor;
+ nsm_unmonitor = __nsm_unmonitor;
+ return 0;
+}
+
+/*
+ * Build the NSM file name
+ */
+static char *
+nsm_filename(struct nsm_handle *nsm)
+{
+ char *name;
+
+ name = (char *) __get_free_page(GFP_KERNEL);
+ if (name == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ if (nsm_use_hostnames) {
+ snprintf(name, PAGE_SIZE, "%s/%s",
+ NSM_SM_PATH, nsm->sm_name);
+ } else {
+ /* FIXME IPV6 */
+ snprintf(name, PAGE_SIZE, "%s/%u.%u.%u.%u",
+ NSM_SM_PATH,
+ NIPQUAD(nsm->sm_addr.sin_addr));
+ }
+ return name;
+}
+
+static void
+nsm_put_name(char *name)
+{
+ free_page((unsigned long) name);
+}
+
+/*
+ * Create the NSM monitor file
+ */
+static int
+nsm_create(struct nsm_handle *nsm)
+{
+ struct file *filp;
+ char *filename;
+ int res = 0;
+
+ dprintk("lockd: creating statd monitor file for %s\n", nsm->sm_name);
+
+ if (!(filename = nsm_filename(nsm)))
+ return -ENOMEM;
+
+ filp = filp_open(filename, O_CREAT|O_SYNC|O_RDWR, 0644);
+ if (IS_ERR(filp)) {
+ res = PTR_ERR(filp);
+ printk(KERN_NOTICE
+ "lockd/statd: failed to create %s: err=%d\n",
+ filename, res);
+ } else {
+ fsync_super(filp->f_dentry->d_inode->i_sb);
+ filp_close(filp, NULL);
+ }
+
+ nsm_put_name(filename);
+ return res;
+}
+
+static int
+nsm_unlink(struct nsm_handle *nsm)
+{
+ struct nameidata nd;
+ struct inode *inode = NULL;
+ struct dentry *dentry;
+ char *filename;
+ int res = 0;
+
+ if (!(filename = nsm_filename(nsm)))
+ return -ENOMEM;
+
+ if ((res = path_lookup(filename, LOOKUP_PARENT, &nd)) != 0)
+ goto exit;
+
+ if (nd.last_type == LAST_NORM && !nd.last.name[nd.last.len]) {
- down(&nd.dentry->d_inode->i_sem);
++ mutex_lock(&nd.dentry->d_inode->i_mutex);
+
+ dentry = lookup_hash(&nd);
+ if (!IS_ERR(dentry)) {
+ if ((inode = dentry->d_inode) != NULL)
+ atomic_inc(&inode->i_count);
+ res = vfs_unlink(nd.dentry->d_inode, dentry);
+ dput(dentry);
+ } else {
+ res = PTR_ERR(dentry);
+ }
- up(&nd.dentry->d_inode->i_sem);
++ mutex_unlock(&nd.dentry->d_inode->i_mutex);
+ } else {
+ res = -EISDIR;
+ }
+ path_release(&nd);
+
+exit:
+ if (res < 0) {
+ printk(KERN_NOTICE
+ "lockd/statd: failed to unlink %s: err=%d\n",
+ filename, res);
+ }
+
+ if (inode)
+ iput(inode);
+ nsm_put_name(filename);
+ return res;
+}
+
+/*
+ * Call nsm_create/nsm_unlink with CAP_DAC_OVERRIDE
+ */
+#define swap_ugid(type, var) { \
+ type tmp = current->var; current->var = var; var = tmp; \
+}
+
+static int
+with_privilege(int (*func)(struct nsm_handle *), struct nsm_handle *nsm)
+{
+ kernel_cap_t cap = current->cap_effective;
+ int res = 0, mask;
+ uid_t fsuid = 0;
+ gid_t fsgid = 0;
+
+ /* If we're unprivileged, a call to capable() will set the
+ * SUPERPRIV flag */
+ mask = current->flags | ~PF_SUPERPRIV;
+
+	/* Raise capability so that we're able to create/unlink the file.
+ * Set fsuid/fsgid to 0 so the file will be owned by root. */
+ cap_raise(current->cap_effective, CAP_DAC_OVERRIDE);
+ swap_ugid(uid_t, fsuid);
+ swap_ugid(gid_t, fsgid);
+
+ res = func(nsm);
+
+ /* drop privileges */
+ current->cap_effective = cap;
+ swap_ugid(uid_t, fsuid);
+ swap_ugid(gid_t, fsgid);
+
+ /* Clear PF_SUPERPRIV unless it was set to begin with */
+ current->flags &= mask;
+
+ return res;
+}
+
+/*
+ * Set up monitoring of a remote host
+ * Note we hold the semaphore for the host table while
+ * we're here.
+ */
+static int
+__nsm_monitor(struct nlm_host *host)
+{
+ struct nsm_handle *nsm;
+ int res = 0;
+
+ dprintk("lockd: nsm_monitor(%s)\n", host->h_name);
+ if ((nsm = host->h_nsmhandle) == NULL)
+ BUG();
+
+ if (!nsm->sm_monitored) {
+ res = with_privilege(nsm_create, nsm);
+ if (res >= 0) {
+ nsm->sm_monitored = 1;
+ } else {
+ dprintk(KERN_NOTICE "nsm_monitor(%s) failed: errno=%d\n",
+ nsm->sm_name, -res);
+ }
+ }
+
+ return res;
+}
+
+/*
+ * Cease to monitor remote host
+ * Code stolen from sys_unlink.
+ */
+static int
+__nsm_unmonitor(struct nlm_host *host)
+{
+ struct nsm_handle *nsm;
+ int res = 0;
+
+ nsm = host->h_nsmhandle;
+ host->h_nsmhandle = NULL;
+
+ /* If the host was invalidated due to lockd restart/shutdown,
+ * don't unmonitor it.
+ * (Strictly speaking, we would have to keep the SM file
+ * until the next reboot. The only way to achieve that
+ * would be to link the monitor file to sm.bak now.)
+ */
+ if (nsm && atomic_read(&nsm->sm_count) == 1
+ && nsm->sm_monitored && !nsm->sm_sticky) {
+ dprintk("lockd: nsm_unmonitor(%s)\n", host->h_name);
+
+ res = with_privilege(nsm_unlink, nsm);
+ }
+
+ nsm_release(nsm);
+ return res;
+}
+
+/*
+ * NSM server implementation starts here
+ */
+int
+nsmsvc_authenticate(struct svc_rqst *rqstp)
+{
+ /* No authentication for statd. Many statd implementations
+ * even send their reboot notifications from an unprivileged
+ * port.
+ */
+ rqstp->rq_client = NULL;
+ return SVC_OK;
+}
+
+
+/*
+ * NULL: Test for presence of service
+ */
+static int
+nsmsvc_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
+{
+ dprintk("statd: NULL called\n");
+ return rpc_success;
+}
+
+/*
+ * NOTIFY: receive notification that remote host rebooted
+ */
+static int
+nsmsvc_proc_notify(struct svc_rqst *rqstp, struct nsm_args *argp,
+ struct nsm_res *resp)
+{
+ struct sockaddr_in saddr = rqstp->rq_addr;
+
+ dprintk("statd: NOTIFY called\n");
+
+ nlm_host_rebooted(&saddr, argp->mon_name, argp->mon_name_len, argp->state);
+ return rpc_success;
+}
+
+/*
+ * All other operations: return failure
+ */
+static int
+nsmsvc_proc_fail(struct svc_rqst *rqstp, struct nsm_args *argp,
+ struct nsm_res *resp)
+{
+ dprintk("statd: proc %u called\n", rqstp->rq_proc);
+ resp->status = 0;
+ resp->state = -1;
+ return rpc_success;
+}
+
+/*
+ * NSM XDR routines
+ */
+static int
+nsmsvc_decode_void(struct svc_rqst *rqstp, u32 *p, void *dummy)
+{
+ return xdr_argsize_check(rqstp, p);
+}
+
+static int
+nsmsvc_encode_void(struct svc_rqst *rqstp, u32 *p, void *dummy)
+{
+ return xdr_ressize_check(rqstp, p);
+}
+
+static int
+nsmsvc_decode_stat_chge(struct svc_rqst *rqstp, u32 *p, struct nsm_args *argp)
+{
+ /* Skip over the client's mon_name */
- p = xdr_decode_string(p, &argp->mon_name, &argp->mon_name_len, SM_MAXSTRLEN);
++ p = xdr_decode_string_inplace(p, &argp->mon_name, &argp->mon_name_len, SM_MAXSTRLEN);
+ if (p == NULL)
+ return 0;
+
+ argp->state = ntohl(*p++);
+ return xdr_argsize_check(rqstp, p);
+}
+
+static int
+nsmsvc_encode_res(struct svc_rqst *rqstp, u32 *p, struct nsm_res *resp)
+{
+ *p++ = resp->status;
+ return xdr_ressize_check(rqstp, p);
+}
+
+static int
+nsmsvc_encode_stat_res(struct svc_rqst *rqstp, u32 *p, struct nsm_res *resp)
+{
+ *p++ = resp->status;
+ *p++ = resp->state;
+ return xdr_ressize_check(rqstp, p);
+}
+
+struct nsm_void { int dummy; };
+
+#define PROC(name, xargt, xrest, argt, rest, respsize) \
+ { .pc_func = (svc_procfunc) nsmsvc_proc_##name, \
+ .pc_decode = (kxdrproc_t) nsmsvc_decode_##xargt, \
+ .pc_encode = (kxdrproc_t) nsmsvc_encode_##xrest, \
+ .pc_release = NULL, \
+ .pc_argsize = sizeof(struct nsm_##argt), \
+ .pc_ressize = sizeof(struct nsm_##rest), \
+ .pc_xdrressize = respsize, \
+ }
+
+struct svc_procedure nsmsvc_procedures[] = {
+ PROC(null, void, void, void, void, 1),
+ PROC(fail, void, stat_res, void, res, 2),
+ PROC(fail, void, stat_res, void, res, 2),
+ PROC(fail, void, res, void, res, 1),
+ PROC(fail, void, res, void, res, 1),
+ PROC(fail, void, res, void, res, 1),
+ PROC(notify, stat_chge, void, args, void, 1)
+};
#endif
static void nlmsvc_insert_block(struct nlm_block *block, unsigned long);
-static int nlmsvc_remove_block(struct nlm_block *block);
+static void nlmsvc_remove_block(struct nlm_block *block);
- static void nlmsvc_grant_callback(struct rpc_task *task);
+
+ static const struct rpc_call_ops nlmsvc_grant_ops;
/*
* The list of blocked locks to retry
{
struct file_lock *fl = &block->b_call.a_args.lock.fl;
struct nlm_file *file = block->b_file;
- struct nlm_block **bp;
+ int status = 0;
dprintk("lockd: deleting block %p...\n", block);
if (block->b_host)
nlm_release_host(block->b_host);
nlmclnt_freegrantargs(&block->b_call);
+ nlm_release_file(file);
kfree(block);
+ return status;
}
/*
* (NLM_ACT_CHECK handled by nlmsvc_inspect_file).
*/
int
-nlmsvc_traverse_blocks(struct nlm_host *host, struct nlm_file *file, int action)
+nlmsvc_traverse_blocks(struct nlm_host *host, struct nlm_file *file,
+ nlm_host_visitor_t fn)
{
struct nlm_block *block, *next;
+ /* XXX: Will everything get cleaned up if we don't unlock here? */
down(&file->f_sema);
- for (block = file->f_blocks; block; block = next) {
- next = block->b_fnext;
- if (action == NLM_ACT_MARK)
- block->b_host->h_inuse = 1;
- else if (action == NLM_ACT_UNLOCK) {
- if (host == NULL || host == block->b_host)
- nlmsvc_delete_block(block, 1);
- }
+ list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) {
+ if (fn && fn(block->b_host, host))
+ nlmsvc_delete_block(block, 1);
}
up(&file->f_sema);
return 0;
file->f_count++;
down(&file->f_sema);
- if ((block = nlmsvc_find_block(cookie)) != NULL) {
- block = nlmsvc_find_block(cookie, &rqstp->rq_addr);
++ block = nlmsvc_find_block(cookie);
+ if (block) {
if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
/* Try again in a couple of seconds */
nlmsvc_insert_block(block, 10 * HZ);
}
nfs_inode_return_delegation(old_inode);
- if (new_inode) {
+ if (new_inode != NULL) {
+ nfs_inode_return_delegation(new_inode);
++
+ /* If this is the last reference to the inode make
+ * sure the VFS zaps it and all associated caches.
+ */
+ new_inode->i_nlink--;
d_delete(new_dentry);
}
/*
* No support for async yet
*/
- up(&inode->i_sem);
++ mutex_lock(&inode->i_mutex);
+
if (!is_sync_kiocb(iocb))
return result;
default:
break;
}
+
- down(&inode->i_sem);
++ mutex_unlock(&inode->i_mutex);
return result;
}
--- /dev/null
+/*
+ * subfs.c
+ *
+ * Copyright (C) 2003-2004 Eugene S. Weiss <eweiss@sbclobal.net>
+ *
+ * * Feb 25, 2005: Cleaned up code and locking
+ * Jeff Mahoney <jeffm@suse.com>
+ *
+ * Distributed under the terms of the GNU General Public License version 2
+ * or above.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/pagemap.h>
+#include <linux/fs.h>
+#include <linux/parser.h>
+#include <asm/atomic.h>
+#include <asm/uaccess.h>
+#include <linux/list.h>
+#include <linux/mount.h>
+#include <linux/namespace.h>
+#include <linux/namei.h>
+#include <linux/dcache.h>
+#include <linux/sysfs.h>
+#include <asm/semaphore.h>
+#include <asm/signal.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+
+#define SUBFS_MAGIC 0x2c791058
+#define SUBFS_VER "0.9"
+#define SUBMOUNTD_PATH "/sbin/submountd"
+#define ROOT_MODE 0777
+
+struct subfs_mount {
+ char *device;
+ char *options;
+ char *req_fs;
+ char *helper_prog;
+ struct super_block *sb;
+ struct semaphore sem;
+ int procuid;
+};
+
+/* Same as set_fs_pwd from namespace.c. There's a problem with the
+ * symbol. When it is fixed, discard this.
+ * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values.
+ * It can block. Requires the big lock held.
+ */
+static void subfs_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
+ struct dentry *dentry)
+{
+ struct dentry *old_pwd;
+ struct vfsmount *old_pwdmnt;
+
+ write_lock(&fs->lock);
+ old_pwd = fs->pwd;
+ old_pwdmnt = fs->pwdmnt;
+ fs->pwdmnt = mntget(mnt);
+ fs->pwd = dget(dentry);
+ write_unlock(&fs->lock);
+
+ if (old_pwd) {
+ dput(old_pwd);
+ mntput(old_pwdmnt);
+ }
+}
+
+
+/* Quickly sends an ignored signal to the signal handling system. This
+ * causes the system to restart the system call when it receives the
+ * -ERESTARTSYS error.
+ */
+static void subfs_send_signal(void)
+{
+ struct task_struct *task = current;
+ int signal = SIGCONT;
+
+ read_lock(&tasklist_lock);
+ spin_lock_irq(&task->sighand->siglock);
+ sigaddset(&task->pending.signal, signal);
+ spin_unlock_irq(&task->sighand->siglock);
+ read_unlock(&tasklist_lock);
+ set_tsk_thread_flag(task, TIF_SIGPENDING);
+ return;
+}
+
+
+/* If the option "procuid" is chosen when subfs is mounted, the uid
+ * and gid numbers for the current process will be added to the mount
+ * option line. Hence, non-unix filesystems will be mounted with
+ * that ownership.
+ */
+static void add_procuid(struct subfs_mount *sfs_mnt)
+{
+ struct task_struct *task = current;
+
+ char *o = kmalloc(strlen(sfs_mnt->options) + 1 + 32 + 1, GFP_KERNEL);
+
+ if (sfs_mnt->options[0] == '\0')
+ sprintf(o, "uid=%d,gid=%d", task->uid, task->gid);
+ else
+ sprintf(o, "%s,uid=%d,gid=%d", sfs_mnt->options, task->uid, task->gid);
+
+ kfree(sfs_mnt->options);
+ sfs_mnt->options = o;
+}
+
+
+/* This routine calls the /sbin/submountd program to mount the
+ * appropriate filesystem on top of the subfs mount. Returns
+ * 0 if the userspace program exited normally, or an error if
+ * it did not.
+ */
+static int mount_real_fs(struct subfs_mount *sfs_mnt, struct vfsmount *mnt, unsigned long flags)
+{
+ char *argv[7] =
+ { sfs_mnt->helper_prog, NULL, NULL, NULL, NULL, NULL, NULL };
+ char *envp[2] = { "HOME=/", NULL };
+ char *path_buf;
+ int result, len = 0;
+
+ argv[1] = sfs_mnt->device;
+ path_buf = (char *) __get_free_page(GFP_KERNEL);
+ if (!path_buf)
+ return -ENOMEM;
+ argv[2] = d_path(mnt->mnt_mountpoint, mnt->mnt_parent,
+ path_buf, PAGE_SIZE);
+ argv[3] = sfs_mnt->req_fs;
+ if (!(argv[4] = kmalloc(17, GFP_KERNEL))) {
+ free_page((unsigned long) path_buf);
+ return -ENOMEM; /* 64 bits on some platforms */
+ }
+ sprintf(argv[4], "%lx", flags);
+ len = strlen(sfs_mnt->options);
+ if (sfs_mnt->procuid)
+ add_procuid(sfs_mnt);
+ argv[5] = sfs_mnt->options;
+ result = call_usermodehelper(sfs_mnt->helper_prog, argv, envp, 1);
+ free_page((unsigned long) path_buf);
+ kfree(argv[4]);
+ if (sfs_mnt->procuid)
+ sfs_mnt->options[len] = '\0';
+ return result;
+}
+
+
+/* This routine returns a pointer to the filesystem mounted on top
+ * of the subfs mountpoint, or an error pointer if it was unable to.
+ */
+static struct vfsmount *get_child_mount (struct subfs_mount *sfs_mnt,
+ struct vfsmount *mnt)
+{
+ struct vfsmount *child;
+ int result;
+ unsigned long flags = 0;
+
+ /* We're sitting in a detached namespace -
+ * don't mount the filesystem. */
+ if (mnt->mnt_mountpoint == mnt->mnt_root) {
+ printk (KERN_ERR "subfs: refusing to mount media in "
+ "deleted directory\n");
+ return ERR_PTR(-ENOENT);
+ }
+
+ /* Lookup the child mount - if it's not mounted, mount it */
+ child = lookup_mnt (mnt, sfs_mnt->sb->s_root);
+ if (!child) {
+ flags = sfs_mnt->sb->s_flags;
+ if (mnt->mnt_flags & MNT_NOSUID) flags |= MS_NOSUID;
+ if (mnt->mnt_flags & MNT_NODEV) flags |= MS_NODEV;
+ if (mnt->mnt_flags & MNT_NOEXEC) flags |= MS_NOEXEC;
+
+ result = mount_real_fs (sfs_mnt, mnt, flags);
+ if (result) {
+ printk (KERN_ERR "subfs: unsuccessful attempt to "
+ "mount media (%d)\n", result);
+ /* Workaround for call_usermodehelper return value bug. */
+ if (result < 0)
+ return ERR_PTR(result);
+ return ERR_PTR(-ENOMEDIUM);
+ }
+
+ child = lookup_mnt (mnt, sfs_mnt->sb->s_root);
+
+ /* The mount did succeed (error caught directly above), but
+ * it was umounted already. Tell the process to retry.
+ */
+ if (!child) {
+ subfs_send_signal();
+ return ERR_PTR(-ERESTARTSYS);
+ }
+ }
+
+ return child;
+}
+
+
+/* Implements the lookup method for subfs. Tries to get the child
+ * mount. If it succeeds, it emits a signal and returns
+ * -ERESTARTSYS. If it receives an error, it passes it on to the
+ * system. It raises the semaphore in the directory inode before mounting
+ * because the mount routine also calls lookup, and hence a function is
+ * calling itself from within semaphore protected code. Only the semaphore
+ * on the subfs pseudo-directory is affected, so this isn't deadly.
+ */
+static struct dentry *subfs_lookup(struct inode *dir,
+ struct dentry *dentry, struct nameidata *nd)
+{
+ struct subfs_mount *sfs_mnt = dir->i_sb->s_fs_info;
+ struct vfsmount *child;
+
+ /* This is ugly, but prevents a lockup during mount. */
- up(&dir->i_sem);
++ mutex_unlock(&dir->i_mutex);
+ if (down_interruptible(&sfs_mnt->sem)) {
- down(&dir->i_sem);/*put the dir sem back down if interrupted*/
++ mutex_lock(&dir->i_mutex);/*put the dir sem back down if interrupted*/
+ return ERR_PTR(-ERESTARTSYS);
+ }
+ child = get_child_mount(sfs_mnt, nd->mnt);
+ up(&sfs_mnt->sem);
- down(&dir->i_sem);
++ mutex_lock(&dir->i_mutex);
+ if (IS_ERR(child))
+ return (void *) child;
+ subfs_send_signal();
+ if (nd->mnt == current->fs->pwdmnt)
+ subfs_set_fs_pwd(current->fs, child, child->mnt_root);
+ mntput (child);
+ return ERR_PTR(-ERESTARTSYS);
+}
+
+
+/* Implements the open method for subfs. Tries to get the child mount
+ * for the subfs mountpoint which is being opened. Returns -ERESTARTSYS
+ * and emits an ignored signal to the calling process if it succeeds,
+ * or passes the error message received if it fails.
+ */
+static int subfs_open(struct inode *inode, struct file *filp)
+{
+ struct subfs_mount *sfs_mnt = filp->f_dentry->d_sb->s_fs_info;
+ struct vfsmount *child;
+
+ if (down_interruptible(&sfs_mnt->sem))
+ return -ERESTARTSYS;
+ child = get_child_mount(sfs_mnt, filp->f_vfsmnt);
+ up(&sfs_mnt->sem);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+ subfs_send_signal();
+ if (filp->f_vfsmnt == current->fs->pwdmnt)
+ subfs_set_fs_pwd(current->fs, child, child->mnt_root);
+ mntput (child);
+ return -ERESTARTSYS;
+}
+
+
+/* Implements the statfs method so df and such will work on the mountpoint.
+ */
+static int subfs_statfs(struct super_block *sb, struct kstatfs *buf)
+{
+#if 1
+	/* disable statfs, so "df" and other tools do not trigger mounting
+ * the media, which might cause error messages or hang, if the block
+ * device driver hangs.
+ */
+ return 0;
+#else
+ struct subfs_mount *sfs_mnt = sb->s_fs_info;
+ struct vfsmount *child;
+ if (down_interruptible(&sfs_mnt->sem))
+ return -ERESTARTSYS;
+ child = get_child_mount(sfs_mnt);
+ up(&sfs_mnt->sem);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+ subfs_send_signal();
+ mntput (child);
+ return -ERESTARTSYS;
+#endif
+}
+
+static struct super_operations subfs_s_ops = {
+ .statfs = subfs_statfs,
+ .drop_inode = generic_delete_inode,
+};
+
+
+static struct inode_operations subfs_dir_inode_operations = {
+ .lookup = subfs_lookup,
+};
+
+
+static struct file_operations subfs_file_ops = {
+ .open = subfs_open,
+};
+
+
+/* Creates the inodes for subfs superblocks.
+ */
+static struct inode *subfs_make_inode(struct super_block *sb, int mode)
+{
+ struct inode *ret = new_inode(sb);
+
+ if (ret) {
+ ret->i_mode = mode;
+ ret->i_uid = ret->i_gid = 0;
+ ret->i_blksize = PAGE_CACHE_SIZE;
+ ret->i_blocks = 0;
+ ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME;
+ ret->i_fop = &subfs_file_ops;
+ }
+ return ret;
+}
+
+/* Fills the fields for the superblock created when subfs is mounted.
+ */
+static int subfs_fill_super(struct super_block *sb, void *data, int silent)
+{
+ struct inode *root;
+ struct dentry *root_dentry;
+
+ sb->s_blocksize = PAGE_CACHE_SIZE;
+ sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_magic = SUBFS_MAGIC;
+ sb->s_op = &subfs_s_ops;
+ root = subfs_make_inode(sb, S_IFDIR|ROOT_MODE);
+ if (!root)
+ goto out;
+ root->i_op = &subfs_dir_inode_operations;
+ root_dentry = d_alloc_root(root);
+ if (!root_dentry)
+ goto out_iput;
+ sb->s_root = root_dentry;
+ return 0;
+ out_iput:
+ iput(root);
+ out:
+ return -ENOMEM;
+}
+
+enum {
+ Opt_program, Opt_fs, Opt_procuid, Opt_other
+};
+
+static match_table_t tokens = {
+ {Opt_program, "program=%s"},
+ {Opt_fs, "fs=%s"},
+ {Opt_procuid, "procuid"},
+ {Opt_other, NULL}
+};
+
+/* Parse the options string and remove submount specific options
+ * and store the appropriate data.
+ */
+static int proc_opts(struct subfs_mount *sfs_mnt, void *data)
+{
+ char *opts = data, *opt, *fs, *prog, *nopts = NULL;
+ substring_t args[MAX_OPT_ARGS];
+ int len, err = -ENOMEM;
+
+ if (!opts) {
+ if (!(nopts = opts = kmalloc(8, GFP_KERNEL)))
+ goto out;
+ strcpy(opts, "fs=auto");
+ }
+ len = strnlen(opts, PAGE_SIZE - 1) + 1;
+ if (!(sfs_mnt->options = kmalloc(len, GFP_KERNEL)))
+ goto out;
+ sfs_mnt->options[0] = '\0';
+ while ((opt = strsep(&opts, ","))) {
+ int token;
+ if (!*opt)
+ continue;
+
+ token = match_token(opt, tokens, args);
+ switch (token) {
+ case Opt_program:
+ if (!(prog = match_strdup(&args[0])))
+ goto out;
+ kfree(sfs_mnt->helper_prog);
+ sfs_mnt->helper_prog = prog;
+ break;
+ case Opt_fs:
+ if (!(fs = match_strdup(&args[0])))
+ goto out;
+ kfree(sfs_mnt->req_fs);
+ sfs_mnt->req_fs = fs;
+ break;
+ case Opt_procuid:
+ sfs_mnt->procuid = 1;
+ break;
+ default:
+ if (sfs_mnt->options[0])
+ strlcat(sfs_mnt->options, ",", len);
+ strlcat(sfs_mnt->options, opt, len);
+ break;
+ }
+ }
+ if (!sfs_mnt->req_fs) {
+ if (!(sfs_mnt->req_fs = kmalloc(5, GFP_KERNEL)))
+ goto out;
+ strcpy(sfs_mnt->req_fs, "auto");
+ }
+ err = 0;
+ out:
+ kfree(nopts);
+ return err;
+}
+
+
+/* subfs_get_super is the subfs implementation of the get_sb method on
+ * the file_system_type structure. It should only be called in the
+ * case of a mount. It creates a new subfs_mount structure, fills
+ * the fields of the structure, except for the mount structure, and then
+ * calls a generic get_sb function. The superblock pointer is stored on
+ * the subfs_mount structure, and returned to the calling function. The
+ * subfs_mount structure is pointed to by the s_fs_info field of the
+ * superblock structure.
+ */
+static struct super_block *subfs_get_super(struct file_system_type *fst,
+ int flags, const char *devname, void *data)
+{
+ char *device;
+ struct subfs_mount *newmount;
+ int ret;
+
+ if (!(newmount = kmalloc(sizeof(struct subfs_mount), GFP_KERNEL)))
+ return ERR_PTR(-ENOMEM);
+ newmount->req_fs = NULL;
+ newmount->sb = NULL;
+ newmount->procuid = 0;
+ sema_init(&newmount->sem, 1);
+ if (!(device = kmalloc((strlen(devname) + 1), GFP_KERNEL)))
+ return ERR_PTR(-ENOMEM);
+ strcpy(device, devname);
+ newmount->device = device;
+ if (!(newmount->helper_prog =
+ kmalloc(sizeof(SUBMOUNTD_PATH), GFP_KERNEL)))
+ return ERR_PTR(-ENOMEM);
+ strcpy(newmount->helper_prog, SUBMOUNTD_PATH);
+ if ((ret = proc_opts(newmount, data)))
+ return ERR_PTR(ret);
+ newmount->sb = get_sb_nodev(fst, flags, data, subfs_fill_super);
+ newmount->sb->s_fs_info = newmount;
+ return newmount->sb;
+}
+
+
+/* subfs_kill_super is the subfs implementation of the kill_sb method.
+ * It should be called only on umount. It cleans up the appropriate
+ * subfs_mount structure and then calls a generic function to actually
+ * clean up the superblock structure.
+ */
+static void subfs_kill_super(struct super_block *sb)
+{
+ struct subfs_mount *sfs_mnt = sb->s_fs_info;
+
+ if (sfs_mnt) {
+ kfree(sfs_mnt->device);
+ kfree(sfs_mnt->options);
+ kfree(sfs_mnt->req_fs);
+ kfree(sfs_mnt->helper_prog);
+ kfree(sfs_mnt);
+ sb->s_fs_info = NULL;
+ }
+ kill_litter_super(sb);
+ return;
+}
+
+static struct file_system_type subfs_type = {
+ .owner = THIS_MODULE,
+ .name = "subfs",
+ .get_sb = subfs_get_super,
+ .kill_sb = subfs_kill_super,
+};
+
+static int __init subfs_init(void)
+{
+ printk(KERN_INFO "subfs %s\n", SUBFS_VER);
+ return register_filesystem(&subfs_type);
+}
+
+static void __exit subfs_exit(void)
+{
+ printk(KERN_INFO "subfs exiting.\n");
+ unregister_filesystem(&subfs_type);
+}
+
+MODULE_DESCRIPTION("subfs virtual filesystem " SUBFS_VER );
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Eugene S. Weiss");
+
+module_init(subfs_init);
+module_exit(subfs_exit);
#define PTRACE_GET_THREAD_AREA 25
#define PTRACE_SET_THREAD_AREA 26
+ #define PTRACE_SYSEMU 31
+ #define PTRACE_SYSEMU_SINGLESTEP 32
+
+enum EFLAGS {
+ EF_CF = 0x00000001,
+ EF_PF = 0x00000004,
+ EF_AF = 0x00000010,
+ EF_ZF = 0x00000040,
+ EF_SF = 0x00000080,
+ EF_TF = 0x00000100,
+ EF_IE = 0x00000200,
+ EF_DF = 0x00000400,
+ EF_OF = 0x00000800,
+ EF_IOPL = 0x00003000,
+ EF_IOPL_RING0 = 0x00000000,
+ EF_IOPL_RING1 = 0x00001000,
+ EF_IOPL_RING2 = 0x00002000,
+ EF_NT = 0x00004000, /* nested task */
+ EF_RF = 0x00010000, /* resume */
+ EF_VM = 0x00020000, /* virtual mode */
+ EF_AC = 0x00040000, /* alignment */
+ EF_VIF = 0x00080000, /* virtual interrupt */
+ EF_VIP = 0x00100000, /* virtual interrupt pending */
+ EF_ID = 0x00200000, /* id */
+};
+
#ifdef __KERNEL__
#include <asm/vm86.h>
/* Default baud base if not found in device-tree */
#define BASE_BAUD ( 1843200 / 16 )
+ #ifdef CONFIG_PPC_UDBG_16550
extern void find_legacy_serial_ports(void);
+ #else
+ #define find_legacy_serial_ports() do { } while (0)
+ #endif
+#if defined(SUPPORT_SYSRQ) && defined(CONFIG_PPC_PSERIES)
+#undef arch_8250_sysrq_via_ctrl_o
+extern int power4_sysrq_via_ctrl_o;
+#define arch_8250_sysrq_via_ctrl_o(ch, port) ((ch) == '\x0f' && power4_sysrq_via_ctrl_o && uart_handle_break((port)))
+#endif
+
#endif /* _PPC64_SERIAL_H */
--- /dev/null
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/fs.h>
+#include <linux/bio.h>
+#include <linux/buffer_head.h>
+#include <linux/kdb.h>
+#include <linux/kdbprivate.h>
+#include <linux/blkdev.h>
+#include <linux/ctype.h>
+
+MODULE_AUTHOR("SGI");
+MODULE_DESCRIPTION("Debug page information");
+MODULE_LICENSE("GPL");
+
+/* Standard Linux page stuff */
+
+#ifndef CONFIG_DISCONTIGMEM
+/* From include/linux/page_flags.h */
+static char *pg_flag_vals[] = {
+ "PG_locked", "PG_error", "PG_referenced", "PG_uptodate",
+ "PG_dirty", "PG_lru", "PG_active", "PG_slab",
+ "PG_checked", "PG_arch_1", "PG_reserved", "PG_private",
+ "PG_writeback", "PG_nosave", "PG_compound", "PG_swapcache",
+ "PG_mappedtodisk", "PG_reclaim", "PG_nosave_free", "PG_uncached",
+ NULL };
+#endif
+
+/* From include/linux/buffer_head.h */
+static char *bh_state_vals[] = {
+ "Uptodate", "Dirty", "Lock", "Req",
+ "Uptodate_Lock", "Mapped", "New", "Async_read",
+ "Async_write", "Delay", "Boundary", "Write_EIO",
+ "Ordered", "Eopnotsupp", "Private",
+ NULL };
+
+/* From include/linux/bio.h */
+static char *bio_flag_vals[] = {
+ "Uptodate", "RW_block", "EOF", "Seg_valid",
+ "Cloned", "Bounced", "User_mapped", "Eopnotsupp",
+ NULL };
+
+/* From include/linux/fs.h */
+static char *inode_flag_vals[] = {
+ "I_DIRTY_SYNC", "I_DIRTY_DATASYNC", "I_DIRTY_PAGES", "I_LOCK",
+ "I_FREEING", "I_CLEAR", "I_NEW", "I_WILL_FREE",
+ NULL };
+
+/*
+ * Render the set bits of @flags as a space-separated list of names taken
+ * from the NULL-terminated table @mapping (bit N -> mapping[N]), wrapping
+ * the output before column 80 with a continuation indent.
+ * NOTE(review): returns a pointer into a static 256-byte buffer, so the
+ * result is overwritten by the next call and the function is not
+ * reentrant; there is also no bound check on the strcat()s -- acceptable
+ * only because the flag tables here are small and kdb is single-threaded.
+ */
+static char *map_flags(unsigned long flags, char *mapping[])
+{
+ static char buffer[256];
+ int index;
+ int offset = 12; /* columns already used by the caller's label */
+
+ buffer[0] = '\0';
+
+ for (index = 0; flags && mapping[index]; flags >>= 1, index++) {
+ if (flags & 1) {
+ if ((offset + strlen(mapping[index]) + 1) >= 80) {
+ strcat(buffer, "\n ");
+ offset = 12;
+ } else if (offset > 12) {
+ strcat(buffer, " ");
+ offset++;
+ }
+ strcat(buffer, mapping[index]);
+ offset += strlen(mapping[index]);
+ }
+ }
+
+ return (buffer);
+}
+
+/*
+ * kdb command "bh <addr>": copy the buffer_head at @addr out of debuggee
+ * memory with kdb_getarea() and print its block number, size, device,
+ * refcount, decoded state flags, data/page/private pointers and the
+ * b_end_io completion handler (symbolically when set).
+ * Returns 0 on success or a kdb diagnostic code.
+ */
+static int
+kdbm_buffers(int argc, const char **argv, const char **envp,
+ struct pt_regs *regs)
+{
+ struct buffer_head bh;
+ unsigned long addr;
+ long offset = 0;
+ int nextarg;
+ int diag;
+
+ if (argc != 1)
+ return KDB_ARGCOUNT;
+
+ nextarg = 1;
+ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs)) ||
+ (diag = kdb_getarea(bh, addr)))
+ return(diag);
+
+ kdb_printf("buffer_head at 0x%lx\n", addr);
+ kdb_printf(" bno %llu size %d dev 0x%x\n",
+ (unsigned long long)bh.b_blocknr,
+ bh.b_size,
+ bh.b_bdev ? bh.b_bdev->bd_dev : 0);
+ kdb_printf(" count %d state 0x%lx [%s]\n",
+ bh.b_count.counter, bh.b_state,
+ map_flags(bh.b_state, bh_state_vals));
+ kdb_printf(" b_data 0x%p\n",
+ bh.b_data);
+ kdb_printf(" b_page 0x%p b_this_page 0x%p b_private 0x%p\n",
+ bh.b_page, bh.b_this_page, bh.b_private);
+ kdb_printf(" b_end_io ");
+ if (bh.b_end_io)
+ kdb_symbol_print(kdba_funcptr_value(bh.b_end_io), NULL, KDB_SP_VALUE);
+ else
+ kdb_printf("(NULL)");
+ kdb_printf("\n");
+
+ return 0;
+}
+
+/*
+ * Dump @vcount bio_vec entries starting at @vec, fetching each one from
+ * debuggee memory with kdb_getarea().  A @vcount outside 1..BIO_MAX_PAGES
+ * is treated as garbage and skipped rather than trusted.
+ * Returns 0, or the kdb_getarea() diagnostic on a fetch failure.
+ */
+static int
+print_biovec(struct bio_vec *vec, int vcount)
+{
+ struct bio_vec bvec;
+ unsigned long addr;
+ int diag;
+ int i;
+
+ if (vcount < 1 || vcount > BIO_MAX_PAGES) {
+ kdb_printf(" [skipped iovecs, vcnt is %d]\n", vcount);
+ return 0;
+ }
+
+ addr = (unsigned long)vec;
+ for (i = 0; i < vcount; i++) {
+ if ((diag = kdb_getarea(bvec, addr)))
+ return(diag);
+ addr += sizeof(bvec);
+ kdb_printf(" [%d] page 0x%p length=%u offset=%u\n",
+ i, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
+ }
+ return 0;
+}
+
+/*
+ * kdb command "bio <addr>": copy the struct bio at @addr and print its
+ * sector, chain/device pointers, vector count, rw/flags (decoded via
+ * bio_flag_vals), the individual io_vec entries, refcount, private data
+ * and the bi_end_io completion handler.
+ * Returns 0 on success or a kdb diagnostic code.
+ */
+static int
+kdbm_bio(int argc, const char **argv, const char **envp,
+ struct pt_regs *regs)
+{
+ struct bio bio;
+ unsigned long addr;
+ long offset = 0;
+ int nextarg;
+ int diag;
+
+ if (argc != 1)
+ return KDB_ARGCOUNT;
+
+ nextarg = 1;
+ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs)) ||
+ (diag = kdb_getarea(bio, addr)))
+ return(diag);
+
+ kdb_printf("bio at 0x%lx\n", addr);
+ kdb_printf(" bno %llu next 0x%p dev 0x%x\n",
+ (unsigned long long)bio.bi_sector,
+ bio.bi_next, bio.bi_bdev ? bio.bi_bdev->bd_dev : 0);
+ kdb_printf(" vcnt %u vec 0x%p rw 0x%lx flags 0x%lx [%s]\n",
+ bio.bi_vcnt, bio.bi_io_vec, bio.bi_rw, bio.bi_flags,
+ map_flags(bio.bi_flags, bio_flag_vals));
+ print_biovec(bio.bi_io_vec, bio.bi_vcnt);
+ kdb_printf(" count %d private 0x%p\n",
+ atomic_read(&bio.bi_cnt), bio.bi_private);
+ kdb_printf(" bi_end_io ");
+ if (bio.bi_end_io)
+ kdb_symbol_print(kdba_funcptr_value(bio.bi_end_io), NULL, KDB_SP_VALUE);
+ else
+ kdb_printf("(NULL)");
+ kdb_printf("\n");
+
+ return 0;
+}
+
+#ifndef CONFIG_DISCONTIGMEM
+/* Decode struct page flag bits into names using pg_flag_vals. */
+static char *page_flags(unsigned long flags)
+{
+ return(map_flags(flags, pg_flag_vals));
+}
+
+/*
+ * kdb command "page <vaddr>": display a struct page.  The argument may be
+ * either the address of a struct page or a raw page index; a value below
+ * the kernel address space (region 0 on ia64, < PAGE_OFFSET elsewhere) is
+ * interpreted as an index into mem_map.
+ * Returns 0 on success or a kdb diagnostic code.
+ */
+static int
+kdbm_page(int argc, const char **argv, const char **envp,
+ struct pt_regs *regs)
+{
+ struct page page;
+ unsigned long addr;
+ long offset = 0;
+ int nextarg;
+ int diag;
+
+ if (argc != 1)
+ return KDB_ARGCOUNT;
+
+ nextarg = 1;
+ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs);
+ if (diag)
+ return diag;
+
+#ifdef __ia64__
+ if (rgn_index(addr) == 0)
+ addr = (unsigned long) &mem_map[addr]; /* assume region 0 is a page index, not an address */
+#else
+ if (addr < PAGE_OFFSET)
+ addr = (unsigned long) &mem_map[addr];
+#endif
+
+ if ((diag = kdb_getarea(page, addr)))
+ return(diag);
+
+ kdb_printf("struct page at 0x%lx\n", addr);
+ kdb_printf(" addr space 0x%p index %lu (offset 0x%llx)\n",
+ page.mapping, page.index,
+ (unsigned long long)page.index << PAGE_CACHE_SHIFT);
+ kdb_printf(" count %d flags %s\n",
+ page._count.counter, page_flags(page.flags));
+ kdb_printf(" virtual 0x%p\n", page_address((struct page *)addr));
+ if (page_has_buffers(&page))
+ kdb_printf(" buffers 0x%p\n", page_buffers(&page));
+ else
+ kdb_printf(" private 0x%lx\n", page.u.private);
+
+ return 0;
+}
+#endif /* CONFIG_DISCONTIGMEM */
+
+/*
+ * Print the struct request at @addr (errors, sector counts, segment
+ * counts) and return the address of the next request on the queuelist,
+ * so callers can walk a request queue.  Returns 0 if the struct could
+ * not be fetched from debuggee memory.
+ */
+static unsigned long
+print_request(unsigned long addr)
+{
+ struct request rq;
+
+ if (kdb_getarea(rq, addr))
+ return(0);
+
+ kdb_printf("struct request at 0x%lx\n", addr);
+ kdb_printf(" errors %d sector %llu nr_sectors %lu waiting 0x%p\n",
+ rq.errors,
+ (unsigned long long)rq.sector, rq.nr_sectors,
+ rq.waiting);
+
+ kdb_printf(" hsect %llu hnrsect %lu nrseg %u nrhwseg %u currnrsect %u\n",
+ (unsigned long long)rq.hard_sector, rq.hard_nr_sectors,
+ rq.nr_phys_segments, rq.nr_hw_segments,
+ rq.current_nr_sectors);
+
+ return (unsigned long) rq.queuelist.next;
+}
+
+/*
+ * kdb command "req <vaddr>": dump a single struct request via
+ * print_request().  Returns 0 or a kdb diagnostic code.
+ */
+static int
+kdbm_request(int argc, const char **argv, const char **envp,
+ struct pt_regs *regs)
+{
+ long offset = 0;
+ unsigned long addr;
+ int nextarg;
+ int diag;
+
+ if (argc != 1)
+ return KDB_ARGCOUNT;
+
+ nextarg = 1;
+ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs);
+ if (diag)
+ return diag;
+
+ print_request(addr);
+ return 0;
+}
+
+
+/*
+ * kdb command "rqueue <vaddr>": dump every request on a request_queue by
+ * walking queue_head via the queuelist.next pointers returned from
+ * print_request(), stopping when the walk returns to the list head.
+ * NOTE(review): a corrupted list that never reaches the head (or a
+ * print_request() fetch failure, which returns 0) ends the loop only by
+ * mismatching head_addr -- there is no iteration-count guard.
+ * Returns 0 or a kdb diagnostic code.
+ */
+static int
+kdbm_rqueue(int argc, const char **argv, const char **envp,
+ struct pt_regs *regs)
+{
+ struct request_queue rq;
+ unsigned long addr, head_addr, next;
+ long offset = 0;
+ int nextarg;
+ int i, diag;
+
+ if (argc != 1)
+ return KDB_ARGCOUNT;
+
+ nextarg = 1;
+ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs)) ||
+ (diag = kdb_getarea(rq, addr)))
+ return(diag);
+
+ kdb_printf("struct request_queue at 0x%lx\n", addr);
+ i = 0;
+ next = (unsigned long)rq.queue_head.next;
+ head_addr = addr + offsetof(struct request_queue, queue_head);
+ kdb_printf(" request queue: %s\n", next == head_addr ?
+ "empty" : "");
+ while (next != head_addr) {
+ i++;
+ next = print_request(next);
+ }
+
+ if (i)
+ kdb_printf("%d requests found\n", i);
+
+ return 0;
+}
+
+
+/*
+ * Print a one-line summary (address, block number, decoded state) of the
+ * buffer_head at @addr; silently returns if the struct cannot be fetched.
+ */
+static void
+do_buffer(unsigned long addr)
+{
+ struct buffer_head bh;
+
+ if (kdb_getarea(bh, addr))
+ return;
+
+ kdb_printf("\tbh 0x%lx bno %8llu [%s]\n", addr,
+ (unsigned long long)bh.b_blocknr,
+ map_flags(bh.b_state, bh_state_vals));
+}
+
+/*
+ * Print one table row for @page: index, refcount, zone, nid, raw flag
+ * bits, then each set flag by name via the Page##type() test macros.
+ * @first selects whether the column header line is printed first.
+ * NOTE(review): unlike the "page" command this dereferences @page and
+ * the buffer_head b_this_page chain directly (no kdb_getarea copy), so
+ * it assumes the pointers are valid in the current context.
+ */
+static void
+kdbm_show_page(struct page *page, int first)
+{
+ if (first)
+ kdb_printf("page_struct index cnt zone nid flags\n");
+ kdb_printf("%p%s %6lu %5d %3ld %3ld 0x%lx",
+ page_address(page), sizeof(void *) == 4 ? " " : "",
+ page->index, atomic_read(&(page->_count)),
+ page_zonenum(page), page_to_nid(page),
+ page->flags & (~0UL >> ZONES_SHIFT));
+#define kdb_page_flags(page, type) if (Page ## type(page)) kdb_printf(" " #type);
+ kdb_page_flags(page, Locked);
+ kdb_page_flags(page, Error);
+ kdb_page_flags(page, Referenced);
+ kdb_page_flags(page, Uptodate);
+ kdb_page_flags(page, Dirty);
+ kdb_page_flags(page, LRU);
+ kdb_page_flags(page, Active);
+ kdb_page_flags(page, Slab);
+ kdb_page_flags(page, Checked);
+ if (page->flags & (1UL << PG_arch_1))
+ kdb_printf(" arch_1");
+ kdb_page_flags(page, Reserved);
+ kdb_page_flags(page, Private);
+ kdb_page_flags(page, Writeback);
+ kdb_page_flags(page, Nosave);
+ kdb_page_flags(page, Compound);
+ kdb_page_flags(page, SwapCache);
+ kdb_page_flags(page, MappedToDisk);
+ kdb_page_flags(page, Reclaim);
+ kdb_page_flags(page, NosaveFree);
+ kdb_page_flags(page, Uncached);
+
+ /* PageHighMem is not a flag any more, but treat it as one */
+ kdb_page_flags(page, HighMem);
+
+ if (page_has_buffers(page)) {
+ struct buffer_head *head, *bh;
+ kdb_printf("\n");
+ head = bh = page_buffers(page);
+ do {
+ do_buffer((unsigned long) bh);
+ } while ((bh = bh->b_this_page) != head);
- } else if (page->u.private) {
- kdb_printf(" private= 0x%lx", page->u.private);
++ } else if (page->private) {
++ kdb_printf(" private= 0x%lx", page->private);
+ }
+ kdb_printf("\n");
+#undef kdb_page_flags
+}
+
+/*
+ * kdb command "inode_pages <inode *> [index]": copy the inode and its
+ * i_mapping into scratch kmallocs, then walk the mapping's radix tree
+ * printing each page via kdbm_show_page().  With a second argument only
+ * the page at that index is shown.
+ * NOTE(review): on kmalloc failure this jumps to out with diag still
+ * holding the (successful) kdbgetaddrarg result, so 0 is returned as if
+ * the command succeeded.  The radix tree and the struct page pointers it
+ * yields are dereferenced directly, not copied with kdb_getarea.
+ */
+static int
+kdbm_inode_pages(int argc, const char **argv, const char **envp,
+ struct pt_regs *regs)
+{
+ struct inode *inode = NULL;
+ struct address_space *ap = NULL;
+ unsigned long addr, addr1 = 0;
+ long offset = 0;
+ int nextarg;
+ int diag;
+ pgoff_t next = 0;
+ struct page *page;
+ int first;
+
+ nextarg = 1;
+ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs);
+ if (diag)
+ goto out;
+
+ if (argc == 2) {
+ /* Optional second argument: a single page index to look up. */
+ nextarg = 2;
+ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr1,
+ &offset, NULL, regs);
+ if (diag)
+ goto out;
+ kdb_printf("Looking for page index 0x%lx ... \n", addr1);
+ next = addr1;
+ }
+
+ if (!(inode = kmalloc(sizeof(*inode), GFP_ATOMIC))) {
+ kdb_printf("kdbm_inode_pages: cannot kmalloc inode\n");
+ goto out;
+ }
+ if (!(ap = kmalloc(sizeof(*ap), GFP_ATOMIC))) {
+ kdb_printf("kdbm_inode_pages: cannot kmalloc ap\n");
+ goto out;
+ }
+ if ((diag = kdb_getarea(*inode, addr)))
+ goto out;
+ if (!inode->i_mapping) {
+ kdb_printf("inode has no mapping\n");
+ goto out;
+ }
+ if ((diag = kdb_getarea(*ap, (unsigned long) inode->i_mapping)))
+ goto out;
+
+ /* Run the pages in the radix tree, printing the state of each page */
+ first = 1;
+ while (radix_tree_gang_lookup(&ap->page_tree, (void **)&page, next, 1)) {
+ kdbm_show_page(page, first);
+ if (addr1)
+ break;
+ first = 0;
+ next = page->index + 1;
+ }
+
+out:
+ if (inode)
+ kfree(inode);
+ if (ap)
+ kfree(ap);
+ return diag;
+}
+
+/*
+ * kdb command "inode <vaddr>": copy the struct inode at @addr into a
+ * scratch kmalloc and print its identity (ino, counts, size, mode),
+ * list linkage, mapping/operations pointers and decoded i_state flags.
+ * NOTE(review): on kmalloc failure diag still holds the successful
+ * kdbgetaddrarg result, so 0 is returned despite the error message.
+ */
+static int
+kdbm_inode(int argc, const char **argv, const char **envp,
+ struct pt_regs *regs)
+{
+ struct inode *inode = NULL;
+ unsigned long addr;
+ unsigned char *iaddr;
+ long offset = 0;
+ int nextarg;
+ int diag;
+
+ if (argc != 1)
+ return KDB_ARGCOUNT;
+
+ nextarg = 1;
+ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs)))
+ goto out;
+ if (!(inode = kmalloc(sizeof(*inode), GFP_ATOMIC))) {
+ kdb_printf("kdbm_inode: cannot kmalloc inode\n");
+ goto out;
+ }
+ if ((diag = kdb_getarea(*inode, addr)))
+ goto out;
+
+ kdb_printf("struct inode at 0x%lx\n", addr);
+
+ kdb_printf(" i_ino = %lu i_count = %u i_size %Ld\n",
+ inode->i_ino, atomic_read(&inode->i_count),
+ inode->i_size);
+
+ kdb_printf(" i_mode = 0%o i_nlink = %d i_rdev = 0x%x\n",
+ inode->i_mode, inode->i_nlink,
+ inode->i_rdev);
+
+ kdb_printf(" i_hash.nxt = 0x%p i_hash.pprev = 0x%p\n",
+ inode->i_hash.next,
+ inode->i_hash.pprev);
+
+ kdb_printf(" i_list.nxt = 0x%p i_list.prv = 0x%p\n",
+ list_entry(inode->i_list.next, struct inode, i_list),
+ list_entry(inode->i_list.prev, struct inode, i_list));
+
+ kdb_printf(" i_dentry.nxt = 0x%p i_dentry.prv = 0x%p\n",
+ list_entry(inode->i_dentry.next, struct dentry, d_alias),
+ list_entry(inode->i_dentry.prev, struct dentry, d_alias));
+
+ kdb_printf(" i_sb = 0x%p i_op = 0x%p i_data = 0x%lx nrpages = %lu\n",
+ inode->i_sb, inode->i_op,
+ addr + offsetof(struct inode, i_data),
+ inode->i_data.nrpages);
+ kdb_printf(" i_fop= 0x%p i_flock = 0x%p i_mapping = 0x%p\n",
+ inode->i_fop, inode->i_flock, inode->i_mapping);
+
+ kdb_printf(" i_flags 0x%x i_state 0x%lx [%s]",
+ inode->i_flags, inode->i_state,
+ map_flags(inode->i_state, inode_flag_vals));
+
+ /* Report where the fs-specific union lives in the original inode. */
+ iaddr = (char *)addr;
+ iaddr += offsetof(struct inode, u);
+
+ kdb_printf(" fs specific info @ 0x%p\n", iaddr);
+out:
+ if (inode)
+ kfree(inode);
+ return diag;
+}
+
+/*
+ * kdb command "sb <vaddr>": copy the struct super_block at @addr into a
+ * scratch kmalloc and print device, blocksize, flags, root dentry,
+ * dirty-list state, frozen state and s_id.
+ * NOTE(review): like kdbm_inode, a kmalloc failure leaves diag at 0 and
+ * the command reports success.
+ */
+static int
+kdbm_sb(int argc, const char **argv, const char **envp,
+ struct pt_regs *regs)
+{
+ struct super_block *sb = NULL;
+ unsigned long addr;
+ long offset = 0;
+ int nextarg;
+ int diag;
+
+ if (argc != 1)
+ return KDB_ARGCOUNT;
+
+ nextarg = 1;
+ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs)))
+ goto out;
+ if (!(sb = kmalloc(sizeof(*sb), GFP_ATOMIC))) {
+ kdb_printf("kdbm_sb: cannot kmalloc sb\n");
+ goto out;
+ }
+ if ((diag = kdb_getarea(*sb, addr)))
+ goto out;
+
+ kdb_printf("struct super_block at 0x%lx\n", addr);
+ kdb_printf(" s_dev 0x%x blocksize 0x%lx\n", sb->s_dev, sb->s_blocksize);
+ kdb_printf(" s_flags 0x%lx s_root 0x%p\n", sb->s_flags, sb->s_root);
+ kdb_printf(" s_dirt %d s_dirty.next 0x%p s_dirty.prev 0x%p\n",
+ sb->s_dirt, sb->s_dirty.next, sb->s_dirty.prev);
+ kdb_printf(" s_frozen %d s_id [%s]\n", sb->s_frozen, sb->s_id);
+out:
+ if (sb)
+ kfree(sb);
+ return diag;
+}
+
+
+
+#if defined(CONFIG_X86) && !defined(CONFIG_X86_64)
+/* According to Steve Lord, this code is ix86 specific. Patches to extend it to
+ * other architectures will be gratefully accepted.
+ */
+/*
+ * kdb command "memmap": sweep every struct page in mem_map (0..max_mapnr)
+ * and print summary counts: slab/dirty/locked/buffered pages and a
+ * histogram of reference counts (0-7, plus an "8 or more" bucket).
+ * Returns 0 or the kdb_getarea() diagnostic on a fetch failure.
+ */
+static int
+kdbm_memmap(int argc, const char **argv, const char **envp,
+ struct pt_regs *regs)
+{
+ struct page page;
+ int i, page_count;
+ int slab_count = 0;
+ int dirty_count = 0;
+ int locked_count = 0;
+ int page_counts[9];
+ int buffered_count = 0;
+#ifdef buffer_delay
+ int delay_count = 0;
+#endif
+ int diag;
+ unsigned long addr;
+
+ addr = (unsigned long)mem_map;
+ page_count = max_mapnr;
+ memset(page_counts, 0, sizeof(page_counts));
+
+ for (i = 0; i < page_count; i++) {
+ if ((diag = kdb_getarea(page, addr)))
+ return(diag);
+ addr += sizeof(page);
+
+ if (PageSlab(&page))
+ slab_count++;
+ if (PageDirty(&page))
+ dirty_count++;
+ if (PageLocked(&page))
+ locked_count++;
+ if (page._count.counter < 8)
+ page_counts[page._count.counter]++;
+ else
+ page_counts[8]++;
+ if (page_has_buffers(&page)) {
+ buffered_count++;
+#ifdef buffer_delay
+ if (buffer_delay(page.buffers))
+ delay_count++;
+#endif
+ }
+
+ }
+
+ kdb_printf(" Total pages: %6d\n", page_count);
+ kdb_printf(" Slab pages: %6d\n", slab_count);
+ kdb_printf(" Dirty pages: %6d\n", dirty_count);
+ kdb_printf(" Locked pages: %6d\n", locked_count);
+ kdb_printf(" Buffer pages: %6d\n", buffered_count);
+#ifdef buffer_delay
+ kdb_printf(" Delalloc pages: %6d\n", delay_count);
+#endif
+ for (i = 0; i < 8; i++) {
+ kdb_printf(" %d page count: %6d\n",
+ i, page_counts[i]);
+ }
+ kdb_printf(" high page count: %6d\n", page_counts[8]);
+ return 0;
+}
+#endif /* CONFIG_X86 && !CONFIG_X86_64 */
+
+/*
+ * Module init: register each debugger command with kdb.  The "page" and
+ * "memmap" commands are config-dependent and must match the
+ * registrations removed in kdbm_pg_exit().
+ */
+static int __init kdbm_pg_init(void)
+{
+#ifndef CONFIG_DISCONTIGMEM
+ kdb_register("page", kdbm_page, "<vaddr>", "Display page", 0);
+#endif
+ kdb_register("inode", kdbm_inode, "<vaddr>", "Display inode", 0);
+ kdb_register("sb", kdbm_sb, "<vaddr>", "Display super_block", 0);
+ kdb_register("bh", kdbm_buffers, "<buffer head address>", "Display buffer", 0);
+ kdb_register("bio", kdbm_bio, "<bio address>", "Display bio", 0);
+ kdb_register("inode_pages", kdbm_inode_pages, "<inode *>", "Display pages in an inode", 0);
+ kdb_register("req", kdbm_request, "<vaddr>", "dump request struct", 0);
+ kdb_register("rqueue", kdbm_rqueue, "<vaddr>", "dump request queue", 0);
+#if defined(CONFIG_X86) && !defined(CONFIG_X86_64)
+ kdb_register("memmap", kdbm_memmap, "", "page table summary", 0);
+#endif
+
+ return 0;
+}
+
+
+/* Module exit: unregister every command registered in kdbm_pg_init(). */
+static void __exit kdbm_pg_exit(void)
+{
+#ifndef CONFIG_DISCONTIGMEM
+ kdb_unregister("page");
+#endif
+ kdb_unregister("inode");
+ kdb_unregister("sb");
+ kdb_unregister("bh");
+ kdb_unregister("bio");
+ kdb_unregister("inode_pages");
+ kdb_unregister("req");
+ kdb_unregister("rqueue");
+#if defined(CONFIG_X86) && !defined(CONFIG_X86_64)
+ kdb_unregister("memmap");
+#endif
+}
+
+module_init(kdbm_pg_init)
+module_exit(kdbm_pg_exit)
}
group_dead = atomic_dec_and_test(&tsk->signal->live);
if (group_dead) {
- del_timer_sync(&tsk->signal->real_timer);
+ hrtimer_cancel(&tsk->signal->real_timer);
exit_itimers(tsk->signal);
- acct_process(code);
}
+ acct_process(tsk, code, group_dead);
exit_mm(tsk);
exit_sem(tsk);
obj-$(CONFIG_NUMA) += mempolicy.o
obj-$(CONFIG_SPARSEMEM) += sparse.o
obj-$(CONFIG_SHMEM) += shmem.o
+obj-$(CONFIG_TMPFS_POSIX_ACL) += shmem_acl.o
obj-$(CONFIG_TINY_SHMEM) += tiny-shmem.o
+ obj-$(CONFIG_SLOB) += slob.o
+ obj-$(CONFIG_SLAB) += slab.o
obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
obj-$(CONFIG_FS_XIP) += filemap_xip.o
}
}
+ static void shmem_truncate(struct inode *inode)
+ {
+ shmem_truncate_range(inode, inode->i_size, (loff_t)-1);
+ }
+
+extern struct generic_acl_operations shmem_acl_ops;
+
static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = dentry->d_inode;
static struct inode_operations shmem_inode_operations = {
.truncate = shmem_truncate,
.setattr = shmem_notify_change,
+ .truncate_range = shmem_truncate_range,
+#ifdef CONFIG_TMPFS_POSIX_ACL
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
+ .listxattr = generic_listxattr,
+ .removexattr = generic_removexattr,
+ .permission = shmem_permission,
+#endif
++
};
static struct inode_operations shmem_dir_inode_operations = {
To compile it as a module, choose M here. If unsure, say N.
+ config IP_NF_MATCH_POLICY
+ tristate "IPsec policy match support"
+ depends on IP_NF_IPTABLES && XFRM
+ help
+ Policy matching allows you to match packets based on the
+ IPsec policy that was used during decapsulation/will
+ be used during encapsulation.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config IP_NF_MATCH_IPV4OPTIONS
+ tristate 'IPV4OPTIONS match support'
+ depends on IP_NF_IPTABLES
+ help
+ This option adds an IPV4OPTIONS match.
+ It allows you to filter options like source routing,
+ record route, timestamp and router-alert.
+
+ If you say Y here, try iptables -m ipv4options --help for more information.
+
+ If you want to compile it as a module, say M here and read
+ Documentation/modules.txt. If unsure, say `N'.
+
# `filter', generic and specific targets
config IP_NF_FILTER
tristate "Packet filtering"
obj-$(CONFIG_IP_NF_MATCH_REALM) += ipt_realm.o
obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
obj-$(CONFIG_IP_NF_MATCH_PHYSDEV) += ipt_physdev.o
+ obj-$(CONFIG_IP_NF_MATCH_POLICY) += ipt_policy.o
obj-$(CONFIG_IP_NF_MATCH_COMMENT) += ipt_comment.o
obj-$(CONFIG_IP_NF_MATCH_STRING) += ipt_string.o
- obj-$(CONFIG_IP_NF_MATCH_POLICY) += ipt_policy.o
+obj-$(CONFIG_IP_NF_MATCH_IPV4OPTIONS) += ipt_ipv4options.o
# targets
obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
#include <linux/ptrace.h>
#include <linux/moduleparam.h>
-static struct security_operations capability_ops = {
- .ptrace = cap_ptrace,
- .capget = cap_capget,
- .capset_check = cap_capset_check,
- .capset_set = cap_capset_set,
- .capable = cap_capable,
- .settime = cap_settime,
- .netlink_send = cap_netlink_send,
- .netlink_recv = cap_netlink_recv,
-
- .bprm_apply_creds = cap_bprm_apply_creds,
- .bprm_set_security = cap_bprm_set_security,
- .bprm_secureexec = cap_bprm_secureexec,
-
- .inode_setxattr = cap_inode_setxattr,
- .inode_removexattr = cap_inode_removexattr,
-
- .task_post_setuid = cap_task_post_setuid,
- .task_reparent_to_init = cap_task_reparent_to_init,
-
- .syslog = cap_syslog,
+/* Note: Capabilities are default now, even if CONFIG_SECURITY
+ * is enabled and no LSM is loaded. (Previously, the dummy
+ * functions would have been called in that case which resulted
+ * in a slightly unusable system.)
+ * The capability LSM may still be compiled and loaded; it won't
+ * make a difference though except for slowing down some operations
+ * a tiny bit and (more severely) for disallowing loading another LSM.
+ * To have it as LSM may still be useful: It could be stacked on top
+ * of another LSM (if the other LSM allows this or if the stacker
+ * is used).
+ * If the capability LSM is loaded, we do NOT register the
+ * capability_security_ops but a second structure capability_ops
+ * that has identical entries. We need to differentiate
+ * between capabilities used as default and used as LSM as in
+ * the latter case replacing it by just loading another LSM is
+ * not possible.
+ */
- .vm_enough_memory = cap_vm_enough_memory,
-};
+/* Struct from commoncaps */
+extern struct security_operations capability_security_ops;
+/* Struct to hold the copy */
+static struct security_operations capability_ops;
- #define MY_NAME __stringify(KBUILD_MODNAME)
-
/* flag to keep track of how we were registered */
static int secondary;