DOCBOOKS := z8530book.xml mcabook.xml device-drivers.xml \
kernel-hacking.xml kernel-locking.xml deviceiobook.xml \
writing_usb_driver.xml networking.xml \
- kernel-api.xml filesystems.xml lsm.xml usb.xml kgdb.xml \
+ kernel-api.xml filesystems.xml lsm.xml usb.xml kgdb.xml utrace.xml \
gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
- mac80211.xml debugobjects.xml sh.xml regulator.xml \
+ 80211.xml debugobjects.xml sh.xml regulator.xml \
alsa-driver-api.xml writing-an-alsa-driver.xml \
tracepoint.xml media.xml drm.xml
ifdef CONFIG_FRAME_POINTER
KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
else
+ # Some targets (ARM with Thumb2, for example) can't be built with frame
+ # pointers. For those, we don't have FUNCTION_TRACER automatically
+ # select FRAME_POINTER. However, FUNCTION_TRACER adds -pg, and this is
+ # incompatible with -fomit-frame-pointer with current GCC, so we don't use
+ # -fomit-frame-pointer with FUNCTION_TRACER.
+ ifndef CONFIG_FUNCTION_TRACER
KBUILD_CFLAGS += -fomit-frame-pointer
endif
+ endif
+ifdef CONFIG_UNWIND_INFO
+KBUILD_CFLAGS += -fasynchronous-unwind-tables
+LDFLAGS_vmlinux += --eh-frame-hdr
+endif
+
ifdef CONFIG_DEBUG_INFO
KBUILD_CFLAGS += -g
KBUILD_AFLAGS += -gdwarf-2
config X86_VSMP
bool "ScaleMP vSMP"
+ select PARAVIRT_GUEST
- select PARAVIRT
+ select PARAVIRT_ALL
depends on X86_64 && PCI
depends on X86_EXTENDED_PLATFORM
---help---
source "arch/x86/xen/Kconfig"
- config VMI
- bool "VMI Guest support (DEPRECATED)"
- select PARAVIRT_ALL
- depends on X86_32
- ---help---
- VMI provides a paravirtualized interface to the VMware ESX server
- (it could be used by other hypervisors in theory too, but is not
- at the moment), by linking the kernel to a GPL-ed ROM module
- provided by the hypervisor.
-
- As of September 2009, VMware has started a phased retirement
- of this feature from VMware's products. Please see
- feature-removal-schedule.txt for details. If you are
- planning to enable this option, please note that you cannot
- live migrate a VMI enabled VM to a future VMware product,
- which doesn't support VMI. So if you expect your kernel to
- seamlessly migrate to newer VMware products, keep this
- disabled.
-
config KVM_CLOCK
bool "KVM paravirtualized clock"
- select PARAVIRT
select PARAVIRT_CLOCK
---help---
Turning on this option will allow you to run a paravirtualized clock
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
-#else
+#endif
+
#ifndef __ASSEMBLY__
+#ifndef CONFIG_PARAVIRT_IRQ
- static inline unsigned long __raw_local_save_flags(void)
+ static inline unsigned long arch_local_save_flags(void)
{
return native_save_fl();
}
/*
* For spinlocks, etc:
*/
- static inline unsigned long __raw_local_irq_save(void)
+ static inline unsigned long arch_local_irq_save(void)
{
- unsigned long flags = __raw_local_save_flags();
-
- raw_local_irq_disable();
-
+ unsigned long flags = arch_local_save_flags();
+ arch_local_irq_disable();
return flags;
}
-#else
+#endif /* CONFIG_PARAVIRT_IRQ */
+
+#else /* __ASSEMBLY__ */
+#ifndef CONFIG_PARAVIRT_IRQ
#define ENABLE_INTERRUPTS(x) sti
#define DISABLE_INTERRUPTS(x) cli
+#endif /* !CONFIG_PARAVIRT_IRQ */
#ifdef CONFIG_X86_64
+#ifndef CONFIG_PARAVIRT_CPU
#define SWAPGS swapgs
/*
* Currently paravirt can't handle swapgs nicely when we
#endif /* __ASSEMBLY__ */
-#endif /* CONFIG_PARAVIRT */
#ifndef __ASSEMBLY__
- #define raw_local_save_flags(flags) \
- do { (flags) = __raw_local_save_flags(); } while (0)
-
- #define raw_local_irq_save(flags) \
- do { (flags) = __raw_local_irq_save(); } while (0)
-
- static inline int raw_irqs_disabled_flags(unsigned long flags)
+ static inline int arch_irqs_disabled_flags(unsigned long flags)
{
return !(flags & X86_EFLAGS_IF);
}
}
#endif
+#ifdef CONFIG_PARAVIRT_IRQ
- static inline void raw_safe_halt(void)
+ static inline void arch_safe_halt(void)
{
PVOP_VCALL0(pv_irq_ops.safe_halt);
}
#define __PV_IS_CALLEE_SAVE(func) \
((struct paravirt_callee_save) { func })
+#ifdef CONFIG_PARAVIRT_IRQ
- static inline unsigned long __raw_local_save_flags(void)
+ static inline unsigned long arch_local_save_flags(void)
{
return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
}
{
unsigned long f;
- f = __raw_local_save_flags();
- raw_local_irq_disable();
+ f = arch_local_save_flags();
+ arch_local_irq_disable();
return f;
}
+#endif /* CONFIG_PARAVIRT_IRQ */
/* Make sure as little as possible of this mess escapes. */
extern spinlock_t pgd_lock;
extern struct list_head pgd_list;
+ extern struct mm_struct *pgd_page_get_mm(struct page *page);
+
-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_MMU
#include <asm/paravirt.h>
#else /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte) native_set_pte(ptep, pte)
#endif
#ifdef CONFIG_X86_64
- #if defined(CONFIG_PARAVIRT_MMU) || defined(CONFIG_XEN)
-#ifdef CONFIG_PARAVIRT
++#ifdef CONFIG_PARAVIRT_MMU
/* Paravirtualized systems may not have PSE or PGE available */
#define NEED_PSE 0
#define NEED_PGE 0
}
return 0;
}
- #endif
+static int __init force_acpi_rsdt(const struct dmi_system_id *d)
+{
+ if (!acpi_force) {
+ printk(KERN_NOTICE "%s detected: force use of acpi=rsdt\n",
+ d->ident);
+ acpi_rsdt_forced = 1;
+ } else {
+ printk(KERN_NOTICE
+ "Warning: acpi=force overrules DMI blacklist: "
+ "acpi=rsdt\n");
+ }
+ return 0;
+}
+
/*
* If your system is blacklisted here, but you find that acpi=force
* works for you, please contact linux-acpi@vger.kernel.org
obj-$(CONFIG_X86_ANCIENT_MCE) += winchip.o p5.o
obj-$(CONFIG_X86_MCE_INTEL) += mce_intel.o
+obj-$(CONFIG_X86_MCE_XEON75XX) += mce-xeon75xx.o
obj-$(CONFIG_X86_MCE_AMD) += mce_amd.o
- obj-$(CONFIG_X86_XEN_MCE) += mce_dom0.o
obj-$(CONFIG_X86_MCE_THRESHOLD) += threshold.o
obj-$(CONFIG_X86_MCE_INJECT) += mce-inject.o
CFI_ENDPROC
END(call_softirq)
+#ifdef CONFIG_STACK_UNWIND
+ENTRY(arch_unwind_init_running)
+ CFI_STARTPROC
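+	/*
+	 * Assumed calling convention (inferred from the register shuffling
+	 * below): %rdi = pt_regs-style save area, %rsi = callback,
+	 * %rdx = callback argument. The callee-saved registers and the
+	 * return address are recorded in the save area, then the callback
+	 * is tail-called with %rdi unchanged and the argument in %rsi.
+	 */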
+ movq %r15, R15(%rdi)
+ movq %r14, R14(%rdi)
+ xchgq %rsi, %rdx
+ movq %r13, R13(%rdi)
+ movq %r12, R12(%rdi)
+ xorl %eax, %eax
+ movq %rbp, RBP(%rdi)
+ movq %rbx, RBX(%rdi)
+ movq (%rsp), %r9
+ xchgq %rdx, %rcx
+ movq %rax, R11(%rdi)
+ movq %rax, R10(%rdi)
+ movq %rax, R9(%rdi)
+ movq %rax, R8(%rdi)
+ movq %rax, RAX(%rdi)
+ movq %rax, RCX(%rdi)
+ movq %rax, RDX(%rdi)
+ movq %rax, RSI(%rdi)
+ movq %rax, RDI(%rdi)
+ movq %rax, ORIG_RAX(%rdi)
+ movq %r9, RIP(%rdi)
+ leaq 8(%rsp), %r9
+ movq $__KERNEL_CS, CS(%rdi)
+ movq %rax, EFLAGS(%rdi)
+ movq %r9, RSP(%rdi)
+ movq $__KERNEL_DS, SS(%rdi)
+ jmpq *%rcx
+ CFI_ENDPROC
+END(arch_unwind_init_running)
+#endif
+
- #ifdef CONFIG_PARAVIRT_XEN
+ #ifdef CONFIG_XEN
zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
/*
EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(empty_zero_page);
- #if !defined(CONFIG_PARAVIRT_CPU) && !defined(CONFIG_XEN)
-#ifndef CONFIG_PARAVIRT
++#ifndef CONFIG_PARAVIRT_CPU
EXPORT_SYMBOL(native_load_gs_index);
#endif
set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
}
+EXPORT_SYMBOL_GPL(mark_rodata_ro);
+
+void mark_rodata_rw(void)
+{
+ unsigned long start = PFN_ALIGN(_text);
+ unsigned long size = PFN_ALIGN(_etext) - start;
+
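+	/* Skip past the text section: only [_etext, __end_rodata) is write-enabled again. */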
+ start += size;
+ size = (unsigned long)__end_rodata - start;
+ set_pages_rw_force(virt_to_page(start), size >> PAGE_SHIFT);
+ printk(KERN_INFO "Write enabling the kernel read-only data: %luk\n",
+ size >> 10);
+}
+EXPORT_SYMBOL_GPL(mark_rodata_rw);
#endif
- int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
- int flags)
- {
- return reserve_bootmem(phys, len, flags);
- }
set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif
- free_init_pages("unused kernel memory",
- (unsigned long) page_address(virt_to_page(text_end)),
- (unsigned long)
+ if (!initmem_freed) {
+ initmem_freed = 1;
+ free_init_pages("unused kernel memory",
+ (unsigned long)
+ page_address(virt_to_page(text_end)),
+ (unsigned long)
page_address(virt_to_page(rodata_start)));
- free_init_pages("unused kernel memory",
- (unsigned long) page_address(virt_to_page(rodata_end)),
- (unsigned long) page_address(virt_to_page(data_start)));
+ free_init_pages("unused kernel memory",
+ (unsigned long)
+ page_address(virt_to_page(rodata_end)),
+ (unsigned long)
+ page_address(virt_to_page(data_start)));
+ }
}
+EXPORT_SYMBOL_GPL(mark_rodata_ro);
+
+void mark_rodata_rw(void)
+{
+ unsigned long rodata_start =
+ ((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;
+ unsigned long end = (unsigned long) &__end_rodata_hpage_align;
+ printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
+ (end - rodata_start) >> 10);
+ set_memory_rw_force(rodata_start, (end - rodata_start) >> PAGE_SHIFT);
+}
+EXPORT_SYMBOL_GPL(mark_rodata_rw);
#endif
- int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
- int flags)
- {
- #ifdef CONFIG_NUMA
- int nid, next_nid;
- int ret;
- #endif
- unsigned long pfn = phys >> PAGE_SHIFT;
-
- if (pfn >= max_pfn) {
- /*
- * This can happen with kdump kernels when accessing
- * firmware tables:
- */
- if (pfn < max_pfn_mapped)
- return -EFAULT;
-
- printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %lu\n",
- phys, len);
- return -EFAULT;
- }
-
- /* Should check here against the e820 map to avoid double free */
- #ifdef CONFIG_NUMA
- nid = phys_to_nid(phys);
- next_nid = phys_to_nid(phys + len - 1);
- if (nid == next_nid)
- ret = reserve_bootmem_node(NODE_DATA(nid), phys, len, flags);
- else
- ret = reserve_bootmem(phys, len, flags);
-
- if (ret != 0)
- return ret;
-
- #else
- reserve_bootmem(phys, len, flags);
- #endif
-
- if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
- dma_reserve += len / PAGE_SIZE;
- set_dma_reserve(dma_reserve);
- }
-
- return 0;
- }
-
int kern_addr_valid(unsigned long addr)
{
unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
# This Kconfig describes xen options
#
- config PARAVIRT_XEN
+ config XEN
bool "Xen guest support"
- select PARAVIRT
+ select PARAVIRT_ALL
select PARAVIRT_CLOCK
depends on X86_64 || (X86_32 && X86_PAE && !X86_VISWS)
depends on X86_CMPXCHG && X86_TSC
tz->kelvin_offset = 2732;
}
+static struct dmi_system_id thermal_psv_dmi_table[] = {
+	{
+		.ident = "IBM ThinkPad T41",
+		.matches = {
+			DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T41"),
+		},
+	},
+	{
+		.ident = "IBM ThinkPad T42",
+		.matches = {
+			DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T42"),
+		},
+	},
+	{
+		.ident = "IBM ThinkPad T43",
+		.matches = {
+			DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T43"),
+		},
+	},
+	{
+		.ident = "IBM ThinkPad T41p",
+		.matches = {
+			DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T41p"),
+		},
+	},
+	{
+		.ident = "IBM ThinkPad T42p",
+		.matches = {
+			DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T42p"),
+		},
+	},
+	{
+		.ident = "IBM ThinkPad T43p",
+		.matches = {
+			DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T43p"),
+		},
+	},
+	{
+		.ident = "IBM ThinkPad R40",
+		.matches = {
+			DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad R40"),
+		},
+	},
+	{
+		.ident = "IBM ThinkPad R50p",
+		.matches = {
+			DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad R50p"),
+		},
+	},
+	{},
+};
+
++static int acpi_thermal_set_polling(struct acpi_thermal *tz, int seconds)
++{
++ if (!tz)
++ return -EINVAL;
++
++ /* Convert value to deci-seconds */
++ tz->polling_frequency = seconds * 10;
++
++ tz->thermal_zone->polling_delay = seconds * 1000;
++
++ if (tz->tz_enabled)
++ thermal_zone_device_update(tz->thermal_zone);
++
++ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
++ "Polling frequency set to %lu seconds\n",
++ tz->polling_frequency/10));
++
++ return 0;
++}
++
static int acpi_thermal_add(struct acpi_device *device)
{
int result = 0;
if (result)
goto free_memory;
+ if (dmi_check_system(thermal_psv_dmi_table)) {
+ if (tz->trips.passive.flags.valid &&
+ tz->trips.passive.temperature > CELSIUS_TO_KELVIN(85)) {
+			printk(KERN_INFO "Adjust passive trip point from %lu"
+ " to %lu\n",
+ KELVIN_TO_CELSIUS(tz->trips.passive.temperature),
+ KELVIN_TO_CELSIUS(tz->trips.passive.temperature - 150));
+ tz->trips.passive.temperature -= 150;
+ acpi_thermal_set_polling(tz, 5);
+ }
+ }
+
- result = acpi_thermal_add_fs(device);
- if (result)
- goto unregister_thermal_zone;
-
printk(KERN_INFO PREFIX "%s [%s] (%ld C)\n",
acpi_device_name(device), acpi_device_bid(device),
KELVIN_TO_CELSIUS(tz->temperature));
obj-$(CONFIG_BLK_DEV_UB) += ub.o
obj-$(CONFIG_BLK_DEV_HD) += hd.o
- obj-$(CONFIG_PARAVIRT_XEN_BLKDEV_FRONTEND) += xen-blkfront.o
+ obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o
obj-$(CONFIG_BLK_DEV_DRBD) += drbd/
+ obj-$(CONFIG_BLK_DEV_RBD) += rbd.o
+obj-$(CONFIG_CIPHER_TWOFISH) += loop_fish2.o
+
swim_mod-objs := swim.o swim_asm.o
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mutex.h>
- #include <linux/smp_lock.h>
#include <linux/gfp.h>
+ #include <linux/compat.h>
+#include <linux/vmalloc.h>
#include <asm/uaccess.h>
raw_ioctl(struct file *filp, unsigned int command, unsigned long arg)
{
struct block_device *bdev = filp->private_data;
- int ret;
+ return blkdev_ioctl(bdev, 0, command, arg);
+ }
+
+ static int bind_set(int number, u64 major, u64 minor)
+ {
+ dev_t dev = MKDEV(major, minor);
+ struct raw_device_data *rawdev;
+ int err = 0;
+
- if (number <= 0 || number >= MAX_RAW_MINORS)
++ if (number <= 0 || number >= max_raw_minors)
+ return -EINVAL;
+
+ if (MAJOR(dev) != major || MINOR(dev) != minor)
+ return -EINVAL;
- lock_kernel();
- ret = blkdev_ioctl(bdev, 0, command, arg);
- unlock_kernel();
+ rawdev = &raw_devices[number];
- return ret;
+ /*
+ * This is like making block devices, so demand the
+ * same capability
+ */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ /*
+	 * For now, we don't need to check whether the underlying
+	 * block device is present; we can do that when the raw
+	 * device is opened. Just check that the major/minor
+	 * numbers make sense.
+ */
+
+ if (MAJOR(dev) == 0 && dev != 0)
+ return -EINVAL;
+
+ mutex_lock(&raw_mutex);
+ if (rawdev->inuse) {
+ mutex_unlock(&raw_mutex);
+ return -EBUSY;
+ }
+ if (rawdev->binding) {
+ bdput(rawdev->binding);
+ module_put(THIS_MODULE);
+ }
+ if (!dev) {
+ /* unbind */
+ rawdev->binding = NULL;
+ device_destroy(raw_class, MKDEV(RAW_MAJOR, number));
+ } else {
+ rawdev->binding = bdget(dev);
+ if (rawdev->binding == NULL) {
+ err = -ENOMEM;
+ } else {
+ dev_t raw = MKDEV(RAW_MAJOR, number);
+ __module_get(THIS_MODULE);
+ device_destroy(raw_class, raw);
+ device_create(raw_class, NULL, raw, NULL,
+ "raw%d", number);
+ }
+ }
+ mutex_unlock(&raw_mutex);
+ return err;
}
- static void bind_device(struct raw_config_request *rq)
+ static int bind_get(int number, dev_t *dev)
{
- device_destroy(raw_class, MKDEV(RAW_MAJOR, rq->raw_minor));
- device_create(raw_class, NULL, MKDEV(RAW_MAJOR, rq->raw_minor), NULL,
- "raw%d", rq->raw_minor);
+ struct raw_device_data *rawdev;
+ struct block_device *bdev;
+
+ if (number <= 0 || number >= MAX_RAW_MINORS)
+ return -EINVAL;
+
+ rawdev = &raw_devices[number];
+
+ mutex_lock(&raw_mutex);
+ bdev = rawdev->binding;
+ *dev = bdev ? bdev->bd_dev : 0;
+ mutex_unlock(&raw_mutex);
+ return 0;
}
/*
}
}
+ void vcs_scr_updated(struct vc_data *vc)
+ {
+ notify_update(vc);
+ }
+
+#ifdef CONFIG_BOOTSPLASH
+void con_remap_def_color(struct vc_data *vc, int new_color)
+{
+ unsigned short *sbuf = screenpos(vc, 0, 1);
+ unsigned c, len = vc->vc_screenbuf_size >> 1;
+ int old_color;
+
+ if (sbuf) {
+ old_color = vc->vc_def_color << 8;
+ new_color <<= 8;
+		/* Re-map both the background (0xf000) and foreground (0x0f00)
+		 * attribute nibbles of every character that still carries the
+		 * old default colour. */
+		while (len--) {
+			c = scr_readw(sbuf);
+			if (((c ^ old_color) & 0xf000) == 0)
+				scr_writew(c ^= (old_color ^ new_color) & 0xf000, sbuf);
+			if (((c ^ old_color) & 0x0f00) == 0)
+				scr_writew(c ^= (old_color ^ new_color) & 0x0f00, sbuf);
+			sbuf++;
+		}
+ new_color >>= 8;
+ }
+ vc->vc_def_color = vc->vc_color = new_color;
+ update_attr(vc);
+}
+#endif
+
/*
* Visible symbols for modules
*/
unsigned char pkt_type; /* packet type - old, new, etc */
unsigned char mode; /* current mode byte */
int scroll;
+
+ struct serio *pt_port; /* Pass-through serio port */
+ struct synaptics_led *led;
};
void synaptics_module_init(void);
obj-$(CONFIG_TOUCHSCREEN_GUNZE) += gunze.o
obj-$(CONFIG_TOUCHSCREEN_EETI) += eeti_ts.o
obj-$(CONFIG_TOUCHSCREEN_ELO) += elo.o
+obj-$(CONFIG_TOUCHSCREEN_ELOUSB) += elousb.o
obj-$(CONFIG_TOUCHSCREEN_FUJITSU) += fujitsu_ts.o
obj-$(CONFIG_TOUCHSCREEN_INEXIO) += inexio.o
+ obj-$(CONFIG_TOUCHSCREEN_INTEL_MID) += intel-mid-touch.o
+ obj-$(CONFIG_TOUCHSCREEN_LPC32XX) += lpc32xx_ts.o
obj-$(CONFIG_TOUCHSCREEN_MC13783) += mc13783_ts.o
obj-$(CONFIG_TOUCHSCREEN_MCS5000) += mcs5000_ts.o
obj-$(CONFIG_TOUCHSCREEN_MIGOR) += migor_ts.o
struct bio *bio;
for (bio = bios->head; bio; bio = bio->bi_next) {
- if (bio_empty_barrier(bio))
+ if (bio->bi_rw & REQ_FLUSH)
continue;
- rh_inc(rh, dm_rh_bio_to_region(rh, bio));
+ dm_rh_inc(rh, dm_rh_bio_to_region(rh, bio));
}
}
EXPORT_SYMBOL_GPL(dm_rh_inc_pending);
static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
struct mapped_device *md;
+ int retval = 0;
- lock_kernel();
+ mutex_lock(&dm_mutex);
spin_lock(&_minor_lock);
md = bdev->bd_disk->private_data;
out:
spin_unlock(&_minor_lock);
- unlock_kernel();
+ mutex_unlock(&dm_mutex);
- return md ? 0 : -ENXIO;
+ return retval;
}
static int dm_blk_close(struct gendisk *disk, fmode_t mode)
unsigned int i;
int ret;
+ ret = mutex_lock_interruptible(&chain->ctrl_mutex);
+ if (ret < 0)
+ return -ERESTARTSYS;
+
+	if ((chain->dev->quirks & UVC_QUIRK_HUE_EPIPE) &&
+	    (v4l2_ctrl->id == V4L2_CID_HUE)) {
+		/* Don't return with ctrl_mutex held. */
+		ret = -EINVAL;
+		goto done;
+	}
+
ctrl = uvc_find_control(chain, v4l2_ctrl->id, &mapping);
- if (ctrl == NULL)
- return -EINVAL;
+ if (ctrl == NULL) {
+ ret = -EINVAL;
+ goto done;
+ }
memset(v4l2_ctrl, 0, sizeof *v4l2_ctrl);
v4l2_ctrl->id = mapping->id;
struct net_device *netdev = adapter->netdev;
irqreturn_t (*handler)(int, void *);
int i, vector, q_vectors, err;
- int ri=0, ti=0;
+ int ri = 0, ti = 0;
+ int irq_flags;
/* Decrement for Other and TCP Timer vectors */
q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
for (vector = 0; vector < q_vectors; vector++) {
handler = SET_HANDLER(adapter->q_vector[vector]);
+ irq_flags = 0;
- if(handler == &ixgbe_msix_clean_rx) {
+ if (handler == &ixgbe_msix_clean_rx) {
sprintf(adapter->name[vector], "%s-%s-%d",
netdev->name, "rx", ri++);
+ if (entropy)
+ irq_flags = IRQF_SAMPLE_RANDOM;
- }
- else if(handler == &ixgbe_msix_clean_tx) {
+ } else if (handler == &ixgbe_msix_clean_tx) {
sprintf(adapter->name[vector], "%s-%s-%d",
netdev->name, "tx", ti++);
- }
- else {
- } else
++ } else {
sprintf(adapter->name[vector], "%s-%s-%d",
netdev->name, "TxRx", vector);
+ if (entropy)
+ irq_flags = IRQF_SAMPLE_RANDOM;
+ }
err = request_irq(adapter->msix_entries[vector].vector,
- handler, irq_flags, adapter->name[vector],
- adapter->q_vector[vector]);
- handler, 0, adapter->name[vector],
++ handler, irq_flags, adapter->name[vector],
+ adapter->q_vector[vector]);
if (err) {
e_err(probe, "request_irq failed for MSIX interrupt "
"Error: %d\n", err);
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
err = ixgbe_request_msix_irqs(adapter);
} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
- err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
+ err = request_irq(adapter->pdev->irq, ixgbe_intr, irq_flags,
- netdev->name, netdev);
+ netdev->name, netdev);
} else {
- err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
+ irq_flags |= IRQF_SHARED;
+ err = request_irq(adapter->pdev->irq, ixgbe_intr, irq_flags,
- netdev->name, netdev);
+ netdev->name, netdev);
}
if (err)
#include "tg3.h"
+static int entropy = 0;
+module_param(entropy, int, 0);
+MODULE_PARM_DESC(entropy, "Allow tg3 to populate the /dev/random entropy pool");
+
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
- #define TG3_MIN_NUM 113
+ #define TG3_MIN_NUM 115
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
- #define DRV_MODULE_RELDATE "August 2, 2010"
+ #define DRV_MODULE_RELDATE "October 14, 2010"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
tw32_f(MAC_MODE, tp->mac_mode);
udelay(40);
+ tg3_phy_eee_adjust(tp, current_link_up);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
+ if (tp->link_config.active_speed == SPEED_10)
+ tw32(MAC_MI_STAT,
+ MAC_MI_STAT_10MBPS_MODE |
+ MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
+ else
+ tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
+ }
+
if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
/* Polled via timer. */
tw32_f(MAC_EVENT, 0);
if (scan_active)
goto out;
- if (memcmp(&priv->active_rxon,
- &priv->staging_rxon, sizeof(priv->staging_rxon)))
- iwlcore_commit_rxon(priv);
- else
- IWL_DEBUG_INFO(priv, "Not re-sending same RXON configuration.\n");
+ for_each_context(priv, ctx) {
+ if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
+ iwlcore_commit_rxon(priv, ctx);
+ else
+ IWL_DEBUG_INFO(priv,
+ "Not re-sending same RXON configuration.\n");
+ }
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
+ priv->tx_power_user_lmt, conf->power_level);
+
+ iwl_set_tx_power(priv, conf->power_level, false);
+ }
+
+
-
out:
IWL_DEBUG_MAC80211(priv, "leave\n");
mutex_unlock(&priv->mutex);
struct alua_dh_data *h = get_alua_data(sdev);
int err = SCSI_DH_OK;
- if (h->group_id != -1) {
- err = alua_rtpg(sdev, h);
- if (err != SCSI_DH_OK)
- goto out;
- }
+ err = alua_rtpg(sdev, h);
+ if (err != SCSI_DH_OK)
+ goto out;
- if (h->tpgs & TPGS_MODE_EXPLICIT &&
+ if ((h->tpgs & TPGS_MODE_EXPLICIT) &&
- h->state != TPGS_STATE_OPTIMIZED) {
+ h->state != TPGS_STATE_OPTIMIZED &&
+ h->state != TPGS_STATE_LBA_DEPENDENT) {
h->callback_fn = fn;
h->callback_data = data;
err = submit_stpg(h);
}
static const struct scsi_dh_devlist rdac_dev_list[] = {
- {"IBM", "1722"},
- {"IBM", "1724"},
- {"IBM", "1726"},
- {"IBM", "1742"},
- {"IBM", "1745"},
- {"IBM", "1746"},
- {"IBM", "1814"},
- {"IBM", "1815"},
- {"IBM", "1818"},
- {"IBM", "3526"},
- {"SGI", "TP9400"},
- {"SGI", "TP9500"},
- {"SGI", "IS"},
- {"STK", "OPENstorage D280"},
- {"SUN", "CSM200_R"},
- {"SUN", "LCSM100_I"},
- {"SUN", "LCSM100_S"},
- {"SUN", "LCSM100_E"},
- {"SUN", "LCSM100_F"},
- {"DELL", "MD3000"},
- {"DELL", "MD3000i"},
- {"DELL", "MD32xx"},
- {"DELL", "MD32xxi"},
- {"DELL", "MD36xxi"},
- {"LSI", "INF-01-00"},
- {"ENGENIO", "INF-01-00"},
- {"STK", "FLEXLINE 380"},
- {"SUN", "CSM100_R_FC"},
- {"SUN", "STK6580_6780"},
- {"SUN", "SUN_6180"},
- {NULL, NULL},
+ {"IBM", "1722", 0},
+ {"IBM", "1724", 0},
+ {"IBM", "1726", 0},
+ {"IBM", "1742", 0},
+ {"IBM", "1745", 0},
+ {"IBM", "1746", 0},
+ {"IBM", "1814", 0},
+ {"IBM", "1815", 0},
+ {"IBM", "1818", 0},
+ {"IBM", "3526", 0},
+ {"SGI", "TP9400", 0},
+ {"SGI", "TP9500", 0},
+ {"SGI", "IS", 0},
+ {"STK", "OPENstorage D280", 0},
+ {"SUN", "CSM200_R", 0},
+ {"SUN", "LCSM100_I", 0},
+ {"SUN", "LCSM100_S", 0},
+ {"SUN", "LCSM100_E", 0},
+ {"SUN", "LCSM100_F", 0},
+ {"DELL", "MD3000", 0},
+ {"DELL", "MD3000i", 0},
+ {"DELL", "MD32xx", 0},
+ {"DELL", "MD32xxi", 0},
+ {"DELL", "MD36xxi", 0},
+ {"LSI", "INF-01-00", 0},
+ {"ENGENIO", "INF-01-00", 0},
+ {"STK", "FLEXLINE 380", 0},
+ {"SUN", "CSM100_R_FC", 0},
++ {"SUN", "STK6580_6780", 0},
++ {"SUN", "SUN_6180", 0},
+ {NULL, NULL, 0},
};
static int rdac_bus_attach(struct scsi_device *sdev);
--- /dev/null
+ //------------------------------------------------------------------------------
+ // Copyright (c) 2009-2010 Atheros Corporation. All rights reserved.
+ //
+ //
+ // Permission to use, copy, modify, and/or distribute this software for any
+ // purpose with or without fee is hereby granted, provided that the above
+ // copyright notice and this permission notice appear in all copies.
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ // WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ // MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ // ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ // WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ // ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ //
+ //
+ //------------------------------------------------------------------------------
+ //==============================================================================
+ // HIF scatter implementation
+ //
+ // Author(s): ="Atheros"
+ //==============================================================================
+
+ #include <linux/mmc/card.h>
+ #include <linux/mmc/host.h>
+ #include <linux/mmc/sdio_func.h>
+ #include <linux/mmc/sdio_ids.h>
+ #include <linux/mmc/sdio.h>
+ #include <linux/kthread.h>
+ #include "hif_internal.h"
+ #define ATH_MODULE_NAME hif
+ #include "a_debug.h"
+
+ #ifdef HIF_LINUX_MMC_SCATTER_SUPPORT
+
+ #define _CMD53_ARG_READ 0
+ #define _CMD53_ARG_WRITE 1
+ #define _CMD53_ARG_BLOCK_BASIS 1
+ #define _CMD53_ARG_FIXED_ADDRESS 0
+ #define _CMD53_ARG_INCR_ADDRESS 1
+
+ #define SDIO_SET_CMD53_ARG(arg,rw,func,mode,opcode,address,bytes_blocks) \
+ (arg) = (((rw) & 1) << 31) | \
+ (((func) & 0x7) << 28) | \
+ (((mode) & 1) << 27) | \
+ (((opcode) & 1) << 26) | \
+ (((address) & 0x1FFFF) << 9) | \
+ ((bytes_blocks) & 0x1FF)
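+ /*
+  * The macro packs the standard SDIO CMD53 argument fields: bit 31 = R/W,
+  * bits 30:28 = function number, bit 27 = block mode, bit 26 = op code
+  * (fixed vs. incrementing address), bits 25:9 = register address,
+  * bits 8:0 = byte/block count.
+  */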
+
+ static void FreeScatterReq(HIF_DEVICE *device, HIF_SCATTER_REQ *pReq)
+ {
+ unsigned long flag;
+
+ spin_lock_irqsave(&device->lock, flag);
+
+ DL_ListInsertTail(&device->ScatterReqHead, &pReq->ListLink);
+
+ spin_unlock_irqrestore(&device->lock, flag);
+
+ }
+
+ static HIF_SCATTER_REQ *AllocScatterReq(HIF_DEVICE *device)
+ {
+ DL_LIST *pItem;
+ unsigned long flag;
+
+ spin_lock_irqsave(&device->lock, flag);
+
+ pItem = DL_ListRemoveItemFromHead(&device->ScatterReqHead);
+
+ spin_unlock_irqrestore(&device->lock, flag);
+
+ if (pItem != NULL) {
+ return A_CONTAINING_STRUCT(pItem, HIF_SCATTER_REQ, ListLink);
+ }
+
+ return NULL;
+ }
+
+ /* called by async task to perform the operation synchronously using direct MMC APIs */
+ A_STATUS DoHifReadWriteScatter(HIF_DEVICE *device, BUS_REQUEST *busrequest)
+ {
+ int i;
+ A_UINT8 rw;
+ A_UINT8 opcode;
+ struct mmc_request mmcreq;
+ struct mmc_command cmd;
+ struct mmc_data data;
+ HIF_SCATTER_REQ_PRIV *pReqPriv;
+ HIF_SCATTER_REQ *pReq;
+ A_STATUS status = A_OK;
+ struct scatterlist *pSg;
+
+ pReqPriv = busrequest->pScatterReq;
+
+ A_ASSERT(pReqPriv != NULL);
+
+ pReq = pReqPriv->pHifScatterReq;
+
+ memset(&mmcreq, 0, sizeof(struct mmc_request));
+ memset(&cmd, 0, sizeof(struct mmc_command));
+ memset(&data, 0, sizeof(struct mmc_data));
+
+ data.blksz = HIF_MBOX_BLOCK_SIZE;
+ data.blocks = pReq->TotalLength / HIF_MBOX_BLOCK_SIZE;
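+     /* Block-basis transfer: TotalLength is expected to be a whole number of
+      * HIF_MBOX_BLOCK_SIZE blocks. */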
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, ("HIF-SCATTER: (%s) Address: 0x%X, (BlockLen: %d, BlockCount: %d) , (tot:%d,sg:%d)\n",
+ (pReq->Request & HIF_WRITE) ? "WRITE":"READ", pReq->Address, data.blksz, data.blocks,
+ pReq->TotalLength,pReq->ValidScatterEntries));
+
+ if (pReq->Request & HIF_WRITE) {
+ rw = _CMD53_ARG_WRITE;
+ data.flags = MMC_DATA_WRITE;
+ } else {
+ rw = _CMD53_ARG_READ;
+ data.flags = MMC_DATA_READ;
+ }
+
+ if (pReq->Request & HIF_FIXED_ADDRESS) {
+ opcode = _CMD53_ARG_FIXED_ADDRESS;
+ } else {
+ opcode = _CMD53_ARG_INCR_ADDRESS;
+ }
+
+ /* fill SG entries */
+ pSg = pReqPriv->sgentries;
+ sg_init_table(pSg, pReq->ValidScatterEntries);
+
+ /* assemble SG list */
+ for (i = 0 ; i < pReq->ValidScatterEntries ; i++, pSg++) {
+ /* setup each sg entry */
+ if ((unsigned long)pReq->ScatterList[i].pBuffer & 0x3) {
+ /* note some scatter engines can handle unaligned buffers, print this
+ * as informational only */
+ AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER,
+ ("HIF: (%s) Scatter Buffer is unaligned 0x%lx\n",
+ pReq->Request & HIF_WRITE ? "WRITE":"READ",
+ (unsigned long)pReq->ScatterList[i].pBuffer));
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, (" %d: Addr:0x%lX, Len:%d \n",
+ i,(unsigned long)pReq->ScatterList[i].pBuffer,pReq->ScatterList[i].Length));
+
+ sg_set_buf(pSg, pReq->ScatterList[i].pBuffer, pReq->ScatterList[i].Length);
+ }
+ /* set scatter-gather table for request */
+ data.sg = pReqPriv->sgentries;
+ data.sg_len = pReq->ValidScatterEntries;
+ /* set command argument */
+ SDIO_SET_CMD53_ARG(cmd.arg,
+ rw,
+ device->func->num,
+ _CMD53_ARG_BLOCK_BASIS,
+ opcode,
+ pReq->Address,
+ data.blocks);
+
+ cmd.opcode = SD_IO_RW_EXTENDED;
+ cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
+
+ mmcreq.cmd = &cmd;
+ mmcreq.data = &data;
+
+ mmc_set_data_timeout(&data, device->func->card);
+ /* synchronous call to process request */
+ mmc_wait_for_req(device->func->card->host, &mmcreq);
+
+ if (cmd.error) {
+ status = A_ERROR;
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("HIF-SCATTER: cmd error: %d \n",cmd.error));
+ }
+
+ if (data.error) {
+ status = A_ERROR;
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("HIF-SCATTER: data error: %d \n",data.error));
+ }
+
+ if (A_FAILED(status)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("HIF-SCATTER: FAILED!!! (%s) Address: 0x%X, Block mode (BlockLen: %d, BlockCount: %d)\n",
+ (pReq->Request & HIF_WRITE) ? "WRITE":"READ",pReq->Address, data.blksz, data.blocks));
+ }
+
+ /* set completion status, fail or success */
+ pReq->CompletionStatus = status;
+
+ if (pReq->Request & HIF_ASYNCHRONOUS) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, ("HIF-SCATTER: async_task completion routine req: 0x%lX (%d)\n",(unsigned long)busrequest, status));
+ /* complete the request */
+ A_ASSERT(pReq->CompletionRoutine != NULL);
+ pReq->CompletionRoutine(pReq);
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, ("HIF-SCATTER async_task upping busrequest : 0x%lX (%d)\n", (unsigned long)busrequest,status));
+ /* signal wait */
+ up(&busrequest->sem_req);
+ }
+
+ return status;
+ }
+
+ /* callback to issue a read-write scatter request */
+ static A_STATUS HifReadWriteScatter(HIF_DEVICE *device, HIF_SCATTER_REQ *pReq)
+ {
+ A_STATUS status = A_EINVAL;
+ A_UINT32 request = pReq->Request;
+ HIF_SCATTER_REQ_PRIV *pReqPriv = (HIF_SCATTER_REQ_PRIV *)pReq->HIFPrivate[0];
+
+ do {
+
+ A_ASSERT(pReqPriv != NULL);
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, ("HIF-SCATTER: total len: %d Scatter Entries: %d\n",
+ pReq->TotalLength, pReq->ValidScatterEntries));
+
+ if (!(request & HIF_EXTENDED_IO)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+ ("HIF-SCATTER: Invalid command type: 0x%08x\n", request));
+ break;
+ }
+
+ if (!(request & (HIF_SYNCHRONOUS | HIF_ASYNCHRONOUS))) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+ ("HIF-SCATTER: Invalid execution mode: 0x%08x\n", request));
+ break;
+ }
+
+ if (!(request & HIF_BLOCK_BASIS)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+ ("HIF-SCATTER: Invalid data mode: 0x%08x\n", request));
+ break;
+ }
+
+ if (pReq->TotalLength > MAX_SCATTER_REQ_TRANSFER_SIZE) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+ ("HIF-SCATTER: Invalid length: %d \n", pReq->TotalLength));
+ break;
+ }
+
+ if (pReq->TotalLength == 0) {
+ A_ASSERT(FALSE);
+ break;
+ }
+
+ /* add bus request to the async list for the async I/O thread to process */
+ AddToAsyncList(device, pReqPriv->busrequest);
+
+ if (request & HIF_SYNCHRONOUS) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, ("HIF-SCATTER: queued sync req: 0x%lX\n", (unsigned long)pReqPriv->busrequest));
+ /* signal thread and wait */
+ up(&device->sem_async);
+ if (down_interruptible(&pReqPriv->busrequest->sem_req) != 0) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,("HIF-SCATTER: interrupted! \n"));
+ /* interrupted, exit */
+ status = A_ERROR;
+ break;
+ } else {
+ status = pReq->CompletionStatus;
+ }
+ } else {
+ AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, ("HIF-SCATTER: queued async req: 0x%lX\n", (unsigned long)pReqPriv->busrequest));
+ /* wake thread, it will process and then take care of the async callback */
+ up(&device->sem_async);
+ status = A_OK;
+ }
+
+ } while (FALSE);
+
+ if (A_FAILED(status) && (request & HIF_ASYNCHRONOUS)) {
+ pReq->CompletionStatus = status;
+ pReq->CompletionRoutine(pReq);
+ status = A_OK;
+ }
+
+ return status;
+ }
+
+ /* setup of HIF scatter resources */
+ A_STATUS SetupHIFScatterSupport(HIF_DEVICE *device, HIF_DEVICE_SCATTER_SUPPORT_INFO *pInfo)
+ {
+ A_STATUS status = A_ERROR;
+ int i;
+ HIF_SCATTER_REQ_PRIV *pReqPriv;
+ BUS_REQUEST *busrequest;
+
+ do {
+
+ /* check if host supports scatter requests and it meets our requirements */
- if (device->func->card->host->max_hw_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
++ if (device->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HIF-SCATTER : host only supports scatter of : %d entries, need: %d \n",
- device->func->card->host->max_hw_segs, MAX_SCATTER_ENTRIES_PER_REQ));
++ device->func->card->host->max_segs, MAX_SCATTER_ENTRIES_PER_REQ));
+ status = A_ENOTSUP;
+ break;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("HIF-SCATTER Enabled: max scatter req : %d entries: %d \n",
+ MAX_SCATTER_REQUESTS, MAX_SCATTER_ENTRIES_PER_REQ));
+
+ for (i = 0; i < MAX_SCATTER_REQUESTS; i++) {
+ /* allocate the private request blob */
+ pReqPriv = (HIF_SCATTER_REQ_PRIV *)A_MALLOC(sizeof(HIF_SCATTER_REQ_PRIV));
+ if (NULL == pReqPriv) {
+ break;
+ }
+ A_MEMZERO(pReqPriv, sizeof(HIF_SCATTER_REQ_PRIV));
+ /* save the device instance*/
+ pReqPriv->device = device;
+ /* allocate the scatter request */
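+             /* HIF_SCATTER_REQ already ends in one HIF_SCATTER_ITEM, so only
+              * space for the remaining (MAX_SCATTER_ENTRIES_PER_REQ - 1)
+              * items is added here. */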
+ pReqPriv->pHifScatterReq = (HIF_SCATTER_REQ *)A_MALLOC(sizeof(HIF_SCATTER_REQ) +
+ (MAX_SCATTER_ENTRIES_PER_REQ - 1) * (sizeof(HIF_SCATTER_ITEM)));
+
+ if (NULL == pReqPriv->pHifScatterReq) {
+ A_FREE(pReqPriv);
+ break;
+ }
+ /* just zero the main part of the scatter request */
+ A_MEMZERO(pReqPriv->pHifScatterReq, sizeof(HIF_SCATTER_REQ));
+ /* back pointer to the private struct */
+ pReqPriv->pHifScatterReq->HIFPrivate[0] = pReqPriv;
+ /* allocate a bus request for this scatter request */
+ busrequest = hifAllocateBusRequest(device);
+ if (NULL == busrequest) {
+ A_FREE(pReqPriv->pHifScatterReq);
+ A_FREE(pReqPriv);
+ break;
+ }
+ /* assign the scatter request to this bus request */
+ busrequest->pScatterReq = pReqPriv;
+ /* point back to the request */
+ pReqPriv->busrequest = busrequest;
+ /* add it to the scatter pool */
+ FreeScatterReq(device,pReqPriv->pHifScatterReq);
+ }
+
+ if (i != MAX_SCATTER_REQUESTS) {
+ status = A_NO_MEMORY;
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HIF-SCATTER : failed to alloc scatter resources !\n"));
+ break;
+ }
+
+ /* set scatter function pointers */
+ pInfo->pAllocateReqFunc = AllocScatterReq;
+ pInfo->pFreeReqFunc = FreeScatterReq;
+ pInfo->pReadWriteScatterFunc = HifReadWriteScatter;
+ pInfo->MaxScatterEntries = MAX_SCATTER_ENTRIES_PER_REQ;
+ pInfo->MaxTransferSizePerScatterReq = MAX_SCATTER_REQ_TRANSFER_SIZE;
+
+ status = A_OK;
+
+ } while (FALSE);
+
+ if (A_FAILED(status)) {
+ CleanupHIFScatterResources(device);
+ }
+
+ return status;
+ }
+
+ /* clean up scatter support */
+ void CleanupHIFScatterResources(HIF_DEVICE *device)
+ {
+ HIF_SCATTER_REQ_PRIV *pReqPriv;
+ HIF_SCATTER_REQ *pReq;
+
+ /* empty the free list */
+
+ while (1) {
+
+ pReq = AllocScatterReq(device);
+
+ if (NULL == pReq) {
+ break;
+ }
+
+ pReqPriv = (HIF_SCATTER_REQ_PRIV *)pReq->HIFPrivate[0];
+ A_ASSERT(pReqPriv != NULL);
+
+ if (pReqPriv->busrequest != NULL) {
+ pReqPriv->busrequest->pScatterReq = NULL;
+ /* free bus request */
+ hifFreeBusRequest(device, pReqPriv->busrequest);
+ pReqPriv->busrequest = NULL;
+ }
+
+ if (pReqPriv->pHifScatterReq != NULL) {
+ A_FREE(pReqPriv->pHifScatterReq);
+ pReqPriv->pHifScatterReq = NULL;
+ }
+
+ A_FREE(pReqPriv);
+ }
+ }
+
+ #endif // HIF_LINUX_MMC_SCATTER_SUPPORT
&solo_enc->lock,
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_INTERLACED,
-- sizeof(struct videobuf_buffer), fh);
++ sizeof(struct videobuf_buffer), fh, NULL);
spin_unlock(&solo_enc->lock);
&solo_dev->pdev->dev, &fh->slock,
V4L2_BUF_TYPE_VIDEO_CAPTURE,
SOLO_DISP_PIX_FIELD,
-- sizeof(struct videobuf_buffer), fh);
++ sizeof(struct videobuf_buffer), fh, NULL);
return 0;
}
{
struct inode *inode = dentry->d_inode;
int error, rc = 0;
+ int orphan = 0;
const unsigned int ia_valid = attr->ia_valid;
- error = inode_change_ok(inode, attr);
+ if (EXT4_IS_RICHACL(inode))
+ error = richacl_inode_change_ok(inode, attr,
+ ext4_richacl_permission);
+ else
+ error = inode_change_ok(inode, attr);
if (error)
return error;
* If the call to ext4_truncate failed to get a transaction handle at
* all, we need to clean up the in-core orphan list manually.
*/
- if (inode->i_nlink)
+ if (orphan && inode->i_nlink)
ext4_orphan_del(NULL, inode);
- if (!rc && (ia_valid & ATTR_MODE))
- rc = ext4_acl_chmod(inode);
-
+ if (!rc && (ia_valid & ATTR_MODE)) {
+ if (EXT4_IS_RICHACL(inode))
+ rc = ext4_richacl_chmod(inode);
+ else
+ rc = ext4_acl_chmod(inode);
+ }
err_out:
ext4_std_error(inode->i_sb, error);
if (!error)
If unsure, say Y.
+config NFS_SWAP
+ bool "Provide swap over NFS support"
+ default n
+ depends on NFS_FS
+ select SUNRPC_SWAP
+ help
+ This option enables swapon to work on files located on NFS mounts.
+
+ For more details, see Documentation/network-swap.txt
+
config NFS_V4_1
bool "NFS client support for NFSv4.1 (EXPERIMENTAL)"
- depends on NFS_V4 && EXPERIMENTAL
+ depends on NFS_FS && NFS_V4 && EXPERIMENTAL
+ select PNFS_FILE_LAYOUT
help
This option enables support for minor version 1 of the NFSv4 protocol
- (draft-ietf-nfsv4-minorversion1) in the kernel's NFS client.
+ (RFC 5661) in the kernel's NFS client.
If unsure, say N.
goto out;
}
+ ctx = nameidata_to_nfs_open_context(dentry, nd);
+ res = ERR_CAST(ctx);
+ if (IS_ERR(ctx))
+ goto out;
+
+ open_flags = nd->intent.open.flags;
+ if (nd->flags & LOOKUP_CREATE) {
+ attr.ia_mode = nd->intent.open.create_mode;
+ attr.ia_valid = ATTR_MODE;
- if (!IS_POSIXACL(dir))
++ if (!IS_ACL(dir))
+ attr.ia_mode &= ~current_umask();
+ } else {
+ open_flags &= ~(O_EXCL | O_CREAT);
+ attr.ia_valid = 0;
+ }
+
/* Open the file on the server */
- res = nfs4_atomic_open(dir, dentry, nd);
- if (IS_ERR(res)) {
- error = PTR_ERR(res);
- switch (error) {
+ nfs_block_sillyrename(dentry->d_parent);
+ inode = NFS_PROTO(dir)->open_context(dir, ctx, open_flags, &attr);
+ if (IS_ERR(inode)) {
+ nfs_unblock_sillyrename(dentry->d_parent);
+ put_nfs_open_context(ctx);
+ switch (PTR_ERR(inode)) {
/* Make a negative dentry */
case -ENOENT:
+ d_add(dentry, NULL);
res = NULL;
goto out;
/* This turned out not to be a regular file */
nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
- nfs_pageio_cond_complete(pgio, page->index);
+ nfs_pageio_cond_complete(pgio, page_file_index(page));
- ret = nfs_page_async_flush(pgio, page,
- wbc->sync_mode == WB_SYNC_NONE ||
- wbc->nonblocking != 0);
+ ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
if (ret == -EAGAIN) {
redirty_page_for_writepage(wbc, page);
ret = 0;
--- /dev/null
+/*
+ * Novell NCP Redirector for Linux
+ * Author: James Turner
+ *
+ * This file contains all the functions necessary for sending commands to our
+ * daemon module.
+ *
+ * Copyright (C) 2005 Novell, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/poll.h>
+#include <linux/pagemap.h>
+#include <linux/smp_lock.h>
+#include <linux/semaphore.h>
+#include <asm/uaccess.h>
+#include <asm/atomic.h>
+#include <linux/time.h>
+
+#include "vfs.h"
+#include "nwcapi.h"
+#include "commands.h"
+#include "nwerror.h"
+
+#define QUEUE_SENDING 0
+#define QUEUE_WAITING 1
+#define QUEUE_TIMEOUT 2
+#define QUEUE_ACKED 3
+#define QUEUE_DONE 4
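+
+/*
+ * Life cycle of a queued command: SENDING (queued, not yet picked up by the
+ * daemon) -> WAITING (handed to the daemon) -> ACKED (daemon acknowledged the
+ * sequence number) -> DONE (reply copied back). TIMEOUT is set by the command
+ * timer or when the control device is closed with requests still pending.
+ */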
+
+#define TIMEOUT_VALUE 10
+
+#define DH_TYPE_UNDEFINED 0
+#define DH_TYPE_STREAM 1
+#define DH_TYPE_CONNECTION 2
+
+struct daemon_queue {
+ struct list_head list; /* Must be first entry */
+ spinlock_t lock; /* Used to control access to list */
+ struct semaphore semaphore; /* Used to signal when data is available */
+};
+
+struct daemon_cmd {
+ struct list_head list; /* Must be first entry */
+ atomic_t reference;
+ unsigned int status;
+ unsigned int flags;
+ struct semaphore semaphore;
+ unsigned long sequence;
+ struct timer_list timer;
+ void *request;
+ unsigned long reqlen;
+ void *data;
+ int datalen;
+ void *reply;
+ unsigned long replen;
+};
+
+struct daemon_handle {
+ struct list_head list;
+ rwlock_t lock;
+ struct novfs_schandle session;
+};
+
+struct daemon_resource {
+ struct list_head list;
+ int type;
+ void *connection;
+ unsigned char handle[6];
+ mode_t mode;
+ loff_t size;
+};
+
+struct drive_map {
+ struct list_head list; /* Must be first item */
+ struct novfs_schandle session;
+ unsigned long hash;
+ int namelen;
+ char name[1];
+};
+
+static void Queue_get(struct daemon_cmd *Que);
+static void Queue_put(struct daemon_cmd *Que);
+static void RemoveDriveMaps(void);
+static int NwdConvertLocalHandle(struct novfs_xplat *pdata, struct daemon_handle *DHandle);
+static int NwdConvertNetwareHandle(struct novfs_xplat *pdata, struct daemon_handle *DHandle);
+static int set_map_drive(struct novfs_xplat *pdata, struct novfs_schandle Session);
+static int unmap_drive(struct novfs_xplat *pdata, struct novfs_schandle Session);
+static int NwdGetMountPath(struct novfs_xplat *pdata);
+static long local_unlink(const char *pathname);
+
+/*===[ Global variables ]=================================================*/
+static struct daemon_queue Daemon_Queue;
+
+static DECLARE_WAIT_QUEUE_HEAD(Read_waitqueue);
+
+static atomic_t Sequence = ATOMIC_INIT(-1);
+static atomic_t Daemon_Open_Count = ATOMIC_INIT(0);
+
+static unsigned long Daemon_Command_Timeout = TIMEOUT_VALUE;
+
- static DECLARE_MUTEX(DriveMapLock);
++static DEFINE_MUTEX(DriveMapLock);
+static LIST_HEAD(DriveMapList);
+
+int novfs_max_iosize = PAGE_SIZE;
+
+void novfs_daemon_queue_init(void)
+{
+ INIT_LIST_HEAD(&Daemon_Queue.list);
+ spin_lock_init(&Daemon_Queue.lock);
- init_MUTEX_LOCKED(&Daemon_Queue.semaphore);
++ sema_init(&Daemon_Queue.semaphore, 0);
+}
+
+void novfs_daemon_queue_exit(void)
+{
+	/* Does nothing for now, but perhaps we should clear the queue. */
+}
+
+/*++======================================================================*/
+static void novfs_daemon_timer(unsigned long data)
+{
+ struct daemon_cmd *que = (struct daemon_cmd *)data;
+
+ if (QUEUE_ACKED != que->status) {
+ que->status = QUEUE_TIMEOUT;
+ }
+ up(&que->semaphore);
+}
+
+/*++======================================================================*/
+int Queue_Daemon_Command(void *request,
+ unsigned long reqlen, void *data, int dlen, void **reply, unsigned long *replen, int interruptible)
+{
+ struct daemon_cmd *que;
+ int retCode = 0;
+ uint64_t ts1, ts2;
+
+ ts1 = get_nanosecond_time();
+
+ DbgPrint("0x%p %d", request, reqlen);
+
+ if (atomic_read(&Daemon_Open_Count)) {
+
+ que = kmalloc(sizeof(*que), GFP_KERNEL);
+
+ DbgPrint("que=0x%p", que);
+ if (que) {
+ atomic_set(&que->reference, 0);
+ que->status = QUEUE_SENDING;
+ que->flags = 0;
+
- init_MUTEX_LOCKED(&que->semaphore);
++ sema_init(&que->semaphore, 0);
+
+ que->sequence = atomic_inc_return(&Sequence);
+
+ ((struct novfs_command_request_header *)request)->SequenceNumber = que->sequence;
+
+ /*
+ * Setup and start que timer
+ */
+ init_timer(&que->timer);
+ que->timer.expires = jiffies + (HZ * Daemon_Command_Timeout);
+ que->timer.data = (unsigned long)que;
+ que->timer.function = novfs_daemon_timer;
+ add_timer(&que->timer);
+
+ /*
+ * Setup request
+ */
+ que->request = request;
+ que->reqlen = reqlen;
+ que->data = data;
+ que->datalen = dlen;
+ que->reply = NULL;
+ que->replen = 0;
+
+ /*
+ * Added entry to queue.
+ */
+ /*
+ * Check to see if interruptible and set flags.
+ */
+ if (interruptible) {
+ que->flags |= INTERRUPTIBLE;
+ }
+
+ Queue_get(que);
+
+ spin_lock(&Daemon_Queue.lock);
+ list_add_tail(&que->list, &Daemon_Queue.list);
+ spin_unlock(&Daemon_Queue.lock);
+
+ /*
+ * Signal that there is data to be read
+ */
+ up(&Daemon_Queue.semaphore);
+
+ /*
+			 * Give a chance to the other processes.
+ */
+ yield();
+
+ /*
+ * Block waiting for reply or timeout
+ */
+ down(&que->semaphore);
+
+ if (QUEUE_ACKED == que->status) {
+ que->status = QUEUE_WAITING;
+ mod_timer(&que->timer, jiffies + (HZ * 2 * Daemon_Command_Timeout));
+ if (interruptible) {
+ retCode = down_interruptible(&que->semaphore);
+ } else {
+ down(&que->semaphore);
+ }
+ }
+
+ /*
+ * Delete timer
+ */
+ del_timer(&que->timer);
+
+ /*
+ * Check for timeout
+ */
+ if ((QUEUE_TIMEOUT == que->status)
+ && (NULL == que->reply)) {
+ DbgPrint("Timeout");
+ retCode = -ETIME;
+ }
+ *reply = que->reply;
+ *replen = que->replen;
+
+ /*
+ * Remove item from queue
+ */
+ Queue_put(que);
+
+ } else { /* Error case with no memory */
+
+ retCode = -ENOMEM;
+ *reply = NULL;
+ *replen = 0;
+ }
+ } else {
+ retCode = -EIO;
+ *reply = NULL;
+ *replen = 0;
+
+ }
+ ts2 = get_nanosecond_time();
+ ts2 = ts2 - ts1;
+
+ DbgPrint("%llu retCode=%d", ts2, retCode);
+ return (retCode);
+}
+
+static void Queue_get(struct daemon_cmd *Que)
+{
+ DbgPrint("que=0x%p %d", Que, atomic_read(&Que->reference));
+ atomic_inc(&Que->reference);
+}
+
+static void Queue_put(struct daemon_cmd *Que)
+{
+
+ DbgPrint("que=0x%p %d", Que, atomic_read(&Que->reference));
+ spin_lock(&Daemon_Queue.lock);
+
+ if (atomic_dec_and_test(&Que->reference)) {
+ /*
+ * Remove item from queue
+ */
+ list_del(&Que->list);
+ spin_unlock(&Daemon_Queue.lock);
+
+ /*
+ * Free item memory
+ */
+ kfree(Que);
+ } else {
+ spin_unlock(&Daemon_Queue.lock);
+ }
+}
+
+struct daemon_cmd *get_next_queue(int Set_Queue_Waiting)
+{
+ struct daemon_cmd *que;
+
+ DbgPrint("que=0x%p", Daemon_Queue.list.next);
+
+ spin_lock(&Daemon_Queue.lock);
+ que = (struct daemon_cmd *)Daemon_Queue.list.next;
+
+ while (que && (que != (struct daemon_cmd *)&Daemon_Queue.list.next)
+ && (que->status != QUEUE_SENDING)) {
+ que = (struct daemon_cmd *)que->list.next;
+ }
+
+ if ((NULL == que) || (que == (struct daemon_cmd *)&Daemon_Queue.list)
+ || (que->status != QUEUE_SENDING)) {
+ que = NULL;
+ } else if (Set_Queue_Waiting) {
+ que->status = QUEUE_WAITING;
+ }
+
+ if (que) {
+ atomic_inc(&que->reference);
+ }
+
+ spin_unlock(&Daemon_Queue.lock);
+
+ DbgPrint("return=0x%p", que);
+ return (que);
+}
+
+static struct daemon_cmd *find_queue(unsigned long sequence)
+{
+ struct daemon_cmd *que;
+
+ DbgPrint("0x%x", sequence);
+
+ spin_lock(&Daemon_Queue.lock);
+ que = (struct daemon_cmd *)Daemon_Queue.list.next;
+
+ while (que && (que != (struct daemon_cmd *)&Daemon_Queue.list.next)
+ && (que->sequence != sequence)) {
+ que = (struct daemon_cmd *)que->list.next;
+ }
+
+ if ((NULL == que)
+ || (que == (struct daemon_cmd *)&Daemon_Queue.list.next)
+ || (que->sequence != sequence)) {
+ que = NULL;
+ }
+
+ if (que) {
+ atomic_inc(&que->reference);
+ }
+
+ spin_unlock(&Daemon_Queue.lock);
+
+ DbgPrint("return 0x%p", que);
+ return (que);
+}
+
+int novfs_daemon_open_control(struct inode *Inode, struct file *File)
+{
+ DbgPrint("pid=%d Count=%d", current->pid, atomic_read(&Daemon_Open_Count));
+ atomic_inc(&Daemon_Open_Count);
+
+ return (0);
+}
+
+int novfs_daemon_close_control(struct inode *Inode, struct file *File)
+{
+ struct daemon_cmd *que;
+
+ DbgPrint("pid=%d Count=%d", current->pid, atomic_read(&Daemon_Open_Count));
+
+ if (atomic_dec_and_test(&Daemon_Open_Count)) {
+ /*
+		 * Signal any pending queue items.
+ */
+
+ spin_lock(&Daemon_Queue.lock);
+ que = (struct daemon_cmd *)Daemon_Queue.list.next;
+
+ while (que && (que != (struct daemon_cmd *)&Daemon_Queue.list.next)
+ && (que->status != QUEUE_DONE)) {
+ que->status = QUEUE_TIMEOUT;
+ up(&que->semaphore);
+
+ que = (struct daemon_cmd *)que->list.next;
+ }
+ spin_unlock(&Daemon_Queue.lock);
+
+ RemoveDriveMaps();
+
+ novfs_scope_cleanup();
+ }
+
+ return (0);
+}
+
+ssize_t novfs_daemon_cmd_send(struct file * file, char *buf, size_t len, loff_t * off)
+{
+ struct daemon_cmd *que;
+ size_t retValue = 0;
+ int Finished = 0;
+ struct novfs_data_list *dlist;
+ int i, dcnt, bcnt, ccnt, error;
+ char *vadr;
+ unsigned long cpylen;
+
+ DbgPrint("%u %lld", len, *off);
+ if (len > novfs_max_iosize) {
+ novfs_max_iosize = len;
+ }
+
+ while (!Finished) {
+ que = get_next_queue(1);
+ DbgPrint("0x%p", que);
+ if (que) {
+ retValue = que->reqlen;
+ if (retValue > len) {
+ retValue = len;
+ }
+ if (retValue > 0x80)
+ novfs_dump(0x80, que->request);
+ else
+ novfs_dump(retValue, que->request);
+
+ cpylen = copy_to_user(buf, que->request, retValue);
+ if (que->datalen && (retValue < len)) {
+ buf += retValue;
+ dlist = que->data;
+ dcnt = que->datalen;
+ for (i = 0; i < dcnt; i++, dlist++) {
+ if (DLREAD == dlist->rwflag) {
+ bcnt = dlist->len;
+					DbgPrint("%d: page=0x%p "
+						 "offset=0x%p len=%d", i, dlist->page, dlist->offset, dlist->len);
+ if ((bcnt + retValue) <= len) {
+ void *km_adr = NULL;
+
+ if (dlist->page) {
+ km_adr = kmap(dlist->page);
+ vadr = km_adr;
+						vadr += (unsigned long)dlist->offset;
+ } else {
+ vadr = dlist->offset;
+ }
+
+ ccnt = copy_to_user(buf, vadr, bcnt);
+
+ DbgPrint("Copy %d from 0x%p to 0x%p.", bcnt, vadr, buf);
+ if (bcnt > 0x80)
+ novfs_dump(0x80, vadr);
+ else
+ novfs_dump(bcnt, vadr);
+
+ if (km_adr) {
+ kunmap(dlist->page);
+ }
+
+ retValue += bcnt;
+ buf += bcnt;
+ } else {
+ break;
+ }
+ }
+ }
+ }
+ Queue_put(que);
+ break;
+ }
+
+ if (O_NONBLOCK & file->f_flags) {
+ retValue = -EAGAIN;
+ break;
+ } else {
+ if ((error = down_interruptible(&Daemon_Queue.semaphore))) {
+ DbgPrint("after down_interruptible error...%d", error);
+ retValue = -EINTR;
+ break;
+ }
+ DbgPrint("after down_interruptible");
+ }
+ }
+
+ *off = *off;
+
+ DbgPrint("return 0x%x", retValue);
+
+ return (retValue);
+}
+
+ssize_t novfs_daemon_recv_reply(struct file * file, const char *buf, size_t nbytes, loff_t * ppos)
+{
+ struct daemon_cmd *que;
+ size_t retValue = 0;
+ void *reply;
+ unsigned long sequence, cpylen;
+
+ struct novfs_data_list *dlist;
+ char *vadr;
+ int i;
+
+ DbgPrint("buf=0x%p nbytes=%d ppos=%llx", buf, nbytes, *ppos);
+
+ /*
+ * Get sequence number from reply buffer
+ */
+
+ cpylen = copy_from_user(&sequence, buf, sizeof(sequence));
+
+ /*
+ * Find item based on sequence number
+ */
+ que = find_queue(sequence);
+
+ DbgPrint("0x%x 0x%p %d", sequence, que, nbytes);
+ if (que) {
+ do {
+ retValue = nbytes;
+ /*
+ * Ack packet from novfsd. Remove timer and
+ * return
+ */
+ if (nbytes == sizeof(sequence)) {
+ que->status = QUEUE_ACKED;
+ break;
+ }
+
+ if (NULL != (dlist = que->data)) {
+ int thiscopy, left = nbytes;
+ retValue = 0;
+
+ DbgPrint("dlist=0x%p count=%d", dlist, que->datalen);
+ for (i = 0; (i < que->datalen) && (retValue < nbytes); i++, dlist++) {
+ __DbgPrint("\n"
+ " dlist[%d].page: 0x%p\n"
+ " dlist[%d].offset: 0x%p\n"
+ " dlist[%d].len: 0x%x\n"
+ " dlist[%d].rwflag: 0x%x\n",
+ i, dlist->page, i, dlist->offset, i, dlist->len, i, dlist->rwflag);
+
+ if (DLWRITE == dlist->rwflag) {
+ void *km_adr = NULL;
+
+ if (dlist->page) {
+ km_adr = kmap(dlist->page);
+ vadr = km_adr;
+ vadr += (unsigned long)dlist->offset;
+ } else {
+ vadr = dlist->offset;
+ }
+
+ thiscopy = dlist->len;
+ if (thiscopy > left) {
+ thiscopy = left;
+ dlist->len = left;
+ }
+ cpylen = copy_from_user(vadr, buf, thiscopy);
+
+ if (thiscopy > 0x80)
+ novfs_dump(0x80, vadr);
+ else
+ novfs_dump(thiscopy, vadr);
+
+ if (km_adr) {
+ kunmap(dlist->page);
+ }
+
+ left -= thiscopy;
+ retValue += thiscopy;
+ buf += thiscopy;
+ }
+ }
+ que->replen = retValue;
+ } else {
+ reply = kmalloc(nbytes, GFP_KERNEL);
+ DbgPrint("reply=0x%p", reply);
+ if (reply) {
+ retValue = nbytes;
+ que->reply = reply;
+ que->replen = nbytes;
+
+ retValue -= copy_from_user(reply, buf, retValue);
+ if (retValue > 0x80)
+ novfs_dump(0x80, reply);
+ else
+ novfs_dump(retValue, reply);
+
+ } else {
+ retValue = -ENOMEM;
+ }
+ }
+
+ /*
+ * Set status that packet is done.
+ */
+ que->status = QUEUE_DONE;
+
+ } while (0);
+ up(&que->semaphore);
+ Queue_put(que);
+ }
+
+ DbgPrint("return 0x%x", retValue);
+
+ return (retValue);
+}
+
+int novfs_do_login(struct ncl_string *Server, struct ncl_string *Username,
+ struct ncl_string *Password, void **lgnId, struct novfs_schandle *Session)
+{
+ struct novfs_login_user_request *cmd;
+ struct novfs_login_user_reply *reply;
+ unsigned long replylen = 0;
+ int retCode, cmdlen, datalen;
+ unsigned char *data;
+
+ datalen = Server->len + Username->len + Password->len;
+ cmdlen = sizeof(*cmd) + datalen;
+ cmd = kmalloc(cmdlen, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ data = (unsigned char *)cmd + sizeof(*cmd);
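+	/*
+	 * The server, user and password strings are packed back-to-back after
+	 * the fixed header; each *Offset field below records where its string
+	 * starts relative to the start of the command buffer.
+	 */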
+ cmd->Command.CommandType = VFS_COMMAND_LOGIN_USER;
+ cmd->Command.SequenceNumber = 0;
+ memcpy(&cmd->Command.SessionId, Session, sizeof(*Session));
+
+ cmd->srvNameType = Server->type;
+ cmd->serverLength = Server->len;
+ cmd->serverOffset = (unsigned long)(data - (unsigned char *)cmd);
+ memcpy(data, Server->buffer, Server->len);
+ data += Server->len;
+
+ cmd->usrNameType = Username->type;
+ cmd->userNameLength = Username->len;
+ cmd->userNameOffset = (unsigned long)(data - (unsigned char *)cmd);
+ memcpy(data, Username->buffer, Username->len);
+ data += Username->len;
+
+ cmd->pwdNameType = Password->type;
+ cmd->passwordLength = Password->len;
+ cmd->passwordOffset = (unsigned long)(data - (unsigned char *)cmd);
+ memcpy(data, Password->buffer, Password->len);
+ data += Password->len;
+
+ retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE);
+ if (reply) {
+ if (reply->Reply.ErrorCode) {
+ retCode = reply->Reply.ErrorCode;
+ } else {
+ retCode = 0;
+ if (lgnId) {
+ *lgnId = reply->loginIdentity;
+ }
+ }
+ kfree(reply);
+ }
+ memset(cmd, 0, cmdlen);
+ kfree(cmd);
+ return (retCode);
+
+}
+
+int novfs_daemon_logout(struct qstr *Server, struct novfs_schandle *Session)
+{
+ struct novfs_logout_request *cmd;
+ struct novfs_logout_reply *reply;
+ unsigned long replylen = 0;
+ int retCode, cmdlen;
+
+ cmdlen = offsetof(struct novfs_logout_request, Name) + Server->len;
+ cmd = kmalloc(cmdlen, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->Command.CommandType = VFS_COMMAND_LOGOUT_USER;
+ cmd->Command.SequenceNumber = 0;
+ memcpy(&cmd->Command.SessionId, Session, sizeof(*Session));
+ cmd->length = Server->len;
+ memcpy(cmd->Name, Server->name, Server->len);
+
+ retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE);
+ if (reply) {
+ if (reply->Reply.ErrorCode) {
+ retCode = -EIO;
+ }
+ kfree(reply);
+ }
+ kfree(cmd);
+ return (retCode);
+
+}
+
+int novfs_daemon_getpwuid(uid_t uid, int unamelen, char *uname)
+{
+ struct novfs_getpwuid_request cmd;
+ struct novfs_getpwuid_reply *reply;
+ unsigned long replylen = 0;
+ int retCode;
+
+ cmd.Command.CommandType = VFS_COMMAND_GETPWUD;
+ cmd.Command.SequenceNumber = 0;
+ SC_INITIALIZE(cmd.Command.SessionId);
+ cmd.uid = uid;
+
+ retCode = Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE);
+ if (reply) {
+ if (reply->Reply.ErrorCode) {
+ retCode = -EIO;
+ } else {
+ retCode = 0;
+ memset(uname, 0, unamelen);
+			replylen = replylen - offsetof(struct novfs_getpwuid_reply, UserName);
+ if (replylen) {
+ if (replylen > unamelen) {
+ retCode = -EINVAL;
+ replylen = unamelen - 1;
+ }
+ memcpy(uname, reply->UserName, replylen);
+ }
+ }
+ kfree(reply);
+ }
+ return (retCode);
+
+}
+
+int novfs_daemon_getversion(char *Buf, int length)
+{
+ struct novfs_get_version_request cmd;
+ struct novfs_get_version_reply *reply;
+ unsigned long replylen = 0;
+ int retVal = 0;
+
+ cmd.Command.CommandType = VFS_COMMAND_GET_VERSION;
+ cmd.Command.SequenceNumber = 0;
+ SC_INITIALIZE(cmd.Command.SessionId);
+
+ Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE);
+ if (reply) {
+ if (reply->Reply.ErrorCode) {
+ retVal = -EIO;
+ } else {
+			retVal = replylen - offsetof(struct novfs_get_version_reply, Version);
+ if (retVal < length) {
+ memcpy(Buf, reply->Version, retVal);
+ Buf[retVal] = '\0';
+ }
+ }
+ kfree(reply);
+ }
+ return (retVal);
+
+}
+
+static int daemon_login(struct novfs_login *Login, struct novfs_schandle *Session)
+{
+ int retCode = -ENOMEM;
+ struct novfs_login lLogin;
+ struct ncl_string server;
+ struct ncl_string username;
+ struct ncl_string password;
+
+ if (!copy_from_user(&lLogin, Login, sizeof(lLogin))) {
+ if (lLogin.Server.length > MAX_SERVER_NAME_LENGTH || lLogin.UserName.length > MAX_NAME_LEN ||
+ lLogin.Password.length > MAX_PASSWORD_LENGTH)
+ return -EINVAL;
+ server.buffer = kmalloc(lLogin.Server.length, GFP_KERNEL);
+ if (server.buffer) {
+ server.len = lLogin.Server.length;
+ server.type = NWC_STRING_TYPE_ASCII;
+ if (!copy_from_user((void *)server.buffer, lLogin.Server.data, server.len)) {
+ username.buffer = kmalloc(lLogin.UserName.length, GFP_KERNEL);
+ if (username.buffer) {
+ username.len = lLogin.UserName.length;
+ username.type = NWC_STRING_TYPE_ASCII;
+ if (!copy_from_user((void *)username.buffer, lLogin.UserName.data, username.len)) {
+ password.buffer = kmalloc(lLogin.Password.length, GFP_KERNEL);
+ if (password.buffer) {
+ password.len = lLogin.Password.length;
+ password.type = NWC_STRING_TYPE_ASCII;
+							if (!copy_from_user((void *)password.buffer, lLogin.Password.data, password.len)) {
+								retCode = novfs_do_login(&server, &username, &password, NULL, Session);
+ if (!retCode) {
+									char *scope_username;
+									scope_username = novfs_scope_get_username();
+									if (scope_username) {
+										novfs_add_to_root(scope_username);
+ }
+ }
+ }
+ kfree(password.buffer);
+ }
+ }
+ kfree(username.buffer);
+ }
+ }
+ kfree(server.buffer);
+ }
+ }
+
+ return (retCode);
+}
+
+static int daemon_logout(struct novfs_logout *Logout, struct novfs_schandle *Session)
+{
+ struct novfs_logout lLogout;
+ struct qstr server;
+ int retCode = 0;
+
+ if (copy_from_user(&lLogout, Logout, sizeof(lLogout)))
+ return -EFAULT;
+ if (lLogout.Server.length > MAX_SERVER_NAME_LENGTH)
+ return -EINVAL;
+ server.name = kmalloc(lLogout.Server.length, GFP_KERNEL);
+ if (!server.name)
+ return -ENOMEM;
+ server.len = lLogout.Server.length;
+	if (copy_from_user((void *)server.name, lLogout.Server.data, server.len)) {
+		retCode = -EFAULT;
+		goto exit;
+	}
+ retCode = novfs_daemon_logout(&server, Session);
+exit:
+ kfree(server.name);
+ return (retCode);
+}
+
+int novfs_daemon_create_sessionId(struct novfs_schandle *SessionId)
+{
+ struct novfs_create_context_request cmd;
+ struct novfs_create_context_reply *reply;
+ unsigned long replylen = 0;
+ int retCode = 0;
+
+ DbgPrint("%d", current->pid);
+
+ cmd.Command.CommandType = VFS_COMMAND_CREATE_CONTEXT;
+ cmd.Command.SequenceNumber = 0;
+ SC_INITIALIZE(cmd.Command.SessionId);
+
+ retCode = Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE);
+ if (reply) {
+ if (!reply->Reply.ErrorCode && replylen > sizeof(struct novfs_command_reply_header)) {
+ *SessionId = reply->SessionId;
+ retCode = 0;
+ } else {
+ SessionId->hTypeId = 0;
+ SessionId->hId = 0;
+ retCode = -EIO;
+ }
+ kfree(reply);
+ }
+ DbgPrint("SessionId=0x%llx", *SessionId);
+ return (retCode);
+}
+
+int novfs_daemon_destroy_sessionId(struct novfs_schandle SessionId)
+{
+ struct novfs_destroy_context_request cmd;
+ struct novfs_destroy_context_reply *reply;
+ unsigned long replylen = 0;
+ int retCode = 0;
+
+ DbgPrint("0x%p:%p", SessionId.hTypeId, SessionId.hId);
+
+ cmd.Command.CommandType = VFS_COMMAND_DESTROY_CONTEXT;
+ cmd.Command.SequenceNumber = 0;
+ cmd.Command.SessionId = SessionId;
+
+ retCode = Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE);
+ if (reply) {
+ if (!reply->Reply.ErrorCode) {
+ struct drive_map *dm;
+ struct list_head *list;
+
+ retCode = 0;
+
+ /*
+ * When destroying the session check to see if there are any
+ * mapped drives. If there are then remove them.
+ */
- down(&DriveMapLock);
++ mutex_lock(&DriveMapLock);
+ list_for_each(list, &DriveMapList) {
+ dm = list_entry(list, struct drive_map, list);
+ if (SC_EQUAL(SessionId, dm->session)) {
+ local_unlink(dm->name);
+ list = list->prev;
+ list_del(&dm->list);
+ kfree(dm);
+ }
+
+ }
- up(&DriveMapLock);
++ mutex_unlock(&DriveMapLock);
+
+ } else {
+ retCode = -EIO;
+ }
+ kfree(reply);
+ }
+ return (retCode);
+}
+
+int novfs_daemon_get_userspace(struct novfs_schandle SessionId, uint64_t *TotalSize,
+			       uint64_t *Free, uint64_t *TotalEnties, uint64_t *FreeEnties)
+{
+ struct novfs_get_user_space cmd;
+ struct novfs_get_user_space_reply *reply;
+ unsigned long replylen = 0;
+ int retCode = 0;
+
+ DbgPrint("0x%p:%p", SessionId.hTypeId, SessionId.hId);
+
+ cmd.Command.CommandType = VFS_COMMAND_GET_USER_SPACE;
+ cmd.Command.SequenceNumber = 0;
+ cmd.Command.SessionId = SessionId;
+
+ retCode = Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE);
+ if (reply) {
+ if (!reply->Reply.ErrorCode) {
+
+ __DbgPrint("TotalSpace: %llu\n", reply->TotalSpace);
+ __DbgPrint("FreeSpace: %llu\n", reply->FreeSpace);
+ __DbgPrint("TotalEnties: %llu\n", reply->TotalEnties);
+ __DbgPrint("FreeEnties: %llu\n", reply->FreeEnties);
+
+ if (TotalSize)
+ *TotalSize = reply->TotalSpace;
+ if (Free)
+ *Free = reply->FreeSpace;
+ if (TotalEnties)
+ *TotalEnties = reply->TotalEnties;
+ if (FreeEnties)
+ *FreeEnties = reply->FreeEnties;
+ retCode = 0;
+ } else {
+ retCode = -EIO;
+ }
+ kfree(reply);
+ }
+ return (retCode);
+}
+
+int novfs_daemon_set_mnt_point(char *Path)
+{
+ struct novfs_set_mount_path *cmd;
+ struct novfs_set_mount_path_reply *reply;
+ unsigned long replylen, cmdlen;
+ int retCode = -ENOMEM;
+
+ DbgPrint("%s", Path);
+
+ replylen = strlen(Path);
+
+ cmdlen = sizeof(struct novfs_set_mount_path) + replylen;
+
+ cmd = kmalloc(cmdlen, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+ cmd->Command.CommandType = VFS_COMMAND_SET_MOUNT_PATH;
+ cmd->Command.SequenceNumber = 0;
+ SC_INITIALIZE(cmd->Command.SessionId);
+ cmd->PathLength = replylen;
+
+ strcpy(cmd->Path, Path);
+
+ replylen = 0;
+
+ retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE);
+ if (reply) {
+ if (!reply->Reply.ErrorCode) {
+ retCode = 0;
+ } else {
+ retCode = -EIO;
+ }
+ kfree(reply);
+ }
+ kfree(cmd);
+ return retCode;
+}
+
+int novfs_daemon_debug_cmd_send(char *Command)
+{
+ struct novfs_debug_request cmd;
+ struct novfs_debug_reply *reply;
+ struct novfs_debug_reply lreply;
+ unsigned long replylen, cmdlen;
+ struct novfs_data_list dlist[2];
+
+ int retCode = -ENOMEM;
+
+ DbgPrint("%s", Command);
+
+ dlist[0].page = NULL;
+ dlist[0].offset = (char *)Command;
+ dlist[0].len = strlen(Command);
+ dlist[0].rwflag = DLREAD;
+
+ dlist[1].page = NULL;
+ dlist[1].offset = (char *)&lreply;
+ dlist[1].len = sizeof(lreply);
+ dlist[1].rwflag = DLWRITE;
+
+ cmdlen = offsetof(struct novfs_debug_request, dbgcmd);
+
+ cmd.Command.CommandType = VFS_COMMAND_DBG;
+ cmd.Command.SequenceNumber = 0;
+ SC_INITIALIZE(cmd.Command.SessionId);
+ cmd.cmdlen = strlen(Command);
+
+ replylen = 0;
+
+ retCode = Queue_Daemon_Command(&cmd, cmdlen, dlist, 2, (void *)&reply, &replylen, INTERRUPTIBLE);
+ if (reply) {
+ kfree(reply);
+ }
+ if (0 == retCode) {
+ retCode = lreply.Reply.ErrorCode;
+ }
+
+ return (retCode);
+}
+
+int novfs_daemon_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int retCode = -ENOSYS;
+ unsigned long cpylen;
+ struct novfs_schandle session_id;
+
+ lock_kernel(); /* needed? */
+
+ session_id = novfs_scope_get_sessionId(NULL);
+
+ switch (cmd) {
+ case IOC_LOGIN:
+ retCode = daemon_login((struct novfs_login *)arg, &session_id);
+ break;
+
+ case IOC_LOGOUT:
+ retCode = daemon_logout((struct novfs_logout *)arg, &session_id);
+ break;
+ case IOC_DEBUGPRINT:
+ {
+ struct Ioctl_Debug {
+ int length;
+ char *data;
+ } io;
+ char *buf;
+ io.length = 0;
+ cpylen = copy_from_user(&io, (char *)arg, sizeof(io));
+ if (io.length <= 0 || io.length > 1024) {
+ unlock_kernel();
+ return -EINVAL;
+ }
+ if (io.length) {
+ buf = kmalloc(io.length + 1, GFP_KERNEL);
+ if (buf) {
+ buf[0] = 0;
+ cpylen = copy_from_user(buf, io.data, io.length);
+ buf[io.length] = '\0';
+ DbgPrint("%s", buf);
+ kfree(buf);
+ retCode = 0;
+ }
+ }
+ break;
+ }
+
+ case IOC_XPLAT:
+ {
+ struct novfs_xplat data;
+
+ cpylen = copy_from_user(&data, (void *)arg, sizeof(data));
+ retCode = ((data.xfunction & 0x0000FFFF) | 0xCC000000);
+
+ switch (data.xfunction) {
+ case NWC_GET_MOUNT_PATH:
+ DbgPrint("Call NwdGetMountPath");
+ retCode = NwdGetMountPath(&data);
+ break;
+ }
+
+ DbgPrint("[NOVFS XPLAT] status Code = %X\n", retCode);
+ break;
+ }
+
+ }
+
+ unlock_kernel();
+
+ return (retCode);
+}
+
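+/*
+ * Track a stream or connection resource on the per-open daemon handle so it
+ * can be torn down when the handle is closed.
+ */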
+static int daemon_added_resource(struct daemon_handle *DHandle, int Type, void *CHandle,
+ unsigned char *FHandle, unsigned long Mode, u_long Size)
+{
+ struct daemon_resource *resource;
+
+ if (FHandle)
+		DbgPrint("DHandle=0x%p Type=%d CHandle=0x%p FHandle=0x%x "
+			 "Mode=0x%lx Size=%lu", DHandle, Type, CHandle, *(u32 *)&FHandle[2], Mode, Size);
+ else
+ DbgPrint("DHandle=0x%p Type=%d CHandle=0x%p\n", DHandle, Type, CHandle);
+
+ resource = kmalloc(sizeof(struct daemon_resource), GFP_KERNEL);
+ if (!resource)
+ return -ENOMEM;
+
+ resource->type = Type;
+ resource->connection = CHandle;
+ if (FHandle)
+ memcpy(resource->handle, FHandle, sizeof(resource->handle));
+ else
+ memset(resource->handle, 0, sizeof(resource->handle));
+ resource->mode = Mode;
+ resource->size = Size;
+ write_lock(&DHandle->lock);
+ list_add(&resource->list, &DHandle->list);
+ write_unlock(&DHandle->lock);
+ DbgPrint("Adding resource=0x%p", resource);
+ return 0;
+}
+
+static int daemon_remove_resource(struct daemon_handle *DHandle, int Type, void *CHandle, unsigned long FHandle)
+{
+ struct daemon_resource *resource;
+ struct list_head *l;
+	int retVal = -ENOENT;
+
+	DbgPrint("DHandle=0x%p Type=%d CHandle=0x%p FHandle=0x%lx", DHandle, Type, CHandle, FHandle);
+
+ write_lock(&DHandle->lock);
+
+ list_for_each(l, &DHandle->list) {
+ resource = list_entry(l, struct daemon_resource, list);
+
+		if ((Type == resource->type) && (resource->connection == CHandle)) {
+			DbgPrint("Found resource=0x%p", resource);
+			l = l->prev;
+			list_del(&resource->list);
+			kfree(resource);
+			retVal = 0;
+			break;
+		}
+ }
+
+ write_unlock(&DHandle->lock);
+
+ return (retVal);
+}
+
+int novfs_daemon_lib_open(struct inode *inode, struct file *file)
+{
+ struct daemon_handle *dh;
+
+ DbgPrint("inode=0x%p file=0x%p", inode, file);
+ dh = kmalloc(sizeof(struct daemon_handle), GFP_KERNEL);
+ if (!dh)
+ return -ENOMEM;
+ file->private_data = dh;
+ INIT_LIST_HEAD(&dh->list);
+ rwlock_init(&dh->lock);
+ dh->session = novfs_scope_get_sessionId(NULL);
+ return 0;
+}
+
+int novfs_daemon_lib_close(struct inode *inode, struct file *file)
+{
+ struct daemon_handle *dh;
+ struct daemon_resource *resource;
+ struct list_head *l;
+
+ char commanddata[sizeof(struct novfs_xplat_call_request) + sizeof(struct nwd_close_conn)];
+ struct novfs_xplat_call_request *cmd;
+ struct xplat_call_reply *reply;
+ struct nwd_close_conn *nwdClose;
+ unsigned long cmdlen, replylen;
+
+ DbgPrint("inode=0x%p file=0x%p", inode, file);
+ if (file->private_data) {
+ dh = (struct daemon_handle *)file->private_data;
+
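+		/*
+		 * Release every resource still owned by this handle: close open
+		 * streams and send an XPLAT NWC_CLOSE_CONN for each remaining
+		 * connection before freeing the entry.
+		 */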
+ list_for_each(l, &dh->list) {
+ resource = list_entry(l, struct daemon_resource, list);
+
+ if (DH_TYPE_STREAM == resource->type) {
+ novfs_close_stream(resource->connection, resource->handle, dh->session);
+ } else if (DH_TYPE_CONNECTION == resource->type) {
+ cmd = (struct novfs_xplat_call_request *)commanddata;
+ cmdlen = offsetof(struct novfs_xplat_call_request, data) + sizeof(struct nwd_close_conn);
+ cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL;
+ cmd->Command.SequenceNumber = 0;
+ cmd->Command.SessionId = dh->session;
+ cmd->NwcCommand = NWC_CLOSE_CONN;
+
+ cmd->dataLen = sizeof(struct nwd_close_conn);
+ nwdClose = (struct nwd_close_conn *)cmd->data;
+ nwdClose->ConnHandle = (void *)resource->connection;
+
+ Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, 0);
+ if (reply)
+ kfree(reply);
+ }
+ l = l->prev;
+ list_del(&resource->list);
+ kfree(resource);
+ }
+ kfree(dh);
+ file->private_data = NULL;
+ }
+
+ return (0);
+}
+
+ssize_t novfs_daemon_lib_read(struct file *file, char *buf, size_t len, loff_t *off)
+{
+ struct daemon_handle *dh;
+ struct daemon_resource *resource;
+
+ size_t thisread, totalread = 0;
+ loff_t offset = *off;
+
+	DbgPrint("file=0x%p len=%zu off=%lld", file, len, *off);
+
+ if (file->private_data) {
+ dh = file->private_data;
+ read_lock(&dh->lock);
+ if (&dh->list != dh->list.next) {
+ resource = list_entry(dh->list.next, struct daemon_resource, list);
+
+ if (DH_TYPE_STREAM == resource->type) {
+ while (len > 0 && (offset < resource->size)) {
+ thisread = len;
+ if (novfs_read_stream
+ (resource->connection, resource->handle, buf, &thisread, &offset, 1, dh->session)
+ || !thisread) {
+ break;
+ }
+ len -= thisread;
+ buf += thisread;
+ offset += thisread;
+ totalread += thisread;
+ }
+ }
+ }
+ read_unlock(&dh->lock);
+ }
+ *off = offset;
+ DbgPrint("return = 0x%x", totalread);
+ return (totalread);
+}
+
+ssize_t novfs_daemon_lib_write(struct file *file, const char *buf, size_t len, loff_t *off)
+{
+ struct daemon_handle *dh;
+ struct daemon_resource *resource;
+
+ size_t thiswrite, totalwrite = -EINVAL;
+ loff_t offset = *off;
+ int status;
+
+	DbgPrint("file=0x%p len=%zu off=%lld", file, len, *off);
+
+ if (file->private_data) {
+ dh = file->private_data;
+ write_lock(&dh->lock);
+ if (&dh->list != dh->list.next) {
+ resource = list_entry(dh->list.next, struct daemon_resource, list);
+
+ if ((DH_TYPE_STREAM == resource->type) && (len >= 0)) {
+ totalwrite = 0;
+ do {
+ thiswrite = len;
+ status =
+ novfs_write_stream(resource->connection,
+ resource->handle, (void *)buf, &thiswrite, &offset, dh->session);
+ if (status || !thiswrite) {
+ /*
+ * If len is zero then the file will have just been
+ * truncated to offset. Update size.
+ */
+ if (!status && !len) {
+ resource->size = offset;
+ }
+ totalwrite = status;
+ break;
+ }
+ len -= thiswrite;
+ buf += thiswrite;
+ offset += thiswrite;
+ totalwrite += thiswrite;
+ if (offset > resource->size) {
+ resource->size = offset;
+ }
+ } while (len > 0);
+ }
+ }
+ write_unlock(&dh->lock);
+ }
+ *off = offset;
+ DbgPrint("return = 0x%x", totalwrite);
+
+ return (totalwrite);
+}
+
+loff_t novfs_daemon_lib_llseek(struct file *file, loff_t offset, int origin)
+{
+ struct daemon_handle *dh;
+ struct daemon_resource *resource;
+
+ loff_t retVal = -EINVAL;
+
+ DbgPrint("file=0x%p offset=%lld origin=%d", file, offset, origin);
+
+ if (file->private_data) {
+ dh = file->private_data;
+ read_lock(&dh->lock);
+ if (&dh->list != dh->list.next) {
+ resource = list_entry(dh->list.next, struct daemon_resource, list);
+
+ if (DH_TYPE_STREAM == resource->type) {
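+				/* origin 2 == SEEK_END (relative to the stream size), 1 == SEEK_CUR */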
+ switch (origin) {
+ case 2:
+ offset += resource->size;
+ break;
+ case 1:
+ offset += file->f_pos;
+ }
+ if (offset >= 0) {
+ if (offset != file->f_pos) {
+ file->f_pos = offset;
+ file->f_version = 0;
+ }
+ retVal = offset;
+ }
+ }
+ }
+ read_unlock(&dh->lock);
+ }
+
+ DbgPrint("ret %lld", retVal);
+
+ return retVal;
+}
+
+#define DbgIocCall(str) __DbgPrint("[VFS XPLAT] Call " str "\n")
+
+int novfs_daemon_lib_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int retCode = -ENOSYS;
+ struct daemon_handle *dh;
+ void *handle = NULL;
+ unsigned long cpylen;
+
+ lock_kernel(); /* needed? */
+
+ dh = file->private_data;
+
+	DbgPrint("file=0x%p cmd=0x%x arg=0x%lx dh=0x%p", file, cmd, arg, dh);
+
+ if (dh) {
+
+ switch (cmd) {
+ case IOC_LOGIN:
+ retCode = daemon_login((struct novfs_login *)arg, &dh->session);
+ break;
+
+ case IOC_LOGOUT:
+ retCode = daemon_logout((struct novfs_logout *)arg, &dh->session);
+ break;
+
+ case IOC_DEBUGPRINT:
+ {
+ struct Ioctl_Debug {
+ int length;
+ char *data;
+ } io;
+ char *buf;
+ io.length = 0;
+ cpylen = copy_from_user(&io, (void *)arg, sizeof(io));
+ if (io.length <= 0 || io.length > 1024) {
+ unlock_kernel();
+ return -EINVAL;
+ }
+ if (io.length) {
+ buf = kmalloc(io.length + 1, GFP_KERNEL);
+ if (buf) {
+ buf[0] = 0;
+ cpylen = copy_from_user(buf, io.data, io.length);
+ buf[io.length] = '\0';
+ __DbgPrint("%s", buf);
+ kfree(buf);
+ retCode = 0;
+ }
+ }
+ break;
+ }
+
+ case IOC_XPLAT:
+ {
+ struct novfs_xplat data;
+
+ cpylen = copy_from_user(&data, (void *)arg, sizeof(data));
+ retCode = ((data.xfunction & 0x0000FFFF) | 0xCC000000);
+
+ switch (data.xfunction) {
+ case NWC_OPEN_CONN_BY_NAME:
+ DbgIocCall("NwOpenConnByName");
+ retCode = novfs_open_conn_by_name(&data, &handle, dh->session);
+ if (!retCode)
+ daemon_added_resource(dh, DH_TYPE_CONNECTION, handle, 0, 0, 0);
+ break;
+
+ case NWC_OPEN_CONN_BY_ADDRESS:
+ DbgIocCall("NwOpenConnByAddress");
+ retCode = novfs_open_conn_by_addr(&data, &handle, dh->session);
+ if (!retCode)
+ daemon_added_resource(dh, DH_TYPE_CONNECTION, handle, 0, 0, 0);
+ break;
+
+ case NWC_OPEN_CONN_BY_REFERENCE:
+
+ DbgIocCall("NwOpenConnByReference");
+ retCode = novfs_open_conn_by_ref(&data, &handle, dh->session);
+ if (!retCode)
+ daemon_added_resource(dh, DH_TYPE_CONNECTION, handle, 0, 0, 0);
+ break;
+
+ case NWC_SYS_CLOSE_CONN:
+ DbgIocCall("NwSysCloseConn");
+ retCode = novfs_sys_conn_close(&data, (unsigned long *)&handle, dh->session);
+ daemon_remove_resource(dh, DH_TYPE_CONNECTION, handle, 0);
+ break;
+
+ case NWC_CLOSE_CONN:
+ DbgIocCall("NwCloseConn");
+ retCode = novfs_conn_close(&data, &handle, dh->session);
+ daemon_remove_resource(dh, DH_TYPE_CONNECTION, handle, 0);
+ break;
+
+ case NWC_LOGIN_IDENTITY:
+				DbgIocCall("NwLoginIdentity");
+ retCode = novfs_login_id(&data, dh->session);
+ break;
+
+ case NWC_RAW_NCP_REQUEST:
+				DbgIocCall("Send Raw NCP Request");
+ retCode = novfs_raw_send(&data, dh->session);
+ break;
+
+ case NWC_AUTHENTICATE_CONN_WITH_ID:
+				DbgIocCall("Authenticate Conn With ID");
+ retCode = novfs_auth_conn(&data, dh->session);
+ break;
+
+ case NWC_UNAUTHENTICATE_CONN:
+				DbgIocCall("UnAuthenticate Conn With ID");
+ retCode = novfs_unauthenticate(&data, dh->session);
+ break;
+
+ case NWC_LICENSE_CONN:
+				DbgIocCall("NwLicenseConn");
+ retCode = novfs_license_conn(&data, dh->session);
+ break;
+
+ case NWC_LOGOUT_IDENTITY:
+ DbgIocCall("NwLogoutIdentity");
+ retCode = novfs_logout_id(&data, dh->session);
+ break;
+
+ case NWC_UNLICENSE_CONN:
+ DbgIocCall("NwUnlicense");
+ retCode = novfs_unlicense_conn(&data, dh->session);
+ break;
+
+ case NWC_GET_CONN_INFO:
+ DbgIocCall("NwGetConnInfo");
+ retCode = novfs_get_conn_info(&data, dh->session);
+ break;
+
+ case NWC_SET_CONN_INFO:
+ DbgIocCall("NwSetConnInfo");
+ retCode = novfs_set_conn_info(&data, dh->session);
+ break;
+
+ case NWC_SCAN_CONN_INFO:
+ DbgIocCall("NwScanConnInfo");
+ retCode = novfs_scan_conn_info(&data, dh->session);
+ break;
+
+ case NWC_GET_IDENTITY_INFO:
+ DbgIocCall("NwGetIdentityInfo");
+ retCode = novfs_get_id_info(&data, dh->session);
+ break;
+
+ case NWC_GET_REQUESTER_VERSION:
+ DbgIocCall("NwGetDaemonVersion");
+ retCode = novfs_get_daemon_ver(&data, dh->session);
+ break;
+
+ case NWC_GET_PREFERRED_DS_TREE:
+ DbgIocCall("NwcGetPreferredDsTree");
+ retCode = novfs_get_preferred_DS_tree(&data, dh->session);
+ break;
+
+ case NWC_SET_PREFERRED_DS_TREE:
+ DbgIocCall("NwcSetPreferredDsTree");
+ retCode = novfs_set_preferred_DS_tree(&data, dh->session);
+ break;
+
+ case NWC_GET_DEFAULT_NAME_CONTEXT:
+ DbgIocCall("NwcGetDefaultNameContext");
+ retCode = novfs_get_default_ctx(&data, dh->session);
+ break;
+
+ case NWC_SET_DEFAULT_NAME_CONTEXT:
+ DbgIocCall("NwcSetDefaultNameContext");
+ retCode = novfs_set_default_ctx(&data, dh->session);
+ break;
+
+ case NWC_QUERY_FEATURE:
+ DbgIocCall("NwQueryFeature");
+ retCode = novfs_query_feature(&data, dh->session);
+ break;
+
+ case NWC_GET_TREE_MONITORED_CONN_REF:
+ DbgIocCall("NwcGetTreeMonitoredConn");
+ retCode = novfs_get_tree_monitored_conn(&data, dh->session);
+ break;
+
+ case NWC_ENUMERATE_IDENTITIES:
+ DbgIocCall("NwcEnumerateIdentities");
+ retCode = novfs_enum_ids(&data, dh->session);
+ break;
+
+ case NWC_CHANGE_KEY:
+ DbgIocCall("NwcChangeAuthKey");
+ retCode = novfs_change_auth_key(&data, dh->session);
+ break;
+
+ case NWC_CONVERT_LOCAL_HANDLE:
+ DbgIocCall("NwdConvertLocalHandle");
+ retCode = NwdConvertLocalHandle(&data, dh);
+ break;
+
+ case NWC_CONVERT_NETWARE_HANDLE:
+ DbgIocCall("NwdConvertNetwareHandle");
+ retCode = NwdConvertNetwareHandle(&data, dh);
+ break;
+
+ case NWC_SET_PRIMARY_CONN:
+ DbgIocCall("NwcSetPrimaryConn");
+ retCode = novfs_set_pri_conn(&data, dh->session);
+ break;
+
+ case NWC_GET_PRIMARY_CONN:
+ DbgIocCall("NwcGetPrimaryConn");
+ retCode = novfs_get_pri_conn(&data, dh->session);
+ break;
+
+ case NWC_MAP_DRIVE:
+ DbgIocCall("NwcMapDrive");
+ retCode = set_map_drive(&data, dh->session);
+ break;
+
+ case NWC_UNMAP_DRIVE:
+ DbgIocCall("NwcUnMapDrive");
+ retCode = unmap_drive(&data, dh->session);
+ break;
+
+ case NWC_ENUMERATE_DRIVES:
+ DbgIocCall("NwcEnumerateDrives");
+ retCode = novfs_enum_drives(&data, dh->session);
+ break;
+
+ case NWC_GET_MOUNT_PATH:
+ DbgIocCall("NwdGetMountPath");
+ retCode = NwdGetMountPath(&data);
+ break;
+
+ case NWC_GET_BROADCAST_MESSAGE:
+ DbgIocCall("NwdGetBroadcastMessage");
+ retCode = novfs_get_bcast_msg(&data, dh->session);
+ break;
+
+ case NWC_SET_KEY:
+ DbgIocCall("NwdSetKey");
+ retCode = novfs_set_key_value(&data, dh->session);
+ break;
+
+ case NWC_VERIFY_KEY:
+ DbgIocCall("NwdVerifyKey");
+ retCode = novfs_verify_key_value(&data, dh->session);
+ break;
+
+ case NWC_RAW_NCP_REQUEST_ALL:
+ case NWC_NDS_RESOLVE_NAME_TO_ID:
+ case NWC_FRAGMENT_REQUEST:
+ case NWC_GET_CONFIGURED_NSPS:
+ default:
+ break;
+
+ }
+
+ DbgPrint("[NOVFS XPLAT] status Code = %X\n", retCode);
+ break;
+ }
+ }
+ }
+
+ unlock_kernel();
+
+ return (retCode);
+}
+
+unsigned int novfs_daemon_poll(struct file *file, struct poll_table_struct *poll_table)
+{
+ struct daemon_cmd *que;
+ unsigned int mask = POLLOUT | POLLWRNORM;
+
+ que = get_next_queue(0);
+ if (que)
+ mask |= (POLLIN | POLLRDNORM);
+ return mask;
+}
+
+static int NwdConvertNetwareHandle(struct novfs_xplat *pdata, struct daemon_handle *DHandle)
+{
+ int retVal;
+ struct nwc_convert_netware_handle nh;
+ unsigned long cpylen;
+
+ DbgPrint("DHandle=0x%p", DHandle);
+
+ cpylen = copy_from_user(&nh, pdata->reqData, sizeof(struct nwc_convert_netware_handle));
+
+ retVal =
+ daemon_added_resource(DHandle, DH_TYPE_STREAM,
+ Uint32toHandle(nh.ConnHandle), nh.NetWareHandle, nh.uAccessMode, nh.uFileSize);
+
+ return (retVal);
+}
+
+static int NwdConvertLocalHandle(struct novfs_xplat *pdata, struct daemon_handle *DHandle)
+{
+ int retVal = NWE_REQUESTER_FAILURE;
+ struct daemon_resource *resource;
+ struct nwc_convert_local_handle lh;
+ struct list_head *l;
+ unsigned long cpylen;
+
+ DbgPrint("DHandle=0x%p", DHandle);
+
+ read_lock(&DHandle->lock);
+
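+	/*
+	 * Return the connection reference and NetWare handle of the first
+	 * stream resource owned by this daemon handle.
+	 */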
+ list_for_each(l, &DHandle->list) {
+ resource = list_entry(l, struct daemon_resource, list);
+
+ if (DH_TYPE_STREAM == resource->type) {
+ lh.uConnReference = HandletoUint32(resource->connection);
+
+			memcpy(lh.NetWareHandle, resource->handle, sizeof(resource->handle));
+ if (pdata->repLen >= sizeof(struct nwc_convert_local_handle)) {
+ cpylen = copy_to_user(pdata->repData, &lh, sizeof(struct nwc_convert_local_handle));
+ retVal = 0;
+ } else {
+ retVal = NWE_BUFFER_OVERFLOW;
+ }
+ break;
+ }
+ }
+
+ read_unlock(&DHandle->lock);
+
+ return (retVal);
+}
+
+static int NwdGetMountPath(struct novfs_xplat *pdata)
+{
+ int retVal = NWE_REQUESTER_FAILURE;
+ int len;
+ unsigned long cpylen;
+ struct nwc_get_mount_path mp;
+
+ if (pdata->reqLen != sizeof(mp))
+ return -EINVAL;
+ cpylen = copy_from_user(&mp, pdata->reqData, pdata->reqLen);
+
+ if (novfs_current_mnt) {
+
+ len = strlen(novfs_current_mnt) + 1;
+ if ((len > mp.MountPathLen) && mp.pMountPath) {
+ retVal = NWE_BUFFER_OVERFLOW;
+ } else {
+ if (mp.pMountPath) {
+ cpylen = copy_to_user(mp.pMountPath, novfs_current_mnt, len);
+ }
+ retVal = 0;
+ }
+
+ mp.MountPathLen = len;
+
+ if (pdata->repData && (pdata->repLen >= sizeof(mp))) {
+ cpylen = copy_to_user(pdata->repData, &mp, sizeof(mp));
+ }
+ }
+
+ return (retVal);
+}
+
+static int set_map_drive(struct novfs_xplat *pdata, struct novfs_schandle Session)
+{
+ int retVal;
+ unsigned long cpylen;
+ struct nwc_map_drive_ex symInfo;
+ char *path;
+ struct drive_map *drivemap, *dm;
+ struct list_head *list;
+
+ retVal = novfs_set_map_drive(pdata, Session);
+ if (retVal)
+ return retVal;
+ if (copy_from_user(&symInfo, pdata->reqData, sizeof(symInfo)))
+ return -EFAULT;
+ if (symInfo.linkOffsetLength > MAX_NAME_LEN)
+ return -EINVAL;
+ drivemap = kmalloc(sizeof(struct drive_map) + symInfo.linkOffsetLength, GFP_KERNEL);
+ if (!drivemap)
+ return -ENOMEM;
+
+ path = (char *)pdata->reqData;
+ path += symInfo.linkOffset;
+ cpylen = copy_from_user(drivemap->name, path, symInfo.linkOffsetLength);
+
+ drivemap->session = Session;
+ drivemap->hash = full_name_hash(drivemap->name, symInfo.linkOffsetLength - 1);
+ drivemap->namelen = symInfo.linkOffsetLength - 1;
+ DbgPrint("hash=0x%lx path=%s", drivemap->hash, drivemap->name);
+
+ dm = (struct drive_map *)&DriveMapList.next;
+
- down(&DriveMapLock);
++ mutex_lock(&DriveMapLock);
+
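+	/*
+	 * DriveMapList is kept sorted by name hash.  Walk it to find the
+	 * insertion point; if a mapping with the same name already exists,
+	 * the new entry is discarded.
+	 */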
+ list_for_each(list, &DriveMapList) {
+ dm = list_entry(list, struct drive_map, list);
+ __DbgPrint("%s: dm=0x%p\n"
+ " hash: 0x%lx\n"
+ " namelen: %d\n" " name: %s\n", __func__, dm, dm->hash, dm->namelen, dm->name);
+
+ if (drivemap->hash == dm->hash) {
+ if (0 == strcmp(dm->name, drivemap->name)) {
+ dm = NULL;
+ break;
+ }
+ } else if (drivemap->hash < dm->hash) {
+ break;
+ }
+ }
+
+ if (dm) {
+ if ((dm == (struct drive_map *)&DriveMapList) || (dm->hash < drivemap->hash)) {
+ list_add(&drivemap->list, &dm->list);
+ } else {
+ list_add_tail(&drivemap->list, &dm->list);
+ }
+ } else
+ kfree(drivemap);
- up(&DriveMapLock);
++ mutex_unlock(&DriveMapLock);
+ return (retVal);
+}
+
+static int unmap_drive(struct novfs_xplat *pdata, struct novfs_schandle Session)
+{
+ int retVal = NWE_REQUESTER_FAILURE;
+ struct nwc_unmap_drive_ex symInfo;
+ char *path;
+ struct drive_map *dm;
+ struct list_head *list;
+ unsigned long hash;
+
+ retVal = novfs_unmap_drive(pdata, Session);
+ if (retVal)
+ return retVal;
+ if (copy_from_user(&symInfo, pdata->reqData, sizeof(symInfo)))
+ return -EFAULT;
+ if (symInfo.linkLen > MAX_NAME_LEN || symInfo.linkLen == 0)
+ return -EINVAL;
+ path = kmalloc(symInfo.linkLen, GFP_KERNEL);
+ if (!path)
+ return -ENOMEM;
+ if (copy_from_user(path, ((struct nwc_unmap_drive_ex *)pdata->reqData)->linkData, symInfo.linkLen)) {
+ kfree(path);
+ return -EFAULT;
+ }
+
+ hash = full_name_hash(path, symInfo.linkLen - 1);
+	DbgPrint("hash=0x%lx path=%s", hash, path);
+
+ dm = NULL;
+
- down(&DriveMapLock);
++ mutex_lock(&DriveMapLock);
+
+ list_for_each(list, &DriveMapList) {
+ dm = list_entry(list, struct drive_map, list);
+		__DbgPrint("%s: dm=0x%p %s\n"
+			   "   hash: 0x%lx\n" "   namelen: %d\n", __func__, dm, dm->name, dm->hash, dm->namelen);
+
+ if (hash == dm->hash) {
+ if (0 == strcmp(dm->name, path)) {
+ break;
+ }
+ } else if (hash < dm->hash) {
+ dm = NULL;
+ break;
+ }
+ }
+
+ if (dm) {
+		__DbgPrint("%s: Remove dm=0x%p %s\n"
+			   "   hash: 0x%lx\n" "   namelen: %d\n", __func__, dm, dm->name, dm->hash, dm->namelen);
+ list_del(&dm->list);
+ kfree(dm);
+ }
+
- up(&DriveMapLock);
++ mutex_unlock(&DriveMapLock);
+ return (retVal);
+}
+
+static void RemoveDriveMaps(void)
+{
+ struct drive_map *dm;
+ struct list_head *list;
+
- down(&DriveMapLock);
++ mutex_lock(&DriveMapLock);
+ list_for_each(list, &DriveMapList) {
+ dm = list_entry(list, struct drive_map, list);
+
+ __DbgPrint("%s: dm=0x%p\n"
+			   "   hash: 0x%lx\n"
+ " namelen: %d\n" " name: %s\n", __func__, dm, dm->hash, dm->namelen, dm->name);
+ local_unlink(dm->name);
+ list = list->prev;
+ list_del(&dm->list);
+ kfree(dm);
+ }
- up(&DriveMapLock);
++ mutex_unlock(&DriveMapLock);
+}
+
+/* As picked from do_unlinkat() */
+
+static long local_unlink(const char *pathname)
+{
+ int error;
+ struct dentry *dentry;
+ char *name, *c;
+ struct nameidata nd;
+ struct inode *inode = NULL;
+
+ error = path_lookup(pathname, LOOKUP_PARENT, &nd);
+ DbgPrint("path_lookup %s error: %d\n", pathname, error);
+ if (error)
+ return error;
+
+ error = -EISDIR;
+ if (nd.last_type != LAST_NORM)
+ goto exit1;
+ mutex_lock(&nd.path.dentry->d_inode->i_mutex);
+ /* Get the filename of pathname */
+ name = c = (char *)pathname;
+ while (*c != '\0') {
+ if (*c == '/')
+ name = ++c;
+ else
+ c++;
+ }
+ dentry = lookup_one_len(name, nd.path.dentry, strlen(name));
+ error = PTR_ERR(dentry);
+
+ if (!IS_ERR(dentry)) {
+ DbgPrint("dentry %p", dentry);
+		if (!dentry->d_inode || !S_ISLNK(dentry->d_inode->i_mode)) {
+ DbgPrint("%s not a link", name);
+ error = -ENOENT;
+ goto exit1;
+ }
+ /* Why not before? Because we want correct error value */
+ if (nd.last.name[nd.last.len])
+ goto slashes;
+ inode = dentry->d_inode;
+ if (inode)
+ atomic_inc(&inode->i_count);
+ error = mnt_want_write(nd.path.mnt);
+ DbgPrint("inode %p mnt_want_write error %d", inode, error);
+ if (error)
+ goto exit2;
+ error = vfs_unlink(nd.path.dentry->d_inode, dentry);
+ mnt_drop_write(nd.path.mnt);
+exit2:
+ dput(dentry);
+ }
+ mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
+ if (inode)
+ iput(inode); /* truncate the inode here */
+exit1:
+ path_put(&nd.path);
+ DbgPrint("returning error %d", error);
+ return error;
+
+slashes:
+ error = !dentry->d_inode ? -ENOENT : S_ISDIR(dentry->d_inode->i_mode) ? -EISDIR : -ENOTDIR;
+ goto exit2;
+}
--- /dev/null
+/*
+ * Novell NCP Redirector for Linux
+ * Author: James Turner
+ *
+ * This file contains functions used to control access to the Linux file
+ * system.
+ *
+ * Copyright (C) 2005 Novell, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/dcache.h>
+#include <linux/mount.h>
+#include <linux/pagemap.h>
+#include <linux/string.h>
+#include <linux/smp_lock.h>
+#include <linux/slab.h>
+#include <linux/unistd.h>
+#include <asm/statfs.h>
+#include <asm/uaccess.h>
+#include <linux/ctype.h>
+#include <linux/statfs.h>
+#include <linux/pagevec.h>
+#include <linux/writeback.h>
+#include <linux/backing-dev.h>
+#include <linux/mm.h>
+#include <linux/file.h>
+
+/*===[ Include files specific to this module ]============================*/
+#include "vfs.h"
+
+struct inode_data {
+ void *Scope;
+ unsigned long Flags;
+ struct list_head IList;
+ struct inode *Inode;
+ unsigned long cntDC;
+ struct list_head DirCache;
- struct semaphore DirCacheLock;
++ struct mutex DirCacheLock;
+ void *FileHandle;
+ int CacheFlag;
+ char Name[1]; /* Needs to be last entry */
+};
+
+#define FILE_UPDATE_TIMEOUT 2
+
+/*===[ Function prototypes ]=============================================*/
+
+static unsigned long novfs_internal_hash(struct qstr *name);
+static int novfs_d_add(struct dentry *p, struct dentry *d, struct inode *i, int add);
+
+static int novfs_get_sb(struct file_system_type *Fstype, int Flags, const char *Dev_name, void *Data, struct vfsmount *Mnt);
+
+static void novfs_kill_sb(struct super_block *SB);
+
+/*
+ * Declared dentry_operations
+ */
+int novfs_d_revalidate(struct dentry *, struct nameidata *);
+int novfs_d_hash(struct dentry *, struct qstr *);
+int novfs_d_compare(struct dentry *, struct qstr *, struct qstr *);
+int novfs_d_delete(struct dentry *dentry);
+void novfs_d_release(struct dentry *dentry);
+void novfs_d_iput(struct dentry *dentry, struct inode *inode);
+
+/*
+ * Declared directory operations
+ */
+int novfs_dir_open(struct inode *inode, struct file *file);
+int novfs_dir_release(struct inode *inode, struct file *file);
+loff_t novfs_dir_lseek(struct file *file, loff_t offset, int origin);
+ssize_t novfs_dir_read(struct file *file, char *buf, size_t len, loff_t * off);
+void addtodentry(struct dentry *Parent, unsigned char *List, int Level);
+int novfs_filldir(void *data, const char *name, int namelen, loff_t off, ino_t ino, unsigned ftype);
+int novfs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir);
+int novfs_dir_fsync(struct file *file, int datasync);
+
+/*
+ * Declared address space operations
+ */
+int novfs_a_writepage(struct page *page, struct writeback_control *wbc);
+int novfs_a_writepages(struct address_space *mapping, struct writeback_control *wbc);
+int novfs_a_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata);
+int novfs_a_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied, struct page *pagep, void *fsdata);
+int novfs_a_readpage(struct file *file, struct page *page);
+int novfs_a_readpages(struct file *file, struct address_space *mapping, struct list_head *page_lst, unsigned nr_pages);
+ssize_t novfs_a_direct_IO(int rw, struct kiocb *kiocb, const struct iovec *iov, loff_t offset, unsigned long nr_segs);
+
+/*
+ * Declared file_operations
+ */
+ssize_t novfs_f_read(struct file *, char *, size_t, loff_t *);
+ssize_t novfs_f_write(struct file *, const char *, size_t, loff_t *);
+int novfs_f_readdir(struct file *, void *, filldir_t);
+int novfs_f_mmap(struct file *file, struct vm_area_struct *vma);
+int novfs_f_open(struct inode *, struct file *);
+int novfs_f_flush(struct file *, fl_owner_t);
+int novfs_f_release(struct inode *, struct file *);
+int novfs_f_fsync(struct file *, int datasync);
+int novfs_f_lock(struct file *, int, struct file_lock *);
+
+/*
+ * Declared inode_operations
+ */
+int novfs_i_create(struct inode *, struct dentry *, int, struct nameidata *);
+struct dentry *novfs_i_lookup(struct inode *, struct dentry *, struct nameidata *);
+int novfs_i_mkdir(struct inode *, struct dentry *, int);
+int novfs_i_unlink(struct inode *dir, struct dentry *dentry);
+int novfs_i_rmdir(struct inode *, struct dentry *);
+int novfs_i_mknod(struct inode *, struct dentry *, int, dev_t);
+int novfs_i_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
+int novfs_i_setattr(struct dentry *, struct iattr *);
+int novfs_i_getattr(struct vfsmount *mnt, struct dentry *, struct kstat *);
+int novfs_i_revalidate(struct dentry *dentry);
+
+/*
+ * Extended attributes operations
+ */
+
+ssize_t novfs_i_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size);
+int novfs_i_setxattr(struct dentry *dentry, const char *name, const void *value, size_t value_size, int flags);
+ssize_t novfs_i_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size);
+
+void update_inode(struct inode *Inode, struct novfs_entry_info *Info);
+
+/*
+ * Declared super_operations
+ */
+void novfs_read_inode(struct inode *inode);
+void novfs_write_inode(struct inode *inode);
+int novfs_notify_change(struct dentry *dentry, struct iattr *attr);
+void novfs_evict_inode(struct inode *inode);
+int novfs_show_options(struct seq_file *s, struct vfsmount *m);
+
+int novfs_statfs(struct dentry *de, struct kstatfs *buf);
+
+/*
+ * Declared control interface functions
+ */
+ssize_t novfs_control_Read(struct file *file, char *buf, size_t nbytes, loff_t * ppos);
+
+ssize_t novfs_control_write(struct file *file, const char *buf, size_t nbytes, loff_t * ppos);
+
+int __init init_novfs(void);
+void __exit exit_novfs(void);
+
+int novfs_lock_inode_cache(struct inode *i);
+void novfs_unlock_inode_cache(struct inode *i);
+int novfs_enumerate_inode_cache(struct inode *i, struct list_head **iteration, ino_t * ino, struct novfs_entry_info *info);
+int novfs_get_entry(struct inode *i, struct qstr *name, ino_t * ino, struct novfs_entry_info *info);
+int novfs_get_entry_by_pos(struct inode *i, loff_t pos, ino_t * ino, struct novfs_entry_info *info);
+int novfs_get_entry_time(struct inode *i, struct qstr *name, ino_t * ino, struct novfs_entry_info *info, u64 * EntryTime);
+int novfs_get_remove_entry(struct inode *i, ino_t * ino, struct novfs_entry_info *info);
+void novfs_invalidate_inode_cache(struct inode *i);
+struct novfs_dir_cache *novfs_lookup_inode_cache(struct inode *i, struct qstr *name, ino_t ino);
+int novfs_lookup_validate(struct inode *i, struct qstr *name, ino_t ino);
+int novfs_add_inode_entry(struct inode *i, struct qstr *name, ino_t ino, struct novfs_entry_info *info);
+int novfs_update_entry(struct inode *i, struct qstr *name, ino_t ino, struct novfs_entry_info *info);
+void novfs_remove_inode_entry(struct inode *i, struct qstr *name, ino_t ino);
+void novfs_free_invalid_entries(struct inode *i);
+void novfs_free_inode_cache(struct inode *i);
+
+/*===[ Global variables ]=================================================*/
+struct dentry_operations novfs_dentry_operations = {
+ .d_revalidate = novfs_d_revalidate,
+ .d_hash = novfs_d_hash,
+ .d_compare = novfs_d_compare,
+ //.d_delete = novfs_d_delete,
+ .d_release = novfs_d_release,
+ .d_iput = novfs_d_iput,
+};
+
+struct file_operations novfs_dir_operations = {
+ .owner = THIS_MODULE,
+ .open = novfs_dir_open,
+ .release = novfs_dir_release,
+ .llseek = novfs_dir_lseek,
+ .read = novfs_dir_read,
+ .readdir = novfs_dir_readdir,
+ .fsync = novfs_dir_fsync,
+};
+
+static struct file_operations novfs_file_operations = {
+ .owner = THIS_MODULE,
+ .read = novfs_f_read,
+ .write = novfs_f_write,
+ .readdir = novfs_f_readdir,
+ .mmap = novfs_f_mmap,
+ .open = novfs_f_open,
+ .flush = novfs_f_flush,
+ .release = novfs_f_release,
+ .fsync = novfs_f_fsync,
+ .llseek = generic_file_llseek,
+ .lock = novfs_f_lock,
+};
+
+static struct address_space_operations novfs_nocache_aops = {
+ .readpage = novfs_a_readpage,
+};
+
+struct backing_dev_info novfs_backing_dev_info = {
+ .ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE,
+ .state = 0,
+ .capabilities = BDI_CAP_NO_WRITEBACK | BDI_CAP_MAP_COPY,
+ .unplug_io_fn = default_unplug_io_fn,
+};
+
+static struct address_space_operations novfs_aops = {
+ .readpage = novfs_a_readpage,
+ .readpages = novfs_a_readpages,
+ .writepage = novfs_a_writepage,
+ .writepages = novfs_a_writepages,
+ .write_begin = novfs_a_write_begin,
+ .write_end = novfs_a_write_end,
+ .set_page_dirty = __set_page_dirty_nobuffers,
+ .direct_IO = novfs_a_direct_IO,
+};
+
+static struct inode_operations novfs_inode_operations = {
+ .create = novfs_i_create,
+ .lookup = novfs_i_lookup,
+ .unlink = novfs_i_unlink,
+ .mkdir = novfs_i_mkdir,
+ .rmdir = novfs_i_rmdir,
+ .mknod = novfs_i_mknod,
+ .rename = novfs_i_rename,
+ .setattr = novfs_i_setattr,
+ .getattr = novfs_i_getattr,
+ .getxattr = novfs_i_getxattr,
+ .setxattr = novfs_i_setxattr,
+ .listxattr = novfs_i_listxattr,
+};
+
+static struct inode_operations novfs_file_inode_operations = {
+ .setattr = novfs_i_setattr,
+ .getattr = novfs_i_getattr,
+ .getxattr = novfs_i_getxattr,
+ .setxattr = novfs_i_setxattr,
+ .listxattr = novfs_i_listxattr,
+};
+
+static struct super_operations novfs_ops = {
+ .statfs = novfs_statfs,
+ .evict_inode = novfs_evict_inode,
+ .drop_inode = generic_delete_inode,
+ .show_options = novfs_show_options,
+
+};
+
+/* Not currently used
+static struct file_operations novfs_Control_operations = {
+ .read = novfs_Control_read,
+ .write = novfs_Control_write,
+};
+*/
+
+static atomic_t novfs_Inode_Number = ATOMIC_INIT(0);
+
+struct dentry *novfs_root = NULL;
+char *novfs_current_mnt = NULL;
+
- DECLARE_MUTEX(InodeList_lock);
++DEFINE_MUTEX(InodeList_lock);
+
+LIST_HEAD(InodeList);
+
- DECLARE_MUTEX(TimeDir_Lock);
++DEFINE_MUTEX(TimeDir_Lock);
+uint64_t lastTime;
+char lastDir[PATH_MAX];
+
+uint64_t inHAXTime;
+int inHAX;
+
+unsigned long InodeCount = 0, DCCount = 0;
+unsigned long novfs_update_timeout = FILE_UPDATE_TIMEOUT;
+int novfs_page_cache = 0;
+
+struct file_private {
+ int listedall;
+ void *enumHandle;
+};
+
+static void PRINT_DENTRY(const char *s, struct dentry *d)
+{
+ __DbgPrint("%s: 0x%p\n", s, d);
+ __DbgPrint(" d_count: 0x%x\n", d->d_count);
+ __DbgPrint(" d_lock: 0x%x\n", d->d_lock);
+ __DbgPrint(" d_inode: 0x%x\n", d->d_inode);
+ __DbgPrint(" d_lru: 0x%p\n"
+ " next: 0x%p\n" " prev: 0x%p\n", &d->d_lru, d->d_lru.next, d->d_lru.prev);
+ __DbgPrint(" d_child: 0x%p\n" " next: 0x%p\n"
+ " prev: 0x%p\n", &d->d_u.d_child, d->d_u.d_child.next, d->d_u.d_child.prev);
+ __DbgPrint(" d_subdirs: 0x%p\n" " next: 0x%p\n"
+ " prev: 0x%p\n", &d->d_subdirs, d->d_subdirs.next, d->d_subdirs.prev);
+ __DbgPrint(" d_alias: 0x%p\n" " next: 0x%p\n"
+ " prev: 0x%p\n", &d->d_alias, d->d_alias.next, d->d_alias.prev);
+ __DbgPrint(" d_time: 0x%x\n", d->d_time);
+ __DbgPrint(" d_op: 0x%p\n", d->d_op);
+ __DbgPrint(" d_sb: 0x%p\n", d->d_sb);
+ __DbgPrint(" d_flags: 0x%x\n", d->d_flags);
+ __DbgPrint(" d_mounted: 0x%x\n", d->d_mounted);
+ __DbgPrint(" d_fsdata: 0x%p\n", d->d_fsdata);
+/* DbgPrint(" d_cookie: 0x%x\n", d->d_cookie); */
+ __DbgPrint(" d_parent: 0x%p\n", d->d_parent);
+ __DbgPrint(" d_name: 0x%p %.*s\n", &d->d_name, d->d_name.len, d->d_name.name);
+ __DbgPrint(" name: 0x%p\n" " len: %d\n"
+ " hash: 0x%x\n", d->d_name.name, d->d_name.len, d->d_name.hash);
+ __DbgPrint(" d_hash: 0x%x\n" " next: 0x%x\n"
+ " pprev: 0x%x\n", d->d_hash, d->d_hash.next, d->d_hash.pprev);
+}
+
+/*++======================================================================*/
+int novfs_remove_from_root(char *RemoveName)
+{
+ struct qstr name;
+ struct dentry *dentry;
+ struct inode *dir;
+
+ DbgPrint("%s", RemoveName);
+ name.len = strlen(RemoveName);
+ name.name = RemoveName;
+ novfs_d_hash(novfs_root, &name);
+
+ dentry = d_lookup(novfs_root, &name);
+ if (dentry) {
+ if (dentry->d_inode && dentry->d_inode->i_private) {
+ struct inode_data *n_inode = dentry->d_inode->i_private;
+ n_inode->Scope = NULL;
+ }
+ dput(dentry);
+ }
+
+ dir = novfs_root->d_inode;
+
+ novfs_lock_inode_cache(dir);
+ novfs_remove_inode_entry(dir, &name, 0);
+ novfs_unlock_inode_cache(dir);
+
+ return (0);
+}
+
+/*++======================================================================*/
+int novfs_add_to_root(char *AddName)
+{
+ struct qstr name;
+ struct inode *dir;
+ struct novfs_entry_info info;
+ ino_t ino;
+
+ DbgPrint("%s", AddName);
+ name.len = strlen(AddName);
+ name.name = AddName;
+ novfs_d_hash(novfs_root, &name);
+
+ dir = novfs_root->d_inode;
+
+ novfs_lock_inode_cache(dir);
+
+ ino = 0;
+
+ if (!novfs_lookup_inode_cache(dir, &name, 0)) {
+ info.mode = S_IFDIR | 0700;
+ info.size = 0;
+ info.atime = info.ctime = info.mtime = CURRENT_TIME;
+
+ ino = (ino_t) atomic_inc_return(&novfs_Inode_Number);
+ novfs_add_inode_entry(dir, &name, ino, &info);
+ }
+
+ novfs_unlock_inode_cache(dir);
+
+ return (0);
+}
+
+/*++======================================================================*/
+int novfs_Add_to_Root2(char *AddName)
+{
+ struct dentry *entry;
+ struct qstr name;
+ struct inode *inode;
+ void *scope;
+
+ DbgPrint("%s", AddName);
+ name.len = strlen(AddName);
+ name.name = AddName;
+
+ novfs_d_hash(novfs_root, &name);
+
+ entry = d_lookup(novfs_root, &name);
+ DbgPrint("novfs_d_lookup 0x%p", entry);
+ if (NULL == entry) {
+ scope = novfs_scope_lookup();
+
+ entry = d_alloc(novfs_root, &name);
+ DbgPrint("d_alloc 0x%p", entry);
+ if (entry) {
+ entry->d_op = &novfs_dentry_operations;
+ entry->d_time = jiffies + (novfs_update_timeout * HZ);
+ /*
+ * done in novfs_d_add now... entry->d_fsdata = (void *)novfs_internal_hash( &name );
+ */
+ inode = novfs_get_inode(novfs_root->d_sb, S_IFDIR | 0700, 0, novfs_scope_get_uid(scope), 0, &name);
+ DbgPrint("Inode=0x%p", inode);
+ if (inode) {
+ inode->i_atime = inode->i_ctime = inode->i_mtime = CURRENT_TIME;
+ if (!novfs_d_add(novfs_root, entry, inode, 1)) {
+ if (inode->i_private) {
+ struct inode_data *n_inode = inode->i_private;
+ n_inode->Flags = USER_INODE;
+ }
+ PRINT_DENTRY("After novfs_d_add", entry);
+ } else {
+ dput(entry);
+ iput(inode);
+ }
+ }
+ }
+ } else {
+ dput(entry);
+ PRINT_DENTRY("novfs_Add_to_Root: After dput Dentry", entry);
+ }
+ return (0);
+}
+
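+/*
+ * Build a backslash-separated path for Dentry, filling Buf from the end
+ * backwards toward the root.  Returns a pointer into Buf, or NULL if the
+ * buffer is too small.
+ */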
+char *novfs_dget_path(struct dentry *Dentry, char *Buf, unsigned int Buflen)
+{
+ char *retval = &Buf[Buflen];
+ struct dentry *p = Dentry;
+
+ *(--retval) = '\0';
+ Buflen--;
+
+ if (!IS_ROOT(p) && !IS_ROOT(p->d_parent)) {
+ while (Buflen && !IS_ROOT(p) && !IS_ROOT(p->d_parent)) {
+ if (Buflen > p->d_name.len) {
+ retval -= p->d_name.len;
+ Buflen -= p->d_name.len;
+ memcpy(retval, p->d_name.name, p->d_name.len);
+ *(--retval) = '\\';
+ Buflen--;
+ p = p->d_parent;
+ } else {
+ retval = NULL;
+ break;
+ }
+ }
+ } else {
+ *(--retval) = '\\';
+ }
+
+ if (retval)
+ DbgPrint("%s", retval);
+ return (retval);
+}
+
+int verify_dentry(struct dentry *dentry, int Flags)
+{
+ int retVal = -ENOENT;
+ struct inode *dir;
+ struct novfs_entry_info *info = NULL;
+ struct inode_data *id;
+ struct novfs_schandle session;
+ char *path, *list = NULL, *cp;
+ ino_t ino = 0;
+ struct qstr name;
+ int iLock = 0;
+ struct dentry *parent = NULL;
+ u64 ctime;
+ struct inode *inode;
+
+ if (IS_ROOT(dentry)) {
+ DbgPrint("Root entry");
+ return (0);
+ }
+
+ if (dentry && dentry->d_parent && (dir = dentry->d_parent->d_inode) && (id = dir->i_private)) {
+ parent = dget_parent(dentry);
+
+ info = kmalloc(sizeof(struct novfs_entry_info) + PATH_LENGTH_BUFFER, GFP_KERNEL);
+
+ if (info) {
+ if (novfs_lock_inode_cache(dir)) {
+ name.len = dentry->d_name.len;
+ name.name = dentry->d_name.name;
+ name.hash = novfs_internal_hash(&name);
+ if (!novfs_get_entry_time(dir, &name, &ino, info, &ctime)) {
+ inode = dentry->d_inode;
+ if (inode && inode->i_private &&
+ ((inode->i_size != info->size) || (inode->i_mtime.tv_sec != info->mtime.tv_sec)
+ || (inode->i_mtime.tv_nsec != info->mtime.tv_nsec))) {
+ /*
+ * Values don't match so update.
+ */
+ struct inode_data *n_inode = inode->i_private;
+ n_inode->Flags |= UPDATE_INODE;
+ }
+
+ ctime = get_jiffies_64() - ctime;
+ if (Flags || ctime < (u64) (novfs_update_timeout * HZ)) {
+ retVal = 0;
+ novfs_unlock_inode_cache(dir);
+ dput(parent);
+ kfree(info);
+ return (0);
+ }
+ }
+ novfs_unlock_inode_cache(dir);
+ }
+
+ if (IS_ROOT(dentry->d_parent)) {
+ session = novfs_scope_get_sessionId(novfs_get_scope_from_name(&dentry->d_name));
+ } else
+ session = novfs_scope_get_sessionId(id->Scope);
+
+ if (!SC_PRESENT(session)) {
+ id->Scope = novfs_get_scope(dentry);
+ session = novfs_scope_get_sessionId(id->Scope);
+ }
+
+ ino = 0;
+ retVal = 0;
+
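+			/*
+			 * Entries directly under the root are the scope users, so
+			 * rebuild the root's inode cache from the current user list.
+			 * Everything else is validated against the server with
+			 * novfs_get_file_info().
+			 */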
+ if (IS_ROOT(dentry->d_parent)) {
+ DbgPrint("parent is Root directory");
+ list = novfs_get_scopeusers();
+
+ iLock = novfs_lock_inode_cache(dir);
+ novfs_invalidate_inode_cache(dir);
+
+ if (list) {
+ cp = list;
+ while (*cp) {
+ name.name = cp;
+ name.len = strlen(cp);
+ name.hash = novfs_internal_hash(&name);
+ cp += (name.len + 1);
+ ino = 0;
+ if (novfs_get_entry(dir, &name, &ino, info)) {
+ info->mode = S_IFDIR | 0700;
+ info->size = 0;
+ info->atime = info->ctime = info->mtime = CURRENT_TIME;
+ ino = (ino_t) atomic_inc_return(&novfs_Inode_Number);
+ novfs_add_inode_entry(dir, &name, ino, info);
+ }
+ }
+ }
+ novfs_free_invalid_entries(dir);
+ } else {
+
+ path = novfs_dget_path(dentry, info->name, PATH_LENGTH_BUFFER);
+ if (path) {
+ if (dentry->d_name.len <= NW_MAX_PATH_LENGTH) {
+ name.hash = novfs_internal_hash(&dentry->d_name);
+ name.len = dentry->d_name.len;
+ name.name = dentry->d_name.name;
+
+ retVal = novfs_get_file_info(path, info, session);
+ if (0 == retVal) {
+ dentry->d_time = jiffies + (novfs_update_timeout * HZ);
+ iLock = novfs_lock_inode_cache(dir);
+ if (novfs_update_entry(dir, &name, 0, info)) {
+ if (dentry->d_inode) {
+ ino = dentry->d_inode->i_ino;
+ } else {
+ ino = (ino_t) atomic_inc_return(&novfs_Inode_Number);
+ }
+ novfs_add_inode_entry(dir, &name, ino, info);
+ }
+ if (dentry->d_inode) {
+ update_inode(dentry->d_inode, info);
+ id->Flags &= ~UPDATE_INODE;
+
+ dentry->d_inode->i_flags &= ~S_DEAD;
+ if (dentry->d_inode->i_private) {
+ ((struct inode_data *)dentry->d_inode->i_private)->Scope =
+ id->Scope;
+ }
+ }
+ } else if (-EINTR != retVal) {
+ retVal = 0;
+ iLock = novfs_lock_inode_cache(dir);
+ novfs_remove_inode_entry(dir, &name, 0);
+ if (dentry->d_inode && !(dentry->d_inode->i_flags & S_DEAD)) {
+ dentry->d_inode->i_flags |= S_DEAD;
+ dentry->d_inode->i_size = 0;
+ dentry->d_inode->i_atime.tv_sec =
+ dentry->d_inode->i_atime.tv_nsec =
+ dentry->d_inode->i_ctime.tv_sec =
+ dentry->d_inode->i_ctime.tv_nsec =
+ dentry->d_inode->i_mtime.tv_sec =
+ dentry->d_inode->i_mtime.tv_nsec = 0;
+ dentry->d_inode->i_blocks = 0;
+ d_delete(dentry); /* Remove from cache */
+ }
+ }
+ } else {
+ retVal = -ENAMETOOLONG;
+ }
+ }
+ }
+ } else {
+ retVal = -ENOMEM;
+ }
+ if (iLock) {
+ novfs_unlock_inode_cache(dir);
+ }
+ dput(parent);
+ }
+
+	kfree(list);
+	kfree(info);
+
+ DbgPrint("return=0x%x", retVal);
+
+ return (retVal);
+}
+
+static int novfs_d_add(struct dentry *Parent, struct dentry *d, struct inode *i, int a)
+{
+ void *scope;
+ struct inode_data *id = NULL;
+
+ char *path, *buf;
+
+ buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL);
+ if (buf) {
+ path = novfs_dget_path(d, buf, PATH_LENGTH_BUFFER);
+ if (path) {
+ DbgPrint("inode=0x%p ino=%d path %s", i, i->i_ino, path);
+ }
+ kfree(buf);
+ }
+
+ if (Parent && Parent->d_inode && Parent->d_inode->i_private) {
+ id = (struct inode_data *)Parent->d_inode->i_private;
+ }
+
+ if (id && id->Scope) {
+ scope = id->Scope;
+ } else {
+ scope = novfs_get_scope(d);
+ }
+
+ ((struct inode_data *)i->i_private)->Scope = scope;
+
+ d->d_time = jiffies + (novfs_update_timeout * HZ);
+ if (a) {
+ d_add(d, i);
+ } else {
+ d_instantiate(d, i);
+ }
+
+ return (0);
+}
+
+int novfs_d_revalidate(struct dentry *dentry, struct nameidata *nd)
+{
+ int retCode = 0;
+ struct inode *dir;
+ struct inode_data *id;
+ struct qstr name;
+
+ __DbgPrint("%s: 0x%p %.*s\n"
+ " d_count: %d\n"
+ " d_inode: 0x%p\n", __func__,
+ dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_count, dentry->d_inode);
+
+ if (IS_ROOT(dentry)) {
+ retCode = 1;
+ } else {
+ if (dentry->d_inode && dentry->d_parent && (dir = dentry->d_parent->d_inode) && (id = dir->i_private)) {
+ /*
+ * Check timer to see if in valid time limit
+ */
+ if (jiffies > dentry->d_time) {
+ /*
+ * Revalidate entry
+ */
+ name.len = dentry->d_name.len;
+ name.name = dentry->d_name.name;
+ name.hash = novfs_internal_hash(&dentry->d_name);
+ dentry->d_time = 0;
+
+ if (0 == verify_dentry(dentry, 0)) {
+ if (novfs_lock_inode_cache(dir)) {
+ if (novfs_lookup_inode_cache(dir, &name, 0)) {
+ dentry->d_time = jiffies + (novfs_update_timeout * HZ);
+ retCode = 1;
+ }
+ novfs_unlock_inode_cache(dir);
+ }
+ }
+ } else {
+ retCode = 1;
+ }
+ }
+ }
+
+ if ((0 == retCode) && dentry->d_inode) {
+ /*
+ * Entry has become invalid
+ */
+/* dput(dentry);
+*/
+ }
+
+ DbgPrint("return 0x%x %.*s", retCode, dentry->d_name.len, dentry->d_name.name);
+
+ return (retCode);
+}
+
+static unsigned long novfs_internal_hash(struct qstr *name)
+{
+ unsigned long hash = 0;
+ unsigned int len = name->len;
+ unsigned char *c = (unsigned char *)name->name;
+
+ while (len--) {
+ /*
+ * Lower case values for the hash.
+ */
+ hash = partial_name_hash(tolower(*c++), hash);
+ }
+
+ return (hash);
+}
+
+int novfs_d_hash(struct dentry *dentry, struct qstr *name)
+{
+ DbgPrint("%.*s", name->len, name->name);
+
+ name->hash = novfs_internal_hash(name);
+
+ return (0);
+}
+
+int novfs_d_strcmp(struct qstr *s1, struct qstr *s2)
+{
+ int retCode = 1;
+ unsigned char *str1, *str2;
+ unsigned int len;
+
+ DbgPrint("s1=%.*s s2=%.*s", s1->len, s1->name, s2->len, s2->name);
+
+ if (s1->len && (s1->len == s2->len) && (s1->hash == s2->hash)) {
+ len = s1->len;
+ str1 = (unsigned char *)s1->name;
+ str2 = (unsigned char *)s2->name;
+ for (retCode = 0; len--; str1++, str2++) {
+ if (*str1 != *str2) {
+ if (tolower(*str1) != tolower(*str2)) {
+ retCode = 1;
+ break;
+ }
+ }
+ }
+ }
+
+ DbgPrint("retCode=0x%x", retCode);
+ return (retCode);
+}
+
+int novfs_d_compare(struct dentry *parent, struct qstr *s1, struct qstr *s2)
+{
+ int retCode;
+
+ retCode = novfs_d_strcmp(s1, s2);
+
+ DbgPrint("retCode=0x%x", retCode);
+ return (retCode);
+}
+
+int novfs_d_delete(struct dentry *dentry)
+{
+ int retVal = 0;
+
+ DbgPrint("0x%p %.*s; d_count: %d; d_inode: 0x%p",
+ dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_count, dentry->d_inode);
+
+ if (dentry->d_inode && (dentry->d_inode->i_flags & S_DEAD)) {
+ retVal = 1;
+ }
+
+ dentry->d_time = 0;
+
+ return (retVal);
+}
+
+void novfs_d_release(struct dentry *dentry)
+{
+ DbgPrint("0x%p %.*s", dentry, dentry->d_name.len, dentry->d_name.name);
+}
+
+void novfs_d_iput(struct dentry *dentry, struct inode *inode)
+{
+ DbgPrint("Inode=0x%p Ino=%d Dentry=0x%p i_state=%d Name=%.*s",
+ inode, inode->i_ino, dentry, inode->i_state, dentry->d_name.len, dentry->d_name.name);
+
+ iput(inode);
+
+}
+
+int novfs_dir_open(struct inode *dir, struct file *file)
+{
+ char *path, *buf;
+ struct file_private *file_private = NULL;
+
+ DbgPrint("Inode 0x%p %d Name %.*s", dir, dir->i_ino, file->f_dentry->d_name.len, file->f_dentry->d_name.name);
+
+ buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL);
+ if (buf) {
+ path = novfs_dget_path(file->f_dentry, buf, PATH_LENGTH_BUFFER);
+ if (path) {
+ DbgPrint("path %s", path);
+ }
+ kfree(buf);
+ }
+
+	file_private = kmalloc(sizeof(struct file_private), GFP_KERNEL);
+	if (!file_private)
+		return -ENOMEM;
+	file_private->listedall = 0;
+	file_private->enumHandle = NULL;
+
+ file->private_data = file_private;
+
+ return (0);
+}
+
+int novfs_dir_release(struct inode *dir, struct file *file)
+{
+ struct file_private *file_private = file->private_data;
+ struct inode *inode = file->f_dentry->d_inode;
+ struct novfs_schandle sessionId;
+
+ DbgPrint("Inode 0x%p %d Name %.*s", dir, dir->i_ino, file->f_dentry->d_name.len, file->f_dentry->d_name.name);
+
+ if (file_private) {
+ if (file_private->enumHandle && (file_private->enumHandle != ((void *)-1))) {
+ sessionId = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope);
+ if (SC_PRESENT(sessionId) == 0) {
+ ((struct inode_data *)inode->i_private)->Scope = novfs_get_scope(file->f_dentry);
+ sessionId = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope);
+ }
+ novfs_end_directory_enumerate(file_private->enumHandle, sessionId);
+ }
+ kfree(file_private);
+ file->private_data = NULL;
+ }
+
+ return (0);
+}
+
+loff_t novfs_dir_lseek(struct file *file, loff_t offset, int origin)
+{
+ struct file_private *file_private = NULL;
+
+ DbgPrint("offset %lld %d Name %.*s", offset, origin, file->f_dentry->d_name.len, file->f_dentry->d_name.name);
+ //printk("<1> seekdir file = %.*s offset = %i\n", file->f_dentry->d_name.len, file->f_dentry->d_name.name, (int)offset);
+
+ if (0 != offset) {
+ return -ESPIPE;
+ }
+
+ file->f_pos = 0;
+
+ file_private = (struct file_private *)file->private_data;
+ file_private->listedall = 0;
+ if (file_private->enumHandle && (file_private->enumHandle != ((void *)-1))) {
+ struct novfs_schandle sessionId;
+ struct inode *inode = file->f_dentry->d_inode;
+ sessionId = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope);
+ if (SC_PRESENT(sessionId) == 0) {
+ ((struct inode_data *)inode->i_private)->Scope = novfs_get_scope(file->f_dentry);
+ sessionId = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope);
+ }
+ novfs_end_directory_enumerate(file_private->enumHandle, sessionId);
+ }
+ file_private->enumHandle = NULL;
+
+ return 0;
+ //return(default_llseek(file, offset, origin));
+}
+
+ssize_t novfs_dir_read(struct file *file, char *buf, size_t len, loff_t *off)
+{
+/*
+ int rlen = 0;
+
+ DbgPrint("dentry path %.*s buf=0x%p len=%d off=%lld", file->f_dentry->d_name.len, file->f_dentry->d_name.name, buf, len, *off);
+
+ if (0 == *off)
+ {
+ rlen = 8;
+ rlen -= copy_to_user(buf, "Testing\n", 8);
+ *off += rlen;
+ }
+ return(rlen);
+*/
+	DbgPrint("%lld %zu Name %.*s", *off, len, file->f_dentry->d_name.len, file->f_dentry->d_name.name);
+ return (generic_read_dir(file, buf, len, off));
+}
+
+static void novfs_Dump_Info(struct novfs_entry_info *info)
+{
+ char atime_buf[32], mtime_buf[32], ctime_buf[32];
+ char namebuf[512];
+ int len = 0;
+
+ if (info == NULL) {
+ DbgPrint("Dump_Info info == NULL");
+ return;
+ }
+
+ if (info->namelength >= 512) {
+ len = 511;
+ } else {
+ len = info->namelength;
+ }
+
+ memcpy(namebuf, info->name, len);
+ namebuf[len] = '\0';
+
+ ctime_r(&info->atime.tv_sec, atime_buf);
+ ctime_r(&info->mtime.tv_sec, mtime_buf);
+ ctime_r(&info->ctime.tv_sec, ctime_buf);
+ DbgPrint("type = %i", info->type);
+ DbgPrint("mode = %x", info->mode);
+ DbgPrint("uid = %d", info->uid);
+ DbgPrint("gid = %d", info->gid);
+ DbgPrint("size = %i", info->size);
+ DbgPrint("atime = %s", atime_buf);
+ DbgPrint("mtime = %s", mtime_buf);
+ DbgPrint("ctime = %s", ctime_buf);
+ DbgPrint("namelength = %i", info->namelength);
+ DbgPrint("name = %s", namebuf);
+}
+
+void processList(struct file *file, void *dirent, filldir_t filldir, char *list, int type, struct novfs_schandle SessionId)
+{
+ unsigned char *path, *buf = NULL, *cp;
+ struct qstr name;
+ struct novfs_entry_info *pinfo = NULL;
+
+ buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL);
+ path = buf;
+ if (buf) {
+ path = novfs_dget_path(file->f_dentry, buf, PATH_LENGTH_BUFFER);
+ if (path) {
+ strcpy(buf, path);
+ }
+ path = buf + strlen(buf);
+ *path++ = '\\';
+ }
+
+ if (list) {
+ cp = list;
+ while (*cp) {
+ name.name = cp;
+ DbgPrint("name.name = %s", name.name);
+ name.len = strlen(cp);
+ name.hash = novfs_internal_hash(&name);
+ cp += (name.len + 1);
+
+			pinfo = kmalloc(sizeof(struct novfs_entry_info) + PATH_LENGTH_BUFFER, GFP_KERNEL);
+			if (!pinfo)
+				break;
+			pinfo->mode = S_IFDIR | 0700;
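+			if (!pinfo)
+				break;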
+ pinfo->size = 0;
+ pinfo->atime = pinfo->ctime = pinfo->mtime = CURRENT_TIME;
+ strcpy(pinfo->name, name.name);
+ pinfo->namelength = name.len;
+
+ novfs_Dump_Info(pinfo);
+
+ filldir(dirent, pinfo->name, pinfo->namelength, file->f_pos, file->f_pos, pinfo->mode >> 12);
+ file->f_pos += 1;
+
+ kfree(pinfo);
+ }
+ }
+
+ if (buf) {
+ kfree(buf);
+ }
+}
+
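+/*
+ * Fetch the next batch of entries for this directory via novfs_get_dir_listex
+ * and feed them to filldir, advancing file->f_pos as we go.
+ */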
+int processEntries(struct file *file, void *dirent, filldir_t filldir, void **enumHandle, struct novfs_schandle sessionId)
+{
+ unsigned char *path = NULL, *buf = NULL;
+ int count = 0, status = 0;
+ struct novfs_entry_info *pinfo = NULL;
+ struct novfs_entry_info *pInfoMem = NULL;
+
+ buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL);
+ if (!buf) {
+ return -ENOMEM;
+ }
+
+ path = novfs_dget_path(file->f_dentry, buf, PATH_LENGTH_BUFFER);
+ if (!path) {
+ kfree(buf);
+ return -ENOMEM;
+ }
+ //NWSearchfiles
+ count = 0;
+ status = novfs_get_dir_listex(path, enumHandle, &count, &pinfo, sessionId);
+ pInfoMem = pinfo;
+
+ if ((count == -1) || (count == 0) || (status != 0)) {
+ kfree(pInfoMem);
+ kfree(buf);
+ return -1;
+ }
+ // parse resultset
+ while (pinfo && count--) {
+ filldir(dirent, pinfo->name, pinfo->namelength, file->f_pos, file->f_pos, pinfo->mode >> 12);
+ file->f_pos += 1;
+
+ pinfo = (struct novfs_entry_info *)(pinfo->name + pinfo->namelength);
+ }
+
+ kfree(pInfoMem);
+ kfree(buf);
+ return 0;
+}
+
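+/*
+ * readdir: emit "." and ".." first, then either the user/server/volume
+ * lists (processList) or the regular directory enumeration (processEntries).
+ */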
+int novfs_dir_readdir(struct file *file, void *dirent, filldir_t filldir)
+{
+ unsigned char *list = NULL;
+ int status = 0; //-ENOMEM;
+ struct inode *inode = file->f_dentry->d_inode;
+ struct novfs_schandle sessionId;
+ uid_t uid;
+ int type = 0;
+ struct file_private *file_private = NULL;
+ int lComm;
+
+ file_private = (struct file_private *)file->private_data;
+ DbgPrint("Name %.*s", file->f_dentry->d_name.len, file->f_dentry->d_name.name);
+
+ //printk("<1> file = %.*s\n", file->f_dentry->d_name.len, file->f_dentry->d_name.name);
+
+// Use this hack by default
+#ifndef SKIP_CROSSOVER_HACK
+ // Hack for crossover - begin
- down(&TimeDir_Lock);
++ mutex_lock(&TimeDir_Lock);
+ if ((file->f_dentry->d_name.len == 7) &&
+ ((0 == strncmp(file->f_dentry->d_name.name, " !xover", 7)) ||
+ (0 == strncmp(file->f_dentry->d_name.name, "z!xover", 7)))) {
+ //printk("<1> xoverhack: we are in xoverHack\n");
+
+ inHAX = 1;
+ inHAXTime = get_nanosecond_time();
+ //up( &TimeDir_Lock );
+ //return 0;
+ file_private->listedall = 1;
+ } else {
+ if (inHAX) {
+ if (get_nanosecond_time() - inHAXTime > 100 * 1000 * 1000) {
+ //printk("<1> xoverhack: it was long, long, long ago...\n");
+ inHAX = 0;
+ } else {
+ //printk("<1> xoverhack: word gotcha in xoverHack...\n");
+ inHAXTime = get_nanosecond_time();
+ //up( &TimeDir_Lock );
+ //return 0;
+ file_private->listedall = 1;
+ }
+ }
+ }
+
- up(&TimeDir_Lock);
++ mutex_unlock(&TimeDir_Lock);
+ // Hack for crossover - end
+#endif
+
+ if (file->f_pos == 0) {
+ if (filldir(dirent, ".", 1, file->f_pos, inode->i_ino, DT_DIR) < 0)
+ return 1;
+ file->f_pos++;
+ return 1;
+ }
+
+ if (file->f_pos == 1) {
+ if (filldir(dirent, "..", 2, file->f_pos, file->f_dentry->d_parent->d_inode->i_ino, DT_DIR) < 0)
+ return 1;
+ file->f_pos++;
+ return 1;
+ }
+
+ if (file_private->listedall != 0) {
+ return 0;
+ }
+
+ inode = file->f_dentry->d_inode;
+ if (inode && inode->i_private) {
+ sessionId = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope);
+ if (0 == SC_PRESENT(sessionId)) {
+ ((struct inode_data *)inode->i_private)->Scope = novfs_get_scope(file->f_dentry);
+ sessionId = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope);
+ }
+ uid = novfs_scope_get_uid(((struct inode_data *)inode->i_private)->Scope);
+ } else {
+ SC_INITIALIZE(sessionId);
+ uid = current_euid();
+ }
+
+ if (IS_ROOT(file->f_dentry) || // Root
+ IS_ROOT(file->f_dentry->d_parent) || // User
+ IS_ROOT(file->f_dentry->d_parent->d_parent)) // Server
+ {
+ if (IS_ROOT(file->f_dentry)) {
+ DbgPrint("Root directory");
+ list = novfs_get_scopeusers();
+ type = USER_LIST;
+ } else if (IS_ROOT(file->f_dentry->d_parent)) {
+ DbgPrint("Parent is Root directory");
+ novfs_get_servers(&list, sessionId);
+ type = SERVER_LIST;
+ } else {
+ DbgPrint("Parent-Parent is Root directory");
+ novfs_get_vols(&file->f_dentry->d_name, &list, sessionId);
+ type = VOLUME_LIST;
+ }
+
+ processList(file, dirent, filldir, list, type, sessionId);
+ file_private->listedall = 1;
+ } else {
+ status = processEntries(file, dirent, filldir, &file_private->enumHandle, sessionId);
+
+ if (status != 0) {
+ file_private->listedall = 1;
+#ifndef SKIP_CROSSOVER_HACK
+ // Hack for crossover part 2 - begin
+ lComm = strlen(current->comm);
+ if ((lComm > 4)
+ && (0 == strcmp(current->comm + lComm - 4, ".EXE"))) {
+ if (filldir(dirent, " !xover", 7, file->f_pos, inode->i_ino, DT_DIR) < 0)
+ return 1;
+ if (filldir(dirent, "z!xover", 7, file->f_pos, inode->i_ino, DT_DIR) < 0)
+ return 1;
+ file->f_pos += 2;
+ }
+ // Hack for crossover part2 - end
+#endif
+ }
+ }
+
+ file->private_data = file_private;
+ return 1;
+}
+
+int novfs_dir_fsync(struct file *file, int datasync)
+{
+ DbgPrint("Name %.*s", file->f_dentry->d_name.len,
+ file->f_dentry->d_name.name);
+ return generic_file_fsync(file, datasync);
+}
+
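+/*
+ * Read through the page cache (do_sync_read) when it is enabled for this
+ * file; otherwise read directly via novfs_read_file.
+ */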
+ssize_t novfs_f_read(struct file * file, char *buf, size_t len, loff_t * off)
+{
+ size_t thisread, totalread = 0;
+ loff_t offset = *off;
+ struct inode *inode;
+ struct novfs_schandle session;
+ struct inode_data *id;
+
+ if (file->f_dentry && (inode = file->f_dentry->d_inode) && (id = (struct inode_data *)inode->i_private)) {
+
+ DbgPrint("(0x%p 0x%p %d %lld %.*s)",
+ file->private_data, buf, len, offset, file->f_dentry->d_name.len, file->f_dentry->d_name.name);
+
+ if (novfs_page_cache && !(file->f_flags & O_DIRECT) && id->CacheFlag) {
+ totalread = do_sync_read(file, buf, len, off);
+ } else {
+ session = novfs_scope_get_sessionId(id->Scope);
+ if (0 == SC_PRESENT(session)) {
+ id->Scope = novfs_get_scope(file->f_dentry);
+ session = novfs_scope_get_sessionId(id->Scope);
+ }
+
+ while (len > 0 && (offset < i_size_read(inode))) {
+ int retval;
+ thisread = len;
+ retval = novfs_read_file(file->private_data, buf, &thisread, &offset, session);
+ if (retval || !thisread) {
+ if (retval) {
+ totalread = retval;
+ }
+ break;
+ }
+ DbgPrint("thisread = 0x%x", thisread);
+ len -= thisread;
+ buf += thisread;
+ offset += thisread;
+ totalread += thisread;
+ }
+ *off = offset;
+ }
+ }
+ DbgPrint("return = %d", totalread);
+
+ return (totalread);
+}
+
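+/*
+ * Write through the page cache (do_sync_write) when it is enabled for this
+ * file; otherwise write directly via novfs_write_file and update the inode
+ * size and times as data goes out.
+ */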
+ssize_t novfs_f_write(struct file * file, const char *buf, size_t len, loff_t * off)
+{
+ ssize_t thiswrite, totalwrite = 0;
+ loff_t offset = *off;
+ struct novfs_schandle session;
+ struct inode *inode;
+ int status;
+ struct inode_data *id;
+
+ if (file->f_dentry && (inode = file->f_dentry->d_inode) && (id = file->f_dentry->d_inode->i_private)) {
+ DbgPrint("(0x%p 0x%p 0x%p %d %lld %.*s)",
+ file->private_data, inode, id->FileHandle, len, offset,
+ file->f_dentry->d_name.len, file->f_dentry->d_name.name);
+
+ if (novfs_page_cache && !(file->f_flags & O_DIRECT) && id->CacheFlag && !(file->f_flags & O_WRONLY)) {
+ totalwrite = do_sync_write(file, buf, len, off);
+ } else {
+ if (file->f_flags & O_APPEND) {
+ offset = i_size_read(inode);
+ DbgPrint("appending to end %lld %.*s",
+ offset, file->f_dentry->d_name.len, file->f_dentry->d_name.name);
+ }
+
+ session = novfs_scope_get_sessionId(id->Scope);
+ if (0 == SC_PRESENT(session)) {
+ id->Scope = novfs_get_scope(file->f_dentry);
+ session = novfs_scope_get_sessionId(id->Scope);
+ }
+
+ while (len > 0) {
+ thiswrite = len;
+ if ((status =
+ novfs_write_file(file->private_data,
+ (unsigned char *)buf, &thiswrite, &offset, session)) || !thiswrite) {
+ totalwrite = status;
+ break;
+ }
+ DbgPrint("thiswrite = 0x%x", thiswrite);
+ len -= thiswrite;
+ buf += thiswrite;
+ offset += thiswrite;
+ totalwrite += thiswrite;
+ if (offset > i_size_read(inode)) {
+ i_size_write(inode, offset);
+ inode->i_blocks = (offset + inode->i_sb->s_blocksize - 1) >> inode->i_blkbits;
+ }
+ inode->i_mtime = inode->i_atime = CURRENT_TIME;
+ id->Flags |= UPDATE_INODE;
+
+ }
+ *off = offset;
+ }
+ }
+ DbgPrint("return = 0x%x", totalwrite);
+
+ return (totalwrite);
+}
+
+int novfs_f_readdir(struct file *file, void *data, filldir_t fill)
+{
+ return -EISDIR;
+}
+
+int novfs_f_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ int retCode = -EINVAL;
+
+ DbgPrint("file=0x%p %.*s", file, file->f_dentry->d_name.len, file->f_dentry->d_name.name);
+
+ retCode = generic_file_mmap(file, vma);
+
+ DbgPrint("retCode=0x%x", retCode);
+ return (retCode);
+}
+
+int novfs_f_open(struct inode *inode, struct file *file)
+{
+ struct novfs_entry_info *info = NULL;
+ int retCode = -ENOENT;
+ struct novfs_schandle session;
+ char *path;
+ struct dentry *parent;
+ ino_t ino;
+ struct inode_data *id;
+ int errInfo;
+
+ DbgPrint("inode=0x%p file=0x%p dentry=0x%p dentry->d_inode=0x%p %.*s",
+ inode, file, file->f_dentry, file->f_dentry->d_inode, file->f_dentry->d_name.len, file->f_dentry->d_name.name);
+ if (file->f_dentry) {
+ DbgPrint("%.*s f_flags=0%o f_mode=0%o i_mode=0%o",
+ file->f_dentry->d_name.len, file->f_dentry->d_name.name, file->f_flags, file->f_mode, inode->i_mode);
+ }
+
+ if (inode && inode->i_private) {
+ id = (struct inode_data *)file->f_dentry->d_inode->i_private;
+ session = novfs_scope_get_sessionId(id->Scope);
+ if (0 == SC_PRESENT(session)) {
+ id->Scope = novfs_get_scope(file->f_dentry);
+ session = novfs_scope_get_sessionId(id->Scope);
+ }
+
+ info = kmalloc(sizeof(struct novfs_entry_info) + PATH_LENGTH_BUFFER, GFP_KERNEL);
+ if (info) {
+ path = novfs_dget_path(file->f_dentry, info->name, PATH_LENGTH_BUFFER);
+ if (path) {
+ if (file->f_flags & O_TRUNC) {
+ errInfo = novfs_get_file_info(path, info, session);
+
+ if (errInfo || info->size == 0) {
+ // clear O_TRUNC flag, bug #275366
+ file->f_flags = file->f_flags & (~O_TRUNC);
+ }
+ }
+
+ DbgPrint("%s", path);
+ retCode = novfs_open_file(path, file->f_flags & ~O_EXCL, info, &file->private_data, session);
+
+ DbgPrint("0x%x 0x%p", retCode, file->private_data);
+ if (!retCode) {
+ /*
+ *update_inode(inode, &info);
+ */
+ //id->FileHandle = file->private_data;
+ id->CacheFlag = novfs_get_file_cache_flag(path, session);
+
+ if (!novfs_get_file_info(path, info, session)) {
+ update_inode(inode, info);
+ }
+
+ parent = dget_parent(file->f_dentry);
+
+ if (parent && parent->d_inode) {
+ struct inode *dir = parent->d_inode;
+ novfs_lock_inode_cache(dir);
+ ino = 0;
+ if (novfs_get_entry(dir, &file->f_dentry->d_name, &ino, info)) {
+ ((struct inode_data *)inode->i_private)->Flags |= UPDATE_INODE;
+ }
+
+ novfs_unlock_inode_cache(dir);
+ }
+ dput(parent);
+ }
+ }
+ kfree(info);
+ }
+ }
+ DbgPrint("retCode=0x%x", retCode);
+ return (retCode);
+}
+
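+/*
+ * Write every dirty page in 'mapping' out via novfs_write_page, clearing
+ * the dirty tag as each page is written.
+ */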
+int novfs_flush_mapping(void *Handle, struct address_space *mapping, struct novfs_schandle Session)
+{
+ struct pagevec pagevec;
+ unsigned nrpages;
+ pgoff_t index = 0;
+ int done, rc = 0;
+
+ pagevec_init(&pagevec, 0);
+
+ do {
+ done = 1;
+ nrpages = pagevec_lookup_tag(&pagevec, mapping, &index, PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE);
+
+ if (nrpages) {
+ struct page *page;
+ int i;
+
+ DbgPrint("%u", nrpages);
+
+ done = 0;
+ for (i = 0; !rc && (i < nrpages); i++) {
+ page = pagevec.pages[i];
+
+ DbgPrint("page 0x%p %lu", page, page->index);
+
+ lock_page(page);
+ page_cache_get(page);
+ if (page->mapping == mapping) {
+ if (clear_page_dirty_for_io(page)) {
+ rc = novfs_write_page(Handle, page, Session);
+ if (!rc) {
+ //ClearPageDirty(page);
+ radix_tree_tag_clear
+ (&mapping->page_tree, page_index(page), PAGECACHE_TAG_DIRTY);
+ }
+ }
+ }
+
+ page_cache_release(page);
+ unlock_page(page);
+ }
+ pagevec_release(&pagevec);
+ }
+ } while (!rc && !done);
+
+ DbgPrint("return %d", rc);
+
+ return (rc);
+}
+
+int novfs_f_flush(struct file *file, fl_owner_t ownid)
+{
+
+ int rc = 0;
+#ifdef FLUSH
+ struct inode *inode;
+ struct novfs_schandle session;
+ struct inode_data *id;
+
+ DbgPrint("Called from 0x%p", __builtin_return_address(0));
+ if (file->f_dentry && (inode = file->f_dentry->d_inode)
+ && (id = file->f_dentry->d_inode->i_private)) {
+
+ if ((file->f_flags & O_ACCMODE) != O_RDONLY) {
+ inode = file->f_dentry->d_inode;
+ DbgPrint("%.*s f_flags=0%o f_mode=0%o i_mode=0%o",
+ file->f_dentry->d_name.len,
+ file->f_dentry->d_name.name, file->f_flags, file->f_mode, inode->i_mode);
+
+ session = novfs_scope_get_sessionId(id->Scope);
+ if (0 == SC_PRESENT(session)) {
+ id->Scope = novfs_get_scope(file->f_dentry);
+ session = novfs_scope_get_sessionId(id->Scope);
+ }
+
+ if (inode && inode->i_mapping && inode->i_mapping->nrpages) {
+
+ DbgPrint("%.*s pages=%lu",
+ file->f_dentry->d_name.len, file->f_dentry->d_name.name, inode->i_mapping->nrpages);
+
+ if (file->f_dentry &&
+ file->f_dentry->d_inode &&
+ file->f_dentry->d_inode->i_mapping &&
+ file->f_dentry->d_inode->i_mapping->a_ops &&
+ file->f_dentry->d_inode->i_mapping->a_ops->writepage) {
+ rc = filemap_fdatawrite(file->f_dentry->d_inode->i_mapping);
+ } else {
+ rc = novfs_flush_mapping(file->private_data, file->f_dentry->d_inode->i_mapping, session);
+ }
+ }
+ }
+ }
+#endif
+ return (rc);
+}
+
+int novfs_f_release(struct inode *inode, struct file *file)
+{
+ int retCode = -EACCES;
+ struct novfs_schandle session;
+ struct inode_data *id;
+
+ DbgPrint("path=%.*s handle=%p", file->f_dentry->d_name.len, file->f_dentry->d_name.name, file->private_data);
+
+ if (inode && (id = inode->i_private)) {
+ session = novfs_scope_get_sessionId(id->Scope);
+ if (0 == SC_PRESENT(session)) {
+ id->Scope = novfs_get_scope(file->f_dentry);
+ session = novfs_scope_get_sessionId(id->Scope);
+ }
+
+ if ((file->f_flags & O_ACCMODE) != O_RDONLY) {
+ DbgPrint("%.*s f_flags=0%o f_mode=0%o i_mode=0%o",
+ file->f_dentry->d_name.len,
+ file->f_dentry->d_name.name, file->f_flags, file->f_mode, inode->i_mode);
+
+ if (inode->i_mapping && inode->i_mapping->nrpages) {
+
+ DbgPrint("%.*s pages=%lu",
+ file->f_dentry->d_name.len, file->f_dentry->d_name.name, inode->i_mapping->nrpages);
+
+ if (inode->i_mapping->a_ops && inode->i_mapping->a_ops->writepage) {
+ filemap_fdatawrite(file->f_dentry->d_inode->i_mapping);
+ } else {
+ novfs_flush_mapping(file->private_data, file->f_dentry->d_inode->i_mapping, session);
+ }
+ }
+ }
+
+ if (file->f_dentry && file->f_dentry->d_inode) {
+ invalidate_remote_inode(file->f_dentry->d_inode);
+ }
+
+ retCode = novfs_close_file(file->private_data, session);
+ //id->FileHandle = 0;
+ }
+ return (retCode);
+}
+
+int novfs_f_fsync(struct file *file, int datasync)
+{
+ return 0;
+}
+
+int novfs_f_llseek(struct file *file, loff_t offset, int origin)
+{
+ DbgPrint("File=0x%p Name=%.*s offset=%lld origin=%d",
+ file, file->f_dentry->d_name.len, file->f_dentry->d_name.name, offset, origin);
+ return (generic_file_llseek(file, offset, origin));
+}
+
+/*++======================================================================*/
+int novfs_f_lock(struct file *file, int cmd, struct file_lock *lock)
+/*
+ * Arguments:
+ * "file" - pointer to file structure - contains file handle in "file->private_data"
+ *
+ * "cmd" could be F_SETLK, F_SETLKW, F_GETLK
+ * F_SETLK/F_SETLKW are for setting/unsetting a file lock
+ * F_GETLK is for getting information about a region - is it locked or not
+ *
+ * "lock" structure - contains "start" and "end" of locking region
+ *
+ * Returns:
+ * 0 on success
+ * -ENOSYS on F_GETLK cmd. It's not implemented.
+ * -EINVAL if (lock->fl_start > lock->fl_end)
+ * -EAGAIN on all other errors
+ * Abstract:
+ *
+ * Notes:
+ * "lock->fl_start" and "lock->fl_end" are of type "long long",
+ * but xtier functions in novfsd "NCFsdLockFile" and "NCFsdUnlockFile"
+ * receive their arguments as u64 values.
+ *
+ *
+ *========================================================================*/
+{
+ int err_code;
+
+ struct inode *inode;
+ struct novfs_schandle session;
+ struct inode_data *id;
+ loff_t len;
+
+ DbgPrint("(0x%p): begin in novfs_f_lock 0x%p", __builtin_return_address(0), file->private_data);
+ DbgPrint("cmd = %d, F_GETLK = %d, F_SETLK = %d, F_SETLKW = %d", cmd, F_GETLK, F_SETLK, F_SETLKW);
+ DbgPrint("lock->fl_start = 0x%llX, lock->fl_end = 0x%llX", lock->fl_start, lock->fl_end);
+
+ err_code = -1;
+ if (lock->fl_start <= lock->fl_end) {
+ /* Get len from "start" and "end" */
+ len = lock->fl_end - lock->fl_start + 1;
+ if ((0 == lock->fl_start) && (OFFSET_MAX == lock->fl_end)) {
+ len = 0;
+ }
+
+ if (file->f_dentry && (inode = file->f_dentry->d_inode) && (id = (struct inode_data *)inode->i_private)) {
+ DbgPrint("(0x%p 0x%p %.*s)",
+ file->private_data, inode, file->f_dentry->d_name.len, file->f_dentry->d_name.name);
+
+ session = novfs_scope_get_sessionId(id->Scope);
+ if (0 == SC_PRESENT(session)) {
+ id->Scope = novfs_get_scope(file->f_dentry);
+ session = novfs_scope_get_sessionId(id->Scope);
+ }
+
+ /* fl_type = F_RDLCK, F_WRLCK, F_UNLCK */
+ switch (cmd) {
+ case F_SETLK:
+#ifdef F_GETLK64
+ case F_SETLK64:
+#endif
+
+ err_code = novfs_set_file_lock(session, file->private_data, lock->fl_type, lock->fl_start, len);
+ break;
+
+ case F_SETLKW:
+#ifdef F_GETLK64
+ case F_SETLKW64:
+#endif
+ err_code = novfs_set_file_lock(session, file->private_data, lock->fl_type, lock->fl_start, len);
+ break;
+
+ case F_GETLK:
+#ifdef F_GETLK64
+ case F_GETLK64:
+#endif
+ err_code = -ENOSYS;
+ /*
+				 * Not implemented. We don't have an appropriate xtier function.
+				 */
+ break;
+
+ default:
+ printk("<1> novfs in novfs_f_lock, not implemented cmd = %d\n", cmd);
+ DbgPrint("novfs in novfs_f_lock, not implemented cmd = %d", cmd);
+ break;
+ }
+ }
+
+ DbgPrint("lock->fl_type = %u, err_code 0x%X", lock->fl_type, err_code);
+
+ if ((err_code != 0) && (err_code != -1)
+ && (err_code != -ENOSYS)) {
+ err_code = -EAGAIN;
+ }
+ } else {
+ err_code = -EINVAL;
+ }
+
+ return (err_code);
+}
+
+/*++======================================================================*/
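+/*
+ * Copy 'bytes_read' bytes from 'data' into the page-cache pages on the
+ * 'pages' list, zero-filling the tail of a partial last page and adding
+ * each page to the LRU.
+ */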
+static void novfs_copy_cache_pages(struct address_space *mapping,
+ struct list_head *pages, int bytes_read, char *data, struct pagevec *plru_pvec)
+{
+ struct page *page;
+ char *target;
+
+ while (bytes_read > 0) {
+ if (list_empty(pages))
+ break;
+
+ page = list_entry(pages->prev, struct page, lru);
+ list_del(&page->lru);
+
+ if (add_to_page_cache(page, mapping, page->index, GFP_KERNEL)) {
+ page_cache_release(page);
+ data += PAGE_CACHE_SIZE;
+ bytes_read -= PAGE_CACHE_SIZE;
+ continue;
+ }
+
+ target = kmap_atomic(page, KM_USER0);
+
+ if (PAGE_CACHE_SIZE > bytes_read) {
+ memcpy(target, data, bytes_read);
+ /* zero the tail end of this partial page */
+ memset(target + bytes_read, 0, PAGE_CACHE_SIZE - bytes_read);
+ bytes_read = 0;
+ } else {
+ memcpy(target, data, PAGE_CACHE_SIZE);
+ bytes_read -= PAGE_CACHE_SIZE;
+ }
+ kunmap_atomic(target, KM_USER0);
+
+ flush_dcache_page(page);
+ SetPageUptodate(page);
+ unlock_page(page);
+ if (!pagevec_add(plru_pvec, page))
+ __pagevec_lru_add_file(plru_pvec);
+ data += PAGE_CACHE_SIZE;
+ }
+ return;
+}
+
+int novfs_a_writepage(struct page *page, struct writeback_control *wbc)
+{
+ int retCode = -EFAULT;
+ struct inode *inode = page->mapping->host;
+ struct inode_data *id = inode->i_private;
+ loff_t pos = ((loff_t) page->index << PAGE_CACHE_SHIFT);
+ struct novfs_schandle session;
+ struct novfs_data_list dlst[2];
+ size_t len = PAGE_CACHE_SIZE;
+
+ session = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope);
+
+ page_cache_get(page);
+
+ pos = ((loff_t) page->index << PAGE_CACHE_SHIFT);
+
+ /*
+ * Leave first dlst entry for reply header.
+ */
+ dlst[1].page = page;
+ dlst[1].offset = NULL;
+ dlst[1].len = len;
+ dlst[1].rwflag = DLREAD;
+
+ /*
+	 * Check size so we don't write past the end of the file.
+ */
+ if ((pos + (loff_t) len) > i_size_read(inode)) {
+ len = (size_t) (i_size_read(inode) - pos);
+ }
+
+ retCode = novfs_write_pages(id->FileHandle, dlst, 2, len, pos, session);
+ if (!retCode) {
+ SetPageUptodate(page);
+ }
+
+ unlock_page(page);
+ page_cache_release(page);
+
+ return (retCode);
+}
+
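+/*
+ * Gather runs of contiguous dirty pages and write them out via
+ * novfs_write_pages in batches of up to novfs_max_iosize bytes.
+ */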
+int novfs_a_writepages(struct address_space *mapping, struct writeback_control *wbc)
+{
+ int retCode = 0;
+ struct inode *inode = mapping->host;
+ struct novfs_schandle session;
+ void *fh = NULL;
+ struct inode_data *id = NULL;
+
+ int max_page_lookup = novfs_max_iosize / PAGE_CACHE_SIZE;
+
+ struct novfs_data_list *dlist, *dlptr;
+ struct page **pages;
+
+ int dlist_idx, i = 0;
+ pgoff_t index, next_index = 0;
+ loff_t pos = 0;
+ size_t tsize;
+
+ SC_INITIALIZE(session);
+ DbgPrint("inode=0x%p mapping=0x%p wbc=0x%p nr_to_write=%d", inode, mapping, wbc, wbc->nr_to_write);
+
+ if (inode) {
+ DbgPrint("Inode=0x%p Ino=%d Id=0x%p", inode, inode->i_ino, inode->i_private);
+
+ if (NULL != (id = inode->i_private)) {
+ session = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope);
+ fh = ((struct inode_data *)inode->i_private)->FileHandle;
+ }
+ }
+
+ dlist = kmalloc(sizeof(struct novfs_data_list) * max_page_lookup, GFP_KERNEL);
+ pages = kmalloc(sizeof(struct page *) * max_page_lookup, GFP_KERNEL);
+
+ if (id)
+ DbgPrint("inode=0x%p fh=0x%p dlist=0x%p pages=0x%p %s", inode, fh, dlist, pages, id->Name);
+ else
+ DbgPrint("inode=0x%p fh=0x%p dlist=0x%p pages=0x%p", inode, fh, dlist, pages);
+
+ if (dlist && pages) {
+ struct backing_dev_info *bdi = mapping->backing_dev_info;
+ int done = 0;
+ int nr_pages = 0;
+ int scanned = 0;
+
+ if (wbc->nonblocking && bdi_write_congested(bdi)) {
+ wbc->encountered_congestion = 1;
+ return 0;
+ }
+
+ if (wbc->sync_mode == WB_SYNC_NONE) {
+ index = mapping->writeback_index; /* Start from prev offset */
+ } else {
+ index = 0; /* whole-file sweep */
+ scanned = 1;
+ }
+
+ next_index = index;
+
+ while (!done && (wbc->nr_to_write > 0)) {
+ dlist_idx = 0;
+ dlptr = &dlist[1];
+
+ DbgPrint("nr_pages=%d", nr_pages);
+ if (!nr_pages) {
+ memset(pages, 0, sizeof(struct page *) * max_page_lookup);
+
+ spin_lock_irq(&mapping->tree_lock);
+
+ /*
+				 * Need to ask for one less than max_page_lookup or we
+				 * will overflow the request buffer. This also leaves
+				 * the first entry free for the reply buffer.
+ */
+ nr_pages =
+ radix_tree_gang_lookup_tag(&mapping->page_tree,
+ (void **)pages, index, max_page_lookup - 1, PAGECACHE_TAG_DIRTY);
+
+ DbgPrint("2; nr_pages=%d\n", nr_pages);
+ /*
+ * Check to see if there are dirty pages and there is a valid
+ * file handle.
+ */
+ if (nr_pages && !fh) {
+ set_bit(AS_EIO, &mapping->flags);
+ done = 1;
+ DbgPrint("set_bit AS_EIO");
+ break;
+ }
+
+ for (i = 0; i < nr_pages; i++) {
+ page_cache_get(pages[i]);
+ }
+
+ spin_unlock_irq(&mapping->tree_lock);
+
+ if (nr_pages) {
+ index = pages[nr_pages - 1]->index + 1;
+ pos = (loff_t) pages[0]->index << PAGE_CACHE_SHIFT;
+ }
+
+ if (!nr_pages) {
+ if (scanned) {
+ index = 0;
+ scanned = 0;
+ continue;
+ }
+ done = 1;
+ } else {
+ next_index = pages[0]->index;
+ i = 0;
+ }
+ } else {
+ if (pages[i]) {
+ pos = (loff_t) pages[i]->index << PAGE_CACHE_SHIFT;
+ }
+ }
+
+ for (; i < nr_pages; i++) {
+ struct page *page = pages[i];
+
+ /*
+ * At this point we hold neither mapping->tree_lock nor
+ * lock on the page itself: the page may be truncated or
+ * invalidated (changing page->mapping to NULL), or even
+ * swizzled back from swapper_space to tmpfs file
+ * mapping
+ */
+
+				DbgPrint("novfs_a_writepages: pos=0x%llx index=%d page->index=%d next_index=%d\n",
+					 pos, index, page->index, next_index);
+
+ if (page->index != next_index) {
+ next_index = page->index;
+ break;
+ }
+ next_index = page->index + 1;
+
+ lock_page(page);
+
+ if (wbc->sync_mode != WB_SYNC_NONE)
+ wait_on_page_writeback(page);
+
+ if (page->mapping != mapping || PageWriteback(page)
+ || !clear_page_dirty_for_io(page)) {
+ unlock_page(page);
+ continue;
+ }
+
+ dlptr[dlist_idx].page = page;
+ dlptr[dlist_idx].offset = NULL;
+ dlptr[dlist_idx].len = PAGE_CACHE_SIZE;
+ dlptr[dlist_idx].rwflag = DLREAD;
+ dlist_idx++;
+ DbgPrint("Add page=0x%p index=0x%lx", page, page->index);
+ }
+
+ DbgPrint("dlist_idx=%d", dlist_idx);
+ if (dlist_idx) {
+ tsize = dlist_idx * PAGE_CACHE_SIZE;
+ /*
+				 * Check size so we don't write past the end of the file.
+ */
+ if ((pos + tsize) > i_size_read(inode)) {
+ tsize = (size_t) (i_size_read(inode) - pos);
+ }
+
+ retCode = novfs_write_pages(fh, dlist, dlist_idx + 1, tsize, pos, session);
+ switch (retCode) {
+ case 0:
+ wbc->nr_to_write -= dlist_idx;
+ break;
+
+ case -ENOSPC:
+ set_bit(AS_ENOSPC, &mapping->flags);
+ done = 1;
+ break;
+
+ default:
+ set_bit(AS_EIO, &mapping->flags);
+ done = 1;
+ break;
+ }
+
+ do {
+ unlock_page((struct page *)
+ dlptr[dlist_idx - 1].page);
+ page_cache_release((struct page *)
+ dlptr[dlist_idx - 1].page);
+					DbgPrint("release page=0x%p index=0x%lx",
+						 dlptr[dlist_idx - 1].page,
+						 ((struct page *)dlptr[dlist_idx - 1].page)->index);
+ if (!retCode) {
+ wbc->nr_to_write--;
+ }
+ } while (--dlist_idx);
+ }
+
+ if (i >= nr_pages) {
+ nr_pages = 0;
+ }
+ }
+
+ mapping->writeback_index = index;
+
+ } else {
+ DbgPrint("set_bit AS_EIO");
+ set_bit(AS_EIO, &mapping->flags);
+ }
+ if (dlist)
+ kfree(dlist);
+ if (pages)
+ kfree(pages);
+
+ DbgPrint("retCode=%d", retCode);
+ return (0);
+
+}
+
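+/* Fill a single page via novfs_read_pages and mark it up to date. */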
+int novfs_a_readpage(struct file *file, struct page *page)
+{
+ int retCode = 0;
+ void *pbuf;
+ struct inode *inode = NULL;
+ struct dentry *dentry = NULL;
+ loff_t offset;
+ size_t len;
+ struct novfs_schandle session;
+
+ SC_INITIALIZE(session);
+ DbgPrint("File=0x%p Name=%.*s Page=0x%p", file, file->f_dentry->d_name.len, file->f_dentry->d_name.name, page);
+
+ dentry = file->f_dentry;
+
+ if (dentry) {
+ DbgPrint("Dentry=0x%p Name=%.*s", dentry, dentry->d_name.len, dentry->d_name.name);
+ if (dentry->d_inode) {
+ inode = dentry->d_inode;
+ }
+ }
+
+ if (inode) {
+ DbgPrint("Inode=0x%p Ino=%d", inode, inode->i_ino);
+
+ if (inode->i_private) {
+ session = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope);
+ if (0 == SC_PRESENT(session)) {
+ ((struct inode_data *)inode->i_private)->Scope = novfs_get_scope(file->f_dentry);
+ session = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope);
+ }
+ }
+ }
+
+ if (!PageUptodate(page)) {
+ struct novfs_data_list dlst[2];
+
+		offset = (loff_t) page->index << PAGE_CACHE_SHIFT;
+ len = PAGE_CACHE_SIZE;
+
+ /*
+ * Save the first entry for the reply header.
+ */
+ dlst[1].page = page;
+ dlst[1].offset = NULL;
+ dlst[1].len = PAGE_CACHE_SIZE;
+ dlst[1].rwflag = DLWRITE;
+
+		DbgPrint("calling novfs_Read_Pages %lld", offset);
+ retCode = novfs_read_pages(file->private_data, dlst, 2, &len, &offset, session);
+ if (len && (len < PAGE_CACHE_SIZE)) {
+ pbuf = kmap_atomic(page, KM_USER0);
+ memset(&((char *)pbuf)[len], 0, PAGE_CACHE_SIZE - len);
+ kunmap_atomic(pbuf, KM_USER0);
+ }
+
+ flush_dcache_page(page);
+ SetPageUptodate(page);
+ }
+ unlock_page(page);
+
+ DbgPrint("retCode=%d", retCode);
+ return (retCode);
+
+}
+
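+/*
+ * Readahead: read runs of contiguous pages via novfs_read_pages into one
+ * buffer, then copy the data into the page cache.
+ */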
+int novfs_a_readpages(struct file *file, struct address_space *mapping, struct list_head *page_lst, unsigned nr_pages)
+{
+ int retCode = 0;
+ struct inode *inode = NULL;
+ struct dentry *dentry = NULL;
+ struct novfs_schandle session;
+ loff_t offset;
+ size_t len;
+
+ unsigned page_idx;
+ struct pagevec lru_pvec;
+ pgoff_t next_index;
+
+ char *rbuf, done = 0;
+ SC_INITIALIZE(session);
+
+ DbgPrint("File=0x%p Name=%.*s Pages=%d", file, file->f_dentry->d_name.len, file->f_dentry->d_name.name, nr_pages);
+
+ dentry = file->f_dentry;
+
+ if (dentry) {
+ DbgPrint("Dentry=0x%p Name=%.*s", dentry, dentry->d_name.len, dentry->d_name.name);
+ if (dentry->d_inode) {
+ inode = dentry->d_inode;
+ }
+ }
+
+ if (inode) {
+ DbgPrint("Inode=0x%p Ino=%d", inode, inode->i_ino);
+
+ if (inode->i_private) {
+ session = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope);
+ if (0 == SC_PRESENT(session)) {
+ ((struct inode_data *)inode->i_private)->Scope = novfs_get_scope(file->f_dentry);
+ session = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope);
+ }
+ }
+ }
+
+ rbuf = kmalloc(novfs_max_iosize, GFP_KERNEL);
+ if (rbuf) {
+ pagevec_init(&lru_pvec, 0);
+ for (page_idx = 0; page_idx < nr_pages && !done;) {
+ struct page *page, *tpage;
+
+ if (list_empty(page_lst))
+ break;
+
+ page = list_entry(page_lst->prev, struct page, lru);
+
+ next_index = page->index;
+ offset = (loff_t) page->index << PAGE_CACHE_SHIFT;
+ len = 0;
+
+ /*
+ * Count number of contiguous pages.
+ */
+ list_for_each_entry_reverse(tpage, page_lst, lru) {
+ if ((next_index != tpage->index) || (len >= novfs_max_iosize - PAGE_SIZE)) {
+ break;
+ }
+ len += PAGE_SIZE;
+ next_index++;
+ }
+
+ if (len && !done) {
+ struct novfs_data_list dllst[2];
+
+ dllst[1].page = NULL;
+ dllst[1].offset = rbuf;
+ dllst[1].len = len;
+ dllst[1].rwflag = DLWRITE;
+
+ DbgPrint("calling novfs_Read_Pages %lld", offset);
+ if (!novfs_read_pages(file->private_data, dllst, 2, &len, &offset, session)) {
+ novfs_copy_cache_pages(mapping, page_lst, len, rbuf, &lru_pvec);
+ page_idx += len >> PAGE_CACHE_SHIFT;
+ if ((int)(len & PAGE_CACHE_MASK) != len) {
+ page_idx++;
+ }
+ if (len == 0) {
+ done = 1;
+ }
+ } else {
+ done = 1;
+ }
+ }
+ }
+
+ /*
+ * Free any remaining pages.
+ */
+ while (!list_empty(page_lst)) {
+ struct page *page = list_entry(page_lst->prev, struct page, lru);
+
+ list_del(&page->lru);
+ page_cache_release(page);
+ }
+
+ pagevec_lru_add_file(&lru_pvec);
+ kfree(rbuf);
+ } else {
+ retCode = -ENOMEM;
+ }
+
+ DbgPrint("retCode=%d", retCode);
+ return (retCode);
+
+}
+
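+/*
+ * write_begin: grab the page and, when the write does not cover the whole
+ * page, pre-fill it via novfs_read_pages (or just zero the untouched parts
+ * for write-only opens).
+ */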
+int novfs_a_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata)
+{
+ int retVal = 0;
+ loff_t offset = pos;
+ struct novfs_schandle session;
+ struct novfs_data_list dllst[2];
+ struct inode *inode = file->f_dentry->d_inode;
+ struct page *page;
+ pgoff_t index;
+ unsigned from, to;
+ SC_INITIALIZE(session);
+
+ index = pos >> PAGE_CACHE_SHIFT;
+ from = pos & (PAGE_CACHE_SIZE - 1);
+ to = from + len;
+
+ page = grab_cache_page_write_begin(mapping, index, flags);
+ if (!page)
+ return -ENOMEM;
+
+ *pagep = page;
+
+ DbgPrint("File=0x%p Page=0x%p offset=0x%llx From=%u To=%u "
+ "filesize=%lld\n", file, page, offset, from, to, i_size_read(file->f_dentry->d_inode));
+ if (!PageUptodate(page)) {
+ /*
+		 * Check to see if this is a whole-page write.
+ */
+ if ((to == PAGE_CACHE_SIZE) && (from == 0)) {
+ SetPageUptodate(page);
+ }
+
+ /*
+		 * Check to see if we can read the page.
+ */
+ else if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
+ /*
+ * Get session.
+ */
+ if (file->f_dentry && file->f_dentry->d_inode) {
+ if (file->f_dentry->d_inode->i_private) {
+ session = novfs_scope_get_sessionId(((struct inode_data *)
+ inode->i_private)->Scope);
+ if (0 == SC_PRESENT(session)) {
+ ((struct inode_data *)inode->i_private)->Scope = novfs_get_scope(file->f_dentry);
+ session = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope);
+ }
+ }
+ }
+
+ page_cache_get(page);
+
+ len = i_size_read(inode) - offset;
+ if (len > PAGE_CACHE_SIZE) {
+ len = PAGE_CACHE_SIZE;
+ }
+
+ if (len) {
+ /*
+ * Read page from server.
+ */
+
+ dllst[1].page = page;
+ dllst[1].offset = 0;
+ dllst[1].len = len;
+ dllst[1].rwflag = DLWRITE;
+
+ DbgPrint("calling novfs_Read_Pages %lld", offset);
+ novfs_read_pages(file->private_data, dllst, 2, &len, &offset, session);
+
+ /*
+				 * Zero the unused part of the page.
+ */
+ }
+
+ if (len < PAGE_CACHE_SIZE) {
+ char *adr = kmap_atomic(page, KM_USER0);
+ memset(adr + len, 0, PAGE_CACHE_SIZE - len);
+ kunmap_atomic(adr, KM_USER0);
+ }
+ } else {
+ /*
+			 * Zero the sections of memory that are not going to be used.
+ */
+ char *adr = kmap_atomic(page, KM_USER0);
+ memset(adr, 0, from);
+ memset(adr + to, 0, PAGE_CACHE_SIZE - to);
+ kunmap_atomic(adr, KM_USER0);
+
+ DbgPrint("memset 0x%p", adr);
+ }
+ flush_dcache_page(page);
+ SetPageUptodate(page);
+ }
+// DbgPrint("return %d", retVal);
+ return (retVal);
+}
+
+int novfs_a_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata)
+{
+ int retCode = 0;
+ struct inode *inode = page->mapping->host;
+ loff_t offset = pos;
+ struct novfs_schandle session;
+ struct inode_data *id;
+ struct novfs_data_list dlst[1];
+ pgoff_t index;
+ unsigned from, to;
+ SC_INITIALIZE(session);
+
+ index = pos >> PAGE_CACHE_SHIFT;
+ from = pos & (PAGE_CACHE_SIZE - 1);
+ to = from + len;
+
+	DbgPrint("File=0x%p Page=0x%p offset=0x%llx To=%u filesize=%lld",
+ file, page, offset, to, i_size_read(file->f_dentry->d_inode));
+ if (file->f_dentry->d_inode && (id = file->f_dentry->d_inode->i_private)) {
+ session = novfs_scope_get_sessionId(id->Scope);
+ if (0 == SC_PRESENT(session)) {
+ id->Scope = novfs_get_scope(file->f_dentry);
+ session = novfs_scope_get_sessionId(id->Scope);
+ }
+
+ /*
+ * Setup file handle
+ */
+ id->FileHandle = file->private_data;
+
+ if (pos > inode->i_size) {
+ i_size_write(inode, pos);
+ }
+
+ if (!PageUptodate(page)) {
+ pos = ((loff_t) page->index << PAGE_CACHE_SHIFT) + offset;
+
+ if (to < offset) {
+ return (retCode);
+ }
+ dlst[0].page = page;
+ dlst[0].offset = (void *)(unsigned long)offset;
+ dlst[0].len = len;
+ dlst[0].rwflag = DLREAD;
+
+ retCode = novfs_write_pages(id->FileHandle, dlst, 1, len, pos, session);
+
+ } else {
+ set_page_dirty(page);
+ }
+ }
+
+ return (retCode);
+}
+
+/*++======================================================================*/
+ssize_t novfs_a_direct_IO(int rw, struct kiocb * kiocb, const struct iovec * iov, loff_t offset, unsigned long nr_segs)
+/*
+ *
+ * Notes: This is a dummy function so that we can allow a file
+ * to get the direct IO flag set. novfs_f_read and
+ * novfs_f_write will do the work. Maybe not the best
+ * way to do it, but it was the easiest to implement.
+ *
+ *========================================================================*/
+{
+ return (-EIO);
+}
+
+/*++======================================================================*/
+int novfs_i_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd)
+{
+ char *path, *buf;
+ struct novfs_entry_info info;
+ void *handle;
+ struct novfs_schandle session;
+ int retCode = -EACCES;
+
+ DbgPrint("mode=0%o flags=0%o %.*s", mode, nd->NDOPENFLAGS, dentry->d_name.len, dentry->d_name.name);
+
+ if (IS_ROOT(dentry) || /* Root */
+ IS_ROOT(dentry->d_parent) || /* User */
+ IS_ROOT(dentry->d_parent->d_parent) || /* Server */
+ IS_ROOT(dentry->d_parent->d_parent->d_parent)) { /* Volume */
+ return (-EACCES);
+ }
+
+ if (mode | S_IFREG) {
+ if (dir->i_private) {
+ session = novfs_scope_get_sessionId(((struct inode_data *)dir->i_private)->Scope);
+ if (0 == SC_PRESENT(session)) {
+ ((struct inode_data *)dir->i_private)->Scope = novfs_get_scope(dentry);
+ session = novfs_scope_get_sessionId(((struct inode_data *)dir->i_private)->Scope);
+ }
+
+ buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL);
+ if (buf) {
+ path = novfs_dget_path(dentry, buf, PATH_LENGTH_BUFFER);
+ if (path) {
+ retCode = novfs_open_file(path, nd->NDOPENFLAGS | O_RDWR, &info, &handle, session);
+ if (!retCode && handle) {
+ novfs_close_file(handle, session);
+ if (!novfs_i_mknod(dir, dentry, mode | S_IFREG, 0)) {
+ if (dentry->d_inode) {
+ ((struct inode_data *)
+ dentry->d_inode->i_private)->Flags |= UPDATE_INODE;
+ }
+ }
+ }
+ }
+ kfree(buf);
+ }
+ }
+ }
+ return (retCode);
+}
+
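+/*
+ * Copy mode, size and times from the entry info into the inode,
+ * invalidating cached pages if the size or mtime changed.
+ */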
+void update_inode(struct inode *Inode, struct novfs_entry_info *Info)
+{
+ static char dbuf[128];
+
+ DbgPrint("Inode=0x%p I_ino=%d", Inode, Inode->i_ino);
+
+ DbgPrint("atime=%s", ctime_r(&Info->atime.tv_sec, dbuf));
+ DbgPrint("ctime=%s", ctime_r(&Info->ctime.tv_sec, dbuf));
+ DbgPrint("mtime=%s %d", ctime_r(&Info->mtime.tv_sec, dbuf), Info->mtime.tv_nsec);
+ DbgPrint("size=%lld", Info->size);
+ DbgPrint("mode=0%o", Info->mode);
+
+ if (Inode &&
+ ((Inode->i_size != Info->size) ||
+ (Inode->i_mtime.tv_sec != Info->mtime.tv_sec) || (Inode->i_mtime.tv_nsec != Info->mtime.tv_nsec))) {
+ DbgPrint("calling invalidate_remote_inode sz %d %d", Inode->i_size, Info->size);
+ DbgPrint("calling invalidate_remote_inode sec %d %d", Inode->i_mtime.tv_sec, Info->mtime.tv_sec);
+ DbgPrint("calling invalidate_remote_inode ns %d %d", Inode->i_mtime.tv_nsec, Info->mtime.tv_nsec);
+
+ if (Inode && Inode->i_mapping) {
+ invalidate_remote_inode(Inode);
+ }
+ }
+
+ Inode->i_mode = Info->mode;
+ Inode->i_size = Info->size;
+ Inode->i_atime = Info->atime;
+ Inode->i_ctime = Info->ctime;
+ Inode->i_mtime = Info->mtime;
+
+ if (Inode->i_size && Inode->i_sb->s_blocksize) {
+
+ /*
+		 * Fill in the number of blocks the way the NSS filesystem does.
+		 * s_blocksize is initialized to PAGE_CACHE_SIZE in the
+		 * super block initialization.
+		 *
+		 * Update i_blocks to hold the number of 512-byte blocks.
+ */
+ Inode->i_blocks = (((loff_t) Info->size) + Inode->i_sb->s_blocksize - 1)
+ >> (loff_t) Inode->i_blkbits;
+ Inode->i_blocks = Inode->i_blocks << (PAGE_CACHE_SHIFT - 9);
+ Inode->i_bytes = Info->size & (Inode->i_sb->s_blocksize - 1);
+
+ DbgPrint("i_sb->s_blocksize=%d", Inode->i_sb->s_blocksize);
+ DbgPrint("i_blkbits=%d", Inode->i_blkbits);
+ DbgPrint("i_blocks=%d", Inode->i_blocks);
+ DbgPrint("i_bytes=%d", Inode->i_bytes);
+ }
+}
+
+struct dentry *novfs_i_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+{
+ struct dentry *retVal = ERR_PTR(-ENOENT);
+ struct dentry *parent;
+ struct novfs_entry_info *info = NULL;
+ struct inode_data *id;
+ struct inode *inode = NULL;
+ uid_t uid = current_euid();
+ ino_t ino = 0;
+ struct qstr name;
+ char *buf;
+
+ buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL);
+ if (buf) {
+ char *path;
+ path = novfs_dget_path(dentry, buf, PATH_LENGTH_BUFFER);
+ if (path) {
+ DbgPrint("dir 0x%p %d hash %d inode 0x%0p %s", dir, dir->i_ino, dentry->d_name.hash, dentry->d_inode, path);
+ }
+ kfree(buf);
+ } else {
+ DbgPrint("dir 0x%p %d name %.*s hash %d inode 0x%0p",
+ dir, dir->i_ino, dentry->d_name.len, dentry->d_name.name, dentry->d_name.hash, dentry->d_inode);
+ }
+
+ if ((dentry->d_name.len == 7)
+ && (0 == strncmp(dentry->d_name.name, " !xover", 7))) {
+ dentry->d_op = &novfs_dentry_operations;
+ igrab(dir);
+ d_add(dentry, dir);
+ return NULL;
+ }
+ if ((dentry->d_name.len == 7)
+ && (0 == strncmp(dentry->d_name.name, "z!xover", 7))) {
+ dentry->d_op = &novfs_dentry_operations;
+ igrab(dir);
+ d_add(dentry, dir);
+ return NULL;
+ }
+
+ if (dir && (id = dir->i_private)) {
+ retVal = 0;
+ if (IS_ROOT(dentry)) {
+ DbgPrint("Root entry=0x%p", novfs_root);
+ inode = novfs_root->d_inode;
+ return (0);
+ } else {
+ info = kmalloc(sizeof(struct novfs_entry_info) + PATH_LENGTH_BUFFER, GFP_KERNEL);
+ if (info) {
+ if (NULL == (retVal = ERR_PTR(verify_dentry(dentry, 1)))) {
+ name.name = dentry->d_name.name;
+ name.len = dentry->d_name.len;
+ name.hash = novfs_internal_hash(&name);
+
+ if (novfs_lock_inode_cache(dir)) {
+ if (!novfs_get_entry(dir, &name, &ino, info)) {
+ inode = ilookup(dentry->d_sb, ino);
+ if (inode) {
+ update_inode(inode, info);
+ }
+ }
+ novfs_unlock_inode_cache(dir);
+ }
+
+ if (!inode && ino) {
+ if (id && id->Scope) {
+ uid = novfs_scope_get_uid(id->Scope);
+ } else {
+ uid = novfs_scope_get_uid(novfs_get_scope(dentry));
+ }
+ if (novfs_lock_inode_cache(dir)) {
+ inode = novfs_get_inode(dentry->d_sb, info->mode, 0, uid, ino, &name);
+ if (inode) {
+ if (!novfs_get_entry(dir, &dentry->d_name, &ino, info)) {
+ update_inode(inode, info);
+ }
+ }
+ novfs_unlock_inode_cache(dir);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if (!retVal) {
+ dentry->d_op = &novfs_dentry_operations;
+ if (inode) {
+ parent = dget_parent(dentry);
+ novfs_d_add(dentry->d_parent, dentry, inode, 1);
+ dput(parent);
+ } else {
+ d_add(dentry, inode);
+ }
+ }
+
+ if (info)
+ kfree(info);
+
+ DbgPrint("inode=0x%p dentry->d_inode=0x%p return=0x%p", dir, dentry->d_inode, retVal);
+
+ return (retVal);
+}
+
+int novfs_i_unlink(struct inode *dir, struct dentry *dentry)
+{
+ int retCode = -ENOENT;
+ struct inode *inode;
+ struct novfs_schandle session;
+ char *path, *buf;
+ uint64_t t64;
+
+ DbgPrint("dir=0x%p dir->i_ino=%d %.*s", dir, dir->i_ino, dentry->d_name.len, dentry->d_name.name);
+ DbgPrint("IS_ROOT(dentry)=%d", IS_ROOT(dentry));
+ DbgPrint("IS_ROOT(dentry->d_parent)=%d", IS_ROOT(dentry->d_parent));
+ DbgPrint("IS_ROOT(dentry->d_parent->d_parent)=%d", IS_ROOT(dentry->d_parent->d_parent));
+ DbgPrint("IS_ROOT(dentry->d_parent->d_parent->d_parent)=%d", IS_ROOT(dentry->d_parent->d_parent->d_parent));
+
+ if (IS_ROOT(dentry) || /* Root */
+ IS_ROOT(dentry->d_parent) || /* User */
+ (!IS_ROOT(dentry->d_parent->d_parent) && /* Server */
+ IS_ROOT(dentry->d_parent->d_parent->d_parent))) { /* Volume */
+ return (-EACCES);
+ }
+
+ inode = dentry->d_inode;
+ if (inode) {
+ DbgPrint("dir=0x%p dir->i_ino=%d inode=0x%p ino=%d", dir, dir->i_ino, inode, inode->i_ino);
+ if (inode->i_private) {
+ session = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope);
+ if (0 == SC_PRESENT(session)) {
+ ((struct inode_data *)inode->i_private)->Scope = novfs_get_scope(dentry);
+ session = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope);
+ }
+
+ buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL);
+ if (buf) {
+ path = novfs_dget_path(dentry, buf, PATH_LENGTH_BUFFER);
+ if (path) {
+ DbgPrint("path %s mode 0%o", path, inode->i_mode);
+ if (IS_ROOT(dentry->d_parent->d_parent)) {
+ retCode = novfs_daemon_logout(&dentry->d_name, &session);
+ } else {
+ retCode = novfs_delete(path, S_ISDIR(inode->i_mode), session);
+ if (retCode) {
+ struct iattr ia;
+ memset(&ia, 0, sizeof(ia));
+ ia.ia_valid = ATTR_MODE;
+ ia.ia_mode = S_IRWXU;
+ novfs_set_attr(path, &ia, session);
+ retCode = novfs_delete(path, S_ISDIR(inode->i_mode), session);
+ }
+ }
+ if (!retCode || IS_DEADDIR(inode)) {
+ novfs_remove_inode_entry(dir, &dentry->d_name, 0);
+ dentry->d_time = 0;
+ t64 = 0;
+ novfs_scope_set_userspace(&t64, &t64, &t64, &t64);
+ retCode = 0;
+ }
+ }
+ kfree(buf);
+ }
+ }
+ }
+
+ DbgPrint("retCode 0x%x", retCode);
+ return (retCode);
+}
+
+int novfs_i_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+{
+ char *path, *buf;
+ struct novfs_schandle session;
+ int retCode = 0;
+ struct inode *inode;
+ struct novfs_entry_info info;
+ uid_t uid;
+
+ DbgPrint("dir=0x%p ino=%d dentry=0x%p %.*s mode=0%lo",
+ dir, dir->i_ino, dentry, dentry->d_name.len, dentry->d_name.name, mode);
+
+ if (IS_ROOT(dentry) || /* Root */
+ IS_ROOT(dentry->d_parent) || /* User */
+ IS_ROOT(dentry->d_parent->d_parent) || /* Server */
+ IS_ROOT(dentry->d_parent->d_parent->d_parent)) { /* Volume */
+ return (-EACCES);
+ }
+
+ mode |= S_IFDIR;
+ mode &= (S_IFMT | S_IRWXU);
+ if (dir->i_private) {
+ session = novfs_scope_get_sessionId(((struct inode_data *)dir->i_private)->Scope);
+ if (0 == SC_PRESENT(session)) {
+ ((struct inode_data *)dir->i_private)->Scope = novfs_get_scope(dentry);
+ session = novfs_scope_get_sessionId(((struct inode_data *)dir->i_private)->Scope);
+ }
+
+ uid = novfs_scope_get_uid(((struct inode_data *)dir->i_private)->Scope);
+ buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL);
+ if (buf) {
+ path = novfs_dget_path(dentry, buf, PATH_LENGTH_BUFFER);
+ if (path) {
+ DbgPrint("path %s", path);
+ retCode = novfs_create(path, S_ISDIR(mode), session);
+ if (!retCode) {
+ retCode = novfs_get_file_info(path, &info, session);
+ if (!retCode) {
+ retCode = novfs_i_mknod(dir, dentry, mode, 0);
+ inode = dentry->d_inode;
+ if (inode) {
+ update_inode(inode, &info);
+ ((struct inode_data *)inode->i_private)->Flags &= ~UPDATE_INODE;
+
+ dentry->d_time = jiffies + (novfs_update_timeout * HZ);
+
+ novfs_lock_inode_cache(dir);
+ if (novfs_update_entry(dir, &dentry->d_name, 0, &info)) {
+ novfs_add_inode_entry(dir, &dentry->d_name, inode->i_ino, &info);
+ }
+ novfs_unlock_inode_cache(dir);
+ }
+
+ }
+ }
+ }
+ kfree(buf);
+ }
+ }
+
+ return (retCode);
+}
+
+int novfs_i_rmdir(struct inode *inode, struct dentry *dentry)
+{
+ return (novfs_i_unlink(inode, dentry));
+}
+
+int novfs_i_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
+{
+ struct inode *inode = NULL;
+ int retCode = -EACCES;
+ uid_t uid;
+ struct dentry *parent;
+
+ if (IS_ROOT(dentry) || /* Root */
+ IS_ROOT(dentry->d_parent) || /* User */
+ IS_ROOT(dentry->d_parent->d_parent) || /* Server */
+ IS_ROOT(dentry->d_parent->d_parent->d_parent)) { /* Volume */
+ return (-EACCES);
+ }
+
+ if (((struct inode_data *)dir->i_private)) {
+ uid = novfs_scope_get_uid(((struct inode_data *)dir->i_private)->Scope);
+ if (mode & (S_IFREG | S_IFDIR)) {
+ inode = novfs_get_inode(dir->i_sb, mode, dev, uid, 0, &dentry->d_name);
+ }
+ }
+ if (inode) {
+ struct novfs_entry_info info;
+
+ dentry->d_op = &novfs_dentry_operations;
+ parent = dget_parent(dentry);
+ novfs_d_add(parent, dentry, inode, 0);
+ memset(&info, 0, sizeof(info));
+ info.mode = inode->i_mode;
+ novfs_lock_inode_cache(dir);
+ novfs_add_inode_entry(dir, &dentry->d_name, inode->i_ino, &info);
+ novfs_unlock_inode_cache(dir);
+
+ dput(parent);
+
+ retCode = 0;
+ }
+ DbgPrint("return 0x%x", retCode);
+ return retCode;
+}
+
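+/*
+ * Rename is only attempted when source and destination are on the same
+ * server and volume; anything else returns -EXDEV.
+ */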
+int novfs_i_rename(struct inode *odir, struct dentry *od, struct inode *ndir, struct dentry *nd)
+{
+ int retCode = -ENOTEMPTY;
+ char *newpath, *newbuf, *newcon;
+ char *oldpath, *oldbuf, *oldcon;
+ struct qstr newname, oldname;
+ struct novfs_entry_info *info = NULL;
+ int oldlen, newlen;
+ struct novfs_schandle session;
+ ino_t ino;
+
+ if (IS_ROOT(od) || /* Root */
+ IS_ROOT(od->d_parent) || /* User */
+ IS_ROOT(od->d_parent->d_parent) || /* Server */
+ IS_ROOT(od->d_parent->d_parent->d_parent)) { /* Volume */
+ return (-EACCES);
+ }
+
+ DbgPrint("odir=0x%p ino=%d ndir=0x%p ino=%d", odir, odir->i_ino, ndir, ndir->i_ino);
+
+ oldbuf = kmalloc(PATH_LENGTH_BUFFER * 2, GFP_KERNEL);
+ newbuf = oldbuf + PATH_LENGTH_BUFFER;
+ if (oldbuf && newbuf) {
+ oldpath = novfs_dget_path(od, oldbuf, PATH_LENGTH_BUFFER);
+ newpath = novfs_dget_path(nd, newbuf, PATH_LENGTH_BUFFER);
+ if (oldpath && newpath) {
+ oldlen = PATH_LENGTH_BUFFER - (int)(oldpath - oldbuf);
+ newlen = PATH_LENGTH_BUFFER - (int)(newpath - newbuf);
+
+ DbgPrint("od=0x%p od->inode=0x%p od->inode->i_ino=%d %s", od, od->d_inode, od->d_inode->i_ino, oldpath);
+ if (nd->d_inode) {
+ DbgPrint("nd=0x%p nd->inode=0x%p nd->inode->i_ino=%d %s",
+ nd, nd->d_inode, nd->d_inode->i_ino, newpath);
+ } else {
+ DbgPrint("nd=0x%p nd->inode=0x%p %s", nd, nd->d_inode, newpath);
+ }
+
+ /*
+ * Check to see if two different servers or different volumes
+ */
+ newcon = strchr(newpath + 1, '\\');
+ oldcon = strchr(oldpath + 1, '\\');
+ DbgPrint("newcon=0x%p newpath=0x%p", newcon, newpath);
+ DbgPrint("oldcon=0x%p oldpath=0x%p", oldcon, oldpath);
+ retCode = -EXDEV;
+ if (newcon && oldcon && ((int)(newcon - newpath) == (int)(oldcon - oldpath))) {
+ newcon = strchr(newcon + 1, '\\');
+ oldcon = strchr(oldcon + 1, '\\');
+ DbgPrint("2; newcon=0x%p newpath=0x%p", newcon, newpath);
+ DbgPrint("2; oldcon=0x%p oldpath=0x%p", oldcon, oldpath);
+ if (newcon && oldcon && ((int)(newcon - newpath) == (int)(oldcon - oldpath))) {
+ newname.name = newpath;
+ newname.len = (int)(newcon - newpath);
+ newname.hash = 0;
+
+ oldname.name = oldpath;
+ oldname.len = (int)(oldcon - oldpath);
+ oldname.hash = 0;
+ if (!novfs_d_strcmp(&newname, &oldname)) {
+
+ if (od->d_inode && od->d_inode->i_private) {
+
+ if (nd->d_inode && nd->d_inode->i_private) {
+ session =
+ novfs_scope_get_sessionId
+ (((struct inode_data *)ndir->i_private)->Scope);
+ if (0 == SC_PRESENT(session)) {
+ ((struct inode_data *)ndir->i_private)->Scope =
+ novfs_get_scope(nd);
+ session =
+ novfs_scope_get_sessionId(((struct inode_data *)ndir->
+ i_private)->Scope);
+ }
+
+ retCode =
+ novfs_delete(newpath, S_ISDIR(nd->d_inode->i_mode), session);
+ if (retCode) {
+ struct iattr ia;
+ memset(&ia, 0, sizeof(ia));
+ ia.ia_valid = ATTR_MODE;
+ ia.ia_mode = S_IRWXU;
+ novfs_set_attr(newpath, &ia, session);
+ retCode =
+ novfs_delete(newpath, S_ISDIR(nd->d_inode->i_mode),
+ session);
+ }
+
+ }
+
+ session =
+ novfs_scope_get_sessionId(((struct inode_data *)ndir->i_private)->
+ Scope);
+ if (0 == SC_PRESENT(session)) {
+ ((struct inode_data *)ndir->i_private)->Scope = novfs_get_scope(nd);
+ session =
+ novfs_scope_get_sessionId(((struct inode_data *)ndir->
+ i_private)->Scope);
+ }
+ retCode =
+ novfs_rename_file(S_ISDIR(od->d_inode->i_mode), oldpath, oldlen - 1,
+ newpath, newlen - 1, session);
+
+ if (!retCode) {
+ info = (struct novfs_entry_info *)oldbuf;
+ od->d_time = 0;
+ novfs_remove_inode_entry(odir, &od->d_name, 0);
+ novfs_remove_inode_entry(ndir, &nd->d_name, 0);
+ novfs_get_file_info(newpath, info, session);
+ nd->d_time = jiffies + (novfs_update_timeout * HZ);
+
+ if (od->d_inode && od->d_inode->i_ino) {
+ ino = od->d_inode->i_ino;
+ } else {
+ ino = (ino_t) atomic_inc_return(&novfs_Inode_Number);
+ }
+ novfs_add_inode_entry(ndir, &nd->d_name, ino, info);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if (oldbuf)
+ kfree(oldbuf);
+
+ DbgPrint("return %d", retCode);
+ return (retCode);
+}
+
+int novfs_i_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ char *path, *buf;
+ struct inode *inode = dentry->d_inode;
+ char atime_buf[32];
+ char mtime_buf[32];
+ char ctime_buf[32];
+ unsigned int ia_valid = attr->ia_valid;
+ struct novfs_schandle session;
+ int retVal = 0;
+
+ if (IS_ROOT(dentry) || /* Root */
+ IS_ROOT(dentry->d_parent) || /* User */
+ IS_ROOT(dentry->d_parent->d_parent) || /* Server */
+ IS_ROOT(dentry->d_parent->d_parent->d_parent)) { /* Volume */
+ return (-EACCES);
+ }
+
+ if (inode && inode->i_private) {
+ session = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope);
+ if (0 == SC_PRESENT(session)) {
+ ((struct inode_data *)inode->i_private)->Scope = novfs_get_scope(dentry);
+ session = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope);
+ }
+
+ buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL);
+ if (buf) {
+ path = novfs_dget_path(dentry, buf, PATH_LENGTH_BUFFER);
+ if (path) {
+ strcpy(atime_buf, "Unspecified");
+ strcpy(mtime_buf, "Unspecified");
+ strcpy(ctime_buf, "Unspecified");
+ if (attr->ia_valid & ATTR_ATIME) {
+ ctime_r(&attr->ia_atime.tv_sec, atime_buf);
+ }
+ if (attr->ia_valid & ATTR_MTIME) {
+ ctime_r(&attr->ia_mtime.tv_sec, mtime_buf);
+ }
+ if (attr->ia_valid & ATTR_CTIME) {
+ ctime_r(&attr->ia_ctime.tv_sec, ctime_buf);
+ }
+ /* Removed for Bug 132374. jlt */
+ __DbgPrint("%s: %s\n"
+ " ia_valid: 0x%x\n"
+ " ia_mode: 0%o\n"
+ " ia_uid: %d\n"
+ " ia_gid: %d\n"
+ " ia_size: %lld\n"
+ " ia_atime: %s\n"
+ " ia_mtime: %s\n"
+ " ia_ctime: %s\n", __func__,
+ path,
+ attr->ia_valid,
+ attr->ia_mode,
+ attr->ia_uid, attr->ia_gid, attr->ia_size, atime_buf, mtime_buf, ctime_buf);
+
+ if (ia_valid && !(retVal = novfs_set_attr(path, attr, session))) {
+ ((struct inode_data *)inode->i_private)->Flags |= UPDATE_INODE;
+
+ if (ia_valid & ATTR_ATIME)
+ inode->i_atime = attr->ia_atime;
+ if (ia_valid & ATTR_MTIME)
+ inode->i_mtime = attr->ia_mtime;
+ if (ia_valid & ATTR_CTIME)
+ inode->i_ctime = attr->ia_ctime;
+ if (ia_valid & ATTR_MODE) {
+ inode->i_mode = attr->ia_mode & (S_IFMT | S_IRWXU);
+ }
+ }
+ }
+ }
+ kfree(buf);
+ }
+ DbgPrint("return 0x%x", retVal);
+
+ return (retVal);
+}
+
+int novfs_i_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *kstat)
+{
+ int retCode = 0;
+ char atime_buf[32];
+ char mtime_buf[32];
+ char ctime_buf[32];
+ struct inode *inode = dentry->d_inode;
+
+ struct novfs_entry_info info;
+ char *path, *buf;
+ struct novfs_schandle session;
+ struct inode_data *id;
+
+ if (!IS_ROOT(dentry) && !IS_ROOT(dentry->d_parent)) {
+ SC_INITIALIZE(session);
+ id = dentry->d_inode->i_private;
+
+ if (id && (id->Flags & UPDATE_INODE)) {
+ session = novfs_scope_get_sessionId(id->Scope);
+
+ if (0 == SC_PRESENT(session)) {
+ id->Scope = novfs_get_scope(dentry);
+ session = novfs_scope_get_sessionId(id->Scope);
+ }
+
+ buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL);
+ if (buf) {
+ path = novfs_dget_path(dentry, buf, PATH_LENGTH_BUFFER);
+ if (path) {
+ retCode = novfs_get_file_info(path, &info, session);
+ if (!retCode) {
+ update_inode(inode, &info);
+ id->Flags &= ~UPDATE_INODE;
+ }
+ }
+ kfree(buf);
+ }
+ }
+ }
+
+ kstat->ino = inode->i_ino;
+ kstat->dev = inode->i_sb->s_dev;
+ kstat->mode = inode->i_mode;
+ kstat->nlink = inode->i_nlink;
+ kstat->uid = inode->i_uid;
+ kstat->gid = inode->i_gid;
+ kstat->rdev = inode->i_rdev;
+ kstat->size = i_size_read(inode);
+ kstat->atime = inode->i_atime;
+ kstat->mtime = inode->i_mtime;
+ kstat->ctime = inode->i_ctime;
+ kstat->blksize = inode->i_sb->s_blocksize;
+ kstat->blocks = inode->i_blocks;
+ if (inode->i_bytes) {
+ kstat->blocks++;
+ }
+ ctime_r(&kstat->atime.tv_sec, atime_buf);
+ ctime_r(&kstat->mtime.tv_sec, mtime_buf);
+ ctime_r(&kstat->ctime.tv_sec, ctime_buf);
+
+ __DbgPrint("%s: 0x%x 0x%p <%.*s>\n"
+ " ino: %d\n"
+ " dev: 0x%x\n"
+ " mode: 0%o\n"
+ " nlink: 0x%x\n"
+ " uid: 0x%x\n"
+ " gid: 0x%x\n"
+ " rdev: 0x%x\n"
+ " size: 0x%llx\n"
+ " atime: %s\n"
+ " mtime: %s\n"
+ " ctime: %s\n"
+ " blksize: 0x%x\n"
+ " blocks: 0x%x\n", __func__,
+ retCode, dentry, dentry->d_name.len, dentry->d_name.name,
+ kstat->ino,
+ kstat->dev,
+ kstat->mode,
+ kstat->nlink,
+ kstat->uid,
+ kstat->gid, kstat->rdev, kstat->size, atime_buf, mtime_buf, ctime_buf, kstat->blksize, kstat->blocks);
+ return (retCode);
+}
+
+ssize_t novfs_i_getxattr(struct dentry * dentry, const char *name, void *buffer, size_t buffer_size)
+{
+ struct inode *inode = dentry->d_inode;
+ struct novfs_schandle sessionId;
+ char *path, *buf, *bufRead;
+ ssize_t dataLen;
+
+ int retxcode = 0;
+
+ SC_INITIALIZE(sessionId);
+
+ DbgPrint("Ian"); /*%.*s\n", dentry->d_name.len, dentry->d_name.name); */
+ DbgPrint("dentry->d_name.len %u, dentry->d_name.name %s", dentry->d_name.len, dentry->d_name.name);
+ DbgPrint("name %s", name);
+ DbgPrint("size %u", buffer_size);
+
+ if (inode && inode->i_private) {
+ sessionId = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope);
+ DbgPrint("SessionId = %u", sessionId);
+ //if (0 == sessionId)
+ if (0 == SC_PRESENT(sessionId)) {
+ ((struct inode_data *)inode->i_private)->Scope = novfs_get_scope(dentry);
+ sessionId = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope);
+ DbgPrint("SessionId = %u", sessionId);
+ }
+ }
+
+ dataLen = 0;
+ buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL);
+ if (buf) {
+ path = novfs_dget_path(dentry, buf, PATH_LENGTH_BUFFER);
+ if (path) {
+ bufRead = kmalloc(XA_BUFFER, GFP_KERNEL);
+ if (bufRead) {
+ retxcode = novfs_getx_file_info(path, name, bufRead, XA_BUFFER, &dataLen, sessionId);
+ DbgPrint("after novfs_GetX_File_Info retxcode = %d", retxcode);
+ if (!retxcode) {
+ novfs_dump(64, bufRead);
+ if (buffer_size != 0) {
+ if (buffer_size >= dataLen) {
+ memcpy(buffer, bufRead, dataLen);
+ } else {
+ DbgPrint("(!!!) not enough buffer_size. buffer_size = %d, dataLen = %d",
+ buffer_size, dataLen);
+ retxcode = -ERANGE;
+ }
+ }
+ }
+ kfree(bufRead);
+ }
+ }
+ kfree(buf);
+ }
+
+ if (retxcode) {
+ dataLen = retxcode;
+ } else {
+ if ((buffer_size > 0) && (buffer_size < dataLen)) {
+ dataLen = -ERANGE;
+ }
+ }
+
+ return (dataLen);
+}
+
+int novfs_i_setxattr(struct dentry *dentry, const char *name, const void *value, size_t value_size, int flags)
+{
+
+ struct inode *inode = dentry->d_inode;
+ struct novfs_schandle sessionId;
+ char *path, *buf;
+ unsigned long bytesWritten = 0;
+ int retError = 0;
+ int retxcode = 0;
+
+ SC_INITIALIZE(sessionId);
+
+ DbgPrint("Ian"); /*%.*s\n", dentry->d_name.len, dentry->d_name.name); */
+ DbgPrint("dentry->d_name.len %u, dentry->d_name.name %s", dentry->d_name.len, dentry->d_name.name);
+ DbgPrint("name %s", name);
+ DbgPrint("value_size %u", value_size);
+ DbgPrint("flags %d", flags);
+
+ if (inode && inode->i_private) {
+ sessionId = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope);
+ DbgPrint("SessionId = %u", sessionId);
+ //if (0 == sessionId)
+ if (0 == SC_PRESENT(sessionId)) {
+ ((struct inode_data *)inode->i_private)->Scope = novfs_get_scope(dentry);
+ sessionId = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope);
+ DbgPrint("SessionId = %u", sessionId);
+ }
+ }
+
+ buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL);
+ if (buf) {
+ path = novfs_dget_path(dentry, buf, PATH_LENGTH_BUFFER);
+ if (path) {
+ retxcode = novfs_setx_file_info(path, name, value, value_size, &bytesWritten, flags, sessionId);
+ if (!retxcode) {
+ DbgPrint("bytesWritten = %u", bytesWritten);
+ }
+ }
+ kfree(buf);
+ }
+
+ if (retxcode) {
+ retError = retxcode;
+ }
+
+ if (bytesWritten < value_size) {
+ retError = retxcode;
+ }
+ return (retError);
+}
+
+ssize_t novfs_i_listxattr(struct dentry * dentry, char *buffer, size_t buffer_size)
+{
+ struct inode *inode = dentry->d_inode;
+ struct novfs_schandle sessionId;
+ char *path, *buf, *bufList;
+ ssize_t dataLen;
+ int retxcode = 0;
+
+ SC_INITIALIZE(sessionId);
+
+ DbgPrint("Ian"); //%.*s\n", dentry->d_name.len, dentry->d_name.name);
+ DbgPrint("dentry->d_name.len %u, dentry->d_name.name %s", dentry->d_name.len, dentry->d_name.name);
+ DbgPrint("size %u", buffer_size);
+
+ if (inode && inode->i_private) {
+ sessionId = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope);
+		DbgPrint("SessionId = 0x%p:%p", sessionId.hTypeId, sessionId.hId);
+ //if (0 == sessionId)
+ if (0 == SC_PRESENT(sessionId)) {
+ ((struct inode_data *)inode->i_private)->Scope = novfs_get_scope(dentry);
+ sessionId = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope);
+			DbgPrint("SessionId = 0x%p:%p", sessionId.hTypeId, sessionId.hId);
+ }
+ }
+
+ dataLen = 0;
+ buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL);
+ if (buf) {
+ path = novfs_dget_path(dentry, buf, PATH_LENGTH_BUFFER);
+ if (path) {
+ bufList = kmalloc(XA_BUFFER, GFP_KERNEL);
+ if (bufList) {
+ retxcode = novfs_listx_file_info(path, bufList, XA_BUFFER, &dataLen, sessionId);
+
+ novfs_dump(64, bufList);
+ if (buffer_size != 0) {
+ if (buffer_size >= dataLen) {
+ memcpy(buffer, bufList, dataLen);
+ } else {
+ DbgPrint("(!!!) not enough buffer_size. buffer_size = %d, dataLen = %d",
+ buffer_size, dataLen);
+					retxcode = -ERANGE;
+ }
+ }
+
+				kfree(bufList);
+ }
+
+ }
+ kfree(buf);
+ }
+
+ if (retxcode) {
+		dataLen = retxcode;
+ } else {
+
+ if ((buffer_size > 0) && (buffer_size < dataLen)) {
+ dataLen = -ERANGE;
+ }
+ }
+ return (dataLen);
+}
+
+int novfs_i_revalidate(struct dentry *dentry)
+{
+
+ DbgPrint("name %.*s", dentry->d_name.len, dentry->d_name.name);
+
+ return (0);
+}
+
+void novfs_read_inode(struct inode *inode)
+{
+ DbgPrint("0x%p %d", inode, inode->i_ino);
+}
+
+void novfs_write_inode(struct inode *inode)
+{
+ DbgPrint("Inode=0x%p Ino=%d", inode, inode->i_ino);
+}
+
+int novfs_notify_change(struct dentry *dentry, struct iattr *attr)
+{
+ struct inode *inode = dentry->d_inode;
+
+ DbgPrint("Dentry=0x%p Name=%.*s Inode=0x%p Ino=%d ia_valid=0x%x",
+ dentry, dentry->d_name.len, dentry->d_name.name, inode, inode->i_ino, attr->ia_valid);
+ return (0);
+}
+
+void novfs_evict_inode(struct inode *inode)
+{
+ truncate_inode_pages(&inode->i_data, 0);
+ end_writeback(inode);
+
+ InodeCount--;
+
+ if (inode->i_private) {
+ struct inode_data *id = inode->i_private;
+
+ DbgPrint("inode=0x%p ino=%d Scope=0x%p Name=%s", inode, inode->i_ino, id->Scope, id->Name);
+
+ novfs_free_inode_cache(inode);
+
- down(&InodeList_lock);
++ mutex_lock(&InodeList_lock);
+ list_del(&id->IList);
- up(&InodeList_lock);
++ mutex_unlock(&InodeList_lock);
+
+ kfree(inode->i_private);
+ inode->i_private = NULL;
+
+ remove_inode_hash(inode);
+
+ } else {
+ DbgPrint("inode=0x%p ino=%d", inode, inode->i_ino);
+ }
+}
+
+/* Called when /proc/mounts is read */
+int novfs_show_options(struct seq_file *s, struct vfsmount *m)
+{
+ char *buf, *path, *tmp;
+
+ buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL);
+ if (buf) {
+ struct path my_path;
+ my_path.mnt = m;
+ my_path.dentry = m->mnt_root;
+ path = d_path(&my_path, buf, PATH_LENGTH_BUFFER);
+ if (path) {
+			if (!novfs_current_mnt || strcmp(novfs_current_mnt, path)) {
+ DbgPrint("%.*s %.*s %s",
+ m->mnt_root->d_name.len,
+ m->mnt_root->d_name.name,
+ m->mnt_mountpoint->d_name.len, m->mnt_mountpoint->d_name.name, path);
+ tmp = kmalloc(PATH_LENGTH_BUFFER - (int)(path - buf), GFP_KERNEL);
+ if (tmp) {
+ strcpy(tmp, path);
+ path = novfs_current_mnt;
+ novfs_current_mnt = tmp;
+ novfs_daemon_set_mnt_point(novfs_current_mnt);
+
+ if (path) {
+ kfree(path);
+ }
+ }
+ }
+ }
+ kfree(buf);
+ }
+ return (0);
+}
+
+/* Called when statfs(2) system called. */
+int novfs_statfs(struct dentry *de, struct kstatfs *buf)
+{
+ uint64_t td, fd, te, fe;
+ struct super_block *sb = de->d_sb;
+
+ DbgPrint("");
+
+ td = fd = te = fe = 0;
+
+ novfs_scope_get_userspace(&td, &fd, &te, &fe);
+
+ DbgPrint("td=%llu", td);
+ DbgPrint("fd=%llu", fd);
+ DbgPrint("te=%llu", te);
+	DbgPrint("fe=%llu", fe);
+ /* fix for Nautilus */
+ if (sb->s_blocksize == 0)
+ sb->s_blocksize = 4096;
+
+ buf->f_type = sb->s_magic;
+ buf->f_bsize = sb->s_blocksize;
+ buf->f_namelen = NW_MAX_PATH_LENGTH;
+ buf->f_blocks = (sector_t) (td + (uint64_t) (sb->s_blocksize - 1)) >> (uint64_t) sb->s_blocksize_bits;
+ buf->f_bfree = (sector_t) fd >> (uint64_t) sb->s_blocksize_bits;
+ buf->f_bavail = (sector_t) buf->f_bfree;
+ buf->f_files = (sector_t) te;
+ buf->f_ffree = (sector_t) fe;
+ buf->f_frsize = sb->s_blocksize;
+ if (te > 0xffffffff)
+ buf->f_files = 0xffffffff;
+
+ if (fe > 0xffffffff)
+ buf->f_ffree = 0xffffffff;
+
+ DbgPrint("f_type: 0x%x", buf->f_type);
+ DbgPrint("f_bsize: %u", buf->f_bsize);
+ DbgPrint("f_namelen: %d", buf->f_namelen);
+ DbgPrint("f_blocks: %llu", buf->f_blocks);
+ DbgPrint("f_bfree: %llu", buf->f_bfree);
+ DbgPrint("f_bavail: %llu", buf->f_bavail);
+ DbgPrint("f_files: %llu", buf->f_files);
+ DbgPrint("f_ffree: %llu", buf->f_ffree);
+ DbgPrint("f_frsize: %u", buf->f_frsize);
+
+ return 0;
+}
+
+struct inode *novfs_get_inode(struct super_block *sb, int mode, int dev, uid_t Uid, ino_t ino, struct qstr *name)
+{
+ struct inode *inode = new_inode(sb);
+
+ if (inode) {
+ InodeCount++;
+ inode->i_mode = mode;
+ inode->i_uid = Uid;
+ inode->i_gid = 0;
+ inode->i_blkbits = sb->s_blocksize_bits;
+ inode->i_blocks = 0;
+ inode->i_rdev = 0;
+ inode->i_ino = (ino) ? ino : (ino_t) atomic_inc_return(&novfs_Inode_Number);
+ if (novfs_page_cache) {
+ inode->i_mapping->a_ops = &novfs_aops;
+ } else {
+ inode->i_mapping->a_ops = &novfs_nocache_aops;
+ }
+ inode->i_mapping->backing_dev_info = &novfs_backing_dev_info;
+ inode->i_atime.tv_sec = 0;
+ inode->i_atime.tv_nsec = 0;
+ inode->i_mtime = inode->i_ctime = inode->i_atime;
+
+ DbgPrint("Inode=0x%p I_ino=%d len=%d", inode, inode->i_ino, name->len);
+
+ if (NULL != (inode->i_private = kmalloc(sizeof(struct inode_data) + name->len, GFP_KERNEL))) {
+ struct inode_data *id;
+ id = inode->i_private;
+
+ DbgPrint("i_private 0x%p", id);
+
+ id->Scope = NULL;
+ id->Flags = 0;
+ id->Inode = inode;
+
+ id->cntDC = 1;
+
+ INIT_LIST_HEAD(&id->DirCache);
- init_MUTEX(&id->DirCacheLock);
++ mutex_init(&id->DirCacheLock);
+
+ id->FileHandle = 0;
+ id->CacheFlag = 0;
+
- down(&InodeList_lock);
++ mutex_lock(&InodeList_lock);
+
+ list_add_tail(&id->IList, &InodeList);
- up(&InodeList_lock);
++ mutex_unlock(&InodeList_lock);
+
+ id->Name[0] = '\0';
+
+ memcpy(id->Name, name->name, name->len);
+ id->Name[name->len] = '\0';
+
+ DbgPrint("name %s", id->Name);
+ }
+
+ insert_inode_hash(inode);
+
+ switch (mode & S_IFMT) {
+
+ case S_IFREG:
+ inode->i_op = &novfs_file_inode_operations;
+ inode->i_fop = &novfs_file_operations;
+ break;
+
+ case S_IFDIR:
+ inode->i_op = &novfs_inode_operations;
+ inode->i_fop = &novfs_dir_operations;
+ inode->i_blkbits = 0;
+ break;
+
+ default:
+ init_special_inode(inode, mode, dev);
+ break;
+ }
+
+ DbgPrint("size=%lld", inode->i_size);
+ DbgPrint("mode=0%o", inode->i_mode);
+ DbgPrint("i_sb->s_blocksize=%d", inode->i_sb->s_blocksize);
+ DbgPrint("i_blkbits=%d", inode->i_blkbits);
+ DbgPrint("i_blocks=%d", inode->i_blocks);
+ DbgPrint("i_bytes=%d", inode->i_bytes);
+ }
+
+ DbgPrint("0x%p %d", inode, inode->i_ino);
+ return (inode);
+}
+
+int novfs_fill_super(struct super_block *SB, void *Data, int Silent)
+{
+ struct inode *inode;
+ struct dentry *server, *tree;
+ struct qstr name;
+ struct novfs_entry_info info;
+
+ SB->s_blocksize = PAGE_CACHE_SIZE;
+ SB->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ SB->s_maxbytes = MAX_LFS_FILESIZE; /* Max file size */
+ SB->s_op = &novfs_ops;
+ SB->s_flags |= (MS_NODIRATIME | MS_NODEV | MS_POSIXACL);
+ SB->s_magic = NOVFS_MAGIC;
+
+ name.len = 1;
+ name.name = "/";
+
+ inode = novfs_get_inode(SB, S_IFDIR | 0777, 0, 0, 0, &name);
+ if (!inode) {
+ return (-ENOMEM);
+ }
+
+ novfs_root = d_alloc_root(inode);
+
+ if (!novfs_root) {
+ iput(inode);
+ return (-ENOMEM);
+ }
+ novfs_root->d_time = jiffies + (novfs_update_timeout * HZ);
+
+ inode->i_atime = inode->i_ctime = inode->i_mtime = CURRENT_TIME;
+
+ SB->s_root = novfs_root;
+
+ DbgPrint("root 0x%p", novfs_root);
+
+ if (novfs_root) {
+ novfs_root->d_op = &novfs_dentry_operations;
+
+ name.name = SERVER_DIRECTORY_NAME;
+ name.len = strlen(SERVER_DIRECTORY_NAME);
+ name.hash = novfs_internal_hash(&name);
+
+ inode = novfs_get_inode(SB, S_IFDIR | 0777, 0, 0, 0, &name);
+ if (inode) {
+ info.mode = inode->i_mode;
+ info.namelength = 0;
+ inode->i_size = info.size = 0;
+ inode->i_uid = info.uid = 0;
+ inode->i_gid = info.gid = 0;
+ inode->i_atime = info.atime = inode->i_ctime = info.ctime = inode->i_mtime = info.mtime = CURRENT_TIME;
+
+ server = d_alloc(novfs_root, &name);
+ if (server) {
+ server->d_op = &novfs_dentry_operations;
+ server->d_time = 0xffffffff;
+ d_add(server, inode);
+ DbgPrint("d_add %s 0x%p", SERVER_DIRECTORY_NAME, server);
+ novfs_add_inode_entry(novfs_root->d_inode, &name, inode->i_ino, &info);
+ }
+ }
+
+ name.name = TREE_DIRECTORY_NAME;
+ name.len = strlen(TREE_DIRECTORY_NAME);
+ name.hash = novfs_internal_hash(&name);
+
+ inode = novfs_get_inode(SB, S_IFDIR | 0777, 0, 0, 0, &name);
+ if (inode) {
+ info.mode = inode->i_mode;
+ info.namelength = 0;
+ inode->i_size = info.size = 0;
+ inode->i_uid = info.uid = 0;
+ inode->i_gid = info.gid = 0;
+ inode->i_atime = info.atime = inode->i_ctime = info.ctime = inode->i_mtime = info.mtime = CURRENT_TIME;
+ tree = d_alloc(novfs_root, &name);
+ if (tree) {
+ tree->d_op = &novfs_dentry_operations;
+ tree->d_time = 0xffffffff;
+
+ d_add(tree, inode);
+ DbgPrint("d_add %s 0x%p", TREE_DIRECTORY_NAME, tree);
+ novfs_add_inode_entry(novfs_root->d_inode, &name, inode->i_ino, &info);
+ }
+ }
+ }
+
+ return (0);
+}
+
+static int novfs_get_sb(struct file_system_type *Fstype, int Flags, const char *Dev_name, void *Data, struct vfsmount *Mnt)
+{
+	DbgPrint("Fstype=0x%p Dev_name=%s", Fstype, Dev_name);
+ return get_sb_nodev(Fstype, Flags, Data, novfs_fill_super, Mnt);
+}
+
+static void novfs_kill_sb(struct super_block *super)
+{
+ shrink_dcache_sb(super);
+ kill_litter_super(super);
+}
+
+ssize_t novfs_Control_read(struct file *file, char *buf, size_t nbytes, loff_t * ppos)
+{
+ ssize_t retval = 0;
+
+ DbgPrint("kernel_locked 0x%x", kernel_locked());
+
+ return retval;
+}
+
+ssize_t novfs_Control_write(struct file * file, const char *buf, size_t nbytes, loff_t * ppos)
+{
+ ssize_t retval = 0;
+
+ DbgPrint("kernel_locked 0x%x", kernel_locked());
+ if (buf && nbytes) {
+ }
+
+ return (retval);
+}
+
+static struct file_system_type novfs_fs_type = {
+ .name = "novfs",
+ .get_sb = novfs_get_sb,
+ .kill_sb = novfs_kill_sb,
+ .owner = THIS_MODULE,
+};
+
+int __init init_novfs(void)
+{
+ int retCode;
+
+ lastDir[0] = 0;
+ lastTime = get_nanosecond_time();
+
+ inHAX = 0;
+ inHAXTime = get_nanosecond_time();
+
+ retCode = bdi_init(&novfs_backing_dev_info);
+
+ if (!retCode)
+ retCode = bdi_register(&novfs_backing_dev_info, NULL, "novfs-map");
+ if (retCode) {
+ bdi_destroy(&novfs_backing_dev_info);
+ goto bdi_fail;
+ }
+
+ retCode = novfs_proc_init();
+
+ novfs_profile_init();
+
+ if (!retCode) {
+ DbgPrint("%s %s %s", __DATE__, __TIME__, NOVFS_VERSION_STRING);
+ novfs_daemon_queue_init();
+ novfs_scope_init();
+ retCode = register_filesystem(&novfs_fs_type);
+ if (retCode) {
+ novfs_proc_exit();
+ novfs_daemon_queue_exit();
+ novfs_scope_exit();
+ }
+ }
+
+bdi_fail:
+ return (retCode);
+}
+
+void __exit exit_novfs(void)
+{
+ novfs_scope_exit();
+ novfs_daemon_queue_exit();
+ novfs_profile_exit();
+ novfs_proc_exit();
+ unregister_filesystem(&novfs_fs_type);
+
+ if (novfs_current_mnt) {
+ kfree(novfs_current_mnt);
+ novfs_current_mnt = NULL;
+ }
+
+ bdi_destroy(&novfs_backing_dev_info);
+}
+
+int novfs_lock_inode_cache(struct inode *i)
+{
+ struct inode_data *id;
+ int retVal = 0;
+
+ DbgPrint("0x%p", i);
+ if (i && (id = i->i_private) && id->DirCache.next) {
- down(&id->DirCacheLock);
++ mutex_lock(&id->DirCacheLock);
+ retVal = 1;
+ }
+ DbgPrint("return %d", retVal);
+ return (retVal);
+}
+
+void novfs_unlock_inode_cache(struct inode *i)
+{
+ struct inode_data *id;
+
+ if (i && (id = i->i_private) && id->DirCache.next) {
- up(&id->DirCacheLock);
++ mutex_unlock(&id->DirCacheLock);
+ }
+}
+
+int novfs_enumerate_inode_cache(struct inode *i, struct list_head **iteration, ino_t * ino, struct novfs_entry_info *info)
+/*
+ * Arguments: struct inode *i - pointer to directory inode
+ *
+ * Returns: 0 - item found
+ * -1 - done
+ *
+ * Abstract: Returns the next entry from the inode's directory cache.
+ *
+ * Notes: DirCacheLock should be held before calling this routine.
+ *========================================================================*/
+{
+ struct inode_data *id;
+ struct novfs_dir_cache *dc;
+ struct list_head *l = NULL;
+ int retVal = -1;
+
+ if (i && (id = i->i_private) && id->DirCache.next) {
+ if ((NULL == iteration) || (NULL == *iteration)) {
+ l = id->DirCache.next;
+ } else {
+ l = *iteration;
+ }
+
+ if (l == &id->DirCache) {
+ l = NULL;
+ } else {
+ dc = list_entry(l, struct novfs_dir_cache, list);
+
+ *ino = dc->ino;
+ info->type = 0;
+ info->mode = dc->mode;
+ info->size = dc->size;
+ info->atime = dc->atime;
+ info->mtime = dc->mtime;
+ info->ctime = dc->ctime;
+ info->namelength = dc->nameLen;
+ memcpy(info->name, dc->name, dc->nameLen);
+ info->name[dc->nameLen] = '\0';
+ retVal = 0;
+
+ l = l->next;
+ }
+ }
+ *iteration = l;
+ return (retVal);
+}
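+
+/*
+ * A sketch of the expected enumeration pattern (process_entry() is only a
+ * placeholder for caller-specific handling):
+ *
+ *	struct list_head *iter = NULL;
+ *
+ *	if (novfs_lock_inode_cache(dir)) {
+ *		while (!novfs_enumerate_inode_cache(dir, &iter, &ino, &info))
+ *			process_entry(&info);
+ *		novfs_unlock_inode_cache(dir);
+ *	}
+ */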
+
+/* DirCacheLock should be held before calling this routine. */
+int novfs_get_entry(struct inode *i, struct qstr *name, ino_t * ino, struct novfs_entry_info *info)
+{
+ struct inode_data *id;
+ struct novfs_dir_cache *dc;
+ int retVal = -1;
+ char *n = "<NULL>";
+ int nl = 6;
+
+ if (i && (id = i->i_private) && id->DirCache.next) {
+ if (name && name->len) {
+ n = (char *)name->name;
+ nl = name->len;
+ }
+
+ dc = novfs_lookup_inode_cache(i, name, *ino);
+ if (dc) {
+ dc->flags |= ENTRY_VALID;
+ retVal = 0;
+ *ino = dc->ino;
+ info->type = 0;
+ info->mode = dc->mode;
+ info->size = dc->size;
+ info->atime = dc->atime;
+ info->mtime = dc->mtime;
+ info->ctime = dc->ctime;
+ info->namelength = dc->nameLen;
+ memcpy(info->name, dc->name, dc->nameLen);
+ info->name[dc->nameLen] = '\0';
+ retVal = 0;
+ }
+
+ DbgPrint("inode: 0x%p; name: %.*s; ino: %d\n", i, nl, n, *ino);
+ }
+ DbgPrint("return %d", retVal);
+ return (retVal);
+}
+
+/* DirCacheLock should be held before calling this routine. */
+int novfs_get_entry_by_pos(struct inode *i, loff_t pos, ino_t * ino, struct novfs_entry_info *info)
+{
+ int retVal = -1;
+ loff_t count = 0;
+ loff_t i_pos = pos - 2;
+ struct list_head *inter = NULL;
+ while (!novfs_enumerate_inode_cache(i, &inter, ino, info)) {
+ DbgPrint("info->name = %s", info->name);
+ if (count == i_pos) {
+ retVal = 0;
+ break;
+ } else
+ count++;
+ }
+
+ return retVal;
+}
+
+/* DirCacheLock should be held before calling this routine. */
+int novfs_get_entry_time(struct inode *i, struct qstr *name, ino_t * ino, struct novfs_entry_info *info, u64 * EntryTime)
+{
+ struct inode_data *id;
+ struct novfs_dir_cache *dc;
+ int retVal = -1;
+ char *n = "<NULL>";
+ int nl = 6;
+
+ if (i && (id = i->i_private) && id->DirCache.next) {
+ if (name && name->len) {
+ n = (char *)name->name;
+ nl = name->len;
+ }
+ DbgPrint("inode: 0x%p; name: %.*s; ino: %d", i, nl, n, *ino);
+
+ dc = novfs_lookup_inode_cache(i, name, *ino);
+ if (dc) {
+ retVal = 0;
+ *ino = dc->ino;
+ info->type = 0;
+ info->mode = dc->mode;
+ info->size = dc->size;
+ info->atime = dc->atime;
+ info->mtime = dc->mtime;
+ info->ctime = dc->ctime;
+ info->namelength = dc->nameLen;
+ memcpy(info->name, dc->name, dc->nameLen);
+ info->name[dc->nameLen] = '\0';
+ if (EntryTime) {
+ *EntryTime = dc->jiffies;
+ }
+ retVal = 0;
+ }
+ }
+ DbgPrint("return %d", retVal);
+ return (retVal);
+}
+
+/*
+ * Abstract: This routine will return the first entry on the list
+ * and then remove it.
+ *
+ * Notes: DirCacheLock should be held before calling this routine.
+ *
+ */
+int novfs_get_remove_entry(struct inode *i, ino_t * ino, struct novfs_entry_info *info)
+{
+ struct inode_data *id;
+ struct novfs_dir_cache *dc;
+ struct list_head *l = NULL;
+ int retVal = -1;
+
+ if (i && (id = i->i_private) && id->DirCache.next) {
+ l = id->DirCache.next;
+
+ if (l != &id->DirCache) {
+ dc = list_entry(l, struct novfs_dir_cache, list);
+
+ *ino = dc->ino;
+ info->type = 0;
+ info->mode = dc->mode;
+ info->size = dc->size;
+ info->atime = dc->atime;
+ info->mtime = dc->mtime;
+ info->ctime = dc->ctime;
+ info->namelength = dc->nameLen;
+ memcpy(info->name, dc->name, dc->nameLen);
+ info->name[dc->nameLen] = '\0';
+ retVal = 0;
+
+ list_del(&dc->list);
+ kfree(dc);
+ DCCount--;
+
+ id->cntDC--;
+ }
+ }
+ return (retVal);
+}
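+
+/*
+ * Draining the cache is expected to look like the following (a sketch only,
+ * with DirCacheLock held by the caller and handle_entry() a placeholder):
+ *
+ *	while (!novfs_get_remove_entry(dir, &ino, info))
+ *		handle_entry(info);
+ */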
+
+/*
+ * Abstract: Marks all entries in the directory cache as invalid.
+ *
+ * Notes: DirCacheLock should be held before calling this routine.
+ *
+ *========================================================================*/
+void novfs_invalidate_inode_cache(struct inode *i)
+{
+ struct inode_data *id;
+ struct novfs_dir_cache *dc;
+ struct list_head *l;
+
+ if (i && (id = i->i_private) && id->DirCache.next) {
+ list_for_each(l, &id->DirCache) {
+ dc = list_entry(l, struct novfs_dir_cache, list);
+ dc->flags &= ~ENTRY_VALID;
+ }
+ }
+}
+
+/*++======================================================================*/
+struct novfs_dir_cache *novfs_lookup_inode_cache(struct inode *i, struct qstr *name, ino_t ino)
+/*
+ * Returns: struct novfs_dir_cache entry if match
+ * NULL - if there is no match.
+ *
+ * Abstract: Checks an inode's directory cache to see if there are any
+ * entries matching name or ino. If name is specified then ino is
+ * not used; ino is used only if name is not specified.
+ *
+ * Notes: DirCacheLock should be held before calling this routine.
+ *
+ *========================================================================*/
+{
+ struct inode_data *id;
+ struct novfs_dir_cache *dc, *retVal = NULL;
+ struct list_head *l;
+ char *n = "<NULL>";
+ int nl = 6;
+ int hash = 0;
+
+ if (i && (id = i->i_private) && id->DirCache.next) {
+ if (name && name->name) {
+ nl = name->len;
+ n = (char *)name->name;
+ hash = name->hash;
+ }
+ DbgPrint("inode: 0x%p; name: %.*s; hash: 0x%x;\n" " len: %d; ino: %d", i, nl, n, hash, nl, ino);
+
+ list_for_each(l, &id->DirCache) {
+ dc = list_entry(l, struct novfs_dir_cache, list);
+ if (name) {
+
+/* DbgPrint("novfs_lookup_inode_cache: 0x%p\n" \
+ " ino: %d\n" \
+ " hash: 0x%x\n" \
+ " len: %d\n" \
+ " name: %.*s\n",
+ dc, dc->ino, dc->hash, dc->nameLen, dc->nameLen, dc->name);
+*/
+ if ((name->hash == dc->hash) &&
+ (name->len == dc->nameLen) && (0 == memcmp(name->name, dc->name, name->len))) {
+ retVal = dc;
+ break;
+ }
+ } else {
+ if (ino == dc->ino) {
+ retVal = dc;
+ break;
+ }
+ }
+ }
+ }
+
+ DbgPrint("return 0x%p", retVal);
+ return (retVal);
+}
+
+/*
+ * Checks an inode's directory cache to see if there are any entries matching
+ * name or ino. If an entry is found its valid bit is set.
+ *
+ * DirCacheLock should be held before calling this routine.
+ */
+int novfs_lookup_validate(struct inode *i, struct qstr *name, ino_t ino)
+{
+ struct inode_data *id;
+ struct novfs_dir_cache *dc;
+ int retVal = -1;
+ char *n = "<NULL>";
+ int nl = 6;
+
+ if (i && (id = i->i_private) && id->DirCache.next) {
+ if (name && name->len) {
+ n = (char *)name->name;
+ nl = name->len;
+ }
+ DbgPrint("inode: 0x%p; name: %.*s; ino: %d", i, nl, n, ino);
+
+ dc = novfs_lookup_inode_cache(i, name, ino);
+ if (dc) {
+ dc->flags |= ENTRY_VALID;
+ retVal = 0;
+ }
+ }
+ return (retVal);
+}
+
+/*
+ * Adds an entry to the directory cache.
+ *
+ * DirCacheLock should be held before calling this routine.
+ */
+int novfs_add_inode_entry(struct inode *i, struct qstr *name, ino_t ino, struct novfs_entry_info *info)
+{
+ struct inode_data *id;
+ struct novfs_dir_cache *new;
+ int retVal = -ENOMEM;
+
+ //SClark
+ DbgPrint("i: %p", i);
+	if (i && (id = i->i_private)) {
+ DbgPrint("i->i_private: %p", id);
+ if (id->DirCache.next)
+ DbgPrint("id->DirCache.next: %p", id->DirCache.next);
+ }
+ //SClark
+
+ if (i && (id = i->i_private) && id->DirCache.next) {
+ new = kmalloc(sizeof(struct novfs_dir_cache) + name->len, GFP_KERNEL);
+ if (new) {
+ id->cntDC++;
+
+ DCCount++;
+ DbgPrint("inode: 0x%p; id: 0x%p; DC: 0x%p; new: 0x%p; "
+ "name: %.*s; ino: %d; size: %lld; mode: 0x%x",
+ i, id, &id->DirCache, new, name->len, name->name, ino, info->size, info->mode);
+
+ retVal = 0;
+ new->flags = ENTRY_VALID;
+ new->jiffies = get_jiffies_64();
+ new->size = info->size;
+ new->mode = info->mode;
+ new->atime = info->atime;
+ new->mtime = info->mtime;
+ new->ctime = info->ctime;
+ new->ino = ino;
+ new->hash = name->hash;
+ new->nameLen = name->len;
+ memcpy(new->name, name->name, name->len);
+ new->name[new->nameLen] = '\0';
+ list_add(&new->list, &id->DirCache);
+ }
+ }
+ return (retVal);
+}
+
+/*
+ * DirCacheLock should be held before calling this routine.
+ */
+int novfs_update_entry(struct inode *i, struct qstr *name, ino_t ino, struct novfs_entry_info *info)
+{
+ struct inode_data *id;
+ struct novfs_dir_cache *dc;
+ int retVal = -1;
+ char *n = "<NULL>";
+ int nl = 6;
+ char atime_buf[32];
+ char mtime_buf[32];
+ char ctime_buf[32];
+
+ if (i && (id = i->i_private) && id->DirCache.next) {
+
+ if (name && name->len) {
+ n = (char *)name->name;
+ nl = name->len;
+ }
+ ctime_r(&info->atime.tv_sec, atime_buf);
+ ctime_r(&info->mtime.tv_sec, mtime_buf);
+ ctime_r(&info->ctime.tv_sec, ctime_buf);
+ DbgPrint("inode: 0x%p; name: %.*s; ino: %d; size: %lld; "
+ "atime: %s; mtime: %s; ctime: %s", i, nl, n, ino, info->size, atime_buf, mtime_buf, ctime_buf);
+
+ dc = novfs_lookup_inode_cache(i, name, ino);
+ if (dc) {
+ retVal = 0;
+ dc->flags = ENTRY_VALID;
+ dc->jiffies = get_jiffies_64();
+ dc->size = info->size;
+ dc->mode = info->mode;
+ dc->atime = info->atime;
+ dc->mtime = info->mtime;
+ dc->ctime = info->ctime;
+
+ ctime_r(&dc->atime.tv_sec, atime_buf);
+ ctime_r(&dc->mtime.tv_sec, mtime_buf);
+ ctime_r(&dc->ctime.tv_sec, ctime_buf);
+ DbgPrint("entry: 0x%p; flags: 0x%x; jiffies: %lld; "
+ "ino: %d; size: %lld; mode: 0%o; atime: %s; "
+ "mtime: %s %d; ctime: %s; hash: 0x%x; "
+ " nameLen: %d; name: %s",
+ dc, dc->flags, dc->jiffies, dc->ino, dc->size,
+ dc->mode, atime_buf, mtime_buf, dc->mtime.tv_nsec, ctime_buf, dc->hash, dc->nameLen, dc->name);
+ }
+ }
+ DbgPrint("return %d", retVal);
+ return (retVal);
+}
+
+/*
+ * Removes entry from directory cache. You can specify a name
+ * or an inode number.
+ *
+ * DirCacheLock should be held before calling this routine.
+ */
+void novfs_remove_inode_entry(struct inode *i, struct qstr *name, ino_t ino)
+{
+ struct inode_data *id;
+ struct novfs_dir_cache *dc;
+ char *n = "<NULL>";
+ int nl = 6;
+
+ if (i && (id = i->i_private) && id->DirCache.next) {
+ dc = novfs_lookup_inode_cache(i, name, ino);
+ if (dc) {
+ if (name && name->name) {
+ nl = name->len;
+ n = (char *)name->name;
+ }
+ DbgPrint("inode: 0x%p; id: 0x%p; DC: 0x%p; "
+ "name: %.*s; ino: %d entry: 0x%p "
+ "[name: %.*s; ino: %d; next: 0x%p; "
+ "prev: 0x%p]",
+ i, id, &id->DirCache, nl, n, ino, dc,
+ dc->nameLen, dc->name, dc->ino, dc->list.next, dc->list.prev);
+ list_del(&dc->list);
+ kfree(dc);
+ DCCount--;
+
+ id->cntDC--;
+ }
+ }
+}
+
+/*
+ * Frees all invalid entries in the directory cache.
+ *
+ * DirCacheLock should be held before calling this routine.
+ */
+void novfs_free_invalid_entries(struct inode *i)
+{
+ struct inode_data *id;
+ struct novfs_dir_cache *dc;
+ struct list_head *l;
+
+ if (i && (id = i->i_private) && id->DirCache.next) {
+ list_for_each(l, &id->DirCache) {
+ dc = list_entry(l, struct novfs_dir_cache, list);
+ if (0 == (dc->flags & ENTRY_VALID)) {
+ DbgPrint("inode: 0x%p; id: 0x%p; entry: 0x%p; "
+ "name: %.*s; ino: %d", i, id, dc, dc->nameLen, dc->name, dc->ino);
+ l = l->prev;
+ list_del(&dc->list);
+ kfree(dc);
+ DCCount--;
+
+ id->cntDC--;
+ }
+ }
+ }
+}
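+
+/*
+ * Taken together, novfs_invalidate_inode_cache(), novfs_update_entry() /
+ * novfs_add_inode_entry() and novfs_free_invalid_entries() form a
+ * mark-and-sweep refresh of a directory's cache (as the ENTRY_VALID handling
+ * above suggests): invalidate everything, re-add or update what the server
+ * reports (setting ENTRY_VALID again), then free whatever was not
+ * revalidated.  Each step expects DirCacheLock to be held.
+ */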
+
+/*
+ * Frees all entries in the inode cache.
+ *
+ * DirCacheLock should be held before calling this routine.
+ */
+void novfs_free_inode_cache(struct inode *i)
+{
+ struct inode_data *id;
+ struct novfs_dir_cache *dc;
+ struct list_head *l;
+
+ if (i && (id = i->i_private) && id->DirCache.next) {
+ list_for_each(l, &id->DirCache) {
+ dc = list_entry(l, struct novfs_dir_cache, list);
+ l = l->prev;
+ list_del(&dc->list);
+ kfree(dc);
+ DCCount--;
+
+ id->cntDC--;
+ }
+ }
+}
+
+void novfs_dump_inode(void *pf)
+{
+ struct inode *inode;
+ void (*pfunc) (char *Fmt, ...) = pf;
+ struct inode_data *id;
+ struct novfs_dir_cache *dc;
+ struct list_head *il, *l;
+ char atime_buf[32];
+ char mtime_buf[32];
+ char ctime_buf[32];
+ unsigned long icnt = 0, dccnt = 0;
+
- down(&InodeList_lock);
++ mutex_lock(&InodeList_lock);
+ list_for_each(il, &InodeList) {
+ id = list_entry(il, struct inode_data, IList);
+ inode = id->Inode;
+ if (inode) {
+ icnt++;
+
+ pfunc("Inode=0x%p I_ino=%d\n", inode, inode->i_ino);
+
+ pfunc(" atime=%s\n", ctime_r(&inode->i_atime.tv_sec, atime_buf));
+			pfunc("  mtime=%s\n", ctime_r(&inode->i_mtime.tv_sec, mtime_buf));
+			pfunc("  ctime=%s\n", ctime_r(&inode->i_ctime.tv_sec, ctime_buf));
+ pfunc(" size=%lld\n", inode->i_size);
+ pfunc(" mode=0%o\n", inode->i_mode);
+			pfunc("  count=%d\n", atomic_read(&inode->i_count));
+ }
+
+		pfunc("  novfs inode_data: 0x%p Name=%s Scope=0x%p\n", id, id->Name, id->Scope);
+
+ if (id->DirCache.next) {
+ list_for_each(l, &id->DirCache) {
+ dccnt++;
+ dc = list_entry(l, struct novfs_dir_cache, list);
+ ctime_r(&dc->atime.tv_sec, atime_buf);
+ ctime_r(&dc->mtime.tv_sec, mtime_buf);
+ ctime_r(&dc->ctime.tv_sec, ctime_buf);
+
+ pfunc(" Cache Entry: 0x%p\n"
+ " flags: 0x%x\n"
+ " jiffies: %llu\n"
+ " ino: %u\n"
+ " size: %llu\n"
+ " mode: 0%o\n"
+ " atime: %s\n"
+ " mtime: %s\n"
+ " ctime: %s\n"
+ " hash: 0x%x\n"
+ " len: %d\n"
+ " name: %s\n",
+ dc, dc->flags, dc->jiffies,
+ dc->ino, dc->size, dc->mode,
+ atime_buf, mtime_buf, ctime_buf, dc->hash, dc->nameLen, dc->name);
+ }
+ }
+ }
- up(&InodeList_lock);
++ mutex_unlock(&InodeList_lock);
+
+ pfunc("Inodes: %d(%d) DirCache: %d(%d)\n", InodeCount, icnt, DCCount, dccnt);
+
+}
+
+module_init(init_novfs);
+module_exit(exit_novfs);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Novell Inc.");
+MODULE_DESCRIPTION("Novell NetWare Client for Linux");
+MODULE_VERSION(NOVFS_VERSION_STRING);
--- /dev/null
+/*
+ * Novell NCP Redirector for Linux
+ * Author: James Turner
+ *
+ * This file contains debugging code for the novfs VFS.
+ *
+ * Copyright (C) 2005 Novell, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <asm/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/time.h>
+
+#include <linux/profile.h>
+#include <linux/notifier.h>
+
+#include "vfs.h"
+
+/*===[ Manifest constants ]===============================================*/
+#define DBGBUFFERSIZE (1024*1024*32)
+
+/*===[ Type definitions ]=================================================*/
+struct local_rtc_time {
+ int tm_sec;
+ int tm_min;
+ int tm_hour;
+ int tm_mday;
+ int tm_mon;
+ int tm_year;
+ int tm_wday;
+ int tm_yday;
+ int tm_isdst;
+};
+
+char *DbgPrintBuffer = NULL;
+char DbgPrintOn = 0;
+char DbgSyslogOn = 0;
+char DbgProfileOn = 0;
+
+static unsigned long DbgPrintBufferOffset = 0;
+static unsigned long DbgPrintBufferReadOffset = 0;
+static unsigned long DbgPrintBufferSize = DBGBUFFERSIZE;
+
+static struct file_operations Dbg_proc_file_operations;
+static struct file_operations dentry_proc_file_ops;
+static struct file_operations inode_proc_file_ops;
+
+static struct proc_dir_entry *dbg_dir = NULL;
+static struct proc_dir_entry *dbg_file = NULL;
+static struct proc_dir_entry *dentry_file = NULL;
+static struct proc_dir_entry *inode_file = NULL;
+
- static DECLARE_MUTEX(LocalPrint_lock);
++static DEFINE_MUTEX(LocalPrint_lock);
+
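+/*
+ * Writes to the /proc "Debug" file are parsed as simple commands (see the
+ * handler below): "on", "off" and "reset" control the in-kernel trace buffer,
+ * while "syslog on|off", "novfsd <cmd>", "file_update_timeout <secs>",
+ * "cache on|off" and "profile on|off" tune the corresponding options.
+ */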
+static ssize_t User_proc_write_DbgBuffer(struct file *file, const char __user * buf, size_t nbytes, loff_t * ppos)
+{
+ ssize_t retval = nbytes;
+	u_char *lbuf, *p;
+	int i;
+
+ lbuf = kmalloc(nbytes + 1, GFP_KERNEL);
+ if (lbuf) {
+		if (copy_from_user(lbuf, buf, nbytes)) {
+			kfree(lbuf);
+			return -EFAULT;
+		}
+
+		lbuf[nbytes] = 0;
+ DbgPrint("%s", lbuf);
+
+ for (i = 0; lbuf[i] && lbuf[i] != '\n'; i++) ;
+
+ if ('\n' == lbuf[i]) {
+ lbuf[i] = '\0';
+ }
+
+ if (!strcmp("on", lbuf)) {
+ DbgPrintBufferOffset = DbgPrintBufferReadOffset = 0;
+ DbgPrintOn = 1;
+ } else if (!strcmp("off", lbuf)) {
+ DbgPrintOn = 0;
+ } else if (!strcmp("reset", lbuf)) {
+ DbgPrintBufferOffset = DbgPrintBufferReadOffset = 0;
+ } else if (NULL != (p = strchr(lbuf, ' '))) {
+ *p++ = '\0';
+ if (!strcmp("syslog", lbuf)) {
+
+ if (!strcmp("on", p)) {
+ DbgSyslogOn = 1;
+ } else if (!strcmp("off", p)) {
+ DbgSyslogOn = 0;
+ }
+ } else if (!strcmp("novfsd", lbuf)) {
+ novfs_daemon_debug_cmd_send(p);
+ } else if (!strcmp("file_update_timeout", lbuf)) {
+ novfs_update_timeout = simple_strtoul(p, NULL, 0);
+ } else if (!strcmp("cache", lbuf)) {
+ if (!strcmp("on", p)) {
+ novfs_page_cache = 1;
+ } else if (!strcmp("off", p)) {
+ novfs_page_cache = 0;
+ }
+ } else if (!strcmp("profile", lbuf)) {
+ if (!strcmp("on", p)) {
+ DbgProfileOn = 1;
+ } else if (!strcmp("off", p)) {
+ DbgProfileOn = 0;
+ }
+ }
+ }
+ kfree(lbuf);
+ }
+
+ return (retval);
+}
+
+static ssize_t User_proc_read_DbgBuffer(struct file *file, char *buf, size_t nbytes, loff_t * ppos)
+{
+ ssize_t retval = 0;
+ size_t count;
+
+ if (0 != (count = DbgPrintBufferOffset - DbgPrintBufferReadOffset)) {
+
+ if (count > nbytes) {
+ count = nbytes;
+ }
+
+ count -= copy_to_user(buf, &DbgPrintBuffer[DbgPrintBufferReadOffset], count);
+
+ if (count == 0) {
+ if (retval == 0)
+ retval = -EFAULT;
+ } else {
+ DbgPrintBufferReadOffset += count;
+ if (DbgPrintBufferReadOffset >= DbgPrintBufferOffset) {
+ DbgPrintBufferOffset = DbgPrintBufferReadOffset = 0;
+ }
+ retval = count;
+ }
+ }
+
+ return retval;
+}
+
+static int proc_read_DbgBuffer(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ int len;
+
+ printk(KERN_ALERT "proc_read_DbgBuffer: off=%ld count=%d DbgPrintBufferOffset=%lu DbgPrintBufferReadOffset=%lu\n", off,
+ count, DbgPrintBufferOffset, DbgPrintBufferReadOffset);
+
+ len = DbgPrintBufferOffset - DbgPrintBufferReadOffset;
+
+ if ((int)(DbgPrintBufferOffset - DbgPrintBufferReadOffset) > count)
+ len = count;
+
+ if (len) {
+ memcpy(page, &DbgPrintBuffer[DbgPrintBufferReadOffset], len);
+ DbgPrintBufferReadOffset += len;
+ }
+
+ if (DbgPrintBufferReadOffset >= DbgPrintBufferOffset)
+ DbgPrintBufferOffset = DbgPrintBufferReadOffset = 0;
+
+ printk(KERN_ALERT "proc_read_DbgBuffer: return %d\n", len);
+
+ return len;
+}
+
+#define DBG_BUFFER_SIZE (2*1024)
+
+static int LocalPrint(char *Fmt, ...)
+{
+ int len = 0;
+ va_list args;
+
+	if (DbgPrintBuffer) {
+		va_start(args, Fmt);
+		len += vsnprintf(DbgPrintBuffer + DbgPrintBufferOffset, DbgPrintBufferSize - DbgPrintBufferOffset, Fmt, args);
+		va_end(args);
+		DbgPrintBufferOffset += len;
+	}
+
+ return (len);
+}
+
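+/*
+ * Formats one trace line, prefixed with the current pid and the call site,
+ * and appends it to DbgPrintBuffer; if the line would overflow the buffer,
+ * the write offset wraps back to the start.  The line is also echoed to the
+ * kernel log when DbgSyslogOn is set.
+ */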
+int ___DbgPrint(const char *site, const char *Fmt, ...)
+{
+ char *buf;
+ int len = 0;
+ unsigned long offset;
+ va_list args;
+
+ if ((DbgPrintBuffer && DbgPrintOn) || DbgSyslogOn) {
+ buf = kmalloc(DBG_BUFFER_SIZE, GFP_KERNEL);
+
+ if (buf) {
+			va_start(args, Fmt);
+			len = snprintf(buf, DBG_BUFFER_SIZE, "[%d] %s ", current->pid, site);
+			len += vsnprintf(buf + len, DBG_BUFFER_SIZE - len, Fmt, args);
+			va_end(args);
+ if (-1 == len) {
+ len = DBG_BUFFER_SIZE - 1;
+ buf[len] = '\0';
+ }
+ /*
+ len = sprintf(&DbgPrintBuffer[offset], "[%llu] ", ts);
+ len += vsprintf(&DbgPrintBuffer[offset+len], Fmt, args);
+ */
+
+ if (len) {
+ if (DbgSyslogOn) {
+ printk("<6>%s", buf);
+ }
+
+ if (DbgPrintBuffer && DbgPrintOn) {
+ if ((DbgPrintBufferOffset + len) > DbgPrintBufferSize) {
+ offset = DbgPrintBufferOffset;
+ DbgPrintBufferOffset = 0;
+ memset(&DbgPrintBuffer[offset], 0, DbgPrintBufferSize - offset);
+ }
+
+ mb();
+
+ if ((DbgPrintBufferOffset + len) < DbgPrintBufferSize) {
+ DbgPrintBufferOffset += len;
+ offset = DbgPrintBufferOffset - len;
+ memcpy(&DbgPrintBuffer[offset], buf, len + 1);
+ }
+ }
+ }
+ kfree(buf);
+ }
+ }
+
+ return (len);
+}
+
+static void doline(unsigned char *b, unsigned char *e, unsigned char *l)
+{
+ unsigned char c;
+
+ *b++ = ' ';
+
+ while (l < e) {
+ c = *l++;
+ if ((c < ' ') || (c > '~')) {
+ c = '.';
+ }
+ *b++ = c;
+ *b = '\0';
+ }
+}
+
+void novfs_dump(int size, void *dumpptr)
+{
+ unsigned char *ptr = (unsigned char *)dumpptr;
+ unsigned char *line = NULL, buf[100], *bptr = buf;
+ int i;
+
+ if (DbgPrintBuffer || DbgSyslogOn) {
+ if (size) {
+ for (i = 0; i < size; i++) {
+ if (0 == (i % 16)) {
+ if (line) {
+ doline(bptr, ptr, line);
+ __DbgPrint("%s\n", buf);
+ bptr = buf;
+ }
+ bptr += sprintf(bptr, "0x%p: ", ptr);
+ line = ptr;
+ }
+ bptr += sprintf(bptr, "%02x ", *ptr++);
+ }
+ doline(bptr, ptr, line);
+ __DbgPrint("%s\n", buf);
+ }
+ }
+}
+
+#define FEBRUARY 2
+#define STARTOFTIME 1970
+#define SECDAY 86400L
+#define SECYR (SECDAY * 365)
+#define leapyear(year) ((year) % 4 == 0)
+#define days_in_year(a) (leapyear(a) ? 366 : 365)
+#define days_in_month(a) (month_days[(a) - 1])
+
+static int month_days[12] = {
+ 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
+};
+
+/*
+ * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
+ */
+static void NovfsGregorianDay(struct local_rtc_time *tm)
+{
+ int leapsToDate;
+ int lastYear;
+ int day;
+ int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };
+
+ lastYear = tm->tm_year - 1;
+
+ /*
+ * Number of leap corrections to apply up to end of last year
+ */
+ leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;
+
+ /*
+ * This year is a leap year if it is divisible by 4 except when it is
+ * divisible by 100 unless it is divisible by 400
+ *
+ * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 will be
+ */
+ if ((tm->tm_year % 4 == 0) && ((tm->tm_year % 100 != 0) || (tm->tm_year % 400 == 0)) && (tm->tm_mon > 2)) {
+ /*
+ * We are past Feb. 29 in a leap year
+ */
+ day = 1;
+ } else {
+ day = 0;
+ }
+
+ day += lastYear * 365 + leapsToDate + MonthOffset[tm->tm_mon - 1] + tm->tm_mday;
+
+ tm->tm_wday = day % 7;
+}
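+
+/*
+ * Quick sanity check of the computation above: for 1 January 1970
+ * (tm_year = 1970, tm_mon = 1, tm_mday = 1) the sum is
+ * 1969 * 365 + 477 + 0 + 1 = 719163 and 719163 % 7 = 4, i.e. Thursday,
+ * which is indeed the day of week of the Unix epoch.
+ */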
+
+static void private_to_tm(int tim, struct local_rtc_time *tm)
+{
+ register int i;
+ register long hms, day;
+
+ day = tim / SECDAY;
+ hms = tim % SECDAY;
+
+ /* Hours, minutes, seconds are easy */
+ tm->tm_hour = hms / 3600;
+ tm->tm_min = (hms % 3600) / 60;
+ tm->tm_sec = (hms % 3600) % 60;
+
+ /* Number of years in days */
+ for (i = STARTOFTIME; day >= days_in_year(i); i++)
+ day -= days_in_year(i);
+ tm->tm_year = i;
+
+ /* Number of months in days left */
+ if (leapyear(tm->tm_year))
+ days_in_month(FEBRUARY) = 29;
+ for (i = 1; day >= days_in_month(i); i++)
+ day -= days_in_month(i);
+ days_in_month(FEBRUARY) = 28;
+ tm->tm_mon = i;
+
+ /* Days are what is left over (+1) from all that. */
+ tm->tm_mday = day + 1;
+
+ /*
+ * Determine the day of week
+ */
+ NovfsGregorianDay(tm);
+}
+
+char *ctime_r(time_t * clock, char *buf)
+{
+ struct local_rtc_time tm;
+ static char *DAYOFWEEK[] = { "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat" };
+ static char *MONTHOFYEAR[] = { "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep",
+ "Oct", "Nov", "Dec"
+ };
+
+ private_to_tm(*clock, &tm);
+
+ sprintf(buf, "%s %s %d %d:%02d:%02d %d", DAYOFWEEK[tm.tm_wday],
+ MONTHOFYEAR[tm.tm_mon - 1], tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec, tm.tm_year);
+ return (buf);
+}
+
+static void dump(struct dentry *parent, void *pf)
+{
+ void (*pfunc) (char *Fmt, ...) = pf;
+ struct l {
+ struct l *next;
+ struct dentry *dentry;
+ } *l, *n, *start;
+ struct list_head *p;
+ struct dentry *d;
+ char *buf, *path, *sd;
+ char inode_number[16];
+
+ buf = (char *)kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL);
+
+ if (NULL == buf) {
+ return;
+ }
+
+ if (parent) {
+ pfunc("starting 0x%p %.*s\n", parent, parent->d_name.len, parent->d_name.name);
+ if (parent->d_subdirs.next == &parent->d_subdirs) {
+ pfunc("No children...\n");
+ } else {
+ start = kmalloc(sizeof(*start), GFP_KERNEL);
+ if (start) {
+ start->next = NULL;
+ start->dentry = parent;
+ l = start;
+ while (l) {
+ p = l->dentry->d_subdirs.next;
+ while (p != &l->dentry->d_subdirs) {
+ d = list_entry(p, struct dentry, d_u.d_child);
+ p = p->next;
+
+ if (d->d_subdirs.next != &d->d_subdirs) {
+ n = kmalloc(sizeof(*n), GFP_KERNEL);
+ if (n) {
+ n->next = l->next;
+ l->next = n;
+ n->dentry = d;
+ }
+ } else {
+ path = novfs_scope_dget_path(d, buf, PATH_LENGTH_BUFFER, 1);
+ if (path) {
+ pfunc
+ ("1-0x%p %s\n"
+ " d_name: %.*s\n"
+ " d_parent: 0x%p\n"
+ " d_count: %d\n"
+ " d_flags: 0x%x\n"
+ " d_subdirs: 0x%p\n"
+ " d_inode: 0x%p\n",
+ d, path,
+ d->d_name.len,
+ d->d_name.name,
+ d->d_parent,
+ atomic_read
+ (&d->d_count), d->d_flags, d->d_subdirs.next, d->d_inode);
+ }
+ }
+ }
+ l = l->next;
+ }
+ l = start;
+ while (l) {
+ d = l->dentry;
+ path = novfs_scope_dget_path(d, buf, PATH_LENGTH_BUFFER, 1);
+ if (path) {
+ sd = " (None)";
+ if (&d->d_subdirs != d->d_subdirs.next) {
+ sd = "";
+ }
+ inode_number[0] = '\0';
+ if (d->d_inode) {
+ sprintf(inode_number, " (%lu)", d->d_inode->i_ino);
+ }
+ pfunc("0x%p %s\n"
+ " d_parent: 0x%p\n"
+ " d_count: %d\n"
+ " d_flags: 0x%x\n"
+ " d_subdirs: 0x%p%s\n"
+ " d_inode: 0x%p%s\n",
+ d, path, d->d_parent,
+ atomic_read(&d->d_count),
+ d->d_flags, d->d_subdirs.next, sd, d->d_inode, inode_number);
+ }
+
+ n = l;
+ l = l->next;
+ kfree(n);
+ }
+ }
+ }
+ }
+
+ kfree(buf);
+
+}
+
+static ssize_t common_read(char *buf, size_t len, loff_t * off)
+{
+ ssize_t retval = 0;
+ size_t count;
+ unsigned long offset = *off;
+
+ if (0 != (count = DbgPrintBufferOffset - offset)) {
+ if (count > len) {
+ count = len;
+ }
+
+ count -= copy_to_user(buf, &DbgPrintBuffer[offset], count);
+
+ if (count == 0) {
+ retval = -EFAULT;
+ } else {
+ *off += (loff_t) count;
+ retval = count;
+ }
+ }
+ return retval;
+
+}
+
+static ssize_t novfs_profile_read_inode(struct file *file, char *buf, size_t len, loff_t * off)
+{
+ ssize_t retval = 0;
+ unsigned long offset = *off;
+ static char save_DbgPrintOn;
+
+ if (offset == 0) {
- down(&LocalPrint_lock);
++ mutex_lock(&LocalPrint_lock);
+ save_DbgPrintOn = DbgPrintOn;
+ DbgPrintOn = 0;
+
+ DbgPrintBufferOffset = DbgPrintBufferReadOffset = 0;
+ novfs_dump_inode(LocalPrint);
+ }
+
+ retval = common_read(buf, len, off);
+
+ if (0 == retval) {
+ DbgPrintOn = save_DbgPrintOn;
+ DbgPrintBufferOffset = DbgPrintBufferReadOffset = 0;
+
- up(&LocalPrint_lock);
++ mutex_unlock(&LocalPrint_lock);
+ }
+
+ return retval;
+
+}
+
+static ssize_t novfs_profile_dentry_read(struct file *file, char *buf, size_t len, loff_t * off)
+{
+ ssize_t retval = 0;
+ unsigned long offset = *off;
+ static char save_DbgPrintOn;
+
+ if (offset == 0) {
- down(&LocalPrint_lock);
++ mutex_lock(&LocalPrint_lock);
+ save_DbgPrintOn = DbgPrintOn;
+ DbgPrintOn = 0;
+ DbgPrintBufferOffset = DbgPrintBufferReadOffset = 0;
+ dump(novfs_root, LocalPrint);
+ }
+
+ retval = common_read(buf, len, off);
+
+ if (0 == retval) {
+ DbgPrintBufferOffset = DbgPrintBufferReadOffset = 0;
+ DbgPrintOn = save_DbgPrintOn;
+
- up(&LocalPrint_lock);
++ mutex_unlock(&LocalPrint_lock);
+ }
+
+ return retval;
+
+}
+
+uint64_t get_nanosecond_time(void)
+{
+ struct timespec ts;
+ uint64_t retVal;
+
+ ts = current_kernel_time();
+
+ retVal = (uint64_t) NSEC_PER_SEC;
+ retVal *= (uint64_t) ts.tv_sec;
+ retVal += (uint64_t) ts.tv_nsec;
+
+ return (retVal);
+}
+
+void novfs_profile_init(void)
+{
+ if (novfs_procfs_dir)
+ dbg_dir = novfs_procfs_dir;
+ else
+ dbg_dir = proc_mkdir(MODULE_NAME, NULL);
+
+ if (dbg_dir) {
+ dbg_file = create_proc_read_entry("Debug", 0600, dbg_dir, proc_read_DbgBuffer, NULL);
+ if (dbg_file) {
+ dbg_file->size = DBGBUFFERSIZE;
+ memcpy(&Dbg_proc_file_operations, dbg_file->proc_fops, sizeof(struct file_operations));
+ Dbg_proc_file_operations.read = User_proc_read_DbgBuffer;
+ Dbg_proc_file_operations.write = User_proc_write_DbgBuffer;
+ dbg_file->proc_fops = &Dbg_proc_file_operations;
+ } else {
+ remove_proc_entry(MODULE_NAME, NULL);
+ vfree(DbgPrintBuffer);
+ DbgPrintBuffer = NULL;
+ }
+ }
+
+ if (DbgPrintBuffer) {
+ if (dbg_dir) {
+ inode_file = create_proc_entry("inode", 0600, dbg_dir);
+ if (inode_file) {
+ inode_file->size = 0;
+ memcpy(&inode_proc_file_ops, inode_file->proc_fops, sizeof(struct file_operations));
+ inode_proc_file_ops.owner = THIS_MODULE;
+ inode_proc_file_ops.read = novfs_profile_read_inode;
+ inode_file->proc_fops = &inode_proc_file_ops;
+ }
+
+ dentry_file = create_proc_entry("dentry", 0600, dbg_dir);
+ if (dentry_file) {
+ dentry_file->size = 0;
+ memcpy(&dentry_proc_file_ops, dentry_file->proc_fops, sizeof(struct file_operations));
+ dentry_proc_file_ops.owner = THIS_MODULE;
+ dentry_proc_file_ops.read = novfs_profile_dentry_read;
+ dentry_file->proc_fops = &dentry_proc_file_ops;
+ }
+
+ } else {
+ vfree(DbgPrintBuffer);
+ DbgPrintBuffer = NULL;
+ }
+ }
+}
+
+void novfs_profile_exit(void)
+{
+ if (dbg_file)
+ DbgPrint("Calling remove_proc_entry(Debug, NULL)\n"), remove_proc_entry("Debug", dbg_dir);
+ if (inode_file)
+ DbgPrint("Calling remove_proc_entry(inode, NULL)\n"), remove_proc_entry("inode", dbg_dir);
+ if (dentry_file)
+ DbgPrint("Calling remove_proc_entry(dentry, NULL)\n"), remove_proc_entry("dentry", dbg_dir);
+
+ if (dbg_dir && (dbg_dir != novfs_procfs_dir)) {
+ DbgPrint("Calling remove_proc_entry(%s, NULL)\n", MODULE_NAME);
+ remove_proc_entry(MODULE_NAME, NULL);
+ }
+}
--- /dev/null
+/*
+ * Novell NCP Redirector for Linux
+ * Author: James Turner
+ *
+ * This file contains functions used to scope users.
+ *
+ * Copyright (C) 2005 Novell, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/sched.h>
+#include <linux/personality.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/synclink.h>
+#include <linux/smp_lock.h>
+#include <linux/semaphore.h>
+#include <linux/security.h>
+#include <linux/syscalls.h>
+
+#include "vfs.h"
+
+#define SHUTDOWN_INTERVAL 5
+#define CLEANUP_INTERVAL 10
+#define MAX_USERNAME_LENGTH 32
+
+static struct list_head Scope_List;
+static struct semaphore Scope_Lock;
+static struct semaphore Scope_Thread_Delay;
+static int Scope_Thread_Terminate = 0;
+static struct timer_list Scope_Timer;
+static unsigned int Scope_Hash_Val = 1;
+
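+/*
+ * Each novfs_scope_list entry ties a user (keyed by the effective uid) to a
+ * session handle obtained from the novfs daemon.  Scope_List holds all live
+ * scopes and is protected by Scope_Lock.
+ */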
+static struct novfs_scope_list *Scope_Search4Scope(struct novfs_schandle Id, int Session, int Locked)
+{
+ struct novfs_scope_list *scope, *rscope = NULL;
+ struct novfs_schandle cur_scope;
+ struct list_head *sl;
+ int offset;
+
+ DbgPrint("Scope_Search4Scope: 0x%p:%p 0x%x 0x%x\n", Id.hTypeId, Id.hId, Session, Locked);
+
+ if (Session)
+ offset = offsetof(struct novfs_scope_list, SessionId);
+ else
+ offset = offsetof(struct novfs_scope_list, ScopeId);
+
+ if (!Locked) {
+ down(&Scope_Lock);
+ }
+
+ sl = Scope_List.next;
+ DbgPrint("Scope_Search4Scope: 0x%p\n", sl);
+ while (sl != &Scope_List) {
+ scope = list_entry(sl, struct novfs_scope_list, ScopeList);
+
+ cur_scope = *(struct novfs_schandle *)((char *)scope + offset);
+ if (SC_EQUAL(Id, cur_scope)) {
+ rscope = scope;
+ break;
+ }
+
+ sl = sl->next;
+ }
+
+ if (!Locked) {
+ up(&Scope_Lock);
+ }
+
+ DbgPrint("Scope_Search4Scope: return 0x%p\n", rscope);
+ return (rscope);
+}
+
+static struct novfs_scope_list *Scope_Find_Scope(int Create)
+{
+ struct novfs_scope_list *scope = NULL, *pscope = NULL;
+ struct task_struct *task;
+ struct novfs_schandle scopeId;
+ int addscope = 0;
+
+ task = current;
+
+ DbgPrint("Scope_Find_Scope: %d %d %d %d\n", current_uid(), current_euid(), current_suid(), current_fsuid());
+
+ //scopeId = task->euid;
+ UID_TO_SCHANDLE(scopeId, current_euid());
+
+ scope = Scope_Search4Scope(scopeId, 0, 0);
+
+ if (!scope && Create) {
+		scope = kmalloc(sizeof(*scope), GFP_KERNEL);
+ if (scope) {
+ scope->ScopeId = scopeId;
+ SC_INITIALIZE(scope->SessionId);
+ scope->ScopePid = task->pid;
+ scope->ScopeTask = task;
+ scope->ScopeHash = 0;
+ scope->ScopeUid = current_euid();
+ scope->ScopeUserName[0] = '\0';
+
+ if (!novfs_daemon_create_sessionId(&scope->SessionId)) {
+ DbgPrint("Scope_Find_Scope2: %d %d %d %d\n",
+ current_uid(), current_euid(), current_suid(), current_fsuid());
+ memset(scope->ScopeUserName, 0, sizeof(scope->ScopeUserName));
+ scope->ScopeUserNameLength = 0;
+ novfs_daemon_getpwuid(current_euid(), sizeof(scope->ScopeUserName), scope->ScopeUserName);
+ scope->ScopeUserNameLength = strlen(scope->ScopeUserName);
+ addscope = 1;
+ }
+
+ scope->ScopeHash = Scope_Hash_Val++;
+ DbgPrint("Scope_Find_Scope: Adding 0x%p\n"
+ " ScopeId: 0x%p:%p\n"
+ " SessionId: 0x%p:%p\n"
+ " ScopePid: %d\n"
+ " ScopeTask: 0x%p\n"
+ " ScopeHash: %u\n"
+ " ScopeUid: %u\n"
+ " ScopeUserNameLength: %u\n"
+ " ScopeUserName: %s\n",
+ scope,
+ scope->ScopeId.hTypeId, scope->ScopeId.hId,
+ scope->SessionId.hTypeId, scope->SessionId.hId,
+ scope->ScopePid,
+ scope->ScopeTask,
+ scope->ScopeHash, scope->ScopeUid, scope->ScopeUserNameLength, scope->ScopeUserName);
+
+ if (SC_PRESENT(scope->SessionId)) {
+ down(&Scope_Lock);
+ pscope = Scope_Search4Scope(scopeId, 0, 1);
+
+ if (!pscope) {
+ list_add(&scope->ScopeList, &Scope_List);
+ }
+ up(&Scope_Lock);
+
+ if (pscope) {
+ printk("<6>Scope_Find_Scope scope not added because it was already there...\n");
+ novfs_daemon_destroy_sessionId(scope->SessionId);
+ kfree(scope);
+ scope = pscope;
+ addscope = 0;
+ }
+ } else {
+ kfree(scope);
+ scope = NULL;
+ }
+
+ if (scope && addscope)
+ novfs_add_to_root(scope->ScopeUserName);
+ }
+ }
+
+ return (scope);
+}
+
+static int Scope_Validate_Scope(struct novfs_scope_list *Scope)
+{
+ struct novfs_scope_list *s;
+ struct list_head *sl;
+ int retVal = 0;
+
+ DbgPrint("Scope_Validate_Scope: 0x%p\n", Scope);
+
+ down(&Scope_Lock);
+
+ sl = Scope_List.next;
+ while (sl != &Scope_List) {
+ s = list_entry(sl, struct novfs_scope_list, ScopeList);
+
+ if (s == Scope) {
+ retVal = 1;
+ break;
+ }
+
+ sl = sl->next;
+ }
+
+ up(&Scope_Lock);
+
+ return (retVal);
+}
+
+uid_t novfs_scope_get_uid(struct novfs_scope_list * scope)
+{
+ uid_t uid = 0;
+ if (!scope)
+ scope = Scope_Find_Scope(1);
+
+ if (scope && Scope_Validate_Scope(scope))
+ uid = scope->ScopeUid;
+ return uid;
+}
+
+char *novfs_scope_get_username(void)
+{
+ char *name = NULL;
+ struct novfs_scope_list *Scope;
+
+ Scope = Scope_Find_Scope(1);
+
+ if (Scope && Scope_Validate_Scope(Scope))
+ name = Scope->ScopeUserName;
+
+ return name;
+}
+
+struct novfs_schandle novfs_scope_get_sessionId(struct novfs_scope_list
+ *Scope)
+{
+ struct novfs_schandle sessionId;
+ DbgPrint("Scope_Get_SessionId: 0x%p\n", Scope);
+ SC_INITIALIZE(sessionId);
+ if (!Scope)
+ Scope = Scope_Find_Scope(1);
+
+ if (Scope && Scope_Validate_Scope(Scope))
+ sessionId = Scope->SessionId;
+ DbgPrint("Scope_Get_SessionId: return 0x%p:%p\n", sessionId.hTypeId, sessionId.hId);
+ return (sessionId);
+}
+
+struct novfs_scope_list *novfs_get_scope_from_name(struct qstr *Name)
+{
+ struct novfs_scope_list *scope, *rscope = NULL;
+ struct list_head *sl;
+
+ DbgPrint("Scope_Get_ScopefromName: %.*s\n", Name->len, Name->name);
+
+ down(&Scope_Lock);
+
+ sl = Scope_List.next;
+ while (sl != &Scope_List) {
+ scope = list_entry(sl, struct novfs_scope_list, ScopeList);
+
+ if ((Name->len == scope->ScopeUserNameLength) && (0 == strncmp(scope->ScopeUserName, Name->name, Name->len))) {
+ rscope = scope;
+ break;
+ }
+
+ sl = sl->next;
+ }
+
+ up(&Scope_Lock);
+
+ return (rscope);
+}
+
+int novfs_scope_set_userspace(uint64_t * TotalSize, uint64_t * Free, uint64_t * TotalEnties, uint64_t * FreeEnties)
+{
+ struct novfs_scope_list *scope;
+ int retVal = 0;
+
+ scope = Scope_Find_Scope(1);
+
+ if (scope) {
+ if (TotalSize)
+ scope->ScopeUSize = *TotalSize;
+ if (Free)
+ scope->ScopeUFree = *Free;
+ if (TotalEnties)
+ scope->ScopeUTEnties = *TotalEnties;
+ if (FreeEnties)
+ scope->ScopeUAEnties = *FreeEnties;
+ }
+
+ return (retVal);
+}
+
+int novfs_scope_get_userspace(uint64_t * TotalSize, uint64_t * Free, uint64_t * TotalEnties, uint64_t * FreeEnties)
+{
+ struct novfs_scope_list *scope;
+ int retVal = 0;
+
+ uint64_t td, fd, te, fe;
+
+ scope = Scope_Find_Scope(1);
+
+ td = fd = te = fe = 0;
+ if (scope) {
+
+ retVal = novfs_daemon_get_userspace(scope->SessionId, &td, &fd, &te, &fe);
+
+ scope->ScopeUSize = td;
+ scope->ScopeUFree = fd;
+ scope->ScopeUTEnties = te;
+ scope->ScopeUAEnties = fe;
+ }
+
+ if (TotalSize)
+ *TotalSize = td;
+ if (Free)
+ *Free = fd;
+ if (TotalEnties)
+ *TotalEnties = te;
+ if (FreeEnties)
+ *FreeEnties = fe;
+
+ return (retVal);
+}
+
+struct novfs_scope_list *novfs_get_scope(struct dentry *Dentry)
+{
+ struct novfs_scope_list *scope = NULL;
+ char *buf, *path, *cp;
+ struct qstr name;
+
+ buf = (char *)kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL);
+ if (buf) {
+ path = novfs_scope_dget_path(Dentry, buf, PATH_LENGTH_BUFFER, 0);
+ if (path) {
+ DbgPrint("Scope_Get_ScopefromPath: %s\n", path);
+
+ if (*path == '/')
+ path++;
+
+ cp = path;
+ if (*cp) {
+ while (*cp && (*cp != '/'))
+ cp++;
+
+ *cp = '\0';
+ name.hash = 0;
+ name.len = (int)(cp - path);
+ name.name = path;
+ scope = novfs_get_scope_from_name(&name);
+ }
+ }
+ kfree(buf);
+ }
+
+ return (scope);
+}
+
+static char *add_to_list(char *Name, char *List, char *EndOfList)
+{
+ while (*Name && (List < EndOfList)) {
+ *List++ = *Name++;
+ }
+
+ if (List < EndOfList) {
+ *List++ = '\0';
+ }
+ return (List);
+}
+
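+/*
+ * Builds a kmalloc'd list of the user names visible at the novfs root: the
+ * tree and server pseudo-directories followed by one entry per active scope.
+ * The names are NUL-separated and the list is terminated by an empty string;
+ * the caller is presumably expected to kfree() the result.
+ */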
+char *novfs_get_scopeusers(void)
+{
+ struct novfs_scope_list *scope;
+ struct list_head *sl;
+ int asize = 8 * MAX_USERNAME_LENGTH;
+ char *list, *cp, *ep;
+
+ DbgPrint("Scope_Get_ScopeUsers\n");
+
+ do { /* Copy list until done or out of memory */
+ list = kmalloc(asize, GFP_KERNEL);
+
+ DbgPrint("Scope_Get_ScopeUsers list=0x%p\n", list);
+ if (list) {
+ cp = list;
+ ep = cp + asize;
+
+ /*
+ * Add the tree and server entries
+ */
+ cp = add_to_list(TREE_DIRECTORY_NAME, cp, ep);
+ cp = add_to_list(SERVER_DIRECTORY_NAME, cp, ep);
+
+ down(&Scope_Lock);
+
+ sl = Scope_List.next;
+ while ((sl != &Scope_List) && (cp < ep)) {
+ scope = list_entry(sl, struct novfs_scope_list, ScopeList);
+
+ DbgPrint("Scope_Get_ScopeUsers found 0x%p %s\n", scope, scope->ScopeUserName);
+
+ cp = add_to_list(scope->ScopeUserName, cp, ep);
+
+ sl = sl->next;
+ }
+
+ up(&Scope_Lock);
+
+ if (cp < ep) {
+ *cp++ = '\0';
+ asize = 0;
+			} else {	/* Allocation was too small, increase the size */
+
+ asize *= 4;
+ kfree(list);
+ list = NULL;
+ }
+		} else {	/* allocation failed; return NULL */
+
+ break;
+ }
+	} while (!list);	/* List was too small, try again */
+
+ return (list);
+}
+
+void *novfs_scope_lookup(void)
+{
+ return Scope_Find_Scope(1);
+}
+
+static void Scope_Timer_Function(unsigned long context)
+{
+ up(&Scope_Thread_Delay);
+}
+
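+/*
+ * Background thread: wakes up every CLEANUP_INTERVAL seconds and drops every
+ * scope whose owning uid no longer has a running process, destroying the
+ * associated daemon session and removing the user's entry from the root
+ * directory.
+ */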
+static int Scope_Cleanup_Thread(void *Args)
+{
+ struct novfs_scope_list *scope, *rscope;
+ struct list_head *sl, cleanup;
+ struct task_struct *task;
+
+ DbgPrint("Scope_Cleanup_Thread: %d\n", current->pid);
+
+ /*
+	 * Set up and start the cleanup timer
+ */
+ init_timer(&Scope_Timer);
+
+ while (0 == Scope_Thread_Terminate) {
+ DbgPrint("Scope_Cleanup_Thread: looping\n");
+ if (Scope_Thread_Terminate) {
+ break;
+ }
+
+ /*
+ * Check scope list for any terminated processes
+ */
+ down(&Scope_Lock);
+
+ sl = Scope_List.next;
+ INIT_LIST_HEAD(&cleanup);
+
+ while (sl != &Scope_List) {
+ scope = list_entry(sl, struct novfs_scope_list, ScopeList);
+ sl = sl->next;
+
+ rscope = NULL;
+ rcu_read_lock();
+ for_each_process(task) {
+ if ((task->cred->uid == scope->ScopeUid)
+ || (task->cred->euid == scope->ScopeUid)) {
+ rscope = scope;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ if (!rscope) {
+ list_move(&scope->ScopeList, &cleanup);
+				DbgPrint("Scope_Cleanup_Thread: Scope=0x%p\n", scope);
+ }
+ }
+
+ up(&Scope_Lock);
+
+ sl = cleanup.next;
+ while (sl != &cleanup) {
+ scope = list_entry(sl, struct novfs_scope_list, ScopeList);
+ sl = sl->next;
+
+ DbgPrint("Scope_Cleanup_Thread: Removing 0x%p\n"
+ " ScopeId: 0x%p:%p\n"
+ " SessionId: 0x%p:%p\n"
+ " ScopePid: %d\n"
+ " ScopeTask: 0x%p\n"
+ " ScopeHash: %u\n"
+ " ScopeUid: %u\n"
+ " ScopeUserName: %s\n",
+ scope,
+				 scope->ScopeId.hTypeId, scope->ScopeId.hId,
+				 scope->SessionId.hTypeId, scope->SessionId.hId,
+ scope->ScopePid, scope->ScopeTask, scope->ScopeHash, scope->ScopeUid, scope->ScopeUserName);
+ if (!Scope_Search4Scope(scope->SessionId, 1, 0)) {
+ novfs_remove_from_root(scope->ScopeUserName);
+ novfs_daemon_destroy_sessionId(scope->SessionId);
+ }
+ kfree(scope);
+ }
+
+ Scope_Timer.expires = jiffies + HZ * CLEANUP_INTERVAL;
+ Scope_Timer.data = (unsigned long)0;
+ Scope_Timer.function = Scope_Timer_Function;
+ add_timer(&Scope_Timer);
+ DbgPrint("Scope_Cleanup_Thread: sleeping\n");
+
+ if (down_interruptible(&Scope_Thread_Delay)) {
+ break;
+ }
+ del_timer(&Scope_Timer);
+ }
+ Scope_Thread_Terminate = 0;
+
+ printk(KERN_INFO "Scope_Cleanup_Thread: Exit\n");
+ DbgPrint("Scope_Cleanup_Thread: Exit\n");
+ return (0);
+}
+
+void novfs_scope_cleanup(void)
+{
+ struct novfs_scope_list *scope;
+ struct list_head *sl;
+
+ DbgPrint("Scope_Cleanup:\n");
+
+	/*
+	 * Remove and tear down every scope still on the list
+	 */
+ down(&Scope_Lock);
+
+ sl = Scope_List.next;
+
+ while (sl != &Scope_List) {
+ scope = list_entry(sl, struct novfs_scope_list, ScopeList);
+ sl = sl->next;
+
+ list_del(&scope->ScopeList);
+
+ DbgPrint("Scope_Cleanup: Removing 0x%p\n"
+ " ScopeId: 0x%p:%p\n"
+ " SessionId: 0x%p:%p\n"
+ " ScopePid: %d\n"
+ " ScopeTask: 0x%p\n"
+ " ScopeHash: %u\n"
+ " ScopeUid: %u\n"
+ " ScopeUserName: %s\n",
+ scope,
+			 scope->ScopeId.hTypeId, scope->ScopeId.hId,
+			 scope->SessionId.hTypeId, scope->SessionId.hId,
+ scope->ScopePid, scope->ScopeTask, scope->ScopeHash, scope->ScopeUid, scope->ScopeUserName);
+ if (!Scope_Search4Scope(scope->SessionId, 1, 1)) {
+ novfs_remove_from_root(scope->ScopeUserName);
+ novfs_daemon_destroy_sessionId(scope->SessionId);
+ }
+ kfree(scope);
+ }
+
+ up(&Scope_Lock);
+
+}
+
+/*
+ * Walks the dentry chain building a path.
+ */
+char *novfs_scope_dget_path(struct dentry *Dentry, char *Buf, unsigned int Buflen, int Flags)
+{
+ char *retval = &Buf[Buflen];
+ struct dentry *p = Dentry;
+ int len;
+
+ *(--retval) = '\0';
+ Buflen--;
+
+ do {
+ if (Buflen > p->d_name.len) {
+ retval -= p->d_name.len;
+ Buflen -= p->d_name.len;
+ memcpy(retval, p->d_name.name, p->d_name.len);
+ *(--retval) = '/';
+ Buflen--;
+ p = p->d_parent;
+ } else {
+ retval = NULL;
+ break;
+ }
+ } while (!IS_ROOT(p));
+
+ if (IS_ROOT(Dentry)) {
+ retval++;
+ }
+
+ if (Flags) {
+ len = strlen(p->d_sb->s_type->name);
+ if (Buflen - len > 0) {
+ retval -= len;
+ Buflen -= len;
+ memcpy(retval, p->d_sb->s_type->name, len);
+ *(--retval) = '/';
+ Buflen--;
+ }
+ }
+
+ return (retval);
+}
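+
+/*
+ * Note that the path is assembled backwards from the end of Buf, so the
+ * returned pointer lies somewhere inside Buf (or is NULL if Buf was too
+ * small).  Callers such as novfs_get_scope() keep Buf around and kfree()
+ * Buf itself, never the returned pointer.
+ */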
+
+void novfs_scope_init(void)
+{
+ INIT_LIST_HEAD(&Scope_List);
- init_MUTEX(&Scope_Lock);
- init_MUTEX_LOCKED(&Scope_Thread_Delay);
++ sema_init(&Scope_Lock, 1);
++ sema_init(&Scope_Thread_Delay, 0);
+ kthread_run(Scope_Cleanup_Thread, NULL, "novfs_ST");
+}
+
+void novfs_scope_exit(void)
+{
+ unsigned long expires = jiffies + HZ * SHUTDOWN_INTERVAL;
+
+ printk(KERN_INFO "Scope_Uninit: Start\n");
+
+ Scope_Thread_Terminate = 1;
+
+ up(&Scope_Thread_Delay);
+
+ mb();
+ while (Scope_Thread_Terminate && (jiffies < expires))
+ yield();
+ /* down(&Scope_Thread_Delay); */
+ printk(KERN_INFO "Scope_Uninit: Exit\n");
+
+}
}
s->s_flags |= MS_ACTIVE;
} else {
- do_remount_sb(s, flags, data, 0);
+ __do_remount_sb(s, flags, data, 0);
}
- simple_set_mnt(mnt, s);
+ return dget(s->s_root);
+ }
+ EXPORT_SYMBOL(mount_single);
+
+ int get_sb_single(struct file_system_type *fs_type,
+ int flags, void *data,
+ int (*fill_super)(struct super_block *, void *, int),
+ struct vfsmount *mnt)
+ {
+ struct dentry *root;
+ root = mount_single(fs_type, flags, data, fill_super);
+ if (IS_ERR(root))
+ return PTR_ERR(root);
+ mnt->mnt_root = root;
+ mnt->mnt_sb = root->d_sb;
return 0;
}
#define TCSETSW2 _IOW('T', 0x2C, struct termios2)
#define TCSETSF2 _IOW('T', 0x2D, struct termios2)
#define TIOCGRS485 0x542E
+ #ifndef TIOCSRS485
#define TIOCSRS485 0x542F
+ #endif
#define TIOCGPTN _IOR('T', 0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
#define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */
+#define TIOCGDEV _IOR('T', 0x32, unsigned int) /* Get real dev no below /dev/console */
#define TCGETX 0x5432 /* SYS5 TCGETX compatibility */
#define TCSETX 0x5433
#define TCSETXF 0x5434
int ddebug_add_module(struct _ddebug *tab, unsigned int n,
const char *modname);
+struct kernel_param;
+
#if defined(CONFIG_DYNAMIC_DEBUG)
extern int ddebug_remove_module(const char *mod_name);
+extern int ddebug_exec_query(char *query_string);
+extern void ddebug_module_parse_args(const char *name, char *args,
+ struct kernel_param *params, unsigned num);
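+
+/*
+ * Illustrative example (assumed to follow the dynamic debug control
+ * language): enable all pr_debug() callsites of a module by applying a
+ * query string such as
+ *
+ *	ddebug_exec_query("module mymod +p");
+ *
+ * "mymod" is a placeholder module name.
+ */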
- #define __dynamic_dbg_enabled(dd) ({ \
- int __ret = 0; \
- if (unlikely((dynamic_debug_enabled & (1LL << DEBUG_HASH)) && \
- (dynamic_debug_enabled2 & (1LL << DEBUG_HASH2)))) \
- if (unlikely(dd.flags)) \
- __ret = 1; \
- __ret; })
-
#define dynamic_pr_debug(fmt, ...) do { \
+ __label__ do_printk; \
+ __label__ out; \
static struct _ddebug descriptor \
__used \
__attribute__((section("__verbose"), aligned(8))) = \
#define IS_NOCMTIME(inode) ((inode)->i_flags & S_NOCMTIME)
#define IS_SWAPFILE(inode) ((inode)->i_flags & S_SWAPFILE)
#define IS_PRIVATE(inode) ((inode)->i_flags & S_PRIVATE)
+ #define IS_IMA(inode) ((inode)->i_flags & S_IMA)
+/*
+ * IS_ACL() tells the VFS not to apply the umask and to use
+ * iop->check_acl for ACL permission checks when it is defined.
+ */
+#define IS_ACL(inode) __IS_FLG(inode, MS_POSIXACL | MS_RICHACL)
+
/* the read-only stuff doesn't really belong here, but any other place is
probably as bad and I don't want to create yet another include file. */
struct vm_area_struct;
+ /* Plain integer GFP bitmasks. Do not use this directly. */
+ #define ___GFP_DMA 0x01u
+ #define ___GFP_HIGHMEM 0x02u
+ #define ___GFP_DMA32 0x04u
+ #define ___GFP_MOVABLE 0x08u
+ #define ___GFP_WAIT 0x10u
+ #define ___GFP_HIGH 0x20u
+ #define ___GFP_IO 0x40u
+ #define ___GFP_FS 0x80u
+ #define ___GFP_COLD 0x100u
+ #define ___GFP_NOWARN 0x200u
+ #define ___GFP_REPEAT 0x400u
+ #define ___GFP_NOFAIL 0x800u
+ #define ___GFP_NORETRY 0x1000u
++#define ___GFP_MEMALLOC 0x2000u
+ #define ___GFP_COMP 0x4000u
+ #define ___GFP_ZERO 0x8000u
+ #define ___GFP_NOMEMALLOC 0x10000u
+ #define ___GFP_HARDWALL 0x20000u
+ #define ___GFP_THISNODE 0x40000u
+ #define ___GFP_RECLAIMABLE 0x80000u
+ #ifdef CONFIG_KMEMCHECK
+ #define ___GFP_NOTRACK 0x200000u
+ #else
+ #define ___GFP_NOTRACK 0
+ #endif
+
/*
* GFP bitmasks..
*
* __GFP_MOVABLE: Flag that this page will be movable by the page migration
* mechanism or reclaimed
*/
- #define __GFP_WAIT ((__force gfp_t)0x10u) /* Can wait and reschedule? */
- #define __GFP_HIGH ((__force gfp_t)0x20u) /* Should access emergency pools? */
- #define __GFP_IO ((__force gfp_t)0x40u) /* Can start physical IO? */
- #define __GFP_FS ((__force gfp_t)0x80u) /* Can call down to low-level FS? */
- #define __GFP_COLD ((__force gfp_t)0x100u) /* Cache-cold page required */
- #define __GFP_NOWARN ((__force gfp_t)0x200u) /* Suppress page allocation failure warning */
- #define __GFP_REPEAT ((__force gfp_t)0x400u) /* See above */
- #define __GFP_NOFAIL ((__force gfp_t)0x800u) /* See above */
- #define __GFP_NORETRY ((__force gfp_t)0x1000u)/* See above */
- #define __GFP_MEMALLOC ((__force gfp_t)0x2000u)/* Use emergency reserves */
- #define __GFP_COMP ((__force gfp_t)0x4000u)/* Add compound page metadata */
- #define __GFP_ZERO ((__force gfp_t)0x8000u)/* Return zeroed page on success */
- #define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
- #define __GFP_HARDWALL ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
- #define __GFP_THISNODE ((__force gfp_t)0x40000u)/* No fallback, no policies */
- #define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */
-
- #ifdef CONFIG_KMEMCHECK
- #define __GFP_NOTRACK ((__force gfp_t)0x200000u) /* Don't track with kmemcheck */
- #else
- #define __GFP_NOTRACK ((__force gfp_t)0)
- #endif
+ #define __GFP_WAIT ((__force gfp_t)___GFP_WAIT) /* Can wait and reschedule? */
+ #define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) /* Should access emergency pools? */
+ #define __GFP_IO ((__force gfp_t)___GFP_IO) /* Can start physical IO? */
+ #define __GFP_FS ((__force gfp_t)___GFP_FS) /* Can call down to low-level FS? */
+ #define __GFP_COLD ((__force gfp_t)___GFP_COLD) /* Cache-cold page required */
+ #define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) /* Suppress page allocation failure warning */
+ #define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT) /* See above */
+ #define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) /* See above */
+ #define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) /* See above */
++#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)/* Use emergency reserves */
+ #define __GFP_COMP ((__force gfp_t)___GFP_COMP) /* Add compound page metadata */
+ #define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) /* Return zeroed page on success */
+ #define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves */
+ #define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */
+ #define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */
+ #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
+ #define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */
/*
* This may seem redundant, but it's a way of annotating false positives vs.
extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_nested_irq(unsigned int irq);
- /*
- * Monolithic do_IRQ implementation.
- */
- #ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
- extern unsigned int __do_IRQ(unsigned int irq);
- #endif
-
- /*
- * Architectures call this to let the generic IRQ layer
- * handle an interrupt. If the descriptor is attached to an
- * irqchip-style controller then we call the ->handle_irq() handler,
- * and it calls __do_IRQ() if it's attached to an irqtype-style controller.
- */
- static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
- {
- #ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
- desc->handle_irq(irq, desc);
- #else
- if (likely(desc->handle_irq))
- desc->handle_irq(irq, desc);
- else
- __do_IRQ(irq);
- #endif
- }
-
- static inline void generic_handle_irq(unsigned int irq)
- {
- generic_handle_irq_desc(irq, irq_to_desc(irq));
- }
-
/* Handling of unhandled and spurious interrupts: */
extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
- irqreturn_t action_ret);
+ irqreturn_t action_ret, bool only_fixup);
- /* Resending of interrupts :*/
- void check_irq_resend(struct irq_desc *desc, unsigned int irq);
/* Enable/disable irq debugging output: */
extern int noirqdebug_setup(char *str);
int redirty_page_for_writepage(struct writeback_control *wbc,
struct page *page);
void account_page_dirtied(struct page *page, struct address_space *mapping);
+ void account_page_writeback(struct page *page);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
+int set_page_dirty_notag(struct page *page);
int clear_page_dirty_for_io(struct page *page);
/* Is the vma a continuation of the stack vma above it? */
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>
++#include <linux/reserve.h>
/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE 0
#else
__u8 deliver_no_wcard:1;
#endif
+#ifdef CONFIG_NETVM
+ __u8 emergency:1;
+#endif
- #ifdef CONFIG_XEN
- __u8 proto_data_valid:1,
- proto_csum_blank:1;
- #endif
kmemcheck_bitfield_end(flags2);
- /* 0/9...15 bit hole */
+ /* 0/14 bit hole */
#ifdef CONFIG_NET_DMA
dma_cookie_t dma_cookie;
static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
gfp_t priority)
{
- return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, -1);
- return __alloc_skb(size, priority, 1, NUMA_NO_NODE);
++ return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
}
extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);
return skb;
}
- extern struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask);
- extern void __netdev_free_page(struct net_device *dev, struct page *page);
++extern struct mem_reserve net_skb_reserve;
++
+ /**
+ * __netdev_alloc_page - allocate a page for ps-rx on a specific device
+ * @dev: network device to receive on
+ * @gfp_mask: alloc_pages_node mask
+ *
+ * Allocate a new page. dev currently unused.
+ *
+ * %NULL is returned if there is no free memory.
+ */
+ static inline struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
+ {
- return alloc_pages_node(NUMA_NO_NODE, gfp_mask, 0);
++ return alloc_pages_reserve(NUMA_NO_NODE, gfp_mask | __GFP_MEMALLOC, 0,
++ &net_skb_reserve, NULL);
+ }
/**
* netdev_alloc_page - allocate a page for ps-rx on a specific device
return __netdev_alloc_page(dev, GFP_ATOMIC);
}
++static inline void __netdev_free_page(struct net_device *dev, struct page *page)
++{
++ free_pages_reserve(page, 0, &net_skb_reserve, page->reserve);
++}
++
static inline void netdev_free_page(struct net_device *dev, struct page *page)
{
- __free_page(page);
+ __netdev_free_page(dev, page);
}
/**
menuconfig CGROUP_SCHED
bool "Group CPU scheduler"
- depends on EXPERIMENTAL && CGROUPS
+ depends on EXPERIMENTAL
- default n
+ default n if KERNEL_DESKTOP
+ default y
help
This feature lets CPU scheduler recognize task groups and control CPU
bandwidth allocation to such task groups. It uses cgroups to group
action_ret = handle_IRQ_event(irq, desc->action);
if (!noirqdebug)
- note_interrupt(irq, desc, action_ret);
+ note_interrupt(irq, desc, action_ret, false);
- if (desc->chip->eoi)
- desc->chip->eoi(irq);
+ if (desc->irq_data.chip->irq_eoi)
+ desc->irq_data.chip->irq_eoi(&desc->irq_data);
}
void
if (likely(!(desc->status & IRQ_DISABLED))) {
action_ret = handle_IRQ_event(irq, desc->action);
if (!noirqdebug)
- note_interrupt(irq, desc, action_ret);
+ note_interrupt(irq, desc, action_ret, true);
}
- desc->chip->end(irq);
+ desc->irq_data.chip->end(irq);
return 1;
}
lockdep_softirq_exit();
account_system_vtime(current);
- _local_bh_enable();
+ __local_bh_enable(SOFTIRQ_OFFSET);
+ tsk_restore_flags(current, pflags, PF_MEMALLOC);
}
#ifndef __ARCH_HAS_DO_SOFTIRQ
#include <linux/dynamic_debug.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
+ #include <linux/jump_label.h>
+#include <asm/setup.h>
+
extern struct _ddebug __start___verbose[];
extern struct _ddebug __stop___verbose[];
+#define DDEBUG_STRING_SIZE 1024
+
- /* dynamic_debug_enabled, and dynamic_debug_enabled2 are bitmasks in which
- * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
- * use independent hash functions, to reduce the chance of false positives.
- */
- long long dynamic_debug_enabled;
- EXPORT_SYMBOL_GPL(dynamic_debug_enabled);
- long long dynamic_debug_enabled2;
- EXPORT_SYMBOL_GPL(dynamic_debug_enabled2);
-
struct ddebug_table {
struct list_head link;
char *mod_name;
maccess.o page_alloc.o page-writeback.o \
readahead.o swap.o truncate.o vmscan.o shmem.o \
prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
- page_isolation.o mm_init.o mmu_context.o reserve.o \
- $(mmu-y)
+ page_isolation.o mm_init.o mmu_context.o percpu.o \
- $(mmu-y)
++ reserve.o $(mmu-y)
obj-y += init-mm.o
obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
}
#endif /* CONFIG_ELF_CORE */
+static int zeromap_pte_range(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long addr, unsigned long end, pgprot_t prot)
+{
+ pte_t *pte;
+ spinlock_t *ptl;
+ int err = 0;
+
+ pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
+ if (!pte)
+ return -EAGAIN;
+ arch_enter_lazy_mmu_mode();
+ do {
+ pte_t zero_pte;
+
+ if (unlikely(!pte_none(*pte))) {
+ err = -EEXIST;
+ pte++;
+ break;
+ }
+ zero_pte = pte_mkspecial(pfn_pte(my_zero_pfn(addr), prot));
+ zero_pte = pte_wrprotect(zero_pte);
+ set_pte_at(mm, addr, pte, zero_pte);
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+ arch_leave_lazy_mmu_mode();
+ pte_unmap_unlock(pte - 1, ptl);
+ return err;
+}
+
+static inline int zeromap_pmd_range(struct mm_struct *mm, pud_t *pud,
+ unsigned long addr, unsigned long end, pgprot_t prot)
+{
+ pmd_t *pmd;
+ unsigned long next;
+ int err;
+
+ pmd = pmd_alloc(mm, pud, addr);
+ if (!pmd)
+ return -EAGAIN;
+ do {
+ next = pmd_addr_end(addr, end);
+ err = zeromap_pte_range(mm, pmd, addr, next, prot);
+ if (err)
+ break;
+ } while (pmd++, addr = next, addr != end);
+ return err;
+}
+
+static inline int zeromap_pud_range(struct mm_struct *mm, pgd_t *pgd,
+ unsigned long addr, unsigned long end, pgprot_t prot)
+{
+ pud_t *pud;
+ unsigned long next;
+ int err;
+
+ pud = pud_alloc(mm, pgd, addr);
+ if (!pud)
+ return -EAGAIN;
+ do {
+ next = pud_addr_end(addr, end);
+ err = zeromap_pmd_range(mm, pud, addr, next, prot);
+ if (err)
+ break;
+ } while (pud++, addr = next, addr != end);
+ return err;
+}
+
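+/*
+ * zeromap_page_range() - map the range [addr, addr + size) of @vma to the
+ * zero page.
+ *
+ * Every PTE in the range is pointed, write-protected, at the zero pfn.
+ * Returns -EEXIST if a PTE in the range is already populated and -EAGAIN
+ * when page table allocation fails.
+ */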
+int zeromap_page_range(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long size, pgprot_t prot)
+{
+ pgd_t *pgd;
+ unsigned long next;
+ unsigned long end = addr + size;
+ struct mm_struct *mm = vma->vm_mm;
+ int err;
+
+ BUG_ON(addr >= end);
+ pgd = pgd_offset(mm, addr);
+ flush_cache_range(vma, addr, end);
+ do {
+ next = pgd_addr_end(addr, end);
+ err = zeromap_pud_range(mm, pgd, addr, next, prot);
+ if (err)
+ break;
+ } while (pgd++, addr = next, addr != end);
+ return err;
+}
+
- pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
+ pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
spinlock_t **ptl)
{
pgd_t * pgd = pgd_offset(mm, addr);
unlock_page(page);
goto out;
}
+
+ if (sis->flags & SWP_FILE) {
+ struct file *swap_file = sis->swap_file;
+ struct address_space *mapping = swap_file->f_mapping;
+
+ ret = mapping->a_ops->swap_out(swap_file, page, wbc);
+ if (!ret)
+ count_vm_event(PSWPOUT);
+ return ret;
+ }
+
- if (preswap_put(page) == 1) {
- set_page_writeback(page);
- unlock_page(page);
- end_page_writeback(page);
- goto out;
- }
-
bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write);
if (bio == NULL) {
set_page_dirty(page);
VM_BUG_ON(!PageLocked(page));
VM_BUG_ON(PageUptodate(page));
+
+ if (sis->flags & SWP_FILE) {
+ struct file *swap_file = sis->swap_file;
+ struct address_space *mapping = swap_file->f_mapping;
+
+ ret = mapping->a_ops->swap_in(swap_file, page);
+ if (!ret)
+ count_vm_event(PSWPIN);
+ return ret;
+ }
+
- if (preswap_get(page) == 1) {
- SetPageUptodate(page);
- unlock_page(page);
- goto out;
- }
-
bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
if (bio == NULL) {
unlock_page(page);
--- /dev/null
+/*
+ * Memory reserve management.
+ *
+ * Copyright (C) 2007-2008, Red Hat, Inc.,
+ * Peter Zijlstra <pzijlstr@redhat.com>
+ *
+ * Description:
+ *
+ * Manage a set of memory reserves.
+ *
+ * A memory reserve is a reserve for a specified number of objects of a
+ * specified size. Since memory is managed in pages, this reserve demand is
+ * then translated into a page unit.
+ *
+ * So each reserve has a specified object limit, an object usage count and a
+ * number of pages required to back these objects.
+ *
+ * Usage is charged against a reserve; if the charge fails, the resource must
+ * not be allocated/used.
+ *
+ * The reserves are managed in a tree, and the resource demands (pages and
+ * limit) are propagated up the tree. Obviously the object limit becomes
+ * meaningless as soon as units start mixing, but the required page reserve
+ * (being in a single unit, pages) is still valid at the root.
+ *
+ * It is the page demand of the root node that is used to set the global
+ * reserve (adjust_memalloc_reserve() which sets zone->pages_emerg).
+ *
+ * As long as a subtree has the same usage unit, an aggregate node can be used
+ * to charge against instead of the leaf nodes. However, be consistent about
+ * which node is charged, because resource usage is not propagated up the tree
+ * (for performance reasons).
+ */
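+
+/*
+ * Illustrative sketch (not part of this file): a subsystem that wants a
+ * guaranteed pool typically hangs a reserve off mem_reserve_root and
+ * charges its allocations against it, along these lines ("my_reserve" is
+ * a placeholder):
+ *
+ *	static struct mem_reserve my_reserve;
+ *
+ *	mem_reserve_init(&my_reserve, "my reserve", &mem_reserve_root);
+ *	if (mem_reserve_pages_set(&my_reserve, 16))
+ *		return -ENOMEM;
+ *
+ *	if (mem_reserve_pages_charge(&my_reserve, 1))
+ *		... entitled to one page from the emergency pool ...
+ *	else
+ *		... reserve exhausted, back off ...
+ */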
+
+#include <linux/reserve.h>
+#include <linux/mutex.h>
+#include <linux/mmzone.h>
+#include <linux/log2.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include "internal.h"
+
+static DEFINE_MUTEX(mem_reserve_mutex);
+
+/**
+ * @mem_reserve_root - the global reserve root
+ *
+ * The global reserve is empty and has no limit unit; it merely
+ * acts as an aggregation point for reserves and an interface to
+ * adjust_memalloc_reserve().
+ */
+struct mem_reserve mem_reserve_root = {
+ .children = LIST_HEAD_INIT(mem_reserve_root.children),
+ .siblings = LIST_HEAD_INIT(mem_reserve_root.siblings),
+ .name = "total reserve",
+ .lock = __SPIN_LOCK_UNLOCKED(mem_reserve_root.lock),
+ .waitqueue = __WAIT_QUEUE_HEAD_INITIALIZER(mem_reserve_root.waitqueue),
+};
+EXPORT_SYMBOL_GPL(mem_reserve_root);
+
+/**
+ * mem_reserve_init() - initialize a memory reserve object
+ * @res - the new reserve object
+ * @name - a name for this reserve
+ * @parent - when non-NULL, the parent to connect to.
+ */
+void mem_reserve_init(struct mem_reserve *res, const char *name,
+ struct mem_reserve *parent)
+{
+ memset(res, 0, sizeof(*res));
+ INIT_LIST_HEAD(&res->children);
+ INIT_LIST_HEAD(&res->siblings);
+ res->name = name;
+ spin_lock_init(&res->lock);
+ init_waitqueue_head(&res->waitqueue);
+
+ if (parent)
+ mem_reserve_connect(res, parent);
+}
+EXPORT_SYMBOL_GPL(mem_reserve_init);
+
+/*
+ * propagate the pages and limit changes up the (sub)tree.
+ */
+static void __calc_reserve(struct mem_reserve *res, long pages, long limit)
+{
+ unsigned long flags;
+
+ for ( ; res; res = res->parent) {
+ res->pages += pages;
+
+ if (limit) {
+ spin_lock_irqsave(&res->lock, flags);
+ res->limit += limit;
+ spin_unlock_irqrestore(&res->lock, flags);
+ }
+ }
+}
+
+/**
+ * __mem_reserve_add() - primitive to change the size of a reserve
+ * @res - reserve to change
+ * @pages - page delta
+ * @limit - usage limit delta
+ *
+ * Returns -ENOMEM when a size increase is not currently possible.
+ */
+static int __mem_reserve_add(struct mem_reserve *res, long pages, long limit)
+{
+ int ret = 0;
+ long reserve;
+
+ /*
+ * This looks more complex than it need be; that is because we handle
+ * the case where @res isn't actually connected to mem_reserve_root.
+ *
+ * So, by propagating the new pages up the (sub)tree and computing
+ * the difference in mem_reserve_root.pages, we find out whether this
+ * action affects the actual reserve.
+ *
+ * The (partial) propagation also means that mem_reserve_connect()
+ * only needs to look at the direct child, since each disconnected
+ * sub-tree is fully up-to-date.
+ */
+ reserve = mem_reserve_root.pages;
+ __calc_reserve(res, pages, 0);
+ reserve = mem_reserve_root.pages - reserve;
+
+ if (reserve) {
+ ret = adjust_memalloc_reserve(reserve);
+ if (ret)
+ __calc_reserve(res, -pages, 0);
+ }
+
+ /*
+ * Delay updating the limits until we've acquired the resources to
+ * back them.
+ */
+ if (!ret)
+ __calc_reserve(res, 0, limit);
+
+ return ret;
+}
+
+/**
+ * __mem_reserve_charge() - primitive to charge object usage of a reserve
+ * @res - reserve to charge
+ * @charge - size of the charge
+ *
+ * Returns non-zero on success, zero on failure.
+ */
+static
+int __mem_reserve_charge(struct mem_reserve *res, long charge)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&res->lock, flags);
+ if (charge < 0 || res->usage + charge < res->limit) {
+ res->usage += charge;
+ if (unlikely(res->usage < 0))
+ res->usage = 0;
+ ret = 1;
+ }
+ if (charge < 0)
+ wake_up_all(&res->waitqueue);
+ spin_unlock_irqrestore(&res->lock, flags);
+
+ return ret;
+}
+
+/**
+ * mem_reserve_connect() - connect a reserve to another in a child-parent relation
+ * @new_child - the reserve node to connect (child)
+ * @node - the reserve node to connect to (parent)
+ *
+ * Connecting a node results in an increase of the reserve by the amount of
+ * pages in @new_child->pages if @node has a connection to mem_reserve_root.
+ *
+ * Returns -ENOMEM when the new connection would increase the reserve (parent
+ * is connected to mem_reserve_root) and there is no memory to do so.
+ *
+ * On error, the child is _NOT_ connected.
+ */
+int mem_reserve_connect(struct mem_reserve *new_child, struct mem_reserve *node)
+{
+ int ret;
+
+ WARN_ON(!new_child->name);
+
+ mutex_lock(&mem_reserve_mutex);
+ if (new_child->parent) {
+ ret = -EEXIST;
+ goto unlock;
+ }
+ new_child->parent = node;
+ list_add(&new_child->siblings, &node->children);
+ ret = __mem_reserve_add(node, new_child->pages, new_child->limit);
+ if (ret) {
+ new_child->parent = NULL;
+ list_del_init(&new_child->siblings);
+ }
+unlock:
+ mutex_unlock(&mem_reserve_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mem_reserve_connect);
+
+/**
+ * mem_reserve_disconnect() - sever a node's connection to the reserve tree
+ * @node - the node to disconnect
+ *
+ * Disconnecting a node results in a reduction of the reserve by @node->pages
+ * if node had a connection to mem_reserve_root.
+ */
+void mem_reserve_disconnect(struct mem_reserve *node)
+{
+ int ret;
+
+ BUG_ON(!node->parent);
+
+ mutex_lock(&mem_reserve_mutex);
+ if (!node->parent) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+ ret = __mem_reserve_add(node->parent, -node->pages, -node->limit);
+ if (!ret) {
+ node->parent = NULL;
+ list_del_init(&node->siblings);
+ }
+unlock:
+ mutex_unlock(&mem_reserve_mutex);
+
+ /*
+ * We cannot fail to shrink the reserves, can we?
+ */
+ WARN_ON(ret);
+}
+EXPORT_SYMBOL_GPL(mem_reserve_disconnect);
+
+#ifdef CONFIG_PROC_FS
+
+/*
+ * Simple output of the reserve tree in: /proc/reserve_info
+ * Example:
+ *
+ * localhost ~ # cat /proc/reserve_info
+ * 1:0 "total reserve" 6232K 0/278581
+ * 2:1 "total network reserve" 6232K 0/278581
+ * 3:2 "network TX reserve" 212K 0/53
+ * 4:3 "protocol TX pages" 212K 0/53
+ * 5:2 "network RX reserve" 6020K 0/278528
+ * 6:5 "IPv4 route cache" 5508K 0/16384
+ * 7:5 "SKB data reserve" 512K 0/262144
+ * 8:7 "IPv4 fragment cache" 512K 0/262144
+ */
+
+static void mem_reserve_show_item(struct seq_file *m, struct mem_reserve *res,
+ unsigned int parent, unsigned int *id)
+{
+ struct mem_reserve *child;
+ unsigned int my_id = ++*id;
+
+ seq_printf(m, "%d:%d \"%s\" %ldK %ld/%ld\n",
+ my_id, parent, res->name,
+ res->pages << (PAGE_SHIFT - 10),
+ res->usage, res->limit);
+
+ list_for_each_entry(child, &res->children, siblings)
+ mem_reserve_show_item(m, child, my_id, id);
+}
+
+static int mem_reserve_show(struct seq_file *m, void *v)
+{
+ unsigned int ident = 0;
+
+ mutex_lock(&mem_reserve_mutex);
+ mem_reserve_show_item(m, &mem_reserve_root, ident, &ident);
+ mutex_unlock(&mem_reserve_mutex);
+
+ return 0;
+}
+
+static int mem_reserve_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mem_reserve_show, NULL);
+}
+
+static const struct file_operations mem_reserve_operations = {
+ .open = mem_reserve_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static __init int mem_reserve_proc_init(void)
+{
+ proc_create("reserve_info", S_IRUSR, NULL, &mem_reserve_operations);
+ return 0;
+}
+
+module_init(mem_reserve_proc_init);
+
+#endif
+
+/*
+ * alloc_page helpers
+ */
+
+/**
+ * mem_reserve_pages_set() - set a reserve's size in pages
+ * @res - reserve to set
+ * @pages - size in pages to set it to
+ *
+ * Returns -ENOMEM when it fails to set the reserve. On failure the old size
+ * is preserved.
+ */
+int mem_reserve_pages_set(struct mem_reserve *res, long pages)
+{
+ int ret;
+
+ mutex_lock(&mem_reserve_mutex);
+ pages -= res->pages;
+ ret = __mem_reserve_add(res, pages, pages * PAGE_SIZE);
+ mutex_unlock(&mem_reserve_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mem_reserve_pages_set);
+
+/**
+ * mem_reserve_pages_add() - change the size in a relative way
+ * @res - reserve to change
+ * @pages - number of pages to add (or subtract when negative)
+ *
+ * Similar to mem_reserve_pages_set, except that the argument is relative
+ * instead of absolute.
+ *
+ * Returns -ENOMEM when it fails to increase.
+ */
+int mem_reserve_pages_add(struct mem_reserve *res, long pages)
+{
+ int ret;
+
+ mutex_lock(&mem_reserve_mutex);
+ ret = __mem_reserve_add(res, pages, pages * PAGE_SIZE);
+ mutex_unlock(&mem_reserve_mutex);
+
+ return ret;
+}
+
+/**
+ * mem_reserve_pages_charge() - charge page usage to a reserve
+ * @res - reserve to charge
+ * @pages - size to charge
+ *
+ * Returns non-zero on success.
+ */
+int mem_reserve_pages_charge(struct mem_reserve *res, long pages)
+{
+ return __mem_reserve_charge(res, pages * PAGE_SIZE);
+}
+EXPORT_SYMBOL_GPL(mem_reserve_pages_charge);
+
+/*
+ * kmalloc helpers
+ */
+
+/**
+ * mem_reserve_kmalloc_set() - set this reserve to @bytes worth of kmalloc space
+ * @res - reserve to change
+ * @bytes - size in bytes to reserve
+ *
+ * Returns -ENOMEM on failure.
+ */
+int mem_reserve_kmalloc_set(struct mem_reserve *res, long bytes)
+{
+ int ret;
+ long pages;
+
+ mutex_lock(&mem_reserve_mutex);
+ pages = kmalloc_estimate_bytes(GFP_ATOMIC, bytes);
+ pages -= res->pages;
+ bytes -= res->limit;
+ ret = __mem_reserve_add(res, pages, bytes);
+ mutex_unlock(&mem_reserve_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mem_reserve_kmalloc_set);
+
+/**
+ * mem_reserve_kmalloc_charge() - charge bytes to a reserve
+ * @res - reserve to charge
+ * @bytes - bytes to charge
+ *
+ * Returns non-zero on success.
+ */
+int mem_reserve_kmalloc_charge(struct mem_reserve *res, long bytes)
+{
+ if (bytes < 0)
+ bytes = -roundup_pow_of_two(-bytes);
+ else
+ bytes = roundup_pow_of_two(bytes);
+
+ return __mem_reserve_charge(res, bytes);
+}
+EXPORT_SYMBOL_GPL(mem_reserve_kmalloc_charge);
+
+/*
+ * kmem_cache helpers
+ */
+
+/**
+ * mem_reserve_kmem_cache_set() - set reserve to @objects worth of kmem_cache_alloc of @s
+ * @res - reserve to set
+ * @s - kmem_cache to reserve from
+ * @objects - number of objects to reserve
+ *
+ * Returns -ENOMEM on failure.
+ */
+int mem_reserve_kmem_cache_set(struct mem_reserve *res, struct kmem_cache *s,
+ int objects)
+{
+ int ret;
+ long pages, bytes;
+
+ mutex_lock(&mem_reserve_mutex);
+ pages = kmem_alloc_estimate(s, GFP_ATOMIC, objects);
+ pages -= res->pages;
+ bytes = objects * kmem_cache_size(s) - res->limit;
+ ret = __mem_reserve_add(res, pages, bytes);
+ mutex_unlock(&mem_reserve_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mem_reserve_kmem_cache_set);
+
+/**
+ * mem_reserve_kmem_cache_charge() - charge (or uncharge) usage of @objs objects
+ * @res - reserve to charge
+ * @s - kmem_cache the objects are allocated from
+ * @objs - number of objects to charge for (negative to uncharge)
+ *
+ * Returns non-zero on success.
+ */
+int mem_reserve_kmem_cache_charge(struct mem_reserve *res, struct kmem_cache *s,
+ long objs)
+{
+ return __mem_reserve_charge(res, objs * kmem_cache_size(s));
+}
+EXPORT_SYMBOL_GPL(mem_reserve_kmem_cache_charge);
+
+/*
+ * Alloc wrappers.
+ *
+ * Actual usage is commented in linux/reserve.h where the interface functions
+ * live. Furthermore, the code consists of three instances of the same pattern,
+ * hence only the first is extensively commented.
+ */
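+
+/*
+ * Illustrative sketch of the pattern (real callers go through the wrappers
+ * in linux/reserve.h; "my_reserve" is a placeholder): pair the reserve-aware
+ * allocation with the matching free and remember whether the emergency
+ * reserve was used:
+ *
+ *	int emerg = 0;
+ *	void *obj = ___kmalloc_reserve(size, GFP_ATOMIC, -1, _RET_IP_,
+ *				       &my_reserve, &emerg);
+ *	...
+ *	__kfree_reserve(obj, &my_reserve, emerg);
+ */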
+
+/*
+ * kmalloc/kfree
+ */
+
+void *___kmalloc_reserve(size_t size, gfp_t flags, int node, unsigned long ip,
+ struct mem_reserve *res, int *emerg)
+{
+ void *obj;
+ gfp_t gfp;
+
+ /*
+ * Try a regular allocation; when that fails and we're not entitled
+ * to the reserves, fail.
+ */
+ gfp = flags | __GFP_NOMEMALLOC | __GFP_NOWARN;
+ obj = kmalloc_node_track_caller(size, gfp, node);
+
+ if (obj || !(gfp_to_alloc_flags(flags) & ALLOC_NO_WATERMARKS))
+ goto out;
+
+ /*
+ * If we were given a reserve to charge against, try that.
+ */
+ if (res && !mem_reserve_kmalloc_charge(res, size)) {
+ /*
+ * If we failed to charge and we're not allowed to wait for
+ * it to succeed, bail.
+ */
+ if (!(flags & __GFP_WAIT))
+ goto out;
+
+ /*
+ * Wait for a successful charge against the reserve. All
+ * uncharge operations against this reserve will wake us up.
+ */
+ wait_event(res->waitqueue,
+ mem_reserve_kmalloc_charge(res, size));
+
+ /*
+ * After waiting for it, again try a regular allocation.
+ * Pressure could have lifted during our sleep. If this
+ * succeeds, uncharge the reserve.
+ */
+ obj = kmalloc_node_track_caller(size, gfp, node);
+ if (obj) {
+ mem_reserve_kmalloc_charge(res, -size);
+ goto out;
+ }
+ }
+
+ /*
+ * Regular allocation failed, and we've successfully charged our
+ * requested usage against the reserve. Do the emergency allocation.
+ */
+ obj = kmalloc_node_track_caller(size, flags, node);
+ WARN_ON(!obj);
+ if (emerg)
+ *emerg = 1;
+
+out:
+ return obj;
+}
+
+void __kfree_reserve(void *obj, struct mem_reserve *res, int emerg)
+{
+ /*
+ * ksize gives the full allocated size vs the requested size we used to
+ * charge; however since we round up to the nearest power of two, this
+ * should all work nicely.
+ */
+ size_t size = ksize(obj);
+
+ kfree(obj);
+ /*
+ * Free before uncharge; this ensures memory is actually present when
+ * a subsequent charge succeeds.
+ */
+ mem_reserve_kmalloc_charge(res, -size);
+}
+
+/*
+ * kmem_cache_alloc/kmem_cache_free
+ */
+
+void *__kmem_cache_alloc_reserve(struct kmem_cache *s, gfp_t flags, int node,
+ struct mem_reserve *res, int *emerg)
+{
+ void *obj;
+ gfp_t gfp;
+
+ gfp = flags | __GFP_NOMEMALLOC | __GFP_NOWARN;
+ obj = kmem_cache_alloc_node(s, gfp, node);
+
+ if (obj || !(gfp_to_alloc_flags(flags) & ALLOC_NO_WATERMARKS))
+ goto out;
+
+ if (res && !mem_reserve_kmem_cache_charge(res, s, 1)) {
+ if (!(flags & __GFP_WAIT))
+ goto out;
+
+ wait_event(res->waitqueue,
+ mem_reserve_kmem_cache_charge(res, s, 1));
+
+ obj = kmem_cache_alloc_node(s, gfp, node);
+ if (obj) {
+ mem_reserve_kmem_cache_charge(res, s, -1);
+ goto out;
+ }
+ }
+
+ obj = kmem_cache_alloc_node(s, flags, node);
+ WARN_ON(!obj);
+ if (emerg)
+ *emerg = 1;
+
+out:
+ return obj;
+}
+
+void __kmem_cache_free_reserve(struct kmem_cache *s, void *obj,
+ struct mem_reserve *res, int emerg)
+{
+ kmem_cache_free(s, obj);
+ mem_reserve_kmem_cache_charge(res, s, -1);
+}
+
+/*
+ * alloc_pages/free_pages
+ */
+
+struct page *__alloc_pages_reserve(int node, gfp_t flags, int order,
+ struct mem_reserve *res, int *emerg)
+{
+ struct page *page;
+ gfp_t gfp;
+ long pages = 1 << order;
+
+ gfp = flags | __GFP_NOMEMALLOC | __GFP_NOWARN;
+ page = alloc_pages_node(node, gfp, order);
+
+ if (page || !(gfp_to_alloc_flags(flags) & ALLOC_NO_WATERMARKS))
+ goto out;
+
+ if (res && !mem_reserve_pages_charge(res, pages)) {
+ if (!(flags & __GFP_WAIT))
+ goto out;
+
+ wait_event(res->waitqueue,
+ mem_reserve_pages_charge(res, pages));
+
+ page = alloc_pages_node(node, gfp, order);
+ if (page) {
+ mem_reserve_pages_charge(res, -pages);
+ goto out;
+ }
+ }
+
+ page = alloc_pages_node(node, flags, order);
+ WARN_ON(!page);
+ if (emerg)
+ *emerg = 1;
+
+out:
+ return page;
+}
++EXPORT_SYMBOL_GPL(__alloc_pages_reserve);
+
+void __free_pages_reserve(struct page *page, int order,
+ struct mem_reserve *res, int emerg)
+{
+ __free_pages(page, order);
+ mem_reserve_pages_charge(res, -(1 << order));
+}
++EXPORT_SYMBOL_GPL(__free_pages_reserve);
goto load_freelist;
}
+grow_slab:
+ gfpflags &= gfp_allowed_mask;
if (gfpflags & __GFP_WAIT)
local_irq_enable();
if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
slab_out_of_memory(s, gfpflags, node);
return NULL;
+
debug:
- if (!alloc_debug_processing(s, c->page, object, addr))
+ if (kmem_cache_debug(s) &&
+ !alloc_debug_processing(s, c->page, object, addr))
goto another_slab;
+ /*
+ * Avoid the slub fast path in slab_alloc() by not setting
+ * c->freelist, and the fast path in slab_free() by making
+ * node_match() fail by setting c->node to NUMA_NO_NODE.
+ *
+ * We use this for debug and reserve checks which need
+ * to be done for each allocation.
+ */
+
c->page->inuse++;
c->page->freelist = get_freepointer(s, object);
- c->node = -1;
+ c->node = NUMA_NO_NODE;
goto unlock_out;
}
struct page *page;
struct kmem_cache_node *n;
unsigned long flags;
+ int reserve;
- BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node));
+ BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
- page = new_slab(kmalloc_caches, gfpflags, node, &reserve);
- page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
++ page = new_slab(kmem_cache_node, GFP_NOWAIT, node, &reserve);
BUG_ON(!page);
if (page_to_nid(page) != node) {
swap_list.next = p->type;
nr_swap_pages++;
p->inuse_pages--;
- preswap_flush(p->type, offset);
- if ((p->flags & SWP_BLKDEV) &&
- disk->fops->swap_slot_free_notify)
- disk->fops->swap_slot_free_notify(p->bdev, offset);
+ if (p->flags & SWP_BLKDEV) {
+ struct gendisk *disk = p->bdev->bd_disk;
+ if (disk->fops->swap_slot_free_notify)
+ disk->fops->swap_slot_free_notify(p->bdev, offset);
+ }
}
return usage;
source "net/rfkill/Kconfig"
source "net/9p/Kconfig"
source "net/caif/Kconfig"
+ source "net/ceph/Kconfig"
+config NETVM
+ def_bool n
endif # if NET
err = sk_filter(sk, skb);
if (err)
return err;
- if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) {
+ if (!sk_rmem_schedule(sk, skb) && rx_flow_is_on(cf_sk)) {
set_rx_flow_off(cf_sk);
- trace_printk("CAIF: %s():"
- " sending flow OFF due to rmem_schedule\n",
- __func__);
+ pr_debug("sending flow OFF due to rmem_schedule\n");
dbfs_atomic_inc(&cnt.num_rx_flow_off);
caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
}
if (!netdev_tstamp_prequeue)
net_timestamp_check(skb);
- if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
- return NET_RX_SUCCESS;
+ trace_netif_receive_skb(skb);
+ /* Emergency skbs are special; they should
+ * - be delivered to SOCK_MEMALLOC sockets only
+ * - stay away from userspace
+ * - have bounded memory usage
+ *
+ * Use PF_MEMALLOC as a poor man's memory pool - the grouping kind.
+ * This saves us from propagating the allocation context down to all
+ * allocation sites.
+ */
+ if (skb_emergency(skb))
+ current->flags |= PF_MEMALLOC;
+
/* if we've gotten here through NAPI, check netpoll */
if (netpoll_receive_skb(skb))
- return NET_RX_DROP;
+ goto out;
if (!skb->skb_iif)
skb->skb_iif = skb->dev->ifindex;
}
#endif
- #ifdef CONFIG_XEN
- switch (skb->ip_summed) {
- case CHECKSUM_UNNECESSARY:
- skb->proto_data_valid = 1;
- break;
- case CHECKSUM_PARTIAL:
- /* XXX Implement me. */
- default:
- skb->proto_data_valid = 0;
- break;
- }
- #endif
-
+ if (skb_emergency(skb))
+ goto skip_taps;
+
list_for_each_entry_rcu(ptype, &ptype_all, list) {
if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
ptype->dev == orig_dev) {
}
skb = rx_handler(skb);
if (!skb)
- goto out;
+ goto unlock;
}
+ if (vlan_tx_tag_present(skb)) {
+ if (pt_prev) {
+ ret = deliver_skb(skb, pt_prev, orig_dev);
+ pt_prev = NULL;
+ }
+ if (vlan_hwaccel_do_receive(&skb)) {
+ ret = __netif_receive_skb(skb);
+ goto out;
+ } else if (unlikely(!skb))
+ goto out;
+ }
+
/*
* Make sure frames received on VLAN interfaces stacked on
* bonding interfaces still make their way to any base bonding
if (pt_prev) {
ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
} else {
+drop:
+ atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
/* Jamal, now you will not able to escape explaining
* me how you were going to use this. :-)
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
unsigned int length, gfp_t gfp_mask)
{
- int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
struct sk_buff *skb;
- skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, SKB_ALLOC_RX, node);
- skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE);
++ skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
if (likely(skb)) {
skb_reserve(skb, NET_SKB_PAD);
skb->dev = dev;
if (!skb->cloned ||
!atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
&skb_shinfo(skb)->dataref)) {
+
if (skb_shinfo(skb)->nr_frags) {
int i;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
- put_page(skb_shinfo(skb)->frags[i].page);
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_put_page(skb,
+ skb_shinfo(skb)->frags[i].page);
+ }
}
- if (skb_has_frags(skb))
+ if (skb_has_frag_list(skb))
skb_drop_fraglist(skb);
- kfree(skb->head);
+ kfree_reserve(skb->head, &net_skb_reserve, skb_emergency(skb));
}
}
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
- int headerlen = skb->data - skb->head;
- int size;
- /*
- * Allocate the copy buffer
- */
- struct sk_buff *n;
- #ifdef NET_SKBUFF_DATA_USES_OFFSET
- size = skb->end + skb->data_len;
- #else
- size = skb->end - skb->head + skb->data_len;
- #endif
- n = __alloc_skb(size, gfp_mask, skb_alloc_rx_flag(skb), -1);
+ int headerlen = skb_headroom(skb);
+ unsigned int size = (skb_end_pointer(skb) - skb->head) + skb->data_len;
- struct sk_buff *n = alloc_skb(size, gfp_mask);
++ struct sk_buff *n = __alloc_skb(size, gfp_mask, skb_alloc_rx_flag(skb),
++ NUMA_NO_NODE);
+
if (!n)
return NULL;
struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
{
- /*
- * Allocate the copy buffer
- */
- int size;
- struct sk_buff *n;
- #ifdef NET_SKBUFF_DATA_USES_OFFSET
- size = skb->end;
- #else
- size = skb->end - skb->head;
- #endif
- n = __alloc_skb(size, gfp_mask, skb_alloc_rx_flag(skb), -1);
+ unsigned int size = skb_end_pointer(skb) - skb->head;
- struct sk_buff *n = alloc_skb(size, gfp_mask);
++ struct sk_buff *n = __alloc_skb(size, gfp_mask, skb_alloc_rx_flag(skb),
++ NUMA_NO_NODE);
+
if (!n)
goto out;
goto nodata;
/* Copy only real data... and, alas, header. This should be
- * optimized for the cases when header is void. */
- #ifdef NET_SKBUFF_DATA_USES_OFFSET
- memcpy(data + nhead, skb->head, skb->tail);
- #else
- memcpy(data + nhead, skb->head, skb->tail - skb->head);
- #endif
- memcpy(data + size, skb_end_pointer(skb),
+ * optimized for the cases when header is void.
+ */
+ memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);
+
+ memcpy((struct skb_shared_info *)(data + size),
+ skb_shinfo(skb),
offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
- skb_get_page(skb, skb_shinfo(skb)->frags[i].page);
+ /* Check if we can avoid taking references on fragments if we own
+ * the last reference on skb->head. (see skb_release_data())
+ */
+ if (!skb->cloned)
+ fastpath = true;
+ else {
+ int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
- if (skb_has_frags(skb))
- skb_clone_fraglist(skb);
+ fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
+ }
- skb_release_data(skb);
+ if (fastpath) {
+ kfree(skb->head);
+ } else {
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
- get_page(skb_shinfo(skb)->frags[i].page);
++ skb_get_page(skb, skb_shinfo(skb)->frags[i].page);
+ if (skb_has_frag_list(skb))
+ skb_clone_fraglist(skb);
+
+ skb_release_data(skb);
+ }
off = (data + nhead) - skb->head;
skb->head = data;
skb_shinfo(skb)->nr_frags = i;
for (; i < nfrags; i++)
- put_page(skb_shinfo(skb)->frags[i].page);
+ skb_put_page(skb, skb_shinfo(skb)->frags[i].page);
- if (skb_has_frags(skb))
+ if (skb_has_frag_list(skb))
skb_drop_fraglist(skb);
goto done;
}
out_ip6_prohibit_entry:
kfree(net->ipv6.ip6_prohibit_entry);
out_ip6_null_entry:
- kfree(net->ipv6.ip6_null_entry);
#endif
+ kfree(net->ipv6.ip6_null_entry);
+ out_ip6_dst_entries:
+ dst_entries_destroy(&net->ipv6.ip6_dst_ops);
out_ip6_dst_ops:
goto out;
}
spin_unlock(&xprt->reserve_lock);
}
+ struct rpc_xprt *xprt_alloc(struct net *net, int size, int max_req)
+ {
+ struct rpc_xprt *xprt;
+
+ xprt = kzalloc(size, GFP_KERNEL);
+ if (xprt == NULL)
+ goto out;
+
+ xprt->max_reqs = max_req;
- xprt->slot = kcalloc(max_req, sizeof(struct rpc_rqst), GFP_KERNEL);
++ xprt->slot = kcalloc(max_req, sizeof(struct rpc_rqst), GFP_KERNEL | __GFP_REPEAT);
+ if (xprt->slot == NULL)
+ goto out_free;
+
+ xprt->xprt_net = get_net(net);
+ return xprt;
+
+ out_free:
+ kfree(xprt);
+ out:
+ return NULL;
+ }
+ EXPORT_SYMBOL_GPL(xprt_alloc);
+
+ void xprt_free(struct rpc_xprt *xprt)
+ {
+ put_net(xprt->xprt_net);
+ kfree(xprt->slot);
+ kfree(xprt);
+ }
+ EXPORT_SYMBOL_GPL(xprt_free);
+
/**
* xprt_reserve - allocate an RPC request slot
* @task: RPC task requesting a slot allocation
static inline void xs_reclassify_socket6(struct socket *sock)
{
}
+
+ static inline void xs_reclassify_socket(int family, struct socket *sock)
+ {
+ }
#endif
+ static struct socket *xs_create_sock(struct rpc_xprt *xprt,
+ struct sock_xprt *transport, int family, int type, int protocol)
+ {
+ struct socket *sock;
+ int err;
+
+ err = __sock_create(xprt->xprt_net, family, type, protocol, &sock, 1);
+ if (err < 0) {
+ dprintk("RPC: can't create %d transport socket (%d).\n",
+ protocol, -err);
+ goto out;
+ }
+ xs_reclassify_socket(family, sock);
+
+ if (xs_bind(transport, sock)) {
+ sock_release(sock);
+ goto out;
+ }
+
+ return sock;
+ out:
+ return ERR_PTR(err);
+ }
+
+#ifdef CONFIG_SUNRPC_SWAP
+static void xs_set_memalloc(struct rpc_xprt *xprt)
+{
+ struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
+ xprt);
+
+ if (xprt->swapper)
+ sk_set_memalloc(transport->inet);
+}
+
+#define RPC_BUF_RESERVE_PAGES \
+ kmalloc_estimate_objs(sizeof(struct rpc_rqst), GFP_KERNEL, RPC_MAX_SLOT_TABLE)
+#define RPC_RESERVE_PAGES (RPC_BUF_RESERVE_PAGES + TX_RESERVE_PAGES)
+
+/**
+ * xs_swapper - Tag this transport as being used for swap.
+ * @xprt: transport to tag
+ * @enable: enable/disable
+ *
+ */
+int xs_swapper(struct rpc_xprt *xprt, int enable)
+{
+ struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
+ xprt);
+ int err = 0;
+
+ if (enable) {
+ /*
+ * keep one extra sock reference so the reserve won't dip
+ * when the socket gets reconnected.
+ */
+ err = sk_adjust_memalloc(1, RPC_RESERVE_PAGES);
+ if (!err) {
+ xprt->swapper++;
+ xs_set_memalloc(xprt);
+ }
+ } else if (xprt->swapper) {
+ xprt->swapper--;
+ sk_clear_memalloc(transport->inet);
+ sk_adjust_memalloc(-1, -RPC_RESERVE_PAGES);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(xs_swapper);
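+
+/*
+ * Illustrative sketch (the real caller is presumably the swap-over-NFS
+ * setup path, which is not part of this hunk): tag the transport before
+ * it is used for paging and untag it again on swapoff:
+ *
+ *	if (xs_swapper(xprt, 1))
+ *		... could not grow the memalloc reserve, refuse swapon ...
+ *	...
+ *	xs_swapper(xprt, 0);
+ */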
+#else
+static void xs_set_memalloc(struct rpc_xprt *xprt)
+{
+}
+#endif
+
static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
{
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
container_of(work, struct sock_xprt, connect_worker.work);
struct rpc_xprt *xprt = &transport->xprt;
struct socket *sock = transport->sock;
+ unsigned long pflags = current->flags;
- int err, status = -EIO;
+ int status = -EIO;
if (xprt->shutdown)
goto out;
+ if (xprt->swapper)
+ current->flags |= PF_MEMALLOC;
+
/* Start by resetting any existing state */
xs_reset_transport(transport);
-
- err = sock_create_kern(PF_INET6, SOCK_DGRAM, IPPROTO_UDP, &sock);
- if (err < 0) {
- dprintk("RPC: can't create UDP transport socket (%d).\n", -err);
- goto out;
- }
- xs_reclassify_socket6(sock);
-
- if (xs_bind6(transport, sock) < 0) {
- sock_release(sock);
+ sock = xs_create_sock(xprt, transport,
+ xs_addr(xprt)->sa_family, SOCK_DGRAM, IPPROTO_UDP);
+ if (IS_ERR(sock))
goto out;
- }
dprintk("RPC: worker connecting xprt %p via %s to "
"%s (port %s)\n", xprt,
*
* Invoked by a work queue tasklet.
*/
- static void xs_tcp_setup_socket(struct rpc_xprt *xprt,
- struct sock_xprt *transport,
- struct socket *(*create_sock)(struct rpc_xprt *,
- struct sock_xprt *))
+ static void xs_tcp_setup_socket(struct work_struct *work)
{
+ struct sock_xprt *transport =
+ container_of(work, struct sock_xprt, connect_worker.work);
struct socket *sock = transport->sock;
+ struct rpc_xprt *xprt = &transport->xprt;
+ unsigned long pflags = current->flags;
int status = -EIO;
if (xprt->shutdown)
goto out;
+ if (xprt->swapper)
+ current->flags |= PF_MEMALLOC;
+
if (!sock) {
clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
- sock = create_sock(xprt, transport);
+ sock = xs_create_sock(xprt, transport,
+ xs_addr(xprt)->sa_family, SOCK_STREAM, IPPROTO_TCP);
if (IS_ERR(sock)) {
status = PTR_ERR(sock);
goto out;
out:
xprt_clear_connecting(xprt);
xprt_wake_pending_tasks(xprt, status);
+ tsk_restore_flags(current, pflags, PF_MEMALLOC);
}
- static struct socket *xs_create_tcp_sock4(struct rpc_xprt *xprt,
- struct sock_xprt *transport)
- {
- struct socket *sock;
- int err;
-
- /* start from scratch */
- err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
- if (err < 0) {
- dprintk("RPC: can't create TCP transport socket (%d).\n",
- -err);
- goto out_err;
- }
- xs_reclassify_socket4(sock);
-
- if (xs_bind4(transport, sock) < 0) {
- sock_release(sock);
- goto out_err;
- }
- return sock;
- out_err:
- return ERR_PTR(-EIO);
- }
-
- /**
- * xs_tcp_connect_worker4 - connect a TCP socket to a remote endpoint
- * @work: RPC transport to connect
- *
- * Invoked by a work queue tasklet.
- */
- static void xs_tcp_connect_worker4(struct work_struct *work)
- {
- struct sock_xprt *transport =
- container_of(work, struct sock_xprt, connect_worker.work);
- struct rpc_xprt *xprt = &transport->xprt;
-
- xs_tcp_setup_socket(xprt, transport, xs_create_tcp_sock4);
- }
-
- static struct socket *xs_create_tcp_sock6(struct rpc_xprt *xprt,
- struct sock_xprt *transport)
- {
- struct socket *sock;
- int err;
-
- /* start from scratch */
- err = sock_create_kern(PF_INET6, SOCK_STREAM, IPPROTO_TCP, &sock);
- if (err < 0) {
- dprintk("RPC: can't create TCP transport socket (%d).\n",
- -err);
- goto out_err;
- }
- xs_reclassify_socket6(sock);
-
- if (xs_bind6(transport, sock) < 0) {
- sock_release(sock);
- goto out_err;
- }
- return sock;
- out_err:
- return ERR_PTR(-EIO);
- }
-
- /**
- * xs_tcp_connect_worker6 - connect a TCP socket to a remote endpoint
- * @work: RPC transport to connect
- *
- * Invoked by a work queue tasklet.
- */
- static void xs_tcp_connect_worker6(struct work_struct *work)
- {
- struct sock_xprt *transport =
- container_of(work, struct sock_xprt, connect_worker.work);
- struct rpc_xprt *xprt = &transport->xprt;
-
- xs_tcp_setup_socket(xprt, transport, xs_create_tcp_sock6);
- }
-
/**
* xs_connect - connect a socket to a remote endpoint
* @task: address of RPC task that manages state of connect request