Merge branch 'kmap_atomic' of git://github.com/congwang/linux
author		Linus Torvalds <torvalds@linux-foundation.org>
		Wed, 21 Mar 2012 16:40:26 +0000 (09:40 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Wed, 21 Mar 2012 16:40:26 +0000 (09:40 -0700)
Pull kmap_atomic cleanup from Cong Wang.

It's been in -next for a long time, and it gets rid of the (no longer
used) second argument to k[un]map_atomic().

Fix up a few trivial conflicts in various drivers, and do an "evil
merge" to catch some new uses that have come in since Cong's tree.
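
The conversion itself is mechanical.  As a minimal sketch (a hypothetical
helper, not code taken from the series), a typical caller changes like
this, with the old per-use km_type slots simply dropped:

	/* Before: callers picked explicit kmap slots. */
	static void copy_one_page(struct page *to, struct page *from)
	{
		void *kto   = kmap_atomic(to, KM_USER0);
		void *kfrom = kmap_atomic(from, KM_USER1);

		copy_page(kto, kfrom);
		kunmap_atomic(kfrom, KM_USER1);
		kunmap_atomic(kto, KM_USER0);
	}

	/* After: the stack-based implementation assigns slots itself. */
	static void copy_one_page(struct page *to, struct page *from)
	{
		void *kto   = kmap_atomic(to);
		void *kfrom = kmap_atomic(from);

		copy_page(kto, kfrom);
		kunmap_atomic(kfrom);
		kunmap_atomic(kto);
	}

Note the unmap order: because the implementation is a per-CPU stack,
nested mappings must be released last-in-first-out, which the converted
callers below all preserve.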

* 'kmap_atomic' of git://github.com/congwang/linux: (59 commits)
  feature-removal-schedule.txt: schedule the deprecated form of kmap_atomic() for removal
  highmem: kill all __kmap_atomic() [swarren@nvidia.com: highmem: Fix ARM build break due to __kmap_atomic rename]
  drbd: remove the second argument of k[un]map_atomic()
  zcache: remove the second argument of k[un]map_atomic()
  gma500: remove the second argument of k[un]map_atomic()
  dm: remove the second argument of k[un]map_atomic()
  tomoyo: remove the second argument of k[un]map_atomic()
  sunrpc: remove the second argument of k[un]map_atomic()
  rds: remove the second argument of k[un]map_atomic()
  net: remove the second argument of k[un]map_atomic()
  mm: remove the second argument of k[un]map_atomic()
  lib: remove the second argument of k[un]map_atomic()
  power: remove the second argument of k[un]map_atomic()
  kdb: remove the second argument of k[un]map_atomic()
  udf: remove the second argument of k[un]map_atomic()
  ubifs: remove the second argument of k[un]map_atomic()
  squashfs: remove the second argument of k[un]map_atomic()
  reiserfs: remove the second argument of k[un]map_atomic()
  ocfs2: remove the second argument of k[un]map_atomic()
  ntfs: remove the second argument of k[un]map_atomic()
  ...

180 files changed:
Documentation/feature-removal-schedule.txt
arch/arm/include/asm/highmem.h
arch/arm/mm/copypage-fa.c
arch/arm/mm/copypage-feroceon.c
arch/arm/mm/copypage-v3.c
arch/arm/mm/copypage-v4mc.c
arch/arm/mm/copypage-v4wb.c
arch/arm/mm/copypage-v4wt.c
arch/arm/mm/copypage-v6.c
arch/arm/mm/copypage-xsc3.c
arch/arm/mm/copypage-xscale.c
arch/arm/mm/highmem.c
arch/frv/include/asm/highmem.h
arch/frv/mm/highmem.c
arch/mips/include/asm/highmem.h
arch/mips/mm/c-r4k.c
arch/mips/mm/highmem.c
arch/mips/mm/init.c
arch/mn10300/include/asm/highmem.h
arch/parisc/include/asm/cacheflush.h
arch/powerpc/include/asm/highmem.h
arch/powerpc/kvm/book3s_pr.c
arch/powerpc/mm/dma-noncoherent.c
arch/powerpc/mm/hugetlbpage.c
arch/powerpc/mm/mem.c
arch/sh/mm/cache-sh4.c
arch/sh/mm/cache.c
arch/sparc/include/asm/highmem.h
arch/sparc/mm/highmem.c
arch/tile/include/asm/highmem.h
arch/tile/mm/highmem.c
arch/um/kernel/skas/uaccess.c
arch/x86/crypto/aesni-intel_glue.c
arch/x86/include/asm/highmem.h
arch/x86/kernel/crash_dump_32.c
arch/x86/kvm/lapic.c
arch/x86/kvm/paging_tmpl.h
arch/x86/kvm/x86.c
arch/x86/lib/usercopy_32.c
arch/x86/mm/highmem_32.c
crypto/ahash.c
crypto/async_tx/async_memcpy.c
crypto/blkcipher.c
crypto/ccm.c
crypto/scatterwalk.c
crypto/shash.c
drivers/ata/libata-sff.c
drivers/block/brd.c
drivers/block/drbd/drbd_bitmap.c
drivers/block/drbd/drbd_nl.c
drivers/block/loop.c
drivers/block/pktcdvd.c
drivers/crypto/hifn_795x.c
drivers/edac/edac_mc.c
drivers/gpu/drm/drm_cache.c
drivers/gpu/drm/gma500/mmu.c
drivers/gpu/drm/ttm/ttm_tt.c
drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
drivers/ide/ide-taskfile.c
drivers/infiniband/ulp/iser/iser_memory.c
drivers/md/bitmap.c
drivers/md/dm-crypt.c
drivers/media/video/ivtv/ivtv-udma.c
drivers/memstick/host/jmb38x_ms.c
drivers/memstick/host/tifm_ms.c
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/sun/cassini.c
drivers/scsi/arcmsr/arcmsr_hba.c
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/cxgbi/libcxgbi.c
drivers/scsi/fcoe/fcoe.c
drivers/scsi/fcoe/fcoe_transport.c
drivers/scsi/gdth.c
drivers/scsi/ips.c
drivers/scsi/isci/request.c
drivers/scsi/libfc/fc_fcp.c
drivers/scsi/libfc/fc_libfc.c
drivers/scsi/libfc/fc_libfc.h
drivers/scsi/libfc/fc_lport.c
drivers/scsi/libiscsi_tcp.c
drivers/scsi/libsas/sas_host_smp.c
drivers/scsi/megaraid.c
drivers/scsi/mvsas/mv_sas.c
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_lib.c
drivers/scsi/sd_dif.c
drivers/scsi/storvsc_drv.c
drivers/staging/ramster/xvmalloc.c
drivers/staging/ramster/zcache-main.c
drivers/staging/rtl8192u/ieee80211/cipher.c
drivers/staging/rtl8192u/ieee80211/digest.c
drivers/staging/rtl8192u/ieee80211/internal.h
drivers/staging/rtl8192u/ieee80211/kmap_types.h [deleted file]
drivers/staging/rtl8192u/ieee80211/scatterwalk.c
drivers/staging/zcache/zcache-main.c
drivers/staging/zram/zram_drv.c
drivers/target/target_core_transport.c
drivers/target/tcm_fc/tfc_io.c
drivers/vhost/vhost.c
fs/afs/fsclient.c
fs/afs/mntpt.c
fs/aio.c
fs/bio-integrity.c
fs/btrfs/compression.c
fs/btrfs/extent_io.c
fs/btrfs/file-item.c
fs/btrfs/inode.c
fs/btrfs/lzo.c
fs/btrfs/scrub.c
fs/btrfs/zlib.c
fs/exec.c
fs/exofs/dir.c
fs/ext2/dir.c
fs/fuse/dev.c
fs/fuse/file.c
fs/gfs2/aops.c
fs/gfs2/lops.c
fs/gfs2/quota.c
fs/jbd/journal.c
fs/jbd/transaction.c
fs/jbd2/commit.c
fs/jbd2/journal.c
fs/jbd2/transaction.c
fs/logfs/dir.c
fs/logfs/readwrite.c
fs/logfs/segment.c
fs/minix/dir.c
fs/namei.c
fs/nfs/dir.c
fs/nfs/nfs4proc.c
fs/nilfs2/cpfile.c
fs/nilfs2/dat.c
fs/nilfs2/dir.c
fs/nilfs2/ifile.c
fs/nilfs2/mdt.c
fs/nilfs2/page.c
fs/nilfs2/recovery.c
fs/nilfs2/segbuf.c
fs/nilfs2/sufile.c
fs/ntfs/aops.c
fs/ntfs/attrib.c
fs/ntfs/file.c
fs/ntfs/super.c
fs/ocfs2/aops.c
fs/pipe.c
fs/reiserfs/stree.c
fs/reiserfs/tail_conversion.c
fs/splice.c
fs/squashfs/file.c
fs/squashfs/symlink.c
fs/ubifs/file.c
fs/udf/file.c
include/crypto/scatterwalk.h
include/linux/bio.h
include/linux/highmem.h
kernel/debug/kdb/kdb_support.c
kernel/power/snapshot.c
lib/scatterlist.c
lib/swiotlb.c
mm/bounce.c
mm/filemap.c
mm/ksm.c
mm/memory.c
mm/shmem.c
mm/swapfile.c
mm/vmalloc.c
net/core/kmap_skb.h
net/rds/ib_recv.c
net/rds/info.c
net/rds/iw_recv.c
net/rds/loop.c
net/rds/rds.h
net/rds/recv.c
net/rds/tcp_recv.c
net/sunrpc/auth_gss/gss_krb5_wrap.c
net/sunrpc/socklib.c
net/sunrpc/xdr.c
net/sunrpc/xprtrdma/rpc_rdma.c
security/tomoyo/domain.c

diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index d5dc80f..4bfd982 100644
@@ -535,3 +535,11 @@ Why:       This driver provides support for USB storage devices like "USB
         (CONFIG_USB_STORAGE) which only drawback is the additional SCSI
         stack.
 Who:   Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
+
+----------------------------
+
+What:  kmap_atomic(page, km_type)
+When:  3.5
+Why:   The old kmap_atomic() with two arguments is deprecated; we keep it
+       only for backward compatibility for a few cycles, then drop it.
+Who:   Cong Wang <amwang@redhat.com>
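
The backward-compatibility window mentioned above is provided by
dispatching on argument count in the preprocessor; the transitional
wrapper in include/linux/highmem.h is of roughly this shape (a sketch,
not the verbatim kernel macro; kunmap_atomic() gets similar treatment):

	/* Sketch: count the arguments, then paste to pick a handler. */
	#define PASTE(a, b)  a ## b
	#define PASTE2(a, b) PASTE(a, b)

	#define NARG_(_2, _1, n, ...) n
	#define NARG(...) NARG_(__VA_ARGS__, 2, 1, :)

	void *kmap_atomic(struct page *page);	/* the real one-argument API */

	/* Deprecated two-argument form: accept and ignore the slot. */
	static inline void __deprecated *kmap_atomic_deprecated(struct page *page,
								enum km_type km)
	{
		return kmap_atomic(page);
	}

	#define kmap_atomic1(...) kmap_atomic(__VA_ARGS__)
	#define kmap_atomic2(...) kmap_atomic_deprecated(__VA_ARGS__)
	#define kmap_atomic(...) \
		PASTE2(kmap_atomic, NARG(__VA_ARGS__))(__VA_ARGS__)

Old-style callers keep compiling but now trigger a __deprecated warning,
which is what makes the removal scheduled above enforceable.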
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index a4edd19..8c5e828 100644
@@ -57,7 +57,7 @@ static inline void *kmap_high_get(struct page *page)
 #ifdef CONFIG_HIGHMEM
 extern void *kmap(struct page *page);
 extern void kunmap(struct page *page);
-extern void *__kmap_atomic(struct page *page);
+extern void *kmap_atomic(struct page *page);
 extern void __kunmap_atomic(void *kvaddr);
 extern void *kmap_atomic_pfn(unsigned long pfn);
 extern struct page *kmap_atomic_to_page(const void *ptr);
diff --git a/arch/arm/mm/copypage-fa.c b/arch/arm/mm/copypage-fa.c
index d2852e1..d130a5e 100644
@@ -44,11 +44,11 @@ void fa_copy_user_highpage(struct page *to, struct page *from,
 {
        void *kto, *kfrom;
 
-       kto = kmap_atomic(to, KM_USER0);
-       kfrom = kmap_atomic(from, KM_USER1);
+       kto = kmap_atomic(to);
+       kfrom = kmap_atomic(from);
        fa_copy_user_page(kto, kfrom);
-       kunmap_atomic(kfrom, KM_USER1);
-       kunmap_atomic(kto, KM_USER0);
+       kunmap_atomic(kfrom);
+       kunmap_atomic(kto);
 }
 
 /*
@@ -58,7 +58,7 @@ void fa_copy_user_highpage(struct page *to, struct page *from,
  */
 void fa_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-       void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+       void *ptr, *kaddr = kmap_atomic(page);
        asm volatile("\
        mov     r1, %2                          @ 1\n\
        mov     r2, #0                          @ 1\n\
@@ -77,7 +77,7 @@ void fa_clear_user_highpage(struct page *page, unsigned long vaddr)
        : "=r" (ptr)
        : "0" (kaddr), "I" (PAGE_SIZE / 32)
        : "r1", "r2", "r3", "ip", "lr");
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns fa_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c
index ac163de..49ee0c1 100644
@@ -72,17 +72,17 @@ void feroceon_copy_user_highpage(struct page *to, struct page *from,
 {
        void *kto, *kfrom;
 
-       kto = kmap_atomic(to, KM_USER0);
-       kfrom = kmap_atomic(from, KM_USER1);
+       kto = kmap_atomic(to);
+       kfrom = kmap_atomic(from);
        flush_cache_page(vma, vaddr, page_to_pfn(from));
        feroceon_copy_user_page(kto, kfrom);
-       kunmap_atomic(kfrom, KM_USER1);
-       kunmap_atomic(kto, KM_USER0);
+       kunmap_atomic(kfrom);
+       kunmap_atomic(kto);
 }
 
 void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-       void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+       void *ptr, *kaddr = kmap_atomic(page);
        asm volatile ("\
        mov     r1, %2                          \n\
        mov     r2, #0                          \n\
@@ -102,7 +102,7 @@ void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr)
        : "=r" (ptr)
        : "0" (kaddr), "I" (PAGE_SIZE / 32)
        : "r1", "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr");
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns feroceon_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c
index f72303e..3935bdd 100644
@@ -42,11 +42,11 @@ void v3_copy_user_highpage(struct page *to, struct page *from,
 {
        void *kto, *kfrom;
 
-       kto = kmap_atomic(to, KM_USER0);
-       kfrom = kmap_atomic(from, KM_USER1);
+       kto = kmap_atomic(to);
+       kfrom = kmap_atomic(from);
        v3_copy_user_page(kto, kfrom);
-       kunmap_atomic(kfrom, KM_USER1);
-       kunmap_atomic(kto, KM_USER0);
+       kunmap_atomic(kfrom);
+       kunmap_atomic(kto);
 }
 
 /*
@@ -56,7 +56,7 @@ void v3_copy_user_highpage(struct page *to, struct page *from,
  */
 void v3_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-       void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+       void *ptr, *kaddr = kmap_atomic(page);
        asm volatile("\n\
        mov     r1, %2                          @ 1\n\
        mov     r2, #0                          @ 1\n\
@@ -72,7 +72,7 @@ void v3_clear_user_highpage(struct page *page, unsigned long vaddr)
        : "=r" (ptr)
        : "0" (kaddr), "I" (PAGE_SIZE / 64)
        : "r1", "r2", "r3", "ip", "lr");
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns v3_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 7d0a8c2..ec8c3be 100644
@@ -71,7 +71,7 @@ mc_copy_user_page(void *from, void *to)
 void v4_mc_copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
 {
-       void *kto = kmap_atomic(to, KM_USER1);
+       void *kto = kmap_atomic(to);
 
        if (!test_and_set_bit(PG_dcache_clean, &from->flags))
                __flush_dcache_page(page_mapping(from), from);
@@ -85,7 +85,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
 
        raw_spin_unlock(&minicache_lock);
 
-       kunmap_atomic(kto, KM_USER1);
+       kunmap_atomic(kto);
 }
 
 /*
@@ -93,7 +93,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
  */
 void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-       void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+       void *ptr, *kaddr = kmap_atomic(page);
        asm volatile("\
        mov     r1, %2                          @ 1\n\
        mov     r2, #0                          @ 1\n\
@@ -111,7 +111,7 @@ void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
        : "=r" (ptr)
        : "0" (kaddr), "I" (PAGE_SIZE / 64)
        : "r1", "r2", "r3", "ip", "lr");
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns v4_mc_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
index cb589cb..067d0fd 100644
@@ -52,12 +52,12 @@ void v4wb_copy_user_highpage(struct page *to, struct page *from,
 {
        void *kto, *kfrom;
 
-       kto = kmap_atomic(to, KM_USER0);
-       kfrom = kmap_atomic(from, KM_USER1);
+       kto = kmap_atomic(to);
+       kfrom = kmap_atomic(from);
        flush_cache_page(vma, vaddr, page_to_pfn(from));
        v4wb_copy_user_page(kto, kfrom);
-       kunmap_atomic(kfrom, KM_USER1);
-       kunmap_atomic(kto, KM_USER0);
+       kunmap_atomic(kfrom);
+       kunmap_atomic(kto);
 }
 
 /*
@@ -67,7 +67,7 @@ void v4wb_copy_user_highpage(struct page *to, struct page *from,
  */
 void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-       void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+       void *ptr, *kaddr = kmap_atomic(page);
        asm volatile("\
        mov     r1, %2                          @ 1\n\
        mov     r2, #0                          @ 1\n\
@@ -86,7 +86,7 @@ void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
        : "=r" (ptr)
        : "0" (kaddr), "I" (PAGE_SIZE / 64)
        : "r1", "r2", "r3", "ip", "lr");
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns v4wb_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
index 30c7d04..b85c5da 100644
@@ -48,11 +48,11 @@ void v4wt_copy_user_highpage(struct page *to, struct page *from,
 {
        void *kto, *kfrom;
 
-       kto = kmap_atomic(to, KM_USER0);
-       kfrom = kmap_atomic(from, KM_USER1);
+       kto = kmap_atomic(to);
+       kfrom = kmap_atomic(from);
        v4wt_copy_user_page(kto, kfrom);
-       kunmap_atomic(kfrom, KM_USER1);
-       kunmap_atomic(kto, KM_USER0);
+       kunmap_atomic(kfrom);
+       kunmap_atomic(kto);
 }
 
 /*
@@ -62,7 +62,7 @@ void v4wt_copy_user_highpage(struct page *to, struct page *from,
  */
 void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-       void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+       void *ptr, *kaddr = kmap_atomic(page);
        asm volatile("\
        mov     r1, %2                          @ 1\n\
        mov     r2, #0                          @ 1\n\
@@ -79,7 +79,7 @@ void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr)
        : "=r" (ptr)
        : "0" (kaddr), "I" (PAGE_SIZE / 64)
        : "r1", "r2", "r3", "ip", "lr");
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns v4wt_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 3d9a155..8b03a58 100644
@@ -38,11 +38,11 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
 {
        void *kto, *kfrom;
 
-       kfrom = kmap_atomic(from, KM_USER0);
-       kto = kmap_atomic(to, KM_USER1);
+       kfrom = kmap_atomic(from);
+       kto = kmap_atomic(to);
        copy_page(kto, kfrom);
-       kunmap_atomic(kto, KM_USER1);
-       kunmap_atomic(kfrom, KM_USER0);
+       kunmap_atomic(kto);
+       kunmap_atomic(kfrom);
 }
 
 /*
@@ -51,9 +51,9 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
  */
 static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
 {
-       void *kaddr = kmap_atomic(page, KM_USER0);
+       void *kaddr = kmap_atomic(page);
        clear_page(kaddr);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 }
 
 /*
diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c
index f9cde07..03a2042 100644
@@ -75,12 +75,12 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
 {
        void *kto, *kfrom;
 
-       kto = kmap_atomic(to, KM_USER0);
-       kfrom = kmap_atomic(from, KM_USER1);
+       kto = kmap_atomic(to);
+       kfrom = kmap_atomic(from);
        flush_cache_page(vma, vaddr, page_to_pfn(from));
        xsc3_mc_copy_user_page(kto, kfrom);
-       kunmap_atomic(kfrom, KM_USER1);
-       kunmap_atomic(kto, KM_USER0);
+       kunmap_atomic(kfrom);
+       kunmap_atomic(kto);
 }
 
 /*
@@ -90,7 +90,7 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
  */
 void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-       void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+       void *ptr, *kaddr = kmap_atomic(page);
        asm volatile ("\
        mov     r1, %2                          \n\
        mov     r2, #0                          \n\
@@ -105,7 +105,7 @@ void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
        : "=r" (ptr)
        : "0" (kaddr), "I" (PAGE_SIZE / 32)
        : "r1", "r2", "r3");
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns xsc3_mc_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 610c24c..439d106 100644
@@ -93,7 +93,7 @@ mc_copy_user_page(void *from, void *to)
 void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
 {
-       void *kto = kmap_atomic(to, KM_USER1);
+       void *kto = kmap_atomic(to);
 
        if (!test_and_set_bit(PG_dcache_clean, &from->flags))
                __flush_dcache_page(page_mapping(from), from);
@@ -107,7 +107,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 
        raw_spin_unlock(&minicache_lock);
 
-       kunmap_atomic(kto, KM_USER1);
+       kunmap_atomic(kto);
 }
 
 /*
@@ -116,7 +116,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 void
 xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-       void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+       void *ptr, *kaddr = kmap_atomic(page);
        asm volatile(
        "mov    r1, %2                          \n\
        mov     r2, #0                          \n\
@@ -133,7 +133,7 @@ xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
        : "=r" (ptr)
        : "0" (kaddr), "I" (PAGE_SIZE / 32)
        : "r1", "r2", "r3", "ip");
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns xscale_mc_user_fns __initdata = {
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 807c057..5a21505 100644
@@ -36,7 +36,7 @@ void kunmap(struct page *page)
 }
 EXPORT_SYMBOL(kunmap);
 
-void *__kmap_atomic(struct page *page)
+void *kmap_atomic(struct page *page)
 {
        unsigned int idx;
        unsigned long vaddr;
@@ -81,7 +81,7 @@ void *__kmap_atomic(struct page *page)
 
        return (void *)vaddr;
 }
-EXPORT_SYMBOL(__kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic);
 
 void __kunmap_atomic(void *kvaddr)
 {
diff --git a/arch/frv/include/asm/highmem.h b/arch/frv/include/asm/highmem.h
index a8d6565..716956a 100644
@@ -157,7 +157,7 @@ static inline void kunmap_atomic_primary(void *kvaddr, enum km_type type)
        pagefault_enable();
 }
 
-void *__kmap_atomic(struct page *page);
+void *kmap_atomic(struct page *page);
 void __kunmap_atomic(void *kvaddr);
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/frv/mm/highmem.c b/arch/frv/mm/highmem.c
index fd7fcd4..31902c9 100644
@@ -37,7 +37,7 @@ struct page *kmap_atomic_to_page(void *ptr)
        return virt_to_page(ptr);
 }
 
-void *__kmap_atomic(struct page *page)
+void *kmap_atomic(struct page *page)
 {
        unsigned long paddr;
        int type;
@@ -64,7 +64,7 @@ void *__kmap_atomic(struct page *page)
                return NULL;
        }
 }
-EXPORT_SYMBOL(__kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic);
 
 void __kunmap_atomic(void *kvaddr)
 {
diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h
index 77e6440..2d91888 100644
@@ -47,7 +47,7 @@ extern void kunmap_high(struct page *page);
 
 extern void *kmap(struct page *page);
 extern void kunmap(struct page *page);
-extern void *__kmap_atomic(struct page *page);
+extern void *kmap_atomic(struct page *page);
 extern void __kunmap_atomic(void *kvaddr);
 extern void *kmap_atomic_pfn(unsigned long pfn);
 extern struct page *kmap_atomic_to_page(void *ptr);
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 4f9eb0b..c97087d 100644
@@ -498,7 +498,7 @@ static inline void local_r4k_flush_cache_page(void *args)
                if (map_coherent)
                        vaddr = kmap_coherent(page, addr);
                else
-                       vaddr = kmap_atomic(page, KM_USER0);
+                       vaddr = kmap_atomic(page);
                addr = (unsigned long)vaddr;
        }
 
@@ -521,7 +521,7 @@ static inline void local_r4k_flush_cache_page(void *args)
                if (map_coherent)
                        kunmap_coherent();
                else
-                       kunmap_atomic(vaddr, KM_USER0);
+                       kunmap_atomic(vaddr);
        }
 }
 
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index 3634c7e..aff5705 100644
@@ -41,7 +41,7 @@ EXPORT_SYMBOL(kunmap);
  * kmaps are appropriate for short, tight code paths only.
  */
 
-void *__kmap_atomic(struct page *page)
+void *kmap_atomic(struct page *page)
 {
        unsigned long vaddr;
        int idx, type;
@@ -62,7 +62,7 @@ void *__kmap_atomic(struct page *page)
 
        return (void*) vaddr;
 }
-EXPORT_SYMBOL(__kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic);
 
 void __kunmap_atomic(void *kvaddr)
 {
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 3b3ffd4..1a85ba9 100644
@@ -207,21 +207,21 @@ void copy_user_highpage(struct page *to, struct page *from,
 {
        void *vfrom, *vto;
 
-       vto = kmap_atomic(to, KM_USER1);
+       vto = kmap_atomic(to);
        if (cpu_has_dc_aliases &&
            page_mapped(from) && !Page_dcache_dirty(from)) {
                vfrom = kmap_coherent(from, vaddr);
                copy_page(vto, vfrom);
                kunmap_coherent();
        } else {
-               vfrom = kmap_atomic(from, KM_USER0);
+               vfrom = kmap_atomic(from);
                copy_page(vto, vfrom);
-               kunmap_atomic(vfrom, KM_USER0);
+               kunmap_atomic(vfrom);
        }
        if ((!cpu_has_ic_fills_f_dc) ||
            pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
                flush_data_cache_page((unsigned long)vto);
-       kunmap_atomic(vto, KM_USER1);
+       kunmap_atomic(vto);
        /* Make sure this page is cleared on other CPU's too before using it */
        smp_wmb();
 }
diff --git a/arch/mn10300/include/asm/highmem.h b/arch/mn10300/include/asm/highmem.h
index bfe2d88..7c137cd 100644
@@ -70,7 +70,7 @@ static inline void kunmap(struct page *page)
  * be used in IRQ contexts, so in some (very limited) cases we need
  * it.
  */
-static inline unsigned long __kmap_atomic(struct page *page)
+static inline unsigned long kmap_atomic(struct page *page)
 {
        unsigned long vaddr;
        int idx, type;
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index da601dd..9f21ab0 100644
@@ -140,7 +140,7 @@ static inline void *kmap(struct page *page)
 
 #define kunmap(page)                   kunmap_parisc(page_address(page))
 
-static inline void *__kmap_atomic(struct page *page)
+static inline void *kmap_atomic(struct page *page)
 {
        pagefault_disable();
        return page_address(page);
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index dbc2640..caaf6e0 100644
@@ -79,7 +79,7 @@ static inline void kunmap(struct page *page)
        kunmap_high(page);
 }
 
-static inline void *__kmap_atomic(struct page *page)
+static inline void *kmap_atomic(struct page *page)
 {
        return kmap_atomic_prot(page, kmap_prot);
 }
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index e2cfb9e..220fcdf 100644
@@ -227,14 +227,14 @@ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
        hpage_offset /= 4;
 
        get_page(hpage);
-       page = kmap_atomic(hpage, KM_USER0);
+       page = kmap_atomic(hpage);
 
        /* patch dcbz into reserved instruction, so we trap */
        for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
                if ((page[i] & 0xff0007ff) == INS_DCBZ)
                        page[i] &= 0xfffffff7;
 
-       kunmap_atomic(page, KM_USER0);
+       kunmap_atomic(page);
        put_page(hpage);
 }
 
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c
index 329be36..6747eec 100644
@@ -365,12 +365,11 @@ static inline void __dma_sync_page_highmem(struct page *page,
        local_irq_save(flags);
 
        do {
-               start = (unsigned long)kmap_atomic(page + seg_nr,
-                               KM_PPC_SYNC_PAGE) + seg_offset;
+               start = (unsigned long)kmap_atomic(page + seg_nr) + seg_offset;
 
                /* Sync this buffer segment */
                __dma_sync((void *)start, seg_size, direction);
-               kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE);
+               kunmap_atomic((void *)start);
                seg_nr++;
 
                /* Calculate next buffer segment size */
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index a8b3cc7..57c7465 100644
@@ -910,9 +910,9 @@ void flush_dcache_icache_hugepage(struct page *page)
                if (!PageHighMem(page)) {
                        __flush_dcache_icache(page_address(page+i));
                } else {
-                       start = kmap_atomic(page+i, KM_PPC_SYNC_ICACHE);
+                       start = kmap_atomic(page+i);
                        __flush_dcache_icache(start);
-                       kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
+                       kunmap_atomic(start);
                }
        }
 }
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index d974b79..baaafde 100644
@@ -458,9 +458,9 @@ void flush_dcache_icache_page(struct page *page)
 #endif
 #ifdef CONFIG_BOOKE
        {
-               void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
+               void *start = kmap_atomic(page);
                __flush_dcache_icache(start);
-               kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
+               kunmap_atomic(start);
        }
 #elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
        /* On 8xx there is no need to kmap since highmem is not supported */
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 92eb986..112fea1 100644
@@ -244,7 +244,7 @@ static void sh4_flush_cache_page(void *args)
                if (map_coherent)
                        vaddr = kmap_coherent(page, address);
                else
-                       vaddr = kmap_atomic(page, KM_USER0);
+                       vaddr = kmap_atomic(page);
 
                address = (unsigned long)vaddr;
        }
@@ -259,7 +259,7 @@ static void sh4_flush_cache_page(void *args)
                if (map_coherent)
                        kunmap_coherent(vaddr);
                else
-                       kunmap_atomic(vaddr, KM_USER0);
+                       kunmap_atomic(vaddr);
        }
 }
 
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 5a580ea..616966a 100644
@@ -95,7 +95,7 @@ void copy_user_highpage(struct page *to, struct page *from,
 {
        void *vfrom, *vto;
 
-       vto = kmap_atomic(to, KM_USER1);
+       vto = kmap_atomic(to);
 
        if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
            test_bit(PG_dcache_clean, &from->flags)) {
@@ -103,16 +103,16 @@ void copy_user_highpage(struct page *to, struct page *from,
                copy_page(vto, vfrom);
                kunmap_coherent(vfrom);
        } else {
-               vfrom = kmap_atomic(from, KM_USER0);
+               vfrom = kmap_atomic(from);
                copy_page(vto, vfrom);
-               kunmap_atomic(vfrom, KM_USER0);
+               kunmap_atomic(vfrom);
        }
 
        if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
            (vma->vm_flags & VM_EXEC))
                __flush_purge_region(vto, PAGE_SIZE);
 
-       kunmap_atomic(vto, KM_USER1);
+       kunmap_atomic(vto);
        /* Make sure this page is cleared on other CPU's too before using it */
        smp_wmb();
 }
@@ -120,14 +120,14 @@ EXPORT_SYMBOL(copy_user_highpage);
 
 void clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-       void *kaddr = kmap_atomic(page, KM_USER0);
+       void *kaddr = kmap_atomic(page);
 
        clear_page(kaddr);
 
        if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
                __flush_purge_region(kaddr, PAGE_SIZE);
 
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 }
 EXPORT_SYMBOL(clear_user_highpage);
 
diff --git a/arch/sparc/include/asm/highmem.h b/arch/sparc/include/asm/highmem.h
index 3d7afbb..3b6e00d 100644
@@ -70,7 +70,7 @@ static inline void kunmap(struct page *page)
        kunmap_high(page);
 }
 
-extern void *__kmap_atomic(struct page *page);
+extern void *kmap_atomic(struct page *page);
 extern void __kunmap_atomic(void *kvaddr);
 extern struct page *kmap_atomic_to_page(void *vaddr);
 
diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c
index 77140a0..055c66c 100644
@@ -30,7 +30,7 @@
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
 
-void *__kmap_atomic(struct page *page)
+void *kmap_atomic(struct page *page)
 {
        unsigned long vaddr;
        long idx, type;
@@ -64,7 +64,7 @@ void *__kmap_atomic(struct page *page)
 
        return (void*) vaddr;
 }
-EXPORT_SYMBOL(__kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic);
 
 void __kunmap_atomic(void *kvaddr)
 {
diff --git a/arch/tile/include/asm/highmem.h b/arch/tile/include/asm/highmem.h
index b2a6c5d..fc8429a 100644
@@ -59,7 +59,7 @@ void *kmap_fix_kpte(struct page *page, int finished);
 /* This macro is used only in map_new_virtual() to map "page". */
 #define kmap_prot page_to_kpgprot(page)
 
-void *__kmap_atomic(struct page *page);
+void *kmap_atomic(struct page *page);
 void __kunmap_atomic(void *kvaddr);
 void *kmap_atomic_pfn(unsigned long pfn);
 void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c
index 31dbbd9..ef8e5a6 100644
@@ -224,12 +224,12 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 }
 EXPORT_SYMBOL(kmap_atomic_prot);
 
-void *__kmap_atomic(struct page *page)
+void *kmap_atomic(struct page *page)
 {
        /* PAGE_NONE is a magic value that tells us to check immutability. */
        return kmap_atomic_prot(page, PAGE_NONE);
 }
-EXPORT_SYMBOL(__kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic);
 
 void __kunmap_atomic(void *kvaddr)
 {
diff --git a/arch/um/kernel/skas/uaccess.c b/arch/um/kernel/skas/uaccess.c
index 9fefd92..cd7df79 100644
@@ -69,7 +69,7 @@ static int do_op_one_page(unsigned long addr, int len, int is_write,
                return -1;
 
        page = pte_page(*pte);
-       addr = (unsigned long) kmap_atomic(page, KM_UML_USERCOPY) +
+       addr = (unsigned long) kmap_atomic(page) +
                (addr & ~PAGE_MASK);
 
        current->thread.fault_catcher = &buf;
@@ -82,7 +82,7 @@ static int do_op_one_page(unsigned long addr, int len, int is_write,
 
        current->thread.fault_catcher = NULL;
 
-       kunmap_atomic((void *)addr, KM_UML_USERCOPY);
+       kunmap_atomic((void *)addr);
 
        return n;
 }
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index b3350bd..c799352 100644
@@ -1108,12 +1108,12 @@ static int __driver_rfc4106_encrypt(struct aead_request *req)
                one_entry_in_sg = 1;
                scatterwalk_start(&src_sg_walk, req->src);
                scatterwalk_start(&assoc_sg_walk, req->assoc);
-               src = scatterwalk_map(&src_sg_walk, 0);
-               assoc = scatterwalk_map(&assoc_sg_walk, 0);
+               src = scatterwalk_map(&src_sg_walk);
+               assoc = scatterwalk_map(&assoc_sg_walk);
                dst = src;
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_start(&dst_sg_walk, req->dst);
-                       dst = scatterwalk_map(&dst_sg_walk, 0);
+                       dst = scatterwalk_map(&dst_sg_walk);
                }
 
        } else {
@@ -1137,11 +1137,11 @@ static int __driver_rfc4106_encrypt(struct aead_request *req)
         * back to the packet. */
        if (one_entry_in_sg) {
                if (unlikely(req->src != req->dst)) {
-                       scatterwalk_unmap(dst, 0);
+                       scatterwalk_unmap(dst);
                        scatterwalk_done(&dst_sg_walk, 0, 0);
                }
-               scatterwalk_unmap(src, 0);
-               scatterwalk_unmap(assoc, 0);
+               scatterwalk_unmap(src);
+               scatterwalk_unmap(assoc);
                scatterwalk_done(&src_sg_walk, 0, 0);
                scatterwalk_done(&assoc_sg_walk, 0, 0);
        } else {
@@ -1190,12 +1190,12 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
                one_entry_in_sg = 1;
                scatterwalk_start(&src_sg_walk, req->src);
                scatterwalk_start(&assoc_sg_walk, req->assoc);
-               src = scatterwalk_map(&src_sg_walk, 0);
-               assoc = scatterwalk_map(&assoc_sg_walk, 0);
+               src = scatterwalk_map(&src_sg_walk);
+               assoc = scatterwalk_map(&assoc_sg_walk);
                dst = src;
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_start(&dst_sg_walk, req->dst);
-                       dst = scatterwalk_map(&dst_sg_walk, 0);
+                       dst = scatterwalk_map(&dst_sg_walk);
                }
 
        } else {
@@ -1220,11 +1220,11 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
 
        if (one_entry_in_sg) {
                if (unlikely(req->src != req->dst)) {
-                       scatterwalk_unmap(dst, 0);
+                       scatterwalk_unmap(dst);
                        scatterwalk_done(&dst_sg_walk, 0, 0);
                }
-               scatterwalk_unmap(src, 0);
-               scatterwalk_unmap(assoc, 0);
+               scatterwalk_unmap(src);
+               scatterwalk_unmap(assoc);
                scatterwalk_done(&src_sg_walk, 0, 0);
                scatterwalk_done(&assoc_sg_walk, 0, 0);
        } else {
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
index 3bd0402..302a323 100644
@@ -61,7 +61,7 @@ void *kmap(struct page *page);
 void kunmap(struct page *page);
 
 void *kmap_atomic_prot(struct page *page, pgprot_t prot);
-void *__kmap_atomic(struct page *page);
+void *kmap_atomic(struct page *page);
 void __kunmap_atomic(void *kvaddr);
 void *kmap_atomic_pfn(unsigned long pfn);
 void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
diff --git a/arch/x86/kernel/crash_dump_32.c b/arch/x86/kernel/crash_dump_32.c
index 642f75a..11891ca 100644
@@ -62,16 +62,16 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 
        if (!userbuf) {
                memcpy(buf, (vaddr + offset), csize);
-               kunmap_atomic(vaddr, KM_PTE0);
+               kunmap_atomic(vaddr);
        } else {
                if (!kdump_buf_page) {
                        printk(KERN_WARNING "Kdump: Kdump buffer page not"
                                " allocated\n");
-                       kunmap_atomic(vaddr, KM_PTE0);
+                       kunmap_atomic(vaddr);
                        return -EFAULT;
                }
                copy_page(kdump_buf_page, vaddr);
-               kunmap_atomic(vaddr, KM_PTE0);
+               kunmap_atomic(vaddr);
                if (copy_to_user(buf, (kdump_buf_page + offset), csize))
                        return -EFAULT;
        }
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index cfdc6e0..31bfc69 100644
@@ -1283,9 +1283,9 @@ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
        if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr)
                return;
 
-       vapic = kmap_atomic(vcpu->arch.apic->vapic_page, KM_USER0);
+       vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
        data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr));
-       kunmap_atomic(vapic, KM_USER0);
+       kunmap_atomic(vapic);
 
        apic_set_tpr(vcpu->arch.apic, data & 0xff);
 }
@@ -1310,9 +1310,9 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
                max_isr = 0;
        data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
 
-       vapic = kmap_atomic(vcpu->arch.apic->vapic_page, KM_USER0);
+       vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
        *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data;
-       kunmap_atomic(vapic, KM_USER0);
+       kunmap_atomic(vapic);
 }
 
 void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 1561028..df5a703 100644
@@ -92,9 +92,9 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
        if (unlikely(npages != 1))
                return -EFAULT;
 
-       table = kmap_atomic(page, KM_USER0);
+       table = kmap_atomic(page);
        ret = CMPXCHG(&table[index], orig_pte, new_pte);
-       kunmap_atomic(table, KM_USER0);
+       kunmap_atomic(table);
 
        kvm_release_page_dirty(page);
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9cbfc06..bb4fd26 100644
@@ -1162,12 +1162,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
         */
        vcpu->hv_clock.version += 2;
 
-       shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);
+       shared_kaddr = kmap_atomic(vcpu->time_page);
 
        memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
               sizeof(vcpu->hv_clock));
 
-       kunmap_atomic(shared_kaddr, KM_USER0);
+       kunmap_atomic(shared_kaddr);
 
        mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
        return 0;
@@ -3848,7 +3848,7 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
                goto emul_write;
        }
 
-       kaddr = kmap_atomic(page, KM_USER0);
+       kaddr = kmap_atomic(page);
        kaddr += offset_in_page(gpa);
        switch (bytes) {
        case 1:
@@ -3866,7 +3866,7 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
        default:
                BUG();
        }
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
        kvm_release_page_dirty(page);
 
        if (!exchanged)
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index e218d5d..d9b094c 100644
@@ -760,9 +760,9 @@ survive:
                                break;
                        }
 
-                       maddr = kmap_atomic(pg, KM_USER0);
+                       maddr = kmap_atomic(pg);
                        memcpy(maddr + offset, from, len);
-                       kunmap_atomic(maddr, KM_USER0);
+                       kunmap_atomic(maddr);
                        set_page_dirty_lock(pg);
                        put_page(pg);
                        up_read(&current->mm->mmap_sem);
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index f4f29b1..6f31ee5 100644
@@ -51,11 +51,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 }
 EXPORT_SYMBOL(kmap_atomic_prot);
 
-void *__kmap_atomic(struct page *page)
+void *kmap_atomic(struct page *page)
 {
        return kmap_atomic_prot(page, kmap_prot);
 }
-EXPORT_SYMBOL(__kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic);
 
 /*
  * This is the same as kmap_atomic() but can map memory that doesn't
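
For context on the crypto hunks that follow: the crypto_kmap() and
crypto_kunmap() wrappers (and the in/out flag of scatterwalk_map())
existed only to pick a km_type slot based on direction and softirq
context, roughly like the sketch below; with slot selection gone they
reduce to plain k[un]map_atomic() and are deleted outright.

	/* Sketch of the old helpers' job (illustrative, not verbatim). */
	static inline enum km_type crypto_kmap_type(int out)
	{
		if (in_softirq())
			return out ? KM_SOFTIRQ1 : KM_SOFTIRQ0;
		return out ? KM_USER1 : KM_USER0;
	}

	static inline void *crypto_kmap(struct page *page, int out)
	{
		return kmap_atomic(page, crypto_kmap_type(out));
	}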
diff --git a/crypto/ahash.c b/crypto/ahash.c
index ac93c99..33bc9b6 100644
@@ -46,7 +46,7 @@ static int hash_walk_next(struct crypto_hash_walk *walk)
        unsigned int nbytes = min(walk->entrylen,
                                  ((unsigned int)(PAGE_SIZE)) - offset);
 
-       walk->data = crypto_kmap(walk->pg, 0);
+       walk->data = kmap_atomic(walk->pg);
        walk->data += offset;
 
        if (offset & alignmask) {
@@ -93,7 +93,7 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
                return nbytes;
        }
 
-       crypto_kunmap(walk->data, 0);
+       kunmap_atomic(walk->data);
        crypto_yield(walk->flags);
 
        if (err)
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index 0d5a90c..361b5e8 100644
@@ -79,13 +79,13 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
                /* wait for any prerequisite operations */
                async_tx_quiesce(&submit->depend_tx);
 
-               dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset;
-               src_buf = kmap_atomic(src, KM_USER1) + src_offset;
+               dest_buf = kmap_atomic(dest) + dest_offset;
+               src_buf = kmap_atomic(src) + src_offset;
 
                memcpy(dest_buf, src_buf, len);
 
-               kunmap_atomic(src_buf, KM_USER1);
-               kunmap_atomic(dest_buf, KM_USER0);
+               kunmap_atomic(src_buf);
+               kunmap_atomic(dest_buf);
 
                async_tx_sync_epilog(submit);
        }
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 1e61d1a..4dd80c7 100644
@@ -43,22 +43,22 @@ static int blkcipher_walk_first(struct blkcipher_desc *desc,
 
 static inline void blkcipher_map_src(struct blkcipher_walk *walk)
 {
-       walk->src.virt.addr = scatterwalk_map(&walk->in, 0);
+       walk->src.virt.addr = scatterwalk_map(&walk->in);
 }
 
 static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
 {
-       walk->dst.virt.addr = scatterwalk_map(&walk->out, 1);
+       walk->dst.virt.addr = scatterwalk_map(&walk->out);
 }
 
 static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
 {
-       scatterwalk_unmap(walk->src.virt.addr, 0);
+       scatterwalk_unmap(walk->src.virt.addr);
 }
 
 static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
 {
-       scatterwalk_unmap(walk->dst.virt.addr, 1);
+       scatterwalk_unmap(walk->dst.virt.addr);
 }
 
 /* Get a spot of the specified length that does not straddle a page.
diff --git a/crypto/ccm.c b/crypto/ccm.c
index c36d654..32fe1bb 100644
@@ -216,12 +216,12 @@ static void get_data_to_compute(struct crypto_cipher *tfm,
                        scatterwalk_start(&walk, sg_next(walk.sg));
                        n = scatterwalk_clamp(&walk, len);
                }
-               data_src = scatterwalk_map(&walk, 0);
+               data_src = scatterwalk_map(&walk);
 
                compute_mac(tfm, data_src, n, pctx);
                len -= n;
 
-               scatterwalk_unmap(data_src, 0);
+               scatterwalk_unmap(data_src);
                scatterwalk_advance(&walk, n);
                scatterwalk_done(&walk, 0, len);
                if (len)
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index 41e529a..7281b8a 100644
@@ -40,9 +40,9 @@ void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg)
 }
 EXPORT_SYMBOL_GPL(scatterwalk_start);
 
-void *scatterwalk_map(struct scatter_walk *walk, int out)
+void *scatterwalk_map(struct scatter_walk *walk)
 {
-       return crypto_kmap(scatterwalk_page(walk), out) +
+       return kmap_atomic(scatterwalk_page(walk)) +
               offset_in_page(walk->offset);
 }
 EXPORT_SYMBOL_GPL(scatterwalk_map);
@@ -83,9 +83,9 @@ void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
                if (len_this_page > nbytes)
                        len_this_page = nbytes;
 
-               vaddr = scatterwalk_map(walk, out);
+               vaddr = scatterwalk_map(walk);
                memcpy_dir(buf, vaddr, len_this_page, out);
-               scatterwalk_unmap(vaddr, out);
+               scatterwalk_unmap(vaddr);
 
                scatterwalk_advance(walk, len_this_page);
 
diff --git a/crypto/shash.c b/crypto/shash.c
index 9100912..21fc12e 100644
@@ -281,10 +281,10 @@ int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
        if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {
                void *data;
 
-               data = crypto_kmap(sg_page(sg), 0);
+               data = kmap_atomic(sg_page(sg));
                err = crypto_shash_digest(desc, data + offset, nbytes,
                                          req->result);
-               crypto_kunmap(data, 0);
+               kunmap_atomic(data);
                crypto_yield(desc->flags);
        } else
                err = crypto_shash_init(desc) ?:
@@ -420,9 +420,9 @@ static int shash_compat_digest(struct hash_desc *hdesc, struct scatterlist *sg,
 
                desc->flags = hdesc->flags;
 
-               data = crypto_kmap(sg_page(sg), 0);
+               data = kmap_atomic(sg_page(sg));
                err = crypto_shash_digest(desc, data + offset, nbytes, out);
-               crypto_kunmap(data, 0);
+               kunmap_atomic(data);
                crypto_yield(desc->flags);
                goto out;
        }
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 9691dd0..d8af325 100644
@@ -720,13 +720,13 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 
                /* FIXME: use a bounce buffer */
                local_irq_save(flags);
-               buf = kmap_atomic(page, KM_IRQ0);
+               buf = kmap_atomic(page);
 
                /* do the actual data transfer */
                ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
                                       do_write);
 
-               kunmap_atomic(buf, KM_IRQ0);
+               kunmap_atomic(buf);
                local_irq_restore(flags);
        } else {
                buf = page_address(page);
@@ -865,13 +865,13 @@ next_sg:
 
                /* FIXME: use bounce buffer */
                local_irq_save(flags);
-               buf = kmap_atomic(page, KM_IRQ0);
+               buf = kmap_atomic(page);
 
                /* do the actual data transfer */
                consumed = ap->ops->sff_data_xfer(dev,  buf + offset,
                                                                count, rw);
 
-               kunmap_atomic(buf, KM_IRQ0);
+               kunmap_atomic(buf);
                local_irq_restore(flags);
        } else {
                buf = page_address(page);
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index ec24643..531ceb3 100644
@@ -242,9 +242,9 @@ static void copy_to_brd(struct brd_device *brd, const void *src,
        page = brd_lookup_page(brd, sector);
        BUG_ON(!page);
 
-       dst = kmap_atomic(page, KM_USER1);
+       dst = kmap_atomic(page);
        memcpy(dst + offset, src, copy);
-       kunmap_atomic(dst, KM_USER1);
+       kunmap_atomic(dst);
 
        if (copy < n) {
                src += copy;
@@ -253,9 +253,9 @@ static void copy_to_brd(struct brd_device *brd, const void *src,
                page = brd_lookup_page(brd, sector);
                BUG_ON(!page);
 
-               dst = kmap_atomic(page, KM_USER1);
+               dst = kmap_atomic(page);
                memcpy(dst, src, copy);
-               kunmap_atomic(dst, KM_USER1);
+               kunmap_atomic(dst);
        }
 }
 
@@ -273,9 +273,9 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
        copy = min_t(size_t, n, PAGE_SIZE - offset);
        page = brd_lookup_page(brd, sector);
        if (page) {
-               src = kmap_atomic(page, KM_USER1);
+               src = kmap_atomic(page);
                memcpy(dst, src + offset, copy);
-               kunmap_atomic(src, KM_USER1);
+               kunmap_atomic(src);
        } else
                memset(dst, 0, copy);
 
@@ -285,9 +285,9 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
                copy = n - copy;
                page = brd_lookup_page(brd, sector);
                if (page) {
-                       src = kmap_atomic(page, KM_USER1);
+                       src = kmap_atomic(page);
                        memcpy(dst, src, copy);
-                       kunmap_atomic(src, KM_USER1);
+                       kunmap_atomic(src);
                } else
                        memset(dst, 0, copy);
        }
@@ -309,7 +309,7 @@ static int brd_do_bvec(struct brd_device *brd, struct page *page,
                        goto out;
        }
 
-       mem = kmap_atomic(page, KM_USER0);
+       mem = kmap_atomic(page);
        if (rw == READ) {
                copy_from_brd(mem + off, brd, sector, len);
                flush_dcache_page(page);
@@ -317,7 +317,7 @@ static int brd_do_bvec(struct brd_device *brd, struct page *page,
                flush_dcache_page(page);
                copy_to_brd(brd, mem + off, sector, len);
        }
-       kunmap_atomic(mem, KM_USER0);
+       kunmap_atomic(mem);
 
 out:
        return err;
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 912f585..3030201 100644
@@ -289,25 +289,25 @@ static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr)
        return page_nr;
 }
 
-static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx, const enum km_type km)
+static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
 {
        struct page *page = b->bm_pages[idx];
-       return (unsigned long *) kmap_atomic(page, km);
+       return (unsigned long *) kmap_atomic(page);
 }
 
 static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
 {
-       return __bm_map_pidx(b, idx, KM_IRQ1);
+       return __bm_map_pidx(b, idx);
 }
 
-static void __bm_unmap(unsigned long *p_addr, const enum km_type km)
+static void __bm_unmap(unsigned long *p_addr)
 {
-       kunmap_atomic(p_addr, km);
+       kunmap_atomic(p_addr);
 };
 
 static void bm_unmap(unsigned long *p_addr)
 {
-       return __bm_unmap(p_addr, KM_IRQ1);
+       return __bm_unmap(p_addr);
 }
 
 /* long word offset of _bitmap_ sector */
@@ -543,15 +543,15 @@ static unsigned long bm_count_bits(struct drbd_bitmap *b)
 
        /* all but last page */
        for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) {
-               p_addr = __bm_map_pidx(b, idx, KM_USER0);
+               p_addr = __bm_map_pidx(b, idx);
                for (i = 0; i < LWPP; i++)
                        bits += hweight_long(p_addr[i]);
-               __bm_unmap(p_addr, KM_USER0);
+               __bm_unmap(p_addr);
                cond_resched();
        }
        /* last (or only) page */
        last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL;
-       p_addr = __bm_map_pidx(b, idx, KM_USER0);
+       p_addr = __bm_map_pidx(b, idx);
        for (i = 0; i < last_word; i++)
                bits += hweight_long(p_addr[i]);
        p_addr[last_word] &= cpu_to_lel(mask);
@@ -559,7 +559,7 @@ static unsigned long bm_count_bits(struct drbd_bitmap *b)
        /* 32bit arch, may have an unused padding long */
        if (BITS_PER_LONG == 32 && (last_word & 1) == 0)
                p_addr[last_word+1] = 0;
-       __bm_unmap(p_addr, KM_USER0);
+       __bm_unmap(p_addr);
        return bits;
 }
 
@@ -970,11 +970,11 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
                 * to use pre-allocated page pool */
                void *src, *dest;
                page = alloc_page(__GFP_HIGHMEM|__GFP_WAIT);
-               dest = kmap_atomic(page, KM_USER0);
-               src = kmap_atomic(b->bm_pages[page_nr], KM_USER1);
+               dest = kmap_atomic(page);
+               src = kmap_atomic(b->bm_pages[page_nr]);
                memcpy(dest, src, PAGE_SIZE);
-               kunmap_atomic(src, KM_USER1);
-               kunmap_atomic(dest, KM_USER0);
+               kunmap_atomic(src);
+               kunmap_atomic(dest);
                bm_store_page_idx(page, page_nr);
        } else
                page = b->bm_pages[page_nr];
@@ -1163,7 +1163,7 @@ int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(loc
  * this returns a bit number, NOT a sector!
  */
 static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
-       const int find_zero_bit, const enum km_type km)
+       const int find_zero_bit)
 {
        struct drbd_bitmap *b = mdev->bitmap;
        unsigned long *p_addr;
@@ -1178,7 +1178,7 @@ static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
                while (bm_fo < b->bm_bits) {
                        /* bit offset of the first bit in the page */
                        bit_offset = bm_fo & ~BITS_PER_PAGE_MASK;
-                       p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo), km);
+                       p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo));
 
                        if (find_zero_bit)
                                i = find_next_zero_bit_le(p_addr,
@@ -1187,7 +1187,7 @@ static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
                                i = find_next_bit_le(p_addr,
                                                PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);
 
-                       __bm_unmap(p_addr, km);
+                       __bm_unmap(p_addr);
                        if (i < PAGE_SIZE*8) {
                                bm_fo = bit_offset + i;
                                if (bm_fo >= b->bm_bits)
@@ -1215,7 +1215,7 @@ static unsigned long bm_find_next(struct drbd_conf *mdev,
        if (BM_DONT_TEST & b->bm_flags)
                bm_print_lock_info(mdev);
 
-       i = __bm_find_next(mdev, bm_fo, find_zero_bit, KM_IRQ1);
+       i = __bm_find_next(mdev, bm_fo, find_zero_bit);
 
        spin_unlock_irq(&b->bm_lock);
        return i;
@@ -1239,13 +1239,13 @@ unsigned long drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo
 unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
 {
        /* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
-       return __bm_find_next(mdev, bm_fo, 0, KM_USER1);
+       return __bm_find_next(mdev, bm_fo, 0);
 }
 
 unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
 {
        /* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
-       return __bm_find_next(mdev, bm_fo, 1, KM_USER1);
+       return __bm_find_next(mdev, bm_fo, 1);
 }
 
 /* returns number of bits actually changed.
@@ -1273,14 +1273,14 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
                unsigned int page_nr = bm_bit_to_page_idx(b, bitnr);
                if (page_nr != last_page_nr) {
                        if (p_addr)
-                               __bm_unmap(p_addr, KM_IRQ1);
+                               __bm_unmap(p_addr);
                        if (c < 0)
                                bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
                        else if (c > 0)
                                bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
                        changed_total += c;
                        c = 0;
-                       p_addr = __bm_map_pidx(b, page_nr, KM_IRQ1);
+                       p_addr = __bm_map_pidx(b, page_nr);
                        last_page_nr = page_nr;
                }
                if (val)
@@ -1289,7 +1289,7 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
                        c -= (0 != __test_and_clear_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
        }
        if (p_addr)
-               __bm_unmap(p_addr, KM_IRQ1);
+               __bm_unmap(p_addr);
        if (c < 0)
                bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
        else if (c > 0)
@@ -1342,13 +1342,13 @@ static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
 {
        int i;
        int bits;
-       unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr], KM_IRQ1);
+       unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr]);
        for (i = first_word; i < last_word; i++) {
                bits = hweight_long(paddr[i]);
                paddr[i] = ~0UL;
                b->bm_set += BITS_PER_LONG - bits;
        }
-       kunmap_atomic(paddr, KM_IRQ1);
+       kunmap_atomic(paddr);
 }
 
 /* Same thing as drbd_bm_set_bits,
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index af2a250..e09f9ce 100644
@@ -2526,10 +2526,10 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
 
        page = e->pages;
        page_chain_for_each(page) {
-               void *d = kmap_atomic(page, KM_USER0);
+               void *d = kmap_atomic(page);
                unsigned l = min_t(unsigned, len, PAGE_SIZE);
                memcpy(tl, d, l);
-               kunmap_atomic(d, KM_USER0);
+               kunmap_atomic(d);
                tl = (unsigned short*)((char*)tl + l);
                len -= l;
                if (len == 0)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index cd50435..bbca966 100644
@@ -93,16 +93,16 @@ static int transfer_none(struct loop_device *lo, int cmd,
                         struct page *loop_page, unsigned loop_off,
                         int size, sector_t real_block)
 {
-       char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off;
-       char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off;
+       char *raw_buf = kmap_atomic(raw_page) + raw_off;
+       char *loop_buf = kmap_atomic(loop_page) + loop_off;
 
        if (cmd == READ)
                memcpy(loop_buf, raw_buf, size);
        else
                memcpy(raw_buf, loop_buf, size);
 
-       kunmap_atomic(loop_buf, KM_USER1);
-       kunmap_atomic(raw_buf, KM_USER0);
+       kunmap_atomic(loop_buf);
+       kunmap_atomic(raw_buf);
        cond_resched();
        return 0;
 }
@@ -112,8 +112,8 @@ static int transfer_xor(struct loop_device *lo, int cmd,
                        struct page *loop_page, unsigned loop_off,
                        int size, sector_t real_block)
 {
-       char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off;
-       char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off;
+       char *raw_buf = kmap_atomic(raw_page) + raw_off;
+       char *loop_buf = kmap_atomic(loop_page) + loop_off;
        char *in, *out, *key;
        int i, keysize;
 
@@ -130,8 +130,8 @@ static int transfer_xor(struct loop_device *lo, int cmd,
        for (i = 0; i < size; i++)
                *out++ = *in++ ^ key[(i & 511) % keysize];
 
-       kunmap_atomic(loop_buf, KM_USER1);
-       kunmap_atomic(raw_buf, KM_USER0);
+       kunmap_atomic(loop_buf);
+       kunmap_atomic(raw_buf);
        cond_resched();
        return 0;
 }
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index d59edea..ba66e44 100644
@@ -987,14 +987,14 @@ static void pkt_copy_bio_data(struct bio *src_bio, int seg, int offs, struct pag
 
        while (copy_size > 0) {
                struct bio_vec *src_bvl = bio_iovec_idx(src_bio, seg);
-               void *vfrom = kmap_atomic(src_bvl->bv_page, KM_USER0) +
+               void *vfrom = kmap_atomic(src_bvl->bv_page) +
                        src_bvl->bv_offset + offs;
                void *vto = page_address(dst_page) + dst_offs;
                int len = min_t(int, copy_size, src_bvl->bv_len - offs);
 
                BUG_ON(len < 0);
                memcpy(vto, vfrom, len);
-               kunmap_atomic(vfrom, KM_USER0);
+               kunmap_atomic(vfrom);
 
                seg++;
                offs = 0;
@@ -1019,10 +1019,10 @@ static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec)
        offs = 0;
        for (f = 0; f < pkt->frames; f++) {
                if (bvec[f].bv_page != pkt->pages[p]) {
-                       void *vfrom = kmap_atomic(bvec[f].bv_page, KM_USER0) + bvec[f].bv_offset;
+                       void *vfrom = kmap_atomic(bvec[f].bv_page) + bvec[f].bv_offset;
                        void *vto = page_address(pkt->pages[p]) + offs;
                        memcpy(vto, vfrom, CD_FRAMESIZE);
-                       kunmap_atomic(vfrom, KM_USER0);
+                       kunmap_atomic(vfrom);
                        bvec[f].bv_page = pkt->pages[p];
                        bvec[f].bv_offset = offs;
                } else {
index fe765f4..76368f9 100644
@@ -1731,9 +1731,9 @@ static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset
        while (size) {
                copy = min3(srest, dst->length, size);
 
-               daddr = kmap_atomic(sg_page(dst), KM_IRQ0);
+               daddr = kmap_atomic(sg_page(dst));
                memcpy(daddr + dst->offset + offset, saddr, copy);
-               kunmap_atomic(daddr, KM_IRQ0);
+               kunmap_atomic(daddr);
 
                nbytes -= copy;
                size -= copy;
@@ -1793,17 +1793,17 @@ static void hifn_process_ready(struct ablkcipher_request *req, int error)
                                continue;
                        }
 
-                       saddr = kmap_atomic(sg_page(t), KM_SOFTIRQ0);
+                       saddr = kmap_atomic(sg_page(t));
 
                        err = ablkcipher_get(saddr, &t->length, t->offset,
                                        dst, nbytes, &nbytes);
                        if (err < 0) {
-                               kunmap_atomic(saddr, KM_SOFTIRQ0);
+                               kunmap_atomic(saddr);
                                break;
                        }
 
                        idx += err;
-                       kunmap_atomic(saddr, KM_SOFTIRQ0);
+                       kunmap_atomic(saddr);
                }
 
                hifn_cipher_walk_exit(&rctx->walk);
index ca6c04d..da09cd7 100644
@@ -620,13 +620,13 @@ static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
        if (PageHighMem(pg))
                local_irq_save(flags);
 
-       virt_addr = kmap_atomic(pg, KM_BOUNCE_READ);
+       virt_addr = kmap_atomic(pg);
 
        /* Perform architecture specific atomic scrub operation */
        atomic_scrub(virt_addr + offset, size);
 
        /* Unmap and complete */
-       kunmap_atomic(virt_addr, KM_BOUNCE_READ);
+       kunmap_atomic(virt_addr);
 
        if (PageHighMem(pg))
                local_irq_restore(flags);
index 5928653..4b8653b 100644
@@ -41,10 +41,10 @@ drm_clflush_page(struct page *page)
        if (unlikely(page == NULL))
                return;
 
-       page_virtual = kmap_atomic(page, KM_USER0);
+       page_virtual = kmap_atomic(page);
        for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
                clflush(page_virtual + i);
-       kunmap_atomic(page_virtual, KM_USER0);
+       kunmap_atomic(page_virtual);
 }
 
 static void drm_cache_flush_clflush(struct page *pages[],
@@ -87,10 +87,10 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
                if (unlikely(page == NULL))
                        continue;
 
-               page_virtual = kmap_atomic(page, KM_USER0);
+               page_virtual = kmap_atomic(page);
                flush_dcache_range((unsigned long)page_virtual,
                                   (unsigned long)page_virtual + PAGE_SIZE);
-               kunmap_atomic(page_virtual, KM_USER0);
+               kunmap_atomic(page_virtual);
        }
 #else
        printk(KERN_ERR "Architecture has no drm_cache.c support\n");
index c904d73..e80ee82 100644
@@ -125,14 +125,14 @@ static void psb_page_clflush(struct psb_mmu_driver *driver, struct page* page)
        int i;
        uint8_t *clf;
 
-       clf = kmap_atomic(page, KM_USER0);
+       clf = kmap_atomic(page);
        mb();
        for (i = 0; i < clflush_count; ++i) {
                psb_clflush(clf);
                clf += clflush_add;
        }
        mb();
-       kunmap_atomic(clf, KM_USER0);
+       kunmap_atomic(clf);
 }
 
 static void psb_pages_clflush(struct psb_mmu_driver *driver,
@@ -325,7 +325,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
 
        spin_lock(lock);
 
-       v = kmap_atomic(pt->p, KM_USER0);
+       v = kmap_atomic(pt->p);
        clf = (uint8_t *) v;
        ptes = (uint32_t *) v;
        for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
@@ -341,7 +341,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
                mb();
        }
 
-       kunmap_atomic(v, KM_USER0);
+       kunmap_atomic(v);
        spin_unlock(lock);
 
        pt->count = 0;
@@ -376,18 +376,18 @@ struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
                        continue;
                }
 
-               v = kmap_atomic(pd->p, KM_USER0);
+               v = kmap_atomic(pd->p);
                pd->tables[index] = pt;
                v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
                pt->index = index;
-               kunmap_atomic((void *) v, KM_USER0);
+               kunmap_atomic((void *) v);
 
                if (pd->hw_context != -1) {
                        psb_mmu_clflush(pd->driver, (void *) &v[index]);
                        atomic_set(&pd->driver->needs_tlbflush, 1);
                }
        }
-       pt->v = kmap_atomic(pt->p, KM_USER0);
+       pt->v = kmap_atomic(pt->p);
        return pt;
 }
 
@@ -404,7 +404,7 @@ static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
                spin_unlock(lock);
                return NULL;
        }
-       pt->v = kmap_atomic(pt->p, KM_USER0);
+       pt->v = kmap_atomic(pt->p);
        return pt;
 }
 
@@ -413,9 +413,9 @@ static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
        struct psb_mmu_pd *pd = pt->pd;
        uint32_t *v;
 
-       kunmap_atomic(pt->v, KM_USER0);
+       kunmap_atomic(pt->v);
        if (pt->count == 0) {
-               v = kmap_atomic(pd->p, KM_USER0);
+               v = kmap_atomic(pd->p);
                v[pt->index] = pd->invalid_pde;
                pd->tables[pt->index] = NULL;
 
@@ -424,7 +424,7 @@ static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
                                        (void *) &v[pt->index]);
                        atomic_set(&pd->driver->needs_tlbflush, 1);
                }
-               kunmap_atomic(pt->v, KM_USER0);
+               kunmap_atomic(pt->v);
                spin_unlock(&pd->driver->lock);
                psb_mmu_free_pt(pt);
                return;
@@ -457,7 +457,7 @@ void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
        down_read(&driver->sem);
        spin_lock(&driver->lock);
 
-       v = kmap_atomic(pd->p, KM_USER0);
+       v = kmap_atomic(pd->p);
        v += start;
 
        while (gtt_pages--) {
@@ -467,7 +467,7 @@ void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
 
        /*ttm_tt_cache_flush(&pd->p, num_pages);*/
        psb_pages_clflush(pd->driver, &pd->p, num_pages);
-       kunmap_atomic(v, KM_USER0);
+       kunmap_atomic(v);
        spin_unlock(&driver->lock);
 
        if (pd->hw_context != -1)
@@ -830,9 +830,9 @@ int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
                uint32_t *v;
 
                spin_lock(lock);
-               v = kmap_atomic(pd->p, KM_USER0);
+               v = kmap_atomic(pd->p);
                tmp = v[psb_mmu_pd_index(virtual)];
-               kunmap_atomic(v, KM_USER0);
+               kunmap_atomic(v);
                spin_unlock(lock);
 
                if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
index 2f75d20..c10cf5e 100644
@@ -309,11 +309,11 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
                        goto out_err;
 
                preempt_disable();
-               from_virtual = kmap_atomic(from_page, KM_USER0);
-               to_virtual = kmap_atomic(to_page, KM_USER1);
+               from_virtual = kmap_atomic(from_page);
+               to_virtual = kmap_atomic(to_page);
                memcpy(to_virtual, from_virtual, PAGE_SIZE);
-               kunmap_atomic(to_virtual, KM_USER1);
-               kunmap_atomic(from_virtual, KM_USER0);
+               kunmap_atomic(to_virtual);
+               kunmap_atomic(from_virtual);
                preempt_enable();
                page_cache_release(from_page);
        }
@@ -365,11 +365,11 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
                        goto out_err;
                }
                preempt_disable();
-               from_virtual = kmap_atomic(from_page, KM_USER0);
-               to_virtual = kmap_atomic(to_page, KM_USER1);
+               from_virtual = kmap_atomic(from_page);
+               to_virtual = kmap_atomic(to_page);
                memcpy(to_virtual, from_virtual, PAGE_SIZE);
-               kunmap_atomic(to_virtual, KM_USER1);
-               kunmap_atomic(from_virtual, KM_USER0);
+               kunmap_atomic(to_virtual);
+               kunmap_atomic(from_virtual);
                preempt_enable();
                set_page_dirty(to_page);
                mark_page_accessed(to_page);
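
A side note on the ttm_tt conversions above: kmap_atomic() itself disables preemption until the matching kunmap_atomic(), so the explicit preempt_disable()/preempt_enable() bracketing is very likely redundant after the change; the series leaves it in place rather than widen the cleanup. For a plain whole-page copy, the stock helper already does the mapping dance itself, as in this sketch (not part of the series):

    #include <linux/highmem.h>

    /* copy_highpage() maps, copies and unmaps both pages internally. */
    static void ttm_copy_one(struct page *to, struct page *from)
    {
            copy_highpage(to, from);
    }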
index f4e7763..51c9ba5 100644
@@ -136,10 +136,10 @@ static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
 
                if (likely(page_virtual != NULL)) {
                        desc_virtual->ppn = page_to_pfn(page);
-                       kunmap_atomic(page_virtual, KM_USER0);
+                       kunmap_atomic(page_virtual);
                }
 
-               page_virtual = kmap_atomic(page, KM_USER0);
+               page_virtual = kmap_atomic(page);
                desc_virtual = page_virtual - 1;
                prev_pfn = ~(0UL);
 
@@ -169,7 +169,7 @@ static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
        }
 
        if (likely(page_virtual != NULL))
-               kunmap_atomic(page_virtual, KM_USER0);
+               kunmap_atomic(page_virtual);
 
        return 0;
 out_err:
index 5bc2839..729428e 100644
@@ -253,7 +253,7 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
                if (page_is_high)
                        local_irq_save(flags);
 
-               buf = kmap_atomic(page, KM_BIO_SRC_IRQ) + offset;
+               buf = kmap_atomic(page) + offset;
 
                cmd->nleft -= nr_bytes;
                cmd->cursg_ofs += nr_bytes;
@@ -269,7 +269,7 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
                else
                        hwif->tp_ops->input_data(drive, cmd, buf, nr_bytes);
 
-               kunmap_atomic(buf, KM_BIO_SRC_IRQ);
+               kunmap_atomic(buf);
 
                if (page_is_high)
                        local_irq_restore(flags);
index fb88d68..2033a92 100644
@@ -73,11 +73,11 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
 
                p = mem;
                for_each_sg(sgl, sg, data->size, i) {
-                       from = kmap_atomic(sg_page(sg), KM_USER0);
+                       from = kmap_atomic(sg_page(sg));
                        memcpy(p,
                               from + sg->offset,
                               sg->length);
-                       kunmap_atomic(from, KM_USER0);
+                       kunmap_atomic(from);
                        p += sg->length;
                }
        }
@@ -133,11 +133,11 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
 
                p = mem;
                for_each_sg(sgl, sg, sg_size, i) {
-                       to = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
+                       to = kmap_atomic(sg_page(sg));
                        memcpy(to + sg->offset,
                               p,
                               sg->length);
-                       kunmap_atomic(to, KM_SOFTIRQ0);
+                       kunmap_atomic(to);
                        p += sg->length;
                }
        }
index cdf36b1..045e086 100644
@@ -457,7 +457,7 @@ void bitmap_update_sb(struct bitmap *bitmap)
                return;
        }
        spin_unlock_irqrestore(&bitmap->lock, flags);
-       sb = kmap_atomic(bitmap->sb_page, KM_USER0);
+       sb = kmap_atomic(bitmap->sb_page);
        sb->events = cpu_to_le64(bitmap->mddev->events);
        if (bitmap->mddev->events < bitmap->events_cleared)
                /* rocking back to read-only */
@@ -467,7 +467,7 @@ void bitmap_update_sb(struct bitmap *bitmap)
        /* Just in case these have been changed via sysfs: */
        sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
        sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
-       kunmap_atomic(sb, KM_USER0);
+       kunmap_atomic(sb);
        write_page(bitmap, bitmap->sb_page, 1);
 }
 
@@ -478,7 +478,7 @@ void bitmap_print_sb(struct bitmap *bitmap)
 
        if (!bitmap || !bitmap->sb_page)
                return;
-       sb = kmap_atomic(bitmap->sb_page, KM_USER0);
+       sb = kmap_atomic(bitmap->sb_page);
        printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap));
        printk(KERN_DEBUG "         magic: %08x\n", le32_to_cpu(sb->magic));
        printk(KERN_DEBUG "       version: %d\n", le32_to_cpu(sb->version));
@@ -497,7 +497,7 @@ void bitmap_print_sb(struct bitmap *bitmap)
        printk(KERN_DEBUG "     sync size: %llu KB\n",
                        (unsigned long long)le64_to_cpu(sb->sync_size)/2);
        printk(KERN_DEBUG "max write behind: %d\n", le32_to_cpu(sb->write_behind));
-       kunmap_atomic(sb, KM_USER0);
+       kunmap_atomic(sb);
 }
 
 /*
@@ -525,7 +525,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
        }
        bitmap->sb_page->index = 0;
 
-       sb = kmap_atomic(bitmap->sb_page, KM_USER0);
+       sb = kmap_atomic(bitmap->sb_page);
 
        sb->magic = cpu_to_le32(BITMAP_MAGIC);
        sb->version = cpu_to_le32(BITMAP_MAJOR_HI);
@@ -533,7 +533,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
        chunksize = bitmap->mddev->bitmap_info.chunksize;
        BUG_ON(!chunksize);
        if (!is_power_of_2(chunksize)) {
-               kunmap_atomic(sb, KM_USER0);
+               kunmap_atomic(sb);
                printk(KERN_ERR "bitmap chunksize not a power of 2\n");
                return -EINVAL;
        }
@@ -571,7 +571,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
        bitmap->flags |= BITMAP_HOSTENDIAN;
        sb->version = cpu_to_le32(BITMAP_MAJOR_HOSTENDIAN);
 
-       kunmap_atomic(sb, KM_USER0);
+       kunmap_atomic(sb);
 
        return 0;
 }
@@ -603,7 +603,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
                return err;
        }
 
-       sb = kmap_atomic(bitmap->sb_page, KM_USER0);
+       sb = kmap_atomic(bitmap->sb_page);
 
        chunksize = le32_to_cpu(sb->chunksize);
        daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
@@ -664,7 +664,7 @@ success:
                bitmap->events_cleared = bitmap->mddev->events;
        err = 0;
 out:
-       kunmap_atomic(sb, KM_USER0);
+       kunmap_atomic(sb);
        if (err)
                bitmap_print_sb(bitmap);
        return err;
@@ -689,7 +689,7 @@ static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
                return 0;
        }
        spin_unlock_irqrestore(&bitmap->lock, flags);
-       sb = kmap_atomic(bitmap->sb_page, KM_USER0);
+       sb = kmap_atomic(bitmap->sb_page);
        old = le32_to_cpu(sb->state) & bits;
        switch (op) {
        case MASK_SET:
@@ -703,7 +703,7 @@ static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
        default:
                BUG();
        }
-       kunmap_atomic(sb, KM_USER0);
+       kunmap_atomic(sb);
        return old;
 }
 
@@ -881,12 +881,12 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
        bit = file_page_offset(bitmap, chunk);
 
        /* set the bit */
-       kaddr = kmap_atomic(page, KM_USER0);
+       kaddr = kmap_atomic(page);
        if (bitmap->flags & BITMAP_HOSTENDIAN)
                set_bit(bit, kaddr);
        else
                __set_bit_le(bit, kaddr);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
        pr_debug("set file bit %lu page %lu\n", bit, page->index);
        /* record page number so it gets flushed to disk when unplug occurs */
        set_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
@@ -1050,10 +1050,10 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
                                 * if bitmap is out of date, dirty the
                                 * whole page and write it out
                                 */
-                               paddr = kmap_atomic(page, KM_USER0);
+                               paddr = kmap_atomic(page);
                                memset(paddr + offset, 0xff,
                                       PAGE_SIZE - offset);
-                               kunmap_atomic(paddr, KM_USER0);
+                               kunmap_atomic(paddr);
                                write_page(bitmap, page, 1);
 
                                ret = -EIO;
@@ -1061,12 +1061,12 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
                                        goto err;
                        }
                }
-               paddr = kmap_atomic(page, KM_USER0);
+               paddr = kmap_atomic(page);
                if (bitmap->flags & BITMAP_HOSTENDIAN)
                        b = test_bit(bit, paddr);
                else
                        b = test_bit_le(bit, paddr);
-               kunmap_atomic(paddr, KM_USER0);
+               kunmap_atomic(paddr);
                if (b) {
                        /* if the disk bit is set, set the memory bit */
                        int needed = ((sector_t)(i+1) << (CHUNK_BLOCK_SHIFT(bitmap))
@@ -1209,10 +1209,10 @@ void bitmap_daemon_work(struct mddev *mddev)
                            mddev->bitmap_info.external == 0) {
                                bitmap_super_t *sb;
                                bitmap->need_sync = 0;
-                               sb = kmap_atomic(bitmap->sb_page, KM_USER0);
+                               sb = kmap_atomic(bitmap->sb_page);
                                sb->events_cleared =
                                        cpu_to_le64(bitmap->events_cleared);
-                               kunmap_atomic(sb, KM_USER0);
+                               kunmap_atomic(sb);
                                write_page(bitmap, bitmap->sb_page, 1);
                        }
                        spin_lock_irqsave(&bitmap->lock, flags);
@@ -1235,7 +1235,7 @@ void bitmap_daemon_work(struct mddev *mddev)
                                                  -1);
 
                                /* clear the bit */
-                               paddr = kmap_atomic(page, KM_USER0);
+                               paddr = kmap_atomic(page);
                                if (bitmap->flags & BITMAP_HOSTENDIAN)
                                        clear_bit(file_page_offset(bitmap, j),
                                                  paddr);
@@ -1244,7 +1244,7 @@ void bitmap_daemon_work(struct mddev *mddev)
                                                file_page_offset(bitmap,
                                                                 j),
                                                paddr);
-                               kunmap_atomic(paddr, KM_USER0);
+                               kunmap_atomic(paddr);
                        } else if (*bmc <= 2) {
                                *bmc = 1; /* maybe clear the bit next time */
                                set_page_attr(bitmap, page, BITMAP_PAGE_PENDING);
index 8c2a000..db6b516 100644
@@ -590,9 +590,9 @@ static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
        int r = 0;
 
        if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
-               src = kmap_atomic(sg_page(&dmreq->sg_in), KM_USER0);
+               src = kmap_atomic(sg_page(&dmreq->sg_in));
                r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
-               kunmap_atomic(src, KM_USER0);
+               kunmap_atomic(src);
        } else
                memset(iv, 0, cc->iv_size);
 
@@ -608,14 +608,14 @@ static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
        if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
                return 0;
 
-       dst = kmap_atomic(sg_page(&dmreq->sg_out), KM_USER0);
+       dst = kmap_atomic(sg_page(&dmreq->sg_out));
        r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);
 
        /* Tweak the first block of plaintext sector */
        if (!r)
                crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);
 
-       kunmap_atomic(dst, KM_USER0);
+       kunmap_atomic(dst);
        return r;
 }
 
index 69cc816..7338cb2 100644
@@ -57,9 +57,9 @@ int ivtv_udma_fill_sg_list (struct ivtv_user_dma *dma, struct ivtv_dma_page_info
                        if (dma->bouncemap[map_offset] == NULL)
                                return -1;
                        local_irq_save(flags);
-                       src = kmap_atomic(dma->map[map_offset], KM_BOUNCE_READ) + offset;
+                       src = kmap_atomic(dma->map[map_offset]) + offset;
                        memcpy(page_address(dma->bouncemap[map_offset]) + offset, src, len);
-                       kunmap_atomic(src, KM_BOUNCE_READ);
+                       kunmap_atomic(src);
                        local_irq_restore(flags);
                        sg_set_page(&dma->SGlist[map_offset], dma->bouncemap[map_offset], len, offset);
                }
index 5319e9b..c37d375 100644
@@ -325,7 +325,7 @@ static int jmb38x_ms_transfer_data(struct jmb38x_ms_host *host)
                        p_cnt = min(p_cnt, length);
 
                        local_irq_save(flags);
-                       buf = kmap_atomic(pg, KM_BIO_SRC_IRQ) + p_off;
+                       buf = kmap_atomic(pg) + p_off;
                } else {
                        buf = host->req->data + host->block_pos;
                        p_cnt = host->req->data_len - host->block_pos;
@@ -341,7 +341,7 @@ static int jmb38x_ms_transfer_data(struct jmb38x_ms_host *host)
                                 : jmb38x_ms_read_reg_data(host, buf, p_cnt);
 
                if (host->req->long_data) {
-                       kunmap_atomic(buf - p_off, KM_BIO_SRC_IRQ);
+                       kunmap_atomic(buf - p_off);
                        local_irq_restore(flags);
                }
 
index 6902b83..7bafa72 100644
@@ -210,7 +210,7 @@ static unsigned int tifm_ms_transfer_data(struct tifm_ms *host)
                        p_cnt = min(p_cnt, length);
 
                        local_irq_save(flags);
-                       buf = kmap_atomic(pg, KM_BIO_SRC_IRQ) + p_off;
+                       buf = kmap_atomic(pg) + p_off;
                } else {
                        buf = host->req->data + host->block_pos;
                        p_cnt = host->req->data_len - host->block_pos;
@@ -221,7 +221,7 @@ static unsigned int tifm_ms_transfer_data(struct tifm_ms *host)
                         : tifm_ms_read_data(host, buf, p_cnt);
 
                if (host->req->long_data) {
-                       kunmap_atomic(buf - p_off, KM_BIO_SRC_IRQ);
+                       kunmap_atomic(buf - p_off);
                        local_irq_restore(flags);
                }
 
index 6419a88..0e9aec8 100644
@@ -4101,11 +4101,9 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
                                if (length <= copybreak &&
                                    skb_tailroom(skb) >= length) {
                                        u8 *vaddr;
-                                       vaddr = kmap_atomic(buffer_info->page,
-                                                           KM_SKB_DATA_SOFTIRQ);
+                                       vaddr = kmap_atomic(buffer_info->page);
                                        memcpy(skb_tail_pointer(skb), vaddr, length);
-                                       kunmap_atomic(vaddr,
-                                                     KM_SKB_DATA_SOFTIRQ);
+                                       kunmap_atomic(vaddr);
                                        /* re-use the page, so don't erase
                                         * buffer_info->page */
                                        skb_put(skb, length);
index a9a4ea2..7152eb1 100644
@@ -1301,10 +1301,9 @@ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
                                                        ps_page->dma,
                                                        PAGE_SIZE,
                                                        DMA_FROM_DEVICE);
-                               vaddr = kmap_atomic(ps_page->page,
-                                                   KM_SKB_DATA_SOFTIRQ);
+                               vaddr = kmap_atomic(ps_page->page);
                                memcpy(skb_tail_pointer(skb), vaddr, l1);
-                               kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
+                               kunmap_atomic(vaddr);
                                dma_sync_single_for_device(&pdev->dev,
                                                           ps_page->dma,
                                                           PAGE_SIZE,
@@ -1503,12 +1502,10 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
                                if (length <= copybreak &&
                                    skb_tailroom(skb) >= length) {
                                        u8 *vaddr;
-                                       vaddr = kmap_atomic(buffer_info->page,
-                                                          KM_SKB_DATA_SOFTIRQ);
+                                       vaddr = kmap_atomic(buffer_info->page);
                                        memcpy(skb_tail_pointer(skb), vaddr,
                                               length);
-                                       kunmap_atomic(vaddr,
-                                                     KM_SKB_DATA_SOFTIRQ);
+                                       kunmap_atomic(vaddr);
                                        /* re-use the page, so don't erase
                                         * buffer_info->page */
                                        skb_put(skb, length);
index b36edbd..3c22955 100644
 #include <asm/byteorder.h>
 #include <asm/uaccess.h>
 
-#define cas_page_map(x)      kmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
-#define cas_page_unmap(x)    kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
+#define cas_page_map(x)      kmap_atomic((x))
+#define cas_page_unmap(x)    kunmap_atomic((x))
 #define CAS_NCPUS            num_online_cpus()
 
 #define cas_skb_release(x)  netif_rx(x)
index f980600..2fe9e90 100644
@@ -1736,7 +1736,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
                                                (uint32_t ) cmd->cmnd[8];
                                                /* 4 bytes: Areca io control code */
        sg = scsi_sglist(cmd);
-       buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
+       buffer = kmap_atomic(sg_page(sg)) + sg->offset;
        if (scsi_sg_count(cmd) > 1) {
                retvalue = ARCMSR_MESSAGE_FAIL;
                goto message_out;
@@ -1985,7 +1985,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
        }
        message_out:
        sg = scsi_sglist(cmd);
-       kunmap_atomic(buffer - sg->offset, KM_IRQ0);
+       kunmap_atomic(buffer - sg->offset);
        return retvalue;
 }
 
@@ -2035,11 +2035,11 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
                strncpy(&inqdata[32], "R001", 4); /* Product Revision */
 
                sg = scsi_sglist(cmd);
-               buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
+               buffer = kmap_atomic(sg_page(sg)) + sg->offset;
 
                memcpy(buffer, inqdata, sizeof(inqdata));
                sg = scsi_sglist(cmd);
-               kunmap_atomic(buffer - sg->offset, KM_IRQ0);
+               kunmap_atomic(buffer - sg->offset);
 
                cmd->scsi_done(cmd);
        }
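
One idiom in the arcmsr hunks above recurs in several SCSI drivers below: the mapped address is advanced by sg->offset at map time, so the unmap subtracts the same offset to hand kunmap_atomic() the address it originally returned. A sketch of the pairing, with a hypothetical scatterlist element:

    #include <linux/highmem.h>
    #include <linux/scatterlist.h>

    /* Hypothetical: inspect the start of a scatterlist element. */
    static void peek_sg(struct scatterlist *sg)
    {
            char *buffer = kmap_atomic(sg_page(sg)) + sg->offset;

            /* ... read or write buffer here ... */

            kunmap_atomic(buffer - sg->offset);  /* back to the mapped base */
    }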
index 8c6156a..a9af42e 100644
@@ -322,8 +322,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
                        return -ENOMEM;
                }
                frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
-               cp = kmap_atomic(skb_frag_page(frag), KM_SKB_DATA_SOFTIRQ)
-                               + frag->page_offset;
+               cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset;
        } else {
                cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
        }
@@ -332,7 +331,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
        cp->fcoe_eof = eof;
        cp->fcoe_crc32 = cpu_to_le32(~crc);
        if (skb_is_nonlinear(skb)) {
-               kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
+               kunmap_atomic(cp);
                cp = NULL;
        }
 
index d3ff9cd..89afd6d 100644
@@ -1956,12 +1956,11 @@ int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
 
                        /* data fits in the skb's headroom */
                        for (i = 0; i < tdata->nr_frags; i++, frag++) {
-                               char *src = kmap_atomic(frag->page,
-                                                       KM_SOFTIRQ0);
+                               char *src = kmap_atomic(frag->page);
 
                                memcpy(dst, src+frag->offset, frag->size);
                                dst += frag->size;
-                               kunmap_atomic(src, KM_SOFTIRQ0);
+                               kunmap_atomic(src);
                        }
                        if (padlen) {
                                memset(dst, 0, padlen);
index c164890..cc75cbe 100644
@@ -1515,7 +1515,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
                        return -ENOMEM;
                }
                frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
-               cp = kmap_atomic(skb_frag_page(frag), KM_SKB_DATA_SOFTIRQ)
+               cp = kmap_atomic(skb_frag_page(frag))
                        + frag->page_offset;
        } else {
                cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
@@ -1526,7 +1526,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
        cp->fcoe_crc32 = cpu_to_le32(~crc);
 
        if (skb_is_nonlinear(skb)) {
-               kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
+               kunmap_atomic(cp);
                cp = NULL;
        }
 
index bd97b22..4d119a3 100644
@@ -210,10 +210,9 @@ u32 fcoe_fc_crc(struct fc_frame *fp)
                while (len > 0) {
                        clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
                        data = kmap_atomic(
-                               skb_frag_page(frag) + (off >> PAGE_SHIFT),
-                               KM_SKB_DATA_SOFTIRQ);
+                               skb_frag_page(frag) + (off >> PAGE_SHIFT));
                        crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
-                       kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
+                       kunmap_atomic(data);
                        off += clen;
                        len -= clen;
                }
index 3242bca..d42ec92 100644
@@ -2310,10 +2310,10 @@ static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
                 return;
             }
             local_irq_save(flags);
-            address = kmap_atomic(sg_page(sl), KM_BIO_SRC_IRQ) + sl->offset;
+            address = kmap_atomic(sg_page(sl)) + sl->offset;
             memcpy(address, buffer, cpnow);
             flush_dcache_page(sg_page(sl));
-            kunmap_atomic(address, KM_BIO_SRC_IRQ);
+            kunmap_atomic(address);
             local_irq_restore(flags);
             if (cpsum == cpcount)
                 break;
index d77891e..b6d7a5c 100644
@@ -1511,14 +1511,14 @@ static int ips_is_passthru(struct scsi_cmnd *SC)
                 /* kmap_atomic() ensures addressability of the user buffer.*/
                 /* local_irq_save() protects the KM_IRQ0 address slot.     */
                 local_irq_save(flags);
-                buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
+                buffer = kmap_atomic(sg_page(sg)) + sg->offset;
                 if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
                     buffer[2] == 'P' && buffer[3] == 'P') {
-                        kunmap_atomic(buffer - sg->offset, KM_IRQ0);
+                        kunmap_atomic(buffer - sg->offset);
                         local_irq_restore(flags);
                         return 1;
                 }
-                kunmap_atomic(buffer - sg->offset, KM_IRQ0);
+                kunmap_atomic(buffer - sg->offset);
                 local_irq_restore(flags);
        }
        return 0;
index 192cb48..ee0dc05 100644
@@ -1304,9 +1304,9 @@ sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
                        struct page *page = sg_page(sg);
 
                        copy_len = min_t(int, total_len, sg_dma_len(sg));
-                       kaddr = kmap_atomic(page, KM_IRQ0);
+                       kaddr = kmap_atomic(page);
                        memcpy(kaddr + sg->offset, src_addr, copy_len);
-                       kunmap_atomic(kaddr, KM_IRQ0);
+                       kunmap_atomic(kaddr);
                        total_len -= copy_len;
                        src_addr += copy_len;
                        sg = sg_next(sg);
@@ -1654,7 +1654,7 @@ sci_io_request_frame_handler(struct isci_request *ireq,
                sci_unsolicited_frame_control_get_header(&ihost->uf_control,
                                                         frame_index,
                                                         &frame_header);
-               kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
+               kaddr = kmap_atomic(sg_page(sg));
                rsp = kaddr + sg->offset;
                sci_swab32_cpy(rsp, frame_header, 1);
 
@@ -1691,7 +1691,7 @@ sci_io_request_frame_handler(struct isci_request *ireq,
                        ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
                        sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
                }
-               kunmap_atomic(kaddr, KM_IRQ0);
+               kunmap_atomic(kaddr);
 
                sci_controller_release_frame(ihost, frame_index);
 
@@ -3023,10 +3023,10 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
                dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE);
 
                /* need to swab it back in case the command buffer is re-used */
-               kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
+               kaddr = kmap_atomic(sg_page(sg));
                smp_req = kaddr + sg->offset;
                sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
-               kunmap_atomic(kaddr, KM_IRQ0);
+               kunmap_atomic(kaddr);
                break;
        }
        default:
@@ -3311,7 +3311,7 @@ sci_io_request_construct_smp(struct device *dev,
        u8 req_len;
        u32 cmd;
 
-       kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
+       kaddr = kmap_atomic(sg_page(sg));
        smp_req = kaddr + sg->offset;
        /*
         * Look at the SMP requests' header fields; for certain SAS 1.x SMP
@@ -3337,7 +3337,7 @@ sci_io_request_construct_smp(struct device *dev,
        req_len = smp_req->req_len;
        sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
        cmd = *(u32 *) smp_req;
-       kunmap_atomic(kaddr, KM_IRQ0);
+       kunmap_atomic(kaddr);
 
        if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
                return SCI_FAILURE;
index f607314..b577c90 100644
@@ -485,11 +485,11 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 
        if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED)) {
                copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents,
-                                                   &offset, KM_SOFTIRQ0, NULL);
+                                                   &offset, NULL);
        } else {
                crc = crc32(~0, (u8 *) fh, sizeof(*fh));
                copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents,
-                                                   &offset, KM_SOFTIRQ0, &crc);
+                                                   &offset, &crc);
                buf = fc_frame_payload_get(fp, 0);
                if (len % 4)
                        crc = crc32(crc, buf + len, 4 - (len % 4));
@@ -650,10 +650,10 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
                         * The scatterlist item may be bigger than PAGE_SIZE,
                         * but we must not cross pages inside the kmap.
                         */
-                       page_addr = kmap_atomic(page, KM_SOFTIRQ0);
+                       page_addr = kmap_atomic(page);
                        memcpy(data, (char *)page_addr + (off & ~PAGE_MASK),
                               sg_bytes);
-                       kunmap_atomic(page_addr, KM_SOFTIRQ0);
+                       kunmap_atomic(page_addr);
                        data += sg_bytes;
                }
                offset += sg_bytes;
index 1bf9841..8d65a51 100644
@@ -105,14 +105,13 @@ module_exit(libfc_exit);
  * @sg: pointer to the pointer of the SG list.
  * @nents: pointer to the remaining number of entries in the SG list.
  * @offset: pointer to the current offset in the SG list.
- * @km_type: dedicated page table slot type for kmap_atomic.
  * @crc: pointer to the 32-bit crc value.
  *      If crc is NULL, CRC is not calculated.
  */
 u32 fc_copy_buffer_to_sglist(void *buf, size_t len,
                             struct scatterlist *sg,
                             u32 *nents, size_t *offset,
-                            enum km_type km_type, u32 *crc)
+                            u32 *crc)
 {
        size_t remaining = len;
        u32 copy_len = 0;
@@ -142,12 +141,11 @@ u32 fc_copy_buffer_to_sglist(void *buf, size_t len,
                off = *offset + sg->offset;
                sg_bytes = min(sg_bytes,
                               (size_t)(PAGE_SIZE - (off & ~PAGE_MASK)));
-               page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT),
-                                       km_type);
+               page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT));
                if (crc)
                        *crc = crc32(*crc, buf, sg_bytes);
                memcpy((char *)page_addr + (off & ~PAGE_MASK), buf, sg_bytes);
-               kunmap_atomic(page_addr, km_type);
+               kunmap_atomic(page_addr);
                buf += sg_bytes;
                *offset += sg_bytes;
                remaining -= sg_bytes;
index c7d0712..c2830cc 100644
@@ -134,6 +134,6 @@ extern void fc_fc4_conf_lport_params(struct fc_lport *, enum fc_fh_type);
 u32 fc_copy_buffer_to_sglist(void *buf, size_t len,
                             struct scatterlist *sg,
                             u32 *nents, size_t *offset,
-                            enum km_type km_type, u32 *crc);
+                            u32 *crc);
 
 #endif /* _FC_LIBFC_H_ */
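
With km_type gone from the fc_copy_buffer_to_sglist() prototype, every libfc caller simply drops that argument, as fc_fcp_recv_data() above and fc_lport_bsg_resp() below show. A hedged sketch of a call under the new signature (a hypothetical wrapper, assuming the private fc_libfc.h declaration is in scope):

    /* Hypothetical: copy a buffer into an SG list, CRC-summing as we go. */
    static u32 copy_to_sg(void *buf, size_t len, struct scatterlist *sg,
                          u32 nents)
    {
            size_t offset = 0;
            u32 crc = ~0;   /* pass NULL instead to skip the CRC */

            return fc_copy_buffer_to_sglist(buf, len, sg, &nents,
                                            &offset, &crc);
    }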
index 83750eb..c1a808c 100644
@@ -1698,7 +1698,7 @@ static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
 
        job->reply->reply_payload_rcv_len +=
                fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents,
-                                        &info->offset, KM_BIO_SRC_IRQ, NULL);
+                                        &info->offset, NULL);
 
        if (fr_eof(fp) == FC_EOF_T &&
            (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
index 5715a3d..7f0465b 100644
@@ -135,7 +135,7 @@ static void iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
 
        if (recv) {
                segment->atomic_mapped = true;
-               segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
+               segment->sg_mapped = kmap_atomic(sg_page(sg));
        } else {
                segment->atomic_mapped = false;
                /* the xmit path can sleep with the page mapped so use kmap */
@@ -149,7 +149,7 @@ void iscsi_tcp_segment_unmap(struct iscsi_segment *segment)
 {
        if (segment->sg_mapped) {
                if (segment->atomic_mapped)
-                       kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0);
+                       kunmap_atomic(segment->sg_mapped);
                else
                        kunmap(sg_page(segment->sg));
                segment->sg_mapped = NULL;
index bb8f492..3814d3e 100644
@@ -246,9 +246,9 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
        }
 
        local_irq_disable();
-       buf = kmap_atomic(bio_page(req->bio), KM_USER0) + bio_offset(req->bio);
+       buf = kmap_atomic(bio_page(req->bio));
        memcpy(req_data, buf, blk_rq_bytes(req));
-       kunmap_atomic(buf - bio_offset(req->bio), KM_USER0);
+       kunmap_atomic(buf - bio_offset(req->bio));
        local_irq_enable();
 
        if (req_data[0] != SMP_REQUEST)
@@ -361,10 +361,10 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
        }
 
        local_irq_disable();
-       buf = kmap_atomic(bio_page(rsp->bio), KM_USER0) + bio_offset(rsp->bio);
+       buf = kmap_atomic(bio_page(rsp->bio));
        memcpy(buf, resp_data, blk_rq_bytes(rsp));
        flush_kernel_dcache_page(bio_page(rsp->bio));
-       kunmap_atomic(buf - bio_offset(rsp->bio), KM_USER0);
+       kunmap_atomic(buf - bio_offset(rsp->bio));
        local_irq_enable();
 
  out:
index 15eefa1..4d39a9f 100644
@@ -670,10 +670,10 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
                        struct scatterlist *sg;
 
                        sg = scsi_sglist(cmd);
-                       buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
+                       buf = kmap_atomic(sg_page(sg)) + sg->offset;
 
                        memset(buf, 0, cmd->cmnd[4]);
-                       kunmap_atomic(buf - sg->offset, KM_IRQ0);
+                       kunmap_atomic(buf - sg->offset);
 
                        cmd->result = (DID_OK << 16);
                        cmd->scsi_done(cmd);
index a4884a5..01ab9c4 100644
@@ -1885,11 +1885,11 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
        case SAS_PROTOCOL_SMP: {
                        struct scatterlist *sg_resp = &task->smp_task.smp_resp;
                        tstat->stat = SAM_STAT_GOOD;
-                       to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
+                       to = kmap_atomic(sg_page(sg_resp));
                        memcpy(to + sg_resp->offset,
                                slot->response + sizeof(struct mvs_err_info),
                                sg_dma_len(sg_resp));
-                       kunmap_atomic(to, KM_IRQ0);
+                       kunmap_atomic(to);
                        break;
                }
 
index 6888b2c..68da6c0 100644
@@ -1778,7 +1778,7 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
        scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
                int len = min(psgl->length, resid);
 
-               paddr = kmap_atomic(sg_page(psgl), KM_IRQ0) + psgl->offset;
+               paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
                memcpy(paddr, dif_storep + dif_offset(sector), len);
 
                sector += len >> 3;
@@ -1788,7 +1788,7 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
                        sector = do_div(tmp_sec, sdebug_store_sectors);
                }
                resid -= len;
-               kunmap_atomic(paddr, KM_IRQ0);
+               kunmap_atomic(paddr);
        }
 
        dix_reads++;
@@ -1881,12 +1881,12 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
        BUG_ON(scsi_sg_count(SCpnt) == 0);
        BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
 
-       paddr = kmap_atomic(sg_page(psgl), KM_IRQ1) + psgl->offset;
+       paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
        ppage_offset = 0;
 
        /* For each data page */
        scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) {
-               daddr = kmap_atomic(sg_page(dsgl), KM_IRQ0) + dsgl->offset;
+               daddr = kmap_atomic(sg_page(dsgl)) + dsgl->offset;
 
                /* For each sector-sized chunk in data page */
                for (j = 0 ; j < dsgl->length ; j += scsi_debug_sector_size) {
@@ -1895,10 +1895,10 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
                         * protection page advance to the next one
                         */
                        if (ppage_offset >= psgl->length) {
-                               kunmap_atomic(paddr, KM_IRQ1);
+                               kunmap_atomic(paddr);
                                psgl = sg_next(psgl);
                                BUG_ON(psgl == NULL);
-                               paddr = kmap_atomic(sg_page(psgl), KM_IRQ1)
+                               paddr = kmap_atomic(sg_page(psgl))
                                        + psgl->offset;
                                ppage_offset = 0;
                        }
@@ -1971,10 +1971,10 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
                        ppage_offset += sizeof(struct sd_dif_tuple);
                }
 
-               kunmap_atomic(daddr, KM_IRQ0);
+               kunmap_atomic(daddr);
        }
 
-       kunmap_atomic(paddr, KM_IRQ1);
+       kunmap_atomic(paddr);
 
        dix_writes++;
 
@@ -1982,8 +1982,8 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
 
 out:
        dif_errors++;
-       kunmap_atomic(daddr, KM_IRQ0);
-       kunmap_atomic(paddr, KM_IRQ1);
+       kunmap_atomic(daddr);
+       kunmap_atomic(paddr);
        return ret;
 }
 
@@ -2303,7 +2303,7 @@ static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
 
        offset = 0;
        for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
-               kaddr = (unsigned char *)kmap_atomic(sg_page(sg), KM_USER0);
+               kaddr = (unsigned char *)kmap_atomic(sg_page(sg));
                if (!kaddr)
                        goto out;
 
@@ -2311,7 +2311,7 @@ static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
                        *(kaddr + sg->offset + j) ^= *(buf + offset + j);
 
                offset += sg->length;
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
        }
        ret = 0;
 out:
index b2c95db..a33b2b6 100644
@@ -2567,7 +2567,7 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
        if (*len > sg_len)
                *len = sg_len;
 
-       return kmap_atomic(page, KM_BIO_SRC_IRQ);
+       return kmap_atomic(page);
 }
 EXPORT_SYMBOL(scsi_kmap_atomic_sg);
 
@@ -2577,6 +2577,6 @@ EXPORT_SYMBOL(scsi_kmap_atomic_sg);
  */
 void scsi_kunmap_atomic_sg(void *virt)
 {
-       kunmap_atomic(virt, KM_BIO_SRC_IRQ);
+       kunmap_atomic(virt);
 }
 EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
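
scsi_kmap_atomic_sg() and scsi_kunmap_atomic_sg() keep their exported prototypes (declared in scsi/scsi_cmnd.h); only the slot bookkeeping inside changes. A usage sketch, hedged since the in/out parameter semantics are paraphrased here: *offset goes in as a position within the SG list and comes back page-relative, and *len is clamped to what the single mapped page can serve:

    #include <scsi/scsi_cmnd.h>

    /* Hypothetical: zero one byte at position pos of a request. */
    static void poke_sg(struct scatterlist *sgl, int sg_count, size_t pos)
    {
            size_t offset = pos;    /* in: list position; out: page-relative */
            size_t len = 1;         /* in: bytes wanted; out: bytes mapped */
            char *base = scsi_kmap_atomic_sg(sgl, sg_count, &offset, &len);

            if (len)
                    base[offset] = 0;

            scsi_kunmap_atomic_sg(base);
    }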
index f8fb2d6..e52d5bc 100644
@@ -392,7 +392,7 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
                virt = bio->bi_integrity->bip_sector & 0xffffffff;
 
                bip_for_each_vec(iv, bio->bi_integrity, i) {
-                       sdt = kmap_atomic(iv->bv_page, KM_USER0)
+                       sdt = kmap_atomic(iv->bv_page)
                                + iv->bv_offset;
 
                        for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
@@ -405,7 +405,7 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
                                phys++;
                        }
 
-                       kunmap_atomic(sdt, KM_USER0);
+                       kunmap_atomic(sdt);
                }
 
                bio->bi_flags |= (1 << BIO_MAPPED_INTEGRITY);
@@ -414,7 +414,7 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
        return 0;
 
 error:
-       kunmap_atomic(sdt, KM_USER0);
+       kunmap_atomic(sdt);
        sd_printk(KERN_ERR, sdkp, "%s: virt %u, phys %u, ref %u, app %4x\n",
                  __func__, virt, phys, be32_to_cpu(sdt->ref_tag),
                  be16_to_cpu(sdt->app_tag));
@@ -453,13 +453,13 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
                virt = bio->bi_integrity->bip_sector & 0xffffffff;
 
                bip_for_each_vec(iv, bio->bi_integrity, i) {
-                       sdt = kmap_atomic(iv->bv_page, KM_USER0)
+                       sdt = kmap_atomic(iv->bv_page)
                                + iv->bv_offset;
 
                        for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
 
                                if (sectors == 0) {
-                                       kunmap_atomic(sdt, KM_USER0);
+                                       kunmap_atomic(sdt);
                                        return;
                                }
 
@@ -474,7 +474,7 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
                                sectors--;
                        }
 
-                       kunmap_atomic(sdt, KM_USER0);
+                       kunmap_atomic(sdt);
                }
        }
 }
index 695ffc3..83a1972 100644
@@ -481,6 +481,19 @@ cleanup:
        return NULL;
 }
 
+/* Disgusting wrapper functions */
+static inline unsigned long sg_kmap_atomic(struct scatterlist *sgl, int idx)
+{
+       void *addr = kmap_atomic(sg_page(sgl + idx));
+       return (unsigned long)addr;
+}
+
+static inline void sg_kunmap_atomic(unsigned long addr)
+{
+       kunmap_atomic((void *)addr);
+}
+
+
 /* Assume the original sgl has enough room */
 static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
                                            struct scatterlist *bounce_sgl,
@@ -499,15 +512,12 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
        local_irq_save(flags);
 
        for (i = 0; i < orig_sgl_count; i++) {
-               dest_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
-                                       KM_IRQ0) + orig_sgl[i].offset;
+               dest_addr = sg_kmap_atomic(orig_sgl,i) + orig_sgl[i].offset;
                dest = dest_addr;
                destlen = orig_sgl[i].length;
 
                if (bounce_addr == 0)
-                       bounce_addr =
-                       (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
-                                                       KM_IRQ0);
+                       bounce_addr = sg_kmap_atomic(bounce_sgl,j);
 
                while (destlen) {
                        src = bounce_addr + bounce_sgl[j].offset;
@@ -523,7 +533,7 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
 
                        if (bounce_sgl[j].offset == bounce_sgl[j].length) {
                                /* full */
-                               kunmap_atomic((void *)bounce_addr, KM_IRQ0);
+                               sg_kunmap_atomic(bounce_addr);
                                j++;
 
                                /*
@@ -537,26 +547,21 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
                                        /*
                                         * We are done; cleanup and return.
                                         */
-                                       kunmap_atomic((void *)(dest_addr -
-                                                       orig_sgl[i].offset),
-                                                       KM_IRQ0);
+                                       sg_kunmap_atomic(dest_addr - orig_sgl[i].offset);
                                        local_irq_restore(flags);
                                        return total_copied;
                                }
 
                                /* if we need to use another bounce buffer */
                                if (destlen || i != orig_sgl_count - 1)
-                                       bounce_addr =
-                                       (unsigned long)kmap_atomic(
-                                       sg_page((&bounce_sgl[j])), KM_IRQ0);
+                                       bounce_addr = sg_kmap_atomic(bounce_sgl,j);
                        } else if (destlen == 0 && i == orig_sgl_count - 1) {
                                /* unmap the last bounce that is < PAGE_SIZE */
-                               kunmap_atomic((void *)bounce_addr, KM_IRQ0);
+                               sg_kunmap_atomic(bounce_addr);
                        }
                }
 
-               kunmap_atomic((void *)(dest_addr - orig_sgl[i].offset),
-                             KM_IRQ0);
+               sg_kunmap_atomic(dest_addr - orig_sgl[i].offset);
        }
 
        local_irq_restore(flags);
@@ -581,15 +586,12 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
        local_irq_save(flags);
 
        for (i = 0; i < orig_sgl_count; i++) {
-               src_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
-                               KM_IRQ0) + orig_sgl[i].offset;
+               src_addr = sg_kmap_atomic(orig_sgl,i) + orig_sgl[i].offset;
                src = src_addr;
                srclen = orig_sgl[i].length;
 
                if (bounce_addr == 0)
-                       bounce_addr =
-                       (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
-                                               KM_IRQ0);
+                       bounce_addr = sg_kmap_atomic(bounce_sgl,j);
 
                while (srclen) {
                        /* assume bounce offset always == 0 */
@@ -606,22 +608,20 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
 
                        if (bounce_sgl[j].length == PAGE_SIZE) {
                                /* full..move to next entry */
-                               kunmap_atomic((void *)bounce_addr, KM_IRQ0);
+                               sg_kunmap_atomic(bounce_addr);
                                j++;
 
                                /* if we need to use another bounce buffer */
                                if (srclen || i != orig_sgl_count - 1)
-                                       bounce_addr =
-                                       (unsigned long)kmap_atomic(
-                                       sg_page((&bounce_sgl[j])), KM_IRQ0);
+                                       bounce_addr = sg_kmap_atomic(bounce_sgl,j);
 
                        } else if (srclen == 0 && i == orig_sgl_count - 1) {
                                /* unmap the last bounce that is < PAGE_SIZE */
-                               kunmap_atomic((void *)bounce_addr, KM_IRQ0);
+                               sg_kunmap_atomic(bounce_addr);
                        }
                }
 
-               kunmap_atomic((void *)(src_addr - orig_sgl[i].offset), KM_IRQ0);
+               sg_kunmap_atomic(src_addr - orig_sgl[i].offset);
        }
 
        local_irq_restore(flags);
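
The storvsc wrappers introduced above hide both the sg_page() lookup and the unsigned long casts that the bounce-copy loops want, which is what lets the loop bodies shrink. A sketch of the resulting call pattern (simplified; assumes it sits in the same file as the static wrappers):

    /* Hypothetical: copy the first entries of two SG lists. */
    static void copy_first_entry(struct scatterlist *dst_sgl,
                                 struct scatterlist *src_sgl)
    {
            unsigned long dst = sg_kmap_atomic(dst_sgl, 0) + dst_sgl[0].offset;
            unsigned long src = sg_kmap_atomic(src_sgl, 0) + src_sgl[0].offset;

            memcpy((void *)dst, (void *)src,
                   min(dst_sgl[0].length, src_sgl[0].length));

            sg_kunmap_atomic(src - src_sgl[0].offset);  /* LIFO unmap order */
            sg_kunmap_atomic(dst - dst_sgl[0].offset);
    }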
index 1f9c508..93ba8e9 100644
@@ -56,17 +56,17 @@ static void clear_flag(struct block_header *block, enum blockflags flag)
  * This is called from xv_malloc/xv_free path, so it
  * needs to be fast.
  */
-static void *get_ptr_atomic(struct page *page, u16 offset, enum km_type type)
+static void *get_ptr_atomic(struct page *page, u16 offset)
 {
        unsigned char *base;
 
-       base = kmap_atomic(page, type);
+       base = kmap_atomic(page);
        return base + offset;
 }
 
-static void put_ptr_atomic(void *ptr, enum km_type type)
+static void put_ptr_atomic(void *ptr)
 {
-       kunmap_atomic(ptr, type);
+       kunmap_atomic(ptr);
 }
 
 static u32 get_blockprev(struct block_header *block)
@@ -202,10 +202,10 @@ static void insert_block(struct xv_pool *pool, struct page *page, u32 offset,
 
        if (block->link.next_page) {
                nextblock = get_ptr_atomic(block->link.next_page,
-                                       block->link.next_offset, KM_USER1);
+                                       block->link.next_offset);
                nextblock->link.prev_page = page;
                nextblock->link.prev_offset = offset;
-               put_ptr_atomic(nextblock, KM_USER1);
+               put_ptr_atomic(nextblock);
                /* If there was a next page then the free bits are set. */
                return;
        }
@@ -225,18 +225,18 @@ static void remove_block(struct xv_pool *pool, struct page *page, u32 offset,
 
        if (block->link.prev_page) {
                tmpblock = get_ptr_atomic(block->link.prev_page,
-                               block->link.prev_offset, KM_USER1);
+                               block->link.prev_offset);
                tmpblock->link.next_page = block->link.next_page;
                tmpblock->link.next_offset = block->link.next_offset;
-               put_ptr_atomic(tmpblock, KM_USER1);
+               put_ptr_atomic(tmpblock);
        }
 
        if (block->link.next_page) {
                tmpblock = get_ptr_atomic(block->link.next_page,
-                               block->link.next_offset, KM_USER1);
+                               block->link.next_offset);
                tmpblock->link.prev_page = block->link.prev_page;
                tmpblock->link.prev_offset = block->link.prev_offset;
-               put_ptr_atomic(tmpblock, KM_USER1);
+               put_ptr_atomic(tmpblock);
        }
 
        /* Is this block at the head of the freelist? */
@@ -249,11 +249,10 @@ static void remove_block(struct xv_pool *pool, struct page *page, u32 offset,
                if (pool->freelist[slindex].page) {
                        struct block_header *tmpblock;
                        tmpblock = get_ptr_atomic(pool->freelist[slindex].page,
-                                       pool->freelist[slindex].offset,
-                                       KM_USER1);
+                                       pool->freelist[slindex].offset);
                        tmpblock->link.prev_page = NULL;
                        tmpblock->link.prev_offset = 0;
-                       put_ptr_atomic(tmpblock, KM_USER1);
+                       put_ptr_atomic(tmpblock);
                } else {
                        /* This freelist bucket is empty */
                        __clear_bit(slindex % BITS_PER_LONG,
@@ -284,7 +283,7 @@ static int grow_pool(struct xv_pool *pool, gfp_t flags)
        stat_inc(&pool->total_pages);
 
        spin_lock(&pool->lock);
-       block = get_ptr_atomic(page, 0, KM_USER0);
+       block = get_ptr_atomic(page, 0);
 
        block->size = PAGE_SIZE - XV_ALIGN;
        set_flag(block, BLOCK_FREE);
@@ -293,7 +292,7 @@ static int grow_pool(struct xv_pool *pool, gfp_t flags)
 
        insert_block(pool, page, 0, block);
 
-       put_ptr_atomic(block, KM_USER0);
+       put_ptr_atomic(block);
        spin_unlock(&pool->lock);
 
        return 0;
@@ -375,7 +374,7 @@ int xv_malloc(struct xv_pool *pool, u32 size, struct page **page,
                return -ENOMEM;
        }
 
-       block = get_ptr_atomic(*page, *offset, KM_USER0);
+       block = get_ptr_atomic(*page, *offset);
 
        remove_block(pool, *page, *offset, block, index);
 
@@ -405,7 +404,7 @@ int xv_malloc(struct xv_pool *pool, u32 size, struct page **page,
        block->size = origsize;
        clear_flag(block, BLOCK_FREE);
 
-       put_ptr_atomic(block, KM_USER0);
+       put_ptr_atomic(block);
        spin_unlock(&pool->lock);
 
        *offset += XV_ALIGN;
@@ -426,7 +425,7 @@ void xv_free(struct xv_pool *pool, struct page *page, u32 offset)
 
        spin_lock(&pool->lock);
 
-       page_start = get_ptr_atomic(page, 0, KM_USER0);
+       page_start = get_ptr_atomic(page, 0);
        block = (struct block_header *)((char *)page_start + offset);
 
        /* Catch double free bugs */
@@ -468,7 +467,7 @@ void xv_free(struct xv_pool *pool, struct page *page, u32 offset)
 
        /* No used objects in this page. Free it. */
        if (block->size == PAGE_SIZE - XV_ALIGN) {
-               put_ptr_atomic(page_start, KM_USER0);
+               put_ptr_atomic(page_start);
                spin_unlock(&pool->lock);
 
                __free_page(page);
@@ -486,7 +485,7 @@ void xv_free(struct xv_pool *pool, struct page *page, u32 offset)
                set_blockprev(tmpblock, offset);
        }
 
-       put_ptr_atomic(page_start, KM_USER0);
+       put_ptr_atomic(page_start);
        spin_unlock(&pool->lock);
 }
 EXPORT_SYMBOL_GPL(xv_free);
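
All of the KM_USER0/KM_USER1 slot bookkeeping above becomes unnecessary
because the slotless kmap_atomic() allocates mappings from a small per-CPU
stack. The one rule callers must still observe is strict LIFO ordering of
nested mappings; a generic sketch (page_a and page_b stand for any two
valid struct page pointers):

    void *first  = kmap_atomic(page_a);
    void *second = kmap_atomic(page_b);     /* nesting is allowed */

    memcpy(first, second, PAGE_SIZE);

    kunmap_atomic(second);                  /* but unmap in reverse order */
    kunmap_atomic(first);
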
index 36d53ed..68b2e05 100644 (file)
@@ -496,13 +496,13 @@ static int zbud_decompress(struct page *page, struct zbud_hdr *zh)
        }
        ASSERT_SENTINEL(zh, ZBH);
        BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
-       to_va = kmap_atomic(page, KM_USER0);
+       to_va = kmap_atomic(page);
        size = zh->size;
        from_va = zbud_data(zh, size);
        ret = lzo1x_decompress_safe(from_va, size, to_va, &out_len);
        BUG_ON(ret != LZO_E_OK);
        BUG_ON(out_len != PAGE_SIZE);
-       kunmap_atomic(to_va, KM_USER0);
+       kunmap_atomic(to_va);
 out:
        spin_unlock(&zbpg->lock);
        return ret;
@@ -1109,7 +1109,7 @@ static struct zv_hdr *zv_create(struct zcache_client *cli, uint32_t pool_id,
                goto out;
        atomic_inc(&zv_curr_dist_counts[chunks]);
        atomic_inc(&zv_cumul_dist_counts[chunks]);
-       zv = kmap_atomic(page, KM_USER0) + offset;
+       zv = kmap_atomic(page) + offset;
        zv->index = index;
        zv->oid = *oid;
        zv->pool_id = pool_id;
@@ -1123,7 +1123,7 @@ static struct zv_hdr *zv_create(struct zcache_client *cli, uint32_t pool_id,
                spin_unlock(&zcache_rem_op_list_lock);
        }
        memcpy((char *)zv + sizeof(struct zv_hdr), cdata, clen);
-       kunmap_atomic(zv, KM_USER0);
+       kunmap_atomic(zv);
 out:
        return zv;
 }
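
zv above is kmap_atomic(page) + offset, yet kunmap_atomic() is handed the
offset pointer unchanged. That is fine: kunmap_atomic() only needs an
address somewhere inside the mapped page and is assumed to mask it down
itself, roughly:

    /* sketch of the internal masking; the real code is per-architecture */
    unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;
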
@@ -1145,7 +1145,7 @@ static struct zv_hdr *zv_alloc(struct tmem_pool *pool,
                        &page, &offset, ZCACHE_GFP_MASK);
        if (unlikely(ret))
                goto out;
-       zv = kmap_atomic(page, KM_USER0) + offset;
+       zv = kmap_atomic(page) + offset;
        SET_SENTINEL(zv, ZVH);
        INIT_LIST_HEAD(&zv->rem_op.list);
        zv->client_id = LOCAL_CLIENT;
@@ -1153,7 +1153,7 @@ static struct zv_hdr *zv_alloc(struct tmem_pool *pool,
        zv->index = index;
        zv->oid = *oid;
        zv->pool_id = pool->pool_id;
-       kunmap_atomic(zv, KM_USER0);
+       kunmap_atomic(zv);
 out:
        return zv;
 }
@@ -1194,10 +1194,10 @@ static void zv_decompress(struct page *page, struct zv_hdr *zv)
        ASSERT_SENTINEL(zv, ZVH);
        size = xv_get_object_size(zv) - sizeof(*zv);
        BUG_ON(size == 0);
-       to_va = kmap_atomic(page, KM_USER0);
+       to_va = kmap_atomic(page);
        ret = lzo1x_decompress_safe((char *)zv + sizeof(*zv),
                                        size, to_va, &clen);
-       kunmap_atomic(to_va, KM_USER0);
+       kunmap_atomic(to_va);
        BUG_ON(ret != LZO_E_OK);
        BUG_ON(clen != PAGE_SIZE);
 }
@@ -2203,12 +2203,12 @@ static int zcache_compress(struct page *from, void **out_va, size_t *out_len)
        BUG_ON(!irqs_disabled());
        if (unlikely(dmem == NULL || wmem == NULL))
                goto out;  /* no buffer, so can't compress */
-       from_va = kmap_atomic(from, KM_USER0);
+       from_va = kmap_atomic(from);
        mb();
        ret = lzo1x_1_compress(from_va, PAGE_SIZE, dmem, out_len, wmem);
        BUG_ON(ret != LZO_E_OK);
        *out_va = dmem;
-       kunmap_atomic(from_va, KM_USER0);
+       kunmap_atomic(from_va);
        ret = 1;
 out:
        return ret;
index 69dcc31..d47345c 100644 (file)
@@ -71,8 +71,8 @@ static int crypt(struct crypto_tfm *tfm,
                u8 *src_p, *dst_p;
                int in_place;
 
-               scatterwalk_map(&walk_in, 0);
-               scatterwalk_map(&walk_out, 1);
+               scatterwalk_map(&walk_in);
+               scatterwalk_map(&walk_out);
                src_p = scatterwalk_whichbuf(&walk_in, bsize, tmp_src);
                dst_p = scatterwalk_whichbuf(&walk_out, bsize, tmp_dst);
                in_place = scatterwalk_samebuf(&walk_in, &walk_out,
@@ -84,10 +84,10 @@ static int crypt(struct crypto_tfm *tfm,
 
                prfn(tfm, dst_p, src_p, crfn, enc, info, in_place);
 
-               scatterwalk_done(&walk_in, 0, nbytes);
+               scatterwalk_done(&walk_in, nbytes);
 
                scatterwalk_copychunks(dst_p, &walk_out, bsize, 1);
-               scatterwalk_done(&walk_out, 1, nbytes);
+               scatterwalk_done(&walk_out, nbytes);
 
                if (!nbytes)
                        return 0;
index 301ed51..05e7497 100644 (file)
@@ -39,12 +39,12 @@ static void update(struct crypto_tfm *tfm,
                        unsigned int bytes_from_page = min(l, ((unsigned int)
                                                           (PAGE_SIZE)) -
                                                           offset);
-                       char *p = crypto_kmap(pg, 0) + offset;
+                       char *p = kmap_atomic(pg) + offset;
 
                        tfm->__crt_alg->cra_digest.dia_update
                                        (crypto_tfm_ctx(tfm), p,
                                         bytes_from_page);
-                       crypto_kunmap(p, 0);
+                       kunmap_atomic(p);
                        crypto_yield(tfm);
                        offset = 0;
                        pg++;
@@ -75,10 +75,10 @@ static void digest(struct crypto_tfm *tfm,
        tfm->crt_digest.dit_init(tfm);
 
        for (i = 0; i < nsg; i++) {
-               char *p = crypto_kmap(sg[i].page, 0) + sg[i].offset;
+               char *p = kmap_atomic(sg[i].page) + sg[i].offset;
                tfm->__crt_alg->cra_digest.dia_update(crypto_tfm_ctx(tfm),
                                                      p, sg[i].length);
-               crypto_kunmap(p, 0);
+               kunmap_atomic(p);
                crypto_yield(tfm);
        }
        crypto_digest_final(tfm, out);
index a7c096e..bebe13a 100644 (file)
 #include <asm/kmap_types.h>
 
 
-extern enum km_type crypto_km_types[];
-
-static inline enum km_type crypto_kmap_type(int out)
-{
-       return crypto_km_types[(in_softirq() ? 2 : 0) + out];
-}
-
-static inline void *crypto_kmap(struct page *page, int out)
-{
-       return kmap_atomic(page, crypto_kmap_type(out));
-}
-
-static inline void crypto_kunmap(void *vaddr, int out)
-{
-       kunmap_atomic(vaddr, crypto_kmap_type(out));
-}
-
 static inline void crypto_yield(struct crypto_tfm *tfm)
 {
        if (!in_softirq())
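
The deleted wrappers existed only to pick a kmap slot that could not
collide across contexts: crypto_kmap_type() indexed the crypto_km_types[]
table (removed from scatterwalk.c further down) as
(in_softirq() ? 2 : 0) + out, selecting among KM_USER0, KM_USER1,
KM_SOFTIRQ0 and KM_SOFTIRQ1. With the per-CPU stack handling that, every
call site collapses to the plain primitives:

    /* before: slot derived from context and direction */
    dst = crypto_kmap(page, 1);
    /* ... touch dst ... */
    crypto_kunmap(dst, 1);

    /* after: no slot bookkeeping at all */
    dst = kmap_atomic(page);
    /* ... touch dst ... */
    kunmap_atomic(dst);
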
diff --git a/drivers/staging/rtl8192u/ieee80211/kmap_types.h b/drivers/staging/rtl8192u/ieee80211/kmap_types.h
deleted file mode 100644 (file)
index de67bb0..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef __KMAP_TYPES_H
-
-#define __KMAP_TYPES_H
-
-
-enum km_type {
-       KM_BOUNCE_READ,
-       KM_SKB_SUNRPC_DATA,
-       KM_SKB_DATA_SOFTIRQ,
-       KM_USER0,
-       KM_USER1,
-       KM_BH_IRQ,
-       KM_SOFTIRQ0,
-       KM_SOFTIRQ1,
-       KM_TYPE_NR
-};
-
-#define _ASM_KMAP_TYPES_H
-
-#endif
index 3543a61..8b73f6c 100644 (file)
@@ -13,8 +13,6 @@
  * any later version.
  *
  */
-#include "kmap_types.h"
-
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include "internal.h"
 #include "scatterwalk.h"
 
-enum km_type crypto_km_types[] = {
-       KM_USER0,
-       KM_USER1,
-       KM_SOFTIRQ0,
-       KM_SOFTIRQ1,
-};
-
 void *scatterwalk_whichbuf(struct scatter_walk *walk, unsigned int nbytes, void *scratch)
 {
        if (nbytes <= walk->len_this_page &&
@@ -62,9 +53,9 @@ void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg)
        walk->offset = sg->offset;
 }
 
-void scatterwalk_map(struct scatter_walk *walk, int out)
+void scatterwalk_map(struct scatter_walk *walk)
 {
-       walk->data = crypto_kmap(walk->page, out) + walk->offset;
+       walk->data = kmap_atomic(walk->page) + walk->offset;
 }
 
 static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
@@ -103,7 +94,7 @@ void scatterwalk_done(struct scatter_walk *walk, int out, int more)
  * has been verified as multiple of the block size.
  */
 int scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
                           size_t nbytes, int out)
 {
        if (buf != walk->data) {
                while (nbytes > walk->len_this_page) {
@@ -111,9 +102,9 @@ int scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
                        buf += walk->len_this_page;
                        nbytes -= walk->len_this_page;
 
-                       crypto_kunmap(walk->data, out);
+                       kunmap_atomic(walk->data);
                        scatterwalk_pagedone(walk, out, 1);
-                       scatterwalk_map(walk, out);
+                       scatterwalk_map(walk);
                }
 
                memcpy_dir(buf, walk->data, nbytes, out);
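
The out flag that scatterwalk_copychunks() keeps now encodes only the copy
direction, not a kmap slot; its remaining consumers are
scatterwalk_pagedone() and memcpy_dir(). The latter is defined earlier in
this file (not shown here) and presumably boils down to:

    static void memcpy_dir(void *buf, void *sgbuf, size_t nbytes, int out)
    {
            /* out != 0: write into the scatterlist; out == 0: read from it */
            void *src = out ? buf : sgbuf;
            void *dst = out ? sgbuf : buf;

            memcpy(dst, src, nbytes);
    }
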
index 7073465..ed2c800 100644 (file)
@@ -455,14 +455,14 @@ static int zbud_decompress(struct page *page, struct zbud_hdr *zh)
        }
        ASSERT_SENTINEL(zh, ZBH);
        BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
-       to_va = kmap_atomic(page, KM_USER0);
+       to_va = kmap_atomic(page);
        size = zh->size;
        from_va = zbud_data(zh, size);
        ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, from_va, size,
                                to_va, &out_len);
        BUG_ON(ret);
        BUG_ON(out_len != PAGE_SIZE);
-       kunmap_atomic(to_va, KM_USER0);
+       kunmap_atomic(to_va);
 out:
        spin_unlock(&zbpg->lock);
        return ret;
@@ -753,10 +753,10 @@ static void zv_decompress(struct page *page, void *handle)
        zv = zs_map_object(zcache_host.zspool, handle);
        BUG_ON(zv->size == 0);
        ASSERT_SENTINEL(zv, ZVH);
-       to_va = kmap_atomic(page, KM_USER0);
+       to_va = kmap_atomic(page);
        ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, (char *)zv + sizeof(*zv),
                                zv->size, to_va, &clen);
-       kunmap_atomic(to_va, KM_USER0);
+       kunmap_atomic(to_va);
        zs_unmap_object(zcache_host.zspool, handle);
        BUG_ON(ret);
        BUG_ON(clen != PAGE_SIZE);
@@ -1334,13 +1334,13 @@ static int zcache_compress(struct page *from, void **out_va, unsigned *out_len)
        if (unlikely(dmem == NULL))
                goto out;  /* no buffer or no compressor so can't compress */
        *out_len = PAGE_SIZE << ZCACHE_DSTMEM_ORDER;
-       from_va = kmap_atomic(from, KM_USER0);
+       from_va = kmap_atomic(from);
        mb();
        ret = zcache_comp_op(ZCACHE_COMPOP_COMPRESS, from_va, PAGE_SIZE, dmem,
                                out_len);
        BUG_ON(ret);
        *out_va = dmem;
-       kunmap_atomic(from_va, KM_USER0);
+       kunmap_atomic(from_va);
        ret = 1;
 out:
        return ret;
index 7f13819..685d612 100644 (file)
@@ -175,9 +175,9 @@ static void handle_zero_page(struct bio_vec *bvec)
        struct page *page = bvec->bv_page;
        void *user_mem;
 
-       user_mem = kmap_atomic(page, KM_USER0);
+       user_mem = kmap_atomic(page);
        memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
-       kunmap_atomic(user_mem, KM_USER0);
+       kunmap_atomic(user_mem);
 
        flush_dcache_page(page);
 }
@@ -188,12 +188,12 @@ static void handle_uncompressed_page(struct zram *zram, struct bio_vec *bvec,
        struct page *page = bvec->bv_page;
        unsigned char *user_mem, *cmem;
 
-       user_mem = kmap_atomic(page, KM_USER0);
-       cmem = kmap_atomic(zram->table[index].handle, KM_USER1);
+       user_mem = kmap_atomic(page);
+       cmem = kmap_atomic(zram->table[index].handle);
 
        memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len);
-       kunmap_atomic(cmem, KM_USER1);
-       kunmap_atomic(user_mem, KM_USER0);
+       kunmap_atomic(cmem);
+       kunmap_atomic(user_mem);
 
        flush_dcache_page(page);
 }
@@ -242,7 +242,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
                }
        }
 
-       user_mem = kmap_atomic(page, KM_USER0);
+       user_mem = kmap_atomic(page);
        if (!is_partial_io(bvec))
                uncmem = user_mem;
        clen = PAGE_SIZE;
@@ -260,7 +260,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
        }
 
        zs_unmap_object(zram->mem_pool, zram->table[index].handle);
-       kunmap_atomic(user_mem, KM_USER0);
+       kunmap_atomic(user_mem);
 
        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret != LZO_E_OK)) {
@@ -292,7 +292,7 @@ static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
        /* Page is stored uncompressed since it's incompressible */
        if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
                memcpy(mem, cmem, PAGE_SIZE);
-               kunmap_atomic(cmem, KM_USER0);
+               kunmap_atomic(cmem);
                return 0;
        }
 
@@ -351,7 +351,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
            zram_test_flag(zram, index, ZRAM_ZERO))
                zram_free_page(zram, index);
 
-       user_mem = kmap_atomic(page, KM_USER0);
+       user_mem = kmap_atomic(page);
 
        if (is_partial_io(bvec))
                memcpy(uncmem + offset, user_mem + bvec->bv_offset,
@@ -360,7 +360,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                uncmem = user_mem;
 
        if (page_zero_filled(uncmem)) {
-               kunmap_atomic(user_mem, KM_USER0);
+               kunmap_atomic(user_mem);
                if (is_partial_io(bvec))
                        kfree(uncmem);
                zram_stat_inc(&zram->stats.pages_zero);
@@ -372,7 +372,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
        ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
                               zram->compress_workmem);
 
-       kunmap_atomic(user_mem, KM_USER0);
+       kunmap_atomic(user_mem);
        if (is_partial_io(bvec))
                        kfree(uncmem);
 
@@ -400,8 +400,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
                zram_stat_inc(&zram->stats.pages_expand);
                handle = page_store;
-               src = kmap_atomic(page, KM_USER0);
-               cmem = kmap_atomic(page_store, KM_USER1);
+               src = kmap_atomic(page);
+               cmem = kmap_atomic(page_store);
                goto memstore;
        }
 
@@ -427,8 +427,8 @@ memstore:
        memcpy(cmem, src, clen);
 
        if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
-               kunmap_atomic(cmem, KM_USER1);
-               kunmap_atomic(src, KM_USER0);
+               kunmap_atomic(cmem);
+               kunmap_atomic(src);
        } else {
                zs_unmap_object(zram->mem_pool, handle);
        }
index cd5cd95..929cc93 100644 (file)
@@ -2344,7 +2344,7 @@ static void transport_xor_callback(struct se_cmd *cmd)
 
        offset = 0;
        for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
-               addr = kmap_atomic(sg_page(sg), KM_USER0);
+               addr = kmap_atomic(sg_page(sg));
                if (!addr)
                        goto out;
 
@@ -2352,7 +2352,7 @@ static void transport_xor_callback(struct se_cmd *cmd)
                        *(addr + sg->offset + i) ^= *(buf + offset + i);
 
                offset += sg->length;
-               kunmap_atomic(addr, KM_USER0);
+               kunmap_atomic(addr);
        }
 
 out:
index d8cabc2..2b693ee 100644 (file)
@@ -146,14 +146,13 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
                                        PAGE_SIZE << compound_order(page);
                } else {
                        BUG_ON(!page);
-                       from = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
-                                          KM_SOFTIRQ0);
+                       from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
                        page_addr = from;
                        from += mem_off & ~PAGE_MASK;
                        tlen = min(tlen, (size_t)(PAGE_SIZE -
                                                (mem_off & ~PAGE_MASK)));
                        memcpy(to, from, tlen);
-                       kunmap_atomic(page_addr, KM_SOFTIRQ0);
+                       kunmap_atomic(page_addr);
                        to += tlen;
                }
 
@@ -291,14 +290,13 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
 
                tlen = min(mem_len, frame_len);
 
-               to = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
-                                KM_SOFTIRQ0);
+               to = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
                page_addr = to;
                to += mem_off & ~PAGE_MASK;
                tlen = min(tlen, (size_t)(PAGE_SIZE -
                                          (mem_off & ~PAGE_MASK)));
                memcpy(to, from, tlen);
-               kunmap_atomic(page_addr, KM_SOFTIRQ0);
+               kunmap_atomic(page_addr);
 
                from += tlen;
                frame_len -= tlen;
index c14c42b..bdb2d64 100644 (file)
@@ -937,9 +937,9 @@ static int set_bit_to_user(int nr, void __user *addr)
        if (r < 0)
                return r;
        BUG_ON(r != 1);
-       base = kmap_atomic(page, KM_USER0);
+       base = kmap_atomic(page);
        set_bit(bit, base);
-       kunmap_atomic(base, KM_USER0);
+       kunmap_atomic(base);
        set_page_dirty_lock(page);
        put_page(page);
        return 0;
index 2f213d1..b960ff0 100644 (file)
@@ -365,10 +365,10 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
                _debug("extract data");
                if (call->count > 0) {
                        page = call->reply3;
-                       buffer = kmap_atomic(page, KM_USER0);
+                       buffer = kmap_atomic(page);
                        ret = afs_extract_data(call, skb, last, buffer,
                                               call->count);
-                       kunmap_atomic(buffer, KM_USER0);
+                       kunmap_atomic(buffer);
                        switch (ret) {
                        case 0:         break;
                        case -EAGAIN:   return 0;
@@ -411,9 +411,9 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
        if (call->count < PAGE_SIZE) {
                _debug("clear");
                page = call->reply3;
-               buffer = kmap_atomic(page, KM_USER0);
+               buffer = kmap_atomic(page);
                memset(buffer + call->count, 0, PAGE_SIZE - call->count);
-               kunmap_atomic(buffer, KM_USER0);
+               kunmap_atomic(buffer);
        }
 
        _leave(" = 0 [done]");
index 8f4ce26..298cf89 100644 (file)
@@ -200,9 +200,9 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
                if (PageError(page))
                        goto error;
 
-               buf = kmap_atomic(page, KM_USER0);
+               buf = kmap_atomic(page);
                memcpy(devname, buf, size);
-               kunmap_atomic(buf, KM_USER0);
+               kunmap_atomic(buf);
                page_cache_release(page);
                page = NULL;
        }
index b9d64d8..5b600cb 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -160,7 +160,7 @@ static int aio_setup_ring(struct kioctx *ctx)
 
        info->nr = nr_events;           /* trusted copy */
 
-       ring = kmap_atomic(info->ring_pages[0], KM_USER0);
+       ring = kmap_atomic(info->ring_pages[0]);
        ring->nr = nr_events;   /* user copy */
        ring->id = ctx->user_id;
        ring->head = ring->tail = 0;
@@ -168,32 +168,32 @@ static int aio_setup_ring(struct kioctx *ctx)
        ring->compat_features = AIO_RING_COMPAT_FEATURES;
        ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
        ring->header_length = sizeof(struct aio_ring);
-       kunmap_atomic(ring, KM_USER0);
+       kunmap_atomic(ring);
 
        return 0;
 }
 
 
 /* aio_ring_event: returns a pointer to the event at the given index from
- * kmap_atomic(, km).  Release the pointer with put_aio_ring_event();
+ * kmap_atomic().  Release the pointer with put_aio_ring_event();
  */
 #define AIO_EVENTS_PER_PAGE    (PAGE_SIZE / sizeof(struct io_event))
 #define AIO_EVENTS_FIRST_PAGE  ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
 #define AIO_EVENTS_OFFSET      (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
 
-#define aio_ring_event(info, nr, km) ({                                        \
+#define aio_ring_event(info, nr) ({                                    \
        unsigned pos = (nr) + AIO_EVENTS_OFFSET;                        \
        struct io_event *__event;                                       \
        __event = kmap_atomic(                                          \
-                       (info)->ring_pages[pos / AIO_EVENTS_PER_PAGE], km); \
+                       (info)->ring_pages[pos / AIO_EVENTS_PER_PAGE]); \
        __event += pos % AIO_EVENTS_PER_PAGE;                           \
        __event;                                                        \
 })
 
-#define put_aio_ring_event(event, km) do {     \
+#define put_aio_ring_event(event) do {         \
        struct io_event *__event = (event);     \
        (void)__event;                          \
-       kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
+       kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK)); \
 } while(0)
 
 static void ctx_rcu_free(struct rcu_head *head)
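
aio_ring_event() hands back a pointer offset into the mapped page, which
is why put_aio_ring_event() masks it back to the page start before
unmapping. The intended pairing, condensed from aio_read_evt() below
(info, head and ent come from the surrounding code):

    struct io_event *evp = aio_ring_event(info, head); /* kmaps the page for slot 'head' */
    *ent = *evp;                                       /* copy the event out */
    put_aio_ring_event(evp);                           /* kunmaps via the masked pointer */
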
@@ -1019,10 +1019,10 @@ int aio_complete(struct kiocb *iocb, long res, long res2)
        if (kiocbIsCancelled(iocb))
                goto put_rq;
 
-       ring = kmap_atomic(info->ring_pages[0], KM_IRQ1);
+       ring = kmap_atomic(info->ring_pages[0]);
 
        tail = info->tail;
-       event = aio_ring_event(info, tail, KM_IRQ0);
+       event = aio_ring_event(info, tail);
        if (++tail >= info->nr)
                tail = 0;
 
@@ -1043,8 +1043,8 @@ int aio_complete(struct kiocb *iocb, long res, long res2)
        info->tail = tail;
        ring->tail = tail;
 
-       put_aio_ring_event(event, KM_IRQ0);
-       kunmap_atomic(ring, KM_IRQ1);
+       put_aio_ring_event(event);
+       kunmap_atomic(ring);
 
        pr_debug("added to ring %p at [%lu]\n", iocb, tail);
 
@@ -1089,7 +1089,7 @@ static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
        unsigned long head;
        int ret = 0;
 
-       ring = kmap_atomic(info->ring_pages[0], KM_USER0);
+       ring = kmap_atomic(info->ring_pages[0]);
        dprintk("in aio_read_evt h%lu t%lu m%lu\n",
                 (unsigned long)ring->head, (unsigned long)ring->tail,
                 (unsigned long)ring->nr);
@@ -1101,18 +1101,18 @@ static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
 
        head = ring->head % info->nr;
        if (head != ring->tail) {
-               struct io_event *evp = aio_ring_event(info, head, KM_USER1);
+               struct io_event *evp = aio_ring_event(info, head);
                *ent = *evp;
                head = (head + 1) % info->nr;
                smp_mb(); /* finish reading the event before updating the head */
                ring->head = head;
                ret = 1;
-               put_aio_ring_event(evp, KM_USER1);
+               put_aio_ring_event(evp);
        }
        spin_unlock(&info->ring_lock);
 
 out:
-       kunmap_atomic(ring, KM_USER0);
+       kunmap_atomic(ring);
        dprintk("leaving aio_read_evt: %d  h%lu t%lu\n", ret,
                 (unsigned long)ring->head, (unsigned long)ring->tail);
        return ret;
index c2183f3..e85c04b 100644 (file)
@@ -357,7 +357,7 @@ static void bio_integrity_generate(struct bio *bio)
        bix.sector_size = bi->sector_size;
 
        bio_for_each_segment(bv, bio, i) {
-               void *kaddr = kmap_atomic(bv->bv_page, KM_USER0);
+               void *kaddr = kmap_atomic(bv->bv_page);
                bix.data_buf = kaddr + bv->bv_offset;
                bix.data_size = bv->bv_len;
                bix.prot_buf = prot_buf;
@@ -371,7 +371,7 @@ static void bio_integrity_generate(struct bio *bio)
                total += sectors * bi->tuple_size;
                BUG_ON(total > bio->bi_integrity->bip_size);
 
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
        }
 }
 
@@ -498,7 +498,7 @@ static int bio_integrity_verify(struct bio *bio)
        bix.sector_size = bi->sector_size;
 
        bio_for_each_segment(bv, bio, i) {
-               void *kaddr = kmap_atomic(bv->bv_page, KM_USER0);
+               void *kaddr = kmap_atomic(bv->bv_page);
                bix.data_buf = kaddr + bv->bv_offset;
                bix.data_size = bv->bv_len;
                bix.prot_buf = prot_buf;
@@ -507,7 +507,7 @@ static int bio_integrity_verify(struct bio *bio)
                ret = bi->verify_fn(&bix);
 
                if (ret) {
-                       kunmap_atomic(kaddr, KM_USER0);
+                       kunmap_atomic(kaddr);
                        return ret;
                }
 
@@ -517,7 +517,7 @@ static int bio_integrity_verify(struct bio *bio)
                total += sectors * bi->tuple_size;
                BUG_ON(total > bio->bi_integrity->bip_size);
 
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
        }
 
        return ret;
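
Both loops above follow the same discipline: one kmap_atomic() per bio
segment, balanced on every exit path, including the early return on a
verify failure. Reduced to a skeleton (process() is a stand-in for the
generate/verify work, not a real function):

    bio_for_each_segment(bv, bio, i) {
            void *kaddr = kmap_atomic(bv->bv_page);

            if (process(kaddr + bv->bv_offset, bv->bv_len)) {
                    kunmap_atomic(kaddr);   /* balance before bailing out */
                    return -EIO;
            }

            kunmap_atomic(kaddr);
    }
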
index d02c27c..b805afb 100644 (file)
@@ -120,10 +120,10 @@ static int check_compressed_csum(struct inode *inode,
                page = cb->compressed_pages[i];
                csum = ~(u32)0;
 
-               kaddr = kmap_atomic(page, KM_USER0);
+               kaddr = kmap_atomic(page);
                csum = btrfs_csum_data(root, kaddr, csum, PAGE_CACHE_SIZE);
                btrfs_csum_final(csum, (char *)&csum);
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
 
                if (csum != *cb_sum) {
                        printk(KERN_INFO "btrfs csum failed ino %llu "
@@ -521,10 +521,10 @@ static noinline int add_ra_bio_pages(struct inode *inode,
                        if (zero_offset) {
                                int zeros;
                                zeros = PAGE_CACHE_SIZE - zero_offset;
-                               userpage = kmap_atomic(page, KM_USER0);
+                               userpage = kmap_atomic(page);
                                memset(userpage + zero_offset, 0, zeros);
                                flush_dcache_page(page);
-                               kunmap_atomic(userpage, KM_USER0);
+                               kunmap_atomic(userpage);
                        }
                }
 
@@ -993,9 +993,9 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
                bytes = min(PAGE_CACHE_SIZE - *pg_offset,
                            PAGE_CACHE_SIZE - buf_offset);
                bytes = min(bytes, working_bytes);
-               kaddr = kmap_atomic(page_out, KM_USER0);
+               kaddr = kmap_atomic(page_out);
                memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                flush_dcache_page(page_out);
 
                *pg_offset += bytes;
index a55fbe6..2862454 100644 (file)
@@ -2546,10 +2546,10 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 
                if (zero_offset) {
                        iosize = PAGE_CACHE_SIZE - zero_offset;
-                       userpage = kmap_atomic(page, KM_USER0);
+                       userpage = kmap_atomic(page);
                        memset(userpage + zero_offset, 0, iosize);
                        flush_dcache_page(page);
-                       kunmap_atomic(userpage, KM_USER0);
+                       kunmap_atomic(userpage);
                }
        }
        while (cur <= end) {
@@ -2558,10 +2558,10 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
                        struct extent_state *cached = NULL;
 
                        iosize = PAGE_CACHE_SIZE - pg_offset;
-                       userpage = kmap_atomic(page, KM_USER0);
+                       userpage = kmap_atomic(page);
                        memset(userpage + pg_offset, 0, iosize);
                        flush_dcache_page(page);
-                       kunmap_atomic(userpage, KM_USER0);
+                       kunmap_atomic(userpage);
                        set_extent_uptodate(tree, cur, cur + iosize - 1,
                                            &cached, GFP_NOFS);
                        unlock_extent_cached(tree, cur, cur + iosize - 1,
@@ -2607,10 +2607,10 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
                        char *userpage;
                        struct extent_state *cached = NULL;
 
-                       userpage = kmap_atomic(page, KM_USER0);
+                       userpage = kmap_atomic(page);
                        memset(userpage + pg_offset, 0, iosize);
                        flush_dcache_page(page);
-                       kunmap_atomic(userpage, KM_USER0);
+                       kunmap_atomic(userpage);
 
                        set_extent_uptodate(tree, cur, cur + iosize - 1,
                                            &cached, GFP_NOFS);
@@ -2756,10 +2756,10 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
        if (page->index == end_index) {
                char *userpage;
 
-               userpage = kmap_atomic(page, KM_USER0);
+               userpage = kmap_atomic(page);
                memset(userpage + pg_offset, 0,
                       PAGE_CACHE_SIZE - pg_offset);
-               kunmap_atomic(userpage, KM_USER0);
+               kunmap_atomic(userpage);
                flush_dcache_page(page);
        }
        pg_offset = 0;
index c7fb3a4..078b4fd 100644 (file)
@@ -447,13 +447,13 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
                        sums->bytenr = ordered->start;
                }
 
-               data = kmap_atomic(bvec->bv_page, KM_USER0);
+               data = kmap_atomic(bvec->bv_page);
                sector_sum->sum = ~(u32)0;
                sector_sum->sum = btrfs_csum_data(root,
                                                  data + bvec->bv_offset,
                                                  sector_sum->sum,
                                                  bvec->bv_len);
-               kunmap_atomic(data, KM_USER0);
+               kunmap_atomic(data);
                btrfs_csum_final(sector_sum->sum,
                                 (char *)&sector_sum->sum);
                sector_sum->bytenr = disk_bytenr;
index 892b347..3a0b5c1 100644 (file)
@@ -173,9 +173,9 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
                        cur_size = min_t(unsigned long, compressed_size,
                                       PAGE_CACHE_SIZE);
 
-                       kaddr = kmap_atomic(cpage, KM_USER0);
+                       kaddr = kmap_atomic(cpage);
                        write_extent_buffer(leaf, kaddr, ptr, cur_size);
-                       kunmap_atomic(kaddr, KM_USER0);
+                       kunmap_atomic(kaddr);
 
                        i++;
                        ptr += cur_size;
@@ -187,10 +187,10 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
                page = find_get_page(inode->i_mapping,
                                     start >> PAGE_CACHE_SHIFT);
                btrfs_set_file_extent_compression(leaf, ei, 0);
-               kaddr = kmap_atomic(page, KM_USER0);
+               kaddr = kmap_atomic(page);
                offset = start & (PAGE_CACHE_SIZE - 1);
                write_extent_buffer(leaf, kaddr + offset, ptr, size);
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                page_cache_release(page);
        }
        btrfs_mark_buffer_dirty(leaf);
@@ -422,10 +422,10 @@ again:
                         * sending it down to disk
                         */
                        if (offset) {
-                               kaddr = kmap_atomic(page, KM_USER0);
+                               kaddr = kmap_atomic(page);
                                memset(kaddr + offset, 0,
                                       PAGE_CACHE_SIZE - offset);
-                               kunmap_atomic(kaddr, KM_USER0);
+                               kunmap_atomic(kaddr);
                        }
                        will_compress = 1;
                }
@@ -1873,7 +1873,7 @@ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
        } else {
                ret = get_state_private(io_tree, start, &private);
        }
-       kaddr = kmap_atomic(page, KM_USER0);
+       kaddr = kmap_atomic(page);
        if (ret)
                goto zeroit;
 
@@ -1882,7 +1882,7 @@ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
        if (csum != private)
                goto zeroit;
 
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 good:
        return 0;
 
@@ -1894,7 +1894,7 @@ zeroit:
                       (unsigned long long)private);
        memset(kaddr + offset, 1, end - start + 1);
        flush_dcache_page(page);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
        if (private == 0)
                return 0;
        return -EIO;
@@ -4937,12 +4937,12 @@ static noinline int uncompress_inline(struct btrfs_path *path,
        ret = btrfs_decompress(compress_type, tmp, page,
                               extent_offset, inline_size, max_size);
        if (ret) {
-               char *kaddr = kmap_atomic(page, KM_USER0);
+               char *kaddr = kmap_atomic(page);
                unsigned long copy_size = min_t(u64,
                                  PAGE_CACHE_SIZE - pg_offset,
                                  max_size - extent_offset);
                memset(kaddr + pg_offset, 0, copy_size);
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
        }
        kfree(tmp);
        return 0;
@@ -5719,11 +5719,11 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
                        unsigned long flags;
 
                        local_irq_save(flags);
-                       kaddr = kmap_atomic(page, KM_IRQ0);
+                       kaddr = kmap_atomic(page);
                        csum = btrfs_csum_data(root, kaddr + bvec->bv_offset,
                                               csum, bvec->bv_len);
                        btrfs_csum_final(csum, (char *)&csum);
-                       kunmap_atomic(kaddr, KM_IRQ0);
+                       kunmap_atomic(kaddr);
                        local_irq_restore(flags);
 
                        flush_dcache_page(bvec->bv_page);
index a178f5e..743b86f 100644 (file)
@@ -411,9 +411,9 @@ static int lzo_decompress(struct list_head *ws, unsigned char *data_in,
 
        bytes = min_t(unsigned long, destlen, out_len - start_byte);
 
-       kaddr = kmap_atomic(dest_page, KM_USER0);
+       kaddr = kmap_atomic(dest_page);
        memcpy(kaddr, workspace->buf + start_byte, bytes);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 out:
        return ret;
 }
index abc0fbf..390e710 100644 (file)
@@ -591,7 +591,7 @@ static int scrub_fixup_check(struct scrub_bio *sbio, int ix)
        u64 flags = sbio->spag[ix].flags;
 
        page = sbio->bio->bi_io_vec[ix].bv_page;
-       buffer = kmap_atomic(page, KM_USER0);
+       buffer = kmap_atomic(page);
        if (flags & BTRFS_EXTENT_FLAG_DATA) {
                ret = scrub_checksum_data(sbio->sdev,
                                          sbio->spag + ix, buffer);
@@ -603,7 +603,7 @@ static int scrub_fixup_check(struct scrub_bio *sbio, int ix)
        } else {
                WARN_ON(1);
        }
-       kunmap_atomic(buffer, KM_USER0);
+       kunmap_atomic(buffer);
 
        return ret;
 }
@@ -792,7 +792,7 @@ static void scrub_checksum(struct btrfs_work *work)
        }
        for (i = 0; i < sbio->count; ++i) {
                page = sbio->bio->bi_io_vec[i].bv_page;
-               buffer = kmap_atomic(page, KM_USER0);
+               buffer = kmap_atomic(page);
                flags = sbio->spag[i].flags;
                logical = sbio->logical + i * PAGE_SIZE;
                ret = 0;
@@ -807,7 +807,7 @@ static void scrub_checksum(struct btrfs_work *work)
                } else {
                        WARN_ON(1);
                }
-               kunmap_atomic(buffer, KM_USER0);
+               kunmap_atomic(buffer);
                if (ret) {
                        ret = scrub_recheck_error(sbio, i);
                        if (!ret) {
index faccd47..92c2065 100644 (file)
@@ -370,9 +370,9 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
                            PAGE_CACHE_SIZE - buf_offset);
                bytes = min(bytes, bytes_left);
 
-               kaddr = kmap_atomic(dest_page, KM_USER0);
+               kaddr = kmap_atomic(dest_page);
                memcpy(kaddr + pg_offset, workspace->buf + buf_offset, bytes);
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
 
                pg_offset += bytes;
                bytes_left -= bytes;
index 95551c6..3908544 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1342,13 +1342,13 @@ int remove_arg_zero(struct linux_binprm *bprm)
                        ret = -EFAULT;
                        goto out;
                }
-               kaddr = kmap_atomic(page, KM_USER0);
+               kaddr = kmap_atomic(page);
 
                for (; offset < PAGE_SIZE && kaddr[offset];
                                offset++, bprm->p++)
                        ;
 
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                put_arg_page(page);
 
                if (offset == PAGE_SIZE)
index 8040583..c61e62a 100644 (file)
@@ -597,7 +597,7 @@ int exofs_make_empty(struct inode *inode, struct inode *parent)
                goto fail;
        }
 
-       kaddr = kmap_atomic(page, KM_USER0);
+       kaddr = kmap_atomic(page);
        de = (struct exofs_dir_entry *)kaddr;
        de->name_len = 1;
        de->rec_len = cpu_to_le16(EXOFS_DIR_REC_LEN(1));
@@ -611,7 +611,7 @@ int exofs_make_empty(struct inode *inode, struct inode *parent)
        de->inode_no = cpu_to_le64(parent->i_ino);
        memcpy(de->name, PARENT_DIR, sizeof(PARENT_DIR));
        exofs_set_de_type(de, inode);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
        err = exofs_commit_chunk(page, 0, chunk_size);
 fail:
        page_cache_release(page);
index d37df35..0f4f5c9 100644 (file)
@@ -645,7 +645,7 @@ int ext2_make_empty(struct inode *inode, struct inode *parent)
                unlock_page(page);
                goto fail;
        }
-       kaddr = kmap_atomic(page, KM_USER0);
+       kaddr = kmap_atomic(page);
        memset(kaddr, 0, chunk_size);
        de = (struct ext2_dir_entry_2 *)kaddr;
        de->name_len = 1;
@@ -660,7 +660,7 @@ int ext2_make_empty(struct inode *inode, struct inode *parent)
        de->inode = cpu_to_le32(parent->i_ino);
        memcpy (de->name, "..\0", 4);
        ext2_set_de_type (de, inode);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
        err = ext2_commit_chunk(page, 0, chunk_size);
 fail:
        page_cache_release(page);
index 5f3368a..7df2b5e 100644 (file)
@@ -838,10 +838,10 @@ static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
                        }
                }
                if (page) {
-                       void *mapaddr = kmap_atomic(page, KM_USER0);
+                       void *mapaddr = kmap_atomic(page);
                        void *buf = mapaddr + offset;
                        offset += fuse_copy_do(cs, &buf, &count);
-                       kunmap_atomic(mapaddr, KM_USER0);
+                       kunmap_atomic(mapaddr);
                } else
                        offset += fuse_copy_do(cs, NULL, &count);
        }
index 4a199fd..a841868 100644 (file)
@@ -1887,11 +1887,11 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
                    in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV)
                        goto out;
 
-               vaddr = kmap_atomic(pages[0], KM_USER0);
+               vaddr = kmap_atomic(pages[0]);
                err = fuse_copy_ioctl_iovec(fc, iov_page, vaddr,
                                            transferred, in_iovs + out_iovs,
                                            (flags & FUSE_IOCTL_COMPAT) != 0);
-               kunmap_atomic(vaddr, KM_USER0);
+               kunmap_atomic(vaddr);
                if (err)
                        goto out;
 
index 501e5cb..38b7a74 100644 (file)
@@ -434,12 +434,12 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
        if (error)
                return error;
 
-       kaddr = kmap_atomic(page, KM_USER0);
+       kaddr = kmap_atomic(page);
        if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
                dsize = (dibh->b_size - sizeof(struct gfs2_dinode));
        memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
        memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
        flush_dcache_page(page);
        brelse(dibh);
        SetPageUptodate(page);
@@ -542,9 +542,9 @@ int gfs2_internal_read(struct gfs2_inode *ip, struct file_ra_state *ra_state,
                page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
                if (IS_ERR(page))
                        return PTR_ERR(page);
-               p = kmap_atomic(page, KM_USER0);
+               p = kmap_atomic(page);
                memcpy(buf + copied, p + offset, amt);
-               kunmap_atomic(p, KM_USER0);
+               kunmap_atomic(p);
                mark_page_accessed(page);
                page_cache_release(page);
                copied += amt;
@@ -788,11 +788,11 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
        unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);
 
        BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode)));
-       kaddr = kmap_atomic(page, KM_USER0);
+       kaddr = kmap_atomic(page);
        memcpy(buf + pos, kaddr + pos, copied);
        memset(kaddr + pos + copied, 0, len - copied);
        flush_dcache_page(page);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
        if (!PageUptodate(page))
                SetPageUptodate(page);
index 0301be6..df7c6e8 100644 (file)
@@ -553,11 +553,11 @@ static void gfs2_check_magic(struct buffer_head *bh)
        __be32 *ptr;
 
        clear_buffer_escaped(bh);
-       kaddr = kmap_atomic(bh->b_page, KM_USER0);
+       kaddr = kmap_atomic(bh->b_page);
        ptr = kaddr + bh_offset(bh);
        if (*ptr == cpu_to_be32(GFS2_MAGIC))
                set_buffer_escaped(bh);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 }
 
 static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh,
@@ -594,10 +594,10 @@ static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh,
                if (buffer_escaped(bd->bd_bh)) {
                        void *kaddr;
                        bh1 = gfs2_log_get_buf(sdp);
-                       kaddr = kmap_atomic(bd->bd_bh->b_page, KM_USER0);
+                       kaddr = kmap_atomic(bd->bd_bh->b_page);
                        memcpy(bh1->b_data, kaddr + bh_offset(bd->bd_bh),
                               bh1->b_size);
-                       kunmap_atomic(kaddr, KM_USER0);
+                       kunmap_atomic(kaddr);
                        *(__be32 *)bh1->b_data = 0;
                        clear_buffer_escaped(bd->bd_bh);
                        unlock_buffer(bd->bd_bh);
index a45b21b..c0f8904 100644 (file)
@@ -720,12 +720,12 @@ get_a_page:
 
        gfs2_trans_add_bh(ip->i_gl, bh, 0);
 
-       kaddr = kmap_atomic(page, KM_USER0);
+       kaddr = kmap_atomic(page);
        if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
                nbytes = PAGE_CACHE_SIZE - offset;
        memcpy(kaddr + offset, ptr, nbytes);
        flush_dcache_page(page);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
        unlock_page(page);
        page_cache_release(page);
 
index 59c09f9..e49e81b 100644 (file)
@@ -328,7 +328,7 @@ repeat:
                new_offset = offset_in_page(jh2bh(jh_in)->b_data);
        }
 
-       mapped_data = kmap_atomic(new_page, KM_USER0);
+       mapped_data = kmap_atomic(new_page);
        /*
         * Check for escaping
         */
@@ -337,7 +337,7 @@ repeat:
                need_copy_out = 1;
                do_escape = 1;
        }
-       kunmap_atomic(mapped_data, KM_USER0);
+       kunmap_atomic(mapped_data);
 
        /*
         * Do we need to do a data copy?
@@ -354,9 +354,9 @@ repeat:
                }
 
                jh_in->b_frozen_data = tmp;
-               mapped_data = kmap_atomic(new_page, KM_USER0);
+               mapped_data = kmap_atomic(new_page);
                memcpy(tmp, mapped_data + new_offset, jh2bh(jh_in)->b_size);
-               kunmap_atomic(mapped_data, KM_USER0);
+               kunmap_atomic(mapped_data);
 
                new_page = virt_to_page(tmp);
                new_offset = offset_in_page(tmp);
@@ -368,9 +368,9 @@ repeat:
         * copying, we can finally do so.
         */
        if (do_escape) {
-               mapped_data = kmap_atomic(new_page, KM_USER0);
+               mapped_data = kmap_atomic(new_page);
                *((unsigned int *)(mapped_data + new_offset)) = 0;
-               kunmap_atomic(mapped_data, KM_USER0);
+               kunmap_atomic(mapped_data);
        }
 
        set_bh_page(new_bh, new_page, new_offset);
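
The escape test elided between the two hunks above checks whether the
buffer's first word happens to equal the journal magic; if it does, the
word is zeroed in the on-disk copy (the do_escape branch) and flagged so
recovery can restore it, ensuring a data block can never masquerade as a
journal descriptor. The check is presumably along these lines
(JFS_MAGIC_NUMBER is jbd's on-disk magic):

    if (*((__be32 *)(mapped_data + new_offset)) ==
        cpu_to_be32(JFS_MAGIC_NUMBER)) {
            need_copy_out = 1;
            do_escape = 1;
    }
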
index 7fce94b..b2a7e52 100644 (file)
@@ -718,9 +718,9 @@ done:
                            "Possible IO failure.\n");
                page = jh2bh(jh)->b_page;
                offset = offset_in_page(jh2bh(jh)->b_data);
-               source = kmap_atomic(page, KM_USER0);
+               source = kmap_atomic(page);
                memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
-               kunmap_atomic(source, KM_USER0);
+               kunmap_atomic(source);
        }
        jbd_unlock_bh_state(bh);
 
index 5069b84..c067a8c 100644 (file)
@@ -286,10 +286,10 @@ static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
        char *addr;
        __u32 checksum;
 
-       addr = kmap_atomic(page, KM_USER0);
+       addr = kmap_atomic(page);
        checksum = crc32_be(crc32_sum,
                (void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
-       kunmap_atomic(addr, KM_USER0);
+       kunmap_atomic(addr);
 
        return checksum;
 }
index c0a5f9f..5ff8940 100644 (file)
@@ -345,7 +345,7 @@ repeat:
                new_offset = offset_in_page(jh2bh(jh_in)->b_data);
        }
 
-       mapped_data = kmap_atomic(new_page, KM_USER0);
+       mapped_data = kmap_atomic(new_page);
        /*
         * Fire the data frozen trigger if the data wasn't already frozen.  Do this
         * before checking for escaping, as the trigger may modify the magic
@@ -364,7 +364,7 @@ repeat:
                need_copy_out = 1;
                do_escape = 1;
        }
-       kunmap_atomic(mapped_data, KM_USER0);
+       kunmap_atomic(mapped_data);
 
        /*
         * Do we need to do a data copy?
@@ -385,9 +385,9 @@ repeat:
                }
 
                jh_in->b_frozen_data = tmp;
-               mapped_data = kmap_atomic(new_page, KM_USER0);
+               mapped_data = kmap_atomic(new_page);
                memcpy(tmp, mapped_data + new_offset, jh2bh(jh_in)->b_size);
-               kunmap_atomic(mapped_data, KM_USER0);
+               kunmap_atomic(mapped_data);
 
                new_page = virt_to_page(tmp);
                new_offset = offset_in_page(tmp);
@@ -406,9 +406,9 @@ repeat:
         * copying, we can finally do so.
         */
        if (do_escape) {
-               mapped_data = kmap_atomic(new_page, KM_USER0);
+               mapped_data = kmap_atomic(new_page);
                *((unsigned int *)(mapped_data + new_offset)) = 0;
-               kunmap_atomic(mapped_data, KM_USER0);
+               kunmap_atomic(mapped_data);
        }
 
        set_bh_page(new_bh, new_page, new_offset);
index 35ae096..e5aba56 100644 (file)
@@ -783,12 +783,12 @@ done:
                            "Possible IO failure.\n");
                page = jh2bh(jh)->b_page;
                offset = offset_in_page(jh2bh(jh)->b_data);
-               source = kmap_atomic(page, KM_USER0);
+               source = kmap_atomic(page);
                /* Fire data frozen trigger just before we copy the data */
                jbd2_buffer_frozen_trigger(jh, source + offset,
                                           jh->b_triggers);
                memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
-               kunmap_atomic(source, KM_USER0);
+               kunmap_atomic(source);
 
                /*
                 * Now that the frozen data is saved off, we need to store
index 3de7a32..1b6e21d 100644 (file)
@@ -177,17 +177,17 @@ static struct page *logfs_get_dd_page(struct inode *dir, struct dentry *dentry)
                                (filler_t *)logfs_readpage, NULL);
                if (IS_ERR(page))
                        return page;
-               dd = kmap_atomic(page, KM_USER0);
+               dd = kmap_atomic(page);
                BUG_ON(dd->namelen == 0);
 
                if (name->len != be16_to_cpu(dd->namelen) ||
                                memcmp(name->name, dd->name, name->len)) {
-                       kunmap_atomic(dd, KM_USER0);
+                       kunmap_atomic(dd);
                        page_cache_release(page);
                        continue;
                }
 
-               kunmap_atomic(dd, KM_USER0);
+               kunmap_atomic(dd);
                return page;
        }
        return NULL;
@@ -365,9 +365,9 @@ static struct dentry *logfs_lookup(struct inode *dir, struct dentry *dentry,
                return NULL;
        }
        index = page->index;
-       dd = kmap_atomic(page, KM_USER0);
+       dd = kmap_atomic(page);
        ino = be64_to_cpu(dd->ino);
-       kunmap_atomic(dd, KM_USER0);
+       kunmap_atomic(dd);
        page_cache_release(page);
 
        inode = logfs_iget(dir->i_sb, ino);
@@ -402,12 +402,12 @@ static int logfs_write_dir(struct inode *dir, struct dentry *dentry,
                if (!page)
                        return -ENOMEM;
 
-               dd = kmap_atomic(page, KM_USER0);
+               dd = kmap_atomic(page);
                memset(dd, 0, sizeof(*dd));
                dd->ino = cpu_to_be64(inode->i_ino);
                dd->type = logfs_type(inode);
                logfs_set_name(dd, &dentry->d_name);
-               kunmap_atomic(dd, KM_USER0);
+               kunmap_atomic(dd);
 
                err = logfs_write_buf(dir, page, WF_LOCK);
                unlock_page(page);
@@ -579,9 +579,9 @@ static int logfs_get_dd(struct inode *dir, struct dentry *dentry,
        if (IS_ERR(page))
                return PTR_ERR(page);
        *pos = page->index;
-       map = kmap_atomic(page, KM_USER0);
+       map = kmap_atomic(page);
        memcpy(dd, map, sizeof(*dd));
-       kunmap_atomic(map, KM_USER0);
+       kunmap_atomic(map);
        page_cache_release(page);
        return 0;
 }
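
Read-side conversions are just as mechanical; the only care needed is that every exit path still unmaps, as logfs_get_dd_page does above. A sketch under the same assumptions (the field layout here is illustrative):

    #include <linux/highmem.h>
    #include <linux/types.h>

    /* Illustrative sketch: pull one big-endian 64-bit field out of a
     * highmem page without holding the mapping any longer than needed. */
    static u64 read_be64_field(struct page *page)
    {
            __be64 *map = kmap_atomic(page);
            u64 val = be64_to_cpu(map[0]);

            kunmap_atomic(map);
            return val;
    }
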
index 4153e65..e3ab5e5 100644
@@ -517,9 +517,9 @@ static int indirect_write_alias(struct super_block *sb,
 
                ino = page->mapping->host->i_ino;
                logfs_unpack_index(page->index, &bix, &level);
-               child = kmap_atomic(page, KM_USER0);
+               child = kmap_atomic(page);
                val = child[pos];
-               kunmap_atomic(child, KM_USER0);
+               kunmap_atomic(child);
                err = write_one_alias(sb, ino, bix, level, pos, val);
                if (err)
                        return err;
@@ -673,9 +673,9 @@ static void alloc_indirect_block(struct inode *inode, struct page *page,
        alloc_data_block(inode, page);
 
        block = logfs_block(page);
-       array = kmap_atomic(page, KM_USER0);
+       array = kmap_atomic(page);
        initialize_block_counters(page, block, array, page_is_empty);
-       kunmap_atomic(array, KM_USER0);
+       kunmap_atomic(array);
 }
 
 static void block_set_pointer(struct page *page, int index, u64 ptr)
@@ -685,10 +685,10 @@ static void block_set_pointer(struct page *page, int index, u64 ptr)
        u64 oldptr;
 
        BUG_ON(!block);
-       array = kmap_atomic(page, KM_USER0);
+       array = kmap_atomic(page);
        oldptr = be64_to_cpu(array[index]);
        array[index] = cpu_to_be64(ptr);
-       kunmap_atomic(array, KM_USER0);
+       kunmap_atomic(array);
        SetPageUptodate(page);
 
        block->full += !!(ptr & LOGFS_FULLY_POPULATED)
@@ -701,9 +701,9 @@ static u64 block_get_pointer(struct page *page, int index)
        __be64 *block;
        u64 ptr;
 
-       block = kmap_atomic(page, KM_USER0);
+       block = kmap_atomic(page);
        ptr = be64_to_cpu(block[index]);
-       kunmap_atomic(block, KM_USER0);
+       kunmap_atomic(block);
        return ptr;
 }
 
@@ -850,7 +850,7 @@ static u64 seek_holedata_loop(struct inode *inode, u64 bix, int data)
                }
 
                slot = get_bits(bix, SUBLEVEL(level));
-               rblock = kmap_atomic(page, KM_USER0);
+               rblock = kmap_atomic(page);
                while (slot < LOGFS_BLOCK_FACTOR) {
                        if (data && (rblock[slot] != 0))
                                break;
@@ -861,12 +861,12 @@ static u64 seek_holedata_loop(struct inode *inode, u64 bix, int data)
                        bix &= ~(increment - 1);
                }
                if (slot >= LOGFS_BLOCK_FACTOR) {
-                       kunmap_atomic(rblock, KM_USER0);
+                       kunmap_atomic(rblock);
                        logfs_put_read_page(page);
                        return bix;
                }
                bofs = be64_to_cpu(rblock[slot]);
-               kunmap_atomic(rblock, KM_USER0);
+               kunmap_atomic(rblock);
                logfs_put_read_page(page);
                if (!bofs) {
                        BUG_ON(data);
@@ -1961,9 +1961,9 @@ int logfs_read_inode(struct inode *inode)
        if (IS_ERR(page))
                return PTR_ERR(page);
 
-       di = kmap_atomic(page, KM_USER0);
+       di = kmap_atomic(page);
        logfs_disk_to_inode(di, inode);
-       kunmap_atomic(di, KM_USER0);
+       kunmap_atomic(di);
        move_page_to_inode(inode, page);
        page_cache_release(page);
        return 0;
@@ -1982,9 +1982,9 @@ static struct page *inode_to_page(struct inode *inode)
        if (!page)
                return NULL;
 
-       di = kmap_atomic(page, KM_USER0);
+       di = kmap_atomic(page);
        logfs_inode_to_disk(inode, di);
-       kunmap_atomic(di, KM_USER0);
+       kunmap_atomic(di);
        move_inode_to_page(page, inode);
        return page;
 }
@@ -2041,13 +2041,13 @@ static void logfs_mod_segment_entry(struct super_block *sb, u32 segno,
 
        if (write)
                alloc_indirect_block(inode, page, 0);
-       se = kmap_atomic(page, KM_USER0);
+       se = kmap_atomic(page);
        change_se(se + child_no, arg);
        if (write) {
                logfs_set_alias(sb, logfs_block(page), child_no);
                BUG_ON((int)be32_to_cpu(se[child_no].valid) > super->s_segsize);
        }
-       kunmap_atomic(se, KM_USER0);
+       kunmap_atomic(se);
 
        logfs_put_write_page(page);
 }
@@ -2245,10 +2245,10 @@ int logfs_inode_write(struct inode *inode, const void *buf, size_t count,
        if (!page)
                return -ENOMEM;
 
-       pagebuf = kmap_atomic(page, KM_USER0);
+       pagebuf = kmap_atomic(page);
        memcpy(pagebuf, buf, count);
        flush_dcache_page(page);
-       kunmap_atomic(pagebuf, KM_USER0);
+       kunmap_atomic(pagebuf);
 
        if (i_size_read(inode) < pos + LOGFS_BLOCKSIZE)
                i_size_write(inode, pos + LOGFS_BLOCKSIZE);
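
logfs_inode_write shows the write-side ordering the series preserves everywhere: copy into the mapped page, flush the D-cache, then unmap. A sketch with an assumed helper name:

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Assumed helper: copy @count bytes into the start of @page and keep
     * the data cache coherent on architectures with virtual caches. */
    static void copy_buf_to_page(struct page *page, const void *buf, size_t count)
    {
            void *kaddr = kmap_atomic(page);

            memcpy(kaddr, buf, count);
            flush_dcache_page(page);
            kunmap_atomic(kaddr);
    }
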
index ab798ed..e28d090 100644
@@ -543,9 +543,9 @@ void move_page_to_btree(struct page *page)
                BUG_ON(!item); /* mempool empty */
                memset(item, 0, sizeof(*item));
 
-               child = kmap_atomic(page, KM_USER0);
+               child = kmap_atomic(page);
                item->val = child[pos];
-               kunmap_atomic(child, KM_USER0);
+               kunmap_atomic(child);
                item->child_no = pos;
                list_add(&item->list, &block->item_list);
        }
index 085a926..685b2d9 100644
@@ -335,7 +335,7 @@ int minix_make_empty(struct inode *inode, struct inode *dir)
                goto fail;
        }
 
-       kaddr = kmap_atomic(page, KM_USER0);
+       kaddr = kmap_atomic(page);
        memset(kaddr, 0, PAGE_CACHE_SIZE);
 
        if (sbi->s_version == MINIX_V3) {
@@ -355,7 +355,7 @@ int minix_make_empty(struct inode *inode, struct inode *dir)
                de->inode = dir->i_ino;
                strcpy(de->name, "..");
        }
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
        err = dir_commit_chunk(page, 0, 2 * sbi->s_dirsize);
 fail:
index fa96a26..20a4fcf 100644
@@ -3493,9 +3493,9 @@ retry:
        if (err)
                goto fail;
 
-       kaddr = kmap_atomic(page, KM_USER0);
+       kaddr = kmap_atomic(page);
        memcpy(kaddr, symname, len-1);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
        err = pagecache_write_end(NULL, mapping, 0, len-1, len-1,
                                                        page, fsdata);
index fd9a872..32aa691 100644
@@ -260,10 +260,10 @@ void nfs_readdir_clear_array(struct page *page)
        struct nfs_cache_array *array;
        int i;
 
-       array = kmap_atomic(page, KM_USER0);
+       array = kmap_atomic(page);
        for (i = 0; i < array->size; i++)
                kfree(array->array[i].string.name);
-       kunmap_atomic(array, KM_USER0);
+       kunmap_atomic(array);
 }
 
 /*
@@ -1870,11 +1870,11 @@ static int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *sym
        if (!page)
                return -ENOMEM;
 
-       kaddr = kmap_atomic(page, KM_USER0);
+       kaddr = kmap_atomic(page);
        memcpy(kaddr, symname, pathlen);
        if (pathlen < PAGE_SIZE)
                memset(kaddr + pathlen, 0, PAGE_SIZE - pathlen);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
        error = NFS_PROTO(dir)->symlink(dir, dentry, page, pathlen, &attr);
        if (error != 0) {
index ec9f6ef..caf92d0 100644
@@ -193,7 +193,7 @@ static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dent
         * when talking to the server, we always send cookie 0
         * instead of 1 or 2.
         */
-       start = p = kmap_atomic(*readdir->pages, KM_USER0);
+       start = p = kmap_atomic(*readdir->pages);
        
        if (cookie == 0) {
                *p++ = xdr_one;                                  /* next */
@@ -221,7 +221,7 @@ static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dent
 
        readdir->pgbase = (char *)p - (char *)start;
        readdir->count -= readdir->pgbase;
-       kunmap_atomic(start, KM_USER0);
+       kunmap_atomic(start);
 }
 
 static int nfs4_wait_clnt_recover(struct nfs_client *clp)
index c9b342c..dab5c4c 100644
@@ -218,11 +218,11 @@ int nilfs_cpfile_get_checkpoint(struct inode *cpfile,
                                                                 kaddr, 1);
                mark_buffer_dirty(cp_bh);
 
-               kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+               kaddr = kmap_atomic(header_bh->b_page);
                header = nilfs_cpfile_block_get_header(cpfile, header_bh,
                                                       kaddr);
                le64_add_cpu(&header->ch_ncheckpoints, 1);
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                mark_buffer_dirty(header_bh);
                nilfs_mdt_mark_dirty(cpfile);
        }
@@ -313,7 +313,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
                        continue;
                }
 
-               kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
+               kaddr = kmap_atomic(cp_bh->b_page);
                cp = nilfs_cpfile_block_get_checkpoint(
                        cpfile, cno, cp_bh, kaddr);
                nicps = 0;
@@ -334,7 +334,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
                                                cpfile, cp_bh, kaddr, nicps);
                                if (count == 0) {
                                        /* make hole */
-                                       kunmap_atomic(kaddr, KM_USER0);
+                                       kunmap_atomic(kaddr);
                                        brelse(cp_bh);
                                        ret =
                                          nilfs_cpfile_delete_checkpoint_block(
@@ -349,18 +349,18 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
                        }
                }
 
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                brelse(cp_bh);
        }
 
        if (tnicps > 0) {
-               kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+               kaddr = kmap_atomic(header_bh->b_page);
                header = nilfs_cpfile_block_get_header(cpfile, header_bh,
                                                       kaddr);
                le64_add_cpu(&header->ch_ncheckpoints, -(u64)tnicps);
                mark_buffer_dirty(header_bh);
                nilfs_mdt_mark_dirty(cpfile);
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
        }
 
        brelse(header_bh);
@@ -408,7 +408,7 @@ static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop,
                        continue; /* skip hole */
                }
 
-               kaddr = kmap_atomic(bh->b_page, KM_USER0);
+               kaddr = kmap_atomic(bh->b_page);
                cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
                for (i = 0; i < ncps && n < nci; i++, cp = (void *)cp + cpsz) {
                        if (!nilfs_checkpoint_invalid(cp)) {
@@ -418,7 +418,7 @@ static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop,
                                n++;
                        }
                }
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                brelse(bh);
        }
 
@@ -451,10 +451,10 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
                ret = nilfs_cpfile_get_header_block(cpfile, &bh);
                if (ret < 0)
                        goto out;
-               kaddr = kmap_atomic(bh->b_page, KM_USER0);
+               kaddr = kmap_atomic(bh->b_page);
                header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
                curr = le64_to_cpu(header->ch_snapshot_list.ssl_next);
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                brelse(bh);
                if (curr == 0) {
                        ret = 0;
@@ -472,7 +472,7 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
                        ret = 0; /* No snapshots (started from a hole block) */
                goto out;
        }
-       kaddr = kmap_atomic(bh->b_page, KM_USER0);
+       kaddr = kmap_atomic(bh->b_page);
        while (n < nci) {
                cp = nilfs_cpfile_block_get_checkpoint(cpfile, curr, bh, kaddr);
                curr = ~(__u64)0; /* Terminator */
@@ -488,7 +488,7 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
 
                next_blkoff = nilfs_cpfile_get_blkoff(cpfile, next);
                if (curr_blkoff != next_blkoff) {
-                       kunmap_atomic(kaddr, KM_USER0);
+                       kunmap_atomic(kaddr);
                        brelse(bh);
                        ret = nilfs_cpfile_get_checkpoint_block(cpfile, next,
                                                                0, &bh);
@@ -496,12 +496,12 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
                                WARN_ON(ret == -ENOENT);
                                goto out;
                        }
-                       kaddr = kmap_atomic(bh->b_page, KM_USER0);
+                       kaddr = kmap_atomic(bh->b_page);
                }
                curr = next;
                curr_blkoff = next_blkoff;
        }
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
        brelse(bh);
        *cnop = curr;
        ret = n;
@@ -592,24 +592,24 @@ static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
        ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
        if (ret < 0)
                goto out_sem;
-       kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
+       kaddr = kmap_atomic(cp_bh->b_page);
        cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
        if (nilfs_checkpoint_invalid(cp)) {
                ret = -ENOENT;
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                goto out_cp;
        }
        if (nilfs_checkpoint_snapshot(cp)) {
                ret = 0;
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                goto out_cp;
        }
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
        ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
        if (ret < 0)
                goto out_cp;
-       kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+       kaddr = kmap_atomic(header_bh->b_page);
        header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
        list = &header->ch_snapshot_list;
        curr_bh = header_bh;
@@ -621,13 +621,13 @@ static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
                prev_blkoff = nilfs_cpfile_get_blkoff(cpfile, prev);
                curr = prev;
                if (curr_blkoff != prev_blkoff) {
-                       kunmap_atomic(kaddr, KM_USER0);
+                       kunmap_atomic(kaddr);
                        brelse(curr_bh);
                        ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr,
                                                                0, &curr_bh);
                        if (ret < 0)
                                goto out_header;
-                       kaddr = kmap_atomic(curr_bh->b_page, KM_USER0);
+                       kaddr = kmap_atomic(curr_bh->b_page);
                }
                curr_blkoff = prev_blkoff;
                cp = nilfs_cpfile_block_get_checkpoint(
@@ -635,7 +635,7 @@ static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
                list = &cp->cp_snapshot_list;
                prev = le64_to_cpu(list->ssl_prev);
        }
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
        if (prev != 0) {
                ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
@@ -647,29 +647,29 @@ static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
                get_bh(prev_bh);
        }
 
-       kaddr = kmap_atomic(curr_bh->b_page, KM_USER0);
+       kaddr = kmap_atomic(curr_bh->b_page);
        list = nilfs_cpfile_block_get_snapshot_list(
                cpfile, curr, curr_bh, kaddr);
        list->ssl_prev = cpu_to_le64(cno);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
-       kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
+       kaddr = kmap_atomic(cp_bh->b_page);
        cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
        cp->cp_snapshot_list.ssl_next = cpu_to_le64(curr);
        cp->cp_snapshot_list.ssl_prev = cpu_to_le64(prev);
        nilfs_checkpoint_set_snapshot(cp);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
-       kaddr = kmap_atomic(prev_bh->b_page, KM_USER0);
+       kaddr = kmap_atomic(prev_bh->b_page);
        list = nilfs_cpfile_block_get_snapshot_list(
                cpfile, prev, prev_bh, kaddr);
        list->ssl_next = cpu_to_le64(cno);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
-       kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+       kaddr = kmap_atomic(header_bh->b_page);
        header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
        le64_add_cpu(&header->ch_nsnapshots, 1);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
        mark_buffer_dirty(prev_bh);
        mark_buffer_dirty(curr_bh);
@@ -710,23 +710,23 @@ static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
        ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
        if (ret < 0)
                goto out_sem;
-       kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
+       kaddr = kmap_atomic(cp_bh->b_page);
        cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
        if (nilfs_checkpoint_invalid(cp)) {
                ret = -ENOENT;
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                goto out_cp;
        }
        if (!nilfs_checkpoint_snapshot(cp)) {
                ret = 0;
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                goto out_cp;
        }
 
        list = &cp->cp_snapshot_list;
        next = le64_to_cpu(list->ssl_next);
        prev = le64_to_cpu(list->ssl_prev);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
        ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
        if (ret < 0)
@@ -750,29 +750,29 @@ static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
                get_bh(prev_bh);
        }
 
-       kaddr = kmap_atomic(next_bh->b_page, KM_USER0);
+       kaddr = kmap_atomic(next_bh->b_page);
        list = nilfs_cpfile_block_get_snapshot_list(
                cpfile, next, next_bh, kaddr);
        list->ssl_prev = cpu_to_le64(prev);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
-       kaddr = kmap_atomic(prev_bh->b_page, KM_USER0);
+       kaddr = kmap_atomic(prev_bh->b_page);
        list = nilfs_cpfile_block_get_snapshot_list(
                cpfile, prev, prev_bh, kaddr);
        list->ssl_next = cpu_to_le64(next);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
-       kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
+       kaddr = kmap_atomic(cp_bh->b_page);
        cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
        cp->cp_snapshot_list.ssl_next = cpu_to_le64(0);
        cp->cp_snapshot_list.ssl_prev = cpu_to_le64(0);
        nilfs_checkpoint_clear_snapshot(cp);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
-       kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+       kaddr = kmap_atomic(header_bh->b_page);
        header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
        le64_add_cpu(&header->ch_nsnapshots, -1);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
        mark_buffer_dirty(next_bh);
        mark_buffer_dirty(prev_bh);
@@ -829,13 +829,13 @@ int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno)
        ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
        if (ret < 0)
                goto out;
-       kaddr = kmap_atomic(bh->b_page, KM_USER0);
+       kaddr = kmap_atomic(bh->b_page);
        cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
        if (nilfs_checkpoint_invalid(cp))
                ret = -ENOENT;
        else
                ret = nilfs_checkpoint_snapshot(cp);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
        brelse(bh);
 
  out:
@@ -912,12 +912,12 @@ int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat)
        ret = nilfs_cpfile_get_header_block(cpfile, &bh);
        if (ret < 0)
                goto out_sem;
-       kaddr = kmap_atomic(bh->b_page, KM_USER0);
+       kaddr = kmap_atomic(bh->b_page);
        header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
        cpstat->cs_cno = nilfs_mdt_cno(cpfile);
        cpstat->cs_ncps = le64_to_cpu(header->ch_ncheckpoints);
        cpstat->cs_nsss = le64_to_cpu(header->ch_nsnapshots);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
        brelse(bh);
 
  out_sem:
index fcc2f86..b5c13f3 100644
@@ -85,13 +85,13 @@ void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req)
        struct nilfs_dat_entry *entry;
        void *kaddr;
 
-       kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
+       kaddr = kmap_atomic(req->pr_entry_bh->b_page);
        entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
                                             req->pr_entry_bh, kaddr);
        entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
        entry->de_end = cpu_to_le64(NILFS_CNO_MAX);
        entry->de_blocknr = cpu_to_le64(0);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
        nilfs_palloc_commit_alloc_entry(dat, req);
        nilfs_dat_commit_entry(dat, req);
@@ -109,13 +109,13 @@ static void nilfs_dat_commit_free(struct inode *dat,
        struct nilfs_dat_entry *entry;
        void *kaddr;
 
-       kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
+       kaddr = kmap_atomic(req->pr_entry_bh->b_page);
        entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
                                             req->pr_entry_bh, kaddr);
        entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
        entry->de_end = cpu_to_le64(NILFS_CNO_MIN);
        entry->de_blocknr = cpu_to_le64(0);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
        nilfs_dat_commit_entry(dat, req);
        nilfs_palloc_commit_free_entry(dat, req);
@@ -136,12 +136,12 @@ void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
        struct nilfs_dat_entry *entry;
        void *kaddr;
 
-       kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
+       kaddr = kmap_atomic(req->pr_entry_bh->b_page);
        entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
                                             req->pr_entry_bh, kaddr);
        entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat));
        entry->de_blocknr = cpu_to_le64(blocknr);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
        nilfs_dat_commit_entry(dat, req);
 }
@@ -160,12 +160,12 @@ int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
                return ret;
        }
 
-       kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
+       kaddr = kmap_atomic(req->pr_entry_bh->b_page);
        entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
                                             req->pr_entry_bh, kaddr);
        start = le64_to_cpu(entry->de_start);
        blocknr = le64_to_cpu(entry->de_blocknr);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
        if (blocknr == 0) {
                ret = nilfs_palloc_prepare_free_entry(dat, req);
@@ -186,7 +186,7 @@ void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
        sector_t blocknr;
        void *kaddr;
 
-       kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
+       kaddr = kmap_atomic(req->pr_entry_bh->b_page);
        entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
                                             req->pr_entry_bh, kaddr);
        end = start = le64_to_cpu(entry->de_start);
@@ -196,7 +196,7 @@ void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
        }
        entry->de_end = cpu_to_le64(end);
        blocknr = le64_to_cpu(entry->de_blocknr);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
        if (blocknr == 0)
                nilfs_dat_commit_free(dat, req);
@@ -211,12 +211,12 @@ void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req)
        sector_t blocknr;
        void *kaddr;
 
-       kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
+       kaddr = kmap_atomic(req->pr_entry_bh->b_page);
        entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
                                             req->pr_entry_bh, kaddr);
        start = le64_to_cpu(entry->de_start);
        blocknr = le64_to_cpu(entry->de_blocknr);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
        if (start == nilfs_mdt_cno(dat) && blocknr == 0)
                nilfs_palloc_abort_free_entry(dat, req);
@@ -346,20 +346,20 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
                }
        }
 
-       kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
+       kaddr = kmap_atomic(entry_bh->b_page);
        entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
        if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
                printk(KERN_CRIT "%s: vbn = %llu, [%llu, %llu)\n", __func__,
                       (unsigned long long)vblocknr,
                       (unsigned long long)le64_to_cpu(entry->de_start),
                       (unsigned long long)le64_to_cpu(entry->de_end));
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                brelse(entry_bh);
                return -EINVAL;
        }
        WARN_ON(blocknr == 0);
        entry->de_blocknr = cpu_to_le64(blocknr);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
        mark_buffer_dirty(entry_bh);
        nilfs_mdt_mark_dirty(dat);
@@ -409,7 +409,7 @@ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
                }
        }
 
-       kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
+       kaddr = kmap_atomic(entry_bh->b_page);
        entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
        blocknr = le64_to_cpu(entry->de_blocknr);
        if (blocknr == 0) {
@@ -419,7 +419,7 @@ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
        *blocknrp = blocknr;
 
  out:
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
        brelse(entry_bh);
        return ret;
 }
@@ -440,7 +440,7 @@ ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz,
                                                   0, &entry_bh);
                if (ret < 0)
                        return ret;
-               kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
+               kaddr = kmap_atomic(entry_bh->b_page);
                /* last virtual block number in this block */
                first = vinfo->vi_vblocknr;
                do_div(first, entries_per_block);
@@ -456,7 +456,7 @@ ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz,
                        vinfo->vi_end = le64_to_cpu(entry->de_end);
                        vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr);
                }
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                brelse(entry_bh);
        }
 
index ca35b3a..df1a7fb 100644
@@ -602,7 +602,7 @@ int nilfs_make_empty(struct inode *inode, struct inode *parent)
                unlock_page(page);
                goto fail;
        }
-       kaddr = kmap_atomic(page, KM_USER0);
+       kaddr = kmap_atomic(page);
        memset(kaddr, 0, chunk_size);
        de = (struct nilfs_dir_entry *)kaddr;
        de->name_len = 1;
@@ -617,7 +617,7 @@ int nilfs_make_empty(struct inode *inode, struct inode *parent)
        de->inode = cpu_to_le64(parent->i_ino);
        memcpy(de->name, "..\0", 4);
        nilfs_set_de_type(de, inode);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
        nilfs_commit_chunk(page, mapping, 0, chunk_size);
 fail:
        page_cache_release(page);
index 684d763..5a48df7 100644
@@ -122,11 +122,11 @@ int nilfs_ifile_delete_inode(struct inode *ifile, ino_t ino)
                return ret;
        }
 
-       kaddr = kmap_atomic(req.pr_entry_bh->b_page, KM_USER0);
+       kaddr = kmap_atomic(req.pr_entry_bh->b_page);
        raw_inode = nilfs_palloc_block_get_entry(ifile, req.pr_entry_nr,
                                                 req.pr_entry_bh, kaddr);
        raw_inode->i_flags = 0;
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
        mark_buffer_dirty(req.pr_entry_bh);
        brelse(req.pr_entry_bh);
index 800e8d7..f9897d0 100644
@@ -58,12 +58,12 @@ nilfs_mdt_insert_new_block(struct inode *inode, unsigned long block,
 
        set_buffer_mapped(bh);
 
-       kaddr = kmap_atomic(bh->b_page, KM_USER0);
+       kaddr = kmap_atomic(bh->b_page);
        memset(kaddr + bh_offset(bh), 0, 1 << inode->i_blkbits);
        if (init_block)
                init_block(inode, bh, kaddr);
        flush_dcache_page(bh->b_page);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
        set_buffer_uptodate(bh);
        mark_buffer_dirty(bh);
index 65221a0..3e7b2a0 100644
@@ -119,11 +119,11 @@ void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
        struct page *spage = sbh->b_page, *dpage = dbh->b_page;
        struct buffer_head *bh;
 
-       kaddr0 = kmap_atomic(spage, KM_USER0);
-       kaddr1 = kmap_atomic(dpage, KM_USER1);
+       kaddr0 = kmap_atomic(spage);
+       kaddr1 = kmap_atomic(dpage);
        memcpy(kaddr1 + bh_offset(dbh), kaddr0 + bh_offset(sbh), sbh->b_size);
-       kunmap_atomic(kaddr1, KM_USER1);
-       kunmap_atomic(kaddr0, KM_USER0);
+       kunmap_atomic(kaddr1);
+       kunmap_atomic(kaddr0);
 
        dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS;
        dbh->b_blocknr = sbh->b_blocknr;
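
nilfs_copy_buffer above is the interesting case: two mappings held at once. The old code needed two distinct slots (KM_USER0 and KM_USER1); the stacked API allows nesting, but the mappings must be released in reverse (LIFO) order, which the converted code already does. An illustrative sketch:

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Illustrative sketch: copy a region between two highmem pages.
     * With the stack-based API the unmaps must be LIFO. */
    static void copy_page_region(struct page *dst, struct page *src,
                                 size_t off, size_t len)
    {
            void *s = kmap_atomic(src);     /* first mapping */
            void *d = kmap_atomic(dst);     /* second (nested) mapping */

            memcpy(d + off, s + off, len);
            kunmap_atomic(d);               /* release in reverse order */
            kunmap_atomic(s);
    }
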
index a604ac0..f1626f5 100644
@@ -493,9 +493,9 @@ static int nilfs_recovery_copy_block(struct the_nilfs *nilfs,
        if (unlikely(!bh_org))
                return -EIO;
 
-       kaddr = kmap_atomic(page, KM_USER0);
+       kaddr = kmap_atomic(page);
        memcpy(kaddr + bh_offset(bh_org), bh_org->b_data, bh_org->b_size);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
        brelse(bh_org);
        return 0;
 }
index 850a7c0..dc9a913 100644
@@ -227,9 +227,9 @@ static void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf,
                crc = crc32_le(crc, bh->b_data, bh->b_size);
        }
        list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
-               kaddr = kmap_atomic(bh->b_page, KM_USER0);
+               kaddr = kmap_atomic(bh->b_page);
                crc = crc32_le(crc, kaddr + bh_offset(bh), bh->b_size);
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
        }
        raw_sum->ss_datasum = cpu_to_le32(crc);
 }
index 0a0aba6..c5b7653 100644
@@ -111,11 +111,11 @@ static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
        struct nilfs_sufile_header *header;
        void *kaddr;
 
-       kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+       kaddr = kmap_atomic(header_bh->b_page);
        header = kaddr + bh_offset(header_bh);
        le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
        le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
        mark_buffer_dirty(header_bh);
 }
@@ -319,11 +319,11 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out_sem;
-       kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+       kaddr = kmap_atomic(header_bh->b_page);
        header = kaddr + bh_offset(header_bh);
        ncleansegs = le64_to_cpu(header->sh_ncleansegs);
        last_alloc = le64_to_cpu(header->sh_last_alloc);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
        nsegments = nilfs_sufile_get_nsegments(sufile);
        maxsegnum = sui->allocmax;
@@ -356,7 +356,7 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
                                                           &su_bh);
                if (ret < 0)
                        goto out_header;
-               kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+               kaddr = kmap_atomic(su_bh->b_page);
                su = nilfs_sufile_block_get_segment_usage(
                        sufile, segnum, su_bh, kaddr);
 
@@ -367,14 +367,14 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
                                continue;
                        /* found a clean segment */
                        nilfs_segment_usage_set_dirty(su);
-                       kunmap_atomic(kaddr, KM_USER0);
+                       kunmap_atomic(kaddr);
 
-                       kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+                       kaddr = kmap_atomic(header_bh->b_page);
                        header = kaddr + bh_offset(header_bh);
                        le64_add_cpu(&header->sh_ncleansegs, -1);
                        le64_add_cpu(&header->sh_ndirtysegs, 1);
                        header->sh_last_alloc = cpu_to_le64(segnum);
-                       kunmap_atomic(kaddr, KM_USER0);
+                       kunmap_atomic(kaddr);
 
                        sui->ncleansegs--;
                        mark_buffer_dirty(header_bh);
@@ -385,7 +385,7 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
                        goto out_header;
                }
 
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                brelse(su_bh);
        }
 
@@ -407,16 +407,16 @@ void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
        struct nilfs_segment_usage *su;
        void *kaddr;
 
-       kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+       kaddr = kmap_atomic(su_bh->b_page);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (unlikely(!nilfs_segment_usage_clean(su))) {
                printk(KERN_WARNING "%s: segment %llu must be clean\n",
                       __func__, (unsigned long long)segnum);
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                return;
        }
        nilfs_segment_usage_set_dirty(su);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
        nilfs_sufile_mod_counter(header_bh, -1, 1);
        NILFS_SUI(sufile)->ncleansegs--;
@@ -433,11 +433,11 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
        void *kaddr;
        int clean, dirty;
 
-       kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+       kaddr = kmap_atomic(su_bh->b_page);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
            su->su_nblocks == cpu_to_le32(0)) {
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                return;
        }
        clean = nilfs_segment_usage_clean(su);
@@ -447,7 +447,7 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
        su->su_lastmod = cpu_to_le64(0);
        su->su_nblocks = cpu_to_le32(0);
        su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
        nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
        NILFS_SUI(sufile)->ncleansegs -= clean;
@@ -464,12 +464,12 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
        void *kaddr;
        int sudirty;
 
-       kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+       kaddr = kmap_atomic(su_bh->b_page);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (nilfs_segment_usage_clean(su)) {
                printk(KERN_WARNING "%s: segment %llu is already clean\n",
                       __func__, (unsigned long long)segnum);
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                return;
        }
        WARN_ON(nilfs_segment_usage_error(su));
@@ -477,7 +477,7 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
 
        sudirty = nilfs_segment_usage_dirty(su);
        nilfs_segment_usage_set_clean(su);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
        mark_buffer_dirty(su_bh);
 
        nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
@@ -525,13 +525,13 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
        if (ret < 0)
                goto out_sem;
 
-       kaddr = kmap_atomic(bh->b_page, KM_USER0);
+       kaddr = kmap_atomic(bh->b_page);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
        WARN_ON(nilfs_segment_usage_error(su));
        if (modtime)
                su->su_lastmod = cpu_to_le64(modtime);
        su->su_nblocks = cpu_to_le32(nblocks);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
        mark_buffer_dirty(bh);
        nilfs_mdt_mark_dirty(sufile);
@@ -572,7 +572,7 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
        if (ret < 0)
                goto out_sem;
 
-       kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+       kaddr = kmap_atomic(header_bh->b_page);
        header = kaddr + bh_offset(header_bh);
        sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
        sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
@@ -582,7 +582,7 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
        spin_lock(&nilfs->ns_last_segment_lock);
        sustat->ss_prot_seq = nilfs->ns_prot_seq;
        spin_unlock(&nilfs->ns_last_segment_lock);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
        brelse(header_bh);
 
  out_sem:
@@ -598,15 +598,15 @@ void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
        void *kaddr;
        int suclean;
 
-       kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+       kaddr = kmap_atomic(su_bh->b_page);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (nilfs_segment_usage_error(su)) {
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                return;
        }
        suclean = nilfs_segment_usage_clean(su);
        nilfs_segment_usage_set_error(su);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
        if (suclean) {
                nilfs_sufile_mod_counter(header_bh, -1, 0);
@@ -675,7 +675,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile,
                        /* hole */
                        continue;
                }
-               kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+               kaddr = kmap_atomic(su_bh->b_page);
                su = nilfs_sufile_block_get_segment_usage(
                        sufile, segnum, su_bh, kaddr);
                su2 = su;
@@ -684,7 +684,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile,
                             ~(1UL << NILFS_SEGMENT_USAGE_ERROR)) ||
                            nilfs_segment_is_active(nilfs, segnum + j)) {
                                ret = -EBUSY;
-                               kunmap_atomic(kaddr, KM_USER0);
+                               kunmap_atomic(kaddr);
                                brelse(su_bh);
                                goto out_header;
                        }
@@ -696,7 +696,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile,
                                nc++;
                        }
                }
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                if (nc > 0) {
                        mark_buffer_dirty(su_bh);
                        ncleaned += nc;
@@ -772,10 +772,10 @@ int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
                sui->ncleansegs -= nsegs - newnsegs;
        }
 
-       kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+       kaddr = kmap_atomic(header_bh->b_page);
        header = kaddr + bh_offset(header_bh);
        header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
        mark_buffer_dirty(header_bh);
        nilfs_mdt_mark_dirty(sufile);
@@ -840,7 +840,7 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
                        continue;
                }
 
-               kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+               kaddr = kmap_atomic(su_bh->b_page);
                su = nilfs_sufile_block_get_segment_usage(
                        sufile, segnum, su_bh, kaddr);
                for (j = 0; j < n;
@@ -853,7 +853,7 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
                                si->sui_flags |=
                                        (1UL << NILFS_SEGMENT_USAGE_ACTIVE);
                }
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                brelse(su_bh);
        }
        ret = nsegs;
@@ -902,10 +902,10 @@ int nilfs_sufile_read(struct super_block *sb, size_t susize,
                goto failed;
 
        sui = NILFS_SUI(sufile);
-       kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+       kaddr = kmap_atomic(header_bh->b_page);
        header = kaddr + bh_offset(header_bh);
        sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
        brelse(header_bh);
 
        sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;
index 0b1e885..fa9c05f 100644
@@ -94,11 +94,11 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
                        if (file_ofs < init_size)
                                ofs = init_size - file_ofs;
                        local_irq_save(flags);
-                       kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);
+                       kaddr = kmap_atomic(page);
                        memset(kaddr + bh_offset(bh) + ofs, 0,
                                        bh->b_size - ofs);
                        flush_dcache_page(page);
-                       kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);
+                       kunmap_atomic(kaddr);
                        local_irq_restore(flags);
                }
        } else {
@@ -147,11 +147,11 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
                /* Should have been verified before we got here... */
                BUG_ON(!recs);
                local_irq_save(flags);
-               kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);
+               kaddr = kmap_atomic(page);
                for (i = 0; i < recs; i++)
                        post_read_mst_fixup((NTFS_RECORD*)(kaddr +
                                        i * rec_size), rec_size);
-               kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);
+               kunmap_atomic(kaddr);
                local_irq_restore(flags);
                flush_dcache_page(page);
                if (likely(page_uptodate && !PageError(page)))
@@ -504,7 +504,7 @@ retry_readpage:
                /* Race with shrinking truncate. */
                attr_len = i_size;
        }
-       addr = kmap_atomic(page, KM_USER0);
+       addr = kmap_atomic(page);
        /* Copy the data to the page. */
        memcpy(addr, (u8*)ctx->attr +
                        le16_to_cpu(ctx->attr->data.resident.value_offset),
@@ -512,7 +512,7 @@ retry_readpage:
        /* Zero the remainder of the page. */
        memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
        flush_dcache_page(page);
-       kunmap_atomic(addr, KM_USER0);
+       kunmap_atomic(addr);
 put_unm_err_out:
        ntfs_attr_put_search_ctx(ctx);
 unm_err_out:
@@ -746,14 +746,14 @@ lock_retry_remap:
                        unsigned long *bpos, *bend;
 
                        /* Check if the buffer is zero. */
-                       kaddr = kmap_atomic(page, KM_USER0);
+                       kaddr = kmap_atomic(page);
                        bpos = (unsigned long *)(kaddr + bh_offset(bh));
                        bend = (unsigned long *)((u8*)bpos + blocksize);
                        do {
                                if (unlikely(*bpos))
                                        break;
                        } while (likely(++bpos < bend));
-                       kunmap_atomic(kaddr, KM_USER0);
+                       kunmap_atomic(kaddr);
                        if (bpos == bend) {
                                /*
                                 * Buffer is zero and sparse, no need to write
@@ -1495,14 +1495,14 @@ retry_writepage:
                /* Shrinking cannot fail. */
                BUG_ON(err);
        }
-       addr = kmap_atomic(page, KM_USER0);
+       addr = kmap_atomic(page);
        /* Copy the data from the page to the mft record. */
        memcpy((u8*)ctx->attr +
                        le16_to_cpu(ctx->attr->data.resident.value_offset),
                        addr, attr_len);
        /* Zero out of bounds area in the page cache page. */
        memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
-       kunmap_atomic(addr, KM_USER0);
+       kunmap_atomic(addr);
        flush_dcache_page(page);
        flush_dcache_mft_record_page(ctx->ntfs_ino);
        /* We are done with the page. */
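
The ntfs end-io path above shows that the dedicated interrupt slots (KM_BIO_SRC_IRQ here) go away as well: kmap_atomic() disables preemption for the lifetime of the mapping, so the same call nests safely in any atomic context, including bottom halves. A sketch with an assumed helper, mirroring the hunk above:

    #include <linux/buffer_head.h>
    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Assumed helper: zero the tail of a buffer_head's data from end-io
     * context; no per-context KM_* slot is required any more. */
    static void zero_bh_tail(struct buffer_head *bh, unsigned int ofs)
    {
            void *kaddr = kmap_atomic(bh->b_page);

            memset(kaddr + bh_offset(bh) + ofs, 0, bh->b_size - ofs);
            flush_dcache_page(bh->b_page);
            kunmap_atomic(kaddr);
    }
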
index e028199..a27e3fe 100644
@@ -1656,12 +1656,12 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
        attr_size = le32_to_cpu(a->data.resident.value_length);
        BUG_ON(attr_size != data_size);
        if (page && !PageUptodate(page)) {
-               kaddr = kmap_atomic(page, KM_USER0);
+               kaddr = kmap_atomic(page);
                memcpy(kaddr, (u8*)a +
                                le16_to_cpu(a->data.resident.value_offset),
                                attr_size);
                memset(kaddr + attr_size, 0, PAGE_CACHE_SIZE - attr_size);
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                flush_dcache_page(page);
                SetPageUptodate(page);
        }
@@ -1806,9 +1806,9 @@ undo_err_out:
                        sizeof(a->data.resident.reserved));
        /* Copy the data from the page back to the attribute value. */
        if (page) {
-               kaddr = kmap_atomic(page, KM_USER0);
+               kaddr = kmap_atomic(page);
                memcpy((u8*)a + mp_ofs, kaddr, attr_size);
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
        }
        /* Setup the allocated size in the ntfs inode in case it changed. */
        write_lock_irqsave(&ni->size_lock, flags);
@@ -2540,10 +2540,10 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
                size = PAGE_CACHE_SIZE;
                if (idx == end)
                        size = end_ofs;
-               kaddr = kmap_atomic(page, KM_USER0);
+               kaddr = kmap_atomic(page);
                memset(kaddr + start_ofs, val, size - start_ofs);
                flush_dcache_page(page);
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                set_page_dirty(page);
                page_cache_release(page);
                balance_dirty_pages_ratelimited(mapping);
@@ -2561,10 +2561,10 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
                                        "page (index 0x%lx).", idx);
                        return -ENOMEM;
                }
-               kaddr = kmap_atomic(page, KM_USER0);
+               kaddr = kmap_atomic(page);
                memset(kaddr, val, PAGE_CACHE_SIZE);
                flush_dcache_page(page);
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                /*
                 * If the page has buffers, mark them uptodate since buffer
                 * state and not page state is definitive in 2.6 kernels.
@@ -2598,10 +2598,10 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
                                        "(error, index 0x%lx).", idx);
                        return PTR_ERR(page);
                }
-               kaddr = kmap_atomic(page, KM_USER0);
+               kaddr = kmap_atomic(page);
                memset(kaddr, val, end_ofs);
                flush_dcache_page(page);
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                set_page_dirty(page);
                page_cache_release(page);
                balance_dirty_pages_ratelimited(mapping);
index c587e2d..8639169 100644
@@ -704,7 +704,7 @@ map_buffer_cached:
                                u8 *kaddr;
                                unsigned pofs;
                                        
-                               kaddr = kmap_atomic(page, KM_USER0);
+                               kaddr = kmap_atomic(page);
                                if (bh_pos < pos) {
                                        pofs = bh_pos & ~PAGE_CACHE_MASK;
                                        memset(kaddr + pofs, 0, pos - bh_pos);
@@ -713,7 +713,7 @@ map_buffer_cached:
                                        pofs = end & ~PAGE_CACHE_MASK;
                                        memset(kaddr + pofs, 0, bh_end - end);
                                }
-                               kunmap_atomic(kaddr, KM_USER0);
+                               kunmap_atomic(kaddr);
                                flush_dcache_page(page);
                        }
                        continue;
@@ -1287,9 +1287,9 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
                len = PAGE_CACHE_SIZE - ofs;
                if (len > bytes)
                        len = bytes;
-               addr = kmap_atomic(*pages, KM_USER0);
+               addr = kmap_atomic(*pages);
                left = __copy_from_user_inatomic(addr + ofs, buf, len);
-               kunmap_atomic(addr, KM_USER0);
+               kunmap_atomic(addr);
                if (unlikely(left)) {
                        /* Do it the slow way. */
                        addr = kmap(*pages);
@@ -1401,10 +1401,10 @@ static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
                len = PAGE_CACHE_SIZE - ofs;
                if (len > bytes)
                        len = bytes;
-               addr = kmap_atomic(*pages, KM_USER0);
+               addr = kmap_atomic(*pages);
                copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs,
                                *iov, *iov_ofs, len);
-               kunmap_atomic(addr, KM_USER0);
+               kunmap_atomic(addr);
                if (unlikely(copied != len)) {
                        /* Do it the slow way. */
                        addr = kmap(*pages);
@@ -1691,7 +1691,7 @@ static int ntfs_commit_pages_after_write(struct page **pages,
        BUG_ON(end > le32_to_cpu(a->length) -
                        le16_to_cpu(a->data.resident.value_offset));
        kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
-       kaddr = kmap_atomic(page, KM_USER0);
+       kaddr = kmap_atomic(page);
        /* Copy the received data from the page to the mft record. */
        memcpy(kattr + pos, kaddr + pos, bytes);
        /* Update the attribute length if necessary. */
@@ -1713,7 +1713,7 @@ static int ntfs_commit_pages_after_write(struct page **pages,
                flush_dcache_page(page);
                SetPageUptodate(page);
        }
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
        /* Update initialized_size/i_size if necessary. */
        read_lock_irqsave(&ni->size_lock, flags);
        initialized_size = ni->initialized_size;
index f907611..28d4e6a 100644
@@ -2473,7 +2473,7 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
                        nr_free -= PAGE_CACHE_SIZE * 8;
                        continue;
                }
-               kaddr = kmap_atomic(page, KM_USER0);
+               kaddr = kmap_atomic(page);
                /*
                 * Subtract the number of set bits. If this
                 * is the last page and it is partial we don't really care as
@@ -2483,7 +2483,7 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
                 */
                nr_free -= bitmap_weight(kaddr,
                                        PAGE_CACHE_SIZE * BITS_PER_BYTE);
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                page_cache_release(page);
        }
        ntfs_debug("Finished reading $Bitmap, last index = 0x%lx.", index - 1);
@@ -2544,7 +2544,7 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
                        nr_free -= PAGE_CACHE_SIZE * 8;
                        continue;
                }
-               kaddr = kmap_atomic(page, KM_USER0);
+               kaddr = kmap_atomic(page);
                /*
                 * Subtract the number of set bits. If this
                 * is the last page and it is partial we don't really care as
@@ -2554,7 +2554,7 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
                 */
                nr_free -= bitmap_weight(kaddr,
                                        PAGE_CACHE_SIZE * BITS_PER_BYTE);
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                page_cache_release(page);
        }
        ntfs_debug("Finished reading $MFT/$BITMAP, last index = 0x%lx.",
index 78b68af..6577432 100644
@@ -102,7 +102,7 @@ static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
                 * copy, the data is still good. */
                if (buffer_jbd(buffer_cache_bh)
                    && ocfs2_inode_is_new(inode)) {
-                       kaddr = kmap_atomic(bh_result->b_page, KM_USER0);
+                       kaddr = kmap_atomic(bh_result->b_page);
                        if (!kaddr) {
                                mlog(ML_ERROR, "couldn't kmap!\n");
                                goto bail;
@@ -110,7 +110,7 @@ static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
                        memcpy(kaddr + (bh_result->b_size * iblock),
                               buffer_cache_bh->b_data,
                               bh_result->b_size);
-                       kunmap_atomic(kaddr, KM_USER0);
+                       kunmap_atomic(kaddr);
                        set_buffer_uptodate(bh_result);
                }
                brelse(buffer_cache_bh);
@@ -236,13 +236,13 @@ int ocfs2_read_inline_data(struct inode *inode, struct page *page,
                return -EROFS;
        }
 
-       kaddr = kmap_atomic(page, KM_USER0);
+       kaddr = kmap_atomic(page);
        if (size)
                memcpy(kaddr, di->id2.i_data.id_data, size);
        /* Clear the remaining part of the page */
        memset(kaddr + size, 0, PAGE_CACHE_SIZE - size);
        flush_dcache_page(page);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
        SetPageUptodate(page);
 
@@ -689,7 +689,7 @@ static void ocfs2_clear_page_regions(struct page *page,
 
        ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end);
 
-       kaddr = kmap_atomic(page, KM_USER0);
+       kaddr = kmap_atomic(page);
 
        if (from || to) {
                if (from > cluster_start)
@@ -700,7 +700,7 @@ static void ocfs2_clear_page_regions(struct page *page,
                memset(kaddr + cluster_start, 0, cluster_end - cluster_start);
        }
 
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 }
 
 /*
@@ -1981,9 +1981,9 @@ static void ocfs2_write_end_inline(struct inode *inode, loff_t pos,
                }
        }
 
-       kaddr = kmap_atomic(wc->w_target_page, KM_USER0);
+       kaddr = kmap_atomic(wc->w_target_page);
        memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
        trace_ocfs2_write_end_inline(
             (unsigned long long)OCFS2_I(inode)->ip_blkno,
index a932ced..fe0502f 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -230,7 +230,7 @@ void *generic_pipe_buf_map(struct pipe_inode_info *pipe,
 {
        if (atomic) {
                buf->flags |= PIPE_BUF_FLAG_ATOMIC;
-               return kmap_atomic(buf->page, KM_USER0);
+               return kmap_atomic(buf->page);
        }
 
        return kmap(buf->page);
@@ -251,7 +251,7 @@ void generic_pipe_buf_unmap(struct pipe_inode_info *pipe,
 {
        if (buf->flags & PIPE_BUF_FLAG_ATOMIC) {
                buf->flags &= ~PIPE_BUF_FLAG_ATOMIC;
-               kunmap_atomic(map_data, KM_USER0);
+               kunmap_atomic(map_data);
        } else
                kunmap(buf->page);
 }
@@ -565,14 +565,14 @@ redo1:
                        iov_fault_in_pages_read(iov, chars);
 redo2:
                        if (atomic)
-                               src = kmap_atomic(page, KM_USER0);
+                               src = kmap_atomic(page);
                        else
                                src = kmap(page);
 
                        error = pipe_iov_copy_from_user(src, iov, chars,
                                                        atomic);
                        if (atomic)
-                               kunmap_atomic(src, KM_USER0);
+                               kunmap_atomic(src);
                        else
                                kunmap(page);
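
The atomic path above can fail mid-copy (the user page may not be resident), which is what the redo2 label is for: drop back to the sleeping kmap() path and retry. A minimal userspace sketch of that fallback shape; map_page() and copy_in() are invented stand-ins, not kernel APIs:

#include <stdio.h>
#include <string.h>

static char page[64];

/* kernel: kmap_atomic(page) vs kmap(page); here mapping is the identity */
static char *map_page(int atomic)
{
        (void)atomic;
        return page;
}

static int copy_in(char *dst, const char *src, size_t n, int atomic)
{
        if (atomic)
                return -1;              /* pretend the atomic copy faulted */
        memcpy(dst, src, n);
        return 0;
}

int main(void)
{
        const char data[] = "hello";
        int atomic = 1;

        for (;;) {
                char *dst = map_page(atomic);
                int err = copy_in(dst, data, sizeof(data), atomic);
                /* kernel: kunmap_atomic(dst) vs kunmap(page) */
                if (err && atomic) {
                        atomic = 0;     /* retry on the sleeping path */
                        continue;
                }
                break;
        }
        printf("%s\n", page);
        return 0;
}

The point of the cleanup is visible here: the choice of mapping primitive now carries only the atomic/non-atomic decision, no KM_* slot.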
 
index 313d39d..77df82f 100644 (file)
@@ -1284,12 +1284,12 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
                 ** -clm
                 */
 
-               data = kmap_atomic(un_bh->b_page, KM_USER0);
+               data = kmap_atomic(un_bh->b_page);
                off = ((le_ih_k_offset(&s_ih) - 1) & (PAGE_CACHE_SIZE - 1));
                memcpy(data + off,
                       B_I_PITEM(PATH_PLAST_BUFFER(path), &s_ih),
                       ret_value);
-               kunmap_atomic(data, KM_USER0);
+               kunmap_atomic(data);
        }
        /* Perform balancing after all resources have been collected at once. */
        do_balance(&s_del_balance, NULL, NULL, M_DELETE);
index d7f6e51..8f546bd 100644 (file)
@@ -128,9 +128,9 @@ int direct2indirect(struct reiserfs_transaction_handle *th, struct inode *inode,
        if (up_to_date_bh) {
                unsigned pgoff =
                    (tail_offset + total_tail - 1) & (PAGE_CACHE_SIZE - 1);
-               char *kaddr = kmap_atomic(up_to_date_bh->b_page, KM_USER0);
+               char *kaddr = kmap_atomic(up_to_date_bh->b_page);
                memset(kaddr + pgoff, 0, blk_size - total_tail);
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
        }
 
        REISERFS_I(inode)->i_first_direct_byte = U32_MAX;
index 1ec0493..f16402e 100644 (file)
@@ -737,15 +737,12 @@ int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
                goto out;
 
        if (buf->page != page) {
-               /*
-                * Careful, ->map() uses KM_USER0!
-                */
                char *src = buf->ops->map(pipe, buf, 1);
-               char *dst = kmap_atomic(page, KM_USER1);
+               char *dst = kmap_atomic(page);
 
                memcpy(dst + offset, src + buf->offset, this_len);
                flush_dcache_page(page);
-               kunmap_atomic(dst, KM_USER1);
+               kunmap_atomic(dst);
                buf->ops->unmap(pipe, buf, src);
        }
        ret = pagecache_write_end(file, mapping, sd->pos, this_len, this_len,
index 38bb1c6..8ca62c2 100644 (file)
@@ -464,10 +464,10 @@ static int squashfs_readpage(struct file *file, struct page *page)
                if (PageUptodate(push_page))
                        goto skip_page;
 
-               pageaddr = kmap_atomic(push_page, KM_USER0);
+               pageaddr = kmap_atomic(push_page);
                squashfs_copy_data(pageaddr, buffer, offset, avail);
                memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail);
-               kunmap_atomic(pageaddr, KM_USER0);
+               kunmap_atomic(pageaddr);
                flush_dcache_page(push_page);
                SetPageUptodate(push_page);
 skip_page:
@@ -484,9 +484,9 @@ skip_page:
 error_out:
        SetPageError(page);
 out:
-       pageaddr = kmap_atomic(page, KM_USER0);
+       pageaddr = kmap_atomic(page);
        memset(pageaddr, 0, PAGE_CACHE_SIZE);
-       kunmap_atomic(pageaddr, KM_USER0);
+       kunmap_atomic(pageaddr);
        flush_dcache_page(page);
        if (!PageError(page))
                SetPageUptodate(page);
index 1191817..12806df 100644 (file)
@@ -90,14 +90,14 @@ static int squashfs_symlink_readpage(struct file *file, struct page *page)
                        goto error_out;
                }
 
-               pageaddr = kmap_atomic(page, KM_USER0);
+               pageaddr = kmap_atomic(page);
                copied = squashfs_copy_data(pageaddr + bytes, entry, offset,
                                                                length - bytes);
                if (copied == length - bytes)
                        memset(pageaddr + length, 0, PAGE_CACHE_SIZE - length);
                else
                        block = entry->next_index;
-               kunmap_atomic(pageaddr, KM_USER0);
+               kunmap_atomic(pageaddr);
                squashfs_cache_put(entry);
        }
 
index f9c234b..5c8f6dc 100644 (file)
@@ -1042,10 +1042,10 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
         * the page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
-       kaddr = kmap_atomic(page, KM_USER0);
+       kaddr = kmap_atomic(page);
        memset(kaddr + len, 0, PAGE_CACHE_SIZE - len);
        flush_dcache_page(page);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
        if (i_size > synced_i_size) {
                err = inode->i_sb->s_op->write_inode(inode, NULL);
index d567b84..7f3f7ba 100644 (file)
@@ -87,10 +87,10 @@ static int udf_adinicb_write_end(struct file *file,
        char *kaddr;
        struct udf_inode_info *iinfo = UDF_I(inode);
 
-       kaddr = kmap_atomic(page, KM_USER0);
+       kaddr = kmap_atomic(page);
        memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr + offset,
                kaddr + offset, copied);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
        return simple_write_end(file, mapping, pos, len, copied, page, fsdata);
 }
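
Nearly every hunk in this series is an instance of the same three-line pattern: map the page, touch it, unmap, minus the KM_* slot. A minimal sketch of that pattern, assuming a userspace "page" that is just a buffer; the kmap_atomic()/kunmap_atomic() stubs here are identity functions, not the kernel implementations:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

struct page { unsigned char data[PAGE_SIZE]; };

/* stand-ins: in the kernel these create/tear down a temporary mapping */
static void *kmap_atomic(struct page *page) { return page->data; }
static void kunmap_atomic(void *addr) { (void)addr; }

/* the conversion pattern repeated across this series */
static void copy_from_page(void *dst, struct page *page,
                           size_t offset, size_t len)
{
        unsigned char *kaddr = kmap_atomic(page);
        memcpy(dst, kaddr + offset, len);
        kunmap_atomic(kaddr);
}

int main(void)
{
        static struct page p;
        char out[6] = "";

        memcpy(p.data + 100, "hello", 6);
        copy_from_page(out, &p, 100, 6);
        printf("%s\n", out);
        return 0;
}
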
index 4fd95a3..3744d2a 100644 (file)
 #include <linux/scatterlist.h>
 #include <linux/sched.h>
 
-static inline enum km_type crypto_kmap_type(int out)
-{
-       enum km_type type;
-
-       if (in_softirq())
-               type = out * (KM_SOFTIRQ1 - KM_SOFTIRQ0) + KM_SOFTIRQ0;
-       else
-               type = out * (KM_USER1 - KM_USER0) + KM_USER0;
-
-       return type;
-}
-
-static inline void *crypto_kmap(struct page *page, int out)
-{
-       return kmap_atomic(page, crypto_kmap_type(out));
-}
-
-static inline void crypto_kunmap(void *vaddr, int out)
-{
-       kunmap_atomic(vaddr, crypto_kmap_type(out));
-}
-
 static inline void crypto_yield(u32 flags)
 {
        if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
@@ -121,15 +99,15 @@ static inline struct page *scatterwalk_page(struct scatter_walk *walk)
        return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
 }
 
-static inline void scatterwalk_unmap(void *vaddr, int out)
+static inline void scatterwalk_unmap(void *vaddr)
 {
-       crypto_kunmap(vaddr, out);
+       kunmap_atomic(vaddr);
 }
 
 void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg);
 void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
                            size_t nbytes, int out);
-void *scatterwalk_map(struct scatter_walk *walk, int out);
+void *scatterwalk_map(struct scatter_walk *walk);
 void scatterwalk_done(struct scatter_walk *walk, int out, int more);
 
 void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
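
The deleted crypto_kmap_type() existed only to pick a KM_* slot from the "out" flag and softirq context; with slotless kmap_atomic() that computation has no consumer. A standalone sketch of the arithmetic it performed, with illustrative enum values (the real km_type values were arch-specific) and in_softirq() reduced to a plain parameter:

#include <stdio.h>

enum km_type { KM_USER0, KM_USER1, KM_SOFTIRQ0, KM_SOFTIRQ1 };

static const char *km_name[] = {
        "KM_USER0", "KM_USER1", "KM_SOFTIRQ0", "KM_SOFTIRQ1",
};

/* same formula as the deleted helper: out selects the second slot */
static enum km_type crypto_kmap_type(int in_softirq, int out)
{
        if (in_softirq)
                return out * (KM_SOFTIRQ1 - KM_SOFTIRQ0) + KM_SOFTIRQ0;
        return out * (KM_USER1 - KM_USER0) + KM_USER0;
}

int main(void)
{
        int softirq, out;

        for (softirq = 0; softirq <= 1; softirq++)
                for (out = 0; out <= 1; out++)
                        printf("softirq=%d out=%d -> %s\n", softirq, out,
                               km_name[crypto_kmap_type(softirq, out)]);
        return 0;
}
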
index 129a9c0..de5422a 100644 (file)
@@ -101,10 +101,10 @@ static inline int bio_has_allocated_vec(struct bio *bio)
  * I/O completely on that queue (see ide-dma for example)
  */
 #define __bio_kmap_atomic(bio, idx, kmtype)                            \
-       (kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page, kmtype) +    \
+       (kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page) +    \
                bio_iovec_idx((bio), (idx))->bv_offset)
 
-#define __bio_kunmap_atomic(addr, kmtype) kunmap_atomic(addr, kmtype)
+#define __bio_kunmap_atomic(addr, kmtype) kunmap_atomic(addr)
 
 /*
  * merge helpers etc
@@ -317,7 +317,7 @@ static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
         * balancing is a lot nicer this way
         */
        local_irq_save(*flags);
-       addr = (unsigned long) kmap_atomic(bvec->bv_page, KM_BIO_SRC_IRQ);
+       addr = (unsigned long) kmap_atomic(bvec->bv_page);
 
        BUG_ON(addr & ~PAGE_MASK);
 
@@ -328,7 +328,7 @@ static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
 {
        unsigned long ptr = (unsigned long) buffer & PAGE_MASK;
 
-       kunmap_atomic((void *) ptr, KM_BIO_SRC_IRQ);
+       kunmap_atomic((void *) ptr);
        local_irq_restore(*flags);
 }
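
bvec_kmap_irq() shows why only the slot argument could go: the saved interrupt state still has to travel through the caller's flags word across map and unmap. A userspace sketch of that shape, with local_irq_save()/local_irq_restore() stubbed out (the real ones are kernel macros that take flags by name, not pointer):

#include <stdio.h>

static void local_irq_save(unsigned long *flags)
{
        *flags = 1;                     /* pretend IRQs were enabled */
        printf("irqs off\n");
}

static void local_irq_restore(unsigned long flags)
{
        if (flags)
                printf("irqs back on\n");
}

static char page_buf[4096];

static char *bvec_kmap_irq_demo(unsigned long *flags)
{
        local_irq_save(flags);
        return page_buf;                /* kernel: kmap_atomic(bvec->bv_page) */
}

static void bvec_kunmap_irq_demo(char *buffer, unsigned long *flags)
{
        (void)buffer;                   /* kernel: kunmap_atomic(buffer) */
        local_irq_restore(*flags);
}

int main(void)
{
        unsigned long flags;
        char *addr = bvec_kmap_irq_demo(&flags);

        addr[0] = 'x';                  /* touch the mapping */
        bvec_kunmap_irq_demo(addr, &flags);
        return 0;
}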
 
index 3a93f73..6549ed7 100644 (file)
@@ -55,12 +55,12 @@ static inline void kunmap(struct page *page)
 {
 }
 
-static inline void *__kmap_atomic(struct page *page)
+static inline void *kmap_atomic(struct page *page)
 {
        pagefault_disable();
        return page_address(page);
 }
-#define kmap_atomic_prot(page, prot)   __kmap_atomic(page)
+#define kmap_atomic_prot(page, prot)   kmap_atomic(page)
 
 static inline void __kunmap_atomic(void *addr)
 {
@@ -109,27 +109,62 @@ static inline void kmap_atomic_idx_pop(void)
 #endif
 
 /*
- * Make both: kmap_atomic(page, idx) and kmap_atomic(page) work.
+ * NOTE:
+ * kmap_atomic() and kunmap_atomic() with two arguments are deprecated.
+ * We keep them only for backward compatibility; any use of them
+ * now triggers a deprecation warning.
  */
-#define kmap_atomic(page, args...) __kmap_atomic(page)
+
+#define PASTE(a, b) a ## b
+#define PASTE2(a, b) PASTE(a, b)
+
+#define NARG_(_2, _1, n, ...) n
+#define NARG(...) NARG_(__VA_ARGS__, 2, 1, :)
+
+static inline void __deprecated *kmap_atomic_deprecated(struct page *page,
+                                                       enum km_type km)
+{
+       return kmap_atomic(page);
+}
+
+#define kmap_atomic1(...) kmap_atomic(__VA_ARGS__)
+#define kmap_atomic2(...) kmap_atomic_deprecated(__VA_ARGS__)
+#define kmap_atomic(...) PASTE2(kmap_atomic, NARG(__VA_ARGS__)(__VA_ARGS__))
+
+static inline void __deprecated __kunmap_atomic_deprecated(void *addr,
+                                                       enum km_type km)
+{
+       __kunmap_atomic(addr);
+}
 
 /*
  * Prevent people trying to call kunmap_atomic() as if it were kunmap()
  * kunmap_atomic() should get the return value of kmap_atomic, not the page.
  */
-#define kunmap_atomic(addr, args...)                           \
-do {                                                           \
-       BUILD_BUG_ON(__same_type((addr), struct page *));       \
-       __kunmap_atomic(addr);                                  \
+#define kunmap_atomic_deprecated(addr, km)                      \
+do {                                                            \
+       BUILD_BUG_ON(__same_type((addr), struct page *));       \
+       __kunmap_atomic_deprecated(addr, km);                   \
 } while (0)
 
+#define kunmap_atomic_withcheck(addr)                           \
+do {                                                            \
+       BUILD_BUG_ON(__same_type((addr), struct page *));       \
+       __kunmap_atomic(addr);                                  \
+} while (0)
+
+#define kunmap_atomic1(...) kunmap_atomic_withcheck(__VA_ARGS__)
+#define kunmap_atomic2(...) kunmap_atomic_deprecated(__VA_ARGS__)
+#define kunmap_atomic(...) PASTE2(kunmap_atomic, NARG(__VA_ARGS__)(__VA_ARGS__))
+/**** End of C pre-processor tricks for deprecated macros ****/
+
 /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
 #ifndef clear_user_highpage
 static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-       void *addr = kmap_atomic(page, KM_USER0);
+       void *addr = kmap_atomic(page);
        clear_user_page(addr, vaddr, page);
-       kunmap_atomic(addr, KM_USER0);
+       kunmap_atomic(addr);
 }
 #endif
 
@@ -180,16 +215,16 @@ alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
 
 static inline void clear_highpage(struct page *page)
 {
-       void *kaddr = kmap_atomic(page, KM_USER0);
+       void *kaddr = kmap_atomic(page);
        clear_page(kaddr);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 }
 
 static inline void zero_user_segments(struct page *page,
        unsigned start1, unsigned end1,
        unsigned start2, unsigned end2)
 {
-       void *kaddr = kmap_atomic(page, KM_USER0);
+       void *kaddr = kmap_atomic(page);
 
        BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);
 
@@ -199,7 +234,7 @@ static inline void zero_user_segments(struct page *page,
        if (end2 > start2)
                memset(kaddr + start2, 0, end2 - start2);
 
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
        flush_dcache_page(page);
 }
 
@@ -228,11 +263,11 @@ static inline void copy_user_highpage(struct page *to, struct page *from,
 {
        char *vfrom, *vto;
 
-       vfrom = kmap_atomic(from, KM_USER0);
-       vto = kmap_atomic(to, KM_USER1);
+       vfrom = kmap_atomic(from);
+       vto = kmap_atomic(to);
        copy_user_page(vto, vfrom, vaddr, to);
-       kunmap_atomic(vto, KM_USER1);
-       kunmap_atomic(vfrom, KM_USER0);
+       kunmap_atomic(vto);
+       kunmap_atomic(vfrom);
 }
 
 #endif
@@ -241,11 +276,11 @@ static inline void copy_highpage(struct page *to, struct page *from)
 {
        char *vfrom, *vto;
 
-       vfrom = kmap_atomic(from, KM_USER0);
-       vto = kmap_atomic(to, KM_USER1);
+       vfrom = kmap_atomic(from);
+       vto = kmap_atomic(to);
        copy_page(vto, vfrom);
-       kunmap_atomic(vto, KM_USER1);
-       kunmap_atomic(vfrom, KM_USER0);
+       kunmap_atomic(vto);
+       kunmap_atomic(vfrom);
 }
 
 #endif /* _LINUX_HIGHMEM_H */
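
The NARG()/PASTE2() pair above is the argument-counting trick that lets one kmap_atomic() macro accept both the new one-argument calls and the deprecated two-argument ones. A standalone demo of the same dispatch, with demo_map_new()/demo_map_old() as made-up stand-ins for kmap_atomic() and kmap_atomic_deprecated():

#include <stdio.h>

#define PASTE(a, b) a ## b
#define PASTE2(a, b) PASTE(a, b)

/* NARG(x) -> 1, NARG(x, y) -> 2; zero arguments hit the ":" and fail */
#define NARG_(_2, _1, n, ...) n
#define NARG(...) NARG_(__VA_ARGS__, 2, 1, :)

static void *demo_map_new(void *page)
{
        printf("one-argument form\n");
        return page;
}

static void *demo_map_old(void *page, int km)   /* "deprecated" */
{
        printf("two-argument form, km=%d\n", km);
        return page;
}

#define demo_map1(...) demo_map_new(__VA_ARGS__)
#define demo_map2(...) demo_map_old(__VA_ARGS__)
#define demo_map(...) PASTE2(demo_map, NARG(__VA_ARGS__)(__VA_ARGS__))

int main(void)
{
        char page[16];

        demo_map(page);         /* -> demo_map_new(page)    */
        demo_map(page, 0);      /* -> demo_map_old(page, 0) */
        return 0;
}

NARG() selects 1 or 2, the paste yields demo_map1 or demo_map2, and that forwards to the right function; this is how a deprecated two-argument call keeps compiling while picking up a __deprecated warning.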
index 7d6fb40..d35cc2d 100644 (file)
@@ -384,9 +384,9 @@ static int kdb_getphys(void *res, unsigned long addr, size_t size)
        if (!pfn_valid(pfn))
                return 1;
        page = pfn_to_page(pfn);
-       vaddr = kmap_atomic(page, KM_KDB);
+       vaddr = kmap_atomic(page);
        memcpy(res, vaddr + (addr & (PAGE_SIZE - 1)), size);
-       kunmap_atomic(vaddr, KM_KDB);
+       kunmap_atomic(vaddr);
 
        return 0;
 }
index 6a768e5..3a564ac 100644 (file)
@@ -1000,20 +1000,20 @@ static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
        s_page = pfn_to_page(src_pfn);
        d_page = pfn_to_page(dst_pfn);
        if (PageHighMem(s_page)) {
-               src = kmap_atomic(s_page, KM_USER0);
-               dst = kmap_atomic(d_page, KM_USER1);
+               src = kmap_atomic(s_page);
+               dst = kmap_atomic(d_page);
                do_copy_page(dst, src);
-               kunmap_atomic(dst, KM_USER1);
-               kunmap_atomic(src, KM_USER0);
+               kunmap_atomic(dst);
+               kunmap_atomic(src);
        } else {
                if (PageHighMem(d_page)) {
                        /* Page pointed to by src may contain some kernel
                         * data modified by kmap_atomic()
                         */
                        safe_copy_page(buffer, s_page);
-                       dst = kmap_atomic(d_page, KM_USER0);
+                       dst = kmap_atomic(d_page);
                        copy_page(dst, buffer);
-                       kunmap_atomic(dst, KM_USER0);
+                       kunmap_atomic(dst);
                } else {
                        safe_copy_page(page_address(d_page), s_page);
                }
@@ -1728,9 +1728,9 @@ int snapshot_read_next(struct snapshot_handle *handle)
                         */
                        void *kaddr;
 
-                       kaddr = kmap_atomic(page, KM_USER0);
+                       kaddr = kmap_atomic(page);
                        copy_page(buffer, kaddr);
-                       kunmap_atomic(kaddr, KM_USER0);
+                       kunmap_atomic(kaddr);
                        handle->buffer = buffer;
                } else {
                        handle->buffer = page_address(page);
@@ -2014,9 +2014,9 @@ static void copy_last_highmem_page(void)
        if (last_highmem_page) {
                void *dst;
 
-               dst = kmap_atomic(last_highmem_page, KM_USER0);
+               dst = kmap_atomic(last_highmem_page);
                copy_page(dst, buffer);
-               kunmap_atomic(dst, KM_USER0);
+               kunmap_atomic(dst);
                last_highmem_page = NULL;
        }
 }
@@ -2309,13 +2309,13 @@ swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
 {
        void *kaddr1, *kaddr2;
 
-       kaddr1 = kmap_atomic(p1, KM_USER0);
-       kaddr2 = kmap_atomic(p2, KM_USER1);
+       kaddr1 = kmap_atomic(p1);
+       kaddr2 = kmap_atomic(p2);
        copy_page(buf, kaddr1);
        copy_page(kaddr1, kaddr2);
        copy_page(kaddr2, buf);
-       kunmap_atomic(kaddr2, KM_USER1);
-       kunmap_atomic(kaddr1, KM_USER0);
+       kunmap_atomic(kaddr2);
+       kunmap_atomic(kaddr1);
 }
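
swap_two_pages_data() above is the classic exchange-through-a-bounce-buffer: three whole-page copies, with each highmem page mapped just long enough to copy. A userspace sketch of the same exchange, with plain buffers standing in for mapped pages:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

/* exchange p1 and p2 through a caller-supplied bounce buffer */
static void swap_two_pages(char *p1, char *p2, char *buf)
{
        memcpy(buf, p1, PAGE_SIZE);
        memcpy(p1, p2, PAGE_SIZE);
        memcpy(p2, buf, PAGE_SIZE);
}

int main(void)
{
        static char a[PAGE_SIZE] = "from", b[PAGE_SIZE] = "to",
                    tmp[PAGE_SIZE];

        swap_two_pages(a, b, tmp);
        printf("a=\"%s\" b=\"%s\"\n", a, b);    /* a="to" b="from" */
        return 0;
}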
 
 /**
index 4ceb05d..33b2cbb 100644 (file)
@@ -390,7 +390,7 @@ bool sg_miter_next(struct sg_mapping_iter *miter)
        miter->consumed = miter->length;
 
        if (miter->__flags & SG_MITER_ATOMIC)
-               miter->addr = kmap_atomic(miter->page, KM_BIO_SRC_IRQ) + off;
+               miter->addr = kmap_atomic(miter->page) + off;
        else
                miter->addr = kmap(miter->page) + off;
 
@@ -424,7 +424,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
 
                if (miter->__flags & SG_MITER_ATOMIC) {
                        WARN_ON(!irqs_disabled());
-                       kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
+                       kunmap_atomic(miter->addr);
                } else
                        kunmap(miter->page);
 
index 058935e..d0f6315 100644 (file)
@@ -349,13 +349,12 @@ void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
                        sz = min_t(size_t, PAGE_SIZE - offset, size);
 
                        local_irq_save(flags);
-                       buffer = kmap_atomic(pfn_to_page(pfn),
-                                            KM_BOUNCE_READ);
+                       buffer = kmap_atomic(pfn_to_page(pfn));
                        if (dir == DMA_TO_DEVICE)
                                memcpy(dma_addr, buffer + offset, sz);
                        else
                                memcpy(buffer + offset, dma_addr, sz);
-                       kunmap_atomic(buffer, KM_BOUNCE_READ);
+                       kunmap_atomic(buffer);
                        local_irq_restore(flags);
 
                        size -= sz;
index 4e9ae72..d1be02c 100644 (file)
@@ -50,9 +50,9 @@ static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
        unsigned char *vto;
 
        local_irq_save(flags);
-       vto = kmap_atomic(to->bv_page, KM_BOUNCE_READ);
+       vto = kmap_atomic(to->bv_page);
        memcpy(vto + to->bv_offset, vfrom, to->bv_len);
-       kunmap_atomic(vto, KM_BOUNCE_READ);
+       kunmap_atomic(vto);
        local_irq_restore(flags);
 }
 
index b662757..2f81650 100644 (file)
@@ -1318,10 +1318,10 @@ int file_read_actor(read_descriptor_t *desc, struct page *page,
         * taking the kmap.
         */
        if (!fault_in_pages_writeable(desc->arg.buf, size)) {
-               kaddr = kmap_atomic(page, KM_USER0);
+               kaddr = kmap_atomic(page);
                left = __copy_to_user_inatomic(desc->arg.buf,
                                                kaddr + offset, size);
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                if (left == 0)
                        goto success;
        }
@@ -2045,7 +2045,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
        size_t copied;
 
        BUG_ON(!in_atomic());
-       kaddr = kmap_atomic(page, KM_USER0);
+       kaddr = kmap_atomic(page);
        if (likely(i->nr_segs == 1)) {
                int left;
                char __user *buf = i->iov->iov_base + i->iov_offset;
@@ -2055,7 +2055,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
                copied = __iovec_copy_from_user_inatomic(kaddr + offset,
                                                i->iov, i->iov_offset, bytes);
        }
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
        return copied;
 }
index 310544a..a6d3fb7 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -672,9 +672,9 @@ error:
 static u32 calc_checksum(struct page *page)
 {
        u32 checksum;
-       void *addr = kmap_atomic(page, KM_USER0);
+       void *addr = kmap_atomic(page);
        checksum = jhash2(addr, PAGE_SIZE / 4, 17);
-       kunmap_atomic(addr, KM_USER0);
+       kunmap_atomic(addr);
        return checksum;
 }
 
@@ -683,11 +683,11 @@ static int memcmp_pages(struct page *page1, struct page *page2)
        char *addr1, *addr2;
        int ret;
 
-       addr1 = kmap_atomic(page1, KM_USER0);
-       addr2 = kmap_atomic(page2, KM_USER1);
+       addr1 = kmap_atomic(page1);
+       addr2 = kmap_atomic(page2);
        ret = memcmp(addr1, addr2, PAGE_SIZE);
-       kunmap_atomic(addr2, KM_USER1);
-       kunmap_atomic(addr1, KM_USER0);
+       kunmap_atomic(addr2);
+       kunmap_atomic(addr1);
        return ret;
 }
 
index fa2f04e..347e5fa 100644 (file)
@@ -2447,7 +2447,7 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
         * fails, we just zero-fill it. Live with it.
         */
        if (unlikely(!src)) {
-               void *kaddr = kmap_atomic(dst, KM_USER0);
+               void *kaddr = kmap_atomic(dst);
                void __user *uaddr = (void __user *)(va & PAGE_MASK);
 
                /*
@@ -2458,7 +2458,7 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
                 */
                if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
                        clear_page(kaddr);
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                flush_dcache_page(dst);
        } else
                copy_user_highpage(dst, src, va, vma);
index 269d049..b7e1955 100644 (file)
@@ -1656,9 +1656,9 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
                }
                inode->i_mapping->a_ops = &shmem_aops;
                inode->i_op = &shmem_symlink_inode_operations;
-               kaddr = kmap_atomic(page, KM_USER0);
+               kaddr = kmap_atomic(page);
                memcpy(kaddr, symname, len);
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                set_page_dirty(page);
                unlock_page(page);
                page_cache_release(page);
index d999f09..00a962c 100644 (file)
@@ -2427,9 +2427,9 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
                if (!(count & COUNT_CONTINUED))
                        goto out;
 
-               map = kmap_atomic(list_page, KM_USER0) + offset;
+               map = kmap_atomic(list_page) + offset;
                count = *map;
-               kunmap_atomic(map, KM_USER0);
+               kunmap_atomic(map);
 
                /*
                 * If this continuation count now has some space in it,
@@ -2472,7 +2472,7 @@ static bool swap_count_continued(struct swap_info_struct *si,
 
        offset &= ~PAGE_MASK;
        page = list_entry(head->lru.next, struct page, lru);
-       map = kmap_atomic(page, KM_USER0) + offset;
+       map = kmap_atomic(page) + offset;
 
        if (count == SWAP_MAP_MAX)      /* initial increment from swap_map */
                goto init_map;          /* jump over SWAP_CONT_MAX checks */
@@ -2482,26 +2482,26 @@ static bool swap_count_continued(struct swap_info_struct *si,
                 * Think of how you add 1 to 999
                 */
                while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
-                       kunmap_atomic(map, KM_USER0);
+                       kunmap_atomic(map);
                        page = list_entry(page->lru.next, struct page, lru);
                        BUG_ON(page == head);
-                       map = kmap_atomic(page, KM_USER0) + offset;
+                       map = kmap_atomic(page) + offset;
                }
                if (*map == SWAP_CONT_MAX) {
-                       kunmap_atomic(map, KM_USER0);
+                       kunmap_atomic(map);
                        page = list_entry(page->lru.next, struct page, lru);
                        if (page == head)
                                return false;   /* add count continuation */
-                       map = kmap_atomic(page, KM_USER0) + offset;
+                       map = kmap_atomic(page) + offset;
 init_map:              *map = 0;               /* we didn't zero the page */
                }
                *map += 1;
-               kunmap_atomic(map, KM_USER0);
+               kunmap_atomic(map);
                page = list_entry(page->lru.prev, struct page, lru);
                while (page != head) {
-                       map = kmap_atomic(page, KM_USER0) + offset;
+                       map = kmap_atomic(page) + offset;
                        *map = COUNT_CONTINUED;
-                       kunmap_atomic(map, KM_USER0);
+                       kunmap_atomic(map);
                        page = list_entry(page->lru.prev, struct page, lru);
                }
                return true;                    /* incremented */
@@ -2512,22 +2512,22 @@ init_map:               *map = 0;               /* we didn't zero the page */
                 */
                BUG_ON(count != COUNT_CONTINUED);
                while (*map == COUNT_CONTINUED) {
-                       kunmap_atomic(map, KM_USER0);
+                       kunmap_atomic(map);
                        page = list_entry(page->lru.next, struct page, lru);
                        BUG_ON(page == head);
-                       map = kmap_atomic(page, KM_USER0) + offset;
+                       map = kmap_atomic(page) + offset;
                }
                BUG_ON(*map == 0);
                *map -= 1;
                if (*map == 0)
                        count = 0;
-               kunmap_atomic(map, KM_USER0);
+               kunmap_atomic(map);
                page = list_entry(page->lru.prev, struct page, lru);
                while (page != head) {
-                       map = kmap_atomic(page, KM_USER0) + offset;
+                       map = kmap_atomic(page) + offset;
                        *map = SWAP_CONT_MAX | count;
                        count = COUNT_CONTINUED;
-                       kunmap_atomic(map, KM_USER0);
+                       kunmap_atomic(map);
                        page = list_entry(page->lru.prev, struct page, lru);
                }
                return count == COUNT_CONTINUED;
index 86ce9a5..94dff88 100644 (file)
@@ -1906,9 +1906,9 @@ static int aligned_vread(char *buf, char *addr, unsigned long count)
                         * we can expect USER0 is not used (see vread/vwrite's
                         * function description)
                         */
-                       void *map = kmap_atomic(p, KM_USER0);
+                       void *map = kmap_atomic(p);
                        memcpy(buf, map + offset, length);
-                       kunmap_atomic(map, KM_USER0);
+                       kunmap_atomic(map);
                } else
                        memset(buf, 0, length);
 
@@ -1945,9 +1945,9 @@ static int aligned_vwrite(char *buf, char *addr, unsigned long count)
                         * we can expect USER0 is not used (see vread/vwrite's
                         * function description)
                         */
-                       void *map = kmap_atomic(p, KM_USER0);
+                       void *map = kmap_atomic(p);
                        memcpy(map + offset, buf, length);
-                       kunmap_atomic(map, KM_USER0);
+                       kunmap_atomic(map);
                }
                addr += length;
                buf += length;
index 81e1ed7..52d0a44 100644 (file)
@@ -7,12 +7,12 @@ static inline void *kmap_skb_frag(const skb_frag_t *frag)
 
        local_bh_disable();
 #endif
-       return kmap_atomic(skb_frag_page(frag), KM_SKB_DATA_SOFTIRQ);
+       return kmap_atomic(skb_frag_page(frag));
 }
 
 static inline void kunmap_skb_frag(void *vaddr)
 {
-       kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
+       kunmap_atomic(vaddr);
 #ifdef CONFIG_HIGHMEM
        local_bh_enable();
 #endif
index 8d6d970..8d19491 100644 (file)
@@ -763,7 +763,7 @@ static void rds_ib_cong_recv(struct rds_connection *conn,
                to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
                BUG_ON(to_copy & 7); /* Must be 64bit aligned. */
 
-               addr = kmap_atomic(sg_page(&frag->f_sg), KM_SOFTIRQ0);
+               addr = kmap_atomic(sg_page(&frag->f_sg));
 
                src = addr + frag_off;
                dst = (void *)map->m_page_addrs[map_page] + map_off;
@@ -773,7 +773,7 @@ static void rds_ib_cong_recv(struct rds_connection *conn,
                        uncongested |= ~(*src) & *dst;
                        *dst++ = *src++;
                }
-               kunmap_atomic(addr, KM_SOFTIRQ0);
+               kunmap_atomic(addr);
 
                copied += to_copy;
 
@@ -919,8 +919,7 @@ static void rds_ib_process_recv(struct rds_connection *conn,
                        rds_ib_cong_recv(conn, ibinc);
                else {
                        rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr,
-                                         &ibinc->ii_inc, GFP_ATOMIC,
-                                         KM_SOFTIRQ0);
+                                         &ibinc->ii_inc, GFP_ATOMIC);
                        state->ack_next = be64_to_cpu(hdr->h_sequence);
                        state->ack_next_valid = 1;
                }
index f1c016c..9a6b4f6 100644 (file)
@@ -104,7 +104,7 @@ EXPORT_SYMBOL_GPL(rds_info_deregister_func);
 void rds_info_iter_unmap(struct rds_info_iterator *iter)
 {
        if (iter->addr) {
-               kunmap_atomic(iter->addr, KM_USER0);
+               kunmap_atomic(iter->addr);
                iter->addr = NULL;
        }
 }
@@ -119,7 +119,7 @@ void rds_info_copy(struct rds_info_iterator *iter, void *data,
 
        while (bytes) {
                if (!iter->addr)
-                       iter->addr = kmap_atomic(*iter->pages, KM_USER0);
+                       iter->addr = kmap_atomic(*iter->pages);
 
                this = min(bytes, PAGE_SIZE - iter->offset);
 
@@ -134,7 +134,7 @@ void rds_info_copy(struct rds_info_iterator *iter, void *data,
                iter->offset += this;
 
                if (iter->offset == PAGE_SIZE) {
-                       kunmap_atomic(iter->addr, KM_USER0);
+                       kunmap_atomic(iter->addr);
                        iter->addr = NULL;
                        iter->offset = 0;
                        iter->pages++;
index 3c87170..4503335 100644 (file)
@@ -598,7 +598,7 @@ static void rds_iw_cong_recv(struct rds_connection *conn,
                to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
                BUG_ON(to_copy & 7); /* Must be 64bit aligned. */
 
-               addr = kmap_atomic(frag->f_page, KM_SOFTIRQ0);
+               addr = kmap_atomic(frag->f_page);
 
                src = addr + frag_off;
                dst = (void *)map->m_page_addrs[map_page] + map_off;
@@ -608,7 +608,7 @@ static void rds_iw_cong_recv(struct rds_connection *conn,
                        uncongested |= ~(*src) & *dst;
                        *dst++ = *src++;
                }
-               kunmap_atomic(addr, KM_SOFTIRQ0);
+               kunmap_atomic(addr);
 
                copied += to_copy;
 
@@ -754,8 +754,7 @@ static void rds_iw_process_recv(struct rds_connection *conn,
                        rds_iw_cong_recv(conn, iwinc);
                else {
                        rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr,
-                                         &iwinc->ii_inc, GFP_ATOMIC,
-                                         KM_SOFTIRQ0);
+                                         &iwinc->ii_inc, GFP_ATOMIC);
                        state->ack_next = be64_to_cpu(hdr->h_sequence);
                        state->ack_next_valid = 1;
                }
index bca6761..87ff2a8 100644 (file)
@@ -79,7 +79,7 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm,
        rds_message_addref(rm);
 
        rds_recv_incoming(conn, conn->c_laddr, conn->c_faddr, &rm->m_inc,
-                         GFP_KERNEL, KM_USER0);
+                         GFP_KERNEL);
 
        rds_send_drop_acked(conn, be64_to_cpu(rm->m_inc.i_hdr.h_sequence),
                            NULL);
index 7eaba18..ec1d731 100644 (file)
@@ -704,7 +704,7 @@ void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
                  __be32 saddr);
 void rds_inc_put(struct rds_incoming *inc);
 void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
-                      struct rds_incoming *inc, gfp_t gfp, enum km_type km);
+                      struct rds_incoming *inc, gfp_t gfp);
 int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
                size_t size, int msg_flags);
 void rds_clear_recv_queue(struct rds_sock *rs);
index bc3f8cd..5c6e9f1 100644 (file)
@@ -155,7 +155,7 @@ static void rds_recv_incoming_exthdrs(struct rds_incoming *inc, struct rds_sock
  * tell us which roles the addrs in the conn are playing for this message.
  */
 void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
-                      struct rds_incoming *inc, gfp_t gfp, enum km_type km)
+                      struct rds_incoming *inc, gfp_t gfp)
 {
        struct rds_sock *rs = NULL;
        struct sock *sk;
index 78205e2..6243258 100644 (file)
@@ -169,7 +169,6 @@ static void rds_tcp_cong_recv(struct rds_connection *conn,
 struct rds_tcp_desc_arg {
        struct rds_connection *conn;
        gfp_t gfp;
-       enum km_type km;
 };
 
 static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
@@ -255,7 +254,7 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
                        else
                                rds_recv_incoming(conn, conn->c_faddr,
                                                  conn->c_laddr, &tinc->ti_inc,
-                                                 arg->gfp, arg->km);
+                                                 arg->gfp);
 
                        tc->t_tinc_hdr_rem = sizeof(struct rds_header);
                        tc->t_tinc_data_rem = 0;
@@ -272,8 +271,7 @@ out:
 }
 
 /* the caller has to hold the sock lock */
-static int rds_tcp_read_sock(struct rds_connection *conn, gfp_t gfp,
-                            enum km_type km)
+static int rds_tcp_read_sock(struct rds_connection *conn, gfp_t gfp)
 {
        struct rds_tcp_connection *tc = conn->c_transport_data;
        struct socket *sock = tc->t_sock;
@@ -283,7 +281,6 @@ static int rds_tcp_read_sock(struct rds_connection *conn, gfp_t gfp,
        /* It's like glib in the kernel! */
        arg.conn = conn;
        arg.gfp = gfp;
-       arg.km = km;
        desc.arg.data = &arg;
        desc.error = 0;
        desc.count = 1; /* give more than one skb per call */
@@ -311,7 +308,7 @@ int rds_tcp_recv(struct rds_connection *conn)
        rdsdebug("recv worker conn %p tc %p sock %p\n", conn, tc, sock);
 
        lock_sock(sock->sk);
-       ret = rds_tcp_read_sock(conn, GFP_KERNEL, KM_USER0);
+       ret = rds_tcp_read_sock(conn, GFP_KERNEL);
        release_sock(sock->sk);
 
        return ret;
@@ -336,7 +333,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
        ready = tc->t_orig_data_ready;
        rds_tcp_stats_inc(s_tcp_data_ready_calls);
 
-       if (rds_tcp_read_sock(conn, GFP_ATOMIC, KM_SOFTIRQ0) == -ENOMEM)
+       if (rds_tcp_read_sock(conn, GFP_ATOMIC) == -ENOMEM)
                queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
 out:
        read_unlock_bh(&sk->sk_callback_lock);
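
The rds diffs are the one place this cleanup removes plumbing rather than just call sites: the km value rode through rds_tcp_desc_arg and rds_tcp_read_sock() purely to reach rds_recv_incoming(). A sketch of that kind of dead-parameter removal, with simplified stand-in names rather than the real rds types:

#include <stdio.h>

struct recv_arg {
        int gfp;        /* still needed by the consumer */
        /* int km;         dead: existed only to be passed through */
};

static void recv_incoming(int gfp)
{
        printf("allocating with gfp=%d\n", gfp);
}

/* once recv_incoming() stopped taking km, every pass-through
 * parameter and the struct field could be deleted */
static void read_sock(const struct recv_arg *arg)
{
        recv_incoming(arg->gfp);
}

int main(void)
{
        struct recv_arg arg = { .gfp = 0 };

        read_sock(&arg);
        return 0;
}
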
index 2763e3e..38f388c 100644 (file)
@@ -82,9 +82,9 @@ gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
                                        >>PAGE_CACHE_SHIFT;
                unsigned int offset = (buf->page_base + len - 1)
                                        & (PAGE_CACHE_SIZE - 1);
-               ptr = kmap_atomic(buf->pages[last], KM_USER0);
+               ptr = kmap_atomic(buf->pages[last]);
                pad = *(ptr + offset);
-               kunmap_atomic(ptr, KM_USER0);
+               kunmap_atomic(ptr);
                goto out;
        } else
                len -= buf->page_len;
index 145e678..0a648c5 100644 (file)
@@ -114,7 +114,7 @@ ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct
                }
 
                len = PAGE_CACHE_SIZE;
-               kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA);
+               kaddr = kmap_atomic(*ppage);
                if (base) {
                        len -= base;
                        if (pglen < len)
@@ -127,7 +127,7 @@ ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct
                        ret = copy_actor(desc, kaddr, len);
                }
                flush_dcache_page(*ppage);
-               kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA);
+               kunmap_atomic(kaddr);
                copied += ret;
                if (ret != len || !desc->count)
                        goto out;
index 593f4c6..b97a3dd 100644 (file)
@@ -122,9 +122,9 @@ xdr_terminate_string(struct xdr_buf *buf, const u32 len)
 {
        char *kaddr;
 
-       kaddr = kmap_atomic(buf->pages[0], KM_USER0);
+       kaddr = kmap_atomic(buf->pages[0]);
        kaddr[buf->page_base + len] = '\0';
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 }
 EXPORT_SYMBOL_GPL(xdr_terminate_string);
 
@@ -232,12 +232,12 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base,
                pgto_base -= copy;
                pgfrom_base -= copy;
 
-               vto = kmap_atomic(*pgto, KM_USER0);
-               vfrom = kmap_atomic(*pgfrom, KM_USER1);
+               vto = kmap_atomic(*pgto);
+               vfrom = kmap_atomic(*pgfrom);
                memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
                flush_dcache_page(*pgto);
-               kunmap_atomic(vfrom, KM_USER1);
-               kunmap_atomic(vto, KM_USER0);
+               kunmap_atomic(vfrom);
+               kunmap_atomic(vto);
 
        } while ((len -= copy) != 0);
 }
@@ -267,9 +267,9 @@ _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
                if (copy > len)
                        copy = len;
 
-               vto = kmap_atomic(*pgto, KM_USER0);
+               vto = kmap_atomic(*pgto);
                memcpy(vto + pgbase, p, copy);
-               kunmap_atomic(vto, KM_USER0);
+               kunmap_atomic(vto);
 
                len -= copy;
                if (len == 0)
@@ -311,9 +311,9 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
                if (copy > len)
                        copy = len;
 
-               vfrom = kmap_atomic(*pgfrom, KM_USER0);
+               vfrom = kmap_atomic(*pgfrom);
                memcpy(p, vfrom + pgbase, copy);
-               kunmap_atomic(vfrom, KM_USER0);
+               kunmap_atomic(vfrom);
 
                pgbase += copy;
                if (pgbase == PAGE_CACHE_SIZE) {
index 554d081..1776e57 100644 (file)
@@ -338,9 +338,9 @@ rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad)
                        curlen = copy_len;
                dprintk("RPC:       %s: page %d destp 0x%p len %d curlen %d\n",
                        __func__, i, destp, copy_len, curlen);
-               srcp = kmap_atomic(ppages[i], KM_SKB_SUNRPC_DATA);
+               srcp = kmap_atomic(ppages[i]);
                memcpy(destp, srcp+page_base, curlen);
-               kunmap_atomic(srcp, KM_SKB_SUNRPC_DATA);
+               kunmap_atomic(srcp);
                rqst->rq_svec[0].iov_len += curlen;
                destp += curlen;
                copy_len -= curlen;
@@ -639,10 +639,10 @@ rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
                        dprintk("RPC:       %s: page %d"
                                " srcp 0x%p len %d curlen %d\n",
                                __func__, i, srcp, copy_len, curlen);
-                       destp = kmap_atomic(ppages[i], KM_SKB_SUNRPC_DATA);
+                       destp = kmap_atomic(ppages[i]);
                        memcpy(destp + page_base, srcp, curlen);
                        flush_dcache_page(ppages[i]);
-                       kunmap_atomic(destp, KM_SKB_SUNRPC_DATA);
+                       kunmap_atomic(destp);
                        srcp += curlen;
                        copy_len -= curlen;
                        if (copy_len == 0)
index 9027ac1..3865145 100644 (file)
@@ -886,12 +886,12 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos,
                 * But remove_arg_zero() uses kmap_atomic()/kunmap_atomic().
                 * So do I.
                 */
-               char *kaddr = kmap_atomic(page, KM_USER0);
+               char *kaddr = kmap_atomic(page);
 
                dump->page = page;
                memcpy(dump->data + offset, kaddr + offset,
                       PAGE_SIZE - offset);
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
        }
        /* Same with put_arg_page(page) in fs/exec.c */
 #ifdef CONFIG_MMU