module: Sort exported symbols
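
The switch below from *(__ksymtab) and friends to SORT(___ksymtab+*) patterns
only sorts anything if EXPORT_SYMBOL puts every exported symbol into its own
input section whose name embeds the symbol name; the linker then orders those
sections lexicographically, so the tables between __start___ksymtab and
__stop___ksymtab come out sorted by name, which makes faster (e.g.
binary-search) lookups possible.  A minimal sketch of the idea, with
simplified types rather than the kernel's actual EXPORT_SYMBOL definition:

    struct kernel_symbol {
            unsigned long value;
            const char *name;
    };

    /*
     * Sketch only: emit one entry per exported symbol into a section
     * named "___ksymtab+<symbol>" so that SORT(___ksymtab+*) can order
     * the entries by symbol name at link time.
     */
    #define EXPORT_SYMBOL_SKETCH(sym)                                   \
            static const char __kstrtab_##sym[] = #sym;                 \
            static const struct kernel_symbol __ksymtab_##sym           \
            __attribute__((section("___ksymtab+" #sym), used)) =        \
                    { (unsigned long)&sym, __kstrtab_##sym }

The CRC tables get the same renaming (___kcrctab*+*), presumably so that
entry N of each __kcrctab section keeps matching entry N of the
corresponding __ksymtab section after sorting.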
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 853aa87..b27445e 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -15,7 +15,7 @@
  *     HEAD_TEXT_SECTION
  *     INIT_TEXT_SECTION(PAGE_SIZE)
  *     INIT_DATA_SECTION(...)
- *     PERCPU(PAGE_SIZE)
+ *     PERCPU(CACHELINE_SIZE, PAGE_SIZE)
  *     __init_end = .;
  *
  *     _stext = .;
@@ -67,7 +67,8 @@
  * Align to a 32 byte boundary equal to the
  * alignment gcc 4.5 uses for a struct
  */
-#define STRUCT_ALIGN() . = ALIGN(32)
+#define STRUCT_ALIGNMENT 32
+#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)
 
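Giving the value a name (STRUCT_ALIGNMENT) instead of hard-coding ALIGN(32)
also lets C code that emits records into STRUCT_ALIGN()ed sections request
the same alignment, so the compiler's idea of the table stride matches the
section start.  A sketch only; whether the kernel actually uses the define
from C is not shown in this diff:

    #define STRUCT_ALIGNMENT 32     /* keep in sync with STRUCT_ALIGN() */

    /* Hypothetical record emitted into a custom section: forcing the
     * same 32-byte alignment as the section start keeps entry N at
     * __start_demo + N * sizeof(struct demo_entry).
     */
    struct demo_entry {
            const char *name;
            void (*probe)(void);
    } __attribute__((aligned(STRUCT_ALIGNMENT)));

    extern struct demo_entry __start_demo[], __stop_demo[];

    static unsigned int demo_entry_count(void)
    {
            return __stop_demo - __start_demo;
    }
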
 /* The actual configuration determine if the init/exit sections
  * are handled as text/data or they can be discarded (which
 #endif
 
 #ifdef CONFIG_EVENT_TRACING
-#define FTRACE_EVENTS()        VMLINUX_SYMBOL(__start_ftrace_events) = .;      \
+#define FTRACE_EVENTS()        . = ALIGN(8);                                   \
+                       VMLINUX_SYMBOL(__start_ftrace_events) = .;      \
                        *(_ftrace_events)                               \
                        VMLINUX_SYMBOL(__stop_ftrace_events) = .;
 #else
 #endif
 
 #ifdef CONFIG_FTRACE_SYSCALLS
-#define TRACE_SYSCALLS() VMLINUX_SYMBOL(__start_syscalls_metadata) = .;        \
+#define TRACE_SYSCALLS() . = ALIGN(8);                                 \
+                        VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
                         *(__syscalls_metadata)                         \
                         VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
 #else
 #define TRACE_SYSCALLS()
 #endif
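
The ALIGN(8) added in front of __start_ftrace_events and
__start_syscalls_metadata matters because those symbols are used as the base
of an array walk: without it the start symbol can be assigned before the
padding the linker inserts to align the first record, and the walker would
treat that gap as a garbage first entry.  Generic illustration, not the real
ftrace structures:

    struct record {                 /* hypothetical 8-byte-aligned record */
            unsigned long a;
            unsigned long b;
    };

    extern struct record __start_records[], __stop_records[];

    static void walk_records(void (*fn)(struct record *))
    {
            struct record *r;

            /* Only correct if __start_records itself is 8-byte aligned. */
            for (r = __start_records; r < __stop_records; r++)
                    fn(r);
    }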
 
+
+#define KERNEL_DTB()                                                   \
+       STRUCT_ALIGN();                                                 \
+       VMLINUX_SYMBOL(__dtb_start) = .;                                \
+       *(.dtb.init.rodata)                                             \
+       VMLINUX_SYMBOL(__dtb_end) = .;
+
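KERNEL_DTB() gathers device-tree blobs linked into the kernel image and
brackets them with __dtb_start/__dtb_end.  Rough sketch of producer and
consumer; in the real build the blob bytes come from the dtc rules rather
than from C, so treat this purely as an illustration:

    extern char __dtb_start[], __dtb_end[];

    /* Hypothetical: a compiled blob dropped into .dtb.init.rodata. */
    static const unsigned char board_dtb[]
            __attribute__((section(".dtb.init.rodata"), aligned(8), used)) = {
            0xd0, 0x0d, 0xfe, 0xed,         /* FDT magic */
            /* ... remainder of the flattened device tree ... */
    };

    static int have_builtin_dtb(void)
    {
            return __dtb_end > __dtb_start;
    }
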
 /* .data section */
 #define DATA_DATA                                                      \
        *(.data)                                                        \
        *(.ref.data)                                                    \
+       *(.data..shared_aligned) /* percpu related */                   \
        DEV_KEEP(init.data)                                             \
        DEV_KEEP(exit.data)                                             \
        CPU_KEEP(init.data)                                             \
        CPU_KEEP(exit.data)                                             \
        MEM_KEEP(init.data)                                             \
        MEM_KEEP(exit.data)                                             \
-       . = ALIGN(32);                                                  \
-       VMLINUX_SYMBOL(__start___tracepoints) = .;                      \
+       STRUCT_ALIGN();                                                 \
        *(__tracepoints)                                                \
-       VMLINUX_SYMBOL(__stop___tracepoints) = .;                       \
        /* implement dynamic printk debug */                            \
        . = ALIGN(8);                                                   \
        VMLINUX_SYMBOL(__start___verbose) = .;                          \
        VMLINUX_SYMBOL(__stop___verbose) = .;                           \
        LIKELY_PROFILE()                                                \
        BRANCH_PROFILE()                                                \
-       TRACE_PRINTKS()                                                 \
-                                                                       \
-       STRUCT_ALIGN();                                                 \
-       FTRACE_EVENTS()                                                 \
-                                                                       \
-       STRUCT_ALIGN();                                                 \
-       TRACE_SYSCALLS()
+       TRACE_PRINTKS()
 
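DATA_DATA now keeps only the writable tracepoint objects (*(__tracepoints));
the start/stop markers move to a new read-only pointer array,
__tracepoints_ptrs, added in the .rodata hunk further down.  Sketch of how
such a pointer array is walked (types simplified, not the kernel's
struct tracepoint):

    struct tracepoint {             /* simplified */
            const char *name;
            int state;
    };

    extern struct tracepoint * const __start___tracepoints_ptrs[];
    extern struct tracepoint * const __stop___tracepoints_ptrs[];

    static void for_each_tracepoint(void (*fn)(struct tracepoint *))
    {
            struct tracepoint * const *p;

            for (p = __start___tracepoints_ptrs;
                 p < __stop___tracepoints_ptrs; p++)
                    fn(*p);
    }
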
 /*
  * Data section helpers
 
 #define READ_MOSTLY_DATA(align)                                                \
        . = ALIGN(align);                                               \
-       *(.data..read_mostly)
+       *(.data..read_mostly)                                           \
+       . = ALIGN(align);
 
 #define CACHELINE_ALIGNED_DATA(align)                                  \
        . = ALIGN(align);                                               \
                VMLINUX_SYMBOL(__start_rodata) = .;                     \
                *(.rodata) *(.rodata.*)                                 \
                *(__vermagic)           /* Kernel version magic */      \
+               . = ALIGN(8);                                           \
+               VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .;         \
+               *(__tracepoints_ptrs)   /* Tracepoints: pointer array */\
+               VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .;          \
                *(__markers_strings)    /* Markers: strings */          \
                *(__tracepoints_strings)/* Tracepoints: strings */      \
        }                                                               \
                                                                        \
        BUG_TABLE                                                       \
                                                                        \
+       JUMP_TABLE                                                      \
+                                                                       \
        /* PCI quirks */                                                \
        .pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {        \
                VMLINUX_SYMBOL(__start_pci_fixups_early) = .;           \
        /* Kernel symbol table: Normal symbols */                       \
        __ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) {         \
                VMLINUX_SYMBOL(__start___ksymtab) = .;                  \
-               *(__ksymtab)                                            \
+               *(SORT(___ksymtab+*))                                   \
                VMLINUX_SYMBOL(__stop___ksymtab) = .;                   \
        }                                                               \
                                                                        \
        /* Kernel symbol table: GPL-only symbols */                     \
        __ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {     \
                VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;              \
-               *(__ksymtab_gpl)                                        \
+               *(SORT(___ksymtab_gpl+*))                               \
                VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;               \
        }                                                               \
                                                                        \
        /* Kernel symbol table: Normal unused symbols */                \
        __ksymtab_unused  : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) {  \
                VMLINUX_SYMBOL(__start___ksymtab_unused) = .;           \
-               *(__ksymtab_unused)                                     \
+               *(SORT(___ksymtab_unused+*))                            \
                VMLINUX_SYMBOL(__stop___ksymtab_unused) = .;            \
        }                                                               \
                                                                        \
        /* Kernel symbol table: GPL-only unused symbols */              \
        __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .;       \
-               *(__ksymtab_unused_gpl)                                 \
+               *(SORT(___ksymtab_unused_gpl+*))                        \
                VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .;        \
        }                                                               \
                                                                        \
        /* Kernel symbol table: GPL-future-only symbols */              \
        __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .;       \
-               *(__ksymtab_gpl_future)                                 \
+               *(SORT(___ksymtab_gpl_future+*))                        \
                VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .;        \
        }                                                               \
                                                                        \
        /* Kernel symbol table: Normal symbols */                       \
        __kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) {         \
                VMLINUX_SYMBOL(__start___kcrctab) = .;                  \
-               *(__kcrctab)                                            \
+               *(SORT(___kcrctab+*))                                   \
                VMLINUX_SYMBOL(__stop___kcrctab) = .;                   \
        }                                                               \
                                                                        \
        /* Kernel symbol table: GPL-only symbols */                     \
        __kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {     \
                VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;              \
-               *(__kcrctab_gpl)                                        \
+               *(SORT(___kcrctab_gpl+*))                               \
                VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;               \
        }                                                               \
                                                                        \
        /* Kernel symbol table: Normal unused symbols */                \
        __kcrctab_unused  : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) {  \
                VMLINUX_SYMBOL(__start___kcrctab_unused) = .;           \
-               *(__kcrctab_unused)                                     \
+               *(SORT(___kcrctab_unused+*))                            \
                VMLINUX_SYMBOL(__stop___kcrctab_unused) = .;            \
        }                                                               \
                                                                        \
        /* Kernel symbol table: GPL-only unused symbols */              \
        __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .;       \
-               *(__kcrctab_unused_gpl)                                 \
+               *(SORT(___kcrctab_unused_gpl+*))                        \
                VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .;        \
        }                                                               \
                                                                        \
        /* Kernel symbol table: GPL-future-only symbols */              \
        __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .;       \
-               *(__kcrctab_gpl_future)                                 \
+               *(SORT(___kcrctab_gpl_future+*))                        \
                VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .;        \
        }                                                               \
                                                                        \
                VMLINUX_SYMBOL(__start___param) = .;                    \
                *(__param)                                              \
                VMLINUX_SYMBOL(__stop___param) = .;                     \
+       }                                                               \
+                                                                       \
+       /* Built-in module versions. */                                 \
+       __modver : AT(ADDR(__modver) - LOAD_OFFSET) {                   \
+               VMLINUX_SYMBOL(__start___modver) = .;                   \
+               *(__modver)                                             \
+               VMLINUX_SYMBOL(__stop___modver) = .;                    \
                . = ALIGN((align));                                     \
                VMLINUX_SYMBOL(__end_rodata) = .;                       \
        }                                                               \
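
The new __modver section records version information for code built into the
kernel, so MODULE_VERSION() data is not lost when a driver is not built as a
.ko.  Sketch of a consumer walking the bracketed region; the record layout
below is a simplified stand-in, not the kernel's module_version_attribute:

    struct modver_record {          /* simplified stand-in */
            const char *module_name;
            const char *version;
    };

    extern const struct modver_record __start___modver[];
    extern const struct modver_record __stop___modver[];

    static void for_each_builtin_version(void (*fn)(const char *mod,
                                                    const char *ver))
    {
            const struct modver_record *v;

            for (v = __start___modver; v < __stop___modver; v++)
                    fn(v->module_name, v->version);
    }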
                *(.kprobes.text)                                        \
                VMLINUX_SYMBOL(__kprobes_text_end) = .;
 
+#define ENTRY_TEXT                                                     \
+               ALIGN_FUNCTION();                                       \
+               VMLINUX_SYMBOL(__entry_text_start) = .;                 \
+               *(.entry.text)                                          \
+               VMLINUX_SYMBOL(__entry_text_end) = .;
+
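ENTRY_TEXT collects the architecture's low-level syscall/interrupt entry
stubs (.entry.text) behind __entry_text_start/__entry_text_end so other code
can tell whether an address belongs to that region.  Illustrative helper
only, not an existing kernel function:

    extern char __entry_text_start[], __entry_text_end[];

    static int in_entry_text(unsigned long addr)
    {
            return addr >= (unsigned long)__entry_text_start &&
                   addr <  (unsigned long)__entry_text_end;
    }
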
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 #define IRQENTRY_TEXT                                                  \
                ALIGN_FUNCTION();                                       \
        KERNEL_CTORS()                                                  \
        *(.init.rodata)                                                 \
        MCOUNT_REC()                                                    \
+       FTRACE_EVENTS()                                                 \
+       TRACE_SYSCALLS()                                                \
        DEV_DISCARD(init.rodata)                                        \
        CPU_DISCARD(init.rodata)                                        \
-       MEM_DISCARD(init.rodata)
+       MEM_DISCARD(init.rodata)                                        \
+       KERNEL_DTB()
 
 #define INIT_TEXT                                                      \
        *(.init.text)                                                   \
 #define BUG_TABLE
 #endif
 
+#define JUMP_TABLE                                                     \
+       . = ALIGN(8);                                                   \
+       __jump_table : AT(ADDR(__jump_table) - LOAD_OFFSET) {           \
+               VMLINUX_SYMBOL(__start___jump_table) = .;               \
+               *(__jump_table)                                         \
+               VMLINUX_SYMBOL(__stop___jump_table) = .;                \
+       }
+
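__jump_table holds one record per static-branch (jump label) site so the
patching code can find every nop/jmp it may have to rewrite for a given key;
the ALIGN(8) keeps the record array naturally aligned.  Sketch with
simplified field types, since the exact struct jump_entry layout is not part
of this diff:

    struct jump_entry {             /* simplified */
            unsigned long code;     /* address of the patch site */
            unsigned long target;   /* destination when the branch is on */
            unsigned long key;      /* identifies the static branch */
    };

    extern struct jump_entry __start___jump_table[];
    extern struct jump_entry __stop___jump_table[];

    static void patch_sites(unsigned long key,
                            void (*patch)(struct jump_entry *))
    {
            struct jump_entry *e;

            for (e = __start___jump_table; e < __stop___jump_table; e++)
                    if (e->key == key)
                            patch(e);
    }
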
 #ifdef CONFIG_PM_TRACE
 #define TRACEDATA                                                      \
        . = ALIGN(4);                                                   \
 
 #ifdef CONFIG_BLK_DEV_INITRD
 #define INIT_RAM_FS                                                    \
-       . = ALIGN(PAGE_SIZE);                                           \
+       . = ALIGN(4);                                                   \
        VMLINUX_SYMBOL(__initramfs_start) = .;                          \
        *(.init.ramfs)                                                  \
-       VMLINUX_SYMBOL(__initramfs_end) = .;
+       . = ALIGN(8);                                                   \
+       *(.init.ramfs.info)
 #else
 #define INIT_RAM_FS
 #endif
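
INIT_RAM_FS drops the __initramfs_end marker: the archive itself only needs
4-byte alignment, and its size is expected to come from the 8-byte-aligned
.init.ramfs.info region that now follows it (in the kernel this is a
__initramfs_size variable emitted by the initramfs build rules; treat that
name as an assumption here).  Sketch of the consumer side:

    extern char __initramfs_start[];
    extern unsigned long __initramfs_size;  /* lives in .init.ramfs.info */

    static void load_builtin_initramfs(int (*unpack)(char *buf,
                                                     unsigned long len))
    {
            unpack(__initramfs_start, __initramfs_size);
    }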
        EXIT_DATA                                                       \
        EXIT_CALL                                                       \
        *(.discard)                                                     \
+       *(.discard.*)                                                   \
        }
 
 /**
  * PERCPU_VADDR - define output section for percpu area
+ * @cacheline: cacheline size
  * @vaddr: explicit base address (optional)
  * @phdr: destination PHDR (optional)
  *
- * Macro which expands to output section for percpu area.  If @vaddr
- * is not blank, it specifies explicit base address and all percpu
- * symbols will be offset from the given address.  If blank, @vaddr
- * always equals @laddr + LOAD_OFFSET.
+ * Macro which expands to output section for percpu area.
+ *
+ * @cacheline is used to align subsections to avoid false cacheline
+ * sharing between subsections for different purposes.
+ *
+ * If @vaddr is not blank, it specifies explicit base address and all
+ * percpu symbols will be offset from the given address.  If blank,
+ * @vaddr always equals @laddr + LOAD_OFFSET.
  *
  * @phdr defines the output PHDR to use if not blank.  Be warned that
  * output PHDR is sticky.  If @phdr is specified, the next output
  * If there is no need to put the percpu section at a predetermined
  * address, use PERCPU().
  */
-#define PERCPU_VADDR(vaddr, phdr)                                      \
+#define PERCPU_VADDR(cacheline, vaddr, phdr)                           \
        VMLINUX_SYMBOL(__per_cpu_load) = .;                             \
        .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)         \
                                - LOAD_OFFSET) {                        \
                VMLINUX_SYMBOL(__per_cpu_start) = .;                    \
                *(.data..percpu..first)                                 \
+               . = ALIGN(PAGE_SIZE);                                   \
                *(.data..percpu..page_aligned)                          \
+               . = ALIGN(cacheline);                                   \
+               *(.data..percpu..readmostly)                            \
+               . = ALIGN(cacheline);                                   \
                *(.data..percpu)                                        \
                *(.data..percpu..shared_aligned)                        \
                VMLINUX_SYMBOL(__per_cpu_end) = .;                      \
 
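With the new @cacheline argument the page-aligned, read-mostly and ordinary
percpu groups are padded apart, so a write-hot percpu variable cannot share a
(possibly large, e.g. internode) cache line with read-mostly percpu data.
Sketch of definitions that land in each subsection; the DEFINE_PER_CPU_*
variants are assumed to exist under these names in percpu-defs.h:

    #include <linux/percpu.h>

    /* .data..percpu..page_aligned */
    static DEFINE_PER_CPU_PAGE_ALIGNED(char, irq_stack[4096]);

    /* .data..percpu..readmostly: read often, written rarely */
    static DEFINE_PER_CPU_READ_MOSTLY(void *, cpu_lookup_table);

    /* .data..percpu: ordinary, possibly write-hot */
    static DEFINE_PER_CPU(unsigned long, event_counter);

    /* .data..percpu..shared_aligned: cacheline-aligned within its group */
    static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, hot_counter);
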
 /**
  * PERCPU - define output section for percpu area, simple version
+ * @cacheline: cacheline size
  * @align: required alignment
  *
- * Align to @align and outputs output section for percpu area.  This
- * macro doesn't maniuplate @vaddr or @phdr and __per_cpu_load and
+ * Align to @align and output the section for the percpu area.  This macro
+ * doesn't manipulate @vaddr or @phdr, and __per_cpu_load and
  * __per_cpu_start will be identical.
  *
- * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
- * that __per_cpu_load is defined as a relative symbol against
- * .data..percpu which is required for relocatable x86_32
- * configuration.
+ * This macro is equivalent to ALIGN(@align); PERCPU_VADDR(@cacheline,,)
+ * except that __per_cpu_load is defined as a relative symbol against
+ * .data..percpu which is required for relocatable x86_32 configuration.
  */
-#define PERCPU(align)                                                  \
+#define PERCPU(cacheline, align)                                       \
        . = ALIGN(align);                                               \
        .data..percpu   : AT(ADDR(.data..percpu) - LOAD_OFFSET) {       \
                VMLINUX_SYMBOL(__per_cpu_load) = .;                     \
                VMLINUX_SYMBOL(__per_cpu_start) = .;                    \
                *(.data..percpu..first)                                 \
+               . = ALIGN(PAGE_SIZE);                                   \
                *(.data..percpu..page_aligned)                          \
+               . = ALIGN(cacheline);                                   \
+               *(.data..percpu..readmostly)                            \
+               . = ALIGN(cacheline);                                   \
                *(.data..percpu)                                        \
                *(.data..percpu..shared_aligned)                        \
                VMLINUX_SYMBOL(__per_cpu_end) = .;                      \
  * the sections that has this restriction (or similar)
  * is located before the ones requiring PAGE_SIZE alignment.
  * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment which
- * matches the requirment of PAGE_ALIGNED_DATA.
+ * matches the requirement of PAGE_ALIGNED_DATA.
  *
  * use 0 as page_align if page_aligned data is not used */
 #define RW_DATA_SECTION(cacheline, pagealigned, inittask)              \