#endif
__u8 x86_virt_bits;
__u8 x86_phys_bits;
+#ifndef CONFIG_XEN
/* CPUID returned core id bits: */
__u8 x86_coreid_bits;
+#endif
/* Max extended CPUID function supported: */
__u32 extended_cpuid_level;
/* Maximum supported CPUID level, -1=no CPUID: */
int x86_cache_alignment; /* In bytes */
int x86_power;
unsigned long loops_per_jiffy;
-#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
+#ifndef CONFIG_XEN
+#ifdef CONFIG_SMP
/* cpus sharing the last level cache: */
cpumask_var_t llc_shared_map;
#endif
u16 x86_max_cores;
u16 apicid;
u16 initial_apicid;
+#endif
u16 x86_clflush_size;
-#ifdef CONFIG_SMP
+#ifdef CONFIG_X86_HT
/* number of cores as seen by the OS: */
u16 booted_cores;
/* Physical processor id: */
u16 phys_proc_id;
/* Core id: */
u16 cpu_core_id;
+#endif
+#ifdef CONFIG_SMP
/* Index into per_cpu list: */
u16 cpu_index;
#endif
#include <linux/thread_info.h>
#include <asm/cpumask.h>
-extern int smp_num_siblings;
extern unsigned int num_processors;
-DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
-DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
DECLARE_PER_CPU(u16, cpu_llc_id);
DECLARE_PER_CPU(int, cpu_number);
-static inline struct cpumask *cpu_sibling_mask(int cpu)
+static inline const struct cpumask *cpu_sibling_mask(int cpu)
{
- return per_cpu(cpu_sibling_map, cpu);
+ return cpumask_of(cpu);
}
-static inline struct cpumask *cpu_core_mask(int cpu)
+static inline const struct cpumask *cpu_core_mask(int cpu)
{
- return per_cpu(cpu_core_map, cpu);
+ return cpumask_of(cpu);
}
DECLARE_PER_CPU(u16, x86_cpu_to_apicid);
/* Get dependency relationships */
if (cx->csd_count) {
- printk("Wow! _CSD is found. Not support for now!\n");
+ pr_warning("_CSD found: Not supported for now!\n");
kfree(buf);
return -EINVAL;
} else {
}
if (!count) {
- printk("No available Cx info for cpu %d\n", pr->acpi_id);
+ pr_info("No available Cx info for cpu %d\n", pr->acpi_id);
kfree(buf);
return -EINVAL;
}
.hotplug = xen_hotplug_notifier,
};
-void arch_acpi_processor_init_extcntl(const struct processor_extcntl_ops **ops)
+static int __init init_extcntl(void)
{
unsigned int pmbits = (xen_start_info->flags & SIF_PM_MASK) >> 8;
if (!pmbits)
- return;
+ return 0;
if (pmbits & XEN_PROCESSOR_PM_CX)
xen_extcntl_ops.pm_ops[PM_TYPE_IDLE] = xen_cx_notifier;
if (pmbits & XEN_PROCESSOR_PM_PX)
if (pmbits & XEN_PROCESSOR_PM_TX)
xen_extcntl_ops.pm_ops[PM_TYPE_THR] = xen_tx_notifier;
- *ops = &xen_extcntl_ops;
+ processor_extcntl_ops = &xen_extcntl_ops;
+
+ return 0;
}
-EXPORT_SYMBOL(arch_acpi_processor_init_extcntl);
+arch_initcall(init_extcntl);
unsigned int cpufreq_quick_get(unsigned int cpu)
{
old_cfg = old_desc->chip_data;
- memcpy(cfg, old_cfg, sizeof(struct irq_cfg));
+ cfg->vector = old_cfg->vector;
+ cfg->move_in_progress = old_cfg->move_in_progress;
+ cpumask_copy(cfg->domain, old_cfg->domain);
+ cpumask_copy(cfg->old_domain, old_cfg->old_domain);
init_copy_irq_2_pin(old_cfg, cfg, node);
}
-static void free_irq_cfg(struct irq_cfg *old_cfg)
+static void free_irq_cfg(struct irq_cfg *cfg)
{
- kfree(old_cfg);
+ free_cpumask_var(cfg->domain);
+ free_cpumask_var(cfg->old_domain);
+ kfree(cfg);
}
void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
u64 val;
clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
+#ifndef CONFIG_XEN
if (!rdmsrl_amd_safe(0xc001100d, &val)) {
val &= ~(1ULL << 32);
wrmsrl_amd_safe(0xc001100d, val);
}
+#else
+ pr_warning("Long-mode LAHF feature wrongly enabled - "
+ "hypervisor update needed\n");
+ (void)&val;
+#endif
}
}
#ifndef CONFIG_XEN
cpumask_var_t cpu_callout_mask;
cpumask_var_t cpu_callin_mask;
-#endif
/* representing cpus for which sibling maps can be computed */
cpumask_var_t cpu_sibling_setup_mask;
+#endif
/* correctly size the local cpu masks */
void __init setup_cpu_local_masks(void)
#ifndef CONFIG_XEN
alloc_bootmem_cpumask_var(&cpu_callin_mask);
alloc_bootmem_cpumask_var(&cpu_callout_mask);
-#endif
alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
+#endif
}
static void __cpuinit default_init(struct cpuinfo_x86 *c)
}
}
-static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
+void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
{
u32 tfms, xlvl;
u32 ebx;
get_cpu_cap(c);
+#ifndef CONFIG_XEN
if (c->cpuid_level >= 0x00000001) {
c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
-#if defined(CONFIG_X86_32) && !defined(CONFIG_XEN)
+#ifdef CONFIG_X86_32
# ifdef CONFIG_X86_HT
c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
# else
c->phys_proc_id = c->initial_apicid;
#endif
}
+#endif
get_model_name(c); /* Default name */
c->x86_model = c->x86_mask = 0; /* So far unknown... */
c->x86_vendor_id[0] = '\0'; /* Unset */
c->x86_model_id[0] = '\0'; /* Unset */
+#ifndef CONFIG_XEN
c->x86_max_cores = 1;
c->x86_coreid_bits = 0;
+#endif
#ifdef CONFIG_X86_64
c->x86_clflush_size = 64;
c->x86_phys_bits = 36;
rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
+#ifndef CONFIG_XEN
misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
c->cpuid_level = cpuid_eax(0);
get_cpu_cap(c);
+#else
+ pr_warning("CPUID levels are restricted -"
+ " update hypervisor\n");
+#endif
}
}
* need the microcode to have already been loaded... so if it is
* not, recommend a BIOS update and disable large pages.
*/
+#ifdef CONFIG_XEN
+ if (cpu_has(c, X86_FEATURE_PSE))
+#endif
if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2) {
u32 ucode, junk;
rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) {
printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
+#ifndef CONFIG_XEN
printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;
wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);
+#else
+ pr_warning("CPU: Hypervisor update needed\n");
+#endif
}
}
#endif
}
+#ifndef CONFIG_XEN
/*
* find out the number of processor cores on the die
*/
else
return 1;
}
+#endif
static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c)
{
set_cpu_cap(c, X86_FEATURE_P3);
#endif
+#ifndef CONFIG_XEN
if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
/*
* let's use the legacy cpuid vector 0x1 and 0x4 for topology
detect_ht(c);
#endif
}
+#endif
/* Work around errata */
srat_detect_node(c);
eax->split.type = types[leaf];
eax->split.level = levels[leaf];
eax->split.num_threads_sharing = 0;
+#ifndef CONFIG_XEN
eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;
-
+#endif
if (assoc == 0xffff)
eax->split.is_fully_associative = 1;
unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
- unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
+ unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
unsigned int cpu = c->cpu_index;
#endif
break;
case 2:
new_l2 = this_leaf.size/1024;
+#ifdef CONFIG_X86_HT
num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
index_msb = get_count_order(num_threads_sharing);
l2_id = c->apicid >> index_msb;
+#endif
break;
case 3:
new_l3 = this_leaf.size/1024;
+#ifdef CONFIG_X86_HT
num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
index_msb = get_count_order(
num_threads_sharing);
l3_id = c->apicid >> index_msb;
+#endif
break;
default:
break;
m->time = get_seconds();
m->cpuvendor = boot_cpu_data.x86_vendor;
m->cpuid = cpuid_eax(1);
+#ifndef CONFIG_XEN
#ifdef CONFIG_SMP
m->socketid = cpu_data(m->extcpu).phys_proc_id;
#endif
m->apicid = cpu_data(m->extcpu).initial_apicid;
+#endif
rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
}
x86_mcinfo_lookup(mic, mi, MC_TYPE_GLOBAL);
if (mic == NULL)
{
- printk(KERN_ERR "DOM0_MCE_LOG: global data is NULL\n");
+ pr_err("DOM0_MCE_LOG: global data is NULL\n");
return -1;
}
break;
}
WARN_ON_ONCE(!found);
- m.socketid = g_physinfo[i].mc_chipid;
+ m.socketid = mc_global->mc_socketid;
m.cpu = m.extcpu = g_physinfo[i].mc_cpunr;
m.cpuvendor = (__u8)g_physinfo[i].mc_vendor;
{
result = convert_log(g_mi);
if (result) {
- printk(KERN_ERR "MCE_DOM0_LOG: Log conversion failed\n");
+ pr_err("MCE_DOM0_LOG: Log conversion failed\n");
goto end;
}
/* After fetching the telem from DOM0, we need to dec the telem's
{
result = convert_log(g_mi);
if (result) {
- printk(KERN_ERR "MCE_DOM0_LOG: Log conversion failed\n");
+ pr_err("MCE_DOM0_LOG: Log conversion failed\n");
goto end;
}
/* After fetching the telem from DOM0, we need to dec the telem's
set_xen_guest_handle(mc_op.u.mc_physcpuinfo.info, NULL);
ret = HYPERVISOR_mca(&mc_op);
if (ret) {
- printk(KERN_ERR "MCE: Failed to get physical CPU count\n");
+ pr_err("MCE: Failed to get physical CPU count\n");
kfree(g_mi);
return ret;
}
set_xen_guest_handle(mc_op.u.mc_physcpuinfo.info, g_physinfo);
ret = HYPERVISOR_mca(&mc_op);
if (ret) {
- printk(KERN_ERR "MCE: Failed to get physical CPUs' info\n");
+ pr_err("MCE: Failed to get physical CPUs' info\n");
kfree(g_mi);
kfree(g_physinfo);
return ret;
mce_dom0_interrupt, 0, "mce", NULL);
if (ret < 0) {
- printk(KERN_ERR "MCE: Failed to bind vIRQ for Dom0\n");
+ pr_err("MCE: Failed to bind vIRQ for Dom0\n");
kfree(g_mi);
kfree(g_physinfo);
return ret;
static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
unsigned int cpu)
{
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
if (c->x86_max_cores * smp_num_siblings > 1) {
seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
seq_printf(m, "siblings\t: %d\n",
if ((mem_size >> (PAGE_SHIFT + 5)) > xen_start_info->nr_pages) {
u64 size = (u64)xen_start_info->nr_pages << 5;
- printk(KERN_WARNING "mem=%Luk is invalid for an initial"
- " allocation of %luk, using %Luk\n",
- (unsigned long long)mem_size >> 10,
- xen_start_info->nr_pages << (PAGE_SHIFT - 10),
- (unsigned long long)size << (PAGE_SHIFT - 10));
+ pr_warning("mem=%Luk is invalid for an initial"
+ " allocation of %luk, using %Luk\n",
+ (unsigned long long)mem_size >> 10,
+ xen_start_info->nr_pages << (PAGE_SHIFT - 10),
+ (unsigned long long)size << (PAGE_SHIFT - 10));
mem_size = size << PAGE_SHIFT;
}
#endif
if ((maxmem >> (PAGE_SHIFT + 5)) > xen_start_info->nr_pages) {
unsigned long long size = (u64)xen_start_info->nr_pages << 5;
- printk(KERN_WARNING "maxmem of %LuM is invalid for an initial"
- " allocation of %luM, using %LuM\n",
- maxmem >> 20,
- xen_start_info->nr_pages >> (20 - PAGE_SHIFT),
- size >> (20 - PAGE_SHIFT));
+ pr_warning("maxmem of %LuM is invalid for an initial"
+ " allocation of %luM, using %LuM\n",
+ maxmem >> 20,
+ xen_start_info->nr_pages >> (20 - PAGE_SHIFT),
+ size >> (20 - PAGE_SHIFT));
size <<= PAGE_SHIFT;
e820_remove_range(size, ULLONG_MAX - size, E820_RAM, 1);
}
#include <linux/version.h>
#include <asm/traps.h>
-#define DP(_f, _args...) printk(KERN_ALERT " " _f "\n" , ## _args )
+#define DP(_f, _args...) pr_alert(" " _f "\n" , ## _args )
dotraplinkage void do_fixup_4gb_segment(struct pt_regs *regs, long error_code)
{
lpfn = mfn_to_local_pfn(lmfn);
rpfn = mfn_to_local_pfn(rmfn);
- printk(KERN_INFO
- "Swapping MFNs for PFN %lx and %lx (MFN %lx and %lx)\n",
- lpfn, rpfn, lmfn, rmfn);
+ pr_info("Swapping MFNs for PFN %lx and %lx (MFN %lx and %lx)\n",
+ lpfn, rpfn, lmfn, rmfn);
xen_l1_entry_update(lpte, pfn_pte_ma(rmfn, pte_pgprot(*lpte)));
xen_l1_entry_update(rpte, pfn_pte_ma(lmfn, pte_pgprot(*rpte)));
static void multicall_failed(const multicall_entry_t *mc, int rc)
{
- printk(KERN_EMERG "hypercall#%lu(%lx, %lx, %lx, %lx)"
- " failed: %d (caller %lx)\n",
+ pr_emerg("hypercall#%lu(%lx, %lx, %lx, %lx) failed: %d"
+ " (caller %lx)\n",
mc->op, mc->args[0], mc->args[1], mc->args[2], mc->args[3],
rc, mc->args[5]);
BUG();
unsigned int j = 0;
if (!page) {
- printk(KERN_WARNING "Xen and kernel out of memory "
- "while trying to release an order %u "
- "contiguous region\n", order);
+ pr_warning("Xen and kernel out of memory"
+ " while trying to release an order"
+ " %u contiguous region\n", order);
break;
}
pfn = page_to_pfn(page);
if (raw_pci_ops)
return 0;
- printk(KERN_INFO "PCI: setting up Xen PCI frontend stub\n");
+ pr_info("PCI: setting up Xen PCI frontend stub\n");
/* Copied from arch/i386/pci/common.c */
pci_cache_line_size = 32 >> 2;
&setup_gsi) < 0)
continue;
- printk(KERN_INFO "GSI%d: %s-%s\n", gsi,
- (triggering == ACPI_LEVEL_SENSITIVE) ? "level"
+ dev_info(&dev->dev, "GSI%d: %s-%s\n", gsi,
+ triggering == ACPI_LEVEL_SENSITIVE ? "level"
: "edge",
- (polarity == ACPI_ACTIVE_LOW) ? "low" : "high");
+ polarity == ACPI_ACTIVE_LOW ? "low" : "high");
} else {
/*
* No IRQ known to the ACPI subsystem - maybe the
pin_name(dev->pin));
/* Interrupt Line values above 0xF are forbidden */
if (dev->irq > 0 && (dev->irq <= 0xF)) {
- printk(" - using IRQ %d\n", dev->irq);
+ pr_cont(" - using IRQ %d\n", dev->irq);
setup_gsi.gsi = dev->irq;
setup_gsi.triggering = 1;
setup_gsi.polarity = 1;
VOID(HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi,
&setup_gsi));
} else
- printk("\n");
+ pr_cont("\n");
}
}
}
type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
+ if (processor_cntl_external())
+ type = ~type;
cpuid = acpi_get_cpuid(handle, type, acpi_id);
if ((cpuid == -1) && (num_possible_cpus() > 1))
strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
device->driver_data = pr;
- processor_extcntl_init();
-
result = acpi_processor_get_info(device);
if (result ||
((pr->id == -1) && !processor_cntl_external())) {
ret = processor_extcntl_ops->hotplug(pr, type);
break;
default:
- printk(KERN_ERR "Unsupport processor events %d.\n", event);
+ pr_err("Unsupported processor event %d.\n", event);
break;
}
}
/*
- * External control logic can decide to grab full or part of physical
- * processor control bits. Take a VMM for example, physical processors
- * are owned by VMM and thus existence information like hotplug is
- * always required to be notified to VMM. Similar is processor idle
- * state which is also necessarily controlled by VMM. But for other
- * control bits like performance/throttle states, VMM may choose to
- * control or not upon its own policy.
- */
-void processor_extcntl_init(void)
-{
- if (!processor_extcntl_ops)
- arch_acpi_processor_init_extcntl(&processor_extcntl_ops);
-}
-
-/*
* This is called from ACPI processor init, and targeted to hold
* some tricky housekeeping jobs to satisfy external control model.
* For example, we may put dependency parse stub here for idle
#define DPRINTK(fmt, args...) \
pr_debug("xen_tpm_fr (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
#define IPRINTK(fmt, args...) \
- printk(KERN_INFO "xen_tpm_fr: " fmt, ##args)
+ pr_info("xen_tpm_fr: " fmt, ##args)
#define WPRINTK(fmt, args...) \
- printk(KERN_WARNING "xen_tpm_fr: " fmt, ##args)
+ pr_warning("xen_tpm_fr: " fmt, ##args)
#define GRANT_INVALID_REF 0
if (mce->bank != 8)
return 0;
-#ifdef CONFIG_SMP
/* Only handle if it is the right mc controller */
+#if defined(CONFIG_XEN) /* Could easily be used for native too. */
+ if (mce->socketid != pvt->i7core_dev->socket)
+ return 0;
+#elif defined(CONFIG_SMP)
if (cpu_data(mce->cpu).phys_proc_id != pvt->i7core_dev->socket)
return 0;
#endif
(unsigned long)packet_data_temp_buf, ordernum, 0)) {
free_pages((unsigned long)packet_data_temp_buf,
ordernum);
- printk(KERN_WARNING
- "dell_rbu:%s: failed to adjust new "
- "packet\n", __func__);
+ pr_warning("dell_rbu:%s: failed to adjust new "
+ "packet\n", __func__);
retval = -ENOMEM;
spin_lock(&rbu_data.lock);
goto out_alloc_packet_array;
const char *name;
u32 cpu_core_id, phys_proc_id;
u8 x86_model, x86_mask;
+ u32 ucode_rev;
char valid; /* zero until following fields are valid */
unsigned long last_updated; /* in jiffies */
int temp;
if ((data->x86_model == 0xe) && (data->x86_mask < 0xc)) {
/* check for microcode update */
- if (rdmsr_safe_on_pcpu(pdev->id, MSR_IA32_UCODE_REV,
- &eax, &edx) < 0)
+ if (!(data->ucode_rev + 1))
dev_warn(&pdev->dev,
"Cannot read microcode revision of CPU\n");
- else if (edx < 0x39) {
+ else if (data->ucode_rev < 0x39) {
err = -ENODEV;
dev_err(&pdev->dev,
"Errata AE18 not fixed, update BIOS or "
pdev_entry->x86_model = ((val >> 4) & 0xf) | ((val >> 12) & 0xf0);
pdev_entry->x86_mask = val & 0xf;
- if (cpuid_eax(0) >= 6)
- info->cpuid_6_eax = cpuid_eax(6);
+ if (((val >> 8) & 0xf) != 6 || ((val >> 20) & 0xff)
+ || !pdev_entry->x86_model
+ || wrmsr_safe(MSR_IA32_UCODE_REV, 0, 0) < 0
+ || (sync_core(), rdmsr_safe(MSR_IA32_UCODE_REV,
+ &val, &pdev_entry->ucode_rev)) < 0)
+ pdev_entry->ucode_rev = ~0;
+
+ info->cpuid_6_eax = cpuid_eax(0) >= 6 ? cpuid_eax(6) : 0;
}
static int coretemp_device_add(unsigned int cpu)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group);
+ device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr);
return 0;
}
{
struct cpu_info *info = arg;
- if (cpuid_eax(0) >= 6)
- info->cpuid_6_eax = cpuid_eax(6);
+ info->cpuid_6_eax = cpuid_eax(0) >= 6 ? cpuid_eax(6) : 0;
}
static int pkgtemp_device_add(unsigned int cpu)
return TRUE;
format_err_end:
- printk(KERN_ERR
- "PCI: The format of the guestdev parameter is illegal. [%s]\n",
- str);
+ pr_err("PCI: The format of the guestdev parameter is illegal. [%s]\n",
+ str);
return FALSE;
}
allocate_err_end:
if (gdev)
pci_free_guestdev(gdev);
- printk(KERN_ERR "PCI: Failed to allocate memory.\n");
+ pr_err("PCI: failed to allocate memory\n");
return NULL;
}
} else
gdev->u.devicepath.child = node;
} else if (gdev) {
- printk(KERN_ERR
- "PCI: Can't obtain dev# and #func# from %s.\n",
- sp);
+ pr_err("PCI: Can't obtain dev# and #func# from %s.\n",
+ sp);
ret_val = -EINVAL;
if (gdev == gdev_org)
goto end;
goto end;
format_err_end:
- printk(KERN_ERR
- "PCI: The format of the guestdev parameter is illegal. [%s]\n",
- path_str);
+ pr_err("PCI: The format of the guestdev parameter is illegal. [%s]\n",
+ path_str);
ret_val = -EINVAL;
goto end;
allocate_err_end:
- printk(KERN_ERR "PCI: Failed to allocate memory.\n");
+ pr_err("PCI: failed to allocate memory\n");
ret_val = -ENOMEM;
goto end;
}
gdev = kmalloc(sizeof(*gdev), GFP_KERNEL);
if (!gdev) {
- printk(KERN_ERR "PCI: Failed to allocate memory.\n");
+ pr_err("PCI: failed to allocate memory\n");
return -ENOMEM;
}
INIT_LIST_HEAD(&gdev->root_list);
list_for_each(head, &guestdev_list) {
gdev = list_entry(head, struct guestdev, root_list);
pci_make_guestdev_str(gdev, path_str, GUESTDEV_STR_MAX);
- printk(KERN_DEBUG
- "PCI: %s has been reserved for guest domain.\n",
- path_str);
+ pr_debug("PCI: %s has been reserved for guest domain.\n",
+ path_str);
}
return 0;
}
for(;;) {
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node) {
- printk(KERN_ERR "PCI: Failed to allocate memory.\n");
+ pr_err("PCI: failed to allocate memory\n");
goto err_end;
}
node->dev = PCI_SLOT(dev->devfn);
} else {
pci_make_guestdev_str(gdev,
path_str, GUESTDEV_STR_MAX);
- printk(KERN_INFO
- "PCI: Device does not exist. %s\n",
- path_str);
+ pr_info("PCI: "
+ "device %s does not exist\n",
+ path_str);
continue;
}
}
bus = pci_find_bus(gdev->u.devicepath.seg,
gdev->u.devicepath.bbn);
- if (!bus ||
- !pci_check_devicepath_exists(gdev, bus)) {
+ if (!bus || !pci_check_devicepath_exists(gdev, bus)) {
pci_make_guestdev_str(gdev, path_str,
GUESTDEV_STR_MAX);
- printk(KERN_INFO
- "PCI: Device does not exist. %s\n",
+ pr_info("PCI: device %s does not exist\n",
path_str);
}
break;
}
}
pci_make_guestdev_str(gdev, path_str, GUESTDEV_STR_MAX);
- printk(KERN_INFO "PCI: Device does not exist. %s\n",
- path_str);
+ pr_info("PCI: device %s does not exist\n", path_str);
break;
default:
BUG();
rpcap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
if (!rpcap) {
/* pci device isn't supported */
- printk(KERN_INFO
- "PCI: sharing io port of non PCIe device %s "
- "isn't supported. ignoring.\n",
- pci_name(pdev));
+ pr_info("PCI: sharing io port of non PCIe device %s "
+ "isn't supported. ignoring.\n",
+ pci_name(pdev));
return -ENOSYS;
}
pci_read_config_word(pdev, rpcap + PCI_CAP_FLAGS, &cap);
switch ((cap & PCI_EXP_FLAGS_TYPE) >> 4) {
case PCI_EXP_TYPE_RC_END:
- printk(KERN_INFO
- "PCI: io port sharing of root complex integrated "
- "endpoint %s isn't supported. ignoring.\n",
- pci_name(pdev));
+ pr_info("PCI: io port sharing of root complex integrated "
+ "endpoint %s isn't supported. ignoring.\n",
+ pci_name(pdev));
return -ENOSYS;
case PCI_EXP_TYPE_ENDPOINT:
case PCI_EXP_TYPE_LEG_END:
break;
default:
- printk(KERN_INFO
- "PCI: io port sharing of non endpoint %s "
- "doesn't make sense. ignoring.\n",
- pci_name(pdev));
+ pr_info("PCI: io port sharing of non endpoint %s "
+ "doesn't make sense. ignoring.\n",
+ pci_name(pdev));
return -EINVAL;
}
uint8_t i;
struct resource *r;
- printk(KERN_INFO "PCI: deallocating io resource[%s]. io size 0x%lx\n",
- pci_name(pdev), func->io_size);
+ pr_info("PCI: deallocating io resource[%s]. io size 0x%lx\n",
+ pci_name(pdev), func->io_size);
for (i = 0; i < PCI_NUM_BARS; i++) {
r = &pdev->resource[i];
if (!(func->io_bar & (1 << i)))
pci_dev_switch_busnr(pdev));
if (sw == NULL) {
mutex_unlock(&switch_list_lock);
- printk(KERN_WARNING
- "PCI: can't allocate memory "
- "for sw of IO mulplexing %s", pci_name(pdev));
pr_warn("PCI: can't allocate memory "
"for sw of IO multiplexing %s",
pci_name(pdev));
return;
}
pci_iomul_switch_add_locked(sw);
if (slot == NULL) {
mutex_unlock(&sw->lock);
pci_iomul_switch_put(sw);
- printk(KERN_WARNING "PCI: can't allocate memory "
- "for IO mulplexing %s", pci_name(pdev));
+ pr_warn("PCI: can't allocate memory "
+ "for IO multiplexing %s", pci_name(pdev));
return;
}
pci_iomul_slot_add_locked(sw, slot);
}
- printk(KERN_INFO "PCI: disable device and release io resource[%s].\n",
- pci_name(pdev));
+ pr_info("PCI: disable device and release io resource[%s].\n",
+ pci_name(pdev));
pci_disable_device(pdev);
__quirk_iomul_dealloc_ioresource(sw, pdev, slot);
sw->io_limit = io_limit;
pci_dev_put(pdev);
- printk(KERN_INFO "PCI: bridge %s base 0x%x limit 0x%x\n",
- pci_name(bridge), sw->io_base, sw->io_limit);
+ pr_info("PCI: bridge %s base 0x%x limit 0x%x\n",
+ pci_name(bridge), sw->io_base, sw->io_limit);
}
static void __devinit pci_iomul_setup_brige(struct pci_dev *bridge,
pci_read_config_word(bridge, PCI_COMMAND, &cmd);
if (!(cmd & PCI_COMMAND_IO)) {
cmd |= PCI_COMMAND_IO;
- printk(KERN_INFO "PCI: Forcibly Enabling IO %s\n",
- pci_name(bridge));
+ pr_info("PCI: forcibly enabling IO %s\n", pci_name(bridge));
pci_write_config_word(bridge, PCI_COMMAND, cmd);
}
}
uint8_t num_bars = 0;
struct resource *r;
- printk(KERN_INFO "PCI: Forcibly assign IO %s from 0x%x\n",
- pci_name(pdev), io_base);
+ pr_info("PCI: Forcibly assign IO %s from 0x%x\n",
+ pci_name(pdev), io_base);
for (i = 0; i < PCI_NUM_BARS; i++) {
if (!(f->io_bar & (1 << i)))
if (request_resource(parent,
&sw->io_resource))
- printk(KERN_ERR
- "PCI IOMul: can't allocate "
+ pr_err("PCI IOMul: can't allocate "
"resource. [0x%x, 0x%x]",
sw->io_base, sw->io_limit);
}
ret = __pci_iomul_notifier_del_switch(pdev);
break;
default:
- printk(KERN_WARNING "PCI IOMUL: "
- "device %s has unknown header type %02x, ignoring.\n",
- pci_name(pdev), pdev->hdr_type);
+ pr_warn("PCI IOMUL: device %s has unknown "
+ "header type %02x, ignoring.\n",
+ pci_name(pdev), pdev->hdr_type);
ret = -EIO;
break;
}
error = misc_register(&pci_iomul_miscdev);
if (error != 0) {
- printk(KERN_ALERT "Couldn't register /dev/xen/pci_iomul");
+ pr_alert("Couldn't register /dev/xen/pci_iomul");
return error;
}
- printk("PCI IO multiplexer device installed.\n");
+ pr_info("PCI IO multiplexer device installed\n");
return 0;
}
PAGE_TO_LIST(p)->prev = NULL; \
} while(0)
-#define IPRINTK(fmt, args...) \
- printk(KERN_INFO "xen_mem: " fmt, ##args)
-#define WPRINTK(fmt, args...) \
- printk(KERN_WARNING "xen_mem: " fmt, ##args)
+#define IPRINTK(fmt, args...) pr_info("xen_mem: " fmt, ##args)
+#define WPRINTK(fmt, args...) pr_warning("xen_mem: " fmt, ##args)
/* balloon_append: add the given page to the balloon. */
static void balloon_append(struct page *page, int account)
err = register_xenbus_watch(&target_watch);
if (err)
- printk(KERN_ERR "Failed to set balloon watcher\n");
+ pr_err("Failed to set balloon watcher\n");
return NOTIFY_DONE;
}
entry = blkback_pagemap + idx;
if (!blkback_pagemap_entry_clear(entry)) {
- printk("overwriting pagemap %d: d %u b %u g %u\n",
- idx, entry->domid, entry->busid, entry->gref);
+ pr_emerg("overwriting pagemap %d: d %u b %u g %u\n",
+ idx, entry->domid, entry->busid, entry->gref);
BUG();
}
entry = blkback_pagemap + idx;
if (blkback_pagemap_entry_clear(entry)) {
- printk("clearing empty pagemap %d\n", idx);
+ pr_emerg("clearing empty pagemap %d\n", idx);
BUG();
}
entry = blkback_pagemap + idx;
if (blkback_pagemap_entry_clear(entry)) {
- printk("reading empty pagemap %d\n", idx);
+ pr_emerg("reading empty pagemap %d\n", idx);
BUG();
}
kfree(pending_reqs);
kfree(pending_grant_handles);
free_empty_pages_and_pagevec(pending_pages, mmap_pages);
- printk("%s: out of memory\n", __FUNCTION__);
+ pr_warning("%s: out of memory\n", __FUNCTION__);
return -ENOMEM;
}
#include "common.h"
#undef DPRINTK
-#define DPRINTK(_f, _a...) \
- printk("(%s() file=%s, line=%d) " _f "\n", \
- __PRETTY_FUNCTION__, __FILE__ , __LINE__ , ##_a )
+#define DPRINTK(_f, _a...) \
+ printk(KERN_DEBUG "(%s() file=%s, line=%d) " _f "\n", \
+ __func__, __FILE__ , __LINE__ , ##_a )
#define MEDIA_PRESENT "media-present"
struct xenbus_device *dev = blkif->be->dev;
unsigned long long new_size = vbd_size(vbd);
- printk(KERN_INFO "VBD Resize: new size %Lu\n", new_size);
+ pr_info("VBD Resize: new size %Lu\n", new_size);
vbd->size = new_size;
again:
err = xenbus_transaction_start(&xbt);
if (err) {
- printk(KERN_WARNING "Error starting transaction");
+ pr_warning("Error %d starting transaction", err);
return;
}
err = xenbus_printf(xbt, dev->nodename, "sectors", "%Lu",
vbd_size(vbd));
if (err) {
- printk(KERN_WARNING "Error writing new size");
+ pr_warning("Error %d writing new size", err);
goto abort;
}
/*
*/
err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
if (err) {
- printk(KERN_WARNING "Error writing the state");
+ pr_warning("Error %d writing the state", err);
goto abort;
}
if (err == -EAGAIN)
goto again;
if (err)
- printk(KERN_WARNING "Error ending transaction");
+ pr_warning("Error %d ending transaction", err);
abort:
xenbus_transaction_end(xbt, 1);
}
"sectors", "%Lu", §ors);
if (XENBUS_EXIST_ERR(err))
return;
- printk(KERN_INFO "Setting capacity to %Lu\n",
- sectors);
+ pr_info("Setting capacity to %Lu\n", sectors);
set_capacity(info->gd, sectors);
revalidate_disk(info->gd);
}
}
- /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
- command);*/
return -EINVAL; /* same return as native Linux */
}
switch (bret->operation) {
case BLKIF_OP_WRITE_BARRIER:
if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
- printk("blkfront: %s: write barrier op failed\n",
- info->gd->disk_name);
+ pr_warning("blkfront: %s:"
+ " write barrier op failed\n",
+ info->gd->disk_name);
ret = -EOPNOTSUPP;
info->feature_barrier = QUEUE_ORDERED_NONE;
xlvbd_barrier(info);
#define DPRINTK(_f, _a...) pr_debug(_f, ## _a)
#if 0
-#define DPRINTK_IOCTL(_f, _a...) printk(KERN_ALERT _f, ## _a)
+#define DPRINTK_IOCTL(_f, _a...) pr_alert(_f, ## _a)
#else
#define DPRINTK_IOCTL(_f, _a...) ((void)0)
#endif
return NULL;
}
- printk("xen-vbd: registered block device major %i\n", ptr->major);
+ pr_info("xen-vbd: registered block device major %i\n",
+ ptr->major);
}
ptr->minors = minors;
if ((vdevice>>EXT_SHIFT) > 1) {
/* this is above the extended range; something is wrong */
- printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", vdevice);
+ pr_warning("blkfront: vdevice %#x is above the extended range;"
+ " ignoring\n", vdevice);
return -ENODEV;
}
err = blk_queue_ordered(info->rq, info->feature_barrier);
if (err)
return err;
- printk(KERN_INFO "blkfront: %s: barriers %s\n",
- info->gd->disk_name, barrier);
+ pr_info("blkfront: %s: barriers %s\n",
+ info->gd->disk_name, barrier);
return 0;
}
#else
xlvbd_barrier(struct blkfront_info *info)
{
if (info->feature_barrier)
- printk(KERN_INFO "blkfront: %s: barriers disabled\n", info->gd->disk_name);
+ pr_info("blkfront: %s: barriers disabled\n", info->gd->disk_name);
return -ENOSYS;
}
#endif
struct vcd_generic_command *vgc;
if (cgc->buffer && cgc->buflen > MAX_PACKET_DATA) {
- printk(KERN_WARNING "%s() Packet buffer length is to large \n", __func__);
+ pr_warning("%s() Packet buffer length is too large\n", __func__);
return -EIO;
}
page = alloc_page(GFP_NOIO|__GFP_ZERO);
if (!page) {
- printk(KERN_CRIT "%s() Unable to allocate page\n", __func__);
+ pr_crit("%s() Unable to allocate page\n", __func__);
return -ENOMEM;
}
page = alloc_page(GFP_NOIO|__GFP_ZERO);
if (!page) {
- printk(KERN_CRIT "%s() Unable to allocate page\n", __func__);
+ pr_crit("%s() Unable to allocate page\n", __func__);
return -ENOMEM;
}
page = alloc_page(GFP_NOIO|__GFP_ZERO);
if (!page) {
- printk(KERN_CRIT "%s() Unable to allocate page\n", __func__);
+ pr_crit("%s() Unable to allocate page\n", __func__);
return -ENOMEM;
}
page = alloc_page(GFP_NOIO|__GFP_ZERO);
if (!page) {
- printk(KERN_CRIT "%s() Unable to allocate page\n", __func__);
+ pr_crit("%s() Unable to allocate page\n", __func__);
return -ENOMEM;
}
/* Create new vcd_disk and fill in cdrom_info */
vcd = (struct vcd_disk *)kzalloc(sizeof(struct vcd_disk), GFP_KERNEL);
if (!vcd) {
- printk(KERN_INFO "%s(): Unable to allocate vcd struct!\n", __func__);
+ pr_info("%s(): Unable to allocate vcd struct!\n", __func__);
goto out;
}
spin_lock_init(&vcd->vcd_cdrom_info_lock);
CDC_MRW | CDC_MRW_W | CDC_RAM);
if (register_cdrom(&(vcd->vcd_cdrom_info)) != 0) {
- printk(KERN_WARNING "%s() Cannot register blkdev as a cdrom %d!\n", __func__,
- gd->major);
+ pr_warning("%s() Cannot register blkdev as a cdrom %d!\n",
+ __func__, gd->major);
goto err_out;
}
gd->fops = &xencdrom_bdops;
case BLKTAP_IOCTL_PRINT_IDXS:
{
if (info) {
- printk("User Rings: \n-----------\n");
- printk("UF: rsp_cons: %2d, req_prod_prv: %2d "
+ pr_info("User Rings: \n-----------\n");
+ pr_info("UF: rsp_cons: %2d, req_prod_prv: %2d "
"| req_prod: %2d, rsp_prod: %2d\n",
info->ufe_ring.rsp_cons,
info->ufe_ring.req_prod_pvt,
#define DPRINTK(_f, _a...) pr_debug("(file=%s, line=%d) " _f, \
__FILE__ , __LINE__ , ## _a )
-#define WPRINTK(fmt, args...) printk(KERN_WARNING "blk_tap: " fmt, ##args)
+#define WPRINTK(fmt, args...) pr_warning("blktap: " fmt, ##args)
struct backend_info;
switch (frontend_state) {
case XenbusStateInitialising:
if (dev->state == XenbusStateClosed) {
- printk(KERN_INFO "%s: %s: prepare for reconnect\n",
- __FUNCTION__, dev->nodename);
+ pr_info("%s: %s: prepare for reconnect\n",
+ __FUNCTION__, dev->nodename);
xenbus_switch_state(dev, XenbusStateInitWait);
}
break;
xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
return -1;
}
- printk(KERN_INFO
- "blktap: ring-ref %ld, event-channel %d, protocol %d (%s)\n",
- ring_ref, evtchn, be->blkif->blk_protocol, protocol);
+ pr_info("blktap: ring-ref %ld, event-channel %d, protocol %d (%s)\n",
+ ring_ref, evtchn, be->blkif->blk_protocol, protocol);
/* Map the shared frame, irq etc. */
err = tap_blkif_map(be->blkif, ring_ref, evtchn);
#include "../blkback/blkback-pagemap.h"
#if 0
-#define DPRINTK_IOCTL(_f, _a...) printk(KERN_ALERT _f, ## _a)
+#define DPRINTK_IOCTL(_f, _a...) pr_alert(_f, ## _a)
#else
#define DPRINTK_IOCTL(_f, _a...) ((void)0)
#endif
return 0;
default:
- /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
- command);*/
return -EINVAL; /* same return as native Linux */
}
tty_set_operations(xencons_driver, &xencons_ops);
if ((rc = tty_register_driver(DRV(xencons_driver))) != 0) {
- printk("WARNING: Failed to register Xen virtual "
- "console driver as '%s%d'\n",
- DRV(xencons_driver)->name,
- DRV(xencons_driver)->name_base);
+ pr_warning("WARNING: Failed to register Xen virtual "
+ "console driver as '%s%d'\n",
+ DRV(xencons_driver)->name,
+ DRV(xencons_driver)->name_base);
put_tty_driver(xencons_driver);
xencons_driver = NULL;
return rc;
BUG_ON(xencons_priv_irq < 0);
}
- printk("Xen virtual console successfully installed as %s%d\n",
- DRV(xencons_driver)->name, xc_num);
+ pr_info("Xen virtual console successfully installed as %s%d\n",
+ DRV(xencons_driver)->name, xc_num);
return 0;
}
xen_start_info->console.domU.evtchn,
handle_input, 0, "xencons", NULL);
if (irq < 0) {
- printk(KERN_ERR "XEN console request irq failed %i\n", irq);
+ pr_err("XEN console request irq failed %i\n", irq);
return irq;
}
sprintf(dir, "cpu/%u", cpu);
err = xenbus_scanf(XBT_NIL, dir, "availability", "%s", state);
if (err != 1) {
- printk(KERN_ERR "XENBUS: Unable to read cpu state\n");
+ pr_err("XENBUS: Unable to read cpu state\n");
return;
}
cpumask_clear_cpu(cpu, xenbus_allowed_cpumask);
(void)cpu_down(cpu);
} else {
- printk(KERN_ERR "XENBUS: unknown state(%s) on CPU%d\n",
+ pr_err("XENBUS: unknown state(%s) on CPU%d\n",
state, cpu);
}
}
if (!is_initial_xendomain()) {
for_each_possible_cpu(i)
vcpu_hotplug(i);
- printk(KERN_INFO "Brought up %ld CPUs\n",
- (long)num_online_cpus());
+ pr_info("Brought up %ld CPUs\n", (long)num_online_cpus());
}
return NOTIFY_DONE;
continue;
err = cpu_down(cpu);
if (err) {
- printk(KERN_CRIT "Failed to take all CPUs "
- "down: %d.\n", err);
+ pr_crit("Failed to take all CPUs down: %d\n", err);
for_each_possible_cpu(cpu)
vcpu_hotplug(cpu);
return err;
if (local_cpu_hotplug_request()) {
cpumask_set_cpu(cpu, local_allowed_cpumask);
if (!cpumask_test_cpu(cpu, xenbus_allowed_cpumask)) {
- printk("%s: attempt to bring up CPU %u disallowed by "
- "remote admin.\n", __FUNCTION__, cpu);
+ pr_warning("%s: attempt to bring up CPU %u disallowed "
+ "by remote admin.\n", __FUNCTION__, cpu);
rc = -EBUSY;
}
} else if (!cpumask_test_cpu(cpu, local_allowed_cpumask) ||
domctl.v##ver.domain = domid; \
ret = hypervisor_domctl(&domctl) ?: domctl.v##ver.address_size.size; \
if (ret == 32 || ret == 64) { \
- printk("v" #ver " domctl worked ok: dom%d is %d-bit\n", \
- domid, ret); \
+ pr_info("v" #ver " domctl worked ok: dom%d is %d-bit\n",\
+ domid, ret); \
return ret; \
} \
} while (0)
#endif
ret = BITS_PER_LONG;
- printk("v%d...7 domctls failed, assuming dom%d is native: %d\n",
- low, domid, ret);
+ pr_warning("v%d...%d domctls failed, assuming dom%d is native: %d\n",
+ low, XEN_DOMCTL_INTERFACE_VERSION, domid, ret);
return ret;
}
if (!warned) {
warned = 1;
- printk(KERN_WARNING "No available IRQ to bind to: "
- "increase NR_DYNIRQS.\n");
+ pr_warning("No available IRQ to bind to: "
+ "increase NR_DYNIRQS.\n");
}
return -ENOSPC;
? 0 : BIND_PIRQ__WILL_SHARE;
if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
if (bind_pirq.flags)
- printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
- irq);
+ pr_info("Failed to obtain physical IRQ %d\n", irq);
return;
}
evtchn = bind_pirq.port;
desc->chip_data = kzalloc(sizeof(struct irq_cfg), GFP_ATOMIC);
}
if (!desc->chip_data) {
- printk(KERN_ERR "cannot alloc irq_cfg\n");
+ pr_emerg("cannot alloc irq_cfg\n");
BUG();
}
return 0;
} else if (type_from_irq(irq) != IRQT_PIRQ
|| index_from_irq(irq) != xen_pirq) {
- printk(KERN_ERR "IRQ#%d is already mapped to %d:%u - "
- "cannot map to PIRQ#%u\n",
+ pr_err("IRQ#%d is already mapped to %d:%u - "
+ "cannot map to PIRQ#%u\n",
irq, type_from_irq(irq), index_from_irq(irq), xen_pirq);
return -EINVAL;
}
resume_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
shared = ioremap(resume_frames, PAGE_SIZE * max_nr_gframes);
if (shared == NULL) {
- printk("error to ioremap gnttab share frames\n");
+ pr_warning("error to ioremap gnttab share frames\n");
return -1;
}
}
int rc;
if (strstr(boot_command_line, "crashkernel="))
- printk(KERN_WARNING "Ignoring crashkernel command line, "
- "parameter will be supplied by xen\n");
+ pr_warning("Ignoring crashkernel command line, "
+ "parameter will be supplied by xen\n");
if (!is_initial_xendomain())
return;
#if defined(__i386__) || defined(__x86_64__)
if (xen_feature(XENFEAT_auto_translated_physmap)) {
- printk(KERN_WARNING "Cannot suspend in "
- "auto_translated_physmap mode.\n");
+ pr_warning("Can't suspend in auto_translated_physmap mode\n");
return -EOPNOTSUPP;
}
#endif
suspend.resume_notifier = resume_notifier;
if (_check(dpm_suspend_start, PMSG_SUSPEND)) {
- printk(KERN_ERR "%s() failed: %d\n", what, err);
+ pr_err("%s() failed: %d\n", what, err);
return err;
}
if (_check(dpm_suspend_noirq, PMSG_SUSPEND)) {
xenbus_suspend_cancel();
dpm_resume_end(PMSG_RESUME);
- printk(KERN_ERR "%s() failed: %d\n", what, err);
+ pr_err("%s() failed: %d\n", what, err);
return err;
}
if (err) {
xenbus_suspend_cancel();
dpm_resume_end(PMSG_RESUME);
- printk(KERN_ERR "%s() failed: %d\n",
- what, err);
+ pr_err("%s() failed: %d\n", what, err);
return err;
}
-#define __KERNEL_SYSCALLS__
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/unistd.h>
daemonize("suspend");
err = set_cpus_allowed_ptr(current, cpumask_of(0));
if (err) {
- printk(KERN_ERR "Xen suspend can't run on CPU0 (%d)\n", err);
+ pr_err("Xen suspend can't run on CPU0 (%d)\n", err);
goto fail;
}
do {
err = __xen_suspend(fast_suspend, xen_resume_notifier);
if (err) {
- printk(KERN_ERR "Xen suspend failed (%d)\n", err);
+ pr_err("Xen suspend failed (%d)\n", err);
goto fail;
}
if (!suspend_cancelled)
NULL, CLONE_FS | CLONE_FILES);
if (err < 0) {
- printk(KERN_WARNING "Error creating shutdown process (%d): "
- "retrying...\n", -err);
+ pr_warning("Error creating shutdown process (%d): "
+ "retrying...\n", -err);
schedule_delayed_work(&shutdown_work, HZ/2);
}
}
else if (strcmp(str, "halt") == 0)
new_state = SHUTDOWN_HALT;
else
- printk("Ignoring shutdown request: %s\n", str);
+ pr_warning("Ignoring shutdown request: %s\n", str);
switch_shutdown_state(new_state);
if (err)
return;
if (!xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key)) {
- printk(KERN_ERR "Unable to read sysrq code in "
- "control/sysrq\n");
+ pr_err("Unable to read sysrq code in control/sysrq\n");
xenbus_transaction_end(xbt, 1);
return;
}
return -1;
port = irq_to_evtchn_port(irq);
- printk(KERN_INFO "suspend: event channel %d\n", port);
+ pr_info("suspend: event channel %d\n", port);
sprintf(portstr, "%d", port);
xenbus_write(XBT_NIL, "device/suspend", "event-channel", portstr);
err = register_xenbus_watch(&shutdown_watch);
if (err) {
- printk(KERN_ERR "Failed to set shutdown watcher\n");
+ pr_err("Failed to set shutdown watcher\n");
return err;
}
err = register_xenbus_watch(&sysrq_watch);
if (err) {
- printk(KERN_ERR "Failed to set sysrq watcher\n");
+ pr_err("Failed to set sysrq watcher\n");
return err;
}
/* suspend event channel */
err = setup_suspend_evtchn();
if (err) {
- printk(KERN_ERR "Failed to register suspend event channel\n");
+ pr_err("Failed to register suspend event channel\n");
return err;
}
extern void system_call(void);
extern void smp_trap_init(trap_info_t *);
-/* Number of siblings per CPU package */
-int smp_num_siblings = 1;
-
cpumask_var_t vcpu_initialized_mask;
DEFINE_PER_CPU(struct cpuinfo_x86, cpu_info);
#define set_cpu_to_apicid(cpu, apicid)
#endif
-DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
-DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
-
void __init prefill_possible_map(void)
{
int i, rc;
++total_cpus;
}
-static inline void
-set_cpu_sibling_map(unsigned int cpu)
-{
- cpu_data(cpu).phys_proc_id = cpu;
- cpu_data(cpu).cpu_core_id = 0;
-
- cpumask_copy(cpu_sibling_mask(cpu), cpumask_of(cpu));
- cpumask_copy(cpu_core_mask(cpu), cpumask_of(cpu));
-
- cpu_data(cpu).booted_cores = 1;
-}
-
-static void
-remove_siblinginfo(unsigned int cpu)
-{
- cpu_data(cpu).phys_proc_id = BAD_APICID;
- cpu_data(cpu).cpu_core_id = BAD_APICID;
-
- cpumask_clear(cpu_sibling_mask(cpu));
- cpumask_clear(cpu_core_mask(cpu));
-
- cpu_data(cpu).booted_cores = 0;
-}
-
static irqreturn_t ipi_interrupt(int irq, void *dev_id)
{
static void(*const handlers[])(struct pt_regs *) = {
apicid = 0;
if (HYPERVISOR_vcpu_op(VCPUOP_get_physid, 0, &cpu_id) == 0)
apicid = xen_vcpu_physid_to_x86_apicid(cpu_id.phys_id);
- boot_cpu_data.apicid = apicid;
cpu_data(0) = boot_cpu_data;
set_cpu_to_apicid(0, apicid);
current_thread_info()->cpu = 0;
- for_each_possible_cpu (cpu) {
- alloc_cpumask_var(&per_cpu(cpu_sibling_map, cpu), GFP_KERNEL);
- alloc_cpumask_var(&per_cpu(cpu_core_map, cpu), GFP_KERNEL);
- cpumask_clear(cpu_sibling_mask(cpu));
- cpumask_clear(cpu_core_mask(cpu));
- }
-
- set_cpu_sibling_map(0);
-
if (xen_smp_intr_init(0))
BUG();
apicid = xen_vcpu_physid_to_x86_apicid(cpu_id.phys_id);
cpu_data(cpu) = boot_cpu_data;
cpu_data(cpu).cpu_index = cpu;
- cpu_data(cpu).apicid = apicid;
set_cpu_to_apicid(cpu, apicid);
}
core_initcall(initialize_cpu_present_map);
-int __cpuexit __cpu_disable(void)
+int __cpuinit __cpu_disable(void)
{
unsigned int cpu = smp_processor_id();
if (cpu == 0)
return -EBUSY;
- remove_siblinginfo(cpu);
-
set_cpu_online(cpu, false);
fixup_irqs();
alternatives_smp_switch(1);
/* This must be done before setting cpu_online_map */
- set_cpu_sibling_map(cpu);
wmb();
rc = xen_smp_intr_init(cpu);
- if (rc) {
- remove_siblinginfo(cpu);
+ if (rc)
return rc;
- }
set_cpu_online(cpu, true);
idle_task_exit();
local_irq_disable();
cpumask_clear_cpu(smp_processor_id(), cpu_initialized_mask);
+ cpumask_clear_cpu(smp_processor_id(), vcpu_initialized_mask);
preempt_enable_no_resched();
VOID(HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL));
#ifdef CONFIG_HOTPLUG_CPU
if (!rc)
per_cpu(poll_evtchn, cpu) = bind_ipi.port;
else
- printk(KERN_WARNING
- "No spinlock poll event channel for CPU#%u (%d)\n",
- cpu, rc);
+ pr_warning("No spinlock poll event channel for CPU#%u (%d)\n",
+ cpu, rc);
- return 0;
+ return rc;
}
void __cpuinit xen_spinlock_cleanup(unsigned int cpu)
/* Create '/dev/xen/evtchn'. */
err = misc_register(&evtchn_miscdev);
if (err != 0) {
- printk(KERN_ALERT "Could not register /dev/xen/evtchn\n");
+ pr_alert("Could not register /dev/xen/evtchn\n");
return err;
}
mutex_unlock(&info->mm_lock);
if (x2 < x1 || y2 < y1) {
- printk("xenfb_update_screen bogus rect %d %d %d %d\n",
- x1, x2, y1, y2);
+ pr_warning("xenfb_update_screen bogus rect %d %d %d %d\n",
+ x1, x2, y1, y2);
WARN_ON(1);
}
xenfb_do_update(info, x1, y1, x2 - x1, y2 - y1);
input_report_key(dev, event->key.keycode,
event->key.pressed);
else
- printk("xenkbd: unhandled keycode 0x%x\n",
- event->key.keycode);
+ pr_warning("xenkbd: unhandled keycode 0x%x\n",
+ event->key.keycode);
break;
case XENKBD_TYPE_POS:
if (event->pos.rel_z)
struct device *device;
if (!is_running_on_xen()) {
- printk(KERN_ERR "You must be running Xen to use gntdev\n");
+ pr_err("You must be running Xen to use gntdev\n");
return -ENODEV;
}
gntdev_major = __register_chrdev(0, 0, 1, GNTDEV_NAME, &gntdev_fops);
if (gntdev_major < 0)
{
- printk(KERN_ERR "Could not register gntdev device\n");
+ pr_err("Could not register gntdev device\n");
return -ENOMEM;
}
MKDEV(gntdev_major, 0),
NULL, GNTDEV_NAME);
if (IS_ERR(device)) {
- printk(KERN_ERR "Error creating gntdev device in xen_class\n");
- printk(KERN_ERR "gntdev created with major number = %d\n",
- gntdev_major);
+ pr_err("Error creating gntdev device in xen_class\n");
+ pr_err("gntdev created, major number = %d\n", gntdev_major);
return 0;
}
gntdev_file_private_data_t *private_data = flip->private_data;
if (unlikely(!private_data)) {
- printk(KERN_ERR "File's private data is NULL.\n");
+ pr_err("file's private data is NULL\n");
return -EINVAL;
}
down_read(&private_data->grants_sem);
if (unlikely(!private_data->grants)) {
up_read(&private_data->grants_sem);
- printk(KERN_ERR "Attempted to mmap before ioctl.\n");
+ pr_err("attempted to mmap before ioctl\n");
return -EINVAL;
}
up_read(&private_data->grants_sem);
if (unlikely((size <= 0) ||
(size + slot_index) > private_data->grants_size)) {
- printk(KERN_ERR "Invalid number of pages or offset"
- "(num_pages = %d, first_slot = %ld).\n",
+ pr_err("Invalid number of pages or offset "
+ "(num_pages = %d, first_slot = %ld)\n",
size, slot_index);
return -ENXIO;
}
if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED)) {
- printk(KERN_ERR "Writable mappings must be shared.\n");
+ pr_err("writable mappings must be shared\n");
return -EINVAL;
}
for (i = 0; i < size; ++i) {
if (private_data->grants[slot_index + i].state !=
GNTDEV_SLOT_NOT_YET_MAPPED) {
- printk(KERN_ERR "Slot (index = %ld) is in the wrong "
- "state (%d).\n", slot_index + i,
+ pr_err("Slot (index = %ld) is in the wrong "
+ "state (%d)\n", slot_index + i,
private_data->grants[slot_index + i].state);
up_write(&private_data->grants_sem);
return -EINVAL;
vma->vm_private_data = kzalloc(size * sizeof(struct page *),
GFP_KERNEL);
if (vma->vm_private_data == NULL) {
- printk(KERN_ERR "Couldn't allocate mapping structure for VM "
- "area.\n");
+ pr_err("couldn't allocate mapping structure for VM area\n");
return -ENOMEM;
}
BUG_ON(ret);
if (op.status != GNTST_okay) {
if (op.status != GNTST_eagain)
- printk(KERN_ERR "Error mapping the grant reference "
+ pr_err("Error mapping the grant reference "
"into the kernel (%d). domid = %d; ref = %d\n",
op.status,
private_data->grants[slot_index+i]
+ (i << PAGE_SHIFT),
&ptep)))
{
- printk(KERN_ERR "Error obtaining PTE pointer "
- "(%d).\n", ret);
+ pr_err("Error obtaining PTE pointer (%d)\n",
+ ret);
goto undo_map_out;
}
&op, 1);
BUG_ON(ret);
if (op.status != GNTST_okay) {
- printk(KERN_ERR "Error mapping the grant "
+ pr_err("Error mapping the grant "
"reference into user space (%d). domid "
"= %d; ref = %d\n", op.status,
private_data->grants[slot_index+i].u
GNTTABOP_unmap_grant_ref, &op, 1);
BUG_ON(ret);
if (op.status != GNTST_okay)
- printk("User unmap grant status = %d\n",
- op.status);
+ pr_warning("User unmap grant status = %d\n",
+ op.status);
} else {
/* USING SHADOW PAGE TABLES. */
copy = xen_ptep_get_and_clear_full(vma, addr, ptep, is_fullmm);
&op, 1);
BUG_ON(ret);
if (op.status != GNTST_okay)
- printk("Kernel unmap grant status = %d\n", op.status);
+ pr_warning("Kernel unmap grant status = %d\n",
+ op.status);
/* Return slot to the not-yet-mapped state, so that it may be
up_write(&private_data->grants_sem);
if (rc) {
- printk (KERN_ERR "Initialising gntdev private data "
- "failed.\n");
+ pr_err("Initialising gntdev private data failed\n");
return rc;
}
}
if (op.count == 1) {
if ((rc = add_grant_reference(flip, &op.refs[0],
&op.index)) < 0) {
- printk(KERN_ERR "Adding grant reference "
- "failed (%d).\n", rc);
+ pr_err("Adding grant reference failed (%d)\n",
+ rc);
goto map_out;
}
} else {
if ((rc = copy_from_user(refs,
(void __user *)u,
sizeof(*refs) * op.count))) {
- printk(KERN_ERR "Copying refs from user failed"
- " (%d).\n", rc);
+ pr_err("Copying refs from user failed (%d)\n",
+ rc);
rc = -EINVAL;
goto map_out;
}
if ((rc = find_contiguous_free_range(flip, op.count))
< 0) {
- printk(KERN_ERR "Finding contiguous range "
- "failed (%d).\n", rc);
+ pr_err("Finding contiguous range failed"
+ " (%d)\n", rc);
kfree(refs);
goto map_out;
}
op.index = rc << PAGE_SHIFT;
if ((rc = add_grant_references(flip, op.count,
refs, rc))) {
- printk(KERN_ERR "Adding grant references "
- "failed (%d).\n", rc);
+ pr_err("Adding grant references failed (%d)\n",
+ rc);
kfree(refs);
goto map_out;
}
if ((rc = copy_to_user((void __user *) arg,
&op,
sizeof(op)))) {
- printk(KERN_ERR "Copying result back to user failed "
- "(%d)\n", rc);
+ pr_err("Copying result back to user failed (%d)\n",
+ rc);
rc = -EFAULT;
goto map_out;
}
!= GNTDEV_SLOT_NOT_YET_MAPPED)) {
if (private_data->grants[start_index + i].state
== GNTDEV_SLOT_INVALID) {
- printk(KERN_ERR
- "Tried to remove an invalid "
+ pr_err("Tried to remove an invalid "
"grant at offset 0x%x.",
(start_index + i)
<< PAGE_SHIFT);
rc = -EINVAL;
} else {
- printk(KERN_ERR
- "Tried to remove a grant which "
+ pr_err("Tried to remove a grant which "
"is currently mmap()-ed at "
"offset 0x%x.",
(start_index + i)
goto get_offset_unlock_out;
}
if ((!vma->vm_ops) || (vma->vm_ops != &gntdev_vmops)) {
- printk(KERN_ERR "The vaddr specified does not belong "
+ pr_err("The vaddr specified does not belong "
"to a gntdev instance: %#lx\n", vaddr);
rc = -EFAULT;
goto get_offset_unlock_out;
}
if (vma->vm_start != vaddr) {
- printk(KERN_ERR "The vaddr specified in an "
+ pr_err("The vaddr specified in an "
"IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR must be at "
"the start of the VM area. vma->vm_start = "
"%#lx; vaddr = %#lx\n",
#define DPRINTK(_f, _a...) \
pr_debug("(file=%s, line=%d) " _f, \
__FILE__ , __LINE__ , ## _a )
-#define IPRINTK(fmt, args...) \
- printk(KERN_INFO "xen_net: " fmt, ##args)
-#define WPRINTK(fmt, args...) \
- printk(KERN_WARNING "xen_net: " fmt, ##args)
+#define IPRINTK(fmt, args...) pr_info("xen_net: " fmt, ##args)
+#define WPRINTK(fmt, args...) pr_warning("xen_net: " fmt, ##args)
typedef struct netif_st {
/* Unique identifier for this interface. */
netif_t *netif;
unsigned int i = 0, group;
- printk(KERN_ALERT "netif_schedule_list:\n");
+ pr_alert("netif_schedule_list:\n");
for (group = 0; group < netbk_nr_groups; ++group) {
struct xen_netbk *netbk = &xen_netbk[group];
list_for_each(ent, &netbk->net_schedule_list) {
netif = list_entry(ent, netif_t, list);
- printk(KERN_ALERT " %d: private(rx_req_cons=%08x "
- "rx_resp_prod=%08x\n",
- i, netif->rx.req_cons, netif->rx.rsp_prod_pvt);
- printk(KERN_ALERT " tx_req_cons=%08x tx_resp_prod=%08x)\n",
- netif->tx.req_cons, netif->tx.rsp_prod_pvt);
- printk(KERN_ALERT " shared(rx_req_prod=%08x "
- "rx_resp_prod=%08x\n",
- netif->rx.sring->req_prod, netif->rx.sring->rsp_prod);
- printk(KERN_ALERT " rx_event=%08x tx_req_prod=%08x\n",
- netif->rx.sring->rsp_event, netif->tx.sring->req_prod);
- printk(KERN_ALERT " tx_resp_prod=%08x, tx_event=%08x)\n",
- netif->tx.sring->rsp_prod, netif->tx.sring->rsp_event);
+ pr_alert(" %d: private(rx_req_cons=%08x "
+ "rx_resp_prod=%08x\n", i,
+ netif->rx.req_cons, netif->rx.rsp_prod_pvt);
+ pr_alert(" tx_req_cons=%08x tx_resp_prod=%08x)\n",
+ netif->tx.req_cons, netif->tx.rsp_prod_pvt);
+ pr_alert(" shared(rx_req_prod=%08x "
+ "rx_resp_prod=%08x\n",
+ netif->rx.sring->req_prod,
+ netif->rx.sring->rsp_prod);
+ pr_alert(" rx_event=%08x tx_req_prod=%08x\n",
+ netif->rx.sring->rsp_event,
+ netif->tx.sring->req_prod);
+ pr_alert(" tx_resp_prod=%08x, tx_event=%08x)\n",
+ netif->tx.sring->rsp_prod,
+ netif->tx.sring->rsp_event);
i++;
}
spin_unlock_irq(&netbk->netbk->net_schedule_list_lock);
}
- printk(KERN_ALERT " ** End of netif_schedule_list **\n");
+ pr_alert(" ** End of netif_schedule_list **\n");
return IRQ_HANDLED;
}
PAGE_KERNEL);
} while (!xen_netbk && (netbk_nr_groups >>= 1));
if (!xen_netbk) {
- printk(KERN_ALERT "%s: out of memory\n", __func__);
+ pr_err("%s: out of memory\n", __func__);
return -ENOMEM;
}
if (group && netbk_nr_groups != group)
- printk(KERN_WARNING
- "netback: only using %u (instead of %u) groups\n",
- netbk_nr_groups, group);
+ pr_warning("netback: only using %u (instead of %u) groups\n",
+ netbk_nr_groups, group);
/* We can increase reservation by this much in net_rx_action(). */
balloon_update_driver_allowance(netbk_nr_groups * NET_RX_RING_SIZE);
netbk->mmap_pages =
alloc_empty_pages_and_pagevec(MAX_PENDING_REQS);
if (netbk->mmap_pages == NULL) {
- printk(KERN_ALERT "%s: out of memory\n", __func__);
+ pr_err("%s: out of memory\n", __func__);
rc = -ENOMEM;
goto failed_init;
}
"netback/%u", group);
if (IS_ERR(netbk->task)) {
- printk(KERN_ALERT
- "kthread_create() fails at netback\n");
+ pr_alert("netback: kthread_create() failed\n");
rc = PTR_ERR(netbk->task);
goto failed_init;
}
switch (frontend_state) {
case XenbusStateInitialising:
if (dev->state == XenbusStateClosed) {
- printk(KERN_INFO "%s: %s: prepare for reconnect\n",
- __FUNCTION__, dev->nodename);
+ pr_info("%s: %s: prepare for reconnect\n",
+ __FUNCTION__, dev->nodename);
xenbus_switch_state(dev, XenbusStateInitWait);
}
break;
#define DPRINTK(fmt, args...) \
pr_debug("netfront/accel (%s:%d) " fmt, \
__FUNCTION__, __LINE__, ##args)
-#define IPRINTK(fmt, args...) \
- printk(KERN_INFO "netfront/accel: " fmt, ##args)
-#define WPRINTK(fmt, args...) \
- printk(KERN_WARNING "netfront/accel: " fmt, ##args)
+#define IPRINTK(fmt, args...) pr_info("netfront/accel: " fmt, ##args)
+#define WPRINTK(fmt, args...) pr_warning("netfront/accel: " fmt, ##args)
static int netfront_remove_accelerator(struct netfront_info *np,
struct xenbus_device *dev);
#define DPRINTK(fmt, args...) \
pr_debug("netfront (%s:%d) " fmt, \
__FUNCTION__, __LINE__, ##args)
-#define IPRINTK(fmt, args...) \
- printk(KERN_INFO "netfront: " fmt, ##args)
-#define WPRINTK(fmt, args...) \
- printk(KERN_WARNING "netfront: " fmt, ##args)
+#define IPRINTK(fmt, args...) pr_info("netfront: " fmt, ##args)
+#define WPRINTK(fmt, args...) pr_warning("netfront: " fmt, ##args)
static int setup_device(struct xenbus_device *, struct netfront_info *);
static struct net_device *create_netdev(struct xenbus_device *);
err = register_netdev(info->netdev);
if (err) {
- printk(KERN_WARNING "%s: register_netdev err=%d\n",
- __FUNCTION__, err);
+ pr_warning("%s: register_netdev err=%d\n",
+ __FUNCTION__, err);
goto fail;
}
err = xennet_sysfs_addif(info->netdev);
if (err) {
unregister_netdev(info->netdev);
- printk(KERN_WARNING "%s: add sysfs failed err=%d\n",
- __FUNCTION__, err);
+ pr_warning("%s: add sysfs failed err=%d\n",
+ __FUNCTION__, err);
goto fail;
}
skb = np->tx_skbs[id];
if (unlikely(gnttab_query_foreign_access(
np->grant_tx_ref[id]) != 0)) {
- printk(KERN_ALERT "network_tx_buf_gc: warning "
- "-- grant still in use by backend "
- "domain.\n");
+ pr_alert("network_tx_buf_gc: grant still"
+ " in use by backend domain\n");
BUG();
}
gnttab_end_foreign_access_ref(np->grant_tx_ref[id]);
frags += DIV_ROUND_UP(offset + len, PAGE_SIZE);
if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
- printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
- frags);
+ pr_alert("xennet: skb rides the rocket: %d frags\n", frags);
dump_stack();
goto drop;
}
netdev = alloc_etherdev(sizeof(struct netfront_info));
if (!netdev) {
- printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
- __FUNCTION__);
+ pr_warning("%s: alloc_etherdev failed\n", __FUNCTION__);
return ERR_PTR(-ENOMEM);
}
/* A grant for every tx ring slot */
if (gnttab_alloc_grant_references(TX_MAX_TARGET,
&np->gref_tx_head) < 0) {
- printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
+ pr_alert("#### netfront can't alloc tx grant refs\n");
err = -ENOMEM;
goto exit;
}
/* A grant for every rx ring slot */
if (gnttab_alloc_grant_references(RX_MAX_TARGET,
&np->gref_rx_head) < 0) {
- printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
+ pr_alert("#### netfront can't alloc rx grant refs\n");
err = -ENOMEM;
goto exit_free_tx;
}
status = pci_enable_msi(dev);
if (status) {
- printk("error enable msi for guest %x status %x\n", otherend, status);
+ pr_err("error enable msi for guest %x status %x\n",
+ otherend, status);
op->value = 0;
return XEN_PCI_ERR_op_failed;
}
pci_name(dev));
err = pci_set_mwi(dev);
if (err) {
- printk(KERN_WARNING
- "pciback: %s: cannot enable memory-write-invalidate (%d)\n",
- pci_name(dev), err);
+ pr_warning("pciback: %s: cannot enable"
+ " memory-write-invalidate (%d)\n",
+ pci_name(dev), err);
value &= ~PCI_COMMAND_INVALIDATE;
}
}
struct pci_bar_info *bar = data;
if (unlikely(!bar)) {
- printk(KERN_WARNING "pciback: driver data not found for %s\n",
- pci_name(dev));
+ pr_warning("pciback: driver data not found for %s\n",
+ pci_name(dev));
return XEN_PCI_ERR_op_failed;
}
struct pci_bar_info *bar = data;
if (unlikely(!bar)) {
- printk(KERN_WARNING "pciback: driver data not found for %s\n",
- pci_name(dev));
+ pr_warning("pciback: driver data not found for %s\n",
+ pci_name(dev));
return XEN_PCI_ERR_op_failed;
}
struct pci_bar_info *bar = data;
if (unlikely(!bar)) {
- printk(KERN_WARNING "pciback: driver data not found for %s\n",
- pci_name(dev));
+ pr_warning("pciback: driver data not found for %s\n",
+ pci_name(dev));
return XEN_PCI_ERR_op_failed;
}
default:
err = -EINVAL;
- printk(KERN_ERR "pciback: %s: Unsupported header type %d!\n",
+ pr_err("pciback: %s: Unsupported header type %d!\n",
pci_name(dev), dev->hdr_type);
break;
}
found_psdev->pdev);
if (found_psdev->pdev) {
- printk(KERN_WARNING "pciback: ****** removing device "
- "%s while still in-use! ******\n",
- pci_name(found_psdev->dev));
- printk(KERN_WARNING "pciback: ****** driver domain may "
- "still access this device's i/o resources!\n");
- printk(KERN_WARNING "pciback: ****** shutdown driver "
- "domain before binding device\n");
- printk(KERN_WARNING "pciback: ****** to other drivers "
- "or domains\n");
+ pr_warning("pciback: ****** removing device %s"
+ " while still in-use! ******\n",
+ pci_name(found_psdev->dev));
+ pr_warning("pciback: ****** driver domain may still"
+ " access this device's i/o resources!\n");
+ pr_warning("pciback: ****** shutdown driver "
+ "domain before binding device\n");
+ pr_warning("pciback: ****** to other drivers "
+ "or domains\n");
pciback_release_pci_dev(found_psdev->pdev,
found_psdev->dev);
return err;
parse_error:
- printk(KERN_ERR "pciback: Error parsing pci_devs_to_hide at \"%s\"\n",
+ pr_err("pciback: Error parsing pci_devs_to_hide at \"%s\"\n",
pci_devs_to_hide + pos);
return -EINVAL;
}
for (bus = 0; bus < PCI_BUS_NBR; bus++)
for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
if (slot_dev->slots[bus][slot] == NULL) {
- printk(KERN_INFO
- "pciback: slot: %s: assign to virtual slot %d, bus %d\n",
- pci_name(dev), slot, bus);
+ pr_info("pciback: slot: %s: assign to"
+ " virtual slot %d, bus %d\n",
+ pci_name(dev), slot, bus);
slot_dev->slots[bus][slot] = dev;
goto unlock;
}
/* Assign to a new slot on the virtual PCI bus */
for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
if (list_empty(&vpci_dev->dev_list[slot])) {
- printk(KERN_INFO
- "pciback: vpci: %s: assign to virtual slot %d\n",
- pci_name(dev), slot);
+ pr_info("pciback: vpci: %s:"
+ " assign to virtual slot %d\n",
+ pci_name(dev), slot);
list_add_tail(&dev_entry->list,
&vpci_dev->dev_list[slot]);
func = PCI_FUNC(dev->devfn);
return -ENODEV;
pciback_wq = create_workqueue("pciback_workqueue");
if (!pciback_wq) {
- printk(KERN_ERR "pciback_xenbus_register: create"
- "pciback_workqueue failed\n");
+ pr_err("pciback_xenbus_register: create workqueue failed\n");
return -EFAULT;
}
return xenbus_register_backend(&xenbus_pciback_driver);
return; /* No resources, nothing to do */
if (magic != (sizeof(res) * 2) + 1) {
- printk(KERN_WARNING "pcifront: resource magic mismatch\n");
+ pr_warning("pcifront: resource magic mismatch\n");
return;
}
err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
"%s", buf);
if (err != 1) {
- printk(KERN_WARNING "pcifront: error reading "
- "resource %d on bus %04x:%02x\n",
- j, domain, bus);
+ pr_warning("pcifront: error reading "
+ "resource %d on bus %04x:%02x\n",
+ j, domain, bus);
continue;
}
struct pcifront_device *pdev = pcifront_get_pdev(sd);
if (nvec > SH_INFO_MAX_VEC) {
- printk("too much vector for pci frontend%x\n", nvec);
+ pr_warning("too many vectors (%#x) for pci frontend\n", nvec);
return -EINVAL;
}
return 0;
}
else {
- printk("enable msix get value %x\n", op.value);
+ pr_err("enable msix get value %#x\n", op.value);
return op.value;
}
}
else {
- printk("enable msix get err %x\n", err);
+ pr_err("enable msix err %#x\n", err);
return err;
}
}
/* What should do for error ? */
if (err)
- printk("pci_disable_msix get err %x\n", err);
+ pr_err("disable msix err %#x\n", err);
}
int pci_frontend_enable_msi(struct pci_dev *dev)
dev->irq = op.value;
}
else {
- printk("pci frontend enable msi failed for dev %x:%x \n",
- op.bus, op.devfn);
+ pr_err("pci frontend enable msi failed for dev %x:%x\n",
+ op.bus, op.devfn);
err = -EINVAL;
}
return err;
err = do_pci_op(pdev, &op);
if (err == XEN_PCI_ERR_dev_not_found) {
/* XXX No response from backend, what shall we do? */
- printk("get no response from backend for disable MSI\n");
+ pr_err("no response from backend for disable MSI\n");
return;
}
if (likely(!err))
dev->irq = op.value;
else
/* how can pciback notify us fail? */
- printk("get fake response frombackend \n");
+ pr_err("got bogus response from backend\n");
}
#endif /* CONFIG_PCI_MSI */
/* Claim resources for the PCI frontend as-is, backend won't allow changes */
-static int pcifront_claim_resource(struct pci_dev *dev, void *data)
+static int __devinit pcifront_claim_resource(struct pci_dev *dev, void *data)
{
struct pcifront_device *pdev = data;
int i;
}
d = pci_scan_single_device(b, devfn);
- if (d) {
- int err;
-
+ if (d)
dev_info(&pdev->xdev->dev, "New device on "
"%04x:%02x:%02x.%02x found.\n", domain, bus,
PCI_SLOT(devfn), PCI_FUNC(devfn));
- err = pci_bus_add_device(d);
- if (err)
- dev_err(&pdev->xdev->dev,
- "error %d adding device, continuing.\n",
- err);
- }
}
+ /* Claim resources before going "live" with our devices */
+ pci_walk_bus(b, pcifront_claim_resource, pdev);
+
+ /* Create SysFS and notify udev of the devices. Aka: "going live" */
+ pci_bus_add_devices(b);
+
return 0;
}
int evtchn;
int gnt_ref;
+ int irq;
/* Lock this when doing any operations in sh_info */
spinlock_t sh_info_lock;
pdev->evtchn = INVALID_EVTCHN;
pdev->gnt_ref = INVALID_GRANT_REF;
+ pdev->irq = -1;
INIT_WORK(&pdev->op_work, pcifront_do_aer);
/*For PCIE_AER error handling job*/
flush_scheduled_work();
- unbind_from_irqhandler(pdev->evtchn, pdev);
+
+ if (pdev->irq > 0)
+ unbind_from_irqhandler(pdev->irq, pdev);
if (pdev->evtchn != INVALID_EVTCHN)
xenbus_free_evtchn(pdev->xdev, pdev->evtchn);
if (pdev->gnt_ref != INVALID_GRANT_REF)
gnttab_end_foreign_access(pdev->gnt_ref,
(unsigned long)pdev->sh_info);
+ else
+ free_page((unsigned long)pdev->sh_info);
dev_set_drvdata(&pdev->xdev->dev, NULL);
if (err)
goto out;
- bind_caller_port_to_irqhandler(pdev->evtchn, pcifront_handler_aer,
- IRQF_SAMPLE_RANDOM, "pcifront", pdev);
+ err = bind_caller_port_to_irqhandler(pdev->evtchn,
+ pcifront_handler_aer,
+ IRQF_SAMPLE_RANDOM,
+ "pcifront", pdev);
+ if (err < 0) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Failed to bind event channel");
+ goto out;
+ }
+ pdev->irq = err;
do_publish:
err = xenbus_transaction_start(&trans);
}
err = pcifront_publish_info(pdev);
+ if (err)
+ free_pdev(pdev);
out:
return err;
for_each_sg (sgl, sg, nr_sg, i) {
if (sg_page(sg) == NULL) {
- printk(KERN_WARNING "%s: inconsistent length field in "
- "scatterlist\n", __FUNCTION__);
+ pr_warning("%s: inconsistent length field in "
+ "scatterlist\n", __FUNCTION__);
return -ENOMEM;
}
from += copy_size;
}
- printk(KERN_WARNING "%s: no space in scatterlist\n",
- __FUNCTION__);
+ pr_warning("%s: no space in scatterlist\n", __FUNCTION__);
return -ENOMEM;
}
for_each_sg (sgl, sg, nr_sg, i) {
if (sg_page(sg) == NULL) {
- printk(KERN_WARNING "%s: inconsistent length field in "
- "scatterlist\n", __FUNCTION__);
+ pr_warning("%s: inconsistent length field in "
+ "scatterlist\n", __FUNCTION__);
return -ENOMEM;
}
from_rest = sg->length;
if ((from_rest > 0) && (to_capa < from_rest)) {
- printk(KERN_WARNING
- "%s: no space in destination buffer\n",
- __FUNCTION__);
+ pr_warning("%s: no space in destination buffer\n",
+ __FUNCTION__);
return -ENOMEM;
}
copy_size = from_rest;
+ VSCSI_REPORT_LUNS_HEADER;
retry:
if ((buff = kzalloc(alloc_len, GFP_KERNEL)) == NULL) {
- printk(KERN_ERR "scsiback:%s kmalloc err\n", __FUNCTION__);
+ pr_err("scsiback:%s kmalloc err\n", __FUNCTION__);
goto fail;
}
gnttab_check_GNTST_eagain_do_while(GNTTABOP_map_grant_ref, &op);
if (op.status != GNTST_okay) {
- printk(KERN_ERR "scsiback: Grant table operation failure %d!\n", (int)op.status);
+ pr_err("scsiback: Grant table operation failure %d!\n",
+ (int)op.status);
ret = -EINVAL;
} else {
info->shmem_ref = ring_ref;
int err;
if (info->irq) {
- printk(KERN_ERR "scsiback: Already connected through?\n");
+ pr_err("scsiback: Already connected through?\n");
return -1;
}
scsiback_cachep = kmem_cache_create("vscsiif_cache",
sizeof(struct vscsibk_info), 0, 0, NULL);
if (!scsiback_cachep) {
- printk(KERN_ERR "scsiback: can't init scsi cache\n");
+ pr_err("scsiback: can't init scsi cache\n");
return -ENOMEM;
}
{
struct scsi_device *sdev = pending_req->sdev;
- printk(KERN_ERR "scsiback: %d:%d:%d:%d ",sdev->host->host_no,
- sdev->channel, sdev->id, sdev->lun);
- printk(KERN_ERR "status = 0x%02x, message = 0x%02x, host = 0x%02x, driver = 0x%02x\n",
- status_byte(errors), msg_byte(errors),
- host_byte(errors), driver_byte(errors));
+ pr_err("scsiback: %d:%d:%d:%d ",
+ sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
+ pr_err("status = 0x%02x, message = 0x%02x, host = 0x%02x,"
+ " driver = 0x%02x\n",
+ status_byte(errors), msg_byte(errors),
+ host_byte(errors), driver_byte(errors));
- printk(KERN_ERR "scsiback: cmnd[0]=0x%02X\n",
- pending_req->cmnd[0]);
+ pr_err("scsiback: cmnd[0]=0x%02X\n", pending_req->cmnd[0]);
if (CHECK_CONDITION & status_byte(errors))
__scsi_print_sense("scsiback", sense_buffer, SCSI_SENSE_BUFFERSIZE);
pending_req->sgl = kmalloc(sizeof(struct scatterlist) * nr_segments,
GFP_KERNEL);
if (!pending_req->sgl) {
- printk(KERN_ERR "scsiback: %s: kmalloc() error.\n", __FUNCTION__);
+ pr_err("scsiback: %s: kmalloc() error\n", __FUNCTION__);
return -ENOMEM;
}
if (unlikely(map[i].status == GNTST_eagain))
gnttab_check_GNTST_eagain_while(GNTTABOP_map_grant_ref, &map[i]);
if (unlikely(map[i].status != GNTST_okay)) {
- printk(KERN_ERR "scsiback: invalid buffer -- could not remap it\n");
+ pr_err("scsiback: invalid buffer -- could not remap it\n");
map[i].handle = SCSIBACK_INVALID_HANDLE;
err |= 1;
}
struct bio *bio = request_map_sg(pending_req);
if (IS_ERR(bio)) {
- printk(KERN_ERR "scsiback: SG Request Map Error\n");
+ pr_err("scsiback: SG Request Map Error\n");
return;
}
rq = blk_make_request(pending_req->sdev->request_queue, bio,
GFP_KERNEL);
if (IS_ERR(rq)) {
- printk(KERN_ERR "scsiback: Make Request Error\n");
+ pr_err("scsiback: Make Request Error\n");
return;
}
rq = blk_get_request(pending_req->sdev->request_queue, write,
GFP_KERNEL);
if (unlikely(!rq)) {
- printk(KERN_ERR "scsiback: Get Request Error\n");
+ pr_err("scsiback: Get Request Error\n");
return;
}
}
} else if (pending_req->act == VSCSIIF_ACT_SCSI_RESET) {
scsiback_device_reset_exec(pending_req);
} else {
- printk(KERN_ERR "scsiback: invalid parameter for request\n");
+ pr_err("scsiback: invalid parameter for request\n");
scsiback_do_resp_with_sense(NULL, (DRIVER_ERROR << 24),
0, pending_req);
continue;
kfree(pending_reqs);
kfree(pending_grant_handles);
free_empty_pages_and_pagevec(pending_pages, mmap_pages);
- printk(KERN_ERR "scsiback: %s: out of memory\n", __FUNCTION__);
+ pr_err("scsiback: %s: out of memory\n", __FUNCTION__);
return -ENOMEM;
}
if ((entry->v.chn == v->chn) &&
(entry->v.tgt == v->tgt) &&
(entry->v.lun == v->lun)) {
- printk(KERN_WARNING "scsiback: Virtual ID is already used. "
- "Assignment was not performed.\n");
+ pr_warning("scsiback: Virtual ID is already used. "
+ "Assignment was not performed.\n");
err = -EEXIST;
goto out;
}
/* Create a new translation entry and add to the list */
if ((new = kmalloc(sizeof(struct v2p_entry), GFP_ATOMIC)) == NULL) {
- printk(KERN_ERR "scsiback: %s: kmalloc() error.\n", __FUNCTION__);
+ pr_err("scsiback: %s: kmalloc() error\n", __FUNCTION__);
err = -ENOMEM;
goto out;
}
shost = scsi_host_lookup(phy->hst);
if (IS_ERR(shost)) {
- printk(KERN_ERR "scsiback: host%d doesn't exist.\n",
- phy->hst);
+ pr_err("scsiback: host%d doesn't exist\n", phy->hst);
return NULL;
}
sdev = scsi_device_lookup(shost, phy->chn, phy->tgt, phy->lun);
if (!sdev) {
- printk(KERN_ERR "scsiback: %d:%d:%d:%d doesn't exist.\n",
- phy->hst, phy->chn, phy->tgt, phy->lun);
+ pr_err("scsiback: %d:%d:%d:%d doesn't exist\n",
+ phy->hst, phy->chn, phy->tgt, phy->lun);
scsi_host_put(shost);
return NULL;
}
if (!err) {
if (xenbus_printf(XBT_NIL, dev->nodename, state_str,
"%d", XenbusStateInitialised)) {
- printk(KERN_ERR "scsiback: xenbus_printf error %s\n", state_str);
+ pr_err("scsiback: xenbus_printf error %s\n",
+ state_str);
scsiback_del_translation_entry(be->info, &vir);
}
} else {
if (!scsiback_del_translation_entry(be->info, &vir)) {
if (xenbus_printf(XBT_NIL, dev->nodename, state_str,
"%d", XenbusStateClosed))
- printk(KERN_ERR "scsiback: xenbus_printf error %s\n", state_str);
+ pr_err("scsiback: xenbus_printf error %s\n",
+ state_str);
}
}
break;
/* modify vscsi-devs/dev-x/state */
if (xenbus_printf(XBT_NIL, dev->nodename, state_str,
"%d", XenbusStateConnected)) {
- printk(KERN_ERR "scsiback: xenbus_printf error %s\n", state_str);
+ pr_err("scsiback: xenbus_printf error %s\n",
+ state_str);
scsiback_del_translation_entry(be->info, &vir);
xenbus_printf(XBT_NIL, dev->nodename, state_str,
"%d", XenbusStateClosed);
fail:
- printk(KERN_WARNING "scsiback: %s failed\n",__FUNCTION__);
+ pr_warning("scsiback: %s failed\n",__FUNCTION__);
scsiback_remove(dev);
return err;
for (i = 0; i < s->nr_segments; i++) {
if (unlikely(gnttab_query_foreign_access(
s->gref[i]) != 0)) {
- printk(KERN_ALERT "scsifront: "
- "grant still in use by backend.\n");
+ pr_alert("scsifront: "
+ "grant still in use by backend\n");
BUG();
}
gnttab_end_foreign_access(s->gref[i], 0UL);
err = gnttab_alloc_grant_references(VSCSIIF_SG_TABLESIZE, &gref_head);
if (err) {
- printk(KERN_ERR "scsifront: gnttab_alloc_grant_references() error\n");
+ pr_err("scsifront: gnttab_alloc_grant_references() error\n");
return -ENOMEM;
}
nr_pages = (data_len + sgl->offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (nr_pages > VSCSIIF_SG_TABLESIZE) {
- printk(KERN_ERR "scsifront: Unable to map request_buffer for command!\n");
+ pr_err("scsifront: Unable to map request_buffer for command!\n");
ref_cnt = (-E2BIG);
goto big_to_sg;
}
if (IS_ERR(info->kthread)) {
err = PTR_ERR(info->kthread);
info->kthread = NULL;
- printk(KERN_ERR "scsifront: kthread start err %d\n", err);
+ pr_err("scsifront: kthread start err %d\n", err);
goto free_sring;
}
err = scsi_add_host(host, &dev->dev);
if (err) {
- printk(KERN_ERR "scsifront: fail to add scsi host %d\n", err);
+ pr_err("scsifront: fail to add scsi host %d\n", err);
goto free_sring;
}
if (device_state == XenbusStateInitialised) {
sdev = scsi_device_lookup(info->host, chn, tgt, lun);
if (sdev) {
- printk(KERN_ERR "scsifront: Device already in use.\n");
+ pr_err("scsifront: Device already in use.\n");
scsi_device_put(sdev);
xenbus_printf(XBT_NIL, dev->nodename,
state_str, "%d", XenbusStateClosed);
#else
#define NET_ACCEL_CHECK_MAGIC(_p, _errval) \
if (_p->magic != NET_ACCEL_MSG_MAGIC) { \
- printk(KERN_ERR "%s: passed invalid shared page %p!\n", \
+ pr_err("%s: passed invalid shared page %p!\n", \
__FUNCTION__, _p); \
return _errval; \
}
out_of_memory:
if (tpmif != NULL)
kmem_cache_free(tpmif_cachep, tpmif);
- printk("%s: out of memory\n", __FUNCTION__);
+ pr_err("%s: out of memory\n", __FUNCTION__);
return ERR_PTR(-ENOMEM);
}
int rc;
if ((rc = misc_register(&vtpms_miscdevice)) != 0) {
- printk(KERN_ALERT
- "Could not register misc device for TPM BE.\n");
+ pr_alert("Could not register misc device for TPM BE\n");
return rc;
}
return rc;
}
- printk(KERN_ALERT "Successfully initialized TPM backend driver.\n");
+ pr_alert("Successfully initialized TPM backend driver\n");
return 0;
}
gnttab_check_GNTST_eagain_do_while(GNTTABOP_map_grant_ref, &op);
if (op.status != GNTST_okay) {
- printk(KERN_ERR "grant table failure mapping urb_ring_ref %d\n", (int)op.status);
+ pr_err("grant table failure mapping urb_ring_ref %d\n",
+ (int)op.status);
return -EINVAL;
}
GNTMAP_host_map, usbif->urb_shmem_handle);
VOID(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unop,
1));
- printk(KERN_ERR "grant table failure mapping conn_ring_ref %d\n", (int)op.status);
+ pr_err("grant table failure mapping conn_ring_ref %d\n",
+ (int)op.status);
return -EINVAL;
}
else
pending_req->urb = usb_alloc_urb(0, GFP_KERNEL);
if (!pending_req->urb) {
- printk(KERN_ERR "usbback: can't alloc urb\n");
+ pr_err("usbback: can't alloc urb\n");
ret = -ENOMEM;
goto fail;
}
req->buffer_length, GFP_KERNEL,
&pending_req->transfer_dma);
if (!pending_req->buffer) {
- printk(KERN_ERR "usbback: can't alloc urb buffer\n");
+ pr_err("usbback: can't alloc urb buffer\n");
ret = -ENOMEM;
goto fail_free_urb;
}
pending_req->setup = kmalloc(sizeof(struct usb_ctrlrequest),
GFP_KERNEL);
if (!pending_req->setup) {
- printk(KERN_ERR "usbback: can't alloc usb_ctrlrequest\n");
+ pr_err("usbback: can't alloc usb_ctrlrequest\n");
ret = -ENOMEM;
goto fail_free_buffer;
}
nr_segs = pending_req->nr_buffer_segs + pending_req->nr_extra_segs;
if (nr_segs > USBIF_MAX_SEGMENTS_PER_REQUEST) {
- printk(KERN_ERR "Bad number of segments in request\n");
+ pr_err("Bad number of segments in request\n");
ret = -EINVAL;
goto fail;
}
gnttab_check_GNTST_eagain_while(GNTTABOP_map_grant_ref, &map[i]);
if (unlikely(map[i].status != GNTST_okay)) {
- printk(KERN_ERR "usbback: invalid buffer -- could not remap it\n");
+ pr_err("usbback: invalid buffer -- could not remap it\n");
map[i].handle = USBBACK_INVALID_HANDLE;
ret |= 1;
}
ret = usbbk_gnttab_map(usbif, req, pending_req);
if (ret) {
- printk(KERN_ERR "usbback: invalid buffer\n");
+ pr_err("usbback: invalid buffer\n");
ret = -ESHUTDOWN;
goto fail_free_urb;
}
ret = usb_submit_urb(pending_req->urb, GFP_KERNEL);
if (ret) {
- printk(KERN_ERR "usbback: failed submitting urb, error %d\n", ret);
+ pr_err("usbback: failed submitting urb, error %d\n", ret);
ret = -ESHUTDOWN;
goto fail_flush_area;
}
while (rc != rp) {
if (RING_REQUEST_CONS_OVERFLOW(urb_ring, rc)) {
- printk(KERN_WARNING "RING_REQUEST_CONS_OVERFLOW\n");
+ pr_warning("RING_REQUEST_CONS_OVERFLOW\n");
break;
}
stub = kzalloc(sizeof(*stub), GFP_KERNEL);
if (!stub) {
- printk(KERN_ERR "no memory for alloc usbstub\n");
+ pr_err("no memory for usbstub\n");
return NULL;
}
kref_init(&stub->kref);
err = usb_register(&usbback_usb_driver);
if (err < 0) {
- printk(KERN_ERR "usbback: usb_register failed (error %d)\n", err);
+ pr_err("usbback: usb_register failed (%d)\n", err);
goto out;
}
return err;
}
- printk("usbback: urb-ring-ref %ld, conn-ring-ref %ld, event-channel %d\n",
- urb_ring_ref, conn_ring_ref, evtchn);
+ pr_info("usbback: urb-ring-ref %ld, conn-ring-ref %ld,"
+ " event-channel %d\n",
+ urb_ring_ref, conn_ring_ref, evtchn);
err = usbif_map(usbif, urb_ring_ref, conn_ring_ref, evtchn);
if (err) {
case XenbusStateInitialising:
if (dev->state == XenbusStateClosed) {
- printk("%s: %s: prepare for reconnect\n",
- __FUNCTION__, dev->nodename);
+ pr_info("%s: %s: prepare for reconnect\n",
+ __FUNCTION__, dev->nodename);
xenbus_switch_state(dev, XenbusStateInitWait);
}
break;
{
struct device *dev = info_to_hcd(info)->self.controller;
if (device_create_file(dev, &dev_attr_statistics))
- printk(KERN_WARNING "statistics file not created for %s\n",
- info_to_hcd(info)->self.bus_name);
+ pr_warning("statistics file not created for %s\n",
+ info_to_hcd(info)->self.bus_name);
}
static inline void remove_debug_file(struct usbfront_info *info)
ret = gnttab_alloc_grant_references(USBIF_MAX_SEGMENTS_PER_REQUEST, &gref_head);
if (ret) {
- printk(KERN_ERR "usbfront: gnttab_alloc_grant_references() error\n");
+ pr_err("usbfront: gnttab_alloc_grant_references() error\n");
return -ENOMEM;
}
xenhcd_urbp_cachep = kmem_cache_create("xenhcd_urb_priv",
sizeof(struct urb_priv), 0, 0, NULL);
if (!xenhcd_urbp_cachep) {
- printk(KERN_ERR "usbfront failed to create kmem cache\n");
+ pr_err("usbfront failed to create kmem cache\n");
return -ENOMEM;
}
xen_class = class_create(THIS_MODULE, "xen");
mutex_unlock(&xc_mutex);
if (IS_ERR(xen_class))
- printk("Failed to create xen sysfs class.\n");
+ pr_err("failed to create xen sysfs class\n");
return xen_class;
}
int err;
if (intf->req_prod != intf->req_cons)
- printk(KERN_ERR "XENBUS request ring is not quiescent "
+ pr_err("XENBUS request ring is not quiescent "
"(%08x:%08x)!\n", intf->req_cons, intf->req_prod);
if (intf->rsp_prod != intf->rsp_cons) {
- printk(KERN_WARNING "XENBUS response ring is not quiescent "
- "(%08x:%08x): fixing up\n",
- intf->rsp_cons, intf->rsp_prod);
+ pr_warning("XENBUS response ring is not quiescent"
+ " (%08x:%08x): fixing up\n",
+ intf->rsp_cons, intf->rsp_prod);
intf->rsp_cons = intf->rsp_prod;
}
xen_store_evtchn, wake_waiting,
0, "xenbus", &xb_waitq);
if (err <= 0) {
- printk(KERN_ERR "XENBUS request irq failed %i\n", err);
+ pr_err("XENBUS request irq failed %i\n", err);
return err;
}
err = bind_evtchn_to_irqhandler(xen_store_evtchn, wake_waiting,
0, "xenbus", &xb_waitq);
if (err <= 0) {
- printk(KERN_ERR "XENBUS request irq failed %i\n", err);
+ pr_err("XENBUS request irq failed %i\n", err);
return err;
}
xenbus_irq = err;
{
nodename = strchr(nodename, '/');
if (!nodename || strlen(nodename + 1) >= XEN_BUS_ID_SIZE) {
- printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename);
+ pr_warning("XENBUS: bad frontend %s\n", nodename);
return -EINVAL;
}
strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE);
if (!strchr(bus_id, '/')) {
- printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id);
+ pr_warning("XENBUS: bus_id %s no slash\n", bus_id);
return -EINVAL;
}
*strchr(bus_id, '/') = '-';
err = drv->suspend(xdev);
#endif
if (err)
- printk(KERN_WARNING
- "xenbus: suspend %s failed: %i\n", dev_name(dev), err);
+ pr_warning("xenbus: suspend %s failed: %i\n",
+ dev_name(dev), err);
return 0;
}
if (drv->suspend_cancel)
err = drv->suspend_cancel(xdev);
if (err)
- printk(KERN_WARNING
- "xenbus: suspend_cancel %s failed: %i\n",
- dev_name(dev), err);
+ pr_warning("xenbus: suspend_cancel %s failed: %i\n",
+ dev_name(dev), err);
return 0;
}
#endif
err = talk_to_otherend(xdev);
if (err) {
- printk(KERN_WARNING
- "xenbus: resume (talk_to_otherend) %s failed: %i\n",
- dev_name(dev), err);
+ pr_warning("xenbus: resume (talk_to_otherend) %s failed: %i\n",
+ dev_name(dev), err);
return err;
}
if (drv->resume) {
err = drv->resume(xdev);
if (err) {
- printk(KERN_WARNING
- "xenbus: resume %s failed: %i\n",
- dev_name(dev), err);
+ pr_warning("xenbus: resume %s failed: %i\n",
+ dev_name(dev), err);
return err;
}
}
err = watch_otherend(xdev);
if (err) {
- printk(KERN_WARNING
- "xenbus_probe: resume (watch_otherend) %s failed: "
- "%d.\n", dev_name(dev), err);
+ pr_warning("xenbus_probe: resume (watch_otherend) %s failed:"
+ " %d\n", dev_name(dev), err);
return err;
}
{
int ret = 0;
-#if defined(CONFIG_XEN) || defined(MODULE)
if (is_xenstored_ready())
ret = nb->notifier_call(nb, 0, NULL);
else
-#endif
blocking_notifier_chain_register(&xenstore_chain, nb);
return ret;
fail1:
rc2 = close_evtchn(xen_store_evtchn);
if (rc2 != 0)
- printk(KERN_WARNING
- "XENBUS: Error freeing xenstore event channel: %d\n",
- rc2);
+ pr_warning("XENBUS: Error freeing xenstore event channel:"
+ " %d\n", rc2);
fail0:
xen_store_evtchn = -1;
return rc;
/* Register ourselves with the kernel bus subsystem */
xenbus_frontend.error = bus_register(&xenbus_frontend.bus);
if (xenbus_frontend.error)
- printk(KERN_WARNING
- "XENBUS: Error registering frontend bus: %i\n",
- xenbus_frontend.error);
+ pr_warning("XENBUS: Error registering frontend bus: %i\n",
+ xenbus_frontend.error);
xenbus_backend_bus_register();
/*
/* Initialize the interface to xenstore. */
err = xs_init();
if (err) {
- printk(KERN_WARNING
- "XENBUS: Error initializing xenstore comms: %i\n", err);
+ pr_warning("XENBUS: Error initializing xenstore comms: %i\n",
+ err);
goto err;
}
xenbus_frontend.error = device_register(&xenbus_frontend.dev);
if (xenbus_frontend.error) {
bus_unregister(&xenbus_frontend.bus);
- printk(KERN_WARNING
- "XENBUS: Error registering frontend device: %i\n",
- xenbus_frontend.error);
+ pr_warning("XENBUS: Error registering frontend device:"
+ " %d\n", xenbus_frontend.error);
}
}
#endif
if (!dev->driver) {
/* Information only: is this too noisy? */
- printk(KERN_INFO "XENBUS: Device with no driver: %s\n",
- xendev->nodename);
+ pr_info("XENBUS: Device with no driver: %s\n",
+ xendev->nodename);
return 0;
}
enum xenbus_state rstate = XenbusStateUnknown;
if (xendev->otherend)
rstate = xenbus_read_driver_state(xendev->otherend);
- printk(KERN_WARNING "XENBUS: Timeout connecting "
- "to device: %s (local state %d, remote state %d)\n",
- xendev->nodename, xendev->state, rstate);
+ pr_warning("XENBUS: Timeout connecting to device: %s"
+ " (local state %d, remote state %d)\n",
+ xendev->nodename, xendev->state, rstate);
}
xendrv = to_xenbus_driver(dev->driver);
if (xendrv->is_ready && !xendrv->is_ready(xendev))
- printk(KERN_WARNING "XENBUS: Device not ready: %s\n",
- xendev->nodename);
+ pr_warning("XENBUS: Device not ready: %s\n",
+ xendev->nodename);
return 0;
}
while (exists_connecting_device(drv)) {
if (time_after(jiffies, start + (seconds_waited+5)*HZ)) {
if (!seconds_waited)
- printk(KERN_WARNING "XENBUS: Waiting for "
- "devices to initialise: ");
+ pr_warning("XENBUS: Waiting for "
+ "devices to initialise: ");
seconds_waited += 5;
printk("%us...", 300 - seconds_waited);
if (seconds_waited == 300)
{
xenbus_backend.error = bus_register(&xenbus_backend.bus);
if (xenbus_backend.error)
- printk(KERN_WARNING
- "XENBUS: Error registering backend bus: %i\n",
- xenbus_backend.error);
+ pr_warning("XENBUS: Error registering backend bus: %i\n",
+ xenbus_backend.error);
}
void xenbus_backend_device_register(void)
xenbus_backend.error = device_register(&xenbus_backend.dev);
if (xenbus_backend.error) {
bus_unregister(&xenbus_backend.bus);
- printk(KERN_WARNING
- "XENBUS: Error registering backend device: %i\n",
- xenbus_backend.error);
+ pr_warning("XENBUS: Error registering backend device: %i\n",
+ xenbus_backend.error);
}
}
for (i = 0; strcmp(errorstring, xsd_errors[i].errstring) != 0; i++) {
if (i == ARRAY_SIZE(xsd_errors) - 1) {
- printk(KERN_WARNING
- "XENBUS xen store gave: unknown error %s",
- errorstring);
+ pr_warning("XENBUS xen store gave: unknown error %s",
+ errorstring);
return EINVAL;
}
}
if (msg.type != type) {
if (printk_ratelimit())
- printk(KERN_WARNING
- "XENBUS unexpected type [%d], expected [%d]\n",
- msg.type, type);
+ pr_warning("XENBUS unexpected type [%d],"
+ " expected [%d]\n",
+ msg.type, type);
kfree(ret);
return ERR_PTR(-EINVAL);
}
err = xs_unwatch(watch->node, token);
if (err)
- printk(KERN_WARNING
- "XENBUS Failed to release watch %s: %i\n",
- watch->node, err);
+ pr_warning("XENBUS Failed to release watch %s: %i\n",
+ watch->node, err);
up_read(&xs_state.watch_mutex);
for (;;) {
err = process_msg();
if (err)
- printk(KERN_WARNING "XENBUS error %d while reading "
- "message\n", err);
+ pr_warning("XENBUS error %d while reading "
+ "message\n", err);
if (kthread_should_stop())
break;
}
active_defined = 0;
}
- printk(KERN_INFO "%s: ret %d, events %d, xenoprof_is_primary %d\n",
- __func__, ret, init.num_events, xenoprof_is_primary);
+ pr_info("%s: ret %d, events %d, xenoprof_is_primary %d\n",
+ __func__, ret, init.num_events, xenoprof_is_primary);
return ret;
}
extern int processor_notify_external(struct acpi_processor *pr,
int event, int type);
-extern void processor_extcntl_init(void);
extern int processor_extcntl_prepare(struct acpi_processor *pr);
extern int acpi_processor_get_performance_info(struct acpi_processor *pr);
extern int acpi_processor_get_psd(struct acpi_processor *pr);
-void arch_acpi_processor_init_extcntl(const struct processor_extcntl_ops **);
#else
static inline int processor_cntl_external(void) {return 0;}
static inline int processor_pm_external(void) {return 0;}
{
return 0;
}
-static inline void processor_extcntl_init(void) {}
static inline int processor_extcntl_prepare(struct acpi_processor *pr)
{
return 0;
static inline int smp_suspend(void)
{
if (num_online_cpus() > 1) {
- printk(KERN_WARNING "Can't suspend SMP guests "
- "without CONFIG_HOTPLUG_CPU\n");
+ pr_warning("Can't suspend SMP guests without"
+ " CONFIG_HOTPLUG_CPU\n");
return -EOPNOTSUPP;
}
return 0;
BUG_ON(__ret); \
} \
if (__hc_delay == 0) { \
- printk(KERN_ERR "%s: %s gnt busy\n", __func__, current->comm); \
+ pr_err("%s: %s gnt busy\n", __func__, current->comm); \
(__HCarg_p)->status = GNTST_bad_page; \
} \
if ((__HCarg_p)->status != GNTST_okay) \
- printk(KERN_ERR "%s: %s gnt status %x\n", \
+ pr_err("%s: %s gnt status %x\n", \
__func__, current->comm, (__HCarg_p)->status); \
}
msleep(__hc_delay++); \
} while ((__HCarg_p)->status == GNTST_eagain && __hc_delay); \
if (__hc_delay == 0) { \
- printk(KERN_ERR "%s: %s gnt busy\n", __func__, current->comm); \
+ pr_err("%s: %s gnt busy\n", __func__, current->comm); \
(__HCarg_p)->status = GNTST_bad_page; \
} \
if ((__HCarg_p)->status != GNTST_okay) \
- printk(KERN_ERR "%s: %s gnt status %x\n", \
+ pr_err("%s: %s gnt status %x\n", \
__func__, current->comm, (__HCarg_p)->status); \
}
xhv.index = idx;
r = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv);
if (r < 0) {
- printk(KERN_ERR "Cannot get hvm parameter %d: %d!\n",
- idx, r);
+ pr_err("Cannot get hvm parameter %d: %d!\n", idx, r);
return r;
}
*value = xhv.value;
#include "xen.h"
+/* version of ABI */
+#define TMEM_SPEC_VERSION 1
+
/* Commands to HYPERVISOR_tmem_op() */
#define TMEM_CONTROL 0
#define TMEM_NEW_POOL 1
/* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */
#define TMEM_POOL_PERSIST 1
#define TMEM_POOL_SHARED 2
+#define TMEM_POOL_PRECOMPRESSED 4
#define TMEM_POOL_PAGESIZE_SHIFT 4
#define TMEM_POOL_PAGESIZE_MASK 0xf
#define TMEM_POOL_VERSION_SHIFT 24
#define TMEM_POOL_VERSION_MASK 0xff
+#define TMEM_POOL_RESERVED_BITS 0x00ffff00
/* Bits for client flags (save/restore) */
#define TMEM_CLIENT_COMPRESS 1
uint32_t cli_id;
uint32_t arg1;
uint32_t arg2;
- uint64_t arg3;
+ uint64_t oid[3];
tmem_cli_va_t buf;
} ctrl; /* for cmd == TMEM_CONTROL */
struct {
- uint64_t object;
+ uint64_t oid[3];
uint32_t index;
uint32_t tmem_offset;
uint32_t pfn_offset;
struct tmem_handle {
uint32_t pool_id;
uint32_t index;
- uint64_t oid;
+ uint64_t oid[3];
};
-
#endif
#endif /* __XEN_PUBLIC_TMEM_H__ */
*/
#include <linux/precache.h>
+#include <linux/exportfs.h>
#include <linux/module.h>
#include "tmem.h"
static int precache_auto_allocate; /* set to 1 to auto_allocate */
+union precache_filekey {
+ struct tmem_oid oid;
+ u32 fh[0];
+};
+
+/*
+ * If the filesystem uses exportable filehandles, use the filehandle as
+ * the key, else use the inode number.
+ */
+static int precache_get_key(struct inode *inode, union precache_filekey *key)
+{
+#define PRECACHE_KEY_MAX (sizeof(key->oid) / sizeof(*key->fh))
+ struct super_block *sb = inode->i_sb;
+
+ /*
+  * Zero the whole union (sizeof(*key), NOT sizeof(key)): zeroing only
+  * pointer-size bytes would leave the tail of key->oid as stack garbage
+  * on the inode-number fallback path below, yielding bogus tmem oids.
+  */
+ memset(key, 0, sizeof(*key));
+ if (sb->s_export_op) {
+ int (*fhfn)(struct dentry *, __u32 *fh, int *, int);
+
+ fhfn = sb->s_export_op->encode_fh;
+ if (fhfn) {
+ struct dentry *d;
+ int ret, maxlen = PRECACHE_KEY_MAX;
+
+ d = list_first_entry(&inode->i_dentry,
+ struct dentry, d_alias);
+ ret = fhfn(d, key->fh, &maxlen, 0);
+ if (ret < 0)
+ return ret;
+ /* 255 == encode_fh failure marker; also reject a
+  * handle that overflowed the key. */
+ if (ret >= 255 || maxlen > PRECACHE_KEY_MAX)
+ return -EPERM;
+ if (maxlen > 0)
+ return 0;
+ }
+ }
+ /* Fall back to inode number + generation as the object id. */
+ key->oid.oid[0] = inode->i_ino;
+ key->oid.oid[1] = inode->i_generation;
+ return 0;
+#undef PRECACHE_KEY_MAX
+}
+
int precache_put(struct address_space *mapping, unsigned long index,
- struct page *page)
+ struct page *page)
{
u32 tmem_pool = mapping->host->i_sb->precache_poolid;
- u64 obj = (unsigned long) mapping->host->i_ino;
+ union precache_filekey key;
u32 ind = (u32) index;
unsigned long mfn = pfn_to_mfn(page_to_pfn(page));
int ret;
ret = tmem_new_pool(0, 0, 0);
if (ret < 0)
return 0;
- printk(KERN_INFO
- "Mapping superblock for s_id=%s to precache_id=%d\n",
+ pr_info("Mapping superblock for s_id=%s to precache_id=%d\n",
mapping->host->i_sb->s_id, tmem_pool);
mapping->host->i_sb->precache_poolid = tmem_pool;
}
- if (ind != index)
+ if (ind != index || precache_get_key(mapping->host, &key))
return 0;
mb(); /* ensure page is quiescent; tmem may address it with an alias */
- return tmem_put_page(tmem_pool, obj, ind, mfn);
+ return tmem_put_page(tmem_pool, key.oid, ind, mfn);
}
int precache_get(struct address_space *mapping, unsigned long index,
- struct page *empty_page)
+ struct page *empty_page)
{
u32 tmem_pool = mapping->host->i_sb->precache_poolid;
- u64 obj = (unsigned long) mapping->host->i_ino;
+ union precache_filekey key;
u32 ind = (u32) index;
unsigned long mfn = pfn_to_mfn(page_to_pfn(empty_page));
if ((s32)tmem_pool < 0)
return 0;
- if (ind != index)
+ if (ind != index || precache_get_key(mapping->host, &key))
return 0;
- return tmem_get_page(tmem_pool, obj, ind, mfn);
+ return tmem_get_page(tmem_pool, key.oid, ind, mfn);
}
EXPORT_SYMBOL(precache_get);
int precache_flush(struct address_space *mapping, unsigned long index)
{
u32 tmem_pool = mapping->host->i_sb->precache_poolid;
- u64 obj = (unsigned long) mapping->host->i_ino;
+ union precache_filekey key;
u32 ind = (u32) index;
if ((s32)tmem_pool < 0)
return 0;
- if (ind != index)
+ if (ind != index || precache_get_key(mapping->host, &key))
return 0;
- return tmem_flush_page(tmem_pool, obj, ind);
+ return tmem_flush_page(tmem_pool, key.oid, ind);
}
EXPORT_SYMBOL(precache_flush);
int precache_flush_inode(struct address_space *mapping)
{
u32 tmem_pool = mapping->host->i_sb->precache_poolid;
- u64 obj = (unsigned long) mapping->host->i_ino;
+ union precache_filekey key;
- if ((s32)tmem_pool < 0)
+ if ((s32)tmem_pool < 0 || precache_get_key(mapping->host, &key))
return 0;
- return tmem_flush_object(tmem_pool, obj);
+ return tmem_flush_object(tmem_pool, key.oid);
}
EXPORT_SYMBOL(precache_flush_inode);
ret = tmem_destroy_pool(tmem_pool);
if (!ret)
return 0;
- printk(KERN_INFO
- "Unmapping superblock for s_id=%s from precache_id=%d\n",
+ pr_info("Unmapping superblock for s_id=%s from precache_id=%d\n",
sb->s_id, ret);
sb->precache_poolid = 0;
return 1;
*/
#define SWIZ_BITS 4
#define SWIZ_MASK ((1 << SWIZ_BITS) - 1)
-#define oswiz(_type, _ind) ((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
+#define oswiz(_type, _ind) ((struct tmem_oid){ \
+ .oid[0] = (_type << SWIZ_BITS) | (_ind & SWIZ_MASK) })
#define iswiz(_ind) (_ind >> SWIZ_BITS)
/*
#include <linux/types.h>
#include <xen/interface/xen.h>
#include <asm/hypervisor.h>
+#include "tmem.h"
-int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, u64 object, u32 index,
+int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, struct tmem_oid oid, u32 index,
unsigned long gmfn, u32 tmem_offset, u32 pfn_offset, u32 len)
{
struct tmem_op op;
op.cmd = tmem_cmd;
op.pool_id = tmem_pool;
- op.u.gen.object = object;
+ BUILD_BUG_ON(sizeof(op.u.gen.oid) != sizeof(oid.oid));
+ memcpy(op.u.gen.oid, oid.oid, sizeof(op.u.gen.oid));
op.u.gen.index = index;
op.u.gen.tmem_offset = tmem_offset;
op.u.gen.pfn_offset = pfn_offset;
return rc;
}
-int xen_tmem_new_pool(uint32_t tmem_cmd, uint64_t uuid_lo,
- uint64_t uuid_hi, uint32_t flags)
+int xen_tmem_new_pool(struct tmem_pool_uuid uuid, uint32_t flags)
{
struct tmem_op op;
int rc = 0;
- op.cmd = tmem_cmd;
- op.u.new.uuid[0] = uuid_lo;
- op.u.new.uuid[1] = uuid_hi;
+ op.cmd = TMEM_NEW_POOL;
+ op.u.new.uuid[0] = uuid.lo;
+ op.u.new.uuid[1] = uuid.hi;
+#ifdef TMEM_SPEC_VERSION
+ switch (flags >> TMEM_POOL_VERSION_SHIFT) {
+ case 0:
+ flags |= TMEM_SPEC_VERSION << TMEM_POOL_VERSION_SHIFT;
+ break;
+ case TMEM_SPEC_VERSION:
+ break;
+ default:
+ WARN(1, "TMEM: Bogus version %u, expecting %u\n",
+ flags >> TMEM_POOL_VERSION_SHIFT, TMEM_SPEC_VERSION);
+ return -ENOSYS;
+ }
+#endif
op.u.new.flags = flags;
rc = HYPERVISOR_tmem_op(&op);
return rc;
#define TMEM_POOL_MIN_PAGESHIFT 12
#define TMEM_POOL_PAGEORDER (PAGE_SHIFT - TMEM_POOL_MIN_PAGESHIFT)
-extern int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, u64 object, u32 index,
+struct tmem_pool_uuid {
+ u64 lo;
+ u64 hi;
+};
+
+struct tmem_oid {
+ u64 oid[3];
+};
+
+extern int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, struct tmem_oid, u32 index,
unsigned long gmfn, u32 tmem_offset, u32 pfn_offset, u32 len);
-extern int xen_tmem_new_pool(u32 tmem_cmd, u64 uuid_lo, u64 uuid_hi, u32 flags);
+extern int xen_tmem_new_pool(struct tmem_pool_uuid, u32 flags);
-static inline int tmem_put_page(u32 pool_id, u64 object, u32 index,
+static inline int tmem_put_page(u32 pool_id, struct tmem_oid oid, u32 index,
unsigned long gmfn)
{
- return xen_tmem_op(TMEM_PUT_PAGE, pool_id, object, index,
+ return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index,
gmfn, 0, 0, 0);
}
-static inline int tmem_get_page(u32 pool_id, u64 object, u32 index,
+static inline int tmem_get_page(u32 pool_id, struct tmem_oid oid, u32 index,
unsigned long gmfn)
{
- return xen_tmem_op(TMEM_GET_PAGE, pool_id, object, index,
+ return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index,
gmfn, 0, 0, 0);
}
-static inline int tmem_flush_page(u32 pool_id, u64 object, u32 index)
+static inline int tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index)
{
- return xen_tmem_op(TMEM_FLUSH_PAGE, pool_id, object, index,
+ return xen_tmem_op(TMEM_FLUSH_PAGE, pool_id, oid, index,
0, 0, 0, 0);
}
-static inline int tmem_flush_object(u32 pool_id, u64 object)
+static inline int tmem_flush_object(u32 pool_id, struct tmem_oid oid)
{
- return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, object, 0, 0, 0, 0, 0);
+ return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
}
static inline int tmem_new_pool(u64 uuid_lo, u64 uuid_hi, u32 flags)
{
+ struct tmem_pool_uuid uuid = { .lo = uuid_lo, .hi = uuid_hi };
+
BUILD_BUG_ON((TMEM_POOL_PAGEORDER < 0) ||
(TMEM_POOL_PAGEORDER >= TMEM_POOL_PAGESIZE_MASK));
flags |= TMEM_POOL_PAGEORDER << TMEM_POOL_PAGESIZE_SHIFT;
- return xen_tmem_new_pool(TMEM_NEW_POOL, uuid_lo, uuid_hi, flags);
+ return xen_tmem_new_pool(uuid, flags);
}
static inline int tmem_destroy_pool(u32 pool_id)
{
- return xen_tmem_op(TMEM_DESTROY_POOL, pool_id, 0, 0, 0, 0, 0, 0);
+ static const struct tmem_oid oid = {};
+
+ return xen_tmem_op(TMEM_DESTROY_POOL, pool_id, oid, 0, 0, 0, 0, 0);
}
-#else
-struct tmem_op {
- u32 cmd;
- s32 pool_id; /* private > 0; shared < 0; 0 is invalid */
- union {
- struct { /* for cmd == TMEM_NEW_POOL */
- u64 uuid[2];
- u32 flags;
- } new;
- struct { /* for cmd == TMEM_CONTROL */
- u32 subop;
- u32 cli_id;
- u32 arg1;
- u32 arg2;
- void *buf;
- } ctrl;
- struct {
- u64 object;
- u32 index;
- u32 tmem_offset;
- u32 pfn_offset;
- u32 len;
- unsigned long pfn; /* page frame */
- } gen;
- } u;
-};
#endif
break;
default:
if (net_ratelimit())
- printk(KERN_ERR "Attempting to checksum a non-"
+ pr_err("Attempting to checksum a non-"
"TCP/UDP packet, dropping a protocol"
" %d packet", iph->protocol);
goto out;