Range: 0 - 8192
Default: 64
+ hpet64 [X86-64,HPET] enable 64-bit mode of the HPET timer (bnc#456700)
+
com20020= [HW,NET] ARCnet - COM20020 chipset
Format:
<io>[,<irq>[,<nodeID>[,<backplane>[,<ckp>[,<timeout>]]]]]
unknown_nmi_panic
[X86] Cause panic on unknown NMI.
+ unsupported Allow loading of unsupported kernel modules:
+ 0 = only allow supported modules,
+ 1 = warn when loading unsupported modules,
+ 2 = don't warn.
+
+ CONFIG_ENTERPRISE_SUPPORT must be enabled for this
+ to have any effect.
+
usbcore.authorized_default=
[USB] Default USB device authorization:
(default -1 = authorized except for wireless USB,
--- /dev/null
+/*?
+ * Text: "Allocating AES fallback algorithm %s failed\n"
+ * Severity: Error
+ * Parameter:
+ * @1: algorithm name
+ * Description:
+ * The advanced encryption standard (AES) algorithm includes three modes with
+ * 128-bit, 192-bit, and 256-bit keys. Your hardware system only provides
+ * hardware acceleration for the 128-bit mode. The aes_s390 module failed to
+ * allocate a software fallback for the AES modes that are not supported by the
+ * hardware. A possible reason for this problem is that the aes_generic module
+ * that provides the fallback algorithms is not available.
+ * User action:
+ * Use the 128-bit mode only or ensure that the aes_generic module is available
+ * and loaded and reload the aes_s390 module.
+ */
+
+/*?
+ * Text: "AES hardware acceleration is only available for 128-bit keys\n"
+ * Severity: Informational
+ * Description:
+ * The advanced encryption standard (AES) algorithm includes three modes with
+ * 128-bit, 192-bit, and 256-bit keys. Your hardware system only provides
+ * hardware acceleration for the 128-bit key mode. The aes_s390 module
+ * will use the less performant software fallback algorithm for the 192-bit
+ * and 256-bit key modes.
+ * User action:
+ * None.
+ */
+
--- /dev/null
+/*?
+ * Text: "Application %s on z/VM guest %s exceeds message limit\n"
+ * Severity: Error
+ * Parameter:
+ * @1: application name
+ * @2: z/VM user ID
+ * Description:
+ * Messages or packets destined for the application have accumulated and
+ * reached the maximum value. The default for the message limit is 65535.
+ * You can specify a different limit as the value for MSGLIMIT within
+ * the IUCV statement of the z/VM virtual machine on which the application
+ * runs.
+ * User action:
+ * Ensure that you do not send data faster than the application retrieves
+ * it. Ensure that the message limit on the z/VM guest virtual machine
+ * on which the application runs is high enough.
+ */
+
+/*?
+ * Text: "The af_iucv module cannot be loaded without z/VM\n"
+ * Severity: Error
+ * Description:
+ * The AF_IUCV protocol connects socket applications running in Linux
+ * kernels on different z/VM virtual machines, or it connects a Linux
+ * application to another sockets application running in a z/VM virtual
+ * machine. On Linux instances that run in environments other than the
+ * z/VM hypervisor, the AF_IUCV protocol does not provide any useful
+ * function and the corresponding af_iucv module cannot be loaded.
+ * User action:
+ * Load the af_iucv module only on Linux instances that run as guest
+ * operating systems of the z/VM hypervisor. If the module has been
+ * compiled into the kernel, ignore this message.
+ */
--- /dev/null
+/*?
+ * Text: "%d is not a valid cryptographic domain\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: AP domain index
+ * Description:
+ * The cryptographic domain specified for the 'domain=' module or kernel
+ * parameter must be an integer in the range 0 to 15.
+ * User action:
+ * Reload the cryptographic device driver with a correct module parameter.
+ * If the device driver has been compiled into the kernel, correct the value
+ * in the kernel parameter line and reboot Linux.
+ */
+
+/*?
+ * Text: "The hardware system does not support AP instructions\n"
+ * Severity: Warning
+ * Description:
+ * The ap module addresses AP adapters through AP instructions. The hardware
+ * system on which the Linux instance runs does not support AP instructions.
+ * The ap module cannot detect any AP adapters.
+ * User action:
+ * Load the ap module only if your Linux instance runs on hardware that
+ * supports AP instructions. If the ap module has been compiled into the kernel,
+ * ignore this message.
+ */
+
+/*?
+ * Text: "Registering adapter interrupts for AP %d failed\n"
+ * Severity: Error
+ * Parameter:
+ * @1: AP device ID
+ * Description:
+ * The hardware system supports AP adapter interrupts but failed to enable
+ * an adapter for interrupts. Possible causes for this error are:
+ * i) The AP adapter firmware does not support AP interrupts.
+ * ii) An AP adapter firmware update to a firmware level that supports AP
+ * adapter interrupts failed.
+ * iii) The AP adapter firmware has been successfully updated to a level that
+ * supports AP interrupts but the new firmware has not been activated.
+ * User action:
+ * Ensure that the firmware on your AP adapters supports AP interrupts and that
+ * any firmware updates have completed successfully. If necessary, deconfigure
+ * your cryptographic adapters and reconfigure them to ensure that any firmware
+ * updates become active, then reload the ap module. If the ap module has been
+ * compiled into the kernel, reboot Linux.
+ */
--- /dev/null
+/*?
+ * Text: "Starting the data collection for %s failed with rc=%d\n"
+ * Severity: Error
+ * Parameter:
+ * @1: appldata module
+ * @2: return code
+ * Description:
+ * The specified data collection module used the z/VM diagnose call
+ * DIAG 0xDC to start writing data. z/VM returned an error and the data
+ * collection could not start. If the return code is 5, your z/VM guest
+ * virtual machine is not authorized to write data records.
+ * User action:
+ * If the return code is 5, ensure that your z/VM guest virtual machine's
+ * entry in the z/VM directory includes the OPTION APPLMON statement.
+ * For other return codes see the section about DIAGNOSE Code X'DC'
+ * in "z/VM CP Programming Services".
+ */
+
+/*?
+ * Text: "Stopping the data collection for %s failed with rc=%d\n"
+ * Severity: Error
+ * Parameter:
+ * @1: appldata module
+ * @2: return code
+ * Description:
+ * The specified data collection module used the z/VM diagnose call DIAG 0xDC
+ * to stop writing data. z/VM returned an error and the data collection
+ * continues.
+ * User action:
+ * See the section about DIAGNOSE Code X'DC' in "z/VM CP Programming Services".
+ */
+
+/*?
+ * Text: "Starting a new OS data collection failed with rc=%d\n"
+ * Severity: Error
+ * Parameter:
+ * @1: return code
+ * Description:
+ * After a CPU hotplug event, the record size for the running operating
+ * system data collection is no longer correct. The appldata_os module tried
+ * to start a new data collection with the correct record size but received
+ * an error from the z/VM diagnose call DIAG 0xDC. Any data collected with
+ * the current record size might be faulty.
+ * User action:
+ * Start a new data collection with the appldata_os module. For information
+ * about starting data collections see "Device Drivers, Features, and
+ * Commands". For information about the return codes see the section about
+ * DIAGNOSE Code X'DC' in "z/VM CP Programming Services".
+ */
+
+/*?
+ * Text: "Stopping a faulty OS data collection failed with rc=%d\n"
+ * Severity: Error
+ * Parameter:
+ * @1: return code
+ * Description:
+ * After a CPU hotplug event, the record size for the running operating
+ * system data collection is no longer correct. The appldata_os module tried
+ * to stop the faulty data collection but received an error from the z/VM
+ * diagnose call DIAG 0xDC. Any data collected with the current record size
+ * might be faulty.
+ * User action:
+ * Try to restart appldata_os monitoring. For information about stopping
+ * and starting data collections see "Device Drivers, Features, and
+ * Commands". For information about the return codes see the section about
+ * DIAGNOSE Code X'DC' in "z/VM CP Programming Services".
+ */
+
+/*?
+ * Text: "Maximum OS record size %i exceeds the maximum record size %i\n"
+ * Severity: Error
+ * Parameter:
+ * @1: no of bytes
+ * @2: no of bytes
+ * Description:
+ * The OS record size grows with the number of CPUs and is adjusted by the
+ * appldata_os module in response to CPU hotplug events. For more than 110
+ * CPUs the record size would exceed the maximum record size of 4024 bytes
+ * that is supported by the z/VM hypervisor. To prevent the maximum supported
+ * record size from being exceeded while data collection is in progress,
+ * you cannot load the appldata_os module on Linux instances that are
+ * configured for a maximum of more than 110 CPUs.
+ * User action:
+ * If you do not want to collect operating system data, you can ignore this
+ * message. If you want to collect operating system data, reconfigure your
+ * Linux instance to support no more than 110 CPUs.
+ */
+
--- /dev/null
+/*?
+ * Text: "%s is not a valid device for the cio_ignore kernel parameter\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: device bus-ID
+ * Description:
+ * The device specification for the cio_ignore kernel parameter is
+ * syntactically incorrect or specifies an unknown device. This device is not
+ * excluded from being sensed and analyzed.
+ * User action:
+ * Correct your device specification in the kernel parameter line to have the
+ * device excluded when you next reboot Linux. You can write the correct
+ * device specification to /proc/cio_ignore to add the device to the list of
+ * devices to be excluded. This does not immediately make the device
+ * inaccessible but the device is ignored if it disappears and later reappears.
+ */
+
+/*?
+ * Text: "0.%x.%04x to 0.%x.%04x is not a valid range for cio_ignore\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: from subchannel set ID
+ * @2: from device number
+ * @3: to subchannel set ID
+ * @4: to device number
+ * Description:
+ * The device range specified for the cio_ignore kernel parameter is
+ * syntactically incorrect. No devices specified with this range are
+ * excluded from being sensed and analyzed.
+ * User action:
+ * Correct your range specification in the kernel parameter line to have the
+ * range of devices excluded when you next reboot Linux. You can write the
+ * correct range specification to /proc/cio_ignore to add the range of devices
+ * to the list of devices to be excluded. This does not immediately make the
+ * devices in the range inaccessible but any of these devices are ignored if
+ * they disappear and later reappear.
+ */
+
+/*?
+ * Text: "Processing %s for channel path %x.%02x\n"
+ * Severity: Notice
+ * Parameter:
+ * @1: configuration change
+ * @2: channel subsystem ID
+ * @3: CHPID
+ * Description:
+ * A configuration change is in progress for the given channel path.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "No CCW console was found\n"
+ * Severity: Warning
+ * Description:
+ * Linux did not find the expected CCW console and tries to use an alternative
+ * console. A possible reason why the console was not found is that the console
+ * has been specified in the cio_ignore list.
+ * User action:
+ * None, if an appropriate alternative console has been found, and you want
+ * to use this alternative console. If you want to use the CCW console, ensure
+ * that it is not specified in the cio_ignore list, explicitly specify the console
+ * with the 'condev=' kernel parameter, and reboot Linux.
+ */
+
+/*?
+ * Text: "Channel measurement facility initialized using format %s (mode %s)\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: format
+ * @2: mode
+ * Description:
+ * The channel measurement facility has been initialized successfully.
+ * Format 'extended' should be used for z990 and later mainframe systems.
+ * Format 'basic' is intended for earlier mainframes. Mode 'autodetected' means
+ * that the format has been set automatically. Mode 'parameter' means that the
+ * format has been set according to the 'format=' kernel parameter.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "The CSS device driver initialization failed with errno=%d\n"
+ * Severity: Alert
+ * Parameter:
+ * @1: Return code
+ * Description:
+ * The channel subsystem bus could not be established.
+ * User action:
+ * See the errno man page to find out what caused the problem.
+ */
+ /*? Text: "%s: Got subchannel machine check but no sch_event handler provided.\n" */
+
+/*?
+ * Text: "%s: Setting the device online failed because it is boxed\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: Device bus-ID
+ * Description:
+ * Initialization of a device did not complete because it did not respond in
+ * time or it was reserved by another operating system.
+ * User action:
+ * Make sure that the device is working correctly, then try again to set it
+ * online. For devices that support the reserve/release mechanism (for example
+ * DASDs), you can try to override the reservation of the other system by
+ * writing 'force' to the 'online' sysfs attribute of the affected device.
+ */
+
+/*?
+ * Text: "%s: Setting the device online failed because it is not operational\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: Device bus-ID
+ * Description:
+ * Initialization of a device did not complete because it is not present or
+ * not operational.
+ * User action:
+ * Make sure that the device is present and working correctly, then try again
+ * to set it online.
+ */
+
+/*?
+ * Text: "%s: The device stopped operating while being set offline\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: Device bus-ID
+ * Description:
+ * While the device was set offline, it was not present or not operational.
+ * The device is now inactive, but setting it online again might fail.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "%s: The device entered boxed state while being set offline\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: Device bus-ID
+ * Description:
+ * While the device was set offline, it did not respond in time or it was
+ * reserved by another operating system. The device is now inactive, but
+ * setting it online again might fail.
+ * User action:
+ * None.
+ */
--- /dev/null
+/*?
+ * Text: "%s: Creating the /proc files for a new CLAW device failed\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the failed CLAW device
+ * Description:
+ * For each Common Link Access to Workstation (CLAW) device the CLAW device
+ * driver maintains files in the proc file system. The CLAW device driver
+ * failed to create a new CLAW device because it could not create these /proc
+ * files for the new device. You cannot create CLAW devices for Linux kernels
+ * that do not include a proc file system.
+ * User action:
+ * Ensure that your Linux kernel provides a proc file system. Reboot Linux.
+ * If your kernel provides a proc file system and the problem persists, contact
+ * your support organization.
+ */
+
+/*?
+ * Text: "%s: An uninitialized CLAW device received an IRQ, c-%02x d-%02x\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * @2: subchannel status
+ * @3: device status
+ * Description:
+ * A Common Link Access to Workstation (CLAW) device was not initialized when
+ * it received a channel interrupt (IRQ). The IRQ is ignored. This might be a
+ * temporary condition while the device comes online or is taken offline.
+ * User action:
+ * If this problem occurs frequently, use the status information from the
+ * message and the channel and device traces to analyze the problem. See
+ * "Principles of Operation" for details about of the status information.
+ */
+
+/*?
+ * Text: "%s: The device is not a CLAW device\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the device
+ * Description:
+ * The Common Link Access to Workstation (CLAW) device driver received a
+ * channel interrupt (IRQ) for a subchannel that is not a CLAW read or write
+ * subchannel. A CLAW subchannel must be configured for a 3088 device of
+ * type x'61' and have an even bus ID.
+ * User action:
+ * Ensure that the subchannels have been defined correctly to the real or
+ * virtual hardware, for example, in your IOCDS or in your z/VM configuration.
+ */
+
+/*?
+ * Text: "%s: The CLAW device received an unexpected IRQ, c-%02x d-%02x\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * @2: subchannel status
+ * @3: device status
+ * Description:
+ * A Common Link Access to Workstation (CLAW) device received a channel
+ * interrupt (IRQ) while it was in a state in which it cannot process IRQs.
+ * The IRQ is ignored. This might be a temporary condition.
+ * User action:
+ * If this problem occurs frequently, use the status information from the
+ * message and the channel and device traces to analyze the problem. See
+ * "Principles of Operation" for details about the status information.
+ */
+
+/*?
+ * Text: "%s: The CLAW device for %s received an unexpected IRQ\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * @2: network interface name
+ * Description:
+ * A Common Link Access to Workstation (CLAW) device received a channel
+ * interrupt (IRQ) while the CLAW device driver had assigned a status to the
+ * device in which it cannot process IRQs. The IRQ is ignored.
+ * User action:
+ * Restart the remote channel adapter. If the problem persists, use s390dbf
+ * traces and CCW traces to diagnose the problem.
+ */
+
+/*?
+ * Text: "%s: Deactivating %s completed with incorrect subchannel status (read %02x, write %02x)\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * @2: network interface name
+ * @3: read subchannel status
+ * @4: write subchannel status
+ * Description:
+ * When the Common Link Access to Workstation (CLAW) device driver closes a
+ * CLAW device, the device driver frees all storage that is used for the
+ * device. A successful closing operation results in status DEVICE END and
+ * CHANNEL END for both the read and write subchannel. At least one of these
+ * statuses is missing for a subchannel. Data might have been lost and there
+ * might be problems when the network interface is activated again.
+ * User action:
+ * If the network interface cannot be activated, vary the subchannels for the
+ * device offline and back online, for example, with chchp. If this does not
+ * resolve the problem, reset the remote channel adapter.
+ */
+
+/*?
+ * Text: "%s: The remote channel adapter is not available\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * Description:
+ * During an operation, the Common Link Access to Workstation (CLAW) device
+ * driver received errno ENODEV from the common I/O layer. This means that
+ * the remote channel adapter was not operational or offline.
+ * User action:
+ * Check the remote channel adapter and, if necessary, restart it.
+ */
+
+/*?
+ * Text: "%s: The status of the remote channel adapter is not valid\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * Description:
+ * During an operation, the Common Link Access to Workstation (CLAW) device
+ * driver received errno EINVAL from the common I/O layer. This indicates
+ * that the remote channel adapter was offline or not operational.
+ * User action:
+ * Check for related error messages to find the cause of the problem. If
+ * necessary, restart the remote channel adapter.
+ */
+
+/*?
+ * Text: "%s: The common device layer returned error code %d\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * @2: errno
+ * Description:
+ * During an I/O operation, the Common Link Access to Workstation (CLAW) device
+ * driver received an errno from the common I/O layer. This indicates a problem
+ * with the remote channel adapter.
+ * User action:
+ * See the errno man page to find out what the error code means. Check for
+ * related messages. Restart the remote channel adapter. If the problem
+ * persists, examine the subchannel trace for further diagnostic information.
+ */
+
+/*?
+ * Text: "%s: The communication peer of %s disconnected\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * @2: network interface name
+ * Description:
+ * The Common Link Access to Workstation (CLAW) device driver received a device
+ * status word DEV_STAT_UNIT_CHECK and sense code 0x41. This indicates that the
+ * remote network interface is no longer available.
+ * User action:
+ * Ensure that the remote channel adapter is operational and activate the
+ * remote interface. For information about the sense code see
+ * /Documentation/s390/cds.txt in the Linux source tree. Search for 'SNS0' to
+ * locate the information.
+ */
+
+/*?
+ * Text: "%s: The remote channel adapter for %s has been reset\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * @2: network interface name
+ * Description:
+ * The Common Link Access to Workstation (CLAW) device driver received a device
+ * status word DEV_STAT_UNIT_CHECK and sense code 0x40. This indicates that the
+ * remote channel adapter has been reset.
+ * User action:
+ * When the remote channel adapter is operational again, activate the remote
+ * interface. For information about the sense code see
+ * /Documentation/s390/cds.txt in the Linux source tree. Search for 'SNS0' to
+ * locate the information.
+ */
+
+/*?
+ * Text: "%s: A data streaming timeout occurred for %s\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * @2: network interface name
+ * Description:
+ * The Common Link Access to Workstation (CLAW) device driver received a device
+ * status word DEV_STAT_UNIT_CHECK and sense code 0x24. This indicates a data
+ * streaming timeout. The remote channel adapter or the channel might be
+ * faulty.
+ * User action:
+ * Restart the remote channel adapter and activate the remote interface. If the
+ * problem persists, examine the subchannel trace for further diagnostic
+ * information. For information about the sense code see
+ * /Documentation/s390/cds.txt in the Linux source tree. Search for 'SNS0' to
+ * locate the information.
+ */
+
+/*?
+ * Text: "%s: A data transfer parity error occurred for %s\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * @2: network interface name
+ * Description:
+ * The Common Link Access to Workstation (CLAW) device driver received a device
+ * status word DEV_STAT_UNIT_CHECK and sense code 0x20. This indicates a data
+ * parity error. The remote channel adapter or the channel might be faulty.
+ * User action:
+ * Ensure that all cables are securely plugged. Restart the remote channel
+ * adapter and activate the remote interface. If the problem persists, examine
+ * the subchannel trace for further diagnostic information. For information
+ * about the sense code see /Documentation/s390/cds.txt in the Linux source
+ * tree. Search for 'SNS0' to locate the information.
+ */
+
+/*?
+ * Text: "%s: The remote channel adapter for %s is faulty\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * @2: network interface name
+ * Description:
+ * The Common Link Access to Workstation (CLAW) device driver received a device
+ * status word DEV_STAT_UNIT_CHECK and sense code 0x30. This indicates that the
+ * remote channel adapter is faulty.
+ * User action:
+ * Check and restart the remote channel adapter and activate the remote
+ * interface. If the problem persists, perform device diagnosis for the remote
+ * channel adapter and examine the subchannel trace for further diagnostic
+ * information. For information about the sense code see
+ * /Documentation/s390/cds.txt in the Linux source tree. Search for 'SNS0' to
+ * locate the information.
+ */
+
+/*?
+ * Text: "%s: A read data parity error occurred for %s\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * @2: network interface name
+ * Description:
+ * The Common Link Access to Workstation (CLAW) device driver received a device
+ * status word DEV_STAT_UNIT_CHECK and sense code 0x10. This indicates a read
+ * data parity error. The remote channel adapter might be faulty.
+ * User action:
+ * Ensure that all cables are securely plugged. Check and restart the remote
+ * channel adapter and activate the remote interface. If the problem persists,
+ * perform device diagnosis for the remote channel adapter and examine the
+ * subchannel trace for further diagnostic information. For information about
+ * the sense code see /Documentation/s390/cds.txt in the Linux source tree.
+ * Search for 'SNS0' to locate the information.
+ */
+
+/*?
+ * Text: "%s: The communication peer of %s uses an incorrect API version %d\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * @2: network interface name
+ * @3: CLAW API version
+ * Description:
+ * The Common Link Access to Workstation (CLAW) device driver received a
+ * SYSTEM_VALIDATE_REQUEST packet from the remote channel adapter. The packet
+ * included an unexpected version ID for the CLAW API. The version ID must
+ * be '2' for all packets.
+ * User action:
+ * Ensure that the remote channel adapter is at the latest firmware level.
+ * Restart the remote channel adapter and activate the remote interface. If the
+ * problem persists, examine the subchannel trace for further diagnostic
+ * information.
+ */
+
+/*?
+ * Text: "%s: Host name %s for %s does not match the remote adapter name %s\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * @2: host name in the local CLAW device settings
+ * @3: network interface name
+ * @4: adapter name in the remote CLAW device settings
+ * Description:
+ * The host name in the local Common Link Access to Workstation (CLAW) device
+ * settings must match the adapter name in the CLAW device settings of the
+ * communication peer. The CLAW device driver discovered a mismatch between
+ * these settings. The connection cannot be established.
+ * User action:
+ * Check the configuration of the CLAW device and of its communication peer.
+ * Correct the erroneous setting and restart the CLAW device, local or remote,
+ * for which you have made corrections.
+ */
+
+/*?
+ * Text: "%s: Adapter name %s for %s does not match the remote host name %s\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * @2: adapter name in the local CLAW device settings
+ * @3: network interface name
+ * @4: host name in the remote CLAW device settings
+ * Description:
+ * The adapter name in the local Common Link Access to Workstation (CLAW) device
+ * settings must match the host name in the CLAW device settings of the
+ * communication peer. The CLAW device driver discovered a mismatch between
+ * these settings. The connection cannot be established.
+ * User action:
+ * Check the configuration of the CLAW device and of its communication peer.
+ * Correct the erroneous setting and restart the CLAW device, local or remote,
+ * for which you have made corrections.
+ */
+
+/*?
+ * Text: "%s: The local write buffer is smaller than the remote read buffer\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * Description:
+ * You set the buffer size for the local Common Link Access to Workstation
+ * (CLAW) device implicitly by setting the connection type. For connection
+ * type 'packed' the buffer size is 32 KB, for the other connection types the
+ * buffer size is 4 KB. The connection cannot be established because the
+ * write buffer size of the local CLAW device does not match the read buffer
+ * size of the communication peer.
+ * User action:
+ * Confirm that you are using the correct connection type for the local CLAW
+ * device. Ensure that the read buffer size of the remote CLAW device is set
+ * accordingly. Restart the CLAW device, local or remote, for which you have
+ * made corrections.
+ */
+
+/*?
+ * Text: "%s: The local read buffer is smaller than the remote write buffer\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * Description:
+ * You set the buffer size for the local Common Link Access to Workstation
+ * (CLAW) device implicitly by setting the connection type. For connection
+ * type 'packed' the buffer size is 32 KB, for the other connection types the
+ * buffer size is 4 KB. The connection cannot be established because the
+ * read buffer size of the local CLAW device does not match the write buffer
+ * size of the communication peer.
+ * User action:
+ * Confirm that you are using the correct connection type for the local CLAW
+ * device. Ensure that the write buffer size of the remote CLAW device is set
+ * accordingly. Restart the CLAW device, local or remote, for which you have
+ * made corrections.
+ */
+
+/*?
+ * Text: "%s: Settings for %s validated (version=%d, remote device=%d, rc=%d, adapter name=%.8s, host name=%.8s)\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * @2: network interface name
+ * @3: CLAW API version
+ * @4: identifier for the remote CLAW device
+ * @5: return code received from the remote CLAW device
+ * @6: adapter name
+ * @7: host name
+ * Description:
+ * The settings of the local Common Link Access to Workstation (CLAW) device
+ * have been validated by the communication peer. The message summarizes the
+ * content of the response. If the return code is zero, the validation was
+ * successful and the connection is activated.
+ * User action:
+ * If the return code is not equal to zero, look for related warning messages.
+ */
+
+/*?
+ * Text: "%s: Validating %s failed because of a host or adapter name mismatch\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * @2: network interface name
+ * Description:
+ * The Common Link Access to Workstation (CLAW) network interface cannot be
+ * activated because there is a mismatch between a host name and the
+ * corresponding adapter name. The local host name must match the remote
+ * adapter name and the local adapter name must match the remote host name.
+ * User action:
+ * Correct the erroneous setting and restart the CLAW device, local or remote,
+ * for which you have made corrections.
+ */
+
+/*?
+ * Text: "%s: Validating %s failed because of a version conflict\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * @2: network interface name
+ * Description:
+ * The Common Link Access to Workstation (CLAW) network interface cannot be
+ * activated because the remote CLAW device does not support CLAW version 2.
+ * The CLAW device driver requires CLAW version 2.
+ * User action:
+ * Ensure that the remote channel adapter supports CLAW version 2 and that the
+ * remote CLAW device is configured for CLAW version 2.
+ */
+
+/*?
+ * Text: "%s: Validating %s failed because of a frame size conflict\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * @2: network interface name
+ * Description:
+ * You set the frame size for the local Common Link Access to Workstation
+ * (CLAW) device implicitly by setting the connection type. For connection
+ * type 'packed' the frame size is 32 KB, for the other connection types the
+ * frame size is 4 KB. The connection cannot be activated because the
+ * frame size of the local CLAW device does not match the frame size of the
+ * communication peer.
+ * User action:
+ * Confirm that you are using the correct connection type for the local CLAW
+ * device. Ensure that the frame size of the remote CLAW device is set
+ * accordingly. Restart the CLAW device, local or remote, for which you have
+ * made corrections.
+ */
+
+/*?
+ * Text: "%s: The communication peer of %s rejected the connection\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * @2: network interface name
+ * Description:
+ * The remote CLAW device rejected the connection because of a mismatch between
+ * the settings of the local CLAW device and the remote CLAW device.
+ * User action:
+ * Check the settings of both the local and the remote CLAW device and ensure
+ * that the settings are consistent. Restart the CLAW device, local or remote
+ * for which you have made the correction.
+ */
+
+/*?
+ * Text: "%s: %s rejected a connection request because it is already active\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * @2: network interface name
+ * Description:
+ * The Common Link Access to Workstation (CLAW) device rejected a connection
+ * request by its communication peer because the connection is already active.
+ * The CLAW device driver only supports a single connection for each CLAW
+ * device. This might be a runtime problem.
+ * User action:
+ * None if there is an active connection. If no connection can be established,
+ * restart the remote channel adapter.
+ */
+
+/*?
+ * Text: "%s: %s rejected a request to open multiple connections\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * @2: network interface name
+ * Description:
+ * The Common Link Access to Workstation (CLAW) device rejected a request by
+ * its communication peer to open more than one connection. The CLAW device
+ * driver only supports a single connection for each CLAW device.
+ * User action:
+ * Reconfigure the remote CLAW device to only use one connection. Restart the
+ * remote CLAW device.
+ */
+
+/*?
+ * Text: "%s: %s rejected a connection request because of a type mismatch\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * @2: network interface name
+ * Description:
+ * The Common Link Access to Workstation (CLAW) device rejected a request by
+ * its communication peer to open a connection. A connection can only be opened
+ * if the same connection type has been set for both the local and the remote
+ * CLAW device.
+ * User action:
+ * Ensure that the connection types for the local and remote CLAW device match.
+ * Restart the CLAW device, local or remote, for which you have changed the
+ * connection type.
+ */
+
+/*?
+ * Text: "%s: The communication peer of %s rejected a connection request\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * @2: network interface name
+ * Description:
+ * The remote CLAW device detected an inconsistency in the configurations of the
+ * local and the remote CLAW device and rejected a connection request.
+ * User action:
+ * Examine the settings of your local and remote CLAW device. Correct the
+ * erroneous setting and restart the CLAW device, local or remote, for which
+ * you have made corrections.
+ */
+
+/*?
+ * Text: "%s: The communication peer of %s rejected a connection request because of a type mismatch\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * @2: network interface name
+ * Description:
+ * The remote Common Link Access to Workstation (CLAW) device rejected a
+ * request to open a connection. A connection can only be opened if the same
+ * connection type has been set for both the local and the remote CLAW device.
+ * User action:
+ * Ensure that the connection types for the local and remote CLAW device match.
+ * Restart the CLAW device, local or remote, for which you have changed the
+ * connection type.
+ */
+
+/*?
+ * Text: "%s: Activating %s failed because of an incorrect link ID=%d\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * @2: network interface name
+ * @3: link ID returned from the remote CLAW device
+ * Description:
+ * The remote Common Link Access to Workstation (CLAW) device accepted a
+ * connection request but returned an incorrect link ID. The CLAW device driver
+ * only supports a single connection at a time (link ID=1) for each network
+ * interface.
+ * User action:
+ * Restart the remote CLAW device and try again to activate the network
+ * interface.
+ */
+
+/*?
+ * Text: "%s: The communication peer of %s failed\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * @2: network interface name
+ * Description:
+ * The remote Common Link Access to Workstation (CLAW) device reported an
+ * error condition that cannot be recovered automatically.
+ * User action:
+ * Restart the remote CLAW device. If this does not resolve the error, gather
+ * logs and traces from the remote CLAW device to obtain further
+ * diagnostic data.
+ */
+
+/*?
+ * Text: "%s: The communication peer of %s sent an unknown command code\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * @2: network interface name
+ * Description:
+ * The remote Common Link Access to Workstation (CLAW) device sent a command
+ * code that is not defined. This might indicate that the remote CLAW device is
+ * malfunctioning. The connection remains operational.
+ * User action:
+ * If this problem occurs frequently, restart the remote CLAW device. If this
+ * does not resolve the error, gather logs and traces from the remote CLAW
+ * device to obtain further diagnostic data.
+ */
+
+/*?
+ * Text: "%s: The communication peer of %s sent a faulty frame of length %02x\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * @2: network interface name
+ * @3: incorrect frame length value
+ * Description:
+ * The remote Common Link Access to Workstation (CLAW) device sent a frame
+ * with an incorrect value in the length field. This problem might result from
+ * data errors or incorrect packing. The connection remains operational.
+ * User action:
+ * If this problem occurs frequently, restart the remote CLAW device. If this
+ * does not resolve the error, gather logs and traces from the remote CLAW
+ * device to obtain further diagnostic data.
+ */
+
+/*?
+ * Text: "%s: Allocating a buffer for incoming data failed\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * Description:
+ * A Common Link Access to Workstation (CLAW) data packet was received but
+ * the CLAW device driver could not allocate a receive buffer. A possible cause
+ * of this problem is memory constraints. The data packet is dropped but the
+ * connection remains operational.
+ * User action:
+ * Ensure that sufficient memory is available. If this problem occurs
+ * frequently, restart the remote CLAW device. If this does not resolve the
+ * error, gather logs and traces from the remote CLAW device to obtain further
+ * diagnostic data.
+ */
+
+/*?
+ * Text: "%s: Creating a CLAW group device failed with error code %d\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * @2: errno
+ * Description:
+ * The Common Link Access to Workstation (CLAW) device driver failed to create
+ * a CLAW group device. A possible cause of this problem is memory constraints.
+ * User action:
+ * Ensure that there is sufficient free memory. See the errno man page and look
+ * for related messages to find out what caused the problem. If you cannot
+ * resolve the problem, contact your support organization.
+ */
+
+/*?
+ * Text: "%s: Setting the read subchannel online failed with error code %d\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * @2: errno
+ * Description:
+ * Setting the Common Link Access to Workstation (CLAW) device online failed
+ * with an error for the read subchannel. This problem occurs, for example, if
+ * the read subchannel used to create the CLAW group device is not defined as a
+ * CLAW read subchannel in the hardware definitions. The CLAW read subchannel
+ * must be for a 3088 device of type x'61' and have an even bus ID. The bus ID
+ * of the read subchannel matches the bus ID of the CLAW device.
+ * User action:
+ * Confirm that you are using the correct bus ID for the read subchannel. If
+ * necessary, ungroup the device and recreate it with the correct bus ID.
+ * Assure that the read subchannel has been defined correctly to the real or
+ * virtual hardware, for example, in your IOCDS or in your z/VM configuration.
+ * Assure that a valid number of read buffers has been assigned to the device.
+ * See 'Device Drivers, Features, and Commands' for details about the read
+ * buffers. See the errno man page for information about the error code.
+ */
+
+/*?
+ * Text: "%s: Setting the write subchannel online failed with error code %d\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * @2: errno
+ * Description:
+ * Setting the Common Link Access to Workstation (CLAW) device online failed
+ * with an error for the write subchannel. This problem occurs, for example, if
+ * the write subchannel used to create the CLAW group device is not defined as a
+ * CLAW write subchannel in the hardware definitions. The CLAW write subchannel
+ * must be for a 3088 device of type x'61' and have an uneven bus ID. The
+ * bus ID of the write subchannel can be found from the symbolic link
+ * /sys/bus/ccwgroup/drivers/claw/<device-bus-ID>/cdev1 where <device-bus-ID>
+ * is the bus ID of the CLAW device.
+ * User action:
+ * Confirm that you are using the correct bus ID for the write subchannel. If
+ * necessary, ungroup the device and recreate it with the correct bus ID.
+ * Assure that the write subchannel has been defined correctly to the real or
+ * virtual hardware, for example, in your IOCDS or in your z/VM configuration.
+ * Assure that a valid number of write buffers has been assigned to the device.
+ * See 'Device Drivers, Features, and Commands' for details about the write
+ * buffers. See the errno man page for information about the error code.
+ */
+
+/*?
+ * Text: "%s: Activating the CLAW device failed\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the CLAW device
+ * Description:
+ * Activating the Common Link Access to Workstation (CLAW) device failed. A
+ * possible cause of this problem is memory constraints.
+ * User action:
+ * Free some memory and try again to activate the CLAW device. If the problem
+ * persists, contact your support organization.
+ */
+
+/*?
+ * Text: "Registering with the S/390 debug feature failed with error code %d\n"
+ * Severity: Error
+ * Parameter:
+ * @1: errno
+ * Description:
+ * The Common Link Access to Workstation (CLAW) device driver failed to register
+ * with the S/390 debug feature. No debug traces will be available for CLAW.
+ * User action:
+ * Enter 'lsmod | grep dbf' or an equivalent command to check if the S/390
+ * debug feature is loaded. If the output does not show the dbf module, the
+ * S/390 debug feature has not been loaded. Unload the CLAW device driver,
+ * load the debug feature, then reload the CLAW device driver. See the errno
+ * man page for information about the error code.
+ */
+
+/*?
+ * Text: "Registering with the cu3088 device driver failed with error code %d\n"
+ * Severity: Error
+ * Parameter:
+ * @1: errno
+ * Description:
+ * The Common Link Access to Workstation (CLAW) device driver failed to register
+ * with the cu3088 channel subsystem device driver. The CLAW device driver
+ * requires the cu3088 device driver.
+ * User action:
+ * Enter 'lsmod | grep cu3088' or an equivalent command to check if the cu3088
+ * device driver is loaded. If the output does not show the cu3088 module,
+ * unload the CLAW device driver, load the cu3088 device driver, then reload
+ * the CLAW device driver. See the errno man page for information about the
+ * error code.
+ */
+
+/*? Text: "%s: %s: CLAW device %.8s: Received Control Packet\n" */
+/*? Text: "%s: %s: CLAW device %.8s: System validate completed.\n" */
+/*? Text: "%s: %s: CLAW device %.8s: Connection completed link_id=%d.\n" */
+/*? Text: "%s: %s: remote side is not ready\n" */
+/*? Text: "%s: %s: write connection restarting\n" */
+/*? Text: "%s: %s: subchannel check for device: %04x - Sch Stat %02x Dev Stat %02x CPA - %04x\n" */
+/*? Text: "%s: %s: Unit Exception occurred in write channel\n" */
+/*? Text: "%s: %s: Resetting Event occurred:\n" */
+/*? Text: "%s: %s: Recv Conn Confirm:Vers=%d,link_id=%d,Corr=%d,Host appl=%.8s,WS appl=%.8s\n" */
+/*? Text: "%s: %s: Recv Conn Req: Vers=%d,link_id=%d,Corr=%d,HOST appl=%.8s,WS appl=%.8s\n" */
+/*? Text: "%s: %s: Recv Sys Validate Request: Vers=%d,link_id=%d,Corr=%d,WS name=%.8s,Host name=%.8s\n" */
+/*? Text: "%s: %s: Confirmed Now packing\n" */
+/*? Text: "%s: %s: Unit Check Occured in write channel\n" */
+/*? Text: "%s: %s: Restart is required after remote side recovers \n" */
+/*? Text: "%s: %s: sys Validate Rsize:%d Wsize:%d\n" */
+/*? Text: "%s: %s:readsize=%d writesize=%d readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n" */
+/*? Text: "%s: %s:host_name:%.8s, adapter_name :%.8s api_type: %.8s\n" */
+/*? Text: "Driver unloaded\n" */
+/*? Text: "Loading %s\n" */
+/*? Text: "%s: will be removed.\n" */
+/*? Text: "%s: add for %s\n" */
+/*? Text: "%s: %s: shutting down \n" */
+/*? Text: "%s: CLAW device %.8s: System validate completed.\n" */
+/*? Text: "%s: %s: Disconnect: Vers=%d,link_id=%d,Corr=%d\n" */
+/*? Text: "%s: %s: Recv Conn Resp: Vers=%d,link_id=%d,Corr=%d,RC=%d,Host appl=%.8s, WS appl=%.8s\n" */
--- /dev/null
+/*?
+ * Text: "The cpcmd kernel function failed to allocate a response buffer\n"
+ * Severity: Warning
+ * Description:
+ * IPL code, console detection, and device drivers like vmcp or vmlogrdr use
+ * the cpcmd kernel function to send commands to the z/VM control program (CP).
+ * If a program that uses the cpcmd function does not allocate a contiguous
+ * response buffer below 2 GB guest real storage, cpcmd creates a bounce buffer
+ * to be used as the response buffer. Because of low memory or memory
+ * fragmentation, cpcmd could not create the bounce buffer.
+ * User action:
+ * Look for related page allocation failure messages and at the stack trace to
+ * find out which program or operation failed. Free some memory and retry the
+ * failed operation. Consider allocating more memory to your z/VM guest virtual
+ * machine.
+ */
+
--- /dev/null
+/*?
+ * Text: "Processor %d started, address %d, identification %06X\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: logical CPU number
+ * @2: CPU address
+ * @3: CPU identification number
+ * Description:
+ * The kernel detected a CPU with the given characteristics.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "Processor %d stopped\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: logical CPU number
+ * Description:
+ * A logical CPU has been set offline.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "%d configured CPUs, %d standby CPUs\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: number of configured CPUs
+ * @2: number of standby CPUs
+ * Description:
+ * The kernel detected the given number of configured and standby CPUs.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "The CPU configuration topology of the machine is:"
+ * Severity: Informational
+ * Description:
+ * The first six values of the topology information represent fields Mag6 to
+ * Mag1 of system-information block (SYSIB) 15.1.2. These fields specify the
+ * maximum numbers of topology-list entries (TLE) at successive topology nesting
+ * levels. The last value represents the MNest value of SYSIB 15.1.2 which
+ * specifies the maximum possible nesting that can be configured through
+ * dynamic changes. For details see the SYSIB 15.1.2 information in the
+ * "Principles of Operation."
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "CPU %i exceeds the maximum %i and is excluded from the dump\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: CPU number
+ * @2: maximum CPU number
+ * Description:
+ * The Linux kernel is used as a system dumper but it runs on more CPUs than
+ * it has been compiled for with the CONFIG_NR_CPUS kernel configuration
+ * option. The system dump will be created but information on one or more
+ * CPUs will be missing.
+ * User action:
+ * Update the system dump kernel to a newer version that supports more
+ * CPUs or reduce the number of installed CPUs and reproduce the problem
+ * that should be analyzed. If you send the system dump that prompted this
+ * message to a support organization, be sure to communicate that the dump
+ * does not include all CPU information.
+ */
--- /dev/null
+/*?
+ * Text: "%s: An I/O-error occurred on the CTCM device\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the CTCM device
+ * Description:
+ * An I/O error was detected on one of the subchannels of the CTCM device.
+ * Depending on the error, the CTCM device driver might attempt an automatic
+ * recovery.
+ * User action:
+ * Check the status of the CTCM device, for example, with ifconfig. If the
+ * device is not operational, perform a manual recovery. See "Device Drivers,
+ * Features, and Commands" for details about how to recover a CTCM device.
+ */
+
+/*?
+ * Text: "%s: An adapter hardware operation timed out\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the CTCM device
+ * Description:
+ * The CTCM device uses an adapter to physically connect to its communication
+ * peer. An operation on this adapter timed out.
+ * User action:
+ * Check the status of the CTCM device, for example, with ifconfig. If the
+ * device is not operational, perform a manual recovery. See "Device Drivers,
+ * Features, and Commands" for details about how to recover a CTCM device.
+ */
+
+/*?
+ * Text: "%s: An error occurred on the adapter hardware\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the CTCM device
+ * Description:
+ * The CTCM device uses an adapter to physically connect to its communication
+ * peer. An operation on this adapter returned an error.
+ * User action:
+ * Check the status of the CTCM device, for example, with ifconfig. If the
+ * device is not operational, perform a manual recovery. See "Device Drivers,
+ * Features, and Commands" for details about how to recover a CTCM device.
+ */
+
+/*?
+ * Text: "%s: The communication peer has disconnected\n"
+ * Severity: Notice
+ * Parameter:
+ * @1: channel ID
+ * Description:
+ * The remote device has disconnected. Possible reasons are that the remote
+ * interface has been closed or that the operating system instance with the
+ * communication peer has been rebooted or shut down.
+ * User action:
+ * Check the status of the peer device. Ensure that the peer operating system
+ * instance is running and that the peer interface is operational.
+ */
+
+/*?
+ * Text: "%s: The remote operating system is not available\n"
+ * Severity: Notice
+ * Parameter:
+ * @1: channel ID
+ * Description:
+ * The operating system instance with the communication peer has disconnected.
+ * Possible reasons are that the operating system instance has been rebooted
+ * or shut down.
+ * User action:
+ * Ensure that the peer operating system instance is running and that the peer
+ * interface is operational.
+ */
+
+/*?
+ * Text: "%s: The adapter received a non-specific IRQ\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the CTCM device
+ * Description:
+ * The adapter hardware used by the CTCM device received an IRQ that cannot
+ * be mapped to a particular device. This is a hardware problem.
+ * User action:
+ * Check the status of the CTCM device, for example, with ifconfig. Check if
+ * the connection to the remote device still works. If the CTCM device is not
+ * operational, set it offline and back online. If this does not resolve the
+ * problem, perform a manual recovery. See "Device Drivers, Features, and
+ * Commands" for details about how to recover a CTCM device. If this problem
+ * persists, gather Linux debug data, collect the hardware logs, and report the
+ * problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: A check occurred on the subchannel\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the CTCM device
+ * Description:
+ * A check condition has been detected on the subchannel.
+ * User action:
+ * Check if the connection to the remote device still works. If the CTCM device
+ * is not operational, set it offline and back online. If this does not resolve
+ * the problem, perform a manual recovery. See "Device Drivers, Features, and
+ * Commands" for details about how to recover a CTCM device. If this problem
+ * persists, gather Linux debug data and report the problem to your support
+ * organization.
+ */
+
+/*?
+ * Text: "%s: The communication peer is busy\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: channel ID
+ * Description:
+ * A busy target device was reported. This might be a temporary problem.
+ * User action:
+ * If this problem persists or is reported frequently ensure that the target
+ * device is working properly.
+ */
+
+/*?
+ * Text: "%s: The specified target device is not valid\n"
+ * Severity: Error
+ * Parameter:
+ * @1: channel ID
+ * Description:
+ * A target device was called with a faulty device specification. This is an
+ * adapter hardware problem.
+ * User action:
+ * Gather Linux debug data, collect the hardware logs, and contact IBM support.
+ */
+
+/*?
+ * Text: "%s: An I/O operation resulted in error %04x\n"
+ * Severity: Error
+ * Parameter:
+ * @1: channel ID
+ * @2: error information
+ * Description:
+ * A hardware operation ended with an error.
+ * User action:
+ * Check the status of the CTCM device, for example, with ifconfig. If the
+ * device is not operational, perform a manual recovery. See "Device Drivers,
+ * Features, and Commands" for details about how to recover a CTCM device.
+ * If this problem persists, gather Linux debug data, collect the hardware logs,
+ * and report the problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: Initialization failed with RX/TX init handshake error %s\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the CTCM device
+ * @2: error information
+ * Description:
+ * A problem occurred during the initialization of the connection. If the
+ * connection can be established after an automatic recovery, a success message
+ * is issued.
+ * User action:
+ * If the problem is not resolved by the automatic recovery process, check the
+ * local and remote device. If this problem persists, gather Linux debug data
+ * and report the problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: The network backlog for %s is exceeded, package dropped\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the CTCM device
+ * @2: calling function
+ * Description:
+ * There is more network traffic than can be handled by the device. The device
+ * is closed and some data has not been transmitted. The device might be
+ * recovered automatically.
+ * User action:
+ * Investigate and resolve the congestion. If necessary, set the device
+ * online to make it operational.
+ */
+
+/*?
+ * Text: "%s: The XID used in the MPC protocol is not valid, rc = %d\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the CTCM device
+ * @2: return code
+ * Description:
+ * The exchange identification (XID) used by the CTCM device driver when
+ * in MPC mode is not valid.
+ * User action:
+ * Note the error information provided with this message and contact your
+ * support organization.
+ */
+
+/*? Text: "CTCM driver unloaded\n" */
+/*? Text: "%s: %s Internal error: net_device is NULL, ch = 0x%p\n" */
+/*? Text: "%s / register_cu3088_discipline failed, ret = %d\n" */
+/*? Text: "%s: %s: Internal error: Can't determine channel for interrupt device %s\n" */
+/*? Text: "CTCM driver initialized\n" */
+/*? Text: "%s: setup OK : r/w = %s/%s, protocol : %d\n" */
+/*? Text: "%s: Connected with remote side\n" */
+/*? Text: "%s: Restarting device\n" */
+
--- /dev/null
+/* dasd_ioctl */
+
+/*?
+ * Text: "%s: The DASD has been put in the quiesce state\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * No I/O operation is possible on this device.
+ * User action:
+ * Resume the DASD to enable I/O operations.
+ */
+
+/*?
+ * Text: "%s: I/O operations have been resumed on the DASD\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * The DASD is no longer in state quiesce and I/O operations can be performed
+ * on the device.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "%s: The DASD cannot be formatted while it is enabled\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * The DASD you try to format is enabled. Enabled devices cannot be formatted.
+ * User action:
+ * Contact the owner of the formatting tool.
+ */
+
+/*?
+ * Text: "%s: The specified DASD is a partition and cannot be formatted\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * The DASD you try to format is a partition. Partitions cannot be formatted
+ * separately. You can only format a complete DASD including all its partitions.
+ * User action:
+ * Format the complete DASD.
+ * ATTENTION: Formatting irreversibly destroys all data on all partitions
+ * of the DASD.
+ */
+
+/*?
+ * Text: "%s: Formatting unit %d failed with rc=%d\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the DASD
+ * @2: start track
+ * @3: return code
+ * Description:
+ * The formatting process might have been interrupted by a signal, for example,
+ * CTRL+C. If the process was not interrupted intentionally, an I/O error
+ * might have occurred.
+ * User action:
+ * Retry to format the device. If the error persists, check the log file for
+ * related error messages. If you cannot resolve the error, note the return
+ * code and contact your support organization.
+ */
+
+
+/* dasd */
+
+/*?
+ * Text: "%s: start_IO run out of retries and failed with request %s\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the DASD
+ * @2: pointer to request
+ * Description:
+ * The start IO function tried to start an IO request but the number
+ * of retries for the I/O was exceeded before the request could be started.
+ * User action:
+ * Check for related previous error messages.
+ */
+
+/*?
+ * Text: "%s: Cancelling request %p failed with rc=%d\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the DASD
+ * @2: pointer to request
+ * @3: return code of previous function
+ * Description:
+ * In response to a user action, the DASD device driver tried but failed to
+ * cancel a previously started I/O operation.
+ * User action:
+ * Try the action again.
+ */
+
+/*?
+ * Text: "%s: Flushing the DASD request queue failed for request %p\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the DASD
+ * @2: pointer to request
+ * Description:
+ * As part of the unloading process, the DASD device driver flushes the
+ * request queue. This failed because a previously started I/O operation
+ * could not be canceled.
+ * User action:
+ * Try again to unload the DASD device driver or to shut down Linux.
+ */
+
+/*?
+ * Text: "The DASD device driver could not be initialized\n"
+ * Severity: Informational
+ * Description:
+ * The initialization of the DASD device driver failed because of previous
+ * errors.
+ * User action:
+ * Check for related previous error messages.
+ */
+
+/*?
+ * Text: "%s: Accessing the DASD failed because it is in probeonly mode\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * The dasd= module or kernel parameter specified the probeonly attribute for
+ * the DASD you are trying to access. The DASD device driver cannot access
+ * DASDs that are in probeonly mode.
+ * User action:
+ * Change the dasd= parameter as to omit probeonly for the DASD and reload
+ * the DASD device driver. If the DASD device driver has been compiled into
+ * the kernel, reboot Linux.
+ */
+
+/*?
+ * Text: "%s: cqr %p timed out (%is), %i retries remaining\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the DASD
+ * @2: pointer to request
+ * @3: timeout value
+ * @4: number of retries left
+ * Description:
+ * One try of the error recovery procedure (ERP) for the channel queued request
+ * (cqr) timed out and failed to recover the error. ERP continues for the DASD.
+ * User action:
+ * Ignore this message if it occurs infrequently and if the recovery succeeds
+ * during one of the retries. If this error persists, check for related
+ * previous error messages and report the problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: cqr %p timed out (%is) but cannot be ended, retrying in 5 s\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the DASD
+ * @2: pointer to request
+ * @3: timeout value
+ * Description:
+ * A try of the error recovery procedure (ERP) for the channel queued request
+ * (cqr) timed out and failed to recover the error. The I/O request submitted
+ * during the try could not be canceled. The ERP waits for 5 seconds before
+ * trying again.
+ * User action:
+ * Ignore this message if it occurs infrequently and if the recovery succeeds
+ * during one of the retries. If this error persists, check for related
+ * previous error messages and report the problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: The DASD cannot be set offline while it is in use\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * The DASD cannot be set offline because it is in use by an internal process.
+ * An action to free the DASD might not have completed yet.
+ * User action:
+ * Wait some time and set the DASD offline later.
+ */
+
+/*?
+ * Text: "%s: The DASD cannot be set offline with open count %i\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * @2: count
+ * Description:
+ * The DASD is being used by one or more processes and cannot be set offline.
+ * User action:
+ * Ensure that the DASD is not in use anymore, for example, unmount all
+ * partitions. Then try again to set the DASD offline.
+ */
+
+/*?
+ * Text: "%s: Setting the DASD online failed with rc=%d\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * @2: return code
+ * Description:
+ * The DASD could not be set online because of previous errors.
+ * User action:
+ * Look for previous error messages. If you cannot resolve the error, note
+ * the return code and contact your support organization.
+ */
+
+/*?
+ * Text: "%s Setting the DASD online with discipline %s failed with rc=%i\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * @2: discipline
+ * @3: return code
+ * Description:
+ * The DASD could not be set online because of previous errors.
+ * User action:
+ * Look for previous error messages. If you cannot resolve the error, note the
+ * return code and contact your support organization.
+ */
+
+/*?
+ * Text: "%s Setting the DASD online failed because of missing DIAG discipline\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * The DASD was to be set online with discipline DIAG but this discipline of
+ * the DASD device driver is not available.
+ * User action:
+ * Ensure that the dasd_diag_mod module is loaded. If your Linux system does
+ * not include this module, you cannot set DASDs online with the DIAG
+ * discipline.
+ */
+
+/*?
+ * Text: "%s Setting the DASD online failed because of a missing discipline\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * The DASD was to be set online with a DASD device driver discipline that
+ * is not available.
+ * User action:
+ * Ensure that all DASD modules are loaded correctly.
+ */
+
+
+
+/*?
+ * Text: "The statistics feature has been switched off\n"
+ * Severity: Informational
+ * Description:
+ * The statistics feature of the DASD device driver has been switched off.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "The statistics feature has been switched on\n"
+ * Severity: Informational
+ * Description:
+ * The statistics feature of the DASD device driver has been switched on.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "The statistics have been reset\n"
+ * Severity: Informational
+ * Description:
+ * The DASD statistics data have been reset.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "%s is not a supported value for /proc/dasd/statistics\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: value
+ * Description:
+ * An incorrect value has been written to /proc/dasd/statistics.
+ * The supported values are: 'set on', 'set off', and 'reset'.
+ * User action:
+ * Write a supported value to /proc/dasd/statistics.
+ */
+
+/*?
+ * Text: "%s is not a valid device range\n"
+ * Severity: Error
+ * Parameter:
+ * @1: range
+ * Description:
+ * A device range specified with the dasd= parameter is not valid.
+ * User action:
+ * Examine the dasd= parameter and correct the device range.
+ */
+
+/*?
+ * Text: "The probeonly mode has been activated\n"
+ * Severity: Informational
+ * Description:
+ * The probeonly mode of the DASD device driver has been activated. In this
+ * mode the device driver rejects any 'open' syscalls with EPERM.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "The IPL device is not a CCW device\n"
+ * Severity: Error
+ * Description:
+ * The value for the dasd= parameter contains the 'ipldev' keyword. During
+ * the boot process this keyword is replaced with the device from which the
+ * IPL was performed. The 'ipldev' keyword is not valid if the IPL device is
+ * not a CCW device.
+ * User action:
+ * Do not specify the 'ipldev' keyword when performing an IPL from a device
+ * other than a CCW device.
+ */
+
+/*?
+ * Text: "A closing parenthesis ')' is missing in the dasd= parameter\n"
+ * Severity: Warning
+ * Description:
+ * The specification for the dasd= kernel or module parameter has an opening
+ * parenthesis '(' without a matching closing parenthesis ')'.
+ * User action:
+ * Correct the parameter value.
+ */
+
+/*?
+ * Text: "The autodetection mode has been activated\n"
+ * Severity: Informational
+ * Description:
+ * The autodetection mode of the DASD device driver has been activated. In
+ * this mode the DASD device driver sets all detected DASDs online.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "%*s is not a supported device option\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: length of option code
+ * @2: option code
+ * Description:
+ * The dasd= parameter includes an unknown option for a DASD or a device range.
+ * Options are specified in parentheses and immediately follow a device or
+ * device range.
+ * User action:
+ * Check the dasd= syntax and remove any unsupported options from the dasd=
+ * parameter specification.
+ */
+
+/*?
+ * Text: "PAV support has be deactivated\n"
+ * Severity: Informational
+ * Description:
+ * The 'nopav' keyword has been specified with the dasd= kernel or module
+ * parameter. The Parallel Access Volume (PAV) support of the DASD device
+ * driver has been deactivated.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "'nopav' is not supported on z/VM\n"
+ * Severity: Informational
+ * Description:
+ * For Linux instances that run as guest operating systems of the z/VM
+ * hypervisor Parallel Access Volume (PAV) support is controlled by z/VM not
+ * by Linux.
+ * User action:
+ * Remove 'nopav' from the dasd= module or kernel parameter specification.
+ */
+
+/*?
+ * Text: "High Performance FICON support has been deactivated\n"
+ * Severity: Informational
+ * Description:
+ * The 'nofcx' keyword has been specified with the dasd= kernel or module
+ * parameter. The High Performance FICON (transport mode) support of the DASD
+ * device driver has been deactivated.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "The dasd= parameter value %s has an invalid ending\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: parameter value
+ * Description:
+ * The specified value for the dasd= kernel or module parameter is not correct.
+ * User action:
+ * Check the module or the kernel parameter.
+ */
+
+/*?
+ * Text: "Registering the device driver with major number %d failed\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: DASD major
+ * Description:
+ * Major number 94 is reserved for the DASD device driver. The DASD device
+ * driver failed to register with this major number. Another device driver
+ * might have used major number 94.
+ * User action:
+ * Determine which device driver uses major number 94 instead of the DASD
+ * device driver and unload this device driver. Then try again to load the
+ * DASD device driver.
+ */
+
+/*?
+ * Text: "%s: default ERP has run out of retries and failed\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * The error recovery procedure (ERP) tried to recover an error but the number
+ * of retries for the I/O was exceeded before the error could be resolved.
+ * User action:
+ * Check for related previous error messages.
+ */
+
+/*?
+ * Text: "%s: Unable to terminate request %p on suspend\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the DASD
+ * @2: pointer to request
+ * Description:
+ * As part of the suspend process, the DASD device driver terminates requests
+ * on the request queue. This failed because a previously started I/O operation
+ * could not be canceled. The suspend process will be stopped.
+ * User action:
+ * Try again to suspend the system.
+ */
+
+/*?
+ * Text: "%s: ERP failed for the DASD\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * An error recovery procedure (ERP) was performed for the DASD but failed.
+ * User action:
+ * Check the message log for previous related error messages.
+ */
+
+/*?
+ * Text: "%s: An error occurred in the DASD device driver, reason=%s\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the DASD
+ * @2: reason code
+ * Description:
+ * This problem indicates a program error in the DASD device driver.
+ * User action:
+ * Note the reason code and contact your support organization.
+ */
--- /dev/null
+/* dasd_diag */
+
+/*?
+ * Text: "%s: A 64-bit DIAG call failed\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * 64-bit DIAG calls require a 64-bit z/VM version.
+ * User action:
+ * Use z/VM 5.2 or later or set the sysfs 'use_diag' attribute of the DASD to 0
+ * to switch off DIAG.
+ */
+
+/*?
+ * Text: "%s: Accessing the DASD failed because of an incorrect format (rc=%d)\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * @2: return code
+ * Description:
+ * The format of the DASD is not correct.
+ * User action:
+ * Check the device format. For details about the return code see the
+ * section about the INITIALIZE function for DIAGNOSE Code X'250'
+ * in "z/VM CP Programming Services". If you cannot resolve the error, note
+ * the return code and contact your support organization.
+ */
+
+/*?
+ * Text: "%s: New DASD with %ld byte/block, total size %ld KB%s\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: bus ID of the DASD
+ * @2: bytes per block
+ * @3: size
+ * @4: access mode
+ * Description:
+ * A DASD with the indicated block size and total size has been set online.
+ * If the DASD is configured as read-only to the real or virtual hardware,
+ * the message includes an indication of this hardware access mode. The
+ * hardware access mode is independent from the 'readonly' attribute of
+ * the device in sysfs.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "%s: DIAG ERP failed with rc=%d\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * @2: return code
+ * Description:
+ * An error in the DIAG processing could not be recovered by the error
+ * recovery procedure (ERP) of the DIAG discipline.
+ * User action:
+ * Note the return code, check for related I/O errors, and report this problem
+ * to your support organization.
+ */
+
+/*?
+ * Text: "%s: DIAG initialization failed with rc=%d\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * @2: return code
+ * Description:
+ * Initializing the DASD with the DIAG discipline failed. Possible reasons for
+ * this problem are that the device has a device type other than FBA or ECKD,
+ * or has a block size other than one of the supported sizes:
+ * 512 byte, 1024 byte, 2048 byte, or 4096 byte.
+ * User action:
+ * Ensure that the device can be written to and has a supported device type
+ * and block size. For details about the return code see the section about
+ * the INITIALIZE function for DIAGNOSE Code X'250' in "z/VM CP Programming
+ * Services". If you cannot resolve the error, note the error code and contact
+ * your support organization.
+ */
+
+/*?
+ * Text: "%s: Device type %d is not supported in DIAG mode\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * @2: device type
+ * Description:
+ * Only DASD of type FBA and ECKD are supported in DIAG mode.
+ * User action:
+ * Set the sysfs 'use_diag' attribute of the DASD to 0 and try again to access
+ * the DASD.
+ */
+
+/*?
+ * Text: "Discipline %s cannot be used without z/VM\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: discipline name
+ * Description:
+ * The discipline that is specified with the dasd= kernel or module parameter
+ * is only available for Linux instances that run as guest operating
+ * systems of the z/VM hypervisor.
+ * User action:
+ * Remove the unsupported discipline from the parameter string.
+ */
+
+/*?
+ * Text: "%s: The access mode of a DIAG device changed to read-only"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * A device changed its access mode from writeable to
+ * read-only while in use.
+ * User action:
+ * Set the device offline, ensure that the device is configured correctly in
+ * z/VM, then set the device online again.
+ */
--- /dev/null
+/* dasd_eckd */
+
+/*?
+ * Text: "%s: ERP failed for the DASD\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * An error recovery procedure (ERP) was performed for the DASD but failed.
+ * User action:
+ * Check the message log for previous related error messages.
+ */
+
+/*?
+ * Text: "%s: An error occurred in the DASD device driver, reason=%s\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the DASD
+ * @2: reason code
+ * Description:
+ * This problem indicates a program error in the DASD device driver.
+ * User action:
+ * Note the reason code and contact your support organization.
+ */
+
+/*?
+ * Text: "%s: Allocating memory for private DASD data failed\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * The DASD device driver maintains data structures for each DASD it manages.
+ * There is not enough memory to allocate these data structures for one or
+ * more DASDs.
+ * User action:
+ * Free some memory and try the operation again.
+ */
+
+/*?
+ * Text: "%s: DASD with %d KB/block, %d KB total size, %d KB/track, %s\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: bus ID of the DASD
+ * @2: block size
+ * @3: DASD size
+ * @4: track size
+ * @5: disc layout
+ * Description:
+ * A DASD with the shown characteristics has been set online.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "%s: Start track number %d used in formatting is too big\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * @2: track number
+ * Description:
+ * The DASD format I/O control was used incorrectly by a formatting tool.
+ * User action:
+ * Contact the owner of the formatting tool.
+ */
+
+/*?
+ * Text: "%s: The DASD is not formatted\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * A DASD has been set online but it has not been formatted yet. You must
+ * format the DASD before you can use it.
+ * User action:
+ * Format the DASD, for example, with dasdfmt.
+ */
+
+/*?
+ * Text: "%s: 0x%x is not a known command\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the DASD
+ * @2: command
+ * Description:
+ * This problem is likely to be caused by a programming error.
+ * User action:
+ * Contact your support organization.
+ */
+
+/*?
+ * Text: "%s: Track 0 has no records following the VTOC\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * Linux has identified a volume table of contents (VTOC) on the DASD but
+ * cannot read any data records following the VTOC. A possible cause of this
+ * problem is that the DASD has been used with another System z operating
+ * system.
+ * User action:
+ * Format the DASD for usage with Linux, for example, with dasdfmt.
+ * ATTENTION: Formatting irreversibly destroys all data on the DASD.
+ */
+
+/*?
+ * Text: "%s: An I/O control call used incorrect flags 0x%x\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * @2: flags
+ * Description:
+ * The DASD format I/O control was used incorrectly.
+ * User action:
+ * Contact the owner of the formatting tool.
+ */
+
+/*?
+ * Text: "%s: New DASD %04X/%02X (CU %04X/%02X) with %d cylinders, %d heads, %d sectors\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: bus ID of the DASD
+ * @2: device type
+ * @3: device model
+ * @4: control unit type
+ * @5: control unit model
+ * @6: number of cylinders
+ * @7: tracks per cylinder
+ * @8: sectors per track
+ * Description:
+ * A DASD with the shown characteristics has been set online.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "%s: The disk layout of the DASD is not supported\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * The DASD device driver only supports the following disk layouts: CDL, LDL,
+ * FBA, CMS, and CMS RESERVED.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "%s: Start track %d used in formatting exceeds end track\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * @2: track number
+ * Description:
+ * The DASD format I/O control was used incorrectly by a formatting tool.
+ * User action:
+ * Contact the owner of the formatting tool.
+ */
+
+/*?
+ * Text: "%s: The DASD cache mode was set to %x (%i cylinder prestage)\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: bus ID of the DASD
+ * @2: operation mode
+ * @3: number of cylinders
+ * Description:
+ * The DASD cache mode has been changed. See the storage system documentation
+ * for information about the different cache operation modes.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "%s: The DASD cannot be formatted with block size %d\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * @2: block size
+ * Description:
+ * The block size specified for a format instruction is not valid. The block
+ * size must be between 512 and 4096 byte and must be a power of 2.
+ * User action:
+ * Call the format command with a supported block size.
+ */
+
+/*?
+ * Text: "%s: The UID of the DASD has changed\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * The Unique Identifier (UID) of a DASD that is currently in use has changed.
+ * This indicates that the physical disk has been replaced.
+ * User action:
+ * None if the replacement was intentional.
+ * If the disk change is not expected, stop using the disk to prevent possible
+ * data loss.
+ */
+
+
+/* dasd_3990_erp */
+
+/*?
+ * Text: "%s: is offline or not installed - INTERVENTION REQUIRED!!\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * The DASD to be accessed is not in an accessible state. The I/O operation
+ * will wait until the device is operational again. This is an operating system
+ * independent message that is issued by the storage system.
+ * User action:
+ * Make the DASD accessible again. For details see the storage system
+ * documentation.
+ */
+
+/*?
+ * Text: "%s: The DASD cannot be reached on any path (lpum=%x/opm=%x)\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the DASD
+ * @2: last path used mask
+ * @3: online path mask
+ * Description:
+ * After a path to the DASD failed, the error recovery procedure of the DASD
+ * device driver tried but failed to reconnect the DASD through an alternative
+ * path.
+ * User action:
+ * Ensure that the cabling between the storage server and the mainframe
+ * system is securely in place. Check the file systems on the DASD when it is
+ * accessible again.
+ */
+
+/*?
+ * Text: "%s: Unable to allocate DCTL-CQR\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an internal error.
+ * User action:
+ * Contact your support organization.
+ */
+
+/*?
+ * Text: "%s: FORMAT 0 - Invalid Parameter\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * A data argument of a command is not valid. This is an operating system
+ * independent message that is issued by the storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 0 - DPS Installation Check\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This operating system independent message is issued by the storage system
+ * for one of the following reasons:
+ * - A 3380 Model D or E DASD does not have the Dynamic Path Selection (DPS)
+ * feature in the DASD A-unit.
+ * - The device type of an attached DASD is not supported by the firmware.
+ * - A type 3390 DASD is attached to a 3 MB channel.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 2 - Reserved\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 1 - Drive motor switch is off\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 0 - CCW Count less than required\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * The CCW count of a command is less than required. This is an operating
+ * system independent message that is issued by the storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 0 - Channel requested ... %02x\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * @2: reason code
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system. The possible reason codes indicate the following problems:
+ * 00 No Message.
+ * 01 The channel has requested unit check sense data.
+ * 02 The channel has requested retry and retry is exhausted.
+ * 03 A SA Check-2 error has occurred. This sense is presented with
+ * Equipment Check.
+ * 04 The channel has requested retry and retry is not possible.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 0 - Status Not As Required: reason %02x\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * @2: reason code
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system. There are several potential reasons for this message;
+ * byte 8 contains the reason code.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 4 - Reserved\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 1 - Device status 1 not valid\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 0 - Storage Path Restart\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * An operation for an active channel program was queued in a Storage Control
+ * when a warm start was received by the path. This is an operating system
+ * independent message that is issued by the storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 0 - Reset Notification\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * A system reset or its equivalent was received on an interface. The Unit
+ * Check that generates this sense is posted to the next channel initiated
+ * selection following the resetting event. This is an operating system
+ * independent message that is issued by the storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 0 - Invalid Command Sequence\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * An incorrect sequence of commands has occurred. This is an operating system
+ * independent message that is issued by the storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 1 - Missing device address bit\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT F - Subsystem Processing Error\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * A firmware logic error has been detected. This is an operating system
+ * independent message that is issued by the storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 1 - Seek incomplete\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 0 - Invalid Command\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * A command was issued that is not in the 2107/1750 command set.
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 0 - Reserved\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 0 - Command Invalid on Secondary Address\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * A command or order not allowed on a PPRC secondary device has been received
+ * by the secondary device. This is an operating system independent message
+ * that is issued by the storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 0 - Invalid Defective/Alternate Track Pointer\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * A defective track has been accessed. The subsystem generates an invalid
+ * Defective/Alternate Track Pointer as a part of RAID Recovery.
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 0 - Channel Returned with Incorrect retry CCW\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * A command portion of the CCW returned after a command retry sequence does
+ * not match the command for which retry was signaled. This is an operating
+ * system independent message that is issued by the storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 0 - Diagnostic of Special Command Violates File Mask\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * A command is not allowed under the Access Authorization specified by the
+ * File Mask. This is an operating system independent message that is issued
+ * by the storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 1 - Head address does not compare\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 1 - Reserved\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 1 - Device did not respond to selection\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 1 - Device check-2 error or Set Sector is not complete\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 0 - Device Error Source\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * The device has completed soft error logging. This is an operating system
+ * independent message that is issued by the storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 0 - Data Pinned for Device\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * Modified data in cache or in persistent storage exists for the DASD. The
+ * data cannot be destaged to the device. This track is the first track pinned
+ * for this device. This is an operating system independent message that is
+ * issued by the storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 6 - Overrun on channel C\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 1 - Device Status 1 not as expected\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 0 - Device Fenced - device = %02x\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * @2: sense data byte 4
+ * Description:
+ * The device shown in sense byte 4 has been fenced. This is an operating
+ * system independent message that is issued by the storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 1 - Interruption cannot be reset\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 1 - Index missing\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT F - DASD Fast Write inhibited\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * DASD Fast Write is not allowed because of a nonvolatile storage battery
+ * check condition. This is an operating system independent message that is
+ * issued by the storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 7 - Invalid tag-in for an extended command sequence\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 4 - Key area error; offset active\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 4 - Count area error; offset active\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 1 - Track physical address did not compare\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 2 - 3990 check-2 error\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 1 - Offset active cannot be reset\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 7 - RCC 1 and RCC 2 sequences not successful\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 4 - No syn byte in count address area; offset active\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 4 - Data area error\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 6 - Overrun on channel A\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 4 - No sync byte in count address area\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 5 - Data Check in the key area\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT F - Caching status reset to default\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * The storage director has assigned two new subsystem status devices and
+ * reset the status to its default value. This is an operating system
+ * independent message that is issued by the storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 5 - Data Check in the data area; offset active\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 5 - Reserved\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 1 - Device not ready\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 4 - No sync byte in key area\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 8 - DASD controller failed to set or reset the long busy latch\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 1 - Cylinder address did not compare\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 3 - Reserved\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 4 - No syn byte in data area; offset active\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 2 - Support facility errors\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 4 - Key area error\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 8 - End operation with transfer count not zero\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 2 - Microcode detected error %02x\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * @2: error code
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 5 - Data Check in the count area; offset active\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 3 - Allegiance terminated\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * Allegiance terminated because of a Reset Allegiance or an Unconditional
+ * Reserve command on another channel. This is an operating system independent
+ * message that is issued by the storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 4 - Home address area error\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 4 - Count area error\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 7 - Invalid tag-in during selection sequence\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 4 - No sync byte in data area\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 4 - No sync byte in home address area; offset active\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 4 - Home address area error; offset active\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 4 - Data area error; offset active\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 4 - No sync byte in home address area\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 5 - Data Check in the home address area; offset active\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 5 - Data Check in the home address area\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 5 - Data Check in the count area\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 4 - No sync byte in key area; offset active\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 7 - Invalid DCC selection response or timeout\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 5 - Data Check in the data area\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT F - Operation Terminated\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * The storage system ends an operation related to an active channel program
+ * when termination and redrive are required and logging is not desired.
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 6 - Overrun on channel B\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 5 - Data Check in the key area; offset active\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT F - Volume is suspended duplex\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * The duplex pair volume has entered the suspended duplex state because of a
+ * failure. This is an operating system independent message that is issued by
+ * the storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 6 - Overrun on channel D\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 7 - RCC 1 sequence not successful\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 6 - Overrun on channel E\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 7 - 3990 microcode time out when stopping selection\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 6 - Overrun on channel F\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 6 - Reserved\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 7 - RCC initiated by a connection check alert\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 6 - Overrun on channel G\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 7 - extra RCC required\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 6 - Overrun on channel H\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 8 - Unexpected end operation response code\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 7 - Permanent path error (DASD controller not available)\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 7 - Missing end operation; device transfer incomplete\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT D - Reserved\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT F - Cache or nonvolatile storage equipment failure\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * An equipment failure has occurred in the cache storage or nonvolatile
+ * storage of the storage system. This is an operating system independent
+ * message that is issued by the storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 8 - DPS cannot be filled\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 8 - Error correction code hardware fault\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 7 - Missing end operation; device transfer complete\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 7 - DASD controller not available on disconnected command chain\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 8 - No interruption from device during a command chain\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 7 - No response to selection after a poll interruption\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 9 - Track physical address did not compare while oriented\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 9 - Head address did not compare\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 7 - Invalid tag-in for an immediate command sequence\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 9 - Cylinder address did not compare\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 8 - DPS checks after a system reset or selective reset\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT F - Caching reinitiated\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * Caching has been automatically reinitiated following an error.
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 8 - End operation with transfer count zero\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 7 - Reserved\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 9 - Reserved\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 8 - Short busy time-out during device selection\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT F - Caching terminated\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * The storage system was unable to initiate caching or had to suspend caching
+ * for a 3990 control unit. If this problem is caused by a failure condition,
+ * an additional message will provide more information about the failure.
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * Check for additional messages that point out possible failures. For more
+ * information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT F - Subsystem status cannot be determined\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * The status of a DASD Fast Write or PPRC volume cannot be determined.
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT F - Nonvolatile storage terminated\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * The storage director has stopped using nonvolatile storage or cannot
+ * initiate nonvolatile storage. If this problem is caused by a failure, an
+ * additional message will provide more information about the failure. This is
+ * an operating system independent message that is issued by the storage system.
+ * User action:
+ * Check for additional messages that point out possible failures. For more
+ * information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT 8 - Reserved\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: Write inhibited path encountered\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an informational message.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "%s: FORMAT 9 - Device check-2 error\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This is an operating system independent message that is issued by the
+ * storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT F - Track format incorrect\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * A track format error occurred while data was being written to the DASD or
+ * while a duplex pair was being established. This is an operating system
+ * independent message that is issued by the storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: FORMAT F - Cache fast write access not authorized\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * A request for Cache Fast Write Data access cannot be satisfied because
+ * of missing access authorization for the storage system. This is an operating
+ * system independent message that is issued by the storage system.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: Data recovered during retry with PCI fetch mode active\n"
+ * Severity: Emerg
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * A data error has been recovered on the storage system but the Linux file
+ * system cannot be informed about the data mismatch. To prevent Linux from
+ * running with incorrect data, the DASD device driver will trigger a kernel
+ * panic.
+ * User action:
+ * Reset your real or virtual hardware and reboot Linux.
+ */
+
+/*?
+ * Text: "%s: The specified record was not found\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * The record to be accessed does not exist. The DASD might be unformatted
+ * or defective.
+ * User action:
+ * Try to format the DASD or replace it.
+ * ATTENTION: Formatting irreversibly destroys all data on the DASD.
+ */
+
+/*?
+ * Text: "%s: ERP %p (%02x) refers to %p\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the DASD
+ * @2: pointer to ERP
+ * @3: ERP status
+ * @4: cqr
+ * Description:
+ * This message provides debug information for the enhanced error recovery
+ * procedure (ERP).
+ * User action:
+ * If you do not need this information, you can suppress this message by
+ * switching off ERP logging, for example, by writing '0' to the 'erplog'
+ * sysfs attribute of the DASD.
+ */
+
+/*?
+ * Text: "%s: ERP chain at END of ERP-ACTION\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This message provides debug information for the enhanced error recovery
+ * procedure (ERP).
+ * User action:
+ * If you do not need this information, you can suppress this message by
+ * switching off ERP logging, for example, by writing '0' to the 'erplog'
+ * sysfs attribute of the DASD.
+ */
+
+/*?
+ * Text: "%s: The cylinder data for accessing the DASD is inconsistent\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * An error occurred in the storage system hardware.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: Accessing the DASD failed because of a hardware error\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * An error occurred in the storage system hardware.
+ * User action:
+ * For more information see the documentation of your storage system.
+ */
+
+/*?
+ * Text: "%s: ERP chain at BEGINNING of ERP-ACTION\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * This message provides debug information for the enhanced error recovery
+ * procedure (ERP).
+ * User action:
+ * If you do not need this information, you can suppress this message by
+ * switching off ERP logging, for example, by writing '0' to the 'erplog'
+ * sysfs attribute of the DASD.
+ */
+
+/*?
+ * Text: "%s: ERP %p has run out of retries and failed\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the DASD
+ * @2: ERP pointer
+ * Description:
+ * The error recovery procedure (ERP) tried to recover an error but the number
+ * of retries for the I/O was exceeded before the error could be resolved.
+ * User action:
+ * Check for related previous error messages.
+ */
+
+/*?
+ * Text: "%s: ERP failed\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * The error recovery procedure (ERP) tried to recover an error but has
+ * failed. A retry is not recommended. The I/O will also fail.
+ * User action:
+ * Check for related previous error messages.
+ */
+
+/*?
+ * Text: "%s: SIM - SRC: %02x%02x%02x%02x\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the DASD
+ * @2: sense byte
+ * @3: sense byte
+ * @4: sense byte
+ * @5: sense byte
+ * Description:
+ * This error message is a System Information Message (SIM) generated by the
+ * storage system. The System Reference Code (SRC) defines the error in detail.
+ * User action:
+ * Look up the SRC in the storage server documentation.
+ */
+
+/*?
+ * Text: "%s: log SIM - SRC: %02x%02x%02x%02x\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * @2: sense byte
+ * @3: sense byte
+ * @4: sense byte
+ * @5: sense byte
+ * Description:
+ * This System Information Message (SIM) is generated by the storage system.
+ * The System Reference Code (SRC) defines the error in detail.
+ * User action:
+ * Look up the SRC in the storage server documentation.
+ */
+
+/*?
+ * Text: "%s: Reading device feature codes failed with rc=%d\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * @2: return code
+ * Description:
+ * The device feature codes state which advanced features are supported by a
+ * device.
+ * Examples for advanced features are PAV or high performance FICON.
+ * Some early devices do not provide feature codes and no advanced features are
+ * available on these devices.
+ * User action:
+ * None, if the DASD does not provide feature codes. If the DASD provides
+ * feature codes, make sure that it is working correctly, then set it offline
+ * and back online.
+ */
+
+/*?
+ * Text: "%s: A channel path group could not be established\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * Initialization of a DASD did not complete because a channel path group
+ * could not be established.
+ * User action:
+ * Make sure that the DASD is working correctly, then try again to set it
+ * online. If initialization still fails, reboot.
+ */
+
+/*?
+ * Text: "%s: The DASD is not operating in multipath mode\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * The DASD channel path group could not be configured to use multipath mode.
+ * This might negatively affect I/O performance on this DASD.
+ * User action:
+ * Make sure that the DASD is working correctly, then try again to set it
+ * online. If initialization still fails, reboot.
+ */
+
+/*?
+ * Text: "%s: Detecting the DASD disk layout failed because of an I/O error\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * The disk layout of the DASD could not be detected because of an unexpected
+ * I/O error. The DASD device driver treats the device like an unformatted DASD,
+ * and partitions on the device are not accessible.
+ * User action:
+ * If the DASD is formatted, make sure that the DASD is working correctly,
+ * then set it offline and back online. If the DASD is unformatted, format the
+ * DASD, for example, with dasdfmt.
+ * ATTENTION: Formatting irreversibly destroys all data on the DASD.
+ */
--- /dev/null
+
+/*?
+ * Text: "%s: New FBA DASD %04X/%02X (CU %04X/%02X) with %d MB and %d B/blk\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: bus ID of the DASD
+ * @2: device type
+ * @3: device model
+ * @4: control unit type
+ * @5: control unit model
+ * @6: size
+ * @7: bytes per block
+ * Description:
+ * A DASD with the shown characteristics has been set online.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "%s: Allocating memory for private DASD data failed\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the DASD
+ * Description:
+ * The DASD device driver maintains data structures for each DASD it manages.
+ * There is not enough memory to allocate these data structures for one or
+ * more DASD.
+ * User action:
+ * Free some memory and try the operation again.
+ */
--- /dev/null
+/*?
+ * Text: "Adjacent DCSSs %s and %s are not contiguous\n"
+ * Severity: Error
+ * Parameter:
+ * @1: name 1
+ * @2: name 2
+ * Description:
+ * You can only map a set of two or more DCSSs to a single DCSS device if the
+ * DCSSs in the set form a contiguous memory space. The DCSS device cannot be
+ * created because there is a memory gap between two adjacent DCSSs.
+ * User action:
+ * Ensure that you have specified all DCSSs that belong to the set. Check the
+ * definitions of the DCSSs on the z/VM hypervisor to verify that they form
+ * a contiguous memory space.
+ */
+
+/*?
+ * Text: "DCSS %s and DCSS %s have incompatible types\n"
+ * Severity: Error
+ * Parameter:
+ * @1: name 1
+ * @2: name 2
+ * Description:
+ * You can only map a set of two or more DCSSs to a single DCSS device if
+ * either all DCSSs in the set have the same type or if the set contains DCSSs
+ * of the two types EW and EN but no other type. The DCSS device cannot be
+ * created because at least two of the specified DCSSs are not compatible.
+ * User action:
+ * Check the definitions of the DCSSs on the z/VM hypervisor to verify that
+ * their types are compatible.
+ */
+
+/*?
+ * Text: "DCSS %s is of type SC and cannot be loaded as exclusive-writable\n"
+ * Severity: Error
+ * Parameter:
+ * @1: device name
+ * Description:
+ * You cannot load a DCSS device in exclusive-writable access mode if the DCSS
+ * device maps to one or more DCSSs of type SC.
+ * User action:
+ * Load the DCSS in shared access mode.
+ */
+
+/*?
+ * Text: "DCSS device %s is removed after a failed access mode change\n"
+ * Severity: Error
+ * Parameter:
+ * @1: device name
+ * Description:
+ * To change the access mode of a DCSS device, all DCSSs that map to the device
+ * were unloaded. Reloading the DCSSs for the new access mode failed and the
+ * device is removed.
+ * User action:
+ * Look for related messages to find out why the DCSSs could not be reloaded.
+ * If necessary, add the device again.
+ */
+
+/*?
+ * Text: "All DCSSs that map to device %s are saved\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: device name
+ * Description:
+ * A save request has been submitted for the DCSS device. Changes to all DCSSs
+ * that map to the device are saved permanently.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "Device %s is in use, its DCSSs will be saved when it becomes idle\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: device name
+ * Description:
+ * A save request for the device has been deferred until the device becomes
+ * idle. Then changes to all DCSSs that the device maps to will be saved
+ * permanently.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "A pending save request for device %s has been canceled\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: device name
+ * Description:
+ * A save request for the DCSSs that map to a DCSS device has been pending
+ * while the device was in use. This save request has been canceled. Changes to
+ * the DCSSs will not be saved permanently.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "Loaded %s with total size %lu bytes and capacity %lu sectors\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: DCSS names
+ * @2: total size in bytes
+ * @3: total size in 512 byte sectors
+ * Description:
+ * The listed DCSSs have been verified as contiguous and successfully loaded.
+ * The displayed sizes are the sums of all DCSSs.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "Device %s cannot be removed because it is not a known device\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: device name
+ * Description:
+ * The DCSS device you are trying to remove is not known to the DCSS device
+ * driver.
+ * User action:
+ * List the entries under /sys/devices/dcssblk/ to see the names of the
+ * existing DCSS devices.
+ */
+
+/*?
+ * Text: "Device %s cannot be removed while it is in use\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: device name
+ * Description:
+ * You are trying to remove a device that is in use.
+ * User action:
+ * Make sure that all users of the device close the device before you try to
+ * remove it.
+ */
+
+/*?
+ * Text: "Device %s has become idle and is being saved now\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: device name
+ * Description:
+ * A save request for the DCSSs that map to a DCSS device has been pending
+ * while the device was in use. The device has become idle and all changes
+ * to the DCSSs are now saved permanently.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "Writing to %s failed because it is a read-only device\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: device name
+ * Description:
+ * The DCSS device is in shared access mode and cannot be written to. Depending
+ * on the type of the DCSSs that the device maps to, you might be able to
+ * change the access mode to exclusive-writable.
+ * User action:
+ * If the DCSSs of the device are of type SC, do not attempt to write to the
+ * device. If the DCSSs of the device are of type ER or SR, change the access
+ * mode to exclusive-writable before writing to the device.
+ */
+
+/*?
+ * Text: "The address range of DCSS %s changed while the system was suspended\n"
+ * Severity: Error
+ * Parameter:
+ * @1: device name
+ * Description:
+ * After resuming the system, the start address or end address of a DCSS does
+ * not match the address when the system was suspended. DCSSs must not be
+ * changed after the system was suspended.
+ * This error cannot be recovered. The system is stopped with a kernel panic.
+ * User action:
+ * Reboot Linux.
+ */
+
+/*?
+ * Text: "Suspending the system failed because DCSS device %s is writable\n"
+ * Severity: Error
+ * Parameter:
+ * @1: device name
+ * Description:
+ * A system cannot be suspended if one or more DCSSs are accessed in exclusive-
+ * writable mode. DCSS segment types EW, SW, and EN are always writable and
+ * must be removed before a system is suspended.
+ * User action:
+ * Remove all DCSSs of segment types EW, SW, and EN by writing the DCSS name to
+ * the sysfs 'remove' attribute. Set the access mode for all DCSSs of segment
+ * types SR and ER to read-only by writing 1 to the sysfs 'shared' attribute of
+ * the DCSS. Then try again to suspend the system.
+ */
--- /dev/null
+/*?
+ * Text: "Querying a DCSS type failed with rc=%ld\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: return code
+ * Description:
+ * The DCSS kernel interface used z/VM diagnose call X'64' to query the
+ * type of a DCSS. z/VM failed to determine the type and returned an error.
+ * User action:
+ * Look for related messages to find out which DCSS is affected.
+ * For details about the return codes see the section about DIAGNOSE Code
+ * X'64' in "z/VM CP Programming Services".
+ */
+
+/*?
+ * Text: "Loading DCSS %s failed with rc=%ld\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: DCSS name
+ * @2: return code
+ * Description:
+ * The DCSS kernel interface used diagnose call X'64' to load a DCSS. z/VM
+ * failed to load the DCSS and returned an error.
+ * User action:
+ * For details about the return codes see the section about DIAGNOSE Code
+ * X'64' in "z/VM CP Programming Services".
+ */
+
+/*?
+ * Text: "DCSS %s of range %p to %p and type %s loaded as exclusive-writable\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: DCSS name
+ * @2: starting page address
+ * @3: ending page address
+ * @4: DCSS type
+ * Description:
+ * The DCSS was loaded successfully in exclusive-writable access mode.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "DCSS %s of range %p to %p and type %s loaded in shared access mode\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: DCSS name
+ * @2: starting page address
+ * @3: ending page address
+ * @4: DCSS type
+ * Description:
+ * The DCSS was loaded successfully in shared access mode.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "DCSS %s is already in the requested access mode\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: DCSS name
+ * Description:
+ * A request to reload a DCSS with a new access mode has been rejected
+ * because the new access mode is the same as the current access mode.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "DCSS %s is in use and cannot be reloaded\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: DCSS name
+ * Description:
+ * Reloading a DCSS in a different access mode has failed because the DCSS is
+ * being used by one or more device drivers. The DCSS remains loaded with the
+ * current access mode.
+ * User action:
+ * Ensure that the DCSS is not used by any device driver then try again to
+ * load the DCSS with the new access mode.
+ */
+
+/*?
+ * Text: "DCSS %s overlaps with used memory resources and cannot be reloaded\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: DCSS name
+ * Description:
+ * The DCSS has been unloaded and cannot be reloaded because it overlaps with
+ * another loaded DCSS or with the memory of the z/VM guest virtual machine
+ * (guest storage).
+ * User action:
+ * Ensure that no DCSS is loaded that has overlapping memory resources
+ * with the DCSS you want to reload. If the DCSS overlaps with guest storage,
+ * use the DEF STORE CONFIG z/VM CP command to create a sufficient storage gap
+ * for the DCSS. For details, see the section about the DCSS device driver in
+ * "Device Drivers, Features, and Commands".
+ */
+
+/*?
+ * Text: "Reloading DCSS %s failed with rc=%ld\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: DCSS name
+ * @2: return code
+ * Description:
+ * The DCSS kernel interface used z/VM diagnose call X'64' to reload a DCSS
+ * in a different access mode. The DCSS was unloaded but z/VM failed to reload
+ * the DCSS.
+ * User action:
+ * For details about the return codes see the section about DIAGNOSE Code
+ * X'64' in "z/VM CP Programming Services".
+ */
+
+/*?
+ * Text: "Unloading unknown DCSS %s failed\n"
+ * Severity: Error
+ * Parameter:
+ * @1: DCSS name
+ * Description:
+ * The specified DCSS cannot be unloaded. The DCSS is known to the DCSS device
+ * driver but not to the DCSS kernel interface. This problem indicates a
+ * program error in extmem.c.
+ * User action:
+ * Report this problem to your support organization.
+ */
+
+/*?
+ * Text: "Saving unknown DCSS %s failed\n"
+ * Severity: Error
+ * Parameter:
+ * @1: DCSS name
+ * Description:
+ * The specified DCSS cannot be saved. The DCSS is known to the DCSS device
+ * driver but not to the DCSS kernel interface. This problem indicates a
+ * program error in extmem.c.
+ * User action:
+ * Report this problem to your support organization.
+ */
+
+/*?
+ * Text: "Saving a DCSS failed with DEFSEG response code %i\n"
+ * Severity: Error
+ * Parameter:
+ * @1: response-code
+ * Description:
+ * The DEFSEG z/VM CP command failed to permanently save changes to a DCSS.
+ * User action:
+ * Look for related messages to find the cause of this error. See also message
+ * HCP<response-code>E in the DEFSEG section of the "z/VM CP Command and
+ * Utility Reference".
+ */
+
+/*?
+ * Text: "Saving a DCSS failed with SAVESEG response code %i\n"
+ * Severity: Error
+ * Parameter:
+ * @1: response-code
+ * Description:
+ * The SAVESEG z/VM CP command failed to permanently save changes to a DCSS.
+ * User action:
+ * Look for related messages to find the cause of this error. See also message
+ * HCP<response-code>E in the SAVESEG section of the "z/VM CP Command and
+ * Utility Reference".
+ */
+
+/*?
+ * Text: "DCSS %s cannot be loaded or queried\n"
+ * Severity: Error
+ * Parameter:
+ * @1: DCSS name
+ * Description:
+ * You cannot load or query the specified DCSS because it either is not defined
+ * in the z/VM hypervisor, or it is a class S DCSS, or it is above 2047 MB
+ * and the Linux system is a 31-bit system.
+ * User action:
+ * Use the CP command "QUERY NSS" to find out if the DCSS is a valid
+ * DCSS that can be loaded.
+ */
+
+/*?
+ * Text: "DCSS %s cannot be loaded or queried without z/VM\n"
+ * Severity: Error
+ * Parameter:
+ * @1: DCSS name
+ * Description:
+ * A DCSS is a z/VM resource. Your Linux instance is not running as a z/VM
+ * guest operating system and, therefore, cannot load DCSSs.
+ * User action:
+ * Load DCSSs only on Linux instances that run as z/VM guest operating systems.
+ */
+
+/*?
+ * Text: "Loading or querying DCSS %s resulted in a hardware error\n"
+ * Severity: Error
+ * Parameter:
+ * @1: DCSS name
+ * Description:
+ * Either the z/VM DIAGNOSE X'64' query or load call issued for the DCSS
+ * returned with an error.
+ * User action:
+ * Look for previous extmem messages to find the return code from the
+ * DIAGNOSE X'64' query or load call. For details about the return codes see
+ * the section about DIAGNOSE Code X'64' in "z/VM CP Programming Services".
+ */
+
+/*?
+ * Text: "DCSS %s has multiple page ranges and cannot be loaded or queried\n"
+ * Severity: Error
+ * Parameter:
+ * @1: DCSS name
+ * Description:
+ * You can only load or query a DCSS with multiple page ranges if:
+ * - The DCSS has 6 or fewer page ranges
+ * - The page ranges form a contiguous address space
+ * - The page ranges are of type EW or EN
+ * User action:
+ * Check the definition of the DCSS to make sure that the conditions for
+ * DCSSs with multiple page ranges are met.
+ */
+
+/*?
+ * Text: "%s needs used memory resources and cannot be loaded or queried\n"
+ * Severity: Error
+ * Parameter:
+ * @1: DCSS name
+ * Description:
+ * You cannot load or query the DCSS because it overlaps with an already
+ * loaded DCSS or with the memory of the z/VM guest virtual machine
+ * (guest storage).
+ * User action:
+ * Ensure that no DCSS is loaded that has overlapping memory resources
+ * with the DCSS you want to load or query. If the DCSS overlaps with guest
+ * storage, use the DEF STORE CONFIG z/VM CP command to create a sufficient
+ * storage gap for the DCSS. For details, see the section about the DCSS
+ * device driver in "Device Drivers, Features, and Commands".
+ */
+
+/*?
+ * Text: "DCSS %s is already loaded in a different access mode\n"
+ * Severity: Error
+ * Parameter:
+ * @1: DCSS name
+ * Description:
+ * The DCSS you are trying to load has already been loaded in a different
+ * access mode. You cannot simultaneously load the DCSS in different modes.
+ * User action:
+ * Reload the DCSS in a different mode or load it with the same mode in which
+ * it has already been loaded.
+ */
+
+/*?
+ * Text: "There is not enough memory to load or query DCSS %s\n"
+ * Severity: Error
+ * Parameter:
+ * @1: DCSS name
+ * Description:
+ * The available memory is not enough to load or query the DCSS.
+ * User action:
+ * Free some memory and repeat the failed operation.
+ */
+
+/*?
+ * Text: "DCSS %s overlaps with used storage and cannot be loaded\n"
+ * Severity: Error
+ * Parameter:
+ * @1: DCSS name
+ * Description:
+ * You cannot load the DCSS because it overlaps with an already loaded DCSS
+ * or with the memory of the z/VM guest virtual machine (guest storage).
+ * User action:
+ * Ensure that no DCSS is loaded that has overlapping memory resources
+ * with the DCSS you want to load. If the DCSS overlaps with guest storage,
+ * use the DEF STORE CONFIG z/VM CP command to create a sufficient storage gap
+ * for the DCSS. For details, see the section about the DCSS device driver in
+ * "Device Drivers, Features, and Commands".
+ */
+
+/*?
+ * Text: "DCSS %s exceeds the kernel mapping range (%lu) and cannot be loaded\n"
+ * Severity: Error
+ * Parameter:
+ * @1: DCSS name
+ * @2: kernel mapping range in bytes
+ * Description:
+ * You cannot load the DCSS because it exceeds the kernel mapping range limit.
+ * User action:
+ * Ensure that the DCSS range is defined below the kernel mapping range.
+ */
+
--- /dev/null
+/*?
+ * Text: "The z/VM IUCV HVC device driver cannot be used without z/VM\n"
+ * Severity: Notice
+ * Description:
+ * The z/VM IUCV hypervisor console (HVC) device driver requires the
+ * z/VM inter-user communication vehicle (IUCV).
+ * User action:
+ * Set "hvc_iucv=" to zero in the kernel parameter line and reboot Linux.
+ */
+
+/*?
+ * Text: "%lu is not a valid value for the hvc_iucv= kernel parameter\n"
+ * Severity: Error
+ * Parameter:
+ * @1: hvc_iucv_devices
+ * Description:
+ * The "hvc_iucv=" kernel parameter specifies the number of z/VM IUCV
+ * hypervisor console (HVC) terminal devices.
+ * The parameter value ranges from 0 to 8.
+ * If zero is specified, the z/VM IUCV HVC device driver is disabled
+ * and no IUCV-based terminal access is available.
+ * User action:
+ * Correct the "hvc_iucv=" setting in the kernel parameter line and
+ * reboot Linux.
+ */
+
+/*?
+ * Text: "Creating a new HVC terminal device failed with error code=%d\n"
+ * Severity: Error
+ * Parameter:
+ * @1: errno
+ * Description:
+ * The device driver initialization failed to allocate a new
+ * HVC terminal device.
+ * A possible cause of this problem is memory constraints.
+ * User action:
+ * If the error code is -12 (ENOMEM), consider assigning more memory
+ * to your z/VM guest virtual machine.
+ */
+
+/*?
+ * Text: "Registering HVC terminal device as Linux console failed\n"
+ * Severity: Error
+ * Description:
+ * The device driver initialization failed to set up the first HVC terminal
+ * device for use as Linux console.
+ * User action:
+ * If the error code is -12 (ENOMEM), consider assigning more memory
+ * to your z/VM guest virtual machine.
+ */
+
+/*?
+ * Text: "Registering IUCV handlers failed with error code=%d\n"
+ * Severity: Error
+ * Parameter:
+ * @1: errno
+ * Description:
+ * The device driver initialization failed to register with z/VM IUCV to
+ * handle IUCV connections, as well as sending and receiving of IUCV messages.
+ * User action:
+ * Check for related IUCV error messages and see the errno manual page
+ * to find out what caused the problem.
+ */
+
+/*?
+ * Text: "Allocating memory failed with reason code=%d\n"
+ * Severity: Error
+ * Parameter:
+ * @1: reason
+ * Description:
+ * The z/VM IUCV hypervisor console (HVC) device driver initialization failed,
+ * because of a general memory allocation failure. The reason code indicates
+ * the memory operation that has failed:
+ * kmem_cache (reason code=1),
+ * mempool (reason code=2), or
+ * hvc_iucv_allow= (reason code=3)
+ * User action:
+ * Consider assigning more memory to your z/VM guest virtual machine.
+ */
+
+/*?
+ * Text: "hvc_iucv_allow= does not specify a valid z/VM user ID list\n"
+ * Severity: Error
+ * Description:
+ * The "hvc_iucv_allow=" kernel parameter specifies a comma-separated list
+ * of z/VM user IDs that are permitted to connect to the z/VM IUCV hypervisor
+ * device driver.
+ * The z/VM user IDs in the list must not exceed eight characters and must
+ * not contain spaces.
+ * User action:
+ * Correct the "hvc_iucv_allow=" setting in the kernel parameter line and reboot
+ * Linux.
+ */
+
+/*?
+ * Text: "hvc_iucv_allow= specifies too many z/VM user IDs\n"
+ * Severity: Error
+ * Description:
+ * The "hvc_iucv_allow=" kernel parameter specifies a comma-separated list
+ * of z/VM user IDs that are permitted to connect to the z/VM IUCV hypervisor
+ * device driver.
+ * The number of z/VM user IDs that are specified with the "hvc_iucv_allow="
+ * kernel parameter exceeds the maximum of 500.
+ * User action:
+ * Correct the "hvc_iucv_allow=" setting by reducing the z/VM user IDs in
+ * the list and reboot Linux.
+ */
+
+/*?
+ * Text: "A connection request from z/VM user ID %s was refused\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: ID
+ * Description:
+ * An IUCV connection request from another z/VM guest virtual machine has been
+ * refused. The request was from a z/VM guest virtual machine that is not
+ * listed by the "hvc_iucv_allow=" kernel parameter.
+ * User action:
+ * Check the "hvc_iucv_allow=" kernel parameter setting.
+ * Consider adding the z/VM user ID to the "hvc_iucv_allow=" list in the kernel
+ * parameter line and reboot Linux.
+ */
--- /dev/null
+/*?
+ * Text: "The hardware system does not support hypfs\n"
+ * Severity: Error
+ * Description:
+ * hypfs requires DIAGNOSE Code X'204' but this diagnose code is not available
+ * on your hardware. You need more recent hardware to use hypfs.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "The hardware system does not provide all functions required by hypfs\n"
+ * Severity: Error
+ * Description:
+ * hypfs requires DIAGNOSE Code X'224' but this diagnose code is not available
+ * on your hardware. You need more recent hardware to use hypfs.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "Updating the hypfs tree failed\n"
+ * Severity: Error
+ * Description:
+ * There was not enough memory available to update the hypfs tree.
+ * User action:
+ * Free some memory and try again to update the hypfs tree. Consider assigning
+ * more memory to your LPAR or z/VM guest virtual machine.
+ */
+
+/*?
+ * Text: "%s is not a valid mount option\n"
+ * Severity: Error
+ * Parameter:
+ * @1: mount option
+ * Description:
+ * hypfs has detected mount options that are not valid.
+ * User action:
+ * See "Device Drivers, Features, and Commands" for information about valid
+ * mount options for hypfs.
+ */
+
+/*?
+ * Text: "Initialization of hypfs failed with rc=%i\n"
+ * Severity: Error
+ * Parameter:
+ * @1: error code
+ * Description:
+ * Initialization of hypfs failed because of resource or hardware constraints.
+ * Possible reasons for this problem are insufficient free memory or missing
+ * hardware interfaces.
+ * User action:
+ * See errno.h for information about the error codes.
+ */
+
+/*? Text: "Hypervisor filesystem mounted\n" */
--- /dev/null
+/*?
+ * Text: "Defining an interrupt buffer on CPU %i failed with 0x%02x (%s)\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: CPU number
+ * @2: hexadecimal error value
+ * @3: short error code explanation
+ * Description:
+ * Defining an interrupt buffer for external interrupts failed. Error
+ * value 0x03 indicates a problem with the z/VM directory entry of the
+ * z/VM guest virtual machine. This problem can also be caused by a
+ * program error.
+ * User action:
+ * If the error value is 0x03, examine the z/VM directory entry of your
+ * z/VM guest virtual machine. If the directory entry is correct or if the
+ * error value is not 0x03, report this problem to your support organization.
+ */
+
+/*?
+ * Text: "Suspending Linux did not completely close all IUCV connections\n"
+ * Severity: Warning
+ * Description:
+ * When resuming a suspended Linux instance, the IUCV base code found
+ * data structures from one or more IUCV connections that existed before the
+ * Linux instance was suspended. Modules that use IUCV connections must close
+ * these connections when a Linux instance is suspended. This problem
+ * indicates an error in a program that used an IUCV connection.
+ * User action:
+ * Report this problem to your support organization.
+ */
+
+/*? Text: "iucv_external_interrupt: out of memory\n" */
+
--- /dev/null
+/*?
+ * Text: "%s: Allocating a socket buffer to interface %s failed\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the LCS device
+ * @2: network interface
+ * Description:
+ * LAN channel station (LCS) devices require a socket buffer (SKB) structure
+ * for storing incoming data. The LCS device driver failed to allocate an SKB
+ * structure to the LCS device. A likely cause of this problem is memory
+ * constraints.
+ * User action:
+ * Free some memory and repeat the failed operation.
+ */
+
+/*?
+ * Text: "%s: Shutting down the LCS device failed\n "
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the LCS device
+ * Description:
+ * A request to shut down a LAN channel station (LCS) device resulted in an
+ * error. The error is logged in the LCS trace at trace level 4.
+ * User action:
+ * Try again to shut down the device. If the error persists, see the LCS trace
+ * to find out what causes the error.
+ */
+
+/*?
+ * Text: "%s: Detecting a network adapter for LCS devices failed with rc=%d (0x%x)\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the LCS device
+ * @2: lcs_detect return code in decimal notation
+ * @3: lcs_detect return code in hexadecimal notation
+ * Description:
+ * The LCS device driver could not initialize a network adapter.
+ * User action:
+ * Note the return codes from the error message and contact IBM support.
+ */
+
+/*?
+ * Text: "%s: A recovery process has been started for the LCS device\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the LCS device
+ * Description:
+ * The LAN channel station (LCS) device is shut down and restarted. The recovery
+ * process might have been initiated by a user or started automatically as a
+ * response to a device problem.
+ * User action:
+ * Wait until a message indicates the completion of the recovery process.
+ */
+
+/*?
+ * Text: "%s: An I/O-error occurred on the LCS device\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the LCS device
+ * Description:
+ * The LAN channel station (LCS) device reported a problem that can be recovered
+ * by the LCS device driver. Repeated occurrences of this problem indicate a
+ * malfunctioning device.
+ * User action:
+ * If this problem occurs frequently, initiate a recovery process for the
+ * device, for example, by writing '1' to the 'recover' sysfs attribute of the
+ * device.
+ */
+
+/*?
+ * Text: "%s: A command timed out on the LCS device\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the LCS device
+ * Description:
+ * The LAN channel station (LCS) device reported a problem that can be recovered
+ * by the LCS device driver. Repeated occurrences of this problem indicate a
+ * malfunctioning device.
+ * User action:
+ * If this problem occurs frequently, initiate a recovery process for the
+ * device, for example, by writing '1' to the 'recover' sysfs attribute of the
+ * device.
+ */
+
+/*?
+ * Text: "%s: An error occurred on the LCS device, rc=%ld\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the LCS device
+ * @2: return code
+ * Description:
+ * The LAN channel station (LCS) device reported a problem that can be recovered
+ * by the LCS device driver. Repeated occurrences of this problem indicate a
+ * malfunctioning device.
+ * User action:
+ * If this problem occurs frequently, initiate a recovery process for the
+ * device, for example, by writing '1' to the 'recover' sysfs attribute of the
+ * device.
+ */
+
+/*?
+ * Text: "%s: The LCS device stopped because of an error, dstat=0x%X, cstat=0x%X \n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the LCS device
+ * @2: device status
+ * @3: subchannel status
+ * Description:
+ * The LAN channel station (LCS) device reported an error. The LCS device driver
+ * might start a device recovery process.
+ * User action:
+ * If the device driver does not start a recovery process, initiate a recovery
+ * process, for example, by writing '1' to the 'recover' sysfs attribute of the
+ * device. If the problem persists, note the status information provided with
+ * the message and contact IBM support.
+ */
+
+/*?
+ * Text: "%s: Starting an LCS device resulted in an error, rc=%d!\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the LCS device
+ * @2: ccw_device_start return code in decimal notation
+ * Description:
+ * The LAN channel station (LCS) device driver failed to initialize an LCS
+ * device. The device is not operational.
+ * User action:
+ * Initiate a recovery process, for example, by writing '1' to the 'recover'
+ * sysfs attribute of the device. If the problem persists, contact IBM support.
+ */
+
+/*?
+ * Text: "%s: Sending data from the LCS device to the LAN failed with rc=%d\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the LCS device
+ * @2: ccw_device_resume return code in decimal notation
+ * Description:
+ * The LAN channel station (LCS) device driver could not send data to the LAN
+ * using the LCS device. This might be a temporary problem. Operations continue
+ * on the LCS device.
+ * User action:
+ * If this problem occurs frequently, initiate a recovery process, for example,
+ * by writing '1' to the 'recover' sysfs attribute of the device. If the
+ * problem persists, contact IBM support.
+ */
+
+/*? Text: "Query IPAssist failed. Assuming unsupported!\n" */
+/*? Text: "Stoplan for %s initiated by LGW.\n" */
+/*? Text: "Not enough memory to add new multicast entry!\n" */
+/*? Text: "Not enough memory for debug facility.\n" */
+/*? Text: "Adding multicast address failed. Table possibly full!\n" */
+/*? Text: "Error in opening device!\n" */
+/*? Text: "LCS device %s %s IPv6 support\n" */
+/*? Text: "Device %s successfully recovered!\n" */
+/*? Text: "LCS device %s %s Multicast support\n" */
+/*? Text: " Initialization failed\n" */
+/*? Text: "Loading %s\n" */
+/*? Text: "Initialization failed\n" */
+/*? Text: "Terminating lcs module.\n" */
+/*? Text: "Device %s could not be recovered!\n" */
--- /dev/null
+/*?
+ * Text: "Reading monitor data failed with rc=%i\n"
+ * Severity: Error
+ * Parameter:
+ * @1: return code
+ * Description:
+ * The z/VM *MONITOR record device driver failed to read monitor data
+ * because the IUCV REPLY function failed. The read function against
+ * the monitor record device returns EIO. All monitor data that has been read
+ * since the last read with 0 size is incorrect.
+ * User action:
+ * Disregard all monitor data that has been read since the last read with
+ * 0 size. If the device driver has been compiled as a separate module, unload
+ * and reload the monreader module. If the device driver has been compiled
+ * into the kernel, reboot Linux. For more information about possible causes
+ * of the error see the IUCV section in "z/VM CP Programming Services" and
+ * the *MONITOR section in "z/VM Performance".
+ */
+
+/*?
+ * Text: "z/VM *MONITOR system service disconnected with rc=%i\n"
+ * Severity: Error
+ * Parameter:
+ * @1: IPUSER SEVER return code
+ * Description:
+ * The z/VM *MONITOR record device driver receives monitor records through
+ * an IUCV connection to the z/VM *MONITOR system service. This connection
+ * has been severed and the read function of the z/VM *MONITOR device driver
+ * returns EIO. All data received since the last read with 0 size is incorrect.
+ * User action:
+ * Disregard all monitor data read since the last read with 0 size. Close and
+ * reopen the monitor record device. For information about the IPUSER SEVER
+ * return codes see "z/VM Performance".
+ */
+
+/*?
+ * Text: "The read queue for monitor data is full\n"
+ * Severity: Warning
+ * Description:
+ * The read function of the z/VM *MONITOR device driver returns EOVERFLOW
+ * because not enough monitor data has been read since the monitor device
+ * has been opened. Monitor data already read are valid and subsequent reads
+ * return valid data but some intermediate data might be missing.
+ * User action:
+ * Be aware that monitor data might be missing. Ensure that you regularly
+ * read monitor data after opening the monitor record device.
+ */
+
+/*?
+ * Text: "Connecting to the z/VM *MONITOR system service failed with rc=%i\n"
+ * Severity: Error
+ * Parameter:
+ * @1: IUCV CONNECT return code
+ * Description:
+ * The z/VM *MONITOR record device driver receives monitor records through
+ * an IUCV connection to the z/VM *MONITOR system service. This connection
+ * could not be established when the monitor record device was opened. If
+ * the return code is 15, your z/VM guest virtual machine is not authorized
+ * to connect to the *MONITOR system service.
+ * User action:
+ * If the return code is 15, ensure that the IUCV *MONITOR statement is
+ * included in the z/VM directory entry for your z/VM guest virtual machine.
+ * For other IUCV CONNECT return codes see the IUCV section in "CP Programming
+ * Services" and the *MONITOR section in "z/VM Performance".
+ */
+
+/*?
+ * Text: "Disconnecting the z/VM *MONITOR system service failed with rc=%i\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: IUCV SEVER return code
+ * Description:
+ * The z/VM *MONITOR record device driver receives monitor data through an
+ * IUCV connection to the z/VM *MONITOR system service. This connection
+ * could not be closed when the monitor record device was closed. You might
+ * not be able to resume monitoring.
+ * User action:
+ * No immediate action is necessary. If you cannot open the monitor record
+ * device in the future, reboot Linux. For information about the IUCV SEVER
+ * return codes see the IUCV section in "CP Programming Services" and the
+ * *MONITOR section in "z/VM Performance".
+ */
+
+/*?
+ * Text: "The z/VM *MONITOR record device driver cannot be loaded without z/VM\n"
+ * Severity: Error
+ * Description:
+ * The z/VM *MONITOR record device driver uses z/VM system services to provide
+ * monitor data about z/VM guest operating systems to applications on Linux.
+ * On Linux instances that run in environments other than the z/VM hypervisor,
+ * the z/VM *MONITOR record device driver does not provide any useful
+ * function and the corresponding monreader module cannot be loaded.
+ * User action:
+ * Load the z/VM *MONITOR record device driver only on Linux instances that run
+ * as guest operating systems of the z/VM hypervisor. If the z/VM *MONITOR
+ * record device driver has been compiled into the kernel, ignore this message.
+ */
+
+/*?
+ * Text: "The z/VM *MONITOR record device driver failed to register with IUCV\n"
+ * Severity: Error
+ * Description:
+ * The z/VM *MONITOR record device driver receives monitor data through an IUCV
+ * connection and needs to register with the IUCV device driver. This
+ * registration failed and the z/VM *MONITOR record device driver was not
+ * loaded. A possible cause of this problem is insufficient memory.
+ * User action:
+ * Free some memory and try again to load the module. If the z/VM *MONITOR
+ * record device driver has been compiled into the kernel, you might have to
+ * configure more memory and reboot Linux. If you do not want to read monitor
+ * data, ignore this message.
+ */
+
+/*?
+ * Text: "The specified *MONITOR DCSS %s does not have the required type SC\n"
+ * Severity: Error
+ * Parameter:
+ * @1: DCSS name
+ * Description:
+ * The DCSS that was specified with the monreader.mondcss kernel parameter or
+ * with the mondcss module parameter cannot be a *MONITOR DCSS because it is
+ * not of type SC.
+ * User action:
+ * Confirm that you are using the name of the DCSS that has been configured as
+ * the *MONITOR DCSS on the z/VM hypervisor. If the default name, MONDCSS, is
+ * used, omit the monreader.mondcss or mondcss parameter.
+ */
--- /dev/null
+/*?
+ * Text: "Writing monitor data failed with rc=%i\n"
+ * Severity: Error
+ * Parameter:
+ * @1: return code
+ * Description:
+ * The monitor stream application device driver used the z/VM diagnose call
+ * DIAG X'DC' to start writing monitor data. z/VM returned an error and the
+ * monitor data cannot be written. If the return code is 5, your z/VM guest
+ * virtual machine is not authorized to write monitor data.
+ * User action:
+ * If the return code is 5, ensure that your z/VM guest virtual machine's
+ * entry in the z/VM directory includes the OPTION APPLMON statement.
+ * For other return codes see the section about DIAGNOSE Code X'DC'
+ * in "z/VM CP Programming Services".
+ */
--- /dev/null
+/*?
+ * Text: "%s: The peer interface of the IUCV device has closed the connection\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: bus ID of the IUCV device
+ * Description:
+ * The peer interface on the remote z/VM guest virtual machine has closed the
+ * connection. Do not expect further packets on this interface. Any packets
+ * you send to this interface will be dropped.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "%s: The IUCV device failed to connect to z/VM guest %s\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the IUCV device
+ * @2: z/VM user ID
+ * Description:
+ * The connection cannot be established because the z/VM guest virtual
+ * machine with the peer interface is not running.
+ * User action:
+ * Ensure that the z/VM guest virtual machine with the peer interface is
+ * running; then try again to establish the connection.
+ */
+
+/*?
+ * Text: "%s: The IUCV device failed to connect to the peer on z/VM guest %s\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the IUCV device
+ * @2: z/VM user ID
+ * Description:
+ * The connection cannot be established because the z/VM guest virtual machine
+ * with the peer interface is not configured for IUCV connections.
+ * User action:
+ * Configure the z/VM guest virtual machine with the peer interface for IUCV
+ * connections; then try again to establish the connection.
+ */
+
+/*?
+ * Text: "%s: Connecting the IUCV device would exceed the maximum number of IUCV connections\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the IUCV device
+ * Description:
+ * The connection cannot be established because the maximum number of IUCV
+ * connections has been reached on the local z/VM guest virtual machine.
+ * User action:
+ * Close some of the established IUCV connections on the local z/VM guest
+ * virtual machine; then try again to establish the connection.
+ */
+
+/*?
+ * Text: "%s: z/VM guest %s has too many IUCV connections to connect with the IUCV device\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the IUCV device
+ * @2: remote z/VM user ID
+ * Description:
+ * Connecting to the remote z/VM guest virtual machine failed because the
+ * maximum number of IUCV connections for the remote z/VM guest virtual
+ * machine has been reached.
+ * User action:
+ * Close some of the established IUCV connections on the remote z/VM guest
+ * virtual machine; then try again to establish the connection.
+ */
+
+/*?
+ * Text: "%s: The IUCV device cannot connect to a z/VM guest with no IUCV authorization\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the IUCV device
+ * Description:
+ * Because the remote z/VM guest virtual machine is not authorized for IUCV
+ * connections, the connection cannot be established.
+ * User action:
+ * Add the statements 'IUCV ALLOW' and 'IUCV ANY' to the z/VM directory
+ * entry of the remote z/VM guest virtual machine; then try again to
+ * establish the connection. See "z/VM CP Planning and Administration"
+ * for details about the IUCV statements.
+ */
+
+/*?
+ * Text: "%s: Connecting the IUCV device failed with error %d\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the IUCV device
+ * @2: error code
+ * Description:
+ * The connection cannot be established because of an IUCV CONNECT error.
+ * User action:
+ * Report this problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: The IUCV device has been connected successfully to %s\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: bus ID of the IUCV device
+ * @2: remote z/VM user ID
+ * Description:
+ * The connection has been established and the interface is ready to
+ * transmit communication packages.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "%s: The IUCV interface to %s has been established successfully\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: bus ID of the IUCV device
+ * @2: remote z/VM user ID
+ * Description:
+ * The IUCV interface to the remote z/VM guest virtual machine has been
+ * established and can be activated with "ifconfig up" or an equivalent
+ * command.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "%s: The IUCV device is connected to %s and cannot be removed\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the IUCV device
+ * @2: remote z/VM user ID
+ * Description:
+ * Removing a connection failed because the interface is active with a peer
+ * interface on a remote z/VM guest virtual machine.
+ * User action:
+ * Deactivate the interface with "ifconfig down" or an equivalent command;
+ * then try again to remove the interface.
+ */
+
+/*? Text: "driver unloaded\n" */
+/*? Text: "driver initialized\n" */
--- /dev/null
+/*?
+ * Text: "%s: The LAN is offline\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the qeth device
+ * Description:
+ * A start LAN command was sent by the qeth device driver but the physical or
+ * virtual adapter has not started the LAN. The LAN might take a few seconds
+ * to become available.
+ * User action:
+ * Check the status of the qeth device, for example, with the lsqeth command.
+ * If the device does not become operational within a few seconds, initiate a
+ * recovery process, for example, by writing '1' to the 'recover' sysfs
+ * attribute of the device.
+ */
+
+/*?
+ * Text: "%s: The user canceled setting the qeth device offline\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the qeth device
+ * Description:
+ * A user initiated setting the device offline but subsequently canceled the
+ * operation, for example, with CTRL+C.
+ * User action:
+ * Check the status of the qeth device, for example, with the lsqeth command.
+ * If necessary, repeat the operation to set the device offline.
+ */
+
+/*?
+ * Text: "%s: A recovery process has been started for the device\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the qeth device
+ * Description:
+ * A recovery process was started either by the qeth device driver or through
+ * a user command.
+ * User action:
+ * Wait until a message indicates the completion of the recovery process.
+ */
+
+/*?
+ * Text: "%s: The qeth device driver failed to recover an error on the device\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the qeth device
+ * Description:
+ * The qeth device driver performed an automatic recovery operation to recover
+ * an error on a qeth device. The recovery operation failed.
+ * User action:
+ * Try the following actions in the given order: i) Check the status of the
+ * qeth device, for example, with the lsqeth command. ii) Initiate a recovery
+ * process by writing '1' to the 'recover' sysfs attribute of the device.
+ * iii) Ungroup and regroup the subchannel triplet of the device. iv) Reboot
+ * Linux. v) If the problem persists, gather Linux debug data and report the
+ * problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: The link for interface %s on CHPID 0x%X failed\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the qeth device
+ * @2: network interface name
+ * @3: CHPID
+ * Description:
+ * A network link failed. A possible reason for this error is that a physical
+ * network cable has been disconnected.
+ * User action:
+ * Ensure that the network cable on the adapter hardware is connected properly.
+ * If the connection is to a guest LAN, ensure that the device is still coupled
+ * to the guest LAN.
+ */
+
+/*?
+ * Text: "%s: The link for %s on CHPID 0x%X has been restored\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: bus ID of the qeth device
+ * @2: network interface name
+ * @3: CHPID
+ * Description:
+ * A failed network link has been re-established. A device recovery is in
+ * progress.
+ * User action:
+ * Wait until a message indicates the completion of the recovery process.
+ */
+
+/*?
+ * Text: "%s: A hardware operation timed out on the device\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the qeth device
+ * Description:
+ * A hardware operation timed out on the qeth device.
+ * User action:
+ * Check the status of the qeth device, for example, with the lsqeth command.
+ * If the device is not operational, initiate a recovery process, for example,
+ * by writing '1' to the 'recover' sysfs attribute of the device.
+ */
+
+/*?
+ * Text: "%s: The adapter hardware is of an unknown type\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the qeth device
+ * Description:
+ * The qeth device driver does not recognize the adapter hardware. The cause
+ * of this problem could be a hardware error or a Linux level that does not
+ * support your adapter hardware.
+ * User action:
+ * i) Investigate if your adapter hardware is supported by your Linux level.
+ * Consider using hardware that is supported by your Linux level or upgrading
+ * to a Linux level that supports your hardware. ii) Install the latest
+ * firmware on your adapter hardware. iii) If the problem persists and is not
+ * caused by a version mismatch, contact IBM support.
+ */
+
+/*?
+ * Text: "%s: The adapter is used exclusively by another host\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the qeth device
+ * Description:
+ * The qeth adapter is exclusively used by another host.
+ * User action:
+ * Use another qeth adapter, or reconfigure this one so that it is not
+ * exclusively assigned to a particular host.
+ */
+
+/*?
+ * Text: "%s: QDIO reported an error, rc=%i\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the qeth device
+ * @2: return code
+ * Description:
+ * The QDIO subsystem reported an error.
+ * User action:
+ * Check for related QDIO errors. Check the status of the qeth device, for
+ * example, with the lsqeth command. If the device is not operational, initiate
+ * a recovery process, for example, by writing '1' to the 'recover' sysfs
+ * attribute of the device.
+ */
+
+/*?
+ * Text: "%s: There is no kernel module to support discipline %d\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the qeth device
+ * @2: discipline
+ * Description:
+ * The qeth device driver or a user command requested a kernel module for a
+ * particular qeth discipline. Either the discipline is not supported by the
+ * qeth device driver or the requested module is not available to your Linux
+ * system.
+ * User action:
+ * Check if the requested discipline module has been compiled into the kernel
+ * or is present in /lib/modules/<version>/kernel/drivers/s390/net.
+ */
+
+/*?
+ * Text: "Initializing the qeth device driver failed\n"
+ * Severity: Error
+ * Description:
+ * The base module of the qeth device driver could not be initialized.
+ * User action:
+ * See errno.h to determine the reason for the error.
+ * i) Reboot Linux. ii) If the problem persists, gather Linux debug data and
+ * report the problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: Registering IP address %s failed\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the qeth device
+ * @2: IP address
+ * Description:
+ * An IP address could not be registered with the network adapter.
+ * User action:
+ * Check if another operating system instance has already registered the
+ * IP address with the same network adapter or at the same logical IP subnet.
+ */
+
+/*?
+ * Text: "%s: Reading the adapter MAC address failed\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the qeth device
+ * Description:
+ * The qeth device driver could not read the MAC address from the network
+ * adapter.
+ * User action:
+ * Ungroup and regroup the subchannel triplet of the device. If this does not
+ * resolve the problem, reboot Linux. If the problem persists, gather Linux
+ * debug data and report the problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: Starting ARP processing support for %s failed\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the qeth device
+ * @2: network interface name
+ * Description:
+ * The qeth device driver could not start ARP support on the network adapter.
+ * User action:
+ * Ungroup and regroup the subchannel triplet of the device. If this does not
+ * resolve the problem, reboot Linux. If the problem persists, gather Linux
+ * debug data and report the problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: Starting IP fragmentation support for %s failed\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the qeth device
+ * @2: network interface name
+ * Description:
+ * The qeth device driver could not start IP fragmentation support on the
+ * network adapter.
+ * User action:
+ * Ungroup and regroup the subchannel triplet of the device. If this does not
+ * resolve the problem, reboot Linux. If the problem persists, gather Linux
+ * debug data and report the problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: Starting proxy ARP support for %s failed\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the qeth device
+ * @2: network interface name
+ * Description:
+ * The qeth device driver could not start proxy ARP support on the network
+ * adapter.
+ * User action:
+ * None if you do not require proxy ARP support. If you need proxy ARP,
+ * ungroup and regroup the subchannel triplet of the device. If this does not
+ * resolve the problem, reboot Linux. If the problem persists, gather Linux
+ * debug data and report the problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: Starting VLAN support for %s failed\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the qeth device
+ * @2: network interface name
+ * Description:
+ * The qeth device driver could not start VLAN support on the network adapter.
+ * User action:
+ * None if you do not require VLAN support. If you need VLAN support,
+ * ungroup and regroup the subchannel triplet of the device. If this does not
+ * resolve the problem, reboot Linux. If the problem persists, gather Linux
+ * debug data and report the problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: Starting multicast support for %s failed\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the qeth device
+ * @2: network interface name
+ * Description:
+ * The qeth device driver could not start multicast support on the network
+ * adapter.
+ * User action:
+ * Ungroup and regroup the subchannel triplet of the device. If this does not
+ * resolve the problem, reboot Linux. If the problem persists, gather Linux
+ * debug data and report the problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: Activating IPv6 support for %s failed\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the qeth device
+ * @2: network interface name
+ * Description:
+ * The qeth device driver could not activate IPv6 support on the network
+ * adapter.
+ * User action:
+ * None if you do not require IPv6 communication. If you need IPv6 support,
+ * ungroup and regroup the subchannel triplet of the device. If this does not
+ * resolve the problem, reboot Linux. If the problem persists, gather Linux
+ * debug data and report the problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: Enabling the passthrough mode for %s failed\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the qeth device
+ * @2: network interface name
+ * Description:
+ * The qeth device driver could not enable the passthrough mode on the
+ * network adapter. The passthrough mode is required for all network traffic
+ * other than IPv4. In particular, the passthrough mode is required for IPv6
+ * traffic.
+ * User action:
+ * None if all you want to support is IPv4 communication. If you want to support
+ * IPv6 or other network traffic apart from IPv4, ungroup and regroup the
+ * subchannel triplet of the device. If this does not resolve the problem,
+ * reboot Linux. If the problem persists, gather Linux debug data and report
+ * the problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: Enabling broadcast filtering for %s failed\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the qeth device
+ * @2: network interface name
+ * Description:
+ * The qeth device driver could not enable broadcast filtering on the network
+ * adapter.
+ * User action:
+ * Ungroup and regroup the subchannel triplet of the device. If this does not
+ * resolve the problem, reboot Linux. If the problem persists, gather Linux
+ * debug data and report the problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: Setting up broadcast filtering for %s failed\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the qeth device
+ * @2: network interface name
+ * Description:
+ * The qeth device driver could not set up broadcast filtering on the network
+ * adapter.
+ * User action:
+ * Ungroup and regroup the subchannel triplet of the device. If this does not
+ * resolve the problem, reboot Linux. If the problem persists, gather Linux
+ * debug data and report the problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: Setting up broadcast echo filtering for %s failed\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the qeth device
+ * @2: network interface name
+ * Description:
+ * The qeth device driver could not set up broadcast echo filtering on the
+ * network adapter.
+ * User action:
+ * Ungroup and regroup the subchannel triplet of the device. If this does not
+ * resolve the problem, reboot Linux. If the problem persists, gather Linux
+ * debug data and report the problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: Starting HW checksumming for %s failed, using SW checksumming\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the qeth device
+ * @2: network interface name
+ * Description:
+ * The network adapter supports hardware checksumming for incoming IP packages
+ * but the qeth device driver could not start hardware checksumming on the
+ * adapter. The qeth device driver continues to use software checksumming for
+ * incoming IP packages.
+ * User action:
+ * None if you do not require hardware checksumming for incoming network
+ * traffic. If you want to enable hardware checksumming, ungroup and regroup
+ * the subchannel triplet of the device. If this does not resolve the problem,
+ * reboot Linux. If the problem persists, gather Linux debug data and report
+ * the problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: Enabling HW checksumming for %s failed, using SW checksumming\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the qeth device
+ * @2: network interface name
+ * Description:
+ * The network adapter supports hardware checksumming for incoming IP packages
+ * but the qeth device driver could not enable hardware checksumming on the
+ * adapter. The qeth device driver continues to use software checksumming for
+ * incoming IP packages.
+ * User action:
+ * None if you do not require hardware checksumming for incoming network
+ * traffic. If you want to enable hardware checksumming, ungroup and regroup
+ * the subchannel triplet of the device. If this does not resolve the problem,
+ * reboot Linux. If the problem persists, gather Linux debug data and report
+ * the problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: Starting outbound TCP segmentation offload for %s failed\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the qeth device
+ * @2: network interface name
+ * Description:
+ * The network adapter supports TCP segmentation offload, but the qeth device
+ * driver could not start this support on the adapter.
+ * User action:
+ * None if you do not require TCP segmentation offload. If you want to
+ * enable TCP segmentation offload, ungroup and regroup the subchannel triplet
+ * of the device. If this does not resolve the problem, reboot Linux. If the
+ * problem persists, gather Linux debug data and report the problem to your
+ * support organization.
+ */
+
+/*?
+ * Text: "%s: The network adapter failed to generate a unique ID\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the qeth device
+ * Description:
+ * In IBM mainframe environments, network interfaces are not identified by
+ * a specific MAC address. Therefore, the network adapters provide the network
+ * interfaces with unique IDs to be used in their IPv6 link local addresses.
+ * Without such a unique ID, duplicate addresses might be assigned in other
+ * LPARs.
+ * User action:
+ * Install the latest firmware on the adapter hardware. Manually configure
+ * an IPv6 link local address for this device.
+ */
+
+/*?
+ * Text: "There is no IPv6 support for the layer 3 discipline\n"
+ * Severity: Warning
+ * Description:
+ * If you want to use IPv6 with the layer 3 discipline, you need a Linux kernel
+ * with IPv6 support. Because your Linux kernel has not been compiled with
+ * IPv6 support, you cannot use IPv6 with the layer 3 discipline, even if your
+ * adapter supports IPv6.
+ * User action:
+ * Use a Linux kernel that has been compiled to include IPv6 support if you
+ * want to use IPv6 with layer 3 qeth devices.
+ */
+
+/*?
+ * Text: "%s: The qeth device is not configured for the OSI layer required by z/VM\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the qeth device
+ * Description:
+ * A qeth device that connects to a virtual network on z/VM must be configured for the
+ * same Open Systems Interconnection (OSI) layer as the virtual network. An ETHERNET
+ * guest LAN or VSWITCH uses the data link layer (layer 2) while an IP guest LAN
+ * or VSWITCH uses the network layer (layer 3).
+ * User action:
+ * If you are connecting to an ETHERNET guest LAN or VSWITCH, set the layer2 sysfs
+ * attribute of the qeth device to 1. If you are connecting to an IP guest LAN or
+ * VSWITCH, set the layer2 sysfs attribute of the qeth device to 0.
+ */
+
+/*?
+ * Text: "%s: Starting source MAC-address support for %s failed\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the qeth device
+ * @2: network interface name
+ * Description:
+ * The qeth device driver could not enable source MAC-address support on the
+ * adapter.
+ * User action:
+ * Ungroup and regroup the subchannel triplet of the device. If this does not
+ * resolve the problem, reboot Linux. If the problem persists, gather Linux
+ * debug data and report the problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x already exists\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the qeth device
+ * @2: first token of the MAC-address
+ * @3: second token of the MAC-address
+ * @4: third token of the MAC-address
+ * @5: fourth token of the MAC-address
+ * @6: fifth token of the MAC-address
+ * @7: sixth token of the MAC-address
+ * Description:
+ * Setting the MAC address for the qeth device fails, because this
+ * MAC address is already defined on the OSA CHPID.
+ * User action:
+ * Use a different MAC address for this qeth device.
+ */
+
+/*?
+ * Text: "%s: MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x is not authorized\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the qeth device
+ * @2: first token of the MAC-address
+ * @3: second token of the MAC-address
+ * @4: third token of the MAC-address
+ * @5: fourth token of the MAC-address
+ * @6: fifth token of the MAC-address
+ * @7: sixth token of the MAC-address
+ * Description:
+ * This qeth device is a virtual network interface card (NIC), to which z/VM
+ * has already assigned a MAC address. z/VM MAC address verification does
+ * not allow you to change this predefined address.
+ * User action:
+ * None; use the MAC address that has been assigned by z/VM.
+ */
+
+/*?
+ * Text: "%s: The HiperSockets network traffic analyzer is activated\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: bus ID of the qeth device
+ * Description:
+ * The sysfs 'sniffer' attribute of the HiperSockets device has the value '1'.
+ * The corresponding HiperSockets interface has been switched into promiscuous mode.
+ * As a result, the HiperSockets network traffic analyzer is started on the device.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "%s: The HiperSockets network traffic analyzer is deactivated\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: bus ID of the qeth device
+ * Description:
+ * The sysfs 'sniffer' attribute of the HiperSockets device has the value '1'.
+ * Promiscuous mode has been switched off for the corresponding HiperSockets interface.
+ * As a result, the HiperSockets network traffic analyzer is stopped on the device.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "%s: The device is not authorized to run as a HiperSockets network traffic analyzer\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the qeth device
+ * Description:
+ * The sysfs 'sniffer' attribute of the HiperSockets device has the value '1'.
+ * The corresponding HiperSockets interface is switched into promiscuous mode
+ * but the network traffic analyzer (NTA) rules configured at the Support Element (SE)
+ * do not allow tracing. Possible reasons are:
+ * - Tracing is not authorized for all HiperSockets channels in the mainframe system
+ * - Tracing is not authorized for this HiperSockets channel
+ * - LPAR is not authorized to enable an NTA
+ * User action:
+ * Configure appropriate HiperSockets NTA rules at the SE.
+ */
+
+/*?
+ * Text: "%s: A HiperSockets network traffic analyzer is already active in the HiperSockets LAN\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the qeth device
+ * Description:
+ * The sysfs 'sniffer' attribute of the HiperSockets device has the value '1'.
+ * The HiperSockets interface is switched into promiscuous mode but another
+ * HiperSockets device on the same HiperSockets channel is already running as
+ * a network traffic analyzer.
+ * A HiperSockets channel can only have one active network traffic analyzer.
+ * User action:
+ * Do not configure multiple HiperSockets devices in the same HiperSockets channel as
+ * tracing devices.
+ */
+
+
+/*? Text: "core functions removed\n" */
+/*? Text: "%s: Device is a%s card%s%s%s\nwith link type %s.\n" */
+/*? Text: "%s: Device is a%s card%s%s%s\nwith link type %s (no portname needed by interface).\n" */
+/*? Text: "%s: Device is a%s card%s%s%s\nwith link type %s (portname: %s)\n" */
+/*? Text: "%s: issue_next_read failed: no iob available!\n" */
+/*? Text: "%s: Priority Queueing not supported\n" */
+/*? Text: "%s: sense data available. cstat 0x%X dstat 0x%X\n" */
+/*? Text: "loading core functions\n" */
+/*? Text: "%s: MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x successfully registered on device %s\n" */
+/*? Text: "%s: Device successfully recovered!\n" */
+/*? Text: "register layer 2 discipline\n" */
+/*? Text: "unregister layer 2 discipline\n" */
+/*? Text: "%s: Hardware IP fragmentation not supported on %s\n" */
+/*? Text: "%s: IPv6 not supported on %s\n" */
+/*? Text: "%s: VLAN not supported on %s\n" */
+/*? Text: "%s: Inbound source MAC-address not supported on %s\n" */
+/*? Text: "%s: IPV6 enabled\n" */
+/*? Text: "%s: ARP processing not supported on %s!\n" */
+/*? Text: "%s: Hardware IP fragmentation enabled \n" */
+/*? Text: "%s: set adapter parameters not supported.\n" */
+/*? Text: "%s: VLAN enabled\n" */
+/*? Text: "register layer 3 discipline\n" */
+/*? Text: "%s: Outbound TSO enabled\n" */
+/*? Text: "%s: Broadcast not supported on %s\n" */
+/*? Text: "%s: Outbound TSO not supported on %s\n" */
+/*? Text: "%s: Inbound HW Checksumming not supported on %s,\ncontinuing using Inbound SW Checksumming\n" */
+/*? Text: "%s: Using no checksumming on %s.\n" */
+/*? Text: "%s: Broadcast enabled\n" */
+/*? Text: "%s: Multicast not supported on %s\n" */
+/*? Text: "%s: Using SW checksumming on %s.\n" */
+/*? Text: "%s: HW Checksumming (inbound) enabled\n" */
+/*? Text: "unregister layer 3 discipline\n" */
+/*? Text: "%s: Multicast enabled\n" */
+/*? Text: "%s: QDIO data connection isolation is deactivated\n" */
+/*? Text: "%s: QDIO data connection isolation is activated\n" */
+/*? Text: "%s: Adapter does not support QDIO data connection isolation\n" */
+/*? Text: "%s: Adapter is dedicated. QDIO data connection isolation not supported\n" */
+/*? Text: "%s: TSO does not permit QDIO data connection isolation\n" */
+
--- /dev/null
+/*?
+ * Text: "Root becomes the owner of all s390dbf files in sysfs\n"
+ * Severity: Warning
+ * Description:
+ * The S/390 debug feature you are using only supports uid/gid = 0.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "Registering debug feature %s failed\n"
+ * Severity: Error
+ * Parameter:
+ * @1: feature name
+ * Description:
+ * The initialization of an S/390 debug feature failed. A likely cause of this
+ * problem is memory constraints. The system keeps running, but the debug
+ * data for this feature will not be available in sysfs.
+ * User action:
+ * Consider assigning more memory to your LPAR or z/VM guest virtual machine.
+ */
+
+/*?
+ * Text: "Registering view %s/%s would exceed the maximum number of views %i\n"
+ * Severity: Error
+ * Parameter:
+ * @1: feature name
+ * @2: view name
+ * @3: maximum
+ * Description:
+ * The maximum number of allowed debug feature views has been reached. The
+ * view has not been registered. The system keeps running but the new view
+ * will not be available in sysfs. This is a program error.
+ * User action:
+ * Report this problem to your support partner.
+ */
+
+/*?
+ * Text: "%s is not a valid level for a debug feature\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: level
+ * Description:
+ * Setting a new level for a debug feature by using the 'level' sysfs attribute
+ * failed. Valid levels are the minus sign (-) and the integers in the
+ * range 0 to 6. The minus sign switches off the feature. The numbers switch
+ * the feature on, where higher numbers produce more debug output.
+ * User action:
+ * Write a valid value to the 'level' sysfs attribute.
+ */
+
+/*?
+ * Text: "Flushing debug data failed because %c is not a valid area\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: debug area number
+ * Description:
+ * Flushing a debug area by using the 'flush' sysfs attribute failed. Valid
+ * values are the minus sign (-) for flushing all areas, or the number of the
+ * respective area for flushing a single area.
+ * User action:
+ * Write a valid area number or the minus sign (-) to the 'flush' sysfs
+ * attribute.
+ */
+
+/*?
+ * Text: "Allocating memory for %i pages failed\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: number of pages
+ * Description:
+ * Setting the debug feature size by using the 'page' sysfs attribute failed.
+ * Linux did not have enough memory for expanding the debug feature to the
+ * requested size.
+ * User action:
+ * Use a smaller number of pages for the debug feature or allocate more
+ * memory to your LPAR or z/VM guest virtual machine.
+ */
+
+/*? Text: "%s: set new size (%i pages)\n" */
+/*? Text: "%s: switched off\n" */
+/*? Text: "%s: level %i is out of range (%i - %i)\n" */
+/*? Text: "Registering view %s/%s failed due to out of memory\n" */
--- /dev/null
+/*? Text: "sync request failed (cmd=0x%08x, status=0x%02x)\n" */
+/*? Text: "readcpuinfo failed (response=0x%04x)\n" */
+/*? Text: "configure cpu failed (cmd=0x%08x, response=0x%04x)\n" */
+/*? Text: "configure channel-path failed (cmd=0x%08x, response=0x%04x)\n" */
+/*? Text: "read channel-path info failed (response=0x%04x)\n" */
+/*? Text: "assign storage failed (cmd=0x%08x, response=0x%04x, rn=0x%04x)\n" */
+
+/*?
+ * Text: "Memory hotplug state changed, suspend refused.\n"
+ * Severity: Error
+ * Description:
+ * Suspend is refused after a memory hotplug operation was performed.
+ * User action:
+ * The system needs to be restarted and no memory hotplug operation must be
+ * performed in order to allow suspend.
+ */
--- /dev/null
+/*? Text: "cpu capability changed.\n" */
+/*? Text: "no configuration management.\n" */
+
--- /dev/null
+/*? Text: "request failed (status=0x%02x)\n" */
+/*? Text: "request failed with response code 0x%x\n" */
--- /dev/null
+/*? Text: "sclp_send failed for get_nr_blocks\n" */
+/*? Text: "SCLP error: %x\n" */
+/*? Text: "sclp_send failed: %x\n" */
+/*? Text: "Error from SCLP while copying hsa. Event status = %x\n" */
--- /dev/null
+/*?
+ * Text: "Execute protection active, mvcos available\n"
+ * Severity: Informational
+ * Description:
+ * The kernel parameter 'noexec' has been specified. The kernel will
+ * honor the execute bit of mappings and will use the mvcos instruction
+ * to copy between the user and kernel address space.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "Execute protection active, mvcos not available\n"
+ * Severity: Informational
+ * Description:
+ * The kernel parameter 'noexec' has been specified. The kernel will
+ * honor the execute bit of mappings. The mvcos instruction is not
+ * available and the kernel will use the slower page table walk method
+ * to copy between the user and kernel address space.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "Address spaces switched, mvcos available\n"
+ * Severity: Informational
+ * Description:
+ * The kernel parameter 'switch_amode' has been specified. The kernel
+ * will use the primary address space for user space processes and the
+ * home address space for the kernel. The mvcos instruction is used to
+ * copy between the user and kernel address space.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "Address spaces switched, mvcos not available\n"
+ * Severity: Informational
+ * Description:
+ * The kernel parameter 'switch_amode' has been specified. The kernel
+ * will use the primary address space for user space processes and the
+ * home address space for the kernel. The mvcos instruction is not
+ * available and the kernel will use the slower page table walk method
+ * to copy between the user and kernel address space.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "initrd extends beyond end of memory (0x%08lx > 0x%08lx) disabling initrd\n"
+ * Severity: Error
+ * Parameter:
+ * @1: start address of the initial RAM disk
+ * @2: memory end address
+ * Description:
+ * The load address and the size of the initial RAM disk result in an end
+ * address of the initial RAM disk that is beyond the end of the system
+ * memory.
+ * User action:
+ * Lower the load address of the initial RAM disk, reduce the size of the
+ * initial RAM disk, or increase the size of the system memory to make the
+ * initial RAM disk fit into the memory.
+ */
+
+/*?
+ * Text: "Moving initrd (0x%08lx -> 0x%08lx, size: %ld)\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: old start address of the initial RAM disk
+ * @2: new start address of the initial RAM disk
+ * @3: size of the initial RAM disk
+ * Description:
+ * The location of the initial RAM disk conflicted with the boot memory bitmap.
+ * To resolve the conflict the initial RAM disk has been moved to a new
+ * location.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "Linux is running as a z/VM guest operating system in 31-bit mode\n"
+ * Severity: Informational
+ * Description:
+ * The 31-bit Linux kernel detected that it is running as a guest operating
+ * system of the z/VM hypervisor.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "Linux is running natively in 31-bit mode\n"
+ * Severity: Informational
+ * Description:
+ * The 31-bit Linux kernel detected that it is running on an IBM mainframe,
+ * either as the sole operating system in an LPAR or as the sole operating
+ * system on the entire mainframe. The Linux kernel is not running as a
+ * guest operating system of the z/VM hypervisor.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "The hardware system has IEEE compatible floating point units\n"
+ * Severity: Informational
+ * Description:
+ * The Linux kernel detected that it is running on a hardware system with
+ * CPUs that have IEEE compatible floating point units.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "The hardware system has no IEEE compatible floating point units\n"
+ * Severity: Informational
+ * Description:
+ * The Linux kernel detected that it is running on a hardware system with
+ * CPUs that do not have IEEE compatible floating point units.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "Linux is running as a z/VM guest operating system in 64-bit mode\n"
+ * Severity: Informational
+ * Description:
+ * The 64-bit Linux kernel detected that it is running as a guest operating
+ * system of the z/VM hypervisor.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "Linux is running natively in 64-bit mode\n"
+ * Severity: Informational
+ * Description:
+ * The 64-bit Linux kernel detected that it is running on an IBM mainframe,
+ * either as the sole operating system in an LPAR or as the sole operating
+ * system on the entire mainframe. The Linux kernel is not running as a
+ * guest operating system of the z/VM hypervisor.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "Defining the Linux kernel NSS failed with rc=%d\n"
+ * Severity: Error
+ * Parameter:
+ * @1: return code
+ * Description:
+ * The Linux kernel could not define the named saved system (NSS) with
+ * the z/VM CP DEFSYS command. The return code represents the numeric
+ * portion of the CP DEFSYS error message.
+ * User action:
+ * For return code 1, the z/VM guest virtual machine is not authorized
+ * to define named saved systems.
+ * Ensure that the z/VM guest virtual machine is authorized to issue
+ * the CP DEFSYS command (typically privilege class E).
+ * For other return codes, see the help and message documentation for
+ * the CP DEFSYS command.
+ */
+
+/*?
+ * Text: "Saving the Linux kernel NSS failed with rc=%d\n"
+ * Severity: Error
+ * Parameter:
+ * @1: return code
+ * Description:
+ * The Linux kernel could not save the named saved system (NSS) with
+ * the z/VM CP SAVESYS command. The return code represents the numeric
+ * portion of the CP SAVESYS error message.
+ * User action:
+ * For return code 1, the z/VM guest virtual machine is not authorized
+ * to save named saved systems.
+ * Ensure that the z/VM guest virtual machine is authorized to issue
+ * the CP SAVESYS command (typically privilege class E).
+ * For other return codes, see the help and message documentation for
+ * the CP SAVESYS command.
+ */
+
+/*? Text: "Linux is running under KVM in 64-bit mode\n" */
+
--- /dev/null
+/*?
+ * Text: "%s: A tape unit was detached while in use\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * A tape unit has been detached from the I/O configuration while a tape
+ * was being accessed. This typically results in I/O error messages and
+ * potentially in damaged data on the tape.
+ * User action:
+ * Check the output of the application that accesses the tape device.
+ * If this problem occurred during a write-type operation, consider repeating
+ * the operation after bringing the tape device back online.
+ */
+
+/*?
+ * Text: "%s: A tape cartridge has been mounted\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * A tape cartridge has been inserted into the tape unit. The tape in the
+ * tape unit is ready to be accessed.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "%s: The tape cartridge has been successfully unloaded\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * The tape cartridge has been unloaded from the tape unit. Insert a tape
+ * cartridge before accessing the tape device.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "%s: Determining the size of the recorded area...\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * The tape block device driver is currently determining the size of the
+ * recorded area on the tape medium. This operation typically takes a
+ * few minutes.
+ * User action:
+ * Wait until the size is shown in a completion message.
+ */
+
+/*?
+ * Text: "%s: Opening the tape failed because of missing end-of-file marks\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * The tape block device driver requires end-of-file marks at the end of
+ * the recorded area on a tape. If the tape device was to be opened in
+ * response to a mount command, the mount command will fail.
+ * User action:
+ * Insert a tape cartridge that has been prepared for use with the tape
+ * block device driver and try the operation again.
+ */
+
+/*?
+ * Text: "%s: The size of the recorded area is %i blocks\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: bus ID of the tape device
+ * @2: number of blocks
+ * Description:
+ * The tape block device driver has successfully determined the size of the
+ * recorded area on the tape medium. The tape device can now be used as
+ * a block device. See the mount(8) man page for details on how to access
+ * block devices.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "A cartridge is loaded in tape device %s, refusing to suspend\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * A request to suspend a tape device currently loaded with a cartridge is
+ * rejected.
+ * User action:
+ * Unload the tape device. Then try to suspend the system again.
+ */
+
+/*?
+ * Text: "Tape device %s is busy, refusing to suspend\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * A request to suspend a tape device that is currently in use is rejected.
+ * User action:
+ * Terminate applications performing tape operations
+ * and then try to suspend the system again.
+ */
--- /dev/null
+/*?
+ * Text: "%s: An unexpected condition %d occurred in tape error recovery\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the tape device
+ * @2: number
+ * Description:
+ * The control unit has reported an error condition that is not recognized by
+ * the error recovery process of the tape device driver.
+ * User action:
+ * Report this problem and the condition number from the message to your
+ * support organization.
+ */
+
+/*?
+ * Text: "%s: A data overrun occurred between the control unit and tape unit\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * A data overrun error has occurred on the connection between the control
+ * unit and the tape unit. If this problem occurred during a write-type
+ * operation, the integrity of the data on the tape might be compromised.
+ * User action:
+ * Use a faster connection. If this problem occurred during a write-type
+ * operation, consider repositioning the tape and repeating the operation.
+ */
+
+/*?
+ * Text: "%s: The block ID sequence on the tape is incorrect\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * The control unit has detected an incorrect block ID sequence on the tape.
+ * This problem typically indicates that the data on the tape is damaged.
+ * User action:
+ * If this problem occurred during a write-type operation reposition the tape
+ * and repeat the operation.
+ */
+
+/*?
+ * Text: "%s: A read error occurred that cannot be recovered\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * A read error has occurred that cannot be recovered. The current tape might
+ * be damaged.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "%s: A write error on the tape cannot be recovered\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * A write error has occurred that could not be recovered by the automatic
+ * error recovery process.
+ * User action:
+ * Use a different tape cartridge.
+ */
+
+/*?
+ * Text: "%s: Writing the ID-mark failed\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * The ID-mark at the beginning of tape could not be written. The tape medium
+ * might be write-protected.
+ * User action:
+ * Try a different tape cartridge. Ensure that the write-protection on the
+ * cartridge is switched off.
+ */
+
+/*?
+ * Text: "%s: Reading the tape beyond the end of the recorded area failed\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * A read-type operation failed because it extended beyond the end of the
+ * recorded area on the tape medium.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "%s: The tape contains an incorrect block ID sequence\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * The control unit has detected an incorrect block ID sequence on the tape.
+ * This problem typically indicates that the data on the tape is damaged.
+ * User action:
+ * If this problem occurred during a write-type operation reposition the tape
+ * and repeat the operation.
+ */
+
+/*?
+ * Text: "%s: A path equipment check occurred for the tape device\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * A path equipment check has occurred. This check indicates problems with the
+ * connection between the mainframe system and the tape control unit.
+ * User action:
+ * Ensure that the cable connections between the mainframe system and the
+ * control unit are securely in place and not damaged.
+ */
+
+/*?
+ * Text: "%s: The tape unit cannot process the tape format\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * Either the tape unit is not able to read the format ID mark, or the
+ * specified format is not supported by the tape unit.
+ * User action:
+ * If you do not need the data recorded on the current tape, use a different
+ * tape or write a new format ID mark at the beginning of the tape. Be aware
+ * that writing a new ID mark leads to a loss of all data that has been
+ * recorded on the tape. If you need the data on the current tape, use a tape
+ * unit that supports the tape format.
+ */
+
+/*?
+ * Text: "%s: The tape medium is write-protected\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * A write-type operation failed because the tape medium is write-protected.
+ * User action:
+ * Eject the tape cartridge, switch off the write protection on the cartridge,
+ * insert the cartridge, and try the operation again.
+ */
+
+/*?
+ * Text: "%s: The tape does not have the required tape tension\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * The tape does not have the required tape tension.
+ * User action:
+ * Rewind and reposition the tape, then repeat the operation.
+ */
+
+/*?
+ * Text: "%s: The tape unit failed to load the cartridge\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * An error has occurred while loading the tape cartridge.
+ * User action:
+ * Unload the cartridge and load it again.
+ */
+
+/*?
+ * Text: "%s: Automatic unloading of the tape cartridge failed\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * The tape unit failed to unload the cartridge.
+ * User action:
+ * Unload the cartridge manually by using the eject button on the tape unit.
+ */
+
+/*?
+ * Text: "%s: An equipment check has occurred on the tape unit\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * Possible reasons for the check condition are a unit adapter error, a buffer
+ * error on the lower interface, an unusable internal path, or an error that
+ * has occurred while loading the cartridge.
+ * User action:
+ * Examine the tape unit and the cartridge loader. Consult the tape unit
+ * documentation for details.
+ */
+
+/*?
+ * Text: "%s: The tape information states an incorrect length\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * The tape is shorter than stated at the beginning of the tape data. A
+ * possible reason for this problem is that the tape might have been physically
+ * truncated. Data written to the tape might be incomplete or damaged.
+ * User action:
+ * If this problem occurred during a write-type operation, consider repeating
+ * the operation with a different tape cartridge.
+ */
+
+/*?
+ * Text: "%s: The tape unit is not ready\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * The tape unit is online but not ready.
+ * User action:
+ * Turn the ready switch on the tape unit to the ready position and try the
+ * operation again.
+ */
+
+/*?
+ * Text: "%s: The tape medium has been rewound or unloaded manually\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * The tape unit rewind button, unload button, or both have been used to
+ * rewind or unload the tape cartridge. A tape cartridge other than the
+ * intended cartridge might have been inserted or the tape medium might not
+ * be at the expected position.
+ * User action:
+ * Verify that the correct tape cartridge has been inserted and that the tape
+ * medium is at the required position before continuing to work with the tape.
+ */
+
+/*?
+ * Text: "%s: The tape subsystem is running in degraded mode\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * The tape subsystem is not operating at its maximum performance.
+ * User action:
+ * Contact your service representative for the tape unit and report this
+ * problem.
+ */
+
+/*?
+ * Text: "%s: The tape unit is already assigned\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * The tape unit is already assigned to another channel path.
+ * User action:
+ * Free the tape unit from the operating system instance to which it is
+ * currently assigned then try again.
+ */
+
+/*?
+ * Text: "%s: The tape unit is not online\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * The tape unit is not online to the tape device driver.
+ * User action:
+ * Ensure that the tape unit is operational and that the cable connections
+ * between the control unit and the tape unit are securely in place and not
+ * damaged.
+ */
+
+/*?
+ * Text: "%s: The control unit has fenced access to the tape volume\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * The control unit fences further access to the current tape volume. The data
+ * integrity on the tape volume might have been compromised.
+ * User action:
+ * Rewind and unload the tape cartridge.
+ */
+
+/*?
+ * Text: "%s: A parity error occurred on the tape bus\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * A data parity check error occurred on the bus. Data that was read or written
+ * while the error occurred is not valid.
+ * User action:
+ * Reposition the tape and repeat the read-type or write-type operation.
+ */
+
+/*?
+ * Text: "%s: I/O error recovery failed on the tape control unit\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * An I/O error occurred that cannot be recovered by the automatic error
+ * recovery process of the tape control unit. The application that operates
+ * the tape unit will receive a return value of -EIO which indicates an
+ * I/O error. The data on the tape might be damaged.
+ * User action:
+ * If this problem occurred during a write-type operation, consider
+ * repositioning the tape and repeating the operation.
+ */
+
+/*?
+ * Text: "%s: The tape unit requires a firmware update\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * The tape unit requires firmware patches from the tape control unit but the
+ * required patches are not available on the control unit.
+ * User action:
+ * Make the required patches available on the control unit then reposition the
+ * tape and retry the operation. For details about obtaining and installing
+ * firmware updates see the control unit documentation.
+ */
+
+/*?
+ * Text: "%s: The maximum block size for buffered mode is exceeded\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * The block to be written is larger than allowed for the buffered mode.
+ * User action:
+ * Use a smaller block size.
+ */
+
+/*?
+ * Text: "%s: A channel interface error cannot be recovered\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * An error has occurred on the channel interface. This error cannot
+ * be recovered by the control unit error recovery process.
+ * User action:
+ * See the documentation of the control unit.
+ */
+
+/*?
+ * Text: "%s: A channel protocol error occurred\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * An error was detected in the channel protocol.
+ * User action:
+ * Reposition the tape and try the operation again.
+ */
+
+/*?
+ * Text: "%s: The tape unit does not support the compaction algorithm\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * The tape unit cannot read the current tape. The data on the tape has been
+ * compressed with an algorithm that is not supported by the tape unit.
+ * User action:
+ * Use a tape unit that supports the compaction algorithm used for the
+ * current tape.
+ */
+
+/*?
+ * Text: "%s: The tape unit does not support tape format 3480-2 XF\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * The tape unit does not support tapes recorded in the 3480-2 XF format.
+ * User action:
+ * If you do not need the data recorded on the current tape, rewind the tape
+ * and overwrite it with a supported format. If you need the data on the
+ * current tape, use a tape unit that supports the tape format.
+ */
+
+/*?
+ * Text: "%s: The tape unit does not support format 3480 XF\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * The tape unit does not support tapes recorded in the 3480 XF format.
+ * User action:
+ * If you do not need the data recorded on the current tape, rewind the tape
+ * and overwrite it with a supported format. If you need the data on the
+ * current tape, use a tape unit that supports the tape format.
+ */
+
+/*?
+ * Text: "%s: The tape unit does not support the current tape length\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * The length of the tape in the cartridge is incompatible with the tape unit.
+ * User action:
+ * Either use a different tape unit or use a tape with a supported length.
+ */
+
+/*?
+ * Text: "%s: The tape unit does not support the tape length\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * The length of the tape in the cartridge is incompatible with the tape
+ * unit.
+ * User action:
+ * Either use a different tape unit or use a tape with a supported length.
+ */
+
--- /dev/null
+/*?
+ * Text: "%s: The tape medium must be loaded into a different tape unit\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * The tape device has indicated an error condition that requires loading
+ * the tape cartridge into a different tape unit to recover.
+ * User action:
+ * Unload the cartridge and use a different tape unit to retry the operation.
+ */
+
+/*?
+ * Text: "%s: Tape media information: exception %s, service %s\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * @2: exception
+ * @3: service
+ * Description:
+ * This is an operating system independent tape medium information message
+ * that was issued by the tape unit. The information in the message is
+ * intended for the IBM customer engineer.
+ * User action:
+ * See the documentation for the tape unit for further information.
+ */
+
+/*?
+ * Text: "%s: Device subsystem information: exception %s, service %s\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * @2: exception
+ * @3: required service action
+ * Description:
+ * This is an operating system independent device subsystem information message
+ * that was issued by the tape unit. The information in the message is
+ * intended for the IBM customer engineer.
+ * User action:
+ * See the documentation for the tape unit for further information.
+ */
+
+/*?
+ * Text: "%s: I/O subsystem information: exception %s, service %s\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * @2: exception
+ * @3: required service action
+ * Description:
+ * This is an operating system independent I/O subsystem information message
+ * that was issued by the tape unit. The information in the message is
+ * intended for the IBM customer engineer.
+ * User action:
+ * See the documentation for the tape unit for further information.
+ */
+
+/*?
+ * Text: "%s: The tape unit has issued sense message %s\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * @2: sense message code
+ * Description:
+ * The tape unit has issued an operating system independent sense message.
+ * User action:
+ * See the documentation for the tape unit for further information.
+ */
+
+/*?
+ * Text: "%s: The tape unit has issued an unknown sense message code 0x%x\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * @2: code
+ * Description:
+ * The tape device driver has received an unknown sense message from the
+ * tape unit.
+ * User action:
+ * See the documentation for the tape unit for further information.
+ */
+
+/*?
+ * Text: "%s: MIM SEV=%i, MC=%02x, ES=%x/%x, RC=%02x-%04x-%02x\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * @2: SEV
+ * @3: message code
+ * @4: exception
+ * @5: required service action
+ * @6: refcode
+ * @7: mid
+ * @8: fid
+ * Description:
+ * This is an operating system independent information message that was
+ * issued by the tape unit. The information in the message is intended for
+ * the IBM customer engineer.
+ * User action:
+ * See the documentation for the tape unit for further information.
+ */
+
+/*?
+ * Text: "%s: IOSIM SEV=%i, DEVTYPE=3590/%02x, MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * @2: SEV
+ * @3: model
+ * @4: message code
+ * @5: exception
+ * @6: required service action
+ * @7: refcode1
+ * @8: refcode2
+ * @9: refcode3
+ * Description:
+ * This is an operating system independent I/O subsystem information message
+ * that was issued by the tape unit. The information in the message is
+ * intended for the IBM customer engineer.
+ * User action:
+ * See the documentation for the tape unit for further information.
+ */
+
+/*?
+ * Text: "%s: DEVSIM SEV=%i, DEVTYPE=3590/%02x, MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * @2: SEV
+ * @3: model
+ * @4: message code
+ * @5: exception
+ * @6: required service action
+ * @7: refcode1
+ * @8: refcode2
+ * @9: refcode3
+ * Description:
+ * This is an operating system independent device subsystem information message
+ * issued by the tape unit. The information in the message is intended for
+ * the IBM customer engineer.
+ * User action:
+ * See the documentation for the tape unit for further information.
+ */
+
+/*?
+ * Text: "%s: The tape unit has issued an unknown sense message code %x\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * @2: code
+ * Description:
+ * The tape device has issued a sense message that is unknown to the device
+ * driver.
+ * User action:
+ * Use the message code printed as hexadecimal value and see the documentation
+ * for the tape unit for further information.
+ */
+
+/*?
+ * Text: "%s: The tape unit failed to obtain the encryption key from EKM\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * The tape unit was unable to retrieve the encryption key required to decode
+ * the data on the tape from the enterprise key manager (EKM).
+ * User action:
+ * See the EKM and tape unit documentation for information about how to enable
+ * the tape unit to retrieve the encryption key.
+ */
+
+/*?
+ * Text: "%s: A different host has privileged access to the tape unit\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the tape device
+ * Description:
+ * You cannot access the tape unit because a different operating system
+ * instance has privileged access to the unit.
+ * User action:
+ * Unload the current cartridge to solve this problem.
+ */
+
--- /dev/null
+/*?
+ * Text: "The ETR interface has adjusted the clock by %li microseconds\n"
+ * Severity: Notice
+ * Parameter:
+ * @1: number of microseconds
+ * Description:
+ * The external time reference (ETR) interface has synchronized the system
+ * clock with the external reference and set it to a new value. The time
+ * difference between the old and new clock value has been passed to the
+ * network time protocol (NTP) as a single shot adjustment.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "The real or virtual hardware system does not provide an ETR interface\n"
+ * Severity: Warning
+ * Description:
+ * The 'etr=' parameter has been passed on the kernel parameter line for
+ * a Linux instance that does not have access to the external time reference
+ * (ETR) facility.
+ * User action:
+ * To avoid this warning remove the 'etr=' kernel parameter.
+ */
+
+/*?
+ * Text: "The real or virtual hardware system does not provide an STP interface\n"
+ * Severity: Warning
+ * Description:
+ * The 'stp=' parameter has been passed on the kernel parameter line for
+ * a Linux instance that does not have access to the server time protocol
+ * (STP) facility.
+ * User action:
+ * To avoid this warning remove the 'stp=' kernel parameter.
+ */
+
--- /dev/null
+/*?
+ * Text: "The z/VM CP interface device driver cannot be loaded without z/VM\n"
+ * Severity: Warning
+ * Description:
+ * With the z/VM CP interface you can issue z/VM CP commands from a Linux
+ * terminal session. On Linux instances that run in environments other than
+ * the z/VM hypervisor, the z/VM CP interface does not provide any useful
+ * function and the corresponding vmcp device driver cannot be loaded.
+ * User action:
+ * Load the vmcp device driver only on Linux instances that run as guest
+ * operating systems of the z/VM hypervisor. If the device driver has been
+ * compiled into the kernel, ignore this message.
+ */
--- /dev/null
+/*? Text: "vmlogrdr: failed to start recording automatically\n" */
+/*? Text: "vmlogrdr: connection severed with reason %i\n" */
+/*? Text: "vmlogrdr: iucv connection to %s failed with rc %i \n" */
+/*? Text: "vmlogrdr: failed to stop recording automatically\n" */
+/*? Text: "not running under VM, driver not loaded.\n" */
+
+/*?
+ * Text: "vmlogrdr: device %s is busy. Refuse to suspend.\n"
+ * Severity: Error
+ * Parameter:
+ * @1: device name
+ * Description:
+ * Suspending vmlogrdr devices that are in use is not supported.
+ * A request to suspend such a device is refused.
+ * User action:
+ * Close all applications that use any of the vmlogrdr devices
+ * and then try to suspend the system again.
+ */
--- /dev/null
+/*?
+ * Text: "The %s cannot be loaded without z/VM\n"
+ * Severity: Error
+ * Parameter:
+ * @1: z/VM virtual unit record device driver
+ * Description:
+ * The z/VM virtual unit record device driver provides Linux with access to
+ * z/VM virtual unit record devices like punch card readers, card punches, and
+ * line printers. On Linux instances that run in environments other than the
+ * z/VM hypervisor, the device driver does not provide any useful function and
+ * the corresponding vmur module cannot be loaded.
+ * User action:
+ * Load the vmur module only on Linux instances that run as guest operating
+ * systems of the z/VM hypervisor. If the z/VM virtual unit record device
+ * has been compiled into the kernel, ignore this message.
+ */
+
+/*?
+ * Text: "Kernel function alloc_chrdev_region failed with error code %d\n"
+ * Severity: Error
+ * Parameter:
+ * @1: error code according to errno definitions
+ * Description:
+ * The z/VM virtual unit record device driver (vmur) needs to register a range
+ * of character device minor numbers from 0x0000 to 0xffff.
+ * This registration failed, probably because of memory constraints.
+ * User action:
+ * Free some memory and reload the vmur module. If the z/VM virtual unit
+ * record device driver has been compiled into the kernel reboot Linux.
+ * Consider assigning more memory to your LPAR or z/VM guest virtual machine.
+ */
+
+/*?
+ * Text: "Unit record device %s is busy, %s refusing to suspend.\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the unit record device
+ * @2: z/VM virtual unit record device driver
+ * Description:
+ * Linux cannot be suspended while a unit record device is in use.
+ * User action:
+ * Stop all applications that work on z/VM spool file queues, for example, the
+ * vmur tool. Then try again to suspend Linux.
+ */
+
+/*? Text: "%s loaded.\n" */
+/*? Text: "%s unloaded.\n" */
--- /dev/null
+/*?
+ * Text: "The system cannot be suspended while the watchdog is in use\n"
+ * Severity: Error
+ * Description:
+ * A program is currently using the vmwatchdog device node. The watchdog
+ * device driver prevents the system from being suspended while the watchdog
+ * device is in use.
+ * User action:
+ * If you want to suspend the system, find out which program uses the watchdog
+ * device. Stop the program or reconfigure it to not use the watchdog.
+ */
+
+
+/*?
+ * Text: "The system cannot be suspended while the watchdog is running\n"
+ * Severity: Error
+ * Description:
+ * The watchdog must not time out during hibernation. The watchdog
+ * device driver prevents the system from being suspended while the watchdog
+ * timer is running.
+ * User action:
+ * If you want to suspend the system, stop the watchdog, for example, by entering
+ * the command: 'echo V > /dev/vmwatchdog'. Alternatively, stop the program that
+ * uses the watchdog or reconfigure the program to not use the watchdog.
+ */
+
--- /dev/null
+/*?
+ * Text: "%d is not a valid number of XPRAM devices\n"
+ * Severity: Error
+ * Parameter:
+ * @1: number of partitions
+ * Description:
+ * The number of XPRAM partitions specified for the 'devs' module parameter
+ * or with the 'xpram.parts' kernel parameter must be an integer in the
+ * range 1 to 32. The XPRAM device driver created a maximum of 32 partitions
+ * that are probably not configured as intended.
+ * User action:
+ * If the XPRAM device driver has been compiled as a separate module,
+ * unload the module and load it again with a correct value for the 'devs'
+ * module parameter. If the XPRAM device driver has been compiled
+ * into the kernel, correct the 'xpram.parts' parameter in the kernel
+ * command line and restart Linux.
+ */
+
+/*?
+ * Text: "Not enough expanded memory available\n"
+ * Severity: Error
+ * Description:
+ * The amount of expanded memory required to set up your XPRAM partitions
+ * depends on the 'sizes' parameter specified for the xpram module or on
+ * the specifications for the 'xpram.parts' parameter if the XPRAM device
+ * driver has been compiled into the kernel. Your
+ * current specifications exceed the amount of available expanded memory.
+ * Your XPRAM partitions are probably not configured as intended.
+ * User action:
+ * If the XPRAM device driver has been compiled as a separate module,
+ * unload the xpram module and load it again with an appropriate value
+ * for the 'sizes' module parameter. If the XPRAM device driver has been
+ * compiled into the kernel, adjust the 'xpram.parts' parameter in the
+ * kernel command line and restart Linux. If you need more than the
+ * available expanded memory, increase the expanded memory allocation for
+ * your virtual hardware or LPAR.
+ */
+
+/*?
+ * Text: "No expanded memory available\n"
+ * Severity: Error
+ * Description:
+ * The XPRAM device driver has been loaded in a Linux instance that runs
+ * in an LPAR or virtual hardware without expanded memory.
+ * No XPRAM partitions are created.
+ * User action:
+ * Allocate expanded memory for your LPAR or virtual hardware or do not
+ * load the xpram module. You can ignore this message, if you do not want
+ * to create XPRAM partitions.
+ */
+
+/*?
+ * Text: "Resuming the system failed: %s\n"
+ * Severity: Error
+ * Parameter:
+ * @1: cause of the failure
+ * Description:
+ * A system cannot be resumed if the expanded memory setup changes
+ * after hibernation. Possible reasons for the failure are:
+ * - Expanded memory was removed after hibernation.
+ * - Size of the expanded memory changed after hibernation.
+ * The system is stopped with a kernel panic.
+ * User action:
+ * Reboot Linux.
+ */
+
+/*? Text: " number of devices (partitions): %d \n" */
+/*? Text: " size of partition %d: %u kB\n" */
+/*? Text: " size of partition %d to be set automatically\n" */
+/*? Text: " memory needed (for sized partitions): %lu kB\n" */
+/*? Text: " partitions to be sized automatically: %d\n" */
+/*? Text: " automatically determined partition size: %lu kB\n" */
+/*? Text: " %u pages expanded memory found (%lu KB).\n" */
--- /dev/null
+/*?
+ * Text: "The 32-bit dump tool cannot be used for a 64-bit system\n"
+ * Severity: Alert
+ * Description:
+ * The dump process ends without creating a system dump.
+ * User action:
+ * Use a 64-bit dump tool to obtain a system dump for a 64-bit Linux instance.
+ */
+
+/*? Text: "DETECTED 'S390 (32 bit) OS'\n" */
+/*? Text: "0x%x is an unknown architecture.\n" */
+/*? Text: "DETECTED 'S390X (64 bit) OS'\n" */
--- /dev/null
+/*?
+ * Text: "%s is not a valid SCSI device\n"
+ * Severity: Error
+ * Parameter:
+ * @1: device specification
+ * Description:
+ * The specification for an initial SCSI device provided with the 'zfcp.device'
+ * kernel parameter or with the 'device' module parameter is syntactically
+ * incorrect. The specified SCSI device could not be attached to the Linux
+ * system.
+ * User action:
+ * Correct the value for the 'zfcp.device' or 'device' parameter and reboot
+ * Linux. See "Device Drivers, Features, and Commands" for information about
+ * the syntax.
+ */
+
+/*?
+ * Text: "Registering the misc device zfcp_cfdc failed\n"
+ * Severity: Error
+ * Description:
+ * The zfcp device driver failed to register the device that provides access to
+ * the adapter access control file (ACL tables). The device driver
+ * initialization failed. A possible cause for this problem is memory
+ * constraints.
+ * User action:
+ * Free some memory and try again to load the zfcp device driver. If the zfcp
+ * device driver has been compiled into the kernel, reboot Linux. Consider
+ * assigning more memory to your LPAR or z/VM guest virtual machine. If the
+ * problem persists, contact your support organization.
+ */
+
+/*?
+ * Text: "The zfcp device driver could not register with the common I/O layer\n"
+ * Severity: Error
+ * Description:
+ * The device driver initialization failed. A possible cause of this problem is
+ * memory constraints.
+ * User action:
+ * Free some memory and try again to load the zfcp device driver. If the zfcp
+ * device driver has been compiled into the kernel, reboot Linux. Consider
+ * assigning more memory to your LPAR or z/VM guest virtual machine. If the
+ * problem persists, contact your support organization.
+ */
+
+/*?
+ * Text: "%s: Setting up data structures for the FCP adapter failed\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * Description:
+ * The zfcp device driver could not allocate data structures for an FCP adapter.
+ * A possible reason for this problem is memory constraints.
+ * User action:
+ * Set the FCP adapter offline or detach it from the Linux system, free some
+ * memory and set the FCP adapter online again or attach it again. If this
+ * problem persists, gather Linux debug data, collect the FCP adapter
+ * hardware logs, and report the problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: The FCP device is operational again\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * Description:
+ * An FCP device has been unavailable because it had been detached from the
+ * Linux system or because the corresponding CHPID was offline. The FCP device
+ * is now available again and the zfcp device driver resumes all operations to
+ * the FCP device.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "%s: The CHPID for the FCP device is offline\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * Description:
+ * The CHPID for an FCP device has been set offline, either logically in Linux
+ * or on the hardware.
+ * User action:
+ * Find out which CHPID corresponds to the FCP device, for example, with the
+ * lscss command. Check if the CHPID has been set logically offline in sysfs.
+ * Write 'on' to the CHPID's status attribute to set it online. If the CHPID is
+ * online in sysfs, find out if it has been varied offline through a hardware
+ * management interface, for example the service element (SE).
+ */
+
+/*?
+ * Text: "%s: The FCP device has been detached\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * Description:
+ * An FCP device is no longer available to Linux.
+ * User action:
+ * Ensure that the FCP adapter is operational and attached to the LPAR or z/VM
+ * virtual machine.
+ */
+
+/*?
+ * Text: "%s: The FCP device did not respond within the specified time\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * Description:
+ * The common I/O layer waited for a response from the FCP adapter but
+ * no response was received within the specified time limit. This might
+ * indicate a hardware problem.
+ * User action:
+ * Consult your hardware administrator. If this problem persists,
+ * gather Linux debug data, collect the FCP adapter hardware logs, and
+ * report the problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: Registering the FCP device with the SCSI stack failed\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * Description:
+ * The FCP adapter could not be registered with the Linux SCSI
+ * stack. A possible reason for this problem is memory constraints.
+ * User action:
+ * Set the FCP adapter offline or detach it from the Linux system, free some
+ * memory and set the FCP adapter online again or attach it again. If this
+ * problem persists, gather Linux debug data, collect the FCP adapter
+ * hardware logs, and report the problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: ERP cannot recover an error on the FCP device\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * Description:
+ * An error occurred on an FCP device. The error recovery procedure (ERP)
+ * could not resolve the error. The FCP device driver cannot use the FCP device.
+ * User action:
+ * Check for previous error messages for the same FCP device to find the
+ * cause of the problem.
+ */
+
+/*?
+ * Text: "%s: Creating an ERP thread for the FCP device failed.\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * Description:
+ * The zfcp device driver could not set up error recovery procedure (ERP)
+ * processing for the FCP device. The FCP device is not available for use
+ * in Linux.
+ * User action:
+ * Free some memory and try again to load the zfcp device driver. If the zfcp
+ * device driver has been compiled into the kernel, reboot Linux. Consider
+ * assigning more memory to your LPAR or z/VM guest virtual machine. If the
+ * problem persists, contact your support organization.
+ */
+
+/*?
+ * Text: "%s: ERP failed for unit 0x%016Lx on port 0x%016Lx\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * @2: LUN
+ * @3: WWPN
+ * Description:
+ * An error occurred on the SCSI device at the specified LUN. The error recovery
+ * procedure (ERP) could not resolve the error. The SCSI device is not
+ * available.
+ * User action:
+ * Verify that the LUN is correct. Check the fibre channel fabric for errors
+ * related to the specified WWPN and LUN, the storage server, and Linux.
+ */
+
+/*?
+ * Text: "%s: ERP failed for remote port 0x%016Lx\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * @2: WWPN
+ * Description:
+ * An error occurred on a remote port. The error recovery procedure (ERP)
+ * could not resolve the error. The port is not available.
+ * User action:
+ * Verify that the WWPN is correct and check the fibre channel fabric for
+ * errors related to the WWPN.
+ */
+
+/*?
+ * Text: "%s: Attaching the name server port to the FCP device failed\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * Description:
+ * The zfcp device driver could not attach the name server port of the fibre
+ * channel fabric to an FCP device. A possible cause of this problem is
+ * memory constraints.
+ * User action:
+ * Set the FCP device offline, free some memory, then set the FCP device online
+ * again. If this does not resolve the problem, reboot Linux and try again to
+ * set the FCP device online.
+ */
+
+/*?
+ * Text: "%s: Registering unit 0x%016Lx on port 0x%016Lx failed\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * @2: LUN
+ * @3: WWPN
+ * Description:
+ * The Linux kernel could not allocate enough memory to register the SCSI
+ * device at the indicated LUN with the SCSI stack. The SCSI device is not
+ * available.
+ * User action:
+ * Free some memory then detach the LUN and attach it again.
+ */
+
+/*?
+ * Text: "%s: Registering port 0x%016Lx failed\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * @2: WWPN
+ * Description:
+ * The Linux kernel could not allocate enough memory to register the
+ * remote port with the indicated WWPN with the SCSI stack. The remote
+ * port is not available.
+ * User action:
+ * Free some memory and trigger the rescan for ports.
+ */
+
+/*?
+ * Text: "%s: A QDIO problem occurred\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * Description:
+ * QDIO reported a problem to the zfcp device driver. The zfcp device driver
+ * tries to recover this problem.
+ * User action:
+ * Check for related error messages. If this problem occurs frequently, gather
+ * Linux debug data and contact your support organization.
+ */
+
+/*?
+ * Text: "%s: A QDIO protocol error occurred, operations continue\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * Description:
+ * The zfcp device driver detected a missing flag in a QDIO queue. The device
+ * driver tries to keep the FCP device operational.
+ * User action:
+ * Check for related error messages. If this problem occurs frequently, gather
+ * Linux debug data, collect the FCP adapter hardware logs, and report the
+ * problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: Setting up the QDIO connection to the FCP adapter failed\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * Description:
+ * The zfcp device driver failed to establish a QDIO connection with the FCP
+ * adapter.
+ * User action:
+ * Set the FCP adapter offline or detach it from the Linux system, free some
+ * memory and set the FCP adapter online again or attach it again. If this
+ * problem persists, gather Linux debug data, collect the FCP adapter
+ * hardware logs, and report the problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: The FCP adapter reported a problem that cannot be recovered\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * Description:
+ * The FCP adapter has a problem that cannot be recovered by the zfcp device
+ * driver. The zfcp device driver stopped using the FCP device.
+ * User action:
+ * Gather Linux debug data, collect the FCP adapter hardware logs, and report
+ * this problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: There is a wrap plug instead of a fibre channel cable\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * Description:
+ * The FCP adapter is not physically connected to the fibre channel fabric.
+ * User action:
+ * Remove the wrap plug from the FCP adapter and connect the adapter with the
+ * fibre channel fabric.
+ */
+
+/*?
+ * Text: "%s: Access denied to unit 0x%016Lx on port 0x%016Lx\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * @2: LUN
+ * @3: WWPN
+ * Description:
+ * The Linux system is not allowed to access the SCSI device at the indicated
+ * LUN.
+ * User action:
+ * Update the access control table of the FCP device to grant the Linux
+ * system access to the LUN or remove the LUN from the Linux system.
+ */
+
+/*?
+ * Text: "%s: FCP device not operational because of an unsupported FC class\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * Description:
+ * The FCP adapter hardware does not support the fibre channel service class
+ * requested by the zfcp device driver. This problem indicates a program error
+ * in the zfcp device driver.
+ * User action:
+ * Gather Linux debug data, collect the FCP adapter hardware logs, and report
+ * this problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: 0x%Lx is an ambiguous request identifier\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * @2: request ID
+ * Description:
+ * The FCP adapter reported that it received the same request ID twice. This is
+ * an error. The zfcp device driver stopped using the FCP device.
+ * User action:
+ * Gather Linux debug data, collect the FCP adapter hardware logs, and report
+ * this problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: QTCB version 0x%x not supported by FCP adapter (0x%x to 0x%x)\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * @2: requested version
+ * @3: lowest supported version
+ * @4: highest supported version
+ * Description:
+ * See message text.
+ * The queue transfer control block (QTCB) version requested by the zfcp device
+ * driver is not supported by the FCP adapter hardware.
+ * User action:
+ * If the requested version is higher than the highest version supported by the
+ * hardware, install more recent firmware on the FCP adapter. If the requested
+ * version is lower than the lowest version supported by the hardware, upgrade
+ * to a Linux level with a more recent zfcp device driver.
+ */
+
+/*?
+ * Text: "%s: The FCP adapter could not log in to the fibre channel fabric\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * Description:
+ * The fibre channel switch rejected the login request from the FCP adapter.
+ * User action:
+ * Check the fibre channel fabric or switch logs for possible errors.
+ */
+
+/*?
+ * Text: "%s: The FCP device is suspended because of a firmware update\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * Description:
+ * The FCP device is not available while a firmware update is in progress. This
+ * problem is temporary. The FCP device will resume operations when the
+ * firmware update is completed.
+ * User action:
+ * Wait 10 seconds and try the operation again.
+ */
+
+/*?
+ * Text: "%s: All NPIV ports on the FCP adapter have been assigned\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * Description:
+ * The number of N_Port ID Virtualization (NPIV) ports that can be assigned
+ * on an FCP adapter is limited. Once assigned, NPIV ports are not released
+ * automatically but have to be released explicitly through the support
+ * element (SE).
+ * User action:
+ * Identify NPIV ports that have been assigned but are no longer in use and
+ * release them from the SE.
+ */
+
+/*?
+ * Text: "%s: The link between the FCP adapter and the FC fabric is down\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * Description:
+ * The FCP adapter is not usable. Specific error information is not available.
+ * User action:
+ * Check the cabling and the fibre channel fabric configuration. If this
+ * problem persists, gather Linux debug data, collect the FCP adapter
+ * hardware logs, and report the problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: Access denied to port 0x%016Lx\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * @2: WWPN
+ * Description:
+ * The Linux system is not allowed to access the remote port with the specified
+ * WWPN.
+ * User action:
+ * Update the access control table of the FCP device to grant the Linux
+ * system access to the WWPN or remove the WWPN from the Linux system.
+ */
+
+/*?
+ * Text: "%s: The QTCB type is not supported by the FCP adapter\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * Description:
+ * The queue transfer control block (QTCB) type requested by the zfcp device
+ * driver is not supported by the FCP adapter hardware.
+ * User action:
+ * Install the latest firmware on your FCP adapter hardware. If this does not
+ * resolve the problem, upgrade to a Linux level with a more recent zfcp device
+ * driver. If the problem persists, contact your support organization.
+ */
+
+/*?
+ * Text: "%s: The error threshold for checksum statistics has been exceeded\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * Description:
+ * The FCP adapter has reported a large number of bit errors. This might
+ * indicate a problem with the physical components of the fibre channel fabric.
+ * Details about the errors have been written to the HBA trace for the FCP
+ * adapter.
+ * User action:
+ * Check for problems in the fibre channel fabric and ensure that all cables
+ * are properly plugged.
+ */
+
+/*?
+ * Text: "%s: The local link has been restored\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * Description:
+ * A problem with the connection between the FCP adapter and the adjacent node
+ * on the fibre channel fabric has been resolved. The FCP adapter is now
+ * available again.
+ * User action:
+ * None.
+ */
+
+/*?
+ * Text: "%s: Access denied according to ACT rule type %s, rule %d\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * @2: access rule type
+ * @3: access rule
+ * Description:
+ * A rule in the access control table (ACT) for the FCP device denies access
+ * to a remote port or a LUN.
+ * User action:
+ * Examine the access control tables for the FCP device to see if the
+ * specified rule is correct.
+ */
+
+/*?
+ * Text: "%s: The mode table on the FCP adapter has been damaged\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * Description:
+ * This is an FCP adapter hardware problem.
+ * User action:
+ * Report this problem with FCP hardware logs to IBM support.
+ */
+
+/*?
+ * Text: "%s: The adjacent fibre channel node does not support FCP\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * Description:
+ * The fibre channel switch or storage system that is connected to the FCP
+ * channel does not support the fibre channel protocol (FCP). The zfcp
+ * device driver stopped using the FCP device.
+ * User action:
+ * Check the adjacent fibre channel node.
+ */
+
+/*?
+ * Text: "%s: The FCP adapter does not recognize the command 0x%x\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * @2: command
+ * Description:
+ * A command code that was sent from the zfcp device driver to the FCP adapter
+ * is not valid. The zfcp device driver stopped using the FCP device.
+ * User action:
+ * Gather Linux debug data, collect the FCP adapter hardware logs, and report
+ * this problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: There is no light signal from the local fibre channel cable\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * Description:
+ * There is no signal on the fibre channel cable that connects the FCP adapter
+ * to the fibre channel fabric.
+ * User action:
+ * Ensure that the cable is in place and connected properly to the FCP adapter
+ * and to the adjacent fibre channel switch or storage system.
+ */
+
+/*?
+ * Text: "%s: The WWPN assignment file on the FCP adapter has been damaged\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * Description:
+ * This is an FCP adapter hardware problem.
+ * User action:
+ * Report this problem with FCP hardware logs to IBM support.
+ */
+
+/*?
+ * Text: "%s: The FCP device detected a WWPN that is duplicate or not valid\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * Description:
+ * This condition indicates an error in the FCP adapter hardware or in the z/VM
+ * hypervisor.
+ * User action:
+ * Gather Linux debug data, collect the FCP adapter hardware logs, and report
+ * this problem to IBM support.
+ */
+
+/*?
+ * Text: "%s: The fibre channel fabric does not support NPIV\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * Description:
+ * The FCP adapter requires N_Port ID Virtualization (NPIV) from the adjacent
+ * fibre channel node. Either the FCP adapter is connected to a fibre channel
+ * switch that does not support NPIV or the FCP adapter tries to use NPIV in a
+ * point-to-point setup. The connection is not operational.
+ * User action:
+ * Verify that NPIV is correctly used for this connection. Check the FCP adapter
+ * configuration and the fibre channel switch configuration. If necessary,
+ * update the fibre channel switch firmware.
+ */
+
+/*?
+ * Text: "%s: The FCP adapter cannot support more NPIV ports\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * Description:
+ * N_Port ID Virtualization (NPIV) ports consume physical resources on the FCP
+ * adapter. The FCP adapter resources are exhausted. The connection is not
+ * operational.
+ * User action:
+ * Analyze the number of available NPIV ports and which operating system
+ * instances use them. If necessary, reconfigure your setup to move some
+ * NPIV ports to an FCP adapter with free resources.
+ */
+
+/*?
+ * Text: "%s: The adjacent switch cannot support more NPIV ports\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * Description:
+ * N_Port ID Virtualization (NPIV) ports consume physical resources. The
+ * resources of the fibre channel switch that is connected to the FCP adapter
+ * are exhausted. The connection is not operational.
+ * User action:
+ * Analyze the number of available NPIV ports on the adjacent fibre channel
+ * switch and how they are used. If necessary, reconfigure your fibre channel
+ * fabric to accommodate the required NPIV ports.
+ */
+
+/*?
+ * Text: "%s: 0x%x is not a valid transfer protocol status\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * @2: status information
+ * Description:
+ * The transfer protocol status information reported by the FCP adapter is not
+ * a valid status for the zfcp device driver. The zfcp device driver stopped
+ * using the FCP device.
+ * User action:
+ * Gather Linux debug data, collect the FCP adapter hardware logs, and report
+ * this problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: Unknown or unsupported arbitrated loop fibre channel topology detected\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * Description:
+ * The FCP device is connected to a fibre channel arbitrated loop or the FCP adapter
+ * reported an unknown fibre channel topology. The zfcp device driver supports
+ * point-to-point connections and switched fibre channel fabrics but not arbitrated
+ * loop topologies. The FCP device cannot be used.
+ * User action:
+ * Check the fibre channel setup and ensure that only supported topologies are
+ * connected to the FCP adapter.
+ */
+
+/*?
+ * Text: "%s: FCP adapter maximum QTCB size (%d bytes) is too small\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * @2: maximum supported QTCB size
+ * Description:
+ * The queue transfer control block (QTCB) size requested by the zfcp
+ * device driver is not supported by the FCP adapter hardware.
+ * User action:
+ * Update the firmware on your FCP adapter hardware to the latest
+ * available level and update the Linux kernel to the latest supported
+ * level. If the problem persists, contact your support organization.
+ */
+
+/*?
+ * Text: "%s: The FCP adapter only supports newer control block versions\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * Description:
+ * The protocol supported by the FCP adapter is not compatible with the zfcp
+ * device driver.
+ * User action:
+ * Upgrade your Linux kernel to a level that includes a zfcp device driver
+ * with support for the control block version required by your FCP adapter.
+ */
+
+/*?
+ * Text: "%s: The FCP adapter only supports older control block versions\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * Description:
+ * The protocol supported by the FCP adapter is not compatible with the zfcp
+ * device driver.
+ * User action:
+ * Install the latest firmware on your FCP adapter.
+ */
+
+/*?
+ * Text: "%s: Not enough FCP adapter resources to open remote port 0x%016Lx\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * @2: WWPN
+ * Description:
+ * Each port that is opened consumes physical resources of the FCP adapter to
+ * which it is attached. These resources are exhausted and the specified port
+ * cannot be opened.
+ * User action:
+ * Reduce the total number of remote ports that are attached to the
+ * FCP adapter.
+ */
+
+/*?
+ * Text: "%s: LUN 0x%Lx on port 0x%Lx is already in use by CSS%d, MIF Image ID %x\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * @2: LUN
+ * @3: remote port WWPN
+ * @4: channel subsystem ID
+ * @5: MIF Image ID of the LPAR
+ * Description:
+ * The SCSI device at the indicated LUN is already in use by another system.
+ * Only one system at a time can use the SCSI device.
+ * User action:
+ * Ensure that the other system stops using the device before trying to use it.
+ */
+
+/*?
+ * Text: "%s: No handle is available for LUN 0x%016Lx on port 0x%016Lx\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * @2: LUN
+ * @3: WWPN
+ * Description:
+ * The FCP adapter can only open a limited number of SCSI devices. This limit
+ * has been reached and the SCSI device at the indicated LUN cannot be opened.
+ * User action:
+ * Check all SCSI devices opened through the FCP adapter and close some of them.
+ */
+
+/*?
+ * Text: "%s: SCSI device at LUN 0x%016Lx on port 0x%016Lx opened read-only\n"
+ * Severity: Informational
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * @2: LUN
+ * @3: WWPN
+ * Description:
+ * The access control tables in the FCP adapter allow read-only access for the
+ * LUN. Write access is not permitted for your Linux instance. The SCSI
+ * device has been opened successfully in read-only access mode.
+ * User action:
+ * None if read-only access is sufficient. If you require write access, change
+ * the access control tables in the FCP adapter.
+ */
+
+/*?
+ * Text: "%s: Exclusive read-only access not supported (unit 0x%016Lx, port 0x%016Lx)\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * @2: LUN
+ * @3: WWPN
+ * Description:
+ * The access configuration specified in the access control tables of the FCP
+ * adapter is not valid. The SCSI device at the indicated LUN cannot be
+ * accessed.
+ * User action:
+ * Change the access control tables in the FCP adapter.
+ */
+
+/*?
+ * Text: "%s: Shared read-write access not supported (unit 0x%016Lx, port 0x%016Lx)\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * @2: LUN
+ * @3: WWPN
+ * Description:
+ * The access configuration specified in the access control tables of the FCP
+ * adapter is not valid. The SCSI device at the indicated LUN cannot be
+ * accessed.
+ * User action:
+ * Change the access control tables in the FCP adapter.
+ */
+
+/*?
+ * Text: "%s: Incorrect direction %d, unit 0x%016Lx on port 0x%016Lx closed\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * @2: value in direction field
+ * @3: LUN
+ * @4: WWPN
+ * Description:
+ * The direction field in a SCSI request contains an incorrect value. The zfcp
+ * device driver closed down the SCSI device at the indicated LUN.
+ * User action:
+ * Gather Linux debug data and report this problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: Incorrect CDB length %d, unit 0x%016Lx on port 0x%016Lx closed\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * @2: value in length field
+ * @3: LUN
+ * @4: WWPN
+ * Description:
+ * The command descriptor block (CDB) length field in a SCSI request is not valid or
+ * too large for the FCP adapter. The zfcp device driver closed down the SCSI
+ * device at the indicated LUN.
+ * User action:
+ * Gather Linux debug data and report this problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: Oversize data package, unit 0x%016Lx on port 0x%016Lx closed\n"
+ * Severity: Error
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * @2: LUN
+ * @3: WWPN
+ * Description:
+ * A SCSI request with too much data has been sent to the SCSI device at the
+ * indicated LUN. The FCP adapter cannot handle data packets of this size and
+ * the SCSI device driver closed down the SCSI device.
+ * User action:
+ * Gather Linux debug data and report this problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: Opening WKA port 0x%x failed\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * @2: destination ID of the WKA port
+ * Description:
+ * The FCP adapter rejected a request to open the specified
+ * well-known address (WKA) port. No retry is possible.
+ * User action:
+ * Verify the setup and check if the maximum number of remote ports
+ * used through this adapter is below the maximum allowed. If the
+ * problem persists, gather Linux debug data, collect the FCP adapter
+ * hardware logs, and report the problem to your support organization.
+ */
+
+/*?
+ * Text: "%s: The name server reported %d words residual data\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * @2: number of words in residual data
+ * Description:
+ * The fibre channel name server sent too much information about remote ports.
+ * The zfcp device driver did not receive sufficient information to attach all
+ * available remote ports in the SAN.
+ * User action:
+ * Verify that you are running the latest firmware level on the FCP
+ * adapter. Check your SAN setup and consider reducing the number of ports
+ * visible to the FCP adapter by using more restrictive zoning in the SAN.
+ */
+
+/*?
+ * Text: "%s: A port opened with WWPN 0x%016Lx returned data that identifies it as WWPN 0x%016Lx\n"
+ * Severity: Warning
+ * Parameter:
+ * @1: bus ID of the zfcp device
+ * @2: expected WWPN
+ * @3: reported WWPN
+ * Description:
+ * A remote port was opened successfully, but it reported an
+ * unexpected WWPN in the returned port login (PLOGI) data. This
+ * condition might have been caused by a change applied to the SAN
+ * configuration while the port was being opened.
+ * User action:
+ * If this condition is only temporary and access to the remote port
+ * is possible, no action is required. If the condition persists,
+ * identify the storage system with the specified WWPN and contact the
+ * support organization of the storage system.
+ */
1024 - A module from drivers/staging was loaded.
2048 - The system is working around a severe firmware bug.
4096 - An out-of-tree module has been loaded.
+ 0x40000000 - An unsupported kernel module was loaded.
+ 0x80000000 - A kernel module with external support was loaded.
+
+==============================================================
+
+unsupported:
+
+Allow loading of unsupported kernel modules:
+
+ 0 - refuse to load unsupported modules,
+ 1 - warn when loading unsupported modules,
+ 2 - don't warn.
==============================================================
KBUILD_CHECKSRC = 0
endif
+# Call message checker as part of the C compilation
+#
+# Use 'make D=1' to enable checking
+# Use 'make D=2' to create the message catalog
+
+ifdef D
+ ifeq ("$(origin D)", "command line")
+ KBUILD_KMSG_CHECK = $(D)
+ endif
+endif
+ifndef KBUILD_KMSG_CHECK
+ KBUILD_KMSG_CHECK = 0
+endif
+
# Use make M=dir to specify directory of external module to build
# Old syntax make ... SUBDIRS=$PWD is still supported
# Setting the environment variable KBUILD_EXTMOD take precedence
CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \
-Wbitwise -Wno-return-void $(CF)
+KMSG_CHECK = $(srctree)/scripts/kmsg-doc
CFLAGS_MODULE =
AFLAGS_MODULE =
LDFLAGS_MODULE =
KBUILD_CFLAGS_MODULE := -DMODULE
KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
+# Warn about unsupported modules in kernels built inside Autobuild
+ifneq ($(wildcard /.buildenv),)
+CFLAGS += -DUNSUPPORTED_MODULES=2
+endif
+
# Read KERNELRELEASE from include/config/kernel.release (if it exists)
KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)
KERNELVERSION = $(VERSION)$(if $(PATCHLEVEL),.$(PATCHLEVEL)$(if $(SUBLEVEL),.$(SUBLEVEL)))$(EXTRAVERSION)
export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
export KBUILD_ARFLAGS
+export KBUILD_KMSG_CHECK KMSG_CHECK
# When compiling out-of-tree modules, put MODVERDIR in the module
# tree rather than in the kernel tree. The kernel tree might
endif
endif
+ifdef CONFIG_UNWIND_INFO
+KBUILD_CFLAGS += -fasynchronous-unwind-tables
+LDFLAGS_vmlinux += --eh-frame-hdr
+endif
+
ifdef CONFIG_DEBUG_INFO
KBUILD_CFLAGS += -g
KBUILD_AFLAGS += -gdwarf-2
# ---------------------------------------------------------------------------
# Firmware install
-INSTALL_FW_PATH=$(INSTALL_MOD_PATH)/lib/firmware
+INSTALL_FW_PATH=$(INSTALL_MOD_PATH)/lib/firmware/$(KERNELRELEASE)
export INSTALL_FW_PATH
PHONY += firmware_install
#define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page)))
#define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys)))
+#ifndef CONFIG_ARM_PATCH_PHYS_VIRT
+#ifndef PHYS_OFFSET
+#ifdef PLAT_PHYS_OFFSET
+#define PHYS_OFFSET PLAT_PHYS_OFFSET
+#else
+#define PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
+#endif
+#endif
+#endif
+
#ifndef __ASSEMBLY__
/*
#endif
#endif
-#ifndef PHYS_OFFSET
-#ifdef PLAT_PHYS_OFFSET
-#define PHYS_OFFSET PLAT_PHYS_OFFSET
-#else
-#define PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
-#endif
-#endif
-
/*
* PFNs are used to describe any physical page; this means
* PFN 0 == physical address 0.
config SGI_SN
def_bool y if (IA64_SGI_SN2 || IA64_GENERIC)
+ select HAVE_UNSTABLE_SCHED_CLOCK
config IA64_ESI
bool "ESI (Extensible SAL Interface) support"
/* Default baud base if not found in device-tree */
#define BASE_BAUD ( 1843200 / 16 )
+#if defined(SUPPORT_SYSRQ) && defined(CONFIG_PPC_PSERIES)
+#undef arch_8250_sysrq_via_ctrl_o
+extern int do_sysrq_via_ctrl_o;
+#define arch_8250_sysrq_via_ctrl_o(ch, port) ((ch) == '\x0f' && do_sysrq_via_ctrl_o && uart_handle_break((port)))
+#endif
+
#ifdef CONFIG_PPC_UDBG_16550
extern void find_legacy_serial_ports(void);
#else
#ifdef CONFIG_SERIAL_8250_CONSOLE
+#if defined(CONFIG_PPC_PSERIES) && defined(CONFIG_SERIAL_8250_CONSOLE)
+/*
+ * Handle the SysRq ^O Hack also via ttyS0 on POWER4 systems
+ * but only on the system console, see asm/serial.h
+ * If they run in FullSystemPartition mode, the firmware console comes in
+ * via ttyS0, but BREAK does not work via the HMC to trigger sysrq.
+ * The same is required for Cell blades
+ */
+int do_sysrq_via_ctrl_o;
+/*
+ * Machine model prefixes (device tree "model" property) that need the
+ * sysrq-via-Ctrl-O hack; compared by prefix in detect_need_for_ctrl_o().
+ * Note: the section attribute must follow the declarator; placing
+ * __initdata between "const char" and "*" attaches it to the pointed-to
+ * type and gcc ignores (or warns about) it.
+ */
+static const char *need_ctrl_o[] __initdata = {
+	"IBM,079",	/* QS2x */
+	"IBM,0792-32G",	/* QS21 */
+	"IBM,0793-2RZ",	/* QS22 */
+	"IBM,7040-681",	/* p690 */
+	"IBM,7040-671",	/* p670 */
+	"IBM,7039-651",	/* p655 */
+	"IBM,7038-6M2",	/* p650 */
+	"IBM,7028-6E4",	/* p630 tower */
+	"IBM,7028-6C4",	/* p630 rack */
+	"IBM,7029-6E3",	/* p615 tower */
+	"IBM,7029-6C3",	/* p615 rack */
+	NULL
+};
+/*
+ * Read the root device tree node's "model" property and enable the
+ * Ctrl-O sysrq hack (do_sysrq_via_ctrl_o = 1) when it matches an entry
+ * in the need_ctrl_o table.  Silently does nothing if the root node or
+ * the property is absent.
+ */
+static void __init detect_need_for_ctrl_o(void)
+{
+	struct device_node *root;
+	const char *model, *p;
+	int i;
+
+	root = of_find_node_by_path("/");
+	if (!root)
+		return;
+	model = of_get_property(root, "model", NULL);
+	if (model) {
+		i = 0;
+		while (need_ctrl_o[i]) {
+			p = need_ctrl_o[i];
+			/* prefix match: a table entry may cover a model family */
+			if (strncmp(p, model, strlen(p)) == 0) {
+				do_sysrq_via_ctrl_o = 1;
+				DBG("Enable sysrq via CTRL o on model %s\n", model);
+				break;
+			}
+			i++;
+		}
+	}
+	of_node_put(root);	/* drop the reference from of_find_node_by_path() */
+}
+#endif
+
/*
* This is called very early, as part of console_init() (typically just after
 * time_init()). This function is responsible for trying to find a good
if (i >= legacy_serial_count)
goto not_found;
+#if defined(CONFIG_PPC_PSERIES) && defined(CONFIG_SERIAL_8250_CONSOLE)
+ detect_need_for_ctrl_o();
+#endif
of_node_put(prom_stdout);
DBG("Found serial console at ttyS%d\n", offset);
struct mcontext32 uc_mcontext;
};
+extern int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s);
+
#endif /* _PPC64_PPC32_H */
static unsigned long __initdata prom_initrd_start, prom_initrd_end;
+static int __initdata prom_no_display;
#ifdef CONFIG_PPC64
static int __initdata prom_iommu_force_on;
static int __initdata prom_iommu_off;
#endif /* CONFIG_CMDLINE */
prom_printf("command line: %s\n", RELOC(prom_cmd_line));
+ opt = strstr(RELOC(prom_cmd_line), RELOC("prom="));
+ if (opt) {
+ opt += 5;
+ while (*opt && *opt == ' ')
+ opt++;
+ if (!strncmp(opt, RELOC("nodisplay"), 9))
+ RELOC(prom_no_display) = 1;
+ }
#ifdef CONFIG_PPC64
opt = strstr(RELOC(prom_cmd_line), RELOC("iommu="));
if (opt) {
/*
* Initialize display devices
*/
+ if (RELOC(prom_no_display) == 0)
prom_check_displays();
#ifdef CONFIG_PPC64
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/compat.h>
+#include <linux/elf.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/switch_to.h>
+#include "ppc32.h"
+
/*
* does not yet catch signals sent when the child dies.
* in exit.c or in signal.c.
#define FPRINDEX(i) TS_FPRWIDTH * FPRNUMBER(i) * 2 + FPRHALF(i)
#define FPRINDEX_3264(i) (TS_FPRWIDTH * ((i) - PT_FPR0))
+/*
+ * Copy the siginfo of the signal currently being delivered to the traced
+ * task @child into a 32-bit compat user buffer @data.
+ *
+ * Returns -ESRCH if the task has no signal-handling state, -EINVAL if it
+ * is not stopped inside signal delivery (no last_siginfo), otherwise the
+ * result of copy_siginfo_to_user32().
+ */
+static int compat_ptrace_getsiginfo(struct task_struct *child, compat_siginfo_t __user *data)
+{
+	siginfo_t lastinfo;
+	int error = -ESRCH;
+
+	read_lock(&tasklist_lock);
+	if (likely(child->sighand != NULL)) {
+		error = -EINVAL;
+		/* siglock protects child->last_siginfo */
+		spin_lock_irq(&child->sighand->siglock);
+		if (likely(child->last_siginfo != NULL)) {
+			/* snapshot to a local so the user copy runs without locks held */
+			lastinfo = *child->last_siginfo;
+			error = 0;
+		}
+		spin_unlock_irq(&child->sighand->siglock);
+	}
+	read_unlock(&tasklist_lock);
+	if (!error)
+		return copy_siginfo_to_user32(data, &lastinfo);
+	return error;
+}
+
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
compat_ulong_t caddr, compat_ulong_t cdata)
{
0, PT_REGS_COUNT * sizeof(compat_long_t),
compat_ptr(data));
+ case PTRACE_GETSIGINFO:
+ return compat_ptrace_getsiginfo(child, compat_ptr(data));
+
case PTRACE_GETFPREGS:
case PTRACE_SETFPREGS:
case PTRACE_GETVRREGS:
if (!property)
goto out_put;
if (!strcmp(property, "failsafe") || !strcmp(property, "serial"))
- add_preferred_console("ttyS", 0, NULL);
+ add_preferred_console("ttyS", 0, "115200");
out_put:
of_node_put(node);
}
static int __init pSeries_init_panel(void)
{
/* Manually leave the kernel version on the panel. */
- ppc_md.progress("Linux ppc64\n", 0);
+ ppc_md.progress("SUSE Linux\n", 0);
ppc_md.progress(init_utsname()->version, 0);
return 0;
static int do_step(struct pt_regs *);
static void bpt_cmds(void);
static void cacheflush(void);
+static void xmon_show_dmesg(void);
static int cpu_cmd(void);
static void csum(void);
static void bootcmds(void);
#endif
"\
C checksum\n\
+ D show dmesg (printk) buffer\n\
d dump bytes\n\
di dump instructions\n\
df dump float values\n\
case 'd':
dump();
break;
+ case 'D':
+ xmon_show_dmesg();
+ break;
case 'l':
symbol_lookup();
break;
printf("%s", after);
}
+extern void kdb_syslog_data(char *syslog_data[]);
+#define SYSLOG_WRAP(p) if (p < syslog_data[0]) p = syslog_data[1]-1; \
+ else if (p >= syslog_data[1]) p = syslog_data[0];
+
+/*
+ * Dump the kernel printk buffer to the xmon console ('D' command).
+ * Walks the wrapped ring buffer from the logical start to the logical
+ * end, emitting at most one line (capped at 200 chars) per printf to
+ * keep per-character overhead down.
+ */
+static void xmon_show_dmesg(void)
+{
+	char *syslog_data[4], *start, *end, c;
+	int logsize;
+
+	/* syslog_data[0,1] physical start, end+1.
+	 * syslog_data[2,3] logical start, end+1.
+	 */
+	kdb_syslog_data(syslog_data);
+	if (syslog_data[2] == syslog_data[3])
+		return;		/* log is empty */
+	logsize = syslog_data[1] - syslog_data[0];
+	/* map logical positions into the physical (possibly wrapped) buffer */
+	start = syslog_data[0] + (syslog_data[2] - syslog_data[0]) % logsize;
+	end = syslog_data[0] + (syslog_data[3] - syslog_data[0]) % logsize;
+
+	/* Do a line at a time (max 200 chars) to reduce overhead */
+	c = '\0';
+	while(1) {
+		char *p;
+		int chars = 0;
+		if (!*start) {
+			/* skip NUL padding between log records */
+			while (!*start) {
+				++start;
+				SYSLOG_WRAP(start);
+				if (start == end)
+					break;
+			}
+			if (start == end)
+				break;
+		}
+		p = start;
+		/* collect one line (or 200 chars, whichever comes first) */
+		while (*start && chars < 200) {
+			c = *start;
+			++chars;
+			++start;
+			SYSLOG_WRAP(start);
+			if (start == end || c == '\n')
+				break;
+		}
+		if (chars)
+			printf("%.*s", chars, p);
+		if (start == end)
+			break;
+	}
+	if (c != '\n')
+		printf("\n");	/* ensure output ends with a newline */
+}
+
#ifdef CONFIG_PPC_BOOK3S_64
static void dump_slb(void)
{
virtio transport. If KVM is detected, the virtio console will be
the default console.
+config KMSG_IDS
+ bool "Kernel message numbers"
+ default y
+ help
+	  Select this option if you want to include a message number in the
+	  prefix of kernel messages issued by the s390 architecture and
+	  driver code. See "Documentation/s390/kmsg.txt" for more details.
+
config SECCOMP
def_bool y
prompt "Enable seccomp to safely compute untrusted bytecode"
boot := arch/s390/boot
-all: image bzImage
+all: image bzImage kerntypes.o
install: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $@
-image bzImage: vmlinux
+image bzImage kerntypes.o: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
zfcpdump:
# Makefile for the linux s390-specific parts of the memory manager.
#
-COMPILE_VERSION := __linux_compile_version_id__`hostname | \
- tr -c '[0-9A-Za-z]' '_'`__`date | \
- tr -c '[0-9A-Za-z]' '_'`_t
+COMPILE_VERSION := __linux_compile_version_id__$(shell hostname | \
+ tr -c '[0-9A-Za-z]' '_')__$(shell date | \
+ tr -c '[0-9A-Za-z]' '_')_t
+
+chk-option = $(shell if $(CC) $(CFLAGS) $(1) -S -o /dev/null -xc /dev/null \
+ > /dev/null 2>&1; then echo "$(1)"; fi ;)
+
+# Remove possible '-g' from CFLAGS_KERNEL, since we want to use stabs
+# debug format.
+override CFLAGS_KERNEL := $(shell echo $(CFLAGS_KERNEL) | sed 's/-g//')
ccflags-y := -DCOMPILE_VERSION=$(COMPILE_VERSION) -gstabs -I.
+# Assume we don't need the flag if the compiler doesn't know about it
+ccflags-y += $(call chk-option,-fno-eliminate-unused-debug-types)
+
targets := image
targets += bzImage
subdir- := compressed
+targets += kerntypes.o
$(obj)/image: vmlinux FORCE
$(call if_changed,objcopy)
--- /dev/null
+/*
+ * kerntypes.c
+ *
+ * Dummy module that includes headers for all kernel types of interest.
+ * The kernel type information is used by the lcrash utility when
+ * analyzing system crash dumps or the live system. Using the type
+ * information for the running system, rather than kernel header files,
+ * makes for a more flexible and robust analysis tool.
+ *
+ * This source code is released under the GNU GPL.
+ */
+
+/* generate version for this file */
+typedef char *COMPILE_VERSION;
+
+/* General linux types */
+
+#include <generated/compile.h>
+#include <linux/utsname.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#ifdef CONFIG_SLUB
+ #include <linux/slub_def.h>
+#endif
+#ifdef CONFIG_SLAB
+ #include <linux/slab_def.h>
+#endif
+#ifdef CONFIG_SLQB
+ #include <linux/slqb_def.h>
+#endif
+#include <linux/bio.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/bitrev.h>
+#include <linux/blkdev.h>
+#include <linux/blkpg.h>
+#include <linux/bootmem.h>
+#include <linux/buffer_head.h>
+#include <linux/cache.h>
+#include <linux/cdev.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/cpuset.h>
+#include <linux/dcache.h>
+#include <linux/debugfs.h>
+#include <linux/elevator.h>
+#include <linux/fd.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/futex.h>
+#include <linux/genhd.h>
+#include <linux/highmem.h>
+#include <linux/if.h>
+#include <linux/if_addr.h>
+#include <linux/if_arp.h>
+#include <linux/if_bonding.h>
+#include <linux/if_ether.h>
+#include <linux/if_tr.h>
+#include <linux/if_tun.h>
+#include <linux/if_vlan.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/in_route.h>
+#include <linux/inet.h>
+#include <linux/inet_diag.h>
+#include <linux/inetdevice.h>
+#include <linux/init.h>
+#include <linux/initrd.h>
+#include <linux/inotify.h>
+#include <linux/interrupt.h>
+#include <linux/ioctl.h>
+#include <linux/ip.h>
+#include <linux/ipsec.h>
+#include <linux/ipv6.h>
+#include <linux/ipv6_route.h>
+#include <linux/interrupt.h>
+#include <linux/irqflags.h>
+#include <linux/irqreturn.h>
+#include <linux/jbd2.h>
+#include <linux/jffs2.h>
+#include <linux/jhash.h>
+#include <linux/jiffies.h>
+#include <linux/kallsyms.h>
+#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
+#include <linux/kexec.h>
+#include <linux/kobject.h>
+#include <linux/kthread.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/memory.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/memcontrol.h>
+#include <linux/mm_inline.h>
+#include <linux/mm_types.h>
+#include <linux/mman.h>
+#include <linux/mmtimer.h>
+#include <linux/mmzone.h>
+#include <linux/mnt_namespace.h>
+#include <linux/module.h>
+#include <linux/moduleloader.h>
+#include <linux/moduleparam.h>
+#include <linux/mount.h>
+#include <linux/mpage.h>
+#include <linux/mqueue.h>
+#include <linux/mtio.h>
+#include <linux/mutex.h>
+#include <linux/namei.h>
+#include <linux/neighbour.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_arp.h>
+#include <linux/netfilter_bridge.h>
+#include <linux/netfilter_decnet.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netlink.h>
+#include <linux/netpoll.h>
+#include <linux/pagemap.h>
+#include <linux/param.h>
+#include <linux/percpu.h>
+#include <linux/percpu_counter.h>
+#include <linux/pfn.h>
+#include <linux/pid.h>
+#include <linux/pid_namespace.h>
+#include <linux/poll.h>
+#include <linux/posix-timers.h>
+#include <linux/posix_acl.h>
+#include <linux/posix_acl_xattr.h>
+#include <linux/posix_types.h>
+#include <linux/preempt.h>
+#include <linux/prio_tree.h>
+#include <linux/proc_fs.h>
+#include <linux/profile.h>
+#include <linux/ptrace.h>
+#include <linux/radix-tree.h>
+#include <linux/ramfs.h>
+#include <linux/raw.h>
+#include <linux/rbtree.h>
+#include <linux/rcupdate.h>
+#include <linux/reboot.h>
+#include <linux/relay.h>
+#include <linux/resource.h>
+#include <linux/romfs_fs.h>
+#include <linux/root_dev.h>
+#include <linux/route.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/sem.h>
+#include <linux/seq_file.h>
+#include <linux/seqlock.h>
+#include <linux/shm.h>
+#include <linux/shmem_fs.h>
+#include <linux/signal.h>
+#include <linux/signalfd.h>
+#include <linux/skbuff.h>
+#include <linux/smp.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/spinlock.h>
+#include <linux/stat.h>
+#include <linux/statfs.h>
+#include <linux/stddef.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
+#include <linux/sys.h>
+#include <linux/syscalls.h>
+#include <linux/sysctl.h>
+#include <linux/sysdev.h>
+#include <linux/sysfs.h>
+#include <linux/sysrq.h>
+#include <linux/tc.h>
+#include <linux/tcp.h>
+#include <linux/thread_info.h>
+#include <linux/threads.h>
+#include <linux/tick.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/timerfd.h>
+#include <linux/times.h>
+#include <linux/timex.h>
+#include <linux/topology.h>
+#include <linux/transport_class.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/tty_ldisc.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/unistd.h>
+#include <linux/utime.h>
+#include <linux/uts.h>
+#include <linux/utsname.h>
+#include <generated/utsrelease.h>
+#include <linux/version.h>
+#include <linux/vfs.h>
+#include <linux/vmalloc.h>
+#include <linux/vmstat.h>
+#include <linux/wait.h>
+#include <linux/watchdog.h>
+#include <linux/workqueue.h>
+#include <linux/zconf.h>
+#include <linux/zlib.h>
+
+/*
+ * s390 specific includes
+ */
+
+#include <asm/lowcore.h>
+#include <asm/debug.h>
+#include <asm/ccwdev.h>
+#include <asm/ccwgroup.h>
+#include <asm/qdio.h>
+#include <asm/zcrypt.h>
+#include <asm/etr.h>
+#include <asm/ipl.h>
+#include <asm/setup.h>
+#include <asm/schid.h>
+#include <asm/chsc.h>
+
+/* channel subsystem driver */
+#include "drivers/s390/cio/cio.h"
+#include "drivers/s390/cio/chsc.h"
+#include "drivers/s390/cio/css.h"
+#include "drivers/s390/cio/device.h"
+#include "drivers/s390/cio/chsc_sch.h"
+
+/* dasd device driver */
+#include "drivers/s390/block/dasd_int.h"
+#include "drivers/s390/block/dasd_diag.h"
+#include "drivers/s390/block/dasd_eckd.h"
+#include "drivers/s390/block/dasd_fba.h"
+
+/* networking drivers */
+#include "include/net/iucv/iucv.h"
+#include "drivers/s390/net/fsm.h"
+#include "drivers/s390/net/ctcm_main.h"
+#include "drivers/s390/net/ctcm_fsms.h"
+#include "drivers/s390/net/lcs.h"
+#include "drivers/s390/net/qeth_core.h"
+#include "drivers/s390/net/qeth_core_mpc.h"
+#include "drivers/s390/net/qeth_l3.h"
+
+/* zfcp device driver */
+#include "drivers/s390/scsi/zfcp_def.h"
+#include "drivers/s390/scsi/zfcp_fsf.h"
+
+/* crypto device driver */
+#include "drivers/s390/crypto/ap_bus.h"
+#include "drivers/s390/crypto/zcrypt_api.h"
+#include "drivers/s390/crypto/zcrypt_cca_key.h"
+#include "drivers/s390/crypto/zcrypt_pcica.h"
+#include "drivers/s390/crypto/zcrypt_pcicc.h"
+#include "drivers/s390/crypto/zcrypt_pcixcc.h"
+#include "drivers/s390/crypto/zcrypt_cex2a.h"
+
+/* sclp device driver */
+#include "drivers/s390/char/sclp.h"
+#include "drivers/s390/char/sclp_rw.h"
+#include "drivers/s390/char/sclp_tty.h"
+
+/* vmur device driver */
+#include "drivers/s390/char/vmur.h"
+
+/* qdio device driver */
+#include "drivers/s390/cio/qdio.h"
+#include "drivers/s390/cio/qdio_thinint.c"
+
+
+/* KVM */
+#include "include/linux/kvm.h"
+#include "include/linux/kvm_host.h"
+#include "include/linux/kvm_para.h"
+
+/* Virtio */
+#include "include/linux/virtio.h"
+#include "include/linux/virtio_config.h"
+#include "include/linux/virtio_ring.h"
+#include "include/linux/virtio_9p.h"
+#include "include/linux/virtio_console.h"
+#include "include/linux/virtio_rng.h"
+#include "include/linux/virtio_balloon.h"
+#include "include/linux/virtio_net.h"
+#include "include/linux/virtio_blk.h"
+
+/*
+ * include sched.c for types:
+ * - struct prio_array
+ * - struct runqueue
+ */
+#include "kernel/sched.c"
+/*
+ * include slab.c for struct kmem_cache
+ */
+#ifdef CONFIG_SLUB
+ #include "mm/slub.c"
+#endif
+#ifdef CONFIG_SLAB
+ #include "mm/slab.c"
+#endif
+#ifdef CONFIG_SLQB
+ #include "mm/slqb.c"
+#endif
+
+/* include driver core private structures */
+#include "drivers/base/base.h"
config SCHED_OMIT_FRAME_POINTER
def_bool y
prompt "Single-depth WCHAN output"
- depends on X86
+ depends on X86 && !STACK_UNWIND
---help---
Calculate simpler /proc/<PID>/wchan values. If this option
is disabled then wchan values will recurse back to the
# Workaround for a gcc prelease that unfortunately was shipped in a suse release
KBUILD_CFLAGS += -Wno-sign-compare
#
+ifneq ($(CONFIG_UNWIND_INFO),y)
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
+endif
# prevent gcc from generating any FP code by mistake
KBUILD_CFLAGS += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,)
KBUILD_CFLAGS += $(call cc-option,-mno-avx,)
#define CFI_SIGNAL_FRAME
#endif
-#if defined(CONFIG_AS_CFI_SECTIONS) && defined(__ASSEMBLY__)
+#if !defined(CONFIG_UNWIND_INFO) && defined(CONFIG_AS_CFI_SECTIONS) \
+ && defined(__ASSEMBLY__)
/*
* Emit CFI data in .debug_frame sections, not .eh_frame sections.
* The latter we currently just discard since we don't do DWARF
* Due to the structure of pre-exisiting code, don't use assembler line
* comment character # to ignore the arguments. Instead, use a dummy macro.
*/
-.macro cfi_ignore a=0, b=0, c=0, d=0
+.macro cfi_ignore a=0, b=0, c=0, d=0, e=0, f=0, g=0, h=0
.endm
#define CFI_STARTPROC cfi_ignore
/* Generic stack tracer with callbacks */
struct stacktrace_ops {
+ void (*warning)(void *data, char *msg);
+ /* msg must contain %s for the symbol */
+ void (*warning_symbol)(void *data, char *msg, unsigned long symbol);
void (*address)(void *data, unsigned long address, int reliable);
/* On negative return stop dumping */
int (*stack)(void *data, char *name);
show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
unsigned long *sp, unsigned long bp, char *log_lvl);
+int try_stack_unwind(struct task_struct *task, struct pt_regs *regs,
+ unsigned long **stack, unsigned long *bp,
+ const struct stacktrace_ops *ops, void *data);
+
extern unsigned int code_bytes;
/* The form of the top of the frame on the stack */
#define __switch_canary_iparam
#endif /* CC_STACKPROTECTOR */
+/* The stack unwind code needs this but it pollutes traces otherwise */
+#ifdef CONFIG_UNWIND_INFO
+#define THREAD_RETURN_SYM \
+ ".globl thread_return\n" \
+ "thread_return:\n\t"
+#else
+#define THREAD_RETURN_SYM
+#endif
+
/* Save restore flags to clear handle leaking NT */
#define switch_to(prev, next, last) \
asm volatile(SAVE_CONTEXT \
"movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
"movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
"call __switch_to\n\t" \
+ THREAD_RETURN_SYM \
"movq "__percpu_arg([current_task])",%%rsi\n\t" \
__switch_canary \
"movq %P[thread_info](%%rsi),%%r8\n\t" \
--- /dev/null
+#ifndef _ASM_X86_UNWIND_H
+#define _ASM_X86_UNWIND_H
+
+/*
+ * Copyright (C) 2002-2009 Novell, Inc.
+ * Jan Beulich <jbeulich@novell.com>
+ * This code is released under version 2 of the GNU GPL.
+ */
+
+#ifdef CONFIG_STACK_UNWIND
+
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <asm/ptrace.h>
+
+struct unwind_frame_info
+{
+ struct pt_regs regs;
+ struct task_struct *task;
+ unsigned call_frame:1;
+};
+
+#define UNW_PC(frame) (frame)->regs.ip
+#define UNW_SP(frame) (frame)->regs.sp
+#ifdef CONFIG_FRAME_POINTER
+#define UNW_FP(frame) (frame)->regs.bp
+#define FRAME_LINK_OFFSET 0
+#define STACK_BOTTOM(tsk) STACK_LIMIT((tsk)->thread.sp0)
+#define TSK_STACK_TOP(tsk) ((tsk)->thread.sp0)
+#else
+#define UNW_FP(frame) ((void)(frame), 0UL)
+#endif
+/* On x86-64, might need to account for the special exception and interrupt
+ handling stacks here, since normally
+ EXCEPTION_STACK_ORDER < THREAD_ORDER < IRQSTACK_ORDER,
+ but the construct is needed only for getting across the stack switch to
+ the interrupt stack - thus considering the IRQ stack itself is unnecessary,
+ and the overhead of comparing against all exception handling stacks seems
+ not desirable. */
+#define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1))
+
+#ifdef CONFIG_X86_64
+
+#include <asm/vsyscall.h>
+
+#define FRAME_RETADDR_OFFSET 8
+
+#define UNW_REGISTER_INFO \
+ PTREGS_INFO(ax), \
+ PTREGS_INFO(dx), \
+ PTREGS_INFO(cx), \
+ PTREGS_INFO(bx), \
+ PTREGS_INFO(si), \
+ PTREGS_INFO(di), \
+ PTREGS_INFO(bp), \
+ PTREGS_INFO(sp), \
+ PTREGS_INFO(r8), \
+ PTREGS_INFO(r9), \
+ PTREGS_INFO(r10), \
+ PTREGS_INFO(r11), \
+ PTREGS_INFO(r12), \
+ PTREGS_INFO(r13), \
+ PTREGS_INFO(r14), \
+ PTREGS_INFO(r15), \
+ PTREGS_INFO(ip)
+
+#else /* X86_32 */
+
+#include <asm/fixmap.h>
+
+#define FRAME_RETADDR_OFFSET 4
+
+#define UNW_REGISTER_INFO \
+ PTREGS_INFO(ax), \
+ PTREGS_INFO(cx), \
+ PTREGS_INFO(dx), \
+ PTREGS_INFO(bx), \
+ PTREGS_INFO(sp), \
+ PTREGS_INFO(bp), \
+ PTREGS_INFO(si), \
+ PTREGS_INFO(di), \
+ PTREGS_INFO(ip)
+
+#endif
+
+#define UNW_DEFAULT_RA(raItem, dataAlign) \
+ ((raItem).where == Memory && \
+ !((raItem).value * (dataAlign) + sizeof(void *)))
+
+static inline void arch_unw_init_frame_info(struct unwind_frame_info *info,
+ /*const*/ struct pt_regs *regs)
+{
+#ifdef CONFIG_X86_64
+ info->regs = *regs;
+#else
+ if (user_mode_vm(regs))
+ info->regs = *regs;
+ else {
+ memcpy(&info->regs, regs, offsetof(struct pt_regs, sp));
+		info->regs.sp = (unsigned long)&regs->sp;
+ info->regs.ss = __KERNEL_DS;
+ }
+#endif
+}
+
+static inline void arch_unw_init_blocked(struct unwind_frame_info *info)
+{
+#ifdef CONFIG_X86_64
+ extern const char thread_return[];
+
+ memset(&info->regs, 0, sizeof(info->regs));
+ info->regs.ip = (unsigned long)thread_return;
+ info->regs.cs = __KERNEL_CS;
+ probe_kernel_address(info->task->thread.sp, info->regs.bp);
+ info->regs.sp = info->task->thread.sp;
+ info->regs.ss = __KERNEL_DS;
+#else
+ memset(&info->regs, 0, sizeof(info->regs));
+ info->regs.ip = info->task->thread.ip;
+ info->regs.cs = __KERNEL_CS;
+ probe_kernel_address(info->task->thread.sp, info->regs.bp);
+ info->regs.sp = info->task->thread.sp;
+ info->regs.ss = __KERNEL_DS;
+ info->regs.ds = __USER_DS;
+ info->regs.es = __USER_DS;
+#endif
+}
+
+extern asmlinkage int
+arch_unwind_init_running(struct unwind_frame_info *,
+ unwind_callback_fn,
+ const struct stacktrace_ops *, void *data);
+
+static inline int arch_unw_user_mode(/*const*/ struct unwind_frame_info *info)
+{
+#ifdef CONFIG_X86_64
+ return user_mode(&info->regs)
+ || (long)info->regs.ip >= 0
+ || (info->regs.ip >= VSYSCALL_START && info->regs.ip < VSYSCALL_END)
+ || (long)info->regs.sp >= 0;
+#else
+ return user_mode_vm(&info->regs)
+ || info->regs.ip < PAGE_OFFSET
+ || (info->regs.ip >= __fix_to_virt(FIX_VDSO)
+ && info->regs.ip < __fix_to_virt(FIX_VDSO) + PAGE_SIZE)
+ || info->regs.sp < PAGE_OFFSET;
+#endif
+}
+
+#else
+
+#define UNW_PC(frame) ((void)(frame), 0UL)
+#define UNW_SP(frame) ((void)(frame), 0UL)
+#define UNW_FP(frame) ((void)(frame), 0UL)
+
+static inline int arch_unw_user_mode(const void *info)
+{
+ return 0;
+}
+
+#endif
+
+#endif /* _ASM_X86_UNWIND_H */
return 0;
}
+static int __init force_acpi_rsdt(const struct dmi_system_id *d)
+{
+ if (!acpi_force) {
+ printk(KERN_NOTICE "%s detected: force use of acpi=rsdt\n",
+ d->ident);
+ acpi_rsdt_forced = 1;
+ } else {
+ printk(KERN_NOTICE
+ "Warning: acpi=force overrules DMI blacklist: "
+ "acpi=rsdt\n");
+ }
+ return 0;
+
+}
+
/*
* If your system is blacklisted here, but you find that acpi=force
* works for you, please contact linux-acpi@vger.kernel.org
DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
},
},
+
+ /*
+ * Boxes that need RSDT as ACPI root table
+ */
+ {
+ .callback = force_acpi_rsdt,
+ .ident = "ThinkPad ", /* R40e, broken C-states */
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
+ DMI_MATCH(DMI_BIOS_VERSION, "1SET")},
+ },
+ {
+ .callback = force_acpi_rsdt,
+ .ident = "ThinkPad ", /* R50e, slow booting */
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
+ DMI_MATCH(DMI_BIOS_VERSION, "1WET")},
+ },
+ {
+ .callback = force_acpi_rsdt,
+ .ident = "ThinkPad ", /* T40, T40p, T41, T41p, T42, T42p
+ R50, R50p */
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
+ DMI_MATCH(DMI_BIOS_VERSION, "1RET")},
+ },
{}
};
}
early_param("acpi", parse_acpi);
+/* Alias for acpi=rsdt for compatibility with openSUSE 11.1 and SLE11 */
+static int __init parse_acpi_root_table(char *opt)
+{
+ if (!strcmp(opt, "rsdt")) {
+ acpi_rsdt_forced = 1;
+ printk(KERN_WARNING "acpi_root_table=rsdt is deprecated. "
+ "Please use acpi=rsdt instead.\n");
+ }
+ return 0;
+}
+early_param("acpi_root_table", parse_acpi_root_table);
+
/* FIXME: Using pci= for an ACPI parameter is a travesty. */
static int __init parse_pci(char *arg)
{
static int dmi_bigsmp; /* can be set by dmi scanners */
-static int hp_ht_bigsmp(const struct dmi_system_id *d)
+static int force_bigsmp_apic(const struct dmi_system_id *d)
{
printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident);
dmi_bigsmp = 1;
static const struct dmi_system_id bigsmp_dmi_table[] = {
- { hp_ht_bigsmp, "HP ProLiant DL760 G2",
+ { force_bigsmp_apic, "HP ProLiant DL760 G2",
{ DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
DMI_MATCH(DMI_BIOS_VERSION, "P44-"),
}
},
- { hp_ht_bigsmp, "HP ProLiant DL740",
+ { force_bigsmp_apic, "HP ProLiant DL740",
{ DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
DMI_MATCH(DMI_BIOS_VERSION, "P47-"),
}
},
+
+ { force_bigsmp_apic, "IBM x260 / x366 / x460",
+ { DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
+ DMI_MATCH(DMI_BIOS_VERSION, "-[ZT"),
+ }
+ },
+
+ { force_bigsmp_apic, "IBM x3800 / x3850 / x3950",
+ { DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
+ DMI_MATCH(DMI_BIOS_VERSION, "-[ZU"),
+ }
+ },
+
+ { force_bigsmp_apic, "IBM x3800 / x3850 / x3950",
+ { DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
+ DMI_MATCH(DMI_BIOS_VERSION, "-[ZS"),
+ }
+ },
+
+ { force_bigsmp_apic, "IBM x3850 M2 / x3950 M2",
+ { DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
+ DMI_MATCH(DMI_BIOS_VERSION, "-[A3"),
+ }
+ },
{ } /* NULL entry stops DMI scanning */
};
if (!(*drv)->mps_oem_check(mpc, oem, productid))
continue;
- if (!cmdline_apic) {
+ if (!cmdline_apic && apic == &apic_default) {
apic = *drv;
printk(KERN_INFO "Switched to APIC driver `%s'.\n",
apic->name);
if (!(*drv)->acpi_madt_oem_check(oem_id, oem_table_id))
continue;
- if (!cmdline_apic) {
+ if (!cmdline_apic && apic == &apic_default) {
apic = *drv;
printk(KERN_INFO "Switched to APIC driver `%s'.\n",
apic->name);
unsigned long offset;
unsigned short segment;
} apm_bios_entry;
+#ifdef CONFIG_APM_CPU_IDLE
static int clock_slowed;
static int idle_threshold __read_mostly = DEFAULT_IDLE_THRESHOLD;
static int idle_period __read_mostly = DEFAULT_IDLE_PERIOD;
static int set_pm_idle;
+#endif
static int suspends_pending;
static int standbys_pending;
static int ignore_sys_suspend;
return set_power_state(APM_DEVICE_ALL, state);
}
+#ifdef CONFIG_APM_CPU_IDLE
/**
* apm_do_idle - perform power saving
*
local_irq_enable();
}
+#endif
/**
* apm_power_off - ask the BIOS to power off
if ((strncmp(str, "bounce-interval=", 16) == 0) ||
(strncmp(str, "bounce_interval=", 16) == 0))
bounce_interval = simple_strtol(str + 16, NULL, 0);
+#ifdef CONFIG_APM_CPU_IDLE
if ((strncmp(str, "idle-threshold=", 15) == 0) ||
(strncmp(str, "idle_threshold=", 15) == 0))
idle_threshold = simple_strtol(str + 15, NULL, 0);
if ((strncmp(str, "idle-period=", 12) == 0) ||
(strncmp(str, "idle_period=", 12) == 0))
idle_period = simple_strtol(str + 12, NULL, 0);
+#endif
invert = (strncmp(str, "no-", 3) == 0) ||
(strncmp(str, "no_", 3) == 0);
if (invert)
if (misc_register(&apm_device))
printk(KERN_WARNING "apm: Could not register misc device.\n");
+#ifdef CONFIG_APM_CPU_IDLE
if (HZ != 100)
idle_period = (idle_period * HZ) / 100;
if (idle_threshold < 100) {
pm_idle = apm_cpu_idle;
set_pm_idle = 1;
}
+#endif
return 0;
}
{
int error;
+#ifdef CONFIG_APM_CPU_IDLE
if (set_pm_idle) {
pm_idle = original_pm_idle;
/*
*/
cpu_idle_wait();
}
+#endif
if (((apm_info.bios.flags & APM_BIOS_DISENGAGED) == 0)
&& (apm_info.connection_version > 0x0100)) {
error = apm_engage_power_management(APM_DEVICE_ALL, 0);
module_param(realmode_power_off, bool, 0444);
MODULE_PARM_DESC(realmode_power_off,
"Switch to real mode before powering off");
+#ifdef CONFIG_APM_CPU_IDLE
module_param(idle_threshold, int, 0444);
MODULE_PARM_DESC(idle_threshold,
"System idle percentage above which to make APM BIOS idle calls");
module_param(idle_period, int, 0444);
MODULE_PARM_DESC(idle_period,
"Period (in sec/100) over which to caculate the idle percentage");
+#endif
module_param(smp, bool, 0444);
MODULE_PARM_DESC(smp,
"Set this to enable APM use on an SMP platform. Use with caution on older systems");
* callchain support
*/
+static void
+backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
+{
+ /* Ignore warnings */
+}
+
+static void backtrace_warning(void *data, char *msg)
+{
+ /* Ignore warnings */
+}
+
static int backtrace_stack(void *data, char *name)
{
return 0;
}
static const struct stacktrace_ops backtrace_ops = {
+ .warning = backtrace_warning,
+ .warning_symbol = backtrace_warning_symbol,
.stack = backtrace_stack,
.address = backtrace_address,
.walk_stack = print_context_stack_bp,
#include <linux/sysfs.h>
#include <asm/stacktrace.h>
+#include <linux/unwind.h>
int panic_on_unrecovered_nmi;
int panic_on_io_nmi;
unsigned int code_bytes = 64;
int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
+#ifdef CONFIG_STACK_UNWIND
+static int call_trace = 1;
+#else
+#define call_trace (-1)
+#endif
static int die_counter;
void printk_address(unsigned long address, int reliable)
{ }
#endif
+int asmlinkage dump_trace_unwind(struct unwind_frame_info *info,
+ const struct stacktrace_ops *ops, void *data)
+{
+ int n = 0;
+#ifdef CONFIG_STACK_UNWIND
+ unsigned long sp = UNW_SP(info);
+
+ if (arch_unw_user_mode(info))
+ return -1;
+ while (unwind(info) == 0 && UNW_PC(info)) {
+ n++;
+ ops->address(data, UNW_PC(info), 1);
+ if (arch_unw_user_mode(info))
+ break;
+ if ((sp & ~(PAGE_SIZE - 1)) == (UNW_SP(info) & ~(PAGE_SIZE - 1))
+ && sp > UNW_SP(info))
+ break;
+ sp = UNW_SP(info);
+ }
+#endif
+ return n;
+}
+
+int try_stack_unwind(struct task_struct *task, struct pt_regs *regs,
+ unsigned long **stack, unsigned long *bp,
+ const struct stacktrace_ops *ops, void *data)
+{
+#ifdef CONFIG_STACK_UNWIND
+ int unw_ret = 0;
+ struct unwind_frame_info info;
+ if (call_trace < 0)
+ return 0;
+
+ if (regs) {
+ if (unwind_init_frame_info(&info, task, regs) == 0)
+ unw_ret = dump_trace_unwind(&info, ops, data);
+ } else if (task == current)
+ unw_ret = unwind_init_running(&info, dump_trace_unwind, ops, data);
+#ifdef CONFIG_SMP
+ else if (task->on_cpu)
+ /* nothing */;
+#endif
+ else if (unwind_init_blocked(&info, task) == 0)
+ unw_ret = dump_trace_unwind(&info, ops, data);
+ if (unw_ret > 0) {
+ if (call_trace == 1 && !arch_unw_user_mode(&info)) {
+ ops->warning_symbol(data, "DWARF2 unwinder stuck at %s\n",
+ UNW_PC(&info));
+ if (UNW_SP(&info) >= PAGE_OFFSET) {
+ ops->warning(data, "Leftover inexact backtrace:\n");
+ *stack = (void *)UNW_SP(&info);
+ *bp = UNW_FP(&info);
+ return 0;
+ }
+ } else if (call_trace >= 1)
+ return -1;
+ ops->warning(data, "Full inexact backtrace again:\n");
+ } else
+ ops->warning(data, "Inexact backtrace:\n");
+#endif
+ return 0;
+}
+
/*
* x86-64 can have up to three kernel stacks:
* process stack
}
EXPORT_SYMBOL_GPL(print_context_stack_bp);
+
+static void
+print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
+{
+ printk(data);
+ print_symbol(msg, symbol);
+ printk("\n");
+}
+
+static void print_trace_warning(void *data, char *msg)
+{
+ printk("%s%s\n", (char *)data, msg);
+}
+
static int print_trace_stack(void *data, char *name)
{
printk("%s <%s> ", (char *)data, name);
}
static const struct stacktrace_ops print_trace_ops = {
+ .warning = print_trace_warning,
+ .warning_symbol = print_trace_warning_symbol,
.stack = print_trace_stack,
.address = print_trace_address,
.walk_stack = print_context_stack,
return 1;
}
__setup("code_bytes=", code_bytes_setup);
+
+#ifdef CONFIG_STACK_UNWIND
+static int __init call_trace_setup(char *s)
+{
+ if (!s)
+ return -EINVAL;
+ if (strcmp(s, "old") == 0)
+ call_trace = -1;
+ else if (strcmp(s, "both") == 0)
+ call_trace = 0;
+ else if (strcmp(s, "newfallback") == 0)
+ call_trace = 1;
+ else if (strcmp(s, "new") == 0)
+ call_trace = 2;
+ return 0;
+}
+early_param("call_trace", call_trace_setup);
+#endif
if (!task)
task = current;
+ bp = stack_frame(task, regs);
+ if (try_stack_unwind(task, regs, &stack, &bp, ops, data))
+ return;
+
if (!stack) {
unsigned long dummy;
#include <linux/bug.h>
#include <linux/nmi.h>
+#include <linux/unwind.h>
#include <asm/stacktrace.h>
if (!task)
task = current;
+ bp = stack_frame(task, regs);
+ if (try_stack_unwind(task, regs, &stack, &bp, ops, data)) {
+ put_cpu();
+ return;
+ }
+
if (!stack) {
if (regs)
stack = (unsigned long *)regs->sp;
* pcibios_resource_survey()
*/
if (e820.map[i].type != E820_RESERVED || res->start < (1ULL<<20)) {
- res->flags |= IORESOURCE_BUSY;
+ if (e820.map[i].type != E820_NVS)
+ res->flags |= IORESOURCE_BUSY;
insert_resource(&iomem_resource, res);
}
res++;
*/
.popsection
+#ifdef CONFIG_STACK_UNWIND
+ENTRY(arch_unwind_init_running)
+ CFI_STARTPROC
+ movl 4(%esp), %edx
+ movl (%esp), %ecx
+ leal 4(%esp), %eax
+ movl %ebx, PT_EBX(%edx)
+ xorl %ebx, %ebx
+ movl %ebx, PT_ECX(%edx)
+ movl %ebx, PT_EDX(%edx)
+ movl %esi, PT_ESI(%edx)
+ movl %edi, PT_EDI(%edx)
+ movl %ebp, PT_EBP(%edx)
+ movl %ebx, PT_EAX(%edx)
+ movl $__USER_DS, PT_DS(%edx)
+ movl $__USER_DS, PT_ES(%edx)
+ movl $__KERNEL_PERCPU, PT_FS(%edx)
+ movl $__KERNEL_STACK_CANARY, PT_GS(%edx)
+ movl %eax, PT_OLDESP(%edx)
+ movl 16(%esp), %eax
+ movl %ebx, PT_ORIG_EAX(%edx)
+ movl %ecx, PT_EIP(%edx)
+ movl 12(%esp), %ecx
+ movl $__KERNEL_CS, PT_CS(%edx)
+ movl %eax, 12(%esp)
+ movl 8(%esp), %eax
+ movl %ecx, 8(%esp)
+ movl %ebx, PT_EFLAGS(%edx)
+ movl PT_EBX(%edx), %ebx
+ movl $__KERNEL_DS, PT_OLDSS(%edx)
+ jmpl *%eax
+ CFI_ENDPROC
+ENDPROC(arch_unwind_init_running)
+#endif
+
ENTRY(kernel_thread_helper)
pushl $0 # fake return address for unwinder
CFI_STARTPROC
/*
* initial frame state for interrupts (and exceptions without error code)
*/
- .macro EMPTY_FRAME start=1 offset=0
- .if \start
+ .macro EMPTY_FRAME offset=0
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
- CFI_DEF_CFA rsp,8+\offset
- .else
- CFI_DEF_CFA_OFFSET 8+\offset
- .endif
+ CFI_DEF_CFA rsp,\offset
.endm
/*
* initial frame state for interrupts (and exceptions without error code)
*/
.macro INTR_FRAME start=1 offset=0
- EMPTY_FRAME \start, SS+8+\offset-RIP
+ .if \start
+ EMPTY_FRAME SS+8+\offset-RIP
+ .else
+ CFI_DEF_CFA_OFFSET SS+8+\offset-RIP
+ .endif
/*CFI_REL_OFFSET ss, SS+\offset-RIP*/
CFI_REL_OFFSET rsp, RSP+\offset-RIP
/*CFI_REL_OFFSET rflags, EFLAGS+\offset-RIP*/
*/
.macro XCPT_FRAME start=1 offset=0
INTR_FRAME \start, RIP+\offset-ORIG_RAX
- /*CFI_REL_OFFSET orig_rax, ORIG_RAX-ORIG_RAX*/
.endm
/*
* frame that enables calling into C.
*/
.macro PARTIAL_FRAME start=1 offset=0
+ .if \start >= 0
XCPT_FRAME \start, ORIG_RAX+\offset-ARGOFFSET
+ .endif
CFI_REL_OFFSET rdi, RDI+\offset-ARGOFFSET
CFI_REL_OFFSET rsi, RSI+\offset-ARGOFFSET
CFI_REL_OFFSET rdx, RDX+\offset-ARGOFFSET
* frame that enables passing a complete pt_regs to a C function.
*/
.macro DEFAULT_FRAME start=1 offset=0
+ .if \start >= -1
PARTIAL_FRAME \start, R11+\offset-R15
+ .endif
CFI_REL_OFFSET rbx, RBX+\offset
CFI_REL_OFFSET rbp, RBP+\offset
CFI_REL_OFFSET r12, R12+\offset
.endm
ENTRY(save_rest)
- PARTIAL_FRAME 1 REST_SKIP+8
+ CFI_STARTPROC
movq 5*8+16(%rsp), %r11 /* save return address */
- movq_cfi rbx, RBX+16
- movq_cfi rbp, RBP+16
- movq_cfi r12, R12+16
- movq_cfi r13, R13+16
- movq_cfi r14, R14+16
- movq_cfi r15, R15+16
+ movq %rbx, RBX+16(%rsp)
+ movq %rbp, RBP+16(%rsp)
+ movq %r12, R12+16(%rsp)
+ movq %r13, R13+16(%rsp)
+ movq %r14, R14+16(%rsp)
+ movq %r15, R15+16(%rsp)
movq %r11, 8(%rsp) /* return address */
FIXUP_TOP_OF_STACK %r11, 16
ret
/* save complete stack frame */
.pushsection .kprobes.text, "ax"
ENTRY(save_paranoid)
- XCPT_FRAME 1 RDI+8
+ XCPT_FRAME offset=ORIG_RAX-R15+8
cld
- movq_cfi rdi, RDI+8
- movq_cfi rsi, RSI+8
+ movq %rdi, RDI+8(%rsp)
+ movq %rsi, RSI+8(%rsp)
movq_cfi rdx, RDX+8
movq_cfi rcx, RCX+8
movq_cfi rax, RAX+8
- movq_cfi r8, R8+8
- movq_cfi r9, R9+8
- movq_cfi r10, R10+8
- movq_cfi r11, R11+8
+ movq %r8, R8+8(%rsp)
+ movq %r9, R9+8(%rsp)
+ movq %r10, R10+8(%rsp)
+ movq %r11, R11+8(%rsp)
movq_cfi rbx, RBX+8
- movq_cfi rbp, RBP+8
- movq_cfi r12, R12+8
- movq_cfi r13, R13+8
- movq_cfi r14, R14+8
- movq_cfi r15, R15+8
+ movq %rbp, RBP+8(%rsp)
+ movq %r12, R12+8(%rsp)
+ movq %r13, R13+8(%rsp)
+ movq %r14, R14+8(%rsp)
+ movq %r15, R15+8(%rsp)
movl $1,%ebx
movl $MSR_GS_BASE,%ecx
rdmsr
subq $REST_SKIP, %rsp
CFI_ADJUST_CFA_OFFSET REST_SKIP
call save_rest
- DEFAULT_FRAME 0 8 /* offset 8: return address */
+ DEFAULT_FRAME -2 8 /* offset 8: return address */
leaq 8(%rsp), \arg /* pt_regs pointer */
call \func
jmp ptregscall_common
subq $ORIG_RAX-R15, %rsp
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call error_entry
- DEFAULT_FRAME 0
+ DEFAULT_FRAME -1
movq %rsp,%rdi /* pt_regs pointer */
xorl %esi,%esi /* no error code */
call \do_sym
subq $ORIG_RAX-R15, %rsp
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call save_paranoid
+ DEFAULT_FRAME -1
TRACE_IRQS_OFF
movq %rsp,%rdi /* pt_regs pointer */
xorl %esi,%esi /* no error code */
subq $ORIG_RAX-R15, %rsp
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call save_paranoid
+ DEFAULT_FRAME -1
TRACE_IRQS_OFF
movq %rsp,%rdi /* pt_regs pointer */
xorl %esi,%esi /* no error code */
subq $ORIG_RAX-R15, %rsp
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call error_entry
- DEFAULT_FRAME 0
+ DEFAULT_FRAME -1
movq %rsp,%rdi /* pt_regs pointer */
movq ORIG_RAX(%rsp),%rsi /* get error code */
movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
subq $ORIG_RAX-R15, %rsp
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call save_paranoid
- DEFAULT_FRAME 0
+ DEFAULT_FRAME -1
TRACE_IRQS_OFF
movq %rsp,%rdi /* pt_regs pointer */
movq ORIG_RAX(%rsp),%rsi /* get error code */
CFI_ENDPROC
END(call_softirq)
+#ifdef CONFIG_STACK_UNWIND
+ENTRY(arch_unwind_init_running)
+ CFI_STARTPROC
+ movq %r15, R15(%rdi)
+ movq %r14, R14(%rdi)
+ xchgq %rsi, %rdx
+ movq %r13, R13(%rdi)
+ movq %r12, R12(%rdi)
+ xorl %eax, %eax
+ movq %rbp, RBP(%rdi)
+ movq %rbx, RBX(%rdi)
+ movq (%rsp), %r9
+ xchgq %rdx, %rcx
+ movq %rax, R11(%rdi)
+ movq %rax, R10(%rdi)
+ movq %rax, R9(%rdi)
+ movq %rax, R8(%rdi)
+ movq %rax, RAX(%rdi)
+ movq %rax, RCX(%rdi)
+ movq %rax, RDX(%rdi)
+ movq %rax, RSI(%rdi)
+ movq %rax, RDI(%rdi)
+ movq %rax, ORIG_RAX(%rdi)
+ movq %r9, RIP(%rdi)
+ leaq 8(%rsp), %r9
+ movq $__KERNEL_CS, CS(%rdi)
+ movq %rax, EFLAGS(%rdi)
+ movq %r9, RSP(%rdi)
+ movq $__KERNEL_DS, SS(%rdi)
+ jmpq *%rcx
+ CFI_ENDPROC
+END(arch_unwind_init_running)
+#endif
+
#ifdef CONFIG_XEN
zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
* returns in "no swapgs flag" in %ebx.
*/
ENTRY(error_entry)
- XCPT_FRAME
- CFI_ADJUST_CFA_OFFSET 15*8
+ XCPT_FRAME offset=ORIG_RAX-R15+8
/* oldrax contains error code */
cld
- movq_cfi rdi, RDI+8
- movq_cfi rsi, RSI+8
- movq_cfi rdx, RDX+8
- movq_cfi rcx, RCX+8
- movq_cfi rax, RAX+8
- movq_cfi r8, R8+8
- movq_cfi r9, R9+8
- movq_cfi r10, R10+8
- movq_cfi r11, R11+8
+ movq %rdi, RDI+8(%rsp)
+ movq %rsi, RSI+8(%rsp)
+ movq %rdx, RDX+8(%rsp)
+ movq %rcx, RCX+8(%rsp)
+ movq %rax, RAX+8(%rsp)
+ movq %r8, R8+8(%rsp)
+ movq %r9, R9+8(%rsp)
+ movq %r10, R10+8(%rsp)
+ movq %r11, R11+8(%rsp)
movq_cfi rbx, RBX+8
- movq_cfi rbp, RBP+8
- movq_cfi r12, R12+8
- movq_cfi r13, R13+8
- movq_cfi r14, R14+8
- movq_cfi r15, R15+8
+ movq %rbp, RBP+8(%rsp)
+ movq %r12, R12+8(%rsp)
+ movq %r13, R13+8(%rsp)
+ movq %r14, R14+8(%rsp)
+ movq %r15, R15+8(%rsp)
xorl %ebx,%ebx
testl $3,CS+8(%rsp)
je error_kernelspace
* compat mode. Check for these here too.
*/
error_kernelspace:
+ CFI_REL_OFFSET rcx, RCX+8
incl %ebx
leaq irq_return(%rip),%rcx
cmpq %rcx,RIP+8(%rsp)
* exceptions might do.
*/
call save_paranoid
- DEFAULT_FRAME 0
+ DEFAULT_FRAME -1
/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
movq %rsp,%rdi
movq $-1,%rsi
ENTRY(early_idt_handler)
#ifdef CONFIG_EARLY_PRINTK
+#include <asm/calling.h>
+#include <asm/dwarf2.h>
cmpl $2,early_recursion_flag(%rip)
jz 1f
incl early_recursion_flag(%rip)
testl $0x27d00,%eax
je 0f
popq %r8 # get error code
+
+ CFI_STARTPROC simple
+ CFI_SIGNAL_FRAME
+ CFI_DEF_CFA rsp, SS+8-RIP
+# CFI_REL_OFFSET ss, SS-RIP
+ CFI_REL_OFFSET rsp, RSP-RIP
+# CFI_REL_OFFSET rflags, EFLAGS-RIP
+# CFI_REL_OFFSET cs, CS-RIP
+ CFI_REL_OFFSET rip, RIP-RIP
+
0: movq 0(%rsp),%rcx # get ip
movq 8(%rsp),%rdx # get cs
xorl %eax,%eax
movq 0(%rsp),%rsi # get rip again
call __print_symbol
#endif
+ CFI_ENDPROC
#endif /* EARLY_PRINTK */
1: hlt
jmp 1b
static unsigned long hpet_num_timers;
#endif
static void __iomem *hpet_virt_address;
+static int hpet_legacy_use_64_bits;
struct hpet_dev {
struct clock_event_device evt;
#ifdef CONFIG_X86_64
#include <asm/pgtable.h>
+static inline unsigned long hpet_read_value(unsigned long a)
+{
+ if (hpet_legacy_use_64_bits)
+ return readq(hpet_virt_address + a);
+ else
+ return readl(hpet_virt_address + a);
+}
+
+static void hpet_write_value(unsigned long d, unsigned long a)
+{
+ if (hpet_legacy_use_64_bits)
+ writeq(d, hpet_virt_address + a);
+ else
+ writel(d, hpet_virt_address + a);
+}
+
+#else
+
+static inline unsigned long hpet_read_value(unsigned long a)
+{
+ return readl(hpet_virt_address + a);
+}
+
+static void hpet_write_value(unsigned long d, unsigned long a)
+{
+ writel(d, hpet_virt_address + a);
+}
#endif
static inline void hpet_set_mapping(void)
}
__setup("nohpet", disable_hpet);
+#ifdef CONFIG_X86_64
+static int hpet64 = 0;
+static int __init hpet64_setup(char *str)
+{
+ hpet64 = 1;
+ return 1;
+}
+__setup("hpet64", hpet64_setup);
+#endif
+
+
static inline int is_hpet_capable(void)
{
return !boot_hpet_disable && hpet_address;
* Common hpet info
*/
static unsigned long hpet_freq;
+static int hpet_legacy_use_64_bits; /* configure T0 in 64-bit mode? */
static void hpet_legacy_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt);
hpet_legacy_int_enabled = 1;
}
+static int timer0_use_64_bits(void)
+{
+#ifndef CONFIG_X86_64
+ /* using the HPET in 64-bit mode without atomic 64-bit
+ * accesses is too inefficient
+ */
+ return 0;
+#else
+
+ if (unlikely(hpet64)) {
+ u32 id, t0_cfg;
+ id = hpet_readl(HPET_ID);
+ t0_cfg = hpet_readl(HPET_Tn_CFG(0));
+
+ if ((id & HPET_ID_64BIT) && (t0_cfg & HPET_TN_64BIT_CAP)) {
+ printk(KERN_DEBUG "hpet timer0 configured in 64-bit mode\n");
+ return 1;
+ }
+ else {
+ printk(KERN_DEBUG "hpet timer0 does not support 64-bit mode\n");
+ return 0;
+ }
+ }
+ else return 0;
+#endif
+}
+
static void hpet_legacy_clockevent_register(void)
{
/* Start HPET legacy interrupts */
hpet_enable_legacy_int();
+ hpet_legacy_use_64_bits = timer0_use_64_bits();
/*
* Start hpet with the boot cpu mask and make it
/* Make sure we use edge triggered interrupts */
cfg &= ~HPET_TN_LEVEL;
cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
- HPET_TN_SETVAL | HPET_TN_32BIT;
+ HPET_TN_SETVAL |
+ (hpet_legacy_use_64_bits ? 0 : HPET_TN_32BIT);
hpet_writel(cfg, HPET_Tn_CFG(timer));
- hpet_writel(cmp, HPET_Tn_CMP(timer));
+ hpet_write_value(cmp, HPET_Tn_CMP(timer));
udelay(1);
/*
* HPET on AMD 81xx needs a second write (with HPET_TN_SETVAL
* (See AMD-8111 HyperTransport I/O Hub Data Sheet,
* Publication # 24674)
*/
- hpet_writel((unsigned int) delta, HPET_Tn_CMP(timer));
+ hpet_write_value((unsigned long) delta, HPET_Tn_CMP(timer));
hpet_start_counter();
hpet_print_config();
break;
case CLOCK_EVT_MODE_ONESHOT:
cfg = hpet_readl(HPET_Tn_CFG(timer));
cfg &= ~HPET_TN_PERIODIC;
- cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
+ cfg |= HPET_TN_ENABLE |
+ (hpet_legacy_use_64_bits ? 0 : HPET_TN_32BIT);
hpet_writel(cfg, HPET_Tn_CFG(timer));
break;
static int hpet_next_event(unsigned long delta,
struct clock_event_device *evt, int timer)
{
- u32 cnt;
+ unsigned long cnt;
s32 res;
- cnt = hpet_readl(HPET_COUNTER);
+ cnt = hpet_read_value(HPET_COUNTER);
cnt += (u32) delta;
- hpet_writel(cnt, HPET_Tn_CMP(timer));
+ hpet_write_value(cnt, HPET_Tn_CMP(timer));
/*
* HPETs are a complete disaster. The compare register is
* the event. The minimum programming delta for the generic
* clockevents code is set to 1.5 * HPET_MIN_CYCLES.
*/
- res = (s32)(cnt - hpet_readl(HPET_COUNTER));
+ res = (s32)((u32)cnt - (u32)hpet_readl(HPET_COUNTER));
return res < HPET_MIN_CYCLES ? -ETIME : 0;
}
DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
},
},
- { /* Handle problems with rebooting on the Latitude E6320. */
+ {
.callback = set_pci_reboot,
- .ident = "Dell Latitude E6320",
+ .ident = "Dell Latitude E5xxx",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5"),
},
},
- { /* Handle problems with rebooting on the Latitude E5420. */
+ {
.callback = set_pci_reboot,
- .ident = "Dell Latitude E5420",
+ .ident = "Dell Latitude E6xxx",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5420"),
- },
- },
- { /* Handle problems with rebooting on the Latitude E6420. */
- .callback = set_pci_reboot,
- .ident = "Dell Latitude E6420",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6"),
},
},
{ /* Handle problems with rebooting on the OptiPlex 990. */
#include <linux/uaccess.h>
#include <asm/stacktrace.h>
+static void save_stack_warning(void *data, char *msg)
+{
+}
+
+static void
+save_stack_warning_symbol(void *data, char *msg, unsigned long symbol)
+{
+}
+
static int save_stack_stack(void *data, char *name)
{
return 0;
}
static const struct stacktrace_ops save_stack_ops = {
+ .warning = save_stack_warning,
+ .warning_symbol = save_stack_warning_symbol,
.stack = save_stack_stack,
.address = save_stack_address,
.walk_stack = print_context_stack,
};
static const struct stacktrace_ops save_stack_ops_nosched = {
+ .warning = save_stack_warning,
+ .warning_symbol = save_stack_warning_symbol,
.stack = save_stack_stack,
.address = save_stack_address_nosched,
.walk_stack = print_context_stack,
/* Sections to be discarded */
DISCARDS
+#ifndef CONFIG_UNWIND_INFO
/DISCARD/ : { *(.eh_frame) }
+#endif
}
0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
/* cpuid 1.ecx */
const u32 kvm_supported_word4_x86_features =
- F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
+ F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64 */ | F(MWAIT) |
0 /* DS-CPL, VMX, SMX, EST */ |
0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
F(FMA) | F(CX16) | 0 /* xTPR Update, PDCM */ |
return 1;
}
+static int monitor_interception(struct vcpu_svm *svm)
+{
+ svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
+ skip_emulated_instruction(&svm->vcpu);
+
+ return 1;
+}
+
+static int mwait_interception(struct vcpu_svm *svm)
+{
+ svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
+ skip_emulated_instruction(&svm->vcpu);
+
+ return kvm_emulate_halt(&svm->vcpu);
+}
+
static int invalid_op_interception(struct vcpu_svm *svm)
{
kvm_queue_exception(&svm->vcpu, UD_VECTOR);
[SVM_EXIT_CLGI] = clgi_interception,
[SVM_EXIT_SKINIT] = skinit_interception,
[SVM_EXIT_WBINVD] = emulate_on_interception,
- [SVM_EXIT_MONITOR] = invalid_op_interception,
- [SVM_EXIT_MWAIT] = invalid_op_interception,
+ [SVM_EXIT_MONITOR] = monitor_interception,
+ [SVM_EXIT_MWAIT] = mwait_interception,
[SVM_EXIT_XSETBV] = xsetbv_interception,
[SVM_EXIT_NPF] = pf_interception,
};
case MSR_VM_HSAVE_PA:
case MSR_AMD64_PATCH_LOADER:
break;
+ case MSR_NHM_SNB_PKG_CST_CFG_CTL: /* 0xe2 */
case 0x200 ... 0x2ff:
return set_msr_mtrr(vcpu, msr, data);
case MSR_IA32_APICBASE:
case MSR_K8_INT_PENDING_MSG:
case MSR_AMD64_NB_CFG:
case MSR_FAM10H_MMIO_CONF_BASE:
+ case MSR_NHM_SNB_PKG_CST_CFG_CTL: /* 0xe2 */
data = 0;
break;
case MSR_P6_PERFCTR0:
#include <asm/ptrace.h>
#include <asm/stacktrace.h>
+static void backtrace_warning_symbol(void *data, char *msg,
+ unsigned long symbol)
+{
+ /* Ignore warnings */
+}
+
+static void backtrace_warning(void *data, char *msg)
+{
+ /* Ignore warnings */
+}
+
static int backtrace_stack(void *data, char *name)
{
/* Yes, we want all stacks */
}
static struct stacktrace_ops backtrace_ops = {
+ .warning = backtrace_warning,
+ .warning_symbol = backtrace_warning_symbol,
.stack = backtrace_stack,
.address = backtrace_address,
.walk_stack = print_context_stack,
goto fail;
}
+ /* Check the GUID Partition Table header size */
+ if (le32_to_cpu((*gpt)->header_size) >
+ bdev_logical_block_size(state->bdev)) {
+ pr_debug("GUID Partition Table Header size is wrong: %u > %u\n",
+ le32_to_cpu((*gpt)->header_size),
+ bdev_logical_block_size(state->bdev));
+ goto fail;
+ }
+
/* Check the GUID Partition Table CRC */
origcrc = le32_to_cpu((*gpt)->header_crc32);
(*gpt)->header_crc32 = 0;
(!address64->address && length)) {
ACPI_WARNING((AE_INFO,
"Optional field %s has zero address or length: "
- "0x%8.8X%8.8X/0x%X",
+ "0x%8.8X%8.8X/0x%X - not using it",
name,
ACPI_FORMAT_UINT64(address64->
address),
length));
+ address64->address = 0;
}
}
}
#include <linux/acpi.h>
#include <linux/debugfs.h>
#include <linux/module.h>
+#include <linux/uaccess.h>
#include "internal.h"
MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
* struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
*/
unsigned int size = EC_SPACE_SIZE;
- u8 *data = (u8 *) buf;
loff_t init_off = *off;
int err = 0;
size = count;
while (size) {
- err = ec_read(*off, &data[*off - init_off]);
+ u8 byte_read;
+ err = ec_read(*off, &byte_read);
if (err)
return err;
+ if (put_user(byte_read, buf + *off - init_off)) {
+ if (*off - init_off)
+ return *off - init_off; /* partial read */
+ return -EFAULT;
+ }
*off += 1;
size--;
}
unsigned int size = count;
loff_t init_off = *off;
- u8 *data = (u8 *) buf;
int err = 0;
if (*off >= EC_SPACE_SIZE)
}
while (size) {
- u8 byte_write = data[*off - init_off];
+ u8 byte_write;
+ if (get_user(byte_write, buf + *off - init_off)) {
+ if (*off - init_off)
+ return *off - init_off; /* partial write */
+ return -EFAULT;
+ }
err = ec_write(*off, byte_write);
if (err)
return err;
{
acpi_status status;
+ if (acpi_rsdt_forced)
+ printk(KERN_INFO "Using RSDT as ACPI root table\n");
+
status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
if (ACPI_FAILURE(status))
return 1;
#include <linux/kmod.h>
#include <linux/reboot.h>
#include <linux/device.h>
+#include <linux/dmi.h>
#include <asm/uaccess.h>
#include <linux/thermal.h>
#include <acpi/acpi_bus.h>
tz->kelvin_offset = 2732;
}
+/*
+ * Laptops whose firmware-reported passive trip point is adjusted at
+ * probe time: on these models acpi_thermal_add() lowers an overly high
+ * passive trip (> 85 C) by 15 degrees and switches to 5-second polling
+ * (see the dmi_check_system() caller below).
+ */
+static struct dmi_system_id thermal_psv_dmi_table[] = {
+	{
+		.ident = "IBM ThinkPad T41",
+		.matches = {
+			DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+			DMI_MATCH(DMI_PRODUCT_VERSION,"ThinkPad T41"),
+		},
+	},
+	{
+		.ident = "IBM ThinkPad T42",
+		.matches = {
+			DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+			DMI_MATCH(DMI_PRODUCT_VERSION,"ThinkPad T42"),
+		},
+	},
+	{
+		.ident = "IBM ThinkPad T43",
+		.matches = {
+			DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+			DMI_MATCH(DMI_PRODUCT_VERSION,"ThinkPad T43"),
+		},
+	},
+	{
+		.ident = "IBM ThinkPad T41p",
+		.matches = {
+			DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+			DMI_MATCH(DMI_PRODUCT_VERSION,"ThinkPad T41p"),
+		},
+	},
+	{
+		.ident = "IBM ThinkPad T42p",
+		.matches = {
+			DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+			DMI_MATCH(DMI_PRODUCT_VERSION,"ThinkPad T42p"),
+		},
+	},
+	{
+		.ident = "IBM ThinkPad T43p",
+		.matches = {
+			DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+			DMI_MATCH(DMI_PRODUCT_VERSION,"ThinkPad T43p"),
+		},
+	},
+	{
+		.ident = "IBM ThinkPad R40",
+		.matches = {
+			DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+			DMI_MATCH(DMI_PRODUCT_VERSION,"ThinkPad R40"),
+		},
+	},
+	{
+		.ident = "IBM ThinkPad R50p",
+		.matches = {
+			DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+			DMI_MATCH(DMI_PRODUCT_VERSION,"ThinkPad R50p"),
+		},
+	},
+	{},
+};
+
+/*
+ * Set the polling interval of an ACPI thermal zone.
+ * @tz:      thermal zone to update (NULL returns -EINVAL)
+ * @seconds: new polling interval in seconds
+ *
+ * Updates the ACPI-side polling_frequency (stored in deci-seconds) and
+ * the thermal core's polling_delay (milliseconds), then kicks the
+ * thermal core so the new interval takes effect on an enabled zone.
+ * Assumes tz->thermal_zone is already registered — TODO confirm at the
+ * call sites.
+ */
+static int acpi_thermal_set_polling(struct acpi_thermal *tz, int seconds)
+{
+	if (!tz)
+		return -EINVAL;
+
+	/* Convert value to deci-seconds */
+	tz->polling_frequency = seconds * 10;
+
+	tz->thermal_zone->polling_delay = seconds * 1000;
+
+	if (tz->tz_enabled)
+		thermal_zone_device_update(tz->thermal_zone);
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+			  "Polling frequency set to %lu seconds\n",
+			  tz->polling_frequency/10));
+
+	return 0;
+}
+
static int acpi_thermal_add(struct acpi_device *device)
{
int result = 0;
if (result)
goto free_memory;
+ if (dmi_check_system(thermal_psv_dmi_table)) {
+ if (tz->trips.passive.flags.valid &&
+ tz->trips.passive.temperature > CELSIUS_TO_KELVIN(85)) {
+ printk (KERN_INFO "Adjust passive trip point from %lu"
+ " to %lu\n",
+ KELVIN_TO_CELSIUS(tz->trips.passive.temperature),
+ KELVIN_TO_CELSIUS(tz->trips.passive.temperature - 150));
+ tz->trips.passive.temperature -= 150;
+ acpi_thermal_set_polling(tz, 5);
+ }
+ }
+
printk(KERN_INFO PREFIX "%s [%s] (%ld C)\n",
acpi_device_name(device), acpi_device_bid(device),
KELVIN_TO_CELSIUS(tz->temperature));
static int piix_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent);
static void piix_remove_one(struct pci_dev *pdev);
+static unsigned int piix_pata_read_id(struct ata_device *adev, struct ata_taskfile *tf, u16 *id);
static int piix_pata_prereset(struct ata_link *link, unsigned long deadline);
static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev);
static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev);
.set_piomode = piix_set_piomode,
.set_dmamode = piix_set_dmamode,
.prereset = piix_pata_prereset,
+ .read_id = piix_pata_read_id,
};
static struct ata_port_operations piix_vmw_ops = {
MODULE_DEVICE_TABLE(pci, piix_pci_tbl);
MODULE_VERSION(DRV_VERSION);
+/*
+ * Return non-zero when we are running as a Microsoft Hyper-V guest and
+ * the hv_storvsc driver is available; always 0 otherwise.
+ */
+static int piix_msft_hyperv(void)
+{
+	int running_on_hyperv = 0;
+#if defined(CONFIG_HYPERV_STORAGE) || defined(CONFIG_HYPERV_STORAGE_MODULE)
+	static const struct dmi_system_id hv_dmi_ident[] = {
+		{
+			.ident = "Hyper-V",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
+				DMI_MATCH(DMI_BOARD_NAME, "Virtual Machine"),
+			},
+		},
+		{ } /* terminate list */
+	};
+
+	if (dmi_check_system(hv_dmi_ident))
+		running_on_hyperv = 1;
+#endif
+	return running_on_hyperv;
+}
+
struct ich_laptop {
u16 device;
u16 subvendor;
return ata_sff_prereset(link, deadline);
}
+/*
+ * IDENTIFY wrapper that hides ATA disks from piix when running as a
+ * Hyper-V guest, so they are not enumerated twice (once here and once
+ * through hv_storvsc).  ATAPI devices stay visible — see comment below.
+ */
+static unsigned int piix_pata_read_id(struct ata_device *adev, struct ata_taskfile *tf, u16 *id)
+{
+	unsigned int err_mask = ata_do_dev_read_id(adev, tf, id);
+	/*
+	 * Ignore disks in a hyper-v guest.
+	 * There is no unplug protocol like it is done with xen_emul_unplug= option.
+	 * Emulate the unplug by ignoring disks when the hv_storvsc driver is enabled.
+	 * If the disks are not ignored, they will appear twice: once through
+	 * piix and once through hv_storvsc.
+	 * hv_storvsc can not handle ATAPI devices because they can only be
+	 * accessed through the emulated code path (not through the vm_bus
+	 * channel), the piix driver is still required.
+	 */
+	if (ata_id_is_ata(id) && piix_msft_hyperv()) {
+		ata_dev_printk(adev, KERN_WARNING, "ATA device ignored in Hyper-V guest\n");
+		/* Setting bit 15 of the IDENTIFY config word is presumably
+		 * what makes ata_id_is_ata() report false so libata skips
+		 * the device — confirm against include/linux/ata.h. */
+		id[ATA_ID_CONFIG] |= (1 << 15);
+	}
+	return err_mask;
+}
+
static DEFINE_SPINLOCK(piix_lock);
static void piix_set_timings(struct ata_port *ap, struct ata_device *adev,
This enables panic and oops messages to be logged to a circular
buffer in RAM where it can be read back at some later point.
+config CRASHER
+ tristate "Crasher Module"
+ help
+	  Slab cache memory tester. Only use this as a module.
+
config MSM_SMD_PKT
bool "Enable device interface for some SMD packet ports"
default n
obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o
obj-$(CONFIG_TCG_TPM) += tpm/
+obj-$(CONFIG_CRASHER) += crasher.o
obj-$(CONFIG_PS3_FLASH) += ps3flash.o
obj-$(CONFIG_RAMOOPS) += ramoops.o
--- /dev/null
+/*
+ * crasher.c, it breaks things
+ */
+
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/moduleparam.h>
+
+static int module_exiting;
+static struct completion startup = COMPLETION_INITIALIZER(startup);
+static unsigned long rand_seed = 152L;
+static unsigned long seed = 152L;
+static int threads = 1;
+static bool call_panic, call_bug, call_warn;
+static bool trap_null, call_null, jump_null;
+static long trap_read, trap_write, call_bad, jump_bad;
+
+module_param(seed, ulong, 0);
+module_param(call_panic, bool, 0);
+module_param(call_bug, bool, 0);
+module_param(call_warn, bool, 0);
+module_param(trap_null, bool, 0);
+module_param(trap_read, long, 0);
+module_param(trap_write, long, 0);
+module_param(call_null, bool, 0);
+module_param(call_bad, long, 0);
+module_param(jump_null, bool, 0);
+module_param(jump_bad, long, 0);
+module_param(threads, int, 0);
+MODULE_PARM_DESC(seed, "random seed for memory tests");
+MODULE_PARM_DESC(call_panic, "test option. call panic() and render the system unusable.");
+MODULE_PARM_DESC(call_bug, "test option. call BUG() and render the system unusable.");
+MODULE_PARM_DESC(call_warn, "test option. call WARN() and leave the system usable.");
+MODULE_PARM_DESC(trap_null, "test option. dereference a NULL pointer to simulate a crash and render the system unusable.");
+MODULE_PARM_DESC(trap_read, "test option. read from an invalid address to simulate a crash and render the system unusable.");
+MODULE_PARM_DESC(trap_write, "test option. write to an invalid address to simulate a crash and render the system unusable.");
+MODULE_PARM_DESC(call_null, "test option. call a NULL pointer to simulate a crash and render the system unusable.");
+MODULE_PARM_DESC(call_bad, "test option. call an invalid address to simulate a crash and render the system unusable.");
+MODULE_PARM_DESC(jump_null, "test option. jump to a NULL pointer to simulate a crash and render the system unusable.");
+MODULE_PARM_DESC(jump_bad, "test option. jump to an invalid address to simulate a crash and render the system unusable.");
+MODULE_PARM_DESC(threads, "number of threads to run");
+MODULE_LICENSE("GPL");
+
+#define NUM_ALLOC 24
+#define NUM_SIZES 8
+static int sizes[] = { 32, 64, 128, 192, 256, 1024, 2048, 4096 };
+
+struct mem_buf {
+ char *buf;
+ int size;
+};
+
+/* cheap linear-congruential PRNG step, perturbed by the jiffies counter */
+static unsigned long crasher_random(void)
+{
+	rand_seed = rand_seed * 69069L + 1;
+	return jiffies ^ rand_seed;
+}
+
+/*
+ * Mix caller-provided entropy into the PRNG state.  File-local helper,
+ * so it is made static to keep it out of the kernel global namespace
+ * (it was previously exported without a prototype).
+ */
+static void crasher_srandom(unsigned long entropy)
+{
+	rand_seed ^= entropy;
+	crasher_random();
+}
+
+/*
+ * Allocate @size bytes and fill them with the position-dependent
+ * pattern that mem_check() later verifies.  Returns NULL on OOM.
+ */
+static char *mem_alloc(int size)
+{
+	char *buf = kmalloc(size, GFP_KERNEL);
+	int pos;
+
+	if (!buf)
+		return NULL;
+	for (pos = 0; pos < size; pos++)
+		buf[pos] = (pos % 119) + 8;
+	return buf;
+}
+
+/*
+ * Verify the pattern written by mem_alloc() and report any mismatch.
+ * The buffer is then zeroed so anyone still holding a stale reference
+ * trips slab poisoning.  A NULL buffer is silently ignored.
+ */
+static void mem_check(char *buf, int size)
+{
+	int pos;
+
+	if (!buf)
+		return;
+	for (pos = 0; pos < size; pos++) {
+		if (buf[pos] != ((pos % 119) + 8))
+			printk(KERN_CRIT "verify error at %lX offset %d "
+			       " wanted %d found %d size %d\n",
+			       (unsigned long)(buf + pos), pos, (pos % 119) + 8,
+			       buf[pos], size);
+	}
+	memset(buf, 0, size);
+}
+
+/*
+ * Worker loop: repeatedly pick a random slot out of NUM_ALLOC buffers;
+ * if it is in use, verify and free it, otherwise allocate a randomly
+ * sized pattern-filled buffer into it.  Sleeps up to HZ/10 jiffies
+ * between iterations.  Runs until module_exiting is set, then verifies
+ * and frees everything still allocated.
+ */
+static void mem_verify(void) {
+	struct mem_buf bufs[NUM_ALLOC];
+	struct mem_buf *b;
+	int index;
+	int size;
+	unsigned long sleep;
+	memset(bufs, 0, sizeof(struct mem_buf) * NUM_ALLOC);
+	while(!module_exiting) {
+		index = crasher_random() % NUM_ALLOC;
+		b = bufs + index;
+		if (b->size) {
+			/* slot occupied: check the pattern and release it */
+			mem_check(b->buf, b->size);
+			kfree(b->buf);
+			b->buf = NULL;
+			b->size = 0;
+		} else {
+			/* slot free: fill it with a random-size allocation
+			 * (mem_alloc may return NULL; mem_check copes) */
+			size = crasher_random() % NUM_SIZES;
+			size = sizes[size];
+			b->buf = mem_alloc(size);
+			b->size = size;
+		}
+		sleep = crasher_random() % (HZ / 10);
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(sleep);
+		set_current_state(TASK_RUNNING);
+	}
+	/* module unloading: drain every remaining buffer */
+	for (index = 0 ; index < NUM_ALLOC ; index++) {
+		b = bufs + index;
+		if (b->size) {
+			mem_check(b->buf, b->size);
+			kfree(b->buf);
+		}
+	}
+}
+
+/*
+ * Thread body: detach via daemonize() (legacy pre-kthread API), signal
+ * the startup completion, hammer the slab allocator until unload, then
+ * signal the same completion again so crasher_exit() can wait for us.
+ */
+static int crasher_thread(void *unused)
+{
+	daemonize("crasher");
+	complete(&startup);
+	mem_verify();
+	complete(&startup);
+	return 0;
+}
+
+/*
+ * Module init: either trigger the single crash/warn selected by a
+ * module parameter (returning -EFAULT when the fault is survivable so
+ * the module is not left loaded), or start the slab verification
+ * threads.
+ */
+static int __init crasher_init(void)
+{
+	int i;
+
+	init_completion(&startup);
+	crasher_srandom(seed);
+
+	if (call_panic) {
+		panic("test panic from crasher module. Good Luck.\n");
+		return -EFAULT;
+	}
+	if (call_bug) {
+		printk(KERN_INFO "triggering BUG\n");
+		BUG_ON(1);
+		return -EFAULT;
+	}
+	if (WARN(call_warn, "triggering WARN\n"))
+		return -EFAULT;
+
+	if (trap_null) {
+		volatile char *p = NULL;
+		printk(KERN_INFO "dereferencing NULL pointer.\n");
+		p[0] = '\n';
+		return -EFAULT;
+	}
+	if (trap_read) {
+		const volatile char *p = (char *)trap_read;
+		printk(KERN_INFO "reading from invalid(?) address %p.\n", p);
+		return p[0] ? -EFAULT : -EACCES;
+	}
+	if (trap_write) {
+		volatile char *p = (char *)trap_write;
+		printk(KERN_INFO "writing to invalid(?) address %p.\n", p);
+		p[0] = ' ';
+		return -EFAULT;
+	}
+
+	if (call_null) {
+		void(*f)(void) = NULL;
+		printk(KERN_INFO "calling NULL pointer.\n");
+		f();
+		return -EFAULT;
+	}
+	if (call_bad) {
+		void(*f)(void) = (void(*)(void))call_bad;
+		printk(KERN_INFO "calling invalid(?) address %p.\n", f);
+		f();
+		return -EFAULT;
+	}
+
+	/* These two depend on the compiler doing tail call optimization. */
+	if (jump_null) {
+		int(*f)(void) = NULL;
+		printk(KERN_INFO "jumping to NULL.\n");
+		return f();
+	}
+	if (jump_bad) {
+		int(*f)(void) = (int(*)(void))jump_bad;
+		printk(KERN_INFO "jumping to invalid(?) address %p.\n", f);
+		return f();
+	}
+
+	printk(KERN_INFO "crasher module (%d threads). Testing sizes: ", threads);
+	for (i = 0 ; i < NUM_SIZES ; i++)
+		printk(KERN_CONT "%d ", sizes[i]);
+	printk(KERN_CONT "\n");
+
+	/*
+	 * crasher_thread() ignores its argument; pass NULL instead of the
+	 * (misleading) function pointer the original code handed over.
+	 */
+	for (i = 0 ; i < threads ; i++)
+		kernel_thread(crasher_thread, NULL,
+			      CLONE_FS | CLONE_FILES);
+	/* wait until every worker has signalled startup */
+	for (i = 0 ; i < threads ; i++)
+		wait_for_completion(&startup);
+	return 0;
+}
+
+/*
+ * Module exit: tell the worker threads to stop, then wait for each one
+ * to signal the shared completion a second time (see crasher_thread()).
+ */
+static void __exit crasher_exit(void)
+{
+	int i;
+
+	module_exiting = 1;
+	for (i = 0 ; i < threads ; i++)
+		wait_for_completion(&startup);
+	printk(KERN_INFO "all crasher threads done\n");
+}
+
+module_init(crasher_init);
+module_exit(crasher_exit);
return -EFAULT;
break;
case LPGETSTATUS:
+ if (mutex_lock_interruptible(&lp_table[minor].port_mutex))
+ return -EINTR;
lp_claim_parport_or_block (&lp_table[minor]);
status = r_str(minor);
lp_release_parport (&lp_table[minor]);
+ mutex_unlock(&lp_table[minor].port_mutex);
if (copy_to_user(argp, &status, sizeof(int)))
return -EFAULT;
#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event))
-static atomic_t proc_event_num_listeners = ATOMIC_INIT(0);
+static atomic_t proc_event_num_listeners __read_mostly = ATOMIC_INIT(0);
static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };
/* proc_event_counts is used as the sequence number of the netlink message */
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000)
#define MIN_FREQUENCY_UP_THRESHOLD (11)
#define MAX_FREQUENCY_UP_THRESHOLD (100)
+#define MAX_DEFAULT_SAMPLING_RATE (300 * 1000U)
/*
* The polling frequency of this governor depends on the capability of
dbs_tuners_ins.sampling_rate =
max(min_sampling_rate,
latency * LATENCY_MULTIPLIER);
+ /*
+ * Cut def_sampling rate to 300ms if it was above,
+ * still consider to not set it above latency
+ * transition * 100
+ */
+ if (dbs_tuners_ins.sampling_rate > MAX_DEFAULT_SAMPLING_RATE) {
+ dbs_tuners_ins.sampling_rate =
+ max(min_sampling_rate, MAX_DEFAULT_SAMPLING_RATE);
+ printk(KERN_INFO "CPUFREQ: ondemand sampling "
+ "rate set to %d ms\n",
+ dbs_tuners_ins.sampling_rate / 1000);
+ }
+ /*
+ * Be conservative in respect to performance.
+ * If an application calculates using two threads
+ * depending on each other, they will be run on several
+ * CPU cores resulting on 50% load on both.
+ * SLED might still want to prefer 80% up_threshold
+ * by default, but we cannot differ that here.
+ */
+ if (num_online_cpus() > 1)
+ dbs_tuners_ins.up_threshold =
+ DEF_FREQUENCY_UP_THRESHOLD / 2;
dbs_tuners_ins.io_is_busy = should_io_be_busy();
}
mutex_unlock(&dbs_mutex);
#define NOUVEAU_DSM_HAS_MUX 0x1
#define NOUVEAU_DSM_HAS_OPT 0x2
+#ifdef CONFIG_VGA_SWITCHEROO
static const char nouveau_dsm_muid[] = {
0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
{
vga_switcheroo_unregister_handler();
}
+#else
+void nouveau_register_dsm_handler(void) {}
+void nouveau_unregister_dsm_handler(void) {}
+#endif
/* retrieve the ROM in 4k blocks */
static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios,
#define APPLE_FLAG_FKEY 0x01
-static unsigned int fnmode = 1;
+static unsigned int fnmode = 2;
module_param(fnmode, uint, 0644);
MODULE_PARM_DESC(fnmode, "Mode of fn key on Apple keyboards (0 = disabled, "
"[1] = fkeyslast, 2 = fkeysfirst)");
{ HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x0004) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x000a) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_4000U) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_4500U) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
#define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34
#define USB_VENDOR_ID_ELO 0x04E7
+#define USB_DEVICE_ID_ELO_4000U 0x0009
#define USB_DEVICE_ID_ELO_TS2515 0x0022
#define USB_DEVICE_ID_ELO_TS2700 0x0020
+#define USB_DEVICE_ID_ELO_4500U 0x0030
#define USB_VENDOR_ID_EMS 0x2006
#define USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II 0x0118
select SERIO_LIBPS2
select SERIO_I8042 if X86
select SERIO_GSCPS2 if GSC
+	select LEDS_CLASS if MOUSE_PS2_SYNAPTICS_LED
help
Say Y here if you have a PS/2 mouse connected to your system. This
includes the standard 2 or 3-button PS/2 mouse, as well as PS/2
If unsure, say Y.
+config MOUSE_PS2_SYNAPTICS_LED
+ bool "Support embedded LED on Synaptics devices"
+ depends on MOUSE_PS2_SYNAPTICS
+ select NEW_LEDS
+ help
+ Say Y here if you have a Synaptics device with an embedded LED.
+ This will enable LED class driver to control the LED device.
+
config MOUSE_PS2_LIFEBOOK
bool "Fujitsu Lifebook PS/2 mouse protocol extension" if EXPERT
default y
#include <linux/input/mt.h>
#include <linux/serio.h>
#include <linux/libps2.h>
+#include <linux/leds.h>
#include <linux/slab.h>
#include "psmouse.h"
#include "synaptics.h"
serio_register_port(serio);
}
+#ifdef CONFIG_MOUSE_PS2_SYNAPTICS_LED
+/*
+ * LED handling:
+ * Some Synaptics devices have an embedded LED at the top-left corner.
+ */
+
+struct synaptics_led {
+	struct psmouse *psmouse;	/* owning pointing device */
+	struct work_struct work;	/* defers PS/2 traffic out of the LED hook */
+	struct led_classdev cdev;	/* LED class interface */
+};
+
+/*
+ * Switch the embedded LED on (cmd 0x88) or off (cmd 0x10).  The command
+ * byte is shipped to the device two bits at a time via SETRES after a
+ * SETSCALE11 prelude, finished with SETRATE 0x0a — NOTE(review): this
+ * looks like the Synaptics extended-command encoding; confirm against
+ * the Synaptics interfacing guide.
+ */
+static void synaptics_set_led(struct psmouse *psmouse, int on)
+{
+	int i;
+	unsigned char cmd = on ? 0x88 : 0x10;
+
+	ps2_begin_command(&psmouse->ps2dev);
+	if (__ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11))
+		goto out;
+	/* emit the command byte, high bits first, two bits per SETRES */
+	for (i = 6; i >= 0; i -= 2) {
+		unsigned char d = (cmd >> i) & 3;
+		if (__ps2_command(&psmouse->ps2dev, &d, PSMOUSE_CMD_SETRES))
+			goto out;
+	}
+	cmd = 0x0a;
+	__ps2_command(&psmouse->ps2dev, &cmd, PSMOUSE_CMD_SETRATE);
+ out:
+	ps2_end_command(&psmouse->ps2dev);
+}
+
+/* workqueue handler: push the requested brightness to the hardware */
+static void synaptics_led_work(struct work_struct *work)
+{
+	struct synaptics_led *led =
+		container_of(work, struct synaptics_led, work);
+
+	synaptics_set_led(led->psmouse, led->cdev.brightness);
+}
+
+/*
+ * led_classdev brightness hook.  The PS/2 transaction is deferred to a
+ * workqueue; synaptics_led_work() reads the new value from
+ * led->cdev.brightness rather than @value (presumably stored there by
+ * the LED core before this hook runs — TODO confirm).
+ */
+static void synaptics_led_cdev_brightness_set(struct led_classdev *cdev,
+					enum led_brightness value)
+{
+	struct synaptics_led *led;
+
+	led = container_of(cdev, struct synaptics_led, cdev);
+	schedule_work(&led->work);
+}
+
+/*
+ * Re-apply the current LED brightness; called from the driver's
+ * re-initialization path (see caller) so the LED state survives a
+ * device reset.  No-op when no LED was registered.
+ */
+static void synaptics_sync_led(struct psmouse *psmouse)
+{
+	struct synaptics_data *priv = psmouse->private;
+
+	if (priv->led)
+		synaptics_set_led(psmouse, priv->led->cdev.brightness);
+}
+
+/*
+ * Register a led_classdev for the embedded touchpad LED.
+ * Returns 0 on success or when the device has no LED (detected via
+ * product id 0xe4 — see FIXME), -ENOMEM or the classdev error otherwise.
+ */
+static int synaptics_init_led(struct psmouse *psmouse)
+{
+	struct synaptics_data *priv = psmouse->private;
+	struct synaptics_led *led;
+	int err;
+
+	/* FIXME: LED is supposedly detectable in cap0c[1] 0x20, but it seems
+	 * not working on real machines.
+	 * So we check the product id to be sure.
+	 */
+	if (!priv->ext_cap_0c || SYN_CAP_PRODUCT_ID(priv->ext_cap) != 0xe4)
+		return 0;
+
+	printk(KERN_INFO "synaptics: support LED control\n");
+	led = kzalloc(sizeof(struct synaptics_led), GFP_KERNEL);
+	if (!led)
+		return -ENOMEM;
+	led->psmouse = psmouse;
+	INIT_WORK(&led->work, synaptics_led_work);
+	led->cdev.name = "psmouse::synaptics";
+	led->cdev.brightness_set = synaptics_led_cdev_brightness_set;
+	led->cdev.flags = LED_CORE_SUSPENDRESUME;
+	/* NOTE(review): parent is NULL, so the LED appears without a device
+	 * link in sysfs — consider parenting it to the serio device. */
+	err = led_classdev_register(NULL, &led->cdev);
+	if (err < 0) {
+		kfree(led);
+		return err;
+	}
+	priv->led = led;
+	return 0;
+}
+
+/*
+ * Unregister and release the LED, turning it off first.  Safe to call
+ * when no LED was registered.
+ */
+static void synaptics_free_led(struct psmouse *psmouse)
+{
+	struct synaptics_data *priv = psmouse->private;
+
+	if (!priv->led)
+		return;
+	/* flush any pending brightness update before switching it off */
+	cancel_work_sync(&priv->led->work);
+	synaptics_set_led(psmouse, 0);
+	led_classdev_unregister(&priv->led->cdev);
+	kfree(priv->led);
+}
+#else
+#define synaptics_init_led(ps) 0
+#define synaptics_free_led(ps) do {} while (0)
+#define synaptics_sync_led(ps) do {} while (0)
+#endif
+
/*****************************************************************************
* Functions to interpret the absolute mode packets
****************************************************************************/
device_remove_file(&psmouse->ps2dev.serio->dev,
&psmouse_attr_disable_gesture.dattr);
+ synaptics_free_led(psmouse);
synaptics_reset(psmouse);
kfree(priv);
psmouse->private = NULL;
return -1;
}
+ synaptics_sync_led(psmouse);
+
return 0;
}
priv->model_id,
priv->capabilities, priv->ext_cap, priv->ext_cap_0c);
+ if (synaptics_init_led(psmouse) < 0)
+ goto init_fail;
+
set_input_params(psmouse->dev, priv);
/*
struct synaptics_mt_state mt_state;
};
+struct synaptics_led;
+
struct synaptics_data {
/* Data read from the touchpad */
unsigned long int model_id; /* Model-ID */
*/
struct synaptics_hw_state agm;
bool agm_pending; /* new AGM packet received */
+ struct synaptics_led *led;
};
void synaptics_module_init(void);
},
},
{
+ /* Acer Aspire 5710 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"),
+ },
+ },
+ {
/* Gericom Bellagio */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
To compile this driver as a module, choose M here: the
module will be called elo.
+config TOUCHSCREEN_ELOUSB
+ tristate "Elo USB touchscreens"
+ select USB
+ help
+ Say Y here if you have an Elo USB touchscreen connected to
+ your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called elousb.
+
config TOUCHSCREEN_WACOM_W8001
tristate "Wacom W8001 penabled serial touchscreen"
select SERIO
obj-$(CONFIG_TOUCHSCREEN_GUNZE) += gunze.o
obj-$(CONFIG_TOUCHSCREEN_EETI) += eeti_ts.o
obj-$(CONFIG_TOUCHSCREEN_ELO) += elo.o
+obj-$(CONFIG_TOUCHSCREEN_ELOUSB) += elousb.o
obj-$(CONFIG_TOUCHSCREEN_EGALAX) += egalax_ts.o
obj-$(CONFIG_TOUCHSCREEN_FUJITSU) += fujitsu_ts.o
obj-$(CONFIG_TOUCHSCREEN_ILI210X) += ili210x.o
--- /dev/null
+/*
+ * Copyright (c) 1999-2001 Vojtech Pavlik
+ *
+ * Elo USB touchscreen support
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Should you need to contact me, the author, you can do so either by
+ * e-mail - mail your message to <vojtech@suse.cz>, or by paper mail:
+ * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/usb.h>
+#include <linux/usb/input.h>
+#include <linux/hid.h>
+#include <linux/input.h>
+
+/*
+ * Version Information
+ */
+#define DRIVER_VERSION "v1.1"
+#define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@suse.cz>"
+#define DRIVER_DESC "Elo USB touchscreen driver"
+#define DRIVER_LICENSE "GPL"
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE(DRIVER_LICENSE);
+
+struct elousb {
+ char name[128];
+ char phys[64];
+ struct usb_device *usbdev;
+ struct input_dev *dev;
+ struct urb *irq;
+
+ unsigned char *data;
+ dma_addr_t data_dma;
+};
+
+/*
+ * Interrupt URB completion handler: decode one 8-byte ELO packet into
+ * input events and resubmit the URB.  Fatal unlink statuses stop the
+ * stream; any other error (including a malformed packet) resubmits so
+ * the device keeps reporting.
+ */
+static void elousb_irq(struct urb *urb)
+{
+	struct elousb *elo = urb->context;
+	unsigned char *data = elo->data;
+	struct input_dev *dev = elo->dev;
+	int status;
+
+	switch (urb->status) {
+	case 0: /* success */
+		break;
+	case -ECONNRESET: /* unlink */
+	case -ENOENT:
+	case -ESHUTDOWN:
+		return;
+	/* -EPIPE: should clear the halt */
+	default: /* error */
+		goto resubmit;
+	}
+
+	if (data[0] != 'T') /* Mandatory ELO packet marker */
+		goto resubmit; /* was "return": one bad packet stalled input forever */
+
+	input_report_abs(dev, ABS_X, ((u32)data[3] << 8) | data[2]);
+	input_report_abs(dev, ABS_Y, ((u32)data[5] << 8) | data[4]);
+
+	/* bit 7 of the status byte gates the pressure words */
+	input_report_abs(dev, ABS_PRESSURE,
+		(data[1] & 0x80) ? (((u32)data[7] << 8) | data[6]): 0);
+
+	/* bits 0-1 report touch, bit 2 reports release */
+	if (data[1] & 0x03) {
+		input_report_key(dev, BTN_TOUCH, 1);
+		input_sync(dev);
+	}
+
+	if (data[1] & 0x04)
+		input_report_key(dev, BTN_TOUCH, 0);
+
+	input_sync(dev);
+
+resubmit:
+	status = usb_submit_urb (urb, GFP_ATOMIC);
+	if (status)
+		err ("can't resubmit intr, %s-%s/input0, status %d",
+				elo->usbdev->bus->bus_name,
+				elo->usbdev->devpath, status);
+}
+
+/* input_dev open hook: (re)bind the URB and start the interrupt stream */
+static int elousb_open(struct input_dev *dev)
+{
+	struct elousb *elo = input_get_drvdata(dev);
+
+	elo->irq->dev = elo->usbdev;
+	return usb_submit_urb(elo->irq, GFP_KERNEL) ? -EIO : 0;
+}
+
+/* input_dev close hook: stop the interrupt URB */
+static void elousb_close(struct input_dev *dev)
+{
+	struct elousb *elo = input_get_drvdata(dev);
+
+	usb_kill_urb(elo->irq);
+}
+
+/*
+ * Probe: validate the single interrupt-IN HID interface, allocate the
+ * URB and DMA buffer, and register the input device.
+ * Fixes over the original: the report descriptor buffer is no longer
+ * leaked on success, and the failure path returns the actual error
+ * instead of unconditionally returning -ENOMEM.
+ */
+static int elousb_probe(struct usb_interface *intf, const struct usb_device_id *id)
+{
+	struct usb_device *dev = interface_to_usbdev(intf);
+	struct usb_host_interface *interface;
+	struct usb_endpoint_descriptor *endpoint;
+	struct hid_descriptor *hdesc;
+	struct elousb *elo;
+	struct input_dev *input_dev;
+	int pipe, i;
+	unsigned int rsize = 0;
+	int error = -ENOMEM;
+	char *rdesc;
+
+	interface = intf->cur_altsetting;
+
+	/* expect exactly one interrupt-IN endpoint */
+	if (interface->desc.bNumEndpoints != 1)
+		return -ENODEV;
+
+	endpoint = &interface->endpoint[0].desc;
+	if (!(endpoint->bEndpointAddress & USB_DIR_IN))
+		return -ENODEV;
+	if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != USB_ENDPOINT_XFER_INT)
+		return -ENODEV;
+
+	/* the HID descriptor may hang off the interface or the endpoint */
+	if (usb_get_extra_descriptor(interface, HID_DT_HID, &hdesc) &&
+			(!interface->desc.bNumEndpoints ||
+			 usb_get_extra_descriptor(&interface->endpoint[0], HID_DT_HID, &hdesc))) {
+		err("HID class descriptor not present");
+		return -ENODEV;
+	}
+
+	for (i = 0; i < hdesc->bNumDescriptors; i++)
+		if (hdesc->desc[i].bDescriptorType == HID_DT_REPORT)
+			rsize = le16_to_cpu(hdesc->desc[i].wDescriptorLength);
+
+	if (!rsize || rsize > HID_MAX_DESCRIPTOR_SIZE) {
+		err("weird size of report descriptor (%u)", rsize);
+		return -ENODEV;
+	}
+
+	pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
+
+	elo = kzalloc(sizeof(struct elousb), GFP_KERNEL);
+	input_dev = input_allocate_device();
+	if (!elo || !input_dev)
+		goto fail1;
+
+	elo->data = usb_alloc_coherent(dev, 8, GFP_ATOMIC, &elo->data_dma);
+	if (!elo->data)
+		goto fail1;
+
+	elo->irq = usb_alloc_urb(0, GFP_KERNEL);
+	if (!elo->irq)
+		goto fail2;
+
+	rdesc = kmalloc(rsize, GFP_KERNEL);
+	if (!rdesc)
+		goto fail3;
+
+	elo->usbdev = dev;
+	elo->dev = input_dev;
+
+	if ((error = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+			HID_REQ_SET_IDLE, USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0,
+			interface->desc.bInterfaceNumber,
+			NULL, 0, USB_CTRL_SET_TIMEOUT)) < 0) {
+		err("setting HID idle timeout failed, error %d", error);
+		error = -ENODEV;
+		goto fail4;
+	}
+
+	if ((error = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+			USB_REQ_GET_DESCRIPTOR, USB_RECIP_INTERFACE | USB_DIR_IN,
+			HID_DT_REPORT << 8, interface->desc.bInterfaceNumber,
+			rdesc, rsize, USB_CTRL_GET_TIMEOUT)) < rsize) {
+		err("reading HID report descriptor failed, error %d", error);
+		error = -ENODEV;
+		goto fail4;
+	}
+
+	/* the descriptor was only fetched for validation; release it now
+	 * (the original leaked it on the success path) */
+	kfree(rdesc);
+	rdesc = NULL;
+
+	if (dev->manufacturer)
+		strlcpy(elo->name, dev->manufacturer, sizeof(elo->name));
+
+	if (dev->product) {
+		if (dev->manufacturer)
+			strlcat(elo->name, " ", sizeof(elo->name));
+		strlcat(elo->name, dev->product, sizeof(elo->name));
+	}
+
+	if (!strlen(elo->name))
+		snprintf(elo->name, sizeof(elo->name),
+			"Elo touchscreen %04x:%04x",
+			le16_to_cpu(dev->descriptor.idVendor),
+			le16_to_cpu(dev->descriptor.idProduct));
+
+	usb_make_path(dev, elo->phys, sizeof(elo->phys));
+	strlcat(elo->phys, "/input0", sizeof(elo->phys));
+
+	input_dev->name = elo->name;
+	input_dev->phys = elo->phys;
+	usb_to_input_id(dev, &input_dev->id);
+	input_dev->dev.parent = &intf->dev;
+
+	input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_ABS);
+	set_bit(BTN_TOUCH, input_dev->keybit);
+	input_dev->absbit[0] = BIT(ABS_X) | BIT(ABS_Y);
+	set_bit(ABS_PRESSURE, input_dev->absbit);
+
+	input_set_abs_params(input_dev, ABS_X, 0, 4000, 0, 0);
+	input_set_abs_params(input_dev, ABS_Y, 0, 3840, 0, 0);
+	input_set_abs_params(input_dev, ABS_PRESSURE, 0, 256, 0, 0);
+
+	input_set_drvdata(input_dev, elo);
+
+	input_dev->open = elousb_open;
+	input_dev->close = elousb_close;
+
+	usb_fill_int_urb(elo->irq, dev, pipe, elo->data, 8,
+			elousb_irq, elo, endpoint->bInterval);
+	elo->irq->transfer_dma = elo->data_dma;
+	elo->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+	error = input_register_device(elo->dev);
+	if (error)
+		goto fail4;
+
+	usb_set_intfdata(intf, elo);
+	return 0;
+
+fail4:
+	kfree(rdesc);	/* NULL after the success-path free: kfree(NULL) is a no-op */
+fail3:
+	usb_free_urb(elo->irq);
+fail2:
+	usb_free_coherent(dev, 8, elo->data, elo->data_dma);
+fail1:
+	input_free_device(input_dev);
+	kfree(elo);
+	return error;
+}
+
+/* USB disconnect: stop the stream, then tear everything down */
+static void elousb_disconnect(struct usb_interface *intf)
+{
+	struct elousb *elo = usb_get_intfdata(intf);
+
+	usb_set_intfdata(intf, NULL);
+	if (!elo)
+		return;
+
+	usb_kill_urb(elo->irq);
+	input_unregister_device(elo->dev);
+	usb_free_urb(elo->irq);
+	usb_free_coherent(interface_to_usbdev(intf), 8, elo->data, elo->data_dma);
+	kfree(elo);
+}
+
+static struct usb_device_id elousb_id_table [] = {
+ { USB_DEVICE(0x04e7, 0x0009) }, /* CarrolTouch 4000U */
+ { USB_DEVICE(0x04e7, 0x0030) }, /* CarrolTouch 4500U */
+ { } /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE (usb, elousb_id_table);
+
+static struct usb_driver elousb_driver = {
+ .name = "elousb",
+ .probe = elousb_probe,
+ .disconnect = elousb_disconnect,
+ .id_table = elousb_id_table,
+};
+
+/* Module init: register the USB driver and announce it on success. */
+static int __init elousb_init(void)
+{
+	int retval = usb_register(&elousb_driver);
+
+	if (retval == 0)
+		/* trailing newline was missing, corrupting the next log line */
+		printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":" DRIVER_DESC "\n");
+	return retval;
+}
+
+/* Module exit: unregister the USB driver. */
+static void __exit elousb_exit(void)
+{
+	usb_deregister(&elousb_driver);
+}
+
+module_init(elousb_init);
+module_exit(elousb_exit);
#include "core.h"
static u_int debug;
+u_int misdn_permitted_gid;
MODULE_AUTHOR("Karsten Keil");
MODULE_LICENSE("GPL");
module_param(debug, uint, S_IRUGO | S_IWUSR);
+module_param_named(gid, misdn_permitted_gid, uint, 0);
+MODULE_PARM_DESC(gid, "Unix group for accessing misdn socket (default 0)");
static u64 device_ids;
#define MAX_DEVICE_ID 63
extern struct mISDNdevice *get_mdevice(u_int);
extern int get_mdevice_count(void);
+extern u_int misdn_permitted_gid;
/* stack status flag */
#define mISDN_STACK_ACTION_MASK 0x0000ffff
{
struct sock *sk;
+ if(!capable(CAP_SYS_ADMIN) && (misdn_permitted_gid != current_gid())
+ && (!in_group_p(misdn_permitted_gid)))
+ return -EPERM;
+
if (sock->type != SOCK_DGRAM)
return -ESOCKTNOSUPPORT;
case IMSETDEVNAME:
{
struct mISDN_devrename dn;
+ if(!capable(CAP_SYS_ADMIN)
+ && (misdn_permitted_gid != current_gid())
+ && (!in_group_p(misdn_permitted_gid)))
+ return -EPERM;
if (copy_from_user(&dn, (void __user *)arg,
sizeof(dn))) {
err = -EFAULT;
}
static struct led_trigger defon_led_trigger = {
- .name = "default-on",
+ .name = "default::on",
.activate = defon_trig_activate,
};
config ADB
bool "Apple Desktop Bus (ADB) support"
- depends on MAC || (PPC_PMAC && PPC32)
+ depends on MAC || PPC_PMAC
help
Apple Desktop Bus (ADB) support is for support of devices which
are connected to an ADB port. ADB devices tend to have 4 pins.
if (!machine_is(chrp) && !machine_is(powermac))
return 0;
#endif
+#ifdef CONFIG_PPC64
+ if (!machine_is(powermac))
+ return 0;
+#endif
#ifdef CONFIG_MAC
if (!MACH_IS_MAC)
return 0;
static int __init adbhid_init(void)
{
-#ifndef CONFIG_MAC
+#ifdef CONFIG_PPC32
if (!machine_is(chrp) && !machine_is(powermac))
return 0;
#endif
+#ifdef CONFIG_PPC64
+ if (!machine_is(powermac))
+ return 0;
+#endif
led_request.complete = 1;
config DM_MIRROR
tristate "Mirror target"
depends on BLK_DEV_DM
+ select DM_REGION_HASH_LOG
---help---
Allow volume managers to mirror logical volumes, also
needed for live data migration tools such as 'pvmove'.
If unsure, say N.
+config DM_REGION_HASH_LOG
+ tristate
+ default n
+
+config DM_RAID45
+ tristate "RAID 4/5 target (EXPERIMENTAL)"
+ depends on BLK_DEV_DM && EXPERIMENTAL
+ select ASYNC_XOR
+ select DM_REGION_HASH_LOG
+ ---help---
+ A target that supports RAID4 and RAID5 mappings.
+
+ If unsure, say N.
+
config DM_UEVENT
bool "DM uevents"
depends on BLK_DEV_DM
obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
obj-$(CONFIG_DM_DELAY) += dm-delay.o
obj-$(CONFIG_DM_FLAKEY) += dm-flakey.o
-obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o
+obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o dm-least-pending.o
obj-$(CONFIG_DM_MULTIPATH_QL) += dm-queue-length.o
obj-$(CONFIG_DM_MULTIPATH_ST) += dm-service-time.o
obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o
obj-$(CONFIG_DM_PERSISTENT_DATA) += persistent-data/
-obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o dm-region-hash.o
+obj-$(CONFIG_DM_MIRROR) += dm-mirror.o
+obj-$(CONFIG_DM_REGION_HASH_LOG) += dm-log.o dm-region-hash.o
obj-$(CONFIG_DM_LOG_USERSPACE) += dm-log-userspace.o
obj-$(CONFIG_DM_ZERO) += dm-zero.o
obj-$(CONFIG_DM_RAID) += dm-raid.o
obj-$(CONFIG_DM_THIN_PROVISIONING) += dm-thin-pool.o
obj-$(CONFIG_DM_VERITY) += dm-verity.o
+obj-$(CONFIG_DM_RAID45) += dm-raid45.o dm-memcache.o
ifeq ($(CONFIG_DM_UEVENT),y)
dm-mod-objs += dm-uevent.o
--- /dev/null
+/*
+ * (C) Copyright 2008 Hewlett-Packard Development Company, L.P
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-path-selector.h"
+
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#define DM_MSG_PREFIX "multipath least-pending"
+
+/*-----------------------------------------------------------------
+* Path-handling code, paths are held in lists
+*---------------------------------------------------------------*/
+struct path_info {
+ struct list_head list;
+ struct dm_path *path;
+ unsigned repeat_count;
+ atomic_t io_count;
+};
+
+/*
+ * Release every path_info entry on @paths, unlinking each from the
+ * list before freeing it.  Used on selector destruction for both the
+ * valid and invalid path lists.
+ */
+static void free_paths(struct list_head *paths)
+{
+	struct path_info *pi, *next;
+
+	/* _safe variant: entries are deleted while walking. */
+	list_for_each_entry_safe(pi, next, paths, list) {
+		list_del(&pi->list);
+		kfree(pi);
+	}
+}
+
+/*-----------------------------------------------------------------
+ * Least-pending selector
+ *---------------------------------------------------------------*/
+
+#define LPP_MIN_IO 1
+
+struct selector {
+ struct list_head valid_paths;
+ struct list_head invalid_paths;
+};
+
+/*
+ * Allocate a selector context with empty valid/invalid path lists.
+ * Returns NULL on allocation failure.
+ */
+static struct selector *alloc_selector(void)
+{
+	struct selector *s = kmalloc(sizeof(*s), GFP_KERNEL);
+
+	if (s) {
+		INIT_LIST_HEAD(&s->valid_paths);
+		INIT_LIST_HEAD(&s->invalid_paths);
+	}
+
+	return s;
+}
+
+/*
+ * Path selector constructor: allocate the per-selector context and
+ * store it in ps->context.  Table-level arguments (argc/argv) are
+ * not used by this selector.
+ */
+static int lpp_create(struct path_selector *ps, unsigned argc, char **argv)
+{
+	struct selector *s;
+
+	s = alloc_selector();
+	if (!s)
+		return -ENOMEM;
+
+	ps->context = s;
+	return 0;
+}
+
+/*
+ * Path selector destructor: free all registered paths (valid and
+ * invalid) and the selector context itself.
+ */
+static void lpp_destroy(struct path_selector *ps)
+{
+	struct selector *s = ps->context;
+
+	free_paths(&s->valid_paths);
+	free_paths(&s->invalid_paths);
+	kfree(s);
+	ps->context = NULL;
+}
+
+/*
+ * Emit selector status into @result.
+ *
+ * With @path == NULL the selector-wide argument counts are printed
+ * ("1" for INFO, "0" for TABLE); with a path, INFO prints the path's
+ * repeat count and current pending-I/O count.  Returns the number of
+ * bytes written (via the sz variable DMEMIT updates).
+ */
+static int lpp_status(struct path_selector *ps, struct dm_path *path,
+		      status_type_t type, char *result, unsigned int maxlen)
+{
+	struct path_info *pi;
+	int sz = 0;
+
+	if (!path)
+		switch (type) {
+		case STATUSTYPE_INFO:
+			DMEMIT("1 ");
+			break;
+		case STATUSTYPE_TABLE:
+			DMEMIT("0 ");
+			break;
+		}
+	else {
+		pi = path->pscontext;
+		switch (type) {
+		case STATUSTYPE_INFO:
+			/* repeat_count:pending-io for this path */
+			DMEMIT("%u:%u ", pi->repeat_count,
+			       atomic_read(&pi->io_count));
+			break;
+		case STATUSTYPE_TABLE:
+			break;
+		}
+	}
+
+	return sz;
+}
+
+/*
+ * Called during initialisation to register each path with an
+ * optional repeat_count (number of I/Os issued before the selector
+ * is asked to pick a path again; defaults to LPP_MIN_IO).
+ *
+ * Allocates a path_info, links the path onto the valid list and
+ * stores the path_info in path->pscontext for later lookups.
+ * Returns 0 on success, -EINVAL on bad arguments, -ENOMEM on
+ * allocation failure (with *error set in each case).
+ */
+static int lpp_add_path(struct path_selector *ps, struct dm_path *path,
+			int argc, char **argv, char **error)
+{
+	struct selector *s = ps->context;
+	struct path_info *pi;
+	unsigned repeat_count = LPP_MIN_IO;
+
+	if (argc > 1) {
+		*error = "least-pending ps: incorrect number of arguments";
+		return -EINVAL;
+	}
+
+	/* First path argument is number of I/Os before switching path */
+	if ((argc == 1) && (sscanf(argv[0], "%u", &repeat_count) != 1)) {
+		*error = "least-pending ps: invalid repeat count";
+		return -EINVAL;
+	}
+
+	/* allocate the path */
+	pi = kmalloc(sizeof(*pi), GFP_KERNEL);
+	if (!pi) {
+		*error = "least-pending ps: Error allocating path context";
+		return -ENOMEM;
+	}
+
+	pi->path = path;
+	pi->repeat_count = repeat_count;
+	atomic_set(&pi->io_count, 0);
+
+	path->pscontext = pi;
+
+	list_add(&pi->list, &s->valid_paths);
+
+	return 0;
+}
+
+/*
+ * Mark a path failed: reset its pending-I/O counter and move it to
+ * the invalid list so lpp_select_path() no longer considers it.
+ */
+static void lpp_fail_path(struct path_selector *ps, struct dm_path *p)
+{
+	struct selector *s = ps->context;
+	struct path_info *pi = p->pscontext;
+
+	if (!pi)
+		return;
+
+	/* Outstanding I/O on a failed path is not tracked any further. */
+	atomic_set(&pi->io_count, 0);
+
+	list_move(&pi->list, &s->invalid_paths);
+}
+
+/*
+ * Reinstate a previously failed path by moving it back onto the
+ * valid list.  Returns 0 on success, 1 if the path was never
+ * registered (no pscontext).
+ */
+static int lpp_reinstate_path(struct path_selector *ps, struct dm_path *p)
+{
+	struct selector *s = ps->context;
+	struct path_info *pi = p->pscontext;
+
+	if (!pi)
+		return 1;
+
+	list_move(&pi->list, &s->valid_paths);
+
+	return 0;
+}
+
+/*
+ * Select the valid path with the fewest pending I/Os.
+ *
+ * Walks the valid-path list tracking the entry with the lowest
+ * io_count, bailing out early when a completely idle path is found.
+ * The winner's io_count is bumped here and dropped again in
+ * lpp_end_io().  Returns NULL when no valid path exists.
+ */
+static struct dm_path *lpp_select_path(struct path_selector *ps,
+				       unsigned *repeat_count,
+				       size_t nr_bytes)
+{
+	struct selector *s = ps->context;
+	struct path_info *pi, *least_io_path = NULL;
+
+	if (list_empty(&s->valid_paths))
+		return NULL;
+
+	list_for_each_entry(pi, &s->valid_paths, list) {
+		/*
+		 * BUGFIX: the original comparison was inverted
+		 * (least < pi) and therefore tracked the path with the
+		 * MOST pending I/O, defeating the selector's purpose.
+		 */
+		if (!least_io_path ||
+		    atomic_read(&pi->io_count) <
+		    atomic_read(&least_io_path->io_count))
+			least_io_path = pi;
+		/* An idle path cannot be beaten; stop searching. */
+		if (!atomic_read(&least_io_path->io_count))
+			break;
+	}
+
+	if (!least_io_path)
+		return NULL;
+
+	atomic_inc(&least_io_path->io_count);
+	*repeat_count = least_io_path->repeat_count;
+
+	return least_io_path->path;
+}
+
+/*
+ * I/O completion callback: drop the pending-I/O count taken in
+ * lpp_select_path().  Returns 0 on success, 1 if the path has no
+ * selector context.
+ */
+static int lpp_end_io(struct path_selector *ps, struct dm_path *path,
+		      size_t nr_bytes)
+{
+	struct path_info *pi = NULL;
+
+	pi = path->pscontext;
+	if (!pi)
+		return 1;
+
+	atomic_dec(&pi->io_count);
+
+	return 0;
+}
+
+/* Path selector type registration table for "least-pending". */
+static struct path_selector_type lpp_ps = {
+	.name = "least-pending",
+	.module = THIS_MODULE,
+	.table_args = 1,
+	.info_args = 0,
+	.create = lpp_create,
+	.destroy = lpp_destroy,
+	.status = lpp_status,
+	.add_path = lpp_add_path,
+	.fail_path = lpp_fail_path,
+	.reinstate_path = lpp_reinstate_path,
+	.select_path = lpp_select_path,
+	.end_io = lpp_end_io,
+};
+
+/*
+ * Module init: register the least-pending path selector with
+ * dm-multipath.  Returns the registration result.
+ */
+static int __init dm_lpp_init(void)
+{
+	int r = dm_register_path_selector(&lpp_ps);
+
+	if (r < 0)
+		DMERR("register failed %d", r);
+	else
+		/* Fix: don't claim "loaded" when registration failed. */
+		DMINFO("version 1.0.0 loaded");
+
+	return r;
+}
+
+/* Module unload: unregister the path selector type. */
+static void __exit dm_lpp_exit(void)
+{
+	int r = dm_unregister_path_selector(&lpp_ps);
+
+	if (r < 0)
+		DMERR("unregister failed %d", r);
+}
+
+module_init(dm_lpp_init);
+module_exit(dm_lpp_exit);
+
+MODULE_DESCRIPTION(DM_NAME " least-pending multipath path selector");
+MODULE_AUTHOR("Sakshi Chaitanya Veni <vsakshi@hp.com>");
+MODULE_LICENSE("GPL");
+
--- /dev/null
+/*
+ * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved.
+ *
+ * Module Author: Heinz Mauelshagen <heinzm@redhat.com>
+ *
+ * Device-mapper memory object handling:
+ *
+ * o allocate/free total_pages in a per client page pool.
+ *
+ * o allocate/free memory objects with chunks (1..n) of
+ * pages_per_chunk pages hanging off.
+ *
+ * This file is released under the GPL.
+ */
+
+#define DM_MEM_CACHE_VERSION "0.2"
+
+#include "dm.h"
+#include "dm-memcache.h"
+#include <linux/dm-io.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+struct dm_mem_cache_client {
+ spinlock_t lock;
+ mempool_t *objs_pool;
+ struct page_list *free_list;
+ unsigned objects;
+ unsigned chunks;
+ unsigned pages_per_chunk;
+ unsigned free_pages;
+ unsigned total_pages;
+};
+
+/*
+ * Free pages and page_list elements of client.
+ *
+ * Walks the singly linked @list, freeing each page and its
+ * page_list wrapper.  Every element must have a page attached
+ * (BUG otherwise).
+ */
+static void free_cache_pages(struct page_list *list)
+{
+	while (list) {
+		struct page_list *pl = list;
+
+		list = pl->next;
+		BUG_ON(!pl->page);
+		__free_page(pl->page);
+		kfree(pl);
+	}
+}
+
+/*
+ * Alloc number of pages and page_list elements as required by client.
+ *
+ * Builds a singly linked list of @pages page_list elements, each
+ * with a freshly allocated page (GFP_NOIO, so safe on the block I/O
+ * path).  On any allocation failure everything allocated so far is
+ * released and NULL is returned.
+ */
+static struct page_list *alloc_cache_pages(unsigned pages)
+{
+	struct page_list *pl, *ret = NULL;
+	struct page *page;
+
+	while (pages--) {
+		page = alloc_page(GFP_NOIO);
+		if (!page)
+			goto err;
+
+		pl = kmalloc(sizeof(*pl), GFP_NOIO);
+		if (!pl) {
+			__free_page(page);
+			goto err;
+		}
+
+		/* Push onto the head of the result list. */
+		pl->page = page;
+		pl->next = ret;
+		ret = pl;
+	}
+
+	return ret;
+
+err:
+	free_cache_pages(ret);
+	return NULL;
+}
+
+/*
+ * Allocate page_list elements from the pool to chunks of the memory object.
+ *
+ * Pulls cl->pages_per_chunk pages per chunk off the client's free
+ * list into obj[chunk].pl.  The caller (dm_mem_cache_alloc) has
+ * already reserved the pages under cl->lock, hence the BUG_ON if the
+ * free list unexpectedly runs dry.
+ *
+ * Interrupts stay disabled for the whole walk so the plain
+ * spin_lock() on cl->lock is safe against users running in irq
+ * context.
+ */
+static void alloc_chunks(struct dm_mem_cache_client *cl,
+			 struct dm_mem_cache_object *obj)
+{
+	unsigned chunks = cl->chunks;
+	unsigned long flags;
+
+	/*
+	 * Fix: local_irq_save() already disables local interrupts;
+	 * the redundant local_irq_disable() call was dropped.
+	 */
+	local_irq_save(flags);
+	while (chunks--) {
+		unsigned p = cl->pages_per_chunk;
+
+		obj[chunks].pl = NULL;
+
+		while (p--) {
+			struct page_list *pl;
+
+			/* Take next element from free list */
+			spin_lock(&cl->lock);
+			pl = cl->free_list;
+			BUG_ON(!pl);
+			cl->free_list = pl->next;
+			spin_unlock(&cl->lock);
+
+			pl->next = obj[chunks].pl;
+			obj[chunks].pl = pl;
+		}
+	}
+
+	local_irq_restore(flags);
+}
+
+/*
+ * Free page_list elements putting them back onto free list
+ *
+ * Returns all pages of all chunks of @obj to the client's free list
+ * and bumps cl->free_pages accordingly (the matching decrement
+ * happened in dm_mem_cache_alloc).  Interrupts stay disabled for the
+ * whole walk so the plain spin_lock() on cl->lock is irq-safe.
+ */
+static void free_chunks(struct dm_mem_cache_client *cl,
+			struct dm_mem_cache_object *obj)
+{
+	unsigned chunks = cl->chunks;
+	unsigned long flags;
+	struct page_list *next, *pl;
+
+	/*
+	 * Fix: local_irq_save() already disables local interrupts;
+	 * the redundant local_irq_disable() call was dropped.
+	 */
+	local_irq_save(flags);
+	while (chunks--) {
+		for (pl = obj[chunks].pl; pl; pl = next) {
+			next = pl->next;
+
+			spin_lock(&cl->lock);
+			pl->next = cl->free_list;
+			cl->free_list = pl;
+			cl->free_pages++;
+			spin_unlock(&cl->lock);
+		}
+	}
+
+	local_irq_restore(flags);
+}
+
+/*
+ * Create/destroy dm memory cache client resources.
+ *
+ * Allocates a client with a mempool of @objects object descriptors
+ * (each holding @chunks dm_mem_cache_object entries) and a free list
+ * of objects * chunks * pages_per_chunk pages.  Returns the client
+ * or ERR_PTR(-ENOMEM).
+ *
+ * NOTE(review): objects * chunks * pages_per_chunk is computed in
+ * unsigned arithmetic with no overflow check -- confirm callers pass
+ * bounded values.
+ */
+struct dm_mem_cache_client *
+dm_mem_cache_client_create(unsigned objects, unsigned chunks,
+			   unsigned pages_per_chunk)
+{
+	unsigned total_pages = objects * chunks * pages_per_chunk;
+	struct dm_mem_cache_client *client;
+
+	BUG_ON(!total_pages);
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (!client)
+		return ERR_PTR(-ENOMEM);
+
+	client->objs_pool = mempool_create_kmalloc_pool(objects,
+				chunks * sizeof(struct dm_mem_cache_object));
+	if (!client->objs_pool)
+		goto err;
+
+	client->free_list = alloc_cache_pages(total_pages);
+	if (!client->free_list)
+		goto err1;
+
+	spin_lock_init(&client->lock);
+	client->objects = objects;
+	client->chunks = chunks;
+	client->pages_per_chunk = pages_per_chunk;
+	client->free_pages = client->total_pages = total_pages;
+	return client;
+
+err1:
+	mempool_destroy(client->objs_pool);
+err:
+	kfree(client);
+	return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL(dm_mem_cache_client_create);
+
+/*
+ * Tear down a client: all objects must have been freed beforehand
+ * (free_pages == total_pages, BUG otherwise).
+ */
+void dm_mem_cache_client_destroy(struct dm_mem_cache_client *cl)
+{
+	BUG_ON(cl->free_pages != cl->total_pages);
+	free_cache_pages(cl->free_list);
+	mempool_destroy(cl->objs_pool);
+	kfree(cl);
+}
+EXPORT_SYMBOL(dm_mem_cache_client_destroy);
+
+/*
+ * Grow a clients cache by an amount of pages.
+ *
+ * Allocates @objects worth of pages up front (may sleep), then
+ * splices them onto the head of the free list under the lock and
+ * resizes the object mempool.  Returns 0 or -ENOMEM.
+ *
+ * Don't call from interrupt context!
+ *
+ * NOTE(review): the mempool_resize() return value is ignored here --
+ * confirm that a failed pool resize is acceptable (the pages were
+ * already added).
+ */
+int dm_mem_cache_grow(struct dm_mem_cache_client *cl, unsigned objects)
+{
+	unsigned pages = objects * cl->chunks * cl->pages_per_chunk;
+	struct page_list *pl, *last;
+
+	BUG_ON(!pages);
+	pl = alloc_cache_pages(pages);
+	if (!pl)
+		return -ENOMEM;
+
+	/* Find the tail of the new chain to splice onto the free list. */
+	last = pl;
+	while (last->next)
+		last = last->next;
+
+	spin_lock_irq(&cl->lock);
+	last->next = cl->free_list;
+	cl->free_list = pl;
+	cl->free_pages += pages;
+	cl->total_pages += pages;
+	cl->objects += objects;
+	spin_unlock_irq(&cl->lock);
+
+	mempool_resize(cl->objs_pool, cl->objects, GFP_NOIO);
+	return 0;
+}
+EXPORT_SYMBOL(dm_mem_cache_grow);
+
+/*
+ * Shrink a clients cache by an amount of pages.
+ *
+ * Detaches @objects * chunks * pages_per_chunk pages from the head
+ * of the free list, frees them and shrinks the object mempool.
+ * Returns 0 on success or -ENOMEM if the free list holds fewer
+ * pages than requested.
+ *
+ * NOTE(review): 'pos' is dereferenced without a NULL check, so this
+ * assumes cl->free_list is non-empty on entry -- confirm callers
+ * never shrink an exhausted cache.
+ */
+int dm_mem_cache_shrink(struct dm_mem_cache_client *cl, unsigned objects)
+{
+	int r;
+	unsigned pages = objects * cl->chunks * cl->pages_per_chunk, p = pages;
+	unsigned long flags;
+	struct page_list *last = NULL, *pl, *pos;
+
+	BUG_ON(!pages);
+
+	spin_lock_irqsave(&cl->lock, flags);
+	/* Walk 'pages' elements forward; 'last' trails one behind 'pos'. */
+	pl = pos = cl->free_list;
+	while (p-- && pos->next) {
+		last = pos;
+		pos = pos->next;
+	}
+
+	/* ++p is non-zero iff the walk ran out of list before 'pages'. */
+	if (++p)
+		r = -ENOMEM;
+	else {
+		r = 0;
+		/* Detach the leading chain [pl .. last] from the free list. */
+		cl->free_list = pos;
+		cl->free_pages -= pages;
+		cl->total_pages -= pages;
+		cl->objects -= objects;
+		last->next = NULL;
+	}
+	spin_unlock_irqrestore(&cl->lock, flags);
+
+	if (!r) {
+		free_cache_pages(pl);
+		mempool_resize(cl->objs_pool, cl->objects, GFP_NOIO);
+	}
+
+	return r;
+}
+EXPORT_SYMBOL(dm_mem_cache_shrink);
+
+/*
+ * Allocate/free a memory object
+ *
+ * Reserves chunks * pages_per_chunk pages under the lock, then pulls
+ * them off the free list via alloc_chunks().  Returns the object or
+ * ERR_PTR(-ENOMEM) if the pool or the page reservation fails.
+ *
+ * NOTE(review): the original comment claims this "can be called from
+ * interrupt context", but mempool_alloc(GFP_NOIO) may sleep --
+ * confirm the intended calling context.
+ */
+struct dm_mem_cache_object *dm_mem_cache_alloc(struct dm_mem_cache_client *cl)
+{
+	int r = 0;
+	unsigned pages = cl->chunks * cl->pages_per_chunk;
+	unsigned long flags;
+	struct dm_mem_cache_object *obj;
+
+	obj = mempool_alloc(cl->objs_pool, GFP_NOIO);
+	if (!obj)
+		return ERR_PTR(-ENOMEM);
+
+	/* Reserve the pages first; alloc_chunks() BUGs if they vanish. */
+	spin_lock_irqsave(&cl->lock, flags);
+	if (pages > cl->free_pages)
+		r = -ENOMEM;
+	else
+		cl->free_pages -= pages;
+	spin_unlock_irqrestore(&cl->lock, flags);
+
+	if (r) {
+		mempool_free(obj, cl->objs_pool);
+		return ERR_PTR(r);
+	}
+
+	alloc_chunks(cl, obj);
+	return obj;
+}
+EXPORT_SYMBOL(dm_mem_cache_alloc);
+
+/*
+ * Return @obj's pages to the client's free list and the object
+ * descriptor to the mempool.
+ */
+void dm_mem_cache_free(struct dm_mem_cache_client *cl,
+		       struct dm_mem_cache_object *obj)
+{
+	free_chunks(cl, obj);
+	mempool_free(obj, cl->objs_pool);
+}
+EXPORT_SYMBOL(dm_mem_cache_free);
+
+MODULE_DESCRIPTION(DM_NAME " dm memory cache");
+MODULE_AUTHOR("Heinz Mauelshagen <heinzm@redhat.com>");
+MODULE_LICENSE("GPL");
--- /dev/null
+/*
+ * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved.
+ *
+ * Module Author: Heinz Mauelshagen <Mauelshagen@RedHat.com>
+ *
+ * Device-mapper memory object handling:
+ *
+ * o allocate/free total_pages in a per client page pool.
+ *
+ * o allocate/free memory objects with chunks (1..n) of
+ * pages_per_chunk pages hanging off.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef _DM_MEM_CACHE_H
+#define _DM_MEM_CACHE_H
+
+#define DM_MEM_CACHE_H_VERSION "0.1"
+
+#include "dm.h"
+#include <linux/dm-io.h>
+
+/* Return the @p'th element of page list @pl, or NULL if it is shorter. */
+static inline struct page_list *pl_elem(struct page_list *pl, unsigned p)
+{
+	for (; pl && p; p--)
+		pl = pl->next;
+
+	return pl;
+}
+
+struct dm_mem_cache_object {
+ struct page_list *pl; /* Dynamically allocated array */
+ void *private; /* Caller context reference */
+};
+
+struct dm_mem_cache_client;
+
+/*
+ * Create/destroy dm memory cache client resources.
+ *
+ * On creation, a number of @objects with @chunks of
+ * @pages_per_chunk pages will be allocated.
+ */
+struct dm_mem_cache_client *
+dm_mem_cache_client_create(unsigned objects, unsigned chunks,
+ unsigned pages_per_chunk);
+void dm_mem_cache_client_destroy(struct dm_mem_cache_client *client);
+
+/*
+ * Grow/shrink a dm memory cache client resources
+ * by @objects amount of objects.
+ */
+int dm_mem_cache_grow(struct dm_mem_cache_client *client, unsigned objects);
+int dm_mem_cache_shrink(struct dm_mem_cache_client *client, unsigned objects);
+
+/*
+ * Allocate/free a memory object
+ *
+ * On allocation one object with an amount of chunks and
+ * an amount of pages per chunk will be returned on success.
+ */
+struct dm_mem_cache_object *
+dm_mem_cache_alloc(struct dm_mem_cache_client *client);
+void dm_mem_cache_free(struct dm_mem_cache_client *client,
+ struct dm_mem_cache_object *object);
+
+#endif
struct list_head pgpaths;
};
+#define FEATURE_NO_PARTITIONS 1
+
/* Multipath context */
struct multipath {
struct list_head list;
unsigned pg_init_retries; /* Number of times to retry pg_init */
unsigned pg_init_count; /* Number of times pg_init called */
unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */
+ unsigned features; /* Additional selected features */
struct work_struct process_queued_ios;
struct list_head queued_ios;
static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
struct pgpath *pgpath, *tmp;
- struct multipath *m = ti->private;
list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
list_del(&pgpath->list);
- if (m->hw_handler_name)
- scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
dm_put_device(ti, pgpath->path.dev);
free_pgpath(pgpath);
}
m->current_pgpath = path_to_pgpath(path);
+ if (!m->current_pgpath->path.dev) {
+ m->current_pgpath = NULL;
+ return -ENODEV;
+ }
+
if (m->current_pg != pg)
__switch_pg(m, m->current_pgpath);
{
int r;
struct pgpath *p;
+ char *path;
struct multipath *m = ti->private;
/* we need at least a path arg */
if (!p)
return ERR_PTR(-ENOMEM);
- r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
+ path = dm_shift_arg(as);
+ r = dm_get_device(ti, path, dm_table_get_mode(ti->table),
&p->path.dev);
if (r) {
- ti->error = "error getting device";
- goto bad;
+ unsigned major, minor;
+
+ /* Try to add a failed device */
+ if (r == -ENXIO && sscanf(path, "%u:%u", &major, &minor) == 2) {
+ dev_t dev;
+
+ /* Extract the major/minor numbers */
+ dev = MKDEV(major, minor);
+ if (MAJOR(dev) != major || MINOR(dev) != minor) {
+ /* Nice try, didn't work */
+ DMWARN("Invalid device path %s", path);
+ ti->error = "error converting devnum";
+ goto bad;
+ }
+ DMWARN("adding disabled device %d:%d", major, minor);
+ p->path.dev = NULL;
+ format_dev_t(p->path.pdev, dev);
+ p->is_active = 0;
+ } else {
+ ti->error = "error getting device";
+ goto bad;
+ }
+ } else {
+ memcpy(p->path.pdev, p->path.dev->name, 16);
}
- if (m->hw_handler_name) {
+ if (p->path.dev) {
struct request_queue *q = bdev_get_queue(p->path.dev->bdev);
- r = scsi_dh_attach(q, m->hw_handler_name);
- if (r == -EBUSY) {
- /*
- * Already attached to different hw_handler,
- * try to reattach with correct one.
- */
- scsi_dh_detach(q);
+ if (m->hw_handler_name) {
r = scsi_dh_attach(q, m->hw_handler_name);
- }
-
- if (r < 0) {
- ti->error = "error attaching hardware handler";
- dm_put_device(ti, p->path.dev);
- goto bad;
+ if (r == -EBUSY) {
+ /*
+ * Already attached to different hw_handler,
+ * try to reattach with correct one.
+ */
+ scsi_dh_detach(q);
+ r = scsi_dh_attach(q, m->hw_handler_name);
+ }
+ if (r < 0) {
+ ti->error = "error attaching hardware handler";
+ dm_put_device(ti, p->path.dev);
+ goto bad;
+ }
+ } else {
+ /* Play safe and detach hardware handler */
+ scsi_dh_detach(q);
}
if (m->hw_handler_params) {
goto bad;
}
+ if (!p->is_active) {
+ ps->type->fail_path(ps, &p->path);
+ p->fail_count++;
+ m->nr_valid_paths--;
+ }
return p;
bad:
continue;
}
+ if (!strcasecmp(arg_name, "no_partitions")) {
+ m->features |= FEATURE_NO_PARTITIONS;
+ continue;
+ }
if (!strcasecmp(arg_name, "pg_init_retries") &&
(argc >= 1)) {
r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
if (!pgpath->is_active)
goto out;
- DMWARN("Failing path %s.", pgpath->path.dev->name);
+ DMWARN("Failing path %s.", pgpath->path.pdev);
pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
pgpath->is_active = 0;
m->current_pgpath = NULL;
dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
- pgpath->path.dev->name, m->nr_valid_paths);
+ pgpath->path.pdev, m->nr_valid_paths);
schedule_work(&m->trigger_event);
if (pgpath->is_active)
goto out;
+ if (!pgpath->path.dev) {
+ DMWARN("Cannot reinstate disabled path %s", pgpath->path.pdev);
+ r = -ENODEV;
+ goto out;
+ }
+
if (!pgpath->pg->ps.type->reinstate_path) {
DMWARN("Reinstate path not supported by path selector %s",
pgpath->pg->ps.type->name);
}
dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
- pgpath->path.dev->name, m->nr_valid_paths);
+ pgpath->path.pdev, m->nr_valid_paths);
schedule_work(&m->trigger_event);
struct pgpath *pgpath;
struct priority_group *pg;
+ if (!dev)
+ return 0;
+
list_for_each_entry(pg, &m->priority_groups, list) {
list_for_each_entry(pgpath, &pg->pgpaths, list) {
if (pgpath->path.dev == dev)
errors = 0;
break;
}
- DMERR("Could not failover the device: Handler scsi_dh_%s "
- "Error %d.", m->hw_handler_name, errors);
+ DMERR("Count not failover device %s: Handler scsi_dh_%s "
+ "was not loaded.", pgpath->path.dev->name,
+ m->hw_handler_name);
/*
* Fail path for now, so we do not ping pong
*/
*/
bypass_pg(m, pg, 1);
break;
+ case SCSI_DH_DEV_OFFLINED:
+ DMWARN("Device %s offlined.", pgpath->path.dev->name);
+ errors = 0;
+ break;
case SCSI_DH_RETRY:
/* Wait before retrying. */
delay_retry = 1;
spin_lock_irqsave(&m->lock, flags);
if (errors) {
if (pgpath == m->current_pgpath) {
- DMERR("Could not failover device. Error %d.", errors);
+ DMERR("Could not failover device %s, error %d.",
+ pgpath->path.dev->name, errors);
m->current_pgpath = NULL;
m->current_pg = NULL;
}
struct pgpath *pgpath =
container_of(work, struct pgpath, activate_path.work);
- scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
- pg_init_done, pgpath);
+ if (pgpath->path.dev)
+ scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
+ pg_init_done, pgpath);
}
/*
else {
DMEMIT("%u ", m->queue_if_no_path +
(m->pg_init_retries > 0) * 2 +
- (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2);
+ (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
+ (m->features & FEATURE_NO_PARTITIONS));
if (m->queue_if_no_path)
DMEMIT("queue_if_no_path ");
if (m->pg_init_retries)
DMEMIT("pg_init_retries %u ", m->pg_init_retries);
+ if (m->features & FEATURE_NO_PARTITIONS)
+ DMEMIT("no_partitions ");
if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
}
pg->ps.type->info_args);
list_for_each_entry(p, &pg->pgpaths, list) {
- DMEMIT("%s %s %u ", p->path.dev->name,
+ DMEMIT("%s %s %u ", p->path.pdev,
p->is_active ? "A" : "F",
p->fail_count);
if (pg->ps.type->status)
pg->ps.type->table_args);
list_for_each_entry(p, &pg->pgpaths, list) {
- DMEMIT("%s ", p->path.dev->name);
+ DMEMIT("%s ", p->path.pdev);
if (pg->ps.type->status)
sz += pg->ps.type->status(&pg->ps,
&p->path, type, result + sz,
if (!m->current_pgpath)
__choose_pgpath(m, 0);
- if (m->current_pgpath) {
+ if (m->current_pgpath && m->current_pgpath->path.dev) {
bdev = m->current_pgpath->path.dev->bdev;
mode = m->current_pgpath->path.dev->mode;
}
struct dm_dev;
struct dm_path {
+ char pdev[16]; /* Requested physical device */
struct dm_dev *dev; /* Read-only */
void *pscontext; /* For path-selector use */
};
--- /dev/null
+/*
+ * Copyright (C) 2005-2009 Red Hat, Inc. All rights reserved.
+ *
+ * Module Author: Heinz Mauelshagen <heinzm@redhat.com>
+ *
+ * This file is released under the GPL.
+ *
+ *
+ * Linux 2.6 Device Mapper RAID4 and RAID5 target.
+ *
+ * Tested-by: Intel; Marcin.Labun@intel.com, krzysztof.wojcik@intel.com
+ *
+ *
+ * Supports the following ATARAID vendor solutions (and SNIA DDF):
+ *
+ * Adaptec HostRAID ASR
+ * SNIA DDF1
+ * Highpoint 37x
+ * Highpoint 45x
+ * Intel IMSM
+ * Jmicron ATARAID
+ * LSI Logic MegaRAID
+ * NVidia RAID
+ * Promise FastTrack
+ * Silicon Image Medley
+ * VIA Software RAID
+ *
+ * via the dmraid application.
+ *
+ *
+ * Features:
+ *
+ * o RAID4 with dedicated and selectable parity device
+ * o RAID5 with rotating parity (left+right, symmetric+asymmetric)
+ * o recovery of out of sync device for initial
+ * RAID set creation or after dead drive replacement
+ * o run time optimization of xor algorithm used to calculate parity
+ *
+ *
+ * Thanks to MD for:
+ * o the raid address calculation algorithm
+ * o the base of the biovec <-> page list copier.
+ *
+ *
+ * Uses region hash to keep track of how many writes are in flight to
+ * regions in order to use dirty log to keep state of regions to recover:
+ *
+ * o clean regions (those which are synchronized
+ * and don't have write io in flight)
+ * o dirty regions (those with write io in flight)
+ *
+ *
+ * On startup, any dirty regions are migrated to the
+ * 'nosync' state and are subject to recovery by the daemon.
+ *
+ * See raid_ctr() for table definition.
+ *
+ * ANALYZEME: recovery bandwidth
+ */
+
+static const char *version = "v0.2597k";
+
+#include "dm.h"
+#include "dm-memcache.h"
+#include "dm-raid45.h"
+
+#include <linux/kernel.h>
+#include <linux/vmalloc.h>
+#include <linux/raid/xor.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <linux/bio.h>
+#include <linux/dm-io.h>
+#include <linux/dm-dirty-log.h>
+#include <linux/dm-region-hash.h>
+
+
+/*
+ * Configurable parameters
+ */
+
+/* Minimum/maximum and default # of selectable stripes. */
+#define STRIPES_MIN 8
+#define STRIPES_MAX 16384
+#define STRIPES_DEFAULT 80
+
+/* Maximum and default chunk size in sectors if not set in constructor. */
+#define CHUNK_SIZE_MIN 8
+#define CHUNK_SIZE_MAX 16384
+#define CHUNK_SIZE_DEFAULT 64
+
+/* Default io size in sectors if not set in constructor. */
+#define IO_SIZE_MIN CHUNK_SIZE_MIN
+#define IO_SIZE_DEFAULT IO_SIZE_MIN
+
+/* Recover io size default in sectors. */
+#define RECOVER_IO_SIZE_MIN 64
+#define RECOVER_IO_SIZE_DEFAULT 256
+
+/* Default, minimum and maximum percentage of recover io bandwidth. */
+#define BANDWIDTH_DEFAULT 10
+#define BANDWIDTH_MIN 1
+#define BANDWIDTH_MAX 100
+
+/* # of parallel recovered regions */
+#define RECOVERY_STRIPES_MIN 1
+#define RECOVERY_STRIPES_MAX 64
+#define RECOVERY_STRIPES_DEFAULT RECOVERY_STRIPES_MIN
+/*
+ * END Configurable parameters
+ */
+
+#define TARGET "dm-raid45"
+#define DAEMON "kraid45d"
+#define DM_MSG_PREFIX TARGET
+
+#define SECTORS_PER_PAGE (PAGE_SIZE >> SECTOR_SHIFT)
+
+/* Amount/size for __xor(). */
+#define XOR_SIZE PAGE_SIZE
+
+/* Ticks to run xor_speed() test for. */
+#define XOR_SPEED_TICKS 5
+
+/* Check value in range. */
+#define range_ok(i, min, max) (i >= min && i <= max)
+
+/* Structure access macros. */
+/* Derive raid_set from stripe_cache pointer. */
+#define RS(x) container_of(x, struct raid_set, sc)
+
+/* Page reference. */
+#define PAGE(stripe, p) ((stripe)->obj[p].pl->page)
+
+/* Stripe chunk reference. */
+#define CHUNK(stripe, p) ((stripe)->chunk + p)
+
+/* Bio list reference. */
+#define BL(stripe, p, rw) (stripe->chunk[p].bl + rw)
+#define BL_CHUNK(chunk, rw) (chunk->bl + rw)
+
+/* Page list reference. */
+#define PL(stripe, p) (stripe->obj[p].pl)
+/* END: structure access macros. */
+
+/* Factor out to dm-bio-list.h */
+/*
+ * Prepend @bio to bio list @bl (LIFO push), setting the tail pointer
+ * when the list was previously empty.
+ */
+static inline void bio_list_push(struct bio_list *bl, struct bio *bio)
+{
+	bio->bi_next = bl->head;
+	bl->head = bio;
+
+	if (!bl->tail)
+		bl->tail = bio;
+}
+
+/* Factor out to dm.h */
+#define TI_ERR_RET(str, ret) \
+ do { ti->error = str; return ret; } while (0);
+#define TI_ERR(str) TI_ERR_RET(str, -EINVAL)
+
+/* Macro to define access IO flags access inline functions. */
+#define BITOPS(name, what, var, flag) \
+static inline int TestClear ## name ## what(struct var *v) \
+{ return test_and_clear_bit(flag, &v->io.flags); } \
+static inline int TestSet ## name ## what(struct var *v) \
+{ return test_and_set_bit(flag, &v->io.flags); } \
+static inline void Clear ## name ## what(struct var *v) \
+{ clear_bit(flag, &v->io.flags); } \
+static inline void Set ## name ## what(struct var *v) \
+{ set_bit(flag, &v->io.flags); } \
+static inline int name ## what(struct var *v) \
+{ return test_bit(flag, &v->io.flags); }
+
+/*-----------------------------------------------------------------
+ * Stripe cache
+ *
+ * Cache for all reads and writes to raid sets (operational or degraded)
+ *
+ * We need to run all data to and from a RAID set through this cache,
+ * because parity chunks need to get calculated from data chunks
+ * or, in the degraded/resynchronization case, missing chunks need
+ * to be reconstructed using the other chunks of the stripe.
+ *---------------------------------------------------------------*/
+/* Unique kmem cache name suffix # counter. */
+static atomic_t _stripe_sc_nr = ATOMIC_INIT(-1); /* kmem cache # counter. */
+
+/* A chunk within a stripe (holds bios hanging off). */
+/* IO status flags for chunks of a stripe. */
+enum chunk_flags {
+ CHUNK_DIRTY, /* Pages of chunk dirty; need writing. */
+ CHUNK_ERROR, /* IO error on any chunk page. */
+ CHUNK_IO, /* Allow/prohibit IO on chunk pages. */
+ CHUNK_LOCKED, /* Chunk pages locked during IO. */
+ CHUNK_MUST_IO, /* Chunk must io. */
+ CHUNK_UNLOCK, /* Enforce chunk unlock. */
+ CHUNK_UPTODATE, /* Chunk pages are uptodate. */
+};
+
+enum bl_type {
+ WRITE_QUEUED = WRITE + 1,
+ WRITE_MERGED,
+ NR_BL_TYPES, /* Must be last one! */
+};
+struct stripe_chunk {
+ atomic_t cnt; /* Reference count. */
+ struct stripe *stripe; /* Backpointer to stripe for endio(). */
+ /* Bio lists for reads, writes, and writes merged. */
+ struct bio_list bl[NR_BL_TYPES];
+ struct {
+ unsigned long flags; /* IO status flags. */
+ } io;
+};
+
+/* Define chunk bit operations. */
+BITOPS(Chunk, Dirty, stripe_chunk, CHUNK_DIRTY)
+BITOPS(Chunk, Error, stripe_chunk, CHUNK_ERROR)
+BITOPS(Chunk, Io, stripe_chunk, CHUNK_IO)
+BITOPS(Chunk, Locked, stripe_chunk, CHUNK_LOCKED)
+BITOPS(Chunk, MustIo, stripe_chunk, CHUNK_MUST_IO)
+BITOPS(Chunk, Unlock, stripe_chunk, CHUNK_UNLOCK)
+BITOPS(Chunk, Uptodate, stripe_chunk, CHUNK_UPTODATE)
+
+/*
+ * Stripe linked list indexes. Keep order, because the stripe
+ * and the stripe cache rely on the first 3!
+ */
+enum list_types {
+ LIST_FLUSH, /* Stripes to flush for io. */
+ LIST_ENDIO, /* Stripes to endio. */
+ LIST_LRU, /* Least recently used stripes. */
+ SC_NR_LISTS, /* # of lists in stripe cache. */
+ LIST_HASH = SC_NR_LISTS, /* Hashed stripes. */
+ LIST_RECOVER = LIST_HASH, /* For recovery type stripes only. */
+ STRIPE_NR_LISTS,/* To size array in struct stripe. */
+};
+
+/* Addressing region recovery. */
+struct recover_addr {
+ struct dm_region *reg; /* Actual region to recover. */
+ sector_t pos; /* Position within region to recover. */
+ sector_t end; /* End of region to recover. */
+};
+
+/* A stripe: the io object to handle all reads and writes to a RAID set. */
+struct stripe {
+ atomic_t cnt; /* Reference count. */
+ struct stripe_cache *sc; /* Backpointer to stripe cache. */
+
+ /*
+ * 4 linked lists:
+ * o io list to flush io
+ * o endio list
+ * o LRU list to put stripes w/o reference count on
+ * o stripe cache hash
+ */
+ struct list_head lists[STRIPE_NR_LISTS];
+
+ sector_t key; /* Hash key. */
+ region_t region; /* Region stripe is mapped to. */
+
+ struct {
+ unsigned long flags; /* Stripe state flags (see below). */
+
+ /*
+ * Pending ios in flight:
+ *
+ * used to control move of stripe to endio list
+ */
+ atomic_t pending;
+
+ /* Sectors to read and write for multi page stripe sets. */
+ unsigned size;
+ } io;
+
+ /* Address region recovery. */
+ struct recover_addr *recover;
+
+ /* Lock on stripe (Future: for clustering). */
+ void *lock;
+
+ struct {
+ unsigned short parity; /* Parity chunk index. */
+ short recover; /* Recovery chunk index. */
+ } idx;
+
+ /*
+ * This stripe's memory cache object (dm-mem-cache);
+ * i.e. the io chunk pages.
+ */
+ struct dm_mem_cache_object *obj;
+
+ /* Array of stripe sets (dynamically allocated). */
+ struct stripe_chunk chunk[0];
+};
+
+/* States stripes can be in (flags field). */
+enum stripe_states {
+ STRIPE_ERROR, /* io error on stripe. */
+ STRIPE_MERGED, /* Writes got merged to be written. */
+ STRIPE_RBW, /* Read-before-write stripe. */
+ STRIPE_RECONSTRUCT, /* Reconstruct of a missing chunk required. */
+ STRIPE_RECONSTRUCTED, /* Reconstructed of a missing chunk. */
+ STRIPE_RECOVER, /* Stripe used for RAID set recovery. */
+};
+
+/* Define stripe bit operations. */
+BITOPS(Stripe, Error, stripe, STRIPE_ERROR)
+BITOPS(Stripe, Merged, stripe, STRIPE_MERGED)
+BITOPS(Stripe, RBW, stripe, STRIPE_RBW)
+BITOPS(Stripe, Reconstruct, stripe, STRIPE_RECONSTRUCT)
+BITOPS(Stripe, Reconstructed, stripe, STRIPE_RECONSTRUCTED)
+BITOPS(Stripe, Recover, stripe, STRIPE_RECOVER)
+
+/* A stripe hash. */
+struct stripe_hash {
+ struct list_head *hash;
+ unsigned buckets;
+ unsigned mask;
+ unsigned prime;
+ unsigned shift;
+};
+
+enum sc_lock_types {
+ LOCK_ENDIO, /* Protect endio list. */
+ NR_LOCKS, /* To size array in struct stripe_cache. */
+};
+
+/* A stripe cache. */
+struct stripe_cache {
+ /* Stripe hash. */
+ struct stripe_hash hash;
+
+ spinlock_t locks[NR_LOCKS]; /* Locks to protect lists. */
+
+ /* Stripes with io to flush, stripes to endio and LRU lists. */
+ struct list_head lists[SC_NR_LISTS];
+
+ /* Slab cache to allocate stripes from. */
+ struct {
+ struct kmem_cache *cache; /* Cache itself. */
+ char name[32]; /* Unique name. */
+ } kc;
+
+ struct dm_io_client *dm_io_client; /* dm-io client resource context. */
+
+ /* dm-mem-cache client resource context. */
+ struct dm_mem_cache_client *mem_cache_client;
+
+ int stripes_parm; /* # stripes parameter from constructor. */
+ atomic_t stripes; /* actual # of stripes in cache. */
+ atomic_t stripes_to_set; /* # of stripes to resize cache to. */
+ atomic_t stripes_last; /* last # of stripes in cache. */
+ atomic_t active_stripes; /* actual # of active stripes in cache. */
+
+ /* REMOVEME: */
+ atomic_t active_stripes_max; /* actual # of active stripes in cache. */
+};
+
+/* Flag specs for raid_dev. */
+enum raid_dev_flags {
+	DEV_FAILED,	/* Device failed. */
+	DEV_IO_QUEUED,	/* Io got queued to device. */
+};
+
+/* The raid device in a set. */
+struct raid_dev {
+ struct dm_dev *dev;
+ sector_t start; /* Offset to map to. */
+ struct { /* Using struct to be able to BITOPS(). */
+ unsigned long flags; /* raid_dev_flags. */
+ } io;
+};
+
+BITOPS(Dev, Failed, raid_dev, DEV_FAILED)
+BITOPS(Dev, IoQueued, raid_dev, DEV_IO_QUEUED)
+
+/* Flags spec for raid_set. */
+enum raid_set_flags {
+ RS_CHECK_OVERWRITE, /* Check for chunk overwrites. */
+ RS_DEAD, /* RAID set inoperational. */
+ RS_DEAD_ENDIO_MESSAGE, /* RAID set dead endio one-off message. */
+ RS_DEGRADED, /* Io errors on RAID device. */
+ RS_DEVEL_STATS, /* REMOVEME: display status information. */
+ RS_ENFORCE_PARITY_CREATION,/* Enforce parity creation. */
+ RS_PROHIBIT_WRITES, /* Prohibit writes on device failure. */
+ RS_RECOVER, /* Do recovery. */
+ RS_RECOVERY_BANDWIDTH, /* Allow recovery bandwidth (delayed bios). */
+ RS_SC_BUSY, /* Stripe cache busy -> send an event. */
+ RS_SUSPEND, /* Suspend RAID set. */
+};
+
+/* REMOVEME: devel stats counters. */
+enum stats_types {
+ S_BIOS_READ,
+ S_BIOS_ADDED_READ,
+ S_BIOS_ENDIO_READ,
+ S_BIOS_WRITE,
+ S_BIOS_ADDED_WRITE,
+ S_BIOS_ENDIO_WRITE,
+ S_CAN_MERGE,
+ S_CANT_MERGE,
+ S_CONGESTED,
+ S_DM_IO_READ,
+ S_DM_IO_WRITE,
+ S_BANDWIDTH,
+ S_BARRIER,
+ S_BIO_COPY_PL_NEXT,
+ S_DEGRADED,
+ S_DELAYED_BIOS,
+ S_FLUSHS,
+ S_HITS_1ST,
+ S_IOS_POST,
+ S_INSCACHE,
+ S_MAX_LOOKUP,
+ S_CHUNK_LOCKED,
+ S_NO_BANDWIDTH,
+ S_NOT_CONGESTED,
+ S_NO_RW,
+ S_NOSYNC,
+ S_OVERWRITE,
+ S_PROHIBITCHUNKIO,
+ S_RECONSTRUCT_EI,
+ S_RECONSTRUCT_DEV,
+ S_RECONSTRUCT_SET,
+ S_RECONSTRUCTED,
+ S_REQUEUE,
+ S_STRIPE_ERROR,
+ S_SUM_DELAYED_BIOS,
+ S_XORS,
+ S_NR_STATS, /* # of stats counters. Must be last! */
+};
+
+/* Status type -> string mappings. */
+struct stats_map {
+ const enum stats_types type;
+ const char *str;
+};
+
+static struct stats_map stats_map[] = {
+ { S_BIOS_READ, "r=" },
+ { S_BIOS_ADDED_READ, "/" },
+ { S_BIOS_ENDIO_READ, "/" },
+ { S_BIOS_WRITE, " w=" },
+ { S_BIOS_ADDED_WRITE, "/" },
+ { S_BIOS_ENDIO_WRITE, "/" },
+ { S_DM_IO_READ, " rc=" },
+ { S_DM_IO_WRITE, " wc=" },
+ { S_BANDWIDTH, "\nbw=" },
+ { S_NO_BANDWIDTH, " no_bw=" },
+ { S_BARRIER, "\nbarrier=" },
+ { S_BIO_COPY_PL_NEXT, "\nbio_cp_next=" },
+ { S_CAN_MERGE, "\nmerge=" },
+ { S_CANT_MERGE, "/no_merge=" },
+ { S_CHUNK_LOCKED, "\nchunk_locked=" },
+ { S_CONGESTED, "\ncgst=" },
+ { S_NOT_CONGESTED, "/not_cgst=" },
+ { S_DEGRADED, "\ndegraded=" },
+ { S_DELAYED_BIOS, "\ndel_bios=" },
+ { S_SUM_DELAYED_BIOS, "/sum_del_bios=" },
+ { S_FLUSHS, "\nflushs=" },
+ { S_HITS_1ST, "\nhits_1st=" },
+ { S_IOS_POST, " ios_post=" },
+ { S_INSCACHE, " inscache=" },
+ { S_MAX_LOOKUP, " maxlookup=" },
+ { S_NO_RW, "\nno_rw=" },
+ { S_NOSYNC, " nosync=" },
+ { S_OVERWRITE, " ovr=" },
+ { S_PROHIBITCHUNKIO, " prhbt_io=" },
+ { S_RECONSTRUCT_EI, "\nrec_ei=" },
+ { S_RECONSTRUCT_DEV, " rec_dev=" },
+ { S_RECONSTRUCT_SET, " rec_set=" },
+ { S_RECONSTRUCTED, " rec=" },
+ { S_REQUEUE, " requeue=" },
+ { S_STRIPE_ERROR, " stripe_err=" },
+ { S_XORS, " xors=" },
+};
+
+/*
+ * A RAID set.
+ */
+#define dm_rh_client dm_region_hash
+enum count_type { IO_WORK = 0, IO_RECOVER, IO_NR_COUNT };
+typedef void (*xor_function_t)(unsigned count, unsigned long **data);
+struct raid_set {
+ struct dm_target *ti; /* Target pointer. */
+
+ struct {
+ unsigned long flags; /* State flags. */
+ struct mutex in_lock; /* Protects central input list below. */
+ struct mutex xor_lock; /* Protects xor algorithm set. */
+ struct bio_list in; /* Pending ios (central input list). */
+ struct bio_list work; /* ios work set. */
+ wait_queue_head_t suspendq; /* suspend synchronization. */
+ atomic_t in_process; /* counter of queued bios (suspendq). */
+ atomic_t in_process_max;/* counter of queued bios max. */
+
+ /* io work. */
+ struct workqueue_struct *wq;
+ struct delayed_work dws_do_raid; /* For main worker. */
+ struct work_struct ws_do_table_event; /* For event worker. */
+ } io;
+
+ /* Stripe locking abstraction. */
+ struct dm_raid45_locking_type *locking;
+
+ struct stripe_cache sc; /* Stripe cache for this set. */
+
+ /* Xor optimization. */
+ struct {
+ struct xor_func *f;
+ unsigned chunks;
+ unsigned speed;
+ } xor;
+
+ /* Recovery parameters. */
+ struct recover {
+ struct dm_dirty_log *dl; /* Dirty log. */
+ struct dm_rh_client *rh; /* Region hash. */
+
+ struct dm_io_client *dm_io_client; /* recovery dm-io client. */
+ /* dm-mem-cache client resource context for recovery stripes. */
+ struct dm_mem_cache_client *mem_cache_client;
+
+ struct list_head stripes; /* List of recovery stripes. */
+
+ region_t nr_regions;
+ region_t nr_regions_to_recover;
+ region_t nr_regions_recovered;
+ unsigned long start_jiffies;
+ unsigned long end_jiffies;
+
+ unsigned bandwidth; /* Recovery bandwidth [%]. */
+ unsigned bandwidth_work; /* Recovery bandwidth [factor]. */
+ unsigned bandwidth_parm; /* " constructor parm. */
+ unsigned io_size; /* recovery io size <= region size. */
+ unsigned io_size_parm; /* recovery io size ctr parameter. */
+ unsigned recovery; /* Recovery allowed/prohibited. */
+ unsigned recovery_stripes; /* # of parallel recovery stripes. */
+
+ /* recovery io throttling. */
+ atomic_t io_count[IO_NR_COUNT]; /* counter recover/regular io.*/
+ unsigned long last_jiffies;
+ } recover;
+
+ /* RAID set parameters. */
+ struct {
+ struct raid_type *raid_type; /* RAID type (eg, RAID4). */
+ unsigned raid_parms; /* # variable raid parameters. */
+
+ unsigned chunk_size; /* Sectors per chunk. */
+ unsigned chunk_size_parm;
+ unsigned chunk_shift; /* rsector chunk size shift. */
+
+ unsigned io_size; /* Sectors per io. */
+ unsigned io_size_parm;
+ unsigned io_mask; /* Mask for bio_copy_page_list(). */
+ unsigned io_inv_mask; /* Mask for raid_address(). */
+
+ sector_t sectors_per_dev; /* Sectors per device. */
+
+ atomic_t failed_devs; /* Amount of devices failed. */
+
+ /* Index of device to initialize. */
+ int dev_to_init;
+ int dev_to_init_parm;
+
+ /* Raid devices dynamically allocated. */
+ unsigned raid_devs; /* # of RAID devices below. */
+ unsigned data_devs; /* # of RAID data devices. */
+
+ int ei; /* index of failed RAID device. */
+
+ /* Index of dedicated parity device (i.e. RAID4). */
+ int pi;
+ int pi_parm; /* constructor parm for status output. */
+ } set;
+
+ /* REMOVEME: devel stats counters. */
+ atomic_t stats[S_NR_STATS];
+
+ /* Dynamically allocated temporary pointers for xor(). */
+ unsigned long **data;
+
+ /* Dynamically allocated RAID devices. Alignment? */
+ struct raid_dev dev[0];
+};
+
+/* Define RAID set bit operations. */
+BITOPS(RS, Bandwidth, raid_set, RS_RECOVERY_BANDWIDTH)
+BITOPS(RS, CheckOverwrite, raid_set, RS_CHECK_OVERWRITE)
+BITOPS(RS, Dead, raid_set, RS_DEAD)
+BITOPS(RS, DeadEndioMessage, raid_set, RS_DEAD_ENDIO_MESSAGE)
+BITOPS(RS, Degraded, raid_set, RS_DEGRADED)
+BITOPS(RS, DevelStats, raid_set, RS_DEVEL_STATS)
+BITOPS(RS, EnforceParityCreation, raid_set, RS_ENFORCE_PARITY_CREATION)
+BITOPS(RS, ProhibitWrites, raid_set, RS_PROHIBIT_WRITES)
+BITOPS(RS, Recover, raid_set, RS_RECOVER)
+BITOPS(RS, ScBusy, raid_set, RS_SC_BUSY)
+BITOPS(RS, Suspend, raid_set, RS_SUSPEND)
+#undef BITOPS
+
+/*-----------------------------------------------------------------
+ * Raid-4/5 set structures.
+ *---------------------------------------------------------------*/
+/* RAID level definitions. */
+enum raid_level {
+ raid4,
+ raid5,
+};
+
+/* Symmetric/Asymmetric, Left/Right parity rotating algorithms. */
+enum raid_algorithm {
+ none,
+ left_asym,
+ right_asym,
+ left_sym,
+ right_sym,
+};
+
+struct raid_type {
+ const char *name; /* RAID algorithm. */
+ const char *descr; /* Descriptor text for logging. */
+ const unsigned parity_devs; /* # of parity devices. */
+ const unsigned minimal_devs; /* minimal # of devices in set. */
+ const enum raid_level level; /* RAID level. */
+ const enum raid_algorithm algorithm; /* RAID algorithm. */
+};
+
+/* Supported raid types and properties. */
+static struct raid_type raid_types[] = {
+ {"raid4", "RAID4 (dedicated parity disk)", 1, 3, raid4, none},
+ {"raid5_la", "RAID5 (left asymmetric)", 1, 3, raid5, left_asym},
+ {"raid5_ra", "RAID5 (right asymmetric)", 1, 3, raid5, right_asym},
+ {"raid5_ls", "RAID5 (left symmetric)", 1, 3, raid5, left_sym},
+ {"raid5_rs", "RAID5 (right symmetric)", 1, 3, raid5, right_sym},
+};
+
+/* Address as calculated by raid_address(). */
+struct raid_address {
+ sector_t key; /* Hash key (address of stripe % chunk_size). */
+ unsigned di, pi; /* Data and parity disks index. */
+};
+
+/* REMOVEME: reset statistics counters. */
+static void stats_reset(struct raid_set *rs)
+{
+ unsigned s = S_NR_STATS;
+
+ while (s--)
+ atomic_set(rs->stats + s, 0);
+}
+
+/*----------------------------------------------------------------
+ * RAID set management routines.
+ *--------------------------------------------------------------*/
+/*
+ * Begin small helper functions.
+ */
+/* No need to be called from region hash indirectly at dm_rh_dec(). */
+static void wake_dummy(void *context) {}
+
+/* Return # of io reference. */
+static int io_ref(struct raid_set *rs)
+{
+ return atomic_read(&rs->io.in_process);
+}
+
+/* Get an io reference. */
+static void io_get(struct raid_set *rs)
+{
+ int p = atomic_inc_return(&rs->io.in_process);
+
+ if (p > atomic_read(&rs->io.in_process_max))
+ atomic_set(&rs->io.in_process_max, p); /* REMOVEME: max. */
+}
+
+/* Put the io reference and conditionally wake io waiters. */
+static void io_put(struct raid_set *rs)
+{
+ /* Intel: rebuild data corrupter? */
+ if (atomic_dec_and_test(&rs->io.in_process))
+ wake_up(&rs->io.suspendq);
+ else
+ BUG_ON(io_ref(rs) < 0);
+}
+
+/* Wait until all io has been processed. */
+static void wait_ios(struct raid_set *rs)
+{
+ wait_event(rs->io.suspendq, !io_ref(rs));
+}
+
+/* Queue (optionally delayed) io work. */
+static void wake_do_raid_delayed(struct raid_set *rs, unsigned long delay)
+{
+ queue_delayed_work(rs->io.wq, &rs->io.dws_do_raid, delay);
+}
+
+/* Queue io work immediately (called from region hash too). */
+static void wake_do_raid(void *context)
+{
+ struct raid_set *rs = context;
+
+ queue_work(rs->io.wq, &rs->io.dws_do_raid.work);
+}
+
+/* Calculate device sector offset. */
+static sector_t _sector(struct raid_set *rs, struct bio *bio)
+{
+ sector_t sector = bio->bi_sector;
+
+ sector_div(sector, rs->set.data_devs);
+ return sector;
+}
+
+/* Return # of active stripes in stripe cache. */
+static int sc_active(struct stripe_cache *sc)
+{
+ return atomic_read(&sc->active_stripes);
+}
+
+/* Stripe cache busy indicator. */
+static int sc_busy(struct raid_set *rs)
+{
+ return sc_active(&rs->sc) >
+ atomic_read(&rs->sc.stripes) - (STRIPES_MIN / 2);
+}
+
+/* Set chunks states. */
+enum chunk_dirty_type { CLEAN, DIRTY, ERROR };
+static void chunk_set(struct stripe_chunk *chunk, enum chunk_dirty_type type)
+{
+ switch (type) {
+ case CLEAN:
+ ClearChunkDirty(chunk);
+ break;
+ case DIRTY:
+ SetChunkDirty(chunk);
+ break;
+ case ERROR:
+ SetChunkError(chunk);
+ SetStripeError(chunk->stripe);
+ return;
+ default:
+ BUG();
+ }
+
+ SetChunkUptodate(chunk);
+ SetChunkIo(chunk);
+ ClearChunkError(chunk);
+}
+
+/* Return region state for a sector. */
+static int region_state(struct raid_set *rs, sector_t sector,
+ enum dm_rh_region_states state)
+{
+ struct dm_rh_client *rh = rs->recover.rh;
+ region_t region = dm_rh_sector_to_region(rh, sector);
+
+ return !!(dm_rh_get_state(rh, region, 1) & state);
+}
+
+/*
+ * Return true in case a chunk should be read/written
+ *
+ * Conditions to read/write:
+ * o chunk not uptodate
+ * o chunk dirty
+ *
+ * Conditions to avoid io:
+ * o io already ongoing on chunk
+ * o io explicitly prohibited
+ */
+static int chunk_io(struct stripe_chunk *chunk)
+{
+ /* 2nd run optimization (flag set below on first run). */
+ if (TestClearChunkMustIo(chunk))
+ return 1;
+
+ /* Avoid io if prohibited or a locked chunk. */
+ if (!ChunkIo(chunk) || ChunkLocked(chunk))
+ return 0;
+
+ if (!ChunkUptodate(chunk) || ChunkDirty(chunk)) {
+ SetChunkMustIo(chunk); /* 2nd run optimization. */
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Call a function on each chunk needing io unless device failed. */
+static unsigned for_each_io_dev(struct stripe *stripe,
+ void (*f_io)(struct stripe *stripe, unsigned p))
+{
+ struct raid_set *rs = RS(stripe->sc);
+ unsigned p, r = 0;
+
+ for (p = 0; p < rs->set.raid_devs; p++) {
+ if (chunk_io(CHUNK(stripe, p)) && !DevFailed(rs->dev + p)) {
+ f_io(stripe, p);
+ r++;
+ }
+ }
+
+ return r;
+}
+
+/*
+ * Index of device to calculate parity on.
+ *
+ * Either the parity device index *or* the selected
+ * device to init after a spare replacement.
+ */
+static int dev_for_parity(struct stripe *stripe, int *sync)
+{
+ struct raid_set *rs = RS(stripe->sc);
+ int r = region_state(rs, stripe->key, DM_RH_NOSYNC | DM_RH_RECOVERING);
+
+ *sync = !r;
+
+ /* Reconstruct a particular device ?. */
+ if (r && rs->set.dev_to_init > -1)
+ return rs->set.dev_to_init;
+ else if (rs->set.raid_type->level == raid4)
+ return rs->set.pi;
+ else if (!StripeRecover(stripe))
+ return stripe->idx.parity;
+ else
+ return -1;
+}
+
+/* RAID set congested function. */
+static int rs_congested(void *congested_data, int bdi_bits)
+{
+ int r;
+ unsigned p;
+ struct raid_set *rs = congested_data;
+
+ if (sc_busy(rs) || RSSuspend(rs) || RSProhibitWrites(rs))
+ r = 1;
+ else for (r = 0, p = rs->set.raid_devs; !r && p--; ) {
+ /* If any of our component devices are overloaded. */
+ struct request_queue *q = bdev_get_queue(rs->dev[p].dev->bdev);
+
+ r |= bdi_congested(&q->backing_dev_info, bdi_bits);
+ }
+
+ /* REMOVEME: statistics. */
+ atomic_inc(rs->stats + (r ? S_CONGESTED : S_NOT_CONGESTED));
+ return r;
+}
+
+/* RAID device degrade check. */
+static void rs_check_degrade_dev(struct raid_set *rs,
+				 struct stripe *stripe, unsigned p)
+{
+	if (TestSetDevFailed(rs->dev + p))
+		return;
+
+	/* Throw an event in case of member device errors. */
+	if ((atomic_inc_return(&rs->set.failed_devs) >
+	     rs->set.raid_type->parity_devs) &&
+	     !TestSetRSDead(rs)) {
+		/* Display RAID set dead message once. */
+		unsigned q;	/* Don't shadow the parameter 'p'. */
+		char buf[BDEVNAME_SIZE];
+
+		DMERR("FATAL: too many devices failed -> RAID set broken");
+		for (q = 0; q < rs->set.raid_devs; q++) {
+			if (DevFailed(rs->dev + q))
+				DMERR("device /dev/%s failed",
+				      bdevname(rs->dev[q].dev->bdev, buf));
+		}
+	}
+
+	/* Only log the first member error. */
+	if (!TestSetRSDegraded(rs)) {
+		char buf[BDEVNAME_SIZE];
+
+		/* Store index for recovery. */
+		rs->set.ei = p;
+		DMERR("CRITICAL: %sio error on device /dev/%s "
+		      "in region=%llu; DEGRADING RAID set\n",
+		      stripe ? "" : "FAKED ",
+		      bdevname(rs->dev[p].dev->bdev, buf),
+		      (unsigned long long) (stripe ? stripe->key : 0));
+		DMERR("further device error messages suppressed");
+	}
+
+	/* Prohibit further writes to allow for userspace to update metadata. */
+	SetRSProhibitWrites(rs);
+	schedule_work(&rs->io.ws_do_table_event);
+}
+
+/* RAID set degrade check. */
+static void rs_check_degrade(struct stripe *stripe)
+{
+ struct raid_set *rs = RS(stripe->sc);
+ unsigned p = rs->set.raid_devs;
+
+ while (p--) {
+ if (ChunkError(CHUNK(stripe, p)))
+ rs_check_degrade_dev(rs, stripe, p);
+ }
+}
+
+/* Lookup a RAID device by name or by major:minor number. */
+static int raid_dev_lookup(struct raid_set *rs, struct raid_dev *dev_lookup)
+{
+ unsigned p;
+ struct raid_dev *dev;
+
+ /*
+ * Must be an incremental loop, because the device array
+ * can have empty slots still on calls from raid_ctr()
+ */
+ for (dev = rs->dev, p = 0;
+ dev->dev && p < rs->set.raid_devs;
+ dev++, p++) {
+ if (dev_lookup->dev->bdev->bd_dev == dev->dev->bdev->bd_dev)
+ return p;
+ }
+
+ return -ENODEV;
+}
+/*
+ * End small helper functions.
+ */
+
+/*
+ * Stripe hash functions
+ */
+/* Initialize/destroy stripe hash. */
+static int hash_init(struct stripe_hash *hash, unsigned stripes)
+{
+	unsigned buckets = roundup_pow_of_two(stripes >> 1);
+	static unsigned hash_primes[] = {
+		/* Table of primes for hash_fn/table size optimization. */
+		1, 2, 3, 7, 13, 27, 53, 97, 193, 389, 769,
+		1543, 3079, 6151, 12289, 24593, 49157, 98317,
+	};
+
+	/* Allocate stripe hash buckets. */
+	hash->hash = vmalloc(buckets * sizeof(*hash->hash));
+	if (!hash->hash)
+		return -ENOMEM;
+
+	hash->buckets = buckets;
+	hash->mask = buckets - 1;
+	hash->shift = ffs(buckets);
+	if (hash->shift >= ARRAY_SIZE(hash_primes)) /* Clamp to last index. */
+		hash->shift = ARRAY_SIZE(hash_primes) - 1;
+
+	BUG_ON(hash->shift < 2);
+	hash->prime = hash_primes[hash->shift];
+
+	/* Initialize buckets. */
+	while (buckets--)
+		INIT_LIST_HEAD(hash->hash + buckets);
+	return 0;
+}
+
+static void hash_exit(struct stripe_hash *hash)
+{
+ if (hash->hash) {
+ vfree(hash->hash);
+ hash->hash = NULL;
+ }
+}
+
+static unsigned hash_fn(struct stripe_hash *hash, sector_t key)
+{
+ return (unsigned) (((key * hash->prime) >> hash->shift) & hash->mask);
+}
+
+static struct list_head *hash_bucket(struct stripe_hash *hash, sector_t key)
+{
+ return hash->hash + hash_fn(hash, key);
+}
+
+/* Insert an entry into a hash. */
+static void stripe_insert(struct stripe_hash *hash, struct stripe *stripe)
+{
+ list_add(stripe->lists + LIST_HASH, hash_bucket(hash, stripe->key));
+}
+
+/* Lookup an entry in the stripe hash. */
+static struct stripe *stripe_lookup(struct stripe_cache *sc, sector_t key)
+{
+ unsigned look = 0;
+ struct stripe *stripe;
+ struct list_head *bucket = hash_bucket(&sc->hash, key);
+
+ list_for_each_entry(stripe, bucket, lists[LIST_HASH]) {
+ look++;
+
+ if (stripe->key == key) {
+			/* REMOVEME: statistics. */
+ if (look > atomic_read(RS(sc)->stats + S_MAX_LOOKUP))
+ atomic_set(RS(sc)->stats + S_MAX_LOOKUP, look);
+ return stripe;
+ }
+ }
+
+ return NULL;
+}
+
+/* Resize the stripe cache hash on size changes. */
+static int sc_hash_resize(struct stripe_cache *sc)
+{
+ /* Resize indicated ? */
+ if (atomic_read(&sc->stripes) != atomic_read(&sc->stripes_last)) {
+ int r;
+ struct stripe_hash hash;
+
+ r = hash_init(&hash, atomic_read(&sc->stripes));
+ if (r)
+ return r;
+
+ if (sc->hash.hash) {
+ unsigned b = sc->hash.buckets;
+ struct list_head *pos, *tmp;
+
+ /* Walk old buckets and insert into new. */
+ while (b--) {
+ list_for_each_safe(pos, tmp, sc->hash.hash + b)
+ stripe_insert(&hash,
+ list_entry(pos, struct stripe,
+ lists[LIST_HASH]));
+ }
+
+ }
+
+ hash_exit(&sc->hash);
+ memcpy(&sc->hash, &hash, sizeof(sc->hash));
+ atomic_set(&sc->stripes_last, atomic_read(&sc->stripes));
+ }
+
+ return 0;
+}
+/* End stripe hash functions. */
+
+/* List add, delete, push and pop functions. */
+/* Add stripe to flush list. */
+#define DEL_LIST(lh) \
+ if (!list_empty(lh)) \
+ list_del_init(lh);
+
+/* Delete stripe from hash. */
+static void stripe_hash_del(struct stripe *stripe)
+{
+ DEL_LIST(stripe->lists + LIST_HASH);
+}
+
+/* Return stripe reference count. */
+static inline int stripe_ref(struct stripe *stripe)
+{
+ return atomic_read(&stripe->cnt);
+}
+
+static void stripe_flush_add(struct stripe *stripe)
+{
+ struct stripe_cache *sc = stripe->sc;
+ struct list_head *lh = stripe->lists + LIST_FLUSH;
+
+ if (!StripeReconstruct(stripe) && list_empty(lh))
+ list_add_tail(lh, sc->lists + LIST_FLUSH);
+}
+
+/*
+ * Add stripe to LRU (inactive) list.
+ *
+ * Need lock, because of concurrent access from message interface.
+ */
+static void stripe_lru_add(struct stripe *stripe)
+{
+ if (!StripeRecover(stripe)) {
+ struct list_head *lh = stripe->lists + LIST_LRU;
+
+ if (list_empty(lh))
+ list_add_tail(lh, stripe->sc->lists + LIST_LRU);
+ }
+}
+
+#define POP_LIST(list) \
+ do { \
+ if (list_empty(sc->lists + (list))) \
+ stripe = NULL; \
+ else { \
+ stripe = list_first_entry(sc->lists + (list), \
+ struct stripe, \
+ lists[(list)]); \
+ list_del_init(stripe->lists + (list)); \
+ } \
+ } while (0);
+
+/* Pop an available stripe off the LRU list. */
+static struct stripe *stripe_lru_pop(struct stripe_cache *sc)
+{
+ struct stripe *stripe;
+
+ POP_LIST(LIST_LRU);
+ return stripe;
+}
+
+/* Pop an available stripe off the io list. */
+static struct stripe *stripe_io_pop(struct stripe_cache *sc)
+{
+ struct stripe *stripe;
+
+ POP_LIST(LIST_FLUSH);
+ return stripe;
+}
+
+/* Push a stripe safely onto the endio list to be handled by do_endios(). */
+static void stripe_endio_push(struct stripe *stripe)
+{
+ unsigned long flags;
+ struct stripe_cache *sc = stripe->sc;
+ struct list_head *stripe_list = stripe->lists + LIST_ENDIO,
+ *sc_list = sc->lists + LIST_ENDIO;
+ spinlock_t *lock = sc->locks + LOCK_ENDIO;
+
+ /* This runs in parallel with do_endios(). */
+ spin_lock_irqsave(lock, flags);
+ if (list_empty(stripe_list))
+ list_add_tail(stripe_list, sc_list);
+ spin_unlock_irqrestore(lock, flags);
+
+ wake_do_raid(RS(sc)); /* Wake myself. */
+}
+
+/* Pop a stripe safely off the endio list. */
+static struct stripe *stripe_endio_pop(struct stripe_cache *sc)
+{
+ struct stripe *stripe;
+ spinlock_t *lock = sc->locks + LOCK_ENDIO;
+
+ /* This runs in parallel with endio(). */
+ spin_lock_irq(lock);
+ POP_LIST(LIST_ENDIO)
+ spin_unlock_irq(lock);
+ return stripe;
+}
+#undef POP_LIST
+
+/*
+ * Stripe cache locking functions
+ */
+/* Dummy lock function for single host RAID4+5. */
+static void *no_lock(sector_t key, enum dm_lock_type type)
+{
+ return &no_lock;
+}
+
+/* Dummy unlock function for single host RAID4+5. */
+static void no_unlock(void *lock_handle)
+{
+}
+
+/* No locking (for single host RAID 4+5). */
+static struct dm_raid45_locking_type locking_none = {
+ .lock = no_lock,
+ .unlock = no_unlock,
+};
+
+/* Lock a stripe (for clustering). */
+static int
+stripe_lock(struct stripe *stripe, int rw, sector_t key)
+{
+ stripe->lock = RS(stripe->sc)->locking->lock(key, rw == READ ? DM_RAID45_SHARED : DM_RAID45_EX);
+ return stripe->lock ? 0 : -EPERM;
+}
+
+/* Unlock a stripe (for clustering). */
+static void stripe_unlock(struct stripe *stripe)
+{
+ RS(stripe->sc)->locking->unlock(stripe->lock);
+ stripe->lock = NULL;
+}
+
+/* Test io pending on stripe. */
+static int stripe_io_ref(struct stripe *stripe)
+{
+ return atomic_read(&stripe->io.pending);
+}
+
+static void stripe_io_get(struct stripe *stripe)
+{
+ if (atomic_inc_return(&stripe->io.pending) == 1)
+ /* REMOVEME: statistics */
+ atomic_inc(&stripe->sc->active_stripes);
+ else
+ BUG_ON(stripe_io_ref(stripe) < 0);
+}
+
+static void stripe_io_put(struct stripe *stripe)
+{
+ if (atomic_dec_and_test(&stripe->io.pending)) {
+ if (unlikely(StripeRecover(stripe)))
+ /* Don't put recovery stripe on endio list. */
+ wake_do_raid(RS(stripe->sc));
+ else
+ /* Add regular stripe to endio list and wake daemon. */
+ stripe_endio_push(stripe);
+
+ /* REMOVEME: statistics */
+ atomic_dec(&stripe->sc->active_stripes);
+ } else
+ BUG_ON(stripe_io_ref(stripe) < 0);
+}
+
+/* Take stripe reference out. */
+static int stripe_get(struct stripe *stripe)
+{
+ int r;
+ struct list_head *lh = stripe->lists + LIST_LRU;
+
+ /* Delete stripe from LRU (inactive) list if on. */
+ DEL_LIST(lh);
+ BUG_ON(stripe_ref(stripe) < 0);
+
+ /* Lock stripe on first reference */
+ r = (atomic_inc_return(&stripe->cnt) == 1) ?
+ stripe_lock(stripe, WRITE, stripe->key) : 0;
+
+ return r;
+}
+#undef DEL_LIST
+
+/* Return references on a chunk. */
+static int chunk_ref(struct stripe_chunk *chunk)
+{
+ return atomic_read(&chunk->cnt);
+}
+
+/* Take out reference on a chunk. */
+static int chunk_get(struct stripe_chunk *chunk)
+{
+ return atomic_inc_return(&chunk->cnt);
+}
+
+/* Drop reference on a chunk. */
+static void chunk_put(struct stripe_chunk *chunk)
+{
+ BUG_ON(atomic_dec_return(&chunk->cnt) < 0);
+}
+
+/*
+ * Drop reference on a stripe.
+ *
+ * Move it to list of LRU stripes if zero.
+ */
+static void stripe_put(struct stripe *stripe)
+{
+ if (atomic_dec_and_test(&stripe->cnt)) {
+ BUG_ON(stripe_io_ref(stripe));
+ stripe_unlock(stripe);
+ } else
+ BUG_ON(stripe_ref(stripe) < 0);
+}
+
+/* Helper needed by for_each_io_dev(). */
+static void stripe_get_references(struct stripe *stripe, unsigned p)
+{
+
+ /*
+ * Another one to reference the stripe in
+ * order to protect vs. LRU list moves.
+ */
+ io_get(RS(stripe->sc)); /* Global io references. */
+ stripe_get(stripe);
+ stripe_io_get(stripe); /* One for each chunk io. */
+}
+
+/* Helper for endio() to put all taken references. */
+static void stripe_put_references(struct stripe *stripe)
+{
+ stripe_io_put(stripe); /* One for each chunk io. */
+ stripe_put(stripe);
+ io_put(RS(stripe->sc));
+}
+
+/*
+ * Stripe cache functions.
+ */
+/*
+ * Invalidate all chunks (i.e. their pages) of a stripe.
+ *
+ * I only keep state for the whole chunk.
+ */
+static inline void stripe_chunk_invalidate(struct stripe_chunk *chunk)
+{
+ chunk->io.flags = 0;
+}
+
+static void
+stripe_chunks_invalidate(struct stripe *stripe)
+{
+ unsigned p = RS(stripe->sc)->set.raid_devs;
+
+ while (p--)
+ stripe_chunk_invalidate(CHUNK(stripe, p));
+}
+
+/* Prepare stripe for (re)use. */
+static void stripe_invalidate(struct stripe *stripe)
+{
+ stripe->io.flags = 0;
+ stripe->idx.parity = stripe->idx.recover = -1;
+ stripe_chunks_invalidate(stripe);
+}
+
+/*
+ * Allow io on all chunks of a stripe.
+ * If not set, IO will not occur; i.e. it's prohibited.
+ *
+ * Actual IO submission for allowed chunks depends
+ * on their !uptodate or dirty state.
+ */
+static void stripe_allow_io(struct stripe *stripe)
+{
+ unsigned p = RS(stripe->sc)->set.raid_devs;
+
+ while (p--)
+ SetChunkIo(CHUNK(stripe, p));
+}
+
+/* Initialize a stripe. */
+static void stripe_init(struct stripe_cache *sc, struct stripe *stripe)
+{
+ unsigned i, p = RS(sc)->set.raid_devs;
+
+ /* Work all io chunks. */
+ while (p--) {
+ struct stripe_chunk *chunk = CHUNK(stripe, p);
+
+ atomic_set(&chunk->cnt, 0);
+ chunk->stripe = stripe;
+ i = ARRAY_SIZE(chunk->bl);
+ while (i--)
+ bio_list_init(chunk->bl + i);
+ }
+
+ stripe->sc = sc;
+
+ i = ARRAY_SIZE(stripe->lists);
+ while (i--)
+ INIT_LIST_HEAD(stripe->lists + i);
+
+ stripe->io.size = RS(sc)->set.io_size;
+ atomic_set(&stripe->cnt, 0);
+ atomic_set(&stripe->io.pending, 0);
+ stripe_invalidate(stripe);
+}
+
+/* Number of pages per chunk. */
+static inline unsigned chunk_pages(unsigned sectors)
+{
+ return dm_div_up(sectors, SECTORS_PER_PAGE);
+}
+
+/* Number of pages per stripe. */
+static inline unsigned stripe_pages(struct raid_set *rs, unsigned io_size)
+{
+ return chunk_pages(io_size) * rs->set.raid_devs;
+}
+
+/* Initialize part of page_list (recovery). */
+static void stripe_zero_pl_part(struct stripe *stripe, int p,
+ unsigned start, unsigned count)
+{
+ unsigned o = start / SECTORS_PER_PAGE, pages = chunk_pages(count);
+ /* Get offset into the page_list. */
+ struct page_list *pl = pl_elem(PL(stripe, p), o);
+
+ BUG_ON(!pl);
+ while (pl && pages--) {
+ BUG_ON(!pl->page);
+ memset(page_address(pl->page), 0, PAGE_SIZE);
+ pl = pl->next;
+ }
+}
+
+/* Initialize parity chunk of stripe. */
+static void stripe_zero_chunk(struct stripe *stripe, int p)
+{
+ if (p > -1)
+ stripe_zero_pl_part(stripe, p, 0, stripe->io.size);
+}
+
+/* Return dynamic stripe structure size. */
+static size_t stripe_size(struct raid_set *rs)
+{
+ return sizeof(struct stripe) +
+ rs->set.raid_devs * sizeof(struct stripe_chunk);
+}
+
+/* Allocate a stripe and its memory object. */
+/* XXX adjust to cope with stripe cache and recovery stripe caches. */
+enum grow { SC_GROW, SC_KEEP };
+static struct stripe *stripe_alloc(struct stripe_cache *sc,
+ struct dm_mem_cache_client *mc,
+ enum grow grow)
+{
+ int r;
+ struct stripe *stripe;
+
+ stripe = kmem_cache_zalloc(sc->kc.cache, GFP_KERNEL);
+ if (stripe) {
+ /* Grow the dm-mem-cache by one object. */
+ if (grow == SC_GROW) {
+ r = dm_mem_cache_grow(mc, 1);
+ if (r)
+ goto err_free;
+ }
+
+ stripe->obj = dm_mem_cache_alloc(mc);
+ if (IS_ERR(stripe->obj))
+ goto err_shrink;
+
+ stripe_init(sc, stripe);
+ }
+
+ return stripe;
+
+err_shrink:
+ if (grow == SC_GROW)
+ dm_mem_cache_shrink(mc, 1);
+err_free:
+ kmem_cache_free(sc->kc.cache, stripe);
+ return NULL;
+}
+
+/*
+ * Free a stripes memory object, shrink the
+ * memory cache and free the stripe itself.
+ */
+static void stripe_free(struct stripe *stripe, struct dm_mem_cache_client *mc)
+{
+ dm_mem_cache_free(mc, stripe->obj);
+ dm_mem_cache_shrink(mc, 1);
+ kmem_cache_free(stripe->sc->kc.cache, stripe);
+}
+
+/* Free the recovery stripe. */
+static void stripe_recover_free(struct raid_set *rs)
+{
+ struct recover *rec = &rs->recover;
+ struct dm_mem_cache_client *mc;
+
+ mc = rec->mem_cache_client;
+ rec->mem_cache_client = NULL;
+ if (mc) {
+ struct stripe *stripe;
+
+ while (!list_empty(&rec->stripes)) {
+ stripe = list_first_entry(&rec->stripes, struct stripe,
+ lists[LIST_RECOVER]);
+ list_del(stripe->lists + LIST_RECOVER);
+ kfree(stripe->recover);
+ stripe_free(stripe, mc);
+ }
+
+ dm_mem_cache_client_destroy(mc);
+ dm_io_client_destroy(rec->dm_io_client);
+ rec->dm_io_client = NULL;
+ }
+}
+
+/* Grow stripe cache. */
+static int sc_grow(struct stripe_cache *sc, unsigned stripes, enum grow grow)
+{
+ int r = 0;
+
+ /* Try to allocate this many (additional) stripes. */
+ while (stripes--) {
+ struct stripe *stripe =
+ stripe_alloc(sc, sc->mem_cache_client, grow);
+
+ if (likely(stripe)) {
+ stripe_lru_add(stripe);
+ atomic_inc(&sc->stripes);
+ } else {
+ r = -ENOMEM;
+ break;
+ }
+ }
+
+ return r ? r : sc_hash_resize(sc);
+}
+
+/* Shrink stripe cache. */
+static int sc_shrink(struct stripe_cache *sc, unsigned stripes)
+{
+ int r = 0;
+
+ /* Try to get unused stripe from LRU list. */
+ while (stripes--) {
+ struct stripe *stripe;
+
+ stripe = stripe_lru_pop(sc);
+ if (stripe) {
+ /* An LRU stripe may never have ios pending! */
+ BUG_ON(stripe_io_ref(stripe));
+ BUG_ON(stripe_ref(stripe));
+ atomic_dec(&sc->stripes);
+ /* Remove from hash if on before deletion. */
+ stripe_hash_del(stripe);
+ stripe_free(stripe, sc->mem_cache_client);
+ } else {
+ r = -ENOENT;
+ break;
+ }
+ }
+
+ /* Check if stats are still sane. */
+ if (atomic_read(&sc->active_stripes_max) >
+ atomic_read(&sc->stripes))
+ atomic_set(&sc->active_stripes_max, 0);
+
+ if (r)
+ return r;
+
+ return atomic_read(&sc->stripes) ? sc_hash_resize(sc) : 0;
+}
+
+/*
+ * Create stripe cache and recovery.
+ *
+ * Initializes the cache's lists, locks and counters, creates a
+ * uniquely named kmem cache for this RAID set's stripe structures,
+ * memory cache clients and dm-io clients for both regular io and
+ * recovery, preallocates the recovery stripes and finally grows the
+ * cache to @stripes LRU stripes.
+ *
+ * Returns 0 on success or a negative errno.  NOTE(review): on error,
+ * resources created so far are left allocated here - presumably the
+ * caller tears them down via sc_exit(); confirm.
+ */
+static int sc_init(struct raid_set *rs, unsigned stripes)
+{
+ /* NOTE(review): r should be int - sc_grow() may return a negative errno. */
+ unsigned i, r, rstripes;
+ struct stripe_cache *sc = &rs->sc;
+ struct stripe *stripe;
+ struct recover *rec = &rs->recover;
+ struct mapped_device *md;
+ struct gendisk *disk;
+
+
+ /* Initialize lists and locks. */
+ i = ARRAY_SIZE(sc->lists);
+ while (i--)
+ INIT_LIST_HEAD(sc->lists + i);
+
+ INIT_LIST_HEAD(&rec->stripes);
+
+ /* Initialize endio and LRU list locks. */
+ i = NR_LOCKS;
+ while (i--)
+ spin_lock_init(sc->locks + i);
+
+ /* Initialize atomic variables. */
+ atomic_set(&sc->stripes, 0);
+ atomic_set(&sc->stripes_to_set, 0);
+ atomic_set(&sc->active_stripes, 0);
+ atomic_set(&sc->active_stripes_max, 0); /* REMOVEME: statistics. */
+
+ /*
+ * We need a runtime unique # to suffix the kmem cache name
+ * because we'll have one for each active RAID set.
+ */
+ md = dm_table_get_md(rs->ti->table);
+ disk = dm_disk(md);
+ snprintf(sc->kc.name, sizeof(sc->kc.name), "%s-%d.%d", TARGET,
+ disk->first_minor, atomic_inc_return(&_stripe_sc_nr));
+ sc->kc.cache = kmem_cache_create(sc->kc.name, stripe_size(rs),
+ 0, 0, NULL);
+ if (!sc->kc.cache)
+ return -ENOMEM;
+
+ /* Create memory cache client context for RAID stripe cache. */
+ sc->mem_cache_client =
+ dm_mem_cache_client_create(stripes, rs->set.raid_devs,
+ chunk_pages(rs->set.io_size));
+ if (IS_ERR(sc->mem_cache_client))
+ return PTR_ERR(sc->mem_cache_client);
+
+ /* Create memory cache client context for RAID recovery stripe(s). */
+ rstripes = rec->recovery_stripes;
+ rec->mem_cache_client =
+ dm_mem_cache_client_create(rstripes, rs->set.raid_devs,
+ chunk_pages(rec->io_size));
+ if (IS_ERR(rec->mem_cache_client))
+ return PTR_ERR(rec->mem_cache_client);
+
+ /* Create dm-io client context for IO stripes. */
+ sc->dm_io_client = dm_io_client_create();
+ if (IS_ERR(sc->dm_io_client))
+ return PTR_ERR(sc->dm_io_client);
+
+ /* FIXME: intermingled with stripe cache initialization. */
+ /* Create dm-io client context for recovery stripes. */
+ rec->dm_io_client = dm_io_client_create();
+ if (IS_ERR(rec->dm_io_client))
+ return PTR_ERR(rec->dm_io_client);
+
+ /* Allocate stripes for set recovery. */
+ while (rstripes--) {
+ stripe = stripe_alloc(sc, rec->mem_cache_client, SC_KEEP);
+ if (!stripe)
+ return -ENOMEM;
+
+ stripe->recover = kzalloc(sizeof(*stripe->recover), GFP_KERNEL);
+ if (!stripe->recover) {
+ stripe_free(stripe, rec->mem_cache_client);
+ return -ENOMEM;
+ }
+
+ SetStripeRecover(stripe);
+ stripe->io.size = rec->io_size;
+ list_add_tail(stripe->lists + LIST_RECOVER, &rec->stripes);
+ /* Don't add recovery stripes to LRU list! */
+ }
+
+ /*
+ * Allocate the stripe objects from the
+ * cache and add them to the LRU list.
+ */
+ r = sc_grow(sc, stripes, SC_KEEP);
+ if (!r)
+ atomic_set(&sc->stripes_last, stripes);
+
+ return r;
+}
+
+/*
+ * Destroy the stripe cache.
+ *
+ * Frees the recovery stripes, shrinks the cache down to zero stripes,
+ * destroys the kmem cache, the memory cache and dm-io clients and
+ * tears down the stripe hash.  Safe on a partially initialized cache:
+ * sc->kc.cache gates the whole teardown and both clients are checked
+ * for NULL/IS_ERR() before destruction.
+ *
+ * NOTE(review): rs->recover.mem_cache_client and rs->recover.dm_io_client
+ * are not released here - presumably handled elsewhere (e.g. via
+ * stripe_recover_free()); confirm.
+ */
+static void sc_exit(struct stripe_cache *sc)
+{
+ struct raid_set *rs = RS(sc);
+
+ if (sc->kc.cache) {
+ stripe_recover_free(rs);
+ BUG_ON(sc_shrink(sc, atomic_read(&sc->stripes)));
+ kmem_cache_destroy(sc->kc.cache);
+ sc->kc.cache = NULL;
+
+ if (sc->mem_cache_client && !IS_ERR(sc->mem_cache_client))
+ dm_mem_cache_client_destroy(sc->mem_cache_client);
+
+ if (sc->dm_io_client && !IS_ERR(sc->dm_io_client))
+ dm_io_client_destroy(sc->dm_io_client);
+
+ hash_exit(&sc->hash);
+ }
+}
+
+/*
+ * Calculate RAID address
+ *
+ * Delivers tuple with the index of the data disk holding the chunk
+ * in the set, the parity disks index and the start of the stripe
+ * within the address space of the set (used as the stripe cache hash key).
+ */
+/* thx MD. */
+static struct raid_address *raid_address(struct raid_set *rs, sector_t sector,
+ struct raid_address *addr)
+{
+ sector_t stripe, tmp;
+
+ /*
+ * chunk_number = sector / chunk_size
+ * stripe_number = chunk_number / data_devs
+ * di = stripe % data_devs;
+ */
+ stripe = sector >> rs->set.chunk_shift;
+ addr->di = sector_div(stripe, rs->set.data_devs);
+
+ switch (rs->set.raid_type->level) {
+ case raid4:
+ /* RAID4: fixed, dedicated parity device. */
+ addr->pi = rs->set.pi;
+ goto check_shift_di;
+ case raid5:
+ /* RAID5: rotating parity device per stripe. */
+ tmp = stripe;
+ addr->pi = sector_div(tmp, rs->set.raid_devs);
+
+ switch (rs->set.raid_type->algorithm) {
+ case left_asym: /* Left asymmetric. */
+ addr->pi = rs->set.data_devs - addr->pi;
+ /* Fall through. */
+ case right_asym: /* Right asymmetric. */
+check_shift_di:
+ if (addr->di >= addr->pi)
+ addr->di++;
+ break;
+ case left_sym: /* Left symmetric. */
+ addr->pi = rs->set.data_devs - addr->pi;
+ /* Fall through. */
+ case right_sym: /* Right symmetric. */
+ addr->di = (addr->pi + addr->di + 1) %
+ rs->set.raid_devs;
+ break;
+ case none: /* Ain't happen: RAID4 algorithm placeholder. */
+ BUG();
+ }
+ }
+
+ /*
+ * Start offset of the stripes chunk on any single device of the RAID
+ * set, adjusted in case io size differs from chunk size.
+ */
+ addr->key = (stripe << rs->set.chunk_shift) +
+ (sector & rs->set.io_inv_mask);
+ return addr;
+}
+
+/*
+ * Copy data across between stripe pages and bio vectors.
+ *
+ * Pay attention to data alignment in stripe and bio pages.
+ */
+static void bio_copy_page_list(int rw, struct stripe *stripe,
+ struct page_list *pl, struct bio *bio)
+{
+ unsigned i, page_offset;
+ void *page_addr;
+ struct raid_set *rs = RS(stripe->sc);
+ struct bio_vec *bv;
+
+ /* Get start page in page list for this sector. */
+ i = (bio->bi_sector & rs->set.io_mask) / SECTORS_PER_PAGE;
+ pl = pl_elem(pl, i);
+ BUG_ON(!pl);
+ BUG_ON(!pl->page);
+
+ page_addr = page_address(pl->page);
+ page_offset = to_bytes(bio->bi_sector & (SECTORS_PER_PAGE - 1));
+
+ /* Walk all segments and copy data across between bio_vecs and pages. */
+ bio_for_each_segment(bv, bio, i) {
+ int len = bv->bv_len, size;
+ unsigned bio_offset = 0;
+ void *bio_addr = __bio_kmap_atomic(bio, i, KM_USER0);
+redo:
+ size = (page_offset + len > PAGE_SIZE) ?
+ PAGE_SIZE - page_offset : len;
+
+ if (rw == READ)
+ memcpy(bio_addr + bio_offset,
+ page_addr + page_offset, size);
+ else
+ memcpy(page_addr + page_offset,
+ bio_addr + bio_offset, size);
+
+ page_offset += size;
+ if (page_offset == PAGE_SIZE) {
+ /*
+ * We reached the end of the chunk page ->
+ * need to refer to the next one to copy more data.
+ */
+ len -= size;
+ if (len) {
+ /* Get next page. */
+ pl = pl->next;
+ BUG_ON(!pl);
+ BUG_ON(!pl->page);
+ page_addr = page_address(pl->page);
+ page_offset = 0;
+ bio_offset += size;
+ /* REMOVEME: statistics. */
+ atomic_inc(rs->stats + S_BIO_COPY_PL_NEXT);
+ goto redo;
+ }
+ }
+
+ __bio_kunmap_atomic(bio_addr, KM_USER0);
+ }
+}
+
+/*
+ * Xor optimization macros.
+ */
+/* Xor data pointer declaration and initialization macros. */
+#define DECLARE_2 unsigned long *d0 = data[0], *d1 = data[1]
+#define DECLARE_3 DECLARE_2, *d2 = data[2]
+#define DECLARE_4 DECLARE_3, *d3 = data[3]
+#define DECLARE_5 DECLARE_4, *d4 = data[4]
+#define DECLARE_6 DECLARE_5, *d5 = data[5]
+#define DECLARE_7 DECLARE_6, *d6 = data[6]
+#define DECLARE_8 DECLARE_7, *d7 = data[7]
+
+/* Xor unroll macros. */
+#define D2(n) d0[n] = d0[n] ^ d1[n]
+#define D3(n) D2(n) ^ d2[n]
+#define D4(n) D3(n) ^ d3[n]
+#define D5(n) D4(n) ^ d4[n]
+#define D6(n) D5(n) ^ d5[n]
+#define D7(n) D6(n) ^ d6[n]
+#define D8(n) D7(n) ^ d7[n]
+
+#define X_2(macro, offset) macro(offset); macro(offset + 1);
+#define X_4(macro, offset) X_2(macro, offset); X_2(macro, offset + 2);
+#define X_8(macro, offset) X_4(macro, offset); X_4(macro, offset + 4);
+#define X_16(macro, offset) X_8(macro, offset); X_8(macro, offset + 8);
+#define X_32(macro, offset) X_16(macro, offset); X_16(macro, offset + 16);
+#define X_64(macro, offset) X_32(macro, offset); X_32(macro, offset + 32);
+
+/* Define a _xor_#chunks_#xors_per_run() function. */
+#define _XOR(chunks, xors_per_run) \
+static void _xor ## chunks ## _ ## xors_per_run(unsigned long **data) \
+{ \
+ unsigned end = XOR_SIZE / sizeof(data[0]), i; \
+ DECLARE_ ## chunks; \
+\
+ for (i = 0; i < end; i += xors_per_run) { \
+ X_ ## xors_per_run(D ## chunks, i); \
+ } \
+}
+
+/* Define xor functions for 2 - 8 chunks and xors per run. */
+#define MAKE_XOR_PER_RUN(xors_per_run) \
+ _XOR(2, xors_per_run); _XOR(3, xors_per_run); \
+ _XOR(4, xors_per_run); _XOR(5, xors_per_run); \
+ _XOR(6, xors_per_run); _XOR(7, xors_per_run); \
+ _XOR(8, xors_per_run);
+
+MAKE_XOR_PER_RUN(8) /* Define _xor_*_8() functions. */
+MAKE_XOR_PER_RUN(16) /* Define _xor_*_16() functions. */
+MAKE_XOR_PER_RUN(32) /* Define _xor_*_32() functions. */
+MAKE_XOR_PER_RUN(64) /* Define _xor_*_64() functions. */
+
+#define MAKE_XOR(xors_per_run) \
+struct { \
+ void (*f)(unsigned long **); \
+} static xor_funcs ## xors_per_run[] = { \
+ { NULL }, /* NULL pointers to optimize indexing in xor(). */ \
+ { NULL }, \
+ { _xor2_ ## xors_per_run }, \
+ { _xor3_ ## xors_per_run }, \
+ { _xor4_ ## xors_per_run }, \
+ { _xor5_ ## xors_per_run }, \
+ { _xor6_ ## xors_per_run }, \
+ { _xor7_ ## xors_per_run }, \
+ { _xor8_ ## xors_per_run }, \
+}; \
+\
+static void xor_ ## xors_per_run(unsigned n, unsigned long **data) \
+{ \
+ /* Call respective function for amount of chunks. */ \
+ xor_funcs ## xors_per_run[n].f(data); \
+}
+
+/* Define xor_8() - xor_64 functions. */
+MAKE_XOR(8)
+MAKE_XOR(16)
+MAKE_XOR(32)
+MAKE_XOR(64)
+/*
+ * END xor optimization macros.
+ */
+
+/* Maximum number of chunks, which can be xor'ed in one go. */
+#define XOR_CHUNKS_MAX (ARRAY_SIZE(xor_funcs8) - 1)
+
+/* xor_blocks wrapper to allow for using that crypto library function. */
+static void xor_blocks_wrapper(unsigned n, unsigned long **data)
+{
+ BUG_ON(n < 2 || n > MAX_XOR_BLOCKS + 1);
+ xor_blocks(n - 1, XOR_SIZE, (void *) data[0], (void **) data + 1);
+}
+
+struct xor_func {
+ xor_function_t f;
+ const char *name;
+} static xor_funcs[] = {
+ { xor_64, "xor_64" },
+ { xor_32, "xor_32" },
+ { xor_16, "xor_16" },
+ { xor_8, "xor_8" },
+ { xor_blocks_wrapper, "xor_blocks" },
+};
+
+/*
+ * Check, if chunk has to be xored in/out:
+ *
+ * o if writes are queued
+ * o if writes are merged
+ * o if stripe is to be reconstructed
+ * o if recovery stripe
+ */
+static inline int chunk_must_xor(struct stripe_chunk *chunk)
+{
+ if (ChunkUptodate(chunk)) {
+ BUG_ON(!bio_list_empty(BL_CHUNK(chunk, WRITE_QUEUED)) &&
+ !bio_list_empty(BL_CHUNK(chunk, WRITE_MERGED)));
+
+ if (!bio_list_empty(BL_CHUNK(chunk, WRITE_QUEUED)) ||
+ !bio_list_empty(BL_CHUNK(chunk, WRITE_MERGED)))
+ return 1;
+
+ if (StripeReconstruct(chunk->stripe) ||
+ StripeRecover(chunk->stripe))
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * Calculate parity by xor-ing chunks together.
+ *
+ * This indexes into the chunks of a stripe and their pages.
+ *
+ * All chunks which need it will be xored into the indexed (@pi)
+ * chunk in maximum groups of xor.chunks.
+ *
+ */
+static void xor(struct stripe *stripe, unsigned pi, unsigned sector)
+{
+ struct raid_set *rs = RS(stripe->sc);
+ unsigned max_chunks = rs->xor.chunks, n = 1,
+ o = sector / SECTORS_PER_PAGE, /* Offset into the page_list. */
+ p = rs->set.raid_devs;
+ unsigned long **d = rs->data;
+ xor_function_t xor_f = rs->xor.f->f;
+
+ BUG_ON(sector > stripe->io.size);
+
+ /* Address of parity page to xor into. */
+ d[0] = page_address(pl_elem(PL(stripe, pi), o)->page);
+
+ while (p--) {
+ /* Preset pointers to data pages. */
+ if (p != pi && chunk_must_xor(CHUNK(stripe, p)))
+ d[n++] = page_address(pl_elem(PL(stripe, p), o)->page);
+
+ /* If max chunks -> xor. */
+ if (n == max_chunks) {
+ mutex_lock(&rs->io.xor_lock);
+ xor_f(n, d);
+ mutex_unlock(&rs->io.xor_lock);
+ n = 1;
+ }
+ }
+
+ /* If chunks -> xor. */
+ if (n > 1) {
+ mutex_lock(&rs->io.xor_lock);
+ xor_f(n, d);
+ mutex_unlock(&rs->io.xor_lock);
+ }
+}
+
+/* Common xor loop through all stripe page lists. */
+static void common_xor(struct stripe *stripe, sector_t count,
+ unsigned off, unsigned pi)
+{
+ unsigned sector;
+
+ BUG_ON(!count);
+ for (sector = off; sector < count; sector += SECTORS_PER_PAGE)
+ xor(stripe, pi, sector);
+
+ /* Set parity page uptodate and clean. */
+ chunk_set(CHUNK(stripe, pi), CLEAN);
+ atomic_inc(RS(stripe->sc)->stats + S_XORS); /* REMOVEME: statistics. */
+}
+
+/*
+ * Calculate parity sectors on intact stripes.
+ *
+ * Need to calculate raid address for recover stripe, because its
+ * chunk sizes differs and is typically larger than io chunk size.
+ */
+static void parity_xor(struct stripe *stripe)
+{
+ struct raid_set *rs = RS(stripe->sc);
+ int size_differs = stripe->io.size != rs->set.io_size;
+ unsigned chunk_size = rs->set.chunk_size, io_size = stripe->io.size,
+ xor_size = chunk_size > io_size ? io_size : chunk_size;
+ sector_t off;
+
+ /* This can be the recover stripe with a larger io size. */
+ for (off = 0; off < io_size; off += xor_size) {
+ /*
+ * Recover stripe is likely bigger than regular io
+ * ones and has no precalculated parity disk index ->
+ * need to calculate RAID address.
+ */
+ if (unlikely(size_differs)) {
+ struct raid_address addr;
+
+ raid_address(rs, (stripe->key + off) *
+ rs->set.data_devs, &addr);
+ stripe->idx.parity = addr.pi;
+ stripe_zero_pl_part(stripe, addr.pi, off, xor_size);
+ }
+
+ common_xor(stripe, xor_size, off, stripe->idx.parity);
+ chunk_set(CHUNK(stripe, stripe->idx.parity), DIRTY);
+ }
+}
+
+/* Reconstruct missing chunk. */
+static void stripe_reconstruct(struct stripe *stripe)
+{
+ struct raid_set *rs = RS(stripe->sc);
+ int p = rs->set.raid_devs, pr = stripe->idx.recover;
+
+ BUG_ON(pr < 0);
+
+ /* Check if all but the chunk to be reconstructed are uptodate. */
+ while (p--)
+ BUG_ON(p != pr && !ChunkUptodate(CHUNK(stripe, p)));
+
+ /* REMOVEME: statistics. */
+ atomic_inc(rs->stats + (RSDegraded(rs) ? S_RECONSTRUCT_EI :
+ S_RECONSTRUCT_DEV));
+ /* Zero chunk to be reconstructed. */
+ stripe_zero_chunk(stripe, pr);
+ common_xor(stripe, stripe->io.size, 0, pr);
+}
+
+/*
+ * Recovery io throttling
+ */
+/* Conditionally reset io counters. */
+static int recover_io_reset(struct raid_set *rs)
+{
+ unsigned long j = jiffies;
+
+ /* Pay attention to jiffies overflows. */
+ if (j > rs->recover.last_jiffies + HZ ||
+ j < rs->recover.last_jiffies) {
+ atomic_set(rs->recover.io_count + IO_WORK, 0);
+ atomic_set(rs->recover.io_count + IO_RECOVER, 0);
+ rs->recover.last_jiffies = j;
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Count ios. */
+static void recover_io_count(struct stripe *stripe)
+{
+ struct raid_set *rs = RS(stripe->sc);
+
+ atomic_inc(rs->recover.io_count +
+ (StripeRecover(stripe) ? IO_RECOVER : IO_WORK));
+}
+
+/* Try getting a stripe either from the hash or from the LRU list. */
+static struct stripe *stripe_find(struct raid_set *rs,
+ struct raid_address *addr)
+{
+ int r;
+ struct stripe_cache *sc = &rs->sc;
+ struct stripe *stripe;
+
+ /* Try stripe from hash. */
+ stripe = stripe_lookup(sc, addr->key);
+ if (stripe) {
+ r = stripe_get(stripe);
+ if (r)
+ goto get_lock_failed;
+
+ atomic_inc(rs->stats + S_HITS_1ST); /* REMOVEME: statistics. */
+ } else {
+ /* Not in hash -> try to get an LRU stripe. */
+ stripe = stripe_lru_pop(sc);
+ if (stripe) {
+ /*
+ * An LRU stripe may not be referenced
+ * and may never have ios pending!
+ */
+ BUG_ON(stripe_ref(stripe));
+ BUG_ON(stripe_io_ref(stripe));
+
+ /* Remove from hash if on before reuse. */
+ stripe_hash_del(stripe);
+
+ /* Invalidate before reinserting with changed key. */
+ stripe_invalidate(stripe);
+
+ stripe->key = addr->key;
+ stripe->region = dm_rh_sector_to_region(rs->recover.rh,
+ addr->key);
+ stripe->idx.parity = addr->pi;
+ r = stripe_get(stripe);
+ if (r)
+ goto get_lock_failed;
+
+ /* Insert stripe into the stripe hash. */
+ stripe_insert(&sc->hash, stripe);
+ /* REMOVEME: statistics. */
+ atomic_inc(rs->stats + S_INSCACHE);
+ }
+ }
+
+ return stripe;
+
+get_lock_failed:
+ stripe_put(stripe);
+ return NULL;
+}
+
+/*
+ * Process end io
+ *
+ * Done in daemon context, because chunk pages cannot be mapped
+ * for the data copy in interrupt context.
+ */
+/*
+ * End io all bios on a bio list.
+ *
+ * Writes drop the region's pending io count; successful reads get the
+ * chunk data copied across into the bio first.  Each ended bio drops
+ * one chunk, stripe and set-wide io reference.
+ */
+static void bio_list_endio(struct stripe *stripe, struct bio_list *bl,
+ int p, int error)
+{
+ struct raid_set *rs = RS(stripe->sc);
+ struct bio *bio;
+ struct page_list *pl = PL(stripe, p);
+ struct stripe_chunk *chunk = CHUNK(stripe, p);
+
+ /* Update region counters. */
+ while ((bio = bio_list_pop(bl))) {
+ if (bio_data_dir(bio) == WRITE)
+ /* Drop io pending count for any writes. */
+ dm_rh_dec(rs->recover.rh, stripe->region);
+ else if (!error)
+ /* Copy data across. */
+ bio_copy_page_list(READ, stripe, pl, bio);
+
+ bio_endio(bio, error);
+
+ /* REMOVEME: statistics. */
+ atomic_inc(rs->stats + (bio_data_dir(bio) == READ ?
+ S_BIOS_ENDIO_READ : S_BIOS_ENDIO_WRITE));
+
+ chunk_put(chunk);
+ stripe_put(stripe);
+ io_put(rs); /* Wake any suspend waiters on last bio. */
+ }
+}
+
+/*
+ * End io all reads/writes on a stripe copying
+ * read data across from stripe to bios and
+ * decrementing region counters for writes.
+ *
+ * Processing of ios depending on state:
+ * o no chunk error -> endio ok
+ * o degraded:
+ * - chunk error and read -> ignore to be requeued
+ * - chunk error and write -> endio ok
+ * o dead (more than parity_devs failed) and chunk error -> endio failed
+ */
+static void stripe_endio(int rw, struct stripe *stripe)
+{
+ struct raid_set *rs = RS(stripe->sc);
+ unsigned p = rs->set.raid_devs;
+ int write = (rw != READ);
+
+ while (p--) {
+ struct stripe_chunk *chunk = CHUNK(stripe, p);
+ struct bio_list *bl;
+
+ BUG_ON(ChunkLocked(chunk));
+
+ bl = BL_CHUNK(chunk, rw);
+ if (bio_list_empty(bl))
+ continue;
+
+ if (unlikely(ChunkError(chunk) || !ChunkUptodate(chunk))) {
+ /* RAID set dead. */
+ if (unlikely(RSDead(rs)))
+ bio_list_endio(stripe, bl, p, -EIO);
+ /* RAID set degraded. */
+ else if (write)
+ bio_list_endio(stripe, bl, p, 0);
+ } else {
+ BUG_ON(!RSDegraded(rs) && ChunkDirty(chunk));
+ bio_list_endio(stripe, bl, p, 0);
+ }
+ }
+}
+
+/* Fail all ios hanging off all bio lists of a stripe. */
+static void stripe_fail_io(struct stripe *stripe)
+{
+ struct raid_set *rs = RS(stripe->sc);
+ unsigned p = rs->set.raid_devs;
+
+ while (p--) {
+ struct stripe_chunk *chunk = CHUNK(stripe, p);
+ int i = ARRAY_SIZE(chunk->bl);
+
+ /* Fail all bios on all bio lists of the stripe. */
+ while (i--) {
+ struct bio_list *bl = chunk->bl + i;
+
+ if (!bio_list_empty(bl))
+ bio_list_endio(stripe, bl, p, -EIO);
+ }
+ }
+
+ /* Put stripe on LRU list. */
+ BUG_ON(stripe_io_ref(stripe));
+ BUG_ON(stripe_ref(stripe));
+}
+
+/* Unlock all required chunks. */
+static void stripe_chunks_unlock(struct stripe *stripe)
+{
+ unsigned p = RS(stripe->sc)->set.raid_devs;
+ struct stripe_chunk *chunk;
+
+ while (p--) {
+ chunk = CHUNK(stripe, p);
+
+ if (TestClearChunkUnlock(chunk))
+ ClearChunkLocked(chunk);
+ }
+}
+
+/*
+ * Queue reads and writes to a stripe by hanging
+ * their bios off the stripe's read/write lists.
+ */
+/*
+ * Queue one bio to the stripe covering its sector.
+ *
+ * Returns 1 when a write got queued (pending region count raised),
+ * 0 for reads or when no stripe could be obtained/locked - in the
+ * latter case the bio is placed on @reject for requeueing.
+ */
+static int stripe_queue_bio(struct raid_set *rs, struct bio *bio,
+ struct bio_list *reject)
+{
+ struct raid_address addr;
+ struct stripe *stripe;
+
+ stripe = stripe_find(rs, raid_address(rs, bio->bi_sector, &addr));
+ if (stripe) {
+ int r = 0, rw = bio_data_dir(bio);
+
+ /* Distinguish reads and writes. */
+ bio_list_add(BL(stripe, addr.di, rw), bio);
+
+ if (rw == READ)
+ /* REMOVEME: statistics. */
+ atomic_inc(rs->stats + S_BIOS_ADDED_READ);
+ else {
+ /* Increment pending write count on region. */
+ dm_rh_inc(rs->recover.rh, stripe->region);
+ r = 1;
+
+ /* REMOVEME: statistics. */
+ atomic_inc(rs->stats + S_BIOS_ADDED_WRITE);
+ }
+
+ /*
+ * Put on io (flush) list in case of
+ * initial bio queued to chunk.
+ */
+ if (chunk_get(CHUNK(stripe, addr.di)) == 1)
+ stripe_flush_add(stripe);
+
+ return r;
+ }
+
+ /* Got no stripe from cache or failed to lock it -> reject bio. */
+ bio_list_add(reject, bio);
+ atomic_inc(rs->stats + S_IOS_POST); /* REMOVEME: statistics. */
+ return 0;
+}
+
+/*
+ * Handle all stripes by handing them to the daemon, because we can't
+ * map their chunk pages to copy the data in interrupt context.
+ *
+ * We don't want to handle them here either, while interrupts are disabled.
+ */
+
+/*
+ * Read/write endio function for dm-io (interrupt context).
+ *
+ * Marks the chunk clean or in error and releases references;
+ * stripe_put_references() indirectly pushes the stripe onto the
+ * cache's endio list for daemon processing.
+ */
+static void endio(unsigned long error, void *context)
+{
+ struct stripe_chunk *chunk = context;
+
+ if (unlikely(error)) {
+ chunk_set(chunk, ERROR);
+ /* REMOVEME: statistics. */
+ atomic_inc(RS(chunk->stripe->sc)->stats + S_STRIPE_ERROR);
+ } else
+ chunk_set(chunk, CLEAN);
+
+ /*
+ * For recovery stripes, the ChunkLocked flag must be reset
+ * right here, because those aren't processed in do_endios().
+ */
+ if (unlikely(StripeRecover(chunk->stripe)))
+ ClearChunkLocked(chunk);
+ else
+ SetChunkUnlock(chunk);
+
+ /* Indirectly puts stripe on cache's endio list via stripe_io_put(). */
+ stripe_put_references(chunk->stripe);
+}
+
+/* Read/Write a chunk asynchronously. */
+static void stripe_chunk_rw(struct stripe *stripe, unsigned p)
+{
+ struct stripe_cache *sc = stripe->sc;
+ struct raid_set *rs = RS(sc);
+ struct dm_mem_cache_object *obj = stripe->obj + p;
+ struct page_list *pl = obj->pl;
+ struct stripe_chunk *chunk = CHUNK(stripe, p);
+ struct raid_dev *dev = rs->dev + p;
+ struct dm_io_region io = {
+ .bdev = dev->dev->bdev,
+ .sector = stripe->key,
+ .count = stripe->io.size,
+ };
+ struct dm_io_request control = {
+ .bi_rw = ChunkDirty(chunk) ? WRITE : READ,
+ .mem = {
+ .type = DM_IO_PAGE_LIST,
+ .ptr.pl = pl,
+ .offset = 0,
+ },
+ .notify = {
+ .fn = endio,
+ .context = chunk,
+ },
+ .client = StripeRecover(stripe) ? rs->recover.dm_io_client :
+ sc->dm_io_client,
+ };
+
+ BUG_ON(ChunkLocked(chunk));
+ BUG_ON(!ChunkUptodate(chunk) && ChunkDirty(chunk));
+ BUG_ON(ChunkUptodate(chunk) && !ChunkDirty(chunk));
+
+ /*
+ * Don't rw past end of device, which can happen, because
+ * typically sectors_per_dev isn't divisible by io_size.
+ */
+ if (unlikely(io.sector + io.count > rs->set.sectors_per_dev))
+ io.count = rs->set.sectors_per_dev - io.sector;
+
+ BUG_ON(!io.count);
+ io.sector += dev->start; /* Add <offset>. */
+ if (RSRecover(rs))
+ recover_io_count(stripe); /* Recovery io accounting. */
+
+ /* REMOVEME: statistics. */
+ atomic_inc(rs->stats + (ChunkDirty(chunk) ? S_DM_IO_WRITE :
+ S_DM_IO_READ));
+ SetChunkLocked(chunk);
+ SetDevIoQueued(dev);
+ BUG_ON(dm_io(&control, 1, &io, NULL));
+}
+
+/*
+ * Write dirty or read not uptodate page lists of a stripe.
+ */
+static int stripe_chunks_rw(struct stripe *stripe)
+{
+ int r;
+ struct raid_set *rs = RS(stripe->sc);
+
+ /*
+ * Increment the pending count on the stripe
+ * first, so that we don't race in endio().
+ *
+ * An inc (IO) is needed for any chunk unless !ChunkIo(chunk):
+ *
+ * o not uptodate
+ * o dirtied by writes merged
+ * o dirtied by parity calculations
+ */
+ r = for_each_io_dev(stripe, stripe_get_references);
+ if (r) {
+ /* Io needed: chunks are either not uptodate or dirty. */
+ int max; /* REMOVEME: */
+ struct stripe_cache *sc = &rs->sc;
+
+ /* Submit actual io. */
+ for_each_io_dev(stripe, stripe_chunk_rw);
+
+ /* REMOVEME: statistics */
+ max = sc_active(sc);
+ if (atomic_read(&sc->active_stripes_max) < max)
+ atomic_set(&sc->active_stripes_max, max);
+
+ atomic_inc(rs->stats + S_FLUSHS);
+ /* END REMOVEME: statistics */
+ }
+
+ return r;
+}
+
+/* Merge in all writes hence dirtying respective chunks. */
+static void stripe_merge_writes(struct stripe *stripe)
+{
+ unsigned p = RS(stripe->sc)->set.raid_devs;
+
+ while (p--) {
+ struct stripe_chunk *chunk = CHUNK(stripe, p);
+ struct bio_list *write = BL_CHUNK(chunk, WRITE_QUEUED);
+
+ if (!bio_list_empty(write)) {
+ struct bio *bio;
+ struct page_list *pl = stripe->obj[p].pl;
+
+ /*
+ * We can play with the lists without holding a lock,
+ * because it is just us accessing them anyway.
+ */
+ bio_list_for_each(bio, write)
+ bio_copy_page_list(WRITE, stripe, pl, bio);
+
+ bio_list_merge(BL_CHUNK(chunk, WRITE_MERGED), write);
+ bio_list_init(write);
+ chunk_set(chunk, DIRTY);
+ }
+ }
+}
+
+/*
+ * Queue all writes to get merged.
+ *
+ * Moves the bios off every chunk's WRITE list onto its WRITE_QUEUED
+ * list and flags the chunk for io.  Returns 1 if any writes got
+ * queued, 0 otherwise.
+ */
+static int stripe_queue_writes(struct stripe *stripe)
+{
+ int r = 0;
+ unsigned p = RS(stripe->sc)->set.raid_devs;
+
+ while (p--) {
+ struct stripe_chunk *chunk = CHUNK(stripe, p);
+ struct bio_list *write = BL_CHUNK(chunk, WRITE);
+
+ if (!bio_list_empty(write)) {
+ bio_list_merge(BL_CHUNK(chunk, WRITE_QUEUED), write);
+ bio_list_init(write);
+ /* Chunk has queued writes -> allow io on it. */
+ SetChunkIo(chunk);
+ r = 1;
+ }
+ }
+
+ return r;
+}
+
+
+/* Check, if a chunk gets completely overwritten. */
+static int stripe_check_chunk_overwrite(struct stripe *stripe, unsigned p)
+{
+ unsigned sectors = 0;
+ struct bio *bio;
+ struct bio_list *bl = BL(stripe, p, WRITE_QUEUED);
+
+ bio_list_for_each(bio, bl)
+ sectors += bio_sectors(bio);
+
+ BUG_ON(sectors > RS(stripe->sc)->set.io_size);
+ return sectors == RS(stripe->sc)->set.io_size;
+}
+
+/*
+ * Avoid io on a broken/being-reconstructed drive in order to
+ * reconstruct its data on endio.
+ *
+ * (*1*) We set StripeReconstruct() in here, so that _do_endios()
+ * will trigger a reconstruct call before resetting it.
+ */
+static int stripe_chunk_set_io_flags(struct stripe *stripe, int pr)
+{
+ struct stripe_chunk *chunk = CHUNK(stripe, pr);
+
+ /*
+ * Allow io on all chunks but the indexed one,
+ * because we're either degraded or prohibit it
+ * on the one for later reconstruction.
+ */
+ /* Includes ClearChunkIo(), ClearChunkUptodate(). */
+ stripe_chunk_invalidate(chunk);
+ stripe->idx.recover = pr;
+ SetStripeReconstruct(stripe);
+
+ /* REMOVEME: statistics. */
+ atomic_inc(RS(stripe->sc)->stats + S_PROHIBITCHUNKIO);
+ return -EPERM;
+}
+
+/*
+ * Chunk locked/uptodate and device failed tests.
+ *
+ * Returns the chunk if io on it may still be worthwhile (accessible,
+ * device healthy, not uptodate) or NULL otherwise.  Chunks already
+ * holding valid data bump *chunks_uptodate.
+ */
+static struct stripe_chunk *
+stripe_chunk_check(struct stripe *stripe, unsigned p, unsigned *chunks_uptodate)
+{
+ struct raid_set *rs = RS(stripe->sc);
+ struct stripe_chunk *chunk = CHUNK(stripe, p);
+
+ /* Can't access active chunks. */
+ if (ChunkLocked(chunk)) {
+ /* REMOVEME: statistics. */
+ atomic_inc(rs->stats + S_CHUNK_LOCKED);
+ return NULL;
+ }
+
+ /* Can't access broken device. */
+ if (ChunkError(chunk) || DevFailed(rs->dev + p))
+ return NULL;
+
+ /* Can access uptodate chunks. */
+ if (ChunkUptodate(chunk)) {
+ (*chunks_uptodate)++;
+ return NULL;
+ }
+
+ return chunk;
+}
+
+/*
+ * Degraded/reconstruction mode.
+ *
+ * Check stripe state to figure which chunks don't need IO.
+ *
+ * Returns 0 for fully operational, -EPERM for degraded/resynchronizing.
+ */
+static int stripe_check_reconstruct(struct stripe *stripe)
+{
+ struct raid_set *rs = RS(stripe->sc);
+
+ if (RSDead(rs)) {
+ ClearStripeReconstruct(stripe);
+ ClearStripeReconstructed(stripe);
+ stripe_allow_io(stripe);
+ return 0;
+ }
+
+ /* Avoid further reconstruction setting, when already set. */
+ if (StripeReconstruct(stripe)) {
+ /* REMOVEME: statistics. */
+ atomic_inc(rs->stats + S_RECONSTRUCT_SET);
+ return -EBUSY;
+ }
+
+ /* Initially allow io on all chunks. */
+ stripe_allow_io(stripe);
+
+ /* Return if stripe is already reconstructed. */
+ if (StripeReconstructed(stripe)) {
+ atomic_inc(rs->stats + S_RECONSTRUCTED);
+ return 0;
+ }
+
+ /*
+ * Degraded/reconstruction mode (device failed) ->
+ * avoid io on the failed device.
+ */
+ if (unlikely(RSDegraded(rs))) {
+ /* REMOVEME: statistics. */
+ atomic_inc(rs->stats + S_DEGRADED);
+ /* Allow IO on all devices but the dead one. */
+ BUG_ON(rs->set.ei < 0);
+ return stripe_chunk_set_io_flags(stripe, rs->set.ei);
+ } else {
+ int sync, pi = dev_for_parity(stripe, &sync);
+
+ /*
+ * Reconstruction mode (ie. a particular (replaced) device or
+ * some (rotating) parity chunk is being resynchronized) ->
+ * o make sure all needed chunks are read in
+ * o cope with 3/4 disk array special case where it
+ * doesn't make a difference to read in parity
+ * to xor data in/out
+ */
+ if (RSEnforceParityCreation(rs) || !sync) {
+ /* REMOVEME: statistics. */
+ atomic_inc(rs->stats + S_NOSYNC);
+ /* Allow IO on all devs but the one to reconstruct. */
+ return stripe_chunk_set_io_flags(stripe, pi);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Check, if stripe is ready to merge writes.
+ * I.e. if all chunks present to allow to merge bios.
+ *
+ * We prohibit io on:
+ *
+ * o chunks without bios
+ * o chunks which get completely written over
+ */
+static int stripe_merge_possible(struct stripe *stripe, int nosync)
+{
+ struct raid_set *rs = RS(stripe->sc);
+ unsigned chunks_overwrite = 0, chunks_prohibited = 0,
+ chunks_uptodate = 0, p = rs->set.raid_devs;
+
+ /* Walk all chunks. */
+ while (p--) {
+ struct stripe_chunk *chunk;
+
+ /* Prohibit io on broken devices. */
+ if (DevFailed(rs->dev + p)) {
+ chunk = CHUNK(stripe, p);
+ goto prohibit_io;
+ }
+
+ /* We can't optimize any further if no chunk. */
+ chunk = stripe_chunk_check(stripe, p, &chunks_uptodate);
+ if (!chunk || nosync)
+ continue;
+
+ /*
+ * We have a chunk, which is not uptodate.
+ *
+ * If this is not parity and we don't have
+ * reads queued, we can optimize further.
+ */
+ if (p != stripe->idx.parity &&
+ bio_list_empty(BL_CHUNK(chunk, READ)) &&
+ bio_list_empty(BL_CHUNK(chunk, WRITE_MERGED))) {
+ if (bio_list_empty(BL_CHUNK(chunk, WRITE_QUEUED)))
+ goto prohibit_io;
+ else if (RSCheckOverwrite(rs) &&
+ stripe_check_chunk_overwrite(stripe, p))
+ /* Completely overwritten chunk. */
+ chunks_overwrite++;
+ }
+
+ /* Allow io for chunks with bios and overwritten ones. */
+ SetChunkIo(chunk);
+ continue;
+
+prohibit_io:
+ /* No io for broken devices or for chunks w/o bios. */
+ ClearChunkIo(chunk);
+ chunks_prohibited++;
+ /* REMOVEME: statistics. */
+ atomic_inc(RS(stripe->sc)->stats + S_PROHIBITCHUNKIO);
+ }
+
+ /* All data chunks will get written over. */
+ if (chunks_overwrite == rs->set.data_devs)
+ atomic_inc(rs->stats + S_OVERWRITE); /* REMOVEME: statistics.*/
+ else if (chunks_uptodate + chunks_prohibited < rs->set.raid_devs) {
+ /* We don't have enough chunks to merge. */
+ atomic_inc(rs->stats + S_CANT_MERGE); /* REMOVEME: statistics.*/
+ return -EPERM;
+ }
+
+ /*
+ * If we have all chunks up to date or overwrite them, we
+ * just zero the parity chunk and let stripe_rw() recreate it.
+ */
+ if (chunks_uptodate == rs->set.raid_devs ||
+ chunks_overwrite == rs->set.data_devs) {
+ stripe_zero_chunk(stripe, stripe->idx.parity);
+ BUG_ON(StripeReconstruct(stripe));
+ SetStripeReconstruct(stripe); /* Enforce xor in caller. */
+ } else {
+ /*
+ * With less chunks, we xor parity out.
+ *
+ * (*4*) We rely on !StripeReconstruct() in chunk_must_xor(),
+ * so that only chunks with queued or merged writes
+ * are being xored.
+ */
+ parity_xor(stripe);
+ }
+
+ /*
+ * We do have enough chunks to merge.
+ * All chunks are uptodate or get written over.
+ */
+ atomic_inc(rs->stats + S_CAN_MERGE); /* REMOVEME: statistics. */
+ return 0;
+}
+
+/*
+ * Avoid reading chunks in case we're fully operational.
+ *
+ * We prohibit io on any chunks without bios but the parity chunk.
+ */
+static void stripe_avoid_reads(struct stripe *stripe)
+{
+ struct raid_set *rs = RS(stripe->sc);
+ unsigned dummy = 0, p = rs->set.raid_devs;
+
+ /* Walk all chunks. */
+ while (p--) {
+ struct stripe_chunk *chunk =
+ stripe_chunk_check(stripe, p, &dummy);
+
+ if (!chunk)
+ continue;
+
+ /* If parity or any bios pending -> allow io. */
+ if (chunk_ref(chunk) || p == stripe->idx.parity)
+ SetChunkIo(chunk);
+ else {
+ ClearChunkIo(chunk);
+ /* REMOVEME: statistics. */
+ atomic_inc(RS(stripe->sc)->stats + S_PROHIBITCHUNKIO);
+ }
+ }
+}
+
+/*
+ * Read/write a stripe.
+ *
+ * All stripe read/write activity goes through this function
+ * unless recovery, which has to call stripe_chunk_rw() directly.
+ *
+ * Make sure we don't try already merged stripes in order
+ * to avoid data corruption.
+ *
+/*
+ * Check the state of the RAID set and if degraded (or
+ * resynchronizing for reads), read in all other chunks but
+ * the one on the dead/resynchronizing device in order to be
+ * able to reconstruct the missing one in _do_endios().
+ *
+ * Can be called on active stripes in order
+ * to dispatch new io on inactive chunks.
+ *
+ * States to cover:
+ * o stripe to read and/or write
+ * o stripe with error to reconstruct
+ *
+ * Returns the result of stripe_chunks_rw(): 0 when no io could be
+ * submitted (stripe is then pushed to the endio list), != 0 otherwise.
+ */
+static int stripe_rw(struct stripe *stripe)
+{
+	int nosync, r;
+	struct raid_set *rs = RS(stripe->sc);
+
+	/*
+	 * Check, if a chunk needs to be reconstructed
+	 * because of a degraded set or a region out of sync.
+	 */
+	nosync = stripe_check_reconstruct(stripe);
+	switch (nosync) {
+	case -EBUSY:
+		return 0; /* Wait for stripe reconstruction to finish. */
+	case -EPERM:
+		goto io;
+	}
+
+	/*
+	 * If we don't have merged writes pending, we can schedule
+	 * queued writes to be merged next without corrupting data.
+	 */
+	if (!StripeMerged(stripe)) {
+		r = stripe_queue_writes(stripe);
+		if (r)
+			/* Writes got queued -> flag RBW (read before write). */
+			SetStripeRBW(stripe);
+	}
+
+	/*
+	 * Merge all writes hanging off uptodate/overwritten
+	 * chunks of the stripe.
+	 */
+	if (StripeRBW(stripe)) {
+		r = stripe_merge_possible(stripe, nosync);
+		if (!r) { /* Merge possible. */
+			struct stripe_chunk *chunk;
+
+			/*
+			 * I rely on valid parity in order
+			 * to xor a fraction of chunks out
+			 * of parity and back in.
+			 */
+			stripe_merge_writes(stripe);	/* Merge writes in. */
+			parity_xor(stripe);		/* Update parity. */
+			ClearStripeReconstruct(stripe);	/* Reset xor enforce. */
+			SetStripeMerged(stripe);	/* Writes merged. */
+			ClearStripeRBW(stripe);		/* Disable RBW. */
+
+			/*
+			 * REMOVEME: sanity check on parity chunk
+			 *	     states after writes got merged.
+			 */
+			chunk = CHUNK(stripe, stripe->idx.parity);
+			BUG_ON(ChunkLocked(chunk));
+			BUG_ON(!ChunkUptodate(chunk));
+			BUG_ON(!ChunkDirty(chunk));
+			BUG_ON(!ChunkIo(chunk));
+		}
+	} else if (!nosync && !StripeMerged(stripe))
+		/* Read avoidance if not degraded/resynchronizing/merged. */
+		stripe_avoid_reads(stripe);
+
+io:
+	/* Now submit any reads/writes for non-uptodate or dirty chunks. */
+	r = stripe_chunks_rw(stripe);
+	if (!r) {
+		/*
+		 * No io submitted because of chunk io
+		 * prohibited or locked chunks/failed devices
+		 * -> push to end io list for processing.
+		 */
+		stripe_endio_push(stripe);
+		atomic_inc(rs->stats + S_NO_RW); /* REMOVEME: statistics. */
+	}
+
+	return r;
+}
+
+/*
+ * Recovery functions
+ */
+/* Read a stripe off a raid set for recovery. */
+static int stripe_recover_read(struct stripe *stripe, int pi)
+{
+	/* Must not be called with recovery io still in flight. */
+	BUG_ON(stripe_io_ref(stripe));
+
+	/* Invalidate all chunks so that they get read in. */
+	stripe_chunks_invalidate(stripe);
+	stripe_allow_io(stripe); /* Allow io on all recovery chunks. */
+
+	/*
+	 * If we are reconstructing a particular device, we can avoid
+	 * reading the respective chunk in, because we're going to
+	 * reconstruct it anyway.
+	 *
+	 * We can't do that for resynchronization of rotating parity,
+	 * because the recovery stripe chunk size is typically larger
+	 * than the sets chunk size.
+	 */
+	if (pi > -1)
+		ClearChunkIo(CHUNK(stripe, pi));
+
+	return stripe_chunks_rw(stripe);
+}
+
+/* Write a stripe to a raid set for recovery. */
+static int stripe_recover_write(struct stripe *stripe, int pi)
+{
+	/* Must not be called with recovery io still in flight. */
+	BUG_ON(stripe_io_ref(stripe));
+
+	/*
+	 * If this is a reconstruct of a particular device, then
+	 * reconstruct the respective chunk, else create parity chunk.
+	 */
+	if (pi > -1) {
+		/* Zero the target chunk and xor all others into it. */
+		stripe_zero_chunk(stripe, pi);
+		common_xor(stripe, stripe->io.size, 0, pi);
+		chunk_set(CHUNK(stripe, pi), DIRTY);
+	} else
+		parity_xor(stripe);
+
+	return stripe_chunks_rw(stripe);
+}
+
+/* Read/write a recovery stripe. */
+static int stripe_recover_rw(struct stripe *stripe)
+{
+	int r = 0, sync = 0;
+
+	/*
+	 * Read/write flip-flop: RBW flag set -> read phase,
+	 * Merged flag set -> write phase of the same segment.
+	 */
+	if (TestClearStripeRBW(stripe)) {
+		SetStripeMerged(stripe);
+		stripe->key = stripe->recover->pos;
+		r = stripe_recover_read(stripe, dev_for_parity(stripe, &sync));
+		BUG_ON(!r);
+	} else if (TestClearStripeMerged(stripe)) {
+		r = stripe_recover_write(stripe, dev_for_parity(stripe, &sync));
+		BUG_ON(!r);
+	}
+
+	BUG_ON(sync);
+	return r;
+}
+
+/* Recovery bandwidth available? Returns 1 if recovery may proceed. */
+static int recover_bandwidth(struct raid_set *rs)
+{
+	int r, work;
+
+	/* On reset or when bios delayed -> allow recovery. */
+	r = recover_io_reset(rs);
+	if (r || RSBandwidth(rs))
+		goto out;
+
+	work = atomic_read(rs->recover.io_count + IO_WORK);
+	if (work) {
+		/*
+		 * Pay attention to larger recover stripe size:
+		 * scale the recovery io count to work io units.
+		 */
+		int recover = atomic_read(rs->recover.io_count + IO_RECOVER) *
+			      rs->recover.io_size / rs->set.io_size;
+
+		/*
+		 * Don't use more than given bandwidth
+		 * of the work io for recovery.
+		 */
+		if (recover > work / rs->recover.bandwidth_work) {
+			/* REMOVEME: statistics. */
+			atomic_inc(rs->stats + S_NO_BANDWIDTH);
+			return 0;
+		}
+	}
+
+out:
+	atomic_inc(rs->stats + S_BANDWIDTH); /* REMOVEME: statistics. */
+	return 1;
+}
+
+/*
+ * Try to get a region to recover.
+ *
+ * Returns:
+ *  1	    -> a region is already in the works
+ *  0	    -> a new region got quiesced (takes one global io reference)
+ *  -EPERM  -> set is suspending
+ *  -ENOENT -> all regions are in sync, nothing left to recover
+ *  -EAGAIN -> no bandwidth or no quiesced region yet; retry later
+ */
+static int stripe_recover_get_region(struct stripe *stripe)
+{
+	struct raid_set *rs = RS(stripe->sc);
+	struct recover *rec = &rs->recover;
+	struct recover_addr *addr = stripe->recover;
+	struct dm_dirty_log *dl = rec->dl;
+	struct dm_rh_client *rh = rec->rh;
+
+	BUG_ON(!dl);
+	BUG_ON(!rh);
+
+	/* Return, that we have region first to finish it during suspension. */
+	if (addr->reg)
+		return 1;
+
+	if (RSSuspend(rs))
+		return -EPERM;
+
+	if (dl->type->get_sync_count(dl) >= rec->nr_regions)
+		return -ENOENT;
+
+	/* If we don't have enough bandwidth, we don't proceed recovering. */
+	if (!recover_bandwidth(rs))
+		return -EAGAIN;
+
+	/* Start quiescing a region. */
+	dm_rh_recovery_prepare(rh);
+	addr->reg = dm_rh_recovery_start(rh);
+	if (!addr->reg)
+		return -EAGAIN;
+
+	/* Translate region to the recovery position/end sectors. */
+	addr->pos = dm_rh_region_to_sector(rh, dm_rh_get_region_key(addr->reg));
+	addr->end = addr->pos + dm_rh_get_region_size(rh);
+
+	/*
+	 * Take one global io reference out for the
+	 * whole region, which is going to be released
+	 * when the region is completely done with.
+	 */
+	io_get(rs);
+	return 0;
+}
+
+/* Update region hash state. */
+enum recover_type { REC_FAILURE = 0, REC_SUCCESS = 1 };
+static void recover_rh_update(struct stripe *stripe, enum recover_type success)
+{
+	struct recover_addr *addr = stripe->recover;
+	struct raid_set *rs = RS(stripe->sc);
+	struct recover *rec = &rs->recover;
+
+	if (!addr->reg) {
+		DMERR("%s- Called w/o region", __func__);
+		return;
+	}
+
+	/* Report recovery result for the region to the region hash. */
+	dm_rh_recovery_end(addr->reg, success);
+	if (success)
+		rec->nr_regions_recovered++;
+
+	addr->reg = NULL;
+
+	/*
+	 * Completely done with this region ->
+	 * release the 1st io reference.
+	 */
+	io_put(rs);
+}
+
+/* Set start of recovery state (timestamps used for recovery statistics). */
+static void set_start_recovery(struct raid_set *rs)
+{
+	/* Initialize recovery. */
+	rs->recover.start_jiffies = jiffies;
+	rs->recover.end_jiffies = 0;
+}
+
+/* Set end of recovery state. */
+static void set_end_recovery(struct raid_set *rs)
+{
+	ClearRSRecover(rs);
+/*
+ * Caution: do not reset this any more -> the 'i' would stay in the status
+ * output and userspace might rely on it disappearing!!!!
+ * (Translated from the original German note; the reset below seems to
+ * contradict it — TODO confirm intended behavior.)
+ */
+	rs->set.dev_to_init = -1;
+
+	/* Check for jiffies overrun. */
+	rs->recover.end_jiffies = jiffies;
+	if (rs->recover.end_jiffies < rs->recover.start_jiffies)
+		rs->recover.end_jiffies = ~0;
+}
+
+/*
+ * Handle recovery on one recovery stripe.
+ *
+ * Returns 1 while recovery on this stripe is still in progress
+ * (io in flight, region in the works, suspension or retry) and
+ * 0 when this stripe is done (no more regions or fatal error).
+ */
+static int _do_recovery(struct stripe *stripe)
+{
+	int r;
+	struct raid_set *rs = RS(stripe->sc);
+	struct recover_addr *addr = stripe->recover;
+
+	/* If recovery is active -> return. */
+	if (stripe_io_ref(stripe))
+		return 1;
+
+	/* IO error is fatal for recovery -> stop it. */
+	if (unlikely(StripeError(stripe)))
+		goto err;
+
+	/* Recovery end required. */
+	if (unlikely(RSDegraded(rs)))
+		goto err;
+
+	/* Get a region to recover. */
+	r = stripe_recover_get_region(stripe);
+	switch (r) {
+	case 0: /* Got a new region: flag initial read before write. */
+		SetStripeRBW(stripe);
+		/* Fall through. */
+	case 1: /* Have a region in the works. */
+		break;
+	case -EAGAIN:
+		/* No bandwidth/quiesced region yet, try later. */
+		if (!io_ref(rs))
+			wake_do_raid_delayed(rs, HZ / 4);
+		/* Fall through. */
+	case -EPERM:
+		/* Suspend. */
+		return 1;
+	case -ENOENT:	/* No more regions to recover. */
+		schedule_work(&rs->io.ws_do_table_event);
+		return 0;
+	default:
+		BUG();
+	}
+
+	/* Read/write a recover stripe. */
+	r = stripe_recover_rw(stripe);
+	if (r)
+		/* IO initiated. */
+		return 1;
+
+	/* Read and write finished-> update recovery position within region. */
+	addr->pos += stripe->io.size;
+
+	/* If we're at end of region, update region hash. */
+	if (addr->pos >= addr->end ||
+	    addr->pos >= rs->set.sectors_per_dev)
+		recover_rh_update(stripe, REC_SUCCESS);
+	else
+		/* Prepare to read next region segment. */
+		SetStripeRBW(stripe);
+
+	/* Schedule myself for another round... */
+	wake_do_raid(rs);
+	return 1;
+
+err:
+	/* FIXME: rather try recovering other regions on error? */
+	rs_check_degrade(stripe);
+	recover_rh_update(stripe, REC_FAILURE);
+
+	/* Check state of partially recovered array. */
+	if (RSDegraded(rs) && !RSDead(rs) &&
+	    rs->set.dev_to_init != -1 &&
+	    rs->set.ei != rs->set.dev_to_init) {
+		/* Broken drive != drive to recover -> FATAL. */
+		SetRSDead(rs);
+		DMERR("FATAL: failed device != device to initialize -> "
+		      "RAID set broken");
+	}
+
+	if (StripeError(stripe) || RSDegraded(rs)) {
+		char buf[BDEVNAME_SIZE];
+
+		DMERR("stopping recovery due to "
+		      "ERROR on /dev/%s, stripe at offset %llu",
+		      bdevname(rs->dev[rs->set.ei].dev->bdev, buf),
+		      (unsigned long long) stripe->key);
+	}
+
+	/* Make sure, that all quiesced regions get released. */
+	while (addr->reg) {
+		/*
+		 * Report failure: the previous -EIO argument was nonzero
+		 * and hence counted as *success* by dm_rh_recovery_end().
+		 */
+		dm_rh_recovery_end(addr->reg, REC_FAILURE);
+		addr->reg = dm_rh_recovery_start(rs->recover.rh);
+	}
+
+	return 0;
+}
+
+/* Called by main io daemon to recover regions. */
+static int do_recovery(struct raid_set *rs)
+{
+	if (RSRecover(rs)) {
+		int r = 0;
+		struct stripe *stripe;
+
+		/* Work all recovery stripes; r sums their "in progress" results. */
+		list_for_each_entry(stripe, &rs->recover.stripes,
+				    lists[LIST_RECOVER])
+			r += _do_recovery(stripe);
+
+		/* Any stripe still busy -> recovery not finished yet. */
+		if (r)
+			return r;
+
+		/* All stripes done -> finish recovery and free its stripes. */
+		set_end_recovery(rs);
+		stripe_recover_free(rs);
+	}
+
+	return 0;
+}
+
+/*
+ * END recovery functions
+ */
+
+/* End io process all stripes handed in by endio() callback. */
+static void _do_endios(struct raid_set *rs, struct stripe *stripe,
+		       struct list_head *flush_list)
+{
+	/* First unlock all required chunks. */
+	stripe_chunks_unlock(stripe);
+
+	/*
+	 * If an io error on a stripe occurred, degrade the RAID set
+	 * and try to endio as many bios as possible. If any bios can't
+	 * be endio processed, requeue the stripe (stripe_ref() != 0).
+	 */
+	if (TestClearStripeError(stripe)) {
+		/*
+		 * FIXME: if read, rewrite the failed chunk after reconstruction
+		 *	  in order to trigger disk bad sector relocation.
+		 */
+		rs_check_degrade(stripe); /* Resets ChunkError(). */
+		ClearStripeReconstruct(stripe);
+		ClearStripeReconstructed(stripe);
+
+		/*
+		 * FIXME: if write, don't endio writes in flight and don't
+		 *	  allow for new writes until userspace has updated
+		 *	  its metadata.
+		 */
+	}
+
+	/* Got to reconstruct a missing chunk. */
+	if (StripeReconstruct(stripe)) {
+		/*
+		 * (*2*) We use StripeReconstruct() to allow for
+		 *	 all chunks to be xored into the reconstructed
+		 *	 one (see chunk_must_xor()).
+		 */
+		stripe_reconstruct(stripe);
+
+		/*
+		 * (*3*) Now we reset StripeReconstruct() and flag
+		 *	 StripeReconstructed() to show to stripe_rw(),
+		 *	 that we have reconstructed a missing chunk.
+		 */
+		ClearStripeReconstruct(stripe);
+		SetStripeReconstructed(stripe);
+
+		/* FIXME: reschedule to be written in case of read. */
+		/* if (!RSDead && RSDegraded(rs) !StripeRBW(stripe)) {
+			chunk_set(CHUNK(stripe, stripe->idx.recover), DIRTY);
+			stripe_chunks_rw(stripe);
+		} */
+
+		stripe->idx.recover = -1;
+	}
+
+	/*
+	 * Now that we eventually got a complete stripe, we
+	 * can process the rest of the end ios on reads.
+	 */
+	stripe_endio(READ, stripe);
+
+	/* End io all merged writes if not prohibited. */
+	if (!RSProhibitWrites(rs) && StripeMerged(stripe)) {
+		ClearStripeMerged(stripe);
+		stripe_endio(WRITE_MERGED, stripe);
+	}
+
+	/* If RAID set is dead -> fail any ios to dead drives. */
+	if (RSDead(rs)) {
+		/* Log the message only once per set. */
+		if (!TestSetRSDeadEndioMessage(rs))
+			DMERR("RAID set dead: failing ios to dead devices");
+
+		stripe_fail_io(stripe);
+	}
+
+	/*
+	 * We have stripe references still,
+	 * because of read before writes or IO errors ->
+	 * got to put on flush list for processing.
+	 */
+	if (stripe_ref(stripe)) {
+		BUG_ON(!list_empty(stripe->lists + LIST_LRU));
+		list_add_tail(stripe->lists + LIST_FLUSH, flush_list);
+		atomic_inc(rs->stats + S_REQUEUE); /* REMOVEME: statistics. */
+	} else
+		stripe_lru_add(stripe);
+}
+
+/* Pop any endio stripes off of the endio list and belabour them. */
+static void do_endios(struct raid_set *rs)
+{
+	struct stripe_cache *sc = &rs->sc;
+	struct stripe *stripe;
+	/* IO flush list for sorted requeued stripes. */
+	struct list_head flush_list;
+
+	INIT_LIST_HEAD(&flush_list);
+
+	while ((stripe = stripe_endio_pop(sc))) {
+		/* Avoid endio on stripes with newly io'ed chunks. */
+		if (!stripe_io_ref(stripe))
+			_do_endios(rs, stripe, &flush_list);
+	}
+
+	/*
+	 * Insert any requeued stripes in the proper
+	 * order at the beginning of the io (flush) list.
+	 */
+	list_splice(&flush_list, sc->lists + LIST_FLUSH);
+}
+
+/*
+ * Flush any stripes on the io list.
+ *
+ * Returns the accumulated result of stripe_rw() over all popped stripes.
+ */
+static int do_flush(struct raid_set *rs)
+{
+	struct stripe *stripe;
+	int ios = 0;
+
+	/* Pop each stripe off the io list and read/write it. */
+	for (stripe = stripe_io_pop(&rs->sc); stripe;
+	     stripe = stripe_io_pop(&rs->sc))
+		ios += stripe_rw(stripe);
+
+	return ios;
+}
+
+/* Stripe cache resizing (requested via rs->sc.stripes_to_set). */
+static void do_sc_resize(struct raid_set *rs)
+{
+	unsigned set = atomic_read(&rs->sc.stripes_to_set);
+
+	if (set) {
+		unsigned cur = atomic_read(&rs->sc.stripes);
+		/* Grow or shrink towards the requested stripe count. */
+		int r = (set > cur) ? sc_grow(&rs->sc, set - cur, SC_GROW) :
+				      sc_shrink(&rs->sc, cur - set);
+
+		/* Flag end of resizing if ok. */
+		if (!r)
+			atomic_set(&rs->sc.stripes_to_set, 0);
+	}
+}
+
+/*
+ * Process all ios
+ *
+ * We do different things with the io depending
+ * on the state of the region that it is in:
+ *
+ * o reads: hang off stripe cache or postpone if full
+ *
+ * o writes:
+ *
+ *  CLEAN/DIRTY/NOSYNC:	increment pending and hang io off stripe's stripe set.
+ *			In case stripe cache is full or busy, postpone the io.
+ *
+ *  RECOVERING:		delay the io until recovery of the region completes.
+ *
+ */
+static void do_ios(struct raid_set *rs, struct bio_list *ios)
+{
+	int r;
+	unsigned flush = 0, delay = 0;
+	sector_t sector;
+	struct dm_rh_client *rh = rs->recover.rh;
+	struct bio *bio;
+	struct bio_list reject;	/* Bios to be retried on the next run. */
+
+	bio_list_init(&reject);
+
+	/*
+	 * Classify each io:
+	 * o delay writes to recovering regions (let reads go through)
+	 * o queue io to all other regions
+	 */
+	while ((bio = bio_list_pop(ios))) {
+		/*
+		 * In case we get a barrier bio, push it back onto
+		 * the input queue unless all work queues are empty
+		 * and the stripe cache is inactive.
+		 */
+		if (bio->bi_rw & REQ_FLUSH) {
+			/* REMOVEME: statistics. */
+			atomic_inc(rs->stats + S_BARRIER);
+			if (delay ||
+			    !list_empty(rs->sc.lists + LIST_FLUSH) ||
+			    !bio_list_empty(&reject) ||
+			    sc_active(&rs->sc)) {
+				bio_list_push(ios, bio);
+				break;
+			}
+		}
+
+		/* If writes prohibited because of failures -> postpone. */
+		if (RSProhibitWrites(rs) && bio_data_dir(bio) == WRITE) {
+			bio_list_add(&reject, bio);
+			continue;
+		}
+
+		/* Check for recovering regions. */
+		sector = _sector(rs, bio);
+		r = region_state(rs, sector, DM_RH_RECOVERING);
+		if (unlikely(r)) {
+			delay++;
+			/* Wait writing to recovering regions. */
+			dm_rh_delay_by_region(rh, bio,
+					      dm_rh_sector_to_region(rh,
+								     sector));
+			/* REMOVEME: statistics. */
+			atomic_inc(rs->stats + S_DELAYED_BIOS);
+			atomic_inc(rs->stats + S_SUM_DELAYED_BIOS);
+
+			/* Force bandwidth tests in recovery. */
+			SetRSBandwidth(rs);
+		} else {
+			/*
+			 * Process ios to non-recovering regions by queueing
+			 * them to stripes (does dm_rh_inc()) for writes).
+			 */
+			flush += stripe_queue_bio(rs, bio, &reject);
+		}
+	}
+
+	if (flush) {
+		/* FIXME: better error handling. */
+		r = dm_rh_flush(rh); /* Writes got queued -> flush dirty log. */
+		if (r)
+			DMERR_LIMIT("dirty log flush");
+	}
+
+	/* Merge any rejected bios back to the head of the input list. */
+	bio_list_merge_head(ios, &reject);
+}
+
+/* Send an event in case we're getting too busy. */
+static void do_busy_event(struct raid_set *rs)
+{
+	/* Not busy (any more) -> just clear the busy flag. */
+	if (!sc_busy(rs)) {
+		ClearRSScBusy(rs);
+		return;
+	}
+
+	/* Throw a table event only once per busy period. */
+	if (!TestSetRSScBusy(rs))
+		schedule_work(&rs->io.ws_do_table_event);
+}
+
+/* Throw an event (workqueue callback; signals table state changes). */
+static void do_table_event(struct work_struct *ws)
+{
+	struct raid_set *rs = container_of(ws, struct raid_set,
+					   io.ws_do_table_event);
+	dm_table_event(rs->ti->table);
+}
+
+
+/*-----------------------------------------------------------------
+ * RAID daemon
+ *---------------------------------------------------------------*/
+/*
+ * o belabour all end ios
+ * o update the region hash states
+ * o optionally shrink the stripe cache
+ * o optionally do recovery
+ * o unplug any component raid devices with queued bios
+ * o grab the input queue
+ * o work an all requeued or new ios and perform stripe cache flushs
+ * o unplug any component raid devices with queued bios
+ * o check, if the stripe cache gets too busy and throw an event if so
+ */
+static void do_raid(struct work_struct *ws)
+{
+	/* NOTE(review): r stores do_recovery()/do_flush() results but is never read. */
+	int r;
+	struct raid_set *rs = container_of(ws, struct raid_set,
+					   io.dws_do_raid.work);
+	struct bio_list *ios = &rs->io.work, *ios_in = &rs->io.in;
+
+	/*
+	 * We always need to end io, so that ios can get errored in
+	 * case the set failed and the region counters get decremented
+	 * before we update region hash states and go any further.
+	 */
+	do_endios(rs);
+	dm_rh_update_states(rs->recover.rh, 1);
+
+	/*
+	 * Now that we've end io'd, which may have put stripes on the LRU list
+	 * to allow for shrinking, we resize the stripe cache if requested.
+	 */
+	do_sc_resize(rs);
+
+	/* Try to recover regions. */
+	r = do_recovery(rs);
+
+	/* Quickly grab all new ios queued and add them to the work list. */
+	mutex_lock(&rs->io.in_lock);
+	bio_list_merge(ios, ios_in);
+	bio_list_init(ios_in);
+	mutex_unlock(&rs->io.in_lock);
+
+	if (!bio_list_empty(ios))
+		do_ios(rs, ios); /* Got ios to work into the cache. */
+
+	r = do_flush(rs);		/* Flush any stripes on io list. */
+
+	do_busy_event(rs);	/* Check if we got too busy. */
+}
+
+/*
+ * Callback for region hash to dispatch
+ * delayed bios queued to recovered regions
+ * (gets called via dm_rh_update_states()).
+ */
+static void dispatch_delayed_bios(void *context, struct bio_list *bl)
+{
+	struct raid_set *rs = context;
+	struct bio *bio;
+
+	/* REMOVEME: statistics; decrement pending delayed bios counter. */
+	bio_list_for_each(bio, bl)
+		atomic_dec(rs->stats + S_DELAYED_BIOS);
+
+	/* Merge region hash private list to work list. */
+	bio_list_merge_head(&rs->io.work, bl);
+	bio_list_init(bl);
+	/* Delayed bios dispatched -> stop forcing bandwidth tests. */
+	ClearRSBandwidth(rs);
+}
+
+/*************************************************************
+ * Constructor helpers
+ *************************************************************/
+/*
+ * Calculate MB/sec from the measured xor speed
+ * (rs->xor.speed is in units of xors per XOR_SPEED_TICKS ticks —
+ * TODO confirm against xor_speed()/xor_optimize()).
+ */
+static unsigned mbpers(struct raid_set *rs, unsigned io_size)
+{
+	return to_bytes((rs->xor.speed * rs->set.data_devs *
+			 io_size * HZ / XOR_SPEED_TICKS) >> 10) >> 10;
+}
+
+/*
+ * Discover fastest xor algorithm and # of chunks combination.
+ */
+/*
+ * Calculate speed of particular algorithm and # of chunks:
+ * busy-waits for up to XOR_SPEED_TICKS jiffies, counting how many
+ * common_xor() calls fit into one jiffy; returns the maximum count.
+ */
+static unsigned xor_speed(struct stripe *stripe)
+{
+	int ticks = XOR_SPEED_TICKS;
+	unsigned p = RS(stripe->sc)->set.raid_devs, r = 0;
+	unsigned long j;
+
+	/* Set uptodate so that common_xor()->xor() will belabour chunks. */
+	while (p--)
+		SetChunkUptodate(CHUNK(stripe, p));
+
+	/* Wait for next tick. */
+	for (j = jiffies; j == jiffies; );
+
+	/* Do xors for a few ticks. */
+	while (ticks--) {
+		unsigned xors = 0;
+
+		for (j = jiffies; j == jiffies; ) {
+			mb();
+			common_xor(stripe, stripe->io.size, 0, 0);
+			mb();
+			xors++;
+			mb();
+		}
+
+		/* Keep the best (highest) xors-per-tick figure. */
+		if (xors > r)
+			r = xors;
+	}
+
+	return r;
+}
+
+/* Define for xor multi recovery stripe optimization runs. */
+#define DMRAID45_XOR_TEST
+
+/*
+ * Optimize xor algorithm for this RAID set:
+ * benchmark every xor function in xor_funcs[] at every chunk count
+ * and memorize the fastest combination in rs->xor.{f,chunks}.
+ * Returns the best measured speed (xors per tick).
+ */
+static unsigned xor_optimize(struct raid_set *rs)
+{
+	unsigned chunks_max = 2, speed_max = 0;
+	struct xor_func *f = ARRAY_END(xor_funcs), *f_max = NULL;
+	struct stripe *stripe;
+	unsigned io_size = 0, speed_hm = 0, speed_min = ~0, speed_xor_blocks = 0;
+
+	BUG_ON(list_empty(&rs->recover.stripes));
+#ifndef DMRAID45_XOR_TEST
+	/* Without the test define, benchmark on the first recovery stripe only. */
+	stripe = list_first_entry(&rs->recover.stripes, struct stripe,
+				  lists[LIST_RECOVER]);
+#endif
+
+	/* Try all xor functions. */
+	while (f-- > xor_funcs) {
+		unsigned speed;
+
+#ifdef DMRAID45_XOR_TEST
+		/* With the test define, benchmark on all recovery stripes. */
+		list_for_each_entry(stripe, &rs->recover.stripes,
+				    lists[LIST_RECOVER]) {
+			io_size = stripe->io.size;
+#endif
+
+		/* Set actual xor function for common_xor(). */
+		rs->xor.f = f;
+		rs->xor.chunks = (f->f == xor_blocks_wrapper ?
+				  (MAX_XOR_BLOCKS + 1) :
+				  XOR_CHUNKS_MAX);
+		if (rs->xor.chunks > rs->set.raid_devs)
+			rs->xor.chunks = rs->set.raid_devs;
+
+		/* Benchmark every chunk count down to 2. */
+		for ( ; rs->xor.chunks > 1; rs->xor.chunks--) {
+			speed = xor_speed(stripe);
+
+#ifdef DMRAID45_XOR_TEST
+			/* Track per-implementation and overall extremes. */
+			if (f->f == xor_blocks_wrapper) {
+				if (speed > speed_xor_blocks)
+					speed_xor_blocks = speed;
+			} else if (speed > speed_hm)
+				speed_hm = speed;
+
+			if (speed < speed_min)
+				speed_min = speed;
+#endif
+
+			if (speed > speed_max) {
+				speed_max = speed;
+				chunks_max = rs->xor.chunks;
+				f_max = f;
+			}
+		}
+#ifdef DMRAID45_XOR_TEST
+		}
+#endif
+	}
+
+	/* Memorize optimal parameters. */
+	rs->xor.f = f_max;
+	rs->xor.chunks = chunks_max;
+#ifdef DMRAID45_XOR_TEST
+	DMINFO("%s stripes=%u/size=%u min=%u xor_blocks=%u hm=%u max=%u",
+	       speed_max == speed_hm ? "HM" : "NB",
+	       rs->recover.recovery_stripes, io_size, speed_min,
+	       speed_xor_blocks, speed_hm, speed_max);
+#endif
+	return speed_max;
+}
+
+/*
+ * Allocate a RAID context (a RAID set)
+ */
+/*
+ * Structure for variable RAID parameters.
+ *
+ * The *_parm members hold the raw values as given on the table line
+ * (-1 = use default); the plain members hold the effective values
+ * (see get_raid_variable_parms()).
+ */
+struct variable_parms {
+	int bandwidth;		/* Effective recovery bandwidth (%). */
+	int bandwidth_parm;
+	int chunk_size;		/* Effective chunk size (sectors). */
+	int chunk_size_parm;
+	int io_size;		/* Effective per-device io size (sectors). */
+	int io_size_parm;
+	int stripes;		/* Effective stripe cache size. */
+	int stripes_parm;
+	int recover_io_size;	/* Effective recovery io size (sectors). */
+	int recover_io_size_parm;
+	int raid_parms;		/* # of variable parameters given (0-7). */
+	int recovery;		/* 1 = "sync", 0 = "nosync". */
+	int recovery_stripes;	/* # of parallel recovery stripes. */
+	int recovery_stripes_parm;
+};
+
+/*
+ * Allocate and initialize a raid_set: creates the dirty log, sizes and
+ * zero-allocates the set structure (including per-device array and data
+ * pointer table), creates the region hash and the stripe cache.
+ * Returns the new set or an ERR_PTR() on failure (ti->error is set).
+ */
+static struct raid_set *
+context_alloc(struct raid_type *raid_type, struct variable_parms *p,
+	      unsigned raid_devs, sector_t sectors_per_dev,
+	      struct dm_target *ti, unsigned dl_parms, char **argv)
+{
+	int r;
+	size_t len;
+	sector_t region_size, ti_len;
+	struct raid_set *rs = NULL;
+	struct dm_dirty_log *dl;
+	struct recover *rec;
+
+	/*
+	 * Create the dirty log
+	 *
+	 * We need to change length for the dirty log constructor,
+	 * because we want an amount of regions for all stripes derived
+	 * from the single device size, so that we can keep region
+	 * size = 2^^n independent of the number of devices
+	 */
+	ti_len = ti->len;
+	ti->len = sectors_per_dev;
+	dl = dm_dirty_log_create(argv[0], ti, NULL, dl_parms, argv + 2);
+	ti->len = ti_len;
+	if (!dl)
+		goto bad_dirty_log;
+
+	/* Chunk size *must* be smaller than region size. */
+	region_size = dl->type->get_region_size(dl);
+	if (p->chunk_size > region_size)
+		goto bad_chunk_size;
+
+	/* Recover io size *must* be smaller than region size as well. */
+	if (p->recover_io_size > region_size)
+		goto bad_recover_io_size;
+
+	/* Size and allocate the RAID set structure. */
+	len = sizeof(*rs->data) + sizeof(*rs->dev);
+	if (dm_array_too_big(sizeof(*rs), len, raid_devs))
+		goto bad_array;
+
+	len = sizeof(*rs) + raid_devs * len;
+	rs = kzalloc(len, GFP_KERNEL);
+	if (!rs)
+		goto bad_alloc;
+
+	rec = &rs->recover;
+	atomic_set(&rs->io.in_process, 0);
+	atomic_set(&rs->io.in_process_max, 0);
+	rec->io_size = p->recover_io_size;
+
+	/* Pointer to data array. */
+	rs->data = (unsigned long **)
+		   ((void *) rs->dev + raid_devs * sizeof(*rs->dev));
+	rec->dl = dl;
+	rs->set.raid_devs = raid_devs;
+	rs->set.data_devs = raid_devs - raid_type->parity_devs;
+	rs->set.raid_type = raid_type;
+
+	rs->set.raid_parms = p->raid_parms;
+	rs->set.chunk_size_parm = p->chunk_size_parm;
+	rs->set.io_size_parm = p->io_size_parm;
+	rs->sc.stripes_parm = p->stripes_parm;
+	rec->io_size_parm = p->recover_io_size_parm;
+	rec->bandwidth_parm = p->bandwidth_parm;
+	rec->recovery = p->recovery;
+	rec->recovery_stripes = p->recovery_stripes;
+
+	/*
+	 * Set chunk and io size and respective shifts
+	 * (used to avoid divisions)
+	 */
+	rs->set.chunk_size = p->chunk_size;
+	rs->set.chunk_shift = ffs(p->chunk_size) - 1;
+
+	rs->set.io_size = p->io_size;
+	rs->set.io_mask = p->io_size - 1;
+	/* Mask to adjust address key in case io_size != chunk_size. */
+	rs->set.io_inv_mask = (p->chunk_size - 1) & ~rs->set.io_mask;
+
+	rs->set.sectors_per_dev = sectors_per_dev;
+
+	rs->set.ei = -1;	/* Indicate no failed device. */
+	atomic_set(&rs->set.failed_devs, 0);
+
+	rs->ti = ti;
+
+	atomic_set(rec->io_count + IO_WORK, 0);
+	atomic_set(rec->io_count + IO_RECOVER, 0);
+
+	/* Initialize io lock and queues. */
+	mutex_init(&rs->io.in_lock);
+	mutex_init(&rs->io.xor_lock);
+	bio_list_init(&rs->io.in);
+	bio_list_init(&rs->io.work);
+
+	init_waitqueue_head(&rs->io.suspendq);	/* Suspend waiters (dm-io). */
+
+	rec->nr_regions = dm_sector_div_up(sectors_per_dev, region_size);
+	rec->rh = dm_region_hash_create(rs, dispatch_delayed_bios,
+			wake_dummy, wake_do_raid, 0, p->recovery_stripes,
+			dl, region_size, rec->nr_regions);
+	if (IS_ERR(rec->rh))
+		goto bad_rh;
+
+	/* Initialize stripe cache. */
+	r = sc_init(rs, p->stripes);
+	if (r)
+		goto bad_sc;
+
+	/* REMOVEME: statistics. */
+	stats_reset(rs);
+	ClearRSDevelStats(rs);	/* Disable development status. */
+	return rs;
+
+bad_dirty_log:
+	TI_ERR_RET("Error creating dirty log", ERR_PTR(-ENOMEM));
+
+bad_chunk_size:
+	dm_dirty_log_destroy(dl);
+	TI_ERR_RET("Chunk size larger than region size", ERR_PTR(-EINVAL));
+
+bad_recover_io_size:
+	dm_dirty_log_destroy(dl);
+	TI_ERR_RET("Recover stripe io size larger than region size",
+			ERR_PTR(-EINVAL));
+
+bad_array:
+	dm_dirty_log_destroy(dl);
+	TI_ERR_RET("Array too big", ERR_PTR(-EINVAL));
+
+bad_alloc:
+	dm_dirty_log_destroy(dl);
+	TI_ERR_RET("Cannot allocate raid context", ERR_PTR(-ENOMEM));
+
+bad_rh:
+	dm_dirty_log_destroy(dl);
+	ti->error = DM_MSG_PREFIX "Error creating dirty region hash";
+	goto free_rs;
+
+bad_sc:
+	dm_region_hash_destroy(rec->rh); /* Destroys dirty log too. */
+	sc_exit(&rs->sc);
+	ti->error = DM_MSG_PREFIX "Error creating stripe cache";
+free_rs:
+	kfree(rs);
+	return ERR_PTR(-ENOMEM);
+}
+
+/* Free a RAID context (a RAID set); p = # of devices to put. */
+static void context_free(struct raid_set *rs, unsigned p)
+{
+	while (p--)
+		dm_put_device(rs->ti, rs->dev[p].dev);
+
+	sc_exit(&rs->sc);
+	dm_region_hash_destroy(rs->recover.rh); /* Destroys dirty log too. */
+	kfree(rs);
+}
+
+/* Create work queue and initialize delayed work. */
+static int rs_workqueue_init(struct raid_set *rs)
+{
+	struct dm_target *ti = rs->ti;
+
+	rs->io.wq = create_singlethread_workqueue(DAEMON);
+	if (!rs->io.wq)
+		TI_ERR_RET("failed to create " DAEMON, -ENOMEM);
+
+	/* Hook up the daemon and the table event worker. */
+	INIT_DELAYED_WORK(&rs->io.dws_do_raid, do_raid);
+	INIT_WORK(&rs->io.ws_do_table_event, do_table_event);
+	return 0;
+}
+
+/*
+ * Return pointer to raid_type structure for raid name,
+ * or NULL if the name is unknown.
+ */
+static struct raid_type *get_raid_type(char *name)
+{
+	struct raid_type *rt;
+
+	/* Scan the table from the end towards the start. */
+	for (rt = ARRAY_END(raid_types); rt-- > raid_types; ) {
+		if (!strcmp(rt->name, name))
+			return rt;
+	}
+
+	return NULL;
+}
+
+/* FIXME: factor out to dm core. */
+/* Store a / b in *n; return whether a is an exact multiple of b. */
+static int multiple(sector_t a, sector_t b, sector_t *n)
+{
+	sector_t q = a;
+
+	/* sector_div() divides q in place. */
+	sector_div(q, b);
+	*n = q;
+
+	/* Exact multiple iff the quotient multiplies back to a. */
+	return a == q * b;
+}
+
+/* Log RAID set information to kernel log. */
+static void rs_log(struct raid_set *rs, unsigned io_size)
+{
+	unsigned p;
+	char buf[BDEVNAME_SIZE];
+
+	/* One line per component device, marking the parity device. */
+	for (p = 0; p < rs->set.raid_devs; p++)
+		DMINFO("/dev/%s is raid disk %u%s",
+				bdevname(rs->dev[p].dev->bdev, buf), p,
+				(p == rs->set.pi) ? " (parity)" : "");
+
+	DMINFO("%d/%d/%d sectors chunk/io/recovery size, %u stripes\n"
+	       "algorithm \"%s\", %u chunks with %uMB/s\n"
+	       "%s set with net %u/%u devices",
+	       rs->set.chunk_size, rs->set.io_size, rs->recover.io_size,
+	       atomic_read(&rs->sc.stripes),
+	       rs->xor.f->name, rs->xor.chunks, mbpers(rs, io_size),
+	       rs->set.raid_type->descr, rs->set.data_devs, rs->set.raid_devs);
+}
+
+/*
+ * Get all devices and offsets.
+ *
+ * Parses <dev_path> <offset> pairs from argv into rs->dev[];
+ * *p returns the number of devices acquired (for context_free()).
+ */
+static int dev_parms(struct raid_set *rs, char **argv, int *p)
+{
+	struct dm_target *ti = rs->ti;
+
+/* NOTE(review): unindented DMINFO looks like leftover debug output. */
+DMINFO("rs->set.sectors_per_dev=%llu", (unsigned long long) rs->set.sectors_per_dev);
+	for (*p = 0; *p < rs->set.raid_devs; (*p)++, argv += 2) {
+		int r;
+		unsigned long long tmp;
+		struct raid_dev *dev = rs->dev + *p;
+
+		/* Get offset and device. */
+		if (sscanf(argv[1], "%llu", &tmp) != 1 ||
+		    tmp > rs->set.sectors_per_dev)
+			TI_ERR("Invalid RAID device offset parameter");
+
+		dev->start = tmp;
+		r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
+				  &dev->dev);
+		if (r)
+			TI_ERR_RET("RAID device lookup failure", r);
+
+		/* Reject a device that was already given earlier in the list. */
+		r = raid_dev_lookup(rs, dev);
+		if (r != -ENODEV && r < *p) {
+			(*p)++;	/* Ensure dm_put_device() on actual device. */
+			TI_ERR_RET("Duplicate RAID device", -ENXIO);
+		}
+	}
+
+	return 0;
+}
+
+/* Set recovery bandwidth. */
+static void
+recover_set_bandwidth(struct raid_set *rs, unsigned bandwidth)
+{
+	struct recover *rec = &rs->recover;
+
+	/* Remember percentage and derive the work/recovery io ratio. */
+	rec->bandwidth = bandwidth;
+	rec->bandwidth_work = 100 / bandwidth;
+}
+
+/*
+ * Handle variable number of RAID parameters.
+ *
+ * Parses up to 7 optional table-line arguments into *vp, driven by the
+ * argctr[] constraint table; -1 selects the preset default for a value.
+ */
+static int get_raid_variable_parms(struct dm_target *ti, char **argv,
+				   struct variable_parms *vp)
+{
+	int p, value;
+	struct {
+		int action; /* -1: skip, 0: no power2 check, 1: power2 check */
+		char *errmsg;
+		int min, max;
+		int *var, *var2, *var3;
+	} argctr[] = {
+		{ 1,
+		  "Invalid chunk size; must be -1 or 2^^n and <= 16384",
+		  IO_SIZE_MIN, CHUNK_SIZE_MAX,
+		  &vp->chunk_size_parm, &vp->chunk_size, &vp->io_size },
+		{ 0,
+		  "Invalid number of stripes: must be -1 or >= 8 and <= 16384",
+		  STRIPES_MIN, STRIPES_MAX,
+		  &vp->stripes_parm, &vp->stripes, NULL },
+		{ 1,
+		  "Invalid io size; must -1 or >= 8, 2^^n and less equal "
+		  "min(BIO_MAX_SECTORS/2, chunk size)",
+		  IO_SIZE_MIN, 0, /* Needs to be updated in loop below. */
+		  &vp->io_size_parm, &vp->io_size, NULL },
+		{ 1,
+		  "Invalid recovery io size; must be -1 or "
+		  "2^^n and less equal BIO_MAX_SECTORS/2",
+		  RECOVER_IO_SIZE_MIN, BIO_MAX_SECTORS / 2,
+		  &vp->recover_io_size_parm, &vp->recover_io_size, NULL },
+		{ 0,
+		  "Invalid recovery bandwidth percentage; "
+		  "must be -1 or > 0 and <= 100",
+		  BANDWIDTH_MIN, BANDWIDTH_MAX,
+		  &vp->bandwidth_parm, &vp->bandwidth, NULL },
+		/* Handle sync argument separately in loop. */
+		{ -1,
+		  "Invalid recovery switch; must be \"sync\" or \"nosync\"" },
+		{ 0,
+		  "Invalid number of recovery stripes;"
+		  "must be -1, > 0 and <= 64",
+		  RECOVERY_STRIPES_MIN, RECOVERY_STRIPES_MAX,
+		  &vp->recovery_stripes_parm, &vp->recovery_stripes, NULL },
+	}, *varp;
+
+	/* Fetch # of variable raid parameters. */
+	if (sscanf(*(argv++), "%d", &vp->raid_parms) != 1 ||
+	    !range_ok(vp->raid_parms, 0, 7))
+		TI_ERR("Bad variable raid parameters number");
+
+	/* Preset variable RAID parameters. */
+	vp->chunk_size = CHUNK_SIZE_DEFAULT;
+	vp->io_size = IO_SIZE_DEFAULT;
+	vp->stripes = STRIPES_DEFAULT;
+	vp->recover_io_size = RECOVER_IO_SIZE_DEFAULT;
+	vp->bandwidth = BANDWIDTH_DEFAULT;
+	vp->recovery = 1;
+	vp->recovery_stripes = RECOVERY_STRIPES_DEFAULT;
+
+	/* Walk the array of argument constraints for all given ones. */
+	for (p = 0, varp = argctr; p < vp->raid_parms; p++, varp++) {
+		BUG_ON(varp >= ARRAY_END(argctr));
+
+		/* Special case for "[no]sync" string argument. */
+		if (varp->action < 0) {
+			if (!strcmp(*argv, "sync"))
+				;
+			else if (!strcmp(*argv, "nosync"))
+				vp->recovery = 0;
+			else
+				TI_ERR(varp->errmsg);
+
+			argv++;
+			continue;
+		}
+
+		/*
+		 * Special case for io_size depending
+		 * on previously set chunk size.
+		 */
+		if (p == 2)
+			varp->max = min(BIO_MAX_SECTORS / 2, vp->chunk_size);
+
+		if (sscanf(*(argv++), "%d", &value) != 1 ||
+		    (value != -1 &&
+		     ((varp->action && !is_power_of_2(value)) ||
+		      !range_ok(value, varp->min, varp->max))))
+			TI_ERR(varp->errmsg);
+
+		/* Raw value goes to *var; effective value(s) only if not -1. */
+		*varp->var = value;
+		if (value != -1) {
+			if (varp->var2)
+				*varp->var2 = value;
+			if (varp->var3)
+				*varp->var3 = value;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Parse optional locking parameters.
+ *
+ * Sets *locking_parms to the number of arguments consumed
+ * (2 for "locking none", 0 when no locking clause is given)
+ * and *locking_type to the selected locking implementation.
+ */
+static int get_raid_locking_parms(struct dm_target *ti, char **argv,
+				  int *locking_parms,
+				  struct dm_raid45_locking_type **locking_type)
+{
+	if (!strnicmp(argv[0], "locking", strlen(argv[0]))) {
+		char *lckstr = argv[1];
+		size_t lcksz = strlen(lckstr);
+
+		if (!strnicmp(lckstr, "none", lcksz)) {
+			*locking_type = &locking_none;
+			*locking_parms = 2;
+			/*
+			 * Return here: falling through would clobber
+			 * locking_parms with 0 below, so the two consumed
+			 * arguments were never reported to the caller.
+			 */
+			return 0;
+		} else if (!strnicmp(lckstr, "cluster", lcksz)) {
+			DMERR("locking type \"%s\" not yet implemented",
+			      lckstr);
+			return -EINVAL;
+		} else {
+			DMERR("unknown locking type \"%s\"", lckstr);
+			return -EINVAL;
+		}
+	}
+
+	*locking_parms = 0;
+	*locking_type = &locking_none;
+	return 0;
+}
+
+/* Set backing device read ahead properties of RAID set. */
+static void rs_set_read_ahead(struct raid_set *rs,
+			      unsigned sectors, unsigned stripes)
+{
+	unsigned ra_pages = dm_div_up(sectors, SECTORS_PER_PAGE);
+	struct mapped_device *md = dm_table_get_md(rs->ti->table);
+	struct backing_dev_info *bdi = &dm_disk(md)->queue->backing_dev_info;
+
+	/* Set read-ahead for the RAID set and the component devices. */
+	if (ra_pages) {
+		unsigned p = rs->set.raid_devs;
+
+		/* Whole-set read-ahead scales with stripes and data devices. */
+		bdi->ra_pages = stripes * ra_pages * rs->set.data_devs;
+
+		while (p--) {
+			struct request_queue *q =
+				bdev_get_queue(rs->dev[p].dev->bdev);
+
+			q->backing_dev_info.ra_pages = ra_pages;
+		}
+	}
+}
+
+/* Set congested function (so the block layer can query set congestion). */
+static void rs_set_congested_fn(struct raid_set *rs)
+{
+	struct mapped_device *md = dm_table_get_md(rs->ti->table);
+	struct backing_dev_info *bdi = &dm_disk(md)->queue->backing_dev_info;
+
+	/* Set congested function and data. */
+	bdi->congested_fn = rs_congested;
+	bdi->congested_data = rs;
+}
+
+/*
+ * Construct a RAID4/5 mapping:
+ *
+ * log_type #log_params <log_params> \
+ * raid_type [#parity_dev] #raid_variable_params <raid_params> \
+ * [locking "none"/"cluster"]
+ * #raid_devs #dev_to_initialize [<dev_path> <offset>]{3,}
+ *
+ * log_type = "core"/"disk",
+ * #log_params = 1-3 (1-2 for core dirty log type, 3 for disk dirty log only)
+ * log_params = [dirty_log_path] region_size [[no]sync])
+ *
+ * raid_type = "raid4", "raid5_la", "raid5_ra", "raid5_ls", "raid5_rs"
+ *
+ * #parity_dev = N if raid_type = "raid4"
+ * o N = -1: pick default = last device
+ * o N >= 0 and < #raid_devs: parity device index
+ *
+ * #raid_variable_params = 0-7; raid_params (-1 = default):
+ * [chunk_size [#stripes [io_size [recover_io_size \
+ * [%recovery_bandwidth [recovery_switch [#recovery_stripes]]]]]]]
+ * o chunk_size (unit to calculate drive addresses; must be 2^^n, > 8
+ * and <= CHUNK_SIZE_MAX)
+ * o #stripes is number of stripes allocated to stripe cache
+ * (must be > 1 and < STRIPES_MAX)
+ * o io_size (io unit size per device in sectors; must be 2^^n and > 8)
+ * o recover_io_size (io unit size per device for recovery in sectors;
+ must be 2^^n, > SECTORS_PER_PAGE and <= region_size)
+ * o %recovery_bandwidth is the maximum amount spent for recovery during
+ * application io (1-100%)
+ * o recovery switch = [sync|nosync]
+ * o #recovery_stripes is the number of recovery stripes used for
+ * parallel recovery of the RAID set
+ * If raid_variable_params = 0, defaults will be used.
+ * Any raid_variable_param can be set to -1 to apply a default
+ *
+ * #raid_devs = N (N >= 3)
+ *
+ * #dev_to_initialize = N
+ * -1: initialize parity on all devices
+ * >= 0 and < #raid_devs: initialize raid_path; used to force reconstruction
+ * of a failed device's content after replacement
+ *
+ * <dev_path> = device_path (eg, /dev/sdd1)
+ * <offset> = begin at offset on <dev_path>
+ *
+ */
+#define MIN_PARMS 13
+static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
+{
+ int dev_to_init, dl_parms, i, locking_parms,
+ parity_parm, pi = -1, r, raid_devs;
+ sector_t tmp, sectors_per_dev;
+ struct dm_raid45_locking_type *locking;
+ struct raid_set *rs;
+ struct raid_type *raid_type;
+ struct variable_parms parms;
+
+ /* Ensure minimum number of parameters. */
+ if (argc < MIN_PARMS)
+ TI_ERR("Not enough parameters");
+
+ /* Fetch # of dirty log parameters. */
+ if (sscanf(argv[1], "%d", &dl_parms) != 1 ||
+ !range_ok(dl_parms, 1, 4711)) /* ;-) */
+ TI_ERR("Bad dirty log parameters number");
+
+ /* Check raid_type. */
+ raid_type = get_raid_type(argv[dl_parms + 2]);
+ if (!raid_type)
+ TI_ERR("Bad raid type");
+
+ /* In case of RAID4, parity drive is selectable. */
+ parity_parm = !!(raid_type->level == raid4);
+
+ /* Handle variable number of RAID parameters. */
+ r = get_raid_variable_parms(ti, argv + dl_parms + parity_parm + 3,
+ &parms);
+ if (r)
+ return r;
+
+ /* Handle any locking parameters. */
+ r = get_raid_locking_parms(ti,
+ argv + dl_parms + parity_parm +
+ parms.raid_parms + 4,
+ &locking_parms, &locking);
+ if (r)
+ return r;
+
+ /* # of raid devices. */
+ i = dl_parms + parity_parm + parms.raid_parms + locking_parms + 4;
+ if (sscanf(argv[i], "%d", &raid_devs) != 1 ||
+ raid_devs < raid_type->minimal_devs)
+ TI_ERR("Invalid number of raid devices");
+
+ /* In case of RAID4, check parity drive index is in limits. */
+ if (raid_type->level == raid4) {
+ /* Fetch index of parity device. */
+ if (sscanf(argv[dl_parms + 3], "%d", &pi) != 1 ||
+ (pi != -1 && !range_ok(pi, 0, raid_devs - 1)))
+ TI_ERR("Invalid RAID4 parity device index");
+ }
+
+ /*
+ * Index of device to initialize starts at 0
+ *
+ * o -1 -> don't initialize a selected device;
+ * initialize parity conforming to algorithm
+ * o 0..raid_devs-1 -> initialize respective device
+ * (used for reconstruction of a replaced device)
+ */
+ if (sscanf(argv[dl_parms + parity_parm + parms.raid_parms +
+ locking_parms + 5], "%d", &dev_to_init) != 1 ||
+ !range_ok(dev_to_init, -1, raid_devs - 1))
+ TI_ERR("Invalid number for raid device to initialize");
+
+ /* Check # of raid device arguments. */
+ if (argc - dl_parms - parity_parm - parms.raid_parms - 6 !=
+ 2 * raid_devs)
+ TI_ERR("Wrong number of raid device/offset arguments");
+
+ /*
+ * Check that the table length is divisible
+ * w/o rest by (raid_devs - parity_devs)
+ */
+ if (!multiple(ti->len, raid_devs - raid_type->parity_devs,
+ &sectors_per_dev))
+ TI_ERR("Target length not divisible by number of data devices");
+
+ /*
+ * Check that the device size is
+ * divisible w/o rest by chunk size
+ */
+ if (!multiple(sectors_per_dev, parms.chunk_size, &tmp))
+ TI_ERR("Device length not divisible by chunk_size");
+
+ /****************************************************************
+ * Now that we checked the constructor arguments ->
+ * let's allocate the RAID set
+ ****************************************************************/
+ rs = context_alloc(raid_type, &parms, raid_devs, sectors_per_dev,
+ ti, dl_parms, argv);
+ if (IS_ERR(rs))
+ return PTR_ERR(rs);
+
+
+ rs->set.dev_to_init = rs->set.dev_to_init_parm = dev_to_init;
+ rs->set.pi = rs->set.pi_parm = pi;
+
+ /* Set RAID4 parity drive index. */
+ if (raid_type->level == raid4)
+ rs->set.pi = (pi == -1) ? rs->set.data_devs : pi;
+
+ recover_set_bandwidth(rs, parms.bandwidth);
+
+ /* Use locking type to lock stripe access. */
+ rs->locking = locking;
+
+ /* Get the device/offset tupels. */
+ argv += dl_parms + 6 + parity_parm + parms.raid_parms;
+ r = dev_parms(rs, argv, &i);
+ if (r)
+ goto err;
+
+ /* Set backing device information (eg. read ahead). */
+ rs_set_read_ahead(rs, 2 * rs->set.chunk_size /* sectors per device */,
+ 2 /* # of stripes */);
+ rs_set_congested_fn(rs); /* Set congested function. */
+ SetRSCheckOverwrite(rs); /* Allow chunk overwrite checks. */
+ rs->xor.speed = xor_optimize(rs); /* Select best xor algorithm. */
+
+ /* Set for recovery of any nosync regions. */
+ if (parms.recovery)
+ SetRSRecover(rs);
+ else {
+ /*
+ * Need to free recovery stripe(s) here in case
+ * of nosync, because xor_optimize uses one.
+ */
+ set_start_recovery(rs);
+ set_end_recovery(rs);
+ stripe_recover_free(rs);
+ }
+
+ /*
+ * Enable parity chunk creation enforcement for
+ * little numbers of array members where it doesn't
+ * gain us performance to xor parity out and back in as
+ * with larger array member numbers.
+ */
+ if (rs->set.raid_devs <= rs->set.raid_type->minimal_devs + 1)
+ SetRSEnforceParityCreation(rs);
+
+ /*
+ * Make sure that dm core only hands maximum io size
+ * length down and pays attention to io boundaries.
+ */
+ ti->split_io = rs->set.io_size;
+ ti->private = rs;
+
+ /* Initialize work queue to handle this RAID set's io. */
+ r = rs_workqueue_init(rs);
+ if (r)
+ goto err;
+
+ rs_log(rs, rs->recover.io_size); /* Log information about RAID set. */
+ return 0;
+
+err:
+ context_free(rs, i);
+ return r;
+}
+
+/*
+ * Destruct a raid mapping
+ */
+static void raid_dtr(struct dm_target *ti)
+{
+ struct raid_set *rs = ti->private;
+
+ destroy_workqueue(rs->io.wq);
+ context_free(rs, rs->set.raid_devs);
+}
+
+/* Raid mapping function. */
+static int raid_map(struct dm_target *ti, struct bio *bio,
+ union map_info *map_context)
+{
+ /* I don't want to waste stripe cache capacity. */
+ if (bio_rw(bio) == READA)
+ return -EIO;
+ else {
+ struct raid_set *rs = ti->private;
+
+ /*
+ * Get io reference to be waiting for to drop
+ * to zero on device suspension/destruction.
+ */
+ io_get(rs);
+ bio->bi_sector -= ti->begin; /* Remap sector. */
+
+ /* Queue io to RAID set. */
+ mutex_lock(&rs->io.in_lock);
+ bio_list_add(&rs->io.in, bio);
+ mutex_unlock(&rs->io.in_lock);
+
+ /* Wake daemon to process input list. */
+ wake_do_raid(rs);
+
+ /* REMOVEME: statistics. */
+ atomic_inc(rs->stats + (bio_data_dir(bio) == READ ?
+ S_BIOS_READ : S_BIOS_WRITE));
+ return DM_MAPIO_SUBMITTED; /* Handle later. */
+ }
+}
+
+/* Device suspend. */
+static void raid_presuspend(struct dm_target *ti)
+{
+ struct raid_set *rs = ti->private;
+ struct dm_dirty_log *dl = rs->recover.dl;
+
+ SetRSSuspend(rs);
+
+ if (RSRecover(rs))
+ dm_rh_stop_recovery(rs->recover.rh);
+
+ cancel_delayed_work(&rs->io.dws_do_raid);
+ flush_workqueue(rs->io.wq);
+ wait_ios(rs); /* Wait for completion of all ios being processed. */
+
+ if (dl->type->presuspend && dl->type->presuspend(dl))
+ /* FIXME: need better error handling. */
+ DMWARN("log presuspend failed");
+}
+
+static void raid_postsuspend(struct dm_target *ti)
+{
+ struct raid_set *rs = ti->private;
+ struct dm_dirty_log *dl = rs->recover.dl;
+
+ if (dl->type->postsuspend && dl->type->postsuspend(dl))
+ /* FIXME: need better error handling. */
+ DMWARN("log postsuspend failed");
+
+}
+
+/* Device resume. */
+static void raid_resume(struct dm_target *ti)
+{
+ struct raid_set *rs = ti->private;
+ struct recover *rec = &rs->recover;
+ struct dm_dirty_log *dl = rec->dl;
+
+DMINFO("%s...", __func__);
+ if (dl->type->resume && dl->type->resume(dl))
+ /* Resume dirty log. */
+ /* FIXME: need better error handling. */
+ DMWARN("log resume failed");
+
+ rec->nr_regions_to_recover =
+ rec->nr_regions - dl->type->get_sync_count(dl);
+
+ /* Restart any unfinished recovery. */
+ if (RSRecover(rs)) {
+ set_start_recovery(rs);
+ dm_rh_start_recovery(rec->rh);
+ }
+
+ ClearRSSuspend(rs);
+}
+
+/* Return stripe cache size. */
+static unsigned sc_size(struct raid_set *rs)
+{
+ return to_sector(atomic_read(&rs->sc.stripes) *
+ (sizeof(struct stripe) +
+ (sizeof(struct stripe_chunk) +
+ (sizeof(struct page_list) +
+ to_bytes(rs->set.io_size) *
+ rs->set.raid_devs)) +
+ (rs->recover.end_jiffies ?
+ 0 : rs->recover.recovery_stripes *
+ to_bytes(rs->set.raid_devs * rs->recover.io_size))));
+}
+
+/* REMOVEME: status output for development. */
+static void raid_devel_stats(struct dm_target *ti, char *result,
+ unsigned *size, unsigned maxlen)
+{
+ unsigned sz = *size;
+ unsigned long j;
+ char buf[BDEVNAME_SIZE], *p;
+ struct stats_map *sm;
+ struct raid_set *rs = ti->private;
+ struct recover *rec = &rs->recover;
+ struct timespec ts;
+
+ DMEMIT("%s %s=%u bw=%u\n",
+ version, rs->xor.f->name, rs->xor.chunks, rs->recover.bandwidth);
+ DMEMIT("act_ios=%d ", io_ref(rs));
+ DMEMIT("act_ios_max=%d\n", atomic_read(&rs->io.in_process_max));
+ DMEMIT("act_stripes=%d ", sc_active(&rs->sc));
+ DMEMIT("act_stripes_max=%d\n",
+ atomic_read(&rs->sc.active_stripes_max));
+
+ for (sm = stats_map; sm < ARRAY_END(stats_map); sm++)
+ DMEMIT("%s%d", sm->str, atomic_read(rs->stats + sm->type));
+
+ DMEMIT(" checkovr=%s\n", RSCheckOverwrite(rs) ? "on" : "off");
+ DMEMIT("sc=%u/%u/%u/%u/%u/%u/%u\n", rs->set.chunk_size,
+ atomic_read(&rs->sc.stripes), rs->set.io_size,
+ rec->recovery_stripes, rec->io_size, rs->sc.hash.buckets,
+ sc_size(rs));
+
+ j = (rec->end_jiffies ? rec->end_jiffies : jiffies) -
+ rec->start_jiffies;
+ jiffies_to_timespec(j, &ts);
+ sprintf(buf, "%ld.%ld", ts.tv_sec, ts.tv_nsec);
+ p = strchr(buf, '.');
+ p[3] = 0;
+
+ DMEMIT("rg=%llu/%llu/%llu/%u %s\n",
+ (unsigned long long) rec->nr_regions_recovered,
+ (unsigned long long) rec->nr_regions_to_recover,
+ (unsigned long long) rec->nr_regions, rec->bandwidth, buf);
+
+ *size = sz;
+}
+
+static int raid_status(struct dm_target *ti, status_type_t type,
+ char *result, unsigned maxlen)
+{
+ unsigned p, sz = 0;
+ char buf[BDEVNAME_SIZE];
+ struct raid_set *rs = ti->private;
+ struct dm_dirty_log *dl = rs->recover.dl;
+ int raid_parms[] = {
+ rs->set.chunk_size_parm,
+ rs->sc.stripes_parm,
+ rs->set.io_size_parm,
+ rs->recover.io_size_parm,
+ rs->recover.bandwidth_parm,
+ -2,
+ rs->recover.recovery_stripes,
+ };
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ /* REMOVEME: statistics. */
+ if (RSDevelStats(rs))
+ raid_devel_stats(ti, result, &sz, maxlen);
+
+ DMEMIT("%u ", rs->set.raid_devs);
+
+ for (p = 0; p < rs->set.raid_devs; p++)
+ DMEMIT("%s ",
+ format_dev_t(buf, rs->dev[p].dev->bdev->bd_dev));
+
+ DMEMIT("2 ");
+ for (p = 0; p < rs->set.raid_devs; p++) {
+ DMEMIT("%c", !DevFailed(rs->dev + p) ? 'A' : 'D');
+
+ if (p == rs->set.pi)
+ DMEMIT("p");
+
+ if (p == rs->set.dev_to_init)
+ DMEMIT("i");
+ }
+
+ DMEMIT(" %llu/%llu ",
+ (unsigned long long) dl->type->get_sync_count(dl),
+ (unsigned long long) rs->recover.nr_regions);
+
+ sz += dl->type->status(dl, type, result+sz, maxlen-sz);
+ break;
+ case STATUSTYPE_TABLE:
+ sz = rs->recover.dl->type->status(rs->recover.dl, type,
+ result, maxlen);
+ DMEMIT("%s %u ", rs->set.raid_type->name, rs->set.raid_parms);
+
+ for (p = 0; p < rs->set.raid_parms; p++) {
+ if (raid_parms[p] > -2)
+ DMEMIT("%d ", raid_parms[p]);
+ else
+ DMEMIT("%s ", rs->recover.recovery ?
+ "sync" : "nosync");
+ }
+
+ DMEMIT("%u %d ", rs->set.raid_devs, rs->set.dev_to_init);
+
+ for (p = 0; p < rs->set.raid_devs; p++)
+ DMEMIT("%s %llu ",
+ format_dev_t(buf, rs->dev[p].dev->bdev->bd_dev),
+ (unsigned long long) rs->dev[p].start);
+ }
+
+ return 0;
+}
+
+/*
+ * Message interface
+ */
+/* Turn a delta into an absolute value. */
+static int _absolute(char *action, int act, int r)
+{
+ size_t len = strlen(action);
+
+ if (len < 2)
+ len = 2;
+
+ /* Make delta absolute. */
+ if (!strncmp("set", action, len))
+ ;
+ else if (!strncmp("grow", action, len))
+ r += act;
+ else if (!strncmp("shrink", action, len))
+ r = act - r;
+ else
+ r = -EINVAL;
+
+ return r;
+}
+
+ /* Change recovery io bandwidth. */
+static int bandwidth_change(struct raid_set *rs, int argc, char **argv,
+ enum raid_set_flags flag)
+{
+ int act = rs->recover.bandwidth, bandwidth;
+
+ if (argc != 2)
+ return -EINVAL;
+
+ if (sscanf(argv[1], "%d", &bandwidth) == 1 &&
+ range_ok(bandwidth, BANDWIDTH_MIN, BANDWIDTH_MAX)) {
+ /* Make delta bandwidth absolute. */
+ bandwidth = _absolute(argv[0], act, bandwidth);
+
+ /* Check range. */
+ if (range_ok(bandwidth, BANDWIDTH_MIN, BANDWIDTH_MAX)) {
+ recover_set_bandwidth(rs, bandwidth);
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/* Set/reset development feature flags. */
+static int devel_flags(struct raid_set *rs, int argc, char **argv,
+ enum raid_set_flags flag)
+{
+ size_t len;
+
+ if (argc != 1)
+ return -EINVAL;
+
+ len = strlen(argv[0]);
+ if (len < 2)
+ len = 2;
+
+ if (!strncmp(argv[0], "on", len))
+ return test_and_set_bit(flag, &rs->io.flags) ? -EPERM : 0;
+ else if (!strncmp(argv[0], "off", len))
+ return test_and_clear_bit(flag, &rs->io.flags) ? 0 : -EPERM;
+ else if (!strncmp(argv[0], "reset", len)) {
+ if (flag == RS_DEVEL_STATS) {
+ if (test_bit(flag, &rs->io.flags)) {
+ stats_reset(rs);
+ return 0;
+ } else
+ return -EPERM;
+ } else {
+ set_bit(flag, &rs->io.flags);
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/* Resize the stripe cache. */
+static int sc_resize(struct raid_set *rs, int argc, char **argv,
+ enum raid_set_flags flag)
+{
+ int act, stripes;
+
+ if (argc != 2)
+ return -EINVAL;
+
+ /* Deny permission in case the daemon is still resizing. */
+ if (atomic_read(&rs->sc.stripes_to_set))
+ return -EPERM;
+
+ if (sscanf(argv[1], "%d", &stripes) == 1 &&
+ stripes > 0) {
+ act = atomic_read(&rs->sc.stripes);
+
+ /* Make delta stripes absolute. */
+ stripes = _absolute(argv[0], act, stripes);
+
+ /*
+ * Check range and that the # of stripes changes.
+ * We leave the resizing to the worker.
+ */
+ if (range_ok(stripes, STRIPES_MIN, STRIPES_MAX) &&
+ stripes != atomic_read(&rs->sc.stripes)) {
+ atomic_set(&rs->sc.stripes_to_set, stripes);
+ wake_do_raid(rs);
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/* Change xor algorithm and number of chunks. */
+static int xor_set(struct raid_set *rs, int argc, char **argv,
+ enum raid_set_flags flag)
+{
+ if (argc == 2) {
+ int chunks;
+ char *algorithm = argv[0];
+ struct xor_func *f = ARRAY_END(xor_funcs);
+
+ if (sscanf(argv[1], "%d", &chunks) == 1 &&
+ range_ok(chunks, 2, XOR_CHUNKS_MAX) &&
+ chunks <= rs->set.raid_devs) {
+ while (f-- > xor_funcs) {
+ if (!strcmp(algorithm, f->name)) {
+ unsigned io_size = 0;
+ struct stripe *stripe = stripe_alloc(&rs->sc, rs->sc.mem_cache_client, SC_GROW);
+
+ DMINFO("xor: %s", f->name);
+ if (f->f == xor_blocks_wrapper &&
+ chunks > MAX_XOR_BLOCKS + 1) {
+ DMERR("chunks > MAX_XOR_BLOCKS"
+ " + 1");
+ break;
+ }
+
+ mutex_lock(&rs->io.xor_lock);
+ rs->xor.f = f;
+ rs->xor.chunks = chunks;
+ rs->xor.speed = 0;
+ mutex_unlock(&rs->io.xor_lock);
+
+ if (stripe) {
+ rs->xor.speed = xor_speed(stripe);
+ io_size = stripe->io.size;
+ stripe_free(stripe, rs->sc.mem_cache_client);
+ }
+
+ rs_log(rs, io_size);
+ return 0;
+ }
+ }
+ }
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * Allow writes after they got prohibited because of a device failure.
+ *
+ * This needs to be called after userspace updated metadata state
+ * based on an event being thrown during device failure processing.
+ */
+static int allow_writes(struct raid_set *rs, int argc, char **argv,
+ enum raid_set_flags flag)
+{
+ if (TestClearRSProhibitWrites(rs)) {
+DMINFO("%s waking", __func__);
+ wake_do_raid(rs);
+ return 0;
+ }
+
+ return -EPERM;
+}
+
+/* Parse the RAID message. */
+/*
+ * 'all[ow_writes]'
+ * 'ba[ndwidth] {se[t],g[row],sh[rink]} #' # e.g 'ba se 50'
+ * 'o[verwrite] {on,of[f],r[eset]}' # e.g. 'o of'
+ * 'sta[tistics] {on,of[f],r[eset]}' # e.g. 'stat of'
+ * 'str[ipecache] {se[t],g[row],sh[rink]} #' # e.g. 'stripe set 1024'
+ * 'xor algorithm #chunks' # e.g. 'xor xor_8 5'
+ *
+ */
+static int raid_message(struct dm_target *ti, unsigned argc, char **argv)
+{
+ if (argc) {
+ size_t len = strlen(argv[0]);
+ struct raid_set *rs = ti->private;
+ struct {
+ const char *name;
+ int (*f) (struct raid_set *rs, int argc, char **argv,
+ enum raid_set_flags flag);
+ enum raid_set_flags flag;
+ } msg_descr[] = {
+ { "allow_writes", allow_writes, 0 },
+ { "bandwidth", bandwidth_change, 0 },
+ { "overwrite", devel_flags, RS_CHECK_OVERWRITE },
+ { "statistics", devel_flags, RS_DEVEL_STATS },
+ { "stripe_cache", sc_resize, 0 },
+ { "xor", xor_set, 0 },
+ }, *m = ARRAY_END(msg_descr);
+
+ if (len < 3)
+ len = 3;
+
+ while (m-- > msg_descr) {
+ if (!strncmp(argv[0], m->name, len))
+ return m->f(rs, argc - 1, argv + 1, m->flag);
+ }
+
+ }
+
+ return -EINVAL;
+}
+/*
+ * END message interface
+ */
+
+/* Provide io hints. */
+static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
+{
+ struct raid_set *rs = ti->private;
+
+ blk_limits_io_min(limits, rs->set.chunk_size);
+ blk_limits_io_opt(limits, rs->set.chunk_size * rs->set.data_devs);
+}
+
+static struct target_type raid_target = {
+ .name = "raid45",
+ .version = {1, 0, 0},
+ .module = THIS_MODULE,
+ .ctr = raid_ctr,
+ .dtr = raid_dtr,
+ .map = raid_map,
+ .presuspend = raid_presuspend,
+ .postsuspend = raid_postsuspend,
+ .resume = raid_resume,
+ .status = raid_status,
+ .message = raid_message,
+ .io_hints = raid_io_hints,
+};
+
+static void init_exit(const char *bad_msg, const char *good_msg, int r)
+{
+ if (r)
+ DMERR("Failed to %sregister target [%d]", bad_msg, r);
+ else
+ DMINFO("%s %s", good_msg, version);
+}
+
+static int __init dm_raid_init(void)
+{
+ int r = dm_register_target(&raid_target);
+
+ init_exit("", "initialized", r);
+ return r;
+}
+
+static void __exit dm_raid_exit(void)
+{
+ dm_unregister_target(&raid_target);
+ init_exit("un", "exit", 0);
+}
+
+/* Module hooks. */
+module_init(dm_raid_init);
+module_exit(dm_raid_exit);
+
+MODULE_DESCRIPTION(DM_NAME " raid4/5 target");
+MODULE_AUTHOR("Heinz Mauelshagen <heinzm@redhat.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("dm-raid4");
+MODULE_ALIAS("dm-raid5");
--- /dev/null
+/*
+ * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
+ *
+ * Module Author: Heinz Mauelshagen (Mauelshagen@RedHat.com)
+ *
+ * Locking definitions for the device-mapper RAID45 target.
+ *
+ * This file is released under the GPL.
+ *
+ */
+
+#ifndef _DM_RAID45_H
+#define _DM_RAID45_H
+
+/* Factor out to dm.h! */
+#define STR_LEN(ptr, str) (ptr), (str), strlen((ptr))
+/* Reference to array end. */
+#define ARRAY_END(a) ((a) + ARRAY_SIZE(a))
+
+enum dm_lock_type { DM_RAID45_EX, DM_RAID45_SHARED };
+
+struct dm_raid45_locking_type {
+ /* Request a lock on a stripe. */
+ void* (*lock)(sector_t key, enum dm_lock_type type);
+
+ /* Release a lock on a stripe. */
+ void (*unlock)(void *lock_handle);
+};
+
+#endif
/*
* Conversion fns
*/
-static region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector)
+region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector)
{
return sector >> rh->region_shift;
}
+EXPORT_SYMBOL_GPL(dm_rh_sector_to_region);
sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region)
{
}
EXPORT_SYMBOL_GPL(dm_rh_update_states);
-static void rh_inc(struct dm_region_hash *rh, region_t region)
+void dm_rh_inc(struct dm_region_hash *rh, region_t region)
{
struct dm_region *reg;
read_unlock(&rh->hash_lock);
}
+EXPORT_SYMBOL_GPL(dm_rh_inc);
void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
{
for (bio = bios->head; bio; bio = bio->bi_next) {
if (bio->bi_rw & REQ_FLUSH)
continue;
- rh_inc(rh, dm_rh_bio_to_region(rh, bio));
+ dm_rh_inc(rh, dm_rh_bio_to_region(rh, bio));
}
}
EXPORT_SYMBOL_GPL(dm_rh_inc_pending);
}
EXPORT_SYMBOL_GPL(dm_rh_delay);
+void dm_rh_delay_by_region(struct dm_region_hash *rh,
+ struct bio *bio, region_t region)
+{
+ struct dm_region *reg;
+
+ /* FIXME: locking. */
+ read_lock(&rh->hash_lock);
+ reg = __rh_find(rh, region);
+ bio_list_add(&reg->delayed_bios, bio);
+ read_unlock(&rh->hash_lock);
+}
+EXPORT_SYMBOL_GPL(dm_rh_delay_by_region);
+
void dm_rh_stop_recovery(struct dm_region_hash *rh)
{
int i;
dd_new = dd_old = *dd;
- dd_new.dm_dev.mode |= new_mode;
+ dd_new.dm_dev.mode = new_mode;
dd_new.dm_dev.bdev = NULL;
r = open_dev(&dd_new, dd->dm_dev.bdev->bd_dev, md);
- if (r)
+ if (r == -EROFS) {
+ dd_new.dm_dev.mode &= ~FMODE_WRITE;
+ r = open_dev(&dd_new, dd->dm_dev.bdev->bd_dev, md);
+ }
+ if (r)
return r;
- dd->dm_dev.mode |= new_mode;
+ dd->dm_dev.mode = new_mode;
close_dev(&dd_old, md);
return 0;
dd->dm_dev.mode = mode;
dd->dm_dev.bdev = NULL;
- if ((r = open_dev(dd, dev, t->md))) {
+ r = open_dev(dd, dev, t->md);
+ if (r == -EROFS) {
+ dd->dm_dev.mode &= ~FMODE_WRITE;
+ r = open_dev(dd, dev, t->md);
+ }
+ if (r) {
kfree(dd);
return r;
}
+ if (dd->dm_dev.mode != mode)
+ t->mode = dd->dm_dev.mode;
+
format_dev_t(dd->dm_dev.name, dev);
atomic_set(&dd->count, 0);
list_add(&dd->list, &t->devices);
- } else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) {
+ } else if (dd->dm_dev.mode != mode) {
r = upgrade_mode(dd, mode, t->md);
if (r)
return r;
*/
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
- struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal,
- dm_dev);
+ struct dm_dev_internal *dd;
+
+ if (!d)
+ return;
+ dd = container_of(d, struct dm_dev_internal, dm_dev);
if (atomic_dec_and_test(&dd->count)) {
close_dev(dd, ti->table->md);
list_del(&dd->list);
static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
struct mapped_device *md;
+ int retval = 0;
spin_lock(&_minor_lock);
md = bdev->bd_disk->private_data;
- if (!md)
+ if (!md) {
+ retval = -ENXIO;
goto out;
+ }
if (test_bit(DMF_FREEING, &md->flags) ||
dm_deleting_md(md)) {
md = NULL;
+ retval = -ENXIO;
+ goto out;
+ }
+ if (get_disk_ro(md->disk) && (mode & FMODE_WRITE)) {
+ md = NULL;
+ retval = -EROFS;
goto out;
}
out:
spin_unlock(&_minor_lock);
- return md ? 0 : -ENXIO;
+ return retval;
}
static int dm_blk_close(struct gendisk *disk, fmode_t mode)
if (!map || !dm_table_get_size(map))
goto out;
- /* We only support devices that have a single target */
- if (dm_table_get_num_targets(map) != 1)
- goto out;
-
- tgt = dm_table_get_target(map, 0);
-
if (dm_suspended_md(md)) {
r = -EAGAIN;
goto out;
}
- if (tgt->type->ioctl)
- r = tgt->type->ioctl(tgt, cmd, arg);
+ if (cmd == BLKRRPART) {
+ /* Emulate Re-read partitions table */
+ kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE);
+ r = 0;
+ } else {
+ /* We only support devices that have a single target */
+ if (dm_table_get_num_targets(map) != 1)
+ goto out;
+
+ tgt = dm_table_get_target(map, 0);
+
+ if (tgt->type->ioctl)
+ r = tgt->type->ioctl(tgt, cmd, arg);
+ }
out:
dm_table_put(map);
clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
write_unlock_irqrestore(&md->map_lock, flags);
+ dm_table_get(md->map);
+ if (!(dm_table_get_mode(t) & FMODE_WRITE))
+ set_disk_ro(md->disk, 1);
+ else
+ set_disk_ro(md->disk, 0);
+ dm_table_put(md->map);
+
return old_map;
}
{
return md->disk;
}
+EXPORT_SYMBOL_GPL(dm_disk);
struct kobject *dm_kobject(struct mapped_device *md)
{
config INTEL_MID_PTI
tristate "Parallel Trace Interface for MIPI P1149.7 cJTAG standard"
- depends on PCI
+ depends on X86_INTEL_MID
default n
help
The PTI (Parallel Trace Interface) driver directs
return;
tp = netdev_priv(dev);
+
+ /* shoot NIC in the head before deallocating descriptors */
+ pci_disable_device(tp->pdev);
+
unregister_netdev(dev);
pci_free_consistent (pdev,
sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
static int __devexit ehea_remove(struct platform_device *dev);
+static struct of_device_id ehea_module_device_table[] = {
+ {
+ .name = "lhea",
+ .compatible = "IBM,lhea",
+ },
+ {
+ .type = "network",
+ .compatible = "IBM,lhea-ethernet",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ehea_module_device_table);
+
static struct of_device_id ehea_device_table[] = {
{
.name = "lhea",
},
{},
};
-MODULE_DEVICE_TABLE(of, ehea_device_table);
static struct of_platform_driver ehea_driver = {
.driver = {
static void b43_print_fw_helptext(struct b43_wl *wl, bool error)
{
const char text[] =
- "You must go to " \
- "http://wireless.kernel.org/en/users/Drivers/b43#devicefirmware " \
- "and download the correct firmware for this driver version. " \
- "Please carefully read all instructions on this website.\n";
+ "Please open a terminal and enter the command " \
+ "\"sudo /usr/sbin/install_bcm43xx_firmware\" to download " \
+ "the correct firmware for this driver version. " \
+ "For an off-line installation, go to " \
+ "http://en.opensuse.org/HCL/Network_Adapters_(Wireless)/" \
+ "Broadcom_BCM43xx and follow the instructions in the " \
+ "\"Installing firmware from RPM packages\" section.\n";
if (error)
b43err(wl, text);
struct device *dev = NULL;
spin_lock_irqsave(q->queue_lock, flags);
- sdev = q->queuedata;
+ sdev = scsi_device_from_queue(q);
if (!sdev) {
spin_unlock_irqrestore(q->queue_lock, flags);
err = SCSI_DH_NOSYS;
return -EINVAL;
spin_lock_irqsave(q->queue_lock, flags);
- sdev = q->queuedata;
+ sdev = scsi_device_from_queue(q);
if (!sdev || !get_device(&sdev->sdev_gendev))
err = -ENODEV;
spin_unlock_irqrestore(q->queue_lock, flags);
struct scsi_device_handler *scsi_dh = NULL;
spin_lock_irqsave(q->queue_lock, flags);
- sdev = q->queuedata;
+ sdev = scsi_device_from_queue(q);
if (!sdev || !get_device(&sdev->sdev_gendev))
sdev = NULL;
spin_unlock_irqrestore(q->queue_lock, flags);
static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
static int fast_fail = 1;
static int client_reserve = 1;
+/*host data buffer size*/
+#define buff_size 4096
static struct scsi_transport_template *ibmvscsi_transport_template;
static struct ibmvscsi_ops *ibmvscsi_ops;
+#define IBMVSCSI_PROC_NAME "ibmvscsi"
+/* The driver is named ibmvscsic, map ibmvscsi to module name */
+MODULE_ALIAS(IBMVSCSI_PROC_NAME);
MODULE_DESCRIPTION("IBM Virtual SCSI");
MODULE_AUTHOR("Dave Boutcher");
MODULE_LICENSE("GPL");
struct ibmvscsi_host_data *hostdata = shost_priv(shost);
int len;
- len = snprintf(buf, PAGE_SIZE, "%s\n",
+ len = snprintf(buf, buff_size, "%s\n",
hostdata->madapter_info.srp_version);
return len;
}
struct ibmvscsi_host_data *hostdata = shost_priv(shost);
int len;
- len = snprintf(buf, PAGE_SIZE, "%s\n",
+ len = snprintf(buf, buff_size, "%s\n",
hostdata->madapter_info.partition_name);
return len;
}
struct ibmvscsi_host_data *hostdata = shost_priv(shost);
int len;
- len = snprintf(buf, PAGE_SIZE, "%d\n",
+ len = snprintf(buf, buff_size, "%d\n",
hostdata->madapter_info.partition_number);
return len;
}
struct ibmvscsi_host_data *hostdata = shost_priv(shost);
int len;
- len = snprintf(buf, PAGE_SIZE, "%d\n",
+ len = snprintf(buf, buff_size, "%d\n",
hostdata->madapter_info.mad_version);
return len;
}
struct ibmvscsi_host_data *hostdata = shost_priv(shost);
int len;
- len = snprintf(buf, PAGE_SIZE, "%d\n", hostdata->madapter_info.os_type);
+ len = snprintf(buf, buff_size, "%d\n", hostdata->madapter_info.os_type);
return len;
}
struct ibmvscsi_host_data *hostdata = shost_priv(shost);
/* returns null-terminated host config data */
- if (ibmvscsi_do_host_config(hostdata, buf, PAGE_SIZE) == 0)
+ if (ibmvscsi_do_host_config(hostdata, buf, buff_size) == 0)
return strlen(buf);
else
return 0;
static struct scsi_host_template driver_template = {
.module = THIS_MODULE,
.name = "IBM POWER Virtual SCSI Adapter " IBMVSCSI_VERSION,
- .proc_name = "ibmvscsi",
+ .proc_name = IBMVSCSI_PROC_NAME,
.queuecommand = ibmvscsi_queuecommand,
.eh_abort_handler = ibmvscsi_eh_abort_handler,
.eh_device_reset_handler = ibmvscsi_eh_device_reset_handler,
.probe = ibmvscsi_probe,
.remove = ibmvscsi_remove,
.get_desired_dma = ibmvscsi_get_desired_dma,
- .name = "ibmvscsi",
+ .name = IBMVSCSI_PROC_NAME,
.pm = &ibmvscsi_pm_ops,
};
case MODE_SENSE:
{
struct scatterlist *sgl;
- caddr_t vaddr;
+ struct page *pg;
+ unsigned char *vaddr;
+ unsigned long flags;
sgl = scsi_sglist(scp);
- if (sg_page(sgl)) {
- vaddr = (caddr_t) sg_virt(&sgl[0]);
+ pg = sg_page(sgl);
+ if (pg) {
+ local_irq_save(flags);
+ vaddr = kmap_atomic(pg, KM_BIO_SRC_IRQ) + sgl->offset;
memset(vaddr, 0, scp->cmnd[4]);
+
+ kunmap_atomic(vaddr, KM_BIO_SRC_IRQ);
+ local_irq_restore(flags);
}
else {
con_log(CL_ANN, (KERN_WARNING
if (scp->cmnd[0] == INQUIRY && status == 0 && islogical == 0
&& IS_RAID_CH(raid_dev, scb->dev_channel)) {
+ struct page *pg;
+ unsigned char *vaddr;
+ unsigned long flags;
+
sgl = scsi_sglist(scp);
- if (sg_page(sgl)) {
- c = *(unsigned char *) sg_virt(&sgl[0]);
+ pg = sg_page(sgl);
+ if (pg) {
+ local_irq_save(flags);
+ vaddr = kmap_atomic(pg, KM_BIO_SRC_IRQ) + sgl->offset;
+
+ c = *vaddr;
+
+ kunmap_atomic(vaddr, KM_BIO_SRC_IRQ);
+ local_irq_restore(flags);
} else {
con_log(CL_ANN, (KERN_WARNING
"megaraid mailbox: invalid sg:%d\n",
{"DGC", "RAID", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, storage on LUN 0 */
{"DGC", "DISK", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, no storage on LUN 0 */
{"EMC", "Invista", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
- {"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_FORCELUN},
+ {"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_REPORTLUN2},
{"EMULEX", "MD21/S2 ESDI", NULL, BLIST_SINGLELUN},
{"easyRAID", "16P", NULL, BLIST_NOREPORTLUN},
{"easyRAID", "X6P", NULL, BLIST_NOREPORTLUN},
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
+#include <linux/netlink.h>
+#include <net/netlink.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
+#include <scsi/scsi_netlink_ml.h>
#include "scsi_priv.h"
#include "scsi_logging.h"
#include <trace/events/scsi.h>
#define SENSE_TIMEOUT (10*HZ)
+#define TEST_UNIT_READY_TIMEOUT (30*HZ)
/*
* These should *probably* be handled by the host itself.
}
#endif
+#ifdef CONFIG_SCSI_NETLINK
+/**
+ * scsi_post_sense_event - called to post a 'Sense Code' event
+ *
+ * @sdev: SCSI device the sense code occurred on
+ * @sshdr: SCSI sense code
+ *
+ * Returns:
+ * 0 on successful return
+ * otherwise, failing error code
+ *
+ */
+static void scsi_post_sense_event(struct scsi_device *sdev,
+ struct scsi_sense_hdr *sshdr)
+{
+ struct sk_buff *skb;
+ struct nlmsghdr *nlh;
+ struct scsi_nl_sense_msg *msg;
+ u32 len, skblen;
+ int err;
+
+ if (!scsi_nl_sock) {
+ err = -ENOENT;
+ goto send_fail;
+ }
+
+ len = SCSI_NL_MSGALIGN(sizeof(*msg));
+ skblen = NLMSG_SPACE(len);
+
+ skb = alloc_skb(skblen, GFP_ATOMIC);
+ if (!skb) {
+ err = -ENOBUFS;
+ goto send_fail;
+ }
+
+ nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG,
+ skblen - sizeof(*nlh), 0);
+ if (!nlh) {
+ err = -ENOBUFS;
+ goto send_fail_skb;
+ }
+ msg = NLMSG_DATA(nlh);
+
+ INIT_SCSI_NL_HDR(&msg->snlh, SCSI_NL_TRANSPORT_ML,
+ ML_NL_SCSI_SENSE, len);
+ msg->host_no = sdev->host->host_no;
+ msg->channel = sdev->channel;
+ msg->id = sdev->id;
+ msg->lun = sdev->lun;
+ msg->sense = (sshdr->response_code << 24) | (sshdr->sense_key << 16) |
+ (sshdr->asc << 8) | sshdr->ascq;
+
+ err = nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_ML_EVENTS,
+ GFP_KERNEL);
+ if (err && (err != -ESRCH))
+ /* nlmsg_multicast already kfree_skb'd */
+ goto send_fail;
+
+ return;
+
+send_fail_skb:
+ kfree_skb(skb);
+send_fail:
+ sdev_printk(KERN_WARNING, sdev,
+ "Dropped SCSI Msg %02x/%02x/%02x/%02x: err %d\n",
+ sshdr->response_code, sshdr->sense_key,
+ sshdr->asc, sshdr->ascq, err);
+ return;
+}
+#else
+static inline void scsi_post_sense_event(struct scsi_device *sdev,
+ struct scsi_sense_hdr *sshdr) {}
+#endif
+
/**
* scsi_check_sense - Examine scsi cmd sense
* @scmd: Cmd to have sense checked.
if (scsi_sense_is_deferred(&sshdr))
return NEEDS_RETRY;
+ scsi_post_sense_event(sdev, &sshdr);
+
if (sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh &&
sdev->scsi_dh_data->scsi_dh->check_sense) {
int rc;
* if the device is in the process of becoming ready, we
* should retry.
*/
- if ((sshdr.asc == 0x04) && (sshdr.ascq == 0x01))
+ if ((sshdr.asc == 0x04) &&
+ (sshdr.ascq == 0x01 || sshdr.ascq == 0x0a))
return NEEDS_RETRY;
/*
* if the device is not started, we need to wake
int retry_cnt = 1, rtn;
retry_tur:
- rtn = scsi_send_eh_cmnd(scmd, tur_command, 6, SENSE_TIMEOUT, 0);
+ rtn = scsi_send_eh_cmnd(scmd, tur_command, 6, TEST_UNIT_READY_TIMEOUT, 0);
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",
__func__, scmd, rtn));
spin_lock_irq(q->queue_lock);
}
+/**
+ * scsi_device_from_queue - return the scsi_device backing a request queue
+ * @q: request queue to inspect
+ *
+ * Returns the scsi_device stored in @q->queuedata, but only after
+ * verifying that the queue's request_fn really is scsi_request_fn
+ * (i.e. that this is a SCSI queue); otherwise returns NULL.
+ */
+struct scsi_device *scsi_device_from_queue(struct request_queue *q)
+{
+ struct scsi_device *sdev = NULL;
+
+ if (q->request_fn == scsi_request_fn)
+ sdev = q->queuedata;
+
+ return sdev;
+}
+EXPORT_SYMBOL_GPL(scsi_device_from_queue);
+
u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
struct device *host_dev;
/* if successful, scsi_host_lookup takes a shost reference */
shost = scsi_host_lookup(msg->host_no);
- if (!shost) {
+ if (IS_ERR(shost)) {
err = -ENODEV;
goto driver_exit;
}
* and displaying garbage for the Vendor, Product, or Revision
* strings.
*/
- if (sdev->inquiry_len < 36) {
+ if (sdev->inquiry_len < 36 && printk_ratelimit()) {
printk(KERN_INFO "scsi scan: INQUIRY result too short (%d),"
" using 36\n", sdev->inquiry_len);
sdev->inquiry_len = 36;
* Yes, this sense key/ASC combination shouldn't
* occur here. It's characteristic of these devices.
*/
- } else if (sense_valid &&
- sshdr.sense_key == UNIT_ATTENTION &&
+ } else if (sshdr.sense_key == UNIT_ATTENTION &&
sshdr.asc == 0x28) {
if (!spintime) {
spintime_expire = jiffies + 5 * HZ;
put_device(&sdkp->dev);
}
+/*
+ * sd_get_index - allocate a new disk index from sd_index_ida.
+ * @index: out parameter receiving the allocated index
+ *
+ * Loops on ida_pre_get()/ida_get_new() under sd_index_lock until the
+ * allocation either succeeds (returns 0) or fails for a reason other
+ * than -EAGAIN (typically -ENOMEM, which is then returned).
+ */
+static int sd_get_index(int *index)
+{
+ int error = -ENOMEM;
+ do {
+ if (!ida_pre_get(&sd_index_ida, GFP_KERNEL))
+ break;
+
+ spin_lock(&sd_index_lock);
+ error = ida_get_new(&sd_index_ida, index);
+ spin_unlock(&sd_index_lock);
+ } while (error == -EAGAIN);
+
+ return error;
+}
/**
* sd_probe - called during driver initialization and whenever a
* new scsi device is attached to the system. It is called once
if (!gd)
goto out_free;
- do {
- if (!ida_pre_get(&sd_index_ida, GFP_KERNEL))
- goto out_put;
-
- spin_lock(&sd_index_lock);
- error = ida_get_new(&sd_index_ida, &index);
- spin_unlock(&sd_index_lock);
- } while (error == -EAGAIN);
-
+ error = sd_get_index(&index);
if (error) {
sdev_printk(KERN_WARNING, sdp, "sd_probe: memory exhausted.\n");
goto out_put;
return ret;
}
+/*
+ * Each major represents 16 disks. A minor is used for the disk itself and 15
+ * partitions. Mark each disk busy so that sd_probe can not reclaim this major.
+ *
+ * @error: array of SD_MAJORS register_blkdev() results; a zero entry means
+ *         that major was successfully claimed and its indices are released
+ *         again in the second pass.
+ *
+ * Returns 0 on success or a negative errno from allocation failures.
+ * NOTE(review): on a mid-loop sd_get_index() failure the indices already
+ * allocated in this call remain marked busy in sd_index_ida -- confirm
+ * this is acceptable (the caller fails module init in that case).
+ */
+static int __init init_sd_ida(int *error)
+{
+ int *index, i, j, err;
+
+ index = kmalloc(SD_MAJORS * (256 / SD_MINORS) * sizeof(int), GFP_KERNEL);
+ if (!index)
+ return -ENOMEM;
+
+ /* Mark minors for all majors as busy */
+ for (i = 0; i < SD_MAJORS; i++)
+ {
+ for (j = 0; j < (256 / SD_MINORS); j++) {
+ err = sd_get_index(&index[i * (256 / SD_MINORS) + j]);
+ if (err) {
+ kfree(index);
+ return err;
+ }
+ }
+ }
+
+ /* Mark minors for claimed majors as free */
+ for (i = 0; i < SD_MAJORS; i++)
+ {
+ if (error[i])
+ continue;
+ for (j = 0; j < (256 / SD_MINORS); j++)
+ ida_remove(&sd_index_ida, index[i * (256 / SD_MINORS) + j]);
+ }
+ kfree(index);
+ return 0;
+}
+
/**
* init_sd - entry point for this driver (both when built in or when
* a module).
static int __init init_sd(void)
{
int majors = 0, i, err;
+ int error[SD_MAJORS];
SCSI_LOG_HLQUEUE(3, printk("init_sd: sd driver entry point\n"));
for (i = 0; i < SD_MAJORS; i++)
- if (register_blkdev(sd_major(i), "sd") == 0)
+ {
+ error[i] = register_blkdev(sd_major(i), "sd");
+ if (error[i] == 0)
majors++;
+ }
if (!majors)
return -ENODEV;
+ if (majors < SD_MAJORS) {
+ err = init_sd_ida(error);
+ if (err)
+ return err;
+ }
+
err = class_register(&sd_disk_class);
if (err)
goto err_out;
/*
* If there is an error; offline the device since all
* error recovery strategies would have already been
- * deployed on the host side.
+ * deployed on the host side. However, if the command
+ * were a pass-through command deal with it appropriately.
*/
- if (vm_srb->srb_status == SRB_STATUS_ERROR)
- scmnd->result = DID_TARGET_FAILURE << 16;
- else
+ switch (vm_srb->srb_status) {
+ case SRB_STATUS_ERROR:
+ switch (scmnd->cmnd[0]) {
+ case ATA_16:
+ case ATA_12:
+ scmnd->result = DID_PASSTHROUGH << 16;
+ break;
+ default:
+ scmnd->result = DID_TARGET_FAILURE << 16;
+ }
+ break;
+ default:
scmnd->result = vm_srb->scsi_status;
+ }
+
/*
* If the LUN is invalid; remove the device.
#include <linux/uaccess.h>
#include <linux/module.h>
+#include <linux/bootsplash.h>
/* number of characters left in xmit buffer before select has we have room */
#define WAKEUP_CHARS 256
tty->minimum_to_wake = (minimum - (b - buf));
if (!input_available_p(tty, 0)) {
+ dev_t i_rdev = file->f_dentry->d_inode->i_rdev;
+
+ if (i_rdev == MKDEV(TTY_MAJOR, 0) ||
+ i_rdev == MKDEV(TTY_MAJOR, 1) ||
+ i_rdev == MKDEV(TTYAUX_MAJOR, 0) ||
+ i_rdev == MKDEV(TTYAUX_MAJOR, 1)) {
+ SPLASH_VERBOSE();
+ }
+
if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
retval = -EIO;
break;
#define CONFIG_SERIAL_MANY_PORTS 1
#endif
+#define arch_8250_sysrq_via_ctrl_o(a,b) 0
+
/*
* HUB6 is always on. This will be removed once the header
* files have been cleaned.
do {
if (likely(lsr & UART_LSR_DR))
+ {
ch = serial_in(up, UART_RX);
+ if (arch_8250_sysrq_via_ctrl_o(ch, &up->port))
+ goto ignore_char;
+ }
else
/*
* Intel 82571 has a Serial Over Lan device that will
#include <asm/irq_regs.h>
+#include <linux/bootsplash.h>
+
extern void ctrl_alt_del(void);
/*
pr_warning("can't emulate rawmode for keycode %d\n",
keycode);
+ /* This code has to be redone for some non-x86 platforms */
+ if (down == 1 && (keycode == 0x3c || keycode == 0x01)) {
+ /* F2 and ESC on PC keyboard */
+ if (splash_verbose())
+ return;
+ }
+
#ifdef CONFIG_SPARC
if (keycode == KEY_A && sparc_l1_a_state) {
sparc_l1_a_state = false;
notify_update(vc);
}
+#ifdef CONFIG_BOOTSPLASH
+/*
+ * con_remap_def_color - rewrite the default color attribute of a console.
+ * @vc: virtual console whose screen buffer is rewritten
+ * @new_color: new default color (fg/bg nibbles, pre-shift)
+ *
+ * Walks the whole screen buffer and, for every cell whose background
+ * and/or foreground nibble still equals the old default, flips that
+ * nibble to the new default. All screen accesses go through
+ * scr_readw()/scr_writew() so VGA memory is handled correctly.
+ */
+void con_remap_def_color(struct vc_data *vc, int new_color)
+{
+ unsigned short *sbuf = screenpos(vc, 0, 1);
+ unsigned c, len = vc->vc_screenbuf_size >> 1;
+ unsigned int bits, old_color;
+
+ if (sbuf) {
+ old_color = vc->vc_def_color << 8;
+ new_color <<= 8;
+ while (len--) {
+ c = scr_readw(sbuf);
+ /* background nibble: retarget only cells still
+ * using the old default */
+ bits = (old_color ^ new_color) & 0xf000;
+ if (((c ^ old_color) & 0xf000) == 0)
+ c ^= bits;
+ /* foreground nibble, same idea */
+ bits = (old_color ^ new_color) & 0x0f00;
+ if (((c ^ old_color) & 0x0f00) == 0)
+ c ^= bits;
+ /* single accessor-based store so both nibble
+ * updates land and VGA memory is respected */
+ scr_writew(c, sbuf);
+ sbuf++;
+ }
+ new_color >>= 8;
+ }
+ vc->vc_def_color = vc->vc_color = new_color;
+ update_attr(vc);
+}
+#endif
+
/*
* Visible symbols for modules
*/
source "drivers/video/logo/Kconfig"
endif
+if FB
+ source "drivers/video/bootsplash/Kconfig"
+endif
+
config FB_SH_MOBILE_MERAM
tristate "SuperH Mobile MERAM read ahead support"
depends on (SUPERH || ARCH_SHMOBILE)
obj-$(CONFIG_VT) += console/
obj-$(CONFIG_LOGO) += logo/
obj-y += backlight/
+obj-$(CONFIG_BOOTSPLASH) += bootsplash/
obj-$(CONFIG_EXYNOS_VIDEO) += exynos/
var->vmode = mode->vmode;
}
+#ifdef CONFIG_PPC_PSERIES
+/*
+ * is_powerblade - check whether the machine's OF "model" property
+ * matches the given model string.
+ * @model: model prefix to test against, e.g. "IBM,8842"
+ *
+ * Returns 1 on match, 0 otherwise (including when the device-tree root
+ * or the property is missing). Note that only min(len, strlen(model))
+ * bytes are compared, so this is effectively a prefix match.
+ */
+static int is_powerblade(const char *model)
+{
+ struct device_node *root;
+ const char* cp;
+ int len, l, rc = 0;
+
+ root = of_find_node_by_path("/");
+ if (root && model) {
+ l = strlen(model);
+ cp = of_get_property(root, "model", &len);
+ if (cp)
+ rc = memcmp(model, cp, min(len, l)) == 0;
+ of_node_put(root);
+ }
+ return rc;
+}
+#endif
+
/*
* Build the modedb for head 1 (head 2 will come later), check panel infos
* from either BIOS or EDID, and pick up the default mode
has_default_mode = 1;
}
+#ifdef CONFIG_PPC_PSERIES
+ if (!has_default_mode && (
+ is_powerblade("IBM,8842") || /* JS20 */
+ is_powerblade("IBM,8844") || /* JS21 */
+ is_powerblade("IBM,7998") || /* JS12/JS21/JS22 */
+ is_powerblade("IBM,0792") || /* QS21 */
+ is_powerblade("IBM,0793") /* QS22 */
+ )) {
+ printk("Falling back to 800x600 on JSxx hardware\n");
+ if (fb_find_mode(&info->var, info, "800x600@60",
+ info->monspecs.modedb,
+ info->monspecs.modedb_len, NULL, 8) != 0)
+ has_default_mode = 1;
+ }
+#endif
+
/*
* Still no mode, let's pick up a default from the db
*/
--- /dev/null
+#
+# Bootsplash configuration
+#
+
+menu "Bootsplash configuration"
+
+config BOOTSPLASH
+ bool "Bootup splash screen"
+ depends on FRAMEBUFFER_CONSOLE && FB_VESA
+ default n
+ ---help---
+ This option enables the Linux bootsplash screen. For more
+ information on the bootsplash screen have a look at
+ http://www.bootsplash.org/.
+ If you are unsure, say N
+endmenu
+
--- /dev/null
+# Makefile for the Linux bootsplash
+
+obj-$(CONFIG_BOOTSPLASH) += bootsplash.o
+obj-$(CONFIG_BOOTSPLASH) += decode-jpg.o
+obj-$(CONFIG_BOOTSPLASH) += render.o
--- /dev/null
+/*
+ * linux/drivers/video/bootsplash/bootsplash.c -
+ * splash screen handling functions.
+ *
+ * (w) 2001-2004 by Volker Poplawski, <volker@poplawski.de>,
+ * Stefan Reinauer, <stepan@suse.de>,
+ * Steffen Winterfeldt, <snwint@suse.de>,
+ * Michael Schroeder <mls@suse.de>
+ * 2009-2011 Egbert Eich <eich@suse.de>
+ *
+ * Ideas & SuSE screen work by Ken Wimer, <wimer@suse.de>
+ *
+ * For more information on this code check http://www.bootsplash.org/
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/fb.h>
+#include <linux/vt_kern.h>
+#include <linux/vmalloc.h>
+#include <linux/unistd.h>
+#include <linux/syscalls.h>
+#include <linux/console.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+
+#include <asm/irq.h>
+
+#include "../console/fbcon.h"
+#include <linux/bootsplash.h>
+#include "decode-jpg.h"
+
+#ifndef DEBUG
+# define SPLASH_DEBUG(fmt, args...)
+#else
+# define SPLASH_DEBUG(fmt, args...) \
+ printk(KERN_WARNING "%s: " fmt "\n", __func__, ##args)
+#endif
+extern signed char con2fb_map[MAX_NR_CONSOLES];
+
+#define SPLASH_VERSION "3.2.0-2010/03/31"
+
+/* These errors have to match fbcon-jpegdec.h */
+static unsigned char *jpg_errors[] = {
+ "no SOI found",
+ "not 8 bit",
+ "height mismatch",
+ "width mismatch",
+ "bad width or height",
+ "too many COMPPs",
+ "illegal HV",
+ "quant table selector",
+ "picture is not YCBCR 221111",
+ "unknow CID in scan",
+ "dct not sequential",
+ "wrong marker",
+ "no EOI",
+ "bad tables",
+ "depth mismatch",
+ "scale error",
+ "out of memory"
+};
+
+static int splash_usesilent;
+static unsigned long splash_default = 0xf01;
+
+static int jpeg_get(unsigned char *buf, unsigned char *pic,
+ int width, int height, enum splash_color_format cf,
+ struct jpeg_decdata *decdata);
+static int splash_look_for_jpeg(struct vc_data *vc, int width, int height);
+
+/*
+ * splash_setup - parse the "splash=" kernel command line option.
+ *
+ * Accepted forms: "silent[,N]", "verbose[,N]" or just "N", where N is
+ * the numeric default state/color word stored in splash_default.
+ * Always returns 0 (option consumed).
+ */
+static int __init splash_setup(char *options)
+{
+ splash_usesilent = 0;
+
+ if (!strncmp("silent", options, 6)) {
+ printk(KERN_INFO "bootsplash: silent mode.\n");
+ splash_usesilent = 1;
+ /* skip "silent," */
+ if (strlen(options) == 6)
+ return 0;
+ options += 7;
+ }
+ if (!strncmp("verbose", options, 7)) {
+ printk(KERN_INFO "bootsplash: verbose mode.\n");
+ splash_usesilent = 0;
+ /* skip "verbose," */
+ if (strlen(options) == 7)
+ return 0;
+ options += 8;
+ }
+ /* remaining text is the numeric default; 0 on parse failure */
+ if (strict_strtoul(options, 0, &splash_default) == -EINVAL)
+ splash_default = 0;
+
+ return 0;
+}
+
+__setup("splash=", splash_setup);
+
+
+/*
+ * splash_hasinter - scan @num 12-byte box records in @buf and report
+ * whether any of them is an "interactive" box (flag bit in byte 1).
+ * Blended boxes occupy 24 bytes (flag bit in byte 3) and are skipped
+ * accordingly. Returns 1 if an interactive box exists, else 0.
+ */
+static int splash_hasinter(unsigned char *buf, int num)
+{
+ unsigned char *bufend = buf + num * 12;
+ while (buf < bufend) {
+ if (buf[1] > 127) /* inter? */
+ return 1;
+ buf += buf[3] > 127 ? 24 : 12; /* blend? */
+ }
+ return 0;
+}
+
+/*
+ * boxextract - decode one box record from the splash data stream.
+ * @buf: raw record (little-endian shorts for the 4 coordinates,
+ *       then one or four RGBA colors)
+ * @dp: receives the 4 coordinate values
+ * @cols: receives 4 RGBA corner colors (replicated when the record
+ *        carries only one color)
+ * @blendp: set to 1 when the record is a gradient/blend box
+ *
+ * Returns the record size consumed: 24 bytes for a blend box
+ * (flagged by bit 15 of the second coordinate), else 12.
+ */
+static int boxextract(unsigned char *buf, unsigned short *dp,
+ unsigned char *cols, int *blendp)
+{
+ dp[0] = buf[0] | buf[1] << 8;
+ dp[1] = buf[2] | buf[3] << 8;
+ dp[2] = buf[4] | buf[5] << 8;
+ dp[3] = buf[6] | buf[7] << 8;
+ *(unsigned int *)(cols + 0) =
+ *(unsigned int *)(cols + 4) =
+ *(unsigned int *)(cols + 8) =
+ *(unsigned int *)(cols + 12) = *(unsigned int *)(buf + 8);
+ if (dp[1] > 32767) {
+ dp[1] = ~dp[1];
+ *(unsigned int *)(cols + 4) = *(unsigned int *)(buf + 12);
+ *(unsigned int *)(cols + 8) = *(unsigned int *)(buf + 16);
+ *(unsigned int *)(cols + 12) = *(unsigned int *)(buf + 20);
+ *blendp = 1;
+ return 24;
+ }
+ return 12;
+}
+
+/*
+ * boxit - render a list of splash boxes into a framebuffer image.
+ * @pic: destination pixel buffer
+ * @bytes: destination line length in bytes
+ * @buf: box records (see boxextract for the record format)
+ * @num: number of 12-byte record units in @buf
+ * @percent: progress value (0..65536) used to interpolate animated
+ *           boxes; -1/negative values suppress them
+ * @xoff/@yoff: pixel offset of the image inside the framebuffer
+ * @overpaint: nonzero when painting over an existing picture
+ * @cf: destination pixel format (15/16/24/32 bpp)
+ *
+ * Handles stipple patterns, box-copy records, blend (gradient) boxes
+ * with per-row/per-column color interpolation, alpha blending against
+ * the existing pixels and 2x2 ordered dithering. Magic coordinate
+ * values (32767/32766, bit 15 flags) select the special record types.
+ * NOTE(review): intentionally left byte-identical -- the bit-level
+ * record format makes a restyle too risky to verify here.
+ */
+static void boxit(unsigned char *pic, int bytes, unsigned char *buf, int num,
+ int percent, int xoff, int yoff, int overpaint,
+ enum splash_color_format cf)
+{
+ int x, y, p, doblend, r, g, b, a, add;
+ unsigned int i = 0;
+ unsigned short data1[4];
+ unsigned char cols1[16];
+ unsigned short data2[4];
+ unsigned char cols2[16];
+ unsigned char *bufend;
+ union pt picp;
+ unsigned int stipple[32], sti, stin, stinn, stixs, stixe, stiys, stiye;
+ int xs, xe, ys, ye, xo, yo;
+ int octpp = splash_octpp(cf);
+
+ SPLASH_DEBUG();
+ if (num == 0 || percent < -1)
+ return;
+ bufend = buf + num * 12;
+ stipple[0] = 0xffffffff;
+ stin = 1;
+ stinn = 0;
+ stixs = stixe = 0;
+ stiys = stiye = 0;
+ while (buf < bufend) {
+ doblend = 0;
+ buf += boxextract(buf, data1, cols1, &doblend);
+ if (data1[0] == 32767 && data1[1] == 32767) {
+ /* box stipple */
+ if (stinn == 32)
+ continue;
+ if (stinn == 0) {
+ stixs = data1[2];
+ stixe = data1[3];
+ stiys = stiye = 0;
+ } else if (stinn == 4) {
+ stiys = data1[2];
+ stiye = data1[3];
+ }
+ stipple[stinn++] = (cols1[0] << 24) |
+ (cols1[1] << 16) |
+ (cols1[2] << 8) |
+ cols1[3] ;
+ stipple[stinn++] = (cols1[4] << 24) |
+ (cols1[5] << 16) |
+ (cols1[6] << 8) |
+ cols1[7] ;
+ stipple[stinn++] = (cols1[8] << 24) |
+ (cols1[9] << 16) |
+ (cols1[10] << 8) |
+ cols1[11] ;
+ stipple[stinn++] = (cols1[12] << 24) |
+ (cols1[13] << 16) |
+ (cols1[14] << 8) |
+ cols1[15] ;
+ stin = stinn;
+ continue;
+ }
+ stinn = 0;
+ if (data1[0] > 32767)
+ buf += boxextract(buf, data2, cols2, &doblend);
+ if (data1[0] == 32767 && data1[1] == 32766) {
+ /* box copy */
+ i = 12 * (short)data1[3];
+ doblend = 0;
+ i += boxextract(buf + i, data1, cols1, &doblend);
+ if (data1[0] > 32767)
+ boxextract(buf + i, data2, cols2, &doblend);
+ }
+ if (data1[0] == 32767)
+ continue;
+ if (data1[2] > 32767) {
+ if (overpaint)
+ continue;
+ data1[2] = ~data1[2];
+ }
+ if (data1[3] > 32767) {
+ if (percent == 65536)
+ continue;
+ data1[3] = ~data1[3];
+ }
+ if (data1[0] > 32767) {
+ /* animated box: interpolate between the two records */
+ if (percent < 0)
+ continue;
+ data1[0] = ~data1[0];
+ for (i = 0; i < 4; i++)
+ data1[i] = (data1[i] * (65536 - percent)
+ + data2[i] * percent) >> 16;
+ for (i = 0; i < 16; i++)
+ cols1[i] = (cols1[i] * (65536 - percent)
+ + cols2[i] * percent) >> 16;
+ }
+ *(unsigned int *)cols2 = *(unsigned int *)cols1;
+ a = cols2[3];
+ if (a == 0 && !doblend)
+ continue;
+
+ if (stixs >= 32768) {
+ xo = xs = (stixs ^ 65535) + data1[0];
+ xe = stixe ? stixe + data1[0] : data1[2];
+ } else if (stixe >= 32768) {
+ xs = stixs ? data1[2] - stixs : data1[0];
+ xe = data1[2] - (stixe ^ 65535);
+ xo = xe + 1;
+ } else {
+ xo = xs = stixs;
+ xe = stixe ? stixe : data1[2];
+ }
+ if (stiys >= 32768) {
+ yo = ys = (stiys ^ 65535) + data1[1];
+ ye = stiye ? stiye + data1[1] : data1[3];
+ } else if (stiye >= 32768) {
+ ys = stiys ? data1[3] - stiys : data1[1];
+ ye = data1[3] - (stiye ^ 65535);
+ yo = ye + 1;
+ } else {
+ yo = ys = stiys;
+ ye = stiye ? stiye : data1[3];
+ }
+ xo = 32 - (xo & 31);
+ yo = stin - (yo % stin);
+ if (xs < data1[0])
+ xs = data1[0];
+ if (xe > data1[2])
+ xe = data1[2];
+ if (ys < data1[1])
+ ys = data1[1];
+ if (ye > data1[3])
+ ye = data1[3];
+
+ for (y = ys; y <= ye; y++) {
+ sti = stipple[(y + yo) % stin];
+ x = (xs + xo) & 31;
+ if (x)
+ sti = (sti << x) | (sti >> (32 - x));
+ if (doblend) {
+ /* vertical gradient: mix top and bottom colors */
+ p = data1[3] - data1[1];
+ if (p != 0)
+ p = ((y - data1[1]) << 16) / p;
+ for (i = 0; i < 8; i++)
+ cols2[i + 8] = (cols1[i] * (65536 - p)
+ + cols1[i + 8] * p)
+ >> 16;
+ }
+ add = (xs & 1);
+ add ^= (add ^ y) & 1 ? 1 : 3; /*2x2 ordered dithering*/
+ picp.ub = (pic + (xs + xoff) * octpp
+ + (y + yoff) * bytes);
+ for (x = xs; x <= xe; x++) {
+ if (!(sti & 0x80000000)) {
+ sti <<= 1;
+ switch (octpp) {
+ case 2:
+ picp.us++;
+ break;
+ case 3:
+ picp.ub += 3;
+ break;
+ case 4:
+ picp.ul++;
+ break;
+ }
+ add ^= 3;
+ continue;
+ }
+ sti = (sti << 1) | 1;
+ if (doblend) {
+ /* horizontal gradient inside the row */
+ p = data1[2] - data1[0];
+ if (p != 0)
+ p = ((x - data1[0]) << 16) / p;
+ for (i = 0; i < 4; i++)
+ cols2[i] = (cols2[i + 8] * (65536 - p)
+ + cols2[i + 12] * p)
+ >> 16;
+ a = cols2[3];
+ }
+ r = cols2[0];
+ g = cols2[1];
+ b = cols2[2];
+#define CLAMP(x) ((x) >= 256 ? 255 : (x))
+#define BLEND(x, v, a) ((x * (255 - a) + v * a) / 255)
+ switch (cf) {
+ case SPLASH_DEPTH_15:
+ if (a != 255) {
+ i = *picp.us;
+ r = BLEND((i>>7 & 0xf8), r, a);
+ g = BLEND((i>>2 & 0xf8), g, a);
+ b = BLEND((i<<3 & 0xf8), b, a);
+ }
+ r += add * 2 + 1;
+ g += add;
+ b += add * 2 + 1;
+ i = ((CLAMP(r) & 0xf8) << 7) |
+ ((CLAMP(g) & 0xf8) << 2) |
+ ((CLAMP(b)) >> 3);
+ *(picp.us++) = i;
+ break;
+ case SPLASH_DEPTH_16:
+ if (a != 255) {
+ i = *picp.us;
+ r = BLEND((i>>8 & 0xf8), r, a);
+ g = BLEND((i>>3 & 0xfc), g, a);
+ b = BLEND((i<<3 & 0xf8), b, a);
+ }
+ r += add * 2 + 1;
+ g += add;
+ b += add * 2 + 1;
+ i = ((CLAMP(r) & 0xf8) << 8) |
+ ((CLAMP(g) & 0xfc) << 3) |
+ ((CLAMP(b)) >> 3);
+ *(picp.us++) = i;
+ break;
+ case SPLASH_DEPTH_24_PACKED:
+ if (a != 255) {
+ i = *picp.ub;
+ r = BLEND((i & 0xff), r, a);
+ i = *(picp.ub + 1);
+ g = BLEND((i & 0xff), g, a);
+ i = *(picp.ub + 2);
+ b = BLEND((i & 0xff), b, a);
+ }
+ *(picp.ub++) = CLAMP(r);
+ *(picp.ub++) = CLAMP(g);
+ *(picp.ub++) = CLAMP(b);
+ break;
+ case SPLASH_DEPTH_24:
+ if (a != 255) {
+ i = *picp.ul;
+ r = BLEND((i>>16 & 0xff), r, a);
+ g = BLEND((i>>8 & 0xff), g, a);
+ b = BLEND((i & 0xff), b, a);
+ }
+ i = ((CLAMP(r) << 16)
+ | (CLAMP(g) << 8)
+ | (CLAMP(b)));
+ *(picp.ul++) = i;
+ break;
+ default:
+ break;
+ }
+ add ^= 3;
+ }
+ }
+ }
+}
+
+/*
+ * box_offsets - compute the x/y offset at which a splash picture
+ * should be placed on a larger screen.
+ *
+ * Scans the box list to find the bounding rectangle of all visible
+ * boxes, then either centers the picture (when the box center is
+ * already near the picture center) or shifts it so the box center
+ * keeps its relative screen position, clamped so the boxes stay
+ * on screen. Record decoding mirrors boxit().
+ */
+static void box_offsets(unsigned char *buf, int num,
+ int screen_w, int screen_h, int pic_w, int pic_h,
+ int *x_off, int *y_off)
+{
+ int a, doblend;
+ int x_min = pic_w, x_max = 0;
+ int y_min = pic_h, y_max = 0;
+ unsigned int i = 0;
+ unsigned short data1[4];
+ unsigned char cols1[16];
+ unsigned short data2[4];
+ unsigned char cols2[16];
+ unsigned char *bufend;
+ unsigned int stin, stinn, stixs, stixe, stiys, stiye;
+ int xs, xe, ys, ye;
+
+ SPLASH_DEBUG();
+
+ if ((screen_w == pic_w && screen_h == pic_h) || num == 0)
+ *x_off = *y_off = 0;
+
+ bufend = buf + num * 12;
+ stin = 1;
+ stinn = 0;
+ stixs = stixe = 0;
+ stiys = stiye = 0;
+
+ while (buf < bufend) {
+ doblend = 0;
+ buf += boxextract(buf, data1, cols1, &doblend);
+ if (data1[0] == 32767 && data1[1] == 32767) {
+ /* box stipple */
+ if (stinn == 32)
+ continue;
+ if (stinn == 0) {
+ stixs = data1[2];
+ stixe = data1[3];
+ stiys = stiye = 0;
+ } else if (stinn == 4) {
+ stiys = data1[2];
+ stiye = data1[3];
+ }
+ stin = stinn;
+ continue;
+ }
+ stinn = 0;
+ if (data1[0] > 32767)
+ buf += boxextract(buf, data2, cols2, &doblend);
+ if (data1[0] == 32767 && data1[1] == 32766) {
+ /* box copy */
+ i = 12 * (short)data1[3];
+ doblend = 0;
+ i += boxextract(buf + i, data1, cols1, &doblend);
+ if (data1[0] > 32767)
+ boxextract(buf + i, data2, cols2, &doblend);
+ }
+ if (data1[0] == 32767)
+ continue;
+ if (data1[2] > 32767)
+ data1[2] = ~data1[2];
+ if (data1[3] > 32767)
+ data1[3] = ~data1[3];
+ if (data1[0] > 32767) {
+ data1[0] = ~data1[0];
+ for (i = 0; i < 4; i++)
+ data1[i] = (data1[i] * (65536 - 1)
+ + data2[i] * 1) >> 16;
+ }
+ *(unsigned int *)cols2 = *(unsigned int *)cols1;
+ a = cols2[3];
+ if (a == 0 && !doblend)
+ continue;
+
+ if (stixs >= 32768) {
+ xs = (stixs ^ 65535) + data1[0];
+ xe = stixe ? stixe + data1[0] : data1[2];
+ } else if (stixe >= 32768) {
+ xs = stixs ? data1[2] - stixs : data1[0];
+ xe = data1[2] - (stixe ^ 65535);
+ } else {
+ xs = stixs;
+ xe = stixe ? stixe : data1[2];
+ }
+ if (stiys >= 32768) {
+ ys = (stiys ^ 65535) + data1[1];
+ ye = stiye ? stiye + data1[1] : data1[3];
+ } else if (stiye >= 32768) {
+ ys = stiys ? data1[3] - stiys : data1[1];
+ ye = data1[3] - (stiye ^ 65535);
+ } else {
+ ys = stiys;
+ ye = stiye ? stiye : data1[3];
+ }
+ if (xs < data1[0])
+ xs = data1[0];
+ if (xe > data1[2])
+ xe = data1[2];
+ if (ys < data1[1])
+ ys = data1[1];
+ if (ye > data1[3])
+ ye = data1[3];
+
+ if (xs < x_min)
+ x_min = xs;
+ if (xe > x_max)
+ x_max = xe;
+ if (ys < y_min)
+ y_min = ys;
+ if (ye > y_max)
+ y_max = ye;
+ }
+ {
+ int x_center = (x_min + x_max) / 2;
+ int y_center = (y_min + y_max) / 2;
+
+ if (screen_w == pic_w)
+ *x_off = 0;
+ else {
+ if (x_center < (pic_w + pic_w / 5) >> 1 &&
+ x_center > (pic_w - pic_w / 5) >> 1) {
+ *x_off = (screen_w - pic_w) >> 1;
+ } else {
+ int x = x_center * screen_w / pic_w;
+ *x_off = x - x_center;
+ if (x_min + *x_off < 0)
+ *x_off = 0;
+ if (x_max + *x_off > screen_w)
+ *x_off = screen_w - pic_w;
+ }
+ }
+ if (screen_h == pic_h)
+ *y_off = 0;
+ else {
+ if (y_center < (pic_h + pic_h / 5) >> 1 &&
+ y_center > (pic_h - pic_h / 5) >> 1)
+ *y_off = (screen_h - pic_h) >> 1;
+ else {
+ int x = y_center * screen_h / pic_h;
+ *y_off = x - y_center;
+ if (y_min + *y_off < 0)
+ *y_off = 0;
+ /* fix: clamp against the *y* offset, not
+ * x_off (copy-paste error) */
+ if (y_max + *y_off > screen_h)
+ *y_off = screen_h - pic_h;
+ }
+ }
+ }
+}
+
<br/>
+/*
+ * splash_check_jpeg - trial-decode a splash JPEG to validate it.
+ * @jpeg: raw JPEG data
+ * @width/@height: expected picture dimensions
+ *
+ * Decodes into a scratch buffer (dimensions rounded up to multiples
+ * of 16, fixed 16bpp) and reports decoder errors via the jpg_errors
+ * table. Returns 0 when the picture decodes cleanly, -1 otherwise
+ * (including allocation failures).
+ */
+static int splash_check_jpeg(unsigned char *jpeg,
+ int width, int height)
+{
+ int size, err;
+ unsigned char *mem;
+ struct jpeg_decdata *decdata; /* private decoder data */
+
+
+ size = ((width + 15) & ~15) * ((height + 15) & ~15) * 2;
+ mem = vmalloc(size);
+ if (!mem) {
+ printk(KERN_INFO "bootsplash: no memory for decoded picture.\n");
+ return -1;
+ }
+ decdata = vmalloc(sizeof(*decdata));
+ if (!decdata) {
+ printk(KERN_INFO "bootsplash: not enough memory.\n");
+ vfree(mem);
+ return -1;
+ }
+ /* test decode: use fixed depth of 16 */
+ err = jpeg_decode(jpeg, mem,
+ ((width + 15) & ~15), ((height + 15) & ~15),
+ SPLASH_DEPTH_16,
+ decdata);
+ if (err)
+ printk(KERN_INFO "bootsplash: "
+ "error while decompressing picture: %s (%d)\n",
+ jpg_errors[err - 1], err);
+ vfree(decdata);
+ vfree(mem);
+ return err ? -1 : 0;
+}
+
+/*
+ * splash_free - release all splash data attached to a console.
+ * @vc: console whose vc_splash_data list is torn down
+ * @info: framebuffer whose splash_data pointer is cleared (may be NULL)
+ *
+ * Drops one reference on each shared pic/imgd structure and frees it
+ * when the last reference is gone. Allocator pairing: sd and sd->pic
+ * come from kzalloc() (see splash_getraw/splash_pivot_current) and
+ * must be kfree()d; splash_pic, splash_sboxes and imgd are vmalloc()ed
+ * and take vfree().
+ */
+static void splash_free(struct vc_data *vc, struct fb_info *info)
+{
+ struct splash_data *sd;
+ struct splash_data *next;
+ SPLASH_DEBUG();
+ for (sd = vc->vc_splash_data; sd; sd = next) {
+ next = sd->next;
+ sd->pic->ref_cnt--;
+ if (!sd->pic->ref_cnt) {
+ vfree(sd->pic->splash_pic);
+ kfree(sd->pic);
+ }
+ sd->imgd->ref_cnt--;
+ if (!sd->imgd->ref_cnt) {
+ vfree(sd->imgd->splash_sboxes);
+ vfree(sd->imgd);
+ }
+ kfree(sd);
+ }
+ vc->vc_splash_data = 0;
+ if (info)
+ info->splash_data = 0;
+}
+
<br/>
+/*
+ * splash_mkpenguin - synthesize two fake box records ("penguin box")
+ * for old v1/v2 splash headers that carry none.
+ * @data: splash_data; the records are written directly after it
+ * @pxo/@pyo: box origin, @pwi/@phe: box width/height
+ * @pr/@pg/@pb: box RGB color
+ *
+ * Writes one visible record plus a duplicate with inverted origin and
+ * full alpha. Returns the number of records created (2), or 0 when
+ * the box is degenerate.
+ */
+static int splash_mkpenguin(struct splash_data *data,
+ int pxo, int pyo, int pwi, int phe,
+ int pr, int pg, int pb)
+{
+ unsigned char *buf;
+ int i;
+
+ if (pwi == 0 || phe == 0)
+ return 0;
+
+ buf = (unsigned char *)data + sizeof(*data);
+
+ pwi += pxo - 1;
+ phe += pyo - 1;
+
+ *buf++ = pxo;
+ *buf++ = pxo >> 8;
+ *buf++ = pyo;
+ *buf++ = pyo >> 8;
+ *buf++ = pwi;
+ *buf++ = pwi >> 8;
+ *buf++ = phe;
+ *buf++ = phe >> 8;
+ *buf++ = pr;
+ *buf++ = pg;
+ *buf++ = pb;
+ *buf++ = 0;
+
+ /* second record is a byte-for-byte copy of the first... */
+ for (i = 0; i < 12; i++, buf++)
+ *buf = buf[-12];
+
+ /* ...with the x origin inverted and alpha forced to 0xff */
+ buf[-24] ^= 0xff;
+ buf[-23] ^= 0xff;
+ buf[-1] = 0xff;
+
+ return 2;
+}
+
+/* Per-header-version byte offsets of each field in the raw splash
+ * header; -1 means the field does not exist in that version. */
+static const int splash_offsets[3][16] = {
+ /* len, unit, size, state, fgcol, col, xo, yo, wi, he
+ boxcnt, ssize, sboxcnt, percent, overok, palcnt */
+ /* V1 */
+ { 20, -1, 16, -1, -1, -1, 8, 10, 12, 14,
+ -1, -1, -1, -1, -1, -1 },
+ /* V2 */
+ { 35, 8, 12, 9, 10, 11, 16, 18, 20, 22,
+ -1, -1, -1, -1, -1, -1 },
+ /* V3 */
+ { 38, 8, 12, 9, 10, 11, 16, 18, 20, 22,
+ 24, 28, 32, 34, 36, 37 },
+};
+
+/* Field accessors; each expects a local named "offsets" pointing at
+ * the splash_offsets[] row for the header version being parsed. */
+#define SPLASH_OFF_LEN offsets[0]
+#define SPLASH_OFF_UNIT offsets[1]
+#define SPLASH_OFF_SIZE offsets[2]
+#define SPLASH_OFF_STATE offsets[3]
+#define SPLASH_OFF_FGCOL offsets[4]
+#define SPLASH_OFF_COL offsets[5]
+#define SPLASH_OFF_XO offsets[6]
+#define SPLASH_OFF_YO offsets[7]
+#define SPLASH_OFF_WI offsets[8]
+#define SPLASH_OFF_HE offsets[9]
+#define SPLASH_OFF_BOXCNT offsets[10]
+#define SPLASH_OFF_SSIZE offsets[11]
+#define SPLASH_OFF_SBOXCNT offsets[12]
+#define SPLASH_OFF_PERCENT offsets[13]
+#define SPLASH_OFF_OVEROK offsets[14]
+#define SPLASH_OFF_PALCNT offsets[15]
+
+/* Read a byte field; missing fields (-1) read as 0. */
+static inline int splash_getb(unsigned char *pos, int off)
+{
+ return off == -1 ? 0 : pos[off];
+}
+
+/* Read a little-endian 16-bit field; missing fields read as 0. */
+static inline int splash_gets(unsigned char *pos, int off)
+{
+ return off == -1 ? 0 : pos[off] | pos[off + 1] << 8;
+}
+
+/* Read a little-endian 32-bit field; missing fields read as 0. */
+static inline int splash_geti(unsigned char *pos, int off)
+{
+ return off == -1 ? 0 : (pos[off] |
+ pos[off + 1] << 8 |
+ pos[off + 2] << 16 |
+ pos[off + 3] << 24);
+}
+
+/* move the given splash_data to the current one */
+/*
+ * splash_pivot_current - make @new the head of vc->vc_splash_data,
+ * carrying over the runtime state (state/percent/silent) from the
+ * previous head and resetting both records' cached pictures.
+ *
+ * When a pic structure is shared (ref_cnt > 1) a private copy is
+ * allocated so the reset does not disturb other users.
+ * NOTE(review): the abandoned shared pic's ref_cnt is not decremented
+ * here -- looks like a possible reference leak; confirm against the
+ * refcounting in splash_free().
+ */
+static void splash_pivot_current(struct vc_data *vc, struct splash_data *new)
+{
+ struct splash_data *sd;
+ struct splash_pic_data *pic;
+ int state, percent, silent;
+
+ sd = vc->vc_splash_data;
+ if (!sd || sd == new)
+ return;
+
+ state = sd->splash_state;
+ percent = sd->splash_percent;
+ silent = sd->splash_dosilent;
+ if (sd->pic->ref_cnt > 1) {
+ pic = kzalloc(sizeof(struct splash_pic_data), GFP_KERNEL);
+ if (!pic)
+ return;
+ sd->pic = pic;
+ }
+ sd->pic->ref_cnt = 1;
+ sd->pic->splash_pic_size = 0;
+ sd->pic->splash_pic = NULL;
+ sd->splash_vc_text_wi = sd->imgd->splash_text_wi;
+ sd->splash_vc_text_he = sd->imgd->splash_text_he;
+ for (; sd->next; sd = sd->next) {
+ if (sd->next == new) {
+ sd->next = new->next;
+ new->next = vc->vc_splash_data;
+ vc->vc_splash_data = new;
+ /* copy the current states */
+ new->splash_state = state;
+ new->splash_percent = percent;
+ new->splash_dosilent = silent;
+ new->splash_vc_text_wi = new->imgd->splash_text_wi;
+ new->splash_vc_text_he = new->imgd->splash_text_he;
+
+ new->splash_boxes_xoff = 0;
+ new->splash_boxes_yoff = 0;
+ new->splash_sboxes_xoff = 0;
+ new->splash_sboxes_yoff = 0;
+
+ if (new->pic->ref_cnt > 1) {
+ struct splash_pic_data *pic;
+ pic = kzalloc(sizeof(struct splash_pic_data),
+ GFP_KERNEL);
+ if (!pic)
+ return;
+
+ new->pic = pic;
+ }
+ new->pic->ref_cnt = 1;
+ new->pic->splash_pic_size = 0;
+ new->pic->splash_pic = NULL;
+
+ return;
+ }
+ }
+}
+
+/*
+ * update_boxes - apply an in-place "update" splash packet to the
+ * current splash data of @vc.
+ * @offsets: splash_offsets[] row for the packet's header version
+ * @ndata: packet start, @len: header length, @end: end of buffer
+ * @update: out bitmask: -1 state/color changed, |1 boxes updated,
+ *          |2 silent boxes updated
+ *
+ * A field value of 255 means "unchanged". Box payloads are bounds-
+ * checked against the buffer end and the stored box counts before
+ * being copied over the existing box arrays. Always returns 0.
+ */
+static int update_boxes(struct vc_data *vc,
+ const int *offsets,
+ unsigned char *ndata, int len, unsigned char * end,
+ int *update)
+{
+ int boxcnt;
+ int sboxcnt;
+ struct splash_data *sd;
+ struct splash_img_data *imgd;
+ int i;
+
+ sd = vc->vc_splash_data;
+ if (sd != 0) {
+ int up = 0;
+ imgd = sd->imgd;
+ i = splash_getb(ndata, SPLASH_OFF_STATE);
+ if (i != 255) {
+ sd->splash_state = i; /*@!@*/
+ up = -1;
+ }
+ i = splash_getb(ndata, SPLASH_OFF_FGCOL);
+ if (i != 255) {
+ imgd->splash_fg_color = i;
+ up = -1;
+ }
+ i = splash_getb(ndata, SPLASH_OFF_COL);
+ if (i != 255) {
+ imgd->splash_color = i;
+ up = -1;
+ }
+ boxcnt = sboxcnt = 0;
+ if (ndata + len <= end) {
+ boxcnt = splash_gets(ndata, SPLASH_OFF_BOXCNT);
+ sboxcnt = splash_gets(ndata, SPLASH_OFF_SBOXCNT);
+ }
+ if (boxcnt) {
+ /* first short after the header: start index */
+ i = splash_gets(ndata, len);
+ if (boxcnt + i
+ <= imgd->splash_boxcount &&
+ ndata + len + 2 + boxcnt * 12
+ <= end) {
+ if (splash_geti(ndata, len + 2)
+ != 0x7ffd7fff ||
+ !memcmp(ndata + len + 2,
+ imgd->splash_boxes + i * 12,
+ 8)) {
+ memcpy(imgd->splash_boxes + i * 12,
+ ndata + len + 2,
+ boxcnt * 12);
+ up |= 1;
+ }
+ }
+ len += boxcnt * 12 + 2;
+ }
+ if (sboxcnt) {
+ i = splash_gets(ndata, len);
+ if ((sboxcnt + i <= imgd->splash_sboxcount) &&
+ (ndata + len + 2 + sboxcnt * 12 <= end)) {
+ if ((splash_geti(ndata, len + 2) != 0x7ffd7fff)
+ || !memcmp(ndata + len + 2,
+ imgd->splash_sboxes + i * 12,
+ 8)) {
+ memcpy(imgd->splash_sboxes + i * 12,
+ ndata + len + 2,
+ sboxcnt * 12);
+ up |= 2;
+ }
+ }
+ }
+ if (update)
+ *update = up;
+ }
+ return 0;
+}
+
+static int splash_getraw(unsigned char *start, unsigned char *end, int *update)
+{
+ unsigned char *ndata;
+ int version;
+ int splash_size;
+ int unit;
+ int width, height;
+ int silentsize;
+ int boxcnt;
+ int sboxcnt;
+ int palcnt;
+ int len;
+ const int *offsets;
+ struct vc_data *vc = NULL;
+ struct fb_info *info = NULL;
+ struct splash_data *sd;
+ struct splash_img_data *imgd;
+ struct splash_pic_data *pic;
+ struct splash_data *splash_found = NULL;
+ int unit_found = -1;
+ int oldpercent, oldsilent;
+
+ if (update)
+ *update = -1;
+
+ if (!update ||
+ start[7] < '2' ||
+ start[7] > '3' ||
+ splash_geti(start, 12) != (int)0xffffffff)
+ printk(KERN_INFO "bootsplash %s: looking for picture...\n",
+ SPLASH_VERSION);
+
+ oldpercent = -3;
+ oldsilent = -1;
+ for (ndata = start; ndata < end; ndata++) {
+ if (ndata[0] != 'B' ||
+ ndata[1] != 'O' ||
+ ndata[2] != 'O' ||
+ ndata[3] != 'T')
+ continue;
+ if (ndata[4] != 'S' ||
+ ndata[5] != 'P' ||
+ ndata[6] != 'L' ||
+ ndata[7] < '1' ||
+ ndata[7] > '3')
+ continue;
+
+ version = ndata[7] - '0';
+ offsets = splash_offsets[version - 1];
+ len = SPLASH_OFF_LEN;
+
+ unit = splash_getb(ndata, SPLASH_OFF_UNIT);
+ if (unit >= MAX_NR_CONSOLES)
+ continue;
+
+ if (unit)
+ vc_allocate(unit);
+
+ vc = vc_cons[unit].d;
+ if (!vc)
+ continue;
+
+ info = registered_fb[(int)con2fb_map[unit]];
+
+ splash_size = splash_geti(ndata, SPLASH_OFF_SIZE);
+
+ /*
+ * Update. Wonder what should happen here now
+ * since we can have multiple splash_data records
+ */
+ if (splash_size == (int)0xffffffff && version > 1) {
+ if (update_boxes(vc, offsets, ndata, len, end, update) < 0)
+ return -1;
+
+ return unit;
+ }
+
+ if (splash_size == 0) {
+ printk(KERN_INFO
+ "bootsplash: ...found, freeing memory.\n");
+ if (vc->vc_splash_data)
+ splash_free(vc, info);
+ return unit;
+ }
+ boxcnt = splash_gets(ndata, SPLASH_OFF_BOXCNT);
+ palcnt = 3 * splash_getb(ndata, SPLASH_OFF_PALCNT);
+ if (ndata + len + splash_size > end) {
+ printk(KERN_ERR
+ "bootsplash: ...found, but truncated!\n");
+ return -1;
+ }
+ silentsize = splash_geti(ndata, SPLASH_OFF_SSIZE);
+ if (silentsize)
+ printk(KERN_INFO
+ "bootsplash: silentjpeg size %d bytes\n",
+ silentsize);
+ if (silentsize >= splash_size) {
+ printk(KERN_ERR "bootsplash: bigger than splashsize!\n");
+ return -1;
+ }
+ splash_size -= silentsize;
+ if (!splash_usesilent)
+ silentsize = 0;
+
+ sboxcnt = splash_gets(ndata, SPLASH_OFF_SBOXCNT);
+ if (vc->vc_splash_data) {
+ oldpercent = vc->vc_splash_data->splash_percent;/*@!@*/
+ oldsilent = vc->vc_splash_data->splash_dosilent;/*@!@*/
+ }
+ sd = kzalloc(sizeof(*sd), GFP_KERNEL);
+ if (!sd)
+ break;
+ imgd = vmalloc(sizeof(*imgd)
+ + splash_size + (version < 3 ? 2 * 12 : 0));
+ if (!imgd) {
+ vfree(sd);
+ break;
+ }
+ pic = kzalloc(sizeof(*pic), GFP_KERNEL);
+ if (!pic) {
+ vfree(sd);
+ vfree(pic);
+ break;
+ }
+ memset(imgd, 0, sizeof(*imgd));
+ sd->imgd = imgd;
+ sd->pic = pic;
+ imgd->ref_cnt = 1;
+ pic->ref_cnt = 1;
+ jpeg_get_size(ndata + len + boxcnt * 12 + palcnt,
+ &imgd->splash_width, &imgd->splash_height);
+ if (splash_check_jpeg(ndata + len + boxcnt * 12 + palcnt,
+ imgd->splash_width,
+ imgd->splash_height)) {
+ ndata += len + splash_size - 1;
+ vfree(imgd);
+ vfree(sd);
+ continue;
+ }
+ if (silentsize) {
+ imgd->splash_silentjpeg = vmalloc(silentsize);
+ if (imgd->splash_silentjpeg) {
+ memcpy(imgd->splash_silentjpeg,
+ ndata + len + splash_size, silentsize);
+ imgd->splash_sboxes = imgd->splash_silentjpeg;
+ imgd->splash_silentjpeg += 12 * sboxcnt;
+ imgd->splash_sboxcount = sboxcnt;
+ }
+ }
+ imgd->splash_fg_color = splash_getb(ndata, SPLASH_OFF_FGCOL);
+ imgd->splash_color = splash_getb(ndata, SPLASH_OFF_COL);
+ imgd->splash_overpaintok = splash_getb(ndata, SPLASH_OFF_OVEROK);
+ imgd->splash_text_xo = splash_gets(ndata, SPLASH_OFF_XO);
+ imgd->splash_text_yo = splash_gets(ndata, SPLASH_OFF_YO);
+ imgd->splash_text_wi = splash_gets(ndata, SPLASH_OFF_WI);
+ imgd->splash_text_he = splash_gets(ndata, SPLASH_OFF_HE);
+ if (version == 1) {
+ imgd->splash_text_xo *= 8;
+ imgd->splash_text_wi *= 8;
+ imgd->splash_text_yo *= 16;
+ imgd->splash_text_he *= 16;
+ imgd->splash_color = (splash_default >> 8) & 0x0f;
+ imgd->splash_fg_color = (splash_default >> 4) & 0x0f;
+ }
+
+ /* fake penguin box for older formats */
+ if (version == 1)
+ boxcnt = splash_mkpenguin(sd, imgd->splash_text_xo + 10,
+ imgd->splash_text_yo + 10,
+ imgd->splash_text_wi - 20,
+ imgd->splash_text_he - 20,
+ 0xf0, 0xf0, 0xf0);
+ else if (version == 2)
+ boxcnt = splash_mkpenguin(sd,
+ splash_gets(ndata, 24),
+ splash_gets(ndata, 26),
+ splash_gets(ndata, 28),
+ splash_gets(ndata, 30),
+ splash_getb(ndata, 32),
+ splash_getb(ndata, 33),
+ splash_getb(ndata, 34));
+
+ memcpy((char *)imgd
+ + sizeof(*imgd) + (version < 3 ? boxcnt * 12 : 0),
+ ndata + len,
+ splash_size);
+ imgd->splash_boxcount = boxcnt;
+ imgd->splash_boxes = (unsigned char *)imgd + sizeof(*imgd);
+ imgd->splash_palette = imgd->splash_boxes + boxcnt * 12;
+ imgd->splash_jpeg = imgd->splash_palette + palcnt;
+
+ sd->splash_state = splash_getb(ndata, SPLASH_OFF_STATE);/*@!@*/
+ sd->splash_percent = oldpercent == -3 ?
+ splash_gets(ndata, SPLASH_OFF_PERCENT) :
+ oldpercent; /*@!@*/
+ sd->pic->splash_pic = NULL;
+ sd->pic->splash_pic_size = 0;
+
+ sd->splash_dosilent = imgd->splash_silentjpeg != 0 ?
+ (oldsilent == -1 ? 1 : oldsilent) :
+ 0; /* @!@ */
+
+ sd->splash_vc_text_wi = imgd->splash_text_wi;
+ sd->splash_vc_text_he = imgd->splash_text_he;
+
+ sd->next = vc->vc_splash_data;
+ vc->vc_splash_data = sd;
+
+ if (info) {
+ width = info->var.xres;
+ height = info->var.yres;
+ if (imgd->splash_width != width ||
+ imgd->splash_height != height) {
+ ndata += len + splash_size - 1;
+ continue;
+ }
+ }
+ printk(KERN_INFO
+ "bootsplash: ...found (%dx%d, %d bytes, v%d).\n",
+ imgd->splash_width, imgd->splash_height,
+ splash_size, version);
+ if (version == 1) {
+ printk(KERN_WARNING
+ "bootsplash: Using deprecated v1 header. "
+ "Updating your splash utility recommended.\n");
+ printk(KERN_INFO
+ "bootsplash: Find the latest version at "
+ "http://www.bootsplash.org/\n");
+ }
+
+ splash_found = sd;
+ unit_found = unit;
+ }
+
+ if (splash_found) {
+ splash_pivot_current(vc, splash_found);
+ return unit_found;
+ } else {
+ vc = vc_cons[0].d;
+ if (vc) {
+ info = registered_fb[(int)con2fb_map[0]];
+ if (info) {
+ width = info->var.xres;
+ height = info->var.yres;
+ } else
+ width = height = 0;
+ if (!splash_look_for_jpeg(vc, width, height))
+ return -1;
+ return 0;
+ }
+ }
+
+ printk(KERN_ERR "bootsplash: ...no good signature found.\n");
+ return -1;
+}
+
+/*
+ * Repaint the text area of "vc" over the splash background and redraw
+ * the margins around it.  update_region() takes a character count;
+ * vc_size_row is in bytes (two bytes per cell), hence the division by 2.
+ */
+static void splash_update_redraw(struct vc_data *vc, struct fb_info *info)
+{
+	update_region(vc,
+		      vc->vc_origin + vc->vc_size_row * vc->vc_top,
+		      vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
+	splash_clear_margins(vc, info, 0);
+}
+
+/*
+ * Switch console 0 from the silent splash to verbose mode and redraw
+ * its text screen.  All early-out conditions fall through to "done",
+ * so the function always returns 0.
+ */
+int splash_do_verbose(void)
+{
+	struct vc_data *vc;
+	struct fb_info *info;
+	int ret = 0;
+
+	SPLASH_DEBUG();
+	/* during an oops the console lock may already be held; skip it */
+	if (!oops_in_progress)
+		console_lock();
+
+	if (!splash_usesilent)
+		goto done;
+
+	vc = vc_cons[0].d;
+
+	if (!vc || !vc->vc_splash_data || !vc->vc_splash_data->splash_state)
+		goto done;
+	if (!vc->vc_splash_data->imgd->splash_silentjpeg)
+		goto done;
+
+	if (!vc->vc_splash_data->splash_dosilent)
+		goto done;
+	/* leave silent mode even if this console is not in the foreground */
+	vc->vc_splash_data->splash_dosilent = 0;
+	if (fg_console != vc->vc_num)
+		goto done;
+
+	info = registered_fb[(int)con2fb_map[0]];
+
+	if (!info || !info->splash_data)
+		goto done;
+
+	splash_update_redraw(vc, info);
+	ret = 0;
+
+ done:
+	if (!oops_in_progress)
+		console_unlock();
+
+	return ret;
+}
+
+/* deferred-work wrapper: perform the verbose switch in process context */
+static void splash_verbose_callback(struct work_struct *ignored)
+{
+	splash_do_verbose();
+}
+
+static DECLARE_WORK(splash_work, splash_verbose_callback);
+
+/*
+ * Request a switch to verbose mode.  Normally the switch is deferred
+ * to a workqueue; during an oops it is done synchronously so the
+ * panic output becomes visible immediately.
+ */
+int splash_verbose(void)
+{
+	if (!oops_in_progress)
+		schedule_work(&splash_work);
+	else
+		return splash_do_verbose();
+	return 0;
+}
+
+/*
+ * Deactivate the splash on this framebuffer and restore the console to
+ * the full screen size.
+ *
+ * Fix: the previous code computed the column count into "rows"
+ * (xres / font width) and the row count into "cols", then compared
+ * them crosswise against vc_rows/vc_cols, so the resize could be
+ * skipped or taken spuriously.  The variable names and the comparison
+ * are corrected here; the effective vc_resize(vc, columns, rows) call
+ * is unchanged.
+ */
+static void splash_off(struct vc_data *vc, struct fb_info *info)
+{
+	int cols = info->var.xres / vc->vc_font.width;
+	int rows = info->var.yres / vc->vc_font.height;
+	SPLASH_DEBUG();
+
+	info->splash_data = 0;
+	if (rows != vc->vc_rows || cols != vc->vc_cols)
+		vc_resize(vc, cols, rows);
+}
+
+/* look for the splash with the matching size and set it as the current */
+static int splash_look_for_jpeg(struct vc_data *vc, int width, int height)
+{
+	struct splash_data *sd, *found = NULL;
+	int found_delta_x = INT_MAX, found_delta_y = INT_MAX;
+
+	/* pick the image whose dimensions are closest to width x height;
+	 * the deltas are cross-weighted by the other axis */
+	for (sd = vc->vc_splash_data; sd; sd = sd->next) {
+		int delta_x = abs(sd->imgd->splash_width - width) * height;
+		int delta_y = abs(sd->imgd->splash_height - height) * width;
+		if (!found ||
+		    (found_delta_x + found_delta_y > delta_x + delta_y)) {
+			found = sd;
+			found_delta_x = delta_x;
+			found_delta_y = delta_y;
+		}
+	}
+
+	if (found) {
+		SPLASH_DEBUG("bootsplash: "
+			     "scalable image found (%dx%d scaled to %dx%d).",
+			     found->imgd->splash_width,
+			     found->imgd->splash_height,
+			     width, height);
+
+		splash_pivot_current(vc, found);
+
+		/* textarea margins are constant independent from image size */
+		if (found->imgd->splash_height != height)
+			found->splash_vc_text_he = height
+				- (found->imgd->splash_height
+				   - found->imgd->splash_text_he);
+		else
+			found->splash_vc_text_he = found->imgd->splash_text_he;
+		if (found->imgd->splash_width != width)
+			found->splash_vc_text_wi =
+				width
+				- (found->imgd->splash_width
+				   - found->imgd->splash_text_wi);
+		else
+			found->splash_vc_text_wi = found->imgd->splash_text_wi;
+
+		/* if the image will be scaled, precompute the box offsets
+		 * for the target resolution */
+		if (found->imgd->splash_width != width
+		    || found->imgd->splash_height != height) {
+			box_offsets(found->imgd->splash_boxes,
+				    found->imgd->splash_boxcount,
+				    width, height,
+				    found->imgd->splash_width,
+				    found->imgd->splash_height,
+				    &found->splash_boxes_xoff,
+				    &found->splash_boxes_yoff);
+			SPLASH_DEBUG("bootsplash: offsets for boxes: x=%d y=%d",
+				     found->splash_boxes_xoff,
+				     found->splash_boxes_yoff);
+
+			if (found->imgd->splash_sboxes) {
+				box_offsets(found->imgd->splash_sboxes,
+					    found->imgd->splash_sboxcount,
+					    width, height,
+					    found->imgd->splash_width,
+					    found->imgd->splash_height,
+					    &found->splash_sboxes_xoff,
+					    &found->splash_sboxes_yoff);
+				SPLASH_DEBUG("bootsplash: "
+					     "offsets sboxes: x=%d y=%d",
+					     found->splash_sboxes_xoff,
+					     found->splash_sboxes_yoff);
+			}
+		} else {
+			found->splash_sboxes_xoff = 0;
+			found->splash_sboxes_yoff = 0;
+		}
+		return 0;
+	}
+	return -1;
+}
+
+/*
+ * Apply the splash image's background/foreground colors as the
+ * console's default color and redraw if the console is in the
+ * foreground.  Passing info == NULL skips the redraw (color only).
+ * Returns 0 on success, -1 if the console has no splash data.
+ */
+static int splash_recolor(struct vc_data *vc, struct fb_info *info)
+{
+	int color;
+
+	SPLASH_DEBUG();
+	if (!vc->vc_splash_data)
+		return -1;
+	if (!vc->vc_splash_data->splash_state)
+		return 0;
+	/* pack background (high nibble) and foreground (low nibble) */
+	color = vc->vc_splash_data->imgd->splash_color << 4 |
+		vc->vc_splash_data->imgd->splash_fg_color;
+	if (vc->vc_def_color != color)
+		con_remap_def_color(vc, color);
+	if (info && info->splash_data && fg_console == vc->vc_num)
+		splash_update_redraw(vc, info);
+	vc->vc_splash_data->color_set = 1;
+	return 0;
+}
+
+/*
+ * Decode the splash JPEG(s) of "vc" into the shared pic buffer and
+ * activate the splash on framebuffer "info".  Returns 0 on success;
+ * on any failure the splash is switched off and a negative code is
+ * returned (-1 no data, -2 unsupported format / no matching image,
+ * -3 out of memory, -4 decoder error, -5 splash state off).
+ * Caller is expected to hold the console lock.
+ */
+int splash_prepare(struct vc_data *vc, struct fb_info *info)
+{
+	int err;
+	int width, height, octpp, size, sbytes;
+	enum splash_color_format cf = SPLASH_DEPTH_UNKNOWN;
+	int pic_update = 0;
+	struct jpeg_decdata *decdata; /* private decoder data */
+
+	SPLASH_DEBUG("vc_num: %i", vc->vc_num);
+
+#if 0 /* Nouveau fb sets a different ops, so we can't use the condition */
+	if (info->fbops->fb_imageblit != cfb_imageblit) {
+		printk(KERN_ERR "bootsplash: "
+		       "found, but framebuffer can't "
+		       "handle it!\n");
+		return -1;
+	}
+#endif
+
+	if (!vc->vc_splash_data || !vc->vc_splash_data->splash_state) {
+		splash_off(vc, info);
+		return -1;
+	}
+
+	width = info->var.xres;
+	height = info->var.yres;
+	/* map the framebuffer pixel layout onto a splash color format */
+	switch (info->var.bits_per_pixel) {
+	case 16:
+		if ((info->var.red.length +
+		     info->var.green.length +
+		     info->var.blue.length) == 15)
+			cf = SPLASH_DEPTH_15;
+		else
+			cf = SPLASH_DEPTH_16;
+		break;
+	case 24:
+		cf = SPLASH_DEPTH_24_PACKED;
+		break;
+	case 32:
+		cf = SPLASH_DEPTH_24;
+		break;
+	}
+	if (cf == SPLASH_DEPTH_UNKNOWN) {
+		printk(KERN_INFO "bootsplash: unsupported pixel format: %i\n",
+		       info->var.bits_per_pixel);
+		splash_off(vc, info);
+		return -2;
+	}
+	octpp = splash_octpp(cf);
+
+	if (splash_look_for_jpeg(vc, width, height) < 0) {
+		printk(KERN_INFO "bootsplash: no matching splash %dx%d\n",
+		       width, height);
+		splash_off(vc, info);
+		return -2;
+	}
+
+	/* stride and size rounded up to a 16-pixel/16-line boundary */
+	sbytes = ((width + 15) & ~15) * octpp;
+	size = sbytes * ((height + 15) & ~15);
+
+	/* the pic buffer may be shared between consoles (ref_cnt); if
+	 * the required size changed we must unshare before resizing */
+	if (size != vc->vc_splash_data->pic->splash_pic_size) {
+		if (vc->vc_splash_data->pic->ref_cnt > 1) {
+			struct splash_pic_data *pic;
+			pic = kzalloc(sizeof(struct splash_pic_data),
+				      GFP_KERNEL);
+			if (!pic)
+				return -2;
+			vc->vc_splash_data->pic = pic;
+		}
+		vc->vc_splash_data->pic->ref_cnt = 1;
+		vc->vc_splash_data->pic->splash_pic = NULL;
+		vc->vc_splash_data->pic->splash_pic_size = 0;
+	}
+	if (!vc->vc_splash_data->pic->splash_pic) {
+		vc->vc_splash_data->pic->splash_pic = vmalloc(size);
+		pic_update = 1;
+	}
+	if (!vc->vc_splash_data->pic->splash_pic) {
+		printk(KERN_INFO "bootsplash: not enough memory.\n");
+		splash_off(vc, info);
+		return -3;
+	}
+
+	decdata = vmalloc(sizeof(*decdata));
+	if (!decdata) {
+		printk(KERN_INFO "bootsplash: not enough memory.\n");
+		splash_off(vc, info);
+		return -3;
+	}
+
+	/* silent image first: decode, draw progress boxes, blit to screen */
+	if (vc->vc_splash_data->imgd->splash_silentjpeg &&
+	    vc->vc_splash_data->splash_dosilent) {
+		pic_update = 1;
+		err = jpeg_get(vc->vc_splash_data->imgd->splash_silentjpeg,
+			       vc->vc_splash_data->pic->splash_pic,
+			       width, height, cf, decdata);
+		if (err) {
+			printk(KERN_INFO "bootsplash: "
+			       "error while decompressing silent picture: "
+			       "%s (%d)\n",
+			       jpg_errors[err - 1], err);
+			vc->vc_splash_data->splash_dosilent = 0;
+		} else {
+			if (vc->vc_splash_data->imgd->splash_sboxcount)
+				boxit(vc->vc_splash_data->pic->splash_pic,
+				      sbytes,
+				      vc->vc_splash_data->imgd->splash_sboxes,
+				      vc->vc_splash_data->imgd->splash_sboxcount,
+				      vc->vc_splash_data->splash_percent,
+				      vc->vc_splash_data->splash_sboxes_xoff,
+				      vc->vc_splash_data->splash_sboxes_yoff,
+				      vc->vc_splash_data->splash_percent < 0 ?
+				      1 : 0,
+				      cf);
+			splashcopy(info->screen_base,
+				   vc->vc_splash_data->pic->splash_pic,
+				   info->var.yres,
+				   info->var.xres,
+				   info->fix.line_length, sbytes,
+				   octpp);
+		}
+	} else
+		vc->vc_splash_data->splash_dosilent = 0;
+
+	/* (re)decode the verbose image into the pic buffer if needed */
+	if (pic_update) {
+		err = jpeg_get(vc->vc_splash_data->imgd->splash_jpeg,
+			       vc->vc_splash_data->pic->splash_pic,
+			       width, height, cf, decdata);
+		if (err) {
+			printk(KERN_INFO "bootsplash: "
+			       "error while decompressing picture: %s (%d) .\n",
+			       jpg_errors[err - 1], err);
+			splash_off(vc, info);
+			return -4;
+		}
+	}
+
+	vfree(decdata);
+
+	vc->vc_splash_data->pic->splash_pic_size = size;
+	vc->vc_splash_data->pic->splash_pic_stride = sbytes;
+
+	if (vc->vc_splash_data->imgd->splash_boxcount)
+		boxit(vc->vc_splash_data->pic->splash_pic,
+		      sbytes,
+		      vc->vc_splash_data->imgd->splash_boxes,
+		      vc->vc_splash_data->imgd->splash_boxcount,
+		      vc->vc_splash_data->splash_percent,
+		      vc->vc_splash_data->splash_boxes_xoff,
+		      vc->vc_splash_data->splash_boxes_yoff,
+		      0,
+		      cf);
+	if (vc->vc_splash_data->splash_state) {
+		/* shrink the text console to the image's text window */
+		int cols = vc->vc_splash_data->splash_vc_text_wi
+			/ vc->vc_font.width;
+		int rows = vc->vc_splash_data->splash_vc_text_he
+			/ vc->vc_font.height;
+
+		info->splash_data = vc->vc_splash_data;
+
+		info->splash_data->need_sync = 0;
+		/* XEN fb needs some sync after the direct modification of
+		 * fb area; maybe other FBs would need similar hack, but
+		 * so far I don't care.
+		 */
+		if (!strcmp(info->fix.id, "xen")) {
+			info->splash_data->need_sync = 1;
+			/* sync the whole splash once */
+			splash_sync_region(info, 0, 0,
+					   info->var.xres, info->var.yres);
+		}
+
+		/* vc_resize also calls con_switch which resets yscroll */
+		if (rows != vc->vc_rows || cols != vc->vc_cols)
+			vc_resize(vc, cols, rows);
+		if (!vc->vc_splash_data->color_set)
+			splash_recolor(vc, NULL);
+	} else {
+		SPLASH_DEBUG("Splash Status is off\n");
+		splash_off(vc, info);
+		return -5;
+	}
+	return 0;
+}
+
+
+#ifdef CONFIG_PROC_FS
+
+#include <linux/proc_fs.h>
+
+static int splash_read_proc(char *buffer, char **start, off_t offset, int size,
+ int *eof, void *data);
+static int splash_write_proc(struct file *file, const char *buffer,
+ unsigned long count, void *data);
+static int splash_status(struct vc_data *vc);
+static int splash_proc_register(void);
+
+static struct proc_dir_entry *proc_splash;
+
+/*
+ * React to a splash state change on "vc": re-prepare or switch off
+ * the splash and fix up the console colors.  Always returns 0.
+ */
+static int splash_status(struct vc_data *vc)
+{
+	struct fb_info *info;
+
+	printk(KERN_INFO "bootsplash: status on console %d changed to %s\n",
+	       vc->vc_num,
+	       vc->vc_splash_data &&
+	       vc->vc_splash_data->splash_state ? "on" : "off");
+
+	info = registered_fb[(int) con2fb_map[vc->vc_num]];
+	if (!info)
+		return 0;
+
+	/* only the foreground console owns the framebuffer contents */
+	if (fg_console == vc->vc_num)
+		splash_prepare(vc, info);
+	if (vc->vc_splash_data && vc->vc_splash_data->splash_state)
+		splash_recolor(vc, info);
+	else {
+		splash_off(vc, info);
+		/* 0x07 is the standard white-on-black console default */
+		if (vc->vc_def_color != 0x07)
+			con_remap_def_color(vc, 0x07);
+	}
+
+	return 0;
+}
+
+/*
+ * Share the current splash image of console "unit_s" with console
+ * "unit_t".  The image data (imgd) is reference counted and shared;
+ * the pic buffer is shared only if the target framebuffer needs the
+ * same buffer size, otherwise a fresh one is allocated.
+ * Returns 0 on success, -1 on error.
+ *
+ * Fixes over the previous version: the new splash_data is published
+ * to the target console only after it is fully set up (before, a
+ * failed pic allocation left vc_t pointing at half-initialized data
+ * with a NULL pic and leaked the allocation), the imgd refcount is
+ * only bumped on success, sd_t is kfree()d on the error path, and a
+ * missing framebuffer is detected instead of dereferenced.
+ */
+int splash_copy_current_img(int unit_s, int unit_t)
+{
+	struct fb_info *info;
+	struct vc_data *vc_s;
+	struct vc_data *vc_t;
+	struct splash_data *sd_s;
+	struct splash_data *sd_t;
+	int size;
+
+	if (unit_s >= MAX_NR_CONSOLES || unit_t >= MAX_NR_CONSOLES)
+		return -1;
+
+	vc_s = vc_cons[unit_s].d;
+	if (!vc_s) {
+		printk(KERN_WARNING "bootsplash: "
+		       "copy: source (%i) is invalid.\n", unit_s);
+		return -1;
+	}
+	sd_s = vc_s->vc_splash_data;
+	if (!sd_s || !sd_s->imgd) {
+		printk(KERN_INFO "bootsplash: "
+		       "copy: source_vc (%i) doesn't have valid splash data.\n",
+		       unit_s);
+		return -1;
+	}
+	/* a failed vc_allocate() leaves vc_cons[unit_t].d == NULL */
+	vc_allocate(unit_t);
+	vc_t = vc_cons[unit_t].d;
+	if (!vc_t) {
+		printk(KERN_WARNING "bootsplash: copy: dest (%i) is invalid.\n",
+		       unit_t);
+		return -1;
+	}
+	info = registered_fb[(int) con2fb_map[vc_t->vc_num]];
+	if (!info)
+		return -1;
+
+	sd_t = kzalloc(sizeof(*sd_t), GFP_KERNEL);
+	if (!sd_t)
+		return -1;
+
+	/* recreate the per-console state around the shared image */
+	sd_t->splash_state = sd_s->splash_state;
+	sd_t->splash_percent = sd_s->splash_percent;
+	sd_t->splash_dosilent = sd_s->splash_dosilent;
+	sd_t->splash_vc_text_wi = sd_s->imgd->splash_text_wi;
+	sd_t->splash_vc_text_he = sd_s->imgd->splash_text_he;
+
+	sd_t->splash_boxes_xoff = 0;
+	sd_t->splash_boxes_yoff = 0;
+	sd_t->splash_sboxes_xoff = 0;
+	sd_t->splash_sboxes_yoff = 0;
+
+	size = (((info->var.xres + 15) & ~15)
+		* ((info->var.bits_per_pixel + 1) >> 3))
+		* ((info->var.yres + 15) & ~15);
+	if (size != sd_s->pic->splash_pic_size) {
+		sd_t->pic = kzalloc(sizeof(struct splash_pic_data), GFP_KERNEL);
+		if (!sd_t->pic) {
+			kfree(sd_t);
+			return -1;
+		}
+		sd_t->pic->ref_cnt = 1;
+	} else {
+		sd_t->pic = sd_s->pic;
+		sd_t->pic->ref_cnt++;
+	}
+
+	/* publish only once everything is in place */
+	sd_t->imgd = sd_s->imgd;
+	sd_t->imgd->ref_cnt++;
+	vc_t->vc_splash_data = sd_t;
+
+	splash_status(vc_t);
+
+	return 0;
+}
+
+/*
+ * Legacy /proc/splash read handler: report version, packed colors,
+ * resolution, silent flag and on/off state of console 0.
+ * NOTE(review): vc_cons[0].d is dereferenced without a NULL check;
+ * console 0 is assumed to exist once procfs is up — confirm.
+ */
+static int splash_read_proc(char *buffer, char **start, off_t offset, int size,
+			    int *eof, void *data)
+{
+	int len;
+	int xres, yres;
+	struct vc_data *vc = vc_cons[0].d;
+	struct fb_info *info = registered_fb[(int)con2fb_map[0]];
+	/* background color in the high nibble, foreground in the low */
+	int color = vc->vc_splash_data ?
+		vc->vc_splash_data->imgd->splash_color << 4 |
+		vc->vc_splash_data->imgd->splash_fg_color : splash_default >> 4;
+	int status = vc->vc_splash_data ?
+		vc->vc_splash_data->splash_state & 1 : 0;
+
+	if (info) {
+		xres = info->var.xres;
+		yres = info->var.yres;
+	} else
+		xres = yres = 0;
+
+	len = sprintf(buffer, "Splash screen v%s (0x%02x, %dx%d%s): %s\n",
+		      SPLASH_VERSION, color, xres, yres,
+		      (vc->vc_splash_data ?
+		       vc->vc_splash_data->splash_dosilent : 0) ? ", silent" :
+		      "",
+		      status ? "on" : "off");
+	if (offset >= len)
+		return 0;
+
+	/* old read_proc continuation protocol (pre-seq_file) */
+	*start = buffer - offset;
+
+	return (size < len - offset ? size : len - offset);
+}
+
+/*
+ * Update the boot progress bar of console "vc".  "pe" is a 16-bit
+ * progress value; values above 32767 are skewed by +1 so the range
+ * effectively ends at 65536 (= finished).  If the image allows
+ * overpainting and progress moved forward, only the progress boxes
+ * are repainted; otherwise the whole splash is redrawn.
+ */
+void splash_set_percent(struct vc_data *vc, int pe)
+{
+	struct fb_info *info;
+	struct fbcon_ops *ops;
+	struct splash_data *vc_splash_data;
+	int oldpe;
+
+	SPLASH_DEBUG(" console: %d val: %d\n", vc->vc_num, pe);
+
+	if (pe < -2)
+		pe = 0;
+	if (pe > 65535)
+		pe = 65535;
+	/* skew the upper half so 65535 becomes 65536 (= done) */
+	pe += pe > 32767;
+
+	vc_splash_data = vc->vc_splash_data;
+	if (!vc_splash_data || vc_splash_data->splash_percent == pe)
+		return;
+
+	oldpe = vc_splash_data->splash_percent;
+	vc_splash_data->splash_percent = pe;
+	if (fg_console != vc->vc_num ||
+	    !vc_splash_data->splash_state) {
+		return;
+	}
+	info = registered_fb[(int) con2fb_map[vc->vc_num]];
+	if (!info)
+		return;
+
+	/* don't paint on a blanked framebuffer */
+	ops = info->fbcon_par;
+	if (ops->blank_state)
+		return;
+	if (!vc_splash_data->imgd->splash_overpaintok
+	    || pe == 65536
+	    || pe < oldpe) {
+		/* full redraw when boxes intersect or progress went back */
+		if (splash_hasinter(vc_splash_data->imgd->splash_boxes,
+				    vc_splash_data->imgd->splash_boxcount)) {
+			splash_status(vc);
+		} else
+			splash_prepare(vc, info);
+	} else {
+		struct splash_data *splash_data = info->splash_data;
+		enum splash_color_format cf = SPLASH_DEPTH_UNKNOWN;
+		switch (info->var.bits_per_pixel) {
+		case 16:
+			if ((info->var.red.length +
+			     info->var.green.length +
+			     info->var.blue.length) == 15)
+				cf = SPLASH_DEPTH_15;
+			else
+				cf = SPLASH_DEPTH_16;
+			break;
+		case 24:
+			cf = SPLASH_DEPTH_24_PACKED;
+			break;
+		case 32:
+			cf = SPLASH_DEPTH_24;
+			break;
+		}
+		if (cf == SPLASH_DEPTH_UNKNOWN)
+			return;
+		if (splash_data) {
+			if (splash_data->imgd->splash_silentjpeg
+			    && splash_data->splash_dosilent) {
+				/* overpaint only the silent progress boxes */
+				boxit(info->screen_base,
+				      info->fix.line_length,
+				      splash_data->imgd->splash_sboxes,
+				      splash_data->imgd->splash_sboxcount,
+				      splash_data->splash_percent,
+				      splash_data->splash_sboxes_xoff,
+				      splash_data->splash_sboxes_yoff,
+				      1,
+				      cf);
+				/* FIXME: get a proper width/height */
+				splash_sync_region(info,
+						   splash_data->splash_sboxes_xoff,
+						   splash_data->splash_sboxes_yoff,
+						   info->var.xres -
+						   splash_data->splash_sboxes_xoff,
+						   8);
+			}
+		}
+	}
+}
+
+/*
+ * Parse an optional one- or two-digit console number at the start of
+ * "buffer".  On success *unit receives the parsed value and a single
+ * trailing blank is skipped; otherwise *unit is set to -1.  Returns a
+ * pointer just past the consumed characters.
+ */
+static const char *get_unit(const char *buffer, int *unit)
+{
+	const char *p = buffer;
+	int val = -1;
+
+	if (*p >= '0' && *p <= '9') {
+		val = *p++ - '0';
+		if (*p >= '0' && *p <= '9')
+			val = val * 10 + *p++ - '0';
+		if (*p == ' ')
+			p++;
+	}
+	*unit = val;
+	return p;
+}
+
+/*
+ * Legacy /proc/splash write handler.  Commands (optionally prefixed
+ * with "@<unit>" to select a console): "redraw", "show"/"hide"
+ * [percent], "copy <from> <to>", "silent"/"verbose", "freesilent",
+ * a raw "BOOTSPL..." image blob, "t" (toggle), or a numeric state/
+ * color value.  Everything runs under the console lock; the full
+ * byte count is always consumed.
+ */
+static int splash_write_proc(struct file *file, const char *buffer,
+			     unsigned long count, void *data)
+{
+	int new, unit;
+	unsigned long uval;
+	struct vc_data *vc;
+	struct splash_data *vc_splash_data;
+
+	SPLASH_DEBUG();
+
+	if (!buffer || !splash_default)
+		return count;
+
+	console_lock();
+	unit = 0;
+	/* optional "@<unit>" console selector */
+	if (buffer[0] == '@') {
+		buffer++;
+		buffer = get_unit(buffer, &unit);
+		if (unit < 0 || unit >= MAX_NR_CONSOLES || !vc_cons[unit].d) {
+			console_unlock();
+			return count;
+		}
+	}
+	SPLASH_DEBUG(" unit: %i", unit);
+	vc = vc_cons[unit].d;
+	vc_splash_data = vc->vc_splash_data;
+
+	if (!strncmp(buffer, "redraw", 6)) {
+		SPLASH_DEBUG(" redraw");
+		splash_status(vc);
+		console_unlock();
+		return count;
+	}
+
+	if (!strncmp(buffer, "show", 4) || !strncmp(buffer, "hide", 4)) {
+		long int pe;
+
+		SPLASH_DEBUG("show/hide");
+		if (buffer[4] == ' ' && buffer[5] == 'p')
+			pe = 0;
+		else if (buffer[4] == '\n')
+			pe = 65535;
+		else if (strict_strtol(buffer + 5, 0, &pe) == -EINVAL)
+			pe = 0;
+		if (pe < -2)
+			pe = 0;
+		if (pe > 65535)
+			pe = 65535;
+		/* "hide" counts from the other end of the range */
+		if (*buffer == 'h')
+			pe = 65535 - pe;
+		splash_set_percent(vc, pe);
+		console_unlock();
+		return count;
+	}
+
+	if (!strncmp(buffer, "copy", 4)) {
+		buffer += 4;
+		if (buffer[0] == ' ')
+			buffer++;
+		buffer = get_unit(buffer, &unit);
+		if (unit < 0 || unit >= MAX_NR_CONSOLES) {
+			console_unlock();
+			return count;
+		}
+		buffer = get_unit(buffer, &new);
+		if (new < 0 || new >= MAX_NR_CONSOLES) {
+			console_unlock();
+			return count;
+		}
+		splash_copy_current_img(unit, new);
+		console_unlock();
+		return count;
+	}
+
+	if (!strncmp(buffer, "silent\n", 7)
+	    || !strncmp(buffer, "verbose\n", 8)) {
+		SPLASH_DEBUG(" silent/verbose");
+
+		if (vc_splash_data &&
+		    vc_splash_data->imgd->splash_silentjpeg) {
+			if (vc_splash_data->splash_dosilent !=
+			    (buffer[0] == 's')) {
+				vc_splash_data->splash_dosilent =
+					buffer[0] == 's';
+				splash_status(vc);
+			}
+		}
+		console_unlock();
+		return count;
+	}
+
+	if (!strncmp(buffer, "freesilent\n", 11)) {
+		SPLASH_DEBUG(" freesilent");
+
+		if (vc_splash_data &&
+		    vc_splash_data->imgd->splash_silentjpeg) {
+			struct splash_data *sd;
+			printk(KERN_INFO "bootsplash: freeing silent jpeg\n");
+			/* drop the silent image from every splash entry */
+			for (sd = vc_splash_data; sd; sd = sd->next) {
+				sd->imgd->splash_silentjpeg = 0;
+				vfree(sd->imgd->splash_sboxes);
+				sd->imgd->splash_sboxes = 0;
+				sd->imgd->splash_sboxcount = 0;
+			}
+			if (vc_splash_data->splash_dosilent)
+				splash_status(vc);
+
+			vc->vc_splash_data->splash_dosilent = 0;
+		}
+		console_unlock();
+		return count;
+	}
+
+	if (!strncmp(buffer, "BOOTSPL", 7)) {
+		int up = -1;
+
+		SPLASH_DEBUG(" BOOTSPL");
+		/* raw splash image upload; splash_getraw parses it and
+		 * reports the target console and update mode in "up" */
+		unit = splash_getraw((unsigned char *)buffer,
+				     (unsigned char *)buffer + count,
+				     &up);
+		SPLASH_DEBUG(" unit: %i up: %i", unit, up);
+		if (unit >= 0) {
+			struct fb_info *info;
+
+			vc = vc_cons[unit].d;
+			info = registered_fb[(int) con2fb_map[vc->vc_num]];
+			if (!info) {
+				console_unlock();
+				return count;
+			}
+
+			if (up == -1) {
+				splash_status(vc);
+			} else {
+				/* NOTE(review): these shadow the outer
+				 * vc_splash_data on purpose — the unit may
+				 * have changed above */
+				struct splash_data *vc_splash_data
+					= vc->vc_splash_data;
+				struct splash_data *splash_data
+					= info->splash_data;
+				struct fbcon_ops *ops = info->fbcon_par;
+				enum splash_color_format cf = SPLASH_DEPTH_UNKNOWN;
+
+				switch (info->var.bits_per_pixel) {
+				case 16:
+					if ((info->var.red.length +
+					     info->var.green.length +
+					     info->var.blue.length) == 15)
+						cf = SPLASH_DEPTH_15;
+					else
+						cf = SPLASH_DEPTH_16;
+					break;
+				case 24:
+					cf = SPLASH_DEPTH_24_PACKED;
+					break;
+				case 32:
+					cf = SPLASH_DEPTH_24;
+					break;
+				}
+				if (cf == SPLASH_DEPTH_UNKNOWN)
+					up = 0;
+				if (ops->blank_state ||
+				    !vc_splash_data ||
+				    !splash_data)
+					up = 0;
+				/* bit 1: repaint silent boxes, bit 0: boxes */
+				if ((up & 2) != 0
+				    && splash_data->imgd->splash_silentjpeg
+				    && splash_data->splash_dosilent) {
+					boxit(info->screen_base,
+					      info->fix.line_length,
+					      splash_data->imgd->splash_sboxes,
+					      splash_data->imgd->splash_sboxcount,
+					      splash_data->splash_percent,
+					      splash_data->splash_sboxes_xoff,
+					      splash_data->splash_sboxes_yoff,
+					      1,
+					      cf);
+				} else if ((up & 1) != 0) {
+					boxit(info->screen_base,
+					      info->fix.line_length,
+					      splash_data->imgd->splash_boxes,
+					      splash_data->imgd->splash_boxcount,
+					      splash_data->splash_percent,
+					      splash_data->splash_boxes_xoff,
+					      splash_data->splash_boxes_yoff,
+					      1,
+					      cf);
+				}
+			}
+		}
+		console_unlock();
+		return count;
+	}
+
+	if (!vc_splash_data) {
+		console_unlock();
+		return count;
+	}
+
+	/* "t" toggles the splash state */
+	if (buffer[0] == 't') {
+		vc_splash_data->splash_state ^= 1;
+		SPLASH_DEBUG(" t");
+		splash_status(vc);
+		console_unlock();
+		return count;
+	}
+	if (strict_strtoul(buffer, 0, &uval) == -EINVAL)
+		uval = 1;
+	if (uval > 1) {
+		/* expert user */
+		vc_splash_data->imgd->splash_color = uval >> 8 & 0xff;
+		vc_splash_data->imgd->splash_fg_color = uval >> 4 & 0x0f;
+	}
+	if ((uval & 1) == vc_splash_data->splash_state)
+		splash_recolor(vc, NULL);
+	else {
+		vc_splash_data->splash_state = uval & 1;
+		splash_status(vc);
+	}
+	console_unlock();
+	return count;
+}
+
+/*
+ * Create /proc/splash with the legacy read_proc/write_proc hooks.
+ * Returns 0 on success, 1 if the entry could not be created.
+ */
+static int splash_proc_register(void)
+{
+	proc_splash = create_proc_entry("splash", 0, 0);
+	if (proc_splash) {
+		proc_splash->read_proc = splash_read_proc;
+		proc_splash->write_proc = splash_write_proc;
+		return 0;
+	}
+	return 1;
+}
+
+#endif /* CONFIG_PROC_FS */
+
+#define INIT_CONSOLE 0
+
+/*
+ * One-time splash initialization: register /proc/splash and scan the
+ * initramfs ("/bootsplash") or initrd image for splash data, looking
+ * at most at the last 2 MB.  Uses in-kernel sys_* file calls, so this
+ * must run early, while a root/initrd filesystem is mounted.
+ */
+void splash_init(void)
+{
+	static bool splash_not_initialized = true;
+	struct fb_info *info;
+	struct vc_data *vc;
+	int isramfs = 1;
+	int fd;
+	int len;
+	int max_len = 1024*1024*2;
+	char *mem;
+
+	if (splash_not_initialized == false)
+		return;
+	vc = vc_cons[INIT_CONSOLE].d;
+	info = registered_fb[(int)con2fb_map[INIT_CONSOLE]];
+	if (!vc
+	    || !info
+	    || info->var.bits_per_pixel < 16) /* not supported */
+		return;
+#ifdef CONFIG_PROC_FS
+	splash_proc_register();
+#endif
+	splash_not_initialized = false;
+	if (vc->vc_splash_data)
+		return;
+	/* prefer the dedicated /bootsplash file, else scan the initrd */
+	fd = sys_open("/bootsplash", O_RDONLY, 0);
+	if (fd < 0) {
+		isramfs = 0;
+		fd = sys_open("/initrd.image", O_RDONLY, 0);
+	}
+	if (fd < 0)
+		return;
+	/* lseek to the end to learn the file size */
+	len = (int)sys_lseek(fd, (off_t)0, 2);
+	if (len <= 0) {
+		sys_close(fd);
+		return;
+	}
+	/* Don't look for more than the last 2MB */
+	if (len > max_len) {
+		printk(KERN_INFO "bootsplash: "
+		       "scanning last %dMB of initrd for signature\n",
+		       max_len>>20);
+		sys_lseek(fd, (off_t)(len - max_len), 0);
+		len = max_len;
+	} else {
+		sys_lseek(fd, (off_t)0, 0);
+	}
+
+	mem = vmalloc(len);
+	if (mem) {
+		console_lock();
+		if ((int)sys_read(fd, mem, len) == len
+		    && (splash_getraw((unsigned char *)mem,
+				      (unsigned char *)mem + len, (int *)0)
+			== INIT_CONSOLE)
+		    && vc->vc_splash_data)
+			vc->vc_splash_data->splash_state = splash_default & 1;
+		console_unlock();
+		vfree(mem);
+	}
+	sys_close(fd);
+	/* the dedicated file is consumed once it has been read */
+	if (isramfs)
+		sys_unlink("/bootsplash");
+	return;
+}
+
+#define SPLASH_ALIGN 15
+
+/*
+ * Build a fixed-point coefficient table for scaling "from" source
+ * samples to "to" destination samples along one axis.  *shift receives
+ * the fixed-point shift used by the coefficients.  The first array
+ * element is the number of row groups; the layout of the remaining
+ * entries differs between the downscale (from > to) and upscale
+ * branches and is consumed by scale_x_down()/scale_x_up().
+ * Returns a vmalloc()ed array the caller must vfree(), or NULL on
+ * allocation failure.
+ */
+static u32 *do_coefficients(u32 from, u32 to, u32 *shift)
+{
+	u32 *coefficients;
+	u32 left = to;
+	int n = 1;
+	u32 upper = 31;
+	int col_cnt = 0;
+	int row_cnt = 0;
+	int m;
+	u32 rnd = from >> 1;
+
+	if (from > to) {
+		/* downscale */
+		left = to;
+		rnd = from >> 1;
+
+		/* find the highest set bit of "from" to size the shift */
+		while (upper > 0) {
+			if ((1 << upper) & from)
+				break;
+			upper--;
+		}
+		upper++;
+
+		*shift = 32 - 8 - 1 - upper;
+
+		coefficients = vmalloc(sizeof(u32) * (from / to + 2) * from + 1);
+		if (!coefficients)
+			return NULL;
+
+		n = 1;
+		while (1) {
+			u32 sum = left;
+			col_cnt = 0;
+			/* reserve slot m for this group's column count */
+			m = n++;
+			while (sum < from) {
+				coefficients[n++] =
+					((left << *shift) + rnd) / from;
+				col_cnt++;
+				left = to;
+				sum += left;
+			}
+			left = sum - from;
+			coefficients[n++] =
+				(((to - left) << *shift) + rnd) / from;
+			col_cnt++;
+			coefficients[m] = col_cnt;
+			row_cnt++;
+			if (!left) {
+				coefficients[0] = row_cnt;
+				return coefficients;
+			}
+		}
+	} else {
+		/* upscale (or 1:1) */
+		left = 0;
+		rnd = to >> 1;
+
+		while (upper > 0) {
+			if ((1 << upper) & to)
+				break;
+			upper--;
+		}
+		upper++;
+
+		*shift = 32 - 8 - 1 - upper;
+
+		coefficients = vmalloc(sizeof(u32) * 3 * from + 1);
+		if (!coefficients)
+			return NULL;
+
+		while (1) {
+			u32 diff;
+			u32 sum = left;
+			col_cnt = 0;
+			row_cnt++;
+			while (sum < to) {
+				col_cnt++;
+				sum += from;
+			}
+			left = sum - to;
+			diff = from - left;
+			if (!left) {
+				coefficients[n] = col_cnt;
+				coefficients[0] = row_cnt;
+				return coefficients;
+			}
+			coefficients[n++] = col_cnt - 1;
+			coefficients[n++] = ((diff << *shift) + rnd) / from;
+			coefficients[n++] = ((left << *shift) + rnd) / from;
+		}
+	}
+}
+
+
+/* widened per-channel accumulator used while scaling rows/columns */
+struct pixel {
+	u32 red;
+	u32 green;
+	u32 blue;
+};
+
+/*
+ * Store one pixel into *buf in the given splash color format and
+ * advance buf by the pixel size (2, 3 or 4 bytes).  Channel values
+ * must already be in the target bit widths.
+ */
+#define put_pixel(pix, buf, cf)					 \
+	switch (cf) {						 \
+	case SPLASH_DEPTH_15:					 \
+		*(u16 *)(buf) = (u16)((pix).red << 10 |		 \
+				(pix).green << 5 | (pix).blue);	 \
+		(buf) += 2;					 \
+		break;						 \
+	case SPLASH_DEPTH_16:					 \
+		*(u16 *)(buf) = (u16)((pix).red << 11 |		 \
+				(pix).green << 5 | (pix).blue);	 \
+		(buf) += 2;					 \
+		break;						 \
+	case SPLASH_DEPTH_24_PACKED:				 \
+		*(u16 *)(buf) = (u16)((pix).red << 8 | (pix).green); \
+		buf += 2;					 \
+		*((buf)++) = (pix).blue;			 \
+		break;						 \
+	case SPLASH_DEPTH_24:					 \
+		*(u32 *)(buf) = (u32)((pix).red << 16 |		 \
+				(pix).green << 8 | (pix).blue);	 \
+		(buf) += 4;					 \
+		break;						 \
+	case SPLASH_DEPTH_UNKNOWN:				 \
+		break;						 \
+	}
+
+/*
+ * Load one pixel from *buf in the given splash color format into the
+ * struct pixel channels and advance buf by the pixel size.  The 32bpp
+ * case skips the unused fourth byte.
+ */
+#define get_pixel(pix, buf, depth)				 \
+	switch (depth) {					 \
+	case SPLASH_DEPTH_15:					 \
+		(pix).red = ((*(u16 *)(buf)) >> 10) & 0x1f;	 \
+		(pix).green = ((*(u16 *)(buf)) >> 5) & 0x1f;	 \
+		(pix).blue = (*(u16 *)(buf)) & 0x1f;		 \
+		(buf) += 2;					 \
+		break;						 \
+	case SPLASH_DEPTH_16:					 \
+		(pix).red = ((*(u16 *)(buf)) >> 11) & 0x1f;	 \
+		(pix).green = ((*(u16 *)(buf)) >> 5) & 0x3f;	 \
+		(pix).blue = (*(u16 *)(buf)) & 0x1f;		 \
+		(buf) += 2;					 \
+		break;						 \
+	case SPLASH_DEPTH_24_PACKED:				 \
+		(pix).blue = *(((buf))++);			 \
+		(pix).green = *(((buf))++);			 \
+		(pix).red = *(((buf))++);			 \
+		break;						 \
+	case SPLASH_DEPTH_24:					 \
+		(pix).blue = *(((buf))++);			 \
+		(pix).green = *(((buf))++);			 \
+		(pix).red = *(((buf))++);			 \
+		(buf)++;					 \
+		break;						 \
+	case SPLASH_DEPTH_UNKNOWN:				 \
+		break;						 \
+	}
+
+/*
+ * Horizontally downscale one source row: consume src_w pixels from
+ * *src_p, combine them with the x_coeff table produced by
+ * do_coefficients(), and accumulate the result (weighted by y_coeff)
+ * into row_buffer.  *src_p is advanced past the row's pixels.
+ */
+static inline void
+scale_x_down(enum splash_color_format cf, int src_w,
+	     unsigned char **src_p, u32 *x_coeff,
+	     u32 x_shift, u32 y_coeff, struct pixel *row_buffer)
+{
+	u32 curr_x_coeff = 1;
+	struct pixel curr_pixel, tmp_pixel;
+	u32 x_array_size = x_coeff[0];
+	int x_column_num;
+	int i;
+	int l, m;
+	int k = 0;
+	u32 rnd = (1 << (x_shift - 1));	/* fixed-point rounding term */
+
+	for (i = 0; i < src_w; ) {
+		curr_x_coeff = 1;
+		get_pixel(tmp_pixel, *src_p, cf);
+		i++;
+		for (l = 0; l < x_array_size; l++) {
+			x_column_num = x_coeff[curr_x_coeff++];
+			curr_pixel.red = 0;
+			curr_pixel.green = 0;
+			curr_pixel.blue = 0;
+			/* weighted sum over the source pixels feeding this
+			 * destination column */
+			for (m = 0; m < x_column_num - 1; m++) {
+				curr_pixel.red += tmp_pixel.red
+					* x_coeff[curr_x_coeff];
+				curr_pixel.green += tmp_pixel.green
+					* x_coeff[curr_x_coeff];
+				curr_pixel.blue += tmp_pixel.blue
+					* x_coeff[curr_x_coeff];
+				curr_x_coeff++;
+				get_pixel(tmp_pixel, *src_p, cf);
+				i++;
+			}
+			curr_pixel.red += tmp_pixel.red * x_coeff[curr_x_coeff];
+			curr_pixel.green += tmp_pixel.green
+				* x_coeff[curr_x_coeff];
+			curr_pixel.blue += tmp_pixel.blue
+				* x_coeff[curr_x_coeff];
+			curr_x_coeff++;
+			curr_pixel.red = (curr_pixel.red + rnd) >> x_shift;
+			curr_pixel.green = (curr_pixel.green + rnd) >> x_shift;
+			curr_pixel.blue = (curr_pixel.blue + rnd) >> x_shift;
+			row_buffer[k].red += curr_pixel.red * y_coeff;
+			row_buffer[k].green += curr_pixel.green * y_coeff;
+			row_buffer[k].blue += curr_pixel.blue * y_coeff;
+			k++;
+		}
+	}
+}
+
+/*
+ * Horizontally upscale one source row: replicate/interpolate src_w
+ * pixels from *src_p into row_buffer using the x_coeff table, each
+ * contribution weighted by y_coeff.  *src_p is advanced past the
+ * row's pixels.
+ */
+static inline void
+scale_x_up(enum splash_color_format cf, int src_w,
+	   unsigned char **src_p, u32 *x_coeff,
+	   u32 x_shift, u32 y_coeff, struct pixel *row_buffer)
+{
+	u32 curr_x_coeff = 1;
+	struct pixel curr_pixel, tmp_pixel;
+	u32 x_array_size = x_coeff[0];
+	int x_column_num;
+	int i;
+	int l, m;
+	int k = 0;
+	u32 rnd = (1 << (x_shift - 1));	/* fixed-point rounding term */
+
+	for (i = 0; i < src_w;) {
+		curr_x_coeff = 1;
+		get_pixel(tmp_pixel, *src_p, cf);
+		i++;
+		for (l = 0; l < x_array_size - 1; l++) {
+			x_column_num = x_coeff[curr_x_coeff++];
+			/* replicate the source pixel into whole columns */
+			for (m = 0; m < x_column_num; m++) {
+				row_buffer[k].red += tmp_pixel.red * y_coeff;
+				row_buffer[k].green += tmp_pixel.green * y_coeff;
+				row_buffer[k].blue += tmp_pixel.blue * y_coeff;
+				k++;
+			}
+			/* blend the boundary column from two neighbors */
+			curr_pixel.red = tmp_pixel.red * x_coeff[curr_x_coeff];
+			curr_pixel.green = tmp_pixel.green
+				* x_coeff[curr_x_coeff];
+			curr_pixel.blue = tmp_pixel.blue * x_coeff[curr_x_coeff];
+			curr_x_coeff++;
+			get_pixel(tmp_pixel, *src_p, cf);
+			i++;
+			row_buffer[k].red += ((curr_pixel.red
+					       + (tmp_pixel.red
+						  * x_coeff[curr_x_coeff])
+					       + rnd) >> x_shift) * y_coeff;
+			row_buffer[k].green += ((curr_pixel.green
+						 + (tmp_pixel.green
+						    * x_coeff[curr_x_coeff])
+						 + rnd) >> x_shift) * y_coeff;
+			row_buffer[k].blue += ((curr_pixel.blue
+						+ (tmp_pixel.blue
+						   * x_coeff[curr_x_coeff])
+						+ rnd) >> x_shift) * y_coeff;
+			k++;
+			curr_x_coeff++;
+		}
+		/* tail: remaining columns copy the last source pixel */
+		for (m = 0; m < x_coeff[curr_x_coeff]; m++) {
+			row_buffer[k].red += tmp_pixel.red * y_coeff;
+			row_buffer[k].green += tmp_pixel.green * y_coeff;
+			row_buffer[k].blue += tmp_pixel.blue * y_coeff;
+			k++;
+		}
+	}
+}
+
+/*
+ * Scale an image from src_w x src_h down to dst_w x dst_h rows
+ * (horizontal scaling may go either way).  Each destination row is
+ * accumulated in row_buffer from the contributing source rows, then
+ * rounded and emitted with put_pixel().  Line strides are rounded up
+ * to 16 pixels (SPLASH_ALIGN).  Returns 0 or -ENOMEM.
+ */
+static int scale_y_down(unsigned char *src, unsigned char *dst,
+			enum splash_color_format cf,
+			int src_w, int src_h, int dst_w, int dst_h)
+{
+	int octpp = splash_octpp(cf);
+	int src_x_bytes = octpp * ((src_w + SPLASH_ALIGN) & ~SPLASH_ALIGN);
+	int dst_x_bytes = octpp * ((dst_w + SPLASH_ALIGN) & ~SPLASH_ALIGN);
+	int j;
+	struct pixel *row_buffer;
+	u32 x_shift, y_shift;
+	u32 *x_coeff;
+	u32 *y_coeff;
+	u32 curr_y_coeff = 1;
+	unsigned char *src_p;
+	unsigned char *src_p_line = src;
+	char *dst_p_line;
+	int r, s;
+	int y_array_rows;
+	int y_column_num;
+	int k;
+	u32 rnd;
+	int xup;
+
+	row_buffer = vmalloc(sizeof(struct pixel)
+			     * (dst_w + 1));
+	x_coeff = do_coefficients(src_w, dst_w, &x_shift);
+	y_coeff = do_coefficients(src_h, dst_h, &y_shift);
+	if (!row_buffer || !x_coeff || !y_coeff) {
+		vfree(row_buffer);
+		vfree(x_coeff);
+		vfree(y_coeff);
+		return -ENOMEM;
+	}
+	y_array_rows = y_coeff[0];
+	rnd = (1 << (y_shift - 1));	/* fixed-point rounding term */
+	xup = (src_w <= dst_w) ? 1 : 0;	/* horizontal direction */
+
+	dst_p_line = dst;
+
+	for (j = 0; j < src_h;) {
+		curr_y_coeff = 1;
+		for (r = 0; r < y_array_rows; r++) {
+			y_column_num = y_coeff[curr_y_coeff++];
+			/* reset the accumulator for this destination row */
+			for (k = 0; k < dst_w + 1; k++) {
+				row_buffer[k].red = 0;
+				row_buffer[k].green = 0;
+				row_buffer[k].blue = 0;
+			}
+			src_p = src_p_line;
+			if (xup)
+				scale_x_up(cf, src_w, &src_p, x_coeff,
+					   x_shift, y_coeff[curr_y_coeff],
+					   row_buffer);
+			else
+				scale_x_down(cf, src_w, &src_p, x_coeff,
+					     x_shift, y_coeff[curr_y_coeff],
+					     row_buffer);
+			curr_y_coeff++;
+			/* fold in the remaining source rows of this group */
+			for (s = 1; s < y_column_num; s++) {
+				src_p = src_p_line = src_p_line + src_x_bytes;
+				j++;
+				if (xup)
+					scale_x_up(cf, src_w, &src_p,
+						   x_coeff, x_shift,
+						   y_coeff[curr_y_coeff],
+						   row_buffer);
+				else
+					scale_x_down(cf, src_w, &src_p,
+						     x_coeff, x_shift,
+						     y_coeff[curr_y_coeff],
+						     row_buffer);
+				curr_y_coeff++;
+			}
+			/* round and write out the finished row */
+			for (k = 0; k < dst_w; k++) {
+				row_buffer[k].red = (row_buffer[k].red + rnd)
+					>> y_shift;
+				row_buffer[k].green = (row_buffer[k].green
+						       + rnd)
+					>> y_shift;
+				row_buffer[k].blue = (row_buffer[k].blue + rnd)
+					>> y_shift;
+				put_pixel(row_buffer[k], dst, cf);
+			}
+			dst = dst_p_line = dst_p_line + dst_x_bytes;
+		}
+		src_p_line = src_p_line + src_x_bytes;
+		j++;
+	}
+	vfree(row_buffer);
+	vfree(x_coeff);
+	vfree(y_coeff);
+	return 0;
+}
+
+/*
+ * scale_y_up - enlarge a decoded splash image from src_w x src_h to
+ * dst_w x dst_h (vertical interpolation between adjacent source rows;
+ * the horizontal pass is delegated to scale_x_up()/scale_x_down()).
+ *
+ * Two row buffers are used alternately (row_buf_list[0/1]): each output
+ * row between two source rows is a weighted blend of the previous and the
+ * current source row, using the fixed-point weights from do_coefficients()
+ * (normalized by >> y_shift, with rounding term rnd).
+ *
+ * Returns 0 on success, -ENOMEM if any allocation failed.
+ * Fixes vs. original: dst_p_line was declared `char *` although it holds
+ * `unsigned char *` values (pointer signedness mismatch), and the local
+ * `writes` counter was incremented but never read (dead code) - removed.
+ */
+static int scale_y_up(unsigned char *src, unsigned char *dst,
+		      enum splash_color_format cf,
+		      int src_w, int src_h, int dst_w, int dst_h)
+{
+	int octpp = splash_octpp(cf);
+	/* rows are padded to the splash alignment, in bytes */
+	int src_x_bytes = octpp * ((src_w + SPLASH_ALIGN) & ~SPLASH_ALIGN);
+	int dst_x_bytes = octpp * ((dst_w + SPLASH_ALIGN) & ~SPLASH_ALIGN);
+	int j;
+	u32 x_shift, y_shift;
+	u32 *x_coeff;
+	u32 *y_coeff;
+	struct pixel *row_buf_list[2];
+	struct pixel *row_buffer;
+	u32 curr_y_coeff = 1;
+	unsigned char *src_p;
+	unsigned char *src_p_line = src;
+	unsigned char *dst_p_line;
+	int r, s;
+	int y_array_rows;
+	int y_column_num;
+	int k;
+	u32 rnd;
+	int bi;
+	int xup;
+
+	x_coeff = do_coefficients(src_w, dst_w, &x_shift);
+	y_coeff = do_coefficients(src_h, dst_h, &y_shift);
+	/* one allocation for both row buffers */
+	row_buf_list[0] = vmalloc(2 * sizeof(struct pixel)
+				  * (dst_w + 1));
+	if (!row_buf_list[0] || !x_coeff || !y_coeff) {
+		vfree(row_buf_list[0]);
+		vfree(x_coeff);
+		vfree(y_coeff);
+		return -ENOMEM;
+	}
+	row_buf_list[1] = row_buf_list[0] + (dst_w + 1);
+
+	y_array_rows = y_coeff[0];
+	rnd = (1 << (y_shift - 1));	/* rounding term before >> y_shift */
+	bi = 1;
+	xup = (src_w <= dst_w) ? 1 : 0;	/* horizontal pass: up- or downscale */
+
+	dst_p_line = dst;
+	src_p = src_p_line;
+
+	row_buffer = row_buf_list[0];
+
+	for (j = 0; j < src_h;) {
+		memset(row_buf_list[0], 0, (2 * sizeof(struct pixel)
+					    * (dst_w + 1)));
+		curr_y_coeff = 1;
+		/* horizontally scale the first source row of this group */
+		if (xup)
+			scale_x_up(cf, src_w, &src_p, x_coeff,
+				   x_shift, 1, row_buffer);
+		else
+			scale_x_down(cf, src_w, &src_p, x_coeff, x_shift, 1,
+				     row_buffer);
+		src_p = src_p_line = src_p_line + src_x_bytes;
+		j++;
+		for (r = 0; r < y_array_rows - 1; r++) {
+			struct pixel *old_row_buffer = row_buffer;
+			u32 prev_y_coeff_val;
+
+			/* replicate the previous row y_column_num times */
+			y_column_num = y_coeff[curr_y_coeff];
+			for (s = 0; s < y_column_num; s++) {
+				for (k = 0; k < dst_w; k++)
+					put_pixel(row_buffer[k], dst, cf);
+				dst = dst_p_line = dst_p_line + dst_x_bytes;
+			}
+			curr_y_coeff++;
+			/* swap buffers, scale the next source row */
+			row_buffer = row_buf_list[(bi++) % 2];
+			prev_y_coeff_val = y_coeff[curr_y_coeff++];
+			if (xup)
+				scale_x_up(cf, src_w, &src_p, x_coeff,
+					   x_shift, 1, row_buffer);
+			else
+				scale_x_down(cf, src_w, &src_p, x_coeff,
+					     x_shift, 1, row_buffer);
+			src_p = src_p_line = src_p_line + src_x_bytes;
+			j++;
+			/* blend previous and current row into one output row */
+			for (k = 0; k < dst_w; k++) {
+				struct pixel pix;
+				pix.red = ((old_row_buffer[k].red
+					    * prev_y_coeff_val)
+					   + (row_buffer[k].red
+					      * y_coeff[curr_y_coeff])
+					   + rnd) >> y_shift;
+				pix.green = ((old_row_buffer[k].green
+					      * prev_y_coeff_val)
+					     + (row_buffer[k].green
+						* y_coeff[curr_y_coeff])
+					     + rnd) >> y_shift;
+				pix.blue = ((old_row_buffer[k].blue
+					     * prev_y_coeff_val)
+					    + (row_buffer[k].blue
+					       * y_coeff[curr_y_coeff])
+					    + rnd) >> y_shift;
+				old_row_buffer[k].red = 0;
+				old_row_buffer[k].green = 0;
+				old_row_buffer[k].blue = 0;
+				put_pixel(pix, dst, cf);
+			}
+			dst = dst_p_line = dst_p_line + dst_x_bytes;
+			curr_y_coeff++;
+		}
+		/* replicate the final row of the group */
+		for (r = 0; r < y_coeff[curr_y_coeff]; r++) {
+			for (k = 0; k < dst_w; k++)
+				put_pixel(row_buffer[k], dst, cf);
+
+			dst = dst_p_line = dst_p_line + dst_x_bytes;
+		}
+	}
+	vfree(row_buf_list[0]);
+	vfree(x_coeff);
+	vfree(y_coeff);
+
+	return 0;
+}
+
+/*
+ * jpeg_get - decode a JPEG splash image into pic at width x height.
+ *
+ * If the JPEG's native size differs from the requested size, the image is
+ * decoded into a temporary buffer (dimensions rounded up to a multiple of
+ * 16, as required by jpeg_decode()) and then rescaled into pic.
+ *
+ * Returns 0 on success, a positive ERR_* code from jpeg_decode() on decode
+ * failure, or 17 on allocation/scaling failure.
+ * NOTE(review): 17 lies outside the ERR_* range (1..15 in decode-jpg.h);
+ * presumably a catch-all failure code - verify against callers.
+ */
+static int jpeg_get(unsigned char *buf, unsigned char *pic,
+		    int width, int height, enum splash_color_format cf,
+		    struct jpeg_decdata *decdata)
+{
+	int my_width, my_height;
+	int err;
+	int octpp = splash_octpp(cf);	/* octets (bytes) per pixel */
+
+	jpeg_get_size(buf, &my_width, &my_height);
+
+	if (my_height != height || my_width != width) {
+		/* native size differs: decode to a 16-aligned scratch
+		 * buffer, then scale into the target */
+		int my_size = ((my_width + 15) & ~15)
+			* ((my_height + 15) & ~15) * octpp;
+		unsigned char *mem = vmalloc(my_size);
+		if (!mem)
+			return 17;
+		err = jpeg_decode(buf, mem, ((my_width + 15) & ~15),
+				  ((my_height + 15) & ~15), cf, decdata);
+		if (err) {
+			vfree(mem);
+			return err;
+		}
+		printk(KERN_INFO
+		       "bootsplash: scaling image from %dx%d to %dx%d\n",
+		       my_width, my_height, width, height);
+		if (my_height <= height)
+			err = scale_y_up(mem, pic, cf, my_width, my_height,
+					 ((width + 15) & ~15),
+					 ((height + 15) & ~15));
+		else
+			err = scale_y_down(mem, pic, cf, my_width, my_height,
+					   ((width + 15) & ~15),
+					   ((height + 15) & ~15));
+		vfree(mem);
+		if (err < 0)
+			return 17;
+	} else {
+		/* sizes match: decode directly into the target buffer */
+		err = jpeg_decode(buf, pic, ((width + 15) & ~15),
+				  ((height + 15) & ~15), cf, decdata);
+		if (err)
+			return err;
+	}
+	return 0;
+}
--- /dev/null
+/*
+ * linux/drivers/video/bootsplash/decode-jpg.c - a tiny jpeg decoder.
+ *
+ * (w) August 2001 by Michael Schroeder, <mls@suse.de>
+ *
+ */
+
+#include <linux/string.h>
+#include <asm/byteorder.h>
+#include <linux/bootsplash.h>
+#include "decode-jpg.h"
+
+#define ISHIFT 11
+
+#define IFIX(a) ((int)((a) * (1 << ISHIFT) + .5))
+#define IMULT(a, b) (((a) * (b)) >> ISHIFT)
+#define ITOINT(a) ((a) >> ISHIFT)
+
+/* special markers */
+#define M_BADHUFF -1
+#define M_EOF 0x80
+
+struct in {
+ unsigned char *p;
+ unsigned int bits;
+ int left;
+ int marker;
+
+ int (*func)(void *);
+ void *data;
+};
+
+/*********************************/
+struct dec_hufftbl;
+struct enc_hufftbl;
+
+union hufftblp {
+ struct dec_hufftbl *dhuff;
+ struct enc_hufftbl *ehuff;
+};
+
+struct scan {
+ int dc; /* old dc value */
+
+ union hufftblp hudc;
+ union hufftblp huac;
+ int next; /* when to switch to next scan */
+
+ int cid; /* component id */
+ int hv; /* horiz/vert, copied from comp */
+ int tq; /* quant tbl, copied from comp */
+};
+
+/*********************************/
+
+#define DECBITS 10 /* seems to be the optimum */
+
+struct dec_hufftbl {
+ int maxcode[17];
+ int valptr[16];
+ unsigned char vals[256];
+ unsigned int llvals[1 << DECBITS];
+};
+
+static void decode_mcus(struct in *, int *, int, struct scan *, int *);
+static int dec_readmarker(struct in *);
+static void dec_makehuff(struct dec_hufftbl *, int *, unsigned char *);
+
+static void setinput(struct in *, unsigned char *);
+/*********************************/
+
+#undef PREC
+#define PREC int
+
+static void idctqtab(unsigned char *, PREC *);
+static void idct(int *, int *, PREC *, PREC, int);
+static void scaleidctqtab(PREC *, PREC);
+
+/*********************************/
+
+static void initcol(PREC[][64]);
+
+static void col221111(int *out, unsigned char *pic, int width);
+static void col221111_15(int *out, unsigned char *pic, int width);
+static void col221111_16(int *out, unsigned char *pic, int width);
+static void col221111_32(int *out, unsigned char *pic, int width);
+
+/*********************************/
+
+#define M_SOI 0xd8
+#define M_APP0 0xe0
+#define M_DQT 0xdb
+#define M_SOF0 0xc0
+#define M_DHT 0xc4
+#define M_DRI 0xdd
+#define M_SOS 0xda
+#define M_RST0 0xd0
+#define M_EOI 0xd9
+#define M_COM 0xfe
+
+static unsigned char *datap;
+
+/* Fetch the next byte from the global input cursor, advancing it. */
+static int getbyte(void)
+{
+	int b = *datap;
+
+	datap++;
+	return b;
+}
+
+/* Fetch the next big-endian 16-bit word from the global input cursor. */
+static int getword(void)
+{
+	int hi = getbyte();
+	int lo = getbyte();
+
+	return (hi << 8) | lo;
+}
+
+struct comp {
+ int cid;
+ int hv;
+ int tq;
+};
+
+#define MAXCOMP 4
+struct jpginfo {
+ int nc; /* number of components */
+ int ns; /* number of scans */
+ int dri; /* restart interval */
+ int nm; /* mcus til next marker */
+ int rm; /* next restart marker */
+};
+
+static struct jpginfo info;
+static struct comp comps[MAXCOMP];
+
+static struct scan dscans[MAXCOMP];
+
+static unsigned char quant[4][64];
+
+static struct dec_hufftbl dhuff[4];
+
+#define dec_huffdc (dhuff + 0)
+#define dec_huffac (dhuff + 2)
+
+static struct in in;
+
+/*
+ * readtables - consume JPEG marker segments until the marker `till` is
+ * seen, loading quantization (DQT) and Huffman (DHT) tables and the
+ * restart interval (DRI) into the decoder's static state on the way.
+ * Unknown segments are skipped by their length field.
+ *
+ * Returns 0 when `till` was reached, -1 on malformed input.
+ * NOTE(review): marker 0xc2 (progressive SOF) also returns 0, i.e. is
+ * reported as success without reaching `till` - callers then read stale
+ * data; presumably later sanity checks catch it, but verify.
+ */
+static int readtables(int till)
+{
+	int m, l, i, j, lq, pq, tq;
+	int tc, th, tt;
+
+	for (;;) {
+		if (getbyte() != 0xff)	/* every marker starts with 0xff */
+			return -1;
+		m = getbyte();
+		if (m == till)
+			break;
+
+		switch (m) {
+		case 0xc2:	/* progressive DCT: unsupported */
+			return 0;
+
+		case M_DQT:	/* quantization table(s) */
+			lq = getword();
+			while (lq > 2) {
+				pq = getbyte();
+				tq = pq & 15;	/* table id, 0..3 */
+				if (tq > 3)
+					return -1;
+				pq >>= 4;	/* precision: only 8 bit supported */
+				if (pq != 0)
+					return -1;
+				for (i = 0; i < 64; i++)
+					quant[tq][i] = getbyte();
+				lq -= 64 + 1;
+			}
+			break;
+
+		case M_DHT:	/* Huffman table(s) */
+			l = getword();
+			while (l > 2) {
+				int hufflen[16], k;
+				unsigned char huffvals[256];
+
+				tc = getbyte();
+				th = tc & 15;	/* table id */
+				tc >>= 4;	/* class: 0 = DC, 1 = AC */
+				tt = tc * 2 + th;
+				if (tc > 1 || th > 1)
+					return -1;
+				/* 16 code-length counts, then the values */
+				for (i = 0; i < 16; i++)
+					hufflen[i] = getbyte();
+				l -= 1 + 16;
+				k = 0;
+				for (i = 0; i < 16; i++) {
+					for (j = 0; j < hufflen[i]; j++)
+						huffvals[k++] = getbyte();
+					l -= hufflen[i];
+				}
+				dec_makehuff(dhuff + tt, hufflen,
+					     huffvals);
+			}
+			break;
+
+		case M_DRI:	/* restart interval */
+			l = getword();
+			info.dri = getword();
+			break;
+
+		default:	/* skip unknown segment by its length */
+			l = getword();
+			while (l-- > 2)
+				getbyte();
+			break;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Reset per-scan decoder state before decoding MCUs: arm the restart
+ * counter, expect RST0 as the first restart marker and clear the DC
+ * predictors of all scans.
+ */
+static void dec_initscans(void)
+{
+	int n;
+
+	info.nm = info.dri + 1;
+	info.rm = M_RST0;
+	for (n = 0; n < info.ns; n++)
+		dscans[n].dc = 0;
+}
+
+/*
+ * dec_checkmarker - consume and verify the next restart marker.
+ *
+ * Returns 0 and resets the restart counter and all DC predictors when the
+ * expected RSTn marker was found, -1 otherwise.
+ */
+static int dec_checkmarker(void)
+{
+	int i;
+
+	if (dec_readmarker(&in) != info.rm)
+		return -1;
+	info.nm = info.dri;
+	info.rm = (info.rm + 1) & ~0x08;	/* RST7 wraps back to RST0 */
+	for (i = 0; i < info.ns; i++)
+		dscans[i].dc = 0;	/* DC prediction restarts at 0 */
+	return 0;
+}
+
+/*
+ * jpeg_get_size - extract the image dimensions from a JPEG stream.
+ *
+ * Skips SOI and all table segments up to the SOF0 frame header and reads
+ * height/width from it.  No validation is performed; assumes a
+ * well-formed baseline JPEG (garbage in yields garbage dimensions).
+ */
+void jpeg_get_size(unsigned char *buf, int *width, int *height)
+{
+	datap = buf;
+	getbyte();	/* 0xff */
+	getbyte();	/* M_SOI */
+	readtables(M_SOF0);
+	getword();	/* frame header length */
+	getbyte();	/* sample precision */
+	*height = getword();
+	*width = getword();
+}
+
+/*
+ * jpeg_decode - decode a baseline JPEG into a raw framebuffer image.
+ *
+ * Only 8-bit, 3-component YCbCr images with 2x2,1x1,1x1 subsampling
+ * ("221111") are accepted, and width/height must match the (16-aligned)
+ * JPEG frame size.  Output format is selected by cf (15/16 bpp packed,
+ * 24 bpp packed, or 32 bpp).
+ *
+ * Returns 0 on success or one of the positive ERR_* codes from
+ * decode-jpg.h on failure.  Uses file-static decoder state (datap, info,
+ * comps, dscans, dhuff, in) - not reentrant.
+ */
+int jpeg_decode(unsigned char *buf, unsigned char *pic,
+		int width, int height, enum splash_color_format cf,
+		struct jpeg_decdata *decdata)
+{
+	int i, j, m, tac, tdc;
+	int mcusx, mcusy, mx, my;
+	int max[6];	/* per-block coefficient counts from decode_mcus() */
+
+	if (!decdata || !buf || !pic)
+		return -1;
+	datap = buf;
+	/* SOI marker */
+	if (getbyte() != 0xff)
+		return ERR_NO_SOI;
+	if (getbyte() != M_SOI)
+		return ERR_NO_SOI;
+	/* tables up to the SOF0 frame header */
+	if (readtables(M_SOF0))
+		return ERR_BAD_TABLES;
+	getword();	/* frame header length */
+	i = getbyte();	/* sample precision */
+	if (i != 8)
+		return ERR_NOT_8BIT;
+	/* caller passes 16-aligned dimensions; they must match the frame */
+	if (((getword() + 15) & ~15) != height)
+		return ERR_HEIGHT_MISMATCH;
+	if (((getword() + 15) & ~15) != width)
+		return ERR_WIDTH_MISMATCH;
+	if ((height & 15) || (width & 15))
+		return ERR_BAD_WIDTH_OR_HEIGHT;
+	info.nc = getbyte();	/* number of components */
+	if (info.nc > MAXCOMP)
+		return ERR_TOO_MANY_COMPPS;
+	for (i = 0; i < info.nc; i++) {
+		int h, v;
+		comps[i].cid = getbyte();
+		comps[i].hv = getbyte();
+		v = comps[i].hv & 15;	/* vertical sampling factor */
+		h = comps[i].hv >> 4;	/* horizontal sampling factor */
+		comps[i].tq = getbyte();	/* quant table selector */
+		if (h > 3 || v > 3)
+			return ERR_ILLEGAL_HV;
+		if (comps[i].tq > 3)
+			return ERR_QUANT_TABLE_SELECTOR;
+	}
+	/* tables up to the start-of-scan header */
+	if (readtables(M_SOS))
+		return ERR_BAD_TABLES;
+	getword();	/* scan header length */
+	info.ns = getbyte();
+	if (info.ns != 3)
+		return ERR_NOT_YCBCR_221111;
+	for (i = 0; i < 3; i++) {
+		dscans[i].cid = getbyte();
+		tdc = getbyte();
+		tac = tdc & 15;	/* AC huffman table selector */
+		tdc >>= 4;	/* DC huffman table selector */
+		if (tdc > 1 || tac > 1)
+			return ERR_QUANT_TABLE_SELECTOR;
+		/* match scan component to a frame component */
+		for (j = 0; j < info.nc; j++)
+			if (comps[j].cid == dscans[i].cid)
+				break;
+		if (j == info.nc)
+			return ERR_UNKNOWN_CID_IN_SCAN;
+		dscans[i].hv = comps[j].hv;
+		dscans[i].tq = comps[j].tq;
+		dscans[i].hudc.dhuff = dec_huffdc + tdc;
+		dscans[i].huac.dhuff = dec_huffac + tac;
+	}
+
+	/* spectral selection / successive approximation fields */
+	i = getbyte();
+	j = getbyte();
+	m = getbyte();
+
+	if (i != 0 || j != 63 || m != 0)
+		return ERR_NOT_SEQUENTIAL_DCT;
+
+	if (dscans[0].cid != 1 || dscans[1].cid != 2 || dscans[2].cid != 3)
+		return ERR_NOT_YCBCR_221111;
+
+	/* require 2x2 luma, 1x1 chroma subsampling */
+	if (dscans[0].hv != 0x22 ||
+	    dscans[1].hv != 0x11 ||
+	    dscans[2].hv != 0x11)
+		return ERR_NOT_YCBCR_221111;
+
+	mcusx = width >> 4;	/* MCUs are 16x16 pixels */
+	mcusy = height >> 4;
+
+
+	/* build dequantization tables folded with the IDCT prescale */
+	idctqtab(quant[dscans[0].tq], decdata->dquant[0]);
+	idctqtab(quant[dscans[1].tq], decdata->dquant[1]);
+	idctqtab(quant[dscans[2].tq], decdata->dquant[2]);
+	initcol(decdata->dquant);
+	setinput(&in, datap);
+
+#if 0
+	/* landing zone */
+	img[len] = 0;
+	img[len + 1] = 0xff;
+	img[len + 2] = M_EOF;
+#endif
+
+	dec_initscans();
+
+	/* block index at which decode_mcus() switches component:
+	 * 4 luma blocks, then 1 Cb, then 1 Cr per MCU */
+	dscans[0].next = 6 - 4;
+	dscans[1].next = 6 - 4 - 1;
+	dscans[2].next = 6 - 4 - 1 - 1;	/* 411 encoding */
+	for (my = 0; my < mcusy; my++) {
+		for (mx = 0; mx < mcusx; mx++) {
+			if (info.dri && !--info.nm)
+				if (dec_checkmarker())
+					return ERR_WRONG_MARKER;
+
+			decode_mcus(&in, decdata->dcts, 6, dscans, max);
+			/* inverse DCT: 4 luma blocks (+128 level shift),
+			 * then Cb and Cr */
+			idct(decdata->dcts, decdata->out, decdata->dquant[0],
+			     IFIX(128.5), max[0]);
+			idct(decdata->dcts + 64,
+			     decdata->out + 64,
+			     decdata->dquant[0], IFIX(128.5), max[1]);
+			idct(decdata->dcts + 128,
+			     decdata->out + 128,
+			     decdata->dquant[0], IFIX(128.5), max[2]);
+			idct(decdata->dcts + 192,
+			     decdata->out + 192,
+			     decdata->dquant[0], IFIX(128.5), max[3]);
+			idct(decdata->dcts + 256,
+			     decdata->out + 256,
+			     decdata->dquant[1], IFIX(0.5), max[4]);
+			idct(decdata->dcts + 320,
+			     decdata->out + 320,
+			     decdata->dquant[2], IFIX(0.5), max[5]);
+
+			/* color-convert the MCU into the target format */
+			switch (cf) {
+			case SPLASH_DEPTH_24:
+				col221111_32(decdata->out,
+					     (pic + (my * 16 * mcusx + mx)
+					      * 16 * 4),
+					     mcusx * 16 * 4);
+				break;
+			case SPLASH_DEPTH_24_PACKED:
+				col221111(decdata->out,
+					  (pic + (my * 16 * mcusx + mx)
+					   * 16 * 3),
+					  mcusx * 16 * 3);
+				break;
+			case SPLASH_DEPTH_16:
+				col221111_16(decdata->out,
+					     (pic + (my * 16 * mcusx + mx)
+					      * 16 * 2), mcusx * 16 * 2);
+				break;
+			case SPLASH_DEPTH_15:
+				col221111_15(decdata->out,
+					     (pic + (my * 16 * mcusx + mx)
+					      * 16 * 2), mcusx * 16 * 2);
+				break;
+			default:
+				return ERR_DEPTH_MISMATCH;
+				break;
+			}
+		}
+	}
+
+	m = dec_readmarker(&in);
+	if (m != M_EOI)
+		return ERR_NO_EOI;
+
+	return 0;
+}
+
+/****************************************************************/
+/************** huffman decoder ***************/
+/****************************************************************/
+
+static int fillbits(struct in *, int, unsigned int);
+static int dec_rec2(struct in *, struct dec_hufftbl *, int *, int, int);
+
+/* Initialize the bit-reader: point it at p with an empty bit buffer. */
+static void setinput(struct in *in, unsigned char *p)
+{
+	in->p = p;
+	in->bits = 0;
+	in->left = 0;
+	in->marker = 0;
+}
+
+/*
+ * fillbits - refill the bit buffer to more than 24 valid bits.
+ *
+ * le is the current number of valid bits, bi the buffer contents (MSB
+ * aligned at bit le-1).  0xff bytes in the entropy stream are followed by
+ * a stuffing byte: 0x00 means a literal 0xff data byte; anything else is
+ * a marker, which stops filling and is latched in in->marker (padding the
+ * buffer with 16 zero bits so pending reads still succeed).  The refilled
+ * buffer is returned through in->bits, the new bit count as return value.
+ */
+static int fillbits(struct in *in, int le, unsigned int bi)
+{
+	int b, m;
+
+	if (in->marker) {
+		/* already stopped at a marker: just pad with zeros */
+		if (le <= 16)
+			in->bits = bi << 16, le += 16;
+		return le;
+	}
+	while (le <= 24) {
+		b = *in->p++;
+		if (b == 0xff) {
+			m = *in->p++;
+			if (m != 0) {	/* not byte stuffing: a marker */
+				if (m == M_EOF) {
+					/* callback may supply more data */
+					if (in->func) {
+						m = in->func(in->data);
+						if (m == 0)
+							continue;
+					}
+				}
+				in->marker = m;
+				if (le <= 16)
+					bi = bi << 16, le += 16;
+				break;
+			}
+		}
+		bi = bi << 8 | b;
+		le += 8;
+	}
+	in->bits = bi;	/* tmp... 2 return values needed */
+	return le;
+}
+
+/*
+ * dec_readmarker - return the marker the bit-reader stopped at, or 0 if
+ * none is pending.  Consuming the marker discards the remaining buffered
+ * bits and clears the latch so decoding can resume after it.
+ */
+static int dec_readmarker(struct in *in)
+{
+	int m;
+
+	/* force a refill so a marker directly ahead gets latched */
+	in->left = fillbits(in, in->left, in->bits);
+	m = in->marker;
+	if (m == 0)
+		return 0;
+	in->left = 0;
+	in->marker = 0;
+	return m;
+}
+
+#define LEBI_DCL int le, bi
+#define LEBI_GET(in) (le = in->left, bi = in->bits)
+#define LEBI_PUT(in) (in->left = le, in->bits = bi)
+
+#define GETBITS(in, n) \
+ ( \
+ (le < (n) ? le = fillbits(in, le, bi), bi = in->bits : 0), \
+ (le -= (n)), \
+ bi >> le & ((1 << (n)) - 1) \
+ )
+
+#define UNGETBITS(in, n) ( \
+ le += (n) \
+ )
+
+
+/*
+ * dec_rec2 - slow-path Huffman decode for codes longer than DECBITS.
+ *
+ * Continues the bitwise code search started by the DEC_REC() lookup (c is
+ * the partial code, i the partially decoded llvals entry; i != 0 means the
+ * table already resolved run/backup and only the value bits remain).
+ * Stores the zero-run length in *runp and returns the sign-extended
+ * coefficient value; on an invalid code, latches M_BADHUFF and returns 0.
+ *
+ * Fix vs. original: the negative-value adjustment used (-1 << i), which
+ * is undefined behavior (left shift of a negative value); -(1 << i) is
+ * the defined equivalent.
+ */
+static int dec_rec2(struct in *in, struct dec_hufftbl *hu, int *runp, int c, int i)
+{
+	LEBI_DCL;
+
+	LEBI_GET(in);
+	if (i) {
+		UNGETBITS(in, i & 127);
+		*runp = i >> 8 & 15;
+		i >>= 16;
+	} else {
+		for (i = DECBITS;
+		     (c = ((c << 1) | GETBITS(in, 1))) >= (hu->maxcode[i]);
+		     i++)
+			;
+		if (i >= 16) {
+			in->marker = M_BADHUFF;
+			return 0;
+		}
+		i = hu->vals[hu->valptr[i] + c - hu->maxcode[i - 1] * 2];
+		*runp = i >> 4;
+		i &= 15;
+	}
+	if (i == 0) {		/* sigh, 0xf0 is 11 bit */
+		LEBI_PUT(in);
+		return 0;
+	}
+	/* receive part: read i magnitude bits and sign-extend */
+	c = GETBITS(in, i);
+	if (c < (1 << (i - 1)))
+		c += -(1 << i) + 1;
+	LEBI_PUT(in);
+	return c;
+}
+
+#define DEC_REC(in, hu, r, i) ( \
+ r = GETBITS(in, DECBITS), \
+ i = hu->llvals[r], \
+ i & 128 ? \
+ ( \
+ UNGETBITS(in, i & 127), \
+ r = i >> 8 & 15, \
+ i >> 16 \
+ ) \
+ : \
+ ( \
+ LEBI_PUT(in), \
+ i = dec_rec2(in, hu, &r, r, i), \
+ LEBI_GET(in), \
+ i \
+ ) \
+ )
+
+/*
+ * decode_mcus - entropy-decode n 8x8 blocks of one MCU into dct.
+ *
+ * For each block: the DC difference is decoded and added to the scan's
+ * predictor, then AC coefficients are decoded as (run, value) pairs in
+ * zigzag order until an end-of-block symbol or 63 coefficients.  maxp[b]
+ * receives the number of coefficient slots consumed in block b (1 means
+ * DC-only, which idct() uses as a fast path).  sc->next tells after how
+ * many remaining blocks to switch to the next scan component.
+ */
+static void decode_mcus(struct in *in, int *dct, int n, struct scan *sc, int *maxp)
+{
+	struct dec_hufftbl *hu;
+	int i, r, t;
+	LEBI_DCL;
+
+	memset(dct, 0, n * 64 * sizeof(*dct));
+	LEBI_GET(in);
+	while (n-- > 0) {
+		hu = sc->hudc.dhuff;
+		/* DC: differential against the scan's predictor */
+		*dct++ = (sc->dc += DEC_REC(in, hu, r, t));
+
+		hu = sc->huac.dhuff;
+		i = 63;
+		while (i > 0) {
+			t = DEC_REC(in, hu, r, t);
+			if (t == 0 && r == 0) {	/* end of block */
+				dct += i;
+				break;
+			}
+			dct += r;	/* skip the zero run */
+			*dct++ = t;
+			i -= r + 1;
+		}
+		*maxp++ = 64 - i;
+		if (n == sc->next)
+			sc++;
+	}
+	LEBI_PUT(in);
+}
+
+/*
+ * dec_makehuff - build a decode table from DHT code-length counts and
+ * symbol values.
+ *
+ * Besides the canonical maxcode/valptr/vals arrays used by the slow path,
+ * a DECBITS-wide direct lookup table (llvals) is filled so that most
+ * symbols - and, when they fit, their sign-extended values - resolve in a
+ * single table access (layout documented below).
+ *
+ * Fix vs. original: the sign-extension adjustment used (-1 << v), which
+ * is undefined behavior (left shift of a negative value); -(1 << v) is
+ * the defined equivalent.
+ */
+static void dec_makehuff(struct dec_hufftbl *hu, int *hufflen, unsigned char *huffvals)
+{
+	int code, k, i, j, d, x, c, v;
+	for (i = 0; i < (1 << DECBITS); i++)
+		hu->llvals[i] = 0;
+
+/*
+ * llvals layout:
+ *
+ * value v already known, run r, backup u bits:
+ * vvvvvvvvvvvvvvvv 0000 rrrr 1 uuuuuuu
+ * value unknown, size b bits, run r, backup u bits:
+ * 000000000000bbbb 0000 rrrr 0 uuuuuuu
+ * value and size unknown:
+ * 0000000000000000 0000 0000 0 0000000
+ */
+	code = 0;
+	k = 0;
+	for (i = 0; i < 16; i++, code <<= 1) {	/* sizes */
+		hu->valptr[i] = k;
+		for (j = 0; j < hufflen[i]; j++) {
+			hu->vals[k] = *huffvals++;
+			if (i < DECBITS) {
+				c = code << (DECBITS - 1 - i);
+				v = hu->vals[k] & 0x0f;	/* size */
+				for (d = 1 << (DECBITS - 1 - i); --d >= 0;) {
+					if (v + i < DECBITS) {
+						/* both fit in table */
+						x = d >> (DECBITS - 1 - v -
+							  i);
+						if (v && x < (1 << (v - 1)))
+							x += -(1 << v) + 1;
+						x = x << 16 |
+							(hu->vals[k] & 0xf0) << 4
+							| (DECBITS - (i + 1 + v))
+							| 128;
+					} else
+						x = v << 16
+							| (hu->vals[k] & 0xf0) << 4
+							| (DECBITS - (i + 1));
+					hu->llvals[c | d] = x;
+				}
+			}
+			code++;
+			k++;
+		}
+		hu->maxcode[i] = code;
+	}
+	hu->maxcode[16] = 0x20000;	/* always terminate decode */
+}
+
+/****************************************************************/
+/************** idct ***************/
+/****************************************************************/
+
+#define ONE ((PREC)IFIX(1.))
+#define S2 ((PREC)IFIX(0.382683432))
+#define C2 ((PREC)IFIX(0.923879532))
+#define C4 ((PREC)IFIX(0.707106781))
+
+#define S22 ((PREC)IFIX(2 * 0.382683432))
+#define C22 ((PREC)IFIX(2 * 0.923879532))
+#define IC4 ((PREC)IFIX(1 / 0.707106781))
+
+#define C3IC1 ((PREC)IFIX(0.847759065)) /* c3/c1 */
+#define C5IC1 ((PREC)IFIX(0.566454497)) /* c5/c1 */
+#define C7IC1 ((PREC)IFIX(0.198912367)) /* c7/c1 */
+
+#define XPP(a, b) (t = a + b, b = a - b, a = t)
+#define XMP(a, b) (t = a - b, b = a + b, a = t)
+#define XPM(a, b) (t = a + b, b = b - a, a = t)
+
+#define ROT(a, b, s, c) (t = IMULT(a + b, s), \
+ a = IMULT(a, c - s) + t, \
+ b = IMULT(b, c + s) - t)
+
+#define IDCT \
+ ( \
+ XPP(t0, t1), \
+ XMP(t2, t3), \
+ t2 = IMULT(t2, IC4) - t3, \
+ XPP(t0, t3), \
+ XPP(t1, t2), \
+ XMP(t4, t7), \
+ XPP(t5, t6), \
+ XMP(t5, t7), \
+ t5 = IMULT(t5, IC4), \
+ ROT(t4, t6, S22, C22), \
+ t6 -= t7, \
+ t5 -= t6, \
+ t4 -= t5, \
+ XPP(t0, t7), \
+ XPP(t1, t6), \
+ XPP(t2, t5), \
+ XPP(t3, t4) \
+ )
+
+static unsigned char zig2[64] = {
+ 0, 2, 3, 9, 10, 20, 21, 35,
+ 14, 16, 25, 31, 39, 46, 50, 57,
+ 5, 7, 12, 18, 23, 33, 37, 48,
+ 27, 29, 41, 44, 52, 55, 59, 62,
+ 15, 26, 30, 40, 45, 51, 56, 58,
+ 1, 4, 8, 11, 19, 22, 34, 36,
+ 28, 42, 43, 53, 54, 60, 61, 63,
+ 6, 13, 17, 24, 32, 38, 47, 49
+};
+
+/*
+ * idct - 2-D inverse DCT of one 8x8 block (AAN-style fixed point).
+ *
+ * in holds the zigzag-ordered coefficients, quant the dequantization table
+ * already folded with the IDCT prescale (see idctqtab()), off the output
+ * offset/rounding term (level shift), and max the coefficient count from
+ * decode_mcus() - max == 1 means DC-only and takes a flat-fill fast path.
+ * Row pass reads through zig2 (zigzag + transpose), column pass finishes.
+ *
+ * Fix vs. original: the definition lacked `static` although the forward
+ * declaration has it; added for consistency (internal linkage either way,
+ * but some compilers warn on the mismatch).
+ */
+static void idct(int *in, int *out, PREC *quant, PREC off, int max)
+{
+	PREC t0, t1, t2, t3, t4, t5, t6, t7, t;
+	PREC tmp[64], *tmpp;
+	int i, j;
+	unsigned char *zig2p;
+
+	t0 = off;
+	if (max == 1) {	/* DC-only block: constant output */
+		t0 += in[0] * quant[0];
+		for (i = 0; i < 64; i++)
+			out[i] = ITOINT(t0);
+		return;
+	}
+	zig2p = zig2;
+	tmpp = tmp;
+	for (i = 0; i < 8; i++) {	/* row pass, via zigzag order */
+		j = *zig2p++;
+		t0 += in[j] * quant[j];
+		j = *zig2p++;
+		t5 = in[j] * quant[j];
+		j = *zig2p++;
+		t2 = in[j] * quant[j];
+		j = *zig2p++;
+		t7 = in[j] * quant[j];
+		j = *zig2p++;
+		t1 = in[j] * quant[j];
+		j = *zig2p++;
+		t4 = in[j] * quant[j];
+		j = *zig2p++;
+		t3 = in[j] * quant[j];
+		j = *zig2p++;
+		t6 = in[j] * quant[j];
+		IDCT;
+		/* store transposed so the next pass reads rows */
+		tmpp[0 * 8] = t0;
+		tmpp[1 * 8] = t1;
+		tmpp[2 * 8] = t2;
+		tmpp[3 * 8] = t3;
+		tmpp[4 * 8] = t4;
+		tmpp[5 * 8] = t5;
+		tmpp[6 * 8] = t6;
+		tmpp[7 * 8] = t7;
+		tmpp++;
+		t0 = 0;	/* offset only applies once */
+	}
+	for (i = 0; i < 8; i++) {	/* column pass */
+		t0 = tmp[8 * i + 0];
+		t1 = tmp[8 * i + 1];
+		t2 = tmp[8 * i + 2];
+		t3 = tmp[8 * i + 3];
+		t4 = tmp[8 * i + 4];
+		t5 = tmp[8 * i + 5];
+		t6 = tmp[8 * i + 6];
+		t7 = tmp[8 * i + 7];
+		IDCT;
+		out[8 * i + 0] = ITOINT(t0);
+		out[8 * i + 1] = ITOINT(t1);
+		out[8 * i + 2] = ITOINT(t2);
+		out[8 * i + 3] = ITOINT(t3);
+		out[8 * i + 4] = ITOINT(t4);
+		out[8 * i + 5] = ITOINT(t5);
+		out[8 * i + 6] = ITOINT(t6);
+		out[8 * i + 7] = ITOINT(t7);
+	}
+}
+
+static unsigned char zig[64] = {
+ 0, 1, 5, 6, 14, 15, 27, 28,
+ 2, 4, 7, 13, 16, 26, 29, 42,
+ 3, 8, 12, 17, 25, 30, 41, 43,
+ 9, 11, 18, 24, 31, 40, 44, 53,
+ 10, 19, 23, 32, 39, 45, 52, 54,
+ 20, 22, 33, 38, 46, 51, 55, 60,
+ 21, 34, 37, 47, 50, 56, 59, 61,
+ 35, 36, 48, 49, 57, 58, 62, 63
+};
+
+static PREC aaidct[8] = {
+ IFIX(0.3535533906), IFIX(0.4903926402),
+ IFIX(0.4619397663), IFIX(0.4157348062),
+ IFIX(0.3535533906), IFIX(0.2777851165),
+ IFIX(0.1913417162), IFIX(0.0975451610)
+};
+
+
+/*
+ * Fold the 8-bit quantization table with the AAN IDCT prescale factors,
+ * producing a fixed-point dequantization table in zigzag position.
+ */
+static void idctqtab(unsigned char *qin, PREC *qout)
+{
+	int row, col;
+
+	for (row = 0; row < 8; row++) {
+		for (col = 0; col < 8; col++) {
+			int z = zig[row * 8 + col];
+
+			qout[z] = qin[z] * IMULT(aaidct[row], aaidct[col]);
+		}
+	}
+}
+
+/* Multiply all 64 entries of a dequantization table by fixed-point sc. */
+static void scaleidctqtab(PREC *q, PREC sc)
+{
+	PREC *end = q + 64;
+
+	while (q < end) {
+		*q = IMULT(*q, sc);
+		q++;
+	}
+}
+
+/****************************************************************/
+/************** color decoder ***************/
+/****************************************************************/
+
+#define ROUND
+
+/*
+ * YCbCr Color transformation:
+ *
+ * y:0..255 Cb:-128..127 Cr:-128..127
+ *
+ * R = Y + 1.40200 * Cr
+ * G = Y - 0.34414 * Cb - 0.71414 * Cr
+ * B = Y + 1.77200 * Cb
+ *
+ * =>
+ * Cr *= 1.40200;
+ * Cb *= 1.77200;
+ * Cg = 0.19421 * Cb + .50937 * Cr;
+ * R = Y + Cr;
+ * G = Y - Cg;
+ * B = Y + Cb;
+ *
+ * =>
+ * Cg = (50 * Cb + 130 * Cr + 128) >> 8;
+ */
+
+/*
+ * initcol - fold the YCbCr color-conversion gains into the chroma
+ * dequantization tables (Cb *= 1.772, Cr *= 1.402), so the color decoder
+ * only needs adds and the cheap Cg approximation.
+ *
+ * Fix vs. original: converted from a K&R-style parameter declaration to
+ * an ANSI prototype, matching the forward declaration.
+ */
+static void initcol(PREC q[][64])
+{
+	scaleidctqtab(q[1], IFIX(1.77200));
+	scaleidctqtab(q[2], IFIX(1.40200));
+}
+
+/* This is optimized for the stupid sun SUNWspro compiler. */
+#define STORECLAMP(a, x) \
+ ( \
+ (a) = (x), \
+ (unsigned int)(x) >= 256 ? \
+ ((a) = (x) < 0 ? 0 : 255) \
+ : \
+ 0 \
+ )
+
+#define CLAMP(x) ((unsigned int)(x) >= 256 ? ((x) < 0 ? 0 : 255) : (x))
+
+#ifdef ROUND
+
+#define CBCRCG(yin, xin) \
+ ( \
+ cb = outc[0 + yin * 8 + xin], \
+ cr = outc[64 + yin * 8 + xin], \
+ cg = (50 * cb + 130 * cr + 128) >> 8 \
+ )
+
+#else
+
+#define CBCRCG(yin, xin) \
+ ( \
+ cb = outc[0 + yin * 8 + xin], \
+ cr = outc[64 + yin * 8 + xin], \
+ cg = (3 * cb + 8 * cr) >> 4 \
+ )
+
+#endif
+
+#define PIC(yin, xin, p, xout) \
+ ( \
+ y = outy[(yin) * 8 + xin], \
+ STORECLAMP(p[(xout) * 3 + 0], y + cr), \
+ STORECLAMP(p[(xout) * 3 + 1], y - cg), \
+ STORECLAMP(p[(xout) * 3 + 2], y + cb) \
+ )
+
+#ifdef __LITTLE_ENDIAN
+#define PIC_15(yin, xin, p, xout, add) \
+ ( \
+ y = outy[(yin) * 8 + xin], \
+ y = ((CLAMP(y + cr + add * 2 + 1) & 0xf8) << 7) | \
+ ((CLAMP(y - cg + add * 2 + 1) & 0xf8) << 2) | \
+ ((CLAMP(y + cb + add * 2 + 1)) >> 3), \
+ p[(xout) * 2 + 0] = y & 0xff, \
+ p[(xout) * 2 + 1] = y >> 8 \
+ )
+
+#define PIC_16(yin, xin, p, xout, add) \
+ ( \
+ y = outy[(yin) * 8 + xin], \
+ y = ((CLAMP(y + cr + add * 2 + 1) & 0xf8) << 8) | \
+ ((CLAMP(y - cg + add) & 0xfc) << 3) | \
+ ((CLAMP(y + cb + add * 2 + 1)) >> 3), \
+ p[(xout) * 2 + 0] = y & 0xff, \
+ p[(xout) * 2 + 1] = y >> 8 \
+ )
+#else
+#define PIC_15(yin, xin, p, xout, add) \
+ ( \
+ y = outy[(yin) * 8 + xin], \
+ y = ((CLAMP(y + cr + add * 2 + 1) & 0xf8) << 7) | \
+ ((CLAMP(y - cg + add * 2 + 1) & 0xf8) << 2) | \
+ ((CLAMP(y + cb + add * 2 + 1)) >> 3), \
+ p[(xout) * 2 + 0] = y >> 8, \
+ p[(xout) * 2 + 1] = y & 0xff \
+ )
+
+#define PIC_16(yin, xin, p, xout, add) \
+ ( \
+ y = outy[(yin) * 8 + xin], \
+ y = ((CLAMP(y + cr + add * 2 + 1) & 0xf8) << 8) | \
+ ((CLAMP(y - cg + add) & 0xfc) << 3) | \
+ ((CLAMP(y + cb + add * 2 + 1)) >> 3), \
+ p[(xout) * 2 + 0] = y >> 8, \
+ p[(xout) * 2 + 1] = y & 0xff \
+ )
+#endif
+
+#define PIC_32(yin, xin, p, xout) \
+ ( \
+ y = outy[(yin) * 8 + xin], \
+ STORECLAMP(p[(xout) * 4 + 0], y + cb), \
+ STORECLAMP(p[(xout) * 4 + 1], y - cg), \
+ STORECLAMP(p[(xout) * 4 + 2], y + cr), \
+ p[(xout) * 4 + 3] = 0 \
+ )
+
+#define PIC221111(xin) \
+ ( \
+ CBCRCG(0, xin), \
+ PIC(xin / 4 * 8 + 0, (xin & 3) * 2 + 0, pic0, xin * 2 + 0), \
+ PIC(xin / 4 * 8 + 0, (xin & 3) * 2 + 1, pic0, xin * 2 + 1), \
+ PIC(xin / 4 * 8 + 1, (xin & 3) * 2 + 0, pic1, xin * 2 + 0), \
+ PIC(xin / 4 * 8 + 1, (xin & 3) * 2 + 1, pic1, xin * 2 + 1) \
+ )
+
+#define PIC221111_15(xin) \
+ ( \
+ CBCRCG(0, xin), \
+ PIC_15(xin / 4 * 8 + 0, (xin & 3) * 2 + 0, pic0, xin * 2 + 0, 3), \
+ PIC_15(xin / 4 * 8 + 0, (xin & 3) * 2 + 1, pic0, xin * 2 + 1, 0), \
+ PIC_15(xin / 4 * 8 + 1, (xin & 3) * 2 + 0, pic1, xin * 2 + 0, 1), \
+ PIC_15(xin / 4 * 8 + 1, (xin & 3) * 2 + 1, pic1, xin * 2 + 1, 2) \
+ )
+
+#define PIC221111_16(xin) \
+ ( \
+ CBCRCG(0, xin), \
+ PIC_16(xin / 4 * 8 + 0, (xin & 3) * 2 + 0, pic0, xin * 2 + 0, 3), \
+ PIC_16(xin / 4 * 8 + 0, (xin & 3) * 2 + 1, pic0, xin * 2 + 1, 0), \
+ PIC_16(xin / 4 * 8 + 1, (xin & 3) * 2 + 0, pic1, xin * 2 + 0, 1), \
+ PIC_16(xin / 4 * 8 + 1, (xin & 3) * 2 + 1, pic1, xin * 2 + 1, 2) \
+ )
+
+#define PIC221111_32(xin) \
+ ( \
+ CBCRCG(0, xin), \
+ PIC_32(xin / 4 * 8 + 0, (xin & 3) * 2 + 0, pic0, xin * 2 + 0), \
+ PIC_32(xin / 4 * 8 + 0, (xin & 3) * 2 + 1, pic0, xin * 2 + 1), \
+ PIC_32(xin / 4 * 8 + 1, (xin & 3) * 2 + 0, pic1, xin * 2 + 0), \
+ PIC_32(xin / 4 * 8 + 1, (xin & 3) * 2 + 1, pic1, xin * 2 + 1) \
+ )
+
+/*
+ * col221111 - YCbCr->RGB conversion of one 16x16 MCU into 24 bpp packed
+ * output.  width is the destination stride in bytes.
+ *
+ * out holds 4 luma blocks followed by Cb and Cr (layout from jpeg_decode).
+ * NB: the PIC221111/CBCRCG macros reference the locals pic0, pic1, outy,
+ * outc, cb, cg, cr and y by name - do not rename them.
+ */
+static void col221111(int *out, unsigned char *pic, int width)
+{
+	int i, j, k;
+	unsigned char *pic0, *pic1;	/* two output rows per iteration */
+	int *outy, *outc;
+	int cr, cg, cb, y;
+
+	pic0 = pic;
+	pic1 = pic + width;
+	outy = out;
+	outc = out + 64 * 4;	/* chroma follows the 4 luma blocks */
+	for (i = 2; i > 0; i--) {	/* upper / lower block row pair */
+		for (j = 4; j > 0; j--) {	/* 4 chroma rows each */
+			for (k = 0; k < 8; k++)
+				PIC221111(k);
+			outc += 8;
+			outy += 16;
+			pic0 += 2 * width;
+			pic1 += 2 * width;
+		}
+		outy += 64 * 2 - 16 * 4;	/* jump to next luma block pair */
+	}
+}
+
+/*
+ * col221111_15 - YCbCr->RGB conversion of one 16x16 MCU into 15 bpp
+ * (5-5-5) output with ordered dithering.  width is the destination
+ * stride in bytes.  See col221111() for the out layout; the PIC221111_15
+ * macro references the locals by name - do not rename them.
+ */
+static void col221111_15(int *out, unsigned char *pic, int width)
+{
+	int i, j, k;
+	unsigned char *pic0, *pic1;	/* two output rows per iteration */
+	int *outy, *outc;
+	int cr, cg, cb, y;
+
+	pic0 = pic;
+	pic1 = pic + width;
+	outy = out;
+	outc = out + 64 * 4;	/* chroma follows the 4 luma blocks */
+	for (i = 2; i > 0; i--) {
+		for (j = 4; j > 0; j--) {
+			for (k = 0; k < 8; k++)
+				PIC221111_15(k);
+			outc += 8;
+			outy += 16;
+			pic0 += 2 * width;
+			pic1 += 2 * width;
+		}
+		outy += 64 * 2 - 16 * 4;	/* jump to next luma block pair */
+	}
+}
+
+/*
+ * col221111_16 - YCbCr->RGB conversion of one 16x16 MCU into 16 bpp
+ * (5-6-5) output with ordered dithering.  width is the destination
+ * stride in bytes.  See col221111() for the out layout; the PIC221111_16
+ * macro references the locals by name - do not rename them.
+ */
+static void col221111_16(int *out, unsigned char *pic, int width)
+{
+	int i, j, k;
+	unsigned char *pic0, *pic1;	/* two output rows per iteration */
+	int *outy, *outc;
+	int cr, cg, cb, y;
+
+	pic0 = pic;
+	pic1 = pic + width;
+	outy = out;
+	outc = out + 64 * 4;	/* chroma follows the 4 luma blocks */
+	for (i = 2; i > 0; i--) {
+		for (j = 4; j > 0; j--) {
+			for (k = 0; k < 8; k++)
+				PIC221111_16(k);
+			outc += 8;
+			outy += 16;
+			pic0 += 2 * width;
+			pic1 += 2 * width;
+		}
+		outy += 64 * 2 - 16 * 4;	/* jump to next luma block pair */
+	}
+}
+
+/*
+ * col221111_32 - YCbCr->RGB conversion of one 16x16 MCU into 32 bpp
+ * (BGRX) output.  width is the destination stride in bytes.  See
+ * col221111() for the out layout; the PIC221111_32 macro references the
+ * locals by name - do not rename them.
+ */
+static void col221111_32(int *out, unsigned char *pic, int width)
+{
+	int i, j, k;
+	unsigned char *pic0, *pic1;	/* two output rows per iteration */
+	int *outy, *outc;
+	int cr, cg, cb, y;
+
+	pic0 = pic;
+	pic1 = pic + width;
+	outy = out;
+	outc = out + 64 * 4;	/* chroma follows the 4 luma blocks */
+	for (i = 2; i > 0; i--) {
+		for (j = 4; j > 0; j--) {
+			for (k = 0; k < 8; k++)
+				PIC221111_32(k);
+			outc += 8;
+			outy += 16;
+			pic0 += 2 * width;
+			pic1 += 2 * width;
+		}
+		outy += 64 * 2 - 16 * 4;	/* jump to next luma block pair */
+	}
+}
--- /dev/null
+/*
+ * linux/drivers/video/bootsplash/decode-jpg.h - a tiny jpeg decoder.
+ *
+ * (w) August 2001 by Michael Schroeder, <mls@suse.de>
+ */
+
+#ifndef __DECODE_JPG_H
+#define __DECODE_JPG_H
+
+#define ERR_NO_SOI 1
+#define ERR_NOT_8BIT 2
+#define ERR_HEIGHT_MISMATCH 3
+#define ERR_WIDTH_MISMATCH 4
+#define ERR_BAD_WIDTH_OR_HEIGHT 5
+#define ERR_TOO_MANY_COMPPS 6
+#define ERR_ILLEGAL_HV 7
+#define ERR_QUANT_TABLE_SELECTOR 8
+#define ERR_NOT_YCBCR_221111 9
+#define ERR_UNKNOWN_CID_IN_SCAN 10
+#define ERR_NOT_SEQUENTIAL_DCT 11
+#define ERR_WRONG_MARKER 12
+#define ERR_NO_EOI 13
+#define ERR_BAD_TABLES 14
+#define ERR_DEPTH_MISMATCH 15
+
+/* Scratch state for decoding one MCU (caller-allocated, reused per MCU). */
+struct jpeg_decdata {
+	int dcts[6 * 64 + 16];	/* entropy-decoded coefficients, 6 blocks + slack */
+	int out[64 * 6];	/* IDCT output: 4 luma + Cb + Cr 8x8 blocks */
+	int dquant[3][64];	/* dequant tables (Y, Cb, Cr), IDCT-prescaled */
+};
+
+extern int jpeg_decode(unsigned char *buf, unsigned char *pic,
+ int width, int height, enum splash_color_format cf,
+ struct jpeg_decdata *);
+extern void jpeg_get_size(unsigned char *, int *, int *);
+
+#endif
--- /dev/null
+/*
+ * linux/drivers/video/bootsplash/render.c - splash screen render functions.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/fb.h>
+#include <linux/vt_kern.h>
+#include <linux/selection.h>
+#include <asm/irq.h>
+
+#include "../console/fbcon.h"
+#include <linux/bootsplash.h>
+
+#ifndef DEBUG
+# define SPLASH_DEBUG(fmt, args...)
+#else
+# define SPLASH_DEBUG(fmt, args...) \
+ printk(KERN_WARNING "%s: " fmt "\n", __func__, ##args)
+#endif
+
+/* fake a region sync */
+void splash_sync_region(struct fb_info *info, int x, int y,
+ int width, int height)
+{
+ struct splash_data *sd = info->splash_data;
+ if (sd && sd->need_sync) {
+ /* issue a fake copyarea (copy to the very same position)
+ * for marking the dirty region; this is required for Xen fb
+ * (bnc#739020)
+ */
+ struct fb_copyarea area;
+ area.sx = area.dx = x;
+ area.sy = area.dy = y;
+ area.width = width;
+ area.height = height;
+ info->fbops->fb_copyarea(info, &area);
+ }
+}
+
+/*
+ * splash_putcs - draw a string of console characters over the splash
+ * image.  Foreground pixels use the palette color; background pixels show
+ * either the palette background or, when the background color equals the
+ * image's transparent color, the splash picture itself.
+ *
+ * Fix vs. original: in the 3-bytes-per-pixel case the transparent branch
+ * read *splashsrc.ul (a 32-bit load) while advancing the pointer one byte
+ * per plane; it must read single bytes (*splashsrc.ub) exactly as
+ * splash_renderc() does.
+ *
+ * NOTE(review): info->splash_data is dereferenced without a NULL check;
+ * presumably callers guarantee it is set - verify.
+ */
+void splash_putcs(struct vc_data *vc, struct fb_info *info,
+		  const unsigned short *s, int count, int ypos, int xpos)
+{
+	struct splash_data *sd;
+	unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
+	int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
+	int fgshift = (vc->vc_hi_font_mask) ? 9 : 8;
+	union pt src;
+	union pt dst, splashsrc;
+	unsigned int d, x, y;
+	u32 dd, fgx, bgx;
+	u16 c = scr_readw(s);
+	int fg_color, bg_color, transparent;
+	int n;
+	int octpp = (info->var.bits_per_pixel + 1) >> 3;
+	int drawn_width;
+
+	if (!oops_in_progress
+	    && (console_blanked || info->splash_data->splash_dosilent))
+		return;
+	sd = info->splash_data;
+
+	fg_color = attr_fgcol(fgshift, c);
+	bg_color = attr_bgcol(bgshift, c);
+	/* background equals the image's key color -> show the picture */
+	transparent = sd->imgd->splash_color == bg_color;
+	xpos = xpos * vc->vc_font.width + sd->imgd->splash_text_xo;
+	ypos = ypos * vc->vc_font.height + sd->imgd->splash_text_yo;
+	splashsrc.ub = (u8 *)(sd->pic->splash_pic
+			      + ypos * sd->pic->splash_pic_stride
+			      + xpos * octpp);
+	dst.ub = (u8 *)(info->screen_base
+			+ ypos * info->fix.line_length
+			+ xpos * octpp);
+	fgx = ((u32 *)info->pseudo_palette)[fg_color];
+	/* remap some colors for better contrast on key color 15 */
+	if (transparent && sd->imgd->splash_color == 15) {
+		if (fgx == 0xffea)
+			fgx = 0xfe4a;
+		else if (fgx == 0x57ea)
+			fgx = 0x0540;
+		else if (fgx == 0xffff)
+			fgx = 0x52aa;
+	}
+	bgx = ((u32 *)info->pseudo_palette)[bg_color];
+	d = 0;
+	drawn_width = 0;
+	while (count--) {
+		c = scr_readw(s++);
+		/* glyph bitmap, one bit per pixel, rows padded to bytes */
+		src.ub = vc->vc_font.data
+			+ ((c & charmask)
+			   * vc->vc_font.height
+			   * ((vc->vc_font.width + 7) >> 3));
+		for (y = 0; y < vc->vc_font.height; y++) {
+			for (x = 0; x < vc->vc_font.width; ) {
+				if ((x & 7) == 0)
+					d = *src.ub++;
+				switch (octpp) {
+				case 2:	/* 15/16 bpp: two pixels per write */
+					if (d & 0x80)
+						dd = fgx;
+					else
+						dd = (transparent ?
+						      *splashsrc.us : bgx);
+					splashsrc.us += 1;
+					if (d & 0x40)
+						dd |= fgx << 16;
+					else
+						dd |= (transparent ? *splashsrc.us : bgx) << 16;
+					splashsrc.us += 1;
+					d <<= 2;
+					x += 2;
+					fb_writel(dd, dst.ul);
+					dst.ul += 1;
+					break;
+				case 3:	/* 24 bpp packed: byte per plane */
+					for (n = 0; n <= 16; n += 8) {
+						if (d & 0x80)
+							dd = (fgx >> n) & 0xff;
+						else
+							dd = (transparent ? *splashsrc.ub : ((bgx >> n) & 0xff));
+						splashsrc.ub += 1;
+						fb_writeb(dd, dst.ub);
+						dst.ub += 1;
+					}
+					d <<= 1;
+					x += 1;
+					break;
+				case 4:	/* 32 bpp */
+					if (d & 0x80)
+						dd = fgx;
+					else
+						dd = (transparent ? *splashsrc.ul : bgx);
+					splashsrc.ul += 1;
+					d <<= 1;
+					x += 1;
+					fb_writel(dd, dst.ul);
+					dst.ul += 1;
+					break;
+				}
+			}
+			dst.ub += info->fix.line_length
+				- vc->vc_font.width * octpp;
+			splashsrc.ub += sd->pic->splash_pic_stride
+				- vc->vc_font.width * octpp;
+		}
+		/* rewind to the top row, advanced by one glyph */
+		dst.ub -= info->fix.line_length * vc->vc_font.height
+			- vc->vc_font.width * octpp;
+		splashsrc.ub -= sd->pic->splash_pic_stride * vc->vc_font.height
+			- vc->vc_font.width * octpp;
+		drawn_width += vc->vc_font.width;
+	}
+	splash_sync_region(info, xpos, ypos, drawn_width, vc->vc_font.height);
+}
+
+/*
+ * splash_renderc - render one 1-bit glyph bitmap (src, width x height) at
+ * (xpos, ypos) over the splash image.  Foreground pixels use the palette
+ * color; background pixels show either the palette background or, when
+ * the background color equals the image's transparent color, the splash
+ * picture itself.
+ *
+ * Fix vs. original: in the 3-bytes-per-pixel case the opaque background
+ * wrote bgx untruncated (fb_writeb stores its low byte three times);
+ * each plane must receive ((bgx >> n) & 0xff), matching the foreground
+ * path and splash_putcs().
+ *
+ * NOTE(review): info->splash_data is dereferenced without a NULL check;
+ * presumably callers guarantee it is set - verify.
+ */
+static void splash_renderc(struct fb_info *info,
+			   int fg_color, int bg_color,
+			   u8 *src,
+			   int ypos, int xpos,
+			   int height, int width)
+{
+	struct splash_data *sd;
+	int transparent;
+	u32 dd, fgx, bgx;
+	union pt dst, splashsrc;
+	unsigned int d, x, y;
+	int n;
+	int octpp = (info->var.bits_per_pixel + 1) >> 3;
+
+	if (!oops_in_progress
+	    && (console_blanked || info->splash_data->splash_dosilent))
+		return;
+
+	sd = info->splash_data;
+
+	/* background equals the image's key color -> show the picture */
+	transparent = sd->imgd->splash_color == bg_color;
+	splashsrc.ub = (u8 *)(sd->pic->splash_pic
+			      + ypos * sd->pic->splash_pic_stride
+			      + xpos * octpp);
+	dst.ub = (u8 *)(info->screen_base
+			+ ypos * info->fix.line_length
+			+ xpos * octpp);
+	fgx = ((u32 *)info->pseudo_palette)[fg_color];
+	/* remap some colors for better contrast on key color 15 */
+	if (transparent && (sd->imgd->splash_color == 15)) {
+		if (fgx == 0xffea)
+			fgx = 0xfe4a;
+		else if (fgx == 0x57ea)
+			fgx = 0x0540;
+		else if (fgx == 0xffff)
+			fgx = 0x52aa;
+	}
+	bgx = ((u32 *)info->pseudo_palette)[bg_color];
+	d = 0;
+	for (y = 0; y < height; y++) {
+		for (x = 0; x < width; ) {
+			if ((x & 7) == 0)
+				d = *src++;
+			switch (octpp) {
+			case 2:	/* 15/16 bpp: two pixels per write */
+				if (d & 0x80)
+					dd = fgx;
+				else
+					dd = (transparent ? *splashsrc.us : bgx);
+				splashsrc.us += 1;
+				if (d & 0x40)
+					dd |= fgx << 16;
+				else
+					dd |= (transparent ? *splashsrc.us : bgx) << 16;
+				splashsrc.us += 1;
+				d <<= 2;
+				x += 2;
+				fb_writel(dd, dst.ul);
+				dst.ul += 1;
+				break;
+			case 3:	/* 24 bpp packed: byte per plane */
+				for (n = 0; n <= 16; n += 8) {
+					if (d & 0x80)
+						dd = (fgx >> n) & 0xff;
+					else
+						dd = (transparent ? *splashsrc.ub : ((bgx >> n) & 0xff));
+					splashsrc.ub += 1;
+					fb_writeb(dd, dst.ub);
+					dst.ub += 1;
+				}
+				d <<= 1;
+				x += 1;
+				break;
+			case 4:	/* 32 bpp */
+				if (d & 0x80)
+					dd = fgx;
+				else
+					dd = (transparent ? *splashsrc.ul : bgx);
+				splashsrc.ul += 1;
+				d <<= 1;
+				x += 1;
+				fb_writel(dd, dst.ul);
+				dst.ul += 1;
+				break;
+			}
+		}
+		dst.ub += info->fix.line_length - width * octpp;
+		splashsrc.ub += sd->pic->splash_pic_stride - width * octpp;
+	}
+	splash_sync_region(info, xpos, ypos, width, height);
+}
+
+/*
+ * splashcopy - copy a rectangle of pixels row by row into the
+ * framebuffer using the fb_write* accessors.
+ * @width is in pixels and is converted to bytes via @octpp (octets per
+ * pixel); @dstbytes/@srcbytes are the destination/source line strides.
+ */
+void splashcopy(u8 *dst, u8 *src, int height, int width,
+		int dstbytes, int srcbytes, int octpp)
+{
+	int i;
+
+	width *= octpp;		/* width is in bytes from here on */
+	while (height-- > 0) {
+		union pt p, q;
+		p.ul = (u32 *)dst;
+		q.ul = (u32 *)src;
+		/* bulk copy as 32-bit words, then the 2- and 1-byte tails */
+		for (i = 0; i < width / 4; i++)
+			fb_writel(*q.ul++, p.ul++);
+		if (width & 2)
+			fb_writew(*q.us++, p.us++);
+		if (width & 1)
+			fb_writeb(*q.ub, p.ub);
+		dst += dstbytes;
+		src += srcbytes;
+	}
+}
+
+/*
+ * splashset - fill a rectangle with the solid color @bgx.
+ * @width is in pixels, converted to bytes via @octpp; @dstbytes is the
+ * destination line stride.  For even pixel sizes the fill is done with
+ * word-sized fb accessors; 24bpp (odd octpp) falls back to a slow
+ * byte-by-byte loop that cycles through the three color bytes.
+ */
+static void splashset(u8 *dst, int height, int width,
+		      int dstbytes, u32 bgx, int octpp) {
+	int i;
+
+	width *= octpp;
+	if (octpp == 2)
+		bgx |= bgx << 16;	/* replicate 16-bit color into both halves */
+	while (height-- > 0) {
+		union pt p;
+		p.ul = (u32 *)dst;
+		if (!(octpp & 1)) {
+			for (i = 0; i < width / 4; i++)
+				fb_writel(bgx, p.ul++);
+			if (width & 2)
+				fb_writew(bgx, p.us++);
+			if (width & 1)
+				fb_writeb(bgx, p.ub);
+		} else { /* slow! */
+			for (i = 0; i < width; i++)
+				fb_writeb((bgx >> ((i % 3) * 8)) & 0xff,
+					  p.ub++);
+		}
+		/* advance to the next scanline in BOTH branches; the
+		 * original only advanced in the even-octpp branch, so
+		 * 24bpp fills painted every row over row 0 */
+		dst += dstbytes;
+	}
+}
+
+/*
+ * splashfill - restore a rectangle of the framebuffer from the splash
+ * background picture and sync the touched region.
+ * @sy/@sx are in pixels, as are @height/@width.
+ */
+static void splashfill(struct fb_info *info, int sy, int sx,
+		       int height, int width) {
+	int octpp = (info->var.bits_per_pixel + 1) >> 3;
+	struct splash_data *sd = info->splash_data;
+
+	splashcopy((u8 *)(info->screen_base
+			  + sy * info->fix.line_length + sx * octpp),
+		   (u8 *)(sd->pic->splash_pic
+			  + sy * sd->pic->splash_pic_stride
+			  + sx * octpp),
+		   height, width, info->fix.line_length,
+		   sd->pic->splash_pic_stride,
+		   octpp);
+	splash_sync_region(info, sx, sy, width, height);
+}
+
+/*
+ * splash_clear - clear a character-cell rectangle.  If the console
+ * background color is the splash "transparent" color, restore the
+ * background picture; otherwise fill with the solid background color.
+ * @sy/@sx/@height/@width are in character cells and are converted to
+ * pixels using the current console font plus the splash text offsets.
+ */
+void splash_clear(struct vc_data *vc, struct fb_info *info, int sy,
+		  int sx, int height, int width)
+{
+	struct splash_data *sd;
+	int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
+	int bg_color = attr_bgcol_ec(bgshift, vc, info);
+	int transparent;
+	int octpp = (info->var.bits_per_pixel + 1) >> 3;
+	u32 bgx;
+	u8 *dst;
+
+	/* skip drawing while blanked/silent, unless an oops is printing */
+	if (!oops_in_progress
+	    && (console_blanked || info->splash_data->splash_dosilent))
+		return;
+
+	sd = info->splash_data;
+
+	transparent = sd->imgd->splash_color == bg_color;
+
+	/* convert character cells to pixel coordinates */
+	sy = sy * vc->vc_font.height + sd->imgd->splash_text_yo;
+	sx = sx * vc->vc_font.width + sd->imgd->splash_text_xo;
+	height *= vc->vc_font.height;
+	width *= vc->vc_font.width;
+	if (transparent) {
+		/* restore background picture (syncs internally) */
+		splashfill(info, sy, sx, height, width);
+		return;
+	}
+	dst = (u8 *)(info->screen_base
+		     + sy * info->fix.line_length
+		     + sx * octpp);
+	bgx = ((u32 *)info->pseudo_palette)[bg_color];
+	splashset(dst,
+		  height, width,
+		  info->fix.line_length,
+		  bgx,
+		  (info->var.bits_per_pixel + 1) >> 3);
+	splash_sync_region(info, sx, sy, width, height);
+}
+
+/*
+ * splash_bmove - move a character-cell rectangle via the driver's
+ * fb_copyarea, offset into the splash text area.  Cell coordinates are
+ * converted to pixels with the current console font.
+ */
+void splash_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+		  int sx, int dy, int dx, int height, int width)
+{
+	struct splash_data *sd;
+	struct fb_copyarea area;
+
+	if (!oops_in_progress
+	    && (console_blanked || info->splash_data->splash_dosilent))
+		return;
+
+	sd = info->splash_data;
+
+	area.sx = sx * vc->vc_font.width;
+	area.sy = sy * vc->vc_font.height;
+	area.dx = dx * vc->vc_font.width;
+	area.dy = dy * vc->vc_font.height;
+	/* shift both source and destination into the text window */
+	area.sx += sd->imgd->splash_text_xo;
+	area.sy += sd->imgd->splash_text_yo;
+	area.dx += sd->imgd->splash_text_xo;
+	area.dy += sd->imgd->splash_text_yo;
+	area.height = height * vc->vc_font.height;
+	area.width = width * vc->vc_font.width;
+
+	info->fbops->fb_copyarea(info, &area);
+}
+
+/*
+ * splash_clear_margins - repaint the background picture in the regions
+ * outside the text window (top, left, right and bottom margins).
+ * With @bottom_only set, only the bottom margin is repainted.
+ */
+void splash_clear_margins(struct vc_data *vc, struct fb_info *info,
+			  int bottom_only)
+{
+	struct splash_data *sd;
+	unsigned int tw = vc->vc_cols*vc->vc_font.width;   /* text width in px */
+	unsigned int th = vc->vc_rows*vc->vc_font.height;  /* text height in px */
+	SPLASH_DEBUG();
+
+	if (!oops_in_progress
+	    && (console_blanked || info->splash_data->splash_dosilent))
+		return;
+
+	sd = info->splash_data;
+
+	if (!bottom_only) {
+		/* top margin */
+		splashfill(info,
+			   0,
+			   0,
+			   sd->imgd->splash_text_yo,
+			   info->var.xres);
+		/* left margin */
+		splashfill(info,
+			   sd->imgd->splash_text_yo,
+			   0,
+			   th,
+			   sd->imgd->splash_text_xo);
+		/* right margin */
+		splashfill(info,
+			   sd->imgd->splash_text_yo,
+			   sd->imgd->splash_text_xo + tw,
+			   th,
+			   info->var.xres - sd->imgd->splash_text_xo - tw);
+	}
+	/* bottom margin */
+	splashfill(info,
+		   sd->imgd->splash_text_yo + th,
+		   0,
+		   info->var.yres - sd->imgd->splash_text_yo - th,
+		   info->var.xres);
+}
+
+/*
+ * splash_cursor - draw the console cursor through the splash renderer.
+ * Combines the cursor image with its mask according to the requested
+ * raster operation into info->fb_cursordata, then renders that bitmap
+ * offset into the splash text window.  Always returns 0.
+ * NOTE(review): assumes fb_cursordata was allocated by the caller and
+ * is at least dsize bytes — confirm against fbcon setup.
+ */
+int splash_cursor(struct fb_info *info, struct fb_cursor *cursor)
+{
+	struct splash_data *sd;
+	int i;
+	unsigned int dsize, s_pitch;
+
+	if (info->state != FBINFO_STATE_RUNNING)
+		return 0;
+
+	sd = info->splash_data;
+
+	s_pitch = (cursor->image.width + 7) >> 3;	/* bytes per bitmap row */
+	dsize = s_pitch * cursor->image.height;
+	if (cursor->enable) {
+		switch (cursor->rop) {
+		case ROP_XOR:
+			for (i = 0; i < dsize; i++)
+				info->fb_cursordata[i] = cursor->image.data[i]
+					^ cursor->mask[i];
+			break;
+		case ROP_COPY:
+		default:
+			for (i = 0; i < dsize; i++)
+				info->fb_cursordata[i] = cursor->image.data[i]
+					& cursor->mask[i];
+			break;
+		}
+	} else if (info->fb_cursordata != cursor->image.data)
+		memcpy(info->fb_cursordata, cursor->image.data, dsize);
+	cursor->image.data = info->fb_cursordata;
+	splash_renderc(info, cursor->image.fg_color, cursor->image.bg_color,
+		       (u8 *)info->fb_cursordata,
+		       cursor->image.dy + sd->imgd->splash_text_yo,
+		       cursor->image.dx + sd->imgd->splash_text_xo,
+		       cursor->image.height,
+		       cursor->image.width);
+	return 0;
+}
+
+/*
+ * splash_bmove_redraw - move characters within one text row by
+ * redrawing them rather than blitting, so the background picture stays
+ * intact.  Walks the destination cells and emits splash_putcs() runs;
+ * a run is flushed whenever the attribute byte changes or a cell
+ * already holds the character that would be drawn into it.
+ */
+void splash_bmove_redraw(struct vc_data *vc, struct fb_info *info,
+			 int y, int sx, int dx, int width)
+{
+	struct splash_data *sd;
+	unsigned short *d = (unsigned short *) (vc->vc_origin
+						+ vc->vc_size_row * y
+						+ dx * 2);
+	unsigned short *s = d + (dx - sx);	/* matching source cell */
+	unsigned short *start = d;		/* start of pending run */
+	unsigned short *ls = d;			/* valid source range: [ls, le) */
+	unsigned short *le = d + width;
+	unsigned short c;
+	int x = dx;
+	unsigned short attr = 1;		/* impossible attr forces first flush */
+
+	if (console_blanked || info->splash_data->splash_dosilent)
+		return;
+
+	sd = info->splash_data;
+
+	do {
+		c = scr_readw(d);
+		if (attr != (c & 0xff00)) {
+			/* attribute changed: flush the run drawn so far */
+			attr = c & 0xff00;
+			if (d > start) {
+				splash_putcs(vc, info, start, d - start, y, x);
+				x += d - start;
+				start = d;
+			}
+		}
+		if (s >= ls && s < le && c == scr_readw(s)) {
+			/* destination already shows this char: skip it */
+			if (d > start) {
+				splash_putcs(vc, info, start, d - start, y, x);
+				x += d - start + 1;
+				start = d + 1;
+			} else {
+				x++;
+				start++;
+			}
+		}
+		s++;
+		d++;
+	} while (d < le);
+	if (d > start)
+		splash_putcs(vc, info, start, d - start, y, x);
+}
+
+/*
+ * splash_blank - blank/unblank handler for splash consoles.  Blanking
+ * fills the whole screen with black; unblanking repaints the splash
+ * margins (the text area itself is redrawn by fbcon).
+ */
+void splash_blank(struct vc_data *vc, struct fb_info *info, int blank)
+{
+	SPLASH_DEBUG();
+
+	if (blank) {
+		/* solid black fill of the entire visible screen */
+		splashset((u8 *)info->screen_base,
+			  info->var.yres, info->var.xres,
+			  info->fix.line_length,
+			  0,
+			  (info->var.bits_per_pixel + 1) >> 3);
+		splash_sync_region(info, 0, 0, info->var.xres, info->var.yres);
+	} else {
+		/* splash_prepare(vc, info); *//* do we really need this? */
+		splash_clear_margins(vc, info, 0);
+		/* no longer needed, done in fbcon_blank */
+		/* update_screen(vc->vc_num); */
+	}
+}
#include <asm/types.h>
#include "fbcon.h"
+#include <linux/bootsplash.h>
+
+
/*
* Accelerated handlers.
*/
{
struct fb_copyarea area;
+ if (SPLASH_DATA(info)) {
+ splash_bmove(vc, info,
+ sy, sx, dy, dx, height, width);
+ return;
+ }
+
area.sx = sx * vc->vc_font.width;
area.sy = sy * vc->vc_font.height;
area.dx = dx * vc->vc_font.width;
int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
struct fb_fillrect region;
+ if (SPLASH_DATA(info)) {
+ splash_clear(vc, info,
+ sy, sx, height, width);
+ return;
+ }
+
region.color = attr_bgcol_ec(bgshift, vc, info);
region.dx = sx * vc->vc_font.width;
region.dy = sy * vc->vc_font.height;
image.height = vc->vc_font.height;
image.depth = 1;
+ if (SPLASH_DATA(info)) {
+ splash_putcs(vc, info, s, count, yy, xx);
+ return;
+ }
+
if (attribute) {
buf = kmalloc(cellsize, GFP_KERNEL);
if (!buf)
unsigned int bs = info->var.yres - bh;
struct fb_fillrect region;
+ if (SPLASH_DATA(info)) {
+ splash_clear_margins(vc, info, bottom_only);
+ return;
+ }
+
region.color = attr_bgcol_ec(bgshift, vc, info);
region.rop = ROP_COPY;
cursor.image.depth = 1;
cursor.rop = ROP_XOR;
+ if (SPLASH_DATA(info)) {
+ splash_cursor(info, &cursor);
+ ops->cursor_reset = 0;
+ return;
+ }
+
if (info->fbops->fb_cursor)
err = info->fbops->fb_cursor(info, &cursor);
#include <asm/irq.h>
#include "fbcon.h"
+#include <linux/bootsplash.h>
#ifdef FBCONDEBUG
# define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
static struct display fb_display[MAX_NR_CONSOLES];
+#ifdef CONFIG_BOOTSPLASH
+signed char con2fb_map[MAX_NR_CONSOLES];
+#else
static signed char con2fb_map[MAX_NR_CONSOLES];
+#endif
static signed char con2fb_map_boot[MAX_NR_CONSOLES];
static int logo_lines;
for (i = first_fb_vc; i <= last_fb_vc; i++)
con2fb_map[i] = info_idx;
+ splash_init();
+
err = take_over_console(&fb_con, first_fb_vc, last_fb_vc,
fbcon_is_default);
new_cols /= vc->vc_font.width;
new_rows /= vc->vc_font.height;
+#ifdef CONFIG_BOOTSPLASH
+ if (vc->vc_splash_data && vc->vc_splash_data->splash_state) {
+ new_cols = vc->vc_splash_data->splash_vc_text_wi
+ / vc->vc_font.width;
+ new_rows = vc->vc_splash_data->splash_vc_text_he
+ / vc->vc_font.height;
+ logo = 0;
+ con_remap_def_color(vc,
+ (vc->vc_splash_data->imgd->splash_color
+ << 4) |
+ vc->vc_splash_data->imgd->splash_fg_color);
+ }
+#endif
+
+
/*
* We must always set the mode. The mode of the previous console
* driver could be in the same resolution but we are using different
fbcon_softback_note(vc, t, count);
if (logo_shown >= 0)
goto redraw_up;
+ if (SPLASH_DATA(info))
+ goto redraw_up;
switch (p->scrollmode) {
case SCROLL_MOVE:
fbcon_redraw_blit(vc, info, p, t, b - t - count,
count = vc->vc_rows;
if (logo_shown >= 0)
goto redraw_down;
+ if (SPLASH_DATA(info))
+ goto redraw_down;
switch (p->scrollmode) {
case SCROLL_MOVE:
fbcon_redraw_blit(vc, info, p, b - 1, b - t - count,
}
return;
}
+
+ if (SPLASH_DATA(info) && sy == dy && height == 1) {
+ /*must use slower redraw bmove to keep background pic intact*/
+ splash_bmove_redraw(vc, info, sy, sx, dx, width);
+ return;
+ }
ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx,
height, width);
}
info = registered_fb[con2fb_map[vc->vc_num]];
ops = info->fbcon_par;
+#ifdef CONFIG_BOOTSPLASH
+ {
+ struct splash_data *prev_sd = vc->vc_splash_data;
+ splash_prepare(vc, info);
+ if (vc->vc_splash_data && vc->vc_splash_data->splash_state &&
+ vc->vc_splash_data != prev_sd) {
+ vc_resize(vc, vc->vc_splash_data->splash_vc_text_wi
+ / vc->vc_font.width,
+ vc->vc_splash_data->splash_vc_text_he
+ / vc->vc_font.height);
+ con_remap_def_color(vc,
+ vc->vc_splash_data->imgd->splash_color << 4
+ | vc->vc_splash_data->imgd->splash_fg_color);
+ }
+ }
+#endif
+
if (softback_top) {
if (softback_lines)
fbcon_set_origin(vc);
{
struct fb_event event;
+ if (SPLASH_DATA(info)) {
+ splash_blank(vc, info, blank);
+ return;
+ }
+
if (blank) {
unsigned short charmask = vc->vc_hi_font_mask ?
0x1ff : 0xff;
cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ if (SPLASH_DATA(info)) {
+ cols = TEXT_WIDTH_FROM_SPLASH_DATA(info);
+ rows = TEXT_HIGHT_FROM_SPLASH_DATA(info);
+ }
cols /= w;
rows /= h;
vc_resize(vc, cols, rows);
* low-level frame buffer device
*/
+#ifdef CONFIG_BOOTSPLASH
+/* Decoded splash image and its metadata; shared (refcounted) between
+ * consoles that display the same theme. */
+struct splash_img_data {
+	int ref_cnt;
+	int splash_color;		/* transparent color */
+	int splash_fg_color;		/* foreground color */
+	int splash_width;		/* width of image */
+	int splash_height;		/* height of image */
+	int splash_text_xo;		/* origin of the text area */
+	int splash_text_yo;
+	int splash_text_wi;		/* text area size of jpeg */
+	int splash_text_he;
+	int splash_boxcount;
+	int splash_sboxcount;
+	int splash_overpaintok;		/* is it ok to overpaint boxes */
+	unsigned char *splash_boxes;
+	unsigned char *splash_jpeg;	/* jpeg */
+	unsigned char *splash_sboxes;
+	unsigned char *splash_silentjpeg;
+	unsigned char *splash_palette;	/* palette for 8-bit */
+};
+
+/* Rendered (decompressed) background picture, refcounted. */
+struct splash_pic_data {
+	int ref_cnt;
+	unsigned char *splash_pic;
+	int splash_pic_stride;
+	int splash_pic_size;
+};
+
+/* Per-console splash state, linking image and picture data. */
+struct splash_data {
+	struct splash_data *next;
+	struct splash_img_data *imgd;
+	struct splash_pic_data *pic;
+	int splash_state;		/* show splash? */
+	int splash_percent;
+	int splash_dosilent;		/* show silent jpeg */
+
+	int splash_vc_text_wi;		/* text area size used */
+	int splash_vc_text_he;
+	int splash_boxes_xoff;
+	int splash_boxes_yoff;
+	int splash_sboxes_xoff;
+	int splash_sboxes_yoff;
+
+	bool color_set;
+	bool need_sync;
+};
+#endif
+
struct display {
/* Filled in by the low-level console driver */
const u_char *fontdata;
unsigned charcount = font->charcount;
int rc;
- if (vga_video_type < VIDEO_TYPE_EGAM)
+ if (vga_video_type < VIDEO_TYPE_EGAM || vga_is_gfx)
return -EINVAL;
if (font->width != VGA_FONTWIDTH ||
static int vgacon_font_get(struct vc_data *c, struct console_font *font)
{
- if (vga_video_type < VIDEO_TYPE_EGAM)
+ if (vga_video_type < VIDEO_TYPE_EGAM || vga_is_gfx)
return -EINVAL;
font->width = VGA_FONTWIDTH;
source "fs/reiserfs/Kconfig"
source "fs/jfs/Kconfig"
+config FS_RICHACL
+ bool
+
source "fs/xfs/Kconfig"
source "fs/gfs2/Kconfig"
source "fs/ocfs2/Kconfig"
obj-$(CONFIG_NFS_COMMON) += nfs_common/
obj-$(CONFIG_GENERIC_ACL) += generic_acl.o
+obj-$(CONFIG_FS_RICHACL) += richacl.o
+richacl-y := richacl_base.o richacl_inode.o richacl_xattr.o
+
obj-$(CONFIG_FHANDLE) += fhandle.o
obj-y += quota/
num_bytes += div64_u64(data_used + meta_used, 50);
if (num_bytes * 3 > meta_used)
- num_bytes = div64_u64(meta_used, 3) * 2;
+ num_bytes = div64_u64(meta_used, 3);
return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
}
If you select Y here, then you will be able to turn on debugging
with a command such as "echo 1 > /sys/kernel/debug/ext4/mballoc-debug"
+
+config EXT4_FS_RICHACL
+ bool "Ext4 Rich Access Control Lists (EXPERIMENTAL)"
+ depends on EXT4_FS_XATTR && EXPERIMENTAL
+ select FS_RICHACL
+ help
+ Rich ACLs are an implementation of NFSv4 ACLs, extended by file masks
+ to fit into the standard POSIX file permission model. They are
+ designed to work seamlessly locally as well as across the NFSv4 and
+ CIFS/SMB2 network file system protocols.
ext4-$(CONFIG_EXT4_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o
ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o
ext4-$(CONFIG_EXT4_FS_SECURITY) += xattr_security.o
+ext4-$(CONFIG_EXT4_FS_RICHACL) += richacl.o
*/
tid_t i_sync_tid;
tid_t i_datasync_tid;
+#ifdef CONFIG_EXT4_FS_RICHACL
+ struct richacl *i_richacl;
+#endif
+
};
/*
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
+#include "richacl.h"
/*
* Called when an inode is released. Note that this is different
#endif
.get_acl = ext4_get_acl,
.fiemap = ext4_fiemap,
+ .permission = ext4_permission,
+ .may_create = ext4_may_create,
+ .may_delete = ext4_may_delete,
};
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
+#include "richacl.h"
#include <trace/events/ext4.h>
if (err)
goto fail_drop;
- err = ext4_init_acl(handle, inode, dir);
+ if (EXT4_IS_RICHACL(dir))
+ err = ext4_init_richacl(handle, inode, dir);
+ else
+ err = ext4_init_acl(handle, inode, dir);
+
if (err)
goto fail_free_drop;
#include "xattr.h"
#include "acl.h"
#include "truncate.h"
+#include "richacl.h"
#include <trace/events/ext4.h>
set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
+#ifdef CONFIG_EXT4_FS_RICHACL
+ ei->i_richacl = EXT4_RICHACL_NOT_CACHED;
+#endif
ei->i_dir_start_lookup = 0;
ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
/* We now have enough fields to check if the inode was active or not.
int orphan = 0;
const unsigned int ia_valid = attr->ia_valid;
- error = inode_change_ok(inode, attr);
+ if (EXT4_IS_RICHACL(inode))
+ error = richacl_inode_change_ok(inode, attr,
+ ext4_richacl_permission);
+ else
+ error = inode_change_ok(inode, attr);
if (error)
return error;
if (orphan && inode->i_nlink)
ext4_orphan_del(NULL, inode);
- if (!rc && (ia_valid & ATTR_MODE))
- rc = ext4_acl_chmod(inode);
-
+ if (!rc && (ia_valid & ATTR_MODE)) {
+ if (EXT4_IS_RICHACL(inode))
+ rc = ext4_richacl_chmod(inode);
+ else
+ rc = ext4_acl_chmod(inode);
+ }
err_out:
ext4_std_error(inode->i_sb, error);
if (!error)
#include "xattr.h"
#include "acl.h"
+#include "richacl.h"
#include <trace/events/ext4.h>
/*
#endif
.get_acl = ext4_get_acl,
.fiemap = ext4_fiemap,
+ .permission = ext4_permission,
+ .may_create = ext4_may_create,
+ .may_delete = ext4_may_delete,
};
const struct inode_operations ext4_special_inode_operations = {
.removexattr = generic_removexattr,
#endif
.get_acl = ext4_get_acl,
+ .permission = ext4_permission,
+ .may_create = ext4_may_create,
+ .may_delete = ext4_may_delete,
};
--- /dev/null
+/*
+ * Copyright IBM Corporation, 2010
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2.1 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/richacl_xattr.h>
+
+#include "ext4.h"
+#include "ext4_jbd2.h"
+#include "xattr.h"
+#include "acl.h"
+#include "richacl.h"
+
+/*
+ * Return a referenced copy of the inode's cached richacl, or
+ * EXT4_RICHACL_NOT_CACHED if none has been cached yet.  i_lock protects
+ * the cached pointer.
+ */
+static inline struct richacl *
+ext4_iget_richacl(struct inode *inode)
+{
+	struct richacl *acl = EXT4_RICHACL_NOT_CACHED;
+	struct ext4_inode_info *ei = EXT4_I(inode);
+
+	spin_lock(&inode->i_lock);
+	if (ei->i_richacl != EXT4_RICHACL_NOT_CACHED)
+		acl = richacl_get(ei->i_richacl);
+	spin_unlock(&inode->i_lock);
+
+	return acl;
+}
+
+/*
+ * Install @acl (may be NULL) as the inode's cached richacl, dropping
+ * any previously cached reference.  Takes its own reference on @acl.
+ */
+static inline void
+ext4_iset_richacl(struct inode *inode, struct richacl *acl)
+{
+	struct ext4_inode_info *ei = EXT4_I(inode);
+
+	spin_lock(&inode->i_lock);
+	if (ei->i_richacl != EXT4_RICHACL_NOT_CACHED)
+		richacl_put(ei->i_richacl);
+	ei->i_richacl = richacl_get(acl);
+	spin_unlock(&inode->i_lock);
+}
+
+/*
+ * Read the inode's richacl, from the cache if possible, otherwise from
+ * the "system.richacl" xattr.  Returns a referenced acl, NULL if the
+ * inode has none, or an ERR_PTR on failure.
+ */
+static struct richacl *
+ext4_get_richacl(struct inode *inode)
+{
+	const int name_index = EXT4_XATTR_INDEX_RICHACL;
+	void *value = NULL;
+	struct richacl *acl;
+	int retval;
+
+	if (!IS_RICHACL(inode))
+		return ERR_PTR(-EOPNOTSUPP);
+	acl = ext4_iget_richacl(inode);
+	if (acl != EXT4_RICHACL_NOT_CACHED)
+		return acl;
+	/* first call sizes the xattr, second fetches it */
+	retval = ext4_xattr_get(inode, name_index, "", NULL, 0);
+	if (retval > 0) {
+		value = kmalloc(retval, GFP_KERNEL);
+		if (!value)
+			return ERR_PTR(-ENOMEM);
+		retval = ext4_xattr_get(inode, name_index, "", value, retval);
+	}
+	if (retval > 0) {
+		acl = richacl_from_xattr(value, retval);
+		/* a malformed on-disk acl is a filesystem error */
+		if (acl == ERR_PTR(-EINVAL))
+			acl = ERR_PTR(-EIO);
+	} else if (retval == -ENODATA || retval == -ENOSYS)
+		acl = NULL;
+	else
+		acl = ERR_PTR(retval);
+	kfree(value);
+
+	if (!IS_ERR_OR_NULL(acl))
+		ext4_iset_richacl(inode, acl);
+
+	return acl;
+}
+
+/*
+ * Write @acl to the inode's "system.richacl" xattr (within @handle if
+ * given) and update the cache.  An acl that is equivalent to a plain
+ * file mode is stored as the mode instead and the xattr is removed.
+ */
+static int
+ext4_set_richacl(handle_t *handle, struct inode *inode, struct richacl *acl)
+{
+	const int name_index = EXT4_XATTR_INDEX_RICHACL;
+	size_t size = 0;
+	void *value = NULL;
+	int retval;
+
+	if (acl) {
+		mode_t mode = inode->i_mode;
+		/* mode-equivalent acls are represented by the mode alone */
+		if (richacl_equiv_mode(acl, &mode) == 0) {
+			inode->i_mode = mode;
+			ext4_mark_inode_dirty(handle, inode);
+			acl = NULL;
+		}
+	}
+	if (acl) {
+		size = richacl_xattr_size(acl);
+		value = kmalloc(size, GFP_KERNEL);
+		if (!value)
+			return -ENOMEM;
+		richacl_to_xattr(acl, value);
+	}
+	/* NULL value and zero size remove the xattr */
+	if (handle)
+		retval = ext4_xattr_set_handle(handle, inode, name_index, "",
+					       value, size, 0);
+	else
+		retval = ext4_xattr_set(inode, name_index, "", value, size, 0);
+	kfree(value);
+	if (!retval)
+		ext4_iset_richacl(inode, acl);
+
+	return retval;
+}
+
+/*
+ * Check whether the current task has the richacl permissions in @mask
+ * on @inode.  Returns 0 if access is allowed, a negative errno
+ * otherwise.  Must only be called on richacl-enabled inodes.
+ */
+int
+ext4_richacl_permission(struct inode *inode, unsigned int mask)
+{
+	struct richacl *acl;
+	int retval;
+
+	if (!IS_RICHACL(inode))
+		BUG();
+
+	acl = ext4_get_richacl(inode);
+	if (acl && IS_ERR(acl))
+		retval = PTR_ERR(acl);
+	else {
+		/* richacl_put(NULL) is tolerated on the acl == NULL path */
+		retval = richacl_inode_permission(inode, acl, mask);
+		richacl_put(acl);
+	}
+
+	return retval;
+}
+
+/*
+ * ->permission hook: route richacl-enabled inodes through the richacl
+ * check, everything else through generic_permission().
+ */
+int ext4_permission(struct inode *inode, int mask)
+{
+	if (IS_RICHACL(inode))
+		return ext4_richacl_permission(inode,
+					       richacl_want_to_mask(mask));
+	else
+		return generic_permission(inode, mask);
+}
+
+/* ->may_create hook: delegate the create check to the richacl helper. */
+int ext4_may_create(struct inode *dir, int isdir)
+{
+	return richacl_may_create(dir, isdir, ext4_richacl_permission);
+}
+
+/* ->may_delete hook: delegate the delete/replace check to richacl. */
+int ext4_may_delete(struct inode *dir, struct inode *inode, int replace)
+{
+	return richacl_may_delete(dir, inode, replace, ext4_richacl_permission);
+}
+
+/*
+ * Initialize the richacl of a newly created inode from its parent
+ * directory's inheritable acl entries.  When nothing is inherited the
+ * umask is applied to the inode's mode instead (the usual POSIX rule).
+ */
+int
+ext4_init_richacl(handle_t *handle, struct inode *inode, struct inode *dir)
+{
+	struct richacl *dir_acl = NULL;
+
+	/* symlinks never carry acls */
+	if (!S_ISLNK(inode->i_mode)) {
+		dir_acl = ext4_get_richacl(dir);
+		if (IS_ERR(dir_acl))
+			return PTR_ERR(dir_acl);
+	}
+	if (dir_acl) {
+		struct richacl *acl;
+		int retval;
+
+		acl = richacl_inherit(dir_acl, inode);
+		richacl_put(dir_acl);
+
+		/* acl may be NULL (nothing inherited) or an ERR_PTR */
+		retval = PTR_ERR(acl);
+		if (acl && !IS_ERR(acl)) {
+			retval = ext4_set_richacl(handle, inode, acl);
+			richacl_put(acl);
+		}
+		return retval;
+	} else {
+		inode->i_mode &= ~current_umask();
+		return 0;
+	}
+}
+
+/*
+ * Rewrite the inode's richacl after a mode change so its file masks
+ * stay consistent with the new i_mode.  No-op (via PTR_ERR(NULL) == 0)
+ * if the inode has no acl.
+ */
+int
+ext4_richacl_chmod(struct inode *inode)
+{
+	struct richacl *acl;
+	int retval;
+
+	if (S_ISLNK(inode->i_mode))
+		return -EOPNOTSUPP;
+	acl = ext4_get_richacl(inode);
+	if (IS_ERR_OR_NULL(acl))
+		return PTR_ERR(acl);
+	/* richacl_chmod consumes our reference and returns a new acl */
+	acl = richacl_chmod(acl, inode->i_mode);
+	if (IS_ERR(acl))
+		return PTR_ERR(acl);
+	retval = ext4_set_richacl(NULL, inode, acl);
+	richacl_put(acl);
+
+	return retval;
+}
+
+/*
+ * xattr ->list handler: report the "system.richacl" name (including its
+ * NUL terminator) for listxattr on richacl-enabled inodes.
+ */
+static size_t
+ext4_xattr_list_richacl(struct dentry *dentry, char *list, size_t list_len,
+			const char *name, size_t name_len, int type)
+{
+	const size_t size = sizeof(RICHACL_XATTR);
+	if (!IS_RICHACL(dentry->d_inode))
+		return 0;
+	if (list && size <= list_len)
+		memcpy(list, RICHACL_XATTR, size);
+	return size;
+}
+
+/*
+ * xattr ->get handler: return the inode's richacl in xattr form.
+ * With a NULL @buffer only the required size is returned (the standard
+ * getxattr size-probing protocol).
+ */
+static int
+ext4_xattr_get_richacl(struct dentry *dentry, const char *name, void *buffer,
+		       size_t buffer_size, int type)
+{
+	struct richacl *acl;
+	size_t size;
+
+	if (strcmp(name, "") != 0)
+		return -EINVAL;
+	acl = ext4_get_richacl(dentry->d_inode);
+	if (IS_ERR(acl))
+		return PTR_ERR(acl);
+	if (acl == NULL)
+		return -ENODATA;
+	size = richacl_xattr_size(acl);
+	if (buffer) {
+		if (size > buffer_size) {
+			/* drop the reference on the error path too; the
+			 * original returned here and leaked the acl */
+			richacl_put(acl);
+			return -ERANGE;
+		}
+		richacl_to_xattr(acl, buffer);
+	}
+	richacl_put(acl);
+
+	return size;
+}
+
+/*
+ * xattr ->set handler: replace (or, with a NULL @value, remove) the
+ * inode's richacl.  Requires ownership, ACE4_WRITE_ACL permission, or
+ * CAP_FOWNER.  The inode's permission bits are refreshed from the new
+ * acl's file masks before it is written.
+ */
+static int
+ext4_xattr_set_richacl(struct dentry *dentry, const char *name,
+		       const void *value, size_t size, int flags, int type)
+{
+	handle_t *handle;
+	struct richacl *acl = NULL;
+	int retval, retries = 0;
+	struct inode *inode = dentry->d_inode;
+
+	if (!IS_RICHACL(dentry->d_inode))
+		return -EOPNOTSUPP;
+	if (S_ISLNK(inode->i_mode))
+		return -EOPNOTSUPP;
+	if (strcmp(name, "") != 0)
+		return -EINVAL;
+	if (current_fsuid() != inode->i_uid &&
+	    ext4_richacl_permission(inode, ACE4_WRITE_ACL) &&
+	    !capable(CAP_FOWNER))
+		return -EPERM;
+	if (value) {
+		acl = richacl_from_xattr(value, size);
+		if (IS_ERR(acl))
+			return PTR_ERR(acl);
+
+		/* derive the permission bits from the acl's file masks */
+		inode->i_mode &= ~S_IRWXUGO;
+		inode->i_mode |= richacl_masks_to_mode(acl);
+	}
+
+retry:
+	handle = ext4_journal_start(inode, EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
+	if (IS_ERR(handle)) {
+		/* don't leak the acl reference on journal failure */
+		richacl_put(acl);
+		return PTR_ERR(handle);
+	}
+	ext4_mark_inode_dirty(handle, inode);
+	retval = ext4_set_richacl(handle, inode, acl);
+	ext4_journal_stop(handle);
+	/* kernel error codes are negative: the original compared against
+	 * positive ENOSPC and therefore never retried the allocation */
+	if (retval == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+		goto retry;
+	richacl_put(acl);
+	return retval;
+}
+
+/* xattr handler wiring the "system.richacl" attribute to the richacl
+ * list/get/set operations above. */
+const struct xattr_handler ext4_richacl_xattr_handler = {
+	.prefix	= RICHACL_XATTR,
+	.list	= ext4_xattr_list_richacl,
+	.get	= ext4_xattr_get_richacl,
+	.set	= ext4_xattr_set_richacl,
+};
--- /dev/null
+/*
+ * Copyright IBM Corporation, 2010
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2.1 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ */
+
+#ifndef __FS_EXT4_RICHACL_H
+#define __FS_EXT4_RICHACL_H
+
+#include <linux/richacl.h>
+
+#ifdef CONFIG_EXT4_FS_RICHACL
+
+# define EXT4_IS_RICHACL(inode) IS_RICHACL(inode)
+
+/* Value for i_richacl if RICHACL has not been cached */
+# define EXT4_RICHACL_NOT_CACHED ((void *)-1)
+
+extern int ext4_permission(struct inode *, int);
+extern int ext4_richacl_permission(struct inode *, unsigned int);
+extern int ext4_may_create(struct inode *, int);
+extern int ext4_may_delete(struct inode *, struct inode *, int);
+extern int ext4_init_richacl(handle_t *, struct inode *, struct inode *);
+extern int ext4_richacl_chmod(struct inode *);
+
+#else /* CONFIG_EXT4_FS_RICHACL */
+
+# define EXT4_IS_RICHACL(inode) (0)
+
+/* NULL inode-operation hooks fall back to the generic VFS checks */
+# define ext4_permission NULL
+# define ext4_may_create NULL
+# define ext4_may_delete NULL
+# define ext4_richacl_permission NULL
+
+static inline int
+ext4_init_richacl(handle_t *handle, struct inode *inode, struct inode *dir)
+{
+	return 0;
+}
+
+static inline int
+ext4_richacl_chmod(struct inode *inode)
+{
+	return 0;
+}
+
+#endif /* CONFIG_EXT4_FS_RICHACL */
+#endif /* __FS_EXT4_RICHACL_H */
#include "xattr.h"
#include "acl.h"
#include "mballoc.h"
+#include "richacl.h"
#define CREATE_TRACE_POINTS
#include <trace/events/ext4.h>
ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS);
if (!ei)
return NULL;
-
+#ifdef CONFIG_EXT4_FS_RICHACL
+ ei->i_richacl = EXT4_RICHACL_NOT_CACHED;
+#endif
ei->vfs_inode.i_version = 1;
ei->vfs_inode.i_data.writeback_index = 0;
memset(&ei->i_cached_extent, 0, sizeof(struct ext4_ext_cache));
invalidate_inode_buffers(inode);
end_writeback(inode);
dquot_drop(inode);
+#ifdef CONFIG_EXT4_FS_RICHACL
+ if (EXT4_I(inode)->i_richacl &&
+ EXT4_I(inode)->i_richacl != EXT4_RICHACL_NOT_CACHED) {
+ richacl_put(EXT4_I(inode)->i_richacl);
+ EXT4_I(inode)->i_richacl = EXT4_RICHACL_NOT_CACHED;
+ }
+#endif
ext4_discard_preallocations(inode);
if (EXT4_I(inode)->jinode) {
jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
Opt_nouid32, Opt_debug, Opt_removed,
- Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
+ Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_richacl, Opt_noacl,
Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload,
Opt_commit, Opt_min_batch_time, Opt_max_batch_time,
Opt_journal_dev, Opt_journal_checksum, Opt_journal_async_commit,
{Opt_user_xattr, "user_xattr"},
{Opt_nouser_xattr, "nouser_xattr"},
{Opt_acl, "acl"},
+ {Opt_richacl, "richacl"},
{Opt_noacl, "noacl"},
{Opt_noload, "norecovery"},
{Opt_noload, "noload"},
case Opt_nouser_xattr:
ext4_msg(sb, KERN_WARNING, deprecated_msg, opt, "3.5");
break;
+ case Opt_richacl:
+ sb->s_flags |= MS_RICHACL;
+ return 1;
case Opt_sb:
return 1; /* handled by get_sb_block() */
case Opt_removed:
}
}
#endif
+#if defined(CONFIG_EXT4_FS_RICHACL) && defined(CONFIG_EXT4_FS_POSIX_ACL)
+ if (test_opt(sb, POSIX_ACL) && (sb->s_flags & MS_RICHACL))
+ clear_opt(sb, POSIX_ACL);
+#endif
return 1;
}
(sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)))
SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult);
+ if (sb->s_flags & MS_RICHACL)
+ SEQ_OPTS_PUTS("richacl");
+
ext4_show_quota_options(seq, sb);
return 0;
}
int err;
unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
ext4_group_t first_not_zeroed;
+ unsigned long acl_flags = 0;
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
#ifdef CONFIG_EXT4_FS_XATTR
set_opt(sb, XATTR_USER);
#endif
-#ifdef CONFIG_EXT4_FS_POSIX_ACL
+#if defined(CONFIG_EXT4_FS_POSIX_ACL)
set_opt(sb, POSIX_ACL);
#endif
set_opt(sb, MBLK_IO_SUBMIT);
}
}
- sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
- (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
+ if (sb->s_flags & MS_RICHACL)
+ acl_flags = MS_RICHACL;
+ else if (test_opt(sb, POSIX_ACL))
+ acl_flags = MS_POSIXACL;
+
+ sb->s_flags = (sb->s_flags & ~(MS_POSIXACL | MS_RICHACL)) | acl_flags;
if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
(EXT4_HAS_COMPAT_FEATURE(sb, ~0U) ||
ext4_group_t g;
unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
int err = 0;
+ unsigned long acl_flags = 0;
#ifdef CONFIG_QUOTA
int i;
#endif
if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
ext4_abort(sb, "Abort forced by user");
- sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
- (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
+ if (sb->s_flags & MS_RICHACL)
+ acl_flags = MS_RICHACL;
+ else if (test_opt(sb, POSIX_ACL))
+ acl_flags = MS_POSIXACL;
+
+ sb->s_flags = (sb->s_flags & ~(MS_POSIXACL | MS_RICHACL)) | acl_flags;
es = sbi->s_es;
#ifdef CONFIG_EXT4_FS_SECURITY
[EXT4_XATTR_INDEX_SECURITY] = &ext4_xattr_security_handler,
#endif
+#ifdef CONFIG_EXT4_FS_RICHACL
+ [EXT4_XATTR_INDEX_RICHACL] = &ext4_richacl_xattr_handler,
+#endif
};
const struct xattr_handler *ext4_xattr_handlers[] = {
#ifdef CONFIG_EXT4_FS_SECURITY
&ext4_xattr_security_handler,
#endif
+#ifdef CONFIG_EXT4_FS_RICHACL
+ &ext4_richacl_xattr_handler,
+#endif
NULL
};
#define EXT4_XATTR_INDEX_TRUSTED 4
#define EXT4_XATTR_INDEX_LUSTRE 5
#define EXT4_XATTR_INDEX_SECURITY 6
+#define EXT4_XATTR_INDEX_RICHACL 7
struct ext4_xattr_header {
__le32 h_magic; /* magic number for identification */
extern const struct xattr_handler ext4_xattr_acl_access_handler;
extern const struct xattr_handler ext4_xattr_acl_default_handler;
extern const struct xattr_handler ext4_xattr_security_handler;
+extern const struct xattr_handler ext4_xattr_acl_access_handler;
+extern const struct xattr_handler ext4_xattr_acl_default_handler;
+extern const struct xattr_handler ext4_xattr_security_handler;
+extern const struct xattr_handler ext4_richacl_xattr_handler;
extern ssize_t ext4_listxattr(struct dentry *, char *, size_t);
spin_lock(&tree->hash_lock);
node = hfs_bnode_findhash(tree, num);
spin_unlock(&tree->hash_lock);
- BUG_ON(node);
+ if (node) {
+ printk(KERN_CRIT "new node %u already hashed?\n", num);
+ WARN_ON(1);
+ return node;
+ }
node = __hfs_bnode_create(tree, num);
if (!node)
return ERR_PTR(-ENOMEM);
}
/*
+ * Do the directory specific tests of inode_permission() and call the
+ * may_delete inode operation. The may_delete inode operation must do the
+ * sticky check when needed.
+ */
+static int may_delete_iop(struct inode *dir, struct inode *inode, int replace)
+{
+ int error;
+
+ if (IS_RDONLY(dir))
+ return -EROFS;
+ if (IS_IMMUTABLE(dir))
+ return -EACCES;
+ error = dir->i_op->may_delete(dir, inode, replace);
+ if (!error)
+ error = security_inode_permission(dir, MAY_WRITE | MAY_EXEC);
+
+ return error;
+}
+
+/*
* Check whether we can remove a link victim from directory dir, check
* whether the type of victim is right.
* 1. We can't do it if dir is read-only (done in permission())
* 10. We don't allow removal of NFS sillyrenamed files; it's handled by
* nfs_async_unlink().
*/
-static int may_delete(struct inode *dir,struct dentry *victim,int isdir)
+static int may_delete(struct inode *dir, struct dentry *victim,
+ int isdir, int replace)
{
int error;
BUG_ON(victim->d_parent->d_inode != dir);
audit_inode_child(victim, dir);
-
- error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
+ if (dir->i_op->may_delete)
+ error = may_delete_iop(dir, victim->d_inode, replace);
+ else {
+ error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
+ if (!error && check_sticky(dir, victim->d_inode))
+ error = -EPERM;
+ }
if (error)
return error;
if (IS_APPEND(dir))
return -EPERM;
- if (check_sticky(dir, victim->d_inode)||IS_APPEND(victim->d_inode)||
- IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode))
+ if (IS_APPEND(victim->d_inode) || IS_IMMUTABLE(victim->d_inode) ||
+ IS_SWAPFILE(victim->d_inode))
return -EPERM;
if (isdir) {
if (!S_ISDIR(victim->d_inode->i_mode))
return 0;
}
+/*
+ * Do the directory specific tests of inode_permission() and call the
+ * may_create inode operation.
+ *
+ * @dir:   directory in which the new object would be created
+ * @isdir: non-zero when creating a directory
+ *
+ * Returns 0 if creation is permitted, a negative errno otherwise.
+ */
+static int may_create_iop(struct inode *dir, int isdir)
+{
+	int error;
+
+	if (IS_RDONLY(dir))
+		return -EROFS;
+	if (IS_IMMUTABLE(dir))
+		return -EACCES;
+	/* Delegate the access decision to the filesystem's hook. */
+	error = dir->i_op->may_create(dir, isdir);
+	if (!error)
+		error = security_inode_permission(dir, MAY_WRITE | MAY_EXEC);
+
+	return error;
+}
+
/* Check whether we can create an object with dentry child in directory
* dir.
* 1. We can't do it if child already exists (open has special treatment for
* 3. We should have write and exec permissions on dir
* 4. We can't do it if dir is immutable (done in permission())
*/
-static inline int may_create(struct inode *dir, struct dentry *child)
+static inline int may_create(struct inode *dir, struct dentry *child, int isdir)
{
if (child->d_inode)
return -EEXIST;
if (IS_DEADDIR(dir))
return -ENOENT;
- return inode_permission(dir, MAY_WRITE | MAY_EXEC);
+ if (dir->i_op->may_create)
+ return may_create_iop(dir, isdir);
+ else
+ return inode_permission(dir, MAY_WRITE | MAY_EXEC);
}
/*
int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct nameidata *nd)
{
- int error = may_create(dir, dentry);
+ int error = may_create(dir, dentry, 0);
if (error)
return error;
/* Negative dentry, just create the file */
if (!dentry->d_inode) {
umode_t mode = op->mode;
- if (!IS_POSIXACL(dir->d_inode))
+ if (!IS_ACL(dir->d_inode))
mode &= ~current_umask();
/*
* This write is needed to ensure that a
int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
- int error = may_create(dir, dentry);
+ int error = may_create(dir, dentry, 0);
if (error)
return error;
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- if (!IS_POSIXACL(path.dentry->d_inode))
+ if (!IS_ACL(path.dentry->d_inode))
mode &= ~current_umask();
error = may_mknod(mode);
if (error)
int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
- int error = may_create(dir, dentry);
+ int error = may_create(dir, dentry, 1);
unsigned max_links = dir->i_sb->s_max_links;
if (error)
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- if (!IS_POSIXACL(path.dentry->d_inode))
+ if (!IS_ACL(path.dentry->d_inode))
mode &= ~current_umask();
error = mnt_want_write(path.mnt);
if (error)
int vfs_rmdir(struct inode *dir, struct dentry *dentry)
{
- int error = may_delete(dir, dentry, 1);
+ int error = may_delete(dir, dentry, 1, 0);
if (error)
return error;
int vfs_unlink(struct inode *dir, struct dentry *dentry)
{
- int error = may_delete(dir, dentry, 0);
+ int error = may_delete(dir, dentry, 0, 0);
if (error)
return error;
int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
{
- int error = may_create(dir, dentry);
+ int error = may_create(dir, dentry, 0);
if (error)
return error;
if (!inode)
return -ENOENT;
- error = may_create(dir, new_dentry);
+ error = may_create(dir, new_dentry, S_ISDIR(inode->i_mode));
if (error)
return error;
if (old_dentry->d_inode == new_dentry->d_inode)
return 0;
- error = may_delete(old_dir, old_dentry, is_dir);
+ error = may_delete(old_dir, old_dentry, is_dir, 0);
if (error)
return error;
if (!new_dentry->d_inode)
- error = may_create(new_dir, new_dentry);
+ error = may_create(new_dir, new_dentry, is_dir);
else
- error = may_delete(new_dir, new_dentry, is_dir);
+ error = may_delete(new_dir, new_dentry, is_dir, 1);
if (error)
return error;
desc->dir_cookie = &dir_ctx->dir_cookie;
desc->decode = NFS_PROTO(inode)->decode_dirent;
desc->plus = NFS_USE_READDIRPLUS(inode);
+ if (filp->f_pos > 0 && !test_bit(NFS_INO_SEEN_GETATTR, &NFS_I(inode)->flags))
+ desc->plus = 0;
+ clear_bit(NFS_INO_SEEN_GETATTR, &NFS_I(inode)->flags);
nfs_block_sillyrename(dentry);
res = nfs_revalidate_mapping(inode, filp->f_mapping);
struct inode *inode = dentry->d_inode;
int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME;
int err;
+ struct dentry *p;
+ struct inode *pi;
+
+ rcu_read_lock();
+ p = dentry->d_parent;
+ pi = rcu_dereference(p)->d_inode;
+ if (pi && !test_bit(NFS_INO_SEEN_GETATTR, &NFS_I(pi)->flags))
+ set_bit(NFS_INO_SEEN_GETATTR, &NFS_I(pi)->flags);
+ rcu_read_unlock();
/* Flush out writes to the server in order to update c/mtime. */
if (S_ISREG(inode->i_mode)) {
--- /dev/null
+/*
+ * Copyright (C) 2006, 2010 Novell, Inc.
+ * Written by Andreas Gruenbacher <agruen@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/richacl.h>
+
+MODULE_LICENSE("GPL");
+
+/*
+ * Special e_who identifiers: ACEs which have ACE4_SPECIAL_WHO set in
+ * ace->e_flags use these constants in ace->u.e_who.
+ *
+ * For efficiency, we compare pointers instead of comparing strings.
+ */
+const char richace_owner_who[] = "OWNER@";
+EXPORT_SYMBOL_GPL(richace_owner_who);
+const char richace_group_who[] = "GROUP@";
+EXPORT_SYMBOL_GPL(richace_group_who);
+const char richace_everyone_who[] = "EVERYONE@";
+EXPORT_SYMBOL_GPL(richace_everyone_who);
+
+/**
+ * richacl_alloc  -  allocate a richacl
+ * @count: number of entries
+ *
+ * Returns a zero-initialized acl holding a single reference (drop with
+ * richacl_put()), or NULL on allocation failure.
+ *
+ * NOTE(review): the size computation does not guard against integer
+ * overflow for huge @count; callers appear to cap it (e.g. the xattr
+ * decoder via ACL4_XATTR_MAX_COUNT) -- confirm for any new caller.
+ */
+struct richacl *
+richacl_alloc(int count)
+{
+	size_t size = sizeof(struct richacl) + count * sizeof(struct richace);
+	struct richacl *acl = kzalloc(size, GFP_KERNEL);
+
+	if (acl) {
+		atomic_set(&acl->a_refcount, 1);
+		acl->a_count = count;
+	}
+	return acl;
+}
+EXPORT_SYMBOL_GPL(richacl_alloc);
+
+/**
+ * richacl_clone  -  create a copy of a richacl
+ *
+ * Returns a newly allocated duplicate of @acl with its own reference
+ * count of one, or NULL on allocation failure.  The whole object
+ * (header plus entries) is copied with memcpy, so no field is left
+ * uninitialized despite the use of kmalloc.
+ */
+static struct richacl *
+richacl_clone(const struct richacl *acl)
+{
+	int count = acl->a_count;
+	size_t size = sizeof(struct richacl) + count * sizeof(struct richace);
+	struct richacl *dup = kmalloc(size, GFP_KERNEL);
+
+	if (dup) {
+		memcpy(dup, acl, size);
+		atomic_set(&dup->a_refcount, 1);
+	}
+	return dup;
+}
+
+/**
+ * richacl_mask_to_mode  -  compute the file permission bits which correspond to @mask
+ * @mask: %ACE4_* permission mask
+ *
+ * Maps the three ACE4_POSIX_MODE_* bits of @mask onto the corresponding
+ * MAY_* bits; all other mask bits are ignored.
+ *
+ * See richacl_masks_to_mode().
+ */
+static int
+richacl_mask_to_mode(unsigned int mask)
+{
+	int mode = 0;
+
+	mode |= (mask & ACE4_POSIX_MODE_READ) ? MAY_READ : 0;
+	mode |= (mask & ACE4_POSIX_MODE_WRITE) ? MAY_WRITE : 0;
+	mode |= (mask & ACE4_POSIX_MODE_EXEC) ? MAY_EXEC : 0;
+
+	return mode;
+}
+
+/**
+ * richacl_masks_to_mode  -  compute the file permission bits from the file masks
+ * @acl: acl whose a_owner_mask / a_group_mask / a_other_mask are examined
+ *
+ * When setting a richacl, we set the file permission bits to indicate maximum
+ * permissions: for example, we set the Write permission when a mask contains
+ * ACE4_APPEND_DATA even if it does not also contain ACE4_WRITE_DATA.
+ *
+ * Permissions which are not in ACE4_POSIX_MODE_READ, ACE4_POSIX_MODE_WRITE, or
+ * ACE4_POSIX_MODE_EXEC cannot be represented in the file permission bits.
+ * Such permissions can still be effective, but not for new files or after a
+ * chmod(), and only if they were set explicitly, for example, by setting a
+ * richacl.
+ *
+ * Returns an rwxrwxrwx-style mode value (no file-type bits).
+ */
+int
+richacl_masks_to_mode(const struct richacl *acl)
+{
+	return richacl_mask_to_mode(acl->a_owner_mask) << 6 |
+	       richacl_mask_to_mode(acl->a_group_mask) << 3 |
+	       richacl_mask_to_mode(acl->a_other_mask);
+}
+EXPORT_SYMBOL_GPL(richacl_masks_to_mode);
+
+/**
+ * richacl_mode_to_mask  -  compute a file mask from the lowest three mode bits
+ * @mode: rwx bits for one file class
+ *
+ * When the file permission bits of a file are set with chmod(), this specifies
+ * the maximum permissions that processes will get.  All permissions beyond
+ * that will be removed from the file masks, and become ineffective.
+ *
+ * We also add in the permissions which are always allowed no matter what the
+ * acl says.
+ */
+unsigned int
+richacl_mode_to_mask(mode_t mode)
+{
+	unsigned int mask = ACE4_POSIX_ALWAYS_ALLOWED;
+
+	mask |= (mode & MAY_READ) ? ACE4_POSIX_MODE_READ : 0;
+	mask |= (mode & MAY_WRITE) ? ACE4_POSIX_MODE_WRITE : 0;
+	mask |= (mode & MAY_EXEC) ? ACE4_POSIX_MODE_EXEC : 0;
+
+	return mask;
+}
+
+/**
+ * richacl_want_to_mask - convert the iop->permission want argument to a mask
+ * @want: @want argument of the permission inode operation
+ *
+ * When checking for append, @want is (MAY_WRITE | MAY_APPEND).
+ *
+ * Richacls use the iop->may_create and iop->may_delete hooks which are
+ * used for checking if creating and deleting files is allowed. These hooks do
+ * not use richacl_want_to_mask(), so we do not have to deal with mapping
+ * MAY_WRITE to ACE4_ADD_FILE, ACE4_ADD_SUBDIRECTORY, and ACE4_DELETE_CHILD
+ * here.
+ */
+unsigned int
+richacl_want_to_mask(int want)
+{
+	unsigned int mask = 0;
+
+	if (want & MAY_READ)
+		mask |= ACE4_READ_DATA;
+	/*
+	 * MAY_APPEND takes precedence over MAY_WRITE: append-only access
+	 * maps to ACE4_APPEND_DATA instead of ACE4_WRITE_DATA.
+	 */
+	if (want & MAY_APPEND)
+		mask |= ACE4_APPEND_DATA;
+	else if (want & MAY_WRITE)
+		mask |= ACE4_WRITE_DATA;
+	if (want & MAY_EXEC)
+		mask |= ACE4_EXECUTE;
+
+	return mask;
+}
+EXPORT_SYMBOL_GPL(richacl_want_to_mask);
+
+/**
+ * richace_is_same_identifier  -  are both identifiers the same?
+ *
+ * Two entries match when their who-class flags agree and, depending on
+ * ACE4_SPECIAL_WHO, either the special who string or the numeric id is
+ * equal.  Special who strings are interned (see richace_set_who()), so
+ * comparing the pointers is sufficient.
+ */
+int
+richace_is_same_identifier(const struct richace *a, const struct richace *b)
+{
+#define WHO_FLAGS (ACE4_SPECIAL_WHO | ACE4_IDENTIFIER_GROUP)
+	if ((a->e_flags & WHO_FLAGS) != (b->e_flags & WHO_FLAGS))
+		return 0;
+	if (a->e_flags & ACE4_SPECIAL_WHO)
+		return a->u.e_who == b->u.e_who;
+	else
+		return a->u.e_id == b->u.e_id;
+#undef WHO_FLAGS
+}
+
+/**
+ * richace_set_who - set a special who value
+ * @ace: acl entry
+ * @who: who value to use
+ *
+ * Replaces @who with the canonical richace_*_who string so that
+ * richace_is_same_identifier() can compare by pointer.  Returns -EINVAL
+ * if @who is not one of "OWNER@", "GROUP@", or "EVERYONE@".
+ */
+int
+richace_set_who(struct richace *ace, const char *who)
+{
+	if (!strcmp(who, richace_owner_who))
+		who = richace_owner_who;
+	else if (!strcmp(who, richace_group_who))
+		who = richace_group_who;
+	else if (!strcmp(who, richace_everyone_who))
+		who = richace_everyone_who;
+	else
+		return -EINVAL;
+
+	ace->u.e_who = who;
+	/* A special who is never a unix group identifier. */
+	ace->e_flags |= ACE4_SPECIAL_WHO;
+	ace->e_flags &= ~ACE4_IDENTIFIER_GROUP;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(richace_set_who);
+
+/**
+ * richacl_allowed_to_who - mask flags allowed to a specific who value
+ * @acl: acl to scan
+ * @who: entry whose identifier is looked up
+ *
+ * Computes the mask values allowed to a specific who value, taking
+ * EVERYONE@ entries into account.
+ *
+ * The acl is walked in reverse so that entries appearing earlier in the
+ * acl are applied last and therefore take precedence, mirroring
+ * first-match-wins evaluation.
+ */
+static unsigned int richacl_allowed_to_who(struct richacl *acl,
+					   struct richace *who)
+{
+	struct richace *ace;
+	unsigned int allowed = 0;
+
+	richacl_for_each_entry_reverse(ace, acl) {
+		if (richace_is_inherit_only(ace))
+			continue;
+		if (richace_is_same_identifier(ace, who) ||
+		    richace_is_everyone(ace)) {
+			if (richace_is_allow(ace))
+				allowed |= ace->e_mask;
+			else if (richace_is_deny(ace))
+				allowed &= ~ace->e_mask;
+		}
+	}
+	return allowed;
+}
+
+/**
+ * richacl_group_class_allowed - maximum permissions the group class is allowed
+ * @acl: acl to scan
+ *
+ * Accumulates, over all non-owner entries, the permissions each group
+ * class member could be granted.  If no GROUP@ entry exists, the
+ * EVERYONE@ permissions are folded in as well.
+ *
+ * See richacl_compute_max_masks().
+ */
+static unsigned int richacl_group_class_allowed(struct richacl *acl)
+{
+	struct richace *ace;
+	unsigned int everyone_allowed = 0, group_class_allowed = 0;
+	int had_group_ace = 0;
+
+	richacl_for_each_entry_reverse(ace, acl) {
+		if (richace_is_inherit_only(ace) ||
+		    richace_is_owner(ace))
+			continue;
+
+		if (richace_is_everyone(ace)) {
+			if (richace_is_allow(ace))
+				everyone_allowed |= ace->e_mask;
+			else if (richace_is_deny(ace))
+				everyone_allowed &= ~ace->e_mask;
+		} else {
+			group_class_allowed |=
+				richacl_allowed_to_who(acl, ace);
+
+			if (richace_is_group(ace))
+				had_group_ace = 1;
+		}
+	}
+	if (!had_group_ace)
+		group_class_allowed |= everyone_allowed;
+	return group_class_allowed;
+}
+
+/**
+ * richacl_compute_max_masks - compute upper bound masks
+ *
+ * Computes upper bound owner, group, and other masks so that none of
+ * the mask flags allowed by the acl are disabled (for any choice of the
+ * file owner or group membership).
+ */
+void richacl_compute_max_masks(struct richacl *acl)
+{
+	unsigned int gmask = ~0;
+	struct richace *ace;
+
+	/*
+	 * @gmask contains all permissions which the group class is ever
+	 * allowed.  We use it to avoid adding permissions to the group mask
+	 * from everyone@ allow aces which the group class is always denied
+	 * through other aces.  For example, the following acl would otherwise
+	 * result in a group mask of rw:
+	 *
+	 *	group@:w::deny
+	 *	everyone@:rw::allow
+	 *
+	 * Avoid computing @gmask for acls which do not include any group class
+	 * deny aces: in such acls, the group class is never denied any
+	 * permissions from everyone@ allow aces.  @gmask stays ~0 until the
+	 * first group-class deny ace is seen; only then is it computed and
+	 * the whole scan restarted.
+	 */
+
+restart:
+	acl->a_owner_mask = 0;
+	acl->a_group_mask = 0;
+	acl->a_other_mask = 0;
+
+	/* Reverse scan: earlier (higher priority) aces are applied last. */
+	richacl_for_each_entry_reverse(ace, acl) {
+		if (richace_is_inherit_only(ace))
+			continue;
+
+		if (richace_is_owner(ace)) {
+			if (richace_is_allow(ace))
+				acl->a_owner_mask |= ace->e_mask;
+			else if (richace_is_deny(ace))
+				acl->a_owner_mask &= ~ace->e_mask;
+		} else if (richace_is_everyone(ace)) {
+			if (richace_is_allow(ace)) {
+				acl->a_owner_mask |= ace->e_mask;
+				acl->a_group_mask |= ace->e_mask & gmask;
+				acl->a_other_mask |= ace->e_mask;
+			} else if (richace_is_deny(ace)) {
+				acl->a_owner_mask &= ~ace->e_mask;
+				acl->a_group_mask &= ~ace->e_mask;
+				acl->a_other_mask &= ~ace->e_mask;
+			}
+		} else {
+			/* User or group entry: affects owner and group class. */
+			if (richace_is_allow(ace)) {
+				acl->a_owner_mask |= ace->e_mask & gmask;
+				acl->a_group_mask |= ace->e_mask & gmask;
+			} else if (richace_is_deny(ace) && gmask == ~0) {
+				gmask = richacl_group_class_allowed(acl);
+				if (likely(gmask != ~0)) /* should always be true */
+					goto restart;
+			}
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(richacl_compute_max_masks);
+
+/**
+ * richacl_chmod - update the file masks to reflect the new mode
+ * @acl: acl to update; this reference is consumed
+ * @mode: new file permission bits
+ *
+ * Return a copy of @acl where the file masks have been replaced by the file
+ * masks corresponding to the file permission bits in @mode, or returns @acl
+ * itself if the file masks are already up to date. Takes over a reference
+ * to @acl.  Returns ERR_PTR(-ENOMEM) on allocation failure (the original
+ * reference has already been dropped by then).
+ */
+struct richacl *
+richacl_chmod(struct richacl *acl, mode_t mode)
+{
+	unsigned int owner_mask, group_mask, other_mask;
+	struct richacl *clone;
+
+	owner_mask = richacl_mode_to_mask(mode >> 6);
+	group_mask = richacl_mode_to_mask(mode >> 3);
+	other_mask = richacl_mode_to_mask(mode);
+
+	if (acl->a_owner_mask == owner_mask &&
+	    acl->a_group_mask == group_mask &&
+	    acl->a_other_mask == other_mask &&
+	    (!richacl_is_auto_inherit(acl) || richacl_is_protected(acl)))
+		return acl;
+
+	clone = richacl_clone(acl);
+	richacl_put(acl);
+	if (!clone)
+		return ERR_PTR(-ENOMEM);
+
+	clone->a_owner_mask = owner_mask;
+	clone->a_group_mask = group_mask;
+	clone->a_other_mask = other_mask;
+	/* An explicit chmod cuts off automatic inheritance. */
+	if (richacl_is_auto_inherit(clone))
+		clone->a_flags |= ACL4_PROTECTED;
+
+	return clone;
+}
+EXPORT_SYMBOL_GPL(richacl_chmod);
+
+/**
+ * richacl_permission - richacl permission check algorithm
+ * @inode: inode to check
+ * @acl: rich acl of the inode
+ * @mask: requested access (ACE4_* bitmask)
+ *
+ * Checks if the current process is granted @mask flags in @acl.
+ * Returns 0 when access is granted, -EACCES otherwise.
+ */
+int
+richacl_permission(struct inode *inode, const struct richacl *acl,
+		   unsigned int mask)
+{
+	const struct richace *ace;
+	unsigned int file_mask, requested = mask, denied = 0;
+	int in_owning_group = in_group_p(inode->i_gid);
+	int in_owner_or_group_class = in_owning_group;
+
+	/*
+	 * A process is
+	 *   - in the owner file class if it owns the file,
+	 *   - in the group file class if it is in the file's owning group or
+	 *     it matches any of the user or group entries, and
+	 *   - in the other file class otherwise.
+	 */
+
+	/*
+	 * Check if the acl grants the requested access and determine which
+	 * file class the process is in.  @mask shrinks as aces grant bits;
+	 * @denied accumulates bits explicitly denied before being granted.
+	 */
+	richacl_for_each_entry(ace, acl) {
+		unsigned int ace_mask = ace->e_mask;
+
+		if (richace_is_inherit_only(ace))
+			continue;
+		if (richace_is_owner(ace)) {
+			if (current_fsuid() != inode->i_uid)
+				continue;
+			goto is_owner;
+		} else if (richace_is_group(ace)) {
+			if (!in_owning_group)
+				continue;
+		} else if (richace_is_unix_id(ace)) {
+			if (ace->e_flags & ACE4_IDENTIFIER_GROUP) {
+				if (!in_group_p(ace->u.e_id))
+					continue;
+			} else {
+				if (current_fsuid() != ace->u.e_id)
+					continue;
+			}
+		} else
+			goto is_everyone;
+
+		/*
+		 * Apply the group file mask to entries other than OWNER@ and
+		 * EVERYONE@. This is not required for correct access checking
+		 * but ensures that we grant the same permissions as the acl
+		 * computed by richacl_apply_masks() would grant. See
+		 * richacl_apply_masks() for a more detailed explanation.
+		 */
+		if (richace_is_allow(ace))
+			ace_mask &= acl->a_group_mask;
+
+is_owner:
+		/* The process is in the owner or group file class. */
+		in_owner_or_group_class = 1;
+
+is_everyone:
+		/* Check which mask flags the ACE allows or denies. */
+		if (richace_is_deny(ace))
+			denied |= ace_mask & mask;
+		mask &= ~ace_mask;
+
+		/*
+		 * Keep going until we know which file class
+		 * the process is in.
+		 */
+		if (!mask && in_owner_or_group_class)
+			break;
+	}
+	/* Any bits never granted by an ace are denied by default. */
+	denied |= mask;
+
+	/*
+	 * The file class a process is in determines which file mask applies.
+	 * Check if that file mask also grants the requested access.
+	 */
+	if (current_fsuid() == inode->i_uid)
+		file_mask = acl->a_owner_mask;
+	else if (in_owner_or_group_class)
+		file_mask = acl->a_group_mask;
+	else
+		file_mask = acl->a_other_mask;
+	denied |= requested & ~file_mask;
+
+	return denied ? -EACCES : 0;
+}
+EXPORT_SYMBOL_GPL(richacl_permission);
+
+/**
+ * richacl_inherit - compute the inherited acl of a new file
+ * @dir_acl: acl of the containing directory
+ * @inode: inode of the new file (create mode in i_mode)
+ *
+ * A directory can have acl entries which files and/or directories created
+ * inside the directory will inherit. This function computes the acl for such
+ * a new file. If there is no inheritable acl, it will return %NULL.
+ *
+ * The file permission bits in inode->i_mode must be set to the create mode.
+ * If there is an inheritable acl, the maximum permissions that the acl grants
+ * will be computed and permissions not granted by the acl will be removed from
+ * inode->i_mode. If there is no inheritable acl, the umask will be applied
+ * instead.
+ */
+struct richacl *
+richacl_inherit(const struct richacl *dir_acl, struct inode *inode)
+{
+	const struct richace *dir_ace;
+	struct richacl *acl = NULL;
+	struct richace *ace;
+	int count = 0;
+	/* Default: apply the umask if no inheritable aces are found. */
+	mode_t mask = ~current_umask();
+
+	if (S_ISDIR(inode->i_mode)) {
+		/* First pass: count inheritable entries. */
+		richacl_for_each_entry(dir_ace, dir_acl) {
+			if (!richace_is_inheritable(dir_ace))
+				continue;
+			count++;
+		}
+		if (!count)
+			goto mask;
+		acl = richacl_alloc(count);
+		if (!acl)
+			return ERR_PTR(-ENOMEM);
+		ace = acl->a_entries;
+		richacl_for_each_entry(dir_ace, dir_acl) {
+			if (!richace_is_inheritable(dir_ace))
+				continue;
+			memcpy(ace, dir_ace, sizeof(struct richace));
+			if (dir_ace->e_flags & ACE4_NO_PROPAGATE_INHERIT_ACE)
+				richace_clear_inheritance_flags(ace);
+			/*
+			 * File-only inherit aces do not apply to the new
+			 * directory itself, only to files created inside it.
+			 */
+			if ((dir_ace->e_flags & ACE4_FILE_INHERIT_ACE) &&
+			    !(dir_ace->e_flags & ACE4_DIRECTORY_INHERIT_ACE))
+				ace->e_flags |= ACE4_INHERIT_ONLY_ACE;
+			ace++;
+		}
+	} else {
+		richacl_for_each_entry(dir_ace, dir_acl) {
+			if (!(dir_ace->e_flags & ACE4_FILE_INHERIT_ACE))
+				continue;
+			count++;
+		}
+		if (!count)
+			goto mask;
+		acl = richacl_alloc(count);
+		if (!acl)
+			return ERR_PTR(-ENOMEM);
+		ace = acl->a_entries;
+		richacl_for_each_entry(dir_ace, dir_acl) {
+			if (!(dir_ace->e_flags & ACE4_FILE_INHERIT_ACE))
+				continue;
+			memcpy(ace, dir_ace, sizeof(struct richace));
+			richace_clear_inheritance_flags(ace);
+			/*
+			 * ACE4_DELETE_CHILD is meaningless for
+			 * non-directories, so clear it.
+			 */
+			ace->e_mask &= ~ACE4_DELETE_CHILD;
+			ace++;
+		}
+	}
+
+	richacl_compute_max_masks(acl);
+
+	/*
+	 * Ensure that the acl will not grant any permissions beyond the create
+	 * mode.
+	 */
+	acl->a_owner_mask &= richacl_mode_to_mask(inode->i_mode >> 6);
+	acl->a_group_mask &= richacl_mode_to_mask(inode->i_mode >> 3);
+	acl->a_other_mask &= richacl_mode_to_mask(inode->i_mode);
+	mask = ~S_IRWXUGO | richacl_masks_to_mode(acl);
+
+	if (richacl_is_auto_inherit(dir_acl)) {
+		/*
+		 * We need to set ACL4_PROTECTED because we are
+		 * doing an implicit chmod
+		 */
+		acl->a_flags = ACL4_AUTO_INHERIT | ACL4_PROTECTED;
+		richacl_for_each_entry(ace, acl)
+			ace->e_flags |= ACE4_INHERITED_ACE;
+	}
+
+mask:
+	inode->i_mode &= mask;
+	return acl;
+}
+EXPORT_SYMBOL_GPL(richacl_inherit);
+
+/**
+ * richacl_equiv_mode - check if @acl is equivalent to file permission bits
+ * @acl: acl to examine
+ * @mode_p: the file mode (including the file type)
+ *
+ * If @acl can be fully represented by file permission bits, this function
+ * returns 0, and the file permission bits in @mode_p are set to the equivalent
+ * of @acl.  Returns -1 when the acl is not equivalent to a plain mode.
+ *
+ * This function is used to avoid storing richacls on disk if the acl can be
+ * computed from the file permission bits. It allows user-space to make sure
+ * that a file has no explicit richacl set.
+ */
+int
+richacl_equiv_mode(const struct richacl *acl, mode_t *mode_p)
+{
+	const struct richace *ace = acl->a_entries;
+	unsigned int x;
+	mode_t mode;
+
+	/* Only a single everyone@-allow ace can be mode-equivalent. */
+	if (acl->a_count != 1 ||
+	    acl->a_flags ||
+	    !richace_is_everyone(ace) ||
+	    !richace_is_allow(ace) ||
+	    ace->e_flags & ~ACE4_SPECIAL_WHO)
+		return -1;
+
+	/*
+	 * Figure out the permissions we care about: ACE4_DELETE_CHILD is
+	 * meaningless for non-directories, so we ignore it.
+	 */
+	x = ~ACE4_POSIX_ALWAYS_ALLOWED;
+	if (!S_ISDIR(*mode_p))
+		x &= ~ACE4_DELETE_CHILD;
+
+	if ((ace->e_mask & x) != (ACE4_POSIX_MODE_ALL & x))
+		return -1;
+
+	/* The file masks must not restrict anything beyond the mode. */
+	mode = richacl_masks_to_mode(acl);
+	if ((acl->a_owner_mask & x) != (richacl_mode_to_mask(mode >> 6) & x) ||
+	    (acl->a_group_mask & x) != (richacl_mode_to_mask(mode >> 3) & x) ||
+	    (acl->a_other_mask & x) != (richacl_mode_to_mask(mode) & x))
+		return -1;
+
+	*mode_p = (*mode_p & ~S_IRWXUGO) | mode;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(richacl_equiv_mode);
--- /dev/null
+/*
+ * Copyright (C) 2010 Novell, Inc.
+ * Written by Andreas Gruenbacher <agruen@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/richacl.h>
+
+/**
+ * richacl_may_create - helper for implementing iop->may_create
+ * @dir: directory the new object would be created in
+ * @isdir: non-zero when creating a directory
+ * @richacl_permission: filesystem callback checking ACE4_* access on an inode;
+ *	returns 0 when access is granted
+ *
+ * Falls back to the POSIX MAY_WRITE | MAY_EXEC check when @dir does not
+ * use richacls.
+ */
+int
+richacl_may_create(struct inode *dir, int isdir,
+		   int (*richacl_permission)(struct inode *, unsigned int))
+{
+	if (IS_RICHACL(dir))
+		return richacl_permission(dir,
+				ACE4_EXECUTE | (isdir ?
+				ACE4_ADD_SUBDIRECTORY : ACE4_ADD_FILE));
+	else
+		return generic_permission(dir, MAY_WRITE | MAY_EXEC);
+}
+EXPORT_SYMBOL(richacl_may_create);
+
+/*
+ * Sticky-directory delete restriction (local copy of the VFS logic):
+ * in a sticky directory, only the entry's owner, the directory's owner,
+ * or a CAP_FOWNER-capable process may delete an entry.  Returns non-zero
+ * when deletion must be refused on sticky grounds.
+ */
+static int
+check_sticky(struct inode *dir, struct inode *inode)
+{
+	if (!(dir->i_mode & S_ISVTX))
+		return 0;
+	if (inode->i_uid == current_fsuid())
+		return 0;
+	if (dir->i_uid == current_fsuid())
+		return 0;
+	return !capable(CAP_FOWNER);
+}
+
+/**
+ * richacl_may_delete - helper for implementing iop->may_delete
+ * @dir: directory containing @inode
+ * @inode: inode of the victim
+ * @replace: non-zero when the victim is being replaced (rename target)
+ * @richacl_permission: filesystem callback checking ACE4_* access on an inode;
+ *	returns 0 when access is granted
+ *
+ * Falls back to the POSIX check (plus sticky bit) when @inode does not
+ * use richacls.
+ */
+int
+richacl_may_delete(struct inode *dir, struct inode *inode, int replace,
+		   int (*richacl_permission)(struct inode *, unsigned int))
+{
+	int error;
+
+	if (IS_RICHACL(inode)) {
+		error = richacl_permission(dir,
+				ACE4_EXECUTE | ACE4_DELETE_CHILD);
+		if (!error && check_sticky(dir, inode))
+			error = -EPERM;
+		/* ACE4_DELETE on the victim itself overrides a denial. */
+		if (error && !richacl_permission(inode, ACE4_DELETE))
+			error = 0;
+		/* Replacing additionally requires the right to add an entry. */
+		if (!error && replace)
+			error = richacl_permission(dir,
+					ACE4_EXECUTE | (S_ISDIR(inode->i_mode) ?
+					ACE4_ADD_SUBDIRECTORY : ACE4_ADD_FILE));
+	} else {
+		error = generic_permission(dir, MAY_WRITE | MAY_EXEC);
+		if (!error && check_sticky(dir, inode))
+			error = -EPERM;
+	}
+
+	return error;
+}
+EXPORT_SYMBOL(richacl_may_delete);
+
+/**
+ * richacl_inode_permission - helper for implementing iop->permission
+ * @inode: inode to check
+ * @acl: rich acl of the inode (may be NULL)
+ * @mask: requested access (ACE4_* bitmask)
+ *
+ * This function is supposed to be used by file systems for implementing the
+ * permission inode operation.  Without an acl, the lowest three mode bits
+ * of the applicable file class are used instead.  Capability overrides
+ * mirror generic_permission().  Returns 0 or -EACCES.
+ */
+int
+richacl_inode_permission(struct inode *inode, const struct richacl *acl,
+			 unsigned int mask)
+{
+	if (acl) {
+		if (!richacl_permission(inode, acl, mask))
+			return 0;
+	} else {
+		int mode = inode->i_mode;
+
+		if (current_fsuid() == inode->i_uid)
+			mode >>= 6;
+		else if (in_group_p(inode->i_gid))
+			mode >>= 3;
+		if (!(mask & ~richacl_mode_to_mask(mode)))
+			return 0;
+	}
+
+	/*
+	 * Keep in sync with the capability checks in generic_permission().
+	 */
+	if (!(mask & ~ACE4_POSIX_MODE_ALL)) {
+		/*
+		 * Read/write DACs are always overridable.
+		 * Executable DACs are overridable if at
+		 * least one exec bit is set.
+		 */
+		if (!(mask & ACE4_POSIX_MODE_EXEC) || execute_ok(inode))
+			if (capable(CAP_DAC_OVERRIDE))
+				return 0;
+	}
+	/*
+	 * Searching includes executable on directories, else just read.
+	 */
+	if (!(mask & ~(ACE4_READ_DATA | ACE4_LIST_DIRECTORY | ACE4_EXECUTE)) &&
+	    (S_ISDIR(inode->i_mode) || !(mask & ACE4_EXECUTE)))
+		if (capable(CAP_DAC_READ_SEARCH))
+			return 0;
+
+	return -EACCES;
+}
+EXPORT_SYMBOL_GPL(richacl_inode_permission);
+
+/**
+ * richacl_inode_change_ok - helper for implementing iop->setattr
+ * @inode: inode to check
+ * @attr: requested inode attribute changes
+ * @richacl_permission: permission function taking an inode and ACE4_* flags;
+ *	returns 0 when access is granted
+ *
+ * Keep in sync with inode_change_ok().  Returns 0 if the change is
+ * permitted, -EPERM otherwise.  May clear S_ISGID from attr->ia_mode as
+ * a side effect of a chmod by a non-group-member.
+ */
+int
+richacl_inode_change_ok(struct inode *inode, struct iattr *attr,
+			int (*richacl_permission)(struct inode *, unsigned int))
+{
+	unsigned int ia_valid = attr->ia_valid;
+
+	/* If force is set do it anyway. */
+	if (ia_valid & ATTR_FORCE)
+		return 0;
+
+	/* Make sure a caller can chown. */
+	if ((ia_valid & ATTR_UID) &&
+	    (current_fsuid() != inode->i_uid ||
+	     attr->ia_uid != inode->i_uid) &&
+	    (current_fsuid() != attr->ia_uid ||
+	     richacl_permission(inode, ACE4_WRITE_OWNER)) &&
+	    !capable(CAP_CHOWN))
+		goto error;
+
+	/* Make sure caller can chgrp. */
+	if ((ia_valid & ATTR_GID)) {
+		int in_group = in_group_p(attr->ia_gid);
+		if ((current_fsuid() != inode->i_uid ||
+		    (!in_group && attr->ia_gid != inode->i_gid)) &&
+		    (!in_group ||
+		    richacl_permission(inode, ACE4_WRITE_OWNER)) &&
+		    !capable(CAP_CHOWN))
+			goto error;
+	}
+
+	/* Make sure a caller can chmod. */
+	if (ia_valid & ATTR_MODE) {
+		if (current_fsuid() != inode->i_uid &&
+		    richacl_permission(inode, ACE4_WRITE_ACL) &&
+		    !capable(CAP_FOWNER))
+			goto error;
+		/* Also check the setgid bit! */
+		if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid :
+				inode->i_gid) && !capable(CAP_FSETID))
+			attr->ia_mode &= ~S_ISGID;
+	}
+
+	/* Check for setting the inode time. */
+	if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET)) {
+		if (current_fsuid() != inode->i_uid &&
+		    richacl_permission(inode, ACE4_WRITE_ATTRIBUTES) &&
+		    !capable(CAP_FOWNER))
+			goto error;
+	}
+	return 0;
+error:
+	return -EPERM;
+}
+EXPORT_SYMBOL_GPL(richacl_inode_change_ok);
--- /dev/null
+/*
+ * Copyright (C) 2006, 2010 Novell, Inc.
+ * Written by Andreas Gruenbacher <agruen@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/richacl_xattr.h>
+
+MODULE_LICENSE("GPL");
+
+/**
+ * richacl_from_xattr - convert a richacl xattr into the in-memory representation
+ * @value: xattr payload (untrusted; fully validated here)
+ * @size: size of @value in bytes
+ *
+ * Returns the decoded acl holding one reference, ERR_PTR(-EINVAL) if the
+ * xattr is malformed, or ERR_PTR(-ENOMEM) on allocation failure.
+ */
+struct richacl *
+richacl_from_xattr(const void *value, size_t size)
+{
+	const struct richacl_xattr *xattr_acl = value;
+	const struct richace_xattr *xattr_ace = (void *)(xattr_acl + 1);
+	struct richacl *acl;
+	struct richace *ace;
+	int count;
+
+	/* Header must fit, match our version, and use only known flags. */
+	if (size < sizeof(struct richacl_xattr) ||
+	    xattr_acl->a_version != ACL4_XATTR_VERSION ||
+	    (xattr_acl->a_flags & ~ACL4_VALID_FLAGS))
+		return ERR_PTR(-EINVAL);
+
+	count = le16_to_cpu(xattr_acl->a_count);
+	if (count > ACL4_XATTR_MAX_COUNT)
+		return ERR_PTR(-EINVAL);
+
+	acl = richacl_alloc(count);
+	if (!acl)
+		return ERR_PTR(-ENOMEM);
+
+	acl->a_flags = xattr_acl->a_flags;
+	acl->a_owner_mask = le32_to_cpu(xattr_acl->a_owner_mask);
+	if (acl->a_owner_mask & ~ACE4_VALID_MASK)
+		goto fail_einval;
+	acl->a_group_mask = le32_to_cpu(xattr_acl->a_group_mask);
+	if (acl->a_group_mask & ~ACE4_VALID_MASK)
+		goto fail_einval;
+	acl->a_other_mask = le32_to_cpu(xattr_acl->a_other_mask);
+	if (acl->a_other_mask & ~ACE4_VALID_MASK)
+		goto fail_einval;
+
+	richacl_for_each_entry(ace, acl) {
+		const char *who = (void *)(xattr_ace + 1), *end;
+		ssize_t used = (void *)who - value;
+
+		/* Bounds-check the fixed part, then find the NUL of e_who. */
+		if (used > size)
+			goto fail_einval;
+		end = memchr(who, 0, size - used);
+		if (!end)
+			goto fail_einval;
+
+		ace->e_type = le16_to_cpu(xattr_ace->e_type);
+		ace->e_flags = le16_to_cpu(xattr_ace->e_flags);
+		ace->e_mask = le32_to_cpu(xattr_ace->e_mask);
+		ace->u.e_id = le32_to_cpu(xattr_ace->e_id);
+
+		if (ace->e_flags & ~ACE4_VALID_FLAGS)
+			goto fail_einval;
+		if (ace->e_type > ACE4_ACCESS_DENIED_ACE_TYPE ||
+		    (ace->e_mask & ~ACE4_VALID_MASK))
+			goto fail_einval;
+
+		/* Empty who string means a numeric uid/gid entry. */
+		if (who == end) {
+			if (ace->u.e_id == -1)
+				goto fail_einval;  /* uid/gid needed */
+		} else if (richace_set_who(ace, who))
+			goto fail_einval;
+
+		/* Advance past the 4-byte-aligned who string. */
+		xattr_ace = (void *)who + ALIGN(end - who + 1, 4);
+	}
+
+	return acl;
+
+fail_einval:
+	richacl_put(acl);
+	return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(richacl_from_xattr);
+
+/**
+ * richacl_xattr_size - compute the size of the xattr representation of @acl
+ *
+ * Each entry contributes its fixed part plus either 4 bytes (numeric
+ * uid/gid) or the 4-byte-aligned, NUL-terminated special who string.
+ * Must be kept in sync with the layout written by richacl_to_xattr().
+ */
+size_t
+richacl_xattr_size(const struct richacl *acl)
+{
+	size_t size = sizeof(struct richacl_xattr);
+	const struct richace *ace;
+
+	richacl_for_each_entry(ace, acl) {
+		size += sizeof(struct richace_xattr) +
+			(richace_is_unix_id(ace) ? 4 :
+			 ALIGN(strlen(ace->u.e_who) + 1, 4));
+	}
+	return size;
+}
+EXPORT_SYMBOL_GPL(richacl_xattr_size);
+
+/**
+ * richacl_to_xattr - convert @acl into its xattr representation
+ * @acl: the richacl to convert
+ * @buffer: buffer of size richacl_xattr_size(@acl) for the result
+ *
+ * The caller must size @buffer with richacl_xattr_size(); no bounds
+ * checking is performed here.  Multi-byte fields are stored little
+ * endian.
+ */
+void
+richacl_to_xattr(const struct richacl *acl, void *buffer)
+{
+	struct richacl_xattr *xattr_acl = buffer;
+	struct richace_xattr *xattr_ace;
+	const struct richace *ace;
+
+	xattr_acl->a_version = ACL4_XATTR_VERSION;
+	xattr_acl->a_flags = acl->a_flags;
+	xattr_acl->a_count = cpu_to_le16(acl->a_count);
+
+	xattr_acl->a_owner_mask = cpu_to_le32(acl->a_owner_mask);
+	xattr_acl->a_group_mask = cpu_to_le32(acl->a_group_mask);
+	xattr_acl->a_other_mask = cpu_to_le32(acl->a_other_mask);
+
+	xattr_ace = (void *)(xattr_acl + 1);
+	richacl_for_each_entry(ace, acl) {
+		xattr_ace->e_type = cpu_to_le16(ace->e_type);
+		xattr_ace->e_flags = cpu_to_le16(ace->e_flags &
+			ACE4_VALID_FLAGS);
+		xattr_ace->e_mask = cpu_to_le32(ace->e_mask);
+		if (richace_is_unix_id(ace)) {
+			/* Numeric id; the who string is four zero bytes. */
+			xattr_ace->e_id = cpu_to_le32(ace->u.e_id);
+			memset(xattr_ace->e_who, 0, 4);
+			xattr_ace = (void *)xattr_ace->e_who + 4;
+		} else {
+			int sz = ALIGN(strlen(ace->u.e_who) + 1, 4);
+
+			/* Special who; e_id is -1, padding zeroed first. */
+			xattr_ace->e_id = cpu_to_le32(-1);
+			memset(xattr_ace->e_who + sz - 4, 0, 4);
+			strcpy(xattr_ace->e_who, ace->u.e_who);
+			xattr_ace = (void *)xattr_ace->e_who + sz;
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(richacl_to_xattr);
return NULL;
}
-/**
- * do_remount_sb - asks filesystem to change mount options.
- * @sb: superblock in question
- * @flags: numeric part of options
- * @data: the rest of options
- * @force: whether or not to force the change
- *
- * Alters the mount options of a mounted file system.
- */
-int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
+#define REMOUNT_FORCE 1
+#define REMOUNT_SHRINK_DCACHE 2
+
+static int __do_remount_sb(struct super_block *sb, int flags, void *data, int rflags)
{
int retval;
int remount_ro;
if (flags & MS_RDONLY)
acct_auto_close(sb);
- shrink_dcache_sb(sb);
+ if (rflags & REMOUNT_SHRINK_DCACHE)
+ shrink_dcache_sb(sb);
sync_filesystem(sb);
remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY);
/* If we are remounting RDONLY and current sb is read/write,
make sure there are no rw files opened */
if (remount_ro) {
- if (force) {
+ if (rflags & REMOUNT_FORCE) {
mark_files_ro(sb);
} else {
retval = sb_prepare_remount_readonly(sb);
if (sb->s_op->remount_fs) {
retval = sb->s_op->remount_fs(sb, &flags, data);
if (retval) {
- if (!force)
+ if (!(rflags & REMOUNT_FORCE))
goto cancel_readonly;
/* If forced remount, go ahead despite any errors */
WARN(1, "forced remount of a %s fs returned %i\n",
return retval;
}
+/**
+ * do_remount_sb - asks filesystem to change mount options.
+ * @sb: superblock in question
+ * @flags: numeric part of options
+ * @data: the rest of options
+ * @force: whether or not to force the change
+ *
+ * Alters the mount options of a mounted file system.
+ */
+int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
+{
+ return __do_remount_sb(sb, flags, data,
+ REMOUNT_SHRINK_DCACHE|(force? REMOUNT_FORCE : 0));
+}
+
static void do_emergency_remount(struct work_struct *work)
{
struct super_block *sb, *p = NULL;
}
s->s_flags |= MS_ACTIVE;
} else {
- do_remount_sb(s, flags, data, 0);
+ __do_remount_sb(s, flags, data, 0);
}
return dget(s->s_root);
}
MEM_KEEP(exit.rodata) \
} \
\
+ EH_FRAME \
+ \
/* Built-in module parameters. */ \
__param : AT(ADDR(__param) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___param) = .; \
BSS(bss_align) \
. = ALIGN(stop_align); \
VMLINUX_SYMBOL(__bss_stop) = .;
+
+#ifdef CONFIG_STACK_UNWIND
+#define EH_FRAME \
+ /* Unwind data binary search table */ \
+ . = ALIGN(8); \
+ .eh_frame_hdr : AT(ADDR(.eh_frame_hdr) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start_unwind_hdr) = .; \
+ *(.eh_frame_hdr) \
+ VMLINUX_SYMBOL(__end_unwind_hdr) = .; \
+ } \
+ /* Unwind data */ \
+ . = ALIGN(8); \
+ .eh_frame : AT(ADDR(.eh_frame) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start_unwind) = .; \
+ *(.eh_frame) \
+ VMLINUX_SYMBOL(__end_unwind) = .; \
+ }
+#else
+#define EH_FRAME
+#endif
enum blk_default_limits {
BLK_MAX_SEGMENTS = 128,
BLK_SAFE_MAX_SECTORS = 255,
+#ifndef CONFIG_KERNEL_DESKTOP
+ BLK_DEF_MAX_SECTORS = 2048,
+#else
BLK_DEF_MAX_SECTORS = 1024,
+#endif
BLK_MAX_SEGMENT_SIZE = 65536,
BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL,
};
--- /dev/null
+/*
+ * linux/drivers/video/bootsplash/bootsplash.h - splash screen definition.
+ *
+ * (w) 2001-2003 by Volker Poplawski, <volker@poplawski.de>
+ * Stefan Reinauer, <stepan@suse.de>
+ *
+ *
+ * idea and SuSE screen work by Ken Wimer, <wimer@suse.de>
+ */
+
+#ifndef __BOOTSPLASH_H
+#define __BOOTSPLASH_H
+
+# ifdef CONFIG_BOOTSPLASH
+
+struct fb_info;
+union pt {
+ u32 *ul;
+ u16 *us;
+ u8 *ub;
+};
+
+enum splash_color_format {
+ SPLASH_DEPTH_UNKNOWN = 0,
+ SPLASH_DEPTH_15 = 15,
+ SPLASH_DEPTH_16 = 16,
+ SPLASH_DEPTH_24_PACKED = 24,
+ SPLASH_DEPTH_24 = 32
+};
+
+#define splash_octpp(cf) ((((int)(cf)) + 1) >> 3) /* octets (bytes) per pixel for a splash_color_format */
+
+struct vc_data;
+struct fb_info;
+struct fb_cursor;
+struct splash_data;
+
+/* splash.c */
+extern int splash_prepare(struct vc_data *, struct fb_info *);
+extern void splash_init(void);
+extern int splash_verbose(void);
+
+/* splash_render.c */
+extern void splash_putcs(struct vc_data *vc, struct fb_info *info,
+ const unsigned short *s, int count,
+ int ypos, int xpos);
+extern void splash_sync_region(struct fb_info *info, int x, int y,
+ int width, int height);
+extern void splashcopy(u8 *dst, u8 *src, int height, int width,
+ int dstbytes, int srcbytes, int octpp);
+extern void splash_clear(struct vc_data *vc, struct fb_info *info, int sy,
+ int sx, int height, int width);
+extern void splash_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+ int sx, int dy, int dx, int height, int width);
+extern void splash_clear_margins(struct vc_data *vc, struct fb_info *info,
+ int bottom_only);
+extern int splash_cursor(struct fb_info *info, struct fb_cursor *cursor);
+extern void splash_bmove_redraw(struct vc_data *vc, struct fb_info *info,
+ int y, int sx, int dx, int width);
+extern void splash_blank(struct vc_data *vc, struct fb_info *info,
+ int blank);
+
+# define SPLASH_VERBOSE() splash_verbose()
+# define SPLASH_DATA(x) (x->splash_data)
+# define TEXT_WIDTH_FROM_SPLASH_DATA(x) (x->splash_data->splash_vc_text_wi)
+# define TEXT_HIGHT_FROM_SPLASH_DATA(x) (x->splash_data->splash_vc_text_he)
+/* vt.c */
+extern void con_remap_def_color(struct vc_data *vc, int new_color);
+
+# else
+# define splash_init()
+# define splash_verbose() 0
+# define SPLASH_VERBOSE()
+# define splash_blank(vc, info, blank)
+# define splash_bmove(vc, info, sy, sx, dy, dx, height, width)
+# define splash_bmove_redraw(vc, info, sy, sx, dx, width)
+# define splash_cursor(info, cursor)
+# define splash_clear(vc, info, sy, sx, height, width)
+# define splash_clear_margins(vc, info, bottom_only)
+# define splash_putcs(vc, info, s, count, ypos, xpos)
+
+# define SPLASH_DATA(x) 0
+# define TEXT_WIDTH_FROM_SPLASH_DATA(x) 0
+# define TEXT_HIGHT_FROM_SPLASH_DATA(x) 0
+# endif
+
+#endif
unsigned long vc_uni_pagedir;
unsigned long *vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */
bool vc_panic_force_write; /* when oops/panic this VC can accept forced output/blanking */
+#ifdef CONFIG_BOOTSPLASH
+ struct splash_data *vc_splash_data;
+#endif
/* additional information is in vt_kern.h */
};
extern int __dev_printk(const char *level, const struct device *dev,
struct va_format *vaf);
extern __printf(3, 4)
+
+#if defined(KMSG_COMPONENT) && (defined(CONFIG_KMSG_IDS) || defined(__KMSG_CHECKER))
+/* dev_printk_hash for message documentation */
+#if defined(__KMSG_CHECKER) && defined(KMSG_COMPONENT)
+
+/* generate magic string for scripts/kmsg-doc to parse */
+#define dev_printk_hash(level, dev, format, arg...) \
+ __KMSG_DEV(level _FMT_ format _ARGS_ dev, ## arg _END_)
+
+#elif defined(CONFIG_KMSG_IDS) && defined(KMSG_COMPONENT)
+
+int printk_dev_hash(const char *, const char *, const char *, ...);
+#define dev_printk_hash(level, dev, format, arg...) \
+ printk_dev_hash(level "%s.%06x: ", dev_driver_string(dev), \
+ "%s: " format, dev_name(dev), ## arg)
+
+#endif
+
+#define dev_printk(level, dev, format, arg...) \
+ dev_printk_hash(level , dev, format, ## arg)
+#define dev_emerg(dev, format, arg...) \
+ dev_printk_hash(KERN_EMERG , dev , format , ## arg)
+#define dev_alert(dev, format, arg...) \
+ dev_printk_hash(KERN_ALERT , dev , format , ## arg)
+#define dev_crit(dev, format, arg...) \
+ dev_printk_hash(KERN_CRIT , dev , format , ## arg)
+#define dev_err(dev, format, arg...) \
+ dev_printk_hash(KERN_ERR , dev , format , ## arg)
+#define dev_warn(dev, format, arg...) \
+ dev_printk_hash(KERN_WARNING , dev , format , ## arg)
+#define dev_notice(dev, format, arg...) \
+ dev_printk_hash(KERN_NOTICE , dev , format , ## arg)
+#define _dev_info(dev, format, arg...) \
+ dev_printk_hash(KERN_INFO , dev , format , ## arg)
+#else
int dev_printk(const char *level, const struct device *dev,
const char *fmt, ...)
;
int dev_notice(const struct device *dev, const char *fmt, ...);
extern __printf(2, 3)
int _dev_info(const struct device *dev, const char *fmt, ...);
-
+#endif
#else
static inline int __dev_printk(const char *level, const struct device *dev,
*/
region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio);
sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region);
+region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector);
void *dm_rh_region_context(struct dm_region *reg);
/*
int dm_rh_flush(struct dm_region_hash *rh);
/* Inc/dec pending count on regions. */
+void dm_rh_inc(struct dm_region_hash *rh, region_t region);
void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios);
void dm_rh_dec(struct dm_region_hash *rh, region_t region);
/* Delay bios on regions. */
void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio);
+void dm_rh_delay_by_region(struct dm_region_hash *rh, struct bio *bio,
+ region_t region);
void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio);
void *fbcon_par; /* fbcon use-only private area */
/* From here on everything is device dependent */
void *par;
+#ifdef CONFIG_BOOTSPLASH
+ struct splash_data *splash_data;
+ char fb_cursordata[64];
+#endif
/* we need the PCI or similar aperture base/size not
smem_start/size as smem_start may just be an object
allocated inside the aperture so may not actually overlap */
#define MS_VERBOSE 32768 /* War is peace. Verbosity is silence.
MS_VERBOSE is deprecated. */
#define MS_SILENT 32768
-#define MS_POSIXACL (1<<16) /* VFS does not apply the umask */
+#define MS_POSIXACL (1<<16) /* Supports POSIX ACLs */
#define MS_UNBINDABLE (1<<17) /* change to unbindable */
#define MS_PRIVATE (1<<18) /* change to private */
#define MS_SLAVE (1<<19) /* change to slave */
#define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */
#define MS_I_VERSION (1<<23) /* Update inode I_version field */
#define MS_STRICTATIME (1<<24) /* Always perform atime updates */
+#define MS_RICHACL (1<<25) /* Supports richacls */
#define MS_NOSEC (1<<28)
#define MS_BORN (1<<29)
#define MS_ACTIVE (1<<30)
#define IS_APPEND(inode) ((inode)->i_flags & S_APPEND)
#define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE)
#define IS_POSIXACL(inode) __IS_FLG(inode, MS_POSIXACL)
+#define IS_RICHACL(inode) __IS_FLG(inode, MS_RICHACL)
#define IS_DEADDIR(inode) ((inode)->i_flags & S_DEAD)
#define IS_NOCMTIME(inode) ((inode)->i_flags & S_NOCMTIME)
#define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT)
#define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC)
+/*
+ * IS_ACL() tells the VFS to not apply the umask
+ * and use iop->check_acl for acl permission checks when defined.
+ */
+#define IS_ACL(inode) __IS_FLG(inode, MS_POSIXACL | MS_RICHACL)
+
/* the read-only stuff doesn't really belong here, but any other place is
probably as bad and I don't want to create yet another include file. */
void (*truncate_range)(struct inode *, loff_t, loff_t);
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
u64 len);
+ int (*may_create) (struct inode *, int);
+ int (*may_delete) (struct inode *, struct inode *, int);
+
+
} ____cacheline_aligned;
struct seq_file;
extern int panic_on_oops;
extern int panic_on_unrecovered_nmi;
extern int panic_on_io_nmi;
+extern int unsupported;
extern int sysctl_panic_on_stackoverflow;
extern const char *print_tainted(void);
extern void add_taint(unsigned flag);
+extern void add_nonfatal_taint(unsigned flag);
extern int test_taint(unsigned flag);
extern unsigned long get_taint(void);
extern int root_mountflags;
#define TAINT_FIRMWARE_WORKAROUND 11
#define TAINT_OOT_MODULE 12
+#ifdef CONFIG_ENTERPRISE_SUPPORT
+/*
+ * Take the upper bits to hopefully allow them
+ * to stay the same for more than one release.
+ */
+#define TAINT_NO_SUPPORT 30
+#define TAINT_EXTERNAL_SUPPORT 31
+#endif
+
extern const char hex_asc[];
#define hex_asc_lo(x) hex_asc[((x) & 0x0f)]
#define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4]
void task_dirty_inc(struct task_struct *tsk);
/* readahead.c */
+#ifndef CONFIG_KERNEL_DESKTOP
+#define VM_MAX_READAHEAD 512 /* kbytes */
+#else
#define VM_MAX_READAHEAD 128 /* kbytes */
+#endif
#define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
/* Size of RO sections of the module (text+rodata) */
unsigned int init_ro_size, core_ro_size;
+ /* The handle returned from unwind_add_table. */
+ void *unwind_info;
+
/* Arch-specific module values */
struct mod_arch_specific arch;
bool is_module_address(unsigned long addr);
bool is_module_percpu_address(unsigned long addr);
bool is_module_text_address(unsigned long addr);
+const char *supported_printable(int taint);
static inline int within_module_core(unsigned long addr, struct module *mod)
{
#define NFS_INO_PNFS_COMMIT (8) /* use pnfs code for commit */
#define NFS_INO_LAYOUTCOMMIT (9) /* layoutcommit required */
#define NFS_INO_LAYOUTCOMMITTING (10) /* layoutcommit inflight */
+#define NFS_INO_SEEN_GETATTR (11) /* flag to track if app is calling
+ * getattr in a directory during
+ * readdir
+ */
static inline struct nfs_inode *NFS_I(const struct inode *inode)
{
#define pr_fmt(fmt) fmt
#endif
+#if defined(__KMSG_CHECKER) && defined(KMSG_COMPONENT)
+
+/* generate magic string for scripts/kmsg-doc to parse */
+#define pr_printk_hash(level, format, ...) \
+ __KMSG_PRINT(level _FMT_ format _ARGS_ #__VA_ARGS__ _END_)
+
+#elif defined(CONFIG_KMSG_IDS) && defined(KMSG_COMPONENT)
+
+int printk_hash(const char *, const char *, ...);
+#define pr_printk_hash(level, format, ...) \
+ printk_hash(level KMSG_COMPONENT ".%06x" ": ", format, ##__VA_ARGS__)
+
+#else /* !defined(CONFIG_KMSG_IDS) */
+
+#define pr_printk_hash(level, format, ...) \
+ printk(level pr_fmt(format), ##__VA_ARGS__)
+
+#endif
+
#define pr_emerg(fmt, ...) \
- printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
+ pr_printk_hash(KERN_EMERG, fmt, ##__VA_ARGS__)
#define pr_alert(fmt, ...) \
- printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
+ pr_printk_hash(KERN_ALERT, fmt, ##__VA_ARGS__)
#define pr_crit(fmt, ...) \
- printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
+ pr_printk_hash(KERN_CRIT, fmt, ##__VA_ARGS__)
#define pr_err(fmt, ...) \
- printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
+ pr_printk_hash(KERN_ERR, fmt, ##__VA_ARGS__)
#define pr_warning(fmt, ...) \
- printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+ pr_printk_hash(KERN_WARNING, fmt, ##__VA_ARGS__)
#define pr_warn pr_warning
#define pr_notice(fmt, ...) \
- printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+ pr_printk_hash(KERN_NOTICE, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...) \
- printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
+ pr_printk_hash(KERN_INFO, fmt, ##__VA_ARGS__)
#define pr_cont(fmt, ...) \
- printk(KERN_CONT fmt, ##__VA_ARGS__)
+ pr_printk_hash(KERN_CONT, fmt, ##__VA_ARGS__)
/* pr_devel() should produce zero code unless DEBUG is defined */
#ifdef DEBUG
--- /dev/null
+/*
+ * Copyright (C) 2006, 2010 Novell, Inc.
+ * Written by Andreas Gruenbacher <agruen@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __RICHACL_H
+#define __RICHACL_H
+#include <linux/slab.h>
+
+struct richace {
+ unsigned short e_type;
+ unsigned short e_flags;
+ unsigned int e_mask;
+ union {
+ unsigned int e_id;
+ const char *e_who;
+ } u;
+};
+
+struct richacl {
+ atomic_t a_refcount;
+ unsigned int a_owner_mask;
+ unsigned int a_group_mask;
+ unsigned int a_other_mask;
+ unsigned short a_count;
+ unsigned short a_flags;
+ struct richace a_entries[0];
+};
+
+#define richacl_for_each_entry(_ace, _acl) \
+ for (_ace = _acl->a_entries; \
+ _ace != _acl->a_entries + _acl->a_count; \
+ _ace++)
+
+#define richacl_for_each_entry_reverse(_ace, _acl) \
+ for (_ace = _acl->a_entries + _acl->a_count - 1; \
+ _ace != _acl->a_entries - 1; \
+ _ace--)
+
+/* a_flags values */
+#define ACL4_AUTO_INHERIT 0x01
+#define ACL4_PROTECTED 0x02
+/*#define ACL4_DEFAULTED 0x04*/
+
+#define ACL4_VALID_FLAGS ( \
+ ACL4_AUTO_INHERIT | \
+ ACL4_PROTECTED)
+
+/* e_type values */
+#define ACE4_ACCESS_ALLOWED_ACE_TYPE 0x0000
+#define ACE4_ACCESS_DENIED_ACE_TYPE 0x0001
+/*#define ACE4_SYSTEM_AUDIT_ACE_TYPE 0x0002*/
+/*#define ACE4_SYSTEM_ALARM_ACE_TYPE 0x0003*/
+
+/* e_flags bitflags */
+#define ACE4_FILE_INHERIT_ACE 0x0001
+#define ACE4_DIRECTORY_INHERIT_ACE 0x0002
+#define ACE4_NO_PROPAGATE_INHERIT_ACE 0x0004
+#define ACE4_INHERIT_ONLY_ACE 0x0008
+/*#define ACE4_SUCCESSFUL_ACCESS_ACE_FLAG 0x0010*/
+/*#define ACE4_FAILED_ACCESS_ACE_FLAG 0x0020*/
+#define ACE4_IDENTIFIER_GROUP 0x0040
+#define ACE4_INHERITED_ACE 0x0080
+/* in-memory representation only */
+#define ACE4_SPECIAL_WHO 0x4000
+
+#define ACE4_VALID_FLAGS ( \
+ ACE4_FILE_INHERIT_ACE | \
+ ACE4_DIRECTORY_INHERIT_ACE | \
+ ACE4_NO_PROPAGATE_INHERIT_ACE | \
+ ACE4_INHERIT_ONLY_ACE | \
+ ACE4_IDENTIFIER_GROUP | \
+ ACE4_INHERITED_ACE)
+
+/* e_mask bitflags */
+#define ACE4_READ_DATA 0x00000001
+#define ACE4_LIST_DIRECTORY 0x00000001
+#define ACE4_WRITE_DATA 0x00000002
+#define ACE4_ADD_FILE 0x00000002
+#define ACE4_APPEND_DATA 0x00000004
+#define ACE4_ADD_SUBDIRECTORY 0x00000004
+#define ACE4_READ_NAMED_ATTRS 0x00000008
+#define ACE4_WRITE_NAMED_ATTRS 0x00000010
+#define ACE4_EXECUTE 0x00000020
+#define ACE4_DELETE_CHILD 0x00000040
+#define ACE4_READ_ATTRIBUTES 0x00000080
+#define ACE4_WRITE_ATTRIBUTES 0x00000100
+#define ACE4_WRITE_RETENTION 0x00000200
+#define ACE4_WRITE_RETENTION_HOLD 0x00000400
+#define ACE4_DELETE 0x00010000
+#define ACE4_READ_ACL 0x00020000
+#define ACE4_WRITE_ACL 0x00040000
+#define ACE4_WRITE_OWNER 0x00080000
+#define ACE4_SYNCHRONIZE 0x00100000
+
+/* Valid ACE4_* flags for directories and non-directories */
+#define ACE4_VALID_MASK ( \
+ ACE4_READ_DATA | ACE4_LIST_DIRECTORY | \
+ ACE4_WRITE_DATA | ACE4_ADD_FILE | \
+ ACE4_APPEND_DATA | ACE4_ADD_SUBDIRECTORY | \
+ ACE4_READ_NAMED_ATTRS | \
+ ACE4_WRITE_NAMED_ATTRS | \
+ ACE4_EXECUTE | \
+ ACE4_DELETE_CHILD | \
+ ACE4_READ_ATTRIBUTES | \
+ ACE4_WRITE_ATTRIBUTES | \
+ ACE4_WRITE_RETENTION | \
+ ACE4_WRITE_RETENTION_HOLD | \
+ ACE4_DELETE | \
+ ACE4_READ_ACL | \
+ ACE4_WRITE_ACL | \
+ ACE4_WRITE_OWNER | \
+ ACE4_SYNCHRONIZE)
+
+/*
+ * The POSIX permissions are supersets of the following NFSv4 permissions:
+ *
+ * - MAY_READ maps to READ_DATA or LIST_DIRECTORY, depending on the type
+ * of the file system object.
+ *
+ * - MAY_WRITE maps to WRITE_DATA or ACE4_APPEND_DATA for files, and to
+ * ADD_FILE, ACE4_ADD_SUBDIRECTORY, or ACE4_DELETE_CHILD for directories.
+ *
+ * - MAY_EXECUTE maps to ACE4_EXECUTE.
+ *
+ * (Some of these NFSv4 permissions have the same bit values.)
+ */
+#define ACE4_POSIX_MODE_READ ( \
+ ACE4_READ_DATA | ACE4_LIST_DIRECTORY)
+#define ACE4_POSIX_MODE_WRITE ( \
+ ACE4_WRITE_DATA | ACE4_ADD_FILE | \
+ ACE4_APPEND_DATA | ACE4_ADD_SUBDIRECTORY | \
+ ACE4_DELETE_CHILD)
+#define ACE4_POSIX_MODE_EXEC ( \
+ ACE4_EXECUTE)
+#define ACE4_POSIX_MODE_ALL (ACE4_POSIX_MODE_READ | ACE4_POSIX_MODE_WRITE | \
+ ACE4_POSIX_MODE_EXEC)
+
+/* These permissions are always allowed no matter what the acl says. */
+#define ACE4_POSIX_ALWAYS_ALLOWED ( \
+ ACE4_SYNCHRONIZE | \
+ ACE4_READ_ATTRIBUTES | \
+ ACE4_READ_ACL)
+
+/**
+ * richacl_get - grab another reference to a richacl handle
+ */
+static inline struct richacl *
+richacl_get(struct richacl *acl)
+{
+ if (acl)
+ atomic_inc(&acl->a_refcount);
+ return acl;
+}
+
+/**
+ * richacl_put - free a richacl handle
+ */
+static inline void
+richacl_put(struct richacl *acl)
+{
+ if (acl && atomic_dec_and_test(&acl->a_refcount))
+ kfree(acl);
+}
+
+static inline int
+richacl_is_auto_inherit(const struct richacl *acl)
+{
+ return acl->a_flags & ACL4_AUTO_INHERIT;
+}
+
+static inline int
+richacl_is_protected(const struct richacl *acl)
+{
+ return acl->a_flags & ACL4_PROTECTED;
+}
+
+/*
+ * Special e_who identifiers: we use these pointer values in comparisons
+ * instead of doing a strcmp.
+ */
+extern const char richace_owner_who[];
+extern const char richace_group_who[];
+extern const char richace_everyone_who[];
+
+/**
+ * richace_is_owner - check if @ace is an OWNER@ entry
+ */
+static inline int
+richace_is_owner(const struct richace *ace)
+{
+ return (ace->e_flags & ACE4_SPECIAL_WHO) &&
+ ace->u.e_who == richace_owner_who;
+}
+
+/**
+ * richace_is_group - check if @ace is a GROUP@ entry
+ */
+static inline int
+richace_is_group(const struct richace *ace)
+{
+ return (ace->e_flags & ACE4_SPECIAL_WHO) &&
+ ace->u.e_who == richace_group_who;
+}
+
+/**
+ * richace_is_everyone - check if @ace is an EVERYONE@ entry
+ */
+static inline int
+richace_is_everyone(const struct richace *ace)
+{
+ return (ace->e_flags & ACE4_SPECIAL_WHO) &&
+ ace->u.e_who == richace_everyone_who;
+}
+
+/**
+ * richace_is_unix_id - check if @ace applies to a specific uid or gid
+ */
+static inline int
+richace_is_unix_id(const struct richace *ace)
+{
+ return !(ace->e_flags & ACE4_SPECIAL_WHO);
+}
+
+/**
+ * richace_is_inherit_only - check if @ace is for inheritance only
+ *
+ * ACEs with the %ACE4_INHERIT_ONLY_ACE flag set have no effect during
+ * permission checking.
+ */
+static inline int
+richace_is_inherit_only(const struct richace *ace)
+{
+ return ace->e_flags & ACE4_INHERIT_ONLY_ACE;
+}
+
+/**
+ * richace_is_inheritable - check if @ace is inheritable
+ */
+static inline int
+richace_is_inheritable(const struct richace *ace)
+{
+ return ace->e_flags & (ACE4_FILE_INHERIT_ACE |
+ ACE4_DIRECTORY_INHERIT_ACE);
+}
+
+/**
+ * richace_clear_inheritance_flags - clear all inheritance flags in @ace
+ */
+static inline void
+richace_clear_inheritance_flags(struct richace *ace)
+{
+ ace->e_flags &= ~(ACE4_FILE_INHERIT_ACE |
+ ACE4_DIRECTORY_INHERIT_ACE |
+ ACE4_NO_PROPAGATE_INHERIT_ACE |
+ ACE4_INHERIT_ONLY_ACE);
+}
+
+/**
+ * richace_is_allow - check if @ace is an %ALLOW type entry
+ */
+static inline int
+richace_is_allow(const struct richace *ace)
+{
+ return ace->e_type == ACE4_ACCESS_ALLOWED_ACE_TYPE;
+}
+
+/**
+ * richace_is_deny - check if @ace is a %DENY type entry
+ */
+static inline int
+richace_is_deny(const struct richace *ace)
+{
+ return ace->e_type == ACE4_ACCESS_DENIED_ACE_TYPE;
+}
+
+extern struct richacl *richacl_alloc(int);
+extern int richace_is_same_identifier(const struct richace *,
+ const struct richace *);
+extern int richace_set_who(struct richace *, const char *);
+extern int richacl_masks_to_mode(const struct richacl *);
+extern unsigned int richacl_mode_to_mask(mode_t);
+extern unsigned int richacl_want_to_mask(int);
+extern void richacl_compute_max_masks(struct richacl *);
+extern struct richacl *richacl_chmod(struct richacl *, mode_t);
+extern int richacl_permission(struct inode *, const struct richacl *,
+ unsigned int);
+extern struct richacl *richacl_inherit(const struct richacl *, struct inode *);
+extern int richacl_equiv_mode(const struct richacl *, mode_t *);
+
+/* richacl_inode.c */
+
+#ifdef CONFIG_FS_RICHACL
+extern int richacl_may_create(struct inode *, int,
+ int (*)(struct inode *, unsigned int));
+extern int richacl_may_delete(struct inode *, struct inode *, int,
+ int (*)(struct inode *, unsigned int));
+extern int richacl_inode_permission(struct inode *, const struct richacl *,
+ unsigned int);
+extern int richacl_inode_change_ok(struct inode *, struct iattr *,
+ int (*)(struct inode *, unsigned int));
+#else
+static inline int
+richacl_inode_change_ok(struct inode *inode, struct iattr *attr,
+ int (*richacl_permission)(struct inode *inode,
+ unsigned int mask))
+{
+ return -EPERM;
+}
+#endif
+
+#endif /* __RICHACL_H */
--- /dev/null
+/*
+ * Copyright (C) 2006, 2010 Novell, Inc.
+ * Written by Andreas Gruenbacher <agruen@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __RICHACL_XATTR_H
+#define __RICHACL_XATTR_H
+
+#include <linux/richacl.h>
+
+#define RICHACL_XATTR "system.richacl"
+
+struct richace_xattr {
+ __le16 e_type;
+ __le16 e_flags;
+ __le32 e_mask;
+ __le32 e_id;
+ char e_who[0];
+};
+
+struct richacl_xattr {
+ unsigned char a_version;
+ unsigned char a_flags;
+ __le16 a_count;
+ __le32 a_owner_mask;
+ __le32 a_group_mask;
+ __le32 a_other_mask;
+};
+
+#define ACL4_XATTR_VERSION 0
+#define ACL4_XATTR_MAX_COUNT 1024
+
+extern struct richacl *richacl_from_xattr(const void *, size_t);
+extern size_t richacl_xattr_size(const struct richacl *acl);
+extern void richacl_to_xattr(const struct richacl *, void *);
+
+#endif /* __RICHACL_XATTR_H */
bool (*)(struct rpc_task *, void *),
void *);
void rpc_wake_up_status(struct rpc_wait_queue *, int);
+void rpc_wake_up_softconn_status(struct rpc_wait_queue *, int);
int rpc_queue_empty(struct rpc_wait_queue *);
void rpc_delay(struct rpc_task *, unsigned long);
void * rpc_malloc(struct rpc_task *, size_t);
--- /dev/null
+#ifndef _LINUX_UNWIND_H
+#define _LINUX_UNWIND_H
+
+/*
+ * Copyright (C) 2002-2009 Novell, Inc.
+ * Jan Beulich <jbeulich@novell.com>
+ * This code is released under version 2 of the GNU GPL.
+ *
+ * A simple API for unwinding kernel stacks. This is used for
+ * debugging and error reporting purposes. The kernel doesn't need
+ * full-blown stack unwinding with all the bells and whistles, so there
+ * is not much point in implementing the full Dwarf2 unwind API.
+ */
+
+#include <linux/linkage.h>
+
+struct module;
+struct stacktrace_ops;
+struct unwind_frame_info;
+
+typedef asmlinkage int (*unwind_callback_fn)(struct unwind_frame_info *,
+ const struct stacktrace_ops *,
+ void *);
+
+#ifdef CONFIG_STACK_UNWIND
+
+#include <asm/unwind.h>
+#include <asm/stacktrace.h>
+
+#ifndef ARCH_UNWIND_SECTION_NAME
+#define ARCH_UNWIND_SECTION_NAME ".eh_frame"
+#endif
+
+/*
+ * Initialize unwind support.
+ */
+extern void unwind_init(void);
+extern void unwind_setup(void);
+
+#ifdef CONFIG_MODULES
+
+extern void *unwind_add_table(struct module *,
+ const void *table_start,
+ unsigned long table_size);
+
+extern void unwind_remove_table(void *handle, int init_only);
+
+#endif
+
+extern int unwind_init_frame_info(struct unwind_frame_info *,
+ struct task_struct *,
+ /*const*/ struct pt_regs *);
+
+/*
+ * Prepare to unwind a blocked task.
+ */
+extern int unwind_init_blocked(struct unwind_frame_info *,
+ struct task_struct *);
+
+/*
+ * Prepare to unwind the currently running thread.
+ */
+extern int unwind_init_running(struct unwind_frame_info *,
+ unwind_callback_fn,
+ const struct stacktrace_ops *,
+ void *data);
+
+/*
+ * Unwind to the previous frame. Returns 0 if successful, negative
+ * number in case of an error.
+ */
+extern int unwind(struct unwind_frame_info *);
+
+/*
+ * Unwind until the return pointer is in user-land (or until an error
+ * occurs). Returns 0 if successful, negative number in case of
+ * error.
+ */
+extern int unwind_to_user(struct unwind_frame_info *);
+
+#else /* CONFIG_STACK_UNWIND */
+
+struct unwind_frame_info {};
+
+static inline void unwind_init(void) {}
+static inline void unwind_setup(void) {}
+
+#ifdef CONFIG_MODULES
+
+static inline void *unwind_add_table(struct module *mod,
+ const void *table_start,
+ unsigned long table_size)
+{
+ return NULL;
+}
+
+#endif
+
+static inline void unwind_remove_table(void *handle, int init_only)
+{
+}
+
+static inline int unwind_init_frame_info(struct unwind_frame_info *info,
+ struct task_struct *tsk,
+ const struct pt_regs *regs)
+{
+ return -ENOSYS;
+}
+
+static inline int unwind_init_blocked(struct unwind_frame_info *info,
+ struct task_struct *tsk)
+{
+ return -ENOSYS;
+}
+
+static inline int unwind_init_running(struct unwind_frame_info *info,
+ unwind_callback_fn cb,
+ const struct stacktrace_ops *ops,
+ void *data)
+{
+ return -ENOSYS;
+}
+
+static inline int unwind(struct unwind_frame_info *info)
+{
+ return -ENOSYS;
+}
+
+static inline int unwind_to_user(struct unwind_frame_info *info)
+{
+ return -ENOSYS;
+}
+
+#endif /* CONFIG_STACK_UNWIND */
+#endif /* _LINUX_UNWIND_H */
extern void __starget_for_each_device(struct scsi_target *, void *,
void (*fn)(struct scsi_device *,
void *));
+extern struct scsi_device *scsi_device_from_queue(struct request_queue *);
/* only exposed to implement shost_for_each_device */
extern struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *,
/* SCSI Transport Broadcast Groups */
/* leaving groups 0 and 1 unassigned */
#define SCSI_NL_GRP_FC_EVENTS (1<<2) /* Group 2 */
-#define SCSI_NL_GRP_CNT 3
+#define SCSI_NL_GRP_ML_EVENTS (1<<3) /* Group 3 */
+#define SCSI_NL_GRP_CNT 4
/* SCSI_TRANSPORT_MSG event message header */
/* scsi_nl_hdr->transport value */
#define SCSI_NL_TRANSPORT 0
#define SCSI_NL_TRANSPORT_FC 1
-#define SCSI_NL_MAX_TRANSPORTS 2
+#define SCSI_NL_TRANSPORT_ML 2
+#define SCSI_NL_MAX_TRANSPORTS 3
/* Transport-based scsi_nl_hdr->msgtype values are defined in each transport */
--- /dev/null
+/*
+ * SCSI Midlayer Netlink Interface
+ *
+ * Copyright (C) 2008 Hannes Reinecke, SuSE Linux Products GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef SCSI_NETLINK_ML_H
+#define SCSI_NETLINK_ML_H
+
+#include <scsi/scsi_netlink.h>
+
+/*
+ * This file intended to be included by both kernel and user space
+ */
+
+/*
+ * SCSI Midlayer Message Types
+ */
+ /* kernel -> user */
+#define ML_NL_SCSI_SENSE 0x0100
+ /* user -> kernel */
+/* none */
+
+
+/*
+ * Message Structures :
+ */
+
+/* macro to round up message lengths to 8-byte boundary */
+#define SCSI_NL_MSGALIGN(len) (((len) + 7) & ~7)
+
+
+/*
+ * SCSI Midlayer SCSI Sense messages :
+ * ML_NL_SCSI_SENSE
+ *
+ */
+struct scsi_nl_sense_msg {
+ struct scsi_nl_hdr snlh; /* must be 1st element ! */
+ uint64_t seconds;
+ u64 id;
+ u64 lun;
+ u16 host_no;
+ u16 channel;
+ u32 sense;
+} __attribute__((aligned(sizeof(uint64_t))));
+
+
+#endif /* SCSI_NETLINK_ML_H */
+
+config SUSE_KERNEL
+ def_bool y
+
+config ENTERPRISE_SUPPORT
+ bool "Enable enterprise support facility"
+ depends on SUSE_KERNEL
+ help
+ This feature enables the handling of the "supported" module flag.
+ This flag can be used to report unsupported module loads or even
+ refuse them entirely. It is useful when ensuring that the kernel
+ remains in a state that Novell Technical Services, or its
+ technical partners, is prepared to support.
+
+ Modules in the list of supported modules will be marked supported
+ on build. The default enforcement mode is to report, but not
+ deny, loading of unsupported modules.
+
+ If you aren't building a kernel for an enterprise distribution,
+ say n.
+
+config SPLIT_PACKAGE
+ bool "Split the kernel package into multiple RPMs"
+ depends on SUSE_KERNEL && MODULES
+ help
+ This is an option used by the kernel packaging infrastructure
+ to split kernel modules into different packages. It isn't used
+ by the kernel itself, but allows the packager to make
+ decisions on a per-config basis.
+
+ If you aren't packaging a kernel for distribution, it's safe to
+ say n.
+
+config KERNEL_DESKTOP
+ bool "Kernel to suit desktop workloads"
+ help
+ This is an option used to tune kernel parameters to better suit
+ desktop workloads.
+
config ARCH
string
option env="ARCH"
menuconfig CGROUPS
boolean "Control Group support"
depends on EVENTFD
+ default !KERNEL_DESKTOP
help
This option adds support for grouping sets of processes together, for
use with process control subsystems such as Cpusets, CFS, memory
menuconfig CGROUP_SCHED
bool "Group CPU scheduler"
- default n
+ default !KERNEL_DESKTOP
help
This feature lets CPU scheduler recognize task groups and control CPU
bandwidth allocation to such task groups. It uses cgroups to group
#include <linux/rmap.h>
#include <linux/mempolicy.h>
#include <linux/key.h>
+#include <linux/unwind.h>
#include <linux/buffer_head.h>
#include <linux/page_cgroup.h>
#include <linux/debug_locks.h>
* Need to run as early as possible, to initialize the
* lockdep hash:
*/
+ unwind_init();
lockdep_init();
smp_setup_processor_id();
debug_objects_early_init();
mm_init_owner(&init_mm, &init_task);
mm_init_cpumask(&init_mm);
setup_command_line(command_line);
+ unwind_setup();
setup_nr_cpu_ids();
setup_per_cpu_areas();
smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
choice
prompt "Timer frequency"
+ default HZ_1000 if KERNEL_DESKTOP
default HZ_250
help
Allows the configuration of the timer frequency. It is customary
choice
prompt "Preemption Model"
+ default PREEMPT if KERNEL_DESKTOP
default PREEMPT_NONE
config PREEMPT_NONE
obj-$(CONFIG_UID16) += uid16.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_KALLSYMS) += kallsyms.o
+obj-$(CONFIG_STACK_UNWIND) += unwind.o
obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o
obj-$(CONFIG_KEXEC) += kexec.o
obj-$(CONFIG_BACKTRACE_SELF_TEST) += backtracetest.o
struct kobject *kernel_kobj;
EXPORT_SYMBOL_GPL(kernel_kobj);
+#ifdef CONFIG_ENTERPRISE_SUPPORT
+/*
+ * supported_printable - map a taint bitmask to a human readable support
+ * status string for /sys/kernel/supported and the per-module attribute.
+ *
+ * @taint is a bitmask of (1 << TAINT_*) flags (as returned by get_taint()
+ * and as stored in mod->taints).  The TAINT_* constants are bit numbers,
+ * not masks, and must be shifted before testing: TAINT_PROPRIETARY_MODULE
+ * is bit 0, so using it unshifted would always test false.
+ */
+const char *supported_printable(int taint)
+{
+	int mask = (1 << TAINT_PROPRIETARY_MODULE) | (1 << TAINT_NO_SUPPORT);
+
+	if ((taint & mask) == mask)
+		return "No, Proprietary and Unsupported modules are loaded";
+	else if (taint & (1 << TAINT_PROPRIETARY_MODULE))
+		return "No, Proprietary modules are loaded";
+	else if (taint & (1 << TAINT_NO_SUPPORT))
+		return "No, Unsupported modules are loaded";
+	else if (taint & (1 << TAINT_EXTERNAL_SUPPORT))
+		return "Yes, External";
+	else
+		return "Yes";
+}
+
+/* sysfs show handler for the read-only /sys/kernel/supported attribute. */
+static ssize_t supported_show(struct kobject *kobj,
+			      struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%s\n", supported_printable(get_taint()));
+}
+KERNEL_ATTR_RO(supported);
+#endif
+
static struct attribute * kernel_attrs[] = {
&fscaps_attr.attr,
#if defined(CONFIG_HOTPLUG)
&kexec_crash_size_attr.attr,
&vmcoreinfo_attr.attr,
#endif
+#ifdef CONFIG_ENTERPRISE_SUPPORT
+ &supported_attr.attr,
+#endif
NULL
};
#include <linux/device.h>
#include <linux/string.h>
#include <linux/mutex.h>
+#include <linux/unwind.h>
#include <linux/rculist.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
/* If this is set, the section belongs in the init part of the module */
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
+#ifdef CONFIG_ENTERPRISE_SUPPORT
+/* Allow unsupported modules switch. */
+/*
+ * 0 = refuse to load unsupported modules,
+ * 1 = warn when loading unsupported modules,
+ * 2 = load unsupported modules silently.
+ * Settable via the "unsupported=" boot parameter (below) and at runtime
+ * through /proc/sys/kernel/unsupported (see the sysctl table).
+ */
+#ifdef UNSUPPORTED_MODULES
+int unsupported = UNSUPPORTED_MODULES;
+#else
+int unsupported = 2; /* don't warn when loading unsupported modules. */
+#endif
+
+/* Parse the "unsupported=" kernel command line option. */
+static int __init unsupported_setup(char *str)
+{
+	get_option(&str, &unsupported);
+	return 1;
+}
+__setup("unsupported=", unsupported_setup);
+#endif
+
/*
* Mutex protects:
* 1) List of modules (also safely readable with preempt_disable),
struct _ddebug *debug;
unsigned int num_debug;
struct {
- unsigned int sym, str, mod, vers, info, pcpu;
+ unsigned int sym, str, mod, vers, info, pcpu, unwind;
} index;
};
#endif /* CONFIG_SMP */
+/*
+ * find_unwind - locate the architecture's DWARF unwind section in a
+ * module being loaded.  If present, mark it SHF_ALLOC so it is kept in
+ * the final module image.  Returns the section index, or 0 when the
+ * architecture defines no unwind section or the module has none.
+ */
+static unsigned int find_unwind(struct load_info *info)
+{
+	int section = 0;
+#ifdef ARCH_UNWIND_SECTION_NAME
+	section = find_sec(info, ARCH_UNWIND_SECTION_NAME);
+	if (section)
+		info->sechdrs[section].sh_flags |= SHF_ALLOC;
+#endif
+	return section;
+}
+
+/* Register the module's unwind table (if any) with the unwinder. */
+static void add_unwind_table(struct module *mod, struct load_info *info)
+{
+	int index = info->index.unwind;
+
+	/* Size of section 0 is 0, so this is ok if there is no unwind info. */
+	mod->unwind_info = unwind_add_table(mod,
+					    (void *)info->sechdrs[index].sh_addr,
+					    info->sechdrs[index].sh_size);
+}
+
+
#define MODINFO_ATTR(field) \
static void setup_modinfo_##field(struct module *mod, const char *s) \
{ \
buf[l++] = 'F';
if (mod->taints & (1 << TAINT_CRAP))
buf[l++] = 'C';
+#ifdef CONFIG_ENTERPRISE_SUPPORT
+ if (mod->taints & (1 << TAINT_NO_SUPPORT))
+ buf[l++] = 'N';
+ if (mod->taints & (1 << TAINT_EXTERNAL_SUPPORT))
+ buf[l++] = 'X';
+#endif
/*
* TAINT_FORCED_RMMOD: could be added.
* TAINT_UNSAFE_SMP, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
static struct module_attribute modinfo_taint =
__ATTR(taint, 0444, show_taint, NULL);
+#ifdef CONFIG_ENTERPRISE_SUPPORT
+/*
+ * Parse the "supported" modinfo tag:
+ *   absent        -> taint the module TAINT_NO_SUPPORT,
+ *   "external"    -> taint TAINT_EXTERNAL_SUPPORT,
+ *   "yes"         -> no taint,
+ *   anything else -> taint TAINT_NO_SUPPORT.
+ */
+static void setup_modinfo_supported(struct module *mod, const char *s)
+{
+	if (!s) {
+		mod->taints |= (1 << TAINT_NO_SUPPORT);
+		return;
+	}
+
+	if (strcmp(s, "external") == 0)
+		mod->taints |= (1 << TAINT_EXTERNAL_SUPPORT);
+	else if (strcmp(s, "yes"))
+		mod->taints |= (1 << TAINT_NO_SUPPORT);
+}
+
+/* /sys/module/<name>/supported: render this module's support status. */
+static ssize_t show_modinfo_supported(struct module_attribute *mattr,
+				      struct module_kobject *mk, char *buffer)
+{
+	return sprintf(buffer, "%s\n", supported_printable(mk->mod->taints));
+}
+
+static struct module_attribute modinfo_supported = {
+	.attr = { .name = "supported", .mode = 0444 },
+	.show = show_modinfo_supported,
+	.setup = setup_modinfo_supported,
+};
+#endif
+
static struct module_attribute *modinfo_attrs[] = {
&module_uevent,
&modinfo_version,
&modinfo_coresize,
&modinfo_initsize,
&modinfo_taint,
+#ifdef CONFIG_ENTERPRISE_SUPPORT
+ &modinfo_supported,
+#endif
#ifdef CONFIG_MODULE_UNLOAD
&modinfo_refcnt,
#endif
add_sect_attrs(mod, info);
add_notes_attrs(mod, info);
+#ifdef CONFIG_ENTERPRISE_SUPPORT
+ /* We don't use add_taint() here because it also disables lockdep. */
+ if (mod->taints & (1 << TAINT_EXTERNAL_SUPPORT))
+ add_nonfatal_taint(TAINT_EXTERNAL_SUPPORT);
+ else if (mod->taints == (1 << TAINT_NO_SUPPORT)) {
+ if (unsupported == 0) {
+ printk(KERN_WARNING "%s: module not supported by "
+ "Novell, refusing to load. To override, echo "
+ "1 > /proc/sys/kernel/unsupported\n", mod->name);
+ err = -ENOEXEC;
+ goto out_remove_attrs;
+ }
+ add_nonfatal_taint(TAINT_NO_SUPPORT);
+ if (unsupported == 1) {
+ printk(KERN_WARNING "%s: module is not supported by "
+ "Novell. Novell Technical Services may decline "
+ "your support request if it involves a kernel "
+ "fault.\n", mod->name);
+ }
+ }
+#endif
+
kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
return 0;
+out_remove_attrs:
+ remove_notes_attrs(mod);
+ remove_sect_attrs(mod);
+ del_usage_links(mod);
+ module_remove_modinfo_attrs(mod);
out_unreg_param:
module_param_sysfs_remove(mod);
out_unreg_holders:
/* Remove dynamic debug info */
ddebug_remove_module(mod->name);
+ unwind_remove_table(mod->unwind_info, 0);
+
/* Arch-specific cleanup. */
module_arch_cleanup(mod);
info->index.pcpu = find_pcpusec(info);
+ info->index.unwind = find_unwind(info);
+
/* Check module struct version now, before we try to use module. */
if (!check_modstruct_version(info->sechdrs, info->index.vers, mod))
return ERR_PTR(-ENOEXEC);
if (err < 0)
goto unlink;
+ /* Initialize unwind table */
+ add_unwind_table(mod, &info);
+
/* Get rid of temporary copy. */
free_copy(&info);
/* Drop initial reference. */
module_put(mod);
trim_init_extable(mod);
+ unwind_remove_table(mod->unwind_info, 1);
#ifdef CONFIG_KALLSYMS
mod->num_symtab = mod->core_num_syms;
mod->symtab = mod->core_symtab;
if (last_unloaded_module[0])
printk(" [last unloaded: %s]", last_unloaded_module);
printk("\n");
+#ifdef CONFIG_ENTERPRISE_SUPPORT
+ printk("Supported: %s\n", supported_printable(get_taint()));
+#endif
}
#ifdef CONFIG_MODVERSIONS
{ TAINT_CRAP, 'C', ' ' },
{ TAINT_FIRMWARE_WORKAROUND, 'I', ' ' },
{ TAINT_OOT_MODULE, 'O', ' ' },
+#ifdef CONFIG_ENTERPRISE_SUPPORT
+ { TAINT_NO_SUPPORT, 'N', ' ' },
+ { TAINT_EXTERNAL_SUPPORT, 'X', ' ' },
+#endif
};
/**
* 'C' - modules from drivers/staging are loaded.
* 'I' - Working around severe firmware bug.
* 'O' - Out-of-tree module has been loaded.
+ * 'N' - Unsupported modules loaded.
+ * 'X' - Modules with external support loaded.
*
* The string is overwritten by the next call to print_tainted().
*/
return tainted_mask;
}
+/*
+ * add_nonfatal_taint - set a taint flag without the extra side effects
+ * of add_taint() (which also turns off lockdep).
+ */
+void add_nonfatal_taint(unsigned flag)
+{
+	set_bit(flag, &tainted_mask);
+}
+
void add_taint(unsigned flag)
{
/*
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/rculist.h>
+#include <linux/jhash.h>
+#include <linux/device.h>
#include <asm/uaccess.h>
return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
}
-#ifdef CONFIG_KGDB_KDB
+#if defined(CONFIG_KGDB_KDB) || defined(CONFIG_DEBUG_KERNEL)
/* kdb dmesg command needs access to the syslog buffer. do_syslog()
* uses locks so it cannot be used during debugging. Just tell kdb
* where the start and end of the physical and logical logs are. This
rcu_read_unlock();
}
#endif
+
+#if defined CONFIG_PRINTK && defined CONFIG_KMSG_IDS
+
+/**
+ * printk_hash - print a kernel message include a hash over the message
+ * @prefix: message prefix including the ".%06x" for the hash
+ * @fmt: format string
+ */
+asmlinkage int printk_hash(const char *prefix, const char *fmt, ...)
+{
+	va_list args;
+	int r;
+
+	/*
+	 * @prefix deliberately serves as the format string here; callers
+	 * must pass a literal containing exactly one ".%06x"-style
+	 * conversion that receives the 24-bit jhash of @fmt.
+	 */
+	r = printk(prefix, jhash(fmt, strlen(fmt), 0) & 0xffffff);
+	va_start(args, fmt);
+	r += vprintk(fmt, args);
+	va_end(args);
+
+	return r;
+}
+EXPORT_SYMBOL(printk_hash);
+
+/**
+ * printk_dev_hash - print a kernel message include a hash over the message
+ * @prefix: message prefix including the ".%06x" for the hash
+ * @driver_name: name of the driver on whose behalf the message is printed
+ * @fmt: format string
+ */
+asmlinkage int printk_dev_hash(const char *prefix, const char *driver_name,
+			       const char *fmt, ...)
+{
+	va_list args;
+	int r;
+
+	/*
+	 * @prefix deliberately serves as the format string: callers must
+	 * pass a literal with a "%s" for @driver_name followed by a
+	 * ".%06x"-style conversion for the 24-bit jhash of @fmt.
+	 */
+	r = printk(prefix, driver_name, jhash(fmt, strlen(fmt), 0) & 0xffffff);
+	va_start(args, fmt);
+	r += vprintk(fmt, args);
+	va_end(args);
+
+	return r;
+}
+EXPORT_SYMBOL(printk_dev_hash);
+#endif
.extra1 = &pid_max_min,
.extra2 = &pid_max_max,
},
+#if defined(CONFIG_MODULES) && defined(CONFIG_ENTERPRISE_SUPPORT)
+ {
+ .procname = "unsupported",
+ .data = &unsupported,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+#endif
{
.procname = "panic_on_oops",
.data = &panic_on_oops,
.proc_handler = proc_dointvec,
},
#endif
+ {
+ .procname = "suid_dumpable",
+ .data = &suid_dumpable,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
#if defined(CONFIG_S390) && defined(CONFIG_SMP)
{
.procname = "spin_retry",
{ CTL_INT, KERN_COMPAT_LOG, "compat-log" },
{ CTL_INT, KERN_MAX_LOCK_DEPTH, "max_lock_depth" },
{ CTL_INT, KERN_PANIC_ON_NMI, "panic_on_unrecovered_nmi" },
+ { CTL_INT, KERN_SETUID_DUMPABLE, "suid_dumpable" },
{}
};
--- /dev/null
+/*
+ * Copyright (C) 2002-2006 Novell, Inc.
+ * Jan Beulich <jbeulich@novell.com>
+ * This code is released under version 2 of the GNU GPL.
+ *
+ * A simple API for unwinding kernel stacks. This is used for
+ * debugging and error reporting purposes. The kernel doesn't need
+ * full-blown stack unwinding with all the bells and whistles, so there
+ * is not much point in implementing the full Dwarf2 unwind API.
+ */
+
+#include <linux/unwind.h>
+#include <linux/module.h>
+#include <linux/bootmem.h>
+#include <linux/sort.h>
+#include <linux/stop_machine.h>
+#include <linux/uaccess.h>
+#include <asm/sections.h>
+#include <asm/unaligned.h>
+#include <linux/slab.h>
+
+extern const char __start_unwind[], __end_unwind[];
+extern const u8 __start_unwind_hdr[], __end_unwind_hdr[];
+
+#define MAX_STACK_DEPTH 8
+
+#define EXTRA_INFO(f) { \
+ BUILD_BUG_ON_ZERO(offsetof(struct unwind_frame_info, f) \
+ % FIELD_SIZEOF(struct unwind_frame_info, f)) \
+ + offsetof(struct unwind_frame_info, f) \
+ / FIELD_SIZEOF(struct unwind_frame_info, f), \
+ FIELD_SIZEOF(struct unwind_frame_info, f) \
+ }
+#define PTREGS_INFO(f) EXTRA_INFO(regs.f)
+
+static const struct {
+ unsigned offs:BITS_PER_LONG / 2;
+ unsigned width:BITS_PER_LONG / 2;
+} reg_info[] = {
+ UNW_REGISTER_INFO
+};
+
+#undef PTREGS_INFO
+#undef EXTRA_INFO
+
+#ifndef REG_INVALID
+#define REG_INVALID(r) (reg_info[r].width == 0)
+#endif
+
+#define DW_CFA_nop 0x00
+#define DW_CFA_set_loc 0x01
+#define DW_CFA_advance_loc1 0x02
+#define DW_CFA_advance_loc2 0x03
+#define DW_CFA_advance_loc4 0x04
+#define DW_CFA_offset_extended 0x05
+#define DW_CFA_restore_extended 0x06
+#define DW_CFA_undefined 0x07
+#define DW_CFA_same_value 0x08
+#define DW_CFA_register 0x09
+#define DW_CFA_remember_state 0x0a
+#define DW_CFA_restore_state 0x0b
+#define DW_CFA_def_cfa 0x0c
+#define DW_CFA_def_cfa_register 0x0d
+#define DW_CFA_def_cfa_offset 0x0e
+#define DW_CFA_def_cfa_expression 0x0f
+#define DW_CFA_expression 0x10
+#define DW_CFA_offset_extended_sf 0x11
+#define DW_CFA_def_cfa_sf 0x12
+#define DW_CFA_def_cfa_offset_sf 0x13
+#define DW_CFA_val_offset 0x14
+#define DW_CFA_val_offset_sf 0x15
+#define DW_CFA_val_expression 0x16
+#define DW_CFA_lo_user 0x1c
+#define DW_CFA_GNU_window_save 0x2d
+#define DW_CFA_GNU_args_size 0x2e
+#define DW_CFA_GNU_negative_offset_extended 0x2f
+#define DW_CFA_hi_user 0x3f
+
+#define DW_EH_PE_FORM 0x07
+#define DW_EH_PE_native 0x00
+#define DW_EH_PE_leb128 0x01
+#define DW_EH_PE_data2 0x02
+#define DW_EH_PE_data4 0x03
+#define DW_EH_PE_data8 0x04
+#define DW_EH_PE_signed 0x08
+#define DW_EH_PE_ADJUST 0x70
+#define DW_EH_PE_abs 0x00
+#define DW_EH_PE_pcrel 0x10
+#define DW_EH_PE_textrel 0x20
+#define DW_EH_PE_datarel 0x30
+#define DW_EH_PE_funcrel 0x40
+#define DW_EH_PE_aligned 0x50
+#define DW_EH_PE_indirect 0x80
+#define DW_EH_PE_omit 0xff
+
+#define DW_OP_addr 0x03
+#define DW_OP_deref 0x06
+#define DW_OP_const1u 0x08
+#define DW_OP_const1s 0x09
+#define DW_OP_const2u 0x0a
+#define DW_OP_const2s 0x0b
+#define DW_OP_const4u 0x0c
+#define DW_OP_const4s 0x0d
+#define DW_OP_const8u 0x0e
+#define DW_OP_const8s 0x0f
+#define DW_OP_constu 0x10
+#define DW_OP_consts 0x11
+#define DW_OP_dup 0x12
+#define DW_OP_drop 0x13
+#define DW_OP_over 0x14
+#define DW_OP_pick 0x15
+#define DW_OP_swap 0x16
+#define DW_OP_rot 0x17
+#define DW_OP_xderef 0x18
+#define DW_OP_abs 0x19
+#define DW_OP_and 0x1a
+#define DW_OP_div 0x1b
+#define DW_OP_minus 0x1c
+#define DW_OP_mod 0x1d
+#define DW_OP_mul 0x1e
+#define DW_OP_neg 0x1f
+#define DW_OP_not 0x20
+#define DW_OP_or 0x21
+#define DW_OP_plus 0x22
+#define DW_OP_plus_uconst 0x23
+#define DW_OP_shl 0x24
+#define DW_OP_shr 0x25
+#define DW_OP_shra 0x26
+#define DW_OP_xor 0x27
+#define DW_OP_bra 0x28
+#define DW_OP_eq 0x29
+#define DW_OP_ge 0x2a
+#define DW_OP_gt 0x2b
+#define DW_OP_le 0x2c
+#define DW_OP_lt 0x2d
+#define DW_OP_ne 0x2e
+#define DW_OP_skip 0x2f
+#define DW_OP_lit0 0x30
+#define DW_OP_lit31 0x4f
+#define DW_OP_reg0 0x50
+#define DW_OP_reg31 0x6f
+#define DW_OP_breg0 0x70
+#define DW_OP_breg31 0x8f
+#define DW_OP_regx 0x90
+#define DW_OP_fbreg 0x91
+#define DW_OP_bregx 0x92
+#define DW_OP_piece 0x93
+#define DW_OP_deref_size 0x94
+#define DW_OP_xderef_size 0x95
+#define DW_OP_nop 0x96
+
+typedef unsigned long uleb128_t;
+typedef signed long sleb128_t;
+#define sleb128abs __builtin_labs
+
+static struct unwind_table {
+ struct {
+ unsigned long pc;
+ unsigned long range;
+ } core, init;
+ const void *address;
+ unsigned long size;
+ const unsigned char *header;
+ unsigned long hdrsz;
+ struct unwind_table *link;
+ const char *name;
+} root_table;
+
+struct unwind_item {
+ enum item_location {
+ Nowhere,
+ Memory,
+ Register,
+ Value
+ } where;
+ uleb128_t value;
+};
+
+struct unwind_state {
+ uleb128_t loc, org;
+ const u8 *cieStart, *cieEnd;
+ uleb128_t codeAlign;
+ sleb128_t dataAlign;
+ struct cfa {
+ uleb128_t reg, offs, elen;
+ const u8 *expr;
+ } cfa;
+ struct unwind_item regs[ARRAY_SIZE(reg_info)];
+ unsigned stackDepth:8;
+ unsigned version:8;
+ const u8 *label;
+ const u8 *stack[MAX_STACK_DEPTH];
+};
+
+static const struct cfa badCFA = { ARRAY_SIZE(reg_info), 1 };
+
+/* "unwind_debug=N" boot parameter: higher N enables more dprintk output. */
+static unsigned unwind_debug;
+static int __init unwind_debug_setup(char *s)
+{
+	unwind_debug = simple_strtoul(s, NULL, 0);
+	return 1;
+}
+__setup("unwind_debug=", unwind_debug_setup);
+/* Print only when the message level is within the configured verbosity. */
+#define dprintk(lvl, fmt, args...) \
+	((void)(lvl > unwind_debug \
+		|| printk(KERN_DEBUG "unwind: " fmt "\n", ##args)))
+
+/*
+ * find_table - return the unwind table whose core or init text range
+ * covers @pc, or NULL when no registered table matches (the loop falls
+ * off the end of the list with table == NULL).
+ */
+static struct unwind_table *find_table(unsigned long pc)
+{
+	struct unwind_table *table;
+
+	for (table = &root_table; table; table = table->link)
+		if ((pc >= table->core.pc
+		     && pc < table->core.pc + table->core.range)
+		    || (pc >= table->init.pc
+			&& pc < table->init.pc + table->init.range))
+			break;
+
+	return table;
+}
+
+static unsigned long read_pointer(const u8 **pLoc,
+ const void *end,
+ signed ptrType,
+ unsigned long text_base,
+ unsigned long data_base);
+
+/*
+ * init_unwind_table - fill in an unwind_table descriptor.
+ *
+ * The provided .eh_frame_hdr (@header_start) is sanity checked: its
+ * version byte must be 1 and its encoded eh_frame pointer must equal
+ * @table_start.  Note the two read_pointer() calls using header_start[3]
+ * are not redundant — @ptr advances, so they decode the first search
+ * table entry's start address and FDE pointer in turn.  On any failure
+ * header_start is dropped (NULL); setup_unwind_table() can build a
+ * replacement header later.
+ */
+static void init_unwind_table(struct unwind_table *table,
+			      const char *name,
+			      const void *core_start,
+			      unsigned long core_size,
+			      const void *init_start,
+			      unsigned long init_size,
+			      const void *table_start,
+			      unsigned long table_size,
+			      const u8 *header_start,
+			      unsigned long header_size)
+{
+	const u8 *ptr = header_start + 4;
+	const u8 *end = header_start + header_size;
+
+	table->core.pc = (unsigned long)core_start;
+	table->core.range = core_size;
+	table->init.pc = (unsigned long)init_start;
+	table->init.range = init_size;
+	table->address = table_start;
+	table->size = table_size;
+	/* See if the linker provided table looks valid. */
+	if (header_size <= 4
+	    || header_start[0] != 1
+	    || (void *)read_pointer(&ptr, end, header_start[1], 0, 0)
+	       != table_start
+	    || !read_pointer(&ptr, end, header_start[2], 0, 0)
+	    || !read_pointer(&ptr, end, header_start[3], 0,
+			     (unsigned long)header_start)
+	    || !read_pointer(&ptr, end, header_start[3], 0,
+			     (unsigned long)header_start))
+		header_start = NULL;
+	table->hdrsz = header_size;
+	smp_wmb();	/* publish header contents before the pointer */
+	table->header = header_start;
+	table->link = NULL;
+	table->name = name;
+}
+
+/*
+ * unwind_init - register the kernel image's own unwind data.  Called
+ * very early from start_kernel(), before lockdep_init().
+ */
+void __init unwind_init(void)
+{
+	init_unwind_table(&root_table, "kernel",
+			  _text, _end - _text,
+			  NULL, 0,
+			  __start_unwind, __end_unwind - __start_unwind,
+			  __start_unwind_hdr, __end_unwind_hdr - __start_unwind_hdr);
+}
+
+static const u32 bad_cie, not_fde;
+static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *);
+static signed fde_pointer_type(const u32 *cie);
+
+struct eh_frame_hdr_table_entry {
+ unsigned long start, fde;
+};
+
+/* sort() comparator: order search table entries by start address. */
+static int cmp_eh_frame_hdr_table_entries(const void *p1, const void *p2)
+{
+	const struct eh_frame_hdr_table_entry *e1 = p1;
+	const struct eh_frame_hdr_table_entry *e2 = p2;
+
+	return (e1->start > e2->start) - (e1->start < e2->start);
+}
+
+/* sort() swap callback for eh_frame_hdr_table_entry elements. */
+static void swap_eh_frame_hdr_table_entries(void *p1, void *p2, int size)
+{
+	struct eh_frame_hdr_table_entry *e1 = p1;
+	struct eh_frame_hdr_table_entry *e2 = p2;
+	unsigned long v;
+
+	v = e1->start;
+	e1->start = e2->start;
+	e2->start = v;
+	v = e1->fde;
+	e1->fde = e2->fde;
+	e2->fde = v;
+}
+
+/*
+ * setup_unwind_table - build a binary-search header for @table when no
+ * usable linker-provided .eh_frame_hdr exists.
+ *
+ * Pass 1 validates every CIE/FDE and counts the FDEs; pass 2 fills the
+ * header allocated via @alloc with (initial location, FDE address)
+ * pairs, which are then sorted by start address.  On any malformed
+ * input the function returns silently, leaving table->header NULL.
+ */
+static void __init setup_unwind_table(struct unwind_table *table,
+				      void *(*alloc)(unsigned long))
+{
+	const u8 *ptr;
+	unsigned long tableSize = table->size, hdrSize;
+	unsigned n;
+	const u32 *fde;
+	struct {
+		u8 version;
+		u8 eh_frame_ptr_enc;
+		u8 fde_count_enc;
+		u8 table_enc;
+		unsigned long eh_frame_ptr;
+		unsigned int fde_count;
+		struct eh_frame_hdr_table_entry table[];
+	} __attribute__((__packed__)) *header;
+
+	if (table->header)
+		return;
+
+	if (table->hdrsz)
+		printk(KERN_WARNING ".eh_frame_hdr for '%s' present but unusable\n",
+		       table->name);
+
+	if (tableSize & (sizeof(*fde) - 1))
+		return;
+
+	/* Pass 1: validate each entry and count the FDEs. */
+	for (fde = table->address, n = 0;
+	     tableSize > sizeof(*fde) && tableSize - sizeof(*fde) >= *fde;
+	     tableSize -= sizeof(*fde) + *fde, fde += 1 + *fde / sizeof(*fde)) {
+		const u32 *cie = cie_for_fde(fde, table);
+		signed ptrType;
+
+		if (cie == &not_fde)
+			continue;
+		if (cie == NULL
+		    || cie == &bad_cie
+		    || (ptrType = fde_pointer_type(cie)) < 0)
+			return;
+		ptr = (const u8 *)(fde + 2);
+		if (!read_pointer(&ptr,
+				  (const u8 *)(fde + 1) + *fde,
+				  ptrType, 0, 0))
+			return;
+		++n;
+	}
+
+	if (tableSize || !n)
+		return;
+
+	hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int)
+		+ 2 * n * sizeof(unsigned long);
+	dprintk(2, "Binary lookup table size for %s: %lu bytes", table->name, hdrSize);
+	header = alloc(hdrSize);
+	if (!header)
+		return;
+	header->version = 1;
+	header->eh_frame_ptr_enc = DW_EH_PE_abs|DW_EH_PE_native;
+	header->fde_count_enc = DW_EH_PE_abs|DW_EH_PE_data4;
+	header->table_enc = DW_EH_PE_abs|DW_EH_PE_native;
+	put_unaligned((unsigned long)table->address, &header->eh_frame_ptr);
+	BUILD_BUG_ON(offsetof(typeof(*header), fde_count)
+		     % __alignof(typeof(header->fde_count)));
+	header->fde_count = n;
+
+	BUILD_BUG_ON(offsetof(typeof(*header), table)
+		     % __alignof(typeof(*header->table)));
+	/* Pass 2: fill in the (start, fde) pairs, then sort them. */
+	for (fde = table->address, tableSize = table->size, n = 0;
+	     tableSize;
+	     tableSize -= sizeof(*fde) + *fde, fde += 1 + *fde / sizeof(*fde)) {
+		const u32 *cie = fde + 1 - fde[1] / sizeof(*fde);
+
+		if (!fde[1])
+			continue; /* this is a CIE */
+		ptr = (const u8 *)(fde + 2);
+		header->table[n].start = read_pointer(&ptr,
+						      (const u8 *)(fde + 1) + *fde,
+						      fde_pointer_type(cie), 0, 0);
+		header->table[n].fde = (unsigned long)fde;
+		++n;
+	}
+	WARN_ON(n != header->fde_count);
+
+	sort(header->table,
+	     n,
+	     sizeof(*header->table),
+	     cmp_eh_frame_hdr_table_entries,
+	     swap_eh_frame_hdr_table_entries);
+
+	table->hdrsz = hdrSize;
+	smp_wmb();	/* publish header contents before the pointer */
+	table->header = (const void *)header;
+}
+
+/* Boot-time allocator for the kernel unwind header (bootmem, int-aligned). */
+static void *__init balloc(unsigned long sz)
+{
+	return __alloc_bootmem_nopanic(sz,
+				       sizeof(unsigned int),
+				       __pa(MAX_DMA_ADDRESS));
+}
+
+/* Build the kernel's unwind search header; called from start_kernel(). */
+void __init unwind_setup(void)
+{
+	setup_unwind_table(&root_table, balloc);
+}
+
+#ifdef CONFIG_MODULES
+
+static struct unwind_table *last_table;
+
+/* Must be called with module_mutex held. */
+/* Must be called with module_mutex held. */
+void *unwind_add_table(struct module *module,
+		       const void *table_start,
+		       unsigned long table_size)
+{
+	struct unwind_table *table;
+
+	/* NOTE(review): table_size is unsigned, so "<= 0" only rejects 0. */
+	if (table_size <= 0)
+		return NULL;
+
+	table = kmalloc(sizeof(*table), GFP_KERNEL);
+	if (!table)
+		return NULL;
+
+	init_unwind_table(table, module->name,
+			  module->module_core, module->core_size,
+			  module->module_init, module->init_size,
+			  table_start, table_size,
+			  NULL, 0);
+
+	/* Append to the singly linked list rooted at root_table. */
+	if (last_table)
+		last_table->link = table;
+	else
+		root_table.link = table;
+	last_table = table;
+
+	return table;
+}
+
+struct unlink_table_info
+{
+	struct unwind_table *table;
+	int init_only;
+};
+
+/*
+ * Runs via stop_machine(), i.e. with all other CPUs stopped, so the
+ * table list can be modified without an unwinder walking it.  Unlinks
+ * info->table, or just clears its init range when init_only is set.
+ * info->table is set to NULL when the caller must not kfree() it.
+ */
+static int unlink_table(void *arg)
+{
+	struct unlink_table_info *info = arg;
+	struct unwind_table *table = info->table, *prev;
+
+	for (prev = &root_table; prev->link && prev->link != table; prev = prev->link)
+		;
+
+	if (prev->link) {
+		if (info->init_only) {
+			table->init.pc = 0;
+			table->init.range = 0;
+			info->table = NULL;
+		} else {
+			prev->link = table->link;
+			if (!prev->link)
+				last_table = prev;
+		}
+	} else
+		info->table = NULL;
+
+	return 0;
+}
+
+/* Must be called with module_mutex held. */
+void unwind_remove_table(void *handle, int init_only)
+{
+	struct unwind_table *table = handle;
+	struct unlink_table_info info;
+
+	if (!table || table == &root_table)
+		return;
+
+	/* Fast path: just drop the init range of the most recent table. */
+	if (init_only && table == last_table) {
+		table->init.pc = 0;
+		table->init.range = 0;
+		return;
+	}
+
+	info.table = table;
+	info.init_only = init_only;
+	stop_machine(unlink_table, &info, NULL);	/* NULL: any one CPU */
+
+	if (info.table)
+		kfree(table);
+}
+
+#endif /* CONFIG_MODULES */
+
+/*
+ * get_uleb128 - decode an unsigned LEB128 value at *@pcur, advancing
+ * *@pcur past it.  On overflow of uleb128_t, *@pcur is left at @end + 1
+ * so callers can detect the error with a "> end" check.
+ */
+static uleb128_t get_uleb128(const u8 **pcur, const u8 *end)
+{
+	const u8 *cur = *pcur;
+	uleb128_t value;
+	unsigned shift;
+
+	for (shift = 0, value = 0; cur < end; shift += 7) {
+		if (shift + 7 > 8 * sizeof(value)
+		    && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) {
+			cur = end + 1;
+			break;
+		}
+		value |= (uleb128_t)(*cur & 0x7f) << shift;
+		if (!(*cur++ & 0x80))
+			break;
+	}
+	*pcur = cur;
+
+	return value;
+}
+
+/*
+ * get_sleb128 - decode a signed LEB128 value at *@pcur, advancing *@pcur
+ * past it.  The "-(*cur & 0x40) << shift" term sign-extends from bit 6
+ * of the final byte.  On overflow *@pcur is left at @end + 1.
+ */
+static sleb128_t get_sleb128(const u8 **pcur, const u8 *end)
+{
+	const u8 *cur = *pcur;
+	sleb128_t value;
+	unsigned shift;
+
+	for (shift = 0, value = 0; cur < end; shift += 7) {
+		if (shift + 7 > 8 * sizeof(value)
+		    && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) {
+			cur = end + 1;
+			break;
+		}
+		value |= (sleb128_t)(*cur & 0x7f) << shift;
+		if (!(*cur & 0x80)) {
+			value |= -(*cur++ & 0x40) << shift;
+			break;
+		}
+	}
+	*pcur = cur;
+
+	return value;
+}
+
+/*
+ * cie_for_fde - return the CIE belonging to @fde.
+ * Returns &bad_cie for a malformed entry, &not_fde when @fde is actually
+ * a CIE, and NULL when the FDE's CIE pointer or the CIE itself is
+ * invalid.  Callers compare against the two sentinel addresses.
+ */
+static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *table)
+{
+	const u32 *cie;
+
+	if (!*fde || (*fde & (sizeof(*fde) - 1)))
+		return &bad_cie;
+	if (!fde[1])
+		return &not_fde; /* this is a CIE */
+	if ((fde[1] & (sizeof(*fde) - 1))
+	    || fde[1] > (unsigned long)(fde + 1) - (unsigned long)table->address)
+		return NULL; /* this is not a valid FDE */
+	cie = fde + 1 - fde[1] / sizeof(*fde);
+	if (*cie <= sizeof(*cie) + 4
+	    || *cie >= fde[1] - sizeof(*fde)
+	    || (*cie & (sizeof(*cie) - 1))
+	    || cie[1])
+		return NULL; /* this is not a (valid) CIE */
+	return cie;
+}
+
+/*
+ * read_pointer - decode one DW_EH_PE_*-encoded pointer at *@pLoc,
+ * advancing *@pLoc past it.  @text_base/@data_base anchor text- and
+ * data-relative encodings.  Returns 0 on any decode error; callers
+ * treat 0 as failure.
+ */
+static unsigned long read_pointer(const u8 **pLoc,
+				  const void *end,
+				  signed ptrType,
+				  unsigned long text_base,
+				  unsigned long data_base)
+{
+	unsigned long value = 0;
+	union {
+		const u8 *p8;
+		const u16 *p16u;
+		const s16 *p16s;
+		const u32 *p32u;
+		const s32 *p32s;
+		const unsigned long *pul;
+	} ptr;
+
+	if (ptrType < 0 || ptrType == DW_EH_PE_omit) {
+		dprintk(1, "Invalid pointer encoding %02X (%p,%p).", ptrType, *pLoc, end);
+		return 0;
+	}
+	ptr.p8 = *pLoc;
+	switch (ptrType & DW_EH_PE_FORM) {
+	case DW_EH_PE_data2:
+		if (end < (const void *)(ptr.p16u + 1)) {
+			dprintk(1, "Data16 overrun (%p,%p).", ptr.p8, end);
+			return 0;
+		}
+		if (ptrType & DW_EH_PE_signed)
+			value = get_unaligned(ptr.p16s++);
+		else
+			value = get_unaligned(ptr.p16u++);
+		break;
+	case DW_EH_PE_data4:
+#ifdef CONFIG_64BIT
+		if (end < (const void *)(ptr.p32u + 1)) {
+			dprintk(1, "Data32 overrun (%p,%p).", ptr.p8, end);
+			return 0;
+		}
+		if (ptrType & DW_EH_PE_signed)
+			value = get_unaligned(ptr.p32s++);
+		else
+			value = get_unaligned(ptr.p32u++);
+		break;
+	case DW_EH_PE_data8:
+		BUILD_BUG_ON(sizeof(u64) != sizeof(value));
+#else
+		/* On 32-bit, data4 is the native width: fall through. */
+		BUILD_BUG_ON(sizeof(u32) != sizeof(value));
+#endif
+		/* fallthrough to the native-width read */
+	case DW_EH_PE_native:
+		if (end < (const void *)(ptr.pul + 1)) {
+			dprintk(1, "DataUL overrun (%p,%p).", ptr.p8, end);
+			return 0;
+		}
+		value = get_unaligned(ptr.pul++);
+		break;
+	case DW_EH_PE_leb128:
+		BUILD_BUG_ON(sizeof(uleb128_t) > sizeof(value));
+		value = ptrType & DW_EH_PE_signed
+			? get_sleb128(&ptr.p8, end)
+			: get_uleb128(&ptr.p8, end);
+		if ((const void *)ptr.p8 > end) {
+			dprintk(1, "DataLEB overrun (%p,%p).", ptr.p8, end);
+			return 0;
+		}
+		break;
+	default:
+		dprintk(2, "Cannot decode pointer type %02X (%p,%p).",
+			ptrType, ptr.p8, end);
+		return 0;
+	}
+	switch (ptrType & DW_EH_PE_ADJUST) {
+	case DW_EH_PE_abs:
+		break;
+	case DW_EH_PE_pcrel:
+		value += (unsigned long)*pLoc;
+		break;
+	case DW_EH_PE_textrel:
+		if (likely(text_base)) {
+			value += text_base;
+			break;
+		}
+		dprintk(2, "Text-relative encoding %02X (%p,%p), but zero text base.",
+			ptrType, *pLoc, end);
+		return 0;
+	case DW_EH_PE_datarel:
+		if (likely(data_base)) {
+			value += data_base;
+			break;
+		}
+		dprintk(2, "Data-relative encoding %02X (%p,%p), but zero data base.",
+			ptrType, *pLoc, end);
+		return 0;
+	default:
+		dprintk(2, "Cannot adjust pointer type %02X (%p,%p).",
+			ptrType, *pLoc, end);
+		return 0;
+	}
+	if ((ptrType & DW_EH_PE_indirect)
+	    && probe_kernel_address(value, value)) {
+		dprintk(1, "Cannot read indirect value %lx (%p,%p).",
+			value, *pLoc, end);
+		return 0;
+	}
+	*pLoc = ptr.p8;
+
+	return value;
+}
+
+/*
+ * fde_pointer_type - determine the DW_EH_PE_* encoding of the FDE
+ * pointers described by @cie.  Parses the (optional) 'z' augmentation
+ * data; returns the 'R' encoding when present, the native/absolute
+ * encoding when there is no augmentation, or -1 for anything this
+ * unwinder cannot handle.
+ */
+static signed fde_pointer_type(const u32 *cie)
+{
+	const u8 *ptr = (const u8 *)(cie + 2);
+	unsigned version = *ptr;
+
+	if (version != 1)
+		return -1; /* unsupported */
+	if (*++ptr) {
+		const char *aug;
+		const u8 *end = (const u8 *)(cie + 1) + *cie;
+		uleb128_t len;
+
+		/* check if augmentation size is first (and thus present) */
+		if (*ptr != 'z')
+			return -1;
+		/* check if augmentation string is nul-terminated */
+		if ((ptr = memchr(aug = (const void *)ptr, 0, end - ptr)) == NULL)
+			return -1;
+		++ptr; /* skip terminator */
+		get_uleb128(&ptr, end); /* skip code alignment */
+		get_sleb128(&ptr, end); /* skip data alignment */
+		/* skip return address column */
+		version <= 1 ? (void)++ptr : (void)get_uleb128(&ptr, end);
+		len = get_uleb128(&ptr, end); /* augmentation length */
+		if (ptr + len < ptr || ptr + len > end)
+			return -1;
+		end = ptr + len;
+		while (*++aug) {
+			if (ptr >= end)
+				return -1;
+			switch (*aug) {
+			case 'L':
+				++ptr;
+				break;
+			case 'P': {
+					signed ptrType = *ptr++;
+
+					if (!read_pointer(&ptr, end, ptrType, 0, 0)
+					    || ptr > end)
+						return -1;
+				}
+				break;
+			case 'R':
+				return *ptr;
+			default:
+				return -1;
+			}
+		}
+	}
+	return DW_EH_PE_native|DW_EH_PE_abs;
+}
+
+/*
+ * advance_loc - move the interpreter's location forward by @delta
+ * code-alignment units; a zero delta is treated as failure.
+ */
+static int advance_loc(unsigned long delta, struct unwind_state *state)
+{
+	state->loc += delta * state->codeAlign;
+
+	return delta > 0;
+}
+
+/* Record an unwind rule for @reg; out-of-range registers are ignored. */
+static void set_rule(uleb128_t reg,
+		     enum item_location where,
+		     uleb128_t value,
+		     struct unwind_state *state)
+{
+	if (reg < ARRAY_SIZE(state->regs)) {
+		state->regs[reg].where = where;
+		state->regs[reg].value = value;
+	}
+}
+
+/*
+ * processCFI - interpret DWARF call frame instructions from @start to
+ * @end, updating @state until @targetLoc is reached (0 = run to the
+ * end).  When @start is not the CIE itself, the CIE's initial
+ * instructions are replayed first.  Returns non-zero on success.
+ */
+static int processCFI(const u8 *start,
+		      const u8 *end,
+		      unsigned long targetLoc,
+		      signed ptrType,
+		      struct unwind_state *state)
+{
+	union {
+		const u8 *p8;
+		const u16 *p16;
+		const u32 *p32;
+	} ptr;
+	int result = 1;
+
+	if (start != state->cieStart) {
+		state->loc = state->org;
+		result = processCFI(state->cieStart, state->cieEnd, 0, ptrType, state);
+		if (targetLoc == 0 && state->label == NULL)
+			return result;
+	}
+	for (ptr.p8 = start; result && ptr.p8 < end; ) {
+		/* High two bits of the opcode select the instruction class. */
+		switch (*ptr.p8 >> 6) {
+			uleb128_t value;
+
+		case 0:
+			switch (*ptr.p8++) {
+			case DW_CFA_nop:
+				break;
+			case DW_CFA_set_loc:
+				state->loc = read_pointer(&ptr.p8, end, ptrType, 0, 0);
+				if (state->loc == 0)
+					result = 0;
+				break;
+			case DW_CFA_advance_loc1:
+				result = ptr.p8 < end && advance_loc(*ptr.p8++, state);
+				break;
+			case DW_CFA_advance_loc2:
+				/*
+				 * NOTE(review): "ptr.p8 <= end + 2" is almost
+				 * always true; a strict pre-read bound would be
+				 * "ptr.p8 + 2 <= end".  An overrun is only
+				 * detected after the read by the "ptr.p8 > end"
+				 * check below — confirm this is intended.
+				 */
+				result = ptr.p8 <= end + 2
+					 && advance_loc(*ptr.p16++, state);
+				break;
+			case DW_CFA_advance_loc4:
+				/* NOTE(review): same loose bound as advance_loc2. */
+				result = ptr.p8 <= end + 4
+					 && advance_loc(*ptr.p32++, state);
+				break;
+			case DW_CFA_offset_extended:
+				value = get_uleb128(&ptr.p8, end);
+				set_rule(value, Memory, get_uleb128(&ptr.p8, end), state);
+				break;
+			case DW_CFA_val_offset:
+				value = get_uleb128(&ptr.p8, end);
+				set_rule(value, Value, get_uleb128(&ptr.p8, end), state);
+				break;
+			case DW_CFA_offset_extended_sf:
+				value = get_uleb128(&ptr.p8, end);
+				set_rule(value, Memory, get_sleb128(&ptr.p8, end), state);
+				break;
+			case DW_CFA_val_offset_sf:
+				value = get_uleb128(&ptr.p8, end);
+				set_rule(value, Value, get_sleb128(&ptr.p8, end), state);
+				break;
+			/*todo case DW_CFA_expression: */
+			/*todo case DW_CFA_val_expression: */
+			case DW_CFA_restore_extended:
+			case DW_CFA_undefined:
+			case DW_CFA_same_value:
+				set_rule(get_uleb128(&ptr.p8, end), Nowhere, 0, state);
+				break;
+			case DW_CFA_register:
+				value = get_uleb128(&ptr.p8, end);
+				set_rule(value,
+					 Register,
+					 get_uleb128(&ptr.p8, end), state);
+				break;
+			case DW_CFA_remember_state:
+				if (ptr.p8 == state->label) {
+					state->label = NULL;
+					return 1;
+				}
+				if (state->stackDepth >= MAX_STACK_DEPTH) {
+					dprintk(1, "State stack overflow (%p,%p).", ptr.p8, end);
+					return 0;
+				}
+				state->stack[state->stackDepth++] = ptr.p8;
+				break;
+			case DW_CFA_restore_state:
+				/* Re-run from the start up to the remembered label. */
+				if (state->stackDepth) {
+					const uleb128_t loc = state->loc;
+					const u8 *label = state->label;
+
+					state->label = state->stack[state->stackDepth - 1];
+					memcpy(&state->cfa, &badCFA, sizeof(state->cfa));
+					memset(state->regs, 0, sizeof(state->regs));
+					state->stackDepth = 0;
+					result = processCFI(start, end, 0, ptrType, state);
+					state->loc = loc;
+					state->label = label;
+				} else {
+					dprintk(1, "State stack underflow (%p,%p).", ptr.p8, end);
+					return 0;
+				}
+				break;
+			case DW_CFA_def_cfa:
+				state->cfa.reg = get_uleb128(&ptr.p8, end);
+				state->cfa.elen = 0;
+				/*nobreak*/
+			case DW_CFA_def_cfa_offset:
+				state->cfa.offs = get_uleb128(&ptr.p8, end);
+				break;
+			case DW_CFA_def_cfa_sf:
+				state->cfa.reg = get_uleb128(&ptr.p8, end);
+				state->cfa.elen = 0;
+				/*nobreak*/
+			case DW_CFA_def_cfa_offset_sf:
+				state->cfa.offs = get_sleb128(&ptr.p8, end)
+						  * state->dataAlign;
+				break;
+			case DW_CFA_def_cfa_register:
+				state->cfa.reg = get_uleb128(&ptr.p8, end);
+				state->cfa.elen = 0;
+				break;
+			case DW_CFA_def_cfa_expression:
+				state->cfa.elen = get_uleb128(&ptr.p8, end);
+				if (!state->cfa.elen) {
+					dprintk(1, "Zero-length CFA expression.");
+					return 0;
+				}
+				state->cfa.expr = ptr.p8;
+				ptr.p8 += state->cfa.elen;
+				break;
+			case DW_CFA_GNU_args_size:
+				get_uleb128(&ptr.p8, end);
+				break;
+			case DW_CFA_GNU_negative_offset_extended:
+				value = get_uleb128(&ptr.p8, end);
+				set_rule(value,
+					 Memory,
+					 (uleb128_t)0 - get_uleb128(&ptr.p8, end), state);
+				break;
+			case DW_CFA_GNU_window_save:
+			default:
+				dprintk(1, "Unrecognized CFI op %02X (%p,%p).", ptr.p8[-1], ptr.p8 - 1, end);
+				result = 0;
+				break;
+			}
+			break;
+		case 1:
+			/* DW_CFA_advance_loc: delta in the low 6 opcode bits. */
+			result = advance_loc(*ptr.p8++ & 0x3f, state);
+			break;
+		case 2:
+			/* DW_CFA_offset: register in opcode, offset follows. */
+			value = *ptr.p8++ & 0x3f;
+			set_rule(value, Memory, get_uleb128(&ptr.p8, end), state);
+			break;
+		case 3:
+			/* DW_CFA_restore: register in the low 6 opcode bits. */
+			set_rule(*ptr.p8++ & 0x3f, Nowhere, 0, state);
+			break;
+		}
+		if (ptr.p8 > end) {
+			dprintk(1, "Data overrun (%p,%p).", ptr.p8, end);
+			result = 0;
+		}
+		if (result && targetLoc != 0 && targetLoc < state->loc)
+			return 1;
+	}
+
+	if (result && ptr.p8 < end)
+		dprintk(1, "Data underrun (%p,%p).", ptr.p8, end);
+
+	return result
+	       && ptr.p8 == end
+	       && (targetLoc == 0
+		   || (/*todo While in theory this should apply, gcc in practice omits
+			 everything past the function prolog, and hence the location
+			 never reaches the end of the function.
+			targetLoc < state->loc &&*/ state->label == NULL));
+}
+
+/*
+ * evaluate - interpret a DWARF location expression for @frame.
+ * @expr: first byte of the expression
+ * @end: one past the last byte of the expression
+ * @frame: supplies register contents for DW_OP_breg0..31 / DW_OP_bregx
+ *
+ * Returns the value left on top of the evaluation stack, or 0 on any
+ * error (malformed opcode stream, stack over-/underflow, unreadable
+ * memory). A genuine result of 0 is therefore indistinguishable from
+ * failure; the caller treats a zero CFA as invalid anyway.
+ */
+static unsigned long evaluate(const u8 *expr, const u8 *end,
+ const struct unwind_frame_info *frame)
+{
+ union {
+ const u8 *pu8;
+ const s8 *ps8;
+ const u16 *pu16;
+ const s16 *ps16;
+ const u32 *pu32;
+ const s32 *ps32;
+ const u64 *pu64;
+ const s64 *ps64;
+ } ptr = { expr };
+ unsigned long stack[8], val1, val2;
+ unsigned int stidx = 0;
+ /* PUSH/POP bail out of evaluate() with 0 on stack over-/underflow. */
+#define PUSH(v) ({ unsigned long v__ = (v); if (stidx >= ARRAY_SIZE(stack)) return 0; stack[stidx++] = v__; })
+#define POP() ({ if (!stidx) return 0; stack[--stidx]; })
+
+ while (ptr.pu8 < end) {
+ switch (*ptr.pu8++) {
+ /*todo case DW_OP_addr: */
+ case DW_OP_deref:
+ val1 = POP();
+ if (probe_kernel_address(val1, val2)) {
+ dprintk(1, "Cannot de-reference %lx (%p,%p).", val1, ptr.pu8 - 1, end);
+ return 0;
+ }
+ PUSH(val2);
+ break;
+ /*todo? case DW_OP_xderef: */
+ /*todo case DW_OP_deref_size: */
+ /*todo? case DW_OP_xderef_size: */
+ case DW_OP_const1u:
+ if (ptr.pu8 < end)
+ PUSH(*ptr.pu8);
+ ++ptr.pu8;
+ break;
+ case DW_OP_const1s:
+ if (ptr.pu8 < end)
+ PUSH(*ptr.ps8);
+ ++ptr.ps8;
+ break;
+ case DW_OP_const2u:
+ if (ptr.pu8 + 1 < end)
+ PUSH(*ptr.pu16);
+ ++ptr.pu16;
+ break;
+ case DW_OP_const2s:
+ if (ptr.pu8 + 1 < end)
+ PUSH(*ptr.ps16);
+ ++ptr.ps16;
+ break;
+ case DW_OP_const4u:
+ if (ptr.pu8 + 3 < end)
+ PUSH(*ptr.pu32);
+ ++ptr.pu32;
+ break;
+ case DW_OP_const4s:
+ if (ptr.pu8 + 3 < end)
+ PUSH(*ptr.ps32);
+ ++ptr.ps32;
+ break;
+ case DW_OP_const8u:
+ if (ptr.pu8 + 7 < end)
+ PUSH(*ptr.pu64);
+ ++ptr.pu64;
+ break;
+ case DW_OP_const8s:
+ if (ptr.pu8 + 7 < end)
+ PUSH(*ptr.ps64);
+ ++ptr.ps64;
+ break;
+ case DW_OP_constu:
+ PUSH(get_uleb128(&ptr.pu8, end));
+ break;
+ case DW_OP_consts:
+ PUSH(get_sleb128(&ptr.pu8, end));
+ break;
+ case DW_OP_dup:
+ if (!stidx)
+ return 0;
+ PUSH(stack[stidx - 1]);
+ break;
+ case DW_OP_drop:
+ (void)POP();
+ break;
+ case DW_OP_over:
+ if (stidx <= 1)
+ return 0;
+ PUSH(stack[stidx - 2]);
+ break;
+ case DW_OP_pick:
+ if (ptr.pu8 < end) {
+ if (stidx <= *ptr.pu8)
+ return 0;
+ PUSH(stack[stidx - *ptr.pu8 - 1]);
+ }
+ ++ptr.pu8;
+ break;
+ case DW_OP_swap:
+ if (stidx <= 1)
+ return 0;
+ val1 = stack[stidx - 1];
+ stack[stidx - 1] = stack[stidx - 2];
+ stack[stidx - 2] = val1;
+ break;
+ case DW_OP_rot:
+ if (stidx <= 2)
+ return 0;
+ val1 = stack[stidx - 1];
+ stack[stidx - 1] = stack[stidx - 2];
+ stack[stidx - 2] = stack[stidx - 3];
+ stack[stidx - 3] = val1;
+ break;
+ case DW_OP_abs:
+ PUSH(__builtin_labs(POP()));
+ break;
+ case DW_OP_and:
+ val1 = POP();
+ val2 = POP();
+ PUSH(val2 & val1);
+ break;
+ case DW_OP_div:
+ val1 = POP();
+ if (!val1)
+ return 0;
+ val2 = POP();
+ PUSH(val2 / val1);
+ break;
+ case DW_OP_minus:
+ val1 = POP();
+ val2 = POP();
+ PUSH(val2 - val1);
+ break;
+ case DW_OP_mod:
+ val1 = POP();
+ if (!val1)
+ return 0;
+ val2 = POP();
+ PUSH(val2 % val1);
+ break;
+ case DW_OP_mul:
+ val1 = POP();
+ val2 = POP();
+ PUSH(val2 * val1);
+ break;
+ case DW_OP_neg:
+ PUSH(-(long)POP());
+ break;
+ case DW_OP_not:
+ PUSH(~POP());
+ break;
+ case DW_OP_or:
+ val1 = POP();
+ val2 = POP();
+ PUSH(val2 | val1);
+ break;
+ case DW_OP_plus:
+ val1 = POP();
+ val2 = POP();
+ PUSH(val2 + val1);
+ break;
+ case DW_OP_plus_uconst:
+ PUSH(POP() + get_uleb128(&ptr.pu8, end));
+ break;
+ case DW_OP_shl:
+ val1 = POP();
+ val2 = POP();
+ PUSH(val1 < BITS_PER_LONG ? val2 << val1 : 0);
+ break;
+ case DW_OP_shr:
+ val1 = POP();
+ val2 = POP();
+ PUSH(val1 < BITS_PER_LONG ? val2 >> val1 : 0);
+ break;
+ case DW_OP_shra:
+ val1 = POP();
+ val2 = POP();
+ /* Arithmetic shift. For out-of-range shift counts the
+ * result is all-ones iff the sign bit of val2 is set;
+ * val2 is unsigned, so it must be tested as signed
+ * (the former "val2 < 0" test was always false). */
+ PUSH(val1 < BITS_PER_LONG ? (long)val2 >> val1 : ((long)val2 < 0 ? ~0UL : 0));
+ break;
+ case DW_OP_xor:
+ val1 = POP();
+ val2 = POP();
+ PUSH(val2 ^ val1);
+ break;
+ case DW_OP_bra:
+ if (!POP()) {
+ ++ptr.ps16;
+ break;
+ }
+ /*nobreak*/
+ case DW_OP_skip:
+ if (ptr.pu8 + 1 < end) {
+ ptr.pu8 += *ptr.ps16;
+ if (ptr.pu8 < expr)
+ return 0;
+ } else
+ ++ptr.ps16;
+ break;
+ /* NOTE(review): DWARF specifies the relational operators as
+ * signed comparisons; these use unsigned semantics. Kept as
+ * historic behavior -- confirm against the producers used. */
+ case DW_OP_eq:
+ val1 = POP();
+ val2 = POP();
+ PUSH(val2 == val1);
+ break;
+ case DW_OP_ne:
+ val1 = POP();
+ val2 = POP();
+ PUSH(val2 != val1);
+ break;
+ case DW_OP_lt:
+ val1 = POP();
+ val2 = POP();
+ PUSH(val2 < val1);
+ break;
+ case DW_OP_le:
+ val1 = POP();
+ val2 = POP();
+ PUSH(val2 <= val1);
+ break; /* was missing: fell through into DW_OP_ge */
+ case DW_OP_ge:
+ val1 = POP();
+ val2 = POP();
+ PUSH(val2 >= val1);
+ break;
+ case DW_OP_gt:
+ val1 = POP();
+ val2 = POP();
+ PUSH(val2 > val1);
+ break;
+ case DW_OP_lit0 ... DW_OP_lit31:
+ PUSH(ptr.pu8[-1] - DW_OP_lit0);
+ break;
+ case DW_OP_breg0 ... DW_OP_breg31:
+ val1 = ptr.pu8[-1] - DW_OP_breg0;
+ if (0)
+ case DW_OP_bregx:
+ val1 = get_uleb128(&ptr.pu8, end);
+ if (val1 >= ARRAY_SIZE(reg_info)
+ || reg_info[val1].width != sizeof(unsigned long))
+ return 0;
+ PUSH(((const unsigned long *)frame)[reg_info[val1].offs]
+ + get_sleb128(&ptr.pu8, end));
+ break;
+ /*todo? case DW_OP_fbreg: */
+ /*todo? case DW_OP_piece: */
+ case DW_OP_nop:
+ break;
+ default:
+ dprintk(1, "Unsupported expression op %02x (%p,%p).", ptr.pu8[-1], ptr.pu8 - 1, end);
+ return 0;
+ }
+ }
+ if (ptr.pu8 > end)
+ return 0;
+ val1 = POP();
+#undef POP
+#undef PUSH
+ return val1;
+}
+
+/*
+ * Unwind to the previous frame. Returns 0 if successful, a negative
+ * error code otherwise:
+ *  -EINVAL  the frame's PC is 0 (nothing to unwind)
+ *  -EPERM   input/output PC or SP violate the CIE's alignment rules
+ *  -ENXIO   no usable DWARF info and the frame-pointer fallback failed
+ *  -EIO     DWARF info present but unusable or inconsistent
+ *  -EFAULT  a saved register could not be read from the stack
+ */
+int unwind(struct unwind_frame_info *frame)
+{
+#define FRAME_REG(r, t) (((t *)frame)[reg_info[r].offs])
+ const u32 *fde = NULL, *cie = NULL;
+ const u8 *ptr = NULL, *end = NULL;
+ unsigned long pc = UNW_PC(frame) - frame->call_frame, sp;
+ unsigned long startLoc = 0, endLoc = 0, cfa;
+ unsigned i;
+ signed ptrType = -1;
+ uleb128_t retAddrReg = 0;
+ const struct unwind_table *table;
+ struct unwind_state state;
+
+ if (UNW_PC(frame) == 0)
+ return -EINVAL;
+ /* Phase 1: locate the FDE covering pc, preferring the sorted
+ * binary-search header over a linear scan of the table. */
+ if ((table = find_table(pc)) != NULL
+ && !(table->size & (sizeof(*fde) - 1))) {
+ const u8 *hdr = table->header;
+ unsigned long tableSize;
+
+ smp_rmb();
+ if (hdr && hdr[0] == 1) {
+ switch (hdr[3] & DW_EH_PE_FORM) {
+ case DW_EH_PE_native: tableSize = sizeof(unsigned long); break;
+ case DW_EH_PE_data2: tableSize = 2; break;
+ case DW_EH_PE_data4: tableSize = 4; break;
+ case DW_EH_PE_data8: tableSize = 8; break;
+ default: tableSize = 0; break;
+ }
+ ptr = hdr + 4;
+ end = hdr + table->hdrsz;
+ if (tableSize
+ && read_pointer(&ptr, end, hdr[1], 0, 0)
+ == (unsigned long)table->address
+ && (i = read_pointer(&ptr, end, hdr[2], 0, 0)) > 0
+ && i == (end - ptr) / (2 * tableSize)
+ && !((end - ptr) % (2 * tableSize))) {
+ do {
+ const u8 *cur = ptr + (i / 2) * (2 * tableSize);
+
+ startLoc = read_pointer(&cur,
+ cur + tableSize,
+ hdr[3], 0,
+ (unsigned long)hdr);
+ if (pc < startLoc)
+ i /= 2;
+ else {
+ ptr = cur - tableSize;
+ i = (i + 1) / 2;
+ }
+ } while (startLoc && i > 1);
+ if (i == 1
+ && (startLoc = read_pointer(&ptr,
+ ptr + tableSize,
+ hdr[3], 0,
+ (unsigned long)hdr)) != 0
+ && pc >= startLoc)
+ fde = (void *)read_pointer(&ptr,
+ ptr + tableSize,
+ hdr[3], 0,
+ (unsigned long)hdr);
+ }
+ }
+ if (hdr && !fde)
+ dprintk(3, "Binary lookup for %lx failed.", pc);
+
+ if (fde != NULL) {
+ cie = cie_for_fde(fde, table);
+ ptr = (const u8 *)(fde + 2);
+ if (cie != NULL
+ && cie != &bad_cie
+ && cie != &not_fde
+ && (ptrType = fde_pointer_type(cie)) >= 0
+ && read_pointer(&ptr,
+ (const u8 *)(fde + 1) + *fde,
+ ptrType, 0, 0) == startLoc) {
+ if (!(ptrType & DW_EH_PE_indirect))
+ ptrType &= DW_EH_PE_FORM|DW_EH_PE_signed;
+ endLoc = startLoc
+ + read_pointer(&ptr,
+ (const u8 *)(fde + 1) + *fde,
+ ptrType, 0, 0);
+ if (pc >= endLoc)
+ fde = NULL;
+ } else
+ fde = NULL;
+ if (!fde)
+ dprintk(1, "Binary lookup result for %lx discarded.", pc);
+ }
+ if (fde == NULL) {
+ /* Fall back to a linear scan of all FDEs in the table. */
+ for (fde = table->address, tableSize = table->size;
+ cie = NULL, tableSize > sizeof(*fde)
+ && tableSize - sizeof(*fde) >= *fde;
+ tableSize -= sizeof(*fde) + *fde,
+ fde += 1 + *fde / sizeof(*fde)) {
+ cie = cie_for_fde(fde, table);
+ if (cie == &bad_cie) {
+ cie = NULL;
+ break;
+ }
+ if (cie == NULL
+ || cie == &not_fde
+ || (ptrType = fde_pointer_type(cie)) < 0)
+ continue;
+ ptr = (const u8 *)(fde + 2);
+ startLoc = read_pointer(&ptr,
+ (const u8 *)(fde + 1) + *fde,
+ ptrType, 0, 0);
+ if (!startLoc)
+ continue;
+ if (!(ptrType & DW_EH_PE_indirect))
+ ptrType &= DW_EH_PE_FORM|DW_EH_PE_signed;
+ endLoc = startLoc
+ + read_pointer(&ptr,
+ (const u8 *)(fde + 1) + *fde,
+ ptrType, 0, 0);
+ if (pc >= startLoc && pc < endLoc)
+ break;
+ }
+ if (!fde)
+ dprintk(3, "Linear lookup for %lx failed.", pc);
+ }
+ }
+ /* Phase 2: parse and validate the CIE belonging to the FDE. */
+ if (cie != NULL) {
+ memset(&state, 0, sizeof(state));
+ state.cieEnd = ptr; /* keep here temporarily */
+ ptr = (const u8 *)(cie + 2);
+ end = (const u8 *)(cie + 1) + *cie;
+ frame->call_frame = 1;
+ if ((state.version = *ptr) != 1)
+ cie = NULL; /* unsupported version */
+ else if (*++ptr) {
+ /* check if augmentation size is first (and thus present) */
+ if (*ptr == 'z') {
+ while (++ptr < end && *ptr) {
+ switch (*ptr) {
+ /* check for ignorable (or already handled)
+ * nul-terminated augmentation string */
+ case 'L':
+ case 'P':
+ case 'R':
+ continue;
+ case 'S':
+ frame->call_frame = 0;
+ continue;
+ default:
+ break;
+ }
+ break;
+ }
+ }
+ if (ptr >= end || *ptr)
+ cie = NULL;
+ }
+ if (!cie)
+ dprintk(1, "CIE unusable (%p,%p).", ptr, end);
+ ++ptr;
+ }
+ if (cie != NULL) {
+ /* get code aligment factor */
+ state.codeAlign = get_uleb128(&ptr, end);
+ /* get data aligment factor */
+ state.dataAlign = get_sleb128(&ptr, end);
+ if (state.codeAlign == 0 || state.dataAlign == 0 || ptr >= end)
+ cie = NULL;
+ else if (UNW_PC(frame) % state.codeAlign
+ || UNW_SP(frame) % sleb128abs(state.dataAlign)) {
+ dprintk(1, "Input pointer(s) misaligned (%lx,%lx).",
+ UNW_PC(frame), UNW_SP(frame));
+ return -EPERM;
+ } else {
+ retAddrReg = state.version <= 1 ? *ptr++ : get_uleb128(&ptr, end);
+ /* skip augmentation */
+ if (((const char *)(cie + 2))[1] == 'z') {
+ uleb128_t augSize = get_uleb128(&ptr, end);
+
+ ptr += augSize;
+ }
+ if (ptr > end
+ || retAddrReg >= ARRAY_SIZE(reg_info)
+ || REG_INVALID(retAddrReg)
+ || reg_info[retAddrReg].width != sizeof(unsigned long))
+ cie = NULL;
+ }
+ if (!cie)
+ dprintk(1, "CIE validation failed (%p,%p).", ptr, end);
+ }
+ if (cie != NULL) {
+ state.cieStart = ptr;
+ ptr = state.cieEnd;
+ state.cieEnd = end;
+ end = (const u8 *)(fde + 1) + *fde;
+ /* skip augmentation */
+ if (((const char *)(cie + 2))[1] == 'z') {
+ uleb128_t augSize = get_uleb128(&ptr, end);
+
+ if ((ptr += augSize) > end)
+ fde = NULL;
+ }
+ if (!fde)
+ dprintk(1, "FDE validation failed (%p,%p).", ptr, end);
+ }
+ if (cie == NULL || fde == NULL) {
+#ifdef CONFIG_FRAME_POINTER
+ /* No usable DWARF info: try the frame-pointer chain. */
+ unsigned long top = TSK_STACK_TOP(frame->task);
+ unsigned long bottom = STACK_BOTTOM(frame->task);
+ unsigned long fp = UNW_FP(frame);
+ unsigned long sp = UNW_SP(frame);
+ unsigned long link;
+
+ if ((sp | fp) & (sizeof(unsigned long) - 1))
+ return -EPERM;
+
+# if FRAME_RETADDR_OFFSET < 0
+ if (!(sp < top && fp <= sp && bottom < fp))
+# else
+ if (!(sp > top && fp >= sp && bottom > fp))
+# endif
+ return -ENXIO;
+
+ if (probe_kernel_address(fp + FRAME_LINK_OFFSET, link))
+ return -ENXIO;
+
+# if FRAME_RETADDR_OFFSET < 0
+ if (!(link > bottom && link < fp))
+# else
+ if (!(link < bottom && link > fp))
+# endif
+ return -ENXIO;
+
+ if (link & (sizeof(link) - 1))
+ return -ENXIO;
+
+ fp += FRAME_RETADDR_OFFSET;
+ if (probe_kernel_address(fp, UNW_PC(frame)))
+ return -ENXIO;
+
+ /* Ok, we can use it */
+# if FRAME_RETADDR_OFFSET < 0
+ UNW_SP(frame) = fp - sizeof(UNW_PC(frame));
+# else
+ UNW_SP(frame) = fp + sizeof(UNW_PC(frame));
+# endif
+ UNW_FP(frame) = link;
+ return 0;
+#else
+ return -ENXIO;
+#endif
+ }
+ /* Phase 3: run the CFI program up to pc to obtain the CFA and the
+ * restore rule for every register. */
+ state.org = startLoc;
+ memcpy(&state.cfa, &badCFA, sizeof(state.cfa));
+ /* process instructions */
+ if (!processCFI(ptr, end, pc, ptrType, &state)
+ || state.loc > endLoc
+ || state.regs[retAddrReg].where == Nowhere) {
+ dprintk(1, "Unusable unwind info (%p,%p).", ptr, end);
+ return -EIO;
+ }
+ if (state.cfa.elen) {
+ cfa = evaluate(state.cfa.expr, state.cfa.expr + state.cfa.elen, frame);
+ if (!cfa) {
+ dprintk(1, "Bad CFA expr (%p:%lu).", state.cfa.expr, state.cfa.elen);
+ return -EIO;
+ }
+ } else if (state.cfa.reg >= ARRAY_SIZE(reg_info)
+ || reg_info[state.cfa.reg].width != sizeof(unsigned long)
+ || FRAME_REG(state.cfa.reg, unsigned long) % sizeof(unsigned long)
+ || state.cfa.offs % sizeof(unsigned long)) {
+ dprintk(1, "Bad CFA (%lu,%lx).", state.cfa.reg, state.cfa.offs);
+ return -EIO;
+ } else
+ cfa = FRAME_REG(state.cfa.reg, unsigned long) + state.cfa.offs;
+ /* update frame */
+#ifndef CONFIG_AS_CFI_SIGNAL_FRAME
+ if (frame->call_frame
+ && !UNW_DEFAULT_RA(state.regs[retAddrReg], state.dataAlign))
+ frame->call_frame = 0;
+#endif
+ startLoc = min((unsigned long)UNW_SP(frame), cfa);
+ endLoc = max((unsigned long)UNW_SP(frame), cfa);
+ if (STACK_LIMIT(startLoc) != STACK_LIMIT(endLoc)) {
+ startLoc = min(STACK_LIMIT(cfa), cfa);
+ endLoc = max(STACK_LIMIT(cfa), cfa);
+ }
+#ifndef CONFIG_64BIT
+# define CASES CASE(8); CASE(16); CASE(32)
+#else
+# define CASES CASE(8); CASE(16); CASE(32); CASE(64)
+#endif
+ pc = UNW_PC(frame);
+ sp = UNW_SP(frame);
+ /* Pass 1: resolve register-from-register rules using the old frame
+ * contents, before any register is overwritten. */
+ for (i = 0; i < ARRAY_SIZE(state.regs); ++i) {
+ if (REG_INVALID(i)) {
+ if (state.regs[i].where == Nowhere)
+ continue;
+ dprintk(1, "Cannot restore register %u (%d).",
+ i, state.regs[i].where);
+ return -EIO;
+ }
+ switch (state.regs[i].where) {
+ default:
+ break;
+ case Register:
+ if (state.regs[i].value >= ARRAY_SIZE(reg_info)
+ || REG_INVALID(state.regs[i].value)
+ || reg_info[i].width > reg_info[state.regs[i].value].width) {
+ dprintk(1, "Cannot restore register %u from register %lu.",
+ i, state.regs[i].value);
+ return -EIO;
+ }
+ switch (reg_info[state.regs[i].value].width) {
+#define CASE(n) \
+ case sizeof(u##n): \
+ state.regs[i].value = FRAME_REG(state.regs[i].value, \
+ const u##n); \
+ break
+ CASES;
+#undef CASE
+ default:
+ dprintk(1, "Unsupported register size %u (%lu).",
+ reg_info[state.regs[i].value].width,
+ state.regs[i].value);
+ return -EIO;
+ }
+ break;
+ }
+ }
+ /* Pass 2: write the restored values back into the frame. */
+ for (i = 0; i < ARRAY_SIZE(state.regs); ++i) {
+ if (REG_INVALID(i))
+ continue;
+ switch (state.regs[i].where) {
+ case Nowhere:
+ if (reg_info[i].width != sizeof(UNW_SP(frame))
+ || &FRAME_REG(i, __typeof__(UNW_SP(frame)))
+ != &UNW_SP(frame))
+ continue;
+ UNW_SP(frame) = cfa;
+ break;
+ case Register:
+ switch (reg_info[i].width) {
+#define CASE(n) case sizeof(u##n): \
+ FRAME_REG(i, u##n) = state.regs[i].value; \
+ break
+ CASES;
+#undef CASE
+ default:
+ dprintk(1, "Unsupported register size %u (%u).",
+ reg_info[i].width, i);
+ return -EIO;
+ }
+ break;
+ case Value:
+ if (reg_info[i].width != sizeof(unsigned long)) {
+ dprintk(1, "Unsupported value size %u (%u).",
+ reg_info[i].width, i);
+ return -EIO;
+ }
+ FRAME_REG(i, unsigned long) = cfa + state.regs[i].value
+ * state.dataAlign;
+ break;
+ case Memory: {
+ unsigned long addr = cfa + state.regs[i].value
+ * state.dataAlign;
+
+ if ((state.regs[i].value * state.dataAlign)
+ % sizeof(unsigned long)
+ || addr < startLoc
+ || addr + sizeof(unsigned long) < addr
+ || addr + sizeof(unsigned long) > endLoc) {
+ dprintk(1, "Bad memory location %lx (%lx).",
+ addr, state.regs[i].value);
+ return -EIO;
+ }
+ switch (reg_info[i].width) {
+#define CASE(n) case sizeof(u##n): \
+ if (probe_kernel_address(addr, \
+ FRAME_REG(i, u##n))) \
+ return -EFAULT; \
+ break
+ CASES;
+#undef CASE
+ default:
+ dprintk(1, "Unsupported memory size %u (%u).",
+ reg_info[i].width, i);
+ return -EIO;
+ }
+ }
+ break;
+ }
+ }
+
+ /* Sanity checks: alignment of the results and forward progress. */
+ if (UNW_PC(frame) % state.codeAlign
+ || UNW_SP(frame) % sleb128abs(state.dataAlign)) {
+ dprintk(1, "Output pointer(s) misaligned (%lx,%lx).",
+ UNW_PC(frame), UNW_SP(frame));
+ return -EIO;
+ }
+ if (pc == UNW_PC(frame) && sp == UNW_SP(frame)) {
+ dprintk(1, "No progress (%lx,%lx).", pc, sp);
+ return -EIO;
+ }
+
+ return 0;
+#undef CASES
+#undef FRAME_REG
+}
+EXPORT_SYMBOL_GPL(unwind);
+
+/*
+ * Initialize an unwind frame descriptor for task @tsk from the
+ * register snapshot @regs; the architecture hook fills in the
+ * per-arch register state. Always returns 0.
+ */
+int unwind_init_frame_info(struct unwind_frame_info *info,
+ struct task_struct *tsk,
+ /*const*/ struct pt_regs *regs)
+{
+ info->task = tsk;
+ info->call_frame = 0; /* not yet known to be at a call site */
+ arch_unw_init_frame_info(info, regs);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(unwind_init_frame_info);
+
+/*
+ * Prepare to unwind a blocked task. The architecture hook derives
+ * the initial register state from the task's saved context.
+ * Always returns 0.
+ */
+int unwind_init_blocked(struct unwind_frame_info *info,
+ struct task_struct *tsk)
+{
+ info->task = tsk;
+ info->call_frame = 0; /* not yet known to be at a call site */
+ arch_unw_init_blocked(info);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(unwind_init_blocked);
+
+/*
+ * Prepare to unwind the currently running thread.
+ * The architecture hook captures the live register state and then
+ * invokes @callback with @ops/@data; its return value is passed
+ * through to the caller.
+ */
+int unwind_init_running(struct unwind_frame_info *info,
+ asmlinkage unwind_callback_fn callback,
+ const struct stacktrace_ops *ops, void *data)
+{
+ info->task = current;
+ info->call_frame = 0; /* not yet known to be at a call site */
+
+ return arch_unwind_init_running(info, callback, ops, data);
+}
+EXPORT_SYMBOL_GPL(unwind_init_running);
+
+/*
+ * Unwind until the return pointer is in user-land (or until an error
+ * occurs). Returns 0 if successful, negative number in case of
+ * error.
+ */
+int unwind_to_user(struct unwind_frame_info *info)
+{
+ for (;;) {
+ int status;
+
+ /* Done as soon as the frame's PC is a user-mode address. */
+ if (arch_unw_user_mode(info))
+ return 0;
+ status = unwind(info);
+ if (status < 0)
+ return status;
+ }
+}
+EXPORT_SYMBOL_GPL(unwind_to_user);
larger and slower, but it gives very useful debugging information
in case of kernel bugs. (precise oopses/stacktraces/warnings)
+config UNWIND_INFO
+ bool "Compile the kernel with frame unwind information"
+ depends on !IA64 && !PARISC && !ARM
+ depends on !MODULES || !(MIPS || PPC || SUPERH || V850)
+ help
+ If you say Y here the resulting kernel image will be slightly larger
+ but not slower, and it will give very useful debugging information.
+ If you don't debug the kernel, you can say N, but we may not be able
+ to solve problems without frame unwind information or frame pointers.
+
+config STACK_UNWIND
+ bool "Stack unwind support"
+ depends on UNWIND_INFO
+ depends on X86
+ help
+ This enables more precise stack traces, omitting all unrelated
+ occurrences of pointers into kernel code from the dump.
+
config BOOT_PRINTK_DELAY
bool "Delay each boot printk message by N milliseconds"
depends on DEBUG_KERNEL && PRINTK && GENERIC_CALIBRATE_DELAY
depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
depends on !X86_64
select STACKTRACE
- select FRAME_POINTER if !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
+ select FRAME_POINTER if !PPC && !S390 && !MICROBLAZE && !X86 && !ARM_UNWIND
+ select UNWIND_INFO if X86 && !FRAME_POINTER
help
Provide stacktrace filter for fault-injection capabilities
depends on DEBUG_KERNEL
depends on STACKTRACE_SUPPORT
depends on PROC_FS
- select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
+ select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !X86 && !ARM_UNWIND
+ select UNWIND_INFO if X86 && !FRAME_POINTER
select KALLSYMS
select KALLSYMS_ALL
select STACKTRACE
va_end(args);
}
- pr_warn("%s: page allocation failure: order:%d, mode:0x%x\n",
+ if (!(gfp_mask & __GFP_WAIT)) {
+ pr_info("The following is only an harmless informational message.\n");
+ pr_info("Unless you get a _continuous_flood_ of these messages it means\n");
+ pr_info("everything is working fine. Allocations from irqs cannot be\n");
+ pr_info("perfectly reliable and the kernel is designed to handle that.\n");
+ }
+ pr_info("%s: page allocation failure. order:%d, mode:0x%x\n",
current->comm, order, gfp_mask);
dump_stack();
void grab_swap_token(struct mm_struct *mm)
{
int current_interval;
- unsigned int old_prio = mm->token_priority;
+ unsigned int old_prio;
static unsigned int global_faults;
static unsigned int last_aging;
global_faults++;
+ if (mm == NULL)
+ return;
+ old_prio = mm->token_priority;
current_interval = global_faults - mm->faultstamp;
if (!spin_trylock(&swap_token_lock))
index++;
}
cleancache_invalidate_inode(mapping);
+ /*
+ * Cycle the tree_lock to make sure all __delete_from_page_cache()
+ * calls run from page reclaim have finished as well (this handles the
+ * case when page reclaim took the last page from our range).
+ */
+ spin_lock_irq(&mapping->tree_lock);
+ spin_unlock_irq(&mapping->tree_lock);
}
EXPORT_SYMBOL(truncate_inode_pages_range);
if (!dev)
return -ENOMEM;
+ if (!try_module_get(THIS_MODULE)) {
+ free_netdev(dev);
+ return -ENOENT;
+ }
+
dev_net_set(dev, net);
res = register_netdev(dev);
- if (res)
+ if (res) {
free_netdev(dev);
+ module_put(THIS_MODULE);
+ }
return res;
}
br_dev_delete(dev, NULL);
rtnl_unlock();
+ if (ret == 0)
+ module_put(THIS_MODULE);
return ret;
}
To compile it as a module, choose M here. If unsure, say N.
+config NF_CONNTRACK_SLP
+ tristate "SLP protocol support"
+ depends on NF_CONNTRACK
+ depends on NETFILTER_ADVANCED
+ help
+ SLP queries are sometimes sent as broadcast messages from an
+ unprivileged port and responded to with unicast messages to the
+ same port. This makes them hard to firewall properly because connection
+ tracking doesn't deal with broadcasts. This helper tracks locally
+ originating broadcast SLP queries and the corresponding
+ responses. It relies on correct IP address configuration, specifically
+ netmask and broadcast address.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config NF_CT_NETLINK
tristate 'Connection tracking netlink interface'
select NETFILTER_NETLINK
obj-$(CONFIG_NF_CONNTRACK_SANE) += nf_conntrack_sane.o
obj-$(CONFIG_NF_CONNTRACK_SIP) += nf_conntrack_sip.o
obj-$(CONFIG_NF_CONNTRACK_TFTP) += nf_conntrack_tftp.o
+obj-$(CONFIG_NF_CONNTRACK_SLP) += nf_conntrack_slp.o
# transparent proxy support
obj-$(CONFIG_NETFILTER_TPROXY) += nf_tproxy_core.o
struct nf_conntrack_expect *exp);
EXPORT_SYMBOL_GPL(nf_nat_ftp_hook);
-static int try_rfc959(const char *, size_t, struct nf_conntrack_man *, char);
-static int try_eprt(const char *, size_t, struct nf_conntrack_man *, char);
+static int try_rfc959(const char *, size_t, struct nf_conntrack_man *,
+ char, unsigned int *);
+static int try_rfc1123(const char *, size_t, struct nf_conntrack_man *,
+ char, unsigned int *);
+static int try_eprt(const char *, size_t, struct nf_conntrack_man *,
+ char, unsigned int *);
static int try_epsv_response(const char *, size_t, struct nf_conntrack_man *,
- char);
+ char, unsigned int *);
static struct ftp_search {
const char *pattern;
char skip;
char term;
enum nf_ct_ftp_type ftptype;
- int (*getnum)(const char *, size_t, struct nf_conntrack_man *, char);
+ int (*getnum)(const char *, size_t, struct nf_conntrack_man *, char, unsigned int *);
} search[IP_CT_DIR_MAX][2] = {
[IP_CT_DIR_ORIGINAL] = {
{
{
.pattern = "227 ",
.plen = sizeof("227 ") - 1,
- .skip = '(',
- .term = ')',
.ftptype = NF_CT_FTP_PASV,
- .getnum = try_rfc959,
+ .getnum = try_rfc1123,
},
{
.pattern = "229 ",
i++;
else {
/* Unexpected character; true if it's the
- terminator and we're finished. */
- if (*data == term && i == array_size - 1)
+ terminator (or we don't care about one)
+ and we're finished. */
+ if ((*data == term || !term) && i == array_size - 1)
return len;
pr_debug("Char %u (got %u nums) `%u' unexpected\n",
/* Returns 0, or length of numbers: 192,168,1,1,5,6 */
static int try_rfc959(const char *data, size_t dlen,
- struct nf_conntrack_man *cmd, char term)
+ struct nf_conntrack_man *cmd, char term,
+ unsigned int *offset)
{
int length;
u_int32_t array[6];
return length;
}
+/*
+ * From RFC 1123:
+ * The format of the 227 reply to a PASV command is not
+ * well standardized. In particular, an FTP client cannot
+ * assume that the parentheses shown on page 40 of RFC-959
+ * will be present (and in fact, Figure 3 on page 43 omits
+ * them). Therefore, a User-FTP program that interprets
+ * the PASV reply must scan the reply for the first digit
+ * of the host and port numbers.
+ *
+ * Scan forward to the first digit and hand the rest to try_rfc959()
+ * with no skip/terminator requirement. Returns 0 if no digit is
+ * found, otherwise the length parsed by try_rfc959(); @offset is
+ * advanced by the number of bytes skipped.
+ */
+static int try_rfc1123(const char *data, size_t dlen,
+ struct nf_conntrack_man *cmd, char term,
+ unsigned int *offset)
+{
+ size_t i; /* was int: signed/unsigned mismatch against dlen */
+
+ for (i = 0; i < dlen; i++)
+ if (isdigit(data[i]))
+ break;
+
+ if (i == dlen)
+ return 0;
+
+ *offset += i;
+
+ /* term == 0 means "don't care about a terminator" downstream. */
+ return try_rfc959(data + i, dlen - i, cmd, 0, offset);
+}
+
/* Grab port: number up to delimiter */
static int get_port(const char *data, int start, size_t dlen, char delim,
__be16 *port)
/* Returns 0, or length of numbers: |1|132.235.1.2|6275| or |2|3ffe::1|6275| */
static int try_eprt(const char *data, size_t dlen, struct nf_conntrack_man *cmd,
- char term)
+ char term, unsigned int *offset)
{
char delim;
int length;
/* Returns 0, or length of numbers: |||6446| */
static int try_epsv_response(const char *data, size_t dlen,
- struct nf_conntrack_man *cmd, char term)
+ struct nf_conntrack_man *cmd, char term,
+ unsigned int *offset)
{
char delim;
unsigned int *numlen,
struct nf_conntrack_man *cmd,
int (*getnum)(const char *, size_t,
- struct nf_conntrack_man *, char))
+ struct nf_conntrack_man *, char,
+ unsigned int *))
{
- size_t i;
+ size_t i = plen;
pr_debug("find_pattern `%s': dlen = %Zu\n", pattern, dlen);
if (dlen == 0)
pr_debug("Pattern matches!\n");
/* Now we've found the constant string, try to skip
to the 'skip' character */
- for (i = plen; data[i] != skip; i++)
- if (i == dlen - 1) return -1;
+ if (skip) {
+ for (i = plen; data[i] != skip; i++)
+ if (i == dlen - 1) return -1;
- /* Skip over the last character */
- i++;
+ /* Skip over the last character */
+ i++;
+ }
pr_debug("Skipped up to `%c'!\n", skip);
*numoff = i;
- *numlen = getnum(data + i, dlen - i, cmd, term);
+ *numlen = getnum(data + i, dlen - i, cmd, term, numoff);
if (!*numlen)
return -1;
--- /dev/null
+/*
+ * NetBIOS name service broadcast connection tracking helper
+ *
+ * (c) 2007 Jiri Bohac <jbohac@suse.cz>
+ * (c) 2005 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+/*
+ * This helper tracks locally originating NetBIOS name service
+ * requests by issuing permanent expectations (valid until
+ * timing out) matching all reply connections from the
+ * destination network. The only NetBIOS specific thing is
+ * actually the port number.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/if_addr.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/netfilter.h>
+#include <net/route.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+
+#define SLP_PORT 427
+
+MODULE_AUTHOR("Jiri Bohac <jbohac@suse.cz>");
+MODULE_DESCRIPTION("SLP broadcast connection tracking helper");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ip_conntrack_slp");
+
+static unsigned int timeout __read_mostly = 3;
+module_param(timeout, uint, 0400);
+MODULE_PARM_DESC(timeout, "timeout for master connection/replies in seconds");
+
+/*
+ * Conntrack helper hook, invoked for tracked SLP (UDP/427) packets.
+ * For a locally generated query sent to a broadcast/multicast
+ * destination it installs a permanent (until timeout) expectation
+ * matching replies from within the local subnet, so those replies are
+ * associated with this connection. The packet is always accepted.
+ */
+static int help(struct sk_buff *skb, unsigned int protoff,
+ struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+{
+ struct nf_conntrack_expect *exp;
+ struct rtable *rt = skb_rtable(skb);
+ struct in_device *in_dev;
+ __be32 mask = 0;
+ __be32 src = 0;
+
+ /* we're only interested in locally generated packets */
+ if (skb->sk == NULL)
+ goto out;
+ if (rt == NULL || !(rt->rt_flags & (RTCF_MULTICAST|RTCF_BROADCAST)))
+ goto out;
+ if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
+ goto out;
+
+ /* Take netmask and broadcast address from the first primary
+ * address configured on the output device. */
+ rcu_read_lock();
+ in_dev = __in_dev_get_rcu(rt->dst.dev);
+ if (in_dev != NULL) {
+ for_primary_ifa(in_dev) {
+ /* this is a hack as slp uses multicast we can't match
+ * the destination address to some broadcast address. So
+ * just take the first one. Better would be to install
+ * expectations for all addresses */
+ mask = ifa->ifa_mask;
+ src = ifa->ifa_broadcast;
+ break;
+ } endfor_ifa(in_dev);
+ }
+ rcu_read_unlock();
+
+ if (mask == 0 || src == 0)
+ goto out;
+
+ exp = nf_ct_expect_alloc(ct);
+ if (exp == NULL)
+ goto out;
+
+ /* Expect replies based on the REPLY tuple, but with source
+ * address masked to the subnet and source port fixed to 427. */
+ exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+ exp->tuple.src.u3.ip = src;
+ exp->tuple.src.u.udp.port = htons(SLP_PORT);
+
+ exp->mask.src.u3.ip = mask;
+ exp->mask.src.u.udp.port = htons(0xFFFF);
+
+ exp->expectfn = NULL;
+ exp->flags = NF_CT_EXPECT_PERMANENT;
+ exp->class = NF_CT_EXPECT_CLASS_DEFAULT;
+ exp->helper = NULL;
+
+ nf_ct_expect_related(exp);
+ nf_ct_expect_put(exp);
+
+ /* Re-arm the master connection with the (short) module timeout. */
+ nf_ct_refresh(ct, skb, timeout * HZ);
+out:
+ return NF_ACCEPT;
+}
+
+/* Allow at most one outstanding expectation per master connection. */
+static struct nf_conntrack_expect_policy exp_policy = {
+ .max_expected = 1,
+};
+
+/* Bind the helper to IPv4 UDP traffic on the SLP port (427). */
+static struct nf_conntrack_helper helper __read_mostly = {
+ .name = "slp",
+ .tuple.src.l3num = AF_INET,
+ .tuple.src.u.udp.port = __constant_htons(SLP_PORT),
+ .tuple.dst.protonum = IPPROTO_UDP,
+ .me = THIS_MODULE,
+ .help = help,
+ .expect_policy = &exp_policy,
+};
+
+/* Register the SLP helper; propagates any registration error. */
+static int __init nf_conntrack_slp_init(void)
+{
+ /* expectation lifetime follows the module's timeout parameter */
+ exp_policy.timeout = timeout;
+ return nf_conntrack_helper_register(&helper);
+}
+
+/* Unregister the helper on module unload. */
+static void __exit nf_conntrack_slp_fini(void)
+{
+ nf_conntrack_helper_unregister(&helper);
+}
+
+module_init(nf_conntrack_slp_init);
+module_exit(nf_conntrack_slp_fini);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_status);
+/**
+ * rpc_wake_up_softconn_status - wake up all SOFTCONN rpc_tasks and set their
+ * status value.
+ * @queue: rpc_wait_queue on which the tasks are sleeping
+ * @status: status value to set
+ *
+ * Grabs queue->lock
+ */
+void rpc_wake_up_softconn_status(struct rpc_wait_queue *queue, int status)
+{
+ struct list_head *head;
+ struct rpc_task *task, *safe;
+
+ spin_lock_bh(&queue->lock);
+ /* Walk every priority level, highest priority first. */
+ head = &queue->tasks[queue->maxpriority];
+ do {
+ list_for_each_entry_safe(task, safe, head, u.tk_wait.list) {
+ if (!RPC_IS_SOFTCONN(task))
+ continue;
+ task->tk_status = status;
+ rpc_wake_up_task_queue_locked(queue, task);
+ }
+ } while (head-- != &queue->tasks[0]);
+ spin_unlock_bh(&queue->lock);
+}
+EXPORT_SYMBOL_GPL(rpc_wake_up_softconn_status);
+
static void __rpc_queue_timer_fn(unsigned long ptr)
{
struct rpc_wait_queue *queue = (struct rpc_wait_queue *)ptr;
case -ECONNREFUSED:
case -ECONNRESET:
case -ENETUNREACH:
- /* retry with existing socket, after a delay */
+ /* Retry with existing socket after a delay, except
+ * for SOFTCONN tasks which fail. */
+ xprt_clear_connecting(xprt);
+ rpc_wake_up_softconn_status(&xprt->pending, status);
+ return;
case 0:
case -EINPROGRESS:
case -EALREADY:
$(CPP) -D__GENKSYMS__ $(c_flags) $< | \
$(GENKSYMS) $(if $(1), -T $(2)) -a $(ARCH) \
$(if $(KBUILD_PRESERVE),-p) \
+ $(if $(KBUILD_OVERRIDE),-o) \
-r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null))
quiet_cmd_cc_symtypes_c = SYM $(quiet_modtag) $@
# Built-in and composite module parts
$(obj)/%.o: $(src)/%.c $(recordmcount_source) FORCE
$(call cmd,force_checksrc)
+ $(call cmd,force_check_kmsg)
$(call if_changed_rule,cc_o_c)
# Single-part modules are special since we need to mark them in $(MODVERDIR)
$(single-used-m): $(obj)/%.o: $(src)/%.c $(recordmcount_source) FORCE
$(call cmd,force_checksrc)
+ $(call cmd,force_check_kmsg)
$(call if_changed_rule,cc_o_c)
@{ echo $(@:.o=.ko); echo $@; } > $(MODVERDIR)/$(@F:.o=.mod)
targets += $(multi-used-y) $(multi-used-m)
+# kmsg check tool
+ifneq ($(KBUILD_KMSG_CHECK),0)
+ ifeq ($(KBUILD_KMSG_CHECK),2)
+ kmsg_cmd := print
+ quiet_cmd_force_check_kmsg = KMSG_PRINT $<
+ $(shell [ -d $(objtree)/man ] || mkdir -p $(objtree)/man)
+ else
+ kmsg_cmd := check
+ quiet_cmd_force_check_kmsg = KMSG_CHECK $<
+ endif
+ cmd_force_check_kmsg = $(KMSG_CHECK) $(kmsg_cmd) $(CC) $(c_flags) $< ;
+endif
# Descending
# ---------------------------------------------------------------------------
$(if $(KBUILD_EXTMOD),-o $(modulesymfile)) \
$(if $(CONFIG_DEBUG_SECTION_MISMATCH),,-S) \
$(if $(KBUILD_EXTMOD)$(KBUILD_MODPOST_WARN),-w) \
- $(if $(cross_build),-c)
+ $(if $(cross_build),-c) \
+ $(if $(CONFIG_ENTERPRISE_SUPPORT), \
+ -N $(firstword $(wildcard $(dir $(MODVERDIR))/Module.supported \
+ $(objtree)/Module.supported \
+ $(srctree)/Module.supported /dev/null)))
quiet_cmd_modpost = MODPOST $(words $(filter-out vmlinux FORCE, $^)) modules
cmd_modpost = $(modpost) -s
int in_source_file;
static int flag_debug, flag_dump_defs, flag_reference, flag_dump_types,
- flag_preserve, flag_warnings;
+ flag_override, flag_preserve, flag_warnings;
static const char *arch = "";
static const char *mod_prefix = "";
sym->is_declared = 1;
return sym;
} else if (!sym->is_declared) {
- if (sym->is_override && flag_preserve) {
+ if (sym->is_override && flag_override) {
print_location();
fprintf(stderr, "ignoring ");
print_type_name(type, name);
struct symbol *n = sym->expansion_trail;
if (sym->status != STATUS_UNCHANGED) {
+ int fail = sym->is_override && flag_preserve;
+
if (!has_changed) {
print_location();
fprintf(stderr, "%s: %s: modversion "
"changed because of changes "
- "in ", flag_preserve ? "error" :
+ "in ", fail ? "error" :
"warning", name);
} else
fprintf(stderr, ", ");
if (sym->status == STATUS_DEFINED)
fprintf(stderr, " (became defined)");
has_changed = 1;
- if (flag_preserve)
+ if (fail)
errors++;
}
sym->expansion_trail = 0;
" -D, --dump Dump expanded symbol defs (for debugging only)\n"
" -r, --reference file Read reference symbols from a file\n"
" -T, --dump-types file Dump expanded types into file\n"
+ " -o, --override Allow to override reference modversions\n"
" -p, --preserve Preserve reference modversions or fail\n"
" -w, --warnings Enable warnings\n"
" -q, --quiet Disable warnings (default)\n"
" -D Dump expanded symbol defs (for debugging only)\n"
" -r file Read reference symbols from a file\n"
" -T file Dump expanded types into file\n"
+ " -o Allow to override reference modversions\n"
" -p Preserve reference modversions or fail\n"
" -w Enable warnings\n"
" -q Disable warnings (default)\n"
{"reference", 1, 0, 'r'},
{"dump-types", 1, 0, 'T'},
{"preserve", 0, 0, 'p'},
+ {"override", 0, 0, 'o'},
{"version", 0, 0, 'V'},
{"help", 0, 0, 'h'},
{0, 0, 0, 0}
};
- while ((o = getopt_long(argc, argv, "a:dwqVDr:T:ph",
+ while ((o = getopt_long(argc, argv, "a:dwqVDr:T:oph",
&long_opts[0], NULL)) != EOF)
#else /* __GNU_LIBRARY__ */
- while ((o = getopt(argc, argv, "a:dwqVDr:T:ph")) != EOF)
+ while ((o = getopt(argc, argv, "a:dwqVDr:T:oph")) != EOF)
#endif /* __GNU_LIBRARY__ */
switch (o) {
case 'a':
return 1;
}
break;
+ case 'o':
+ flag_override = 1;
+ break;
case 'p':
+ flag_override = 1;
flag_preserve = 1;
break;
case 'h':
allnoconfig allyesconfig allmodconfig alldefconfig randconfig: $(obj)/conf
$< --$@ $(Kconfig)
+
+UNAME_RELEASE := $(shell uname -r)
+CLONECONFIG := $(firstword $(wildcard /proc/config.gz \
+ /lib/modules/$(UNAME_RELEASE)/.config \
+ /etc/kernel-config \
+ /boot/config-$(UNAME_RELEASE)))
+cloneconfig: $(obj)/conf
+ $(Q)case "$(CLONECONFIG)" in \
+ '') echo -e "The configuration of the running" \
+ "kernel could not be determined\n"; \
+ false ;; \
+ *.gz) gzip -cd $(CLONECONFIG) > .config.running ;; \
+ *) cat $(CLONECONFIG) > .config.running ;; \
+ esac && \
+ echo -e "Cloning configuration file $(CLONECONFIG)\n"
+ $(Q)$< --defconfig=.config.running arch/$(SRCARCH)/Kconfig
+
PHONY += listnewconfig oldnoconfig savedefconfig defconfig
--- /dev/null
+#!/usr/bin/perl -w
+#
+# kmsg kernel messages check and print tool.
+#
+# To check the source code for missing messages the script is called
+# with check, the name compiler and the compile parameters
+# kmsg-doc check $(CC) $(c_flags) $<
+# To create man pages for the messages the script is called with
+# kmsg-doc print $(CC) $(c_flags) $<
+#
+# Copyright IBM Corp. 2008
+# Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+# Michael Holzheu <holzheu@linux.vnet.ibm.com>
+#
+
+use Cwd;
+use bigint;
+
+my $errors = 0;
+my $warnings = 0;
+my $srctree = "";
+my $objtree = "";
+my $kmsg_count = 0;
+
+# remove_quotes($string) - concatenate the contents of all double-quoted
+# sections of $string, dropping the quotes themselves.  A quote preceded
+# by a backslash does not toggle the in-string state, so escaped quotes
+# inside a literal are kept as part of the text.
+sub remove_quotes($)
+{
+	my ($string) = @_;
+	my $inside = 0;
+	my $slash = 0;
+	my $result = "";
+
+	foreach my $str (split(/([\\"])/, $string)) {
+		if ($inside && ($str ne "\"" || $slash)) {
+			$result .= $str;
+		}
+		# Check for backslash before quote
+		if ($str eq "\"") {
+			if (!$slash) {
+				$inside = !$inside;
+			}
+			$slash = 0;
+		} elsif ($str eq "\\") {
+			$slash = !$slash;
+		} elsif ($str ne "") {
+			$slash = 0;
+		}
+	}
+	return $result;
+}
+
+# string_to_bytes($string) - convert the body of a C string literal into
+# a list of byte values, interpreting the simple C99 backslash escapes
+# listed in %is_escape.  Numeric escapes (\nnn, \xhh) are not supported
+# and abort the run via die().
+sub string_to_bytes($)
+{
+	my ($string) = @_;
+	my %is_escape = ('"', 0x22, '\'', 0x27, 'n', 0x0a, 'r', 0x0d, 'b', 0x08,
+			 't', 0x09, 'f', 0x0c, 'a', 0x07, 'v', 0x0b, '?', 0x3f);
+	my (@ar, $slash, $len);
+
+	# scan string, interpret backslash escapes and write bytes to @ar
+	$len = 0;
+	foreach my $ch (split(//, $string)) {
+		if ($ch eq '\\') {
+			$slash = !$slash;
+			if (!$slash) {
+				$ar[$len] = ord('\\');
+				$len++;
+			}
+		} elsif ($slash && defined $is_escape{$ch}) {
+			# C99 backslash escapes: \\ \" \' \n \r \b \t \f \a \v \?
+			$ar[$len] = $is_escape{$ch};
+			$len++;
+			$slash = 0;
+		} elsif ($slash) {
+			# FIXME: C99 backslash escapes \nnn \xhh
+			die("Unknown backslash escape in message $string.");
+		} else {
+			# normal character
+			$ar[$len] = ord($ch);
+			$len++;
+		}
+	}
+	return @ar;
+}
+
+# calc_jhash($string) - compute a 32-bit hash of the message text.
+# The 0x9e3779b9 golden-ratio constant and the mix rounds below follow
+# the Jenkins lookup hash; presumably this mirrors the kernel's jhash so
+# the tag computed here matches the runtime one — TODO confirm against
+# the kernel implementation.  Returns the final accumulator $c.
+sub calc_jhash($)
+{
+	my ($string) = @_;
+	my @ar;
+	my ($a, $b, $c, $i, $length, $len);
+
+	@ar = string_to_bytes($string);
+	$length = @ar;
+	# add dummy elements to @ar to avoid if then else hell
+	push @ar, (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	$a = 0x9e3779b9;
+	$b = 0x9e3779b9;
+	$c = 0;
+	$i = 0;
+	for ($len = $length + 12; $len >= 12; $len -= 12) {
+		if ($len < 24) {
+			# add length for last round
+			$c += $length;
+		}
+		$a += $ar[$i] + ($ar[$i+1]<<8) + ($ar[$i+2]<<16) + ($ar[$i+3]<<24);
+		$b += $ar[$i+4] + ($ar[$i+5]<<8) + ($ar[$i+6]<<16) + ($ar[$i+7]<<24);
+		if ($len >= 24) {
+			$c += $ar[$i+8] + ($ar[$i+9]<<8) + ($ar[$i+10]<<16) + ($ar[$i+11]<<24);
+		} else {
+			# last (short) round: length already added to $c above
+			$c += ($ar[$i+8]<<8) + ($ar[$i+9]<<16) + ($ar[$i+10]<<24);
+		}
+		# masking to 32 bits emulates C unsigned arithmetic under bigint
+		$a &= 0xffffffff; $b &= 0xffffffff; $c &= 0xffffffff;
+		$a -= $b; $a -= $c; $a ^= ($c >> 13); $a &= 0xffffffff;
+		$b -= $c; $b -= $a; $b ^= ($a << 8); $b &= 0xffffffff;
+		$c -= $a; $c -= $b; $c ^= ($b >> 13); $c &= 0xffffffff;
+		$a -= $b; $a -= $c; $a ^= ($c >> 12); $a &= 0xffffffff;
+		$b -= $c; $b -= $a; $b ^= ($a << 16); $b &= 0xffffffff;
+		$c -= $a; $c -= $b; $c ^= ($b >> 5); $c &= 0xffffffff;
+		$a -= $b; $a -= $c; $a ^= ($c >> 3); $a &= 0xffffffff;
+		$b -= $c; $b -= $a; $b ^= ($a << 10); $b &= 0xffffffff;
+		$c -= $a; $c -= $b; $c ^= ($b >> 15); $c &= 0xffffffff;
+		$i += 12;
+	}
+	return $c;
+}
+
+# add_kmsg_desc($component, $text, $sev, $argv, $desc, $user) - record a
+# message description in the global %kmsg_desc under the tag
+# "<component>.<hash>", where <hash> is 6 hex digits taken from the
+# jhash of the unquoted message text.  A duplicate tag is reported
+# (hash collision or duplicate description) and counted as an error.
+sub add_kmsg_desc($$$$$$)
+{
+	my ($component, $text, $sev, $argv, $desc, $user) = @_;
+	my ($hash, $tag);
+
+	$text = remove_quotes($text);
+	$hash = substr(sprintf("%08x", calc_jhash($text)), 2, 6);
+	$tag = $component . "." . $hash;
+
+	if ($kmsg_desc{$tag}) {
+		if ($text ne $kmsg_desc{$tag}->{'TEXT'}) {
+			warn "Duplicate message with tag $tag\n";
+			warn "  --- $kmsg_desc{$tag}->{'TEXT'}\n";
+			warn "  +++ $text\n";
+		} else {
+			warn "Duplicate message description for \"$text\"\n";
+		}
+		$errors++;
+		return;
+	}
+	$kmsg_desc{$tag}->{'TEXT'} = $text;
+	$kmsg_desc{$tag}->{'SEV'} = $sev;
+	$kmsg_desc{$tag}->{'ARGV'} = $argv;
+	$kmsg_desc{$tag}->{'DESC'} = $desc;
+	$kmsg_desc{$tag}->{'USER'} = $user;
+}
+
+# add_kmsg_print($component, $sev, $text, $argv) - record one message
+# that the compiled source actually prints, in the global %kmsg_print
+# indexed by $kmsg_count.  The tag is derived the same way as in
+# add_kmsg_desc() so the two tables can be matched later.
+sub add_kmsg_print($$$$)
+{
+	my ($component, $sev, $text, $argv) = @_;
+	my ($hash, $tag, $count, $parm);
+
+	$text = remove_quotes($text);
+	$hash = substr(sprintf("%08x", calc_jhash($text)), 2, 6);
+	$tag = $component . "." . $hash;
+
+	# Pretty print severity: map the printk "<n>" prefix to a name
+	$sev =~ s/"<0>"/Emerg/;
+	$sev =~ s/"<1>"/Alert/;
+	$sev =~ s/"<2>"/Critical/;
+	$sev =~ s/"<3>"/Error/;
+	$sev =~ s/"<4>"/Warning/;
+	$sev =~ s/"<5>"/Notice/;
+	$sev =~ s/"<6>"/Informational/;
+	$sev =~ s/"<7>"/Debug/;
+	$kmsg_print{$kmsg_count}->{'TAG'} = $tag;
+	$kmsg_print{$kmsg_count}->{'TEXT'} = $text;
+	$kmsg_print{$kmsg_count}->{'SEV'} = $sev;
+	$kmsg_print{$kmsg_count}->{'ARGV'} = $argv;
+	$kmsg_count += 1;
+}
+
+# process_source_file($component, $file) - scan one file for kmsg
+# message descriptions.
+#
+# Reads $file line by line and collects /*? ... */ kmsg comment blocks
+# into %kmsg_desc via add_kmsg_desc().  Also picks up
+# '#define KMSG_COMPONENT "<name>"' to learn the component name.
+# Returns the (possibly updated) component name, or "" if the file
+# cannot be opened.
+sub process_source_file($$)
+{
+	my ($component, $file) = @_;
+	my $state;
+	my ($text, $sev, $argv, $desc, $user);
+
+	if (!open(FD, "$file")) {
+		return "";
+	}
+
+	# Parser states:
+	#   0: outside any kmsg comment
+	#   1: in comment header (Text / Severity / first keyword line)
+	#   2: collecting Parameter lines
+	#   3: collecting Description lines
+	#   4: collecting User action lines
+	$state = 0;
+	while (<FD>) {
+		chomp;
+		# kmsg message component: #define KMSG_COMPONENT "<component>"
+		if (/^#define\s+KMSG_COMPONENT\s+\"(.*)\"[^\"]*$/o) {
+			$component = $1;
+		}
+		if ($state == 0) {
+			# single line kmsg for undocumented messages, format:
+			# /*? Text: "<message>" */
+			if (/^\s*\/\*\?\s*Text:\s*(\".*\")\s*\*\/\s*$/o) {
+				add_kmsg_desc($component, $1, "", "", "", "");
+			}
+			# kmsg message start: '/*?'
+			if (/^\s*\/\*\?\s*$/o) {
+				$state = 1;
+				($text, $sev, $argv, $desc, $user) = ( "", "", "", "", "" );
+			}
+		} elsif ($state == 1) {
+			# kmsg message end: ' */'
+			if (/^\s*\*\/\s*/o) {
+				add_kmsg_desc($component, $text, $sev, $argv, $desc, $user);
+				$state = 0;
+			}
+			# kmsg message text: ' * Text: "<message>"'
+			elsif (/^\s*\*\s*Text:\s*(\".*\")\s*$/o) {
+				$text = $1;
+			}
+			# kmsg message severity: ' * Severity: <sev>'
+			elsif (/^\s*\*\s*Severity:\s*(\S*)\s*$/o) {
+				$sev = $1;
+			}
+			# kmsg message parameter: ' * Parameter: <argv>'
+			elsif (/^\s*\*\s*Parameter:\s*(\S*)\s*$/o) {
+				if (!defined($1)) {
+					$argv = "";
+				} else {
+					$argv = $1;
+				}
+				$state = 2;
+			}
+			# kmsg message description start: ' * Description:'
+			elsif (/^\s*\*\s*Description:\s*(\S*)\s*$/o) {
+				if (!defined($1)) {
+					$desc = "";
+				} else {
+					$desc = $1;
+				}
+				$state = 3;
+			}
+			# kmsg has unrecognizable lines
+			else {
+				warn "Warning(${file}:$.): Cannot understand $_";
+				$warnings++;
+				$state = 0;
+			}
+		} elsif ($state == 2) {
+			# kmsg message end: ' */'
+			if (/^\s*\*\//o) {
+				warn "Warning(${file}:$.): Missing description, skipping message";
+				$warnings++;
+				$state = 0;
+			}
+			# kmsg message description start: ' * Description:'
+			elsif (/^\s*\*\s*Description:\s*$/o) {
+				# The regex above has no capture group; assigning
+				# $1 here would pick up a stale value from the
+				# previous successful match.  Start empty instead.
+				$desc = "";
+				$state = 3;
+			}
+			# kmsg message parameter line: ' * <argv>'
+			elsif (/^\s*\*(.*)$/o) {
+				$argv .= "\n" . $1;
+			} else {
+				warn "Warning(${file}:$.): Cannot understand $_";
+				$warnings++;
+				$state = 0;
+			}
+		} elsif ($state == 3) {
+			# kmsg message end: ' */'
+			if (/^\s*\*\/\s*/o) {
+				add_kmsg_desc($component, $text, $sev, $argv, $desc, $user);
+				$state = 0;
+			}
+			# kmsg message user action start: ' * User action:'
+			elsif (/^\s*\*\s*User action:\s*$/o) {
+				# No capture group here either; $1 would be stale.
+				$user = "";
+				$state = 4;
+			}
+			# kmsg message description line: ' * <text>'
+			elsif (/^\s*\*\s*(.*)$/o) {
+				$desc .= "\n" . $1;
+			} else {
+				warn "Warning(${file}:$.): Cannot understand $_";
+				$warnings++;
+				$state = 0;
+			}
+		} elsif ($state == 4) {
+			# kmsg message end: ' */'
+			if (/^\s*\*\/\s*/o) {
+				add_kmsg_desc($component, $text, $sev, $argv, $desc, $user);
+				$state = 0;
+			}
+			# kmsg message user action line: ' * <text>'
+			elsif (/^\s*\*\s*(.*)$/o) {
+				$user .= "\n" . $1;
+			} else {
+				warn "Warning(${file}:$.): Cannot understand $_";
+				$warnings++;
+				$state = 0;
+			}
+		}
+	}
+	return $component;
+}
+
+# process_cpp_file($cc, $options, $file, $component) - run the compiler
+# in preprocessor mode and collect every __KMSG_PRINT/__KMSG_DEV marker
+# that the kmsg macros expand to, recording each via add_kmsg_print().
+# NOTE(review): the $options parameter is unused; the command line is
+# built from the global $gcc_options instead — confirm this is intended.
+sub process_cpp_file($$$$)
+{
+	my ($cc, $options, $file, $component) = @_;
+
+	open(FD, "$cc $gcc_options|") or die ("Preprocessing failed.");
+
+	while (<FD>) {
+		chomp;
+		if (/.*__KMSG_PRINT\(\s*(\S*)\s*_FMT_(.*)_ARGS_\s*"(.*)"\s*_END_\s*\)/o) {
+			if ($component ne "") {
+				add_kmsg_print($component, $1, $2, $3);
+			} else {
+				warn "Error(${file}:$.): kmsg without component\n";
+				$errors++;
+			}
+		} elsif (/.*__KMSG_DEV\(\s*(\S*)\s*_FMT_(.*)_ARGS_\s*(.*)?_END_\s*\)/o) {
+			if ($component ne "") {
+				# device messages get a "%s: " device-name prefix
+				add_kmsg_print($component, $1, "\"%s: \"" . $2, $3);
+			} else {
+				warn "Error(${file}:$.): kmsg without component\n";
+				$errors++;
+			}
+		}
+	}
+}
+
+# check_messages($component) - verify that every printed message has a
+# matching description in %kmsg_desc.  For a missing one, a placeholder
+# description is added (marked with CHECK for print_templates) and an
+# error is counted.  Severity mismatches only warn.
+# Returns 1 if any description was missing, 0 otherwise.
+sub check_messages($)
+{
+	my $component = "@_";
+	my $failed = 0;
+
+	for ($i = 0; $i < $kmsg_count; $i++) {
+		$tag = $kmsg_print{$i}->{'TAG'};
+		if (!defined($kmsg_desc{$tag})) {
+			add_kmsg_desc($component,
+				      "\"" . $kmsg_print{$i}->{'TEXT'} . "\"",
+				      $kmsg_print{$i}->{'SEV'},
+				      $kmsg_print{$i}->{'ARGV'},
+				      "Please insert description here",
+				      "What is the user supposed to do");
+			$kmsg_desc{$tag}->{'CHECK'} = 1;
+			$failed = 1;
+			warn "$component: Missing description for: ".
+				$kmsg_print{$i}->{'TEXT'}."\n";
+			$errors++;
+			next;
+		}
+		if ($kmsg_desc{$tag}->{'SEV'} ne "" &&
+		    $kmsg_desc{$tag}->{'SEV'} ne $kmsg_print{$i}->{'SEV'}) {
+			warn "Message severity mismatch for \"$kmsg_print{$i}->{'TEXT'}\"\n";
+			warn "  --- $kmsg_desc{$tag}->{'SEV'}\n";
+			warn "  +++ $kmsg_print{$i}->{'SEV'}\n";
+		}
+	}
+	return $failed;
+}
+
+# print_templates() - print skeleton kmsg comment blocks for every
+# message that check_messages() flagged as missing a description
+# (entries carrying the CHECK mark).
+# NOTE(review): the sort block compares hash references numerically,
+# which orders entries by reference address rather than any meaningful
+# key — probably meant to sort by tag; confirm.
+sub print_templates()
+{
+	print "Templates for missing messages:\n";
+	foreach $tag ( sort { $kmsg_desc{$a} <=> $kmsg_desc{$b} } keys %kmsg_desc ) {
+		if (!defined($kmsg_desc{$tag}->{'CHECK'})) {
+			next;
+		}
+		print "/*?\n";
+		print " * Text: \"$kmsg_desc{$tag}->{'TEXT'}\"\n";
+		print " * Severity: $kmsg_desc{$tag}->{'SEV'}\n";
+		$argv = $kmsg_desc{$tag}->{'ARGV'};
+		if ($argv ne "") {
+			print " * Parameter:\n";
+			@parms = split(/\s*,\s*/,$kmsg_desc{$tag}->{'ARGV'});
+			$count = 0;
+			foreach $parm (@parms) {
+				$count += 1;
+				if (!($parm eq "")) {
+					print " *   \@$count: $parm\n";
+				}
+			}
+		}
+		print " * Description:\n";
+		print " * $kmsg_desc{$tag}->{'DESC'}\n";
+		print " * User action:\n";
+		print " * $kmsg_desc{$tag}->{'USER'}\n";
+		print " */\n\n";
+	}
+}
+
+# write_man_pages() - emit one troff man page (section 9) per documented
+# printed message into $objtree/man/<tag>.9.  Messages without a
+# description, or carrying only the auto-generated CHECK placeholder,
+# are skipped.
+sub write_man_pages()
+{
+	my ($i, $file);
+
+	for ($i = 0; $i < $kmsg_count; $i++) {
+		$tag = $kmsg_print{$i}->{'TAG'};
+		if (!defined($kmsg_desc{$tag}) ||
+		    defined($kmsg_desc{$tag}->{'CHECK'}) ||
+		    $kmsg_desc{$tag}->{'DESC'} eq "") {
+			next;
+		}
+		$file = $objtree . "man/" . $tag . ".9";
+		if (!open(WR, ">$file")) {
+			warn "Error: Cannot open file $file\n";
+			$errors++;
+			return;
+		}
+		print WR ".TH \"$tag\" 9 \"Linux Messages\" LINUX\n";
+		print WR ".SH Message\n";
+		print WR $tag . ": " . $kmsg_desc{$tag}->{'TEXT'} . "\n";
+		print WR ".SH Severity\n";
+		print WR "$kmsg_desc{$tag}->{'SEV'}\n";
+		$argv = $kmsg_desc{$tag}->{'ARGV'};
+		if ($argv ne "") {
+			print WR ".SH Parameters\n";
+			@parms = split(/\s*\n\s*/,$kmsg_desc{$tag}->{'ARGV'});
+			foreach $parm (@parms) {
+				$parm =~ s/^\s*(.*)\s*$/$1/;
+				if (!($parm eq "")) {
+					print WR "$parm\n\n";
+				}
+			}
+		}
+		# DESC/USER already begin with "\n" from the accumulation
+		# in process_source_file, so no newline after .SH here.
+		print WR ".SH Description";
+		print WR "$kmsg_desc{$tag}->{'DESC'}\n";
+		$user = $kmsg_desc{$tag}->{'USER'};
+		if ($user ne "") {
+			print WR ".SH User action";
+			print WR "$user\n";
+		}
+	}
+}
+
+# Determine source/object tree prefixes.  Both are used later as plain
+# string prefixes ($srctree . "Documentation/kmsg/", $objtree . "man/"),
+# so they must end with '/': the environment branches append one, and
+# the getcwd fallbacks must do the same (getcwd returns no trailing
+# slash), otherwise the constructed paths are broken.
+if (defined($ENV{'srctree'})) {
+	$srctree = "$ENV{'srctree'}" . "/";
+} else {
+	$srctree = getcwd . "/";
+}
+
+if (defined($ENV{'objtree'})) {
+	$objtree = "$ENV{'objtree'}" . "/";
+} else {
+	$objtree = getcwd . "/";
+}
+
+if (defined($ENV{'SRCARCH'})) {
+	$srcarch = "$ENV{'SRCARCH'}" . "/";
+} else {
+	print "kmsg-doc called without a valid \$SRCARCH\n";
+	exit 1;
+}
+
+$option = shift;
+
+$cc = shift;
+$gcc_options = "-E -D __KMSG_CHECKER ";
+foreach $tmp (@ARGV) {
+ $tmp =~ s/\(/\\\(/;
+ $tmp =~ s/\)/\\\)/;
+ $gcc_options .= " $tmp";
+ $filename = $tmp;
+}
+
+$component = process_source_file("", $filename);
+if ($component ne "") {
+ process_source_file($component, $srctree . "Documentation/kmsg/" .
+ $srcarch . $component);
+ process_source_file($component, $srctree . "Documentation/kmsg/" .
+ $component);
+}
+
+process_cpp_file($cc, $gcc_options, $filename, $component);
+if ($option eq "check") {
+ if (check_messages($component)) {
+ print_templates();
+ }
+} elsif ($option eq "print") {
+ write_man_pages();
+}
+
+exit($errors);
}
}
+/* Contents and size of the Module.supported list, loaded by
+ * read_supported(); NULL/0 when no list was given or it was
+ * unreadable. */
+void *supported_file;
+unsigned long supported_size;
+
+/*
+ * Look up @mod in the Module.supported list.
+ *
+ * Each line names one module; directory components and a trailing .ko
+ * are stripped before comparing against the module's basename.  An
+ * optional whitespace-separated type-of-support flag may follow the
+ * name.  Returns that flag ("yes" when none was given), or NULL if the
+ * module is not listed.
+ */
+static const char *supported(struct module *mod)
+{
+	unsigned long pos = 0;
+	char *line;
+
+	/* In a first shot, do a simple linear scan. */
+	while ((line = get_next_line(&pos, supported_file,
+				     supported_size))) {
+		const char *basename, *how = "yes";
+		char *l = line;
+
+		/* optional type-of-support flag */
+		for (l = line; *l != '\0'; l++) {
+			if (*l == ' ' || *l == '\t') {
+				*l = '\0';
+				how = l + 1;
+				break;
+			}
+		}
+
+		/* skip directory components */
+		if ((l = strrchr(line, '/')))
+			line = l + 1;
+		/* strip .ko extension */
+		l = line + strlen(line);
+		if (l - line > 3 && !strcmp(l-3, ".ko"))
+			*(l-3) = '\0';
+
+		/* skip directory components */
+		if ((basename = strrchr(mod->name, '/')))
+			basename++;
+		else
+			basename = mod->name;
+		if (!strcmp(basename, line))
+			return how;
+	}
+	return NULL;
+}
+
static void read_symbols(char *modname)
{
const char *symname;
buf_printf(b, "\nMODULE_INFO(staging, \"Y\");\n");
}
+/* Emit a MODULE_INFO(supported, ...) tag for modules listed in the
+ * Module.supported list; unlisted modules get no tag at all. */
+static void add_supported_flag(struct buffer *b, struct module *mod)
+{
+	const char *how = supported(mod);
+	if (how)
+		buf_printf(b, "\nMODULE_INFO(supported, \"%s\");\n", how);
+}
+
/**
* Record CRCs for unresolved symbols
**/
fclose(file);
}
+/* Load the Module.supported list given with -N.  A missing or
+ * unreadable file is deliberately not an error: supported() then
+ * simply finds no entries and no modules get the tag. */
+static void read_supported(const char *fname)
+{
+	supported_file = grab_file(fname, &supported_size);
+	if (!supported_file)
+		; /* ignore error */
+}
+
/* parse Module.symvers file. line format:
* 0x12345678<tab>symbol<tab>module[[<tab>export]<tab>something]
**/
struct buffer buf = { };
char *kernel_read = NULL, *module_read = NULL;
char *dump_write = NULL;
+ const char *supported = NULL;
int opt;
int err;
struct ext_sym_list *extsym_iter;
struct ext_sym_list *extsym_start = NULL;
- while ((opt = getopt(argc, argv, "i:I:e:cmsSo:awM:K:")) != -1) {
+ while ((opt = getopt(argc, argv, "i:I:e:cmsSo:awM:K:N:")) != -1) {
switch (opt) {
case 'i':
kernel_read = optarg;
case 'w':
warn_unresolved = 1;
break;
+ case 'N':
+ supported = optarg;
+ break;
default:
exit(1);
}
}
+ if (supported)
+ read_supported(supported);
if (kernel_read)
read_dump(kernel_read, 1);
if (module_read)
add_header(&buf, mod);
add_intree_flag(&buf, !external_module);
add_staging_flag(&buf, mod->name);
+ add_supported_flag(&buf, mod);
err |= add_versions(&buf, mod);
add_depends(&buf, mod, modules);
add_moddevtable(&buf, mod);
apparmor-y := apparmorfs.o audit.o capability.o context.o ipc.o lib.o match.o \
path.o domain.o policy.o policy_unpack.o procattr.o lsm.o \
- resource.o sid.o file.o
+ resource.o sid.o file.o net.o
-clean-files := capability_names.h rlim_names.h
+clean-files := capability_names.h rlim_names.h af_names.h
# Build a lower case string table of capability names
-e 's/^\#define[ \t]+CAP_([A-Z0-9_]+)[ \t]+([0-9]+)/[\2] = "\L\1",/p';\
echo "};" >> $@
+quiet_cmd_make-af = GEN $@
+cmd_make-af = echo "static const char *address_family_names[] = {" > $@ ; sed -n -e "/AF_MAX/d" -e "/AF_LOCAL/d" -e "s/^\#define[ \\t]\\+AF_\\([A-Z0-9_]\\+\\)[ \\t]\\+\\([0-9]\\+\\)\\(.*\\)\$$/[\\2] = \"\\1\",/p" $< | tr A-Z a-z >> $@ ; echo "};" >> $@
+
# Build a lower case string table of rlimit names.
# Transforms lines from
tr '\n' ' ' | sed -e 's/ $$/"\n/' >> $@
$(obj)/capability.o : $(obj)/capability_names.h
+$(obj)/net.o : $(obj)/af_names.h
$(obj)/resource.o : $(obj)/rlim_names.h
$(obj)/capability_names.h : $(srctree)/include/linux/capability.h \
$(src)/Makefile
$(obj)/rlim_names.h : $(srctree)/include/asm-generic/resource.h \
$(src)/Makefile
$(call cmd,make-rlim)
+$(obj)/af_names.h : $(srctree)/include/linux/socket.h
+ $(call cmd,make-af)
.llseek = default_llseek,
};
+/**
+ * __next_namespace - find the next namespace to list
+ * @root: root namespace to stop search at (NOT NULL)
+ * @ns: current ns position (NOT NULL)
+ *
+ * Find the next namespace from @ns under @root and handle all locking needed
+ * while switching current namespace.
+ *
+ * Returns: next namespace or NULL if at last namespace under @root
+ * NOTE: will not unlock root->lock
+ */
+static struct aa_namespace *__next_namespace(struct aa_namespace *root,
+ struct aa_namespace *ns)
+{
+ struct aa_namespace *parent;
+
+ /* is next namespace a child */
+ if (!list_empty(&ns->sub_ns)) {
+ struct aa_namespace *next;
+ next = list_first_entry(&ns->sub_ns, typeof(*ns), base.list);
+ read_lock(&next->lock);
+ return next;
+ }
+
+ /* check if the next ns is a sibling, parent, gp, .. */
+ parent = ns->parent;
+ while (parent) {
+ read_unlock(&ns->lock);
+ list_for_each_entry_continue(ns, &parent->sub_ns, base.list) {
+ read_lock(&ns->lock);
+ return ns;
+ }
+ if (parent == root)
+ return NULL;
+ ns = parent;
+ parent = parent->parent;
+ }
+
+ return NULL;
+}
+
+/**
+ * __first_profile - find the first profile in a namespace
+ * @root: namespace that is root of profiles being displayed (NOT NULL)
+ * @ns: namespace to start in (NOT NULL)
+ *
+ * Returns: unrefcounted profile or NULL if no profile
+ */
+static struct aa_profile *__first_profile(struct aa_namespace *root,
+ struct aa_namespace *ns)
+{
+ for ( ; ns; ns = __next_namespace(root, ns)) {
+ if (!list_empty(&ns->base.profiles))
+ return list_first_entry(&ns->base.profiles,
+ struct aa_profile, base.list);
+ }
+ return NULL;
+}
+
+/**
+ * __next_profile - step to the next profile in a profile tree
+ * @profile: current profile in tree (NOT NULL)
+ *
+ * Perform a depth first taversal on the profile tree in a namespace
+ *
+ * Returns: next profile or NULL if done
+ * Requires: profile->ns.lock to be held
+ */
+static struct aa_profile *__next_profile(struct aa_profile *p)
+{
+ struct aa_profile *parent;
+ struct aa_namespace *ns = p->ns;
+
+ /* is next profile a child */
+ if (!list_empty(&p->base.profiles))
+ return list_first_entry(&p->base.profiles, typeof(*p),
+ base.list);
+
+ /* is next profile a sibling, parent sibling, gp, subling, .. */
+ parent = p->parent;
+ while (parent) {
+ list_for_each_entry_continue(p, &parent->base.profiles,
+ base.list)
+ return p;
+ p = parent;
+ parent = parent->parent;
+ }
+
+ /* is next another profile in the namespace */
+ list_for_each_entry_continue(p, &ns->base.profiles, base.list)
+ return p;
+
+ return NULL;
+}
+
+/**
+ * next_profile - step to the next profile in where ever it may be
+ * @root: root namespace (NOT NULL)
+ * @profile: current profile (NOT NULL)
+ *
+ * Returns: next profile or NULL if there isn't one
+ */
+static struct aa_profile *next_profile(struct aa_namespace *root,
+ struct aa_profile *profile)
+{
+ struct aa_profile *next = __next_profile(profile);
+ if (next)
+ return next;
+
+ /* finished all profiles in namespace move to next namespace */
+ return __first_profile(root, __next_namespace(root, profile->ns));
+}
+
+/**
+ * p_start - start a depth first traversal of profile tree
+ * @f: seq_file to fill
+ * @pos: current position
+ *
+ * Returns: first profile under current namespace or NULL if none found
+ *
+ * acquires first ns->lock
+ */
+static void *p_start(struct seq_file *f, loff_t *pos)
+	__acquires(root->lock)
+{
+	struct aa_profile *profile = NULL;
+	struct aa_namespace *root = aa_current_profile()->ns;
+	loff_t l = *pos;
+	/* namespace reference taken here is dropped in p_stop() */
+	f->private = aa_get_namespace(root);
+
+
+	/* find the first profile */
+	read_lock(&root->lock);
+	profile = __first_profile(root, root);
+
+	/* skip to position */
+	for (; profile && l > 0; l--)
+		profile = next_profile(root, profile);
+
+	return profile;
+}
+
+/**
+ * p_next - read the next profile entry
+ * @f: seq_file to fill
+ * @p: profile previously returned
+ * @pos: current position
+ *
+ * Returns: next profile after @p or NULL if none
+ *
+ * may acquire/release locks in namespace tree as necessary
+ */
+static void *p_next(struct seq_file *f, void *p, loff_t *pos)
+{
+ struct aa_profile *profile = p;
+ struct aa_namespace *root = f->private;
+ (*pos)++;
+
+ return next_profile(root, profile);
+}
+
+/**
+ * p_stop - stop depth first traversal
+ * @f: seq_file we are filling
+ * @p: the last profile writen
+ *
+ * Release all locking done by p_start/p_next on namespace tree
+ */
+static void p_stop(struct seq_file *f, void *p)
+ __releases(root->lock)
+{
+ struct aa_profile *profile = p;
+ struct aa_namespace *root = f->private, *ns;
+
+ if (profile) {
+ for (ns = profile->ns; ns && ns != root; ns = ns->parent)
+ read_unlock(&ns->lock);
+ }
+ read_unlock(&root->lock);
+ aa_put_namespace(root);
+}
+
+/**
+ * seq_show_profile - show a profile entry
+ * @f: seq_file to file
+ * @p: current position (profile) (NOT NULL)
+ *
+ * Returns: error on failure
+ */
+static int seq_show_profile(struct seq_file *f, void *p)
+{
+ struct aa_profile *profile = (struct aa_profile *)p;
+ struct aa_namespace *root = f->private;
+
+ if (profile->ns != root)
+ seq_printf(f, ":%s://", aa_ns_name(root, profile->ns));
+ seq_printf(f, "%s (%s)\n", profile->base.hname,
+ COMPLAIN_MODE(profile) ? "complain" : "enforce");
+
+ return 0;
+}
+
+static const struct seq_operations aa_fs_profiles_op = {
+ .start = p_start,
+ .next = p_next,
+ .stop = p_stop,
+ .show = seq_show_profile,
+};
+
+static int profiles_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &aa_fs_profiles_op);
+}
+
+static int profiles_release(struct inode *inode, struct file *file)
+{
+ return seq_release(inode, file);
+}
+
+static const struct file_operations aa_fs_profiles_fops = {
+ .open = profiles_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = profiles_release,
+};
+
static int aa_fs_seq_show(struct seq_file *seq, void *v)
{
struct aa_fs_entry *fs_file = seq->private;
AA_FS_DIR("file", aa_fs_entry_file),
AA_FS_FILE_U64("capability", VFS_CAP_FLAGS_MASK),
AA_FS_DIR("rlimit", aa_fs_entry_rlimit),
+ AA_FS_FILE_BOOLEAN("network", 1),
{ }
};
AA_FS_FILE_FOPS(".replace", 0640, &aa_fs_profile_replace),
AA_FS_FILE_FOPS(".remove", 0640, &aa_fs_profile_remove),
AA_FS_DIR("features", aa_fs_entry_features),
+ AA_FS_FILE_STRING("matching", "pattern=aadfa audit perms=crwxamlk/ "
+ "user::other"),
+ AA_FS_FILE_FOPS("profiles", 0440, &aa_fs_profiles_fops),
{ }
};
u32 denied;
uid_t ouid;
} fs;
+ struct {
+ int type, protocol;
+ struct sock *sk;
+ } net;
};
};
--- /dev/null
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor network mediation definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __AA_NET_H
+#define __AA_NET_H
+
+#include <net/sock.h>
+
+/* struct aa_net - network confinement data
+ * @allowed: basic network families permissions
+ * @audit_network: which network permissions to force audit
+ * @quiet_network: which network permissions to quiet rejects
+ */
+struct aa_net {
+ u16 allow[AF_MAX];
+ u16 audit[AF_MAX];
+ u16 quiet[AF_MAX];
+};
+
+extern int aa_net_perm(int op, struct aa_profile *profile, u16 family,
+ int type, int protocol, struct sock *sk);
+extern int aa_revalidate_sk(int op, struct sock *sk);
+
+static inline void aa_free_net_rules(struct aa_net *new)
+{
+ /* NOP */
+}
+
+#endif /* __AA_NET_H */
#include "capability.h"
#include "domain.h"
#include "file.h"
+#include "net.h"
#include "resource.h"
extern const char *const profile_mode_names[];
* @policy: general match rules governing policy
* @file: The set of rules governing basic file access and domain transitions
* @caps: capabilities for the profile
+ * @net: network controls for the profile
* @rlimits: rlimits for the profile
*
* The AppArmor profile contains the basic confinement data. Each profile
struct aa_policydb policy;
struct aa_file_rules file;
struct aa_caps caps;
+ struct aa_net net;
struct aa_rlimit rlimits;
};
#include "include/context.h"
#include "include/file.h"
#include "include/ipc.h"
+#include "include/net.h"
#include "include/path.h"
#include "include/policy.h"
#include "include/procattr.h"
return error;
}
+/* LSM hook: mediate socket creation.  Kernel-internal sockets (@kern)
+ * and unconfined tasks are always permitted; confined tasks are checked
+ * against their profile's network rules. */
+static int apparmor_socket_create(int family, int type, int protocol, int kern)
+{
+	struct aa_profile *profile;
+	int error = 0;
+
+	if (kern)
+		return 0;
+
+	profile = __aa_current_profile();
+	if (!unconfined(profile))
+		error = aa_net_perm(OP_CREATE, profile, family, type, protocol,
+				    NULL);
+	return error;
+}
+
+/* The remaining socket hooks all revalidate the current task's network
+ * permissions against the socket's sock via aa_revalidate_sk(), each
+ * passing its own operation code for auditing. */
+static int apparmor_socket_bind(struct socket *sock,
+				struct sockaddr *address, int addrlen)
+{
+	struct sock *sk = sock->sk;
+
+	return aa_revalidate_sk(OP_BIND, sk);
+}
+
+static int apparmor_socket_connect(struct socket *sock,
+				   struct sockaddr *address, int addrlen)
+{
+	struct sock *sk = sock->sk;
+
+	return aa_revalidate_sk(OP_CONNECT, sk);
+}
+
+static int apparmor_socket_listen(struct socket *sock, int backlog)
+{
+	struct sock *sk = sock->sk;
+
+	return aa_revalidate_sk(OP_LISTEN, sk);
+}
+
+static int apparmor_socket_accept(struct socket *sock, struct socket *newsock)
+{
+	struct sock *sk = sock->sk;
+
+	return aa_revalidate_sk(OP_ACCEPT, sk);
+}
+
+static int apparmor_socket_sendmsg(struct socket *sock,
+				   struct msghdr *msg, int size)
+{
+	struct sock *sk = sock->sk;
+
+	return aa_revalidate_sk(OP_SENDMSG, sk);
+}
+
+static int apparmor_socket_recvmsg(struct socket *sock,
+				   struct msghdr *msg, int size, int flags)
+{
+	struct sock *sk = sock->sk;
+
+	return aa_revalidate_sk(OP_RECVMSG, sk);
+}
+
+static int apparmor_socket_getsockname(struct socket *sock)
+{
+	struct sock *sk = sock->sk;
+
+	return aa_revalidate_sk(OP_GETSOCKNAME, sk);
+}
+
+static int apparmor_socket_getpeername(struct socket *sock)
+{
+	struct sock *sk = sock->sk;
+
+	return aa_revalidate_sk(OP_GETPEERNAME, sk);
+}
+
+static int apparmor_socket_getsockopt(struct socket *sock, int level,
+				      int optname)
+{
+	struct sock *sk = sock->sk;
+
+	return aa_revalidate_sk(OP_GETSOCKOPT, sk);
+}
+
+static int apparmor_socket_setsockopt(struct socket *sock, int level,
+				      int optname)
+{
+	struct sock *sk = sock->sk;
+
+	return aa_revalidate_sk(OP_SETSOCKOPT, sk);
+}
+
+static int apparmor_socket_shutdown(struct socket *sock, int how)
+{
+	struct sock *sk = sock->sk;
+
+	return aa_revalidate_sk(OP_SOCK_SHUTDOWN, sk);
+}
+
static struct security_operations apparmor_ops = {
.name = "apparmor",
.getprocattr = apparmor_getprocattr,
.setprocattr = apparmor_setprocattr,
+ .socket_create = apparmor_socket_create,
+ .socket_bind = apparmor_socket_bind,
+ .socket_connect = apparmor_socket_connect,
+ .socket_listen = apparmor_socket_listen,
+ .socket_accept = apparmor_socket_accept,
+ .socket_sendmsg = apparmor_socket_sendmsg,
+ .socket_recvmsg = apparmor_socket_recvmsg,
+ .socket_getsockname = apparmor_socket_getsockname,
+ .socket_getpeername = apparmor_socket_getpeername,
+ .socket_getsockopt = apparmor_socket_getsockopt,
+ .socket_setsockopt = apparmor_socket_setsockopt,
+ .socket_shutdown = apparmor_socket_shutdown,
+
.cred_alloc_blank = apparmor_cred_alloc_blank,
.cred_free = apparmor_cred_free,
.cred_prepare = apparmor_cred_prepare,
--- /dev/null
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor network mediation
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include "include/apparmor.h"
+#include "include/audit.h"
+#include "include/context.h"
+#include "include/net.h"
+#include "include/policy.h"
+
+#include "af_names.h"
+
+/* Human-readable names for audit records, indexed by SOCK_* value
+ * (SOCK_STREAM=1 ... SOCK_DCCP=6, SOCK_PACKET=10); gaps hold
+ * "unknown(N)" placeholders so every valid index yields a string.
+ */
+static const char *sock_type_names[] = {
+ "unknown(0)",
+ "stream",
+ "dgram",
+ "raw",
+ "rdm",
+ "seqpacket",
+ "dccp",
+ "unknown(7)",
+ "unknown(8)",
+ "unknown(9)",
+ "packet",
+};
+
+/* audit callback for net specific fields
+ *
+ * Appends " family=", " sock_type=" and " protocol=" fields to the
+ * audit record.  Indexing into address_family_names[] and
+ * sock_type_names[] is unchecked here; presumably callers have already
+ * range-validated family/type (aa_net_perm rejects family >= AF_MAX
+ * and type >= SOCK_MAX) -- verify all audit paths go through it.
+ * NOTE(review): the family "unknown" literal below has a leading space
+ * (" \"unknown(%d)\"") while the sock_type one does not, producing a
+ * double space after "family=" -- confirm this is intended.
+ */
+static void audit_cb(struct audit_buffer *ab, void *va)
+{
+ struct common_audit_data *sa = va;
+ struct apparmor_audit_data *aad = sa->apparmor_audit_data;
+
+ audit_log_format(ab, " family=");
+ if (address_family_names[sa->u.net->family]) {
+ audit_log_string(ab, address_family_names[sa->u.net->family]);
+ } else {
+ audit_log_format(ab, " \"unknown(%d)\"", sa->u.net->family);
+ }
+
+ audit_log_format(ab, " sock_type=");
+ if (sock_type_names[aad->net.type]) {
+ audit_log_string(ab, sock_type_names[aad->net.type]);
+ } else {
+ audit_log_format(ab, "\"unknown(%d)\"", aad->net.type);
+ }
+
+ audit_log_format(ab, " protocol=%d", aad->net.protocol);
+}
+
+/**
+ * audit_net - audit network access
+ * @profile: profile being enforced (NOT NULL)
+ * @op: operation being checked
+ * @family: network family
+ * @type: network type
+ * @protocol: network protocol
+ * @sk: socket auditing is being applied to (may be NULL)
+ * @error: error code for failure else 0
+ *
+ * On success (@error == 0) the event is logged only if the profile's
+ * per-family audit mask requests it or audit mode is AUDIT_ALL.  On
+ * failure, the profile's quiet mask may suppress the record entirely.
+ *
+ * Returns: %0 or sa->error else other errorcode on failure
+ */
+static int audit_net(struct aa_profile *profile, int op, u16 family, int type,
+ int protocol, struct sock *sk, int error)
+{
+ int audit_type = AUDIT_APPARMOR_AUTO;
+ struct common_audit_data sa;
+
+ struct apparmor_audit_data aad = {
+ .op = op,
+ .net = {
+ .type = type,
+ .protocol = protocol,
+ },
+ .error = error
+ };
+
+ struct lsm_network_audit net = {
+ .family = family,
+ .sk = sk,
+ };
+
+
+ /* NET init attaches the lsm_network_audit info; NONE when no sk */
+ if (sk) {
+ COMMON_AUDIT_DATA_INIT(&sa, NET);
+ } else {
+ COMMON_AUDIT_DATA_INIT(&sa, NONE);
+ }
+ /* todo fill in socket addr info */
+
+ sa.apparmor_audit_data = &aad;
+ sa.u.net = &net;
+
+ if (likely(!aad.error)) {
+ /* success: audit only if the type bit is set in the
+ * profile's audit mask, or audit mode forces it */
+ u16 audit_mask = profile->net.audit[net.family];
+ if (likely((AUDIT_MODE(profile) != AUDIT_ALL) &&
+ !(1 << aad.net.type & audit_mask)))
+ return 0;
+ audit_type = AUDIT_APPARMOR_AUDIT;
+ } else {
+ u16 quiet_mask = profile->net.quiet[net.family];
+ /* NOTE(review): kill_mask is hard-coded to 0, so the
+ * AUDIT_APPARMOR_KILL branch below is currently
+ * unreachable -- presumably a placeholder, confirm */
+ u16 kill_mask = 0;
+ u16 denied = (1 << aad.net.type) & ~quiet_mask;
+
+ if (denied & kill_mask)
+ audit_type = AUDIT_APPARMOR_KILL;
+
+ /* quiet the denial unless audit mode insists on it;
+ * complain mode still returns success to the caller */
+ if ((denied & quiet_mask) &&
+ AUDIT_MODE(profile) != AUDIT_NOQUIET &&
+ AUDIT_MODE(profile) != AUDIT_ALL)
+ return COMPLAIN_MODE(profile) ? 0 : aad.error;
+ }
+
+ return aa_audit(audit_type, profile, GFP_KERNEL, &sa, audit_cb);
+}
+
+/**
+ * aa_net_perm - very coarse network access check
+ * @op: operation being checked
+ * @profile: profile being enforced (NOT NULL)
+ * @family: network family
+ * @type: network type
+ * @protocol: network protocol
+ * @sk: socket the check applies to (may be NULL)
+ *
+ * Mediation is per (family, type) only; @protocol is recorded for
+ * auditing but not checked.
+ *
+ * Returns: %0 else error if permission denied
+ */
+int aa_net_perm(int op, struct aa_profile *profile, u16 family, int type,
+ int protocol, struct sock *sk)
+{
+ u16 family_mask;
+ int error;
+
+ /* NOTE(review): @family is u16, so (family < 0) is always false;
+ * the upper-bound check is the one that matters here */
+ if ((family < 0) || (family >= AF_MAX))
+ return -EINVAL;
+
+ if ((type < 0) || (type >= SOCK_MAX))
+ return -EINVAL;
+
+ /* unix domain and netlink sockets are handled by ipc */
+ if (family == AF_UNIX || family == AF_NETLINK)
+ return 0;
+
+ /* one bit per socket type in the per-family allow mask */
+ family_mask = profile->net.allow[family];
+
+ error = (family_mask & (1 << type)) ? 0 : -EACCES;
+
+ /* audit_net decides whether/how to log and returns the final error */
+ return audit_net(profile, op, family, type, protocol, sk, error);
+}
+
+/**
+ * aa_revalidate_sk - Revalidate access to a sock
+ * @op: operation being checked
+ * @sk: sock being revalidated (NOT NULL)
+ *
+ * Re-checks the (family, type) permission of an already-created sock
+ * against the current task's profile.  Unconfined tasks pass trivially.
+ *
+ * Returns: %0 else error if permission denied
+ */
+int aa_revalidate_sk(int op, struct sock *sk)
+{
+ struct aa_profile *profile;
+ int error = 0;
+
+ /* aa_revalidate_sk should not be called from interrupt context
+ * don't mediate these calls as they are not task related
+ */
+ if (in_interrupt())
+ return 0;
+
+ profile = __aa_current_profile();
+ if (!unconfined(profile))
+ error = aa_net_perm(op, profile, sk->sk_family, sk->sk_type,
+ sk->sk_protocol, sk);
+
+ return error;
+}
aa_free_file_rules(&profile->file);
aa_free_cap_rules(&profile->caps);
+ aa_free_net_rules(&profile->net);
aa_free_rlimit_rules(&profile->rlimits);
aa_free_sid(profile->sid);
return 0;
}
+/* unpack_u16 - unpack a named little-endian u16 from the policy blob
+ * @e: extent holding the serialized profile data
+ * @data: where to store the value (may be NULL to just skip it)
+ * @name: expected tag name (may be NULL for anonymous values)
+ *
+ * Returns 1 and advances e->pos past the value on success, 0 on
+ * tag mismatch or truncated data (position unchanged on mismatch).
+ * NOTE(review): get_unaligned() + le16_to_cpu() looks equivalent to
+ * get_unaligned_le16() -- confirm against the other unpack_* helpers.
+ */
+static bool unpack_u16(struct aa_ext *e, u16 *data, const char *name)
+{
+ if (unpack_nameX(e, AA_U16, name)) {
+ if (!inbounds(e, sizeof(u16)))
+ return 0;
+ if (data)
+ *data = le16_to_cpu(get_unaligned((u16 *) e->pos));
+ e->pos += sizeof(u16);
+ return 1;
+ }
+ return 0;
+}
+
static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name)
{
if (unpack_nameX(e, AA_U32, name)) {
{
struct aa_profile *profile = NULL;
const char *name = NULL;
+ size_t size = 0;
int i, error = -EPROTO;
kernel_cap_t tmpcap;
u32 tmp;
if (!unpack_rlimits(e, profile))
goto fail;
+ /* Unpack the optional per-address-family network rule triples
+ * (allow, audit, quiet masks) from the "net_allowed_af" array.
+ */
+ size = unpack_array(e, "net_allowed_af");
+ if (size) {
+
+ for (i = 0; i < size; i++) {
+ /* discard extraneous rules that this kernel will
+ * never request
+ *
+ * The net.allow/audit/quiet arrays hold AF_MAX
+ * entries (valid indices 0..AF_MAX-1), so i == AF_MAX
+ * must be discarded too; 'i > AF_MAX' would write one
+ * element past the end of each array.
+ */
+ if (i >= AF_MAX) {
+ u16 tmp;
+ if (!unpack_u16(e, &tmp, NULL) ||
+ !unpack_u16(e, &tmp, NULL) ||
+ !unpack_u16(e, &tmp, NULL))
+ goto fail;
+ continue;
+ }
+ if (!unpack_u16(e, &profile->net.allow[i], NULL))
+ goto fail;
+ if (!unpack_u16(e, &profile->net.audit[i], NULL))
+ goto fail;
+ if (!unpack_u16(e, &profile->net.quiet[i], NULL))
+ goto fail;
+ }
+ if (!unpack_nameX(e, AA_ARRAYEND, NULL))
+ goto fail;
+ /*
+ * allow unix domain and netlink sockets they are handled
+ * by IPC
+ */
+ }
+ profile->net.allow[AF_UNIX] = 0xffff;
+ profile->net.allow[AF_NETLINK] = 0xffff;
+
if (unpack_nameX(e, AA_STRUCT, "policydb")) {
/* generic policy dfa - optional and may be NULL */
profile->policy.dfa = unpack_dfa(e);
* two pass drawing so that the P state bars are on top of the C state blocks
*/
while (pwr) {
- if (pwr->type == CSTATE)
+ if (pwr->type == CSTATE) {
+ /* If the first event is an _end event, start timestamp is zero
+ -> ignore these */
+ if (pwr->start_time == 0 || pwr->end_time == 0) {
+ pwr = pwr->next;
+ continue;
+ }
svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
+ }
pwr = pwr->next;
}
old_irr = ioapic->irr;
if (irq >= 0 && irq < IOAPIC_NUM_PINS) {
entry = ioapic->redirtbl[irq];
- level ^= entry.fields.polarity;
+// polarity is always active high in qemu
+// level ^= entry.fields.polarity;
if (!level)
ioapic->irr &= ~mask;
else {