diff --git a/Documentation/ABI/testing/sysfs-class-mtd b/Documentation/ABI/testing/sysfs-class-mtd index 938ef71..3105644 100644 --- a/Documentation/ABI/testing/sysfs-class-mtd +++ b/Documentation/ABI/testing/sysfs-class-mtd @@ -14,8 +14,7 @@ Description: The /sys/class/mtd/mtd{0,1,2,3,...} directories correspond to each /dev/mtdX character device. These may represent physical/simulated flash devices, partitions on a flash - device, or concatenated flash devices. They exist regardless - of whether CONFIG_MTD_CHAR is actually enabled. + device, or concatenated flash devices. What: /sys/class/mtd/mtdXro/ Date: April 2009 @@ -23,8 +22,7 @@ KernelVersion: 2.6.29 Contact: linux-mtd@lists.infradead.org Description: These directories provide the corresponding read-only device - nodes for /sys/class/mtd/mtdX/ . They are only created - (for the benefit of udev) if CONFIG_MTD_CHAR is enabled. + nodes for /sys/class/mtd/mtdX/ . What: /sys/class/mtd/mtdX/dev Date: April 2009 diff --git a/Documentation/acpi/enumeration.txt b/Documentation/acpi/enumeration.txt index b0d5410..d9be7a9 100644 --- a/Documentation/acpi/enumeration.txt +++ b/Documentation/acpi/enumeration.txt @@ -66,6 +66,83 @@ the ACPI device explicitly to acpi_platform_device_ids list defined in drivers/acpi/acpi_platform.c. This limitation is only for the platform devices, SPI and I2C devices are created automatically as described below. +DMA support +~~~~~~~~~~~ +DMA controllers enumerated via ACPI should be registered in the system to +provide generic access to their resources. For example, a driver that would +like to be accessible to slave devices via generic API call +dma_request_slave_channel() must register itself at the end of the probe +function like this: + + err = devm_acpi_dma_controller_register(dev, xlate_func, dw); + /* Handle the error if it's not a case of !CONFIG_ACPI */ + +and implement custom xlate function if needed (usually acpi_dma_simple_xlate() +is enough) which converts the FixedDMA resource provided by struct +acpi_dma_spec into the corresponding DMA channel. A piece of code for that case +could look like: + + #ifdef CONFIG_ACPI + struct filter_args { + /* Provide necessary information for the filter_func */ + ... + }; + + static bool filter_func(struct dma_chan *chan, void *param) + { + /* Choose the proper channel */ + ... + } + + static struct dma_chan *xlate_func(struct acpi_dma_spec *dma_spec, + struct acpi_dma *adma) + { + dma_cap_mask_t cap; + struct filter_args args; + + /* Prepare arguments for filter_func */ + ... + return dma_request_channel(cap, filter_func, &args); + } + #else + static struct dma_chan *xlate_func(struct acpi_dma_spec *dma_spec, + struct acpi_dma *adma) + { + return NULL; + } + #endif + +dma_request_slave_channel() will call xlate_func() for each registered DMA +controller. In the xlate function the proper channel must be chosen based on +information in struct acpi_dma_spec and the properties of the controller +provided by struct acpi_dma. + +Clients must call dma_request_slave_channel() with the string parameter that +corresponds to a specific FixedDMA resource. By default "tx" means the first +entry of the FixedDMA resource array, "rx" means the second entry. The table +below shows a layout: + + Device (I2C0) + { + ... + Method (_CRS, 0, NotSerialized) + { + Name (DBUF, ResourceTemplate () + { + FixedDMA (0x0018, 0x0004, Width32bit, _Y48) + FixedDMA (0x0019, 0x0005, Width32bit, ) + }) + ... 
+ } } + +So, the FixedDMA with request line 0x0018 is "tx" and the next one is "rx" in +this example. + +In more complex cases the client may need to call +acpi_dma_request_slave_chan_by_index() directly and choose the +specific FixedDMA resource by its index. + SPI serial bus support ~~~~~~~~~~~~~~~~~~~~~~ Slave devices behind SPI bus have SpiSerialBus resource attached to them. diff --git a/Documentation/devicetree/bindings/dma/atmel-dma.txt b/Documentation/devicetree/bindings/dma/atmel-dma.txt index 3c046ee..c80e8a3 100644 --- a/Documentation/devicetree/bindings/dma/atmel-dma.txt +++ b/Documentation/devicetree/bindings/dma/atmel-dma.txt @@ -1,14 +1,39 @@ * Atmel Direct Memory Access Controller (DMA) Required properties: -- compatible: Should be "atmel,<chip>-dma" -- reg: Should contain DMA registers location and length -- interrupts: Should contain DMA interrupt +- compatible: Should be "atmel,<chip>-dma". +- reg: Should contain DMA registers location and length. +- interrupts: Should contain DMA interrupt. +- #dma-cells: Must be <2>, used to represent the number of integer cells in +the dmas property of client devices. -Examples: +Example: -dma@ffffec00 { +dma0: dma@ffffec00 { compatible = "atmel,at91sam9g45-dma"; reg = <0xffffec00 0x200>; interrupts = <21>; + #dma-cells = <2>; +}; + +DMA clients connected to the Atmel DMA controller must use the format +described in the dma.txt file, using a three-cell specifier for each channel: +a phandle plus two integer cells. +The three cells in order are: + +1. A phandle pointing to the DMA controller. +2. The memory interface (16 most significant bits), the peripheral interface +(16 least significant bits). +3. The peripheral identifier for the hardware handshaking interface. The +identifier can be different for tx and rx.
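Whether the channel comes from an ACPI FixedDMA descriptor (as in the enumeration.txt hunk above) or from the dmas/dma-names properties shown in the binding example below, the client driver requests it by name. The following is only a minimal sketch of such a request, assuming a hypothetical client whose channels are named "tx" and "rx"; it is not part of the patch itself:

    #include <linux/device.h>
    #include <linux/dmaengine.h>
    #include <linux/errno.h>

    /*
     * Hypothetical helper: request the "tx" and "rx" slave channels that
     * the firmware (ACPI FixedDMA or DT dmas/dma-names) describes for this
     * device.  Error handling is reduced to the minimum for brevity.
     */
    static int example_request_dma(struct device *dev,
                                   struct dma_chan **tx, struct dma_chan **rx)
    {
            *tx = dma_request_slave_channel(dev, "tx");
            if (!*tx)
                    return -ENODEV;

            *rx = dma_request_slave_channel(dev, "rx");
            if (!*rx) {
                    dma_release_channel(*tx);
                    return -ENODEV;
            }

            return 0;
    }

When the driver is done with the channels, dma_release_channel() hands them back to the dmaengine core.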
+ +Example: + +i2c0@i2c@f8010000 { + compatible = "atmel,at91sam9x5-i2c"; + reg = <0xf8010000 0x100>; + interrupts = <9 4 6>; + dmas = <&dma0 1 7>, + <&dma0 1 8>; + dma-names = "tx", "rx"; }; diff --git a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt index e7f8d7e..6a983c1 100644 --- a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt +++ b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt @@ -56,20 +56,20 @@ Example for an AM33xx board: nand-bus-width = <16>; ti,nand-ecc-opt = "bch8"; - gpmc,sync-clk = <0>; - gpmc,cs-on = <0>; - gpmc,cs-rd-off = <44>; - gpmc,cs-wr-off = <44>; - gpmc,adv-on = <6>; - gpmc,adv-rd-off = <34>; - gpmc,adv-wr-off = <44>; - gpmc,we-off = <40>; - gpmc,oe-off = <54>; - gpmc,access = <64>; - gpmc,rd-cycle = <82>; - gpmc,wr-cycle = <82>; - gpmc,wr-access = <40>; - gpmc,wr-data-mux-bus = <0>; + gpmc,sync-clk-ps = <0>; + gpmc,cs-on-ns = <0>; + gpmc,cs-rd-off-ns = <44>; + gpmc,cs-wr-off-ns = <44>; + gpmc,adv-on-ns = <6>; + gpmc,adv-rd-off-ns = <34>; + gpmc,adv-wr-off-ns = <44>; + gpmc,we-off-ns = <40>; + gpmc,oe-off-ns = <54>; + gpmc,access-ns = <64>; + gpmc,rd-cycle-ns = <82>; + gpmc,wr-cycle-ns = <82>; + gpmc,wr-access-ns = <40>; + gpmc,wr-data-mux-bus-ns = <0>; #address-cells = <1>; #size-cells = <1>; diff --git a/Documentation/devicetree/bindings/mtd/partition.txt b/Documentation/devicetree/bindings/mtd/partition.txt index 6e1f61f..9315ac9 100644 --- a/Documentation/devicetree/bindings/mtd/partition.txt +++ b/Documentation/devicetree/bindings/mtd/partition.txt @@ -5,8 +5,12 @@ on platforms which have strong conventions about which portions of a flash are used for what purposes, but which don't use an on-flash partition table such as RedBoot. -#address-cells & #size-cells must both be present in the mtd device and be -equal to 1. +#address-cells & #size-cells must both be present in the mtd device. There are +two valid values for both: +<1>: for partitions that require a single 32-bit cell to represent their + size/address (aka the value is below 4 GiB) +<2>: for partitions that require two 32-bit cells to represent their + size/address (aka the value is 4 GiB or greater). Required properties: - reg : The partition's offset and size within the mtd bank. @@ -36,3 +40,31 @@ flash@0 { reg = <0x0100000 0x200000>; }; }; + +flash@1 { + #address-cells = <1>; + #size-cells = <2>; + + /* a 4 GiB partition */ + partition@0 { + label = "filesystem"; + reg = <0x00000000 0x1 0x00000000>; + }; +}; + +flash@2 { + #address-cells = <2>; + #size-cells = <2>; + + /* an 8 GiB partition */ + partition@0 { + label = "filesystem #1"; + reg = <0x0 0x00000000 0x2 0x00000000>; + }; + + /* a 4 GiB partition */ + partition@200000000 { + label = "filesystem #2"; + reg = <0x2 0x00000000 0x1 0x00000000>; + }; +}; diff --git a/Documentation/devicetree/bindings/net/gpmc-eth.txt b/Documentation/devicetree/bindings/net/gpmc-eth.txt index 24cb4e4..ace4a64 100644 --- a/Documentation/devicetree/bindings/net/gpmc-eth.txt +++ b/Documentation/devicetree/bindings/net/gpmc-eth.txt @@ -26,16 +26,16 @@ Required properties: - bank-width: Address width of the device in bytes. GPMC supports 8-bit and 16-bit devices and so must be either 1 or 2 bytes. - compatible: Compatible string property for the ethernet child device. 
-- gpmc,cs-on: Chip-select assertion time -- gpmc,cs-rd-off: Chip-select de-assertion time for reads -- gpmc,cs-wr-off: Chip-select de-assertion time for writes -- gpmc,oe-on: Output-enable assertion time -- gpmc,oe-off Output-enable de-assertion time -- gpmc,we-on: Write-enable assertion time -- gpmc,we-off: Write-enable de-assertion time -- gpmc,access: Start cycle to first data capture (read access) -- gpmc,rd-cycle: Total read cycle time -- gpmc,wr-cycle: Total write cycle time +- gpmc,cs-on-ns: Chip-select assertion time +- gpmc,cs-rd-off-ns: Chip-select de-assertion time for reads +- gpmc,cs-wr-off-ns: Chip-select de-assertion time for writes +- gpmc,oe-on-ns: Output-enable assertion time +- gpmc,oe-off-ns: Output-enable de-assertion time +- gpmc,we-on-ns: Write-enable assertion time +- gpmc,we-off-ns: Write-enable de-assertion time +- gpmc,access-ns: Start cycle to first data capture (read access) +- gpmc,rd-cycle-ns: Total read cycle time +- gpmc,wr-cycle-ns: Total write cycle time - reg: Chip-select, base address (relative to chip-select) and size of the memory mapped for the device. Note that base address will be typically 0 as this @@ -65,24 +65,24 @@ gpmc: gpmc@6e000000 { bank-width = <2>; gpmc,mux-add-data; - gpmc,cs-on = <0>; - gpmc,cs-rd-off = <186>; - gpmc,cs-wr-off = <186>; - gpmc,adv-on = <12>; - gpmc,adv-rd-off = <48>; - gpmc,adv-wr-off = <48>; - gpmc,oe-on = <54>; - gpmc,oe-off = <168>; - gpmc,we-on = <54>; - gpmc,we-off = <168>; - gpmc,rd-cycle = <186>; - gpmc,wr-cycle = <186>; - gpmc,access = <114>; - gpmc,page-burst-access = <6>; - gpmc,bus-turnaround = <12>; - gpmc,cycle2cycle-delay = <18>; - gpmc,wr-data-mux-bus = <90>; - gpmc,wr-access = <186>; + gpmc,cs-on-ns = <0>; + gpmc,cs-rd-off-ns = <186>; + gpmc,cs-wr-off-ns = <186>; + gpmc,adv-on-ns = <12>; + gpmc,adv-rd-off-ns = <48>; + gpmc,adv-wr-off-ns = <48>; + gpmc,oe-on-ns = <54>; + gpmc,oe-off-ns = <168>; + gpmc,we-on-ns = <54>; + gpmc,we-off-ns = <168>; + gpmc,rd-cycle-ns = <186>; + gpmc,wr-cycle-ns = <186>; + gpmc,access-ns = <114>; + gpmc,page-burst-access-ns = <6>; + gpmc,bus-turnaround-ns = <12>; + gpmc,cycle2cycle-delay-ns = <18>; + gpmc,wr-data-mux-bus-ns = <90>; + gpmc,wr-access-ns = <186>; gpmc,cycle2cycle-samecsen; gpmc,cycle2cycle-diffcsen; diff --git a/Documentation/devicetree/bindings/thermal/armada-thermal.txt b/Documentation/devicetree/bindings/thermal/armada-thermal.txt new file mode 100644 index 0000000..fff93d5 --- /dev/null +++ b/Documentation/devicetree/bindings/thermal/armada-thermal.txt @@ -0,0 +1,22 @@ +* Marvell Armada 370/XP thermal management + +Required properties: + +- compatible: Should be set to one of the following: + marvell,armada370-thermal + marvell,armadaxp-thermal + +- reg: Device's register space. + Two entries are expected, see the examples below. + The first one is required for the sensor register; + the second one is required for the control register + to be used for sensor initialization (a.k.a. calibration). + +Example: + + thermal@d0018300 { + compatible = "marvell,armada370-thermal"; + reg = <0xd0018300 0x4 + 0xd0018304 0x4>; + status = "okay"; + }; diff --git a/Documentation/dmatest.txt b/Documentation/dmatest.txt new file mode 100644 index 0000000..279ac0a --- /dev/null +++ b/Documentation/dmatest.txt @@ -0,0 +1,81 @@ + DMA Test Guide + ============== + + Andy Shevchenko + +This small document introduces how to test DMA drivers using dmatest module. 
+ + Part 1 - How to build the test module + +The menuconfig contains an option that can be found under the following path: + Device Drivers -> DMA Engine support -> DMA Test client + +In the configuration file the option is called CONFIG_DMATEST. dmatest can +be built as a module or into the kernel. Let's consider both cases. + + Part 2 - When dmatest is built as a module... + +After mounting debugfs and loading the module, the /sys/kernel/debug/dmatest +folder with control nodes will be created. The nodes mirror the module parameters, +with the addition of the 'run' node that controls the run and stop phases of the test. + +Note that in this case the test will not run automatically on module load. + +Example of usage: + % echo dma0chan0 > /sys/kernel/debug/dmatest/channel + % echo 2000 > /sys/kernel/debug/dmatest/timeout + % echo 1 > /sys/kernel/debug/dmatest/iterations + % echo 1 > /sys/kernel/debug/dmatest/run + +Hint: the list of available channels can be obtained by running the following +command: + % ls -1 /sys/class/dma/ + +After a while you will start to get messages about the current status or errors, +as in the original code. + +Note that running a new test will stop any test in progress. + +The following command returns the actual state of the test. + % cat /sys/kernel/debug/dmatest/run + +To wait for the test to finish, the user may poll the state in a busy loop. + + % while [ $(cat /sys/kernel/debug/dmatest/run) = "Y" ] + > do + > echo -n "." + > sleep 1 + > done + > echo + + Part 3 - When dmatest is built into the kernel... + +The module parameters supplied on the kernel command line are used +for the first test run. After the user gets control, the test can be +interrupted or re-run with the same or different parameters. For the details see +the section above, "Part 2 - When dmatest is built as a module..." + +In both cases the module parameters are used as initial values for the test case. +They can always be checked at run time by running + % grep -H . /sys/module/dmatest/parameters/* + + Part 4 - Gathering the test results + +The module stores the test results in memory. The gathered +data can be inspected after the test is done. + +The special debugfs file 'results' holds the data gathered for the test in +progress. The messages collected are printed to the kernel log as well. + +Example of output: + % cat /sys/kernel/debug/dmatest/results + dma0chan0-copy0: #1: No errors with src_off=0x7bf dst_off=0x8ad len=0x3fea (0) + +The message format is unified across the different types of errors. A number in +parentheses represents additional information, e.g. an error code, error counter, +or status. + +The result of the buffer comparison is stored in a dedicated structure. + +Note that the verification result is accessible only via the 'results' file in +debugfs. diff --git a/Documentation/filesystems/btrfs.txt b/Documentation/filesystems/btrfs.txt index 7671352..b349d57 100644 --- a/Documentation/filesystems/btrfs.txt +++ b/Documentation/filesystems/btrfs.txt @@ -1,8 +1,8 @@ - BTRFS - ===== +BTRFS +===== -Btrfs is a new copy on write filesystem for Linux aimed at +Btrfs is a copy on write filesystem for Linux aimed at implementing advanced features while focusing on fault tolerance, repair and easy administration. Initially developed by Oracle, Btrfs is licensed under the GPL and open for contribution from anyone.
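The options listed in the Mount Options section below are passed as a single comma-separated string, either with "mount -o" on the command line or through the data argument of the mount(2) system call. A minimal userspace sketch of the latter, assuming a hypothetical /dev/sdb1 device and /mnt mount point and option names taken from the list below:

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            /* Mount /dev/sdb1 on /mnt with lzo compression and autodefrag. */
            if (mount("/dev/sdb1", "/mnt", "btrfs", 0,
                      "compress=lzo,autodefrag") < 0) {
                    perror("mount");
                    return 1;
            }
            return 0;
    }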
@@ -34,9 +34,175 @@ The main Btrfs features include: * Online filesystem defragmentation +Mount Options +============= - MAILING LIST - ============ +When mounting a btrfs filesystem, the following options are accepted. +Unless otherwise specified, all options default to off. + + alloc_start= + Debugging option to force all block allocations above a certain + byte threshold on each block device. The value is specified in + bytes, optionally with a K, M, or G suffix, case insensitive. + Default is 1MB. + + autodefrag + Detect small random writes into files and queue them up for the + defrag process. Works best for small files; not well suited for + large database workloads. + + check_int + check_int_data + check_int_print_mask= + These debugging options control the behavior of the integrity checking + module (the BTRFS_FS_CHECK_INTEGRITY config option is required). + + check_int enables the integrity checker module, which examines all + block write requests to ensure on-disk consistency, at a large + memory and CPU cost. + + check_int_data includes extent data in the integrity checks, and + implies the check_int option. + + check_int_print_mask takes a bitmask of BTRFSIC_PRINT_MASK_* values + as defined in fs/btrfs/check-integrity.c, to control the integrity + checker module behavior. + + See comments at the top of fs/btrfs/check-integrity.c for more info. + + compress + compress= + compress-force + compress-force= + Control BTRFS file data compression. Type may be specified as "zlib", + "lzo" or "no" (for no compression, used for remounting). If no type + is specified, zlib is used. If compress-force is specified, + all files will be compressed, whether or not they compress well. + If compression is enabled, nodatacow and nodatasum are disabled. + + degraded + Allow mounts to continue with missing devices. A read-write mount may + fail with too many devices missing, for example if a stripe member + is completely missing. + + device= + Specify a device during mount so that ioctls on the control device + can be avoided. Especially useful when trying to mount a multi-device + setup as root. May be specified multiple times for multiple devices. + + discard + Issue frequent commands to let the block device reclaim space freed by + the filesystem. This is useful for SSD devices, thinly provisioned + LUNs and virtual machine images, but may have a significant + performance impact. (The fstrim command is also available to + initiate batch trims from userspace). + + enospc_debug + Debugging option to be more verbose in some ENOSPC conditions. + + fatal_errors= + Action to take when encountering a fatal error: + "bug" - BUG() on a fatal error. This is the default. + "panic" - panic() on a fatal error. + + flushoncommit + The 'flushoncommit' mount option forces any data dirtied by a write in a + prior transaction to commit as part of the current commit. This makes + the committed state a fully consistent view of the file system from the + application's perspective (i.e., it includes all completed file system + operations). This was previously the behavior only when a snapshot was + created. + + inode_cache + Enable free inode number caching. Defaults to off due to an overflow + problem when the free space crcs don't fit inside a single page. + + max_inline= + Specify the maximum amount of space, in bytes, that can be inlined in + a metadata B-tree leaf. The value is specified in bytes, optionally + with a K, M, or G suffix, case insensitive.
In practice, this value + is limited by the root sector size, with some space unavailable due + to leaf headers. For a 4k sectorsize, max inline data is ~3900 bytes. + + metadata_ratio= + Specify that 1 metadata chunk should be allocated after every + data chunks. Off by default. + + noacl + Disable support for Posix Access Control Lists (ACLs). See the + acl(5) manual page for more information about ACLs. + + nobarrier + Disables the use of block layer write barriers. Write barriers ensure + that certain IOs make it through the device cache and are on persistent + storage. If used on a device with a volatile (non-battery-backed) + write-back cache, this option will lead to filesystem corruption on a + system crash or power loss. + + nodatacow + Disable data copy-on-write for newly created files. Implies nodatasum, + and disables all compression. + + nodatasum + Disable data checksumming for newly created files. + + notreelog + Disable the tree logging used for fsync and O_SYNC writes. + + recovery + Enable autorecovery attempts if a bad tree root is found at mount time. + Currently this scans a list of several previous tree roots and tries to + use the first readable. + + skip_balance + Skip automatic resume of interrupted balance operation after mount. + May be resumed with "btrfs balance resume." + + space_cache (*) + Enable the on-disk freespace cache. + nospace_cache + Disable freespace cache loading without clearing the cache. + clear_cache + Force clearing and rebuilding of the disk space cache if something + has gone wrong. + + ssd + nossd + ssd_spread + Options to control ssd allocation schemes. By default, BTRFS will + enable or disable ssd allocation heuristics depending on whether a + rotational or nonrotational disk is in use. The ssd and nossd options + can override this autodetection. + + The ssd_spread mount option attempts to allocate into big chunks + of unused space, and may perform better on low-end ssds. ssd_spread + implies ssd, enabling all other ssd heuristics as well. + + subvol= + Mount subvolume at rather than the root subvolume. is + relative to the top level subvolume. + + subvolid= + Mount subvolume specified by an ID number rather than the root subvolume. + This allows mounting of subvolumes which are not in the root of the mounted + filesystem. + You can use "btrfs subvolume list" to see subvolume ID numbers. + + subvolrootid= (deprecated) + Mount subvolume specified by rather than the root subvolume. + This allows mounting of subvolumes which are not in the root of the mounted + filesystem. + You can use "btrfs subvolume show " to see the object ID for a subvolume. + + thread_pool= + The number of worker threads to allocate. The default number is equal + to the number of CPUs + 2, or 8, whichever is smaller. + + user_subvol_rm_allowed + Allow subvolumes to be deleted by a non-root user. Use with caution. + +MAILING LIST +============ There is a Btrfs mailing list hosted on vger.kernel.org. You can find details on how to subscribe here: @@ -49,8 +215,8 @@ http://dir.gmane.org/gmane.comp.file-systems.btrfs - IRC - === +IRC +=== Discussion of Btrfs also occurs on the #btrfs channel of the Freenode IRC network. diff --git a/Documentation/gpio.txt b/Documentation/gpio.txt index 77a1d11..6f83fa9 100644 --- a/Documentation/gpio.txt +++ b/Documentation/gpio.txt @@ -72,11 +72,11 @@ in this document, but drivers acting as clients to the GPIO interface must not care how it's implemented.) 
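For illustration, here is a minimal sketch of what the standard GPIO calls discussed in this document look like from a client driver; the GPIO number and the consumer label are made up for the example, and gpiolib is assumed to be enabled on the platform:

    #include <linux/gpio.h>

    #define EXAMPLE_LED_GPIO	42	/* hypothetical GPIO number */

    static int example_led_init(void)
    {
            int err;

            /* Claim the line with a consumer label for debugging/sysfs. */
            err = gpio_request(EXAMPLE_LED_GPIO, "example-led");
            if (err)
                    return err;

            /* Configure it as an output, initially driven low. */
            err = gpio_direction_output(EXAMPLE_LED_GPIO, 0);
            if (err) {
                    gpio_free(EXAMPLE_LED_GPIO);
                    return err;
            }

            gpio_set_value(EXAMPLE_LED_GPIO, 1);	/* turn the LED on */
            return 0;
    }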
That said, if the convention is supported on their platform, drivers should -use it when possible. Platforms must declare GENERIC_GPIO support in their -Kconfig (boolean true), and provide an file. Drivers that can't -work without standard GPIO calls should have Kconfig entries which depend -on GENERIC_GPIO. The GPIO calls are available, either as "real code" or as -optimized-away stubs, when drivers use the include file: +use it when possible. Platforms must select ARCH_REQUIRE_GPIOLIB or +ARCH_WANT_OPTIONAL_GPIOLIB in their Kconfig. Drivers that can't work without +standard GPIO calls should have Kconfig entries which depend on GPIOLIB. The +GPIO calls are available, either as "real code" or as optimized-away stubs, +when drivers use the include file: #include diff --git a/Documentation/thermal/exynos_thermal_emulation b/Documentation/thermal/exynos_thermal_emulation index b73bbfb..36a3e79 100644 --- a/Documentation/thermal/exynos_thermal_emulation +++ b/Documentation/thermal/exynos_thermal_emulation @@ -13,11 +13,11 @@ Thermal emulation mode supports software debug for TMU's operation. User can set manually with software code and TMU will read current temperature from user value not from sensor's value. -Enabling CONFIG_EXYNOS_THERMAL_EMUL option will make this support in available. -When it's enabled, sysfs node will be created under -/sys/bus/platform/devices/'exynos device name'/ with name of 'emulation'. +Enabling CONFIG_THERMAL_EMULATION option will make this support available. +When it's enabled, sysfs node will be created as +/sys/devices/virtual/thermal/thermal_zone'zone id'/emul_temp. -The sysfs node, 'emulation', will contain value 0 for the initial state. When you input any +The sysfs node, 'emul_node', will contain value 0 for the initial state. When you input any temperature you want to update to sysfs node, it automatically enable emulation mode and current temperature will be changed into it. (Exynos also supports user changable delay time which would be used to delay of diff --git a/Documentation/thermal/sysfs-api.txt b/Documentation/thermal/sysfs-api.txt index 6859661..a71bd5b 100644 --- a/Documentation/thermal/sysfs-api.txt +++ b/Documentation/thermal/sysfs-api.txt @@ -31,15 +31,17 @@ temperature) and throttle appropriate devices. 1. thermal sysfs driver interface functions 1.1 thermal zone device interface -1.1.1 struct thermal_zone_device *thermal_zone_device_register(char *name, +1.1.1 struct thermal_zone_device *thermal_zone_device_register(char *type, int trips, int mask, void *devdata, - struct thermal_zone_device_ops *ops) + struct thermal_zone_device_ops *ops, + const struct thermal_zone_params *tzp, + int passive_delay, int polling_delay)) This interface function adds a new thermal zone device (sensor) to /sys/class/thermal folder as thermal_zone[0-*]. It tries to bind all the thermal cooling devices registered at the same time. - name: the thermal zone name. + type: the thermal zone type. trips: the total number of trip points this thermal zone supports. mask: Bit string: If 'n'th bit is set, then trip point 'n' is writeable. devdata: device private data @@ -57,6 +59,12 @@ temperature) and throttle appropriate devices. will be fired. .set_emul_temp: set the emulation temperature which helps in debugging different threshold temperature points. + tzp: thermal zone platform parameters. + passive_delay: number of milliseconds to wait between polls when + performing passive cooling. 
+ polling_delay: number of milliseconds to wait between polls when checking + whether trip points have been crossed (0 for interrupt driven systems). + 1.1.2 void thermal_zone_device_unregister(struct thermal_zone_device *tz) @@ -265,6 +273,10 @@ emul_temp Unit: millidegree Celsius WO, Optional + WARNING: Be careful while enabling this option on production systems, + because userland can easily disable the thermal policy by simply + flooding this sysfs node with low temperature values. + ***************************** * Cooling device attributes * ***************************** @@ -363,7 +375,7 @@ This function returns the thermal_instance corresponding to a given {thermal_zone, cooling_device, trip_point} combination. Returns NULL if such an instance does not exist. -5.3:notify_thermal_framework: +5.3:thermal_notify_framework: This function handles the trip events from sensor drivers. It starts throttling the cooling devices according to the policy configured. For CRITICAL and HOT trip points, this notifies the respective drivers, @@ -375,11 +387,3 @@ platform data is provided, this uses the step_wise throttling policy. This function serves as an arbitrator to set the state of a cooling device. It sets the cooling device to the deepest cooling state if possible. - -5.5:thermal_register_governor: -This function lets the various thermal governors to register themselves -with the Thermal framework. At run time, depending on a zone's platform -data, a particular governor is used for throttling. - -5.6:thermal_unregister_governor: -This function unregisters a governor from the thermal framework. diff --git a/Documentation/xtensa/mmu.txt b/Documentation/xtensa/mmu.txt new file mode 100644 index 0000000..2b1af76 --- /dev/null +++ b/Documentation/xtensa/mmu.txt @@ -0,0 +1,46 @@ +MMUv3 initialization sequence. + +The code in the initialize_mmu macro sets up MMUv3 memory mapping +identically to MMUv2 fixed memory mapping. Depending on +CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX symbol this code is +located in one of the following address ranges: + + 0xF0000000..0xFFFFFFFF (will keep same address in MMU v2 layout; + typically ROM) + 0x00000000..0x07FFFFFF (system RAM; this code is actually linked + at 0xD0000000..0xD7FFFFFF [cached] + or 0xD8000000..0xDFFFFFFF [uncached]; + in any case, initially runs elsewhere + than linked, so have to be careful) + +The code has the following assumptions: + This code fragment is run only on an MMU v3. + TLBs are in their reset state. + ITLBCFG and DTLBCFG are zero (reset state). + RASID is 0x04030201 (reset state). + PS.RING is zero (reset state). + LITBASE is zero (reset state, PC-relative literals); required to be PIC. + +TLB setup proceeds along the following steps. + + Legend: + VA = virtual address (two upper nibbles of it); + PA = physical address (two upper nibbles of it); + pc = physical range that contains this code; + +After step 2, we jump to virtual address in 0x40000000..0x5fffffff +that corresponds to next instruction to execute in this code. +After step 4, we jump to intended (linked) address of this code. 
+ + Step 0 Step1 Step 2 Step3 Step 4 Step5 + ============ ===== ============ ===== ============ ===== + VA PA PA VA PA PA VA PA PA + ------ -- -- ------ -- -- ------ -- -- + E0..FF -> E0 -> E0 E0..FF -> E0 F0..FF -> F0 -> F0 + C0..DF -> C0 -> C0 C0..DF -> C0 E0..EF -> F0 -> F0 + A0..BF -> A0 -> A0 A0..BF -> A0 D8..DF -> 00 -> 00 + 80..9F -> 80 -> 80 80..9F -> 80 D0..D7 -> 00 -> 00 + 60..7F -> 60 -> 60 60..7F -> 60 + 40..5F -> 40 40..5F -> pc -> pc 40..5F -> pc + 20..3F -> 20 -> 20 20..3F -> 20 + 00..1F -> 00 -> 00 00..1F -> 00 diff --git a/Documentation/zh_CN/gpio.txt b/Documentation/zh_CN/gpio.txt index 4fa7b4e..d5b8f01 100644 --- a/Documentation/zh_CN/gpio.txt +++ b/Documentation/zh_CN/gpio.txt @@ -84,10 +84,10 @@ GPIO 公约 控制器的抽象函数来实现它。(有一些可选的代码能支持这种策略的实现,本文档 后面会介绍,但作为 GPIO 接口的客户端驱动程序必须与它的实现无关。) -也就是说,如果在他们的平台上支持这个公约,驱动应尽可能的使用它。平台 -必须在 Kconfig 中声明对 GENERIC_GPIO的支持 (布尔型 true),并提供 -一个 文件。那些调用标准 GPIO 函数的驱动应该在 Kconfig -入口中声明依赖GENERIC_GPIO。当驱动包含文件: +也就是说,如果在他们的平台上支持这个公约,驱动应尽可能的使用它。同时,平台 +必须在 Kconfig 中选择 ARCH_REQUIRE_GPIOLIB 或者 ARCH_WANT_OPTIONAL_GPIOLIB +选项。那些调用标准 GPIO 函数的驱动应该在 Kconfig 入口中声明依赖GENERIC_GPIO。 +当驱动包含文件: #include diff --git a/MAINTAINERS b/MAINTAINERS index 5f5c895..3d7782b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8029,11 +8029,14 @@ F: arch/xtensa/ THERMAL M: Zhang Rui +M: Eduardo Valentin L: linux-pm@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git +Q: https://patchwork.kernel.org/project/linux-pm/list/ S: Supported F: drivers/thermal/ F: include/linux/thermal.h +F: include/linux/cpu_cooling.h THINGM BLINK(1) USB RGB LED DRIVER M: Vivien Didelot diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 8629127..837a1f2 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -55,9 +55,6 @@ config GENERIC_CALIBRATE_DELAY bool default y -config GENERIC_GPIO - bool - config ZONE_DMA bool default y diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index e6f4eca..491ae79 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -16,8 +16,6 @@ config ARC select GENERIC_FIND_FIRST_BIT # for now, we don't need GENERIC_IRQ_PROBE, CONFIG_GENERIC_IRQ_CHIP select GENERIC_IRQ_SHOW - select GENERIC_KERNEL_EXECVE - select GENERIC_KERNEL_THREAD select GENERIC_PENDING_IRQ if SMP select GENERIC_SMP_IDLE_THREAD select HAVE_ARCH_KGDB @@ -61,9 +59,6 @@ config GENERIC_CALIBRATE_DELAY config GENERIC_HWEIGHT def_bool y -config BINFMT_ELF - def_bool y - config STACKTRACE_SUPPORT def_bool y select STACKTRACE @@ -82,6 +77,7 @@ menu "ARC Architecture Configuration" menu "ARC Platform/SoC/Board" source "arch/arc/plat-arcfpga/Kconfig" +source "arch/arc/plat-tb10x/Kconfig" #New platform adds here endmenu @@ -134,9 +130,6 @@ if SMP config ARC_HAS_COH_CACHES def_bool n -config ARC_HAS_COH_LLSC - def_bool n - config ARC_HAS_COH_RTSC def_bool n @@ -304,6 +297,9 @@ config ARC_FPU_SAVE_RESTORE based on actual usage of FPU by a task. Thus our implemn does this for all tasks in system. 
+config ARC_CANT_LLSC + def_bool n + menuconfig ARC_CPU_REL_4_10 bool "Enable support for Rel 4.10 features" default n @@ -314,9 +310,7 @@ menuconfig ARC_CPU_REL_4_10 config ARC_HAS_LLSC bool "Insn: LLOCK/SCOND (efficient atomic ops)" default y - depends on ARC_CPU_770 - # if SMP, enable LLSC ONLY if ARC implementation has coherent atomics - depends on !SMP || ARC_HAS_COH_LLSC + depends on ARC_CPU_770 && !ARC_CANT_LLSC config ARC_HAS_SWAPE bool "Insn: SWAPE (endian-swap)" @@ -415,13 +409,6 @@ config ARC_DBG_TLB_MISS_COUNT Counts number of I and D TLB Misses and exports them via Debugfs The counters can be cleared via Debugfs as well -config CMDLINE - string "Kernel command line to built-in" - default "print-fatal-signals=1" - help - The default command line which will be appended to the optional - u-boot provided command line (see below) - config CMDLINE_UBOOT bool "Support U-boot kernel command line passing" default n @@ -430,8 +417,8 @@ config CMDLINE_UBOOT command line from the U-boot environment to the Linux kernel then switch this option on. ARC U-boot will setup the cmdline in RAM/flash and set r2 to point - to it. kernel startup code will copy the string into cmdline buffer - and also append CONFIG_CMDLINE. + to it. kernel startup code will append this to DeviceTree + /bootargs provided cmdline args. config ARC_BUILTIN_DTB_NAME string "Built in DTB" @@ -441,6 +428,10 @@ config ARC_BUILTIN_DTB_NAME source "kernel/Kconfig.preempt" +menu "Executable file formats" +source "fs/Kconfig.binfmt" +endmenu + endmenu # "ARC Architecture Configuration" source "mm/Kconfig" diff --git a/arch/arc/Makefile b/arch/arc/Makefile index 92379c7..183397f 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile @@ -8,6 +8,10 @@ UTS_MACHINE := arc +ifeq ($(CROSS_COMPILE),) +CROSS_COMPILE := arc-elf32- +endif + KBUILD_DEFCONFIG := fpga_defconfig cflags-y += -mA7 -fno-common -pipe -fno-builtin -D__linux__ @@ -87,20 +91,23 @@ core-y += arch/arc/ core-y += arch/arc/boot/dts/ core-$(CONFIG_ARC_PLAT_FPGA_LEGACY) += arch/arc/plat-arcfpga/ +core-$(CONFIG_ARC_PLAT_TB10X) += arch/arc/plat-tb10x/ drivers-$(CONFIG_OPROFILE) += arch/arc/oprofile/ libs-y += arch/arc/lib/ $(LIBGCC) +boot := arch/arc/boot + #default target for make without any arguements. 
-KBUILD_IMAGE := bootpImage +KBUILD_IMAGE := bootpImage all: $(KBUILD_IMAGE) -boot := arch/arc/boot - bootpImage: vmlinux -uImage: vmlinux +boot_targets += uImage uImage.bin uImage.gz + +$(boot_targets): vmlinux $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ %.dtb %.dtb.S %.dtb.o: scripts diff --git a/arch/arc/boot/Makefile b/arch/arc/boot/Makefile index 7d514c2..e597cb34 100644 --- a/arch/arc/boot/Makefile +++ b/arch/arc/boot/Makefile @@ -3,7 +3,6 @@ targets := vmlinux.bin vmlinux.bin.gz uImage # uImage build relies on mkimage being availble on your host for ARC target # You will need to build u-boot for ARC, rename mkimage to arc-elf32-mkimage # and make sure it's reacable from your PATH -MKIMAGE := $(srctree)/scripts/mkuboot.sh OBJCOPYFLAGS= -O binary -R .note -R .note.gnu.build-id -R .comment -S @@ -12,7 +11,12 @@ LINUX_START_TEXT = $$(readelf -h vmlinux | \ UIMAGE_LOADADDR = $(CONFIG_LINUX_LINK_BASE) UIMAGE_ENTRYADDR = $(LINUX_START_TEXT) -UIMAGE_COMPRESSION = gzip + +suffix-y := bin +suffix-$(CONFIG_KERNEL_GZIP) := gz + +targets += uImage uImage.bin uImage.gz +extra-y += vmlinux.bin vmlinux.bin.gz $(obj)/vmlinux.bin: vmlinux FORCE $(call if_changed,objcopy) @@ -20,7 +24,12 @@ $(obj)/vmlinux.bin: vmlinux FORCE $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE $(call if_changed,gzip) -$(obj)/uImage: $(obj)/vmlinux.bin.gz FORCE - $(call if_changed,uimage) +$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE + $(call if_changed,uimage,none) + +$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE + $(call if_changed,uimage,gzip) -PHONY += FORCE +$(obj)/uImage: $(obj)/uImage.$(suffix-y) + @ln -sf $(notdir $<) $@ + @echo ' Image $@ is ready' diff --git a/arch/arc/boot/dts/Makefile b/arch/arc/boot/dts/Makefile index 5776835..faf240e 100644 --- a/arch/arc/boot/dts/Makefile +++ b/arch/arc/boot/dts/Makefile @@ -8,6 +8,8 @@ endif obj-y += $(builtindtb-y).dtb.o targets += $(builtindtb-y).dtb +.SECONDARY: $(obj)/$(builtindtb-y).dtb.S + dtbs: $(addprefix $(obj)/, $(builtindtb-y).dtb) -clean-files := *.dtb +clean-files := *.dtb *.dtb.S diff --git a/arch/arc/boot/dts/abilis_tb100.dtsi b/arch/arc/boot/dts/abilis_tb100.dtsi new file mode 100644 index 0000000..941ad11 --- /dev/null +++ b/arch/arc/boot/dts/abilis_tb100.dtsi @@ -0,0 +1,340 @@ +/* + * Abilis Systems TB100 SOC device tree + * + * Copyright (C) Abilis Systems 2013 + * + * Author: Christian Ruppert + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +/include/ "abilis_tb10x.dtsi" + +/* interrupt specifiers + * -------------------- + * 0: rising, 1: low, 2: high, 3: falling, + */ + +/ { + clock-frequency = <500000000>; /* 500 MHZ */ + + soc100 { + bus-frequency = <166666666>; + + pll0: oscillator { + clock-frequency = <1000000000>; + }; + cpu_clk: clkdiv_cpu { + clock-mult = <1>; + clock-div = <2>; + }; + ahb_clk: clkdiv_ahb { + clock-mult = <1>; + clock-div = <6>; + }; + + iomux: iomux@FF10601c { + /* Port 1 */ + pctl_tsin_s0: pctl-tsin-s0 { /* Serial TS-in 0 */ + pingrp = "mis0_pins"; + }; + pctl_tsin_s1: pctl-tsin-s1 { /* Serial TS-in 1 */ + pingrp = "mis1_pins"; + }; + pctl_gpio_a: pctl-gpio-a { /* GPIO bank A */ + pingrp = "gpioa_pins"; + }; + pctl_tsin_p1: pctl-tsin-p1 { /* Parallel TS-in 1 */ + pingrp = "mip1_pins"; + }; + /* Port 2 */ + pctl_tsin_s2: pctl-tsin-s2 { /* Serial TS-in 2 */ + pingrp = "mis2_pins"; + }; + pctl_tsin_s3: pctl-tsin-s3 { /* Serial TS-in 3 */ + pingrp = "mis3_pins"; + }; + pctl_gpio_c: pctl-gpio-c { /* GPIO bank C */ + pingrp = "gpioc_pins"; + }; + pctl_tsin_p3: pctl-tsin-p3 { /* Parallel TS-in 3 */ + pingrp = "mip3_pins"; + }; + /* Port 3 */ + pctl_tsin_s4: pctl-tsin-s4 { /* Serial TS-in 4 */ + pingrp = "mis4_pins"; + }; + pctl_tsin_s5: pctl-tsin-s5 { /* Serial TS-in 5 */ + pingrp = "mis5_pins"; + }; + pctl_gpio_e: pctl-gpio-e { /* GPIO bank E */ + pingrp = "gpioe_pins"; + }; + pctl_tsin_p5: pctl-tsin-p5 { /* Parallel TS-in 5 */ + pingrp = "mip5_pins"; + }; + /* Port 4 */ + pctl_tsin_s6: pctl-tsin-s6 { /* Serial TS-in 6 */ + pingrp = "mis6_pins"; + }; + pctl_tsin_s7: pctl-tsin-s7 { /* Serial TS-in 7 */ + pingrp = "mis7_pins"; + }; + pctl_gpio_g: pctl-gpio-g { /* GPIO bank G */ + pingrp = "gpiog_pins"; + }; + pctl_tsin_p7: pctl-tsin-p7 { /* Parallel TS-in 7 */ + pingrp = "mip7_pins"; + }; + /* Port 5 */ + pctl_gpio_j: pctl-gpio-j { /* GPIO bank J */ + pingrp = "gpioj_pins"; + }; + pctl_gpio_k: pctl-gpio-k { /* GPIO bank K */ + pingrp = "gpiok_pins"; + }; + pctl_ciplus: pctl-ciplus { /* CI+ interface */ + pingrp = "ciplus_pins"; + }; + pctl_mcard: pctl-mcard { /* M-Card interface */ + pingrp = "mcard_pins"; + }; + /* Port 6 */ + pctl_tsout_p: pctl-tsout-p { /* Parallel TS-out */ + pingrp = "mop_pins"; + }; + pctl_tsout_s0: pctl-tsout-s0 { /* Serial TS-out 0 */ + pingrp = "mos0_pins"; + }; + pctl_tsout_s1: pctl-tsout-s1 { /* Serial TS-out 1 */ + pingrp = "mos1_pins"; + }; + pctl_tsout_s2: pctl-tsout-s2 { /* Serial TS-out 2 */ + pingrp = "mos2_pins"; + }; + pctl_tsout_s3: pctl-tsout-s3 { /* Serial TS-out 3 */ + pingrp = "mos3_pins"; + }; + /* Port 7 */ + pctl_uart0: pctl-uart0 { /* UART 0 */ + pingrp = "uart0_pins"; + }; + pctl_uart1: pctl-uart1 { /* UART 1 */ + pingrp = "uart1_pins"; + }; + pctl_gpio_l: pctl-gpio-l { /* GPIO bank L */ + pingrp = "gpiol_pins"; + }; + pctl_gpio_m: pctl-gpio-m { /* GPIO bank M */ + pingrp = "gpiom_pins"; + }; + /* Port 8 */ + pctl_spi3: pctl-spi3 { + pingrp = "spi3_pins"; + }; + /* Port 9 */ + pctl_spi1: pctl-spi1 { + pingrp = "spi1_pins"; + }; + pctl_gpio_n: pctl-gpio-n { + pingrp = "gpion_pins"; + }; + /* Unmuxed GPIOs */ + pctl_gpio_b: pctl-gpio-b { + pingrp = "gpiob_pins"; + }; + pctl_gpio_d: pctl-gpio-d { + pingrp = "gpiod_pins"; + }; + pctl_gpio_f: pctl-gpio-f { + pingrp = "gpiof_pins"; + }; + pctl_gpio_h: pctl-gpio-h { + pingrp = "gpioh_pins"; + }; + 
pctl_gpio_i: pctl-gpio-i { + pingrp = "gpioi_pins"; + }; + }; + + gpioa: gpio@FF140000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF140000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <0>; + gpio-pins = <&pctl_gpio_a>; + }; + gpiob: gpio@FF141000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF141000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <3>; + gpio-pins = <&pctl_gpio_b>; + }; + gpioc: gpio@FF142000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF142000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <5>; + gpio-pins = <&pctl_gpio_c>; + }; + gpiod: gpio@FF143000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF143000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <8>; + gpio-pins = <&pctl_gpio_d>; + }; + gpioe: gpio@FF144000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF144000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <10>; + gpio-pins = <&pctl_gpio_e>; + }; + gpiof: gpio@FF145000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF145000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <13>; + gpio-pins = <&pctl_gpio_f>; + }; + gpiog: gpio@FF146000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF146000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <15>; + gpio-pins = <&pctl_gpio_g>; + }; + gpioh: gpio@FF147000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF147000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <18>; + gpio-pins = <&pctl_gpio_h>; + }; + gpioi: gpio@FF148000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF148000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <20>; + gpio-pins = <&pctl_gpio_i>; + }; + gpioj: gpio@FF149000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF149000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <32>; + gpio-pins = <&pctl_gpio_j>; + }; + gpiok: gpio@FF14a000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF14A000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <64>; + gpio-pins = <&pctl_gpio_k>; + }; + gpiol: gpio@FF14b000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF14B000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <86>; + 
gpio-pins = <&pctl_gpio_l>; + }; + gpiom: gpio@FF14c000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF14C000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <90>; + gpio-pins = <&pctl_gpio_m>; + }; + gpion: gpio@FF14d000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF14D000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <94>; + gpio-pins = <&pctl_gpio_n>; + }; + }; +}; diff --git a/arch/arc/boot/dts/abilis_tb100_dvk.dts b/arch/arc/boot/dts/abilis_tb100_dvk.dts new file mode 100644 index 0000000..c0fd362 --- /dev/null +++ b/arch/arc/boot/dts/abilis_tb100_dvk.dts @@ -0,0 +1,127 @@ +/* + * Abilis Systems TB100 Development Kit PCB device tree + * + * Copyright (C) Abilis Systems 2013 + * + * Author: Christian Ruppert + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +/dts-v1/; + +/include/ "abilis_tb100.dtsi" + +/ { + chosen { + bootargs = "earlycon=uart8250,mmio32,0xff100000,9600n8 console=ttyS0,9600n8"; + }; + + aliases { }; + + memory { + device_type = "memory"; + reg = <0x80000000 0x08000000>; /* 128M */ + }; + + soc100 { + uart@FF100000 { + pinctrl-names = "abilis,simple-default"; + pinctrl-0 = <&pctl_uart0>; + }; + ethernet@FE100000 { + phy-mode = "rgmii"; + }; + + i2c0: i2c@FF120000 { + sda-hold-time = <432>; + }; + i2c1: i2c@FF121000 { + sda-hold-time = <432>; + }; + i2c2: i2c@FF122000 { + sda-hold-time = <432>; + }; + i2c3: i2c@FF123000 { + sda-hold-time = <432>; + }; + i2c4: i2c@FF124000 { + sda-hold-time = <432>; + }; + + leds { + compatible = "gpio-leds"; + power { + label = "Power"; + gpios = <&gpioi 0>; + linux,default-trigger = "default-on"; + }; + heartbeat { + label = "Heartbeat"; + gpios = <&gpioi 1>; + linux,default-trigger = "heartbeat"; + }; + led2 { + label = "LED2"; + gpios = <&gpioi 2>; + default-state = "off"; + }; + led3 { + label = "LED3"; + gpios = <&gpioi 3>; + default-state = "off"; + }; + led4 { + label = "LED4"; + gpios = <&gpioi 4>; + default-state = "off"; + }; + led5 { + label = "LED5"; + gpios = <&gpioi 5>; + default-state = "off"; + }; + led6 { + label = "LED6"; + gpios = <&gpioi 6>; + default-state = "off"; + }; + led7 { + label = "LED7"; + gpios = <&gpioi 7>; + default-state = "off"; + }; + led8 { + label = "LED8"; + gpios = <&gpioi 8>; + default-state = "off"; + }; + led9 { + label = "LED9"; + gpios = <&gpioi 9>; + default-state = "off"; + }; + led10 { + label = "LED10"; + gpios = <&gpioi 10>; + default-state = "off"; + }; + led11 { + label = "LED11"; + gpios = <&gpioi 11>; + default-state = "off"; + }; + }; + }; +}; diff --git a/arch/arc/boot/dts/abilis_tb101.dtsi b/arch/arc/boot/dts/abilis_tb101.dtsi new file mode 100644 index 0000000..fd25c21 --- /dev/null +++ 
b/arch/arc/boot/dts/abilis_tb101.dtsi @@ -0,0 +1,349 @@ +/* + * Abilis Systems TB101 SOC device tree + * + * Copyright (C) Abilis Systems 2013 + * + * Author: Christian Ruppert + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +/include/ "abilis_tb10x.dtsi" + +/* interrupt specifiers + * -------------------- + * 0: rising, 1: low, 2: high, 3: falling, + */ + +/ { + clock-frequency = <500000000>; /* 500 MHZ */ + + soc100 { + bus-frequency = <166666666>; + + pll0: oscillator { + clock-frequency = <1000000000>; + }; + cpu_clk: clkdiv_cpu { + clock-mult = <1>; + clock-div = <2>; + }; + ahb_clk: clkdiv_ahb { + clock-mult = <1>; + clock-div = <6>; + }; + + iomux: iomux@FF10601c { + /* Port 1 */ + pctl_tsin_s0: pctl-tsin-s0 { /* Serial TS-in 0 */ + pingrp = "mis0_pins"; + }; + pctl_tsin_s1: pctl-tsin-s1 { /* Serial TS-in 1 */ + pingrp = "mis1_pins"; + }; + pctl_gpio_a: pctl-gpio-a { /* GPIO bank A */ + pingrp = "gpioa_pins"; + }; + pctl_tsin_p1: pctl-tsin-p1 { /* Parallel TS-in 1 */ + pingrp = "mip1_pins"; + }; + /* Port 2 */ + pctl_tsin_s2: pctl-tsin-s2 { /* Serial TS-in 2 */ + pingrp = "mis2_pins"; + }; + pctl_tsin_s3: pctl-tsin-s3 { /* Serial TS-in 3 */ + pingrp = "mis3_pins"; + }; + pctl_gpio_c: pctl-gpio-c { /* GPIO bank C */ + pingrp = "gpioc_pins"; + }; + pctl_tsin_p3: pctl-tsin-p3 { /* Parallel TS-in 3 */ + pingrp = "mip3_pins"; + }; + /* Port 3 */ + pctl_tsin_s4: pctl-tsin-s4 { /* Serial TS-in 4 */ + pingrp = "mis4_pins"; + }; + pctl_tsin_s5: pctl-tsin-s5 { /* Serial TS-in 5 */ + pingrp = "mis5_pins"; + }; + pctl_gpio_e: pctl-gpio-e { /* GPIO bank E */ + pingrp = "gpioe_pins"; + }; + pctl_tsin_p5: pctl-tsin-p5 { /* Parallel TS-in 5 */ + pingrp = "mip5_pins"; + }; + /* Port 4 */ + pctl_tsin_s6: pctl-tsin-s6 { /* Serial TS-in 6 */ + pingrp = "mis6_pins"; + }; + pctl_tsin_s7: pctl-tsin-s7 { /* Serial TS-in 7 */ + pingrp = "mis7_pins"; + }; + pctl_gpio_g: pctl-gpio-g { /* GPIO bank G */ + pingrp = "gpiog_pins"; + }; + pctl_tsin_p7: pctl-tsin-p7 { /* Parallel TS-in 7 */ + pingrp = "mip7_pins"; + }; + /* Port 5 */ + pctl_gpio_j: pctl-gpio-j { /* GPIO bank J */ + pingrp = "gpioj_pins"; + }; + pctl_gpio_k: pctl-gpio-k { /* GPIO bank K */ + pingrp = "gpiok_pins"; + }; + pctl_ciplus: pctl-ciplus { /* CI+ interface */ + pingrp = "ciplus_pins"; + }; + pctl_mcard: pctl-mcard { /* M-Card interface */ + pingrp = "mcard_pins"; + }; + pctl_stc0: pctl-stc0 { /* Smart card I/F 0 */ + pingrp = "stc0_pins"; + }; + pctl_stc1: pctl-stc1 { /* Smart card I/F 1 */ + pingrp = "stc1_pins"; + }; + /* Port 6 */ + pctl_tsout_p: pctl-tsout-p { /* Parallel TS-out */ + pingrp = "mop_pins"; + }; + pctl_tsout_s0: pctl-tsout-s0 { /* Serial TS-out 0 */ + pingrp = "mos0_pins"; + }; + pctl_tsout_s1: pctl-tsout-s1 { /* Serial TS-out 1 */ + pingrp = "mos1_pins"; + }; + pctl_tsout_s2: pctl-tsout-s2 { /* Serial TS-out 2 */ + pingrp = "mos2_pins"; + }; + pctl_tsout_s3: pctl-tsout-s3 { /* Serial TS-out 3 */ + pingrp = 
"mos3_pins"; + }; + /* Port 7 */ + pctl_uart0: pctl-uart0 { /* UART 0 */ + pingrp = "uart0_pins"; + }; + pctl_uart1: pctl-uart1 { /* UART 1 */ + pingrp = "uart1_pins"; + }; + pctl_gpio_l: pctl-gpio-l { /* GPIO bank L */ + pingrp = "gpiol_pins"; + }; + pctl_gpio_m: pctl-gpio-m { /* GPIO bank M */ + pingrp = "gpiom_pins"; + }; + /* Port 8 */ + pctl_spi3: pctl-spi3 { + pingrp = "spi3_pins"; + }; + pctl_jtag: pctl-jtag { + pingrp = "jtag_pins"; + }; + /* Port 9 */ + pctl_spi1: pctl-spi1 { + pingrp = "spi1_pins"; + }; + pctl_gpio_n: pctl-gpio-n { + pingrp = "gpion_pins"; + }; + /* Unmuxed GPIOs */ + pctl_gpio_b: pctl-gpio-b { + pingrp = "gpiob_pins"; + }; + pctl_gpio_d: pctl-gpio-d { + pingrp = "gpiod_pins"; + }; + pctl_gpio_f: pctl-gpio-f { + pingrp = "gpiof_pins"; + }; + pctl_gpio_h: pctl-gpio-h { + pingrp = "gpioh_pins"; + }; + pctl_gpio_i: pctl-gpio-i { + pingrp = "gpioi_pins"; + }; + }; + + gpioa: gpio@FF140000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF140000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <0>; + gpio-pins = <&pctl_gpio_a>; + }; + gpiob: gpio@FF141000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF141000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <3>; + gpio-pins = <&pctl_gpio_b>; + }; + gpioc: gpio@FF142000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF142000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <5>; + gpio-pins = <&pctl_gpio_c>; + }; + gpiod: gpio@FF143000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF143000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <8>; + gpio-pins = <&pctl_gpio_d>; + }; + gpioe: gpio@FF144000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF144000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <10>; + gpio-pins = <&pctl_gpio_e>; + }; + gpiof: gpio@FF145000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF145000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <13>; + gpio-pins = <&pctl_gpio_f>; + }; + gpiog: gpio@FF146000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF146000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <15>; + gpio-pins = <&pctl_gpio_g>; + }; + gpioh: gpio@FF147000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF147000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <18>; + gpio-pins = <&pctl_gpio_h>; + }; + gpioi: gpio@FF148000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF148000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <20>; + gpio-pins = <&pctl_gpio_i>; + }; + gpioj: 
gpio@FF149000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF149000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <32>; + gpio-pins = <&pctl_gpio_j>; + }; + gpiok: gpio@FF14a000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF14A000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <64>; + gpio-pins = <&pctl_gpio_k>; + }; + gpiol: gpio@FF14b000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF14B000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <86>; + gpio-pins = <&pctl_gpio_l>; + }; + gpiom: gpio@FF14c000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF14C000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <90>; + gpio-pins = <&pctl_gpio_m>; + }; + gpion: gpio@FF14d000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF14D000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <94>; + gpio-pins = <&pctl_gpio_n>; + }; + }; +}; diff --git a/arch/arc/boot/dts/abilis_tb101_dvk.dts b/arch/arc/boot/dts/abilis_tb101_dvk.dts new file mode 100644 index 0000000..6f8c381 --- /dev/null +++ b/arch/arc/boot/dts/abilis_tb101_dvk.dts @@ -0,0 +1,127 @@ +/* + * Abilis Systems TB101 Development Kit PCB device tree + * + * Copyright (C) Abilis Systems 2013 + * + * Author: Christian Ruppert + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +/dts-v1/; + +/include/ "abilis_tb101.dtsi" + +/ { + chosen { + bootargs = "earlycon=uart8250,mmio32,0xff100000,9600n8 console=ttyS0,9600n8"; + }; + + aliases { }; + + memory { + device_type = "memory"; + reg = <0x80000000 0x08000000>; /* 128M */ + }; + + soc100 { + uart@FF100000 { + pinctrl-names = "abilis,simple-default"; + pinctrl-0 = <&pctl_uart0>; + }; + ethernet@FE100000 { + phy-mode = "rgmii"; + }; + + i2c0: i2c@FF120000 { + sda-hold-time = <432>; + }; + i2c1: i2c@FF121000 { + sda-hold-time = <432>; + }; + i2c2: i2c@FF122000 { + sda-hold-time = <432>; + }; + i2c3: i2c@FF123000 { + sda-hold-time = <432>; + }; + i2c4: i2c@FF124000 { + sda-hold-time = <432>; + }; + + leds { + compatible = "gpio-leds"; + power { + label = "Power"; + gpios = <&gpioi 0>; + linux,default-trigger = "default-on"; + }; + heartbeat { + label = "Heartbeat"; + gpios = <&gpioi 1>; + linux,default-trigger = "heartbeat"; + }; + led2 { + label = "LED2"; + gpios = <&gpioi 2>; + default-state = "off"; + }; + led3 { + label = "LED3"; + gpios = <&gpioi 3>; + default-state = "off"; + }; + led4 { + label = "LED4"; + gpios = <&gpioi 4>; + default-state = "off"; + }; + led5 { + label = "LED5"; + gpios = <&gpioi 5>; + default-state = "off"; + }; + led6 { + label = "LED6"; + gpios = <&gpioi 6>; + default-state = "off"; + }; + led7 { + label = "LED7"; + gpios = <&gpioi 7>; + default-state = "off"; + }; + led8 { + label = "LED8"; + gpios = <&gpioi 8>; + default-state = "off"; + }; + led9 { + label = "LED9"; + gpios = <&gpioi 9>; + default-state = "off"; + }; + led10 { + label = "LED10"; + gpios = <&gpioi 10>; + default-state = "off"; + }; + led11 { + label = "LED11"; + gpios = <&gpioi 11>; + default-state = "off"; + }; + }; + }; +}; diff --git a/arch/arc/boot/dts/abilis_tb10x.dtsi b/arch/arc/boot/dts/abilis_tb10x.dtsi new file mode 100644 index 0000000..a6139fc --- /dev/null +++ b/arch/arc/boot/dts/abilis_tb10x.dtsi @@ -0,0 +1,247 @@ +/* + * Abilis Systems TB10X SOC device tree + * + * Copyright (C) Abilis Systems 2013 + * + * Author: Christian Ruppert + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +/* interrupt specifiers + * -------------------- + * 0: rising, 1: low, 2: high, 3: falling, + */ + +/ { + compatible = "abilis,arc-tb10x"; + #address-cells = <1>; + #size-cells = <1>; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + cpu@0 { + device_type = "cpu"; + compatible = "snps,arc770d"; + reg = <0>; + }; + }; + + soc100 { + #address-cells = <1>; + #size-cells = <1>; + device_type = "soc"; + ranges = <0xfe000000 0xfe000000 0x02000000 + 0x000F0000 0x000F0000 0x00010000>; + compatible = "abilis,tb10x", "simple-bus"; + + pll0: oscillator { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-output-names = "pll0"; + }; + cpu_clk: clkdiv_cpu { + compatible = "fixed-factor-clock"; + #clock-cells = <0>; + clocks = <&pll0>; + clock-output-names = "cpu_clk"; + }; + ahb_clk: clkdiv_ahb { + compatible = "fixed-factor-clock"; + #clock-cells = <0>; + clocks = <&pll0>; + clock-output-names = "ahb_clk"; + }; + + iomux: iomux@FF10601c { + #address-cells = <1>; + #size-cells = <1>; + compatible = "abilis,tb10x-iomux"; + reg = <0xFF10601c 0x4>; + }; + + intc: interrupt-controller { + compatible = "snps,arc700-intc"; + interrupt-controller; + #interrupt-cells = <1>; + }; + tb10x_ictl: pic@fe002000 { + compatible = "abilis,tb10x_ictl"; + reg = <0xFE002000 0x20>; + interrupt-controller; + #interrupt-cells = <2>; + interrupt-parent = <&intc>; + interrupts = <5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 + 20 21 22 23 24 25 26 27 28 29 30 31>; + }; + + uart@FF100000 { + compatible = "snps,dw-apb-uart", + "abilis,simple-pinctrl"; + reg = <0xFF100000 0x100>; + clock-frequency = <166666666>; + interrupts = <25 1>; + reg-shift = <2>; + reg-io-width = <4>; + interrupt-parent = <&tb10x_ictl>; + }; + ethernet@FE100000 { + compatible = "snps,dwmac-3.70a","snps,dwmac"; + reg = <0xFE100000 0x1058>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <6 1>; + interrupt-names = "macirq"; + clocks = <&ahb_clk>; + clock-names = "stmmaceth"; + }; + dma@FE000000 { + compatible = "snps,dma-spear1340"; + reg = <0xFE000000 0x400>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <14 1>; + dma-channels = <6>; + dma-requests = <0>; + dma-masters = <1>; + #dma-cells = <3>; + chan_allocation_order = <0>; + chan_priority = <1>; + block_size = <0x7ff>; + data_width = <2 0 0 0>; + clocks = <&ahb_clk>; + clock-names = "hclk"; + }; + + i2c0: i2c@FF120000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,designware-i2c"; + reg = <0xFF120000 0x1000>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <12 1>; + clocks = <&ahb_clk>; + }; + i2c1: i2c@FF121000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,designware-i2c"; + reg = <0xFF121000 0x1000>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <12 1>; + clocks = <&ahb_clk>; + }; + i2c2: i2c@FF122000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,designware-i2c"; + reg = <0xFF122000 0x1000>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <12 1>; + clocks = <&ahb_clk>; + }; + i2c3: i2c@FF123000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,designware-i2c"; + reg = <0xFF123000 0x1000>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <12 1>; + clocks = <&ahb_clk>; + }; + i2c4: i2c@FF124000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = 
"snps,designware-i2c"; + reg = <0xFF124000 0x1000>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <12 1>; + clocks = <&ahb_clk>; + }; + + spi0: spi@0xFE010000 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <0>; + compatible = "abilis,tb100-spi"; + num-cs = <1>; + reg = <0xFE010000 0x20>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <26 1>; + clocks = <&ahb_clk>; + }; + spi1: spi@0xFE011000 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <1>; + compatible = "abilis,tb100-spi", + "abilis,simple-pinctrl"; + num-cs = <2>; + reg = <0xFE011000 0x20>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <10 1>; + clocks = <&ahb_clk>; + }; + + tb10x_tsm: tb10x-tsm@ff316000 { + compatible = "abilis,tb100-tsm"; + reg = <0xff316000 0x400>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <17 1>; + output-clkdiv = <4>; + global-packet-delay = <0x21>; + port-packet-delay = <0>; + }; + tb10x_stream_proc: tb10x-stream-proc { + compatible = "abilis,tb100-streamproc"; + reg = <0xfff00000 0x200>, + <0x000f0000 0x10000>, + <0xfff00200 0x105>, + <0xff10600c 0x1>, + <0xfe001018 0x1>; + reg-names = "mbox", + "sp_iccm", + "mbox_irq", + "cpuctrl", + "a6it_int_force"; + interrupt-parent = <&tb10x_ictl>; + interrupts = <20 1>, <19 1>; + interrupt-names = "cmd_irq", "event_irq"; + }; + tb10x_mdsc0: tb10x-mdscr@FF300000 { + compatible = "abilis,tb100-mdscr"; + reg = <0xFF300000 0x7000>; + tb100-mdscr-manage-tsin; + }; + tb10x_mscr0: tb10x-mdscr@FF307000 { + compatible = "abilis,tb100-mdscr"; + reg = <0xFF307000 0x7000>; + }; + tb10x_scr0: tb10x-mdscr@ff30e000 { + compatible = "abilis,tb100-mdscr"; + reg = <0xFF30e000 0x4000>; + tb100-mdscr-manage-tsin; + }; + tb10x_scr1: tb10x-mdscr@ff312000 { + compatible = "abilis,tb100-mdscr"; + reg = <0xFF312000 0x4000>; + tb100-mdscr-manage-tsin; + }; + tb10x_wfb: tb10x-wfb@ff319000 { + compatible = "abilis,tb100-wfb"; + reg = <0xff319000 0x1000>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <16 1>; + }; + }; +}; diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts new file mode 100644 index 0000000..ea16d78 --- /dev/null +++ b/arch/arc/boot/dts/nsimosci.dts @@ -0,0 +1,77 @@ +/* + * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +/dts-v1/; + +/include/ "skeleton.dtsi" + +/ { + compatible = "snps,nsimosci"; + clock-frequency = <80000000>; /* 80 MHZ */ + #address-cells = <1>; + #size-cells = <1>; + interrupt-parent = <&intc>; + + chosen { + bootargs = "console=tty0 consoleblank=0"; + }; + + aliases { + serial0 = &uart0; + }; + + memory { + device_type = "memory"; + reg = <0x80000000 0x10000000>; /* 256M */ + }; + + fpga { + compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + + /* child and parent address space 1:1 mapped */ + ranges; + + intc: interrupt-controller { + compatible = "snps,arc700-intc"; + interrupt-controller; + #interrupt-cells = <1>; + }; + + uart0: serial@c0000000 { + compatible = "snps,dw-apb-uart"; + reg = <0xc0000000 0x2000>; + interrupts = <11>; + #clock-frequency = <80000000>; + clock-frequency = <3686400>; + baud = <115200>; + reg-shift = <2>; + reg-io-width = <4>; + status = "okay"; + }; + + pgu0: pgu@c9000000 { + compatible = "snps,arcpgufb"; + reg = <0xc9000000 0x400>; + }; + + ps2: ps2@c9001000 { + compatible = "snps,arc_ps2"; + reg = <0xc9000400 0x14>; + interrupts = <13>; + interrupt-names = "arc_ps2_irq"; + }; + + eth0: ethernet@c0003000 { + compatible = "snps,oscilan"; + reg = <0xc0003000 0x44>; + interrupts = <7>, <8>; + interrupt-names = "rx", "tx"; + }; + }; +}; diff --git a/arch/arc/configs/fpga_defconfig b/arch/arc/configs/fpga_defconfig index b869806..95350be 100644 --- a/arch/arc/configs/fpga_defconfig +++ b/arch/arc/configs/fpga_defconfig @@ -9,7 +9,7 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../arc_initramfs" +CONFIG_INITRAMFS_SOURCE="../arc_initramfs/" CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y # CONFIG_SLUB_DEBUG is not set @@ -24,6 +24,7 @@ CONFIG_ARC_PLAT_FPGA_LEGACY=y CONFIG_ARC_BOARD_ML509=y # CONFIG_ARC_HAS_RTSC is not set CONFIG_ARC_BUILTIN_DTB_NAME="angel4" +CONFIG_PREEMPT=y # CONFIG_COMPACTION is not set # CONFIG_CROSS_MEMORY_ATTACH is not set CONFIG_NET=y diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig new file mode 100644 index 0000000..446c96c --- /dev/null +++ b/arch/arc/configs/nsimosci_defconfig @@ -0,0 +1,75 @@ +CONFIG_CROSS_COMPILE="arc-elf32-" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_DEFAULT_HOSTNAME="ARCLinux" +# CONFIG_SWAP is not set +CONFIG_HIGH_RES_TIMERS=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_NAMESPACES=y +# CONFIG_UTS_NS is not set +# CONFIG_PID_NS is not set +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="../arc_initramfs" +CONFIG_KALLSYMS_ALL=y +CONFIG_EMBEDDED=y +# CONFIG_SLUB_DEBUG is not set +# CONFIG_COMPAT_BRK is not set +CONFIG_KPROBES=y +CONFIG_MODULES=y +# CONFIG_LBDAF is not set +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_IOSCHED_DEADLINE is not set +# CONFIG_IOSCHED_CFQ is not set +CONFIG_ARC_PLAT_FPGA_LEGACY=y +CONFIG_ARC_BOARD_ML509=y +# CONFIG_ARC_IDE is not set +# CONFIG_ARCTANGENT_EMAC is not set +# CONFIG_ARC_HAS_RTSC is not set +CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci" +# CONFIG_COMPACTION is not set +# CONFIG_CROSS_MEMORY_ATTACH is not set +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_UNIX_DIAG=y +CONFIG_NET_KEY=y +CONFIG_INET=y +# CONFIG_IPV6 is not set +# CONFIG_STANDALONE is not set +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +# CONFIG_FIRMWARE_IN_KERNEL is not set +# CONFIG_BLK_DEV is not set +CONFIG_NETDEVICES=y +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +# CONFIG_MOUSE_PS2_ALPS is not set +# CONFIG_MOUSE_PS2_LOGIPS2PP is not set +# 
CONFIG_MOUSE_PS2_SYNAPTICS is not set +# CONFIG_MOUSE_PS2_TRACKPOINT is not set +CONFIG_MOUSE_PS2_TOUCHKIT=y +# CONFIG_SERIO_I8042 is not set +# CONFIG_SERIO_SERPORT is not set +CONFIG_SERIO_ARC_PS2=y +# CONFIG_LEGACY_PTYS is not set +# CONFIG_DEVKMEM is not set +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DW=y +CONFIG_SERIAL_ARC=y +CONFIG_SERIAL_ARC_CONSOLE=y +# CONFIG_HW_RANDOM is not set +# CONFIG_HWMON is not set +CONFIG_FB=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_LOGO=y +# CONFIG_HID is not set +# CONFIG_USB_SUPPORT is not set +# CONFIG_IOMMU_SUPPORT is not set +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_TMPFS=y +# CONFIG_MISC_FILESYSTEMS is not set +CONFIG_NFS_FS=y +# CONFIG_ENABLE_WARN_DEPRECATED is not set +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_XZ_DEC=y diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig new file mode 100644 index 0000000..4fa5cd9 --- /dev/null +++ b/arch/arc/configs/tb10x_defconfig @@ -0,0 +1,117 @@ +CONFIG_CROSS_COMPILE="arc-elf32-" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_DEFAULT_HOSTNAME="tb10x" +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=16 +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="../tb10x-rootfs.cpio" +CONFIG_INITRAMFS_ROOT_UID=2100 +CONFIG_INITRAMFS_ROOT_GID=501 +# CONFIG_RD_GZIP is not set +CONFIG_SYSCTL_SYSCALL=y +CONFIG_KALLSYMS_ALL=y +# CONFIG_AIO is not set +CONFIG_EMBEDDED=y +# CONFIG_COMPAT_BRK is not set +CONFIG_SLAB=y +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_BLOCK is not set +CONFIG_ARC_PLAT_TB10X=y +CONFIG_ARC_CACHE_LINE_SHIFT=5 +# CONFIG_ARC_HAS_RTSC is not set +CONFIG_ARC_STACK_NONEXEC=y +CONFIG_HZ=250 +CONFIG_ARC_BUILTIN_DTB_NAME="abilis_tb100_dvk" +CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_COMPACTION is not set +# CONFIG_CROSS_MEMORY_ATTACH is not set +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +# CONFIG_INET_XFRM_MODE_TRANSPORT is not set +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +# CONFIG_INET_DIAG is not set +# CONFIG_IPV6 is not set +# CONFIG_WIRELESS is not set +# CONFIG_FIRMWARE_IN_KERNEL is not set +CONFIG_PROC_DEVICETREE=y +CONFIG_NETDEVICES=y +# CONFIG_NET_CADENCE is not set +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_INTEL is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_SEEQ is not set +CONFIG_STMMAC_ETH=y +CONFIG_STMMAC_DEBUG_FS=y +CONFIG_STMMAC_DA=y +CONFIG_STMMAC_CHAINED=y +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_WLAN is not set +# CONFIG_INPUT is not set +# CONFIG_SERIO is not set +# CONFIG_VT is not set +CONFIG_DEVPTS_MULTIPLE_INSTANCES=y +# CONFIG_LEGACY_PTYS is not set +# CONFIG_DEVKMEM is not set +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_NR_UARTS=1 +CONFIG_SERIAL_8250_RUNTIME_UARTS=1 +CONFIG_SERIAL_8250_DW=y +# CONFIG_HW_RANDOM is not set +CONFIG_I2C=y +# CONFIG_I2C_COMPAT is not set +CONFIG_I2C_DESIGNWARE_PLATFORM=y +CONFIG_GPIO_SYSFS=y +# CONFIG_HWMON is not set +# CONFIG_USB_SUPPORT is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_GPIO=y +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_ONESHOT=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y 
+CONFIG_LEDS_TRIGGER_CPU=y +CONFIG_LEDS_TRIGGER_GPIO=y +CONFIG_LEDS_TRIGGER_DEFAULT_ON=y +CONFIG_LEDS_TRIGGER_TRANSIENT=y +CONFIG_DMADEVICES=y +CONFIG_DW_DMAC=y +CONFIG_NET_DMA=y +CONFIG_ASYNC_TX_DMA=y +# CONFIG_IOMMU_SUPPORT is not set +# CONFIG_DNOTIFY is not set +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_CONFIGFS_FS=y +# CONFIG_MISC_FILESYSTEMS is not set +# CONFIG_NETWORK_FILESYSTEMS is not set +# CONFIG_ENABLE_WARN_DEPRECATED is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_STRIP_ASM_SYMS=y +CONFIG_DEBUG_FS=y +CONFIG_HEADERS_CHECK=y +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_DEBUG_STACKOVERFLOW=y +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_HW is not set diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h index 97ee96f..ee1f6ea 100644 --- a/arch/arc/include/asm/cacheflush.h +++ b/arch/arc/include/asm/cacheflush.h @@ -20,12 +20,20 @@ #include +/* + * Semantically we need this because icache doesn't snoop dcache/dma. + * However ARC Cache flush requires paddr as well as vaddr, latter not available + * in the flush_icache_page() API. So we no-op it but do the equivalent work + * in update_mmu_cache() + */ +#define flush_icache_page(vma, page) + void flush_cache_all(void); void flush_icache_range(unsigned long start, unsigned long end); -void flush_icache_page(struct vm_area_struct *vma, struct page *page); -void flush_icache_range_vaddr(unsigned long paddr, unsigned long u_vaddr, - int len); +void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len); +void __inv_icache_page(unsigned long paddr, unsigned long vaddr); +void __flush_dcache_page(unsigned long paddr); #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 @@ -58,7 +66,7 @@ void dma_cache_wback(unsigned long start, unsigned long sz); do { \ memcpy(dst, src, len); \ if (vma->vm_flags & VM_EXEC) \ - flush_icache_range_vaddr((unsigned long)(dst), vaddr, len);\ + __sync_icache_dcache((unsigned long)(dst), vaddr, len); \ } while (0) #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h index 4c588f9..57898a1 100644 --- a/arch/arc/include/asm/irq.h +++ b/arch/arc/include/asm/irq.h @@ -9,7 +9,8 @@ #ifndef __ASM_ARC_IRQ_H #define __ASM_ARC_IRQ_H -#define NR_IRQS 32 +#define NR_CPU_IRQS 32 /* number of interrupt lines of ARC770 CPU */ +#define NR_IRQS 128 /* allow some CPU external IRQ handling */ /* Platform Independent IRQs */ #define TIMER0_IRQ 3 diff --git a/arch/arc/include/asm/serial.h b/arch/arc/include/asm/serial.h index 4dff5a1..602b097 100644 --- a/arch/arc/include/asm/serial.h +++ b/arch/arc/include/asm/serial.h @@ -22,4 +22,14 @@ #define BASE_BAUD (arc_get_core_freq() / 16) +/* + * This is definitely going to break early 8250 consoles on multi-platform + * images but hey, it won't add any code complexity for a debug feature of + * one broken driver. 
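The BASE_BAUD override completed just below matters because the legacy 8250 early console derives its divisor latch value directly from BASE_BAUD; the extra /3 suggests the TB10x UART block is clocked at a third of the core clock (an inference, not stated in the patch). A minimal sketch of that arithmetic, with core_freq as a plain parameter (the kernel would use arc_get_core_freq(); the 500 MHz figure is purely illustrative):

    /* Divisor an 8250-style console would program, given BASE_BAUD. */
    static unsigned int tb10x_uart_divisor(unsigned long core_freq,
                                           unsigned int baud)
    {
            unsigned long base_baud = core_freq / 16 / 3;   /* as in the hunk */

            return base_baud / baud;        /* written to the DLL/DLH latch */
    }

    /* e.g. tb10x_uart_divisor(500000000, 115200) == 90 */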
+ */ +#ifdef CONFIG_ARC_PLAT_TB10X +#undef BASE_BAUD +#define BASE_BAUD (arc_get_core_freq() / 16 / 3) +#endif + #endif /* _ASM_ARC_SERIAL_H */ diff --git a/arch/arc/include/asm/tlb.h b/arch/arc/include/asm/tlb.h index 3eb2ce0..fe91719 100644 --- a/arch/arc/include/asm/tlb.h +++ b/arch/arc/include/asm/tlb.h @@ -21,20 +21,28 @@ #ifndef __ASSEMBLY__ -#define tlb_flush(tlb) local_flush_tlb_mm((tlb)->mm) +#define tlb_flush(tlb) \ +do { \ + if (tlb->fullmm) \ + flush_tlb_mm((tlb)->mm); \ +} while (0) /* * This pair is called at time of munmap/exit to flush cache and TLB entries * for mappings being torn down. * 1) cache-flush part -implemented via tlb_start_vma( ) can be NOP (for now) * as we don't support aliasing configs in our VIPT D$. - * 2) tlb-flush part - implemted via tlb_end_vma( ) can be NOP as well- - * albiet for difft reasons - its better handled by moving to new ASID + * 2) tlb-flush part - implemted via tlb_end_vma( ) flushes the TLB range * * Note, read http://lkml.org/lkml/2004/1/15/6 */ #define tlb_start_vma(tlb, vma) -#define tlb_end_vma(tlb, vma) + +#define tlb_end_vma(tlb, vma) \ +do { \ + if (!tlb->fullmm) \ + flush_tlb_range(vma, vma->vm_start, vma->vm_end); \ +} while (0) #define __tlb_remove_tlb_entry(tlb, ptep, address) diff --git a/arch/arc/kernel/asm-offsets.c b/arch/arc/kernel/asm-offsets.c index 0dc148e..7dcda70 100644 --- a/arch/arc/kernel/asm-offsets.c +++ b/arch/arc/kernel/asm-offsets.c @@ -11,9 +11,9 @@ #include #include #include +#include #include #include -#include int main(void) { diff --git a/arch/arc/kernel/clk.c b/arch/arc/kernel/clk.c index 66ce0dc..10c7b0b 100644 --- a/arch/arc/kernel/clk.c +++ b/arch/arc/kernel/clk.c @@ -8,7 +8,7 @@ #include -unsigned long core_freq = 800000000; +unsigned long core_freq = 80000000; /* * As of now we default to device-tree provided clock diff --git a/arch/arc/kernel/disasm.c b/arch/arc/kernel/disasm.c index d14764a..b8a549c 100644 --- a/arch/arc/kernel/disasm.c +++ b/arch/arc/kernel/disasm.c @@ -12,8 +12,8 @@ #include #include #include +#include #include -#include #if defined(CONFIG_KGDB) || defined(CONFIG_ARC_MISALIGN_ACCESS) || \ defined(CONFIG_KPROBES) diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S index 91eeab8..0c6d664 100644 --- a/arch/arc/kernel/entry.S +++ b/arch/arc/kernel/entry.S @@ -393,12 +393,14 @@ ARC_ENTRY EV_TLBProtV #ifdef CONFIG_ARC_MISALIGN_ACCESS SAVE_CALLEE_SAVED_USER mov r3, sp ; callee_regs -#endif bl do_misaligned_access -#ifdef CONFIG_ARC_MISALIGN_ACCESS - DISCARD_CALLEE_SAVED_USER + ; TBD: optimize - do this only if a callee reg was involved + ; either a dst of emulated LD/ST or src with address-writeback + RESTORE_CALLEE_SAVED_USER +#else + bl do_misaligned_error #endif b ret_from_exception diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c index 551c10d..8115fa5 100644 --- a/arch/arc/kernel/irq.c +++ b/arch/arc/kernel/irq.c @@ -11,6 +11,8 @@ #include #include #include +#include +#include "../../drivers/irqchip/irqchip.h" #include #include #include @@ -26,7 +28,7 @@ * -Disable all IRQs (on CPU side) * -Optionally, setup the High priority Interrupts as Level 2 IRQs */ -void __init arc_init_IRQ(void) +void __cpuinit arc_init_IRQ(void) { int level_mask = 0; @@ -97,15 +99,13 @@ static const struct irq_domain_ops arc_intc_domain_ops = { static struct irq_domain *root_domain; -void __init init_onchip_IRQ(void) +static int __init +init_onchip_IRQ(struct device_node *intc, struct device_node *parent) { - struct device_node *intc = NULL; + if (parent) + panic("DeviceTree incore 
intc not a root irq controller\n"); - intc = of_find_compatible_node(NULL, NULL, "snps,arc700-intc"); - if(!intc) - panic("DeviceTree Missing incore intc\n"); - - root_domain = irq_domain_add_legacy(intc, NR_IRQS, 0, 0, + root_domain = irq_domain_add_legacy(intc, NR_CPU_IRQS, 0, 0, &arc_intc_domain_ops, NULL); if (!root_domain) @@ -113,8 +113,12 @@ void __init init_onchip_IRQ(void) /* with this we don't need to export root_domain */ irq_set_default_host(root_domain); + + return 0; } +IRQCHIP_DECLARE(arc_intc, "snps,arc700-intc", init_onchip_IRQ); + /* * Late Interrupt system init called from start_kernel for Boot CPU only * @@ -123,12 +127,13 @@ void __init init_onchip_IRQ(void) */ void __init init_IRQ(void) { - init_onchip_IRQ(); - /* Any external intc can be setup here */ if (machine_desc->init_irq) machine_desc->init_irq(); + /* process the entire interrupt tree in one go */ + irqchip_init(); + #ifdef CONFIG_SMP /* Master CPU can initialize it's side of IPI */ if (machine_desc->init_smp) diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c index 3bfeacb..5a7b80e 100644 --- a/arch/arc/kernel/kprobes.c +++ b/arch/arc/kernel/kprobes.c @@ -10,7 +10,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/arc/kernel/module.c b/arch/arc/kernel/module.c index cdd3593..376e046 100644 --- a/arch/arc/kernel/module.c +++ b/arch/arc/kernel/module.c @@ -47,7 +47,7 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, } } #endif - return 0; + return 0; } void module_arch_cleanup(struct module *mod) @@ -141,5 +141,5 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, mod->arch.unw_info = unw; } #endif - return 0; + return 0; } diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index 2d95ac0..b2b3731 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c @@ -14,14 +14,13 @@ #include #include #include +#include #include #include #include -#include #include #include #include -#include #include #include #include @@ -32,14 +31,14 @@ int running_on_hw = 1; /* vs. 
on ISS */ char __initdata command_line[COMMAND_LINE_SIZE]; -struct machine_desc *machine_desc __initdata; +struct machine_desc *machine_desc __cpuinitdata; struct task_struct *_current_task[NR_CPUS]; /* For stack switching */ struct cpuinfo_arc cpuinfo_arc700[NR_CPUS]; -void __init read_arc_build_cfg_regs(void) +void __cpuinit read_arc_build_cfg_regs(void) { struct bcr_perip uncached_space; struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; @@ -238,7 +237,7 @@ char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len) return buf; } -void __init arc_chk_ccms(void) +void __cpuinit arc_chk_ccms(void) { #if defined(CONFIG_ARC_HAS_DCCM) || defined(CONFIG_ARC_HAS_ICCM) struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; @@ -273,7 +272,7 @@ void __init arc_chk_ccms(void) * hardware has dedicated regs which need to be saved/restored on ctx-sw * (Single Precision uses core regs), thus kernel is kind of oblivious to it */ -void __init arc_chk_fpu(void) +void __cpuinit arc_chk_fpu(void) { struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; @@ -294,7 +293,7 @@ void __init arc_chk_fpu(void) * such as only for boot CPU etc */ -void __init setup_processor(void) +void __cpuinit setup_processor(void) { char str[512]; int cpu_id = smp_processor_id(); @@ -319,23 +318,20 @@ void __init setup_processor(void) void __init setup_arch(char **cmdline_p) { + /* This also populates @boot_command_line from /bootargs */ + machine_desc = setup_machine_fdt(__dtb_start); + if (!machine_desc) + panic("Embedded DT invalid\n"); + + /* Append any u-boot provided cmdline */ #ifdef CONFIG_CMDLINE_UBOOT - /* Make sure that a whitespace is inserted before */ - strlcat(command_line, " ", sizeof(command_line)); + /* Add a whitespace seperator between the 2 cmdlines */ + strlcat(boot_command_line, " ", COMMAND_LINE_SIZE); + strlcat(boot_command_line, command_line, COMMAND_LINE_SIZE); #endif - /* - * Append .config cmdline to base command line, which might already - * contain u-boot "bootargs" (handled by head.S, if so configured) - */ - strlcat(command_line, CONFIG_CMDLINE, sizeof(command_line)); /* Save unparsed command line copy for /proc/cmdline */ - strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE); - *cmdline_p = command_line; - - machine_desc = setup_machine_fdt(__dtb_start); - if (!machine_desc) - panic("Embedded DT invalid\n"); + *cmdline_p = boot_command_line; /* To force early parsing of things like mem=xxx */ parse_early_param(); diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c index f13f728..09f4309 100644 --- a/arch/arc/kernel/time.c +++ b/arch/arc/kernel/time.c @@ -33,7 +33,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c index 7496995..0471d9c 100644 --- a/arch/arc/kernel/traps.c +++ b/arch/arc/kernel/traps.c @@ -16,11 +16,12 @@ #include #include #include -#include +#include +#include +#include #include -#include #include -#include +#include void __init trap_init(void) { @@ -83,6 +84,7 @@ DO_ERROR_INFO(SIGILL, "Invalid Extn Insn", do_extension_fault, ILL_ILLOPC) DO_ERROR_INFO(SIGILL, "Illegal Insn (or Seq)", insterror_is_error, ILL_ILLOPC) DO_ERROR_INFO(SIGBUS, "Invalid Mem Access", do_memory_error, BUS_ADRERR) DO_ERROR_INFO(SIGTRAP, "Breakpoint Set", trap_is_brkpt, TRAP_BRKPT) +DO_ERROR_INFO(SIGBUS, "Misaligned Access", do_misaligned_error, BUS_ADRALN) #ifdef CONFIG_ARC_MISALIGN_ACCESS /* @@ -91,21 +93,11 @@ DO_ERROR_INFO(SIGTRAP, "Breakpoint Set", trap_is_brkpt, TRAP_BRKPT) int 
do_misaligned_access(unsigned long cause, unsigned long address, struct pt_regs *regs, struct callee_regs *cregs) { - if (misaligned_fixup(address, regs, cause, cregs) != 0) { - siginfo_t info; - - info.si_signo = SIGBUS; - info.si_errno = 0; - info.si_code = BUS_ADRALN; - info.si_addr = (void __user *)address; - return handle_exception(cause, "Misaligned Access", regs, - &info); - } + if (misaligned_fixup(address, regs, cause, cregs) != 0) + return do_misaligned_error(cause, address, regs); + return 0; } - -#else -DO_ERROR_INFO(SIGSEGV, "Misaligned Access", do_misaligned_access, SEGV_ACCERR) #endif /* diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c index 0aec0198..11c301b 100644 --- a/arch/arc/kernel/troubleshoot.c +++ b/arch/arc/kernel/troubleshoot.c @@ -26,7 +26,6 @@ static noinline void print_reg_file(long *reg_rev, int start_num) char buf[512]; int n = 0, len = sizeof(buf); - /* weird loop because pt_regs regs rev r12..r0, r25..r13 */ for (i = start_num; i < start_num + 13; i++) { n += scnprintf(buf + n, len - n, "r%02u: 0x%08lx\t", i, (unsigned long)*reg_rev); @@ -34,13 +33,18 @@ static noinline void print_reg_file(long *reg_rev, int start_num) if (((i + 1) % 3) == 0) n += scnprintf(buf + n, len - n, "\n"); + /* because pt_regs has regs reversed: r12..r0, r25..r13 */ reg_rev--; } if (start_num != 0) n += scnprintf(buf + n, len - n, "\n\n"); - pr_info("%s", buf); + /* To continue printing callee regs on same line as scratch regs */ + if (start_num == 0) + pr_info("%s", buf); + else + pr_cont("%s\n", buf); } static void show_callee_regs(struct callee_regs *cregs) @@ -83,6 +87,10 @@ static void show_faulting_vma(unsigned long address, char *buf) dev_t dev = 0; char *nm = buf; + /* can't use print_vma_addr() yet as it doesn't check for + * non-inclusive vma + */ + vma = find_vma(current->active_mm, address); /* check against the find_vma( ) behaviour which returns the next VMA @@ -98,10 +106,13 @@ static void show_faulting_vma(unsigned long address, char *buf) ino = inode->i_ino; } pr_info(" @off 0x%lx in [%s]\n" - " VMA: 0x%08lx to 0x%08lx\n\n", - address - vma->vm_start, nm, vma->vm_start, vma->vm_end); - } else + " VMA: 0x%08lx to 0x%08lx\n", + vma->vm_start < TASK_UNMAPPED_BASE ? + address : address - vma->vm_start, + nm, vma->vm_start, vma->vm_end); + } else { pr_info(" @No matching VMA found\n"); + } } static void show_ecr_verbose(struct pt_regs *regs) @@ -110,7 +121,7 @@ static void show_ecr_verbose(struct pt_regs *regs) unsigned long address; cause_reg = current->thread.cause_code; - pr_info("\n[ECR]: 0x%08x => ", cause_reg); + pr_info("\n[ECR ]: 0x%08x => ", cause_reg); /* For Data fault, this is data address not instruction addr */ address = current->thread.fault_address; @@ -120,7 +131,7 @@ static void show_ecr_verbose(struct pt_regs *regs) /* For DTLB Miss or ProtV, display the memory involved too */ if (vec == ECR_V_DTLB_MISS) { - pr_cont("Invalid (%s) @ 0x%08lx by insn @ 0x%08lx\n", + pr_cont("Invalid %s 0x%08lx by insn @ 0x%08lx\n", (cause_code == 0x01) ? "Read From" : ((cause_code == 0x02) ? 
"Write to" : "EX"), address, regs->ret); @@ -168,20 +179,23 @@ void show_regs(struct pt_regs *regs) if (current->thread.cause_code) show_ecr_verbose(regs); - pr_info("[EFA]: 0x%08lx\n", current->thread.fault_address); - pr_info("[ERET]: 0x%08lx (PC of Faulting Instr)\n", regs->ret); + pr_info("[EFA ]: 0x%08lx\n[BLINK ]: %pS\n[ERET ]: %pS\n", + current->thread.fault_address, + (void *)regs->blink, (void *)regs->ret); - show_faulting_vma(regs->ret, buf); /* faulting code, not data */ + if (user_mode(regs)) + show_faulting_vma(regs->ret, buf); /* faulting code, not data */ - /* can't use print_vma_addr() yet as it doesn't check for - * non-inclusive vma - */ + pr_info("[STAT32]: 0x%08lx", regs->status32); + +#define STS_BIT(r, bit) r->status32 & STATUS_##bit##_MASK ? #bit : "" + if (!user_mode(regs)) + pr_cont(" : %2s %2s %2s %2s %2s\n", + STS_BIT(regs, AE), STS_BIT(regs, A2), STS_BIT(regs, A1), + STS_BIT(regs, E2), STS_BIT(regs, E1)); - /* print special regs */ - pr_info("status32: 0x%08lx\n", regs->status32); - pr_info(" SP: 0x%08lx\tFP: 0x%08lx\n", regs->sp, regs->fp); - pr_info("BTA: 0x%08lx\tBLINK: 0x%08lx\n", - regs->bta, regs->blink); + pr_info("BTA: 0x%08lx\t SP: 0x%08lx\t FP: 0x%08lx\n", + regs->bta, regs->sp, regs->fp); pr_info("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n", regs->lp_start, regs->lp_end, regs->lp_count); diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c index 88d617d..c854cf9 100644 --- a/arch/arc/mm/cache_arc700.c +++ b/arch/arc/mm/cache_arc700.c @@ -72,16 +72,6 @@ #include #include - -#ifdef CONFIG_ARC_HAS_ICACHE -static void __ic_line_inv_no_alias(unsigned long, int); -static void __ic_line_inv_2_alias(unsigned long, int); -static void __ic_line_inv_4_alias(unsigned long, int); - -/* Holds the ptr to flush routine, dependign on size due to aliasing issues */ -static void (*___flush_icache_rtn) (unsigned long, int); -#endif - char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len) { int n = 0; @@ -109,7 +99,7 @@ char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len) * the cpuinfo structure for later use. * No Validation done here, simply read/convert the BCRs */ -void __init read_decode_cache_bcr(void) +void __cpuinit read_decode_cache_bcr(void) { struct bcr_cache ibcr, dbcr; struct cpuinfo_arc_cache *p_ic, *p_dc; @@ -141,7 +131,7 @@ void __init read_decode_cache_bcr(void) * 3. Enable the Caches, setup default flush mode for D-Cache * 3. Calculate the SHMLBA used by user space */ -void __init arc_cache_init(void) +void __cpuinit arc_cache_init(void) { unsigned int temp; unsigned int cpu = smp_processor_id(); @@ -171,30 +161,6 @@ void __init arc_cache_init(void) } #endif - - /* - * if Cache way size is <= page size then no aliasing exhibited - * otherwise ratio determines num of aliases. - * e.g. 32K I$, 2 way set assoc, 8k pg size - * way-sz = 32k/2 = 16k - * way-pg-ratio = 16k/8k = 2, so 2 aliases possible - * (meaning 1 line could be in 2 possible locations). 
- */ - way_pg_ratio = ic->sz / ARC_ICACHE_WAYS / PAGE_SIZE; - switch (way_pg_ratio) { - case 0: - case 1: - ___flush_icache_rtn = __ic_line_inv_no_alias; - break; - case 2: - ___flush_icache_rtn = __ic_line_inv_2_alias; - break; - case 4: - ___flush_icache_rtn = __ic_line_inv_4_alias; - break; - default: - panic("Unsupported I-Cache Sz\n"); - } #endif /* Enable/disable I-Cache */ @@ -391,75 +357,38 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz, /* * I-Cache Aliasing in ARC700 VIPT caches * - * For fetching code from I$, ARC700 uses vaddr (embedded in program code) - * to "index" into SET of cache-line and paddr from MMU to match the TAG - * in the WAYS of SET. + * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag. + * The orig Cache Management Module "CDU" only required paddr to invalidate a + * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry. + * Infact for distinct V1,V2,P: all of {V1-P},{V2-P},{P-P} would end up fetching + * the exact same line. * - * However the CDU iterface (to flush/inv) lines from software, only takes - * paddr (to have simpler hardware interface). For simpler cases, using paddr - * alone suffices. - * e.g. 2-way-set-assoc, 16K I$ (8k MMU pg sz, 32b cache line size): - * way_sz = cache_sz / num_ways = 16k/2 = 8k - * num_sets = way_sz / line_sz = 8k/32 = 256 => 8 bits - * Ignoring the bottom 5 bits corresp to the off within a 32b cacheline, - * bits req for calc set-index = bits 12:5 (0 based). Since this range fits - * inside the bottom 13 bits of paddr, which are same for vaddr and paddr - * (with 8k pg sz), paddr alone can be safely used by CDU to unambigously - * locate a cache-line. - * - * However for a difft sized cache, say 32k I$, above math yields need - * for 14 bits of vaddr to locate a cache line, which can't be provided by - * paddr, since the bit 13 (0 based) might differ between the two. - * - * This lack of extra bits needed for correct line addressing, defines the - * classical problem of Cache aliasing with VIPT architectures - * num_aliases = 1 << extra_bits - * e.g. 2-way-set-assoc, 32K I$ with 8k MMU pg sz => 2 aliases - * 2-way-set-assoc, 64K I$ with 8k MMU pg sz => 4 aliases - * 2-way-set-assoc, 16K I$ with 8k MMU pg sz => NO aliases + * However for larger Caches (way-size > page-size) - i.e. in Aliasing config, + * paddr alone could not be used to correctly index the cache. * * ------------------ * MMU v1/v2 (Fixed Page Size 8k) * ------------------ * The solution was to provide CDU with these additonal vaddr bits. These - * would be bits [x:13], x would depend on cache-geom. + * would be bits [x:13], x would depend on cache-geometry, 13 comes from + * standard page size of 8k. * H/w folks chose [17:13] to be a future safe range, and moreso these 5 bits * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the * orig 5 bits of paddr were anyways ignored by CDU line ops, as they * represent the offset within cache-line. The adv of using this "clumsy" - * interface for additional info was no new reg was needed in CDU. + * interface for additional info was no new reg was needed in CDU programming + * model. * * 17:13 represented the max num of bits passable, actual bits needed were * fewer, based on the num-of-aliases possible. 
* -for 2 alias possibility, only bit 13 needed (32K cache) * -for 4 alias possibility, bits 14:13 needed (64K cache) * - * Since vaddr was not available for all instances of I$ flush req by core - * kernel, the only safe way (non-optimal though) was to kill all possible - * lines which could represent an alias (even if they didnt represent one - * in execution). - * e.g. for 64K I$, 4 aliases possible, so we did - * flush start - * flush start | 0x01 - * flush start | 0x2 - * flush start | 0x3 - * - * The penalty was invoking the operation itself, since tag match is anyways - * paddr based, a line which didn't represent an alias would not match the - * paddr, hence wont be killed - * - * Note that aliasing concerns are independent of line-sz for a given cache - * geometry (size + set_assoc) because the extra bits required by line-sz are - * reduced from the set calc. - * e.g. 2-way-set-assoc, 32K I$ with 8k MMU pg sz and using math above - * 32b line-sz: 9 bits set-index-calc, 5 bits offset-in-line => 1 extra bit - * 64b line-sz: 8 bits set-index-calc, 6 bits offset-in-line => 1 extra bit - * * ------------------ * MMU v3 * ------------------ - * This ver of MMU supports var page sizes (1k-16k) - Linux will support - * 8k (default), 16k and 4k. + * This ver of MMU supports variable page sizes (1k-16k): although Linux will + * only support 8k (default), 16k and 4k. * However from hardware perspective, smaller page sizes aggrevate aliasing * meaning more vaddr bits needed to disambiguate the cache-line-op ; * the existing scheme of piggybacking won't work for certain configurations. @@ -468,115 +397,29 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz, */ /*********************************************************** - * Machine specific helpers for per line I-Cache invalidate. - * 3 routines to accpunt for 1, 2, 4 aliases possible + * Machine specific helper for per line I-Cache invalidate. */ - -static void __ic_line_inv_no_alias(unsigned long start, int num_lines) -{ - while (num_lines-- > 0) { -#if (CONFIG_ARC_MMU_VER > 2) - write_aux_reg(ARC_REG_IC_PTAG, start); -#endif - write_aux_reg(ARC_REG_IC_IVIL, start); - start += ARC_ICACHE_LINE_LEN; - } -} - -static void __ic_line_inv_2_alias(unsigned long start, int num_lines) -{ - while (num_lines-- > 0) { - -#if (CONFIG_ARC_MMU_VER > 2) - /* - * MMU v3, CDU prog model (for line ops) now uses a new IC_PTAG - * reg to pass the "tag" bits and existing IVIL reg only looks - * at bits relevant for "index" (details above) - * Programming Notes: - * -when writing tag to PTAG reg, bit chopping can be avoided, - * CDU ignores non-tag bits. - * -Ideally "index" must be computed from vaddr, but it is not - * avail in these rtns. So to be safe, we kill the lines in all - * possible indexes corresp to num of aliases possible for - * given cache config. 
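The aliasing rules spelled out in these comments reduce to the ratio of I-cache way size to page size. A stand-alone sketch of that arithmetic (function and parameter names are illustrative, not kernel symbols):

    /*
     * Number of possible I-cache aliases for a VIPT geometry, following
     * the way-size / page-size reasoning above.  Line size does not enter
     * the calculation, as the comments also note.
     */
    static unsigned int vipt_icache_aliases(unsigned int cache_sz,
                                            unsigned int ways,
                                            unsigned int page_sz)
    {
            unsigned int way_sz = cache_sz / ways;

            if (way_sz <= page_sz)
                    return 1;               /* non-aliasing: paddr alone suffices */

            return way_sz / page_sz;        /* 32K 2-way, 8K pages -> 2 */
    }

    /* 64K 2-way with 8K pages gives 4, i.e. the "bits 14:13" case. */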
- */ - write_aux_reg(ARC_REG_IC_PTAG, start); - write_aux_reg(ARC_REG_IC_IVIL, - start & ~(0x1 << PAGE_SHIFT)); - write_aux_reg(ARC_REG_IC_IVIL, start | (0x1 << PAGE_SHIFT)); -#else - write_aux_reg(ARC_REG_IC_IVIL, start); - write_aux_reg(ARC_REG_IC_IVIL, start | 0x01); -#endif - start += ARC_ICACHE_LINE_LEN; - } -} - -static void __ic_line_inv_4_alias(unsigned long start, int num_lines) -{ - while (num_lines-- > 0) { - -#if (CONFIG_ARC_MMU_VER > 2) - write_aux_reg(ARC_REG_IC_PTAG, start); - - write_aux_reg(ARC_REG_IC_IVIL, - start & ~(0x3 << PAGE_SHIFT)); - write_aux_reg(ARC_REG_IC_IVIL, - start & ~(0x2 << PAGE_SHIFT)); - write_aux_reg(ARC_REG_IC_IVIL, - start & ~(0x1 << PAGE_SHIFT)); - write_aux_reg(ARC_REG_IC_IVIL, start | (0x3 << PAGE_SHIFT)); -#else - write_aux_reg(ARC_REG_IC_IVIL, start); - write_aux_reg(ARC_REG_IC_IVIL, start | 0x01); - write_aux_reg(ARC_REG_IC_IVIL, start | 0x02); - write_aux_reg(ARC_REG_IC_IVIL, start | 0x03); -#endif - start += ARC_ICACHE_LINE_LEN; - } -} - -static void __ic_line_inv(unsigned long start, unsigned long sz) +static void __ic_line_inv_vaddr(unsigned long phy_start, unsigned long vaddr, + unsigned long sz) { unsigned long flags; int num_lines, slack; + unsigned int addr; /* - * Ensure we properly floor/ceil the non-line aligned/sized requests - * and have @start - aligned to cache line, and integral @num_lines + * Ensure we properly floor/ceil the non-line aligned/sized requests: * However page sized flushes can be compile time optimised. - * -@start will be cache-line aligned already (being page aligned) + * -@phy_start will be cache-line aligned already (being page aligned) * -@sz will be integral multiple of line size (being page sized). */ if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) { - slack = start & ~ICACHE_LINE_MASK; + slack = phy_start & ~ICACHE_LINE_MASK; sz += slack; - start -= slack; + phy_start -= slack; } num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN); - local_irq_save(flags); - (*___flush_icache_rtn) (start, num_lines); - local_irq_restore(flags); -} - -/* Unlike routines above, having vaddr for flush op (along with paddr), - * prevents the need to speculatively kill the lines in multiple sets - * based on ratio of way_sz : pg_sz - */ -static void __ic_line_inv_vaddr(unsigned long phy_start, - unsigned long vaddr, unsigned long sz) -{ - unsigned long flags; - int num_lines, slack; - unsigned int addr; - - slack = phy_start & ~ICACHE_LINE_MASK; - sz += slack; - phy_start -= slack; - num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN); - #if (CONFIG_ARC_MMU_VER > 2) vaddr &= ~ICACHE_LINE_MASK; addr = phy_start; @@ -595,7 +438,7 @@ static void __ic_line_inv_vaddr(unsigned long phy_start, write_aux_reg(ARC_REG_IC_IVIL, vaddr); vaddr += ARC_ICACHE_LINE_LEN; #else - /* this paddr contains vaddrs bits as needed */ + /* paddr contains stuffed vaddrs bits */ write_aux_reg(ARC_REG_IC_IVIL, addr); #endif addr += ARC_ICACHE_LINE_LEN; @@ -605,7 +448,6 @@ static void __ic_line_inv_vaddr(unsigned long phy_start, #else -#define __ic_line_inv(start, sz) #define __ic_line_inv_vaddr(pstart, vstart, sz) #endif /* CONFIG_ARC_HAS_ICACHE */ @@ -615,10 +457,10 @@ static void __ic_line_inv_vaddr(unsigned long phy_start, * Exported APIs */ -/* TBD: use pg_arch_1 to optimize this */ void flush_dcache_page(struct page *page) { - __dc_line_op((unsigned long)page_address(page), PAGE_SIZE, OP_FLUSH); + /* Make a note that dcache is not yet flushed for this page */ + set_bit(PG_arch_1, &page->flags); } EXPORT_SYMBOL(flush_dcache_page); @@ -642,8 +484,8 @@ 
void dma_cache_wback(unsigned long start, unsigned long sz) EXPORT_SYMBOL(dma_cache_wback); /* - * This is API for making I/D Caches consistent when modifying code - * (loadable modules, kprobes, etc) + * This is API for making I/D Caches consistent when modifying + * kernel code (loadable modules, kprobes, kgdb...) * This is called on insmod, with kernel virtual address for CODE of * the module. ARC cache maintenance ops require PHY address thus we * need to convert vmalloc addr to PHY addr @@ -652,7 +494,6 @@ void flush_icache_range(unsigned long kstart, unsigned long kend) { unsigned int tot_sz, off, sz; unsigned long phy, pfn; - unsigned long flags; /* printk("Kernel Cache Cohenercy: %lx to %lx\n",kstart, kend); */ @@ -673,8 +514,13 @@ void flush_icache_range(unsigned long kstart, unsigned long kend) /* Case: Kernel Phy addr (0x8000_0000 onwards) */ if (likely(kstart > PAGE_OFFSET)) { - __ic_line_inv(kstart, kend - kstart); - __dc_line_op(kstart, kend - kstart, OP_FLUSH); + /* + * The 2nd arg despite being paddr will be used to index icache + * This is OK since no alternate virtual mappings will exist + * given the callers for this case: kprobe/kgdb in built-in + * kernel code only. + */ + __sync_icache_dcache(kstart, kstart, kend - kstart); return; } @@ -692,42 +538,41 @@ void flush_icache_range(unsigned long kstart, unsigned long kend) pfn = vmalloc_to_pfn((void *)kstart); phy = (pfn << PAGE_SHIFT) + off; sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off); - local_irq_save(flags); - __dc_line_op(phy, sz, OP_FLUSH); - __ic_line_inv(phy, sz); - local_irq_restore(flags); + __sync_icache_dcache(phy, kstart, sz); kstart += sz; tot_sz -= sz; } } /* - * Optimised ver of flush_icache_range() with spec callers: ptrace/signals - * where vaddr is also available. This allows passing both vaddr and paddr - * bits to CDU for cache flush, short-circuting the current pessimistic algo - * which kills all possible aliases. - * An added adv of knowing that vaddr is user-vaddr avoids various checks - * and handling for k-vaddr, k-paddr as done in orig ver above + * General purpose helper to make I and D cache lines consistent. + * @paddr is phy addr of region + * @vaddr is typically user or kernel vaddr (vmalloc) + * Howver in one instance, flush_icache_range() by kprobe (for a breakpt in + * builtin kernel code) @vaddr will be paddr only, meaning CDU operation will + * use a paddr to index the cache (despite VIPT). This is fine since since a + * built-in kernel page will not have any virtual mappings (not even kernel) + * kprobe on loadable module is different as it will have kvaddr. */ -void flush_icache_range_vaddr(unsigned long paddr, unsigned long u_vaddr, - int len) +void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len) { - __ic_line_inv_vaddr(paddr, u_vaddr, len); + unsigned long flags; + + local_irq_save(flags); + __ic_line_inv_vaddr(paddr, vaddr, len); __dc_line_op(paddr, len, OP_FLUSH); + local_irq_restore(flags); } -/* - * XXX: This also needs to be optim using pg_arch_1 - * This is called when a page-cache page is about to be mapped into a - * user process' address space. It offers an opportunity for a - * port to ensure d-cache/i-cache coherency if necessary. 
- */ -void flush_icache_page(struct vm_area_struct *vma, struct page *page) +/* wrapper to compile time eliminate alignment checks in flush loop */ +void __inv_icache_page(unsigned long paddr, unsigned long vaddr) { - if (!(vma->vm_flags & VM_EXEC)) - return; + __ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE); +} - __ic_line_inv((unsigned long)page_address(page), PAGE_SIZE); +void __flush_dcache_page(unsigned long paddr) +{ + __dc_line_op(paddr, PAGE_SIZE, OP_FLUSH_N_INV); } void flush_icache_all(void) diff --git a/arch/arc/mm/extable.c b/arch/arc/mm/extable.c index 014172b..aa652e2 100644 --- a/arch/arc/mm/extable.c +++ b/arch/arc/mm/extable.c @@ -27,7 +27,7 @@ int fixup_exception(struct pt_regs *regs) #ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE -long arc_copy_from_user_noinline(void *to, const void __user * from, +long arc_copy_from_user_noinline(void *to, const void __user *from, unsigned long n) { return __arc_copy_from_user(to, from, n); @@ -48,7 +48,7 @@ unsigned long arc_clear_user_noinline(void __user *to, } EXPORT_SYMBOL(arc_clear_user_noinline); -long arc_strncpy_from_user_noinline (char *dst, const char __user *src, +long arc_strncpy_from_user_noinline(char *dst, const char __user *src, long count) { return __arc_strncpy_from_user(dst, src, count); diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c index af55aab..689ffd8 100644 --- a/arch/arc/mm/fault.c +++ b/arch/arc/mm/fault.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c index 727d479..4a17736 100644 --- a/arch/arc/mm/init.c +++ b/arch/arc/mm/init.c @@ -10,9 +10,6 @@ #include #include #include -#ifdef CONFIG_BLOCK_DEV_RAM -#include -#endif #include #include #include diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c index 3e5c92c..739e65f 100644 --- a/arch/arc/mm/ioremap.c +++ b/arch/arc/mm/ioremap.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include void __iomem *ioremap(unsigned long paddr, unsigned long size) { diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c index 9b9ce23..003d69a 100644 --- a/arch/arc/mm/tlb.c +++ b/arch/arc/mm/tlb.c @@ -418,23 +418,37 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) local_irq_restore(flags); } -/* arch hook called by core VM at the end of handle_mm_fault( ), - * when a new PTE is entered in Page Tables or an existing one - * is modified. We aggresively pre-install a TLB entry +/* + * Called at the end of pagefault, for a userspace mapped page + * -pre-install the corresponding TLB entry into MMU + * -Finalize the delayed D-cache flush (wback+inv kernel mapping) */ - -void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddress, +void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned, pte_t *ptep) { + unsigned long vaddr = vaddr_unaligned & PAGE_MASK; + + create_tlb(vma, vaddr, ptep); - create_tlb(vma, vaddress, ptep); + /* icache doesn't snoop dcache, thus needs to be made coherent here */ + if (vma->vm_flags & VM_EXEC) { + struct page *page = pfn_to_page(pte_pfn(*ptep)); + + /* if page was dcache dirty, flush now */ + int dirty = test_and_clear_bit(PG_arch_1, &page->flags); + if (dirty) { + unsigned long paddr = pte_val(*ptep) & PAGE_MASK; + __flush_dcache_page(paddr); + __inv_icache_page(paddr, vaddr); + } + } } /* Read the Cache Build Confuration Registers, Decode them and save into * the cpuinfo structure for later use. 
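The update_mmu_cache() hunk above is the consumer half of the lazy flush started by flush_dcache_page() earlier in this patch. A condensed side-by-side view of the two hooks, using the same names as the patch (includes, SMP and locking details omitted):

    /* Producer: the kernel wrote into a page that may later be executed.
     * Nothing is flushed yet; the page is only marked "dcache dirty".
     */
    void flush_dcache_page(struct page *page)
    {
            set_bit(PG_arch_1, &page->flags);
    }

    /* Consumer: called at the end of a page fault.  Only executable
     * mappings pay for the writeback + I-cache invalidate, and only if
     * the page was marked dirty above.
     */
    void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr,
                          pte_t *ptep)
    {
            unsigned long va = vaddr & PAGE_MASK;

            create_tlb(vma, va, ptep);

            if (vma->vm_flags & VM_EXEC) {
                    struct page *page = pfn_to_page(pte_pfn(*ptep));

                    if (test_and_clear_bit(PG_arch_1, &page->flags)) {
                            unsigned long paddr = pte_val(*ptep) & PAGE_MASK;

                            __flush_dcache_page(paddr);
                            __inv_icache_page(paddr, va);
                    }
            }
    }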
* No Validation is done here, simply read/convert the BCRs */ -void __init read_decode_mmu_bcr(void) +void __cpuinit read_decode_mmu_bcr(void) { unsigned int tmp; struct bcr_mmu_1_2 *mmu2; /* encoded MMU2 attr */ @@ -466,7 +480,7 @@ void __init read_decode_mmu_bcr(void) char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len) { int n = 0; - struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[smp_processor_id()].mmu; + struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu; n += scnprintf(buf + n, len - n, "ARC700 MMU [v%x]\t: %dk PAGE, ", p_mmu->ver, TO_KB(p_mmu->pg_sz)); @@ -480,7 +494,7 @@ char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len) return buf; } -void __init arc_mmu_init(void) +void __cpuinit arc_mmu_init(void) { char str[256]; struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu; diff --git a/arch/arc/plat-arcfpga/platform.c b/arch/arc/plat-arcfpga/platform.c index 4e20a1a..b3700c0 100644 --- a/arch/arc/plat-arcfpga/platform.c +++ b/arch/arc/plat-arcfpga/platform.c @@ -224,3 +224,15 @@ MACHINE_START(ML509, "ml509") .init_smp = iss_model_init_smp, #endif MACHINE_END + +static const char *nsimosci_compat[] __initdata = { + "snps,nsimosci", + NULL, +}; + +MACHINE_START(NSIMOSCI, "nsimosci") + .dt_compat = nsimosci_compat, + .init_early = NULL, + .init_machine = plat_fpga_populate_dev, + .init_irq = NULL, +MACHINE_END diff --git a/arch/arc/plat-tb10x/Kconfig b/arch/arc/plat-tb10x/Kconfig new file mode 100644 index 0000000..4e12127 --- /dev/null +++ b/arch/arc/plat-tb10x/Kconfig @@ -0,0 +1,36 @@ +# Abilis Systems TB10x platform kernel configuration file +# +# Author: Christian Ruppert +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + +menuconfig ARC_PLAT_TB10X + bool "Abilis TB10x" + select COMMON_CLK + select PINCTRL + select PINMUX + select ARCH_REQUIRE_GPIOLIB + help + Support for platforms based on the TB10x home media gateway SOC by + Abilis Systems. TB10x is based on the ARC700 CPU architecture. + Say Y if you are building a kernel for one of the SOCs in this + series (e.g. TB100 or TB101). If in doubt say N. + +if ARC_PLAT_TB10X + +config GENERIC_GPIO + def_bool y + +endif diff --git a/arch/arc/plat-tb10x/Makefile b/arch/arc/plat-tb10x/Makefile new file mode 100644 index 0000000..89611d2 --- /dev/null +++ b/arch/arc/plat-tb10x/Makefile @@ -0,0 +1,21 @@ +# Abilis Systems TB10x platform Makefile +# +# Author: Christian Ruppert +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + +KBUILD_CFLAGS += -Iarch/arc/plat-tb10x/include + +obj-y += tb10x.o diff --git a/arch/arc/plat-tb10x/tb10x.c b/arch/arc/plat-tb10x/tb10x.c new file mode 100644 index 0000000..d356769 --- /dev/null +++ b/arch/arc/plat-tb10x/tb10x.c @@ -0,0 +1,71 @@ +/* + * Abilis Systems TB10x platform initialisation + * + * Copyright (C) Abilis Systems 2012 + * + * Author: Christian Ruppert + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + + +#include +#include +#include +#include + +#include + + +static void __init tb10x_platform_init(void) +{ + of_clk_init(NULL); + of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); +} + +static void __init tb10x_platform_late_init(void) +{ + struct device_node *dn; + + /* + * Pinctrl documentation recommends setting up the iomux here for + * all modules which don't require control over the pins themselves. + * Modules which need this kind of assistance are compatible with + * "abilis,simple-pinctrl", i.e. we can easily iterate over them. + * TODO: Does this recommended method work cleanly with pins required + * by modules? 
+ */ + for_each_compatible_node(dn, NULL, "abilis,simple-pinctrl") { + struct platform_device *pd = of_find_device_by_node(dn); + struct pinctrl *pctl; + + pctl = pinctrl_get_select(&pd->dev, "abilis,simple-default"); + if (IS_ERR(pctl)) { + int ret = PTR_ERR(pctl); + dev_err(&pd->dev, "Could not set up pinctrl: %d\n", + ret); + } + } +} + +static const char *tb10x_compat[] __initdata = { + "abilis,arc-tb10x", + NULL, +}; + +MACHINE_START(TB10x, "tb10x") + .dt_compat = tb10x_compat, + .init_machine = tb10x_platform_init, + .init_late = tb10x_platform_late_init, +MACHINE_END diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index aa71a23..d423d58 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -109,9 +109,6 @@ config MIGHT_HAVE_PCI config SYS_SUPPORTS_APM_EMULATION bool -config GENERIC_GPIO - bool - config HAVE_TCM bool select GENERIC_ALLOCATOR @@ -900,7 +897,6 @@ config ARCH_MULTI_V7 bool "ARMv7 based platforms (Cortex-A, PJ4, Scorpion, Krait)" default y select ARCH_MULTI_V6_V7 - select ARCH_VEXPRESS select CPU_V7 config ARCH_MULTI_V6_V7 diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi index d110110..1460d9b 100644 --- a/arch/arm/boot/dts/am33xx.dtsi +++ b/arch/arm/boot/dts/am33xx.dtsi @@ -403,5 +403,17 @@ 0x44d80000 0x2000>; /* M3 DMEM */ ti,hwmods = "wkup_m3"; }; + + gpmc: gpmc@50000000 { + compatible = "ti,am3352-gpmc"; + ti,hwmods = "gpmc"; + reg = <0x50000000 0x2000>; + interrupts = <100>; + num-cs = <7>; + num-waitpins = <2>; + #address-cells = <2>; + #size-cells = <1>; + status = "disabled"; + }; }; }; diff --git a/arch/arm/boot/dts/cros5250-common.dtsi b/arch/arm/boot/dts/cros5250-common.dtsi index 0a61bbb..3f0239e 100644 --- a/arch/arm/boot/dts/cros5250-common.dtsi +++ b/arch/arm/boot/dts/cros5250-common.dtsi @@ -175,6 +175,14 @@ i2c@12C70000 { samsung,i2c-sda-delay = <100>; samsung,i2c-max-bus-freq = <378000>; + + trackpad { + reg = <0x67>; + compatible = "cypress,cyapa"; + interrupts = <2 0>; + interrupt-parent = <&gpx1>; + wakeup-source; + }; }; i2c@12C80000 { diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts index 26d856b..3e0c792 100644 --- a/arch/arm/boot/dts/exynos5250-smdk5250.dts +++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts @@ -214,7 +214,7 @@ }; usb@12110000 { - samsung,vbus-gpio = <&gpx2 6 1 3 3>; + samsung,vbus-gpio = <&gpx2 6 0>; }; dp-controller { diff --git a/arch/arm/boot/dts/exynos5250-snow.dts b/arch/arm/boot/dts/exynos5250-snow.dts index bf4744b..d449feb 100644 --- a/arch/arm/boot/dts/exynos5250-snow.dts +++ b/arch/arm/boot/dts/exynos5250-snow.dts @@ -183,7 +183,7 @@ }; usb@12110000 { - samsung,vbus-gpio = <&gpx1 1 1 3 3>; + samsung,vbus-gpio = <&gpx1 1 0>; }; fixed-rate-clocks { diff --git a/arch/arm/boot/dts/omap3-beagle-xm.dts b/arch/arm/boot/dts/omap3-beagle-xm.dts index 5a31964..3046d1f 100644 --- a/arch/arm/boot/dts/omap3-beagle-xm.dts +++ b/arch/arm/boot/dts/omap3-beagle-xm.dts @@ -122,6 +122,7 @@ &usb_otg_hs { interface-type = <0>; + usb-phy = <&usb2_phy>; mode = <3>; power = <50>; }; diff --git a/arch/arm/boot/dts/omap3-evm.dts b/arch/arm/boot/dts/omap3-evm.dts index 05f51e1..96d1c20 100644 --- a/arch/arm/boot/dts/omap3-evm.dts +++ b/arch/arm/boot/dts/omap3-evm.dts @@ -68,6 +68,7 @@ &usb_otg_hs { interface-type = <0>; + usb-phy = <&usb2_phy>; mode = <3>; power = <50>; }; diff --git a/arch/arm/boot/dts/omap3-overo.dtsi b/arch/arm/boot/dts/omap3-overo.dtsi index d4a7280..a626c50 100644 --- a/arch/arm/boot/dts/omap3-overo.dtsi +++ b/arch/arm/boot/dts/omap3-overo.dtsi @@ 
-73,6 +73,7 @@ &usb_otg_hs { interface-type = <0>; + usb-phy = <&usb2_phy>; mode = <3>; power = <50>; }; diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi index 4ad03d9..82a404d 100644 --- a/arch/arm/boot/dts/omap3.dtsi +++ b/arch/arm/boot/dts/omap3.dtsi @@ -519,7 +519,6 @@ interrupts = <0 92 0x4>, <0 93 0x4>; interrupt-names = "mc", "dma"; ti,hwmods = "usb_otg_hs"; - usb-phy = <&usb2_phy>; multipoint = <1>; num-eps = <16>; ram-bits = <12>; diff --git a/arch/arm/boot/dts/omap36xx.dtsi b/arch/arm/boot/dts/omap36xx.dtsi index b89233e..f3447bc 100644 --- a/arch/arm/boot/dts/omap36xx.dtsi +++ b/arch/arm/boot/dts/omap36xx.dtsi @@ -20,9 +20,9 @@ cpu@0 { operating-points = < /* kHz uV */ - 300000 975000 - 600000 1075000 - 800000 1200000 + 300000 1012500 + 600000 1200000 + 800000 1325000 >; clock-latency = <300000>; /* From legacy driver */ }; diff --git a/arch/arm/boot/dts/omap4-sdp.dts b/arch/arm/boot/dts/omap4-sdp.dts index c387bdc..a35d9cd 100644 --- a/arch/arm/boot/dts/omap4-sdp.dts +++ b/arch/arm/boot/dts/omap4-sdp.dts @@ -223,6 +223,15 @@ >; }; + mcspi1_pins: pinmux_mcspi1_pins { + pinctrl-single,pins = < + 0xf2 0x100 /* mcspi1_clk.mcspi1_clk INPUT | MODE0 */ + 0xf4 0x100 /* mcspi1_somi.mcspi1_somi INPUT | MODE0 */ + 0xf6 0x100 /* mcspi1_simo.mcspi1_simo INPUT | MODE0 */ + 0xf8 0x100 /* mcspi1_cs0.mcspi1_cs0 INPUT | MODE0*/ + >; + }; + dss_hdmi_pins: pinmux_dss_hdmi_pins { pinctrl-single,pins = < 0x5a 0x118 /* hdmi_cec.hdmi_cec INPUT PULLUP | MODE 0 */ @@ -358,12 +367,15 @@ }; &mcspi1 { + pinctrl-names = "default"; + pinctrl-0 = <&mcspi1_pins>; + eth@0 { compatible = "ks8851"; spi-max-frequency = <24000000>; reg = <0>; interrupt-parent = <&gpio2>; - interrupts = <2>; /* gpio line 34 */ + interrupts = <2 8>; /* gpio line 34, low triggered */ vdd-supply = <&vdd_eth>; }; }; diff --git a/arch/arm/boot/dts/omap4-var-som.dts b/arch/arm/boot/dts/omap4-var-som.dts index 222a413..7e04103 100644 --- a/arch/arm/boot/dts/omap4-var-som.dts +++ b/arch/arm/boot/dts/omap4-var-som.dts @@ -68,7 +68,7 @@ spi-max-frequency = <24000000>; reg = <0>; interrupt-parent = <&gpio6>; - interrupts = <11>; /* gpio line 171 */ + interrupts = <11 8>; /* gpio line 171, low triggered */ vdd-supply = <&vdd_eth>; }; }; diff --git a/arch/arm/boot/dts/omap4460.dtsi b/arch/arm/boot/dts/omap4460.dtsi index 7c2c23c..2cf227c 100644 --- a/arch/arm/boot/dts/omap4460.dtsi +++ b/arch/arm/boot/dts/omap4460.dtsi @@ -15,9 +15,9 @@ cpu@0 { operating-points = < /* kHz uV */ - 350000 975000 - 700000 1075000 - 920000 1200000 + 350000 1025000 + 700000 1200000 + 920000 1313000 >; clock-latency = <300000>; /* From legacy driver */ }; diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index 33903ca..c1ef64b 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -137,6 +137,8 @@ CONFIG_SERIAL_8250_DETECT_IRQ=y CONFIG_SERIAL_8250_RSA=y CONFIG_SERIAL_AMBA_PL011=y CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +CONFIG_SERIAL_OMAP=y +CONFIG_SERIAL_OMAP_CONSOLE=y CONFIG_HW_RANDOM=y CONFIG_I2C_CHARDEV=y CONFIG_SPI=y @@ -153,6 +155,7 @@ CONFIG_OMAP_WATCHDOG=y CONFIG_TWL4030_WATCHDOG=y CONFIG_MFD_TPS65217=y CONFIG_MFD_TPS65910=y +CONFIG_TWL6040_CORE=y CONFIG_REGULATOR_TWL4030=y CONFIG_REGULATOR_TPS65023=y CONFIG_REGULATOR_TPS6507X=y @@ -195,6 +198,7 @@ CONFIG_SND_USB_AUDIO=m CONFIG_SND_SOC=m CONFIG_SND_OMAP_SOC=m CONFIG_SND_OMAP_SOC_OMAP_TWL4030=m +CONFIG_SND_OMAP_SOC_OMAP_ABE_TWL6040=m CONFIG_SND_OMAP_SOC_OMAP3_PANDORA=m CONFIG_USB=y CONFIG_USB_DEBUG=y diff --git 
a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c index 70f1bde..5af04f6 100644 --- a/arch/arm/kernel/devtree.c +++ b/arch/arm/kernel/devtree.c @@ -180,6 +180,13 @@ struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys) unsigned long dt_root; const char *model; +#ifdef CONFIG_ARCH_MULTIPLATFORM + DT_MACHINE_START(GENERIC_DT, "Generic DT based system") + MACHINE_END + + mdesc_best = (struct machine_desc *)&__mach_desc_GENERIC_DT; +#endif + if (!dt_phys) return NULL; diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 728007c..1522c7a 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -659,9 +660,19 @@ struct screen_info screen_info = { static int __init customize_machine(void) { - /* customizes platform devices, or adds new ones */ + /* + * customizes platform devices, or adds new ones + * On DT based machines, we fall back to populating the + * machine from the device tree, if no callback is provided, + * otherwise we would always need an init_machine callback. + */ if (machine_desc->init_machine) machine_desc->init_machine(); +#ifdef CONFIG_OF + else + of_platform_populate(NULL, of_default_bus_match_table, + NULL, NULL); +#endif return 0; } arch_initcall(customize_machine); diff --git a/arch/arm/mach-exynos/include/mach/regs-pmu.h b/arch/arm/mach-exynos/include/mach/regs-pmu.h index 3f30aa1..57344b7 100644 --- a/arch/arm/mach-exynos/include/mach/regs-pmu.h +++ b/arch/arm/mach-exynos/include/mach/regs-pmu.h @@ -344,6 +344,7 @@ #define EXYNOS5_FSYS_ARM_OPTION S5P_PMUREG(0x2208) #define EXYNOS5_ISP_ARM_OPTION S5P_PMUREG(0x2288) #define EXYNOS5_ARM_COMMON_OPTION S5P_PMUREG(0x2408) +#define EXYNOS5_ARM_L2_OPTION S5P_PMUREG(0x2608) #define EXYNOS5_TOP_PWR_OPTION S5P_PMUREG(0x2C48) #define EXYNOS5_TOP_PWR_SYSMEM_OPTION S5P_PMUREG(0x2CC8) #define EXYNOS5_JPEG_MEM_OPTION S5P_PMUREG(0x2F48) diff --git a/arch/arm/mach-exynos/pmu.c b/arch/arm/mach-exynos/pmu.c index daebc1a..97d6885 100644 --- a/arch/arm/mach-exynos/pmu.c +++ b/arch/arm/mach-exynos/pmu.c @@ -228,6 +228,7 @@ static struct exynos_pmu_conf exynos5250_pmu_config[] = { { EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} }, { EXYNOS5_ARM_COMMON_SYS_PWR_REG, { 0x0, 0x0, 0x2} }, { EXYNOS5_ARM_L2_SYS_PWR_REG, { 0x3, 0x3, 0x3} }, + { EXYNOS5_ARM_L2_OPTION, { 0x10, 0x10, 0x0 } }, { EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG, { 0x1, 0x0, 0x1} }, { EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG, { 0x1, 0x0, 0x1} }, { EXYNOS5_CMU_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x0} }, @@ -353,11 +354,9 @@ static void exynos5_init_pmu(void) /* * SKIP_DEACTIVATE_ACEACP_IN_PWDN_BITFIELD Enable - * MANUAL_L2RSTDISABLE_CONTROL_BITFIELD Enable */ tmp = __raw_readl(EXYNOS5_ARM_COMMON_OPTION); - tmp |= (EXYNOS5_MANUAL_L2RSTDISABLE_CONTROL | - EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN); + tmp |= EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN; __raw_writel(tmp, EXYNOS5_ARM_COMMON_OPTION); /* diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig index 78f795d..ba44328 100644 --- a/arch/arm/mach-imx/Kconfig +++ b/arch/arm/mach-imx/Kconfig @@ -5,6 +5,7 @@ config ARCH_MXC select AUTO_ZRELADDR if !ZBOOT_ROM select CLKDEV_LOOKUP select CLKSRC_MMIO + select GENERIC_ALLOCATOR select GENERIC_CLOCKEVENTS select GENERIC_IRQ_CHIP select MULTI_IRQ_HANDLER @@ -61,10 +62,6 @@ config MXC_ULPI config ARCH_HAS_RNGA bool -config IRAM_ALLOC - bool - select GENERIC_ALLOCATOR - config HAVE_IMX_ANATOP bool diff --git a/arch/arm/mach-imx/Makefile 
b/arch/arm/mach-imx/Makefile index 9309589..70ae7c4 100644 --- a/arch/arm/mach-imx/Makefile +++ b/arch/arm/mach-imx/Makefile @@ -23,7 +23,6 @@ obj-$(CONFIG_ARCH_MXC_IOMUX_V3) += iomux-v3.o obj-$(CONFIG_MXC_TZIC) += tzic.o obj-$(CONFIG_MXC_AVIC) += avic.o -obj-$(CONFIG_IRAM_ALLOC) += iram_alloc.o obj-$(CONFIG_MXC_ULPI) += ulpi.o obj-$(CONFIG_MXC_USE_EPIT) += epit.o obj-$(CONFIG_MXC_DEBUG_BOARD) += 3ds_debugboard.o diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h index 4cba7db..c08ae3f 100644 --- a/arch/arm/mach-imx/common.h +++ b/arch/arm/mach-imx/common.h @@ -12,6 +12,7 @@ #define __ASM_ARCH_MXC_COMMON_H__ struct platform_device; +struct pt_regs; struct clk; enum mxc_cpu_pwr_mode; diff --git a/arch/arm/mach-imx/headsmp.S b/arch/arm/mach-imx/headsmp.S index a58c8b0..67b9c48 100644 --- a/arch/arm/mach-imx/headsmp.S +++ b/arch/arm/mach-imx/headsmp.S @@ -24,7 +24,7 @@ ENTRY(v7_secondary_startup) ENDPROC(v7_secondary_startup) #endif -#ifdef CONFIG_PM +#ifdef CONFIG_ARM_CPU_SUSPEND /* * The following code must assume it is running from physical address * where absolute virtual addresses to the data section have to be diff --git a/arch/arm/mach-imx/hotplug.c b/arch/arm/mach-imx/hotplug.c index 5e91112..3daf1ed 100644 --- a/arch/arm/mach-imx/hotplug.c +++ b/arch/arm/mach-imx/hotplug.c @@ -11,7 +11,9 @@ */ #include +#include #include +#include #include "common.h" diff --git a/arch/arm/mach-imx/iram_alloc.c b/arch/arm/mach-imx/iram_alloc.c deleted file mode 100644 index e05cf40..0000000 --- a/arch/arm/mach-imx/iram_alloc.c +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright (C) 2010 Freescale Semiconductor, Inc. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, - * MA 02110-1301, USA. 
- */ - -#include -#include -#include -#include -#include -#include "linux/platform_data/imx-iram.h" - -static unsigned long iram_phys_base; -static void __iomem *iram_virt_base; -static struct gen_pool *iram_pool; - -static inline void __iomem *iram_phys_to_virt(unsigned long p) -{ - return iram_virt_base + (p - iram_phys_base); -} - -void __iomem *iram_alloc(unsigned int size, unsigned long *dma_addr) -{ - if (!iram_pool) - return NULL; - - *dma_addr = gen_pool_alloc(iram_pool, size); - pr_debug("iram alloc - %dB@0x%lX\n", size, *dma_addr); - if (!*dma_addr) - return NULL; - return iram_phys_to_virt(*dma_addr); -} -EXPORT_SYMBOL(iram_alloc); - -void iram_free(unsigned long addr, unsigned int size) -{ - if (!iram_pool) - return; - - gen_pool_free(iram_pool, addr, size); -} -EXPORT_SYMBOL(iram_free); - -int __init iram_init(unsigned long base, unsigned long size) -{ - iram_phys_base = base; - - iram_pool = gen_pool_create(PAGE_SHIFT, -1); - if (!iram_pool) - return -ENOMEM; - - gen_pool_add(iram_pool, base, size, -1); - iram_virt_base = ioremap(iram_phys_base, size); - if (!iram_virt_base) - return -EIO; - - pr_debug("i.MX IRAM pool: %ld KB@0x%p\n", size / 1024, iram_virt_base); - return 0; -} diff --git a/arch/arm/mach-omap1/dma.c b/arch/arm/mach-omap1/dma.c index 1a4e887..68ab858 100644 --- a/arch/arm/mach-omap1/dma.c +++ b/arch/arm/mach-omap1/dma.c @@ -301,7 +301,7 @@ static int __init omap1_system_dma_init(void) if (ret) { dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n", __func__, pdev->name, pdev->id); - goto exit_device_put; + goto exit_iounmap; } p = kzalloc(sizeof(struct omap_system_dma_plat_info), GFP_KERNEL); @@ -309,7 +309,7 @@ static int __init omap1_system_dma_init(void) dev_err(&pdev->dev, "%s: Unable to allocate 'p' for %s\n", __func__, pdev->name); ret = -ENOMEM; - goto exit_device_del; + goto exit_iounmap; } d = kzalloc(sizeof(struct omap_dma_dev_attr), GFP_KERNEL); @@ -402,8 +402,8 @@ exit_release_d: kfree(d); exit_release_p: kfree(p); -exit_device_del: - platform_device_del(pdev); +exit_iounmap: + iounmap(dma_base); exit_device_put: platform_device_put(pdev); diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index 857b1f0..f49cd51 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig @@ -37,8 +37,6 @@ config ARCH_OMAP2PLUS_TYPICAL select NEON if ARCH_OMAP3 || ARCH_OMAP4 || SOC_OMAP5 select PM_RUNTIME select REGULATOR - select SERIAL_OMAP - select SERIAL_OMAP_CONSOLE select TWL4030_CORE if ARCH_OMAP3 || ARCH_OMAP4 select TWL4030_POWER if ARCH_OMAP3 || ARCH_OMAP4 select VFP diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index 62bb352..55a9d67 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile @@ -32,12 +32,12 @@ obj-$(CONFIG_SOC_HAS_OMAP2_SDRC) += sdrc.o # SMP support ONLY available for OMAP4 -obj-$(CONFIG_SMP) += omap-smp.o omap-headsmp.o -obj-$(CONFIG_HOTPLUG_CPU) += omap-hotplug.o +smp-$(CONFIG_SMP) += omap-smp.o omap-headsmp.o +smp-$(CONFIG_HOTPLUG_CPU) += omap-hotplug.o omap-4-5-common = omap4-common.o omap-wakeupgen.o \ sleep44xx.o -obj-$(CONFIG_ARCH_OMAP4) += $(omap-4-5-common) -obj-$(CONFIG_SOC_OMAP5) += $(omap-4-5-common) +obj-$(CONFIG_ARCH_OMAP4) += $(omap-4-5-common) $(smp-y) +obj-$(CONFIG_SOC_OMAP5) += $(omap-4-5-common) $(smp-y) plus_sec := $(call as-instr,.arch_extension sec,+sec) AFLAGS_omap-headsmp.o :=-Wa,-march=armv7-a$(plus_sec) diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c index 6de7860..04c1165 
100644 --- a/arch/arm/mach-omap2/board-omap3beagle.c +++ b/arch/arm/mach-omap2/board-omap3beagle.c @@ -112,13 +112,13 @@ static u8 omap3_beagle_version; */ static struct { int mmc1_gpio_wp; - int usb_pwr_level; + bool usb_pwr_level; /* 0 - Active Low, 1 - Active High */ int dvi_pd_gpio; int usr_button_gpio; int mmc_caps; } beagle_config = { .mmc1_gpio_wp = -EINVAL, - .usb_pwr_level = GPIOF_OUT_INIT_LOW, + .usb_pwr_level = 0, .dvi_pd_gpio = -EINVAL, .usr_button_gpio = 4, .mmc_caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA, @@ -178,7 +178,7 @@ static void __init omap3_beagle_init_rev(void) case 0: printk(KERN_INFO "OMAP3 Beagle Rev: xM Ax/Bx\n"); omap3_beagle_version = OMAP3BEAGLE_BOARD_XM; - beagle_config.usb_pwr_level = GPIOF_OUT_INIT_HIGH; + beagle_config.usb_pwr_level = 1; beagle_config.mmc_caps &= ~MMC_CAP_8_BIT_DATA; break; case 2: diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c index 1a88467..18ca61e 100644 --- a/arch/arm/mach-omap2/board-rx51-peripherals.c +++ b/arch/arm/mach-omap2/board-rx51-peripherals.c @@ -73,11 +73,11 @@ #define LIS302_IRQ1_GPIO 181 #define LIS302_IRQ2_GPIO 180 /* Not yet in use */ -/* list all spi devices here */ +/* List all SPI devices here. Note that the list/probe order seems to matter! */ enum { RX51_SPI_WL1251, - RX51_SPI_MIPID, /* LCD panel */ RX51_SPI_TSC2005, /* Touch Controller */ + RX51_SPI_MIPID, /* LCD panel */ }; static struct wl12xx_platform_data wl1251_pdata; diff --git a/arch/arm/mach-omap2/dma.c b/arch/arm/mach-omap2/dma.c index dab9fc0..49fd0d5 100644 --- a/arch/arm/mach-omap2/dma.c +++ b/arch/arm/mach-omap2/dma.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include "soc.h" @@ -304,6 +305,9 @@ static int __init omap2_system_dma_init(void) if (res) return res; + if (of_have_populated_dt()) + return res; + pdev = platform_device_register_full(&omap_dma_dev_info); if (IS_ERR(pdev)) return PTR_ERR(pdev); diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c index ed946df..6c4da12 100644 --- a/arch/arm/mach-omap2/gpmc.c +++ b/arch/arm/mach-omap2/gpmc.c @@ -1520,36 +1520,22 @@ static int gpmc_probe_dt(struct platform_device *pdev) return ret; } - for_each_node_by_name(child, "nand") { - ret = gpmc_probe_nand_child(pdev, child); - if (ret < 0) { - of_node_put(child); - return ret; - } - } + for_each_child_of_node(pdev->dev.of_node, child) { - for_each_node_by_name(child, "onenand") { - ret = gpmc_probe_onenand_child(pdev, child); - if (ret < 0) { - of_node_put(child); - return ret; - } - } + if (!child->name) + continue; - for_each_node_by_name(child, "nor") { - ret = gpmc_probe_generic_child(pdev, child); - if (ret < 0) { - of_node_put(child); - return ret; - } - } + if (of_node_cmp(child->name, "nand") == 0) + ret = gpmc_probe_nand_child(pdev, child); + else if (of_node_cmp(child->name, "onenand") == 0) + ret = gpmc_probe_onenand_child(pdev, child); + else if (of_node_cmp(child->name, "ethernet") == 0 || + of_node_cmp(child->name, "nor") == 0) + ret = gpmc_probe_generic_child(pdev, child); - for_each_node_by_name(child, "ethernet") { - ret = gpmc_probe_generic_child(pdev, child); - if (ret < 0) { + if (WARN(ret < 0, "%s: probing gpmc child %s failed\n", + __func__, child->full_name)) of_node_put(child); - return ret; - } } return 0; diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c index 0f4c18e..1272c41 100644 --- a/arch/arm/mach-omap2/id.c +++ b/arch/arm/mach-omap2/id.c @@ -419,11 +419,15 @@ void __init omap3xxx_check_revision(void) cpu_rev 
= "1.0"; break; case 1: - /* FALLTHROUGH */ - default: omap_revision = AM335X_REV_ES2_0; cpu_rev = "2.0"; break; + case 2: + /* FALLTHROUGH */ + default: + omap_revision = AM335X_REV_ES2_1; + cpu_rev = "2.1"; + break; } break; case 0xb8f2: @@ -644,13 +648,12 @@ void __init omap_soc_device_init(void) soc_dev_attr->revision = soc_rev; soc_dev = soc_device_register(soc_dev_attr); - if (IS_ERR_OR_NULL(soc_dev)) { + if (IS_ERR(soc_dev)) { kfree(soc_dev_attr); return; } parent = soc_device_to_device(soc_dev); - if (!IS_ERR_OR_NULL(parent)) - device_create_file(parent, &omap_soc_attr); + device_create_file(parent, &omap_soc_attr); } #endif /* CONFIG_SOC_BUS */ diff --git a/arch/arm/mach-omap2/mux34xx.h b/arch/arm/mach-omap2/mux34xx.h index 6543ebf..3f26d29 100644 --- a/arch/arm/mach-omap2/mux34xx.h +++ b/arch/arm/mach-omap2/mux34xx.h @@ -393,6 +393,10 @@ #define OMAP3_CONTROL_PADCONF_SAD2D_SWAKEUP_OFFSET 0xa1c #define OMAP3_CONTROL_PADCONF_JTAG_RTCK_OFFSET 0xa1e #define OMAP3_CONTROL_PADCONF_JTAG_TDO_OFFSET 0xa20 +#define OMAP3_CONTROL_PADCONF_GPIO_127 0xa24 +#define OMAP3_CONTROL_PADCONF_GPIO_126 0xa26 +#define OMAP3_CONTROL_PADCONF_GPIO_128 0xa28 +#define OMAP3_CONTROL_PADCONF_GPIO_129 0xa2a #define OMAP3_CONTROL_PADCONF_MUX_SIZE \ - (OMAP3_CONTROL_PADCONF_JTAG_TDO_OFFSET + 0x2) + (OMAP3_CONTROL_PADCONF_GPIO_129 + 0x2) diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c index eeea4fa..e6d2307 100644 --- a/arch/arm/mach-omap2/omap_device.c +++ b/arch/arm/mach-omap2/omap_device.c @@ -876,4 +876,4 @@ static int __init omap_device_late_init(void) bus_for_each_dev(&platform_bus_type, NULL, NULL, omap_device_late_idle); return 0; } -omap_late_initcall(omap_device_late_init); +omap_late_initcall_sync(omap_device_late_init); diff --git a/arch/arm/mach-omap2/soc.h b/arch/arm/mach-omap2/soc.h index 18fdeeb..197cc16 100644 --- a/arch/arm/mach-omap2/soc.h +++ b/arch/arm/mach-omap2/soc.h @@ -396,6 +396,7 @@ IS_OMAP_TYPE(3430, 0x3430) #define AM335X_CLASS 0x33500033 #define AM335X_REV_ES1_0 AM335X_CLASS #define AM335X_REV_ES2_0 (AM335X_CLASS | (0x1 << 8)) +#define AM335X_REV_ES2_1 (AM335X_CLASS | (0x2 << 8)) #define OMAP443X_CLASS 0x44300044 #define OMAP4430_REV_ES1_0 (OMAP443X_CLASS | (0x10 << 8)) @@ -496,6 +497,7 @@ level(__##fn); #define omap_subsys_initcall(fn) omap_initcall(subsys_initcall, fn) #define omap_device_initcall(fn) omap_initcall(device_initcall, fn) #define omap_late_initcall(fn) omap_initcall(late_initcall, fn) +#define omap_late_initcall_sync(fn) omap_initcall(late_initcall_sync, fn) #endif /* __ASSEMBLY__ */ diff --git a/arch/arm/mach-prima2/Kconfig b/arch/arm/mach-prima2/Kconfig index 80ca974..6988b11 100644 --- a/arch/arm/mach-prima2/Kconfig +++ b/arch/arm/mach-prima2/Kconfig @@ -38,7 +38,7 @@ config ARCH_MARCO select CPU_V7 select HAVE_ARM_SCU if SMP select HAVE_SMP - select SMP_ON_UP + select SMP_ON_UP if SMP help Support for CSR SiRFSoC ARM Cortex A9 Platform diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig index 9075461..96100db 100644 --- a/arch/arm/mach-pxa/Kconfig +++ b/arch/arm/mach-pxa/Kconfig @@ -162,7 +162,6 @@ config MACH_XCEP select MTD select MTD_CFI select MTD_CFI_INTELEXT - select MTD_CHAR select MTD_PHYSMAP select PXA25x select SMC91X diff --git a/arch/arm/mach-spear/spear13xx.c b/arch/arm/mach-spear/spear13xx.c index 3621599..7aa6e8c 100644 --- a/arch/arm/mach-spear/spear13xx.c +++ b/arch/arm/mach-spear/spear13xx.c @@ -35,6 +35,8 @@ void __init spear13xx_l2x0_init(void) * write alloc and 'Full line of zero' options * */ 
+ if (!IS_ENABLED(CONFIG_CACHE_L2X0)) + return; writel_relaxed(0x06, VA_L2CC_BASE + L2X0_PREFETCH_CTRL); diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig index 20c3b37..84d72fc 100644 --- a/arch/arm/mach-tegra/Kconfig +++ b/arch/arm/mach-tegra/Kconfig @@ -63,6 +63,7 @@ config ARCH_TEGRA_114_SOC select ARM_ARCH_TIMER select ARM_GIC select ARM_L1_CACHE_SHIFT_6 + select CPU_FREQ_TABLE if CPU_FREQ select CPU_V7 select PINCTRL select PINCTRL_TEGRA114 diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig index f66d7de..6a4387e 100644 --- a/arch/arm/mach-ux500/Kconfig +++ b/arch/arm/mach-ux500/Kconfig @@ -19,6 +19,8 @@ if ARCH_U8500 config UX500_SOC_COMMON bool default y + select ABX500_CORE + select AB8500_CORE select ARM_ERRATA_754322 select ARM_ERRATA_764369 if SMP select ARM_GIC diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c index a15dd6b..3cd555a 100644 --- a/arch/arm/mach-ux500/board-mop500.c +++ b/arch/arm/mach-ux500/board-mop500.c @@ -403,8 +403,8 @@ static int mop500_prox_activate(struct device *dev) "no regulator\n"); return PTR_ERR(prox_regulator); } - regulator_enable(prox_regulator); - return 0; + + return regulator_enable(prox_regulator); } static void mop500_prox_deactivate(struct device *dev) diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c index 995928b..e90b5ab 100644 --- a/arch/arm/mach-ux500/cpu-db8500.c +++ b/arch/arm/mach-ux500/cpu-db8500.c @@ -191,7 +191,7 @@ static const char *db8500_read_soc_id(void) /* Throw these device-specific numbers into the entropy pool */ add_device_randomness(uid, 0x14); return kasprintf(GFP_KERNEL, "%08x%08x%08x%08x%08x", - readl((u32 *)uid+1), + readl((u32 *)uid+0), readl((u32 *)uid+1), readl((u32 *)uid+2), readl((u32 *)uid+3), readl((u32 *)uid+4)); } diff --git a/arch/arm/plat-orion/Makefile b/arch/arm/plat-orion/Makefile index 2eca54b..9433605 100644 --- a/arch/arm/plat-orion/Makefile +++ b/arch/arm/plat-orion/Makefile @@ -3,6 +3,6 @@ # ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include -orion-gpio-$(CONFIG_GENERIC_GPIO) += gpio.o +orion-gpio-$(CONFIG_GPIOLIB) += gpio.o obj-$(CONFIG_PLAT_ORION_LEGACY) += irq.o pcie.o time.o common.o mpp.o obj-$(CONFIG_PLAT_ORION_LEGACY) += $(orion-gpio-y) diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c index e39c2ba..249fe63 100644 --- a/arch/arm/plat-orion/gpio.c +++ b/arch/arm/plat-orion/gpio.c @@ -150,7 +150,7 @@ err_out: } /* - * GENERIC_GPIO primitives. + * GPIO primitives. */ static int orion_gpio_request(struct gpio_chip *chip, unsigned pin) { diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 43b0e9f..48347dc 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -95,9 +95,6 @@ config SWIOTLB config IOMMU_HELPER def_bool SWIOTLB -config GENERIC_GPIO - bool - source "init/Kconfig" source "kernel/Kconfig.freezer" diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig index 22c4030..bdc3558 100644 --- a/arch/avr32/Kconfig +++ b/arch/avr32/Kconfig @@ -26,9 +26,6 @@ config AVR32 There is an AVR32 Linux project with a web page at http://avr32linux.org/. 
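
[Editor's note] A recurring change in this part of the series is dropping the per-architecture GENERIC_GPIO symbol and selecting (or requiring) GPIOLIB directly. For context, a sketch of the consumer-side gpiolib calls such platforms end up providing is shown below; the GPIO number and label are made up for illustration:

	#include <linux/gpio.h>

	#define EXAMPLE_LED_GPIO	42	/* hypothetical line number */

	static int __init example_led_init(void)
	{
		int ret;

		/* Claim the line and drive it low in a single call. */
		ret = gpio_request_one(EXAMPLE_LED_GPIO, GPIOF_OUT_INIT_LOW,
				       "example-led");
		if (ret)
			return ret;

		gpio_set_value(EXAMPLE_LED_GPIO, 1);	/* turn the LED on */
		gpio_free(EXAMPLE_LED_GPIO);
		return 0;
	}
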
-config GENERIC_GPIO - def_bool y - config STACKTRACE_SUPPORT def_bool y diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index 453ebe4..a117652 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig @@ -27,7 +27,7 @@ config BLACKFIN select HAVE_OPROFILE select HAVE_PERF_EVENTS select ARCH_HAVE_CUSTOM_GPIO_H - select ARCH_WANT_OPTIONAL_GPIOLIB + select ARCH_REQUIRE_GPIOLIB select HAVE_UID16 select HAVE_UNDERSCORE_SYMBOL_PREFIX select VIRT_TO_BUS @@ -52,9 +52,6 @@ config GENERIC_BUG config ZONE_DMA def_bool y -config GENERIC_GPIO - def_bool y - config FORCE_MAX_ZONEORDER int default "14" diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig index 06dd026..8769a90 100644 --- a/arch/cris/Kconfig +++ b/arch/cris/Kconfig @@ -264,7 +264,6 @@ config ETRAX_AXISFLASHMAP select MTD_CFI select MTD_CFI_AMDSTD select MTD_JEDECPROBE if ETRAX_ARCH_V32 - select MTD_CHAR select MTD_BLOCK select MTD_COMPLEX_MAPPINGS help diff --git a/arch/cris/arch-v32/drivers/Kconfig b/arch/cris/arch-v32/drivers/Kconfig index af4a486..c55971a 100644 --- a/arch/cris/arch-v32/drivers/Kconfig +++ b/arch/cris/arch-v32/drivers/Kconfig @@ -404,7 +404,6 @@ config ETRAX_AXISFLASHMAP select MTD_CFI select MTD_CFI_AMDSTD select MTD_JEDECPROBE - select MTD_CHAR select MTD_BLOCK select MTD_COMPLEX_MAPPINGS help diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig index 841325f..33a9792 100644 --- a/arch/hexagon/Kconfig +++ b/arch/hexagon/Kconfig @@ -155,9 +155,6 @@ source "mm/Kconfig" source "kernel/Kconfig.hz" -config GENERIC_GPIO - def_bool n - endmenu source "init/Kconfig" diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index d393f84..1a2b774 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -101,9 +101,6 @@ config GENERIC_CALIBRATE_DELAY config HAVE_SETUP_PER_CPU_AREA def_bool y -config GENERIC_GPIO - bool - config DMI bool default y diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 6de8133..821170e 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -35,9 +35,6 @@ config ARCH_HAS_ILOG2_U32 config ARCH_HAS_ILOG2_U64 bool -config GENERIC_GPIO - bool - config GENERIC_HWEIGHT bool default y diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu index b1cfff8..d266787 100644 --- a/arch/m68k/Kconfig.cpu +++ b/arch/m68k/Kconfig.cpu @@ -22,8 +22,7 @@ config M68KCLASSIC config COLDFIRE bool "Coldfire CPU family support" - select GENERIC_GPIO - select ARCH_WANT_OPTIONAL_GPIOLIB + select ARCH_REQUIRE_GPIOLIB select ARCH_HAVE_CUSTOM_GPIO_H select CPU_HAS_NO_BITFIELDS select CPU_HAS_NO_MULDIV64 diff --git a/arch/metag/Kconfig b/arch/metag/Kconfig index 6f16c14..dcd9440 100644 --- a/arch/metag/Kconfig +++ b/arch/metag/Kconfig @@ -52,9 +52,6 @@ config GENERIC_HWEIGHT config GENERIC_CALIBRATE_DELAY def_bool y -config GENERIC_GPIO - def_bool n - config NO_IOPORT def_bool y diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 54237af..d22a4ec 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -54,9 +54,6 @@ config GENERIC_HWEIGHT config GENERIC_CALIBRATE_DELAY def_bool y -config GENERIC_GPIO - bool - config GENERIC_CSUM def_bool y diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index e5f3794..a90cfc7 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -61,8 +61,7 @@ config MIPS_ALCHEMY select SYS_HAS_CPU_MIPS32_R1 select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_APM_EMULATION - select GENERIC_GPIO - select ARCH_WANT_OPTIONAL_GPIOLIB + select ARCH_REQUIRE_GPIOLIB select SYS_SUPPORTS_ZBOOT select USB_ARCH_HAS_OHCI select USB_ARCH_HAS_EHCI @@ -225,7 +224,6 
@@ config MACH_JZ4740 select SYS_SUPPORTS_ZBOOT_UART16550 select DMA_NONCOHERENT select IRQ_CPU - select GENERIC_GPIO select ARCH_REQUIRE_GPIOLIB select SYS_HAS_EARLY_PRINTK select HAVE_PWM @@ -937,7 +935,6 @@ config CSRC_SB1250 bool config GPIO_TXX9 - select GENERIC_GPIO select ARCH_REQUIRE_GPIOLIB bool @@ -1009,9 +1006,6 @@ config GENERIC_ISA_DMA_SUPPORT_BROKEN config ISA_DMA_API bool -config GENERIC_GPIO - bool - config HOLES_IN_ZONE bool @@ -1112,7 +1106,6 @@ config SOC_PNX833X select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_LITTLE_ENDIAN select SYS_SUPPORTS_BIG_ENDIAN - select GENERIC_GPIO select CPU_MIPSR2_IRQ_VI config SOC_PNX8335 @@ -1203,7 +1196,6 @@ config CPU_LOONGSON2F bool "Loongson 2F" depends on SYS_HAS_CPU_LOONGSON2F select CPU_LOONGSON2 - select GENERIC_GPIO select ARCH_REQUIRE_GPIOLIB help The Loongson 2F processor implements the MIPS III instruction set diff --git a/arch/mips/loongson/common/Makefile b/arch/mips/loongson/common/Makefile index e526488..4c57b3e 100644 --- a/arch/mips/loongson/common/Makefile +++ b/arch/mips/loongson/common/Makefile @@ -4,7 +4,7 @@ obj-y += setup.o init.o cmdline.o env.o time.o reset.o irq.o \ pci.o bonito-irq.o mem.o machtype.o platform.o -obj-$(CONFIG_GENERIC_GPIO) += gpio.o +obj-$(CONFIG_GPIOLIB) += gpio.o # # Serial port support diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c index 5524f2c..5364aab 100644 --- a/arch/mips/txx9/generic/setup.c +++ b/arch/mips/txx9/generic/setup.c @@ -118,7 +118,7 @@ EXPORT_SYMBOL(clk_put); /* GPIO support */ -#ifdef CONFIG_GENERIC_GPIO +#ifdef CONFIG_GPIOLIB int gpio_to_irq(unsigned gpio) { return -EINVAL; diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig index 81b9ddb..1072bfd 100644 --- a/arch/openrisc/Kconfig +++ b/arch/openrisc/Kconfig @@ -44,9 +44,6 @@ config GENERIC_HWEIGHT config NO_IOPORT def_bool y -config GENERIC_GPIO - def_bool y - config TRACE_IRQFLAGS_SUPPORT def_bool y diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index bbbe021..c33e3ad 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -82,11 +82,6 @@ config GENERIC_HWEIGHT bool default y -config GENERIC_GPIO - bool - help - Generic GPIO API support - config PPC bool default y diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig index bd40bbb..6e287f1 100644 --- a/arch/powerpc/platforms/40x/Kconfig +++ b/arch/powerpc/platforms/40x/Kconfig @@ -138,7 +138,6 @@ config PPC4xx_GPIO bool "PPC4xx GPIO support" depends on 40x select ARCH_REQUIRE_GPIOLIB - select GENERIC_GPIO help Enable gpiolib support for ppc40x based boards diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig index 7be9336..d6c7506 100644 --- a/arch/powerpc/platforms/44x/Kconfig +++ b/arch/powerpc/platforms/44x/Kconfig @@ -248,7 +248,6 @@ config PPC4xx_GPIO bool "PPC4xx GPIO support" depends on 44x select ARCH_REQUIRE_GPIOLIB - select GENERIC_GPIO help Enable gpiolib support for ppc440 based boards diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig index 8f02b05..efdd37c 100644 --- a/arch/powerpc/platforms/85xx/Kconfig +++ b/arch/powerpc/platforms/85xx/Kconfig @@ -203,7 +203,6 @@ config GE_IMP3A select DEFAULT_UIMAGE select SWIOTLB select MMIO_NVRAM - select GENERIC_GPIO select ARCH_REQUIRE_GPIOLIB select GE_FPGA help @@ -328,7 +327,7 @@ config B4_QDS select PPC_E500MC select PHYS_64BIT select SWIOTLB - select GENERIC_GPIO + select GPIOLIB select ARCH_REQUIRE_GPIOLIB select HAS_RAPIDIO select PPC_EPAPR_HV_PIC diff 
--git a/arch/powerpc/platforms/86xx/Kconfig b/arch/powerpc/platforms/86xx/Kconfig index 7a6279e..1afd1e4 100644 --- a/arch/powerpc/platforms/86xx/Kconfig +++ b/arch/powerpc/platforms/86xx/Kconfig @@ -37,7 +37,6 @@ config GEF_PPC9A bool "GE PPC9A" select DEFAULT_UIMAGE select MMIO_NVRAM - select GENERIC_GPIO select ARCH_REQUIRE_GPIOLIB select GE_FPGA help @@ -47,7 +46,6 @@ config GEF_SBC310 bool "GE SBC310" select DEFAULT_UIMAGE select MMIO_NVRAM - select GENERIC_GPIO select ARCH_REQUIRE_GPIOLIB select GE_FPGA help @@ -57,7 +55,6 @@ config GEF_SBC610 bool "GE SBC610" select DEFAULT_UIMAGE select MMIO_NVRAM - select GENERIC_GPIO select ARCH_REQUIRE_GPIOLIB select GE_FPGA select HAS_RAPIDIO diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig index 1fb0b3c..8dec3c0 100644 --- a/arch/powerpc/platforms/8xx/Kconfig +++ b/arch/powerpc/platforms/8xx/Kconfig @@ -114,7 +114,6 @@ config 8xx_COPYBACK config 8xx_GPIO bool "GPIO API Support" - select GENERIC_GPIO select ARCH_REQUIRE_GPIOLIB help Saying Y here will cause the ports on an MPC8xx processor to be used diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig index 34d224b..a881232 100644 --- a/arch/powerpc/platforms/Kconfig +++ b/arch/powerpc/platforms/Kconfig @@ -302,7 +302,6 @@ config QUICC_ENGINE config QE_GPIO bool "QE GPIO support" depends on QUICC_ENGINE - select GENERIC_GPIO select ARCH_REQUIRE_GPIOLIB help Say Y here if you're going to use hardware that connects to the @@ -315,7 +314,6 @@ config CPM2 select PPC_LIB_RHEAP select PPC_PCI_CHOICE select ARCH_REQUIRE_GPIOLIB - select GENERIC_GPIO help The CPM2 (Communications Processor Module) is a coprocessor on embedded CPUs made by Freescale. Selecting this option means that @@ -353,7 +351,6 @@ config OF_RTC config SIMPLE_GPIO bool "Support for simple, memory-mapped GPIO controllers" depends on PPC - select GENERIC_GPIO select ARCH_REQUIRE_GPIOLIB help Say Y here to support simple, memory-mapped GPIO controllers. 
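
[Editor's note] The conversion is mechanical on the C and Makefile side as well: tests against CONFIG_GENERIC_GPIO become tests against CONFIG_GPIOLIB, as in the txx9 setup.c hunk above. A minimal sketch of the resulting guard; the stub body simply reports that the platform routes no GPIO interrupts, matching that hunk:

	#include <linux/errno.h>
	#include <linux/gpio.h>

	/* Only compiled when gpiolib is available, i.e. CONFIG_GPIOLIB=y. */
	#ifdef CONFIG_GPIOLIB
	int gpio_to_irq(unsigned gpio)
	{
		return -EINVAL;	/* no GPIO-to-IRQ mapping on this platform */
	}
	#endif
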
@@ -364,7 +361,6 @@ config SIMPLE_GPIO config MCU_MPC8349EMITX bool "MPC8349E-mITX MCU driver" depends on I2C=y && PPC_83xx - select GENERIC_GPIO select ARCH_REQUIRE_GPIOLIB help Say Y here to enable soft power-off functionality on the Freescale diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 78d8ace..8c868cf 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -93,9 +93,6 @@ config GENERIC_CSUM config GENERIC_HWEIGHT def_bool y -config GENERIC_GPIO - def_bool n - config GENERIC_CALIBRATE_DELAY bool diff --git a/arch/sh/boards/mach-sdk7786/Makefile b/arch/sh/boards/mach-sdk7786/Makefile index 8ae56e9..45d32e3 100644 --- a/arch/sh/boards/mach-sdk7786/Makefile +++ b/arch/sh/boards/mach-sdk7786/Makefile @@ -1,4 +1,4 @@ obj-y := fpga.o irq.o nmi.o setup.o -obj-$(CONFIG_GENERIC_GPIO) += gpio.o +obj-$(CONFIG_GPIOLIB) += gpio.o obj-$(CONFIG_HAVE_SRAM_POOL) += sram.o diff --git a/arch/sh/boards/mach-x3proto/Makefile b/arch/sh/boards/mach-x3proto/Makefile index 708c21c..0cbe3d0 100644 --- a/arch/sh/boards/mach-x3proto/Makefile +++ b/arch/sh/boards/mach-x3proto/Makefile @@ -1,3 +1,3 @@ obj-y += setup.o ilsel.o -obj-$(CONFIG_GENERIC_GPIO) += gpio.o +obj-$(CONFIG_GPIOLIB) += gpio.o diff --git a/arch/sh/kernel/cpu/sh2a/Makefile b/arch/sh/kernel/cpu/sh2a/Makefile index 7fdc102..990195d 100644 --- a/arch/sh/kernel/cpu/sh2a/Makefile +++ b/arch/sh/kernel/cpu/sh2a/Makefile @@ -21,4 +21,4 @@ pinmux-$(CONFIG_CPU_SUBTYPE_SH7203) := pinmux-sh7203.o pinmux-$(CONFIG_CPU_SUBTYPE_SH7264) := pinmux-sh7264.o pinmux-$(CONFIG_CPU_SUBTYPE_SH7269) := pinmux-sh7269.o -obj-$(CONFIG_GENERIC_GPIO) += $(pinmux-y) +obj-$(CONFIG_GPIOLIB) += $(pinmux-y) diff --git a/arch/sh/kernel/cpu/sh3/Makefile b/arch/sh/kernel/cpu/sh3/Makefile index 6f13f33..d3634ae 100644 --- a/arch/sh/kernel/cpu/sh3/Makefile +++ b/arch/sh/kernel/cpu/sh3/Makefile @@ -30,4 +30,4 @@ clock-$(CONFIG_CPU_SUBTYPE_SH7712) := clock-sh7712.o pinmux-$(CONFIG_CPU_SUBTYPE_SH7720) := pinmux-sh7720.o obj-y += $(clock-y) -obj-$(CONFIG_GENERIC_GPIO) += $(pinmux-y) +obj-$(CONFIG_GPIOLIB) += $(pinmux-y) diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile index 8fc6ec2..0705df7 100644 --- a/arch/sh/kernel/cpu/sh4a/Makefile +++ b/arch/sh/kernel/cpu/sh4a/Makefile @@ -47,6 +47,6 @@ pinmux-$(CONFIG_CPU_SUBTYPE_SHX3) := pinmux-shx3.o obj-y += $(clock-y) obj-$(CONFIG_SMP) += $(smp-y) -obj-$(CONFIG_GENERIC_GPIO) += $(pinmux-y) +obj-$(CONFIG_GPIOLIB) += $(pinmux-y) obj-$(CONFIG_PERF_EVENTS) += perf_event.o obj-$(CONFIG_HAVE_HW_BREAKPOINT) += ubc.o diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index a639c0d..9ac9f16 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -137,11 +137,6 @@ config GENERIC_ISA_DMA bool default y if SPARC32 -config GENERIC_GPIO - bool - help - Generic GPIO API support - config ARCH_SUPPORTS_DEBUG_PAGEALLOC def_bool y if SPARC64 diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig index 5b6a40d..3aa3766 100644 --- a/arch/tile/Kconfig +++ b/arch/tile/Kconfig @@ -355,11 +355,17 @@ config HARDWALL config KERNEL_PL int "Processor protection level for kernel" range 1 2 - default "1" + default 2 if TILEGX + default 1 if !TILEGX ---help--- - This setting determines the processor protection level the - kernel will be built to run at. Generally you should use - the default value here. + Since MDE 4.2, the Tilera hypervisor runs the kernel + at PL2 by default. If running under an older hypervisor, + or as a KVM guest, you must run at PL1. 
(The current + hypervisor may also be recompiled with "make HV_PL=2" to + allow it to run a kernel at PL1, but clients running at PL1 + are not expected to be supported indefinitely.) + + If you're not sure, don't change the default. source "arch/tile/gxio/Kconfig" diff --git a/arch/tile/include/hv/hypervisor.h b/arch/tile/include/hv/hypervisor.h index ccd847e..837dca5 100644 --- a/arch/tile/include/hv/hypervisor.h +++ b/arch/tile/include/hv/hypervisor.h @@ -107,7 +107,22 @@ #define HV_DISPATCH_ENTRY_SIZE 32 /** Version of the hypervisor interface defined by this file */ -#define _HV_VERSION 11 +#define _HV_VERSION 13 + +/** Last version of the hypervisor interface with old hv_init() ABI. + * + * The change from version 12 to version 13 corresponds to launching + * the client by default at PL2 instead of PL1 (corresponding to the + * hv itself running at PL3 instead of PL2). To make this explicit, + * the hv_init() API was also extended so the client can report its + * desired PL, resulting in a more helpful failure diagnostic. If you + * call hv_init() with _HV_VERSION_OLD_HV_INIT and omit the client_pl + * argument, the hypervisor will assume client_pl = 1. + * + * Note that this is a deprecated solution and we do not expect to + * support clients of the Tilera hypervisor running at PL1 indefinitely. + */ +#define _HV_VERSION_OLD_HV_INIT 12 /* Index into hypervisor interface dispatch code blocks. * @@ -377,7 +392,11 @@ typedef int HV_Errno; #ifndef __ASSEMBLER__ /** Pass HV_VERSION to hv_init to request this version of the interface. */ -typedef enum { HV_VERSION = _HV_VERSION } HV_VersionNumber; +typedef enum { + HV_VERSION = _HV_VERSION, + HV_VERSION_OLD_HV_INIT = _HV_VERSION_OLD_HV_INIT, + +} HV_VersionNumber; /** Initializes the hypervisor. * @@ -385,9 +404,11 @@ typedef enum { HV_VERSION = _HV_VERSION } HV_VersionNumber; * that this program expects, typically HV_VERSION. * @param chip_num Architecture number of the chip the client was built for. * @param chip_rev_num Revision number of the chip the client was built for. + * @param client_pl Privilege level the client is built for + * (not required if interface_version_number == HV_VERSION_OLD_HV_INIT). */ void hv_init(HV_VersionNumber interface_version_number, - int chip_num, int chip_rev_num); + int chip_num, int chip_rev_num, int client_pl); /** Queries we can make for hv_sysconf(). diff --git a/arch/tile/kernel/head_32.S b/arch/tile/kernel/head_32.S index f71bfee..ac11530 100644 --- a/arch/tile/kernel/head_32.S +++ b/arch/tile/kernel/head_32.S @@ -38,7 +38,7 @@ ENTRY(_start) movei r2, TILE_CHIP_REV } { - moveli r0, _HV_VERSION + moveli r0, _HV_VERSION_OLD_HV_INIT jal hv_init } /* Get a reasonable default ASID in r0 */ diff --git a/arch/tile/kernel/head_64.S b/arch/tile/kernel/head_64.S index f9a2734..6093964 100644 --- a/arch/tile/kernel/head_64.S +++ b/arch/tile/kernel/head_64.S @@ -34,13 +34,19 @@ ENTRY(_start) /* Notify the hypervisor of what version of the API we want */ { +#if KERNEL_PL == 1 && _HV_VERSION == 13 + /* Support older hypervisors by asking for API version 12. 
*/ + movei r0, _HV_VERSION_OLD_HV_INIT +#else + movei r0, _HV_VERSION +#endif movei r1, TILE_CHIP - movei r2, TILE_CHIP_REV } { - moveli r0, _HV_VERSION - jal hv_init + movei r2, TILE_CHIP_REV + movei r3, KERNEL_PL } + jal hv_init /* Get a reasonable default ASID in r0 */ { move r0, zero diff --git a/arch/tile/lib/spinlock_32.c b/arch/tile/lib/spinlock_32.c index b16ac49..b34f79a 100644 --- a/arch/tile/lib/spinlock_32.c +++ b/arch/tile/lib/spinlock_32.c @@ -101,7 +101,7 @@ EXPORT_SYMBOL(arch_spin_unlock_wait); * preserve the semantic that the same read lock can be acquired in an * interrupt context. */ -inline int arch_read_trylock(arch_rwlock_t *rwlock) +int arch_read_trylock(arch_rwlock_t *rwlock) { u32 val; __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 1); diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig index 2943e3a..41bcc00 100644 --- a/arch/unicore32/Kconfig +++ b/arch/unicore32/Kconfig @@ -23,9 +23,6 @@ config UNICORE32 designs licensed by PKUnity Ltd. Please see web page at . -config GENERIC_GPIO - def_bool y - config GENERIC_CSUM def_bool y @@ -156,7 +153,7 @@ source "mm/Kconfig" config LEDS def_bool y - depends on GENERIC_GPIO + depends on GPIOLIB config ALIGNMENT_TRAP def_bool y @@ -219,7 +216,6 @@ if ARCH_PUV3 config PUV3_GPIO bool depends on !ARCH_FPGA - select GENERIC_GPIO select GPIO_SYSFS default y diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 5db2117..6a154a9 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -174,9 +174,6 @@ config GENERIC_BUG_RELATIVE_POINTERS config GENERIC_HWEIGHT def_bool y -config GENERIC_GPIO - bool - config ARCH_MAY_HAVE_PC_FDC def_bool y depends on ISA_DMA_API diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c index 6eb18c4..0e0fabf 100644 --- a/arch/x86/pci/mrst.c +++ b/arch/x86/pci/mrst.c @@ -141,6 +141,11 @@ static int pci_device_update_fixed(struct pci_bus *bus, unsigned int devfn, */ static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg) { + if (bus == 0 && (devfn == PCI_DEVFN(2, 0) + || devfn == PCI_DEVFN(0, 0) + || devfn == PCI_DEVFN(3, 0))) + return 1; + /* This is a workaround for A0 LNC bug where PCI status register does * not have new CAP bit set. can not be written by SW either. * @@ -150,10 +155,7 @@ static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg) */ if (reg >= 0x100 || reg == PCI_STATUS || reg == PCI_HEADER_TYPE) return 0; - if (bus == 0 && (devfn == PCI_DEVFN(2, 0) - || devfn == PCI_DEVFN(0, 0) - || devfn == PCI_DEVFN(3, 0))) - return 1; + return 0; /* langwell on others */ } diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index b09de49..0a1b95f 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -1,11 +1,9 @@ -config FRAME_POINTER - def_bool n - config ZONE_DMA def_bool y config XTENSA def_bool y + select ARCH_WANT_FRAME_POINTERS select HAVE_IDE select GENERIC_ATOMIC64 select HAVE_GENERIC_HARDIRQS @@ -33,9 +31,6 @@ config RWSEM_XCHGADD_ALGORITHM config GENERIC_HWEIGHT def_bool y -config GENERIC_GPIO - bool - config ARCH_HAS_ILOG2_U32 def_bool n @@ -52,6 +47,15 @@ config HZ source "init/Kconfig" source "kernel/Kconfig.freezer" +config LOCKDEP_SUPPORT + def_bool y + +config STACKTRACE_SUPPORT + def_bool y + +config TRACE_IRQFLAGS_SUPPORT + def_bool y + config MMU def_bool n @@ -103,6 +107,35 @@ config MATH_EMULATION help Can we use information of configuration file? 
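
[Editor's note] The LOCKDEP_SUPPORT, STACKTRACE_SUPPORT and TRACE_IRQFLAGS_SUPPORT symbols added above advertise infrastructure that later hunks of this patch fill in; in particular, asm/stacktrace.h (further down) introduces walk_stackframe() and stack_pointer(). A hedged sketch of a caller, using only the types declared in that header -- the printing callback is made up, and the stop-on-non-zero return convention is assumed from other architectures' implementations:

	#include <linux/kernel.h>
	#include <linux/sched.h>
	#include <asm/stacktrace.h>

	/* Called once per frame; a non-zero return is assumed to stop the walk. */
	static int example_show_frame(struct stackframe *frame, void *data)
	{
		int *depth = data;

		pr_info("  #%d: pc=%pS sp=%08lx\n", *depth,
			(void *)frame->pc, frame->sp);
		return ++(*depth) >= 16;	/* bail out after 16 frames */
	}

	static void example_dump_current_stack(void)
	{
		int depth = 0;

		/* stack_pointer(NULL) reads the current task's a1. */
		walk_stackframe(stack_pointer(NULL), example_show_frame, &depth);
	}
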
+config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX + bool "Initialize Xtensa MMU inside the Linux kernel code" + default y + help + Earlier version initialized the MMU in the exception vector + before jumping to _startup in head.S and had an advantage that + it was possible to place a software breakpoint at 'reset' and + then enter your normal kernel breakpoints once the MMU was mapped + to the kernel mappings (0XC0000000). + + This unfortunately doesn't work for U-Boot and likley also wont + work for using KEXEC to have a hot kernel ready for doing a + KDUMP. + + So now the MMU is initialized in head.S but it's necessary to + use hardware breakpoints (gdb 'hbreak' cmd) to break at _startup. + xt-gdb can't place a Software Breakpoint in the 0XD region prior + to mapping the MMU and after mapping even if the area of low memory + was mapped gdb wouldn't remove the breakpoint on hitting it as the + PC wouldn't match. Since Hardware Breakpoints are recommended for + Linux configurations it seems reasonable to just assume they exist + and leave this older mechanism for unfortunate souls that choose + not to follow Tensilica's recommendation. + + Selecting this will cause U-Boot to set the KERNEL Load and Entry + address at 0x00003000 instead of the mapped std of 0xD0003000. + + If in doubt, say Y. + endmenu config XTENSA_CALIBRATE_CCOUNT @@ -252,21 +285,6 @@ endmenu menu "Executable file formats" -# only elf supported -config KCORE_ELF - def_bool y - depends on PROC_FS - help - If you enabled support for /proc file system then the file - /proc/kcore will contain the kernel core image in ELF format. This - can be used in gdb: - - $ cd /usr/src/linux ; gdb vmlinux /proc/kcore - - This is especially useful if you have compiled the kernel with the - "-g" option to preserve debugging information. It is mainly used - for examining kernel data structures on the live kernel. - source "fs/Kconfig.binfmt" endmenu diff --git a/arch/xtensa/boot/boot-elf/Makefile b/arch/xtensa/boot/boot-elf/Makefile index 1fe01b7..89db089 100644 --- a/arch/xtensa/boot/boot-elf/Makefile +++ b/arch/xtensa/boot/boot-elf/Makefile @@ -12,6 +12,7 @@ endif export OBJCOPY_ARGS export CPPFLAGS_boot.lds += -P -C +export KBUILD_AFLAGS += -mtext-section-literals boot-y := bootstrap.o diff --git a/arch/xtensa/boot/boot-elf/boot.lds.S b/arch/xtensa/boot/boot-elf/boot.lds.S index 7b646e0..932b58e 100644 --- a/arch/xtensa/boot/boot-elf/boot.lds.S +++ b/arch/xtensa/boot/boot-elf/boot.lds.S @@ -1,41 +1,29 @@ -#include +/* + * linux/arch/xtensa/boot/boot-elf/boot.lds.S + * + * Copyright (C) 2008 - 2013 by Tensilica Inc. + * + * Chris Zankel + * Marc Gauthier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include OUTPUT_ARCH(xtensa) ENTRY(_ResetVector) SECTIONS { - .start 0xD0000000 : { *(.start) } - - .text 0xD0000000: - { - __reloc_start = . ; - _text_start = . ; - *(.literal .text.literal .text) - _text_end = . ; - } - - .rodata ALIGN(0x04): - { - *(.rodata) - *(.rodata1) - } - - .data ALIGN(0x04): + .ResetVector.text XCHAL_RESET_VECTOR_VADDR : { - *(.data) - *(.data1) - *(.sdata) - *(.sdata2) - *(.got.plt) - *(.got) - *(.dynamic) + *(.ResetVector.text) } - __reloc_end = . ; - - . = ALIGN(0x10); - __image_load = . ; - .image 0xd0001000: + .image KERNELOFFSET: AT (LOAD_MEMORY_ADDRESS) { _image_start = .; *(image) @@ -43,7 +31,6 @@ SECTIONS _image_end = . 
; } - .bss ((LOADADDR(.image) + SIZEOF(.image) + 3) & ~ 3): { __bss_start = .; @@ -53,14 +40,15 @@ SECTIONS *(.bss) __bss_end = .; } - _end = .; - _param_start = .; - .ResetVector.text XCHAL_RESET_VECTOR_VADDR : + /* + * This is a remapped copy of the Reset Vector Code. + * It keeps gdb in sync with the PC after switching + * to the temporary mapping used while setting up + * the V2 MMU mappings for Linux. + */ + .ResetVector.remapped_text 0x46000000 (INFO): { - *(.ResetVector.text) + *(.ResetVector.remapped_text) } - - - PROVIDE (end = .); } diff --git a/arch/xtensa/boot/boot-elf/bootstrap.S b/arch/xtensa/boot/boot-elf/bootstrap.S index 464298b..1388a49 100644 --- a/arch/xtensa/boot/boot-elf/bootstrap.S +++ b/arch/xtensa/boot/boot-elf/bootstrap.S @@ -1,18 +1,77 @@ +/* + * arch/xtensa/boot/boot-elf/bootstrap.S + * + * Low-level exception handling + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2004 - 2013 by Tensilica Inc. + * + * Chris Zankel + * Marc Gauthier + * Piet Delaney + */ #include +#include +#include +#include +#include +#include +#include - -/* ResetVector - */ - .section .ResetVector.text, "ax" + .section .ResetVector.text, "ax" .global _ResetVector + .global reset + _ResetVector: - _j reset + _j _SetupMMU + + .begin no-absolute-literals + .literal_position + .align 4 RomInitAddr: - .word 0xd0001000 +#if defined(CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX) && \ + XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY + .word 0x00003000 +#else + .word 0xd0003000 +#endif RomBootParam: .word _bootparam +_bootparam: + .short BP_TAG_FIRST + .short 4 + .long BP_VERSION + .short BP_TAG_LAST + .short 0 + .long 0 + + .align 4 +_SetupMMU: + movi a0, 0 + wsr a0, windowbase + rsync + movi a0, 1 + wsr a0, windowstart + rsync + movi a0, 0x1F + wsr a0, ps + rsync + + Offset = _SetupMMU - _ResetVector + +#ifndef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX + initialize_mmu +#endif + + .end no-absolute-literals + + rsil a0, XCHAL_DEBUGLEVEL-1 + rsync reset: l32r a0, RomInitAddr l32r a2, RomBootParam @@ -21,13 +80,25 @@ reset: jx a0 .align 4 - .section .bootstrap.data, "aw" - .globl _bootparam -_bootparam: - .short BP_TAG_FIRST - .short 4 - .long BP_VERSION - .short BP_TAG_LAST - .short 0 - .long 0 + .section .ResetVector.remapped_text, "x" + .global _RemappedResetVector + + /* Do org before literals */ + .org 0 + +_RemappedResetVector: + .begin no-absolute-literals + .literal_position + + _j _RemappedSetupMMU + + /* Position Remapped code at the same location as the original code */ + . = _RemappedResetVector + Offset + +_RemappedSetupMMU: +#ifndef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX + initialize_mmu +#endif + + .end no-absolute-literals diff --git a/arch/xtensa/boot/boot-redboot/boot.ld b/arch/xtensa/boot/boot-redboot/boot.ld index 5bbcaf9..b0b9e95 100644 --- a/arch/xtensa/boot/boot-redboot/boot.ld +++ b/arch/xtensa/boot/boot-redboot/boot.ld @@ -33,7 +33,7 @@ SECTIONS . = ALIGN(0x10); __image_load = . ; - .image 0xd0001000: AT(__image_load) + .image 0xd0003000: AT(__image_load) { _image_start = .; *(image) diff --git a/arch/xtensa/boot/boot-uboot/Makefile b/arch/xtensa/boot/boot-uboot/Makefile index bfbf8af..5457598 100644 --- a/arch/xtensa/boot/boot-uboot/Makefile +++ b/arch/xtensa/boot/boot-uboot/Makefile @@ -4,7 +4,11 @@ # for more details. 
# -UIMAGE_LOADADDR = 0xd0001000 +ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX +UIMAGE_LOADADDR = 0x00003000 +else +UIMAGE_LOADADDR = 0xd0003000 +endif UIMAGE_COMPRESSION = gzip $(obj)/../uImage: vmlinux.bin.gz FORCE diff --git a/arch/xtensa/configs/iss_defconfig b/arch/xtensa/configs/iss_defconfig index ddab37b..77c52f8 100644 --- a/arch/xtensa/configs/iss_defconfig +++ b/arch/xtensa/configs/iss_defconfig @@ -10,7 +10,6 @@ CONFIG_RWSEM_XCHGADD_ALGORITHM=y CONFIG_GENERIC_FIND_NEXT_BIT=y CONFIG_GENERIC_HWEIGHT=y CONFIG_GENERIC_HARDIRQS=y -CONFIG_GENERIC_GPIO=y # CONFIG_ARCH_HAS_ILOG2_U32 is not set # CONFIG_ARCH_HAS_ILOG2_U64 is not set CONFIG_NO_IOPORT=y diff --git a/arch/xtensa/configs/s6105_defconfig b/arch/xtensa/configs/s6105_defconfig index eaf1b8f..4799c6a 100644 --- a/arch/xtensa/configs/s6105_defconfig +++ b/arch/xtensa/configs/s6105_defconfig @@ -10,7 +10,6 @@ CONFIG_RWSEM_XCHGADD_ALGORITHM=y CONFIG_GENERIC_FIND_NEXT_BIT=y CONFIG_GENERIC_HWEIGHT=y CONFIG_GENERIC_HARDIRQS=y -CONFIG_GENERIC_GPIO=y # CONFIG_ARCH_HAS_ILOG2_U32 is not set # CONFIG_ARCH_HAS_ILOG2_U64 is not set CONFIG_NO_IOPORT=y diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild index 095f0a2..1b98264 100644 --- a/arch/xtensa/include/asm/Kbuild +++ b/arch/xtensa/include/asm/Kbuild @@ -15,6 +15,7 @@ generic-y += irq_regs.h generic-y += kdebug.h generic-y += kmap_types.h generic-y += kvm_para.h +generic-y += linkage.h generic-y += local.h generic-y += local64.h generic-y += percpu.h diff --git a/arch/xtensa/include/asm/ftrace.h b/arch/xtensa/include/asm/ftrace.h index 40a8c17..36dc7a6 100644 --- a/arch/xtensa/include/asm/ftrace.h +++ b/arch/xtensa/include/asm/ftrace.h @@ -1 +1,33 @@ -/* empty */ +/* + * arch/xtensa/include/asm/ftrace.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2013 Tensilica Inc. + */ +#ifndef _XTENSA_FTRACE_H +#define _XTENSA_FTRACE_H + +#include + +#define HAVE_ARCH_CALLER_ADDR +#define CALLER_ADDR0 ({ unsigned long a0, a1; \ + __asm__ __volatile__ ( \ + "mov %0, a0\n" \ + "mov %1, a1\n" \ + : "=r"(a0), "=r"(a1) : : ); \ + MAKE_PC_FROM_RA(a0, a1); }) +#ifdef CONFIG_FRAME_POINTER +extern unsigned long return_address(unsigned level); +#define CALLER_ADDR1 return_address(1) +#define CALLER_ADDR2 return_address(2) +#define CALLER_ADDR3 return_address(3) +#else +#define CALLER_ADDR1 (0) +#define CALLER_ADDR2 (0) +#define CALLER_ADDR3 (0) +#endif + +#endif /* _XTENSA_FTRACE_H */ diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h index e1f8ba4..722553f 100644 --- a/arch/xtensa/include/asm/initialize_mmu.h +++ b/arch/xtensa/include/asm/initialize_mmu.h @@ -23,6 +23,9 @@ #ifndef _XTENSA_INITIALIZE_MMU_H #define _XTENSA_INITIALIZE_MMU_H +#include +#include + #ifdef __ASSEMBLY__ #define XTENSA_HWVERSION_RC_2009_0 230000 @@ -48,6 +51,110 @@ * (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0) */ +#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY +/* + * Have MMU v3 + */ + +#if !XCHAL_HAVE_VECBASE +# error "MMU v3 requires reloc vectors" +#endif + + movi a1, 0 + _call0 1f + _j 2f + + .align 4 +1: movi a2, 0x10000000 + movi a3, 0x18000000 + add a2, a2, a0 +9: bgeu a2, a3, 9b /* PC is out of the expected range */ + + /* Step 1: invalidate mapping at 0x40000000..0x5FFFFFFF. 
*/ + + movi a2, 0x40000006 + idtlb a2 + iitlb a2 + isync + + /* Step 2: map 0x40000000..0x47FFFFFF to paddr containing this code + * and jump to the new mapping. + */ +#define CA_BYPASS (_PAGE_CA_BYPASS | _PAGE_HW_WRITE | _PAGE_HW_EXEC) +#define CA_WRITEBACK (_PAGE_CA_WB | _PAGE_HW_WRITE | _PAGE_HW_EXEC) + + srli a3, a0, 27 + slli a3, a3, 27 + addi a3, a3, CA_BYPASS + addi a7, a2, -1 + wdtlb a3, a7 + witlb a3, a7 + isync + + slli a4, a0, 5 + srli a4, a4, 5 + addi a5, a2, -6 + add a4, a4, a5 + jx a4 + + /* Step 3: unmap everything other than current area. + * Start at 0x60000000, wrap around, and end with 0x20000000 + */ +2: movi a4, 0x20000000 + add a5, a2, a4 +3: idtlb a5 + iitlb a5 + add a5, a5, a4 + bne a5, a2, 3b + + /* Step 4: Setup MMU with the old V2 mappings. */ + movi a6, 0x01000000 + wsr a6, ITLBCFG + wsr a6, DTLBCFG + isync + + movi a5, 0xd0000005 + movi a4, CA_WRITEBACK + wdtlb a4, a5 + witlb a4, a5 + + movi a5, 0xd8000005 + movi a4, CA_BYPASS + wdtlb a4, a5 + witlb a4, a5 + + movi a5, 0xe0000006 + movi a4, 0xf0000000 + CA_WRITEBACK + wdtlb a4, a5 + witlb a4, a5 + + movi a5, 0xf0000006 + movi a4, 0xf0000000 + CA_BYPASS + wdtlb a4, a5 + witlb a4, a5 + + isync + + /* Jump to self, using MMU v2 mappings. */ + movi a4, 1f + jx a4 + +1: + movi a2, VECBASE_RESET_VADDR + wsr a2, vecbase + + /* Step 5: remove temporary mapping. */ + idtlb a7 + iitlb a7 + isync + + movi a0, 0 + wsr a0, ptevaddr + rsync + +#endif /* defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && + XCHAL_HAVE_SPANNING_WAY */ + .endm #endif /*__ASSEMBLY__*/ diff --git a/arch/xtensa/include/asm/irqflags.h b/arch/xtensa/include/asm/irqflags.h index f865b1c..ea36674 100644 --- a/arch/xtensa/include/asm/irqflags.h +++ b/arch/xtensa/include/asm/irqflags.h @@ -47,7 +47,10 @@ static inline void arch_local_irq_restore(unsigned long flags) static inline bool arch_irqs_disabled_flags(unsigned long flags) { - return (flags & 0xf) != 0; +#if XCHAL_EXCM_LEVEL < LOCKLEVEL || (1 << PS_EXCM_BIT) < LOCKLEVEL +#error "XCHAL_EXCM_LEVEL and 1<= LOCKLEVEL; } static inline bool arch_irqs_disabled(void) diff --git a/arch/xtensa/include/asm/linkage.h b/arch/xtensa/include/asm/linkage.h deleted file mode 100644 index bf2128a..0000000 --- a/arch/xtensa/include/asm/linkage.h +++ /dev/null @@ -1,16 +0,0 @@ -/* - * include/asm-xtensa/linkage.h - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2001 - 2005 Tensilica Inc. - */ - -#ifndef _XTENSA_LINKAGE_H -#define _XTENSA_LINKAGE_H - -/* Nothing to do here ... */ - -#endif /* _XTENSA_LINKAGE_H */ diff --git a/arch/xtensa/include/asm/stacktrace.h b/arch/xtensa/include/asm/stacktrace.h new file mode 100644 index 0000000..6a05fcb --- /dev/null +++ b/arch/xtensa/include/asm/stacktrace.h @@ -0,0 +1,36 @@ +/* + * arch/xtensa/include/asm/stacktrace.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2013 Tensilica Inc. 
+ */ +#ifndef _XTENSA_STACKTRACE_H +#define _XTENSA_STACKTRACE_H + +#include + +struct stackframe { + unsigned long pc; + unsigned long sp; +}; + +static __always_inline unsigned long *stack_pointer(struct task_struct *task) +{ + unsigned long *sp; + + if (!task || task == current) + __asm__ __volatile__ ("mov %0, a1\n" : "=a"(sp)); + else + sp = (unsigned long *)task->thread.sp; + + return sp; +} + +void walk_stackframe(unsigned long *sp, + int (*fn)(struct stackframe *frame, void *data), + void *data); + +#endif /* _XTENSA_STACKTRACE_H */ diff --git a/arch/xtensa/include/asm/timex.h b/arch/xtensa/include/asm/timex.h index 9e85ce8..3d35e5d 100644 --- a/arch/xtensa/include/asm/timex.h +++ b/arch/xtensa/include/asm/timex.h @@ -19,13 +19,16 @@ #define _INTLEVEL(x) XCHAL_INT ## x ## _LEVEL #define INTLEVEL(x) _INTLEVEL(x) -#if INTLEVEL(XCHAL_TIMER0_INTERRUPT) <= XCHAL_EXCM_LEVEL +#if XCHAL_NUM_TIMERS > 0 && \ + INTLEVEL(XCHAL_TIMER0_INTERRUPT) <= XCHAL_EXCM_LEVEL # define LINUX_TIMER 0 # define LINUX_TIMER_INT XCHAL_TIMER0_INTERRUPT -#elif INTLEVEL(XCHAL_TIMER1_INTERRUPT) <= XCHAL_EXCM_LEVEL +#elif XCHAL_NUM_TIMERS > 1 && \ + INTLEVEL(XCHAL_TIMER1_INTERRUPT) <= XCHAL_EXCM_LEVEL # define LINUX_TIMER 1 # define LINUX_TIMER_INT XCHAL_TIMER1_INTERRUPT -#elif INTLEVEL(XCHAL_TIMER2_INTERRUPT) <= XCHAL_EXCM_LEVEL +#elif XCHAL_NUM_TIMERS > 2 && \ + INTLEVEL(XCHAL_TIMER2_INTERRUPT) <= XCHAL_EXCM_LEVEL # define LINUX_TIMER 2 # define LINUX_TIMER_INT XCHAL_TIMER2_INTERRUPT #else diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h index b5464ef..917488a 100644 --- a/arch/xtensa/include/asm/traps.h +++ b/arch/xtensa/include/asm/traps.h @@ -22,10 +22,9 @@ extern void do_unhandled(struct pt_regs *regs, unsigned long exccause); static inline void spill_registers(void) { - unsigned int a0, ps; __asm__ __volatile__ ( - "movi a14, " __stringify(PS_EXCM_BIT | LOCKLEVEL) "\n\t" + "movi a14, "__stringify((1 << PS_EXCM_BIT) | LOCKLEVEL)"\n\t" "mov a12, a0\n\t" "rsr a13, sar\n\t" "xsr a14, ps\n\t" @@ -35,7 +34,7 @@ static inline void spill_registers(void) "mov a0, a12\n\t" "wsr a13, sar\n\t" "wsr a14, ps\n\t" - : : "a" (&a0), "a" (&ps) + : : #if defined(CONFIG_FRAME_POINTER) : "a2", "a3", "a4", "a11", "a12", "a13", "a14", "a15", #else diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h new file mode 100644 index 0000000..c52b656 --- /dev/null +++ b/arch/xtensa/include/asm/vectors.h @@ -0,0 +1,125 @@ +/* + * arch/xtensa/include/asm/xchal_vaddr_remap.h + * + * Xtensa macros for MMU V3 Support. Deals with re-mapping the Virtual + * Memory Addresses from "Virtual == Physical" to their prevvious V2 MMU + * mappings (KSEG at 0xD0000000 and KIO at 0XF0000000). + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2008 - 2012 Tensilica Inc. 
+ * + * Pete Delaney + * Marc Gauthier + +#if defined(CONFIG_MMU) + +/* Will Become VECBASE */ +#define VIRTUAL_MEMORY_ADDRESS 0xD0000000 + +/* Image Virtual Start Address */ +#define KERNELOFFSET 0xD0003000 + +#if defined(XCHAL_HAVE_PTP_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY + /* MMU v3 - XCHAL_HAVE_PTP_MMU == 1 */ + #define PHYSICAL_MEMORY_ADDRESS 0x00000000 + #define LOAD_MEMORY_ADDRESS 0x00003000 +#else + /* MMU V2 - XCHAL_HAVE_PTP_MMU == 0 */ + #define PHYSICAL_MEMORY_ADDRESS 0xD0000000 + #define LOAD_MEMORY_ADDRESS 0xD0003000 +#endif + +#else /* !defined(CONFIG_MMU) */ + /* MMU Not being used - Virtual == Physical */ + + /* VECBASE */ + #define VIRTUAL_MEMORY_ADDRESS 0x00002000 + + /* Location of the start of the kernel text, _start */ + #define KERNELOFFSET 0x00003000 + #define PHYSICAL_MEMORY_ADDRESS 0x00000000 + + /* Loaded just above possibly live vectors */ + #define LOAD_MEMORY_ADDRESS 0x00003000 + +#endif /* CONFIG_MMU */ + +#define XC_VADDR(offset) (VIRTUAL_MEMORY_ADDRESS + offset) +#define XC_PADDR(offset) (PHYSICAL_MEMORY_ADDRESS + offset) + +/* Used to set VECBASE register */ +#define VECBASE_RESET_VADDR VIRTUAL_MEMORY_ADDRESS + +#define RESET_VECTOR_VECOFS (XCHAL_RESET_VECTOR_VADDR - \ + VECBASE_RESET_VADDR) +#define RESET_VECTOR_VADDR XC_VADDR(RESET_VECTOR_VECOFS) + +#define RESET_VECTOR1_VECOFS (XCHAL_RESET_VECTOR1_VADDR - \ + VECBASE_RESET_VADDR) +#define RESET_VECTOR1_VADDR XC_VADDR(RESET_VECTOR1_VECOFS) + +#if XCHAL_HAVE_VECBASE + +#define USER_VECTOR_VADDR XC_VADDR(XCHAL_USER_VECOFS) +#define KERNEL_VECTOR_VADDR XC_VADDR(XCHAL_KERNEL_VECOFS) +#define DOUBLEEXC_VECTOR_VADDR XC_VADDR(XCHAL_DOUBLEEXC_VECOFS) +#define WINDOW_VECTORS_VADDR XC_VADDR(XCHAL_WINDOW_OF4_VECOFS) +#define INTLEVEL2_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL2_VECOFS) +#define INTLEVEL3_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL3_VECOFS) +#define INTLEVEL4_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL4_VECOFS) +#define INTLEVEL5_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL5_VECOFS) +#define INTLEVEL6_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL6_VECOFS) + +#define DEBUG_VECTOR_VADDR XC_VADDR(XCHAL_DEBUG_VECOFS) + +#undef XCHAL_NMI_VECTOR_VADDR +#define XCHAL_NMI_VECTOR_VADDR XC_VADDR(XCHAL_NMI_VECOFS) + +#undef XCHAL_INTLEVEL7_VECTOR_VADDR +#define XCHAL_INTLEVEL7_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL7_VECOFS) + +/* + * These XCHAL_* #defines from varian/core.h + * are not valid to use with V3 MMU. Non-XCHAL + * constants are defined above and should be used. 
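With VECBASE-relative vectors, each vector's virtual address is just the chosen VECBASE plus the per-vector offset exported by the core configuration. A small sketch of that arithmetic, using hypothetical offsets in place of the real XCHAL_*_VECOFS values:

    #include <stdio.h>

    #define VIRTUAL_MEMORY_ADDRESS  0xD0000000UL   /* VECBASE with CONFIG_MMU */
    #define XC_VADDR(offset)        (VIRTUAL_MEMORY_ADDRESS + (offset))

    /* hypothetical vector offsets, for illustration only */
    #define USER_VECOFS             0x00000340UL
    #define KERNEL_VECOFS           0x00000300UL
    #define DOUBLEEXC_VECOFS        0x000003C0UL

    int main(void)
    {
        printf("user vector      at %#010lx\n", XC_VADDR(USER_VECOFS));
        printf("kernel vector    at %#010lx\n", XC_VADDR(KERNEL_VECOFS));
        printf("double exception at %#010lx\n", XC_VADDR(DOUBLEEXC_VECOFS));
        return 0;
    }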
+ */ +#undef XCHAL_VECBASE_RESET_VADDR +#undef XCHAL_RESET_VECTOR0_VADDR +#undef XCHAL_USER_VECTOR_VADDR +#undef XCHAL_KERNEL_VECTOR_VADDR +#undef XCHAL_DOUBLEEXC_VECTOR_VADDR +#undef XCHAL_WINDOW_VECTORS_VADDR +#undef XCHAL_INTLEVEL2_VECTOR_VADDR +#undef XCHAL_INTLEVEL3_VECTOR_VADDR +#undef XCHAL_INTLEVEL4_VECTOR_VADDR +#undef XCHAL_INTLEVEL5_VECTOR_VADDR +#undef XCHAL_INTLEVEL6_VECTOR_VADDR +#undef XCHAL_DEBUG_VECTOR_VADDR +#undef XCHAL_NMI_VECTOR_VADDR +#undef XCHAL_INTLEVEL7_VECTOR_VADDR + +#else + +#define USER_VECTOR_VADDR XCHAL_USER_VECTOR_VADDR +#define KERNEL_VECTOR_VADDR XCHAL_KERNEL_VECTOR_VADDR +#define DOUBLEEXC_VECTOR_VADDR XCHAL_DOUBLEEXC_VECTOR_VADDR +#define WINDOW_VECTORS_VADDR XCHAL_WINDOW_VECTORS_VADDR +#define INTLEVEL2_VECTOR_VADDR XCHAL_INTLEVEL2_VECTOR_VADDR +#define INTLEVEL3_VECTOR_VADDR XCHAL_INTLEVEL3_VECTOR_VADDR +#define INTLEVEL4_VECTOR_VADDR XCHAL_INTLEVEL4_VECTOR_VADDR +#define INTLEVEL5_VECTOR_VADDR XCHAL_INTLEVEL5_VECTOR_VADDR +#define INTLEVEL6_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR +#define DEBUG_VECTOR_VADDR XCHAL_DEBUG_VECTOR_VADDR + +#endif + +#endif /* _XTENSA_VECTORS_H */ diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile index c3a59d9..1e7fc87 100644 --- a/arch/xtensa/kernel/Makefile +++ b/arch/xtensa/kernel/Makefile @@ -4,14 +4,16 @@ extra-y := head.o vmlinux.lds -obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o \ - setup.o signal.o syscall.o time.o traps.o vectors.o platform.o \ - pci-dma.o +obj-y := align.o coprocessor.o entry.o irq.o pci-dma.o platform.o process.o \ + ptrace.o setup.o signal.o stacktrace.o syscall.o time.o traps.o \ + vectors.o obj-$(CONFIG_KGDB) += xtensa-stub.o obj-$(CONFIG_PCI) += pci.o obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o +AFLAGS_head.o += -mtext-section-literals + # In the Xtensa architecture, assembly generates literals which must always # precede the L32R instruction with a relative offset less than 256 kB. # Therefore, the .text and .literal section must be combined in parenthesis diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index 63845f9..5082507 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -354,16 +354,16 @@ common_exception: * so we can allow exceptions and interrupts (*) again. * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X) * - * (*) We only allow interrupts of higher priority than current IRQ + * (*) We only allow interrupts if they were previously enabled and + * we're not handling an IRQ */ rsr a3, ps - addi a0, a0, -4 - movi a2, 1 + addi a0, a0, -EXCCAUSE_LEVEL1_INTERRUPT + movi a2, LOCKLEVEL extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH # a3 = PS.INTLEVEL - movnez a2, a3, a3 # a2 = 1: level-1, > 1: high priority - moveqz a3, a2, a0 # a3 = IRQ level iff interrupt + moveqz a3, a2, a0 # a3 = LOCKLEVEL iff interrupt movi a2, 1 << PS_WOE_BIT or a3, a3, a2 rsr a0, exccause @@ -389,6 +389,22 @@ common_exception: save_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT +#ifdef CONFIG_TRACE_IRQFLAGS + l32i a4, a1, PT_DEPC + /* Double exception means we came here with an exception + * while PS.EXCM was set, i.e. interrupts disabled. + */ + bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f + l32i a4, a1, PT_EXCCAUSE + bnei a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f + /* We came here with an interrupt means interrupts were enabled + * and we've just disabled them. + */ + movi a4, trace_hardirqs_off + callx4 a4 +1: +#endif + /* Go to second-level dispatcher. 
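On the common exception path the new PS value keeps the previously saved interrupt level, except for a level-1 interrupt, where the level is raised to LOCKLEVEL so further interrupts stay masked while the handler runs. A sketch of that selection, with assumed values for LOCKLEVEL and the WOE bit position:

    #include <stdio.h>

    #define LOCKLEVEL                   1   /* assumed */
    #define PS_WOE_BIT                  18  /* assumed bit position */
    #define EXCCAUSE_LEVEL1_INTERRUPT   4

    static unsigned long new_ps(unsigned long exccause, unsigned long saved_intlevel)
    {
        unsigned long level = saved_intlevel;

        if (exccause == EXCCAUSE_LEVEL1_INTERRUPT)
            level = LOCKLEVEL;              /* keep interrupts masked */

        return level | (1UL << PS_WOE_BIT); /* re-enable window overflows */
    }

    int main(void)
    {
        printf("other exception:   PS=%#lx\n", new_ps(8, 0));
        printf("level-1 interrupt: PS=%#lx\n",
               new_ps(EXCCAUSE_LEVEL1_INTERRUPT, 0));
        return 0;
    }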
Set up parameters to pass to the * exception handler and call the exception handler. */ @@ -407,11 +423,29 @@ common_exception: .global common_exception_return common_exception_return: +#ifdef CONFIG_TRACE_IRQFLAGS + l32i a4, a1, PT_DEPC + /* Double exception means we came here with an exception + * while PS.EXCM was set, i.e. interrupts disabled. + */ + bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f + l32i a4, a1, PT_EXCCAUSE + bnei a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f + /* We came here with an interrupt means interrupts were enabled + * and we'll reenable them on return. + */ + movi a4, trace_hardirqs_on + callx4 a4 +1: +#endif + /* Jump if we are returning from kernel exceptions. */ 1: l32i a3, a1, PT_PS _bbci.l a3, PS_UM_BIT, 4f + rsil a2, 0 + /* Specific to a user exception exit: * We need to check some flags for signal handling and rescheduling, * and have to restore WB and WS, extra states, and all registers @@ -652,51 +686,19 @@ common_exception_exit: l32i a0, a1, PT_DEPC l32i a3, a1, PT_AREG3 - _bltui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f - - wsr a0, depc l32i a2, a1, PT_AREG2 - l32i a0, a1, PT_AREG0 - l32i a1, a1, PT_AREG1 - rfde + _bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f -1: /* Restore a0...a3 and return */ - rsr a0, ps - extui a2, a0, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH - movi a0, 2f - slli a2, a2, 4 - add a0, a2, a0 - l32i a2, a1, PT_AREG2 - jx a0 - - .macro irq_exit_level level - .align 16 - .if XCHAL_EXCM_LEVEL >= \level - l32i a0, a1, PT_PC - wsr a0, epc\level l32i a0, a1, PT_AREG0 l32i a1, a1, PT_AREG1 - rfi \level - .endif - .endm + rfe - .align 16 -2: +1: wsr a0, depc l32i a0, a1, PT_AREG0 l32i a1, a1, PT_AREG1 - rfe - - .align 16 - /* no rfi for level-1 irq, handled by rfe above*/ - nop - - irq_exit_level 2 - irq_exit_level 3 - irq_exit_level 4 - irq_exit_level 5 - irq_exit_level 6 + rfde ENDPROC(kernel_exception) diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S index df88f98..ef12c0e 100644 --- a/arch/xtensa/kernel/head.S +++ b/arch/xtensa/kernel/head.S @@ -48,17 +48,36 @@ */ __HEAD + .begin no-absolute-literals + ENTRY(_start) - _j 2f + /* Preserve the pointer to the boot parameter list in EXCSAVE_1 */ + wsr a2, excsave1 + _j _SetupMMU + + .align 4 + .literal_position +.Lstartup: + .word _startup + .align 4 -1: .word _startup -2: l32r a0, 1b + .global _SetupMMU +_SetupMMU: + Offset = _SetupMMU - _start + +#ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX + initialize_mmu +#endif + .end no-absolute-literals + + l32r a0, .Lstartup jx a0 ENDPROC(_start) - .section .init.text, "ax" + __INIT + .literal_position ENTRY(_startup) @@ -67,10 +86,6 @@ ENTRY(_startup) movi a0, LOCKLEVEL wsr a0, ps - /* Preserve the pointer to the boot parameter list in EXCSAVE_1 */ - - wsr a2, excsave1 - /* Start with a fresh windowbase and windowstart. */ movi a1, 1 @@ -86,7 +101,9 @@ ENTRY(_startup) /* Clear debugging registers. 
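With every interrupt dispatched through the level-1 path, the kernel exception exit only has to distinguish a normal return (rfe) from a double-exception return (rfde), based on whether the saved DEPC holds a valid double-exception address. Schematically, with an assumed threshold value:

    #include <stdio.h>

    /* assumed threshold; the kernel uses VALID_DOUBLE_EXCEPTION_ADDRESS */
    #define VALID_DOUBLE_EXCEPTION_ADDRESS 0xd0000000UL

    static const char *exit_insn(unsigned long saved_depc)
    {
        return saved_depc >= VALID_DOUBLE_EXCEPTION_ADDRESS ? "rfde" : "rfe";
    }

    int main(void)
    {
        printf("DEPC=0x00000000 -> %s\n", exit_insn(0x0UL));
        printf("DEPC=0xd0000004 -> %s\n", exit_insn(0xd0000004UL));
        return 0;
    }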
*/ #if XCHAL_HAVE_DEBUG +#if XCHAL_NUM_IBREAK > 0 wsr a0, ibreakenable +#endif wsr a0, icount movi a1, 15 wsr a0, icountlevel @@ -156,8 +173,6 @@ ENTRY(_startup) isync - initialize_mmu - /* Unpack data sections * * The linker script used to build the Linux kernel image @@ -205,6 +220,10 @@ ENTRY(_startup) ___flush_dcache_all a2 a3 #endif + memw + isync + ___invalidate_icache_all a2 a3 + isync /* Setup stack and enable window exceptions (keep irqs disabled) */ diff --git a/arch/xtensa/kernel/platform.c b/arch/xtensa/kernel/platform.c index 44bf21c..2bd6c35 100644 --- a/arch/xtensa/kernel/platform.c +++ b/arch/xtensa/kernel/platform.c @@ -36,6 +36,7 @@ _F(void, power_off, (void), { while(1); }); _F(void, idle, (void), { __asm__ __volatile__ ("waiti 0" ::: "memory"); }); _F(void, heartbeat, (void), { }); _F(int, pcibios_fixup, (void), { return 0; }); +_F(void, pcibios_init, (void), { }); #ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT _F(void, calibrate_ccount, (void), diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c new file mode 100644 index 0000000..7d2c317 --- /dev/null +++ b/arch/xtensa/kernel/stacktrace.c @@ -0,0 +1,120 @@ +/* + * arch/xtensa/kernel/stacktrace.c + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2013 Tensilica Inc. + */ +#include +#include +#include + +#include +#include + +void walk_stackframe(unsigned long *sp, + int (*fn)(struct stackframe *frame, void *data), + void *data) +{ + unsigned long a0, a1; + unsigned long sp_end; + + a1 = (unsigned long)sp; + sp_end = ALIGN(a1, THREAD_SIZE); + + spill_registers(); + + while (a1 < sp_end) { + struct stackframe frame; + + sp = (unsigned long *)a1; + + a0 = *(sp - 4); + a1 = *(sp - 3); + + if (a1 <= (unsigned long)sp) + break; + + frame.pc = MAKE_PC_FROM_RA(a0, a1); + frame.sp = a1; + + if (fn(&frame, data)) + return; + } +} + +#ifdef CONFIG_STACKTRACE + +struct stack_trace_data { + struct stack_trace *trace; + unsigned skip; +}; + +static int stack_trace_cb(struct stackframe *frame, void *data) +{ + struct stack_trace_data *trace_data = data; + struct stack_trace *trace = trace_data->trace; + + if (trace_data->skip) { + --trace_data->skip; + return 0; + } + if (!kernel_text_address(frame->pc)) + return 0; + + trace->entries[trace->nr_entries++] = frame->pc; + return trace->nr_entries >= trace->max_entries; +} + +void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace) +{ + struct stack_trace_data trace_data = { + .trace = trace, + .skip = trace->skip, + }; + walk_stackframe(stack_pointer(task), stack_trace_cb, &trace_data); +} +EXPORT_SYMBOL_GPL(save_stack_trace_tsk); + +void save_stack_trace(struct stack_trace *trace) +{ + save_stack_trace_tsk(current, trace); +} +EXPORT_SYMBOL_GPL(save_stack_trace); + +#endif + +#ifdef CONFIG_FRAME_POINTER + +struct return_addr_data { + unsigned long addr; + unsigned skip; +}; + +static int return_address_cb(struct stackframe *frame, void *data) +{ + struct return_addr_data *r = data; + + if (r->skip) { + --r->skip; + return 0; + } + if (!kernel_text_address(frame->pc)) + return 0; + r->addr = frame->pc; + return 1; +} + +unsigned long return_address(unsigned level) +{ + struct return_addr_data r = { + .skip = level + 1, + }; + walk_stackframe(stack_pointer(NULL), return_address_cb, &r); + return r.addr; +} +EXPORT_SYMBOL(return_address); + +#endif diff --git a/arch/xtensa/kernel/traps.c 
b/arch/xtensa/kernel/traps.c index 458186d..3e8a05c 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c @@ -11,7 +11,7 @@ * * Essentially rewritten for the Xtensa architecture port. * - * Copyright (C) 2001 - 2005 Tensilica Inc. + * Copyright (C) 2001 - 2013 Tensilica Inc. * * Joe Taylor * Chris Zankel @@ -32,6 +32,7 @@ #include #include +#include #include #include #include @@ -195,7 +196,6 @@ void do_multihit(struct pt_regs *regs, unsigned long exccause) /* * IRQ handler. - * PS.INTLEVEL is the current IRQ priority level. */ extern void do_IRQ(int, struct pt_regs *); @@ -212,18 +212,21 @@ void do_interrupt(struct pt_regs *regs) XCHAL_INTLEVEL6_MASK, XCHAL_INTLEVEL7_MASK, }; - unsigned level = get_sr(ps) & PS_INTLEVEL_MASK; - - if (WARN_ON_ONCE(level >= ARRAY_SIZE(int_level_mask))) - return; for (;;) { unsigned intread = get_sr(interrupt); unsigned intenable = get_sr(intenable); - unsigned int_at_level = intread & intenable & - int_level_mask[level]; + unsigned int_at_level = intread & intenable; + unsigned level; + + for (level = LOCKLEVEL; level > 0; --level) { + if (int_at_level & int_level_mask[level]) { + int_at_level &= int_level_mask[level]; + break; + } + } - if (!int_at_level) + if (level == 0) return; /* @@ -404,53 +407,25 @@ void show_regs(struct pt_regs * regs) regs->syscall); } -static __always_inline unsigned long *stack_pointer(struct task_struct *task) +static int show_trace_cb(struct stackframe *frame, void *data) { - unsigned long *sp; - - if (!task || task == current) - __asm__ __volatile__ ("mov %0, a1\n" : "=a"(sp)); - else - sp = (unsigned long *)task->thread.sp; - - return sp; + if (kernel_text_address(frame->pc)) { + printk(" [<%08lx>] ", frame->pc); + print_symbol("%s\n", frame->pc); + } + return 0; } void show_trace(struct task_struct *task, unsigned long *sp) { - unsigned long a0, a1, pc; - unsigned long sp_start, sp_end; - - if (sp) - a1 = (unsigned long)sp; - else - a1 = (unsigned long)stack_pointer(task); - - sp_start = a1 & ~(THREAD_SIZE-1); - sp_end = sp_start + THREAD_SIZE; + if (!sp) + sp = stack_pointer(task); printk("Call Trace:"); #ifdef CONFIG_KALLSYMS printk("\n"); #endif - spill_registers(); - - while (a1 > sp_start && a1 < sp_end) { - sp = (unsigned long*)a1; - - a0 = *(sp - 4); - a1 = *(sp - 3); - - if (a1 <= (unsigned long) sp) - break; - - pc = MAKE_PC_FROM_RA(a0, a1); - - if (kernel_text_address(pc)) { - printk(" [<%08lx>] ", pc); - print_symbol("%s\n", pc); - } - } + walk_stackframe(sp, show_trace_cb, NULL); printk("\n"); } diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S index 82109b42..f9e1753 100644 --- a/arch/xtensa/kernel/vectors.S +++ b/arch/xtensa/kernel/vectors.S @@ -50,6 +50,7 @@ #include #include #include +#include #define WINDOW_VECTORS_SIZE 0x180 @@ -220,7 +221,7 @@ ENTRY(_DoubleExceptionVector) xsr a0, depc # get DEPC, save a0 - movi a3, XCHAL_WINDOW_VECTORS_VADDR + movi a3, WINDOW_VECTORS_VADDR _bltu a0, a3, .Lfixup addi a3, a3, WINDOW_VECTORS_SIZE _bgeu a0, a3, .Lfixup @@ -385,9 +386,12 @@ ENDPROC(_DebugInterruptVector) .if XCHAL_EXCM_LEVEL >= \level .section .Level\level\()InterruptVector.text, "ax" ENTRY(_Level\level\()InterruptVector) - wsr a0, epc1 + wsr a0, excsave2 rsr a0, epc\level - xsr a0, epc1 + wsr a0, epc1 + movi a0, EXCCAUSE_LEVEL1_INTERRUPT + wsr a0, exccause + rsr a0, eps\level # branch to user or kernel vector j _SimulateUserKernelVectorException .endif @@ -439,10 +443,8 @@ ENDPROC(_WindowOverflow4) */ .align 4 _SimulateUserKernelVectorException: - wsr a0, excsave2 - 
movi a0, 4 # LEVEL1_INTERRUPT cause - wsr a0, exccause - rsr a0, ps + addi a0, a0, (1 << PS_EXCM_BIT) + wsr a0, ps bbsi.l a0, PS_UM_BIT, 1f # branch if user mode rsr a0, excsave2 # restore a0 j _KernelExceptionVector # simulate kernel vector exception diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S index 1469524..21acd11 100644 --- a/arch/xtensa/kernel/vmlinux.lds.S +++ b/arch/xtensa/kernel/vmlinux.lds.S @@ -18,6 +18,7 @@ #include #include +#include #include #include OUTPUT_ARCH(xtensa) @@ -30,7 +31,7 @@ jiffies = jiffies_64; #endif #ifndef KERNELOFFSET -#define KERNELOFFSET 0xd0001000 +#define KERNELOFFSET 0xd0003000 #endif /* Note: In the following macros, it would be nice to specify only the @@ -185,16 +186,16 @@ SECTIONS SECTION_VECTOR (_WindowVectors_text, .WindowVectors.text, - XCHAL_WINDOW_VECTORS_VADDR, 4, + WINDOW_VECTORS_VADDR, 4, .dummy) SECTION_VECTOR (_DebugInterruptVector_literal, .DebugInterruptVector.literal, - XCHAL_DEBUG_VECTOR_VADDR - 4, + DEBUG_VECTOR_VADDR - 4, SIZEOF(.WindowVectors.text), .WindowVectors.text) SECTION_VECTOR (_DebugInterruptVector_text, .DebugInterruptVector.text, - XCHAL_DEBUG_VECTOR_VADDR, + DEBUG_VECTOR_VADDR, 4, .DebugInterruptVector.literal) #undef LAST @@ -202,7 +203,7 @@ SECTIONS #if XCHAL_EXCM_LEVEL >= 2 SECTION_VECTOR (_Level2InterruptVector_text, .Level2InterruptVector.text, - XCHAL_INTLEVEL2_VECTOR_VADDR, + INTLEVEL2_VECTOR_VADDR, SIZEOF(LAST), LAST) # undef LAST # define LAST .Level2InterruptVector.text @@ -210,7 +211,7 @@ SECTIONS #if XCHAL_EXCM_LEVEL >= 3 SECTION_VECTOR (_Level3InterruptVector_text, .Level3InterruptVector.text, - XCHAL_INTLEVEL3_VECTOR_VADDR, + INTLEVEL3_VECTOR_VADDR, SIZEOF(LAST), LAST) # undef LAST # define LAST .Level3InterruptVector.text @@ -218,7 +219,7 @@ SECTIONS #if XCHAL_EXCM_LEVEL >= 4 SECTION_VECTOR (_Level4InterruptVector_text, .Level4InterruptVector.text, - XCHAL_INTLEVEL4_VECTOR_VADDR, + INTLEVEL4_VECTOR_VADDR, SIZEOF(LAST), LAST) # undef LAST # define LAST .Level4InterruptVector.text @@ -226,7 +227,7 @@ SECTIONS #if XCHAL_EXCM_LEVEL >= 5 SECTION_VECTOR (_Level5InterruptVector_text, .Level5InterruptVector.text, - XCHAL_INTLEVEL5_VECTOR_VADDR, + INTLEVEL5_VECTOR_VADDR, SIZEOF(LAST), LAST) # undef LAST # define LAST .Level5InterruptVector.text @@ -234,39 +235,39 @@ SECTIONS #if XCHAL_EXCM_LEVEL >= 6 SECTION_VECTOR (_Level6InterruptVector_text, .Level6InterruptVector.text, - XCHAL_INTLEVEL6_VECTOR_VADDR, + INTLEVEL6_VECTOR_VADDR, SIZEOF(LAST), LAST) # undef LAST # define LAST .Level6InterruptVector.text #endif SECTION_VECTOR (_KernelExceptionVector_literal, .KernelExceptionVector.literal, - XCHAL_KERNEL_VECTOR_VADDR - 4, + KERNEL_VECTOR_VADDR - 4, SIZEOF(LAST), LAST) #undef LAST SECTION_VECTOR (_KernelExceptionVector_text, .KernelExceptionVector.text, - XCHAL_KERNEL_VECTOR_VADDR, + KERNEL_VECTOR_VADDR, 4, .KernelExceptionVector.literal) SECTION_VECTOR (_UserExceptionVector_literal, .UserExceptionVector.literal, - XCHAL_USER_VECTOR_VADDR - 4, + USER_VECTOR_VADDR - 4, SIZEOF(.KernelExceptionVector.text), .KernelExceptionVector.text) SECTION_VECTOR (_UserExceptionVector_text, .UserExceptionVector.text, - XCHAL_USER_VECTOR_VADDR, + USER_VECTOR_VADDR, 4, .UserExceptionVector.literal) SECTION_VECTOR (_DoubleExceptionVector_literal, .DoubleExceptionVector.literal, - XCHAL_DOUBLEEXC_VECTOR_VADDR - 16, + DOUBLEEXC_VECTOR_VADDR - 16, SIZEOF(.UserExceptionVector.text), .UserExceptionVector.text) SECTION_VECTOR (_DoubleExceptionVector_text, .DoubleExceptionVector.text, - 
XCHAL_DOUBLEEXC_VECTOR_VADDR, + DOUBLEEXC_VECTOR_VADDR, 32, .DoubleExceptionVector.literal) @@ -284,11 +285,26 @@ SECTIONS . = ALIGN(0x10); .bootstrap : { *(.bootstrap.literal .bootstrap.text .bootstrap.data) } - .ResetVector.text XCHAL_RESET_VECTOR_VADDR : + .ResetVector.text RESET_VECTOR_VADDR : { *(.ResetVector.text) } + + /* + * This is a remapped copy of the Secondary Reset Vector Code. + * It keeps gdb in sync with the PC after switching + * to the temporary mapping used while setting up + * the V2 MMU mappings for Linux. + * + * Only debug information about this section is put in the kernel image. + */ + .SecondaryResetVector.remapped_text 0x46000000 (INFO): + { + *(.SecondaryResetVector.remapped_text) + } + + .xt.lit : { *(.xt.lit) } .xt.prop : { *(.xt.prop) } diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c index afe058b..42c53c87 100644 --- a/arch/xtensa/kernel/xtensa_ksyms.c +++ b/arch/xtensa/kernel/xtensa_ksyms.c @@ -119,3 +119,8 @@ EXPORT_SYMBOL(outsl); EXPORT_SYMBOL(insb); EXPORT_SYMBOL(insw); EXPORT_SYMBOL(insl); + +extern long common_exception_return; +extern long _spill_registers; +EXPORT_SYMBOL(common_exception_return); +EXPORT_SYMBOL(_spill_registers); diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c index 0f77f9d..a107757 100644 --- a/arch/xtensa/mm/mmu.c +++ b/arch/xtensa/mm/mmu.c @@ -24,15 +24,19 @@ void __init paging_init(void) */ void __init init_mmu(void) { - /* Writing zeros to the TLBCFG special registers ensure - * that valid values exist in the register. For existing - * PGSZID fields, zero selects the first element of the - * page-size array. For nonexistent PGSZID fields, zero is - * the best value to write. Also, when changing PGSZID +#if !(XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY) + /* + * Writing zeros to the instruction and data TLBCFG special + * registers ensure that valid values exist in the register. + * + * For existing PGSZID fields, zero selects the first element + * of the page-size array. For nonexistent PGSZID fields, + * zero is the best value to write. Also, when changing PGSZID * fields, the corresponding TLB must be flushed. */ set_itlbcfg_register(0); set_dtlbcfg_register(0); +#endif flush_tlb_all(); /* Set rasid register to a known value. */ diff --git a/arch/xtensa/oprofile/backtrace.c b/arch/xtensa/oprofile/backtrace.c index 66f32ee..5f03a59 100644 --- a/arch/xtensa/oprofile/backtrace.c +++ b/arch/xtensa/oprofile/backtrace.c @@ -132,9 +132,7 @@ static void xtensa_backtrace_kernel(struct pt_regs *regs, unsigned int depth) pc = MAKE_PC_FROM_RA(a0, pc); /* Add the PC to the trace. 
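The simulator console is serviced from a timer: rs_poll() drains characters for as long as simc_poll() reports pending input, then re-arms the timer. A rough user-space analogue of that drain loop, using poll(2) and read(2) on stdin in place of the simcalls:

    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    /* returns nonzero while fd has data ready, like simc_poll() */
    static int input_ready(int fd)
    {
        struct pollfd pfd = { .fd = fd, .events = POLLIN };
        return poll(&pfd, 1, 0) > 0;
    }

    int main(void)
    {
        unsigned char c;
        int count = 0;

        while (input_ready(STDIN_FILENO) &&
               read(STDIN_FILENO, &c, 1) == 1)
            count++;    /* the driver would queue c to the tty here */

        printf("drained %d pending character(s)\n", count);
        return 0;
    }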
*/ - if (kernel_text_address(pc)) - oprofile_add_trace(pc); - + oprofile_add_trace(pc); if (pc == (unsigned long) &common_exception_return) { regs = (struct pt_regs *)a1; if (user_mode(regs)) { diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c index da9866f..70cb408 100644 --- a/arch/xtensa/platforms/iss/console.c +++ b/arch/xtensa/platforms/iss/console.c @@ -56,13 +56,13 @@ static void rs_poll(unsigned long); static int rs_open(struct tty_struct *tty, struct file * filp) { tty->port = &serial_port; - spin_lock(&timer_lock); + spin_lock_bh(&timer_lock); if (tty->count == 1) { setup_timer(&serial_timer, rs_poll, (unsigned long)&serial_port); mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE); } - spin_unlock(&timer_lock); + spin_unlock_bh(&timer_lock); return 0; } @@ -99,14 +99,13 @@ static int rs_write(struct tty_struct * tty, static void rs_poll(unsigned long priv) { struct tty_port *port = (struct tty_port *)priv; - struct timeval tv = { .tv_sec = 0, .tv_usec = 0 }; int i = 0; unsigned char c; spin_lock(&timer_lock); - while (__simc(SYS_select_one, 0, XTISS_SELECT_ONE_READ, (int)&tv,0,0)){ - __simc (SYS_read, 0, (unsigned long)&c, 1, 0, 0); + while (simc_poll(0)) { + simc_read(0, &c, 1); tty_insert_flip_char(port, c, TTY_NORMAL); i++; } @@ -244,8 +243,7 @@ static void iss_console_write(struct console *co, const char *s, unsigned count) int len = strlen(s); if (s != 0 && *s != 0) - __simc (SYS_write, 1, (unsigned long)s, - count < len ? count : len,0,0); + simc_write(1, s, count < len ? count : len); } static struct tty_driver* iss_console_device(struct console *c, int *index) diff --git a/arch/xtensa/platforms/iss/include/platform/simcall.h b/arch/xtensa/platforms/iss/include/platform/simcall.h index b5a4edf..12b15ad 100644 --- a/arch/xtensa/platforms/iss/include/platform/simcall.h +++ b/arch/xtensa/platforms/iss/include/platform/simcall.h @@ -59,56 +59,58 @@ static int errno; -static inline int __simc(int a, int b, int c, int d, int e, int f) +static inline int __simc(int a, int b, int c, int d) { int ret; register int a1 asm("a2") = a; register int b1 asm("a3") = b; register int c1 asm("a4") = c; register int d1 asm("a5") = d; - register int e1 asm("a6") = e; - register int f1 asm("a7") = f; __asm__ __volatile__ ( "simcall\n" "mov %0, a2\n" "mov %1, a3\n" : "=a" (ret), "=a" (errno), "+r"(a1), "+r"(b1) - : "r"(c1), "r"(d1), "r"(e1), "r"(f1) + : "r"(c1), "r"(d1) : "memory"); return ret; } static inline int simc_open(const char *file, int flags, int mode) { - return __simc(SYS_open, (int) file, flags, mode, 0, 0); + return __simc(SYS_open, (int) file, flags, mode); } static inline int simc_close(int fd) { - return __simc(SYS_close, fd, 0, 0, 0, 0); + return __simc(SYS_close, fd, 0, 0); } static inline int simc_ioctl(int fd, int request, void *arg) { - return __simc(SYS_ioctl, fd, request, (int) arg, 0, 0); + return __simc(SYS_ioctl, fd, request, (int) arg); } static inline int simc_read(int fd, void *buf, size_t count) { - return __simc(SYS_read, fd, (int) buf, count, 0, 0); + return __simc(SYS_read, fd, (int) buf, count); } static inline int simc_write(int fd, const void *buf, size_t count) { - return __simc(SYS_write, fd, (int) buf, count, 0, 0); + return __simc(SYS_write, fd, (int) buf, count); } static inline int simc_poll(int fd) { struct timeval tv = { .tv_sec = 0, .tv_usec = 0 }; - return __simc(SYS_select_one, fd, XTISS_SELECT_ONE_READ, (int)&tv, - 0, 0); + return __simc(SYS_select_one, fd, XTISS_SELECT_ONE_READ, (int)&tv); +} + +static 
inline int simc_lseek(int fd, uint32_t off, int whence) +{ + return __simc(SYS_lseek, fd, off, whence); } #endif /* _XTENSA_PLATFORM_ISS_SIMCALL_H */ diff --git a/arch/xtensa/platforms/iss/setup.c b/arch/xtensa/platforms/iss/setup.c index e170010..da7d182 100644 --- a/arch/xtensa/platforms/iss/setup.c +++ b/arch/xtensa/platforms/iss/setup.c @@ -38,12 +38,6 @@ void __init platform_init(bp_tag_t* bootparam) } -#ifdef CONFIG_PCI -void platform_pcibios_init(void) -{ -} -#endif - void platform_halt(void) { pr_info(" ** Called platform_halt() **\n"); @@ -64,7 +58,9 @@ void platform_restart(void) "wsr a2, icountlevel\n\t" "movi a2, 0\n\t" "wsr a2, icount\n\t" +#if XCHAL_NUM_IBREAK > 0 "wsr a2, ibreakenable\n\t" +#endif "wsr a2, lcount\n\t" "movi a2, 0x1f\n\t" "wsr a2, ps\n\t" diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c index 0345f43..c0edb35 100644 --- a/arch/xtensa/platforms/iss/simdisk.c +++ b/arch/xtensa/platforms/iss/simdisk.c @@ -85,7 +85,7 @@ static void simdisk_transfer(struct simdisk *dev, unsigned long sector, while (nbytes > 0) { unsigned long io; - __simc(SYS_lseek, dev->fd, offset, SEEK_SET, 0, 0); + simc_lseek(dev->fd, offset, SEEK_SET); if (write) io = simc_write(dev->fd, buffer, nbytes); else @@ -176,7 +176,7 @@ static int simdisk_attach(struct simdisk *dev, const char *filename) err = -ENODEV; goto out; } - dev->size = __simc(SYS_lseek, dev->fd, 0, SEEK_END, 0, 0); + dev->size = simc_lseek(dev->fd, 0, SEEK_END); set_capacity(dev->gd, dev->size >> SECTOR_SHIFT); dev->filename = filename; pr_info("SIMDISK: %s=%s\n", dev->gd->disk_name, dev->filename); @@ -217,7 +217,7 @@ static ssize_t proc_read_simdisk(struct file *file, char __user *buf, size_t size, loff_t *ppos) { struct simdisk *dev = PDE_DATA(file_inode(file)); - char *s = dev->filename; + const char *s = dev->filename; if (s) { ssize_t n = simple_read_from_buffer(buf, size, ppos, s, strlen(s)); @@ -238,7 +238,7 @@ static ssize_t proc_write_simdisk(struct file *file, const char __user *buf, if (tmp == NULL) return -ENOMEM; - if (copy_from_user(tmp, buffer, count)) { + if (copy_from_user(tmp, buf, count)) { err = -EFAULT; goto out_free; } diff --git a/arch/xtensa/platforms/xt2000/setup.c b/arch/xtensa/platforms/xt2000/setup.c index c7d90f1..f9bc879 100644 --- a/arch/xtensa/platforms/xt2000/setup.c +++ b/arch/xtensa/platforms/xt2000/setup.c @@ -69,7 +69,9 @@ void platform_restart(void) "wsr a2, icountlevel\n\t" "movi a2, 0\n\t" "wsr a2, icount\n\t" +#if XCHAL_NUM_IBREAK > 0 "wsr a2, ibreakenable\n\t" +#endif "wsr a2, lcount\n\t" "movi a2, 0x1f\n\t" "wsr a2, ps\n\t" diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c index 9d888a2..96ef8ee 100644 --- a/arch/xtensa/platforms/xtfpga/setup.c +++ b/arch/xtensa/platforms/xtfpga/setup.c @@ -60,7 +60,9 @@ void platform_restart(void) "wsr a2, icountlevel\n\t" "movi a2, 0\n\t" "wsr a2, icount\n\t" +#if XCHAL_NUM_IBREAK > 0 "wsr a2, ibreakenable\n\t" +#endif "wsr a2, lcount\n\t" "movi a2, 0x1f\n\t" "wsr a2, ps\n\t" diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c index ec7f569..c84ee95 100644 --- a/drivers/acpi/acpica/exfldio.c +++ b/drivers/acpi/acpica/exfldio.c @@ -720,7 +720,19 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc, if ((obj_desc->common_field.start_field_bit_offset == 0) && (obj_desc->common_field.bit_length == access_bit_width)) { - status = acpi_ex_field_datum_io(obj_desc, 0, buffer, ACPI_READ); + if (buffer_length >= sizeof(u64)) { + status = + 
acpi_ex_field_datum_io(obj_desc, 0, buffer, + ACPI_READ); + } else { + /* Use raw_datum (u64) to handle buffers < 64 bits */ + + status = + acpi_ex_field_datum_io(obj_desc, 0, &raw_datum, + ACPI_READ); + ACPI_MEMCPY(buffer, &raw_datum, buffer_length); + } + return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c index 2a431ec..46f0f83 100644 --- a/drivers/acpi/acpica/nsinit.c +++ b/drivers/acpi/acpica/nsinit.c @@ -558,6 +558,7 @@ acpi_ns_init_one_device(acpi_handle obj_handle, ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname (ACPI_TYPE_METHOD, device_node, METHOD_NAME__INI)); + ACPI_MEMSET(info, 0, sizeof(struct acpi_evaluate_info)); info->prefix_node = device_node; info->pathname = METHOD_NAME__INI; info->parameters = NULL; diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c index b15aceb..7e80772 100644 --- a/drivers/acpi/acpica/utosi.c +++ b/drivers/acpi/acpica/utosi.c @@ -349,7 +349,8 @@ acpi_status acpi_ut_osi_implementation(struct acpi_walk_state * walk_state) return_value = 0; status = acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER); if (ACPI_FAILURE(status)) { - return (status); + acpi_ut_remove_reference(return_desc); + return_ACPI_STATUS(status); } /* Lookup the interface in the global _OSI list */ diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c index 9a7f0e3..11115bb 100644 --- a/drivers/bcma/driver_mips.c +++ b/drivers/bcma/driver_mips.c @@ -21,7 +21,7 @@ #include #include -static const char *part_probes[] = { "bcm47xxpart", NULL }; +static const char * const part_probes[] = { "bcm47xxpart", NULL }; static struct physmap_flash_data bcma_pflash_data = { .part_probe_types = part_probes, diff --git a/drivers/block/Makefile b/drivers/block/Makefile index a3b4023..ca07399 100644 --- a/drivers/block/Makefile +++ b/drivers/block/Makefile @@ -42,4 +42,5 @@ obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx/ obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/ +nvme-y := nvme-core.o nvme-scsi.o swim_mod-y := swim.o swim_asm.o diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c new file mode 100644 index 0000000..8efdfaa --- /dev/null +++ b/drivers/block/nvme-core.c @@ -0,0 +1,2073 @@ +/* + * NVM Express device driver + * Copyright (c) 2011, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
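The exfldio.c change keeps a full 64-bit datum from being written into a caller buffer smaller than 8 bytes: the datum is read into a local u64 and only buffer_length bytes are copied out. The same pattern in plain C, with a stand-in for the field read:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* stand-in for acpi_ex_field_datum_io(..., ACPI_READ) */
    static uint64_t read_field_datum(void)
    {
        return 0x1122334455667788ULL;
    }

    static void extract_field(void *buffer, size_t buffer_length)
    {
        if (buffer_length >= sizeof(uint64_t)) {
            *(uint64_t *)buffer = read_field_datum();
        } else {
            uint64_t raw_datum = read_field_datum();

            /* copy only what fits in the caller's buffer */
            memcpy(buffer, &raw_datum, buffer_length);
        }
    }

    int main(void)
    {
        unsigned char small[3] = { 0 };

        extract_field(small, sizeof(small));
        printf("%02x %02x %02x\n", small[0], small[1], small[2]);
        return 0;
    }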
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define NVME_Q_DEPTH 1024 +#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command)) +#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion)) +#define NVME_MINORS 64 +#define ADMIN_TIMEOUT (60 * HZ) + +static int nvme_major; +module_param(nvme_major, int, 0); + +static int use_threaded_interrupts; +module_param(use_threaded_interrupts, int, 0); + +static DEFINE_SPINLOCK(dev_list_lock); +static LIST_HEAD(dev_list); +static struct task_struct *nvme_thread; + +/* + * An NVM Express queue. Each device has at least two (one for admin + * commands and one for I/O commands). + */ +struct nvme_queue { + struct device *q_dmadev; + struct nvme_dev *dev; + spinlock_t q_lock; + struct nvme_command *sq_cmds; + volatile struct nvme_completion *cqes; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + wait_queue_head_t sq_full; + wait_queue_t sq_cong_wait; + struct bio_list sq_cong; + u32 __iomem *q_db; + u16 q_depth; + u16 cq_vector; + u16 sq_head; + u16 sq_tail; + u16 cq_head; + u16 cq_phase; + unsigned long cmdid_data[]; +}; + +/* + * Check we didin't inadvertently grow the command struct + */ +static inline void _nvme_check_size(void) +{ + BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64); + BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64); + BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64); + BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64); + BUILD_BUG_ON(sizeof(struct nvme_features) != 64); + BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64); + BUILD_BUG_ON(sizeof(struct nvme_command) != 64); + BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096); + BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096); + BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64); + BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512); +} + +typedef void (*nvme_completion_fn)(struct nvme_dev *, void *, + struct nvme_completion *); + +struct nvme_cmd_info { + nvme_completion_fn fn; + void *ctx; + unsigned long timeout; +}; + +static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq) +{ + return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)]; +} + +/** + * alloc_cmdid() - Allocate a Command ID + * @nvmeq: The queue that will be used for this command + * @ctx: A pointer that will be passed to the handler + * @handler: The function to call on completion + * + * Allocate a Command ID for a queue. The data passed in will + * be passed to the completion handler. This is implemented by using + * the bottom two bits of the ctx pointer to store the handler ID. + * Passing in a pointer that's not 4-byte aligned will cause a BUG. + * We can change this if it becomes a problem. + * + * May be called with local interrupts disabled and the q_lock held, + * or with interrupts enabled and no locks held. 
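Command IDs live in a bitmap at the start of the per-queue cmdid_data[] area, with the nvme_cmd_info array placed right after it; allocating an ID means claiming a clear bit below the queue depth, as documented above. A simplified, single-threaded user-space model of that allocator:

    #include <stdio.h>

    #define Q_DEPTH         16
    #define BITS_PER_LONG   (8 * sizeof(unsigned long))
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    static unsigned long cmdid_data[BITS_TO_LONGS(Q_DEPTH)];

    static int alloc_cmdid(void)
    {
        int cmdid;

        for (cmdid = 0; cmdid < Q_DEPTH - 1; cmdid++) {
            unsigned long mask = 1UL << (cmdid % BITS_PER_LONG);
            unsigned long *word = &cmdid_data[cmdid / BITS_PER_LONG];

            if (!(*word & mask)) {
                *word |= mask;  /* claim it */
                return cmdid;
            }
        }
        return -1;              /* queue full; the driver returns -EBUSY here */
    }

    static void free_cmdid(int cmdid)
    {
        cmdid_data[cmdid / BITS_PER_LONG] &= ~(1UL << (cmdid % BITS_PER_LONG));
    }

    int main(void)
    {
        int a = alloc_cmdid(), b = alloc_cmdid();

        printf("allocated %d and %d\n", a, b);
        free_cmdid(a);
        printf("after free, next is %d\n", alloc_cmdid());
        return 0;
    }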
+ */ +static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx, + nvme_completion_fn handler, unsigned timeout) +{ + int depth = nvmeq->q_depth - 1; + struct nvme_cmd_info *info = nvme_cmd_info(nvmeq); + int cmdid; + + do { + cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth); + if (cmdid >= depth) + return -EBUSY; + } while (test_and_set_bit(cmdid, nvmeq->cmdid_data)); + + info[cmdid].fn = handler; + info[cmdid].ctx = ctx; + info[cmdid].timeout = jiffies + timeout; + return cmdid; +} + +static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx, + nvme_completion_fn handler, unsigned timeout) +{ + int cmdid; + wait_event_killable(nvmeq->sq_full, + (cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0); + return (cmdid < 0) ? -EINTR : cmdid; +} + +/* Special values must be less than 0x1000 */ +#define CMD_CTX_BASE ((void *)POISON_POINTER_DELTA) +#define CMD_CTX_CANCELLED (0x30C + CMD_CTX_BASE) +#define CMD_CTX_COMPLETED (0x310 + CMD_CTX_BASE) +#define CMD_CTX_INVALID (0x314 + CMD_CTX_BASE) +#define CMD_CTX_FLUSH (0x318 + CMD_CTX_BASE) + +static void special_completion(struct nvme_dev *dev, void *ctx, + struct nvme_completion *cqe) +{ + if (ctx == CMD_CTX_CANCELLED) + return; + if (ctx == CMD_CTX_FLUSH) + return; + if (ctx == CMD_CTX_COMPLETED) { + dev_warn(&dev->pci_dev->dev, + "completed id %d twice on queue %d\n", + cqe->command_id, le16_to_cpup(&cqe->sq_id)); + return; + } + if (ctx == CMD_CTX_INVALID) { + dev_warn(&dev->pci_dev->dev, + "invalid id %d completed on queue %d\n", + cqe->command_id, le16_to_cpup(&cqe->sq_id)); + return; + } + + dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx); +} + +/* + * Called with local interrupts disabled and the q_lock held. May not sleep. + */ +static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid, + nvme_completion_fn *fn) +{ + void *ctx; + struct nvme_cmd_info *info = nvme_cmd_info(nvmeq); + + if (cmdid >= nvmeq->q_depth) { + *fn = special_completion; + return CMD_CTX_INVALID; + } + if (fn) + *fn = info[cmdid].fn; + ctx = info[cmdid].ctx; + info[cmdid].fn = special_completion; + info[cmdid].ctx = CMD_CTX_COMPLETED; + clear_bit(cmdid, nvmeq->cmdid_data); + wake_up(&nvmeq->sq_full); + return ctx; +} + +static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid, + nvme_completion_fn *fn) +{ + void *ctx; + struct nvme_cmd_info *info = nvme_cmd_info(nvmeq); + if (fn) + *fn = info[cmdid].fn; + ctx = info[cmdid].ctx; + info[cmdid].fn = special_completion; + info[cmdid].ctx = CMD_CTX_CANCELLED; + return ctx; +} + +struct nvme_queue *get_nvmeq(struct nvme_dev *dev) +{ + return dev->queues[get_cpu() + 1]; +} + +void put_nvmeq(struct nvme_queue *nvmeq) +{ + put_cpu(); +} + +/** + * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell + * @nvmeq: The queue to use + * @cmd: The command to send + * + * Safe to use from interrupt context + */ +static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd) +{ + unsigned long flags; + u16 tail; + spin_lock_irqsave(&nvmeq->q_lock, flags); + tail = nvmeq->sq_tail; + memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd)); + if (++tail == nvmeq->q_depth) + tail = 0; + writel(tail, nvmeq->q_db); + nvmeq->sq_tail = tail; + spin_unlock_irqrestore(&nvmeq->q_lock, flags); + + return 0; +} + +static __le64 **iod_list(struct nvme_iod *iod) +{ + return ((void *)iod) + iod->offset; +} + +/* + * Will slightly overestimate the number of pages needed. This is OK + * as it only leads to a small amount of wasted memory for the lifetime of + * the I/O. 
+ */ +static int nvme_npages(unsigned size) +{ + unsigned nprps = DIV_ROUND_UP(size + PAGE_SIZE, PAGE_SIZE); + return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8); +} + +static struct nvme_iod * +nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp) +{ + struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) + + sizeof(__le64 *) * nvme_npages(nbytes) + + sizeof(struct scatterlist) * nseg, gfp); + + if (iod) { + iod->offset = offsetof(struct nvme_iod, sg[nseg]); + iod->npages = -1; + iod->length = nbytes; + iod->nents = 0; + } + + return iod; +} + +void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod) +{ + const int last_prp = PAGE_SIZE / 8 - 1; + int i; + __le64 **list = iod_list(iod); + dma_addr_t prp_dma = iod->first_dma; + + if (iod->npages == 0) + dma_pool_free(dev->prp_small_pool, list[0], prp_dma); + for (i = 0; i < iod->npages; i++) { + __le64 *prp_list = list[i]; + dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]); + dma_pool_free(dev->prp_page_pool, prp_list, prp_dma); + prp_dma = next_prp_dma; + } + kfree(iod); +} + +static void bio_completion(struct nvme_dev *dev, void *ctx, + struct nvme_completion *cqe) +{ + struct nvme_iod *iod = ctx; + struct bio *bio = iod->private; + u16 status = le16_to_cpup(&cqe->status) >> 1; + + if (iod->nents) + dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents, + bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); + nvme_free_iod(dev, iod); + if (status) + bio_endio(bio, -EIO); + else + bio_endio(bio, 0); +} + +/* length is in bytes. gfp flags indicates whether we may sleep. */ +int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd, + struct nvme_iod *iod, int total_len, gfp_t gfp) +{ + struct dma_pool *pool; + int length = total_len; + struct scatterlist *sg = iod->sg; + int dma_len = sg_dma_len(sg); + u64 dma_addr = sg_dma_address(sg); + int offset = offset_in_page(dma_addr); + __le64 *prp_list; + __le64 **list = iod_list(iod); + dma_addr_t prp_dma; + int nprps, i; + + cmd->prp1 = cpu_to_le64(dma_addr); + length -= (PAGE_SIZE - offset); + if (length <= 0) + return total_len; + + dma_len -= (PAGE_SIZE - offset); + if (dma_len) { + dma_addr += (PAGE_SIZE - offset); + } else { + sg = sg_next(sg); + dma_addr = sg_dma_address(sg); + dma_len = sg_dma_len(sg); + } + + if (length <= PAGE_SIZE) { + cmd->prp2 = cpu_to_le64(dma_addr); + return total_len; + } + + nprps = DIV_ROUND_UP(length, PAGE_SIZE); + if (nprps <= (256 / 8)) { + pool = dev->prp_small_pool; + iod->npages = 0; + } else { + pool = dev->prp_page_pool; + iod->npages = 1; + } + + prp_list = dma_pool_alloc(pool, gfp, &prp_dma); + if (!prp_list) { + cmd->prp2 = cpu_to_le64(dma_addr); + iod->npages = -1; + return (total_len - length) + PAGE_SIZE; + } + list[0] = prp_list; + iod->first_dma = prp_dma; + cmd->prp2 = cpu_to_le64(prp_dma); + i = 0; + for (;;) { + if (i == PAGE_SIZE / 8) { + __le64 *old_prp_list = prp_list; + prp_list = dma_pool_alloc(pool, gfp, &prp_dma); + if (!prp_list) + return total_len - length; + list[iod->npages++] = prp_list; + prp_list[0] = old_prp_list[i - 1]; + old_prp_list[i - 1] = cpu_to_le64(prp_dma); + i = 1; + } + prp_list[i++] = cpu_to_le64(dma_addr); + dma_len -= PAGE_SIZE; + dma_addr += PAGE_SIZE; + length -= PAGE_SIZE; + if (length <= 0) + break; + if (dma_len > 0) + continue; + BUG_ON(dma_len < 0); + sg = sg_next(sg); + dma_addr = sg_dma_address(sg); + dma_len = sg_dma_len(sg); + } + + return total_len; +} + +struct nvme_bio_pair { + struct bio b1, b2, *parent; + struct bio_vec *bv1, *bv2; + int err; + atomic_t cnt; +}; + 
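nvme_setup_prps() puts the first data page directly into PRP1; once a transfer spans more than two pages, PRP2 points at a PRP list whose final slot chains to the next list page. The sizing done by nvme_npages() can be reproduced with a small user-space calculation, assuming 4 KiB pages and 8-byte PRP entries:

    #include <stdio.h>

    #define PAGE_SIZE        4096u
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* worst-case PRP entries for a transfer that may start mid-page */
    static unsigned nvme_nprps(unsigned size)
    {
        return DIV_ROUND_UP(size + PAGE_SIZE, PAGE_SIZE);
    }

    /* PRP list pages needed; each page holds PAGE_SIZE/8 entries, the last
     * of which is reserved for chaining to the next list page */
    static unsigned nvme_npages(unsigned size)
    {
        return DIV_ROUND_UP(8 * nvme_nprps(size), PAGE_SIZE - 8);
    }

    int main(void)
    {
        unsigned sizes[] = { 4096, 65536, 1048576 }, i;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
            printf("%7u bytes -> %3u PRPs, %u list page(s)\n",
                   sizes[i], nvme_nprps(sizes[i]), nvme_npages(sizes[i]));
        return 0;
    }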
+static void nvme_bio_pair_endio(struct bio *bio, int err) +{ + struct nvme_bio_pair *bp = bio->bi_private; + + if (err) + bp->err = err; + + if (atomic_dec_and_test(&bp->cnt)) { + bio_endio(bp->parent, bp->err); + if (bp->bv1) + kfree(bp->bv1); + if (bp->bv2) + kfree(bp->bv2); + kfree(bp); + } +} + +static struct nvme_bio_pair *nvme_bio_split(struct bio *bio, int idx, + int len, int offset) +{ + struct nvme_bio_pair *bp; + + BUG_ON(len > bio->bi_size); + BUG_ON(idx > bio->bi_vcnt); + + bp = kmalloc(sizeof(*bp), GFP_ATOMIC); + if (!bp) + return NULL; + bp->err = 0; + + bp->b1 = *bio; + bp->b2 = *bio; + + bp->b1.bi_size = len; + bp->b2.bi_size -= len; + bp->b1.bi_vcnt = idx; + bp->b2.bi_idx = idx; + bp->b2.bi_sector += len >> 9; + + if (offset) { + bp->bv1 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec), + GFP_ATOMIC); + if (!bp->bv1) + goto split_fail_1; + + bp->bv2 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec), + GFP_ATOMIC); + if (!bp->bv2) + goto split_fail_2; + + memcpy(bp->bv1, bio->bi_io_vec, + bio->bi_max_vecs * sizeof(struct bio_vec)); + memcpy(bp->bv2, bio->bi_io_vec, + bio->bi_max_vecs * sizeof(struct bio_vec)); + + bp->b1.bi_io_vec = bp->bv1; + bp->b2.bi_io_vec = bp->bv2; + bp->b2.bi_io_vec[idx].bv_offset += offset; + bp->b2.bi_io_vec[idx].bv_len -= offset; + bp->b1.bi_io_vec[idx].bv_len = offset; + bp->b1.bi_vcnt++; + } else + bp->bv1 = bp->bv2 = NULL; + + bp->b1.bi_private = bp; + bp->b2.bi_private = bp; + + bp->b1.bi_end_io = nvme_bio_pair_endio; + bp->b2.bi_end_io = nvme_bio_pair_endio; + + bp->parent = bio; + atomic_set(&bp->cnt, 2); + + return bp; + + split_fail_2: + kfree(bp->bv1); + split_fail_1: + kfree(bp); + return NULL; +} + +static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq, + int idx, int len, int offset) +{ + struct nvme_bio_pair *bp = nvme_bio_split(bio, idx, len, offset); + if (!bp) + return -ENOMEM; + + if (bio_list_empty(&nvmeq->sq_cong)) + add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait); + bio_list_add(&nvmeq->sq_cong, &bp->b1); + bio_list_add(&nvmeq->sq_cong, &bp->b2); + + return 0; +} + +/* NVMe scatterlists require no holes in the virtual address */ +#define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2) ((vec2)->bv_offset || \ + (((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE)) + +static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod, + struct bio *bio, enum dma_data_direction dma_dir, int psegs) +{ + struct bio_vec *bvec, *bvprv = NULL; + struct scatterlist *sg = NULL; + int i, length = 0, nsegs = 0, split_len = bio->bi_size; + + if (nvmeq->dev->stripe_size) + split_len = nvmeq->dev->stripe_size - + ((bio->bi_sector << 9) & (nvmeq->dev->stripe_size - 1)); + + sg_init_table(iod->sg, psegs); + bio_for_each_segment(bvec, bio, i) { + if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) { + sg->length += bvec->bv_len; + } else { + if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec)) + return nvme_split_and_submit(bio, nvmeq, i, + length, 0); + + sg = sg ? 
sg + 1 : iod->sg; + sg_set_page(sg, bvec->bv_page, bvec->bv_len, + bvec->bv_offset); + nsegs++; + } + + if (split_len - length < bvec->bv_len) + return nvme_split_and_submit(bio, nvmeq, i, split_len, + split_len - length); + length += bvec->bv_len; + bvprv = bvec; + } + iod->nents = nsegs; + sg_mark_end(sg); + if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0) + return -ENOMEM; + + BUG_ON(length != bio->bi_size); + return length; +} + +/* + * We reuse the small pool to allocate the 16-byte range here as it is not + * worth having a special pool for these or additional cases to handle freeing + * the iod. + */ +static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns, + struct bio *bio, struct nvme_iod *iod, int cmdid) +{ + struct nvme_dsm_range *range; + struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail]; + + range = dma_pool_alloc(nvmeq->dev->prp_small_pool, GFP_ATOMIC, + &iod->first_dma); + if (!range) + return -ENOMEM; + + iod_list(iod)[0] = (__le64 *)range; + iod->npages = 0; + + range->cattr = cpu_to_le32(0); + range->nlb = cpu_to_le32(bio->bi_size >> ns->lba_shift); + range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector)); + + memset(cmnd, 0, sizeof(*cmnd)); + cmnd->dsm.opcode = nvme_cmd_dsm; + cmnd->dsm.command_id = cmdid; + cmnd->dsm.nsid = cpu_to_le32(ns->ns_id); + cmnd->dsm.prp1 = cpu_to_le64(iod->first_dma); + cmnd->dsm.nr = 0; + cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD); + + if (++nvmeq->sq_tail == nvmeq->q_depth) + nvmeq->sq_tail = 0; + writel(nvmeq->sq_tail, nvmeq->q_db); + + return 0; +} + +static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns, + int cmdid) +{ + struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail]; + + memset(cmnd, 0, sizeof(*cmnd)); + cmnd->common.opcode = nvme_cmd_flush; + cmnd->common.command_id = cmdid; + cmnd->common.nsid = cpu_to_le32(ns->ns_id); + + if (++nvmeq->sq_tail == nvmeq->q_depth) + nvmeq->sq_tail = 0; + writel(nvmeq->sq_tail, nvmeq->q_db); + + return 0; +} + +int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns) +{ + int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH, + special_completion, NVME_IO_TIMEOUT); + if (unlikely(cmdid < 0)) + return cmdid; + + return nvme_submit_flush(nvmeq, ns, cmdid); +} + +/* + * Called with local interrupts disabled and the q_lock held. May not sleep. 
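nvme_map_bio() must split a bio whenever two adjacent bio_vecs would leave a hole in the virtual mapping: the earlier vec has to end exactly on a page boundary and the later one has to start at page offset 0, since the PRP scheme cannot describe a gap. The check in isolation:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    struct vec { unsigned bv_offset, bv_len; };

    /* mirrors BIOVEC_NOT_VIRT_MERGEABLE(): true if a split is required */
    static bool not_virt_mergeable(const struct vec *v1, const struct vec *v2)
    {
        return v2->bv_offset || ((v1->bv_offset + v1->bv_len) % PAGE_SIZE);
    }

    int main(void)
    {
        struct vec full    = { .bv_offset = 0, .bv_len = PAGE_SIZE };
        struct vec partial = { .bv_offset = 0, .bv_len = 512 };

        printf("full page then full page: split? %d\n",
               not_virt_mergeable(&full, &full));      /* 0: mergeable  */
        printf("partial vec then full page: split? %d\n",
               not_virt_mergeable(&partial, &full));   /* 1: must split */
        return 0;
    }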
+ */ +static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns, + struct bio *bio) +{ + struct nvme_command *cmnd; + struct nvme_iod *iod; + enum dma_data_direction dma_dir; + int cmdid, length, result = -ENOMEM; + u16 control; + u32 dsmgmt; + int psegs = bio_phys_segments(ns->queue, bio); + + if ((bio->bi_rw & REQ_FLUSH) && psegs) { + result = nvme_submit_flush_data(nvmeq, ns); + if (result) + return result; + } + + iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC); + if (!iod) + goto nomem; + iod->private = bio; + + result = -EBUSY; + cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT); + if (unlikely(cmdid < 0)) + goto free_iod; + + if (bio->bi_rw & REQ_DISCARD) { + result = nvme_submit_discard(nvmeq, ns, bio, iod, cmdid); + if (result) + goto free_cmdid; + return result; + } + if ((bio->bi_rw & REQ_FLUSH) && !psegs) + return nvme_submit_flush(nvmeq, ns, cmdid); + + control = 0; + if (bio->bi_rw & REQ_FUA) + control |= NVME_RW_FUA; + if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD)) + control |= NVME_RW_LR; + + dsmgmt = 0; + if (bio->bi_rw & REQ_RAHEAD) + dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH; + + cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail]; + + memset(cmnd, 0, sizeof(*cmnd)); + if (bio_data_dir(bio)) { + cmnd->rw.opcode = nvme_cmd_write; + dma_dir = DMA_TO_DEVICE; + } else { + cmnd->rw.opcode = nvme_cmd_read; + dma_dir = DMA_FROM_DEVICE; + } + + result = nvme_map_bio(nvmeq, iod, bio, dma_dir, psegs); + if (result <= 0) + goto free_cmdid; + length = result; + + cmnd->rw.command_id = cmdid; + cmnd->rw.nsid = cpu_to_le32(ns->ns_id); + length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length, + GFP_ATOMIC); + cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector)); + cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1); + cmnd->rw.control = cpu_to_le16(control); + cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt); + + if (++nvmeq->sq_tail == nvmeq->q_depth) + nvmeq->sq_tail = 0; + writel(nvmeq->sq_tail, nvmeq->q_db); + + return 0; + + free_cmdid: + free_cmdid(nvmeq, cmdid, NULL); + free_iod: + nvme_free_iod(nvmeq->dev, iod); + nomem: + return result; +} + +static void nvme_make_request(struct request_queue *q, struct bio *bio) +{ + struct nvme_ns *ns = q->queuedata; + struct nvme_queue *nvmeq = get_nvmeq(ns->dev); + int result = -EBUSY; + + spin_lock_irq(&nvmeq->q_lock); + if (bio_list_empty(&nvmeq->sq_cong)) + result = nvme_submit_bio_queue(nvmeq, ns, bio); + if (unlikely(result)) { + if (bio_list_empty(&nvmeq->sq_cong)) + add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait); + bio_list_add(&nvmeq->sq_cong, bio); + } + + spin_unlock_irq(&nvmeq->q_lock); + put_nvmeq(nvmeq); +} + +static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq) +{ + u16 head, phase; + + head = nvmeq->cq_head; + phase = nvmeq->cq_phase; + + for (;;) { + void *ctx; + nvme_completion_fn fn; + struct nvme_completion cqe = nvmeq->cqes[head]; + if ((le16_to_cpu(cqe.status) & 1) != phase) + break; + nvmeq->sq_head = le16_to_cpu(cqe.sq_head); + if (++head == nvmeq->q_depth) { + head = 0; + phase = !phase; + } + + ctx = free_cmdid(nvmeq, cqe.command_id, &fn); + fn(nvmeq->dev, ctx, &cqe); + } + + /* If the controller ignores the cq head doorbell and continuously + * writes to the queue, it is theoretically possible to wrap around + * the queue twice and mistakenly return IRQ_NONE. Linux only + * requires that 0.1% of your interrupts are handled, so this isn't + * a big problem. 
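nvme_process_cq() recognises a new completion by comparing bit 0 of its status field with the queue's expected phase, which flips every time the head index wraps. A user-space model of that consumer loop over a fake completion ring:

    #include <stdio.h>
    #include <stdint.h>

    #define Q_DEPTH 4

    struct completion { uint16_t command_id; uint16_t status; };

    int main(void)
    {
        /* three entries written by the "device" in phase 1, one still stale */
        struct completion cqes[Q_DEPTH] = {
            { .command_id = 7, .status = 0x0001 },
            { .command_id = 8, .status = 0x0001 },
            { .command_id = 9, .status = 0x0001 },
            { .command_id = 0, .status = 0x0000 },  /* old phase: stop here */
        };
        uint16_t head = 0, phase = 1;

        for (;;) {
            struct completion cqe = cqes[head];

            if ((cqe.status & 1) != phase)
                break;                      /* not yet written by the device */
            printf("completed command %u\n", (unsigned)cqe.command_id);
            if (++head == Q_DEPTH) {
                head = 0;
                phase = !phase;             /* wrapped: expect the other phase */
            }
        }
        printf("new head=%u phase=%u\n", (unsigned)head, (unsigned)phase);
        return 0;
    }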
+ */ + if (head == nvmeq->cq_head && phase == nvmeq->cq_phase) + return IRQ_NONE; + + writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride)); + nvmeq->cq_head = head; + nvmeq->cq_phase = phase; + + return IRQ_HANDLED; +} + +static irqreturn_t nvme_irq(int irq, void *data) +{ + irqreturn_t result; + struct nvme_queue *nvmeq = data; + spin_lock(&nvmeq->q_lock); + result = nvme_process_cq(nvmeq); + spin_unlock(&nvmeq->q_lock); + return result; +} + +static irqreturn_t nvme_irq_check(int irq, void *data) +{ + struct nvme_queue *nvmeq = data; + struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head]; + if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase) + return IRQ_NONE; + return IRQ_WAKE_THREAD; +} + +static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid) +{ + spin_lock_irq(&nvmeq->q_lock); + cancel_cmdid(nvmeq, cmdid, NULL); + spin_unlock_irq(&nvmeq->q_lock); +} + +struct sync_cmd_info { + struct task_struct *task; + u32 result; + int status; +}; + +static void sync_completion(struct nvme_dev *dev, void *ctx, + struct nvme_completion *cqe) +{ + struct sync_cmd_info *cmdinfo = ctx; + cmdinfo->result = le32_to_cpup(&cqe->result); + cmdinfo->status = le16_to_cpup(&cqe->status) >> 1; + wake_up_process(cmdinfo->task); +} + +/* + * Returns 0 on success. If the result is negative, it's a Linux error code; + * if the result is positive, it's an NVM Express status code + */ +int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd, + u32 *result, unsigned timeout) +{ + int cmdid; + struct sync_cmd_info cmdinfo; + + cmdinfo.task = current; + cmdinfo.status = -EINTR; + + cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion, + timeout); + if (cmdid < 0) + return cmdid; + cmd->common.command_id = cmdid; + + set_current_state(TASK_KILLABLE); + nvme_submit_cmd(nvmeq, cmd); + schedule_timeout(timeout); + + if (cmdinfo.status == -EINTR) { + nvme_abort_command(nvmeq, cmdid); + return -EINTR; + } + + if (result) + *result = cmdinfo.result; + + return cmdinfo.status; +} + +int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd, + u32 *result) +{ + return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT); +} + +static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id) +{ + int status; + struct nvme_command c; + + memset(&c, 0, sizeof(c)); + c.delete_queue.opcode = opcode; + c.delete_queue.qid = cpu_to_le16(id); + + status = nvme_submit_admin_cmd(dev, &c, NULL); + if (status) + return -EIO; + return 0; +} + +static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid, + struct nvme_queue *nvmeq) +{ + int status; + struct nvme_command c; + int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED; + + memset(&c, 0, sizeof(c)); + c.create_cq.opcode = nvme_admin_create_cq; + c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr); + c.create_cq.cqid = cpu_to_le16(qid); + c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); + c.create_cq.cq_flags = cpu_to_le16(flags); + c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector); + + status = nvme_submit_admin_cmd(dev, &c, NULL); + if (status) + return -EIO; + return 0; +} + +static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid, + struct nvme_queue *nvmeq) +{ + int status; + struct nvme_command c; + int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM; + + memset(&c, 0, sizeof(c)); + c.create_sq.opcode = nvme_admin_create_sq; + c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr); + c.create_sq.sqid = cpu_to_le16(qid); + c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); + 
c.create_sq.sq_flags = cpu_to_le16(flags); + c.create_sq.cqid = cpu_to_le16(qid); + + status = nvme_submit_admin_cmd(dev, &c, NULL); + if (status) + return -EIO; + return 0; +} + +static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid) +{ + return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid); +} + +static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid) +{ + return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid); +} + +int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns, + dma_addr_t dma_addr) +{ + struct nvme_command c; + + memset(&c, 0, sizeof(c)); + c.identify.opcode = nvme_admin_identify; + c.identify.nsid = cpu_to_le32(nsid); + c.identify.prp1 = cpu_to_le64(dma_addr); + c.identify.cns = cpu_to_le32(cns); + + return nvme_submit_admin_cmd(dev, &c, NULL); +} + +int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid, + dma_addr_t dma_addr, u32 *result) +{ + struct nvme_command c; + + memset(&c, 0, sizeof(c)); + c.features.opcode = nvme_admin_get_features; + c.features.nsid = cpu_to_le32(nsid); + c.features.prp1 = cpu_to_le64(dma_addr); + c.features.fid = cpu_to_le32(fid); + + return nvme_submit_admin_cmd(dev, &c, result); +} + +int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11, + dma_addr_t dma_addr, u32 *result) +{ + struct nvme_command c; + + memset(&c, 0, sizeof(c)); + c.features.opcode = nvme_admin_set_features; + c.features.prp1 = cpu_to_le64(dma_addr); + c.features.fid = cpu_to_le32(fid); + c.features.dword11 = cpu_to_le32(dword11); + + return nvme_submit_admin_cmd(dev, &c, result); +} + +/** + * nvme_cancel_ios - Cancel outstanding I/Os + * @queue: The queue to cancel I/Os on + * @timeout: True to only cancel I/Os which have timed out + */ +static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout) +{ + int depth = nvmeq->q_depth - 1; + struct nvme_cmd_info *info = nvme_cmd_info(nvmeq); + unsigned long now = jiffies; + int cmdid; + + for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) { + void *ctx; + nvme_completion_fn fn; + static struct nvme_completion cqe = { + .status = cpu_to_le16(NVME_SC_ABORT_REQ << 1), + }; + + if (timeout && !time_after(now, info[cmdid].timeout)) + continue; + dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid); + ctx = cancel_cmdid(nvmeq, cmdid, &fn); + fn(nvmeq->dev, ctx, &cqe); + } +} + +static void nvme_free_queue_mem(struct nvme_queue *nvmeq) +{ + dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth), + (void *)nvmeq->cqes, nvmeq->cq_dma_addr); + dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth), + nvmeq->sq_cmds, nvmeq->sq_dma_addr); + kfree(nvmeq); +} + +static void nvme_free_queue(struct nvme_dev *dev, int qid) +{ + struct nvme_queue *nvmeq = dev->queues[qid]; + int vector = dev->entry[nvmeq->cq_vector].vector; + + spin_lock_irq(&nvmeq->q_lock); + nvme_cancel_ios(nvmeq, false); + while (bio_list_peek(&nvmeq->sq_cong)) { + struct bio *bio = bio_list_pop(&nvmeq->sq_cong); + bio_endio(bio, -EIO); + } + spin_unlock_irq(&nvmeq->q_lock); + + irq_set_affinity_hint(vector, NULL); + free_irq(vector, nvmeq); + + /* Don't tell the adapter to delete the admin queue */ + if (qid) { + adapter_delete_sq(dev, qid); + adapter_delete_cq(dev, qid); + } + + nvme_free_queue_mem(nvmeq); +} + +static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, + int depth, int vector) +{ + struct device *dmadev = &dev->pci_dev->dev; + unsigned extra = DIV_ROUND_UP(depth, 8) + (depth * + sizeof(struct nvme_cmd_info)); + struct nvme_queue *nvmeq = 
kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL); + if (!nvmeq) + return NULL; + + nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth), + &nvmeq->cq_dma_addr, GFP_KERNEL); + if (!nvmeq->cqes) + goto free_nvmeq; + memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth)); + + nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth), + &nvmeq->sq_dma_addr, GFP_KERNEL); + if (!nvmeq->sq_cmds) + goto free_cqdma; + + nvmeq->q_dmadev = dmadev; + nvmeq->dev = dev; + spin_lock_init(&nvmeq->q_lock); + nvmeq->cq_head = 0; + nvmeq->cq_phase = 1; + init_waitqueue_head(&nvmeq->sq_full); + init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread); + bio_list_init(&nvmeq->sq_cong); + nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)]; + nvmeq->q_depth = depth; + nvmeq->cq_vector = vector; + + return nvmeq; + + free_cqdma: + dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes, + nvmeq->cq_dma_addr); + free_nvmeq: + kfree(nvmeq); + return NULL; +} + +static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq, + const char *name) +{ + if (use_threaded_interrupts) + return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector, + nvme_irq_check, nvme_irq, + IRQF_DISABLED | IRQF_SHARED, + name, nvmeq); + return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq, + IRQF_DISABLED | IRQF_SHARED, name, nvmeq); +} + +static struct nvme_queue *nvme_create_queue(struct nvme_dev *dev, int qid, + int cq_size, int vector) +{ + int result; + struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector); + + if (!nvmeq) + return ERR_PTR(-ENOMEM); + + result = adapter_alloc_cq(dev, qid, nvmeq); + if (result < 0) + goto free_nvmeq; + + result = adapter_alloc_sq(dev, qid, nvmeq); + if (result < 0) + goto release_cq; + + result = queue_request_irq(dev, nvmeq, "nvme"); + if (result < 0) + goto release_sq; + + return nvmeq; + + release_sq: + adapter_delete_sq(dev, qid); + release_cq: + adapter_delete_cq(dev, qid); + free_nvmeq: + dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth), + (void *)nvmeq->cqes, nvmeq->cq_dma_addr); + dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth), + nvmeq->sq_cmds, nvmeq->sq_dma_addr); + kfree(nvmeq); + return ERR_PTR(result); +} + +static int nvme_wait_ready(struct nvme_dev *dev, u64 cap, bool enabled) +{ + unsigned long timeout; + u32 bit = enabled ? NVME_CSTS_RDY : 0; + + timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies; + + while ((readl(&dev->bar->csts) & NVME_CSTS_RDY) != bit) { + msleep(100); + if (fatal_signal_pending(current)) + return -EINTR; + if (time_after(jiffies, timeout)) { + dev_err(&dev->pci_dev->dev, + "Device not ready; aborting initialisation\n"); + return -ENODEV; + } + } + + return 0; +} + +/* + * If the device has been passed off to us in an enabled state, just clear + * the enabled bit. The spec says we should set the 'shutdown notification + * bits', but doing so may cause the device to complete commands to the + * admin queue ... and we don't know what memory that might be pointing at! 
+ */ +static int nvme_disable_ctrl(struct nvme_dev *dev, u64 cap) +{ + u32 cc = readl(&dev->bar->cc); + + if (cc & NVME_CC_ENABLE) + writel(cc & ~NVME_CC_ENABLE, &dev->bar->cc); + return nvme_wait_ready(dev, cap, false); +} + +static int nvme_enable_ctrl(struct nvme_dev *dev, u64 cap) +{ + return nvme_wait_ready(dev, cap, true); +} + +static int nvme_configure_admin_queue(struct nvme_dev *dev) +{ + int result; + u32 aqa; + u64 cap = readq(&dev->bar->cap); + struct nvme_queue *nvmeq; + + dev->dbs = ((void __iomem *)dev->bar) + 4096; + dev->db_stride = NVME_CAP_STRIDE(cap); + + result = nvme_disable_ctrl(dev, cap); + if (result < 0) + return result; + + nvmeq = nvme_alloc_queue(dev, 0, 64, 0); + if (!nvmeq) + return -ENOMEM; + + aqa = nvmeq->q_depth - 1; + aqa |= aqa << 16; + + dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM; + dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT; + dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE; + dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES; + + writel(aqa, &dev->bar->aqa); + writeq(nvmeq->sq_dma_addr, &dev->bar->asq); + writeq(nvmeq->cq_dma_addr, &dev->bar->acq); + writel(dev->ctrl_config, &dev->bar->cc); + + result = nvme_enable_ctrl(dev, cap); + if (result) + goto free_q; + + result = queue_request_irq(dev, nvmeq, "nvme admin"); + if (result) + goto free_q; + + dev->queues[0] = nvmeq; + return result; + + free_q: + nvme_free_queue_mem(nvmeq); + return result; +} + +struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write, + unsigned long addr, unsigned length) +{ + int i, err, count, nents, offset; + struct scatterlist *sg; + struct page **pages; + struct nvme_iod *iod; + + if (addr & 3) + return ERR_PTR(-EINVAL); + if (!length) + return ERR_PTR(-EINVAL); + + offset = offset_in_page(addr); + count = DIV_ROUND_UP(offset + length, PAGE_SIZE); + pages = kcalloc(count, sizeof(*pages), GFP_KERNEL); + if (!pages) + return ERR_PTR(-ENOMEM); + + err = get_user_pages_fast(addr, count, 1, pages); + if (err < count) { + count = err; + err = -EFAULT; + goto put_pages; + } + + iod = nvme_alloc_iod(count, length, GFP_KERNEL); + sg = iod->sg; + sg_init_table(sg, count); + for (i = 0; i < count; i++) { + sg_set_page(&sg[i], pages[i], + min_t(int, length, PAGE_SIZE - offset), offset); + length -= (PAGE_SIZE - offset); + offset = 0; + } + sg_mark_end(&sg[i - 1]); + iod->nents = count; + + err = -ENOMEM; + nents = dma_map_sg(&dev->pci_dev->dev, sg, count, + write ? DMA_TO_DEVICE : DMA_FROM_DEVICE); + if (!nents) + goto free_iod; + + kfree(pages); + return iod; + + free_iod: + kfree(iod); + put_pages: + for (i = 0; i < count; i++) + put_page(pages[i]); + kfree(pages); + return ERR_PTR(err); +} + +void nvme_unmap_user_pages(struct nvme_dev *dev, int write, + struct nvme_iod *iod) +{ + int i; + + dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents, + write ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE); + + for (i = 0; i < iod->nents; i++) + put_page(sg_page(&iod->sg[i])); +} + +static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) +{ + struct nvme_dev *dev = ns->dev; + struct nvme_queue *nvmeq; + struct nvme_user_io io; + struct nvme_command c; + unsigned length, meta_len; + int status, i; + struct nvme_iod *iod, *meta_iod = NULL; + dma_addr_t meta_dma_addr; + void *meta, *uninitialized_var(meta_mem); + + if (copy_from_user(&io, uio, sizeof(io))) + return -EFAULT; + length = (io.nblocks + 1) << ns->lba_shift; + meta_len = (io.nblocks + 1) * ns->ms; + + if (meta_len && ((io.metadata & 3) || !io.metadata)) + return -EINVAL; + + switch (io.opcode) { + case nvme_cmd_write: + case nvme_cmd_read: + case nvme_cmd_compare: + iod = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length); + break; + default: + return -EINVAL; + } + + if (IS_ERR(iod)) + return PTR_ERR(iod); + + memset(&c, 0, sizeof(c)); + c.rw.opcode = io.opcode; + c.rw.flags = io.flags; + c.rw.nsid = cpu_to_le32(ns->ns_id); + c.rw.slba = cpu_to_le64(io.slba); + c.rw.length = cpu_to_le16(io.nblocks); + c.rw.control = cpu_to_le16(io.control); + c.rw.dsmgmt = cpu_to_le32(io.dsmgmt); + c.rw.reftag = cpu_to_le32(io.reftag); + c.rw.apptag = cpu_to_le16(io.apptag); + c.rw.appmask = cpu_to_le16(io.appmask); + + if (meta_len) { + meta_iod = nvme_map_user_pages(dev, io.opcode & 1, io.metadata, meta_len); + if (IS_ERR(meta_iod)) { + status = PTR_ERR(meta_iod); + meta_iod = NULL; + goto unmap; + } + + meta_mem = dma_alloc_coherent(&dev->pci_dev->dev, meta_len, + &meta_dma_addr, GFP_KERNEL); + if (!meta_mem) { + status = -ENOMEM; + goto unmap; + } + + if (io.opcode & 1) { + int meta_offset = 0; + + for (i = 0; i < meta_iod->nents; i++) { + meta = kmap_atomic(sg_page(&meta_iod->sg[i])) + + meta_iod->sg[i].offset; + memcpy(meta_mem + meta_offset, meta, + meta_iod->sg[i].length); + kunmap_atomic(meta); + meta_offset += meta_iod->sg[i].length; + } + } + + c.rw.metadata = cpu_to_le64(meta_dma_addr); + } + + length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL); + + nvmeq = get_nvmeq(dev); + /* + * Since nvme_submit_sync_cmd sleeps, we can't keep preemption + * disabled. We may be preempted at any point, and be rescheduled + * to a different CPU. That will cause cacheline bouncing, but no + * additional races since q_lock already protects against other CPUs. 
+ */ + put_nvmeq(nvmeq); + if (length != (io.nblocks + 1) << ns->lba_shift) + status = -ENOMEM; + else + status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT); + + if (meta_len) { + if (status == NVME_SC_SUCCESS && !(io.opcode & 1)) { + int meta_offset = 0; + + for (i = 0; i < meta_iod->nents; i++) { + meta = kmap_atomic(sg_page(&meta_iod->sg[i])) + + meta_iod->sg[i].offset; + memcpy(meta, meta_mem + meta_offset, + meta_iod->sg[i].length); + kunmap_atomic(meta); + meta_offset += meta_iod->sg[i].length; + } + } + + dma_free_coherent(&dev->pci_dev->dev, meta_len, meta_mem, + meta_dma_addr); + } + + unmap: + nvme_unmap_user_pages(dev, io.opcode & 1, iod); + nvme_free_iod(dev, iod); + + if (meta_iod) { + nvme_unmap_user_pages(dev, io.opcode & 1, meta_iod); + nvme_free_iod(dev, meta_iod); + } + + return status; +} + +static int nvme_user_admin_cmd(struct nvme_dev *dev, + struct nvme_admin_cmd __user *ucmd) +{ + struct nvme_admin_cmd cmd; + struct nvme_command c; + int status, length; + struct nvme_iod *uninitialized_var(iod); + unsigned timeout; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + if (copy_from_user(&cmd, ucmd, sizeof(cmd))) + return -EFAULT; + + memset(&c, 0, sizeof(c)); + c.common.opcode = cmd.opcode; + c.common.flags = cmd.flags; + c.common.nsid = cpu_to_le32(cmd.nsid); + c.common.cdw2[0] = cpu_to_le32(cmd.cdw2); + c.common.cdw2[1] = cpu_to_le32(cmd.cdw3); + c.common.cdw10[0] = cpu_to_le32(cmd.cdw10); + c.common.cdw10[1] = cpu_to_le32(cmd.cdw11); + c.common.cdw10[2] = cpu_to_le32(cmd.cdw12); + c.common.cdw10[3] = cpu_to_le32(cmd.cdw13); + c.common.cdw10[4] = cpu_to_le32(cmd.cdw14); + c.common.cdw10[5] = cpu_to_le32(cmd.cdw15); + + length = cmd.data_len; + if (cmd.data_len) { + iod = nvme_map_user_pages(dev, cmd.opcode & 1, cmd.addr, + length); + if (IS_ERR(iod)) + return PTR_ERR(iod); + length = nvme_setup_prps(dev, &c.common, iod, length, + GFP_KERNEL); + } + + timeout = cmd.timeout_ms ? 
msecs_to_jiffies(cmd.timeout_ms) : + ADMIN_TIMEOUT; + if (length != cmd.data_len) + status = -ENOMEM; + else + status = nvme_submit_sync_cmd(dev->queues[0], &c, &cmd.result, + timeout); + + if (cmd.data_len) { + nvme_unmap_user_pages(dev, cmd.opcode & 1, iod); + nvme_free_iod(dev, iod); + } + + if (!status && copy_to_user(&ucmd->result, &cmd.result, + sizeof(cmd.result))) + status = -EFAULT; + + return status; +} + +static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, + unsigned long arg) +{ + struct nvme_ns *ns = bdev->bd_disk->private_data; + + switch (cmd) { + case NVME_IOCTL_ID: + return ns->ns_id; + case NVME_IOCTL_ADMIN_CMD: + return nvme_user_admin_cmd(ns->dev, (void __user *)arg); + case NVME_IOCTL_SUBMIT_IO: + return nvme_submit_io(ns, (void __user *)arg); + case SG_GET_VERSION_NUM: + return nvme_sg_get_version_num((void __user *)arg); + case SG_IO: + return nvme_sg_io(ns, (void __user *)arg); + default: + return -ENOTTY; + } +} + +static const struct block_device_operations nvme_fops = { + .owner = THIS_MODULE, + .ioctl = nvme_ioctl, + .compat_ioctl = nvme_ioctl, +}; + +static void nvme_resubmit_bios(struct nvme_queue *nvmeq) +{ + while (bio_list_peek(&nvmeq->sq_cong)) { + struct bio *bio = bio_list_pop(&nvmeq->sq_cong); + struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data; + + if (bio_list_empty(&nvmeq->sq_cong)) + remove_wait_queue(&nvmeq->sq_full, + &nvmeq->sq_cong_wait); + if (nvme_submit_bio_queue(nvmeq, ns, bio)) { + if (bio_list_empty(&nvmeq->sq_cong)) + add_wait_queue(&nvmeq->sq_full, + &nvmeq->sq_cong_wait); + bio_list_add_head(&nvmeq->sq_cong, bio); + break; + } + } +} + +static int nvme_kthread(void *data) +{ + struct nvme_dev *dev; + + while (!kthread_should_stop()) { + set_current_state(TASK_INTERRUPTIBLE); + spin_lock(&dev_list_lock); + list_for_each_entry(dev, &dev_list, node) { + int i; + for (i = 0; i < dev->queue_count; i++) { + struct nvme_queue *nvmeq = dev->queues[i]; + if (!nvmeq) + continue; + spin_lock_irq(&nvmeq->q_lock); + if (nvme_process_cq(nvmeq)) + printk("process_cq did something\n"); + nvme_cancel_ios(nvmeq, true); + nvme_resubmit_bios(nvmeq); + spin_unlock_irq(&nvmeq->q_lock); + } + } + spin_unlock(&dev_list_lock); + schedule_timeout(round_jiffies_relative(HZ)); + } + return 0; +} + +static DEFINE_IDA(nvme_index_ida); + +static int nvme_get_ns_idx(void) +{ + int index, error; + + do { + if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL)) + return -1; + + spin_lock(&dev_list_lock); + error = ida_get_new(&nvme_index_ida, &index); + spin_unlock(&dev_list_lock); + } while (error == -EAGAIN); + + if (error) + index = -1; + return index; +} + +static void nvme_put_ns_idx(int index) +{ + spin_lock(&dev_list_lock); + ida_remove(&nvme_index_ida, index); + spin_unlock(&dev_list_lock); +} + +static void nvme_config_discard(struct nvme_ns *ns) +{ + u32 logical_block_size = queue_logical_block_size(ns->queue); + ns->queue->limits.discard_zeroes_data = 0; + ns->queue->limits.discard_alignment = logical_block_size; + ns->queue->limits.discard_granularity = logical_block_size; + ns->queue->limits.max_discard_sectors = 0xffffffff; + queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); +} + +static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid, + struct nvme_id_ns *id, struct nvme_lba_range_type *rt) +{ + struct nvme_ns *ns; + struct gendisk *disk; + int lbaf; + + if (rt->attributes & NVME_LBART_ATTRIB_HIDE) + return NULL; + + ns = kzalloc(sizeof(*ns), GFP_KERNEL); + if (!ns) + return NULL; + ns->queue = 
blk_alloc_queue(GFP_KERNEL); + if (!ns->queue) + goto out_free_ns; + ns->queue->queue_flags = QUEUE_FLAG_DEFAULT; + queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue); + queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue); + blk_queue_make_request(ns->queue, nvme_make_request); + ns->dev = dev; + ns->queue->queuedata = ns; + + disk = alloc_disk(NVME_MINORS); + if (!disk) + goto out_free_queue; + ns->ns_id = nsid; + ns->disk = disk; + lbaf = id->flbas & 0xf; + ns->lba_shift = id->lbaf[lbaf].ds; + ns->ms = le16_to_cpu(id->lbaf[lbaf].ms); + blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift); + if (dev->max_hw_sectors) + blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors); + + disk->major = nvme_major; + disk->minors = NVME_MINORS; + disk->first_minor = NVME_MINORS * nvme_get_ns_idx(); + disk->fops = &nvme_fops; + disk->private_data = ns; + disk->queue = ns->queue; + disk->driverfs_dev = &dev->pci_dev->dev; + sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid); + set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9)); + + if (dev->oncs & NVME_CTRL_ONCS_DSM) + nvme_config_discard(ns); + + return ns; + + out_free_queue: + blk_cleanup_queue(ns->queue); + out_free_ns: + kfree(ns); + return NULL; +} + +static void nvme_ns_free(struct nvme_ns *ns) +{ + int index = ns->disk->first_minor / NVME_MINORS; + put_disk(ns->disk); + nvme_put_ns_idx(index); + blk_cleanup_queue(ns->queue); + kfree(ns); +} + +static int set_queue_count(struct nvme_dev *dev, int count) +{ + int status; + u32 result; + u32 q_count = (count - 1) | ((count - 1) << 16); + + status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0, + &result); + if (status) + return -EIO; + return min(result & 0xffff, result >> 16) + 1; +} + +static int nvme_setup_io_queues(struct nvme_dev *dev) +{ + int result, cpu, i, nr_io_queues, db_bar_size, q_depth; + + nr_io_queues = num_online_cpus(); + result = set_queue_count(dev, nr_io_queues); + if (result < 0) + return result; + if (result < nr_io_queues) + nr_io_queues = result; + + /* Deregister the admin queue's interrupt */ + free_irq(dev->entry[0].vector, dev->queues[0]); + + db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3)); + if (db_bar_size > 8192) { + iounmap(dev->bar); + dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0), + db_bar_size); + dev->dbs = ((void __iomem *)dev->bar) + 4096; + dev->queues[0]->q_db = dev->dbs; + } + + for (i = 0; i < nr_io_queues; i++) + dev->entry[i].entry = i; + for (;;) { + result = pci_enable_msix(dev->pci_dev, dev->entry, + nr_io_queues); + if (result == 0) { + break; + } else if (result > 0) { + nr_io_queues = result; + continue; + } else { + nr_io_queues = 1; + break; + } + } + + result = queue_request_irq(dev, dev->queues[0], "nvme admin"); + /* XXX: handle failure here */ + + cpu = cpumask_first(cpu_online_mask); + for (i = 0; i < nr_io_queues; i++) { + irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu)); + cpu = cpumask_next(cpu, cpu_online_mask); + } + + q_depth = min_t(int, NVME_CAP_MQES(readq(&dev->bar->cap)) + 1, + NVME_Q_DEPTH); + for (i = 0; i < nr_io_queues; i++) { + dev->queues[i + 1] = nvme_create_queue(dev, i + 1, q_depth, i); + if (IS_ERR(dev->queues[i + 1])) + return PTR_ERR(dev->queues[i + 1]); + dev->queue_count++; + } + + for (; i < num_possible_cpus(); i++) { + int target = i % rounddown_pow_of_two(dev->queue_count - 1); + dev->queues[i + 1] = dev->queues[target + 1]; + } + + return 0; +} + +static void nvme_free_queues(struct nvme_dev *dev) +{ + int i; + + for (i = 
dev->queue_count - 1; i >= 0; i--) + nvme_free_queue(dev, i); +} + +/* + * Return: error value if an error occurred setting up the queues or calling + * Identify Device. 0 if these succeeded, even if adding some of the + * namespaces failed. At the moment, these failures are silent. TBD which + * failures should be reported. + */ +static int nvme_dev_add(struct nvme_dev *dev) +{ + int res, nn, i; + struct nvme_ns *ns; + struct nvme_id_ctrl *ctrl; + struct nvme_id_ns *id_ns; + void *mem; + dma_addr_t dma_addr; + int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12; + + res = nvme_setup_io_queues(dev); + if (res) + return res; + + mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr, + GFP_KERNEL); + if (!mem) + return -ENOMEM; + + res = nvme_identify(dev, 0, 1, dma_addr); + if (res) { + res = -EIO; + goto out; + } + + ctrl = mem; + nn = le32_to_cpup(&ctrl->nn); + dev->oncs = le16_to_cpup(&ctrl->oncs); + memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn)); + memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn)); + memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr)); + if (ctrl->mdts) + dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9); + if ((dev->pci_dev->vendor == PCI_VENDOR_ID_INTEL) && + (dev->pci_dev->device == 0x0953) && ctrl->vs[3]) + dev->stripe_size = 1 << (ctrl->vs[3] + shift); + + id_ns = mem; + for (i = 1; i <= nn; i++) { + res = nvme_identify(dev, i, 0, dma_addr); + if (res) + continue; + + if (id_ns->ncap == 0) + continue; + + res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i, + dma_addr + 4096, NULL); + if (res) + memset(mem + 4096, 0, 4096); + + ns = nvme_alloc_ns(dev, i, mem, mem + 4096); + if (ns) + list_add_tail(&ns->list, &dev->namespaces); + } + list_for_each_entry(ns, &dev->namespaces, list) + add_disk(ns->disk); + res = 0; + + out: + dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr); + return res; +} + +static int nvme_dev_remove(struct nvme_dev *dev) +{ + struct nvme_ns *ns, *next; + + spin_lock(&dev_list_lock); + list_del(&dev->node); + spin_unlock(&dev_list_lock); + + list_for_each_entry_safe(ns, next, &dev->namespaces, list) { + list_del(&ns->list); + del_gendisk(ns->disk); + nvme_ns_free(ns); + } + + nvme_free_queues(dev); + + return 0; +} + +static int nvme_setup_prp_pools(struct nvme_dev *dev) +{ + struct device *dmadev = &dev->pci_dev->dev; + dev->prp_page_pool = dma_pool_create("prp list page", dmadev, + PAGE_SIZE, PAGE_SIZE, 0); + if (!dev->prp_page_pool) + return -ENOMEM; + + /* Optimisation for I/Os between 4k and 128k */ + dev->prp_small_pool = dma_pool_create("prp list 256", dmadev, + 256, 256, 0); + if (!dev->prp_small_pool) { + dma_pool_destroy(dev->prp_page_pool); + return -ENOMEM; + } + return 0; +} + +static void nvme_release_prp_pools(struct nvme_dev *dev) +{ + dma_pool_destroy(dev->prp_page_pool); + dma_pool_destroy(dev->prp_small_pool); +} + +static DEFINE_IDA(nvme_instance_ida); + +static int nvme_set_instance(struct nvme_dev *dev) +{ + int instance, error; + + do { + if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL)) + return -ENODEV; + + spin_lock(&dev_list_lock); + error = ida_get_new(&nvme_instance_ida, &instance); + spin_unlock(&dev_list_lock); + } while (error == -EAGAIN); + + if (error) + return -ENODEV; + + dev->instance = instance; + return 0; +} + +static void nvme_release_instance(struct nvme_dev *dev) +{ + spin_lock(&dev_list_lock); + ida_remove(&nvme_instance_ida, dev->instance); + spin_unlock(&dev_list_lock); +} + +static void nvme_free_dev(struct kref *kref) +{ + struct nvme_dev *dev = container_of(kref, struct 
nvme_dev, kref); + nvme_dev_remove(dev); + pci_disable_msix(dev->pci_dev); + iounmap(dev->bar); + nvme_release_instance(dev); + nvme_release_prp_pools(dev); + pci_disable_device(dev->pci_dev); + pci_release_regions(dev->pci_dev); + kfree(dev->queues); + kfree(dev->entry); + kfree(dev); +} + +static int nvme_dev_open(struct inode *inode, struct file *f) +{ + struct nvme_dev *dev = container_of(f->private_data, struct nvme_dev, + miscdev); + kref_get(&dev->kref); + f->private_data = dev; + return 0; +} + +static int nvme_dev_release(struct inode *inode, struct file *f) +{ + struct nvme_dev *dev = f->private_data; + kref_put(&dev->kref, nvme_free_dev); + return 0; +} + +static long nvme_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg) +{ + struct nvme_dev *dev = f->private_data; + switch (cmd) { + case NVME_IOCTL_ADMIN_CMD: + return nvme_user_admin_cmd(dev, (void __user *)arg); + default: + return -ENOTTY; + } +} + +static const struct file_operations nvme_dev_fops = { + .owner = THIS_MODULE, + .open = nvme_dev_open, + .release = nvme_dev_release, + .unlocked_ioctl = nvme_dev_ioctl, + .compat_ioctl = nvme_dev_ioctl, +}; + +static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + int bars, result = -ENOMEM; + struct nvme_dev *dev; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry), + GFP_KERNEL); + if (!dev->entry) + goto free; + dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *), + GFP_KERNEL); + if (!dev->queues) + goto free; + + if (pci_enable_device_mem(pdev)) + goto free; + pci_set_master(pdev); + bars = pci_select_bars(pdev, IORESOURCE_MEM); + if (pci_request_selected_regions(pdev, bars, "nvme")) + goto disable; + + INIT_LIST_HEAD(&dev->namespaces); + dev->pci_dev = pdev; + pci_set_drvdata(pdev, dev); + dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); + dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); + result = nvme_set_instance(dev); + if (result) + goto disable; + + dev->entry[0].vector = pdev->irq; + + result = nvme_setup_prp_pools(dev); + if (result) + goto disable_msix; + + dev->bar = ioremap(pci_resource_start(pdev, 0), 8192); + if (!dev->bar) { + result = -ENOMEM; + goto disable_msix; + } + + result = nvme_configure_admin_queue(dev); + if (result) + goto unmap; + dev->queue_count++; + + spin_lock(&dev_list_lock); + list_add(&dev->node, &dev_list); + spin_unlock(&dev_list_lock); + + result = nvme_dev_add(dev); + if (result) + goto delete; + + scnprintf(dev->name, sizeof(dev->name), "nvme%d", dev->instance); + dev->miscdev.minor = MISC_DYNAMIC_MINOR; + dev->miscdev.parent = &pdev->dev; + dev->miscdev.name = dev->name; + dev->miscdev.fops = &nvme_dev_fops; + result = misc_register(&dev->miscdev); + if (result) + goto remove; + + kref_init(&dev->kref); + return 0; + + remove: + nvme_dev_remove(dev); + delete: + spin_lock(&dev_list_lock); + list_del(&dev->node); + spin_unlock(&dev_list_lock); + + nvme_free_queues(dev); + unmap: + iounmap(dev->bar); + disable_msix: + pci_disable_msix(pdev); + nvme_release_instance(dev); + nvme_release_prp_pools(dev); + disable: + pci_disable_device(pdev); + pci_release_regions(pdev); + free: + kfree(dev->queues); + kfree(dev->entry); + kfree(dev); + return result; +} + +static void nvme_remove(struct pci_dev *pdev) +{ + struct nvme_dev *dev = pci_get_drvdata(pdev); + misc_deregister(&dev->miscdev); + kref_put(&dev->kref, nvme_free_dev); +} + +/* These functions are yet to be implemented */ +#define nvme_error_detected 
NULL +#define nvme_dump_registers NULL +#define nvme_link_reset NULL +#define nvme_slot_reset NULL +#define nvme_error_resume NULL +#define nvme_suspend NULL +#define nvme_resume NULL + +static const struct pci_error_handlers nvme_err_handler = { + .error_detected = nvme_error_detected, + .mmio_enabled = nvme_dump_registers, + .link_reset = nvme_link_reset, + .slot_reset = nvme_slot_reset, + .resume = nvme_error_resume, +}; + +/* Move to pci_ids.h later */ +#define PCI_CLASS_STORAGE_EXPRESS 0x010802 + +static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = { + { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, nvme_id_table); + +static struct pci_driver nvme_driver = { + .name = "nvme", + .id_table = nvme_id_table, + .probe = nvme_probe, + .remove = nvme_remove, + .suspend = nvme_suspend, + .resume = nvme_resume, + .err_handler = &nvme_err_handler, +}; + +static int __init nvme_init(void) +{ + int result; + + nvme_thread = kthread_run(nvme_kthread, NULL, "nvme"); + if (IS_ERR(nvme_thread)) + return PTR_ERR(nvme_thread); + + result = register_blkdev(nvme_major, "nvme"); + if (result < 0) + goto kill_kthread; + else if (result > 0) + nvme_major = result; + + result = pci_register_driver(&nvme_driver); + if (result) + goto unregister_blkdev; + return 0; + + unregister_blkdev: + unregister_blkdev(nvme_major, "nvme"); + kill_kthread: + kthread_stop(nvme_thread); + return result; +} + +static void __exit nvme_exit(void) +{ + pci_unregister_driver(&nvme_driver); + unregister_blkdev(nvme_major, "nvme"); + kthread_stop(nvme_thread); +} + +MODULE_AUTHOR("Matthew Wilcox "); +MODULE_LICENSE("GPL"); +MODULE_VERSION("0.8"); +module_init(nvme_init); +module_exit(nvme_exit); diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c new file mode 100644 index 0000000..fed54b0 --- /dev/null +++ b/drivers/block/nvme-scsi.c @@ -0,0 +1,3053 @@ +/* + * NVM Express device driver + * Copyright (c) 2011, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + */ + +/* + * Refer to the SCSI-NVMe Translation spec for details on how + * each command is translated. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +static int sg_version_num = 30534; /* 2 digits for each component */ + +#define SNTI_TRANSLATION_SUCCESS 0 +#define SNTI_INTERNAL_ERROR 1 + +/* VPD Page Codes */ +#define VPD_SUPPORTED_PAGES 0x00 +#define VPD_SERIAL_NUMBER 0x80 +#define VPD_DEVICE_IDENTIFIERS 0x83 +#define VPD_EXTENDED_INQUIRY 0x86 +#define VPD_BLOCK_DEV_CHARACTERISTICS 0xB1 + +/* CDB offsets */ +#define REPORT_LUNS_CDB_ALLOC_LENGTH_OFFSET 6 +#define REPORT_LUNS_SR_OFFSET 2 +#define READ_CAP_16_CDB_ALLOC_LENGTH_OFFSET 10 +#define REQUEST_SENSE_CDB_ALLOC_LENGTH_OFFSET 4 +#define REQUEST_SENSE_DESC_OFFSET 1 +#define REQUEST_SENSE_DESC_MASK 0x01 +#define DESCRIPTOR_FORMAT_SENSE_DATA_TYPE 1 +#define INQUIRY_EVPD_BYTE_OFFSET 1 +#define INQUIRY_PAGE_CODE_BYTE_OFFSET 2 +#define INQUIRY_EVPD_BIT_MASK 1 +#define INQUIRY_CDB_ALLOCATION_LENGTH_OFFSET 3 +#define START_STOP_UNIT_CDB_IMMED_OFFSET 1 +#define START_STOP_UNIT_CDB_IMMED_MASK 0x1 +#define START_STOP_UNIT_CDB_POWER_COND_MOD_OFFSET 3 +#define START_STOP_UNIT_CDB_POWER_COND_MOD_MASK 0xF +#define START_STOP_UNIT_CDB_POWER_COND_OFFSET 4 +#define START_STOP_UNIT_CDB_POWER_COND_MASK 0xF0 +#define START_STOP_UNIT_CDB_NO_FLUSH_OFFSET 4 +#define START_STOP_UNIT_CDB_NO_FLUSH_MASK 0x4 +#define START_STOP_UNIT_CDB_START_OFFSET 4 +#define START_STOP_UNIT_CDB_START_MASK 0x1 +#define WRITE_BUFFER_CDB_MODE_OFFSET 1 +#define WRITE_BUFFER_CDB_MODE_MASK 0x1F +#define WRITE_BUFFER_CDB_BUFFER_ID_OFFSET 2 +#define WRITE_BUFFER_CDB_BUFFER_OFFSET_OFFSET 3 +#define WRITE_BUFFER_CDB_PARM_LIST_LENGTH_OFFSET 6 +#define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_OFFSET 1 +#define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_MASK 0xC0 +#define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_SHIFT 6 +#define FORMAT_UNIT_CDB_LONG_LIST_OFFSET 1 +#define FORMAT_UNIT_CDB_LONG_LIST_MASK 0x20 +#define FORMAT_UNIT_CDB_FORMAT_DATA_OFFSET 1 +#define FORMAT_UNIT_CDB_FORMAT_DATA_MASK 0x10 +#define FORMAT_UNIT_SHORT_PARM_LIST_LEN 4 +#define FORMAT_UNIT_LONG_PARM_LIST_LEN 8 +#define FORMAT_UNIT_PROT_INT_OFFSET 3 +#define FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET 0 +#define FORMAT_UNIT_PROT_FIELD_USAGE_MASK 0x07 +#define UNMAP_CDB_PARAM_LIST_LENGTH_OFFSET 7 + +/* Misc. 
defines */ +#define NIBBLE_SHIFT 4 +#define FIXED_SENSE_DATA 0x70 +#define DESC_FORMAT_SENSE_DATA 0x72 +#define FIXED_SENSE_DATA_ADD_LENGTH 10 +#define LUN_ENTRY_SIZE 8 +#define LUN_DATA_HEADER_SIZE 8 +#define ALL_LUNS_RETURNED 0x02 +#define ALL_WELL_KNOWN_LUNS_RETURNED 0x01 +#define RESTRICTED_LUNS_RETURNED 0x00 +#define NVME_POWER_STATE_START_VALID 0x00 +#define NVME_POWER_STATE_ACTIVE 0x01 +#define NVME_POWER_STATE_IDLE 0x02 +#define NVME_POWER_STATE_STANDBY 0x03 +#define NVME_POWER_STATE_LU_CONTROL 0x07 +#define POWER_STATE_0 0 +#define POWER_STATE_1 1 +#define POWER_STATE_2 2 +#define POWER_STATE_3 3 +#define DOWNLOAD_SAVE_ACTIVATE 0x05 +#define DOWNLOAD_SAVE_DEFER_ACTIVATE 0x0E +#define ACTIVATE_DEFERRED_MICROCODE 0x0F +#define FORMAT_UNIT_IMMED_MASK 0x2 +#define FORMAT_UNIT_IMMED_OFFSET 1 +#define KELVIN_TEMP_FACTOR 273 +#define FIXED_FMT_SENSE_DATA_SIZE 18 +#define DESC_FMT_SENSE_DATA_SIZE 8 + +/* SCSI/NVMe defines and bit masks */ +#define INQ_STANDARD_INQUIRY_PAGE 0x00 +#define INQ_SUPPORTED_VPD_PAGES_PAGE 0x00 +#define INQ_UNIT_SERIAL_NUMBER_PAGE 0x80 +#define INQ_DEVICE_IDENTIFICATION_PAGE 0x83 +#define INQ_EXTENDED_INQUIRY_DATA_PAGE 0x86 +#define INQ_BDEV_CHARACTERISTICS_PAGE 0xB1 +#define INQ_SERIAL_NUMBER_LENGTH 0x14 +#define INQ_NUM_SUPPORTED_VPD_PAGES 5 +#define VERSION_SPC_4 0x06 +#define ACA_UNSUPPORTED 0 +#define STANDARD_INQUIRY_LENGTH 36 +#define ADDITIONAL_STD_INQ_LENGTH 31 +#define EXTENDED_INQUIRY_DATA_PAGE_LENGTH 0x3C +#define RESERVED_FIELD 0 + +/* SCSI READ/WRITE Defines */ +#define IO_CDB_WP_MASK 0xE0 +#define IO_CDB_WP_SHIFT 5 +#define IO_CDB_FUA_MASK 0x8 +#define IO_6_CDB_LBA_OFFSET 0 +#define IO_6_CDB_LBA_MASK 0x001FFFFF +#define IO_6_CDB_TX_LEN_OFFSET 4 +#define IO_6_DEFAULT_TX_LEN 256 +#define IO_10_CDB_LBA_OFFSET 2 +#define IO_10_CDB_TX_LEN_OFFSET 7 +#define IO_10_CDB_WP_OFFSET 1 +#define IO_10_CDB_FUA_OFFSET 1 +#define IO_12_CDB_LBA_OFFSET 2 +#define IO_12_CDB_TX_LEN_OFFSET 6 +#define IO_12_CDB_WP_OFFSET 1 +#define IO_12_CDB_FUA_OFFSET 1 +#define IO_16_CDB_FUA_OFFSET 1 +#define IO_16_CDB_WP_OFFSET 1 +#define IO_16_CDB_LBA_OFFSET 2 +#define IO_16_CDB_TX_LEN_OFFSET 10 + +/* Mode Sense/Select defines */ +#define MODE_PAGE_INFO_EXCEP 0x1C +#define MODE_PAGE_CACHING 0x08 +#define MODE_PAGE_CONTROL 0x0A +#define MODE_PAGE_POWER_CONDITION 0x1A +#define MODE_PAGE_RETURN_ALL 0x3F +#define MODE_PAGE_BLK_DES_LEN 0x08 +#define MODE_PAGE_LLBAA_BLK_DES_LEN 0x10 +#define MODE_PAGE_CACHING_LEN 0x14 +#define MODE_PAGE_CONTROL_LEN 0x0C +#define MODE_PAGE_POW_CND_LEN 0x28 +#define MODE_PAGE_INF_EXC_LEN 0x0C +#define MODE_PAGE_ALL_LEN 0x54 +#define MODE_SENSE6_MPH_SIZE 4 +#define MODE_SENSE6_ALLOC_LEN_OFFSET 4 +#define MODE_SENSE_PAGE_CONTROL_OFFSET 2 +#define MODE_SENSE_PAGE_CONTROL_MASK 0xC0 +#define MODE_SENSE_PAGE_CODE_OFFSET 2 +#define MODE_SENSE_PAGE_CODE_MASK 0x3F +#define MODE_SENSE_LLBAA_OFFSET 1 +#define MODE_SENSE_LLBAA_MASK 0x10 +#define MODE_SENSE_LLBAA_SHIFT 4 +#define MODE_SENSE_DBD_OFFSET 1 +#define MODE_SENSE_DBD_MASK 8 +#define MODE_SENSE_DBD_SHIFT 3 +#define MODE_SENSE10_MPH_SIZE 8 +#define MODE_SENSE10_ALLOC_LEN_OFFSET 7 +#define MODE_SELECT_CDB_PAGE_FORMAT_OFFSET 1 +#define MODE_SELECT_CDB_SAVE_PAGES_OFFSET 1 +#define MODE_SELECT_6_CDB_PARAM_LIST_LENGTH_OFFSET 4 +#define MODE_SELECT_10_CDB_PARAM_LIST_LENGTH_OFFSET 7 +#define MODE_SELECT_CDB_PAGE_FORMAT_MASK 0x10 +#define MODE_SELECT_CDB_SAVE_PAGES_MASK 0x1 +#define MODE_SELECT_6_BD_OFFSET 3 +#define MODE_SELECT_10_BD_OFFSET 6 +#define MODE_SELECT_10_LLBAA_OFFSET 4 +#define MODE_SELECT_10_LLBAA_MASK 1 
+#define MODE_SELECT_6_MPH_SIZE 4 +#define MODE_SELECT_10_MPH_SIZE 8 +#define CACHING_MODE_PAGE_WCE_MASK 0x04 +#define MODE_SENSE_BLK_DESC_ENABLED 0 +#define MODE_SENSE_BLK_DESC_COUNT 1 +#define MODE_SELECT_PAGE_CODE_MASK 0x3F +#define SHORT_DESC_BLOCK 8 +#define LONG_DESC_BLOCK 16 +#define MODE_PAGE_POW_CND_LEN_FIELD 0x26 +#define MODE_PAGE_INF_EXC_LEN_FIELD 0x0A +#define MODE_PAGE_CACHING_LEN_FIELD 0x12 +#define MODE_PAGE_CONTROL_LEN_FIELD 0x0A +#define MODE_SENSE_PC_CURRENT_VALUES 0 + +/* Log Sense defines */ +#define LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE 0x00 +#define LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH 0x07 +#define LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE 0x2F +#define LOG_PAGE_TEMPERATURE_PAGE 0x0D +#define LOG_SENSE_CDB_SP_OFFSET 1 +#define LOG_SENSE_CDB_SP_NOT_ENABLED 0 +#define LOG_SENSE_CDB_PC_OFFSET 2 +#define LOG_SENSE_CDB_PC_MASK 0xC0 +#define LOG_SENSE_CDB_PC_SHIFT 6 +#define LOG_SENSE_CDB_PC_CUMULATIVE_VALUES 1 +#define LOG_SENSE_CDB_PAGE_CODE_MASK 0x3F +#define LOG_SENSE_CDB_ALLOC_LENGTH_OFFSET 7 +#define REMAINING_INFO_EXCP_PAGE_LENGTH 0x8 +#define LOG_INFO_EXCP_PAGE_LENGTH 0xC +#define REMAINING_TEMP_PAGE_LENGTH 0xC +#define LOG_TEMP_PAGE_LENGTH 0x10 +#define LOG_TEMP_UNKNOWN 0xFF +#define SUPPORTED_LOG_PAGES_PAGE_LENGTH 0x3 + +/* Read Capacity defines */ +#define READ_CAP_10_RESP_SIZE 8 +#define READ_CAP_16_RESP_SIZE 32 + +/* NVMe Namespace and Command Defines */ +#define NVME_GET_SMART_LOG_PAGE 0x02 +#define NVME_GET_FEAT_TEMP_THRESH 0x04 +#define BYTES_TO_DWORDS 4 +#define NVME_MAX_FIRMWARE_SLOT 7 + +/* Report LUNs defines */ +#define REPORT_LUNS_FIRST_LUN_OFFSET 8 + +/* SCSI ADDITIONAL SENSE Codes */ + +#define SCSI_ASC_NO_SENSE 0x00 +#define SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT 0x03 +#define SCSI_ASC_LUN_NOT_READY 0x04 +#define SCSI_ASC_WARNING 0x0B +#define SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED 0x10 +#define SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED 0x10 +#define SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED 0x10 +#define SCSI_ASC_UNRECOVERED_READ_ERROR 0x11 +#define SCSI_ASC_MISCOMPARE_DURING_VERIFY 0x1D +#define SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID 0x20 +#define SCSI_ASC_ILLEGAL_COMMAND 0x20 +#define SCSI_ASC_ILLEGAL_BLOCK 0x21 +#define SCSI_ASC_INVALID_CDB 0x24 +#define SCSI_ASC_INVALID_LUN 0x25 +#define SCSI_ASC_INVALID_PARAMETER 0x26 +#define SCSI_ASC_FORMAT_COMMAND_FAILED 0x31 +#define SCSI_ASC_INTERNAL_TARGET_FAILURE 0x44 + +/* SCSI ADDITIONAL SENSE Code Qualifiers */ + +#define SCSI_ASCQ_CAUSE_NOT_REPORTABLE 0x00 +#define SCSI_ASCQ_FORMAT_COMMAND_FAILED 0x01 +#define SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED 0x01 +#define SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED 0x02 +#define SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED 0x03 +#define SCSI_ASCQ_FORMAT_IN_PROGRESS 0x04 +#define SCSI_ASCQ_POWER_LOSS_EXPECTED 0x08 +#define SCSI_ASCQ_INVALID_LUN_ID 0x09 + +/** + * DEVICE_SPECIFIC_PARAMETER in mode parameter header (see sbc2r16) to + * enable DPOFUA support type 0x10 value. 
+ */ +#define DEVICE_SPECIFIC_PARAMETER 0 +#define VPD_ID_DESCRIPTOR_LENGTH sizeof(VPD_IDENTIFICATION_DESCRIPTOR) + +/* MACROs to extract information from CDBs */ + +#define GET_OPCODE(cdb) cdb[0] + +#define GET_U8_FROM_CDB(cdb, index) (cdb[index] << 0) + +#define GET_U16_FROM_CDB(cdb, index) ((cdb[index] << 8) | (cdb[index + 1] << 0)) + +#define GET_U24_FROM_CDB(cdb, index) ((cdb[index] << 16) | \ +(cdb[index + 1] << 8) | \ +(cdb[index + 2] << 0)) + +#define GET_U32_FROM_CDB(cdb, index) ((cdb[index] << 24) | \ +(cdb[index + 1] << 16) | \ +(cdb[index + 2] << 8) | \ +(cdb[index + 3] << 0)) + +#define GET_U64_FROM_CDB(cdb, index) ((((u64)cdb[index]) << 56) | \ +(((u64)cdb[index + 1]) << 48) | \ +(((u64)cdb[index + 2]) << 40) | \ +(((u64)cdb[index + 3]) << 32) | \ +(((u64)cdb[index + 4]) << 24) | \ +(((u64)cdb[index + 5]) << 16) | \ +(((u64)cdb[index + 6]) << 8) | \ +(((u64)cdb[index + 7]) << 0)) + +/* Inquiry Helper Macros */ +#define GET_INQ_EVPD_BIT(cdb) \ +((GET_U8_FROM_CDB(cdb, INQUIRY_EVPD_BYTE_OFFSET) & \ +INQUIRY_EVPD_BIT_MASK) ? 1 : 0) + +#define GET_INQ_PAGE_CODE(cdb) \ +(GET_U8_FROM_CDB(cdb, INQUIRY_PAGE_CODE_BYTE_OFFSET)) + +#define GET_INQ_ALLOC_LENGTH(cdb) \ +(GET_U16_FROM_CDB(cdb, INQUIRY_CDB_ALLOCATION_LENGTH_OFFSET)) + +/* Report LUNs Helper Macros */ +#define GET_REPORT_LUNS_ALLOC_LENGTH(cdb) \ +(GET_U32_FROM_CDB(cdb, REPORT_LUNS_CDB_ALLOC_LENGTH_OFFSET)) + +/* Read Capacity Helper Macros */ +#define GET_READ_CAP_16_ALLOC_LENGTH(cdb) \ +(GET_U32_FROM_CDB(cdb, READ_CAP_16_CDB_ALLOC_LENGTH_OFFSET)) + +#define IS_READ_CAP_16(cdb) \ +((cdb[0] == SERVICE_ACTION_IN && cdb[1] == SAI_READ_CAPACITY_16) ? 1 : 0) + +/* Request Sense Helper Macros */ +#define GET_REQUEST_SENSE_ALLOC_LENGTH(cdb) \ +(GET_U8_FROM_CDB(cdb, REQUEST_SENSE_CDB_ALLOC_LENGTH_OFFSET)) + +/* Mode Sense Helper Macros */ +#define GET_MODE_SENSE_DBD(cdb) \ +((GET_U8_FROM_CDB(cdb, MODE_SENSE_DBD_OFFSET) & MODE_SENSE_DBD_MASK) >> \ +MODE_SENSE_DBD_SHIFT) + +#define GET_MODE_SENSE_LLBAA(cdb) \ +((GET_U8_FROM_CDB(cdb, MODE_SENSE_LLBAA_OFFSET) & \ +MODE_SENSE_LLBAA_MASK) >> MODE_SENSE_LLBAA_SHIFT) + +#define GET_MODE_SENSE_MPH_SIZE(cdb10) \ +(cdb10 ? MODE_SENSE10_MPH_SIZE : MODE_SENSE6_MPH_SIZE) + + +/* Struct to gather data that needs to be extracted from a SCSI CDB. + Not conforming to any particular CDB variant, but compatible with all. 
*/ + +struct nvme_trans_io_cdb { + u8 fua; + u8 prot_info; + u64 lba; + u32 xfer_len; +}; + + +/* Internal Helper Functions */ + + +/* Copy data to userspace memory */ + +static int nvme_trans_copy_to_user(struct sg_io_hdr *hdr, void *from, + unsigned long n) +{ + int res = SNTI_TRANSLATION_SUCCESS; + unsigned long not_copied; + int i; + void *index = from; + size_t remaining = n; + size_t xfer_len; + + if (hdr->iovec_count > 0) { + struct sg_iovec sgl; + + for (i = 0; i < hdr->iovec_count; i++) { + not_copied = copy_from_user(&sgl, hdr->dxferp + + i * sizeof(struct sg_iovec), + sizeof(struct sg_iovec)); + if (not_copied) + return -EFAULT; + xfer_len = min(remaining, sgl.iov_len); + not_copied = copy_to_user(sgl.iov_base, index, + xfer_len); + if (not_copied) { + res = -EFAULT; + break; + } + index += xfer_len; + remaining -= xfer_len; + if (remaining == 0) + break; + } + return res; + } + not_copied = copy_to_user(hdr->dxferp, from, n); + if (not_copied) + res = -EFAULT; + return res; +} + +/* Copy data from userspace memory */ + +static int nvme_trans_copy_from_user(struct sg_io_hdr *hdr, void *to, + unsigned long n) +{ + int res = SNTI_TRANSLATION_SUCCESS; + unsigned long not_copied; + int i; + void *index = to; + size_t remaining = n; + size_t xfer_len; + + if (hdr->iovec_count > 0) { + struct sg_iovec sgl; + + for (i = 0; i < hdr->iovec_count; i++) { + not_copied = copy_from_user(&sgl, hdr->dxferp + + i * sizeof(struct sg_iovec), + sizeof(struct sg_iovec)); + if (not_copied) + return -EFAULT; + xfer_len = min(remaining, sgl.iov_len); + not_copied = copy_from_user(index, sgl.iov_base, + xfer_len); + if (not_copied) { + res = -EFAULT; + break; + } + index += xfer_len; + remaining -= xfer_len; + if (remaining == 0) + break; + } + return res; + } + + not_copied = copy_from_user(to, hdr->dxferp, n); + if (not_copied) + res = -EFAULT; + return res; +} + +/* Status/Sense Buffer Writeback */ + +static int nvme_trans_completion(struct sg_io_hdr *hdr, u8 status, u8 sense_key, + u8 asc, u8 ascq) +{ + int res = SNTI_TRANSLATION_SUCCESS; + u8 xfer_len; + u8 resp[DESC_FMT_SENSE_DATA_SIZE]; + + if (scsi_status_is_good(status)) { + hdr->status = SAM_STAT_GOOD; + hdr->masked_status = GOOD; + hdr->host_status = DID_OK; + hdr->driver_status = DRIVER_OK; + hdr->sb_len_wr = 0; + } else { + hdr->status = status; + hdr->masked_status = status >> 1; + hdr->host_status = DID_OK; + hdr->driver_status = DRIVER_OK; + + memset(resp, 0, DESC_FMT_SENSE_DATA_SIZE); + resp[0] = DESC_FORMAT_SENSE_DATA; + resp[1] = sense_key; + resp[2] = asc; + resp[3] = ascq; + + xfer_len = min_t(u8, hdr->mx_sb_len, DESC_FMT_SENSE_DATA_SIZE); + hdr->sb_len_wr = xfer_len; + if (copy_to_user(hdr->sbp, resp, xfer_len) > 0) + res = -EFAULT; + } + + return res; +} + +static int nvme_trans_status_code(struct sg_io_hdr *hdr, int nvme_sc) +{ + u8 status, sense_key, asc, ascq; + int res = SNTI_TRANSLATION_SUCCESS; + + /* For non-nvme (Linux) errors, simply return the error code */ + if (nvme_sc < 0) + return nvme_sc; + + /* Mask DNR, More, and reserved fields */ + nvme_sc &= 0x7FF; + + switch (nvme_sc) { + /* Generic Command Status */ + case NVME_SC_SUCCESS: + status = SAM_STAT_GOOD; + sense_key = NO_SENSE; + asc = SCSI_ASC_NO_SENSE; + ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_INVALID_OPCODE: + status = SAM_STAT_CHECK_CONDITION; + sense_key = ILLEGAL_REQUEST; + asc = SCSI_ASC_ILLEGAL_COMMAND; + ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_INVALID_FIELD: + status = SAM_STAT_CHECK_CONDITION; + sense_key = 
ILLEGAL_REQUEST; + asc = SCSI_ASC_INVALID_CDB; + ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_DATA_XFER_ERROR: + status = SAM_STAT_CHECK_CONDITION; + sense_key = MEDIUM_ERROR; + asc = SCSI_ASC_NO_SENSE; + ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_POWER_LOSS: + status = SAM_STAT_TASK_ABORTED; + sense_key = ABORTED_COMMAND; + asc = SCSI_ASC_WARNING; + ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED; + break; + case NVME_SC_INTERNAL: + status = SAM_STAT_CHECK_CONDITION; + sense_key = HARDWARE_ERROR; + asc = SCSI_ASC_INTERNAL_TARGET_FAILURE; + ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_ABORT_REQ: + status = SAM_STAT_TASK_ABORTED; + sense_key = ABORTED_COMMAND; + asc = SCSI_ASC_NO_SENSE; + ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_ABORT_QUEUE: + status = SAM_STAT_TASK_ABORTED; + sense_key = ABORTED_COMMAND; + asc = SCSI_ASC_NO_SENSE; + ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_FUSED_FAIL: + status = SAM_STAT_TASK_ABORTED; + sense_key = ABORTED_COMMAND; + asc = SCSI_ASC_NO_SENSE; + ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_FUSED_MISSING: + status = SAM_STAT_TASK_ABORTED; + sense_key = ABORTED_COMMAND; + asc = SCSI_ASC_NO_SENSE; + ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_INVALID_NS: + status = SAM_STAT_CHECK_CONDITION; + sense_key = ILLEGAL_REQUEST; + asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID; + ascq = SCSI_ASCQ_INVALID_LUN_ID; + break; + case NVME_SC_LBA_RANGE: + status = SAM_STAT_CHECK_CONDITION; + sense_key = ILLEGAL_REQUEST; + asc = SCSI_ASC_ILLEGAL_BLOCK; + ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_CAP_EXCEEDED: + status = SAM_STAT_CHECK_CONDITION; + sense_key = MEDIUM_ERROR; + asc = SCSI_ASC_NO_SENSE; + ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_NS_NOT_READY: + status = SAM_STAT_CHECK_CONDITION; + sense_key = NOT_READY; + asc = SCSI_ASC_LUN_NOT_READY; + ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + + /* Command Specific Status */ + case NVME_SC_INVALID_FORMAT: + status = SAM_STAT_CHECK_CONDITION; + sense_key = ILLEGAL_REQUEST; + asc = SCSI_ASC_FORMAT_COMMAND_FAILED; + ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED; + break; + case NVME_SC_BAD_ATTRIBUTES: + status = SAM_STAT_CHECK_CONDITION; + sense_key = ILLEGAL_REQUEST; + asc = SCSI_ASC_INVALID_CDB; + ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + + /* Media Errors */ + case NVME_SC_WRITE_FAULT: + status = SAM_STAT_CHECK_CONDITION; + sense_key = MEDIUM_ERROR; + asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT; + ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_READ_ERROR: + status = SAM_STAT_CHECK_CONDITION; + sense_key = MEDIUM_ERROR; + asc = SCSI_ASC_UNRECOVERED_READ_ERROR; + ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_GUARD_CHECK: + status = SAM_STAT_CHECK_CONDITION; + sense_key = MEDIUM_ERROR; + asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED; + ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED; + break; + case NVME_SC_APPTAG_CHECK: + status = SAM_STAT_CHECK_CONDITION; + sense_key = MEDIUM_ERROR; + asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED; + ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED; + break; + case NVME_SC_REFTAG_CHECK: + status = SAM_STAT_CHECK_CONDITION; + sense_key = MEDIUM_ERROR; + asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED; + ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED; + break; + case NVME_SC_COMPARE_FAILED: + status = SAM_STAT_CHECK_CONDITION; + sense_key = MISCOMPARE; + asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY; + ascq = 
SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_ACCESS_DENIED: + status = SAM_STAT_CHECK_CONDITION; + sense_key = ILLEGAL_REQUEST; + asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID; + ascq = SCSI_ASCQ_INVALID_LUN_ID; + break; + + /* Unspecified/Default */ + case NVME_SC_CMDID_CONFLICT: + case NVME_SC_CMD_SEQ_ERROR: + case NVME_SC_CQ_INVALID: + case NVME_SC_QID_INVALID: + case NVME_SC_QUEUE_SIZE: + case NVME_SC_ABORT_LIMIT: + case NVME_SC_ABORT_MISSING: + case NVME_SC_ASYNC_LIMIT: + case NVME_SC_FIRMWARE_SLOT: + case NVME_SC_FIRMWARE_IMAGE: + case NVME_SC_INVALID_VECTOR: + case NVME_SC_INVALID_LOG_PAGE: + default: + status = SAM_STAT_CHECK_CONDITION; + sense_key = ILLEGAL_REQUEST; + asc = SCSI_ASC_NO_SENSE; + ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + } + + res = nvme_trans_completion(hdr, status, sense_key, asc, ascq); + + return res; +} + +/* INQUIRY Helper Functions */ + +static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns, + struct sg_io_hdr *hdr, u8 *inq_response, + int alloc_len) +{ + struct nvme_dev *dev = ns->dev; + dma_addr_t dma_addr; + void *mem; + struct nvme_id_ns *id_ns; + int res = SNTI_TRANSLATION_SUCCESS; + int nvme_sc; + int xfer_len; + u8 resp_data_format = 0x02; + u8 protect; + u8 cmdque = 0x01 << 1; + + mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), + &dma_addr, GFP_KERNEL); + if (mem == NULL) { + res = -ENOMEM; + goto out_dma; + } + + /* nvme ns identify - use DPS value for PROTECT field */ + nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr); + res = nvme_trans_status_code(hdr, nvme_sc); + /* + * If nvme_sc was -ve, res will be -ve here. + * If nvme_sc was +ve, the status would have been translated, and res + * can only be 0 or -ve. + * - If 0 && nvme_sc > 0, then go into the next if where res gets nvme_sc + * - If -ve, return because it's a Linux error. + */ + if (res) + goto out_free; + if (nvme_sc) { + res = nvme_sc; + goto out_free; + } + id_ns = mem; + (id_ns->dps) ?
(protect = 0x01) : (protect = 0); + + memset(inq_response, 0, STANDARD_INQUIRY_LENGTH); + inq_response[2] = VERSION_SPC_4; + inq_response[3] = resp_data_format; /*normaca=0 | hisup=0 */ + inq_response[4] = ADDITIONAL_STD_INQ_LENGTH; + inq_response[5] = protect; /* sccs=0 | acc=0 | tpgs=0 | pc3=0 */ + inq_response[7] = cmdque; /* wbus16=0 | sync=0 | vs=0 */ + strncpy(&inq_response[8], "NVMe ", 8); + strncpy(&inq_response[16], dev->model, 16); + strncpy(&inq_response[32], dev->firmware_rev, 4); + + xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH); + res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len); + + out_free: + dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem, + dma_addr); + out_dma: + return res; +} + +static int nvme_trans_supported_vpd_pages(struct nvme_ns *ns, + struct sg_io_hdr *hdr, u8 *inq_response, + int alloc_len) +{ + int res = SNTI_TRANSLATION_SUCCESS; + int xfer_len; + + memset(inq_response, 0, STANDARD_INQUIRY_LENGTH); + inq_response[1] = INQ_SUPPORTED_VPD_PAGES_PAGE; /* Page Code */ + inq_response[3] = INQ_NUM_SUPPORTED_VPD_PAGES; /* Page Length */ + inq_response[4] = INQ_SUPPORTED_VPD_PAGES_PAGE; + inq_response[5] = INQ_UNIT_SERIAL_NUMBER_PAGE; + inq_response[6] = INQ_DEVICE_IDENTIFICATION_PAGE; + inq_response[7] = INQ_EXTENDED_INQUIRY_DATA_PAGE; + inq_response[8] = INQ_BDEV_CHARACTERISTICS_PAGE; + + xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH); + res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len); + + return res; +} + +static int nvme_trans_unit_serial_page(struct nvme_ns *ns, + struct sg_io_hdr *hdr, u8 *inq_response, + int alloc_len) +{ + struct nvme_dev *dev = ns->dev; + int res = SNTI_TRANSLATION_SUCCESS; + int xfer_len; + + memset(inq_response, 0, STANDARD_INQUIRY_LENGTH); + inq_response[1] = INQ_UNIT_SERIAL_NUMBER_PAGE; /* Page Code */ + inq_response[3] = INQ_SERIAL_NUMBER_LENGTH; /* Page Length */ + strncpy(&inq_response[4], dev->serial, INQ_SERIAL_NUMBER_LENGTH); + + xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH); + res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len); + + return res; +} + +static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 *inq_response, int alloc_len) +{ + struct nvme_dev *dev = ns->dev; + dma_addr_t dma_addr; + void *mem; + struct nvme_id_ctrl *id_ctrl; + int res = SNTI_TRANSLATION_SUCCESS; + int nvme_sc; + u8 ieee[4]; + int xfer_len; + __be32 tmp_id = cpu_to_be32(ns->ns_id); + + mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), + &dma_addr, GFP_KERNEL); + if (mem == NULL) { + res = -ENOMEM; + goto out_dma; + } + + /* nvme controller identify */ + nvme_sc = nvme_identify(dev, 0, 1, dma_addr); + res = nvme_trans_status_code(hdr, nvme_sc); + if (res) + goto out_free; + if (nvme_sc) { + res = nvme_sc; + goto out_free; + } + id_ctrl = mem; + + /* Since SCSI tried to save 4 bits... 
[SPC-4(r34) Table 591] */ + ieee[0] = id_ctrl->ieee[0] << 4; + ieee[1] = id_ctrl->ieee[0] >> 4 | id_ctrl->ieee[1] << 4; + ieee[2] = id_ctrl->ieee[1] >> 4 | id_ctrl->ieee[2] << 4; + ieee[3] = id_ctrl->ieee[2] >> 4; + + memset(inq_response, 0, STANDARD_INQUIRY_LENGTH); + inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE; /* Page Code */ + inq_response[3] = 20; /* Page Length */ + /* Designation Descriptor start */ + inq_response[4] = 0x01; /* Proto ID=0h | Code set=1h */ + inq_response[5] = 0x03; /* PIV=0b | Asso=00b | Designator Type=3h */ + inq_response[6] = 0x00; /* Rsvd */ + inq_response[7] = 16; /* Designator Length */ + /* Designator start */ + inq_response[8] = 0x60 | ieee[3]; /* NAA=6h | IEEE ID MSB, High nibble*/ + inq_response[9] = ieee[2]; /* IEEE ID */ + inq_response[10] = ieee[1]; /* IEEE ID */ + inq_response[11] = ieee[0]; /* IEEE ID| Vendor Specific ID... */ + inq_response[12] = (dev->pci_dev->vendor & 0xFF00) >> 8; + inq_response[13] = (dev->pci_dev->vendor & 0x00FF); + inq_response[14] = dev->serial[0]; + inq_response[15] = dev->serial[1]; + inq_response[16] = dev->model[0]; + inq_response[17] = dev->model[1]; + memcpy(&inq_response[18], &tmp_id, sizeof(u32)); + /* Last 2 bytes are zero */ + + xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH); + res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len); + + out_free: + dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem, + dma_addr); + out_dma: + return res; +} + +static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, + int alloc_len) +{ + u8 *inq_response; + int res = SNTI_TRANSLATION_SUCCESS; + int nvme_sc; + struct nvme_dev *dev = ns->dev; + dma_addr_t dma_addr; + void *mem; + struct nvme_id_ctrl *id_ctrl; + struct nvme_id_ns *id_ns; + int xfer_len; + u8 microcode = 0x80; + u8 spt; + u8 spt_lut[8] = {0, 0, 2, 1, 4, 6, 5, 7}; + u8 grd_chk, app_chk, ref_chk, protect; + u8 uask_sup = 0x20; + u8 v_sup; + u8 luiclr = 0x01; + + inq_response = kmalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL); + if (inq_response == NULL) { + res = -ENOMEM; + goto out_mem; + } + + mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), + &dma_addr, GFP_KERNEL); + if (mem == NULL) { + res = -ENOMEM; + goto out_dma; + } + + /* nvme ns identify */ + nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr); + res = nvme_trans_status_code(hdr, nvme_sc); + if (res) + goto out_free; + if (nvme_sc) { + res = nvme_sc; + goto out_free; + } + id_ns = mem; + spt = spt_lut[(id_ns->dpc) & 0x07] << 3; + (id_ns->dps) ? 
(protect = 0x01) : (protect = 0); + grd_chk = protect << 2; + app_chk = protect << 1; + ref_chk = protect; + + /* nvme controller identify */ + nvme_sc = nvme_identify(dev, 0, 1, dma_addr); + res = nvme_trans_status_code(hdr, nvme_sc); + if (res) + goto out_free; + if (nvme_sc) { + res = nvme_sc; + goto out_free; + } + id_ctrl = mem; + v_sup = id_ctrl->vwc; + + memset(inq_response, 0, EXTENDED_INQUIRY_DATA_PAGE_LENGTH); + inq_response[1] = INQ_EXTENDED_INQUIRY_DATA_PAGE; /* Page Code */ + inq_response[2] = 0x00; /* Page Length MSB */ + inq_response[3] = 0x3C; /* Page Length LSB */ + inq_response[4] = microcode | spt | grd_chk | app_chk | ref_chk; + inq_response[5] = uask_sup; + inq_response[6] = v_sup; + inq_response[7] = luiclr; + inq_response[8] = 0; + inq_response[9] = 0; + + xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH); + res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len); + + out_free: + dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem, + dma_addr); + out_dma: + kfree(inq_response); + out_mem: + return res; +} + +static int nvme_trans_bdev_char_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, + int alloc_len) +{ + u8 *inq_response; + int res = SNTI_TRANSLATION_SUCCESS; + int xfer_len; + + inq_response = kmalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL); + if (inq_response == NULL) { + res = -ENOMEM; + goto out_mem; + } + + memset(inq_response, 0, EXTENDED_INQUIRY_DATA_PAGE_LENGTH); + inq_response[1] = INQ_BDEV_CHARACTERISTICS_PAGE; /* Page Code */ + inq_response[2] = 0x00; /* Page Length MSB */ + inq_response[3] = 0x3C; /* Page Length LSB */ + inq_response[4] = 0x00; /* Medium Rotation Rate MSB */ + inq_response[5] = 0x01; /* Medium Rotation Rate LSB */ + inq_response[6] = 0x00; /* Form Factor */ + + xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH); + res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len); + + kfree(inq_response); + out_mem: + return res; +} + +/* LOG SENSE Helper Functions */ + +static int nvme_trans_log_supp_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr, + int alloc_len) +{ + int res = SNTI_TRANSLATION_SUCCESS; + int xfer_len; + u8 *log_response; + + log_response = kmalloc(LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH, GFP_KERNEL); + if (log_response == NULL) { + res = -ENOMEM; + goto out_mem; + } + memset(log_response, 0, LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH); + + log_response[0] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE; + /* Subpage=0x00, Page Length MSB=0 */ + log_response[3] = SUPPORTED_LOG_PAGES_PAGE_LENGTH; + log_response[4] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE; + log_response[5] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE; + log_response[6] = LOG_PAGE_TEMPERATURE_PAGE; + + xfer_len = min(alloc_len, LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH); + res = nvme_trans_copy_to_user(hdr, log_response, xfer_len); + + kfree(log_response); + out_mem: + return res; +} + +static int nvme_trans_log_info_exceptions(struct nvme_ns *ns, + struct sg_io_hdr *hdr, int alloc_len) +{ + int res = SNTI_TRANSLATION_SUCCESS; + int xfer_len; + u8 *log_response; + struct nvme_command c; + struct nvme_dev *dev = ns->dev; + struct nvme_smart_log *smart_log; + dma_addr_t dma_addr; + void *mem; + u8 temp_c; + u16 temp_k; + + log_response = kmalloc(LOG_INFO_EXCP_PAGE_LENGTH, GFP_KERNEL); + if (log_response == NULL) { + res = -ENOMEM; + goto out_mem; + } + memset(log_response, 0, LOG_INFO_EXCP_PAGE_LENGTH); + + mem = dma_alloc_coherent(&dev->pci_dev->dev, + sizeof(struct nvme_smart_log), + &dma_addr, GFP_KERNEL); + if (mem == NULL) { + res = -ENOMEM; + 
goto out_dma; + } + + /* Get SMART Log Page */ + memset(&c, 0, sizeof(c)); + c.common.opcode = nvme_admin_get_log_page; + c.common.nsid = cpu_to_le32(0xFFFFFFFF); + c.common.prp1 = cpu_to_le64(dma_addr); + c.common.cdw10[0] = cpu_to_le32(((sizeof(struct nvme_smart_log) / + BYTES_TO_DWORDS) << 16) | NVME_GET_SMART_LOG_PAGE); + res = nvme_submit_admin_cmd(dev, &c, NULL); + if (res != NVME_SC_SUCCESS) { + temp_c = LOG_TEMP_UNKNOWN; + } else { + smart_log = mem; + temp_k = (smart_log->temperature[1] << 8) + + (smart_log->temperature[0]); + temp_c = temp_k - KELVIN_TEMP_FACTOR; + } + + log_response[0] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE; + /* Subpage=0x00, Page Length MSB=0 */ + log_response[3] = REMAINING_INFO_EXCP_PAGE_LENGTH; + /* Informational Exceptions Log Parameter 1 Start */ + /* Parameter Code=0x0000 bytes 4,5 */ + log_response[6] = 0x23; /* DU=0, TSD=1, ETC=0, TMC=0, FMT_AND_LNK=11b */ + log_response[7] = 0x04; /* PARAMETER LENGTH */ + /* Add sense Code and qualifier = 0x00 each */ + /* Use Temperature from NVMe Get Log Page, convert to C from K */ + log_response[10] = temp_c; + + xfer_len = min(alloc_len, LOG_INFO_EXCP_PAGE_LENGTH); + res = nvme_trans_copy_to_user(hdr, log_response, xfer_len); + + dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_smart_log), + mem, dma_addr); + out_dma: + kfree(log_response); + out_mem: + return res; +} + +static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr, + int alloc_len) +{ + int res = SNTI_TRANSLATION_SUCCESS; + int xfer_len; + u8 *log_response; + struct nvme_command c; + struct nvme_dev *dev = ns->dev; + struct nvme_smart_log *smart_log; + dma_addr_t dma_addr; + void *mem; + u32 feature_resp; + u8 temp_c_cur, temp_c_thresh; + u16 temp_k; + + log_response = kmalloc(LOG_TEMP_PAGE_LENGTH, GFP_KERNEL); + if (log_response == NULL) { + res = -ENOMEM; + goto out_mem; + } + memset(log_response, 0, LOG_TEMP_PAGE_LENGTH); + + mem = dma_alloc_coherent(&dev->pci_dev->dev, + sizeof(struct nvme_smart_log), + &dma_addr, GFP_KERNEL); + if (mem == NULL) { + res = -ENOMEM; + goto out_dma; + } + + /* Get SMART Log Page */ + memset(&c, 0, sizeof(c)); + c.common.opcode = nvme_admin_get_log_page; + c.common.nsid = cpu_to_le32(0xFFFFFFFF); + c.common.prp1 = cpu_to_le64(dma_addr); + c.common.cdw10[0] = cpu_to_le32(((sizeof(struct nvme_smart_log) / + BYTES_TO_DWORDS) << 16) | NVME_GET_SMART_LOG_PAGE); + res = nvme_submit_admin_cmd(dev, &c, NULL); + if (res != NVME_SC_SUCCESS) { + temp_c_cur = LOG_TEMP_UNKNOWN; + } else { + smart_log = mem; + temp_k = (smart_log->temperature[1] << 8) + + (smart_log->temperature[0]); + temp_c_cur = temp_k - KELVIN_TEMP_FACTOR; + } + + /* Get Features for Temp Threshold */ + res = nvme_get_features(dev, NVME_FEAT_TEMP_THRESH, 0, 0, + &feature_resp); + if (res != NVME_SC_SUCCESS) + temp_c_thresh = LOG_TEMP_UNKNOWN; + else + temp_c_thresh = (feature_resp & 0xFFFF) - KELVIN_TEMP_FACTOR; + + log_response[0] = LOG_PAGE_TEMPERATURE_PAGE; + /* Subpage=0x00, Page Length MSB=0 */ + log_response[3] = REMAINING_TEMP_PAGE_LENGTH; + /* Temperature Log Parameter 1 (Temperature) Start */ + /* Parameter Code = 0x0000 */ + log_response[6] = 0x01; /* Format and Linking = 01b */ + log_response[7] = 0x02; /* Parameter Length */ + /* Use Temperature from NVMe Get Log Page, convert to C from K */ + log_response[9] = temp_c_cur; + /* Temperature Log Parameter 2 (Reference Temperature) Start */ + log_response[11] = 0x01; /* Parameter Code = 0x0001 */ + log_response[12] = 0x01; /* Format and Linking = 01b */ + 
log_response[13] = 0x02; /* Parameter Length */ + /* Use Temperature Thresh from NVMe Get Log Page, convert to C from K */ + log_response[15] = temp_c_thresh; + + xfer_len = min(alloc_len, LOG_TEMP_PAGE_LENGTH); + res = nvme_trans_copy_to_user(hdr, log_response, xfer_len); + + dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_smart_log), + mem, dma_addr); + out_dma: + kfree(log_response); + out_mem: + return res; +} + +/* MODE SENSE Helper Functions */ + +static int nvme_trans_fill_mode_parm_hdr(u8 *resp, int len, u8 cdb10, u8 llbaa, + u16 mode_data_length, u16 blk_desc_len) +{ + /* Quick check to make sure I don't stomp on my own memory... */ + if ((cdb10 && len < 8) || (!cdb10 && len < 4)) + return SNTI_INTERNAL_ERROR; + + if (cdb10) { + resp[0] = (mode_data_length & 0xFF00) >> 8; + resp[1] = (mode_data_length & 0x00FF); + /* resp[2] and [3] are zero */ + resp[4] = llbaa; + resp[5] = RESERVED_FIELD; + resp[6] = (blk_desc_len & 0xFF00) >> 8; + resp[7] = (blk_desc_len & 0x00FF); + } else { + resp[0] = (mode_data_length & 0x00FF); + /* resp[1] and [2] are zero */ + resp[3] = (blk_desc_len & 0x00FF); + } + + return SNTI_TRANSLATION_SUCCESS; +} + +static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 *resp, int len, u8 llbaa) +{ + int res = SNTI_TRANSLATION_SUCCESS; + int nvme_sc; + struct nvme_dev *dev = ns->dev; + dma_addr_t dma_addr; + void *mem; + struct nvme_id_ns *id_ns; + u8 flbas; + u32 lba_length; + + if (llbaa == 0 && len < MODE_PAGE_BLK_DES_LEN) + return SNTI_INTERNAL_ERROR; + else if (llbaa > 0 && len < MODE_PAGE_LLBAA_BLK_DES_LEN) + return SNTI_INTERNAL_ERROR; + + mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), + &dma_addr, GFP_KERNEL); + if (mem == NULL) { + res = -ENOMEM; + goto out; + } + + /* nvme ns identify */ + nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr); + res = nvme_trans_status_code(hdr, nvme_sc); + if (res) + goto out_dma; + if (nvme_sc) { + res = nvme_sc; + goto out_dma; + } + id_ns = mem; + flbas = (id_ns->flbas) & 0x0F; + lba_length = (1 << (id_ns->lbaf[flbas].ds)); + + if (llbaa == 0) { + __be32 tmp_cap = cpu_to_be32(le64_to_cpu(id_ns->ncap)); + /* Byte 4 is reserved */ + __be32 tmp_len = cpu_to_be32(lba_length & 0x00FFFFFF); + + memcpy(resp, &tmp_cap, sizeof(u32)); + memcpy(&resp[4], &tmp_len, sizeof(u32)); + } else { + __be64 tmp_cap = cpu_to_be64(le64_to_cpu(id_ns->ncap)); + __be32 tmp_len = cpu_to_be32(lba_length); + + memcpy(resp, &tmp_cap, sizeof(u64)); + /* Bytes 8, 9, 10, 11 are reserved */ + memcpy(&resp[12], &tmp_len, sizeof(u32)); + } + + out_dma: + dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem, + dma_addr); + out: + return res; +} + +static int nvme_trans_fill_control_page(struct nvme_ns *ns, + struct sg_io_hdr *hdr, u8 *resp, + int len) +{ + if (len < MODE_PAGE_CONTROL_LEN) + return SNTI_INTERNAL_ERROR; + + resp[0] = MODE_PAGE_CONTROL; + resp[1] = MODE_PAGE_CONTROL_LEN_FIELD; + resp[2] = 0x0E; /* TST=000b, TMF_ONLY=0, DPICZ=1, + * D_SENSE=1, GLTSD=1, RLEC=0 */ + resp[3] = 0x12; /* Q_ALGO_MODIFIER=1h, NUAR=0, QERR=01b */ + /* Byte 4: VS=0, RAC=0, UA_INT=0, SWP=0 */ + resp[5] = 0x40; /* ATO=0, TAS=1, ATMPE=0, RWWP=0, AUTOLOAD=0 */ + /* resp[6] and [7] are obsolete, thus zero */ + resp[8] = 0xFF; /* Busy timeout period = 0xffff */ + resp[9] = 0xFF; + /* Bytes 10,11: Extended selftest completion time = 0x0000 */ + + return SNTI_TRANSLATION_SUCCESS; +} + +static int nvme_trans_fill_caching_page(struct nvme_ns *ns, + struct sg_io_hdr *hdr, + u8 *resp, int len) +{ + 
int res = SNTI_TRANSLATION_SUCCESS; + int nvme_sc; + struct nvme_dev *dev = ns->dev; + u32 feature_resp; + u8 vwc; + + if (len < MODE_PAGE_CACHING_LEN) + return SNTI_INTERNAL_ERROR; + + nvme_sc = nvme_get_features(dev, NVME_FEAT_VOLATILE_WC, 0, 0, + &feature_resp); + res = nvme_trans_status_code(hdr, nvme_sc); + if (res) + goto out; + if (nvme_sc) { + res = nvme_sc; + goto out; + } + vwc = feature_resp & 0x00000001; + + resp[0] = MODE_PAGE_CACHING; + resp[1] = MODE_PAGE_CACHING_LEN_FIELD; + resp[2] = vwc << 2; + + out: + return res; +} + +static int nvme_trans_fill_pow_cnd_page(struct nvme_ns *ns, + struct sg_io_hdr *hdr, u8 *resp, + int len) +{ + int res = SNTI_TRANSLATION_SUCCESS; + + if (len < MODE_PAGE_POW_CND_LEN) + return SNTI_INTERNAL_ERROR; + + resp[0] = MODE_PAGE_POWER_CONDITION; + resp[1] = MODE_PAGE_POW_CND_LEN_FIELD; + /* All other bytes are zero */ + + return res; +} + +static int nvme_trans_fill_inf_exc_page(struct nvme_ns *ns, + struct sg_io_hdr *hdr, u8 *resp, + int len) +{ + int res = SNTI_TRANSLATION_SUCCESS; + + if (len < MODE_PAGE_INF_EXC_LEN) + return SNTI_INTERNAL_ERROR; + + resp[0] = MODE_PAGE_INFO_EXCEP; + resp[1] = MODE_PAGE_INF_EXC_LEN_FIELD; + resp[2] = 0x88; + /* All other bytes are zero */ + + return res; +} + +static int nvme_trans_fill_all_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 *resp, int len) +{ + int res = SNTI_TRANSLATION_SUCCESS; + u16 mode_pages_offset_1 = 0; + u16 mode_pages_offset_2, mode_pages_offset_3, mode_pages_offset_4; + + mode_pages_offset_2 = mode_pages_offset_1 + MODE_PAGE_CACHING_LEN; + mode_pages_offset_3 = mode_pages_offset_2 + MODE_PAGE_CONTROL_LEN; + mode_pages_offset_4 = mode_pages_offset_3 + MODE_PAGE_POW_CND_LEN; + + res = nvme_trans_fill_caching_page(ns, hdr, &resp[mode_pages_offset_1], + MODE_PAGE_CACHING_LEN); + if (res != SNTI_TRANSLATION_SUCCESS) + goto out; + res = nvme_trans_fill_control_page(ns, hdr, &resp[mode_pages_offset_2], + MODE_PAGE_CONTROL_LEN); + if (res != SNTI_TRANSLATION_SUCCESS) + goto out; + res = nvme_trans_fill_pow_cnd_page(ns, hdr, &resp[mode_pages_offset_3], + MODE_PAGE_POW_CND_LEN); + if (res != SNTI_TRANSLATION_SUCCESS) + goto out; + res = nvme_trans_fill_inf_exc_page(ns, hdr, &resp[mode_pages_offset_4], + MODE_PAGE_INF_EXC_LEN); + if (res != SNTI_TRANSLATION_SUCCESS) + goto out; + + out: + return res; +} + +static inline int nvme_trans_get_blk_desc_len(u8 dbd, u8 llbaa) +{ + if (dbd == MODE_SENSE_BLK_DESC_ENABLED) { + /* SPC-4: len = 8 x Num_of_descriptors if llbaa = 0, 16x if 1 */ + return 8 * (llbaa + 1) * MODE_SENSE_BLK_DESC_COUNT; + } else { + return 0; + } +} + +static int nvme_trans_mode_page_create(struct nvme_ns *ns, + struct sg_io_hdr *hdr, u8 *cmd, + u16 alloc_len, u8 cdb10, + int (*mode_page_fill_func) + (struct nvme_ns *, + struct sg_io_hdr *hdr, u8 *, int), + u16 mode_pages_tot_len) +{ + int res = SNTI_TRANSLATION_SUCCESS; + int xfer_len; + u8 *response; + u8 dbd, llbaa; + u16 resp_size; + int mph_size; + u16 mode_pages_offset_1; + u16 blk_desc_len, blk_desc_offset, mode_data_length; + + dbd = GET_MODE_SENSE_DBD(cmd); + llbaa = GET_MODE_SENSE_LLBAA(cmd); + mph_size = GET_MODE_SENSE_MPH_SIZE(cdb10); + blk_desc_len = nvme_trans_get_blk_desc_len(dbd, llbaa); + + resp_size = mph_size + blk_desc_len + mode_pages_tot_len; + /* Refer spc4r34 Table 440 for calculation of Mode data Length field */ + mode_data_length = 3 + (3 * cdb10) + blk_desc_len + mode_pages_tot_len; + + blk_desc_offset = mph_size; + mode_pages_offset_1 = blk_desc_offset + blk_desc_len; + + response = 
kmalloc(resp_size, GFP_KERNEL); + if (response == NULL) { + res = -ENOMEM; + goto out_mem; + } + memset(response, 0, resp_size); + + res = nvme_trans_fill_mode_parm_hdr(&response[0], mph_size, cdb10, + llbaa, mode_data_length, blk_desc_len); + if (res != SNTI_TRANSLATION_SUCCESS) + goto out_free; + if (blk_desc_len > 0) { + res = nvme_trans_fill_blk_desc(ns, hdr, + &response[blk_desc_offset], + blk_desc_len, llbaa); + if (res != SNTI_TRANSLATION_SUCCESS) + goto out_free; + } + res = mode_page_fill_func(ns, hdr, &response[mode_pages_offset_1], + mode_pages_tot_len); + if (res != SNTI_TRANSLATION_SUCCESS) + goto out_free; + + xfer_len = min(alloc_len, resp_size); + res = nvme_trans_copy_to_user(hdr, response, xfer_len); + + out_free: + kfree(response); + out_mem: + return res; +} + +/* Read Capacity Helper Functions */ + +static void nvme_trans_fill_read_cap(u8 *response, struct nvme_id_ns *id_ns, + u8 cdb16) +{ + u8 flbas; + u32 lba_length; + u64 rlba; + u8 prot_en; + u8 p_type_lut[4] = {0, 0, 1, 2}; + __be64 tmp_rlba; + __be32 tmp_rlba_32; + __be32 tmp_len; + + flbas = (id_ns->flbas) & 0x0F; + lba_length = (1 << (id_ns->lbaf[flbas].ds)); + rlba = le64_to_cpup(&id_ns->nsze) - 1; + (id_ns->dps) ? (prot_en = 0x01) : (prot_en = 0); + + if (!cdb16) { + if (rlba > 0xFFFFFFFF) + rlba = 0xFFFFFFFF; + tmp_rlba_32 = cpu_to_be32(rlba); + tmp_len = cpu_to_be32(lba_length); + memcpy(response, &tmp_rlba_32, sizeof(u32)); + memcpy(&response[4], &tmp_len, sizeof(u32)); + } else { + tmp_rlba = cpu_to_be64(rlba); + tmp_len = cpu_to_be32(lba_length); + memcpy(response, &tmp_rlba, sizeof(u64)); + memcpy(&response[8], &tmp_len, sizeof(u32)); + response[12] = (p_type_lut[id_ns->dps & 0x3] << 1) | prot_en; + /* P_I_Exponent = 0x0 | LBPPBE = 0x0 */ + /* LBPME = 0 | LBPRZ = 0 | LALBA = 0x00 */ + /* Bytes 16-31 - Reserved */ + } +} + +/* Start Stop Unit Helper Functions */ + +static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 pc, u8 pcmod, u8 start) +{ + int res = SNTI_TRANSLATION_SUCCESS; + int nvme_sc; + struct nvme_dev *dev = ns->dev; + dma_addr_t dma_addr; + void *mem; + struct nvme_id_ctrl *id_ctrl; + int lowest_pow_st; /* max npss = lowest power consumption */ + unsigned ps_desired = 0; + + /* NVMe Controller Identify */ + mem = dma_alloc_coherent(&dev->pci_dev->dev, + sizeof(struct nvme_id_ctrl), + &dma_addr, GFP_KERNEL); + if (mem == NULL) { + res = -ENOMEM; + goto out; + } + nvme_sc = nvme_identify(dev, 0, 1, dma_addr); + res = nvme_trans_status_code(hdr, nvme_sc); + if (res) + goto out_dma; + if (nvme_sc) { + res = nvme_sc; + goto out_dma; + } + id_ctrl = mem; + lowest_pow_st = id_ctrl->npss - 1; + + switch (pc) { + case NVME_POWER_STATE_START_VALID: + /* Action unspecified if POWER CONDITION MODIFIER != 0 */ + if (pcmod == 0 && start == 0x1) + ps_desired = POWER_STATE_0; + if (pcmod == 0 && start == 0x0) + ps_desired = lowest_pow_st; + break; + case NVME_POWER_STATE_ACTIVE: + /* Action unspecified if POWER CONDITION MODIFIER != 0 */ + if (pcmod == 0) + ps_desired = POWER_STATE_0; + break; + case NVME_POWER_STATE_IDLE: + /* Action unspecified if POWER CONDITION MODIFIER != [0,1,2] */ + /* min of desired state and (lps-1) because lps is STOP */ + if (pcmod == 0x0) + ps_desired = min(POWER_STATE_1, (lowest_pow_st - 1)); + else if (pcmod == 0x1) + ps_desired = min(POWER_STATE_2, (lowest_pow_st - 1)); + else if (pcmod == 0x2) + ps_desired = min(POWER_STATE_3, (lowest_pow_st - 1)); + break; + case NVME_POWER_STATE_STANDBY: + /* Action unspecified if POWER CONDITION MODIFIER != 
[0,1] */ + if (pcmod == 0x0) + ps_desired = max(0, (lowest_pow_st - 2)); + else if (pcmod == 0x1) + ps_desired = max(0, (lowest_pow_st - 1)); + break; + case NVME_POWER_STATE_LU_CONTROL: + default: + res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + break; + } + nvme_sc = nvme_set_features(dev, NVME_FEAT_POWER_MGMT, ps_desired, 0, + NULL); + res = nvme_trans_status_code(hdr, nvme_sc); + if (res) + goto out_dma; + if (nvme_sc) + res = nvme_sc; + out_dma: + dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ctrl), mem, + dma_addr); + out: + return res; +} + +/* Write Buffer Helper Functions */ +/* Also using this for Format Unit with hdr passed as NULL, and buffer_id, 0 */ + +static int nvme_trans_send_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 opcode, u32 tot_len, u32 offset, + u8 buffer_id) +{ + int res = SNTI_TRANSLATION_SUCCESS; + int nvme_sc; + struct nvme_dev *dev = ns->dev; + struct nvme_command c; + struct nvme_iod *iod = NULL; + unsigned length; + + memset(&c, 0, sizeof(c)); + c.common.opcode = opcode; + if (opcode == nvme_admin_download_fw) { + if (hdr->iovec_count > 0) { + /* Assuming SGL is not allowed for this command */ + res = nvme_trans_completion(hdr, + SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, + SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + goto out; + } + iod = nvme_map_user_pages(dev, DMA_TO_DEVICE, + (unsigned long)hdr->dxferp, tot_len); + if (IS_ERR(iod)) { + res = PTR_ERR(iod); + goto out; + } + length = nvme_setup_prps(dev, &c.common, iod, tot_len, + GFP_KERNEL); + if (length != tot_len) { + res = -ENOMEM; + goto out_unmap; + } + + c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1); + c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS); + } else if (opcode == nvme_admin_activate_fw) { + u32 cdw10 = buffer_id | NVME_FWACT_REPL_ACTV; + c.common.cdw10[0] = cpu_to_le32(cdw10); + } + + nvme_sc = nvme_submit_admin_cmd(dev, &c, NULL); + res = nvme_trans_status_code(hdr, nvme_sc); + if (res) + goto out_unmap; + if (nvme_sc) + res = nvme_sc; + + out_unmap: + if (opcode == nvme_admin_download_fw) { + nvme_unmap_user_pages(dev, DMA_TO_DEVICE, iod); + nvme_free_iod(dev, iod); + } + out: + return res; +} + +/* Mode Select Helper Functions */ + +static inline void nvme_trans_modesel_get_bd_len(u8 *parm_list, u8 cdb10, + u16 *bd_len, u8 *llbaa) +{ + if (cdb10) { + /* 10 Byte CDB */ + *bd_len = (parm_list[MODE_SELECT_10_BD_OFFSET] << 8) + + parm_list[MODE_SELECT_10_BD_OFFSET + 1]; + *llbaa = parm_list[MODE_SELECT_10_LLBAA_OFFSET] & + MODE_SELECT_10_LLBAA_MASK; + } else { + /* 6 Byte CDB */ + *bd_len = parm_list[MODE_SELECT_6_BD_OFFSET]; + } +} + +static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list, + u16 idx, u16 bd_len, u8 llbaa) +{ + u16 bd_num; + + bd_num = bd_len / ((llbaa == 0) ? + SHORT_DESC_BLOCK : LONG_DESC_BLOCK); + /* Store block descriptor info if a FORMAT UNIT comes later */ + /* TODO Saving 1st BD info; what to do if multiple BD received?
*/ + if (llbaa == 0) { + /* Standard Block Descriptor - spc4r34 7.5.5.1 */ + ns->mode_select_num_blocks = + (parm_list[idx + 1] << 16) + + (parm_list[idx + 2] << 8) + + (parm_list[idx + 3]); + + ns->mode_select_block_len = + (parm_list[idx + 5] << 16) + + (parm_list[idx + 6] << 8) + + (parm_list[idx + 7]); + } else { + /* Long LBA Block Descriptor - sbc3r27 6.4.2.3 */ + ns->mode_select_num_blocks = + (((u64)parm_list[idx + 0]) << 56) + + (((u64)parm_list[idx + 1]) << 48) + + (((u64)parm_list[idx + 2]) << 40) + + (((u64)parm_list[idx + 3]) << 32) + + (((u64)parm_list[idx + 4]) << 24) + + (((u64)parm_list[idx + 5]) << 16) + + (((u64)parm_list[idx + 6]) << 8) + + ((u64)parm_list[idx + 7]); + + ns->mode_select_block_len = + (parm_list[idx + 12] << 24) + + (parm_list[idx + 13] << 16) + + (parm_list[idx + 14] << 8) + + (parm_list[idx + 15]); + } +} + +static u16 nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 *mode_page, u8 page_code) +{ + int res = SNTI_TRANSLATION_SUCCESS; + int nvme_sc; + struct nvme_dev *dev = ns->dev; + unsigned dword11; + + switch (page_code) { + case MODE_PAGE_CACHING: + dword11 = ((mode_page[2] & CACHING_MODE_PAGE_WCE_MASK) ? 1 : 0); + nvme_sc = nvme_set_features(dev, NVME_FEAT_VOLATILE_WC, dword11, + 0, NULL); + res = nvme_trans_status_code(hdr, nvme_sc); + if (res) + break; + if (nvme_sc) { + res = nvme_sc; + break; + } + break; + case MODE_PAGE_CONTROL: + break; + case MODE_PAGE_POWER_CONDITION: + /* Verify the OS is not trying to set timers */ + if ((mode_page[2] & 0x01) != 0 || (mode_page[3] & 0x0F) != 0) { + res = nvme_trans_completion(hdr, + SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, + SCSI_ASC_INVALID_PARAMETER, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + if (!res) + res = SNTI_INTERNAL_ERROR; + break; + } + break; + default: + res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + if (!res) + res = SNTI_INTERNAL_ERROR; + break; + } + + return res; +} + +static int nvme_trans_modesel_data(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 *cmd, u16 parm_list_len, u8 pf, + u8 sp, u8 cdb10) +{ + int res = SNTI_TRANSLATION_SUCCESS; + u8 *parm_list; + u16 bd_len; + u8 llbaa = 0; + u16 index, saved_index; + u8 page_code; + u16 mp_size; + + /* Get parm list from data-in/out buffer */ + parm_list = kmalloc(parm_list_len, GFP_KERNEL); + if (parm_list == NULL) { + res = -ENOMEM; + goto out; + } + + res = nvme_trans_copy_from_user(hdr, parm_list, parm_list_len); + if (res != SNTI_TRANSLATION_SUCCESS) + goto out_mem; + + nvme_trans_modesel_get_bd_len(parm_list, cdb10, &bd_len, &llbaa); + index = (cdb10) ? 
(MODE_SELECT_10_MPH_SIZE) : (MODE_SELECT_6_MPH_SIZE); + + if (bd_len != 0) { + /* Block Descriptors present, parse */ + nvme_trans_modesel_save_bd(ns, parm_list, index, bd_len, llbaa); + index += bd_len; + } + saved_index = index; + + /* Multiple mode pages may be present; iterate through all */ + /* In 1st Iteration, don't do NVME Command, only check for CDB errors */ + do { + page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK; + mp_size = parm_list[index + 1] + 2; + if ((page_code != MODE_PAGE_CACHING) && + (page_code != MODE_PAGE_CONTROL) && + (page_code != MODE_PAGE_POWER_CONDITION)) { + res = nvme_trans_completion(hdr, + SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, + SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + goto out_mem; + } + index += mp_size; + } while (index < parm_list_len); + + /* In 2nd Iteration, do the NVME Commands */ + index = saved_index; + do { + page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK; + mp_size = parm_list[index + 1] + 2; + res = nvme_trans_modesel_get_mp(ns, hdr, &parm_list[index], + page_code); + if (res != SNTI_TRANSLATION_SUCCESS) + break; + index += mp_size; + } while (index < parm_list_len); + + out_mem: + kfree(parm_list); + out: + return res; +} + +/* Format Unit Helper Functions */ + +static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns, + struct sg_io_hdr *hdr) +{ + int res = SNTI_TRANSLATION_SUCCESS; + int nvme_sc; + struct nvme_dev *dev = ns->dev; + dma_addr_t dma_addr; + void *mem; + struct nvme_id_ns *id_ns; + u8 flbas; + + /* + * SCSI Expects a MODE SELECT would have been issued prior to + * a FORMAT UNIT, and the block size and number would be used + * from the block descriptor in it. If a MODE SELECT had not + * been issued, FORMAT shall use the current values for both. 
+ */ + + if (ns->mode_select_num_blocks == 0 || ns->mode_select_block_len == 0) { + mem = dma_alloc_coherent(&dev->pci_dev->dev, + sizeof(struct nvme_id_ns), &dma_addr, GFP_KERNEL); + if (mem == NULL) { + res = -ENOMEM; + goto out; + } + /* nvme ns identify */ + nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr); + res = nvme_trans_status_code(hdr, nvme_sc); + if (res) + goto out_dma; + if (nvme_sc) { + res = nvme_sc; + goto out_dma; + } + id_ns = mem; + + if (ns->mode_select_num_blocks == 0) + ns->mode_select_num_blocks = le64_to_cpu(id_ns->ncap); + if (ns->mode_select_block_len == 0) { + flbas = (id_ns->flbas) & 0x0F; + ns->mode_select_block_len = + (1 << (id_ns->lbaf[flbas].ds)); + } + out_dma: + dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), + mem, dma_addr); + } + out: + return res; +} + +static int nvme_trans_fmt_get_parm_header(struct sg_io_hdr *hdr, u8 len, + u8 format_prot_info, u8 *nvme_pf_code) +{ + int res = SNTI_TRANSLATION_SUCCESS; + u8 *parm_list; + u8 pf_usage, pf_code; + + parm_list = kmalloc(len, GFP_KERNEL); + if (parm_list == NULL) { + res = -ENOMEM; + goto out; + } + res = nvme_trans_copy_from_user(hdr, parm_list, len); + if (res != SNTI_TRANSLATION_SUCCESS) + goto out_mem; + + if ((parm_list[FORMAT_UNIT_IMMED_OFFSET] & + FORMAT_UNIT_IMMED_MASK) != 0) { + res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + goto out_mem; + } + + if (len == FORMAT_UNIT_LONG_PARM_LIST_LEN && + (parm_list[FORMAT_UNIT_PROT_INT_OFFSET] & 0x0F) != 0) { + res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + goto out_mem; + } + pf_usage = parm_list[FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET] & + FORMAT_UNIT_PROT_FIELD_USAGE_MASK; + pf_code = (pf_usage << 2) | format_prot_info; + switch (pf_code) { + case 0: + *nvme_pf_code = 0; + break; + case 2: + *nvme_pf_code = 1; + break; + case 3: + *nvme_pf_code = 2; + break; + case 7: + *nvme_pf_code = 3; + break; + default: + res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + break; + } + + out_mem: + kfree(parm_list); + out: + return res; +} + +static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 prot_info) +{ + int res = SNTI_TRANSLATION_SUCCESS; + int nvme_sc; + struct nvme_dev *dev = ns->dev; + dma_addr_t dma_addr; + void *mem; + struct nvme_id_ns *id_ns; + u8 i; + u8 flbas, nlbaf; + u8 selected_lbaf = 0xFF; + u32 cdw10 = 0; + struct nvme_command c; + + /* Loop thru LBAF's in id_ns to match reqd lbaf, put in cdw10 */ + mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), + &dma_addr, GFP_KERNEL); + if (mem == NULL) { + res = -ENOMEM; + goto out; + } + /* nvme ns identify */ + nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr); + res = nvme_trans_status_code(hdr, nvme_sc); + if (res) + goto out_dma; + if (nvme_sc) { + res = nvme_sc; + goto out_dma; + } + id_ns = mem; + flbas = (id_ns->flbas) & 0x0F; + nlbaf = id_ns->nlbaf; + + for (i = 0; i < nlbaf; i++) { + if (ns->mode_select_block_len == (1 << (id_ns->lbaf[i].ds))) { + selected_lbaf = i; + break; + } + } + if (selected_lbaf > 0x0F) { + res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + } + if (ns->mode_select_num_blocks != le64_to_cpu(id_ns->ncap)) { + res = nvme_trans_completion(hdr, 
SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + } + + cdw10 |= prot_info << 5; + cdw10 |= selected_lbaf & 0x0F; + memset(&c, 0, sizeof(c)); + c.format.opcode = nvme_admin_format_nvm; + c.format.nsid = cpu_to_le32(ns->ns_id); + c.format.cdw10 = cpu_to_le32(cdw10); + + nvme_sc = nvme_submit_admin_cmd(dev, &c, NULL); + res = nvme_trans_status_code(hdr, nvme_sc); + if (res) + goto out_dma; + if (nvme_sc) + res = nvme_sc; + + out_dma: + dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem, + dma_addr); + out: + return res; +} + +/* Read/Write Helper Functions */ + +static inline void nvme_trans_get_io_cdb6(u8 *cmd, + struct nvme_trans_io_cdb *cdb_info) +{ + cdb_info->fua = 0; + cdb_info->prot_info = 0; + cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_6_CDB_LBA_OFFSET) & + IO_6_CDB_LBA_MASK; + cdb_info->xfer_len = GET_U8_FROM_CDB(cmd, IO_6_CDB_TX_LEN_OFFSET); + + /* sbc3r27 sec 5.32 - TRANSFER LEN of 0 implies a 256 Block transfer */ + if (cdb_info->xfer_len == 0) + cdb_info->xfer_len = IO_6_DEFAULT_TX_LEN; +} + +static inline void nvme_trans_get_io_cdb10(u8 *cmd, + struct nvme_trans_io_cdb *cdb_info) +{ + cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_10_CDB_FUA_OFFSET) & + IO_CDB_FUA_MASK; + cdb_info->prot_info = GET_U8_FROM_CDB(cmd, IO_10_CDB_WP_OFFSET) & + IO_CDB_WP_MASK >> IO_CDB_WP_SHIFT; + cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_10_CDB_LBA_OFFSET); + cdb_info->xfer_len = GET_U16_FROM_CDB(cmd, IO_10_CDB_TX_LEN_OFFSET); +} + +static inline void nvme_trans_get_io_cdb12(u8 *cmd, + struct nvme_trans_io_cdb *cdb_info) +{ + cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_12_CDB_FUA_OFFSET) & + IO_CDB_FUA_MASK; + cdb_info->prot_info = GET_U8_FROM_CDB(cmd, IO_12_CDB_WP_OFFSET) & + IO_CDB_WP_MASK >> IO_CDB_WP_SHIFT; + cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_12_CDB_LBA_OFFSET); + cdb_info->xfer_len = GET_U32_FROM_CDB(cmd, IO_12_CDB_TX_LEN_OFFSET); +} + +static inline void nvme_trans_get_io_cdb16(u8 *cmd, + struct nvme_trans_io_cdb *cdb_info) +{ + cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_16_CDB_FUA_OFFSET) & + IO_CDB_FUA_MASK; + cdb_info->prot_info = GET_U8_FROM_CDB(cmd, IO_16_CDB_WP_OFFSET) & + IO_CDB_WP_MASK >> IO_CDB_WP_SHIFT; + cdb_info->lba = GET_U64_FROM_CDB(cmd, IO_16_CDB_LBA_OFFSET); + cdb_info->xfer_len = GET_U32_FROM_CDB(cmd, IO_16_CDB_TX_LEN_OFFSET); +} + +static inline u32 nvme_trans_io_get_num_cmds(struct sg_io_hdr *hdr, + struct nvme_trans_io_cdb *cdb_info, + u32 max_blocks) +{ + /* If using iovecs, send one nvme command per vector */ + if (hdr->iovec_count > 0) + return hdr->iovec_count; + else if (cdb_info->xfer_len > max_blocks) + return ((cdb_info->xfer_len - 1) / max_blocks) + 1; + else + return 1; +} + +static u16 nvme_trans_io_get_control(struct nvme_ns *ns, + struct nvme_trans_io_cdb *cdb_info) +{ + u16 control = 0; + + /* When Protection information support is added, implement here */ + + if (cdb_info->fua > 0) + control |= NVME_RW_FUA; + + return control; +} + +static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr, + struct nvme_trans_io_cdb *cdb_info, u8 is_write) +{ + int res = SNTI_TRANSLATION_SUCCESS; + int nvme_sc; + struct nvme_dev *dev = ns->dev; + struct nvme_queue *nvmeq; + u32 num_cmds; + struct nvme_iod *iod; + u64 unit_len; + u64 unit_num_blocks; /* Number of blocks to xfer in each nvme cmd */ + u32 retcode; + u32 i = 0; + u64 nvme_offset = 0; + void __user *next_mapping_addr; + struct nvme_command c; + u8 opcode = (is_write ? 
nvme_cmd_write : nvme_cmd_read); + u16 control; + u32 max_blocks = nvme_block_nr(ns, dev->max_hw_sectors); + + num_cmds = nvme_trans_io_get_num_cmds(hdr, cdb_info, max_blocks); + + /* + * This loop handles two cases. + * First, when an SGL is used in the form of an iovec list: + * - Use iov_base as the next mapping address for the nvme command_id + * - Use iov_len as the data transfer length for the command. + * Second, when we have a single buffer + * - If larger than max_blocks, split into chunks, offset + * each nvme command accordingly. + */ + for (i = 0; i < num_cmds; i++) { + memset(&c, 0, sizeof(c)); + if (hdr->iovec_count > 0) { + struct sg_iovec sgl; + + retcode = copy_from_user(&sgl, hdr->dxferp + + i * sizeof(struct sg_iovec), + sizeof(struct sg_iovec)); + if (retcode) + return -EFAULT; + unit_len = sgl.iov_len; + unit_num_blocks = unit_len >> ns->lba_shift; + next_mapping_addr = sgl.iov_base; + } else { + unit_num_blocks = min((u64)max_blocks, + (cdb_info->xfer_len - nvme_offset)); + unit_len = unit_num_blocks << ns->lba_shift; + next_mapping_addr = hdr->dxferp + + ((1 << ns->lba_shift) * nvme_offset); + } + + c.rw.opcode = opcode; + c.rw.nsid = cpu_to_le32(ns->ns_id); + c.rw.slba = cpu_to_le64(cdb_info->lba + nvme_offset); + c.rw.length = cpu_to_le16(unit_num_blocks - 1); + control = nvme_trans_io_get_control(ns, cdb_info); + c.rw.control = cpu_to_le16(control); + + iod = nvme_map_user_pages(dev, + (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE, + (unsigned long)next_mapping_addr, unit_len); + if (IS_ERR(iod)) { + res = PTR_ERR(iod); + goto out; + } + retcode = nvme_setup_prps(dev, &c.common, iod, unit_len, + GFP_KERNEL); + if (retcode != unit_len) { + nvme_unmap_user_pages(dev, + (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE, + iod); + nvme_free_iod(dev, iod); + res = -ENOMEM; + goto out; + } + + nvme_offset += unit_num_blocks; + + nvmeq = get_nvmeq(dev); + /* + * Since nvme_submit_sync_cmd sleeps, we can't keep + * preemption disabled. We may be preempted at any + * point, and be rescheduled to a different CPU. That + * will cause cacheline bouncing, but no additional + * races since q_lock already protects against other + * CPUs. + */ + put_nvmeq(nvmeq); + nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, + NVME_IO_TIMEOUT); + if (nvme_sc != NVME_SC_SUCCESS) { + nvme_unmap_user_pages(dev, + (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE, + iod); + nvme_free_iod(dev, iod); + res = nvme_trans_status_code(hdr, nvme_sc); + goto out; + } + nvme_unmap_user_pages(dev, + (is_write) ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE, + iod); + nvme_free_iod(dev, iod); + } + res = nvme_trans_status_code(hdr, NVME_SC_SUCCESS); + + out: + return res; +} + + +/* SCSI Command Translation Functions */ + +static int nvme_trans_io(struct nvme_ns *ns, struct sg_io_hdr *hdr, u8 is_write, + u8 *cmd) +{ + int res = SNTI_TRANSLATION_SUCCESS; + struct nvme_trans_io_cdb cdb_info; + u8 opcode = cmd[0]; + u64 xfer_bytes; + u64 sum_iov_len = 0; + struct sg_iovec sgl; + int i; + size_t not_copied; + + /* Extract Fields from CDB */ + switch (opcode) { + case WRITE_6: + case READ_6: + nvme_trans_get_io_cdb6(cmd, &cdb_info); + break; + case WRITE_10: + case READ_10: + nvme_trans_get_io_cdb10(cmd, &cdb_info); + break; + case WRITE_12: + case READ_12: + nvme_trans_get_io_cdb12(cmd, &cdb_info); + break; + case WRITE_16: + case READ_16: + nvme_trans_get_io_cdb16(cmd, &cdb_info); + break; + default: + /* Will never really reach here */ + res = SNTI_INTERNAL_ERROR; + goto out; + } + + /* Calculate total length of transfer (in bytes) */ + if (hdr->iovec_count > 0) { + for (i = 0; i < hdr->iovec_count; i++) { + not_copied = copy_from_user(&sgl, hdr->dxferp + + i * sizeof(struct sg_iovec), + sizeof(struct sg_iovec)); + if (not_copied) + return -EFAULT; + sum_iov_len += sgl.iov_len; + /* IO vector sizes should be multiples of block size */ + if (sgl.iov_len % (1 << ns->lba_shift) != 0) { + res = nvme_trans_completion(hdr, + SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, + SCSI_ASC_INVALID_PARAMETER, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + goto out; + } + } + } else { + sum_iov_len = hdr->dxfer_len; + } + + /* As Per sg ioctl howto, if the lengths differ, use the lower one */ + xfer_bytes = min(((u64)hdr->dxfer_len), sum_iov_len); + + /* If block count and actual data buffer size don't match, error out */ + if (xfer_bytes != (cdb_info.xfer_len << ns->lba_shift)) { + res = -EINVAL; + goto out; + } + + /* Check for 0 length transfer - it is not illegal */ + if (cdb_info.xfer_len == 0) + goto out; + + /* Send NVMe IO Command(s) */ + res = nvme_trans_do_nvme_io(ns, hdr, &cdb_info, is_write); + if (res != SNTI_TRANSLATION_SUCCESS) + goto out; + + out: + return res; +} + +static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 *cmd) +{ + int res = SNTI_TRANSLATION_SUCCESS; + u8 evpd; + u8 page_code; + int alloc_len; + u8 *inq_response; + + evpd = GET_INQ_EVPD_BIT(cmd); + page_code = GET_INQ_PAGE_CODE(cmd); + alloc_len = GET_INQ_ALLOC_LENGTH(cmd); + + inq_response = kmalloc(STANDARD_INQUIRY_LENGTH, GFP_KERNEL); + if (inq_response == NULL) { + res = -ENOMEM; + goto out_mem; + } + + if (evpd == 0) { + if (page_code == INQ_STANDARD_INQUIRY_PAGE) { + res = nvme_trans_standard_inquiry_page(ns, hdr, + inq_response, alloc_len); + } else { + res = nvme_trans_completion(hdr, + SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, + SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + } + } else { + switch (page_code) { + case VPD_SUPPORTED_PAGES: + res = nvme_trans_supported_vpd_pages(ns, hdr, + inq_response, alloc_len); + break; + case VPD_SERIAL_NUMBER: + res = nvme_trans_unit_serial_page(ns, hdr, inq_response, + alloc_len); + break; + case VPD_DEVICE_IDENTIFIERS: + res = nvme_trans_device_id_page(ns, hdr, inq_response, + alloc_len); + break; + case VPD_EXTENDED_INQUIRY: + res = nvme_trans_ext_inq_page(ns, hdr, alloc_len); + break; + case VPD_BLOCK_DEV_CHARACTERISTICS: + res = nvme_trans_bdev_char_page(ns, hdr, alloc_len); + break; + default: + res = nvme_trans_completion(hdr, + SAM_STAT_CHECK_CONDITION, +
ILLEGAL_REQUEST, + SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + break; + } + } + kfree(inq_response); + out_mem: + return res; +} + +static int nvme_trans_log_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 *cmd) +{ + int res = SNTI_TRANSLATION_SUCCESS; + u16 alloc_len; + u8 sp; + u8 pc; + u8 page_code; + + sp = GET_U8_FROM_CDB(cmd, LOG_SENSE_CDB_SP_OFFSET); + if (sp != LOG_SENSE_CDB_SP_NOT_ENABLED) { + res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + goto out; + } + pc = GET_U8_FROM_CDB(cmd, LOG_SENSE_CDB_PC_OFFSET); + page_code = pc & LOG_SENSE_CDB_PAGE_CODE_MASK; + pc = (pc & LOG_SENSE_CDB_PC_MASK) >> LOG_SENSE_CDB_PC_SHIFT; + if (pc != LOG_SENSE_CDB_PC_CUMULATIVE_VALUES) { + res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + goto out; + } + alloc_len = GET_U16_FROM_CDB(cmd, LOG_SENSE_CDB_ALLOC_LENGTH_OFFSET); + switch (page_code) { + case LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE: + res = nvme_trans_log_supp_pages(ns, hdr, alloc_len); + break; + case LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE: + res = nvme_trans_log_info_exceptions(ns, hdr, alloc_len); + break; + case LOG_PAGE_TEMPERATURE_PAGE: + res = nvme_trans_log_temperature(ns, hdr, alloc_len); + break; + default: + res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + break; + } + + out: + return res; +} + +static int nvme_trans_mode_select(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 *cmd) +{ + int res = SNTI_TRANSLATION_SUCCESS; + u8 cdb10 = 0; + u16 parm_list_len; + u8 page_format; + u8 save_pages; + + page_format = GET_U8_FROM_CDB(cmd, MODE_SELECT_CDB_PAGE_FORMAT_OFFSET); + page_format &= MODE_SELECT_CDB_PAGE_FORMAT_MASK; + + save_pages = GET_U8_FROM_CDB(cmd, MODE_SELECT_CDB_SAVE_PAGES_OFFSET); + save_pages &= MODE_SELECT_CDB_SAVE_PAGES_MASK; + + if (GET_OPCODE(cmd) == MODE_SELECT) { + parm_list_len = GET_U8_FROM_CDB(cmd, + MODE_SELECT_6_CDB_PARAM_LIST_LENGTH_OFFSET); + } else { + parm_list_len = GET_U16_FROM_CDB(cmd, + MODE_SELECT_10_CDB_PARAM_LIST_LENGTH_OFFSET); + cdb10 = 1; + } + + if (parm_list_len != 0) { + /* + * According to SPC-4 r24, a parameter list length field of 0 + * shall not be considered an error + */ + res = nvme_trans_modesel_data(ns, hdr, cmd, parm_list_len, + page_format, save_pages, cdb10); + } + + return res; +} + +static int nvme_trans_mode_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 *cmd) +{ + int res = SNTI_TRANSLATION_SUCCESS; + u16 alloc_len; + u8 cdb10 = 0; + u8 page_code; + u8 pc; + + if (GET_OPCODE(cmd) == MODE_SENSE) { + alloc_len = GET_U8_FROM_CDB(cmd, MODE_SENSE6_ALLOC_LEN_OFFSET); + } else { + alloc_len = GET_U16_FROM_CDB(cmd, + MODE_SENSE10_ALLOC_LEN_OFFSET); + cdb10 = 1; + } + + pc = GET_U8_FROM_CDB(cmd, MODE_SENSE_PAGE_CONTROL_OFFSET) & + MODE_SENSE_PAGE_CONTROL_MASK; + if (pc != MODE_SENSE_PC_CURRENT_VALUES) { + res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + goto out; + } + + page_code = GET_U8_FROM_CDB(cmd, MODE_SENSE_PAGE_CODE_OFFSET) & + MODE_SENSE_PAGE_CODE_MASK; + switch (page_code) { + case MODE_PAGE_CACHING: + res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len, + cdb10, + &nvme_trans_fill_caching_page, + MODE_PAGE_CACHING_LEN); + break; + case MODE_PAGE_CONTROL: + res = nvme_trans_mode_page_create(ns, hdr, cmd,
alloc_len, + cdb10, + &nvme_trans_fill_control_page, + MODE_PAGE_CONTROL_LEN); + break; + case MODE_PAGE_POWER_CONDITION: + res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len, + cdb10, + &nvme_trans_fill_pow_cnd_page, + MODE_PAGE_POW_CND_LEN); + break; + case MODE_PAGE_INFO_EXCEP: + res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len, + cdb10, + &nvme_trans_fill_inf_exc_page, + MODE_PAGE_INF_EXC_LEN); + break; + case MODE_PAGE_RETURN_ALL: + res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len, + cdb10, + &nvme_trans_fill_all_pages, + MODE_PAGE_ALL_LEN); + break; + default: + res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + break; + } + + out: + return res; +} + +static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 *cmd) +{ + int res = SNTI_TRANSLATION_SUCCESS; + int nvme_sc; + u32 alloc_len = READ_CAP_10_RESP_SIZE; + u32 resp_size = READ_CAP_10_RESP_SIZE; + u32 xfer_len; + u8 cdb16; + struct nvme_dev *dev = ns->dev; + dma_addr_t dma_addr; + void *mem; + struct nvme_id_ns *id_ns; + u8 *response; + + cdb16 = IS_READ_CAP_16(cmd); + if (cdb16) { + alloc_len = GET_READ_CAP_16_ALLOC_LENGTH(cmd); + resp_size = READ_CAP_16_RESP_SIZE; + } + + mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), + &dma_addr, GFP_KERNEL); + if (mem == NULL) { + res = -ENOMEM; + goto out; + } + /* nvme ns identify */ + nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr); + res = nvme_trans_status_code(hdr, nvme_sc); + if (res) + goto out_dma; + if (nvme_sc) { + res = nvme_sc; + goto out_dma; + } + id_ns = mem; + + response = kmalloc(resp_size, GFP_KERNEL); + if (response == NULL) { + res = -ENOMEM; + goto out_dma; + } + memset(response, 0, resp_size); + nvme_trans_fill_read_cap(response, id_ns, cdb16); + + xfer_len = min(alloc_len, resp_size); + res = nvme_trans_copy_to_user(hdr, response, xfer_len); + + kfree(response); + out_dma: + dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem, + dma_addr); + out: + return res; +} + +static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 *cmd) +{ + int res = SNTI_TRANSLATION_SUCCESS; + int nvme_sc; + u32 alloc_len, xfer_len, resp_size; + u8 select_report; + u8 *response; + struct nvme_dev *dev = ns->dev; + dma_addr_t dma_addr; + void *mem; + struct nvme_id_ctrl *id_ctrl; + u32 ll_length, lun_id; + u8 lun_id_offset = REPORT_LUNS_FIRST_LUN_OFFSET; + __be32 tmp_len; + + alloc_len = GET_REPORT_LUNS_ALLOC_LENGTH(cmd); + select_report = GET_U8_FROM_CDB(cmd, REPORT_LUNS_SR_OFFSET); + + if ((select_report != ALL_LUNS_RETURNED) && + (select_report != ALL_WELL_KNOWN_LUNS_RETURNED) && + (select_report != RESTRICTED_LUNS_RETURNED)) { + res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + goto out; + } else { + /* NVMe Controller Identify */ + mem = dma_alloc_coherent(&dev->pci_dev->dev, + sizeof(struct nvme_id_ctrl), + &dma_addr, GFP_KERNEL); + if (mem == NULL) { + res = -ENOMEM; + goto out; + } + nvme_sc = nvme_identify(dev, 0, 1, dma_addr); + res = nvme_trans_status_code(hdr, nvme_sc); + if (res) + goto out_dma; + if (nvme_sc) { + res = nvme_sc; + goto out_dma; + } + id_ctrl = mem; + ll_length = le32_to_cpu(id_ctrl->nn) * LUN_ENTRY_SIZE; + resp_size = ll_length + LUN_DATA_HEADER_SIZE; + + if (alloc_len < resp_size) { + res = nvme_trans_completion(hdr, + SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, 
SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + goto out_dma; + } + + response = kmalloc(resp_size, GFP_KERNEL); + if (response == NULL) { + res = -ENOMEM; + goto out_dma; + } + memset(response, 0, resp_size); + + /* The first LUN ID will always be 0 per the SAM spec */ + for (lun_id = 0; lun_id < le32_to_cpu(id_ctrl->nn); lun_id++) { + /* + * Set the LUN Id and then increment to the next LUN + * location in the parameter data. + */ + __be64 tmp_id = cpu_to_be64(lun_id); + memcpy(&response[lun_id_offset], &tmp_id, sizeof(u64)); + lun_id_offset += LUN_ENTRY_SIZE; + } + tmp_len = cpu_to_be32(ll_length); + memcpy(response, &tmp_len, sizeof(u32)); + } + + xfer_len = min(alloc_len, resp_size); + res = nvme_trans_copy_to_user(hdr, response, xfer_len); + + kfree(response); + out_dma: + dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ctrl), mem, + dma_addr); + out: + return res; +} + +static int nvme_trans_request_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 *cmd) +{ + int res = SNTI_TRANSLATION_SUCCESS; + u8 alloc_len, xfer_len, resp_size; + u8 desc_format; + u8 *response; + + alloc_len = GET_REQUEST_SENSE_ALLOC_LENGTH(cmd); + desc_format = GET_U8_FROM_CDB(cmd, REQUEST_SENSE_DESC_OFFSET); + desc_format &= REQUEST_SENSE_DESC_MASK; + + resp_size = ((desc_format) ? (DESC_FMT_SENSE_DATA_SIZE) : + (FIXED_FMT_SENSE_DATA_SIZE)); + response = kmalloc(resp_size, GFP_KERNEL); + if (response == NULL) { + res = -ENOMEM; + goto out; + } + memset(response, 0, resp_size); + + if (desc_format == DESCRIPTOR_FORMAT_SENSE_DATA_TYPE) { + /* Descriptor Format Sense Data */ + response[0] = DESC_FORMAT_SENSE_DATA; + response[1] = NO_SENSE; + /* TODO How is LOW POWER CONDITION ON handled? (byte 2) */ + response[2] = SCSI_ASC_NO_SENSE; + response[3] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + /* SDAT_OVFL = 0 | Additional Sense Length = 0 */ + } else { + /* Fixed Format Sense Data */ + response[0] = FIXED_SENSE_DATA; + /* Byte 1 = Obsolete */ + response[2] = NO_SENSE; /* FM, EOM, ILI, SDAT_OVFL = 0 */ + /* Bytes 3-6 - Information - set to zero */ + response[7] = FIXED_SENSE_DATA_ADD_LENGTH; + /* Bytes 8-11 - Cmd Specific Information - set to zero */ + response[12] = SCSI_ASC_NO_SENSE; + response[13] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + /* Byte 14 = Field Replaceable Unit Code = 0 */ + /* Bytes 15-17 - SKSV=0; Sense Key Specific = 0 */ + } + + xfer_len = min(alloc_len, resp_size); + res = nvme_trans_copy_to_user(hdr, response, xfer_len); + + kfree(response); + out: + return res; +} + +static int nvme_trans_security_protocol(struct nvme_ns *ns, + struct sg_io_hdr *hdr, + u8 *cmd) +{ + return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); +} + +static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 *cmd) +{ + int res = SNTI_TRANSLATION_SUCCESS; + int nvme_sc; + struct nvme_queue *nvmeq; + struct nvme_command c; + u8 immed, pcmod, pc, no_flush, start; + + immed = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_IMMED_OFFSET); + pcmod = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_POWER_COND_MOD_OFFSET); + pc = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_POWER_COND_OFFSET); + no_flush = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_NO_FLUSH_OFFSET); + start = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_START_OFFSET); + + immed &= START_STOP_UNIT_CDB_IMMED_MASK; + pcmod &= START_STOP_UNIT_CDB_POWER_COND_MOD_MASK; + pc = (pc & START_STOP_UNIT_CDB_POWER_COND_MASK) >> NIBBLE_SHIFT; + no_flush &= 
START_STOP_UNIT_CDB_NO_FLUSH_MASK; + start &= START_STOP_UNIT_CDB_START_MASK; + + if (immed != 0) { + res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + } else { + if (no_flush == 0) { + /* Issue NVME FLUSH command prior to START STOP UNIT */ + memset(&c, 0, sizeof(c)); + c.common.opcode = nvme_cmd_flush; + c.common.nsid = cpu_to_le32(ns->ns_id); + + nvmeq = get_nvmeq(ns->dev); + put_nvmeq(nvmeq); + nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT); + + res = nvme_trans_status_code(hdr, nvme_sc); + if (res) + goto out; + if (nvme_sc) { + res = nvme_sc; + goto out; + } + } + /* Setup the expected power state transition */ + res = nvme_trans_power_state(ns, hdr, pc, pcmod, start); + } + + out: + return res; +} + +static int nvme_trans_synchronize_cache(struct nvme_ns *ns, + struct sg_io_hdr *hdr, u8 *cmd) +{ + int res = SNTI_TRANSLATION_SUCCESS; + int nvme_sc; + struct nvme_command c; + struct nvme_queue *nvmeq; + + memset(&c, 0, sizeof(c)); + c.common.opcode = nvme_cmd_flush; + c.common.nsid = cpu_to_le32(ns->ns_id); + + nvmeq = get_nvmeq(ns->dev); + put_nvmeq(nvmeq); + nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT); + + res = nvme_trans_status_code(hdr, nvme_sc); + if (res) + goto out; + if (nvme_sc) + res = nvme_sc; + + out: + return res; +} + +static int nvme_trans_format_unit(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 *cmd) +{ + int res = SNTI_TRANSLATION_SUCCESS; + u8 parm_hdr_len = 0; + u8 nvme_pf_code = 0; + u8 format_prot_info, long_list, format_data; + + format_prot_info = GET_U8_FROM_CDB(cmd, + FORMAT_UNIT_CDB_FORMAT_PROT_INFO_OFFSET); + long_list = GET_U8_FROM_CDB(cmd, FORMAT_UNIT_CDB_LONG_LIST_OFFSET); + format_data = GET_U8_FROM_CDB(cmd, FORMAT_UNIT_CDB_FORMAT_DATA_OFFSET); + + format_prot_info = (format_prot_info & + FORMAT_UNIT_CDB_FORMAT_PROT_INFO_MASK) >> + FORMAT_UNIT_CDB_FORMAT_PROT_INFO_SHIFT; + long_list &= FORMAT_UNIT_CDB_LONG_LIST_MASK; + format_data &= FORMAT_UNIT_CDB_FORMAT_DATA_MASK; + + if (format_data != 0) { + if (format_prot_info != 0) { + if (long_list == 0) + parm_hdr_len = FORMAT_UNIT_SHORT_PARM_LIST_LEN; + else + parm_hdr_len = FORMAT_UNIT_LONG_PARM_LIST_LEN; + } + } else if (format_data == 0 && format_prot_info != 0) { + res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + goto out; + } + + /* Get parm header from data-in/out buffer */ + /* + * According to the translation spec, the only fields in the parameter + * list we are concerned with are in the header. So allocate only that. 
+ */ + if (parm_hdr_len > 0) { + res = nvme_trans_fmt_get_parm_header(hdr, parm_hdr_len, + format_prot_info, &nvme_pf_code); + if (res != SNTI_TRANSLATION_SUCCESS) + goto out; + } + + /* Attempt to activate any previously downloaded firmware image */ + res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw, 0, 0, 0); + + /* Determine Block size and count and send format command */ + res = nvme_trans_fmt_set_blk_size_count(ns, hdr); + if (res != SNTI_TRANSLATION_SUCCESS) + goto out; + + res = nvme_trans_fmt_send_cmd(ns, hdr, nvme_pf_code); + + out: + return res; +} + +static int nvme_trans_test_unit_ready(struct nvme_ns *ns, + struct sg_io_hdr *hdr, + u8 *cmd) +{ + int res = SNTI_TRANSLATION_SUCCESS; + struct nvme_dev *dev = ns->dev; + + if (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) + res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + NOT_READY, SCSI_ASC_LUN_NOT_READY, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + else + res = nvme_trans_completion(hdr, SAM_STAT_GOOD, NO_SENSE, 0, 0); + + return res; +} + +static int nvme_trans_write_buffer(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 *cmd) +{ + int res = SNTI_TRANSLATION_SUCCESS; + u32 buffer_offset, parm_list_length; + u8 buffer_id, mode; + + parm_list_length = + GET_U24_FROM_CDB(cmd, WRITE_BUFFER_CDB_PARM_LIST_LENGTH_OFFSET); + if (parm_list_length % BYTES_TO_DWORDS != 0) { + /* NVMe expects Firmware file to be a whole number of DWORDS */ + res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + goto out; + } + buffer_id = GET_U8_FROM_CDB(cmd, WRITE_BUFFER_CDB_BUFFER_ID_OFFSET); + if (buffer_id > NVME_MAX_FIRMWARE_SLOT) { + res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + goto out; + } + mode = GET_U8_FROM_CDB(cmd, WRITE_BUFFER_CDB_MODE_OFFSET) & + WRITE_BUFFER_CDB_MODE_MASK; + buffer_offset = + GET_U24_FROM_CDB(cmd, WRITE_BUFFER_CDB_BUFFER_OFFSET_OFFSET); + + switch (mode) { + case DOWNLOAD_SAVE_ACTIVATE: + res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_download_fw, + parm_list_length, buffer_offset, + buffer_id); + if (res != SNTI_TRANSLATION_SUCCESS) + goto out; + res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw, + parm_list_length, buffer_offset, + buffer_id); + break; + case DOWNLOAD_SAVE_DEFER_ACTIVATE: + res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_download_fw, + parm_list_length, buffer_offset, + buffer_id); + break; + case ACTIVATE_DEFERRED_MICROCODE: + res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw, + parm_list_length, buffer_offset, + buffer_id); + break; + default: + res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + break; + } + + out: + return res; +} + +struct scsi_unmap_blk_desc { + __be64 slba; + __be32 nlb; + u32 resv; +}; + +struct scsi_unmap_parm_list { + __be16 unmap_data_len; + __be16 unmap_blk_desc_data_len; + u32 resv; + struct scsi_unmap_blk_desc desc[0]; +}; + +static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 *cmd) +{ + struct nvme_dev *dev = ns->dev; + struct scsi_unmap_parm_list *plist; + struct nvme_dsm_range *range; + struct nvme_queue *nvmeq; + struct nvme_command c; + int i, nvme_sc, res = -ENOMEM; + u16 ndesc, list_len; + dma_addr_t dma_addr; + + list_len = GET_U16_FROM_CDB(cmd, UNMAP_CDB_PARAM_LIST_LENGTH_OFFSET); + if (!list_len) + return -EINVAL; + + plist = kmalloc(list_len, 
GFP_KERNEL); + if (!plist) + return -ENOMEM; + + res = nvme_trans_copy_from_user(hdr, plist, list_len); + if (res != SNTI_TRANSLATION_SUCCESS) + goto out; + + ndesc = be16_to_cpu(plist->unmap_blk_desc_data_len) >> 4; + if (!ndesc || ndesc > 256) { + res = -EINVAL; + goto out; + } + + range = dma_alloc_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range), + &dma_addr, GFP_KERNEL); + if (!range) + goto out; + + for (i = 0; i < ndesc; i++) { + range[i].nlb = cpu_to_le32(be32_to_cpu(plist->desc[i].nlb)); + range[i].slba = cpu_to_le64(be64_to_cpu(plist->desc[i].slba)); + range[i].cattr = 0; + } + + memset(&c, 0, sizeof(c)); + c.dsm.opcode = nvme_cmd_dsm; + c.dsm.nsid = cpu_to_le32(ns->ns_id); + c.dsm.prp1 = cpu_to_le64(dma_addr); + c.dsm.nr = cpu_to_le32(ndesc - 1); + c.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD); + + nvmeq = get_nvmeq(dev); + put_nvmeq(nvmeq); + + nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT); + res = nvme_trans_status_code(hdr, nvme_sc); + + dma_free_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range), + range, dma_addr); + out: + kfree(plist); + return res; +} + +static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr) +{ + u8 cmd[BLK_MAX_CDB]; + int retcode; + unsigned int opcode; + + if (hdr->cmdp == NULL) + return -EMSGSIZE; + if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len)) + return -EFAULT; + + opcode = cmd[0]; + + switch (opcode) { + case READ_6: + case READ_10: + case READ_12: + case READ_16: + retcode = nvme_trans_io(ns, hdr, 0, cmd); + break; + case WRITE_6: + case WRITE_10: + case WRITE_12: + case WRITE_16: + retcode = nvme_trans_io(ns, hdr, 1, cmd); + break; + case INQUIRY: + retcode = nvme_trans_inquiry(ns, hdr, cmd); + break; + case LOG_SENSE: + retcode = nvme_trans_log_sense(ns, hdr, cmd); + break; + case MODE_SELECT: + case MODE_SELECT_10: + retcode = nvme_trans_mode_select(ns, hdr, cmd); + break; + case MODE_SENSE: + case MODE_SENSE_10: + retcode = nvme_trans_mode_sense(ns, hdr, cmd); + break; + case READ_CAPACITY: + retcode = nvme_trans_read_capacity(ns, hdr, cmd); + break; + case SERVICE_ACTION_IN: + if (IS_READ_CAP_16(cmd)) + retcode = nvme_trans_read_capacity(ns, hdr, cmd); + else + goto out; + break; + case REPORT_LUNS: + retcode = nvme_trans_report_luns(ns, hdr, cmd); + break; + case REQUEST_SENSE: + retcode = nvme_trans_request_sense(ns, hdr, cmd); + break; + case SECURITY_PROTOCOL_IN: + case SECURITY_PROTOCOL_OUT: + retcode = nvme_trans_security_protocol(ns, hdr, cmd); + break; + case START_STOP: + retcode = nvme_trans_start_stop(ns, hdr, cmd); + break; + case SYNCHRONIZE_CACHE: + retcode = nvme_trans_synchronize_cache(ns, hdr, cmd); + break; + case FORMAT_UNIT: + retcode = nvme_trans_format_unit(ns, hdr, cmd); + break; + case TEST_UNIT_READY: + retcode = nvme_trans_test_unit_ready(ns, hdr, cmd); + break; + case WRITE_BUFFER: + retcode = nvme_trans_write_buffer(ns, hdr, cmd); + break; + case UNMAP: + retcode = nvme_trans_unmap(ns, hdr, cmd); + break; + default: + out: + retcode = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + break; + } + return retcode; +} + +int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr) +{ + struct sg_io_hdr hdr; + int retcode; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + if (copy_from_user(&hdr, u_hdr, sizeof(hdr))) + return -EFAULT; + if (hdr.interface_id != 'S') + return -EINVAL; + if (hdr.cmd_len > BLK_MAX_CDB) + return -EINVAL; + + retcode = nvme_scsi_translate(ns, &hdr); + if 
(retcode < 0) + return retcode; + if (retcode > 0) + retcode = SNTI_TRANSLATION_SUCCESS; + if (copy_to_user(u_hdr, &hdr, sizeof(sg_io_hdr_t)) > 0) + return -EFAULT; + + return retcode; +} + +int nvme_sg_get_version_num(int __user *ip) +{ + return put_user(sg_version_num, ip); +} diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c deleted file mode 100644 index 9dcefe4..0000000 --- a/drivers/block/nvme.c +++ /dev/null @@ -1,1807 +0,0 @@ -/* - * NVM Express device driver - * Copyright (c) 2011, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#define NVME_Q_DEPTH 1024 -#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command)) -#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion)) -#define NVME_MINORS 64 -#define NVME_IO_TIMEOUT (5 * HZ) -#define ADMIN_TIMEOUT (60 * HZ) - -static int nvme_major; -module_param(nvme_major, int, 0); - -static int use_threaded_interrupts; -module_param(use_threaded_interrupts, int, 0); - -static DEFINE_SPINLOCK(dev_list_lock); -static LIST_HEAD(dev_list); -static struct task_struct *nvme_thread; - -/* - * Represents an NVM Express device. Each nvme_dev is a PCI function. - */ -struct nvme_dev { - struct list_head node; - struct nvme_queue **queues; - u32 __iomem *dbs; - struct pci_dev *pci_dev; - struct dma_pool *prp_page_pool; - struct dma_pool *prp_small_pool; - int instance; - int queue_count; - int db_stride; - u32 ctrl_config; - struct msix_entry *entry; - struct nvme_bar __iomem *bar; - struct list_head namespaces; - char serial[20]; - char model[40]; - char firmware_rev[8]; - u32 max_hw_sectors; -}; - -/* - * An NVM Express namespace is equivalent to a SCSI LUN - */ -struct nvme_ns { - struct list_head list; - - struct nvme_dev *dev; - struct request_queue *queue; - struct gendisk *disk; - - int ns_id; - int lba_shift; -}; - -/* - * An NVM Express queue. Each device has at least two (one for admin - * commands and one for I/O commands). 
- */ -struct nvme_queue { - struct device *q_dmadev; - struct nvme_dev *dev; - spinlock_t q_lock; - struct nvme_command *sq_cmds; - volatile struct nvme_completion *cqes; - dma_addr_t sq_dma_addr; - dma_addr_t cq_dma_addr; - wait_queue_head_t sq_full; - wait_queue_t sq_cong_wait; - struct bio_list sq_cong; - u32 __iomem *q_db; - u16 q_depth; - u16 cq_vector; - u16 sq_head; - u16 sq_tail; - u16 cq_head; - u16 cq_phase; - unsigned long cmdid_data[]; -}; - -/* - * Check we didin't inadvertently grow the command struct - */ -static inline void _nvme_check_size(void) -{ - BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64); - BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64); - BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64); - BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64); - BUILD_BUG_ON(sizeof(struct nvme_features) != 64); - BUILD_BUG_ON(sizeof(struct nvme_command) != 64); - BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096); - BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096); - BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64); - BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512); -} - -typedef void (*nvme_completion_fn)(struct nvme_dev *, void *, - struct nvme_completion *); - -struct nvme_cmd_info { - nvme_completion_fn fn; - void *ctx; - unsigned long timeout; -}; - -static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq) -{ - return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)]; -} - -/** - * alloc_cmdid() - Allocate a Command ID - * @nvmeq: The queue that will be used for this command - * @ctx: A pointer that will be passed to the handler - * @handler: The function to call on completion - * - * Allocate a Command ID for a queue. The data passed in will - * be passed to the completion handler. This is implemented by using - * the bottom two bits of the ctx pointer to store the handler ID. - * Passing in a pointer that's not 4-byte aligned will cause a BUG. - * We can change this if it becomes a problem. - * - * May be called with local interrupts disabled and the q_lock held, - * or with interrupts enabled and no locks held. - */ -static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx, - nvme_completion_fn handler, unsigned timeout) -{ - int depth = nvmeq->q_depth - 1; - struct nvme_cmd_info *info = nvme_cmd_info(nvmeq); - int cmdid; - - do { - cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth); - if (cmdid >= depth) - return -EBUSY; - } while (test_and_set_bit(cmdid, nvmeq->cmdid_data)); - - info[cmdid].fn = handler; - info[cmdid].ctx = ctx; - info[cmdid].timeout = jiffies + timeout; - return cmdid; -} - -static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx, - nvme_completion_fn handler, unsigned timeout) -{ - int cmdid; - wait_event_killable(nvmeq->sq_full, - (cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0); - return (cmdid < 0) ? 
-EINTR : cmdid; -} - -/* Special values must be less than 0x1000 */ -#define CMD_CTX_BASE ((void *)POISON_POINTER_DELTA) -#define CMD_CTX_CANCELLED (0x30C + CMD_CTX_BASE) -#define CMD_CTX_COMPLETED (0x310 + CMD_CTX_BASE) -#define CMD_CTX_INVALID (0x314 + CMD_CTX_BASE) -#define CMD_CTX_FLUSH (0x318 + CMD_CTX_BASE) - -static void special_completion(struct nvme_dev *dev, void *ctx, - struct nvme_completion *cqe) -{ - if (ctx == CMD_CTX_CANCELLED) - return; - if (ctx == CMD_CTX_FLUSH) - return; - if (ctx == CMD_CTX_COMPLETED) { - dev_warn(&dev->pci_dev->dev, - "completed id %d twice on queue %d\n", - cqe->command_id, le16_to_cpup(&cqe->sq_id)); - return; - } - if (ctx == CMD_CTX_INVALID) { - dev_warn(&dev->pci_dev->dev, - "invalid id %d completed on queue %d\n", - cqe->command_id, le16_to_cpup(&cqe->sq_id)); - return; - } - - dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx); -} - -/* - * Called with local interrupts disabled and the q_lock held. May not sleep. - */ -static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid, - nvme_completion_fn *fn) -{ - void *ctx; - struct nvme_cmd_info *info = nvme_cmd_info(nvmeq); - - if (cmdid >= nvmeq->q_depth) { - *fn = special_completion; - return CMD_CTX_INVALID; - } - if (fn) - *fn = info[cmdid].fn; - ctx = info[cmdid].ctx; - info[cmdid].fn = special_completion; - info[cmdid].ctx = CMD_CTX_COMPLETED; - clear_bit(cmdid, nvmeq->cmdid_data); - wake_up(&nvmeq->sq_full); - return ctx; -} - -static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid, - nvme_completion_fn *fn) -{ - void *ctx; - struct nvme_cmd_info *info = nvme_cmd_info(nvmeq); - if (fn) - *fn = info[cmdid].fn; - ctx = info[cmdid].ctx; - info[cmdid].fn = special_completion; - info[cmdid].ctx = CMD_CTX_CANCELLED; - return ctx; -} - -static struct nvme_queue *get_nvmeq(struct nvme_dev *dev) -{ - return dev->queues[get_cpu() + 1]; -} - -static void put_nvmeq(struct nvme_queue *nvmeq) -{ - put_cpu(); -} - -/** - * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell - * @nvmeq: The queue to use - * @cmd: The command to send - * - * Safe to use from interrupt context - */ -static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd) -{ - unsigned long flags; - u16 tail; - spin_lock_irqsave(&nvmeq->q_lock, flags); - tail = nvmeq->sq_tail; - memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd)); - if (++tail == nvmeq->q_depth) - tail = 0; - writel(tail, nvmeq->q_db); - nvmeq->sq_tail = tail; - spin_unlock_irqrestore(&nvmeq->q_lock, flags); - - return 0; -} - -/* - * The nvme_iod describes the data in an I/O, including the list of PRP - * entries. You can't see it in this data structure because C doesn't let - * me express that. Use nvme_alloc_iod to ensure there's enough space - * allocated to store the PRP list. - */ -struct nvme_iod { - void *private; /* For the use of the submitter of the I/O */ - int npages; /* In the PRP list. 0 means small pool in use */ - int offset; /* Of PRP list */ - int nents; /* Used in scatterlist */ - int length; /* Of data, in bytes */ - dma_addr_t first_dma; - struct scatterlist sg[0]; -}; - -static __le64 **iod_list(struct nvme_iod *iod) -{ - return ((void *)iod) + iod->offset; -} - -/* - * Will slightly overestimate the number of pages needed. This is OK - * as it only leads to a small amount of wasted memory for the lifetime of - * the I/O. 
- */ -static int nvme_npages(unsigned size) -{ - unsigned nprps = DIV_ROUND_UP(size + PAGE_SIZE, PAGE_SIZE); - return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8); -} - -static struct nvme_iod * -nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp) -{ - struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) + - sizeof(__le64 *) * nvme_npages(nbytes) + - sizeof(struct scatterlist) * nseg, gfp); - - if (iod) { - iod->offset = offsetof(struct nvme_iod, sg[nseg]); - iod->npages = -1; - iod->length = nbytes; - iod->nents = 0; - } - - return iod; -} - -static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod) -{ - const int last_prp = PAGE_SIZE / 8 - 1; - int i; - __le64 **list = iod_list(iod); - dma_addr_t prp_dma = iod->first_dma; - - if (iod->npages == 0) - dma_pool_free(dev->prp_small_pool, list[0], prp_dma); - for (i = 0; i < iod->npages; i++) { - __le64 *prp_list = list[i]; - dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]); - dma_pool_free(dev->prp_page_pool, prp_list, prp_dma); - prp_dma = next_prp_dma; - } - kfree(iod); -} - -static void requeue_bio(struct nvme_dev *dev, struct bio *bio) -{ - struct nvme_queue *nvmeq = get_nvmeq(dev); - if (bio_list_empty(&nvmeq->sq_cong)) - add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait); - bio_list_add(&nvmeq->sq_cong, bio); - put_nvmeq(nvmeq); - wake_up_process(nvme_thread); -} - -static void bio_completion(struct nvme_dev *dev, void *ctx, - struct nvme_completion *cqe) -{ - struct nvme_iod *iod = ctx; - struct bio *bio = iod->private; - u16 status = le16_to_cpup(&cqe->status) >> 1; - - if (iod->nents) - dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents, - bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); - nvme_free_iod(dev, iod); - if (status) { - bio_endio(bio, -EIO); - } else if (bio->bi_vcnt > bio->bi_idx) { - requeue_bio(dev, bio); - } else { - bio_endio(bio, 0); - } -} - -/* length is in bytes. gfp flags indicates whether we may sleep. 
*/ -static int nvme_setup_prps(struct nvme_dev *dev, - struct nvme_common_command *cmd, struct nvme_iod *iod, - int total_len, gfp_t gfp) -{ - struct dma_pool *pool; - int length = total_len; - struct scatterlist *sg = iod->sg; - int dma_len = sg_dma_len(sg); - u64 dma_addr = sg_dma_address(sg); - int offset = offset_in_page(dma_addr); - __le64 *prp_list; - __le64 **list = iod_list(iod); - dma_addr_t prp_dma; - int nprps, i; - - cmd->prp1 = cpu_to_le64(dma_addr); - length -= (PAGE_SIZE - offset); - if (length <= 0) - return total_len; - - dma_len -= (PAGE_SIZE - offset); - if (dma_len) { - dma_addr += (PAGE_SIZE - offset); - } else { - sg = sg_next(sg); - dma_addr = sg_dma_address(sg); - dma_len = sg_dma_len(sg); - } - - if (length <= PAGE_SIZE) { - cmd->prp2 = cpu_to_le64(dma_addr); - return total_len; - } - - nprps = DIV_ROUND_UP(length, PAGE_SIZE); - if (nprps <= (256 / 8)) { - pool = dev->prp_small_pool; - iod->npages = 0; - } else { - pool = dev->prp_page_pool; - iod->npages = 1; - } - - prp_list = dma_pool_alloc(pool, gfp, &prp_dma); - if (!prp_list) { - cmd->prp2 = cpu_to_le64(dma_addr); - iod->npages = -1; - return (total_len - length) + PAGE_SIZE; - } - list[0] = prp_list; - iod->first_dma = prp_dma; - cmd->prp2 = cpu_to_le64(prp_dma); - i = 0; - for (;;) { - if (i == PAGE_SIZE / 8) { - __le64 *old_prp_list = prp_list; - prp_list = dma_pool_alloc(pool, gfp, &prp_dma); - if (!prp_list) - return total_len - length; - list[iod->npages++] = prp_list; - prp_list[0] = old_prp_list[i - 1]; - old_prp_list[i - 1] = cpu_to_le64(prp_dma); - i = 1; - } - prp_list[i++] = cpu_to_le64(dma_addr); - dma_len -= PAGE_SIZE; - dma_addr += PAGE_SIZE; - length -= PAGE_SIZE; - if (length <= 0) - break; - if (dma_len > 0) - continue; - BUG_ON(dma_len < 0); - sg = sg_next(sg); - dma_addr = sg_dma_address(sg); - dma_len = sg_dma_len(sg); - } - - return total_len; -} - -/* NVMe scatterlists require no holes in the virtual address */ -#define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2) ((vec2)->bv_offset || \ - (((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE)) - -static int nvme_map_bio(struct device *dev, struct nvme_iod *iod, - struct bio *bio, enum dma_data_direction dma_dir, int psegs) -{ - struct bio_vec *bvec, *bvprv = NULL; - struct scatterlist *sg = NULL; - int i, old_idx, length = 0, nsegs = 0; - - sg_init_table(iod->sg, psegs); - old_idx = bio->bi_idx; - bio_for_each_segment(bvec, bio, i) { - if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) { - sg->length += bvec->bv_len; - } else { - if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec)) - break; - sg = sg ? 
sg + 1 : iod->sg; - sg_set_page(sg, bvec->bv_page, bvec->bv_len, - bvec->bv_offset); - nsegs++; - } - length += bvec->bv_len; - bvprv = bvec; - } - bio->bi_idx = i; - iod->nents = nsegs; - sg_mark_end(sg); - if (dma_map_sg(dev, iod->sg, iod->nents, dma_dir) == 0) { - bio->bi_idx = old_idx; - return -ENOMEM; - } - return length; -} - -static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns, - int cmdid) -{ - struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail]; - - memset(cmnd, 0, sizeof(*cmnd)); - cmnd->common.opcode = nvme_cmd_flush; - cmnd->common.command_id = cmdid; - cmnd->common.nsid = cpu_to_le32(ns->ns_id); - - if (++nvmeq->sq_tail == nvmeq->q_depth) - nvmeq->sq_tail = 0; - writel(nvmeq->sq_tail, nvmeq->q_db); - - return 0; -} - -static int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns) -{ - int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH, - special_completion, NVME_IO_TIMEOUT); - if (unlikely(cmdid < 0)) - return cmdid; - - return nvme_submit_flush(nvmeq, ns, cmdid); -} - -/* - * Called with local interrupts disabled and the q_lock held. May not sleep. - */ -static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns, - struct bio *bio) -{ - struct nvme_command *cmnd; - struct nvme_iod *iod; - enum dma_data_direction dma_dir; - int cmdid, length, result = -ENOMEM; - u16 control; - u32 dsmgmt; - int psegs = bio_phys_segments(ns->queue, bio); - - if ((bio->bi_rw & REQ_FLUSH) && psegs) { - result = nvme_submit_flush_data(nvmeq, ns); - if (result) - return result; - } - - iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC); - if (!iod) - goto nomem; - iod->private = bio; - - result = -EBUSY; - cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT); - if (unlikely(cmdid < 0)) - goto free_iod; - - if ((bio->bi_rw & REQ_FLUSH) && !psegs) - return nvme_submit_flush(nvmeq, ns, cmdid); - - control = 0; - if (bio->bi_rw & REQ_FUA) - control |= NVME_RW_FUA; - if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD)) - control |= NVME_RW_LR; - - dsmgmt = 0; - if (bio->bi_rw & REQ_RAHEAD) - dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH; - - cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail]; - - memset(cmnd, 0, sizeof(*cmnd)); - if (bio_data_dir(bio)) { - cmnd->rw.opcode = nvme_cmd_write; - dma_dir = DMA_TO_DEVICE; - } else { - cmnd->rw.opcode = nvme_cmd_read; - dma_dir = DMA_FROM_DEVICE; - } - - result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs); - if (result < 0) - goto free_cmdid; - length = result; - - cmnd->rw.command_id = cmdid; - cmnd->rw.nsid = cpu_to_le32(ns->ns_id); - length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length, - GFP_ATOMIC); - cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9)); - cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1); - cmnd->rw.control = cpu_to_le16(control); - cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt); - - bio->bi_sector += length >> 9; - - if (++nvmeq->sq_tail == nvmeq->q_depth) - nvmeq->sq_tail = 0; - writel(nvmeq->sq_tail, nvmeq->q_db); - - return 0; - - free_cmdid: - free_cmdid(nvmeq, cmdid, NULL); - free_iod: - nvme_free_iod(nvmeq->dev, iod); - nomem: - return result; -} - -static void nvme_make_request(struct request_queue *q, struct bio *bio) -{ - struct nvme_ns *ns = q->queuedata; - struct nvme_queue *nvmeq = get_nvmeq(ns->dev); - int result = -EBUSY; - - spin_lock_irq(&nvmeq->q_lock); - if (bio_list_empty(&nvmeq->sq_cong)) - result = nvme_submit_bio_queue(nvmeq, ns, bio); - if (unlikely(result)) { - if (bio_list_empty(&nvmeq->sq_cong)) - 
add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait); - bio_list_add(&nvmeq->sq_cong, bio); - } - - spin_unlock_irq(&nvmeq->q_lock); - put_nvmeq(nvmeq); -} - -static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq) -{ - u16 head, phase; - - head = nvmeq->cq_head; - phase = nvmeq->cq_phase; - - for (;;) { - void *ctx; - nvme_completion_fn fn; - struct nvme_completion cqe = nvmeq->cqes[head]; - if ((le16_to_cpu(cqe.status) & 1) != phase) - break; - nvmeq->sq_head = le16_to_cpu(cqe.sq_head); - if (++head == nvmeq->q_depth) { - head = 0; - phase = !phase; - } - - ctx = free_cmdid(nvmeq, cqe.command_id, &fn); - fn(nvmeq->dev, ctx, &cqe); - } - - /* If the controller ignores the cq head doorbell and continuously - * writes to the queue, it is theoretically possible to wrap around - * the queue twice and mistakenly return IRQ_NONE. Linux only - * requires that 0.1% of your interrupts are handled, so this isn't - * a big problem. - */ - if (head == nvmeq->cq_head && phase == nvmeq->cq_phase) - return IRQ_NONE; - - writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride)); - nvmeq->cq_head = head; - nvmeq->cq_phase = phase; - - return IRQ_HANDLED; -} - -static irqreturn_t nvme_irq(int irq, void *data) -{ - irqreturn_t result; - struct nvme_queue *nvmeq = data; - spin_lock(&nvmeq->q_lock); - result = nvme_process_cq(nvmeq); - spin_unlock(&nvmeq->q_lock); - return result; -} - -static irqreturn_t nvme_irq_check(int irq, void *data) -{ - struct nvme_queue *nvmeq = data; - struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head]; - if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase) - return IRQ_NONE; - return IRQ_WAKE_THREAD; -} - -static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid) -{ - spin_lock_irq(&nvmeq->q_lock); - cancel_cmdid(nvmeq, cmdid, NULL); - spin_unlock_irq(&nvmeq->q_lock); -} - -struct sync_cmd_info { - struct task_struct *task; - u32 result; - int status; -}; - -static void sync_completion(struct nvme_dev *dev, void *ctx, - struct nvme_completion *cqe) -{ - struct sync_cmd_info *cmdinfo = ctx; - cmdinfo->result = le32_to_cpup(&cqe->result); - cmdinfo->status = le16_to_cpup(&cqe->status) >> 1; - wake_up_process(cmdinfo->task); -} - -/* - * Returns 0 on success. 
If the result is negative, it's a Linux error code; - * if the result is positive, it's an NVM Express status code - */ -static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, - struct nvme_command *cmd, u32 *result, unsigned timeout) -{ - int cmdid; - struct sync_cmd_info cmdinfo; - - cmdinfo.task = current; - cmdinfo.status = -EINTR; - - cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion, - timeout); - if (cmdid < 0) - return cmdid; - cmd->common.command_id = cmdid; - - set_current_state(TASK_KILLABLE); - nvme_submit_cmd(nvmeq, cmd); - schedule(); - - if (cmdinfo.status == -EINTR) { - nvme_abort_command(nvmeq, cmdid); - return -EINTR; - } - - if (result) - *result = cmdinfo.result; - - return cmdinfo.status; -} - -static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd, - u32 *result) -{ - return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT); -} - -static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id) -{ - int status; - struct nvme_command c; - - memset(&c, 0, sizeof(c)); - c.delete_queue.opcode = opcode; - c.delete_queue.qid = cpu_to_le16(id); - - status = nvme_submit_admin_cmd(dev, &c, NULL); - if (status) - return -EIO; - return 0; -} - -static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid, - struct nvme_queue *nvmeq) -{ - int status; - struct nvme_command c; - int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED; - - memset(&c, 0, sizeof(c)); - c.create_cq.opcode = nvme_admin_create_cq; - c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr); - c.create_cq.cqid = cpu_to_le16(qid); - c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); - c.create_cq.cq_flags = cpu_to_le16(flags); - c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector); - - status = nvme_submit_admin_cmd(dev, &c, NULL); - if (status) - return -EIO; - return 0; -} - -static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid, - struct nvme_queue *nvmeq) -{ - int status; - struct nvme_command c; - int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM; - - memset(&c, 0, sizeof(c)); - c.create_sq.opcode = nvme_admin_create_sq; - c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr); - c.create_sq.sqid = cpu_to_le16(qid); - c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); - c.create_sq.sq_flags = cpu_to_le16(flags); - c.create_sq.cqid = cpu_to_le16(qid); - - status = nvme_submit_admin_cmd(dev, &c, NULL); - if (status) - return -EIO; - return 0; -} - -static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid) -{ - return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid); -} - -static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid) -{ - return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid); -} - -static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns, - dma_addr_t dma_addr) -{ - struct nvme_command c; - - memset(&c, 0, sizeof(c)); - c.identify.opcode = nvme_admin_identify; - c.identify.nsid = cpu_to_le32(nsid); - c.identify.prp1 = cpu_to_le64(dma_addr); - c.identify.cns = cpu_to_le32(cns); - - return nvme_submit_admin_cmd(dev, &c, NULL); -} - -static int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid, - dma_addr_t dma_addr, u32 *result) -{ - struct nvme_command c; - - memset(&c, 0, sizeof(c)); - c.features.opcode = nvme_admin_get_features; - c.features.nsid = cpu_to_le32(nsid); - c.features.prp1 = cpu_to_le64(dma_addr); - c.features.fid = cpu_to_le32(fid); - - return nvme_submit_admin_cmd(dev, &c, result); -} - -static int nvme_set_features(struct nvme_dev *dev, unsigned fid, - 
unsigned dword11, dma_addr_t dma_addr, u32 *result) -{ - struct nvme_command c; - - memset(&c, 0, sizeof(c)); - c.features.opcode = nvme_admin_set_features; - c.features.prp1 = cpu_to_le64(dma_addr); - c.features.fid = cpu_to_le32(fid); - c.features.dword11 = cpu_to_le32(dword11); - - return nvme_submit_admin_cmd(dev, &c, result); -} - -/** - * nvme_cancel_ios - Cancel outstanding I/Os - * @queue: The queue to cancel I/Os on - * @timeout: True to only cancel I/Os which have timed out - */ -static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout) -{ - int depth = nvmeq->q_depth - 1; - struct nvme_cmd_info *info = nvme_cmd_info(nvmeq); - unsigned long now = jiffies; - int cmdid; - - for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) { - void *ctx; - nvme_completion_fn fn; - static struct nvme_completion cqe = { - .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, - }; - - if (timeout && !time_after(now, info[cmdid].timeout)) - continue; - dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid); - ctx = cancel_cmdid(nvmeq, cmdid, &fn); - fn(nvmeq->dev, ctx, &cqe); - } -} - -static void nvme_free_queue_mem(struct nvme_queue *nvmeq) -{ - dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth), - (void *)nvmeq->cqes, nvmeq->cq_dma_addr); - dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth), - nvmeq->sq_cmds, nvmeq->sq_dma_addr); - kfree(nvmeq); -} - -static void nvme_free_queue(struct nvme_dev *dev, int qid) -{ - struct nvme_queue *nvmeq = dev->queues[qid]; - int vector = dev->entry[nvmeq->cq_vector].vector; - - spin_lock_irq(&nvmeq->q_lock); - nvme_cancel_ios(nvmeq, false); - while (bio_list_peek(&nvmeq->sq_cong)) { - struct bio *bio = bio_list_pop(&nvmeq->sq_cong); - bio_endio(bio, -EIO); - } - spin_unlock_irq(&nvmeq->q_lock); - - irq_set_affinity_hint(vector, NULL); - free_irq(vector, nvmeq); - - /* Don't tell the adapter to delete the admin queue */ - if (qid) { - adapter_delete_sq(dev, qid); - adapter_delete_cq(dev, qid); - } - - nvme_free_queue_mem(nvmeq); -} - -static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, - int depth, int vector) -{ - struct device *dmadev = &dev->pci_dev->dev; - unsigned extra = DIV_ROUND_UP(depth, 8) + (depth * - sizeof(struct nvme_cmd_info)); - struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL); - if (!nvmeq) - return NULL; - - nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth), - &nvmeq->cq_dma_addr, GFP_KERNEL); - if (!nvmeq->cqes) - goto free_nvmeq; - memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth)); - - nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth), - &nvmeq->sq_dma_addr, GFP_KERNEL); - if (!nvmeq->sq_cmds) - goto free_cqdma; - - nvmeq->q_dmadev = dmadev; - nvmeq->dev = dev; - spin_lock_init(&nvmeq->q_lock); - nvmeq->cq_head = 0; - nvmeq->cq_phase = 1; - init_waitqueue_head(&nvmeq->sq_full); - init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread); - bio_list_init(&nvmeq->sq_cong); - nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)]; - nvmeq->q_depth = depth; - nvmeq->cq_vector = vector; - - return nvmeq; - - free_cqdma: - dma_free_coherent(dmadev, CQ_SIZE(nvmeq->q_depth), (void *)nvmeq->cqes, - nvmeq->cq_dma_addr); - free_nvmeq: - kfree(nvmeq); - return NULL; -} - -static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq, - const char *name) -{ - if (use_threaded_interrupts) - return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector, - nvme_irq_check, nvme_irq, - IRQF_DISABLED | IRQF_SHARED, - name, nvmeq); - return 
request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq, - IRQF_DISABLED | IRQF_SHARED, name, nvmeq); -} - -static struct nvme_queue *nvme_create_queue(struct nvme_dev *dev, int qid, - int cq_size, int vector) -{ - int result; - struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector); - - if (!nvmeq) - return ERR_PTR(-ENOMEM); - - result = adapter_alloc_cq(dev, qid, nvmeq); - if (result < 0) - goto free_nvmeq; - - result = adapter_alloc_sq(dev, qid, nvmeq); - if (result < 0) - goto release_cq; - - result = queue_request_irq(dev, nvmeq, "nvme"); - if (result < 0) - goto release_sq; - - return nvmeq; - - release_sq: - adapter_delete_sq(dev, qid); - release_cq: - adapter_delete_cq(dev, qid); - free_nvmeq: - dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth), - (void *)nvmeq->cqes, nvmeq->cq_dma_addr); - dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth), - nvmeq->sq_cmds, nvmeq->sq_dma_addr); - kfree(nvmeq); - return ERR_PTR(result); -} - -static int nvme_configure_admin_queue(struct nvme_dev *dev) -{ - int result = 0; - u32 aqa; - u64 cap; - unsigned long timeout; - struct nvme_queue *nvmeq; - - dev->dbs = ((void __iomem *)dev->bar) + 4096; - - nvmeq = nvme_alloc_queue(dev, 0, 64, 0); - if (!nvmeq) - return -ENOMEM; - - aqa = nvmeq->q_depth - 1; - aqa |= aqa << 16; - - dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM; - dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT; - dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE; - dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES; - - writel(0, &dev->bar->cc); - writel(aqa, &dev->bar->aqa); - writeq(nvmeq->sq_dma_addr, &dev->bar->asq); - writeq(nvmeq->cq_dma_addr, &dev->bar->acq); - writel(dev->ctrl_config, &dev->bar->cc); - - cap = readq(&dev->bar->cap); - timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies; - dev->db_stride = NVME_CAP_STRIDE(cap); - - while (!result && !(readl(&dev->bar->csts) & NVME_CSTS_RDY)) { - msleep(100); - if (fatal_signal_pending(current)) - result = -EINTR; - if (time_after(jiffies, timeout)) { - dev_err(&dev->pci_dev->dev, - "Device not ready; aborting initialisation\n"); - result = -ENODEV; - } - } - - if (result) { - nvme_free_queue_mem(nvmeq); - return result; - } - - result = queue_request_irq(dev, nvmeq, "nvme admin"); - dev->queues[0] = nvmeq; - return result; -} - -static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write, - unsigned long addr, unsigned length) -{ - int i, err, count, nents, offset; - struct scatterlist *sg; - struct page **pages; - struct nvme_iod *iod; - - if (addr & 3) - return ERR_PTR(-EINVAL); - if (!length) - return ERR_PTR(-EINVAL); - - offset = offset_in_page(addr); - count = DIV_ROUND_UP(offset + length, PAGE_SIZE); - pages = kcalloc(count, sizeof(*pages), GFP_KERNEL); - if (!pages) - return ERR_PTR(-ENOMEM); - - err = get_user_pages_fast(addr, count, 1, pages); - if (err < count) { - count = err; - err = -EFAULT; - goto put_pages; - } - - iod = nvme_alloc_iod(count, length, GFP_KERNEL); - sg = iod->sg; - sg_init_table(sg, count); - for (i = 0; i < count; i++) { - sg_set_page(&sg[i], pages[i], - min_t(int, length, PAGE_SIZE - offset), offset); - length -= (PAGE_SIZE - offset); - offset = 0; - } - sg_mark_end(&sg[i - 1]); - iod->nents = count; - - err = -ENOMEM; - nents = dma_map_sg(&dev->pci_dev->dev, sg, count, - write ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE); - if (!nents) - goto free_iod; - - kfree(pages); - return iod; - - free_iod: - kfree(iod); - put_pages: - for (i = 0; i < count; i++) - put_page(pages[i]); - kfree(pages); - return ERR_PTR(err); -} - -static void nvme_unmap_user_pages(struct nvme_dev *dev, int write, - struct nvme_iod *iod) -{ - int i; - - dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents, - write ? DMA_TO_DEVICE : DMA_FROM_DEVICE); - - for (i = 0; i < iod->nents; i++) - put_page(sg_page(&iod->sg[i])); -} - -static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) -{ - struct nvme_dev *dev = ns->dev; - struct nvme_queue *nvmeq; - struct nvme_user_io io; - struct nvme_command c; - unsigned length; - int status; - struct nvme_iod *iod; - - if (copy_from_user(&io, uio, sizeof(io))) - return -EFAULT; - length = (io.nblocks + 1) << ns->lba_shift; - - switch (io.opcode) { - case nvme_cmd_write: - case nvme_cmd_read: - case nvme_cmd_compare: - iod = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length); - break; - default: - return -EINVAL; - } - - if (IS_ERR(iod)) - return PTR_ERR(iod); - - memset(&c, 0, sizeof(c)); - c.rw.opcode = io.opcode; - c.rw.flags = io.flags; - c.rw.nsid = cpu_to_le32(ns->ns_id); - c.rw.slba = cpu_to_le64(io.slba); - c.rw.length = cpu_to_le16(io.nblocks); - c.rw.control = cpu_to_le16(io.control); - c.rw.dsmgmt = cpu_to_le16(io.dsmgmt); - c.rw.reftag = io.reftag; - c.rw.apptag = io.apptag; - c.rw.appmask = io.appmask; - /* XXX: metadata */ - length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL); - - nvmeq = get_nvmeq(dev); - /* - * Since nvme_submit_sync_cmd sleeps, we can't keep preemption - * disabled. We may be preempted at any point, and be rescheduled - * to a different CPU. That will cause cacheline bouncing, but no - * additional races since q_lock already protects against other CPUs. 
- */ - put_nvmeq(nvmeq); - if (length != (io.nblocks + 1) << ns->lba_shift) - status = -ENOMEM; - else - status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT); - - nvme_unmap_user_pages(dev, io.opcode & 1, iod); - nvme_free_iod(dev, iod); - return status; -} - -static int nvme_user_admin_cmd(struct nvme_dev *dev, - struct nvme_admin_cmd __user *ucmd) -{ - struct nvme_admin_cmd cmd; - struct nvme_command c; - int status, length; - struct nvme_iod *uninitialized_var(iod); - - if (!capable(CAP_SYS_ADMIN)) - return -EACCES; - if (copy_from_user(&cmd, ucmd, sizeof(cmd))) - return -EFAULT; - - memset(&c, 0, sizeof(c)); - c.common.opcode = cmd.opcode; - c.common.flags = cmd.flags; - c.common.nsid = cpu_to_le32(cmd.nsid); - c.common.cdw2[0] = cpu_to_le32(cmd.cdw2); - c.common.cdw2[1] = cpu_to_le32(cmd.cdw3); - c.common.cdw10[0] = cpu_to_le32(cmd.cdw10); - c.common.cdw10[1] = cpu_to_le32(cmd.cdw11); - c.common.cdw10[2] = cpu_to_le32(cmd.cdw12); - c.common.cdw10[3] = cpu_to_le32(cmd.cdw13); - c.common.cdw10[4] = cpu_to_le32(cmd.cdw14); - c.common.cdw10[5] = cpu_to_le32(cmd.cdw15); - - length = cmd.data_len; - if (cmd.data_len) { - iod = nvme_map_user_pages(dev, cmd.opcode & 1, cmd.addr, - length); - if (IS_ERR(iod)) - return PTR_ERR(iod); - length = nvme_setup_prps(dev, &c.common, iod, length, - GFP_KERNEL); - } - - if (length != cmd.data_len) - status = -ENOMEM; - else - status = nvme_submit_admin_cmd(dev, &c, &cmd.result); - - if (cmd.data_len) { - nvme_unmap_user_pages(dev, cmd.opcode & 1, iod); - nvme_free_iod(dev, iod); - } - - if (!status && copy_to_user(&ucmd->result, &cmd.result, - sizeof(cmd.result))) - status = -EFAULT; - - return status; -} - -static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, - unsigned long arg) -{ - struct nvme_ns *ns = bdev->bd_disk->private_data; - - switch (cmd) { - case NVME_IOCTL_ID: - return ns->ns_id; - case NVME_IOCTL_ADMIN_CMD: - return nvme_user_admin_cmd(ns->dev, (void __user *)arg); - case NVME_IOCTL_SUBMIT_IO: - return nvme_submit_io(ns, (void __user *)arg); - default: - return -ENOTTY; - } -} - -static const struct block_device_operations nvme_fops = { - .owner = THIS_MODULE, - .ioctl = nvme_ioctl, - .compat_ioctl = nvme_ioctl, -}; - -static void nvme_resubmit_bios(struct nvme_queue *nvmeq) -{ - while (bio_list_peek(&nvmeq->sq_cong)) { - struct bio *bio = bio_list_pop(&nvmeq->sq_cong); - struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data; - if (nvme_submit_bio_queue(nvmeq, ns, bio)) { - bio_list_add_head(&nvmeq->sq_cong, bio); - break; - } - if (bio_list_empty(&nvmeq->sq_cong)) - remove_wait_queue(&nvmeq->sq_full, - &nvmeq->sq_cong_wait); - } -} - -static int nvme_kthread(void *data) -{ - struct nvme_dev *dev; - - while (!kthread_should_stop()) { - __set_current_state(TASK_RUNNING); - spin_lock(&dev_list_lock); - list_for_each_entry(dev, &dev_list, node) { - int i; - for (i = 0; i < dev->queue_count; i++) { - struct nvme_queue *nvmeq = dev->queues[i]; - if (!nvmeq) - continue; - spin_lock_irq(&nvmeq->q_lock); - if (nvme_process_cq(nvmeq)) - printk("process_cq did something\n"); - nvme_cancel_ios(nvmeq, true); - nvme_resubmit_bios(nvmeq); - spin_unlock_irq(&nvmeq->q_lock); - } - } - spin_unlock(&dev_list_lock); - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(HZ); - } - return 0; -} - -static DEFINE_IDA(nvme_index_ida); - -static int nvme_get_ns_idx(void) -{ - int index, error; - - do { - if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL)) - return -1; - - spin_lock(&dev_list_lock); - error = 
ida_get_new(&nvme_index_ida, &index); - spin_unlock(&dev_list_lock); - } while (error == -EAGAIN); - - if (error) - index = -1; - return index; -} - -static void nvme_put_ns_idx(int index) -{ - spin_lock(&dev_list_lock); - ida_remove(&nvme_index_ida, index); - spin_unlock(&dev_list_lock); -} - -static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid, - struct nvme_id_ns *id, struct nvme_lba_range_type *rt) -{ - struct nvme_ns *ns; - struct gendisk *disk; - int lbaf; - - if (rt->attributes & NVME_LBART_ATTRIB_HIDE) - return NULL; - - ns = kzalloc(sizeof(*ns), GFP_KERNEL); - if (!ns) - return NULL; - ns->queue = blk_alloc_queue(GFP_KERNEL); - if (!ns->queue) - goto out_free_ns; - ns->queue->queue_flags = QUEUE_FLAG_DEFAULT; - queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue); - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue); -/* queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); */ - blk_queue_make_request(ns->queue, nvme_make_request); - ns->dev = dev; - ns->queue->queuedata = ns; - - disk = alloc_disk(NVME_MINORS); - if (!disk) - goto out_free_queue; - ns->ns_id = nsid; - ns->disk = disk; - lbaf = id->flbas & 0xf; - ns->lba_shift = id->lbaf[lbaf].ds; - blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift); - if (dev->max_hw_sectors) - blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors); - - disk->major = nvme_major; - disk->minors = NVME_MINORS; - disk->first_minor = NVME_MINORS * nvme_get_ns_idx(); - disk->fops = &nvme_fops; - disk->private_data = ns; - disk->queue = ns->queue; - disk->driverfs_dev = &dev->pci_dev->dev; - sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid); - set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9)); - - return ns; - - out_free_queue: - blk_cleanup_queue(ns->queue); - out_free_ns: - kfree(ns); - return NULL; -} - -static void nvme_ns_free(struct nvme_ns *ns) -{ - int index = ns->disk->first_minor / NVME_MINORS; - put_disk(ns->disk); - nvme_put_ns_idx(index); - blk_cleanup_queue(ns->queue); - kfree(ns); -} - -static int set_queue_count(struct nvme_dev *dev, int count) -{ - int status; - u32 result; - u32 q_count = (count - 1) | ((count - 1) << 16); - - status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0, - &result); - if (status) - return -EIO; - return min(result & 0xffff, result >> 16) + 1; -} - -static int nvme_setup_io_queues(struct nvme_dev *dev) -{ - int result, cpu, i, nr_io_queues, db_bar_size, q_depth; - - nr_io_queues = num_online_cpus(); - result = set_queue_count(dev, nr_io_queues); - if (result < 0) - return result; - if (result < nr_io_queues) - nr_io_queues = result; - - /* Deregister the admin queue's interrupt */ - free_irq(dev->entry[0].vector, dev->queues[0]); - - db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3)); - if (db_bar_size > 8192) { - iounmap(dev->bar); - dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0), - db_bar_size); - dev->dbs = ((void __iomem *)dev->bar) + 4096; - dev->queues[0]->q_db = dev->dbs; - } - - for (i = 0; i < nr_io_queues; i++) - dev->entry[i].entry = i; - for (;;) { - result = pci_enable_msix(dev->pci_dev, dev->entry, - nr_io_queues); - if (result == 0) { - break; - } else if (result > 0) { - nr_io_queues = result; - continue; - } else { - nr_io_queues = 1; - break; - } - } - - result = queue_request_irq(dev, dev->queues[0], "nvme admin"); - /* XXX: handle failure here */ - - cpu = cpumask_first(cpu_online_mask); - for (i = 0; i < nr_io_queues; i++) { - irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu)); - cpu 
= cpumask_next(cpu, cpu_online_mask); - } - - q_depth = min_t(int, NVME_CAP_MQES(readq(&dev->bar->cap)) + 1, - NVME_Q_DEPTH); - for (i = 0; i < nr_io_queues; i++) { - dev->queues[i + 1] = nvme_create_queue(dev, i + 1, q_depth, i); - if (IS_ERR(dev->queues[i + 1])) - return PTR_ERR(dev->queues[i + 1]); - dev->queue_count++; - } - - for (; i < num_possible_cpus(); i++) { - int target = i % rounddown_pow_of_two(dev->queue_count - 1); - dev->queues[i + 1] = dev->queues[target + 1]; - } - - return 0; -} - -static void nvme_free_queues(struct nvme_dev *dev) -{ - int i; - - for (i = dev->queue_count - 1; i >= 0; i--) - nvme_free_queue(dev, i); -} - -static int nvme_dev_add(struct nvme_dev *dev) -{ - int res, nn, i; - struct nvme_ns *ns, *next; - struct nvme_id_ctrl *ctrl; - struct nvme_id_ns *id_ns; - void *mem; - dma_addr_t dma_addr; - - res = nvme_setup_io_queues(dev); - if (res) - return res; - - mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr, - GFP_KERNEL); - - res = nvme_identify(dev, 0, 1, dma_addr); - if (res) { - res = -EIO; - goto out_free; - } - - ctrl = mem; - nn = le32_to_cpup(&ctrl->nn); - memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn)); - memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn)); - memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr)); - if (ctrl->mdts) { - int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12; - dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9); - } - - id_ns = mem; - for (i = 1; i <= nn; i++) { - res = nvme_identify(dev, i, 0, dma_addr); - if (res) - continue; - - if (id_ns->ncap == 0) - continue; - - res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i, - dma_addr + 4096, NULL); - if (res) - memset(mem + 4096, 0, 4096); - - ns = nvme_alloc_ns(dev, i, mem, mem + 4096); - if (ns) - list_add_tail(&ns->list, &dev->namespaces); - } - list_for_each_entry(ns, &dev->namespaces, list) - add_disk(ns->disk); - - goto out; - - out_free: - list_for_each_entry_safe(ns, next, &dev->namespaces, list) { - list_del(&ns->list); - nvme_ns_free(ns); - } - - out: - dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr); - return res; -} - -static int nvme_dev_remove(struct nvme_dev *dev) -{ - struct nvme_ns *ns, *next; - - spin_lock(&dev_list_lock); - list_del(&dev->node); - spin_unlock(&dev_list_lock); - - list_for_each_entry_safe(ns, next, &dev->namespaces, list) { - list_del(&ns->list); - del_gendisk(ns->disk); - nvme_ns_free(ns); - } - - nvme_free_queues(dev); - - return 0; -} - -static int nvme_setup_prp_pools(struct nvme_dev *dev) -{ - struct device *dmadev = &dev->pci_dev->dev; - dev->prp_page_pool = dma_pool_create("prp list page", dmadev, - PAGE_SIZE, PAGE_SIZE, 0); - if (!dev->prp_page_pool) - return -ENOMEM; - - /* Optimisation for I/Os between 4k and 128k */ - dev->prp_small_pool = dma_pool_create("prp list 256", dmadev, - 256, 256, 0); - if (!dev->prp_small_pool) { - dma_pool_destroy(dev->prp_page_pool); - return -ENOMEM; - } - return 0; -} - -static void nvme_release_prp_pools(struct nvme_dev *dev) -{ - dma_pool_destroy(dev->prp_page_pool); - dma_pool_destroy(dev->prp_small_pool); -} - -static DEFINE_IDA(nvme_instance_ida); - -static int nvme_set_instance(struct nvme_dev *dev) -{ - int instance, error; - - do { - if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL)) - return -ENODEV; - - spin_lock(&dev_list_lock); - error = ida_get_new(&nvme_instance_ida, &instance); - spin_unlock(&dev_list_lock); - } while (error == -EAGAIN); - - if (error) - return -ENODEV; - - dev->instance = instance; - return 0; -} - -static void nvme_release_instance(struct 
nvme_dev *dev) -{ - spin_lock(&dev_list_lock); - ida_remove(&nvme_instance_ida, dev->instance); - spin_unlock(&dev_list_lock); -} - -static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) -{ - int bars, result = -ENOMEM; - struct nvme_dev *dev; - - dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (!dev) - return -ENOMEM; - dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry), - GFP_KERNEL); - if (!dev->entry) - goto free; - dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *), - GFP_KERNEL); - if (!dev->queues) - goto free; - - if (pci_enable_device_mem(pdev)) - goto free; - pci_set_master(pdev); - bars = pci_select_bars(pdev, IORESOURCE_MEM); - if (pci_request_selected_regions(pdev, bars, "nvme")) - goto disable; - - INIT_LIST_HEAD(&dev->namespaces); - dev->pci_dev = pdev; - pci_set_drvdata(pdev, dev); - dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); - dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); - result = nvme_set_instance(dev); - if (result) - goto disable; - - dev->entry[0].vector = pdev->irq; - - result = nvme_setup_prp_pools(dev); - if (result) - goto disable_msix; - - dev->bar = ioremap(pci_resource_start(pdev, 0), 8192); - if (!dev->bar) { - result = -ENOMEM; - goto disable_msix; - } - - result = nvme_configure_admin_queue(dev); - if (result) - goto unmap; - dev->queue_count++; - - spin_lock(&dev_list_lock); - list_add(&dev->node, &dev_list); - spin_unlock(&dev_list_lock); - - result = nvme_dev_add(dev); - if (result) - goto delete; - - return 0; - - delete: - spin_lock(&dev_list_lock); - list_del(&dev->node); - spin_unlock(&dev_list_lock); - - nvme_free_queues(dev); - unmap: - iounmap(dev->bar); - disable_msix: - pci_disable_msix(pdev); - nvme_release_instance(dev); - nvme_release_prp_pools(dev); - disable: - pci_disable_device(pdev); - pci_release_regions(pdev); - free: - kfree(dev->queues); - kfree(dev->entry); - kfree(dev); - return result; -} - -static void nvme_remove(struct pci_dev *pdev) -{ - struct nvme_dev *dev = pci_get_drvdata(pdev); - nvme_dev_remove(dev); - pci_disable_msix(pdev); - iounmap(dev->bar); - nvme_release_instance(dev); - nvme_release_prp_pools(dev); - pci_disable_device(pdev); - pci_release_regions(pdev); - kfree(dev->queues); - kfree(dev->entry); - kfree(dev); -} - -/* These functions are yet to be implemented */ -#define nvme_error_detected NULL -#define nvme_dump_registers NULL -#define nvme_link_reset NULL -#define nvme_slot_reset NULL -#define nvme_error_resume NULL -#define nvme_suspend NULL -#define nvme_resume NULL - -static const struct pci_error_handlers nvme_err_handler = { - .error_detected = nvme_error_detected, - .mmio_enabled = nvme_dump_registers, - .link_reset = nvme_link_reset, - .slot_reset = nvme_slot_reset, - .resume = nvme_error_resume, -}; - -/* Move to pci_ids.h later */ -#define PCI_CLASS_STORAGE_EXPRESS 0x010802 - -static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = { - { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, - { 0, } -}; -MODULE_DEVICE_TABLE(pci, nvme_id_table); - -static struct pci_driver nvme_driver = { - .name = "nvme", - .id_table = nvme_id_table, - .probe = nvme_probe, - .remove = nvme_remove, - .suspend = nvme_suspend, - .resume = nvme_resume, - .err_handler = &nvme_err_handler, -}; - -static int __init nvme_init(void) -{ - int result; - - nvme_thread = kthread_run(nvme_kthread, NULL, "nvme"); - if (IS_ERR(nvme_thread)) - return PTR_ERR(nvme_thread); - - result = register_blkdev(nvme_major, "nvme"); - if (result < 0) - goto kill_kthread; - else if (result > 0) - 
nvme_major = result; - - result = pci_register_driver(&nvme_driver); - if (result) - goto unregister_blkdev; - return 0; - - unregister_blkdev: - unregister_blkdev(nvme_major, "nvme"); - kill_kthread: - kthread_stop(nvme_thread); - return result; -} - -static void __exit nvme_exit(void) -{ - pci_unregister_driver(&nvme_driver); - unregister_blkdev(nvme_major, "nvme"); - kthread_stop(nvme_thread); -} - -MODULE_AUTHOR("Matthew Wilcox "); -MODULE_LICENSE("GPL"); -MODULE_VERSION("0.8"); -module_init(nvme_init); -module_exit(nvme_exit); diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index aeaea32..e992489 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -63,8 +63,6 @@ config INTEL_IOATDMA depends on PCI && X86 select DMA_ENGINE select DCA - select ASYNC_TX_DISABLE_PQ_VAL_DMA - select ASYNC_TX_DISABLE_XOR_VAL_DMA help Enable support for the Intel(R) I/OAT DMA engine present in recent Intel Xeon chipsets. @@ -174,15 +172,7 @@ config TEGRA20_APB_DMA This DMA controller transfers data from memory to peripheral fifo or vice versa. It does not support memory to memory data transfer. - - -config SH_DMAE - tristate "Renesas SuperH DMAC support" - depends on (SUPERH && SH_DMA) || (ARM && ARCH_SHMOBILE) - depends on !SH_DMA_API - select DMA_ENGINE - help - Enable support for the Renesas SuperH DMA controllers. +source "drivers/dma/sh/Kconfig" config COH901318 bool "ST-Ericsson COH901318 DMA support" @@ -328,6 +318,10 @@ config DMA_ENGINE config DMA_VIRTUAL_CHANNELS tristate +config DMA_ACPI + def_bool y + depends on ACPI + config DMA_OF def_bool y depends on OF diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 488e3ff..a2b0df5 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -3,6 +3,7 @@ ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG obj-$(CONFIG_DMA_ENGINE) += dmaengine.o obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o +obj-$(CONFIG_DMA_ACPI) += acpi-dma.o obj-$(CONFIG_DMA_OF) += of-dma.o obj-$(CONFIG_NET_DMA) += iovlock.o @@ -18,7 +19,7 @@ obj-$(CONFIG_DW_DMAC) += dw_dmac.o obj-$(CONFIG_AT_HDMAC) += at_hdmac.o obj-$(CONFIG_MX3_IPU) += ipu/ obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o -obj-$(CONFIG_SH_DMAE) += sh/ +obj-$(CONFIG_SH_DMAE_BASE) += sh/ obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ obj-$(CONFIG_IMX_SDMA) += imx-sdma.o diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c new file mode 100644 index 0000000..ba6fc62 --- /dev/null +++ b/drivers/dma/acpi-dma.c @@ -0,0 +1,279 @@ +/* + * ACPI helpers for DMA request / controller + * + * Based on of-dma.c + * + * Copyright (C) 2013, Intel Corporation + * Author: Andy Shevchenko + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include + +static LIST_HEAD(acpi_dma_list); +static DEFINE_MUTEX(acpi_dma_lock); + +/** + * acpi_dma_controller_register - Register a DMA controller to ACPI DMA helpers + * @dev: struct device of DMA controller + * @acpi_dma_xlate: translation function which converts a dma specifier + * into a dma_chan structure + * @data pointer to controller specific data to be used by + * translation function + * + * Returns 0 on success or appropriate errno value on error. + * + * Allocated memory should be freed with appropriate acpi_dma_controller_free() + * call. 
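+ *
+ * Note: the @acpi_dma_xlate callback registered here is invoked with an
+ * internal mutex held (see acpi_dma_request_slave_chan_by_index() below),
+ * so it must not call back into acpi_dma_controller_register() or
+ * acpi_dma_controller_free() itself.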
+ */ +int acpi_dma_controller_register(struct device *dev, + struct dma_chan *(*acpi_dma_xlate) + (struct acpi_dma_spec *, struct acpi_dma *), + void *data) +{ + struct acpi_device *adev; + struct acpi_dma *adma; + + if (!dev || !acpi_dma_xlate) + return -EINVAL; + + /* Check if the device was enumerated by ACPI */ + if (!ACPI_HANDLE(dev)) + return -EINVAL; + + if (acpi_bus_get_device(ACPI_HANDLE(dev), &adev)) + return -EINVAL; + + adma = kzalloc(sizeof(*adma), GFP_KERNEL); + if (!adma) + return -ENOMEM; + + adma->dev = dev; + adma->acpi_dma_xlate = acpi_dma_xlate; + adma->data = data; + + /* Now queue acpi_dma controller structure in list */ + mutex_lock(&acpi_dma_lock); + list_add_tail(&adma->dma_controllers, &acpi_dma_list); + mutex_unlock(&acpi_dma_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(acpi_dma_controller_register); + +/** + * acpi_dma_controller_free - Remove a DMA controller from ACPI DMA helpers list + * @dev: struct device of DMA controller + * + * Memory allocated by acpi_dma_controller_register() is freed here. + */ +int acpi_dma_controller_free(struct device *dev) +{ + struct acpi_dma *adma; + + if (!dev) + return -EINVAL; + + mutex_lock(&acpi_dma_lock); + + list_for_each_entry(adma, &acpi_dma_list, dma_controllers) + if (adma->dev == dev) { + list_del(&adma->dma_controllers); + mutex_unlock(&acpi_dma_lock); + kfree(adma); + return 0; + } + + mutex_unlock(&acpi_dma_lock); + return -ENODEV; +} +EXPORT_SYMBOL_GPL(acpi_dma_controller_free); + +static void devm_acpi_dma_release(struct device *dev, void *res) +{ + acpi_dma_controller_free(dev); +} + +/** + * devm_acpi_dma_controller_register - resource managed acpi_dma_controller_register() + * @dev: device that is registering this DMA controller + * @acpi_dma_xlate: translation function + * @data pointer to controller specific data + * + * Managed acpi_dma_controller_register(). DMA controller registered by this + * function are automatically freed on driver detach. See + * acpi_dma_controller_register() for more information. + */ +int devm_acpi_dma_controller_register(struct device *dev, + struct dma_chan *(*acpi_dma_xlate) + (struct acpi_dma_spec *, struct acpi_dma *), + void *data) +{ + void *res; + int ret; + + res = devres_alloc(devm_acpi_dma_release, 0, GFP_KERNEL); + if (!res) + return -ENOMEM; + + ret = acpi_dma_controller_register(dev, acpi_dma_xlate, data); + if (ret) { + devres_free(res); + return ret; + } + devres_add(dev, res); + return 0; +} +EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_register); + +/** + * devm_acpi_dma_controller_free - resource managed acpi_dma_controller_free() + * + * Unregister a DMA controller registered with + * devm_acpi_dma_controller_register(). Normally this function will not need to + * be called and the resource management code will ensure that the resource is + * freed. 
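+ *
+ * @dev: struct device of the DMA controller that was registered with
+ * devm_acpi_dma_controller_register()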
+ */ +void devm_acpi_dma_controller_free(struct device *dev) +{ + WARN_ON(devres_destroy(dev, devm_acpi_dma_release, NULL, NULL)); +} +EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free); + +struct acpi_dma_parser_data { + struct acpi_dma_spec dma_spec; + size_t index; + size_t n; +}; + +/** + * acpi_dma_parse_fixed_dma - Parse FixedDMA ACPI resources to a DMA specifier + * @res: struct acpi_resource to get FixedDMA resources from + * @data: pointer to a helper struct acpi_dma_parser_data + */ +static int acpi_dma_parse_fixed_dma(struct acpi_resource *res, void *data) +{ + struct acpi_dma_parser_data *pdata = data; + + if (res->type == ACPI_RESOURCE_TYPE_FIXED_DMA) { + struct acpi_resource_fixed_dma *dma = &res->data.fixed_dma; + + if (pdata->n++ == pdata->index) { + pdata->dma_spec.chan_id = dma->channels; + pdata->dma_spec.slave_id = dma->request_lines; + } + } + + /* Tell the ACPI core to skip this resource */ + return 1; +} + +/** + * acpi_dma_request_slave_chan_by_index - Get the DMA slave channel + * @dev: struct device to get DMA request from + * @index: index of FixedDMA descriptor for @dev + * + * Returns pointer to appropriate dma channel on success or NULL on error. + */ +struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev, + size_t index) +{ + struct acpi_dma_parser_data pdata; + struct acpi_dma_spec *dma_spec = &pdata.dma_spec; + struct list_head resource_list; + struct acpi_device *adev; + struct acpi_dma *adma; + struct dma_chan *chan = NULL; + + /* Check if the device was enumerated by ACPI */ + if (!dev || !ACPI_HANDLE(dev)) + return NULL; + + if (acpi_bus_get_device(ACPI_HANDLE(dev), &adev)) + return NULL; + + memset(&pdata, 0, sizeof(pdata)); + pdata.index = index; + + /* Initial values for the request line and channel */ + dma_spec->chan_id = -1; + dma_spec->slave_id = -1; + + INIT_LIST_HEAD(&resource_list); + acpi_dev_get_resources(adev, &resource_list, + acpi_dma_parse_fixed_dma, &pdata); + acpi_dev_free_resource_list(&resource_list); + + if (dma_spec->slave_id < 0 || dma_spec->chan_id < 0) + return NULL; + + mutex_lock(&acpi_dma_lock); + + list_for_each_entry(adma, &acpi_dma_list, dma_controllers) { + dma_spec->dev = adma->dev; + chan = adma->acpi_dma_xlate(dma_spec, adma); + if (chan) + break; + } + + mutex_unlock(&acpi_dma_lock); + return chan; +} +EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_index); + +/** + * acpi_dma_request_slave_chan_by_name - Get the DMA slave channel + * @dev: struct device to get DMA request from + * @name: represents corresponding FixedDMA descriptor for @dev + * + * In order to support both Device Tree and ACPI in a single driver we + * translate the names "tx" and "rx" here based on the most common case where + * the first FixedDMA descriptor is TX and second is RX. + * + * Returns pointer to appropriate dma channel on success or NULL on error. + */ +struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev, + const char *name) +{ + size_t index; + + if (!strcmp(name, "tx")) + index = 0; + else if (!strcmp(name, "rx")) + index = 1; + else + return NULL; + + return acpi_dma_request_slave_chan_by_index(dev, index); +} +EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_name); + +/** + * acpi_dma_simple_xlate - Simple ACPI DMA engine translation helper + * @dma_spec: pointer to ACPI DMA specifier + * @adma: pointer to ACPI DMA controller data + * + * A simple translation function for ACPI based devices. Passes &struct + * dma_spec to the DMA controller driver provided filter function. 
Returns + * pointer to the channel if found or %NULL otherwise. + */ +struct dma_chan *acpi_dma_simple_xlate(struct acpi_dma_spec *dma_spec, + struct acpi_dma *adma) +{ + struct acpi_dma_filter_info *info = adma->data; + + if (!info || !info->filter_fn) + return NULL; + + return dma_request_channel(info->dma_cap, info->filter_fn, dma_spec); +} +EXPORT_SYMBOL_GPL(acpi_dma_simple_xlate); diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 88cfc61..e923cda 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -24,6 +24,7 @@ #include #include #include +#include #include "at_hdmac_regs.h" #include "dmaengine.h" @@ -677,7 +678,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, ctrlb |= ATC_DST_ADDR_MODE_FIXED | ATC_SRC_ADDR_MODE_INCR | ATC_FC_MEM2PER - | ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF); + | ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if); reg = sconfig->dst_addr; for_each_sg(sgl, sg, sg_len, i) { struct at_desc *desc; @@ -716,7 +717,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, ctrlb |= ATC_DST_ADDR_MODE_INCR | ATC_SRC_ADDR_MODE_FIXED | ATC_FC_PER2MEM - | ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF); + | ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if); reg = sconfig->src_addr; for_each_sg(sgl, sg, sg_len, i) { @@ -822,8 +823,8 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc, desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED | ATC_SRC_ADDR_MODE_INCR | ATC_FC_MEM2PER - | ATC_SIF(AT_DMA_MEM_IF) - | ATC_DIF(AT_DMA_PER_IF); + | ATC_SIF(atchan->mem_if) + | ATC_DIF(atchan->per_if); break; case DMA_DEV_TO_MEM: @@ -833,8 +834,8 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc, desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR | ATC_SRC_ADDR_MODE_FIXED | ATC_FC_PER2MEM - | ATC_SIF(AT_DMA_PER_IF) - | ATC_DIF(AT_DMA_MEM_IF); + | ATC_SIF(atchan->per_if) + | ATC_DIF(atchan->mem_if); break; default: @@ -1188,6 +1189,67 @@ static void atc_free_chan_resources(struct dma_chan *chan) dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); } +#ifdef CONFIG_OF +static bool at_dma_filter(struct dma_chan *chan, void *slave) +{ + struct at_dma_slave *atslave = slave; + + if (atslave->dma_dev == chan->device->dev) { + chan->private = atslave; + return true; + } else { + return false; + } +} + +static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, + struct of_dma *of_dma) +{ + struct dma_chan *chan; + struct at_dma_chan *atchan; + struct at_dma_slave *atslave; + dma_cap_mask_t mask; + unsigned int per_id; + struct platform_device *dmac_pdev; + + if (dma_spec->args_count != 2) + return NULL; + + dmac_pdev = of_find_device_by_node(dma_spec->np); + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL); + if (!atslave) + return NULL; + /* + * We can fill both SRC_PER and DST_PER, one of these fields will be + * ignored depending on DMA transfer direction. 
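+ *
+ * As an illustration (hypothetical values, not taken from a real DTS):
+ * argument cells of <0x00010002 3> give per_id = 3 here, and yield
+ * mem_if = 1 and per_if = 2 when the first cell is decoded below.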
+ */ + per_id = dma_spec->args[1]; + atslave->cfg = ATC_FIFOCFG_HALFFIFO | ATC_DST_H2SEL_HW + | ATC_SRC_H2SEL_HW | ATC_DST_PER(per_id) + | ATC_SRC_PER(per_id); + atslave->dma_dev = &dmac_pdev->dev; + + chan = dma_request_channel(mask, at_dma_filter, atslave); + if (!chan) + return NULL; + + atchan = to_at_dma_chan(chan); + atchan->per_if = dma_spec->args[0] & 0xff; + atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff; + + return chan; +} +#else +static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, + struct of_dma *of_dma) +{ + return NULL; +} +#endif /*-- Module Management -----------------------------------------------*/ @@ -1342,6 +1404,8 @@ static int __init at_dma_probe(struct platform_device *pdev) for (i = 0; i < plat_dat->nr_channels; i++) { struct at_dma_chan *atchan = &atdma->chan[i]; + atchan->mem_if = AT_DMA_MEM_IF; + atchan->per_if = AT_DMA_PER_IF; atchan->chan_common.device = &atdma->dma_common; dma_cookie_init(&atchan->chan_common); list_add_tail(&atchan->chan_common.device_node, @@ -1388,8 +1452,25 @@ static int __init at_dma_probe(struct platform_device *pdev) dma_async_device_register(&atdma->dma_common); + /* + * Do not return an error if the dmac node is not present in order to + * not break the existing way of requesting channel with + * dma_request_channel(). + */ + if (pdev->dev.of_node) { + err = of_dma_controller_register(pdev->dev.of_node, + at_dma_xlate, atdma); + if (err) { + dev_err(&pdev->dev, "could not register of_dma_controller\n"); + goto err_of_dma_controller_register; + } + } + return 0; +err_of_dma_controller_register: + dma_async_device_unregister(&atdma->dma_common); + dma_pool_destroy(atdma->dma_desc_pool); err_pool_create: platform_set_drvdata(pdev, NULL); free_irq(platform_get_irq(pdev, 0), atdma); @@ -1406,7 +1487,7 @@ err_kfree: return err; } -static int __exit at_dma_remove(struct platform_device *pdev) +static int at_dma_remove(struct platform_device *pdev) { struct at_dma *atdma = platform_get_drvdata(pdev); struct dma_chan *chan, *_chan; @@ -1564,7 +1645,7 @@ static const struct dev_pm_ops at_dma_dev_pm_ops = { }; static struct platform_driver at_dma_driver = { - .remove = __exit_p(at_dma_remove), + .remove = at_dma_remove, .shutdown = at_dma_shutdown, .id_table = atdma_devtypes, .driver = { diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index 0eb3c13..c604d26 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h @@ -220,6 +220,8 @@ enum atc_status { * @device: parent device * @ch_regs: memory mapped register base * @mask: channel index in a mask + * @per_if: peripheral interface + * @mem_if: memory interface * @status: transmit status information from irq/prep* functions * to tasklet (use atomic operations) * @tasklet: bottom half to finish transaction work @@ -238,6 +240,8 @@ struct at_dma_chan { struct at_dma *device; void __iomem *ch_regs; u8 mask; + u8 per_if; + u8 mem_if; unsigned long status; struct tasklet_struct tasklet; u32 save_cfg; diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index 797940e..3b23061 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -2748,7 +2748,7 @@ static int __init coh901318_probe(struct platform_device *pdev) return err; } -static int __exit coh901318_remove(struct platform_device *pdev) +static int coh901318_remove(struct platform_device *pdev) { struct coh901318_base *base = platform_get_drvdata(pdev); @@ -2760,7 +2760,7 @@ static int __exit coh901318_remove(struct platform_device *pdev) static struct 
platform_driver coh901318_driver = { - .remove = __exit_p(coh901318_remove), + .remove = coh901318_remove, .driver = { .name = "coh901318", }, diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index b2728d6..93f7992 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -62,6 +62,8 @@ #include #include #include +#include +#include #include static DEFINE_MUTEX(dma_list_mutex); @@ -174,7 +176,8 @@ static struct class dma_devclass = { #define dma_device_satisfies_mask(device, mask) \ __dma_device_satisfies_mask((device), &(mask)) static int -__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want) +__dma_device_satisfies_mask(struct dma_device *device, + const dma_cap_mask_t *want) { dma_cap_mask_t has; @@ -463,7 +466,8 @@ static void dma_channel_rebalance(void) } } -static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev, +static struct dma_chan *private_candidate(const dma_cap_mask_t *mask, + struct dma_device *dev, dma_filter_fn fn, void *fn_param) { struct dma_chan *chan; @@ -505,7 +509,8 @@ static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_devic * @fn: optional callback to disposition available channels * @fn_param: opaque parameter to pass to dma_filter_fn */ -struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param) +struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, + dma_filter_fn fn, void *fn_param) { struct dma_device *device, *_d; struct dma_chan *chan = NULL; @@ -555,12 +560,16 @@ EXPORT_SYMBOL_GPL(__dma_request_channel); * @dev: pointer to client device structure * @name: slave channel name */ -struct dma_chan *dma_request_slave_channel(struct device *dev, char *name) +struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name) { /* If device-tree is present get slave info from here */ if (dev->of_node) return of_dma_request_slave_channel(dev->of_node, name); + /* If device was enumerated by ACPI get slave info from here */ + if (ACPI_HANDLE(dev)) + return acpi_dma_request_slave_chan_by_name(dev, name); + return NULL; } EXPORT_SYMBOL_GPL(dma_request_slave_channel); diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index a2c8904..d8ce4ec 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -2,6 +2,7 @@ * DMA Engine test module * * Copyright (C) 2007 Atmel Corporation + * Copyright (C) 2013 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -18,6 +19,10 @@ #include #include #include +#include +#include +#include +#include static unsigned int test_buf_size = 16384; module_param(test_buf_size, uint, S_IRUGO); @@ -61,6 +66,9 @@ module_param(timeout, uint, S_IRUGO); MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), " "Pass -1 for infinite timeout"); +/* Maximum amount of mismatched bytes in buffer to print */ +#define MAX_ERROR_COUNT 32 + /* * Initialization patterns. All bytes in the source buffer has bit 7 * set, all bytes in the destination buffer has bit 7 cleared. 
@@ -78,13 +86,65 @@ MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), " #define PATTERN_OVERWRITE 0x20 #define PATTERN_COUNT_MASK 0x1f +enum dmatest_error_type { + DMATEST_ET_OK, + DMATEST_ET_MAP_SRC, + DMATEST_ET_MAP_DST, + DMATEST_ET_PREP, + DMATEST_ET_SUBMIT, + DMATEST_ET_TIMEOUT, + DMATEST_ET_DMA_ERROR, + DMATEST_ET_DMA_IN_PROGRESS, + DMATEST_ET_VERIFY, + DMATEST_ET_VERIFY_BUF, +}; + +struct dmatest_verify_buffer { + unsigned int index; + u8 expected; + u8 actual; +}; + +struct dmatest_verify_result { + unsigned int error_count; + struct dmatest_verify_buffer data[MAX_ERROR_COUNT]; + u8 pattern; + bool is_srcbuf; +}; + +struct dmatest_thread_result { + struct list_head node; + unsigned int n; + unsigned int src_off; + unsigned int dst_off; + unsigned int len; + enum dmatest_error_type type; + union { + unsigned long data; + dma_cookie_t cookie; + enum dma_status status; + int error; + struct dmatest_verify_result *vr; + }; +}; + +struct dmatest_result { + struct list_head node; + char *name; + struct list_head results; +}; + +struct dmatest_info; + struct dmatest_thread { struct list_head node; + struct dmatest_info *info; struct task_struct *task; struct dma_chan *chan; u8 **srcs; u8 **dsts; enum dma_transaction_type type; + bool done; }; struct dmatest_chan { @@ -93,25 +153,69 @@ struct dmatest_chan { struct list_head threads; }; -/* - * These are protected by dma_list_mutex since they're only used by - * the DMA filter function callback +/** + * struct dmatest_params - test parameters. + * @buf_size: size of the memcpy test buffer + * @channel: bus ID of the channel to test + * @device: bus ID of the DMA Engine to test + * @threads_per_chan: number of threads to start per channel + * @max_channels: maximum number of channels to use + * @iterations: iterations before stopping test + * @xor_sources: number of xor source buffers + * @pq_sources: number of p+q source buffers + * @timeout: transfer timeout in msec, -1 for infinite timeout */ -static LIST_HEAD(dmatest_channels); -static unsigned int nr_channels; +struct dmatest_params { + unsigned int buf_size; + char channel[20]; + char device[20]; + unsigned int threads_per_chan; + unsigned int max_channels; + unsigned int iterations; + unsigned int xor_sources; + unsigned int pq_sources; + int timeout; +}; -static bool dmatest_match_channel(struct dma_chan *chan) +/** + * struct dmatest_info - test information. 
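+ * @channels: list of channels under test
+ * @nr_channels: number of channels in @channels
+ * @root: dentry of the module's debugfs directory
+ * @dbgfs_params: test parameters as set via debugfs; copied into @params
+ *		when a new run is started
+ * @results: list of accumulated test results
+ * @results_lock: protects @results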
+ * @params: test parameters + * @lock: access protection to the fields of this structure + */ +struct dmatest_info { + /* Test parameters */ + struct dmatest_params params; + + /* Internal state */ + struct list_head channels; + unsigned int nr_channels; + struct mutex lock; + + /* debugfs related stuff */ + struct dentry *root; + struct dmatest_params dbgfs_params; + + /* Test results */ + struct list_head results; + struct mutex results_lock; +}; + +static struct dmatest_info test_info; + +static bool dmatest_match_channel(struct dmatest_params *params, + struct dma_chan *chan) { - if (test_channel[0] == '\0') + if (params->channel[0] == '\0') return true; - return strcmp(dma_chan_name(chan), test_channel) == 0; + return strcmp(dma_chan_name(chan), params->channel) == 0; } -static bool dmatest_match_device(struct dma_device *device) +static bool dmatest_match_device(struct dmatest_params *params, + struct dma_device *device) { - if (test_device[0] == '\0') + if (params->device[0] == '\0') return true; - return strcmp(dev_name(device->dev), test_device) == 0; + return strcmp(dev_name(device->dev), params->device) == 0; } static unsigned long dmatest_random(void) @@ -122,7 +226,8 @@ static unsigned long dmatest_random(void) return buf; } -static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len) +static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len, + unsigned int buf_size) { unsigned int i; u8 *buf; @@ -133,13 +238,14 @@ static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len) for ( ; i < start + len; i++) buf[i] = PATTERN_SRC | PATTERN_COPY | (~i & PATTERN_COUNT_MASK); - for ( ; i < test_buf_size; i++) + for ( ; i < buf_size; i++) buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); buf++; } } -static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len) +static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len, + unsigned int buf_size) { unsigned int i; u8 *buf; @@ -150,40 +256,14 @@ static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len) for ( ; i < start + len; i++) buf[i] = PATTERN_DST | PATTERN_OVERWRITE | (~i & PATTERN_COUNT_MASK); - for ( ; i < test_buf_size; i++) + for ( ; i < buf_size; i++) buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); } } -static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index, - unsigned int counter, bool is_srcbuf) -{ - u8 diff = actual ^ pattern; - u8 expected = pattern | (~counter & PATTERN_COUNT_MASK); - const char *thread_name = current->comm; - - if (is_srcbuf) - pr_warning("%s: srcbuf[0x%x] overwritten!" - " Expected %02x, got %02x\n", - thread_name, index, expected, actual); - else if ((pattern & PATTERN_COPY) - && (diff & (PATTERN_COPY | PATTERN_OVERWRITE))) - pr_warning("%s: dstbuf[0x%x] not copied!" - " Expected %02x, got %02x\n", - thread_name, index, expected, actual); - else if (diff & PATTERN_SRC) - pr_warning("%s: dstbuf[0x%x] was copied!" - " Expected %02x, got %02x\n", - thread_name, index, expected, actual); - else - pr_warning("%s: dstbuf[0x%x] mismatch!" 
- " Expected %02x, got %02x\n", - thread_name, index, expected, actual); -} - -static unsigned int dmatest_verify(u8 **bufs, unsigned int start, - unsigned int end, unsigned int counter, u8 pattern, - bool is_srcbuf) +static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs, + unsigned int start, unsigned int end, unsigned int counter, + u8 pattern, bool is_srcbuf) { unsigned int i; unsigned int error_count = 0; @@ -191,6 +271,7 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start, u8 expected; u8 *buf; unsigned int counter_orig = counter; + struct dmatest_verify_buffer *vb; for (; (buf = *bufs); bufs++) { counter = counter_orig; @@ -198,18 +279,21 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start, actual = buf[i]; expected = pattern | (~counter & PATTERN_COUNT_MASK); if (actual != expected) { - if (error_count < 32) - dmatest_mismatch(actual, pattern, i, - counter, is_srcbuf); + if (error_count < MAX_ERROR_COUNT && vr) { + vb = &vr->data[error_count]; + vb->index = i; + vb->expected = expected; + vb->actual = actual; + } error_count++; } counter++; } } - if (error_count > 32) + if (error_count > MAX_ERROR_COUNT) pr_warning("%s: %u errors suppressed\n", - current->comm, error_count - 32); + current->comm, error_count - MAX_ERROR_COUNT); return error_count; } @@ -249,6 +333,170 @@ static unsigned int min_odd(unsigned int x, unsigned int y) return val % 2 ? val : val - 1; } +static char *verify_result_get_one(struct dmatest_verify_result *vr, + unsigned int i) +{ + struct dmatest_verify_buffer *vb = &vr->data[i]; + u8 diff = vb->actual ^ vr->pattern; + static char buf[512]; + char *msg; + + if (vr->is_srcbuf) + msg = "srcbuf overwritten!"; + else if ((vr->pattern & PATTERN_COPY) + && (diff & (PATTERN_COPY | PATTERN_OVERWRITE))) + msg = "dstbuf not copied!"; + else if (diff & PATTERN_SRC) + msg = "dstbuf was copied!"; + else + msg = "dstbuf mismatch!"; + + snprintf(buf, sizeof(buf) - 1, "%s [0x%x] Expected %02x, got %02x", msg, + vb->index, vb->expected, vb->actual); + + return buf; +} + +static char *thread_result_get(const char *name, + struct dmatest_thread_result *tr) +{ + static const char * const messages[] = { + [DMATEST_ET_OK] = "No errors", + [DMATEST_ET_MAP_SRC] = "src mapping error", + [DMATEST_ET_MAP_DST] = "dst mapping error", + [DMATEST_ET_PREP] = "prep error", + [DMATEST_ET_SUBMIT] = "submit error", + [DMATEST_ET_TIMEOUT] = "test timed out", + [DMATEST_ET_DMA_ERROR] = + "got completion callback (DMA_ERROR)", + [DMATEST_ET_DMA_IN_PROGRESS] = + "got completion callback (DMA_IN_PROGRESS)", + [DMATEST_ET_VERIFY] = "errors", + [DMATEST_ET_VERIFY_BUF] = "verify errors", + }; + static char buf[512]; + + snprintf(buf, sizeof(buf) - 1, + "%s: #%u: %s with src_off=0x%x ""dst_off=0x%x len=0x%x (%lu)", + name, tr->n, messages[tr->type], tr->src_off, tr->dst_off, + tr->len, tr->data); + + return buf; +} + +static int thread_result_add(struct dmatest_info *info, + struct dmatest_result *r, enum dmatest_error_type type, + unsigned int n, unsigned int src_off, unsigned int dst_off, + unsigned int len, unsigned long data) +{ + struct dmatest_thread_result *tr; + + tr = kzalloc(sizeof(*tr), GFP_KERNEL); + if (!tr) + return -ENOMEM; + + tr->type = type; + tr->n = n; + tr->src_off = src_off; + tr->dst_off = dst_off; + tr->len = len; + tr->data = data; + + mutex_lock(&info->results_lock); + list_add_tail(&tr->node, &r->results); + mutex_unlock(&info->results_lock); + + pr_warn("%s\n", thread_result_get(r->name, tr)); + return 0; +} + +static 
unsigned int verify_result_add(struct dmatest_info *info, + struct dmatest_result *r, unsigned int n, + unsigned int src_off, unsigned int dst_off, unsigned int len, + u8 **bufs, int whence, unsigned int counter, u8 pattern, + bool is_srcbuf) +{ + struct dmatest_verify_result *vr; + unsigned int error_count; + unsigned int buf_off = is_srcbuf ? src_off : dst_off; + unsigned int start, end; + + if (whence < 0) { + start = 0; + end = buf_off; + } else if (whence > 0) { + start = buf_off + len; + end = info->params.buf_size; + } else { + start = buf_off; + end = buf_off + len; + } + + vr = kmalloc(sizeof(*vr), GFP_KERNEL); + if (!vr) { + pr_warn("dmatest: No memory to store verify result\n"); + return dmatest_verify(NULL, bufs, start, end, counter, pattern, + is_srcbuf); + } + + vr->pattern = pattern; + vr->is_srcbuf = is_srcbuf; + + error_count = dmatest_verify(vr, bufs, start, end, counter, pattern, + is_srcbuf); + if (error_count) { + vr->error_count = error_count; + thread_result_add(info, r, DMATEST_ET_VERIFY_BUF, n, src_off, + dst_off, len, (unsigned long)vr); + return error_count; + } + + kfree(vr); + return 0; +} + +static void result_free(struct dmatest_info *info, const char *name) +{ + struct dmatest_result *r, *_r; + + mutex_lock(&info->results_lock); + list_for_each_entry_safe(r, _r, &info->results, node) { + struct dmatest_thread_result *tr, *_tr; + + if (name && strcmp(r->name, name)) + continue; + + list_for_each_entry_safe(tr, _tr, &r->results, node) { + if (tr->type == DMATEST_ET_VERIFY_BUF) + kfree(tr->vr); + list_del(&tr->node); + kfree(tr); + } + + kfree(r->name); + list_del(&r->node); + kfree(r); + } + + mutex_unlock(&info->results_lock); +} + +static struct dmatest_result *result_init(struct dmatest_info *info, + const char *name) +{ + struct dmatest_result *r; + + r = kzalloc(sizeof(*r), GFP_KERNEL); + if (r) { + r->name = kstrdup(name, GFP_KERNEL); + INIT_LIST_HEAD(&r->results); + mutex_lock(&info->results_lock); + list_add_tail(&r->node, &info->results); + mutex_unlock(&info->results_lock); + } + return r; +} + /* * This function repeatedly tests DMA transfers of various lengths and * offsets for a given operation type until it is told to exit by @@ -268,6 +516,8 @@ static int dmatest_func(void *data) DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait); struct dmatest_thread *thread = data; struct dmatest_done done = { .wait = &done_wait }; + struct dmatest_info *info; + struct dmatest_params *params; struct dma_chan *chan; struct dma_device *dev; const char *thread_name; @@ -278,11 +528,12 @@ static int dmatest_func(void *data) dma_cookie_t cookie; enum dma_status status; enum dma_ctrl_flags flags; - u8 pq_coefs[pq_sources + 1]; + u8 *pq_coefs = NULL; int ret; int src_cnt; int dst_cnt; int i; + struct dmatest_result *result; thread_name = current->comm; set_freezable(); @@ -290,28 +541,39 @@ static int dmatest_func(void *data) ret = -ENOMEM; smp_rmb(); + info = thread->info; + params = &info->params; chan = thread->chan; dev = chan->device; if (thread->type == DMA_MEMCPY) src_cnt = dst_cnt = 1; else if (thread->type == DMA_XOR) { /* force odd to ensure dst = src */ - src_cnt = min_odd(xor_sources | 1, dev->max_xor); + src_cnt = min_odd(params->xor_sources | 1, dev->max_xor); dst_cnt = 1; } else if (thread->type == DMA_PQ) { /* force odd to ensure dst = src */ - src_cnt = min_odd(pq_sources | 1, dma_maxpq(dev, 0)); + src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0)); dst_cnt = 2; + + pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL); + if (!pq_coefs) + goto 
err_thread_type; + for (i = 0; i < src_cnt; i++) pq_coefs[i] = 1; } else + goto err_thread_type; + + result = result_init(info, thread_name); + if (!result) goto err_srcs; thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL); if (!thread->srcs) goto err_srcs; for (i = 0; i < src_cnt; i++) { - thread->srcs[i] = kmalloc(test_buf_size, GFP_KERNEL); + thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL); if (!thread->srcs[i]) goto err_srcbuf; } @@ -321,7 +583,7 @@ static int dmatest_func(void *data) if (!thread->dsts) goto err_dsts; for (i = 0; i < dst_cnt; i++) { - thread->dsts[i] = kmalloc(test_buf_size, GFP_KERNEL); + thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL); if (!thread->dsts[i]) goto err_dstbuf; } @@ -337,7 +599,7 @@ static int dmatest_func(void *data) | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE; while (!kthread_should_stop() - && !(iterations && total_tests >= iterations)) { + && !(params->iterations && total_tests >= params->iterations)) { struct dma_async_tx_descriptor *tx = NULL; dma_addr_t dma_srcs[src_cnt]; dma_addr_t dma_dsts[dst_cnt]; @@ -353,24 +615,24 @@ static int dmatest_func(void *data) else if (thread->type == DMA_PQ) align = dev->pq_align; - if (1 << align > test_buf_size) { + if (1 << align > params->buf_size) { pr_err("%u-byte buffer too small for %d-byte alignment\n", - test_buf_size, 1 << align); + params->buf_size, 1 << align); break; } - len = dmatest_random() % test_buf_size + 1; + len = dmatest_random() % params->buf_size + 1; len = (len >> align) << align; if (!len) len = 1 << align; - src_off = dmatest_random() % (test_buf_size - len + 1); - dst_off = dmatest_random() % (test_buf_size - len + 1); + src_off = dmatest_random() % (params->buf_size - len + 1); + dst_off = dmatest_random() % (params->buf_size - len + 1); src_off = (src_off >> align) << align; dst_off = (dst_off >> align) << align; - dmatest_init_srcs(thread->srcs, src_off, len); - dmatest_init_dsts(thread->dsts, dst_off, len); + dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size); + dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size); for (i = 0; i < src_cnt; i++) { u8 *buf = thread->srcs[i] + src_off; @@ -380,10 +642,10 @@ static int dmatest_func(void *data) ret = dma_mapping_error(dev->dev, dma_srcs[i]); if (ret) { unmap_src(dev->dev, dma_srcs, len, i); - pr_warn("%s: #%u: mapping error %d with " - "src_off=0x%x len=0x%x\n", - thread_name, total_tests - 1, ret, - src_off, len); + thread_result_add(info, result, + DMATEST_ET_MAP_SRC, + total_tests, src_off, dst_off, + len, ret); failed_tests++; continue; } @@ -391,16 +653,17 @@ static int dmatest_func(void *data) /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ for (i = 0; i < dst_cnt; i++) { dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i], - test_buf_size, + params->buf_size, DMA_BIDIRECTIONAL); ret = dma_mapping_error(dev->dev, dma_dsts[i]); if (ret) { unmap_src(dev->dev, dma_srcs, len, src_cnt); - unmap_dst(dev->dev, dma_dsts, test_buf_size, i); - pr_warn("%s: #%u: mapping error %d with " - "dst_off=0x%x len=0x%x\n", - thread_name, total_tests - 1, ret, - dst_off, test_buf_size); + unmap_dst(dev->dev, dma_dsts, params->buf_size, + i); + thread_result_add(info, result, + DMATEST_ET_MAP_DST, + total_tests, src_off, dst_off, + len, ret); failed_tests++; continue; } @@ -428,11 +691,11 @@ static int dmatest_func(void *data) if (!tx) { unmap_src(dev->dev, dma_srcs, len, src_cnt); - unmap_dst(dev->dev, dma_dsts, test_buf_size, dst_cnt); - pr_warning("%s: #%u: prep error 
with src_off=0x%x " - "dst_off=0x%x len=0x%x\n", - thread_name, total_tests - 1, - src_off, dst_off, len); + unmap_dst(dev->dev, dma_dsts, params->buf_size, + dst_cnt); + thread_result_add(info, result, DMATEST_ET_PREP, + total_tests, src_off, dst_off, + len, 0); msleep(100); failed_tests++; continue; @@ -444,18 +707,18 @@ static int dmatest_func(void *data) cookie = tx->tx_submit(tx); if (dma_submit_error(cookie)) { - pr_warning("%s: #%u: submit error %d with src_off=0x%x " - "dst_off=0x%x len=0x%x\n", - thread_name, total_tests - 1, cookie, - src_off, dst_off, len); + thread_result_add(info, result, DMATEST_ET_SUBMIT, + total_tests, src_off, dst_off, + len, cookie); msleep(100); failed_tests++; continue; } dma_async_issue_pending(chan); - wait_event_freezable_timeout(done_wait, done.done, - msecs_to_jiffies(timeout)); + wait_event_freezable_timeout(done_wait, + done.done || kthread_should_stop(), + msecs_to_jiffies(params->timeout)); status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); @@ -468,56 +731,57 @@ static int dmatest_func(void *data) * free it this time?" dancing. For now, just * leave it dangling. */ - pr_warning("%s: #%u: test timed out\n", - thread_name, total_tests - 1); + thread_result_add(info, result, DMATEST_ET_TIMEOUT, + total_tests, src_off, dst_off, + len, 0); failed_tests++; continue; } else if (status != DMA_SUCCESS) { - pr_warning("%s: #%u: got completion callback," - " but status is \'%s\'\n", - thread_name, total_tests - 1, - status == DMA_ERROR ? "error" : "in progress"); + enum dmatest_error_type type = (status == DMA_ERROR) ? + DMATEST_ET_DMA_ERROR : DMATEST_ET_DMA_IN_PROGRESS; + thread_result_add(info, result, type, + total_tests, src_off, dst_off, + len, status); failed_tests++; continue; } /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */ - unmap_dst(dev->dev, dma_dsts, test_buf_size, dst_cnt); + unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt); error_count = 0; pr_debug("%s: verifying source buffer...\n", thread_name); - error_count += dmatest_verify(thread->srcs, 0, src_off, + error_count += verify_result_add(info, result, total_tests, + src_off, dst_off, len, thread->srcs, -1, 0, PATTERN_SRC, true); - error_count += dmatest_verify(thread->srcs, src_off, - src_off + len, src_off, - PATTERN_SRC | PATTERN_COPY, true); - error_count += dmatest_verify(thread->srcs, src_off + len, - test_buf_size, src_off + len, - PATTERN_SRC, true); - - pr_debug("%s: verifying dest buffer...\n", - thread->task->comm); - error_count += dmatest_verify(thread->dsts, 0, dst_off, + error_count += verify_result_add(info, result, total_tests, + src_off, dst_off, len, thread->srcs, 0, + src_off, PATTERN_SRC | PATTERN_COPY, true); + error_count += verify_result_add(info, result, total_tests, + src_off, dst_off, len, thread->srcs, 1, + src_off + len, PATTERN_SRC, true); + + pr_debug("%s: verifying dest buffer...\n", thread_name); + error_count += verify_result_add(info, result, total_tests, + src_off, dst_off, len, thread->dsts, -1, 0, PATTERN_DST, false); - error_count += dmatest_verify(thread->dsts, dst_off, - dst_off + len, src_off, - PATTERN_SRC | PATTERN_COPY, false); - error_count += dmatest_verify(thread->dsts, dst_off + len, - test_buf_size, dst_off + len, - PATTERN_DST, false); + error_count += verify_result_add(info, result, total_tests, + src_off, dst_off, len, thread->dsts, 0, + src_off, PATTERN_SRC | PATTERN_COPY, false); + error_count += verify_result_add(info, result, total_tests, + src_off, dst_off, len, thread->dsts, 1, + dst_off + len, 
PATTERN_DST, false); if (error_count) { - pr_warning("%s: #%u: %u errors with " - "src_off=0x%x dst_off=0x%x len=0x%x\n", - thread_name, total_tests - 1, error_count, - src_off, dst_off, len); + thread_result_add(info, result, DMATEST_ET_VERIFY, + total_tests, src_off, dst_off, + len, error_count); failed_tests++; } else { - pr_debug("%s: #%u: No errors with " - "src_off=0x%x dst_off=0x%x len=0x%x\n", - thread_name, total_tests - 1, - src_off, dst_off, len); + thread_result_add(info, result, DMATEST_ET_OK, + total_tests, src_off, dst_off, + len, 0); } } @@ -532,6 +796,8 @@ err_dsts: err_srcbuf: kfree(thread->srcs); err_srcs: + kfree(pq_coefs); +err_thread_type: pr_notice("%s: terminating after %u tests, %u failures (status %d)\n", thread_name, total_tests, failed_tests, ret); @@ -539,7 +805,9 @@ err_srcs: if (ret) dmaengine_terminate_all(chan); - if (iterations > 0) + thread->done = true; + + if (params->iterations > 0) while (!kthread_should_stop()) { DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit); interruptible_sleep_on(&wait_dmatest_exit); @@ -568,8 +836,10 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc) kfree(dtc); } -static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_type type) +static int dmatest_add_threads(struct dmatest_info *info, + struct dmatest_chan *dtc, enum dma_transaction_type type) { + struct dmatest_params *params = &info->params; struct dmatest_thread *thread; struct dma_chan *chan = dtc->chan; char *op; @@ -584,7 +854,7 @@ static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_ty else return -EINVAL; - for (i = 0; i < threads_per_chan; i++) { + for (i = 0; i < params->threads_per_chan; i++) { thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL); if (!thread) { pr_warning("dmatest: No memory for %s-%s%u\n", @@ -592,6 +862,7 @@ static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_ty break; } + thread->info = info; thread->chan = dtc->chan; thread->type = type; smp_wmb(); @@ -612,7 +883,8 @@ static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_ty return i; } -static int dmatest_add_channel(struct dma_chan *chan) +static int dmatest_add_channel(struct dmatest_info *info, + struct dma_chan *chan) { struct dmatest_chan *dtc; struct dma_device *dma_dev = chan->device; @@ -629,75 +901,418 @@ static int dmatest_add_channel(struct dma_chan *chan) INIT_LIST_HEAD(&dtc->threads); if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { - cnt = dmatest_add_threads(dtc, DMA_MEMCPY); + cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY); thread_count += cnt > 0 ? cnt : 0; } if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { - cnt = dmatest_add_threads(dtc, DMA_XOR); + cnt = dmatest_add_threads(info, dtc, DMA_XOR); thread_count += cnt > 0 ? cnt : 0; } if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { - cnt = dmatest_add_threads(dtc, DMA_PQ); + cnt = dmatest_add_threads(info, dtc, DMA_PQ); thread_count += cnt > 0 ? 
cnt : 0; } pr_info("dmatest: Started %u threads using %s\n", thread_count, dma_chan_name(chan)); - list_add_tail(&dtc->node, &dmatest_channels); - nr_channels++; + list_add_tail(&dtc->node, &info->channels); + info->nr_channels++; return 0; } static bool filter(struct dma_chan *chan, void *param) { - if (!dmatest_match_channel(chan) || !dmatest_match_device(chan->device)) + struct dmatest_params *params = param; + + if (!dmatest_match_channel(params, chan) || + !dmatest_match_device(params, chan->device)) return false; else return true; } -static int __init dmatest_init(void) +static int __run_threaded_test(struct dmatest_info *info) { dma_cap_mask_t mask; struct dma_chan *chan; + struct dmatest_params *params = &info->params; int err = 0; dma_cap_zero(mask); dma_cap_set(DMA_MEMCPY, mask); for (;;) { - chan = dma_request_channel(mask, filter, NULL); + chan = dma_request_channel(mask, filter, params); if (chan) { - err = dmatest_add_channel(chan); + err = dmatest_add_channel(info, chan); if (err) { dma_release_channel(chan); break; /* add_channel failed, punt */ } } else break; /* no more channels available */ - if (max_channels && nr_channels >= max_channels) + if (params->max_channels && + info->nr_channels >= params->max_channels) break; /* we have all we need */ } - return err; } -/* when compiled-in wait for drivers to load first */ -late_initcall(dmatest_init); -static void __exit dmatest_exit(void) +#ifndef MODULE +static int run_threaded_test(struct dmatest_info *info) +{ + int ret; + + mutex_lock(&info->lock); + ret = __run_threaded_test(info); + mutex_unlock(&info->lock); + return ret; +} +#endif + +static void __stop_threaded_test(struct dmatest_info *info) { struct dmatest_chan *dtc, *_dtc; struct dma_chan *chan; - list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) { + list_for_each_entry_safe(dtc, _dtc, &info->channels, node) { list_del(&dtc->node); chan = dtc->chan; dmatest_cleanup_channel(dtc); - pr_debug("dmatest: dropped channel %s\n", - dma_chan_name(chan)); + pr_debug("dmatest: dropped channel %s\n", dma_chan_name(chan)); dma_release_channel(chan); } + + info->nr_channels = 0; +} + +static void stop_threaded_test(struct dmatest_info *info) +{ + mutex_lock(&info->lock); + __stop_threaded_test(info); + mutex_unlock(&info->lock); +} + +static int __restart_threaded_test(struct dmatest_info *info, bool run) +{ + struct dmatest_params *params = &info->params; + int ret; + + /* Stop any running test first */ + __stop_threaded_test(info); + + if (run == false) + return 0; + + /* Clear results from previous run */ + result_free(info, NULL); + + /* Copy test parameters */ + memcpy(params, &info->dbgfs_params, sizeof(*params)); + + /* Run test with new parameters */ + ret = __run_threaded_test(info); + if (ret) { + __stop_threaded_test(info); + pr_err("dmatest: Can't run test\n"); + } + + return ret; +} + +static ssize_t dtf_write_string(void *to, size_t available, loff_t *ppos, + const void __user *from, size_t count) +{ + char tmp[20]; + ssize_t len; + + len = simple_write_to_buffer(tmp, sizeof(tmp) - 1, ppos, from, count); + if (len >= 0) { + tmp[len] = '\0'; + strlcpy(to, strim(tmp), available); + } + + return len; +} + +static ssize_t dtf_read_channel(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct dmatest_info *info = file->private_data; + return simple_read_from_buffer(buf, count, ppos, + info->dbgfs_params.channel, + strlen(info->dbgfs_params.channel)); +} + +static ssize_t dtf_write_channel(struct file *file, const char __user *buf, 
+ size_t size, loff_t *ppos) +{ + struct dmatest_info *info = file->private_data; + return dtf_write_string(info->dbgfs_params.channel, + sizeof(info->dbgfs_params.channel), + ppos, buf, size); +} + +static const struct file_operations dtf_channel_fops = { + .read = dtf_read_channel, + .write = dtf_write_channel, + .open = simple_open, + .llseek = default_llseek, +}; + +static ssize_t dtf_read_device(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct dmatest_info *info = file->private_data; + return simple_read_from_buffer(buf, count, ppos, + info->dbgfs_params.device, + strlen(info->dbgfs_params.device)); +} + +static ssize_t dtf_write_device(struct file *file, const char __user *buf, + size_t size, loff_t *ppos) +{ + struct dmatest_info *info = file->private_data; + return dtf_write_string(info->dbgfs_params.device, + sizeof(info->dbgfs_params.device), + ppos, buf, size); +} + +static const struct file_operations dtf_device_fops = { + .read = dtf_read_device, + .write = dtf_write_device, + .open = simple_open, + .llseek = default_llseek, +}; + +static ssize_t dtf_read_run(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct dmatest_info *info = file->private_data; + char buf[3]; + struct dmatest_chan *dtc; + bool alive = false; + + mutex_lock(&info->lock); + list_for_each_entry(dtc, &info->channels, node) { + struct dmatest_thread *thread; + + list_for_each_entry(thread, &dtc->threads, node) { + if (!thread->done) { + alive = true; + break; + } + } + } + + if (alive) { + buf[0] = 'Y'; + } else { + __stop_threaded_test(info); + buf[0] = 'N'; + } + + mutex_unlock(&info->lock); + buf[1] = '\n'; + buf[2] = 0x00; + return simple_read_from_buffer(user_buf, count, ppos, buf, 2); +} + +static ssize_t dtf_write_run(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct dmatest_info *info = file->private_data; + char buf[16]; + bool bv; + int ret = 0; + + if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1)))) + return -EFAULT; + + if (strtobool(buf, &bv) == 0) { + mutex_lock(&info->lock); + ret = __restart_threaded_test(info, bv); + mutex_unlock(&info->lock); + } + + return ret ? 
ret : count; +} + +static const struct file_operations dtf_run_fops = { + .read = dtf_read_run, + .write = dtf_write_run, + .open = simple_open, + .llseek = default_llseek, +}; + +static int dtf_results_show(struct seq_file *sf, void *data) +{ + struct dmatest_info *info = sf->private; + struct dmatest_result *result; + struct dmatest_thread_result *tr; + unsigned int i; + + mutex_lock(&info->results_lock); + list_for_each_entry(result, &info->results, node) { + list_for_each_entry(tr, &result->results, node) { + seq_printf(sf, "%s\n", + thread_result_get(result->name, tr)); + if (tr->type == DMATEST_ET_VERIFY_BUF) { + for (i = 0; i < tr->vr->error_count; i++) { + seq_printf(sf, "\t%s\n", + verify_result_get_one(tr->vr, i)); + } + } + } + } + + mutex_unlock(&info->results_lock); + return 0; +} + +static int dtf_results_open(struct inode *inode, struct file *file) +{ + return single_open(file, dtf_results_show, inode->i_private); +} + +static const struct file_operations dtf_results_fops = { + .open = dtf_results_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int dmatest_register_dbgfs(struct dmatest_info *info) +{ + struct dentry *d; + struct dmatest_params *params = &info->dbgfs_params; + int ret = -ENOMEM; + + d = debugfs_create_dir("dmatest", NULL); + if (IS_ERR(d)) + return PTR_ERR(d); + if (!d) + goto err_root; + + info->root = d; + + /* Copy initial values */ + memcpy(params, &info->params, sizeof(*params)); + + /* Test parameters */ + + d = debugfs_create_u32("test_buf_size", S_IWUSR | S_IRUGO, info->root, + (u32 *)¶ms->buf_size); + if (IS_ERR_OR_NULL(d)) + goto err_node; + + d = debugfs_create_file("channel", S_IRUGO | S_IWUSR, info->root, + info, &dtf_channel_fops); + if (IS_ERR_OR_NULL(d)) + goto err_node; + + d = debugfs_create_file("device", S_IRUGO | S_IWUSR, info->root, + info, &dtf_device_fops); + if (IS_ERR_OR_NULL(d)) + goto err_node; + + d = debugfs_create_u32("threads_per_chan", S_IWUSR | S_IRUGO, info->root, + (u32 *)¶ms->threads_per_chan); + if (IS_ERR_OR_NULL(d)) + goto err_node; + + d = debugfs_create_u32("max_channels", S_IWUSR | S_IRUGO, info->root, + (u32 *)¶ms->max_channels); + if (IS_ERR_OR_NULL(d)) + goto err_node; + + d = debugfs_create_u32("iterations", S_IWUSR | S_IRUGO, info->root, + (u32 *)¶ms->iterations); + if (IS_ERR_OR_NULL(d)) + goto err_node; + + d = debugfs_create_u32("xor_sources", S_IWUSR | S_IRUGO, info->root, + (u32 *)¶ms->xor_sources); + if (IS_ERR_OR_NULL(d)) + goto err_node; + + d = debugfs_create_u32("pq_sources", S_IWUSR | S_IRUGO, info->root, + (u32 *)¶ms->pq_sources); + if (IS_ERR_OR_NULL(d)) + goto err_node; + + d = debugfs_create_u32("timeout", S_IWUSR | S_IRUGO, info->root, + (u32 *)¶ms->timeout); + if (IS_ERR_OR_NULL(d)) + goto err_node; + + /* Run or stop threaded test */ + d = debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root, + info, &dtf_run_fops); + if (IS_ERR_OR_NULL(d)) + goto err_node; + + /* Results of test in progress */ + d = debugfs_create_file("results", S_IRUGO, info->root, info, + &dtf_results_fops); + if (IS_ERR_OR_NULL(d)) + goto err_node; + + return 0; + +err_node: + debugfs_remove_recursive(info->root); +err_root: + pr_err("dmatest: Failed to initialize debugfs\n"); + return ret; +} + +static int __init dmatest_init(void) +{ + struct dmatest_info *info = &test_info; + struct dmatest_params *params = &info->params; + int ret; + + memset(info, 0, sizeof(*info)); + + mutex_init(&info->lock); + INIT_LIST_HEAD(&info->channels); + + mutex_init(&info->results_lock); + 
INIT_LIST_HEAD(&info->results); + + /* Set default parameters */ + params->buf_size = test_buf_size; + strlcpy(params->channel, test_channel, sizeof(params->channel)); + strlcpy(params->device, test_device, sizeof(params->device)); + params->threads_per_chan = threads_per_chan; + params->max_channels = max_channels; + params->iterations = iterations; + params->xor_sources = xor_sources; + params->pq_sources = pq_sources; + params->timeout = timeout; + + ret = dmatest_register_dbgfs(info); + if (ret) + return ret; + +#ifdef MODULE + return 0; +#else + return run_threaded_test(info); +#endif +} +/* when compiled-in wait for drivers to load first */ +late_initcall(dmatest_init); + +static void __exit dmatest_exit(void) +{ + struct dmatest_info *info = &test_info; + + debugfs_remove_recursive(info->root); + stop_threaded_test(info); + result_free(info, NULL); } module_exit(dmatest_exit); diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 43a5329..2e5deaa 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -25,6 +25,8 @@ #include #include #include +#include +#include #include "dw_dmac_regs.h" #include "dmaengine.h" @@ -49,29 +51,22 @@ static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave) return slave ? slave->src_master : 1; } -#define SRC_MASTER 0 -#define DST_MASTER 1 - -static inline unsigned int dwc_get_master(struct dma_chan *chan, int master) +static inline void dwc_set_masters(struct dw_dma_chan *dwc) { - struct dw_dma *dw = to_dw_dma(chan->device); - struct dw_dma_slave *dws = chan->private; - unsigned int m; - - if (master == SRC_MASTER) - m = dwc_get_sms(dws); - else - m = dwc_get_dms(dws); + struct dw_dma *dw = to_dw_dma(dwc->chan.device); + struct dw_dma_slave *dws = dwc->chan.private; + unsigned char mmax = dw->nr_masters - 1; - return min_t(unsigned int, dw->nr_masters - 1, m); + if (dwc->request_line == ~0) { + dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws)); + dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws)); + } } #define DWC_DEFAULT_CTLLO(_chan) ({ \ struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \ struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \ bool _is_slave = is_slave_direction(_dwc->direction); \ - int _dms = dwc_get_master(_chan, DST_MASTER); \ - int _sms = dwc_get_master(_chan, SRC_MASTER); \ u8 _smsize = _is_slave ? _sconfig->src_maxburst : \ DW_DMA_MSIZE_16; \ u8 _dmsize = _is_slave ? 
_sconfig->dst_maxburst : \ @@ -81,8 +76,8 @@ static inline unsigned int dwc_get_master(struct dma_chan *chan, int master) | DWC_CTLL_SRC_MSIZE(_smsize) \ | DWC_CTLL_LLP_D_EN \ | DWC_CTLL_LLP_S_EN \ - | DWC_CTLL_DMS(_dms) \ - | DWC_CTLL_SMS(_sms)); \ + | DWC_CTLL_DMS(_dwc->dst_master) \ + | DWC_CTLL_SMS(_dwc->src_master)); \ }) /* @@ -92,13 +87,6 @@ static inline unsigned int dwc_get_master(struct dma_chan *chan, int master) */ #define NR_DESCS_PER_CHANNEL 64 -static inline unsigned int dwc_get_data_width(struct dma_chan *chan, int master) -{ - struct dw_dma *dw = to_dw_dma(chan->device); - - return dw->data_width[dwc_get_master(chan, master)]; -} - /*----------------------------------------------------------------------*/ static struct device *chan2dev(struct dma_chan *chan) @@ -172,13 +160,7 @@ static void dwc_initialize(struct dw_dma_chan *dwc) if (dwc->initialized == true) return; - if (dws && dws->cfg_hi == ~0 && dws->cfg_lo == ~0) { - /* autoconfigure based on request line from DT */ - if (dwc->direction == DMA_MEM_TO_DEV) - cfghi = DWC_CFGH_DST_PER(dwc->request_line); - else if (dwc->direction == DMA_DEV_TO_MEM) - cfghi = DWC_CFGH_SRC_PER(dwc->request_line); - } else if (dws) { + if (dws) { /* * We need controller-specific data to set up slave * transfers. @@ -189,9 +171,9 @@ static void dwc_initialize(struct dw_dma_chan *dwc) cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK; } else { if (dwc->direction == DMA_MEM_TO_DEV) - cfghi = DWC_CFGH_DST_PER(dwc->dma_sconfig.slave_id); + cfghi = DWC_CFGH_DST_PER(dwc->request_line); else if (dwc->direction == DMA_DEV_TO_MEM) - cfghi = DWC_CFGH_SRC_PER(dwc->dma_sconfig.slave_id); + cfghi = DWC_CFGH_SRC_PER(dwc->request_line); } channel_writel(dwc, CFG_LO, cfglo); @@ -473,16 +455,16 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) (unsigned long long)llp); list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { - /* initial residue value */ + /* Initial residue value */ dwc->residue = desc->total_len; - /* check first descriptors addr */ + /* Check first descriptors addr */ if (desc->txd.phys == llp) { spin_unlock_irqrestore(&dwc->lock, flags); return; } - /* check first descriptors llp */ + /* Check first descriptors llp */ if (desc->lli.llp == llp) { /* This one is currently in progress */ dwc->residue -= dwc_get_sent(dwc); @@ -588,7 +570,7 @@ inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan) } EXPORT_SYMBOL(dw_dma_get_dst_addr); -/* called with dwc->lock held and all DMAC interrupts disabled */ +/* Called with dwc->lock held and all DMAC interrupts disabled */ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, u32 status_err, u32 status_xfer) { @@ -626,7 +608,7 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, dwc_chan_disable(dw, dwc); - /* make sure DMA does not restart by loading a new list */ + /* Make sure DMA does not restart by loading a new list */ channel_writel(dwc, LLP, 0); channel_writel(dwc, CTL_LO, 0); channel_writel(dwc, CTL_HI, 0); @@ -745,6 +727,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len, unsigned long flags) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_dma *dw = to_dw_dma(chan->device); struct dw_desc *desc; struct dw_desc *first; struct dw_desc *prev; @@ -767,8 +750,8 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, dwc->direction = DMA_MEM_TO_MEM; - data_width = min_t(unsigned int, dwc_get_data_width(chan, SRC_MASTER), - 
dwc_get_data_width(chan, DST_MASTER)); + data_width = min_t(unsigned int, dw->data_width[dwc->src_master], + dw->data_width[dwc->dst_master]); src_width = dst_width = min_t(unsigned int, data_width, dwc_fast_fls(src | dest | len)); @@ -826,6 +809,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, unsigned long flags, void *context) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_dma *dw = to_dw_dma(chan->device); struct dma_slave_config *sconfig = &dwc->dma_sconfig; struct dw_desc *prev; struct dw_desc *first; @@ -859,7 +843,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) : DWC_CTLL_FC(DW_DMA_FC_D_M2P); - data_width = dwc_get_data_width(chan, SRC_MASTER); + data_width = dw->data_width[dwc->src_master]; for_each_sg(sgl, sg, sg_len, i) { struct dw_desc *desc; @@ -919,7 +903,7 @@ slave_sg_todev_fill_desc: ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) : DWC_CTLL_FC(DW_DMA_FC_D_P2M); - data_width = dwc_get_data_width(chan, DST_MASTER); + data_width = dw->data_width[dwc->dst_master]; for_each_sg(sgl, sg, sg_len, i) { struct dw_desc *desc; @@ -1001,13 +985,6 @@ static inline void convert_burst(u32 *maxburst) *maxburst = 0; } -static inline void convert_slave_id(struct dw_dma_chan *dwc) -{ - struct dw_dma *dw = to_dw_dma(dwc->chan.device); - - dwc->dma_sconfig.slave_id -= dw->request_line_base; -} - static int set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) { @@ -1020,9 +997,12 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig)); dwc->direction = sconfig->direction; + /* Take the request line from slave_id member */ + if (dwc->request_line == ~0) + dwc->request_line = sconfig->slave_id; + convert_burst(&dwc->dma_sconfig.src_maxburst); convert_burst(&dwc->dma_sconfig.dst_maxburst); - convert_slave_id(dwc); return 0; } @@ -1030,10 +1010,11 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) static inline void dwc_chan_pause(struct dw_dma_chan *dwc) { u32 cfglo = channel_readl(dwc, CFG_LO); + unsigned int count = 20; /* timeout iterations */ channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); - while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY)) - cpu_relax(); + while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--) + udelay(2); dwc->paused = true; } @@ -1169,6 +1150,8 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) * doesn't mean what you think it means), and status writeback. 
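 *
 * (The dwc_set_masters() call added below resolves the source/destination
 * masters for this channel: values already set by the DT/ACPI xlate
 * (request_line != ~0) are kept; otherwise they are taken from the
 * dw_dma_slave data and clamped to nr_masters - 1.)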
*/ + dwc_set_masters(dwc); + spin_lock_irqsave(&dwc->lock, flags); i = dwc->descs_allocated; while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) { @@ -1226,6 +1209,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan) list_splice_init(&dwc->free_list, &list); dwc->descs_allocated = 0; dwc->initialized = false; + dwc->request_line = ~0; /* Disable interrupts */ channel_clear_bit(dw, MASK.XFER, dwc->mask); @@ -1241,42 +1225,36 @@ static void dwc_free_chan_resources(struct dma_chan *chan) dev_vdbg(chan2dev(chan), "%s: done\n", __func__); } -struct dw_dma_filter_args { +/*----------------------------------------------------------------------*/ + +struct dw_dma_of_filter_args { struct dw_dma *dw; unsigned int req; unsigned int src; unsigned int dst; }; -static bool dw_dma_generic_filter(struct dma_chan *chan, void *param) +static bool dw_dma_of_filter(struct dma_chan *chan, void *param) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - struct dw_dma *dw = to_dw_dma(chan->device); - struct dw_dma_filter_args *fargs = param; - struct dw_dma_slave *dws = &dwc->slave; + struct dw_dma_of_filter_args *fargs = param; - /* ensure the device matches our channel */ + /* Ensure the device matches our channel */ if (chan->device != &fargs->dw->dma) return false; - dws->dma_dev = dw->dma.dev; - dws->cfg_hi = ~0; - dws->cfg_lo = ~0; - dws->src_master = fargs->src; - dws->dst_master = fargs->dst; - dwc->request_line = fargs->req; - - chan->private = dws; + dwc->src_master = fargs->src; + dwc->dst_master = fargs->dst; return true; } -static struct dma_chan *dw_dma_xlate(struct of_phandle_args *dma_spec, - struct of_dma *ofdma) +static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) { struct dw_dma *dw = ofdma->of_dma_data; - struct dw_dma_filter_args fargs = { + struct dw_dma_of_filter_args fargs = { .dw = dw, }; dma_cap_mask_t cap; @@ -1297,8 +1275,48 @@ static struct dma_chan *dw_dma_xlate(struct of_phandle_args *dma_spec, dma_cap_set(DMA_SLAVE, cap); /* TODO: there should be a simpler way to do this */ - return dma_request_channel(cap, dw_dma_generic_filter, &fargs); + return dma_request_channel(cap, dw_dma_of_filter, &fargs); +} + +#ifdef CONFIG_ACPI +static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct acpi_dma_spec *dma_spec = param; + + if (chan->device->dev != dma_spec->dev || + chan->chan_id != dma_spec->chan_id) + return false; + + dwc->request_line = dma_spec->slave_id; + dwc->src_master = dwc_get_sms(NULL); + dwc->dst_master = dwc_get_dms(NULL); + + return true; +} + +static void dw_dma_acpi_controller_register(struct dw_dma *dw) +{ + struct device *dev = dw->dma.dev; + struct acpi_dma_filter_info *info; + int ret; + + info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); + if (!info) + return; + + dma_cap_zero(info->dma_cap); + dma_cap_set(DMA_SLAVE, info->dma_cap); + info->filter_fn = dw_dma_acpi_filter; + + ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate, + info); + if (ret) + dev_err(dev, "could not register acpi_dma_controller\n"); } +#else /* !CONFIG_ACPI */ +static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {} +#endif /* !CONFIG_ACPI */ /* --------------------- Cyclic DMA API extensions -------------------- */ @@ -1322,7 +1340,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan) spin_lock_irqsave(&dwc->lock, flags); - /* assert channel is idle */ + /* Assert channel is idle */ if (dma_readl(dw, CH_EN) & dwc->mask) { 
dev_err(chan2dev(&dwc->chan), "BUG: Attempted to start non-idle channel\n"); @@ -1334,7 +1352,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan) dma_writel(dw, CLEAR.ERROR, dwc->mask); dma_writel(dw, CLEAR.XFER, dwc->mask); - /* setup DMAC channel registers */ + /* Setup DMAC channel registers */ channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys); channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); channel_writel(dwc, CTL_HI, 0); @@ -1501,7 +1519,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, last = desc; } - /* lets make a cyclic list */ + /* Let's make a cyclic list */ last->lli.llp = cdesc->desc[0]->txd.phys; dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu " @@ -1636,7 +1654,6 @@ dw_dma_parse_dt(struct platform_device *pdev) static int dw_probe(struct platform_device *pdev) { - const struct platform_device_id *match; struct dw_dma_platform_data *pdata; struct resource *io; struct dw_dma *dw; @@ -1706,7 +1723,7 @@ static int dw_probe(struct platform_device *pdev) dw->regs = regs; - /* get hardware configuration parameters */ + /* Get hardware configuration parameters */ if (autocfg) { max_blk_size = dma_readl(dw, MAX_BLK_SIZE); @@ -1720,18 +1737,13 @@ static int dw_probe(struct platform_device *pdev) memcpy(dw->data_width, pdata->data_width, 4); } - /* Get the base request line if set */ - match = platform_get_device_id(pdev); - if (match) - dw->request_line_base = (unsigned int)match->driver_data; - /* Calculate all channel mask before DMA setup */ dw->all_chan_mask = (1 << nr_channels) - 1; - /* force dma off, just in case */ + /* Force dma off, just in case */ dw_dma_off(dw); - /* disable BLOCK interrupts as well */ + /* Disable BLOCK interrupts as well */ channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0, @@ -1741,7 +1753,7 @@ static int dw_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dw); - /* create a pool of consistent memory blocks for hardware descriptors */ + /* Create a pool of consistent memory blocks for hardware descriptors */ dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", &pdev->dev, sizeof(struct dw_desc), 4, 0); if (!dw->desc_pool) { @@ -1781,8 +1793,9 @@ static int dw_probe(struct platform_device *pdev) channel_clear_bit(dw, CH_EN, dwc->mask); dwc->direction = DMA_TRANS_NONE; + dwc->request_line = ~0; - /* hardware configuration */ + /* Hardware configuration */ if (autocfg) { unsigned int dwc_params; @@ -1842,12 +1855,15 @@ static int dw_probe(struct platform_device *pdev) if (pdev->dev.of_node) { err = of_dma_controller_register(pdev->dev.of_node, - dw_dma_xlate, dw); - if (err && err != -ENODEV) + dw_dma_of_xlate, dw); + if (err) dev_err(&pdev->dev, "could not register of_dma_controller\n"); } + if (ACPI_HANDLE(&pdev->dev)) + dw_dma_acpi_controller_register(dw); + return 0; } @@ -1912,18 +1928,19 @@ static const struct dev_pm_ops dw_dev_pm_ops = { }; #ifdef CONFIG_OF -static const struct of_device_id dw_dma_id_table[] = { +static const struct of_device_id dw_dma_of_id_table[] = { { .compatible = "snps,dma-spear1340" }, {} }; -MODULE_DEVICE_TABLE(of, dw_dma_id_table); +MODULE_DEVICE_TABLE(of, dw_dma_of_id_table); #endif -static const struct platform_device_id dw_dma_ids[] = { - /* Name, Request Line Base */ - { "INTL9C60", (kernel_ulong_t)16 }, +#ifdef CONFIG_ACPI +static const struct acpi_device_id dw_dma_acpi_id_table[] = { + { "INTL9C60", 0 }, { } }; +#endif static struct platform_driver dw_driver = { .probe = 
dw_probe, @@ -1932,9 +1949,9 @@ static struct platform_driver dw_driver = { .driver = { .name = "dw_dmac", .pm = &dw_dev_pm_ops, - .of_match_table = of_match_ptr(dw_dma_id_table), + .of_match_table = of_match_ptr(dw_dma_of_id_table), + .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table), }, - .id_table = dw_dma_ids, }; static int __init dw_init(void) diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h index 4d02c36..9d41720 100644 --- a/drivers/dma/dw_dmac_regs.h +++ b/drivers/dma/dw_dmac_regs.h @@ -212,8 +212,11 @@ struct dw_dma_chan { /* hardware configuration */ unsigned int block_size; bool nollp; + + /* custom slave configuration */ unsigned int request_line; - struct dw_dma_slave slave; + unsigned char src_master; + unsigned char dst_master; /* configuration passed via DMA_SLAVE_CONFIG */ struct dma_slave_config dma_sconfig; @@ -247,7 +250,6 @@ struct dw_dma { /* hardware configuration */ unsigned char nr_masters; unsigned char data_width[4]; - unsigned int request_line_base; struct dw_dma_chan chan[0]; }; diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 70b8975..f285833 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -859,8 +859,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node); - if (imxdmac->sg_list) - kfree(imxdmac->sg_list); + kfree(imxdmac->sg_list); imxdmac->sg_list = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_KERNEL); @@ -1145,7 +1144,7 @@ err: return ret; } -static int __exit imxdma_remove(struct platform_device *pdev) +static int imxdma_remove(struct platform_device *pdev) { struct imxdma_engine *imxdma = platform_get_drvdata(pdev); @@ -1162,7 +1161,7 @@ static struct platform_driver imxdma_driver = { .name = "imx-dma", }, .id_table = imx_dma_devtype, - .remove = __exit_p(imxdma_remove), + .remove = imxdma_remove, }; static int __init imxdma_module_init(void) diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index f082aa3..092867b 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -1462,7 +1462,7 @@ err_irq: return ret; } -static int __exit sdma_remove(struct platform_device *pdev) +static int sdma_remove(struct platform_device *pdev) { return -EBUSY; } @@ -1473,7 +1473,7 @@ static struct platform_driver sdma_driver = { .of_match_table = sdma_dt_ids, }, .id_table = sdma_devtypes, - .remove = __exit_p(sdma_remove), + .remove = sdma_remove, }; static int __init sdma_module_init(void) diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 1879a59..17a2393 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -892,7 +892,7 @@ MODULE_PARM_DESC(ioat_interrupt_style, * ioat_dma_setup_interrupts - setup interrupt handler * @device: ioat device */ -static int ioat_dma_setup_interrupts(struct ioatdma_device *device) +int ioat_dma_setup_interrupts(struct ioatdma_device *device) { struct ioat_chan_common *chan; struct pci_dev *pdev = device->pdev; @@ -941,6 +941,7 @@ msix: } } intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL; + device->irq_mode = IOAT_MSIX; goto done; msix_single_vector: @@ -956,6 +957,7 @@ msix_single_vector: pci_disable_msix(pdev); goto msi; } + device->irq_mode = IOAT_MSIX_SINGLE; goto done; msi: @@ -969,6 +971,7 @@ msi: pci_disable_msi(pdev); goto intx; } + device->irq_mode = IOAT_MSIX; goto done; intx: @@ -977,6 +980,7 @@ intx: if (err) goto err_no_irq; + device->irq_mode = IOAT_INTX; done: if (device->intr_quirk) device->intr_quirk(device); @@ -987,9 +991,11 @@ done: 
err_no_irq: /* Disable all interrupt generation */ writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); + device->irq_mode = IOAT_NOIRQ; dev_err(dev, "no usable interrupts\n"); return err; } +EXPORT_SYMBOL(ioat_dma_setup_interrupts); static void ioat_disable_interrupts(struct ioatdma_device *device) { diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 53a4cbb..54fb7b9 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -39,6 +39,7 @@ #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) #define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, txd) #define to_dev(ioat_chan) (&(ioat_chan)->device->pdev->dev) +#define to_pdev(ioat_chan) ((ioat_chan)->device->pdev) #define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80) @@ -48,6 +49,14 @@ */ #define NULL_DESC_BUFFER_SIZE 1 +enum ioat_irq_mode { + IOAT_NOIRQ = 0, + IOAT_MSIX, + IOAT_MSIX_SINGLE, + IOAT_MSI, + IOAT_INTX +}; + /** * struct ioatdma_device - internal representation of a IOAT device * @pdev: PCI-Express device @@ -72,11 +81,16 @@ struct ioatdma_device { void __iomem *reg_base; struct pci_pool *dma_pool; struct pci_pool *completion_pool; +#define MAX_SED_POOLS 5 + struct dma_pool *sed_hw_pool[MAX_SED_POOLS]; + struct kmem_cache *sed_pool; struct dma_device common; u8 version; struct msix_entry msix_entries[4]; struct ioat_chan_common *idx[4]; struct dca_provider *dca; + enum ioat_irq_mode irq_mode; + u32 cap; void (*intr_quirk)(struct ioatdma_device *device); int (*enumerate_channels)(struct ioatdma_device *device); int (*reset_hw)(struct ioat_chan_common *chan); @@ -131,6 +145,20 @@ struct ioat_dma_chan { u16 active; }; +/** + * struct ioat_sed_ent - wrapper around super extended hardware descriptor + * @hw: hardware SED + * @sed_dma: dma address for the SED + * @list: list member + * @parent: point to the dma descriptor that's the parent + */ +struct ioat_sed_ent { + struct ioat_sed_raw_descriptor *hw; + dma_addr_t dma; + struct ioat_ring_ent *parent; + unsigned int hw_pool; +}; + static inline struct ioat_chan_common *to_chan_common(struct dma_chan *c) { return container_of(c, struct ioat_chan_common, common); @@ -179,7 +207,7 @@ __dump_desc_dbg(struct ioat_chan_common *chan, struct ioat_dma_descriptor *hw, struct device *dev = to_dev(chan); dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x" - " ctl: %#x (op: %d int_en: %d compl: %d)\n", id, + " ctl: %#10.8x (op: %#x int_en: %d compl: %d)\n", id, (unsigned long long) tx->phys, (unsigned long long) hw->next, tx->cookie, tx->flags, hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write); @@ -201,7 +229,7 @@ ioat_chan_by_index(struct ioatdma_device *device, int index) return device->idx[index]; } -static inline u64 ioat_chansts(struct ioat_chan_common *chan) +static inline u64 ioat_chansts_32(struct ioat_chan_common *chan) { u8 ver = chan->device->version; u64 status; @@ -218,6 +246,26 @@ static inline u64 ioat_chansts(struct ioat_chan_common *chan) return status; } +#if BITS_PER_LONG == 64 + +static inline u64 ioat_chansts(struct ioat_chan_common *chan) +{ + u8 ver = chan->device->version; + u64 status; + + /* With IOAT v3.3 the status register is 64bit. 
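+	 * readq() is only guaranteed on 64-bit builds, hence the
+	 * BITS_PER_LONG == 64 guard; 32-bit builds keep using
+	 * ioat_chansts_32() via the #define below.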
*/ + if (ver >= IOAT_VER_3_3) + status = readq(chan->reg_base + IOAT_CHANSTS_OFFSET(ver)); + else + status = ioat_chansts_32(chan); + + return status; +} + +#else +#define ioat_chansts ioat_chansts_32 +#endif + static inline void ioat_start(struct ioat_chan_common *chan) { u8 ver = chan->device->version; @@ -321,6 +369,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan, dma_addr_t *phys_complete); void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type); void ioat_kobject_del(struct ioatdma_device *device); +int ioat_dma_setup_interrupts(struct ioatdma_device *device); extern const struct sysfs_ops ioat_sysfs_ops; extern struct ioat_sysfs_entry ioat_version_attr; extern struct ioat_sysfs_entry ioat_cap_attr; diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h index e100f64..29bf944 100644 --- a/drivers/dma/ioat/dma_v2.h +++ b/drivers/dma/ioat/dma_v2.h @@ -137,6 +137,7 @@ struct ioat_ring_ent { #ifdef DEBUG int id; #endif + struct ioat_sed_ent *sed; }; static inline struct ioat_ring_ent * @@ -157,6 +158,7 @@ static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr) int ioat2_dma_probe(struct ioatdma_device *dev, int dca); int ioat3_dma_probe(struct ioatdma_device *dev, int dca); +void ioat3_dma_remove(struct ioatdma_device *dev); struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs); diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index e8336cc..ca6ea9b 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -55,7 +55,7 @@ /* * Support routines for v3+ hardware */ - +#include #include #include #include @@ -70,6 +70,10 @@ /* ioat hardware assumes at least two sources for raid operations */ #define src_cnt_to_sw(x) ((x) + 2) #define src_cnt_to_hw(x) ((x) - 2) +#define ndest_to_sw(x) ((x) + 1) +#define ndest_to_hw(x) ((x) - 1) +#define src16_cnt_to_sw(x) ((x) + 9) +#define src16_cnt_to_hw(x) ((x) - 9) /* provide a lookup table for setting the source address in the base or * extended descriptor of an xor or pq descriptor @@ -77,7 +81,20 @@ static const u8 xor_idx_to_desc = 0xe0; static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 }; static const u8 pq_idx_to_desc = 0xf8; +static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2 }; static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 }; +static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7, + 0, 1, 2, 3, 4, 5, 6 }; + +/* + * technically sources 1 and 2 do not require SED, but the op will have + * at least 9 descriptors so that's irrelevant. 
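+ * (pq16_idx_to_sed[] below maps a source count to the SED pool index
+ * returned by sed_get_pq16_pool_idx().)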
+ */ +static const u8 pq16_idx_to_sed[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 1, 1, 1 }; + +static void ioat3_eh(struct ioat2_dma_chan *ioat); static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx) { @@ -101,6 +118,13 @@ static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx) return raw->field[pq_idx_to_field[idx]]; } +static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx) +{ + struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]]; + + return raw->field[pq16_idx_to_field[idx]]; +} + static void pq_set_src(struct ioat_raw_descriptor *descs[2], dma_addr_t addr, u32 offset, u8 coef, int idx) { @@ -111,6 +135,167 @@ static void pq_set_src(struct ioat_raw_descriptor *descs[2], pq->coef[idx] = coef; } +static int sed_get_pq16_pool_idx(int src_cnt) +{ + + return pq16_idx_to_sed[src_cnt]; +} + +static bool is_jf_ioat(struct pci_dev *pdev) +{ + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_IOAT_JSF0: + case PCI_DEVICE_ID_INTEL_IOAT_JSF1: + case PCI_DEVICE_ID_INTEL_IOAT_JSF2: + case PCI_DEVICE_ID_INTEL_IOAT_JSF3: + case PCI_DEVICE_ID_INTEL_IOAT_JSF4: + case PCI_DEVICE_ID_INTEL_IOAT_JSF5: + case PCI_DEVICE_ID_INTEL_IOAT_JSF6: + case PCI_DEVICE_ID_INTEL_IOAT_JSF7: + case PCI_DEVICE_ID_INTEL_IOAT_JSF8: + case PCI_DEVICE_ID_INTEL_IOAT_JSF9: + return true; + default: + return false; + } +} + +static bool is_snb_ioat(struct pci_dev *pdev) +{ + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_IOAT_SNB0: + case PCI_DEVICE_ID_INTEL_IOAT_SNB1: + case PCI_DEVICE_ID_INTEL_IOAT_SNB2: + case PCI_DEVICE_ID_INTEL_IOAT_SNB3: + case PCI_DEVICE_ID_INTEL_IOAT_SNB4: + case PCI_DEVICE_ID_INTEL_IOAT_SNB5: + case PCI_DEVICE_ID_INTEL_IOAT_SNB6: + case PCI_DEVICE_ID_INTEL_IOAT_SNB7: + case PCI_DEVICE_ID_INTEL_IOAT_SNB8: + case PCI_DEVICE_ID_INTEL_IOAT_SNB9: + return true; + default: + return false; + } +} + +static bool is_ivb_ioat(struct pci_dev *pdev) +{ + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_IOAT_IVB0: + case PCI_DEVICE_ID_INTEL_IOAT_IVB1: + case PCI_DEVICE_ID_INTEL_IOAT_IVB2: + case PCI_DEVICE_ID_INTEL_IOAT_IVB3: + case PCI_DEVICE_ID_INTEL_IOAT_IVB4: + case PCI_DEVICE_ID_INTEL_IOAT_IVB5: + case PCI_DEVICE_ID_INTEL_IOAT_IVB6: + case PCI_DEVICE_ID_INTEL_IOAT_IVB7: + case PCI_DEVICE_ID_INTEL_IOAT_IVB8: + case PCI_DEVICE_ID_INTEL_IOAT_IVB9: + return true; + default: + return false; + } + +} + +static bool is_hsw_ioat(struct pci_dev *pdev) +{ + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_IOAT_HSW0: + case PCI_DEVICE_ID_INTEL_IOAT_HSW1: + case PCI_DEVICE_ID_INTEL_IOAT_HSW2: + case PCI_DEVICE_ID_INTEL_IOAT_HSW3: + case PCI_DEVICE_ID_INTEL_IOAT_HSW4: + case PCI_DEVICE_ID_INTEL_IOAT_HSW5: + case PCI_DEVICE_ID_INTEL_IOAT_HSW6: + case PCI_DEVICE_ID_INTEL_IOAT_HSW7: + case PCI_DEVICE_ID_INTEL_IOAT_HSW8: + case PCI_DEVICE_ID_INTEL_IOAT_HSW9: + return true; + default: + return false; + } + +} + +static bool is_xeon_cb32(struct pci_dev *pdev) +{ + return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) || + is_hsw_ioat(pdev); +} + +static bool is_bwd_ioat(struct pci_dev *pdev) +{ + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_IOAT_BWD0: + case PCI_DEVICE_ID_INTEL_IOAT_BWD1: + case PCI_DEVICE_ID_INTEL_IOAT_BWD2: + case PCI_DEVICE_ID_INTEL_IOAT_BWD3: + return true; + default: + return false; + } +} + +static bool is_bwd_noraid(struct pci_dev *pdev) +{ + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_IOAT_BWD2: + case PCI_DEVICE_ID_INTEL_IOAT_BWD3: + return true; + default: + return false; + } + +} + +static void 
pq16_set_src(struct ioat_raw_descriptor *desc[3], + dma_addr_t addr, u32 offset, u8 coef, int idx) +{ + struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0]; + struct ioat_pq16a_descriptor *pq16 = + (struct ioat_pq16a_descriptor *)desc[1]; + struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]]; + + raw->field[pq16_idx_to_field[idx]] = addr + offset; + + if (idx < 8) + pq->coef[idx] = coef; + else + pq16->coef[idx - 8] = coef; +} + +static struct ioat_sed_ent * +ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool) +{ + struct ioat_sed_ent *sed; + gfp_t flags = __GFP_ZERO | GFP_ATOMIC; + + sed = kmem_cache_alloc(device->sed_pool, flags); + if (!sed) + return NULL; + + sed->hw_pool = hw_pool; + sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool], + flags, &sed->dma); + if (!sed->hw) { + kmem_cache_free(device->sed_pool, sed); + return NULL; + } + + return sed; +} + +static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *sed) +{ + if (!sed) + return; + + dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma); + kmem_cache_free(device->sed_pool, sed); +} + static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, int idx) { @@ -223,6 +408,54 @@ static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat, } break; } + case IOAT_OP_PQ_16S: + case IOAT_OP_PQ_VAL_16S: { + struct ioat_pq_descriptor *pq = desc->pq; + int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt); + struct ioat_raw_descriptor *descs[4]; + int i; + + /* in the 'continue' case don't unmap the dests as sources */ + if (dmaf_p_disabled_continue(flags)) + src_cnt--; + else if (dmaf_continue(flags)) + src_cnt -= 3; + + if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { + descs[0] = (struct ioat_raw_descriptor *)pq; + descs[1] = (struct ioat_raw_descriptor *)(desc->sed->hw); + descs[2] = (struct ioat_raw_descriptor *)(&desc->sed->hw->b[0]); + for (i = 0; i < src_cnt; i++) { + dma_addr_t src = pq16_get_src(descs, i); + + ioat_unmap(pdev, src - offset, len, + PCI_DMA_TODEVICE, flags, 0); + } + + /* the dests are sources in pq validate operations */ + if (pq->ctl_f.op == IOAT_OP_XOR_VAL) { + if (!(flags & DMA_PREP_PQ_DISABLE_P)) + ioat_unmap(pdev, pq->p_addr - offset, + len, PCI_DMA_TODEVICE, + flags, 0); + if (!(flags & DMA_PREP_PQ_DISABLE_Q)) + ioat_unmap(pdev, pq->q_addr - offset, + len, PCI_DMA_TODEVICE, + flags, 0); + break; + } + } + + if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { + if (!(flags & DMA_PREP_PQ_DISABLE_P)) + ioat_unmap(pdev, pq->p_addr - offset, len, + PCI_DMA_BIDIRECTIONAL, flags, 1); + if (!(flags & DMA_PREP_PQ_DISABLE_Q)) + ioat_unmap(pdev, pq->q_addr - offset, len, + PCI_DMA_BIDIRECTIONAL, flags, 1); + } + break; + } default: dev_err(&pdev->dev, "%s: unknown op type: %#x\n", __func__, desc->hw->ctl_f.op); @@ -250,6 +483,63 @@ static bool desc_has_ext(struct ioat_ring_ent *desc) return false; } +static u64 ioat3_get_current_completion(struct ioat_chan_common *chan) +{ + u64 phys_complete; + u64 completion; + + completion = *chan->completion; + phys_complete = ioat_chansts_to_addr(completion); + + dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__, + (unsigned long long) phys_complete); + + return phys_complete; +} + +static bool ioat3_cleanup_preamble(struct ioat_chan_common *chan, + u64 *phys_complete) +{ + *phys_complete = ioat3_get_current_completion(chan); + if (*phys_complete == chan->last_completion) + return false; + + clear_bit(IOAT_COMPLETION_ACK, &chan->state); + mod_timer(&chan->timer, jiffies + 
COMPLETION_TIMEOUT); + + return true; +} + +static void +desc_get_errstat(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc) +{ + struct ioat_dma_descriptor *hw = desc->hw; + + switch (hw->ctl_f.op) { + case IOAT_OP_PQ_VAL: + case IOAT_OP_PQ_VAL_16S: + { + struct ioat_pq_descriptor *pq = desc->pq; + + /* check if there's error written */ + if (!pq->dwbes_f.wbes) + return; + + /* need to set a chanerr var for checking to clear later */ + + if (pq->dwbes_f.p_val_err) + *desc->result |= SUM_CHECK_P_RESULT; + + if (pq->dwbes_f.q_val_err) + *desc->result |= SUM_CHECK_Q_RESULT; + + return; + } + default: + return; + } +} + /** * __cleanup - reclaim used descriptors * @ioat: channel (ring) to clean @@ -260,6 +550,7 @@ static bool desc_has_ext(struct ioat_ring_ent *desc) static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) { struct ioat_chan_common *chan = &ioat->base; + struct ioatdma_device *device = chan->device; struct ioat_ring_ent *desc; bool seen_current = false; int idx = ioat->tail, i; @@ -268,6 +559,16 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n", __func__, ioat->head, ioat->tail, ioat->issued); + /* + * At restart of the channel, the completion address and the + * channel status will be 0 due to starting a new chain. Since + * it's new chain and the first descriptor "fails", there is + * nothing to clean up. We do not want to reap the entire submitted + * chain due to this 0 address value and then BUG. + */ + if (!phys_complete) + return; + active = ioat2_ring_active(ioat); for (i = 0; i < active && !seen_current; i++) { struct dma_async_tx_descriptor *tx; @@ -276,6 +577,11 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) prefetch(ioat2_get_ring_ent(ioat, idx + i + 1)); desc = ioat2_get_ring_ent(ioat, idx + i); dump_desc_dbg(ioat, desc); + + /* set err stat if we are using dwbes */ + if (device->cap & IOAT_CAP_DWBES) + desc_get_errstat(ioat, desc); + tx = &desc->txd; if (tx->cookie) { dma_cookie_complete(tx); @@ -294,6 +600,12 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) BUG_ON(i + 1 >= active); i++; } + + /* cleanup super extended descriptors */ + if (desc->sed) { + ioat3_free_sed(device, desc->sed); + desc->sed = NULL; + } } smp_mb(); /* finish all descriptor reads before incrementing tail */ ioat->tail = idx + i; @@ -314,11 +626,22 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) static void ioat3_cleanup(struct ioat2_dma_chan *ioat) { struct ioat_chan_common *chan = &ioat->base; - dma_addr_t phys_complete; + u64 phys_complete; spin_lock_bh(&chan->cleanup_lock); - if (ioat_cleanup_preamble(chan, &phys_complete)) + + if (ioat3_cleanup_preamble(chan, &phys_complete)) __cleanup(ioat, phys_complete); + + if (is_ioat_halted(*chan->completion)) { + u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); + + if (chanerr & IOAT_CHANERR_HANDLE_MASK) { + mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); + ioat3_eh(ioat); + } + } + spin_unlock_bh(&chan->cleanup_lock); } @@ -333,15 +656,78 @@ static void ioat3_cleanup_event(unsigned long data) static void ioat3_restart_channel(struct ioat2_dma_chan *ioat) { struct ioat_chan_common *chan = &ioat->base; - dma_addr_t phys_complete; + u64 phys_complete; ioat2_quiesce(chan, 0); - if (ioat_cleanup_preamble(chan, &phys_complete)) + if (ioat3_cleanup_preamble(chan, &phys_complete)) __cleanup(ioat, phys_complete); 
__ioat2_restart_chan(ioat); } +static void ioat3_eh(struct ioat2_dma_chan *ioat) +{ + struct ioat_chan_common *chan = &ioat->base; + struct pci_dev *pdev = to_pdev(chan); + struct ioat_dma_descriptor *hw; + u64 phys_complete; + struct ioat_ring_ent *desc; + u32 err_handled = 0; + u32 chanerr_int; + u32 chanerr; + + /* cleanup so tail points to descriptor that caused the error */ + if (ioat3_cleanup_preamble(chan, &phys_complete)) + __cleanup(ioat, phys_complete); + + chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); + pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int); + + dev_dbg(to_dev(chan), "%s: error = %x:%x\n", + __func__, chanerr, chanerr_int); + + desc = ioat2_get_ring_ent(ioat, ioat->tail); + hw = desc->hw; + dump_desc_dbg(ioat, desc); + + switch (hw->ctl_f.op) { + case IOAT_OP_XOR_VAL: + if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) { + *desc->result |= SUM_CHECK_P_RESULT; + err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR; + } + break; + case IOAT_OP_PQ_VAL: + case IOAT_OP_PQ_VAL_16S: + if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) { + *desc->result |= SUM_CHECK_P_RESULT; + err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR; + } + if (chanerr & IOAT_CHANERR_XOR_Q_ERR) { + *desc->result |= SUM_CHECK_Q_RESULT; + err_handled |= IOAT_CHANERR_XOR_Q_ERR; + } + break; + } + + /* fault on unhandled error or spurious halt */ + if (chanerr ^ err_handled || chanerr == 0) { + dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n", + __func__, chanerr, err_handled); + BUG(); + } + + writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); + pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int); + + /* mark faulting descriptor as complete */ + *chan->completion = desc->txd.phys; + + spin_lock_bh(&ioat->prep_lock); + ioat3_restart_channel(ioat); + spin_unlock_bh(&ioat->prep_lock); +} + static void check_active(struct ioat2_dma_chan *ioat) { struct ioat_chan_common *chan = &ioat->base; @@ -605,7 +991,8 @@ dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct int i; dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x" - " sz: %#x ctl: %#x (op: %d int: %d compl: %d pq: '%s%s' src_cnt: %d)\n", + " sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'" + " src_cnt: %d)\n", desc_id(desc), (unsigned long long) desc->txd.phys, (unsigned long long) (pq_ex ? pq_ex->next : pq->next), desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en, @@ -617,6 +1004,42 @@ dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct (unsigned long long) pq_get_src(descs, i), pq->coef[i]); dev_dbg(dev, "\tP: %#llx\n", pq->p_addr); dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr); + dev_dbg(dev, "\tNEXT: %#llx\n", pq->next); +} + +static void dump_pq16_desc_dbg(struct ioat2_dma_chan *ioat, + struct ioat_ring_ent *desc) +{ + struct device *dev = to_dev(&ioat->base); + struct ioat_pq_descriptor *pq = desc->pq; + struct ioat_raw_descriptor *descs[] = { (void *)pq, + (void *)pq, + (void *)pq }; + int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt); + int i; + + if (desc->sed) { + descs[1] = (void *)desc->sed->hw; + descs[2] = (void *)desc->sed->hw + 64; + } + + dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x" + " sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'" + " src_cnt: %d)\n", + desc_id(desc), (unsigned long long) desc->txd.phys, + (unsigned long long) pq->next, + desc->txd.flags, pq->size, pq->ctl, + pq->ctl_f.op, pq->ctl_f.int_en, + pq->ctl_f.compl_write, + pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? 
"" : "q", + pq->ctl_f.src_cnt); + for (i = 0; i < src_cnt; i++) { + dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i, + (unsigned long long) pq16_get_src(descs, i), + pq->coef[i]); + } + dev_dbg(dev, "\tP: %#llx\n", pq->p_addr); + dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr); } static struct dma_async_tx_descriptor * @@ -627,6 +1050,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, { struct ioat2_dma_chan *ioat = to_ioat2_chan(c); struct ioat_chan_common *chan = &ioat->base; + struct ioatdma_device *device = chan->device; struct ioat_ring_ent *compl_desc; struct ioat_ring_ent *desc; struct ioat_ring_ent *ext; @@ -637,6 +1061,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, u32 offset = 0; u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ; int i, s, idx, with_ext, num_descs; + int cb32 = (device->version < IOAT_VER_3_3) ? 1 : 0; dev_dbg(to_dev(chan), "%s\n", __func__); /* the engine requires at least two sources (we provide @@ -662,7 +1087,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, * order. */ if (likely(num_descs) && - ioat2_check_space_lock(ioat, num_descs+1) == 0) + ioat2_check_space_lock(ioat, num_descs + cb32) == 0) idx = ioat->head; else return NULL; @@ -700,6 +1125,9 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, pq->q_addr = dst[1] + offset; pq->ctl = 0; pq->ctl_f.op = op; + /* we turn on descriptor write back error status */ + if (device->cap & IOAT_CAP_DWBES) + pq->ctl_f.wb_en = result ? 1 : 0; pq->ctl_f.src_cnt = src_cnt_to_hw(s); pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P); pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q); @@ -716,26 +1144,140 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE); dump_pq_desc_dbg(ioat, desc, ext); - /* completion descriptor carries interrupt bit */ - compl_desc = ioat2_get_ring_ent(ioat, idx + i); - compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT; - hw = compl_desc->hw; - hw->ctl = 0; - hw->ctl_f.null = 1; - hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); - hw->ctl_f.compl_write = 1; - hw->size = NULL_DESC_BUFFER_SIZE; - dump_desc_dbg(ioat, compl_desc); + if (!cb32) { + pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); + pq->ctl_f.compl_write = 1; + compl_desc = desc; + } else { + /* completion descriptor carries interrupt bit */ + compl_desc = ioat2_get_ring_ent(ioat, idx + i); + compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT; + hw = compl_desc->hw; + hw->ctl = 0; + hw->ctl_f.null = 1; + hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); + hw->ctl_f.compl_write = 1; + hw->size = NULL_DESC_BUFFER_SIZE; + dump_desc_dbg(ioat, compl_desc); + } + /* we leave the channel locked to ensure in order submission */ return &compl_desc->txd; } static struct dma_async_tx_descriptor * +__ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result, + const dma_addr_t *dst, const dma_addr_t *src, + unsigned int src_cnt, const unsigned char *scf, + size_t len, unsigned long flags) +{ + struct ioat2_dma_chan *ioat = to_ioat2_chan(c); + struct ioat_chan_common *chan = &ioat->base; + struct ioatdma_device *device = chan->device; + struct ioat_ring_ent *desc; + size_t total_len = len; + struct ioat_pq_descriptor *pq; + u32 offset = 0; + u8 op; + int i, s, idx, num_descs; + + /* this function only handles src_cnt 9 - 16 */ + BUG_ON(src_cnt < 9); + + /* this function is only called with 9-16 sources */ + op = result ? 
IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S; + + dev_dbg(to_dev(chan), "%s\n", __func__); + + num_descs = ioat2_xferlen_to_descs(ioat, len); + + /* + * 16 source pq is only available on cb3.3 and has no completion + * write hw bug. + */ + if (num_descs && ioat2_check_space_lock(ioat, num_descs) == 0) + idx = ioat->head; + else + return NULL; + + i = 0; + + do { + struct ioat_raw_descriptor *descs[4]; + size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log); + + desc = ioat2_get_ring_ent(ioat, idx + i); + pq = desc->pq; + + descs[0] = (struct ioat_raw_descriptor *) pq; + + desc->sed = ioat3_alloc_sed(device, + sed_get_pq16_pool_idx(src_cnt)); + if (!desc->sed) { + dev_err(to_dev(chan), + "%s: no free sed entries\n", __func__); + return NULL; + } + + pq->sed_addr = desc->sed->dma; + desc->sed->parent = desc; + + descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw; + descs[2] = (void *)descs[1] + 64; + + for (s = 0; s < src_cnt; s++) + pq16_set_src(descs, src[s], offset, scf[s], s); + + /* see the comment for dma_maxpq in include/linux/dmaengine.h */ + if (dmaf_p_disabled_continue(flags)) + pq16_set_src(descs, dst[1], offset, 1, s++); + else if (dmaf_continue(flags)) { + pq16_set_src(descs, dst[0], offset, 0, s++); + pq16_set_src(descs, dst[1], offset, 1, s++); + pq16_set_src(descs, dst[1], offset, 0, s++); + } + + pq->size = xfer_size; + pq->p_addr = dst[0] + offset; + pq->q_addr = dst[1] + offset; + pq->ctl = 0; + pq->ctl_f.op = op; + pq->ctl_f.src_cnt = src16_cnt_to_hw(s); + /* we turn on descriptor write back error status */ + if (device->cap & IOAT_CAP_DWBES) + pq->ctl_f.wb_en = result ? 1 : 0; + pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P); + pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q); + + len -= xfer_size; + offset += xfer_size; + } while (++i < num_descs); + + /* last pq descriptor carries the unmap parameters and fence bit */ + desc->txd.flags = flags; + desc->len = total_len; + if (result) + desc->result = result; + pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE); + + /* with cb3.3 we should be able to do completion w/o a null desc */ + pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); + pq->ctl_f.compl_write = 1; + + dump_pq16_desc_dbg(ioat, desc); + + /* we leave the channel locked to ensure in order submission */ + return &desc->txd; +} + +static struct dma_async_tx_descriptor * ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, unsigned int src_cnt, const unsigned char *scf, size_t len, unsigned long flags) { + struct dma_device *dma = chan->device; + /* specify valid address for disabled result */ if (flags & DMA_PREP_PQ_DISABLE_P) dst[0] = dst[1]; @@ -755,11 +1297,20 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, single_source_coef[0] = scf[0]; single_source_coef[1] = 0; - return __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2, - single_source_coef, len, flags); - } else - return __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, scf, - len, flags); + return (src_cnt > 8) && (dma->max_pq > 8) ? + __ioat3_prep_pq16_lock(chan, NULL, dst, single_source, + 2, single_source_coef, len, + flags) : + __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2, + single_source_coef, len, flags); + + } else { + return (src_cnt > 8) && (dma->max_pq > 8) ? 
+ __ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt, + scf, len, flags) : + __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, + scf, len, flags); + } } struct dma_async_tx_descriptor * @@ -767,6 +1318,8 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, unsigned int src_cnt, const unsigned char *scf, size_t len, enum sum_check_flags *pqres, unsigned long flags) { + struct dma_device *dma = chan->device; + /* specify valid address for disabled result */ if (flags & DMA_PREP_PQ_DISABLE_P) pq[0] = pq[1]; @@ -778,14 +1331,18 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, */ *pqres = 0; - return __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len, - flags); + return (src_cnt > 8) && (dma->max_pq > 8) ? + __ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len, + flags) : + __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len, + flags); } static struct dma_async_tx_descriptor * ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, unsigned int src_cnt, size_t len, unsigned long flags) { + struct dma_device *dma = chan->device; unsigned char scf[src_cnt]; dma_addr_t pq[2]; @@ -794,8 +1351,11 @@ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, flags |= DMA_PREP_PQ_DISABLE_Q; pq[1] = dst; /* specify valid address for disabled result */ - return __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len, - flags); + return (src_cnt > 8) && (dma->max_pq > 8) ? + __ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len, + flags) : + __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len, + flags); } struct dma_async_tx_descriptor * @@ -803,6 +1363,7 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, size_t len, enum sum_check_flags *result, unsigned long flags) { + struct dma_device *dma = chan->device; unsigned char scf[src_cnt]; dma_addr_t pq[2]; @@ -816,8 +1377,12 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, flags |= DMA_PREP_PQ_DISABLE_Q; pq[1] = pq[0]; /* specify valid address for disabled result */ - return __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, scf, - len, flags); + + return (src_cnt > 8) && (dma->max_pq > 8) ? 
+ __ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1, + scf, len, flags) : + __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, + scf, len, flags); } static struct dma_async_tx_descriptor * @@ -1167,6 +1732,56 @@ static int ioat3_dma_self_test(struct ioatdma_device *device) return 0; } +static int ioat3_irq_reinit(struct ioatdma_device *device) +{ + int msixcnt = device->common.chancnt; + struct pci_dev *pdev = device->pdev; + int i; + struct msix_entry *msix; + struct ioat_chan_common *chan; + int err = 0; + + switch (device->irq_mode) { + case IOAT_MSIX: + + for (i = 0; i < msixcnt; i++) { + msix = &device->msix_entries[i]; + chan = ioat_chan_by_index(device, i); + devm_free_irq(&pdev->dev, msix->vector, chan); + } + + pci_disable_msix(pdev); + break; + + case IOAT_MSIX_SINGLE: + msix = &device->msix_entries[0]; + chan = ioat_chan_by_index(device, 0); + devm_free_irq(&pdev->dev, msix->vector, chan); + pci_disable_msix(pdev); + break; + + case IOAT_MSI: + chan = ioat_chan_by_index(device, 0); + devm_free_irq(&pdev->dev, pdev->irq, chan); + pci_disable_msi(pdev); + break; + + case IOAT_INTX: + chan = ioat_chan_by_index(device, 0); + devm_free_irq(&pdev->dev, pdev->irq, chan); + break; + + default: + return 0; + } + + device->irq_mode = IOAT_NOIRQ; + + err = ioat_dma_setup_interrupts(device); + + return err; +} + static int ioat3_reset_hw(struct ioat_chan_common *chan) { /* throw away whatever the channel was doing and get it @@ -1183,80 +1798,65 @@ static int ioat3_reset_hw(struct ioat_chan_common *chan) chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); - /* clear any pending errors */ - err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr); + if (device->version < IOAT_VER_3_3) { + /* clear any pending errors */ + err = pci_read_config_dword(pdev, + IOAT_PCI_CHANERR_INT_OFFSET, &chanerr); + if (err) { + dev_err(&pdev->dev, + "channel error register unreachable\n"); + return err; + } + pci_write_config_dword(pdev, + IOAT_PCI_CHANERR_INT_OFFSET, chanerr); + + /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit + * (workaround for spurious config parity error after restart) + */ + pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id); + if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) { + pci_write_config_dword(pdev, + IOAT_PCI_DMAUNCERRSTS_OFFSET, + 0x10); + } + } + + err = ioat2_reset_sync(chan, msecs_to_jiffies(200)); if (err) { - dev_err(&pdev->dev, "channel error register unreachable\n"); + dev_err(&pdev->dev, "Failed to reset!\n"); return err; } - pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr); - /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit - * (workaround for spurious config parity error after restart) - */ - pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id); - if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) - pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10); + if (device->irq_mode != IOAT_NOIRQ && is_bwd_ioat(pdev)) + err = ioat3_irq_reinit(device); - return ioat2_reset_sync(chan, msecs_to_jiffies(200)); + return err; } -static bool is_jf_ioat(struct pci_dev *pdev) +static void ioat3_intr_quirk(struct ioatdma_device *device) { - switch (pdev->device) { - case PCI_DEVICE_ID_INTEL_IOAT_JSF0: - case PCI_DEVICE_ID_INTEL_IOAT_JSF1: - case PCI_DEVICE_ID_INTEL_IOAT_JSF2: - case PCI_DEVICE_ID_INTEL_IOAT_JSF3: - case PCI_DEVICE_ID_INTEL_IOAT_JSF4: - case PCI_DEVICE_ID_INTEL_IOAT_JSF5: - case PCI_DEVICE_ID_INTEL_IOAT_JSF6: - case 
PCI_DEVICE_ID_INTEL_IOAT_JSF7: - case PCI_DEVICE_ID_INTEL_IOAT_JSF8: - case PCI_DEVICE_ID_INTEL_IOAT_JSF9: - return true; - default: - return false; - } -} + struct dma_device *dma; + struct dma_chan *c; + struct ioat_chan_common *chan; + u32 errmask; -static bool is_snb_ioat(struct pci_dev *pdev) -{ - switch (pdev->device) { - case PCI_DEVICE_ID_INTEL_IOAT_SNB0: - case PCI_DEVICE_ID_INTEL_IOAT_SNB1: - case PCI_DEVICE_ID_INTEL_IOAT_SNB2: - case PCI_DEVICE_ID_INTEL_IOAT_SNB3: - case PCI_DEVICE_ID_INTEL_IOAT_SNB4: - case PCI_DEVICE_ID_INTEL_IOAT_SNB5: - case PCI_DEVICE_ID_INTEL_IOAT_SNB6: - case PCI_DEVICE_ID_INTEL_IOAT_SNB7: - case PCI_DEVICE_ID_INTEL_IOAT_SNB8: - case PCI_DEVICE_ID_INTEL_IOAT_SNB9: - return true; - default: - return false; - } -} + dma = &device->common; -static bool is_ivb_ioat(struct pci_dev *pdev) -{ - switch (pdev->device) { - case PCI_DEVICE_ID_INTEL_IOAT_IVB0: - case PCI_DEVICE_ID_INTEL_IOAT_IVB1: - case PCI_DEVICE_ID_INTEL_IOAT_IVB2: - case PCI_DEVICE_ID_INTEL_IOAT_IVB3: - case PCI_DEVICE_ID_INTEL_IOAT_IVB4: - case PCI_DEVICE_ID_INTEL_IOAT_IVB5: - case PCI_DEVICE_ID_INTEL_IOAT_IVB6: - case PCI_DEVICE_ID_INTEL_IOAT_IVB7: - case PCI_DEVICE_ID_INTEL_IOAT_IVB8: - case PCI_DEVICE_ID_INTEL_IOAT_IVB9: - return true; - default: - return false; + /* + * if we have descriptor write back error status, we mask the + * error interrupts + */ + if (device->cap & IOAT_CAP_DWBES) { + list_for_each_entry(c, &dma->channels, device_node) { + chan = to_chan_common(c); + errmask = readl(chan->reg_base + + IOAT_CHANERR_MASK_OFFSET); + errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR | + IOAT_CHANERR_XOR_Q_ERR; + writel(errmask, chan->reg_base + + IOAT_CHANERR_MASK_OFFSET); + } } - } int ioat3_dma_probe(struct ioatdma_device *device, int dca) @@ -1268,30 +1868,33 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) struct ioat_chan_common *chan; bool is_raid_device = false; int err; - u32 cap; device->enumerate_channels = ioat2_enumerate_channels; device->reset_hw = ioat3_reset_hw; device->self_test = ioat3_dma_self_test; + device->intr_quirk = ioat3_intr_quirk; dma = &device->common; dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock; dma->device_issue_pending = ioat2_issue_pending; dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; dma->device_free_chan_resources = ioat2_free_chan_resources; - if (is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev)) + if (is_xeon_cb32(pdev)) dma->copy_align = 6; dma_cap_set(DMA_INTERRUPT, dma->cap_mask); dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock; - cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET); + device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET); + + if (is_bwd_noraid(pdev)) + device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS); /* dca is incompatible with raid operations */ - if (dca_en && (cap & (IOAT_CAP_XOR|IOAT_CAP_PQ))) - cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ); + if (dca_en && (device->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ))) + device->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ); - if (cap & IOAT_CAP_XOR) { + if (device->cap & IOAT_CAP_XOR) { is_raid_device = true; dma->max_xor = 8; dma->xor_align = 6; @@ -1302,53 +1905,86 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) dma_cap_set(DMA_XOR_VAL, dma->cap_mask); dma->device_prep_dma_xor_val = ioat3_prep_xor_val; } - if (cap & IOAT_CAP_PQ) { + + if (device->cap & IOAT_CAP_PQ) { is_raid_device = true; - dma_set_maxpq(dma, 8, 0); - dma->pq_align = 6; - dma_cap_set(DMA_PQ, dma->cap_mask); dma->device_prep_dma_pq = ioat3_prep_pq; - - 
dma_cap_set(DMA_PQ_VAL, dma->cap_mask); dma->device_prep_dma_pq_val = ioat3_prep_pq_val; + dma_cap_set(DMA_PQ, dma->cap_mask); + dma_cap_set(DMA_PQ_VAL, dma->cap_mask); - if (!(cap & IOAT_CAP_XOR)) { - dma->max_xor = 8; - dma->xor_align = 6; + if (device->cap & IOAT_CAP_RAID16SS) { + dma_set_maxpq(dma, 16, 0); + dma->pq_align = 0; + } else { + dma_set_maxpq(dma, 8, 0); + if (is_xeon_cb32(pdev)) + dma->pq_align = 6; + else + dma->pq_align = 0; + } - dma_cap_set(DMA_XOR, dma->cap_mask); + if (!(device->cap & IOAT_CAP_XOR)) { dma->device_prep_dma_xor = ioat3_prep_pqxor; - - dma_cap_set(DMA_XOR_VAL, dma->cap_mask); dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val; + dma_cap_set(DMA_XOR, dma->cap_mask); + dma_cap_set(DMA_XOR_VAL, dma->cap_mask); + + if (device->cap & IOAT_CAP_RAID16SS) { + dma->max_xor = 16; + dma->xor_align = 0; + } else { + dma->max_xor = 8; + if (is_xeon_cb32(pdev)) + dma->xor_align = 6; + else + dma->xor_align = 0; + } } } - if (is_raid_device && (cap & IOAT_CAP_FILL_BLOCK)) { + + if (is_raid_device && (device->cap & IOAT_CAP_FILL_BLOCK)) { dma_cap_set(DMA_MEMSET, dma->cap_mask); dma->device_prep_dma_memset = ioat3_prep_memset_lock; } - if (is_raid_device) { - dma->device_tx_status = ioat3_tx_status; - device->cleanup_fn = ioat3_cleanup_event; - device->timer_fn = ioat3_timer_event; - } else { - dma->device_tx_status = ioat_dma_tx_status; - device->cleanup_fn = ioat2_cleanup_event; - device->timer_fn = ioat2_timer_event; + dma->device_tx_status = ioat3_tx_status; + device->cleanup_fn = ioat3_cleanup_event; + device->timer_fn = ioat3_timer_event; + + if (is_xeon_cb32(pdev)) { + dma_cap_clear(DMA_XOR_VAL, dma->cap_mask); + dma->device_prep_dma_xor_val = NULL; + + dma_cap_clear(DMA_PQ_VAL, dma->cap_mask); + dma->device_prep_dma_pq_val = NULL; } - #ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA - dma_cap_clear(DMA_PQ_VAL, dma->cap_mask); - dma->device_prep_dma_pq_val = NULL; - #endif + /* starting with CB3.3 super extended descriptors are supported */ + if (device->cap & IOAT_CAP_RAID16SS) { + char pool_name[14]; + int i; + + /* allocate sw descriptor pool for SED */ + device->sed_pool = kmem_cache_create("ioat_sed", + sizeof(struct ioat_sed_ent), 0, 0, NULL); + if (!device->sed_pool) + return -ENOMEM; + + for (i = 0; i < MAX_SED_POOLS; i++) { + snprintf(pool_name, 14, "ioat_hw%d_sed", i); - #ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA - dma_cap_clear(DMA_XOR_VAL, dma->cap_mask); - dma->device_prep_dma_xor_val = NULL; - #endif + /* allocate SED DMA pool */ + device->sed_hw_pool[i] = dma_pool_create(pool_name, + &pdev->dev, + SED_SIZE * (i + 1), 64, 0); + if (!device->sed_hw_pool[i]) + goto sed_pool_cleanup; + + } + } err = ioat_probe(device); if (err) @@ -1371,4 +2007,28 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) device->dca = ioat3_dca_init(pdev, device->reg_base); return 0; + +sed_pool_cleanup: + if (device->sed_pool) { + int i; + kmem_cache_destroy(device->sed_pool); + + for (i = 0; i < MAX_SED_POOLS; i++) + if (device->sed_hw_pool[i]) + dma_pool_destroy(device->sed_hw_pool[i]); + } + + return -ENOMEM; +} + +void ioat3_dma_remove(struct ioatdma_device *device) +{ + if (device->sed_pool) { + int i; + kmem_cache_destroy(device->sed_pool); + + for (i = 0; i < MAX_SED_POOLS; i++) + if (device->sed_hw_pool[i]) + dma_pool_destroy(device->sed_hw_pool[i]); + } } diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h index 7cb74c6..5ee57d4 100644 --- a/drivers/dma/ioat/hw.h +++ b/drivers/dma/ioat/hw.h @@ -30,11 +30,6 @@ #define IOAT_PCI_DID_SCNB 0x65FF #define 
IOAT_PCI_DID_SNB 0x402F -#define IOAT_VER_1_2 0x12 /* Version 1.2 */ -#define IOAT_VER_2_0 0x20 /* Version 2.0 */ -#define IOAT_VER_3_0 0x30 /* Version 3.0 */ -#define IOAT_VER_3_2 0x32 /* Version 3.2 */ - #define PCI_DEVICE_ID_INTEL_IOAT_IVB0 0x0e20 #define PCI_DEVICE_ID_INTEL_IOAT_IVB1 0x0e21 #define PCI_DEVICE_ID_INTEL_IOAT_IVB2 0x0e22 @@ -46,6 +41,29 @@ #define PCI_DEVICE_ID_INTEL_IOAT_IVB8 0x0e2e #define PCI_DEVICE_ID_INTEL_IOAT_IVB9 0x0e2f +#define PCI_DEVICE_ID_INTEL_IOAT_HSW0 0x2f20 +#define PCI_DEVICE_ID_INTEL_IOAT_HSW1 0x2f21 +#define PCI_DEVICE_ID_INTEL_IOAT_HSW2 0x2f22 +#define PCI_DEVICE_ID_INTEL_IOAT_HSW3 0x2f23 +#define PCI_DEVICE_ID_INTEL_IOAT_HSW4 0x2f24 +#define PCI_DEVICE_ID_INTEL_IOAT_HSW5 0x2f25 +#define PCI_DEVICE_ID_INTEL_IOAT_HSW6 0x2f26 +#define PCI_DEVICE_ID_INTEL_IOAT_HSW7 0x2f27 +#define PCI_DEVICE_ID_INTEL_IOAT_HSW8 0x2f2e +#define PCI_DEVICE_ID_INTEL_IOAT_HSW9 0x2f2f + +#define PCI_DEVICE_ID_INTEL_IOAT_BWD0 0x0C50 +#define PCI_DEVICE_ID_INTEL_IOAT_BWD1 0x0C51 +#define PCI_DEVICE_ID_INTEL_IOAT_BWD2 0x0C52 +#define PCI_DEVICE_ID_INTEL_IOAT_BWD3 0x0C53 + +#define IOAT_VER_1_2 0x12 /* Version 1.2 */ +#define IOAT_VER_2_0 0x20 /* Version 2.0 */ +#define IOAT_VER_3_0 0x30 /* Version 3.0 */ +#define IOAT_VER_3_2 0x32 /* Version 3.2 */ +#define IOAT_VER_3_3 0x33 /* Version 3.3 */ + + int system_has_dca_enabled(struct pci_dev *pdev); struct ioat_dma_descriptor { @@ -147,7 +165,17 @@ struct ioat_xor_ext_descriptor { }; struct ioat_pq_descriptor { - uint32_t size; + union { + uint32_t size; + uint32_t dwbes; + struct { + unsigned int rsvd:25; + unsigned int p_val_err:1; + unsigned int q_val_err:1; + unsigned int rsvd1:4; + unsigned int wbes:1; + } dwbes_f; + }; union { uint32_t ctl; struct { @@ -162,9 +190,14 @@ struct ioat_pq_descriptor { unsigned int hint:1; unsigned int p_disable:1; unsigned int q_disable:1; - unsigned int rsvd:11; + unsigned int rsvd2:2; + unsigned int wb_en:1; + unsigned int prl_en:1; + unsigned int rsvd3:7; #define IOAT_OP_PQ 0x89 #define IOAT_OP_PQ_VAL 0x8a + #define IOAT_OP_PQ_16S 0xa0 + #define IOAT_OP_PQ_VAL_16S 0xa1 unsigned int op:8; } ctl_f; }; @@ -172,7 +205,10 @@ struct ioat_pq_descriptor { uint64_t p_addr; uint64_t next; uint64_t src_addr2; - uint64_t src_addr3; + union { + uint64_t src_addr3; + uint64_t sed_addr; + }; uint8_t coef[8]; uint64_t q_addr; }; @@ -221,4 +257,40 @@ struct ioat_pq_update_descriptor { struct ioat_raw_descriptor { uint64_t field[8]; }; + +struct ioat_pq16a_descriptor { + uint8_t coef[8]; + uint64_t src_addr3; + uint64_t src_addr4; + uint64_t src_addr5; + uint64_t src_addr6; + uint64_t src_addr7; + uint64_t src_addr8; + uint64_t src_addr9; +}; + +struct ioat_pq16b_descriptor { + uint64_t src_addr10; + uint64_t src_addr11; + uint64_t src_addr12; + uint64_t src_addr13; + uint64_t src_addr14; + uint64_t src_addr15; + uint64_t src_addr16; + uint64_t rsvd; +}; + +union ioat_sed_pq_descriptor { + struct ioat_pq16a_descriptor a; + struct ioat_pq16b_descriptor b; +}; + +#define SED_SIZE 64 + +struct ioat_sed_raw_descriptor { + uint64_t a[8]; + uint64_t b[8]; + uint64_t c[8]; +}; + #endif diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c index 71c7ecd..2c8d560 100644 --- a/drivers/dma/ioat/pci.c +++ b/drivers/dma/ioat/pci.c @@ -94,6 +94,23 @@ static struct pci_device_id ioat_pci_tbl[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW0) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW1) }, + { 
PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW2) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW3) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW4) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW5) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW6) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW7) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW8) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW9) }, + + /* I/OAT v3.3 platforms */ + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) }, + { 0, } }; MODULE_DEVICE_TABLE(pci, ioat_pci_tbl); @@ -190,6 +207,9 @@ static void ioat_remove(struct pci_dev *pdev) if (!device) return; + if (device->version >= IOAT_VER_3_0) + ioat3_dma_remove(device); + dev_err(&pdev->dev, "Removing dma and dca services\n"); if (device->dca) { unregister_dca_provider(device->dca, &pdev->dev); diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h index 1391798..2f1cfa0 100644 --- a/drivers/dma/ioat/registers.h +++ b/drivers/dma/ioat/registers.h @@ -79,6 +79,8 @@ #define IOAT_CAP_APIC 0x00000080 #define IOAT_CAP_XOR 0x00000100 #define IOAT_CAP_PQ 0x00000200 +#define IOAT_CAP_DWBES 0x00002000 +#define IOAT_CAP_RAID16SS 0x00020000 #define IOAT_CHANNEL_MMIO_SIZE 0x80 /* Each Channel MMIO space is this size */ @@ -93,6 +95,8 @@ #define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004 #define IOAT_CHANCTRL_INT_REARM 0x0001 #define IOAT_CHANCTRL_RUN (IOAT_CHANCTRL_INT_REARM |\ + IOAT_CHANCTRL_ERR_INT_EN |\ + IOAT_CHANCTRL_ERR_COMPLETION_EN |\ IOAT_CHANCTRL_ANY_ERR_ABORT_EN) #define IOAT_DMA_COMP_OFFSET 0x02 /* 16-bit DMA channel compatibility */ diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index 8c61d17..d39c2cd 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c @@ -1642,7 +1642,7 @@ static int __init ipu_idmac_init(struct ipu *ipu) return dma_async_device_register(&idmac->dma); } -static void __exit ipu_idmac_exit(struct ipu *ipu) +static void ipu_idmac_exit(struct ipu *ipu) { int i; struct idmac *idmac = &ipu->idmac; @@ -1756,7 +1756,7 @@ err_noirq: return ret; } -static int __exit ipu_remove(struct platform_device *pdev) +static int ipu_remove(struct platform_device *pdev) { struct ipu *ipu = platform_get_drvdata(pdev); @@ -1781,7 +1781,7 @@ static struct platform_driver ipu_platform_driver = { .name = "ipu-core", .owner = THIS_MODULE, }, - .remove = __exit_p(ipu_remove), + .remove = ipu_remove, }; static int __init ipu_init(void) diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c index 69d04d2..7aa0864 100644 --- a/drivers/dma/of-dma.c +++ b/drivers/dma/of-dma.c @@ -13,43 +13,31 @@ #include #include #include -#include +#include #include #include #include static LIST_HEAD(of_dma_list); -static DEFINE_SPINLOCK(of_dma_lock); +static DEFINE_MUTEX(of_dma_lock); /** - * of_dma_get_controller - Get a DMA controller in DT DMA helpers list + * of_dma_find_controller - Get a DMA controller in DT DMA helpers list * @dma_spec: pointer to DMA specifier as found in the device tree * * Finds a DMA controller with matching device node and number for dma cells - * in a list of registered DMA controllers. If a match is found the use_count - * variable is increased and a valid pointer to the DMA data stored is retuned. - * A NULL pointer is returned if no match is found. 
+ * in a list of registered DMA controllers. If a match is found a valid pointer + * to the DMA data stored is returned. A NULL pointer is returned if no match is + * found. */ -static struct of_dma *of_dma_get_controller(struct of_phandle_args *dma_spec) +static struct of_dma *of_dma_find_controller(struct of_phandle_args *dma_spec) { struct of_dma *ofdma; - spin_lock(&of_dma_lock); - - if (list_empty(&of_dma_list)) { - spin_unlock(&of_dma_lock); - return NULL; - } - list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers) if ((ofdma->of_node == dma_spec->np) && - (ofdma->of_dma_nbcells == dma_spec->args_count)) { - ofdma->use_count++; - spin_unlock(&of_dma_lock); + (ofdma->of_dma_nbcells == dma_spec->args_count)) return ofdma; - } - - spin_unlock(&of_dma_lock); pr_debug("%s: can't find DMA controller %s\n", __func__, dma_spec->np->full_name); @@ -58,22 +46,6 @@ static struct of_dma *of_dma_get_controller(struct of_phandle_args *dma_spec) } /** - * of_dma_put_controller - Decrement use count for a registered DMA controller - * @of_dma: pointer to DMA controller data - * - * Decrements the use_count variable in the DMA data structure. This function - * should be called only when a valid pointer is returned from - * of_dma_get_controller() and no further accesses to data referenced by that - * pointer are needed. - */ -static void of_dma_put_controller(struct of_dma *ofdma) -{ - spin_lock(&of_dma_lock); - ofdma->use_count--; - spin_unlock(&of_dma_lock); -} - -/** * of_dma_controller_register - Register a DMA controller to DT DMA helpers * @np: device node of DMA controller * @of_dma_xlate: translation function which converts a phandle @@ -93,6 +65,7 @@ int of_dma_controller_register(struct device_node *np, { struct of_dma *ofdma; int nbcells; + const __be32 *prop; if (!np || !of_dma_xlate) { pr_err("%s: not enough information provided\n", __func__); @@ -103,8 +76,11 @@ int of_dma_controller_register(struct device_node *np, if (!ofdma) return -ENOMEM; - nbcells = be32_to_cpup(of_get_property(np, "#dma-cells", NULL)); - if (!nbcells) { + prop = of_get_property(np, "#dma-cells", NULL); + if (prop) + nbcells = be32_to_cpup(prop); + + if (!prop || !nbcells) { pr_err("%s: #dma-cells property is missing or invalid\n", __func__); kfree(ofdma); @@ -115,12 +91,11 @@ int of_dma_controller_register(struct device_node *np, ofdma->of_dma_nbcells = nbcells; ofdma->of_dma_xlate = of_dma_xlate; ofdma->of_dma_data = data; - ofdma->use_count = 0; /* Now queue of_dma controller structure in list */ - spin_lock(&of_dma_lock); + mutex_lock(&of_dma_lock); list_add_tail(&ofdma->of_dma_controllers, &of_dma_list); - spin_unlock(&of_dma_lock); + mutex_unlock(&of_dma_lock); return 0; } @@ -132,32 +107,20 @@ EXPORT_SYMBOL_GPL(of_dma_controller_register); * * Memory allocated by of_dma_controller_register() is freed here. 
*/ -int of_dma_controller_free(struct device_node *np) +void of_dma_controller_free(struct device_node *np) { struct of_dma *ofdma; - spin_lock(&of_dma_lock); - - if (list_empty(&of_dma_list)) { - spin_unlock(&of_dma_lock); - return -ENODEV; - } + mutex_lock(&of_dma_lock); list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers) if (ofdma->of_node == np) { - if (ofdma->use_count) { - spin_unlock(&of_dma_lock); - return -EBUSY; - } - list_del(&ofdma->of_dma_controllers); - spin_unlock(&of_dma_lock); kfree(ofdma); - return 0; + break; } - spin_unlock(&of_dma_lock); - return -ENODEV; + mutex_unlock(&of_dma_lock); } EXPORT_SYMBOL_GPL(of_dma_controller_free); @@ -172,8 +135,8 @@ EXPORT_SYMBOL_GPL(of_dma_controller_free); * specifiers, matches the name provided. Returns 0 if the name matches and * a valid pointer to the DMA specifier is found. Otherwise returns -ENODEV. */ -static int of_dma_match_channel(struct device_node *np, char *name, int index, - struct of_phandle_args *dma_spec) +static int of_dma_match_channel(struct device_node *np, const char *name, + int index, struct of_phandle_args *dma_spec) { const char *s; @@ -198,7 +161,7 @@ static int of_dma_match_channel(struct device_node *np, char *name, int index, * Returns pointer to appropriate dma channel on success or NULL on error. */ struct dma_chan *of_dma_request_slave_channel(struct device_node *np, - char *name) + const char *name) { struct of_phandle_args dma_spec; struct of_dma *ofdma; @@ -220,14 +183,15 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np, if (of_dma_match_channel(np, name, i, &dma_spec)) continue; - ofdma = of_dma_get_controller(&dma_spec); - - if (!ofdma) - continue; + mutex_lock(&of_dma_lock); + ofdma = of_dma_find_controller(&dma_spec); - chan = ofdma->of_dma_xlate(&dma_spec, ofdma); + if (ofdma) + chan = ofdma->of_dma_xlate(&dma_spec, ofdma); + else + chan = NULL; - of_dma_put_controller(ofdma); + mutex_unlock(&of_dma_lock); of_node_put(dma_spec.np); diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index 08b43bf..ec3fc4f 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c @@ -16,6 +16,8 @@ #include #include #include +#include +#include #include "virt-dma.h" @@ -67,6 +69,10 @@ static const unsigned es_bytes[] = { [OMAP_DMA_DATA_TYPE_S32] = 4, }; +static struct of_dma_filter_info omap_dma_info = { + .filter_fn = omap_dma_filter_fn, +}; + static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d) { return container_of(d, struct omap_dmadev, ddev); @@ -629,8 +635,22 @@ static int omap_dma_probe(struct platform_device *pdev) pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n", rc); omap_dma_free(od); - } else { - platform_set_drvdata(pdev, od); + return rc; + } + + platform_set_drvdata(pdev, od); + + if (pdev->dev.of_node) { + omap_dma_info.dma_cap = od->ddev.cap_mask; + + /* Device-tree DMA controller registration */ + rc = of_dma_controller_register(pdev->dev.of_node, + of_dma_simple_xlate, &omap_dma_info); + if (rc) { + pr_warn("OMAP-DMA: failed to register DMA controller\n"); + dma_async_device_unregister(&od->ddev); + omap_dma_free(od); + } } dev_info(&pdev->dev, "OMAP DMA engine driver\n"); @@ -642,18 +662,32 @@ static int omap_dma_remove(struct platform_device *pdev) { struct omap_dmadev *od = platform_get_drvdata(pdev); + if (pdev->dev.of_node) + of_dma_controller_free(pdev->dev.of_node); + dma_async_device_unregister(&od->ddev); omap_dma_free(od); return 0; } +static const struct of_device_id omap_dma_match[] = { + { 
.compatible = "ti,omap2420-sdma", }, + { .compatible = "ti,omap2430-sdma", }, + { .compatible = "ti,omap3430-sdma", }, + { .compatible = "ti,omap3630-sdma", }, + { .compatible = "ti,omap4430-sdma", }, + {}, +}; +MODULE_DEVICE_TABLE(of, omap_dma_match); + static struct platform_driver omap_dma_driver = { .probe = omap_dma_probe, .remove = omap_dma_remove, .driver = { .name = "omap-dma-engine", .owner = THIS_MODULE, + .of_match_table = of_match_ptr(omap_dma_match), }, }; diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index d01faeb..ce3dc3e 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -476,7 +476,7 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan) dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i); if (!ret) { - ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO); + ret = pdc_alloc_desc(&pd_chan->chan, GFP_ATOMIC); if (ret) { spin_lock(&pd_chan->lock); pd_chan->descs_allocated++; diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 5dbc594..a17553f 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -26,6 +26,7 @@ #include #include #include +#include #include "dmaengine.h" #define PL330_MAX_CHAN 8 @@ -2288,13 +2289,12 @@ static inline void fill_queue(struct dma_pl330_chan *pch) /* If already submitted */ if (desc->status == BUSY) - break; + continue; ret = pl330_submit_req(pch->pl330_chid, &desc->req); if (!ret) { desc->status = BUSY; - break; } else if (ret == -EAGAIN) { /* QFull or DMAC Dying */ break; @@ -2904,9 +2904,9 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0; res = &adev->res; - pi->base = devm_request_and_ioremap(&adev->dev, res); - if (!pi->base) - return -ENXIO; + pi->base = devm_ioremap_resource(&adev->dev, res); + if (IS_ERR(pi->base)) + return PTR_ERR(pi->base); amba_set_drvdata(adev, pdmac); diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig new file mode 100644 index 0000000..5c1dee2 --- /dev/null +++ b/drivers/dma/sh/Kconfig @@ -0,0 +1,24 @@ +# +# DMA engine configuration for sh +# + +config SH_DMAE_BASE + bool "Renesas SuperH DMA Engine support" + depends on (SUPERH && SH_DMA) || (ARM && ARCH_SHMOBILE) + depends on !SH_DMA_API + default y + select DMA_ENGINE + help + Enable support for the Renesas SuperH DMA controllers. + +config SH_DMAE + tristate "Renesas SuperH DMAC support" + depends on SH_DMAE_BASE + help + Enable support for the Renesas SuperH DMA controllers. + +config SUDMAC + tristate "Renesas SUDMAC support" + depends on SH_DMAE_BASE + help + Enable support for the Renesas SUDMAC controllers. diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile index 54ae957..c07ca46 100644 --- a/drivers/dma/sh/Makefile +++ b/drivers/dma/sh/Makefile @@ -1,2 +1,3 @@ -obj-$(CONFIG_SH_DMAE) += shdma-base.o +obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o obj-$(CONFIG_SH_DMAE) += shdma.o +obj-$(CONFIG_SUDMAC) += sudmac.o diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c new file mode 100644 index 0000000..e7c94bb --- /dev/null +++ b/drivers/dma/sh/sudmac.c @@ -0,0 +1,428 @@ +/* + * Renesas SUDMAC support + * + * Copyright (C) 2013 Renesas Solutions Corp. + * + * based on drivers/dma/sh/shdma.c: + * Copyright (C) 2011-2012 Guennadi Liakhovetski + * Copyright (C) 2009 Nobuhiro Iwamatsu + * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. + * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. 
+ * + * This is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include + +struct sudmac_chan { + struct shdma_chan shdma_chan; + void __iomem *base; + char dev_id[16]; /* unique name per DMAC of channel */ + + u32 offset; /* for CFG, BA, BBC, CA, CBC, DEN */ + u32 cfg; + u32 dint_end_bit; +}; + +struct sudmac_device { + struct shdma_dev shdma_dev; + struct sudmac_pdata *pdata; + void __iomem *chan_reg; +}; + +struct sudmac_regs { + u32 base_addr; + u32 base_byte_count; +}; + +struct sudmac_desc { + struct sudmac_regs hw; + struct shdma_desc shdma_desc; +}; + +#define to_chan(schan) container_of(schan, struct sudmac_chan, shdma_chan) +#define to_desc(sdesc) container_of(sdesc, struct sudmac_desc, shdma_desc) +#define to_sdev(sc) container_of(sc->shdma_chan.dma_chan.device, \ + struct sudmac_device, shdma_dev.dma_dev) + +/* SUDMAC register */ +#define SUDMAC_CH0CFG 0x00 +#define SUDMAC_CH0BA 0x10 +#define SUDMAC_CH0BBC 0x18 +#define SUDMAC_CH0CA 0x20 +#define SUDMAC_CH0CBC 0x28 +#define SUDMAC_CH0DEN 0x30 +#define SUDMAC_DSTSCLR 0x38 +#define SUDMAC_DBUFCTRL 0x3C +#define SUDMAC_DINTCTRL 0x40 +#define SUDMAC_DINTSTS 0x44 +#define SUDMAC_DINTSTSCLR 0x48 +#define SUDMAC_CH0SHCTRL 0x50 + +/* Definitions for the sudmac_channel.config */ +#define SUDMAC_SENDBUFM 0x1000 /* b12: Transmit Buffer Mode */ +#define SUDMAC_RCVENDM 0x0100 /* b8: Receive Data Transfer End Mode */ +#define SUDMAC_LBA_WAIT 0x0030 /* b5-4: Local Bus Access Wait */ + +/* Definitions for the sudmac_channel.dint_end_bit */ +#define SUDMAC_CH1ENDE 0x0002 /* b1: Ch1 DMA Transfer End Int Enable */ +#define SUDMAC_CH0ENDE 0x0001 /* b0: Ch0 DMA Transfer End Int Enable */ + +#define SUDMAC_DRV_NAME "sudmac" + +static void sudmac_writel(struct sudmac_chan *sc, u32 data, u32 reg) +{ + iowrite32(data, sc->base + reg); +} + +static u32 sudmac_readl(struct sudmac_chan *sc, u32 reg) +{ + return ioread32(sc->base + reg); +} + +static bool sudmac_is_busy(struct sudmac_chan *sc) +{ + u32 den = sudmac_readl(sc, SUDMAC_CH0DEN + sc->offset); + + if (den) + return true; /* working */ + + return false; /* waiting */ +} + +static void sudmac_set_reg(struct sudmac_chan *sc, struct sudmac_regs *hw, + struct shdma_desc *sdesc) +{ + sudmac_writel(sc, sc->cfg, SUDMAC_CH0CFG + sc->offset); + sudmac_writel(sc, hw->base_addr, SUDMAC_CH0BA + sc->offset); + sudmac_writel(sc, hw->base_byte_count, SUDMAC_CH0BBC + sc->offset); +} + +static void sudmac_start(struct sudmac_chan *sc) +{ + u32 dintctrl = sudmac_readl(sc, SUDMAC_DINTCTRL); + + sudmac_writel(sc, dintctrl | sc->dint_end_bit, SUDMAC_DINTCTRL); + sudmac_writel(sc, 1, SUDMAC_CH0DEN + sc->offset); +} + +static void sudmac_start_xfer(struct shdma_chan *schan, + struct shdma_desc *sdesc) +{ + struct sudmac_chan *sc = to_chan(schan); + struct sudmac_desc *sd = to_desc(sdesc); + + sudmac_set_reg(sc, &sd->hw, sdesc); + sudmac_start(sc); +} + +static bool sudmac_channel_busy(struct shdma_chan *schan) +{ + struct sudmac_chan *sc = to_chan(schan); + + return sudmac_is_busy(sc); +} + +static void sudmac_setup_xfer(struct shdma_chan *schan, int slave_id) +{ +} + +static const struct sudmac_slave_config *sudmac_find_slave( + struct sudmac_chan *sc, int slave_id) +{ + struct sudmac_device *sdev = to_sdev(sc); + struct sudmac_pdata *pdata = sdev->pdata; + const struct sudmac_slave_config *cfg; + int i; + + for (i = 0, cfg = 
pdata->slave; i < pdata->slave_num; i++, cfg++) + if (cfg->slave_id == slave_id) + return cfg; + + return NULL; +} + +static int sudmac_set_slave(struct shdma_chan *schan, int slave_id, bool try) +{ + struct sudmac_chan *sc = to_chan(schan); + const struct sudmac_slave_config *cfg = sudmac_find_slave(sc, slave_id); + + if (!cfg) + return -ENODEV; + + return 0; +} + +static inline void sudmac_dma_halt(struct sudmac_chan *sc) +{ + u32 dintctrl = sudmac_readl(sc, SUDMAC_DINTCTRL); + + sudmac_writel(sc, 0, SUDMAC_CH0DEN + sc->offset); + sudmac_writel(sc, dintctrl & ~sc->dint_end_bit, SUDMAC_DINTCTRL); + sudmac_writel(sc, sc->dint_end_bit, SUDMAC_DINTSTSCLR); +} + +static int sudmac_desc_setup(struct shdma_chan *schan, + struct shdma_desc *sdesc, + dma_addr_t src, dma_addr_t dst, size_t *len) +{ + struct sudmac_chan *sc = to_chan(schan); + struct sudmac_desc *sd = to_desc(sdesc); + + dev_dbg(sc->shdma_chan.dev, "%s: src=%x, dst=%x, len=%d\n", + __func__, src, dst, *len); + + if (*len > schan->max_xfer_len) + *len = schan->max_xfer_len; + + if (dst) + sd->hw.base_addr = dst; + else if (src) + sd->hw.base_addr = src; + sd->hw.base_byte_count = *len; + + return 0; +} + +static void sudmac_halt(struct shdma_chan *schan) +{ + struct sudmac_chan *sc = to_chan(schan); + + sudmac_dma_halt(sc); +} + +static bool sudmac_chan_irq(struct shdma_chan *schan, int irq) +{ + struct sudmac_chan *sc = to_chan(schan); + u32 dintsts = sudmac_readl(sc, SUDMAC_DINTSTS); + + if (!(dintsts & sc->dint_end_bit)) + return false; + + /* DMA stop */ + sudmac_dma_halt(sc); + + return true; +} + +static size_t sudmac_get_partial(struct shdma_chan *schan, + struct shdma_desc *sdesc) +{ + struct sudmac_chan *sc = to_chan(schan); + struct sudmac_desc *sd = to_desc(sdesc); + u32 current_byte_count = sudmac_readl(sc, SUDMAC_CH0CBC + sc->offset); + + return sd->hw.base_byte_count - current_byte_count; +} + +static bool sudmac_desc_completed(struct shdma_chan *schan, + struct shdma_desc *sdesc) +{ + struct sudmac_chan *sc = to_chan(schan); + struct sudmac_desc *sd = to_desc(sdesc); + u32 current_addr = sudmac_readl(sc, SUDMAC_CH0CA + sc->offset); + + return sd->hw.base_addr + sd->hw.base_byte_count == current_addr; +} + +static int sudmac_chan_probe(struct sudmac_device *su_dev, int id, int irq, + unsigned long flags) +{ + struct shdma_dev *sdev = &su_dev->shdma_dev; + struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev); + struct sudmac_chan *sc; + struct shdma_chan *schan; + int err; + + sc = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_chan), GFP_KERNEL); + if (!sc) { + dev_err(sdev->dma_dev.dev, + "No free memory for allocating dma channels!\n"); + return -ENOMEM; + } + + schan = &sc->shdma_chan; + schan->max_xfer_len = 64 * 1024 * 1024 - 1; + + shdma_chan_probe(sdev, schan, id); + + sc->base = su_dev->chan_reg; + + /* get platform_data */ + sc->offset = su_dev->pdata->channel->offset; + if (su_dev->pdata->channel->config & SUDMAC_TX_BUFFER_MODE) + sc->cfg |= SUDMAC_SENDBUFM; + if (su_dev->pdata->channel->config & SUDMAC_RX_END_MODE) + sc->cfg |= SUDMAC_RCVENDM; + sc->cfg |= (su_dev->pdata->channel->wait << 4) & SUDMAC_LBA_WAIT; + + if (su_dev->pdata->channel->dint_end_bit & SUDMAC_DMA_BIT_CH0) + sc->dint_end_bit |= SUDMAC_CH0ENDE; + if (su_dev->pdata->channel->dint_end_bit & SUDMAC_DMA_BIT_CH1) + sc->dint_end_bit |= SUDMAC_CH1ENDE; + + /* set up channel irq */ + if (pdev->id >= 0) + snprintf(sc->dev_id, sizeof(sc->dev_id), "sudmac%d.%d", + pdev->id, id); + else + snprintf(sc->dev_id, sizeof(sc->dev_id), 
"sudmac%d", id); + + err = shdma_request_irq(schan, irq, flags, sc->dev_id); + if (err) { + dev_err(sdev->dma_dev.dev, + "DMA channel %d request_irq failed %d\n", id, err); + goto err_no_irq; + } + + return 0; + +err_no_irq: + /* remove from dmaengine device node */ + shdma_chan_remove(schan); + return err; +} + +static void sudmac_chan_remove(struct sudmac_device *su_dev) +{ + struct dma_device *dma_dev = &su_dev->shdma_dev.dma_dev; + struct shdma_chan *schan; + int i; + + shdma_for_each_chan(schan, &su_dev->shdma_dev, i) { + struct sudmac_chan *sc = to_chan(schan); + + BUG_ON(!schan); + + shdma_free_irq(&sc->shdma_chan); + shdma_chan_remove(schan); + } + dma_dev->chancnt = 0; +} + +static dma_addr_t sudmac_slave_addr(struct shdma_chan *schan) +{ + /* SUDMAC doesn't need the address */ + return 0; +} + +static struct shdma_desc *sudmac_embedded_desc(void *buf, int i) +{ + return &((struct sudmac_desc *)buf)[i].shdma_desc; +} + +static const struct shdma_ops sudmac_shdma_ops = { + .desc_completed = sudmac_desc_completed, + .halt_channel = sudmac_halt, + .channel_busy = sudmac_channel_busy, + .slave_addr = sudmac_slave_addr, + .desc_setup = sudmac_desc_setup, + .set_slave = sudmac_set_slave, + .setup_xfer = sudmac_setup_xfer, + .start_xfer = sudmac_start_xfer, + .embedded_desc = sudmac_embedded_desc, + .chan_irq = sudmac_chan_irq, + .get_partial = sudmac_get_partial, +}; + +static int sudmac_probe(struct platform_device *pdev) +{ + struct sudmac_pdata *pdata = pdev->dev.platform_data; + int err, i; + struct sudmac_device *su_dev; + struct dma_device *dma_dev; + struct resource *chan, *irq_res; + + /* get platform data */ + if (!pdata) + return -ENODEV; + + chan = platform_get_resource(pdev, IORESOURCE_MEM, 0); + irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!chan || !irq_res) + return -ENODEV; + + err = -ENOMEM; + su_dev = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_device), + GFP_KERNEL); + if (!su_dev) { + dev_err(&pdev->dev, "Not enough memory\n"); + return err; + } + + dma_dev = &su_dev->shdma_dev.dma_dev; + + su_dev->chan_reg = devm_request_and_ioremap(&pdev->dev, chan); + if (!su_dev->chan_reg) + return err; + + dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); + + su_dev->shdma_dev.ops = &sudmac_shdma_ops; + su_dev->shdma_dev.desc_size = sizeof(struct sudmac_desc); + err = shdma_init(&pdev->dev, &su_dev->shdma_dev, pdata->channel_num); + if (err < 0) + return err; + + /* platform data */ + su_dev->pdata = pdev->dev.platform_data; + + platform_set_drvdata(pdev, su_dev); + + /* Create DMA Channel */ + for (i = 0; i < pdata->channel_num; i++) { + err = sudmac_chan_probe(su_dev, i, irq_res->start, IRQF_SHARED); + if (err) + goto chan_probe_err; + } + + err = dma_async_device_register(&su_dev->shdma_dev.dma_dev); + if (err < 0) + goto chan_probe_err; + + return err; + +chan_probe_err: + sudmac_chan_remove(su_dev); + + platform_set_drvdata(pdev, NULL); + shdma_cleanup(&su_dev->shdma_dev); + + return err; +} + +static int sudmac_remove(struct platform_device *pdev) +{ + struct sudmac_device *su_dev = platform_get_drvdata(pdev); + struct dma_device *dma_dev = &su_dev->shdma_dev.dma_dev; + + dma_async_device_unregister(dma_dev); + sudmac_chan_remove(su_dev); + shdma_cleanup(&su_dev->shdma_dev); + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static struct platform_driver sudmac_driver = { + .driver = { + .owner = THIS_MODULE, + .name = SUDMAC_DRV_NAME, + }, + .probe = sudmac_probe, + .remove = sudmac_remove, +}; +module_platform_driver(sudmac_driver); + 
+MODULE_AUTHOR("Yoshihiro Shimoda"); +MODULE_DESCRIPTION("Renesas SUDMAC driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:" SUDMAC_DRV_NAME); diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c index 1d627e2..1765a0a 100644 --- a/drivers/dma/sirf-dma.c +++ b/drivers/dma/sirf-dma.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include "dmaengine.h" @@ -78,6 +79,7 @@ struct sirfsoc_dma { struct sirfsoc_dma_chan channels[SIRFSOC_DMA_CHANNELS]; void __iomem *base; int irq; + struct clk *clk; bool is_marco; }; @@ -639,6 +641,12 @@ static int sirfsoc_dma_probe(struct platform_device *op) return -EINVAL; } + sdma->clk = devm_clk_get(dev, NULL); + if (IS_ERR(sdma->clk)) { + dev_err(dev, "failed to get a clock.\n"); + return PTR_ERR(sdma->clk); + } + ret = of_address_to_resource(dn, 0, &res); if (ret) { dev_err(dev, "Error parsing memory region!\n"); @@ -698,6 +706,8 @@ static int sirfsoc_dma_probe(struct platform_device *op) tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma); + clk_prepare_enable(sdma->clk); + /* Register DMA engine */ dev_set_drvdata(dev, sdma); ret = dma_async_device_register(dma); @@ -720,6 +730,7 @@ static int sirfsoc_dma_remove(struct platform_device *op) struct device *dev = &op->dev; struct sirfsoc_dma *sdma = dev_get_drvdata(dev); + clk_disable_unprepare(sdma->clk); dma_async_device_unregister(&sdma->dma); free_irq(sdma->irq, sdma); irq_dispose_mapping(sdma->irq); @@ -742,7 +753,18 @@ static struct platform_driver sirfsoc_dma_driver = { }, }; -module_platform_driver(sirfsoc_dma_driver); +static __init int sirfsoc_dma_init(void) +{ + return platform_driver_register(&sirfsoc_dma_driver); +} + +static void __exit sirfsoc_dma_exit(void) +{ + platform_driver_unregister(&sirfsoc_dma_driver); +} + +subsys_initcall(sirfsoc_dma_init); +module_exit(sirfsoc_dma_exit); MODULE_AUTHOR("Rongjun Ying , " "Barry Song "); diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index fcee27e..ce19340 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -199,6 +200,7 @@ struct tegra_dma_channel { /* Channel-slave specific configuration */ struct dma_slave_config dma_sconfig; + struct tegra_dma_channel_regs channel_reg; }; /* tegra_dma: Tegra DMA specific information */ @@ -1213,7 +1215,6 @@ static const struct tegra_dma_chip_data tegra20_dma_chip_data = { .support_channel_pause = false, }; -#if defined(CONFIG_OF) /* Tegra30 specific DMA controller information */ static const struct tegra_dma_chip_data tegra30_dma_chip_data = { .nr_channels = 32, @@ -1243,7 +1244,6 @@ static const struct of_device_id tegra_dma_of_match[] = { }, }; MODULE_DEVICE_TABLE(of, tegra_dma_of_match); -#endif static int tegra_dma_probe(struct platform_device *pdev) { @@ -1252,20 +1252,14 @@ static int tegra_dma_probe(struct platform_device *pdev) int ret; int i; const struct tegra_dma_chip_data *cdata = NULL; + const struct of_device_id *match; - if (pdev->dev.of_node) { - const struct of_device_id *match; - match = of_match_device(of_match_ptr(tegra_dma_of_match), - &pdev->dev); - if (!match) { - dev_err(&pdev->dev, "Error: No device match found\n"); - return -ENODEV; - } - cdata = match->data; - } else { - /* If no device tree then fallback to tegra20 */ - cdata = &tegra20_dma_chip_data; + match = of_match_device(tegra_dma_of_match, &pdev->dev); + if (!match) { + dev_err(&pdev->dev, "Error: No device match found\n"); + return 
-ENODEV; } + cdata = match->data; tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels * sizeof(struct tegra_dma_channel), GFP_KERNEL); @@ -1448,11 +1442,74 @@ static int tegra_dma_runtime_resume(struct device *dev) return 0; } +#ifdef CONFIG_PM_SLEEP +static int tegra_dma_pm_suspend(struct device *dev) +{ + struct tegra_dma *tdma = dev_get_drvdata(dev); + int i; + int ret; + + /* Enable clock before accessing register */ + ret = tegra_dma_runtime_resume(dev); + if (ret < 0) + return ret; + + tdma->reg_gen = tdma_read(tdma, TEGRA_APBDMA_GENERAL); + for (i = 0; i < tdma->chip_data->nr_channels; i++) { + struct tegra_dma_channel *tdc = &tdma->channels[i]; + struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg; + + ch_reg->csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR); + ch_reg->ahb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBPTR); + ch_reg->apb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBPTR); + ch_reg->ahb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBSEQ); + ch_reg->apb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBSEQ); + } + + /* Disable clock */ + tegra_dma_runtime_suspend(dev); + return 0; +} + +static int tegra_dma_pm_resume(struct device *dev) +{ + struct tegra_dma *tdma = dev_get_drvdata(dev); + int i; + int ret; + + /* Enable clock before accessing register */ + ret = tegra_dma_runtime_resume(dev); + if (ret < 0) + return ret; + + tdma_write(tdma, TEGRA_APBDMA_GENERAL, tdma->reg_gen); + tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0); + tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul); + + for (i = 0; i < tdma->chip_data->nr_channels; i++) { + struct tegra_dma_channel *tdc = &tdma->channels[i]; + struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg; + + tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_reg->apb_seq); + tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_reg->apb_ptr); + tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_reg->ahb_seq); + tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_reg->ahb_ptr); + tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, + (ch_reg->csr & ~TEGRA_APBDMA_CSR_ENB)); + } + + /* Disable clock */ + tegra_dma_runtime_suspend(dev); + return 0; +} +#endif + static const struct dev_pm_ops tegra_dma_dev_pm_ops = { #ifdef CONFIG_PM_RUNTIME .runtime_suspend = tegra_dma_runtime_suspend, .runtime_resume = tegra_dma_runtime_resume, #endif + SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume) }; static struct platform_driver tegra_dmac_driver = { @@ -1460,7 +1517,7 @@ static struct platform_driver tegra_dmac_driver = { .name = "tegra-apbdma", .owner = THIS_MODULE, .pm = &tegra_dma_dev_pm_ops, - .of_match_table = of_match_ptr(tegra_dma_of_match), + .of_match_table = tegra_dma_of_match, }, .probe = tegra_dma_probe, .remove = tegra_dma_remove, diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index 952f823..26107ba 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c @@ -823,7 +823,7 @@ static struct platform_driver td_driver = { .owner = THIS_MODULE, }, .probe = td_probe, - .remove = __exit_p(td_remove), + .remove = td_remove, }; module_platform_driver(td_driver); diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c index 913f55c..a59fb48 100644 --- a/drivers/dma/txx9dmac.c +++ b/drivers/dma/txx9dmac.c @@ -1190,7 +1190,7 @@ static int __init txx9dmac_chan_probe(struct platform_device *pdev) return 0; } -static int __exit txx9dmac_chan_remove(struct platform_device *pdev) +static int txx9dmac_chan_remove(struct platform_device *pdev) { struct txx9dmac_chan *dc = platform_get_drvdata(pdev); @@ -1252,7 +1252,7 @@ static int __init 
txx9dmac_probe(struct platform_device *pdev) return 0; } -static int __exit txx9dmac_remove(struct platform_device *pdev) +static int txx9dmac_remove(struct platform_device *pdev) { struct txx9dmac_dev *ddev = platform_get_drvdata(pdev); @@ -1299,14 +1299,14 @@ static const struct dev_pm_ops txx9dmac_dev_pm_ops = { }; static struct platform_driver txx9dmac_chan_driver = { - .remove = __exit_p(txx9dmac_chan_remove), + .remove = txx9dmac_chan_remove, .driver = { .name = "txx9dmac-chan", }, }; static struct platform_driver txx9dmac_driver = { - .remove = __exit_p(txx9dmac_remove), + .remove = txx9dmac_remove, .shutdown = txx9dmac_shutdown, .driver = { .name = "txx9dmac", diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c index 5899a76..67610a6 100644 --- a/drivers/edac/edac_mc_sysfs.c +++ b/drivers/edac/edac_mc_sysfs.c @@ -87,7 +87,7 @@ static struct device *mci_pdev; /* * various constants for Memory Controllers */ -static const char *mem_types[] = { +static const char * const mem_types[] = { [MEM_EMPTY] = "Empty", [MEM_RESERVED] = "Reserved", [MEM_UNKNOWN] = "Unknown", @@ -107,7 +107,7 @@ static const char *mem_types[] = { [MEM_RDDR3] = "Registered-DDR3" }; -static const char *dev_types[] = { +static const char * const dev_types[] = { [DEV_UNKNOWN] = "Unknown", [DEV_X1] = "x1", [DEV_X2] = "x2", @@ -118,7 +118,7 @@ static const char *dev_types[] = { [DEV_X64] = "x64" }; -static const char *edac_caps[] = { +static const char * const edac_caps[] = { [EDAC_UNKNOWN] = "Unknown", [EDAC_NONE] = "None", [EDAC_RESERVED] = "Reserved", @@ -327,17 +327,17 @@ static struct device_attribute *dynamic_csrow_dimm_attr[] = { }; /* possible dynamic channel ce_count attribute files */ -DEVICE_CHANNEL(ch0_ce_count, S_IRUGO | S_IWUSR, +DEVICE_CHANNEL(ch0_ce_count, S_IRUGO, channel_ce_count_show, NULL, 0); -DEVICE_CHANNEL(ch1_ce_count, S_IRUGO | S_IWUSR, +DEVICE_CHANNEL(ch1_ce_count, S_IRUGO, channel_ce_count_show, NULL, 1); -DEVICE_CHANNEL(ch2_ce_count, S_IRUGO | S_IWUSR, +DEVICE_CHANNEL(ch2_ce_count, S_IRUGO, channel_ce_count_show, NULL, 2); -DEVICE_CHANNEL(ch3_ce_count, S_IRUGO | S_IWUSR, +DEVICE_CHANNEL(ch3_ce_count, S_IRUGO, channel_ce_count_show, NULL, 3); -DEVICE_CHANNEL(ch4_ce_count, S_IRUGO | S_IWUSR, +DEVICE_CHANNEL(ch4_ce_count, S_IRUGO, channel_ce_count_show, NULL, 4); -DEVICE_CHANNEL(ch5_ce_count, S_IRUGO | S_IWUSR, +DEVICE_CHANNEL(ch5_ce_count, S_IRUGO, channel_ce_count_show, NULL, 5); /* Total possible dynamic ce_count attribute file table */ diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig index 5168a13..3297301 100644 --- a/drivers/extcon/Kconfig +++ b/drivers/extcon/Kconfig @@ -16,7 +16,7 @@ comment "Extcon Device Drivers" config EXTCON_GPIO tristate "GPIO extcon support" - depends on GENERIC_GPIO + depends on GPIOLIB help Say Y here to enable GPIO based extcon support. Note that GPIO extcon supports single state per extcon instance. 
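
The .remove changes above (timb_dma, txx9dmac) drop __exit_p(): for built-in code that macro expands to NULL, which would leave the driver without a remove callback when a bound device is unbound through sysfs. A minimal sketch of the effect, assuming the usual <linux/init.h> definition; the driver below is hypothetical.

#include <linux/module.h>
#include <linux/platform_device.h>

/*
 * <linux/init.h> roughly provides:
 *
 *     #ifdef MODULE
 *     #define __exit_p(x)  x       (modular build: callback kept)
 *     #else
 *     #define __exit_p(x)  NULL    (built-in: .remove becomes NULL)
 *     #endif
 *
 * so wrapping .remove in __exit_p() silently disables teardown for
 * built-in drivers.  Referencing the function directly keeps it in
 * both build variants.
 */
static int example_probe(struct platform_device *pdev)
{
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	/* teardown that must also run when the driver is built in */
	return 0;
}

static struct platform_driver example_driver = {
	.driver = {
		.name = "example",
		.owner = THIS_MODULE,
	},
	.probe = example_probe,
	.remove = example_remove,	/* not: __exit_p(example_remove) */
};
module_platform_driver(example_driver);

MODULE_LICENSE("GPL");
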
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index 27ac423..7ef316f 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c @@ -389,10 +389,8 @@ static void queue_bus_reset_event(struct client *client) struct bus_reset_event *e; e = kzalloc(sizeof(*e), GFP_KERNEL); - if (e == NULL) { - fw_notice(client->device->card, "out of memory when allocating event\n"); + if (e == NULL) return; - } fill_bus_reset_event(&e->reset, client); @@ -693,10 +691,9 @@ static void handle_request(struct fw_card *card, struct fw_request *request, r = kmalloc(sizeof(*r), GFP_ATOMIC); e = kmalloc(sizeof(*e), GFP_ATOMIC); - if (r == NULL || e == NULL) { - fw_notice(card, "out of memory when allocating event\n"); + if (r == NULL || e == NULL) goto failed; - } + r->card = card; r->request = request; r->data = payload; @@ -930,10 +927,9 @@ static void iso_callback(struct fw_iso_context *context, u32 cycle, struct iso_interrupt_event *e; e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC); - if (e == NULL) { - fw_notice(context->card, "out of memory when allocating event\n"); + if (e == NULL) return; - } + e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT; e->interrupt.closure = client->iso_closure; e->interrupt.cycle = cycle; @@ -950,10 +946,9 @@ static void iso_mc_callback(struct fw_iso_context *context, struct iso_interrupt_mc_event *e; e = kmalloc(sizeof(*e), GFP_ATOMIC); - if (e == NULL) { - fw_notice(context->card, "out of memory when allocating event\n"); + if (e == NULL) return; - } + e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL; e->interrupt.closure = client->iso_closure; e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer, @@ -1366,8 +1361,7 @@ static int init_iso_resource(struct client *client, int ret; if ((request->channels == 0 && request->bandwidth == 0) || - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL || - request->bandwidth < 0) + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL) return -EINVAL; r = kmalloc(sizeof(*r), GFP_KERNEL); @@ -1582,10 +1576,9 @@ void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p) list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) { e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC); - if (e == NULL) { - fw_notice(card, "out of memory when allocating event\n"); + if (e == NULL) break; - } + e->phy_packet.closure = client->phy_receiver_closure; e->phy_packet.type = FW_CDEV_EVENT_PHY_PACKET_RECEIVED; e->phy_packet.rcode = RCODE_COMPLETE; diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c index 03ce7d9..664a6ff 100644 --- a/drivers/firewire/core-device.c +++ b/drivers/firewire/core-device.c @@ -692,10 +692,8 @@ static void create_units(struct fw_device *device) * match the drivers id_tables against it. 
*/ unit = kzalloc(sizeof(*unit), GFP_KERNEL); - if (unit == NULL) { - fw_err(device->card, "out of memory for unit\n"); + if (unit == NULL) continue; - } unit->directory = ci.p + value - 1; unit->device.bus = &fw_bus_type; diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c index 4d56536..815b0fc 100644 --- a/drivers/firewire/net.c +++ b/drivers/firewire/net.c @@ -356,10 +356,8 @@ static struct fwnet_fragment_info *fwnet_frag_new( } new = kmalloc(sizeof(*new), GFP_ATOMIC); - if (!new) { - dev_err(&pd->skb->dev->dev, "out of memory\n"); + if (!new) return NULL; - } new->offset = offset; new->len = len; @@ -402,8 +400,6 @@ fail_w_fi: fail_w_new: kfree(new); fail: - dev_err(&net->dev, "out of memory\n"); - return NULL; } @@ -609,7 +605,6 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, skb = dev_alloc_skb(len + LL_RESERVED_SPACE(net)); if (unlikely(!skb)) { - dev_err(&net->dev, "out of memory\n"); net->stats.rx_dropped++; return -ENOMEM; diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 45912e6..9e1db64 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -54,6 +54,10 @@ #include "core.h" #include "ohci.h" +#define ohci_info(ohci, f, args...) dev_info(ohci->card.device, f, ##args) +#define ohci_notice(ohci, f, args...) dev_notice(ohci->card.device, f, ##args) +#define ohci_err(ohci, f, args...) dev_err(ohci->card.device, f, ##args) + #define DESCRIPTOR_OUTPUT_MORE 0 #define DESCRIPTOR_OUTPUT_LAST (1 << 12) #define DESCRIPTOR_INPUT_MORE (2 << 12) @@ -68,6 +72,8 @@ #define DESCRIPTOR_BRANCH_ALWAYS (3 << 2) #define DESCRIPTOR_WAIT (3 << 0) +#define DESCRIPTOR_CMD (0xf << 12) + struct descriptor { __le16 req_count; __le16 control; @@ -149,10 +155,11 @@ struct context { struct descriptor *last; /* - * The last descriptor in the DMA program. It contains the branch + * The last descriptor block in the DMA program. It contains the branch * address that must be updated upon appending a new descriptor. */ struct descriptor *prev; + int prev_z; descriptor_callback_t callback; @@ -270,7 +277,9 @@ static char ohci_driver_name[] = KBUILD_MODNAME; #define PCI_DEVICE_ID_TI_TSB12LV22 0x8009 #define PCI_DEVICE_ID_TI_TSB12LV26 0x8020 #define PCI_DEVICE_ID_TI_TSB82AA2 0x8025 +#define PCI_DEVICE_ID_VIA_VT630X 0x3044 #define PCI_VENDOR_ID_PINNACLE_SYSTEMS 0x11bd +#define PCI_REV_ID_VIA_VT6306 0x46 #define QUIRK_CYCLE_TIMER 1 #define QUIRK_RESET_PACKET 2 @@ -278,6 +287,8 @@ static char ohci_driver_name[] = KBUILD_MODNAME; #define QUIRK_NO_1394A 8 #define QUIRK_NO_MSI 16 #define QUIRK_TI_SLLZ059 32 +#define QUIRK_IR_WAKE 64 +#define QUIRK_PHY_LCTRL_TIMEOUT 128 /* In case of multiple matches in ohci_quirks[], only the first one is used. 
*/ static const struct { @@ -290,7 +301,10 @@ static const struct { QUIRK_BE_HEADERS}, {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6, - QUIRK_NO_MSI}, + QUIRK_PHY_LCTRL_TIMEOUT | QUIRK_NO_MSI}, + + {PCI_VENDOR_ID_ATT, PCI_ANY_ID, PCI_ANY_ID, + QUIRK_PHY_LCTRL_TIMEOUT}, {PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID, QUIRK_RESET_PACKET}, @@ -319,6 +333,9 @@ static const struct { {PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID, QUIRK_RESET_PACKET}, + {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT630X, PCI_REV_ID_VIA_VT6306, + QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE}, + {PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID, QUIRK_CYCLE_TIMER | QUIRK_NO_MSI}, }; @@ -333,6 +350,8 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0" ", no 1394a enhancements = " __stringify(QUIRK_NO_1394A) ", disable MSI = " __stringify(QUIRK_NO_MSI) ", TI SLLZ059 erratum = " __stringify(QUIRK_TI_SLLZ059) + ", IR wake unreliable = " __stringify(QUIRK_IR_WAKE) + ", phy LCtrl timeout = " __stringify(QUIRK_PHY_LCTRL_TIMEOUT) ")"); #define OHCI_PARAM_DEBUG_AT_AR 1 @@ -359,8 +378,7 @@ static void log_irqs(struct fw_ohci *ohci, u32 evt) !(evt & OHCI1394_busReset)) return; - dev_notice(ohci->card.device, - "IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt, + ohci_notice(ohci, "IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt, evt & OHCI1394_selfIDComplete ? " selfID" : "", evt & OHCI1394_RQPkt ? " AR_req" : "", evt & OHCI1394_RSPkt ? " AR_resp" : "", @@ -406,21 +424,19 @@ static void log_selfids(struct fw_ohci *ohci, int generation, int self_id_count) if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS))) return; - dev_notice(ohci->card.device, - "%d selfIDs, generation %d, local node ID %04x\n", - self_id_count, generation, ohci->node_id); + ohci_notice(ohci, "%d selfIDs, generation %d, local node ID %04x\n", + self_id_count, generation, ohci->node_id); for (s = ohci->self_id_buffer; self_id_count--; ++s) if ((*s & 1 << 23) == 0) - dev_notice(ohci->card.device, - "selfID 0: %08x, phy %d [%c%c%c] " - "%s gc=%d %s %s%s%s\n", + ohci_notice(ohci, + "selfID 0: %08x, phy %d [%c%c%c] %s gc=%d %s %s%s%s\n", *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2), speed[*s >> 14 & 3], *s >> 16 & 63, power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "", *s >> 11 & 1 ? "c" : "", *s & 2 ? 
"i" : ""); else - dev_notice(ohci->card.device, + ohci_notice(ohci, "selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n", *s, *s >> 24 & 63, _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10), @@ -470,9 +486,8 @@ static void log_ar_at_event(struct fw_ohci *ohci, evt = 0x1f; if (evt == OHCI1394_evt_bus_reset) { - dev_notice(ohci->card.device, - "A%c evt_bus_reset, generation %d\n", - dir, (header[2] >> 16) & 0xff); + ohci_notice(ohci, "A%c evt_bus_reset, generation %d\n", + dir, (header[2] >> 16) & 0xff); return; } @@ -491,32 +506,26 @@ static void log_ar_at_event(struct fw_ohci *ohci, switch (tcode) { case 0xa: - dev_notice(ohci->card.device, - "A%c %s, %s\n", - dir, evts[evt], tcodes[tcode]); + ohci_notice(ohci, "A%c %s, %s\n", + dir, evts[evt], tcodes[tcode]); break; case 0xe: - dev_notice(ohci->card.device, - "A%c %s, PHY %08x %08x\n", - dir, evts[evt], header[1], header[2]); + ohci_notice(ohci, "A%c %s, PHY %08x %08x\n", + dir, evts[evt], header[1], header[2]); break; case 0x0: case 0x1: case 0x4: case 0x5: case 0x9: - dev_notice(ohci->card.device, - "A%c spd %x tl %02x, " - "%04x -> %04x, %s, " - "%s, %04x%08x%s\n", - dir, speed, header[0] >> 10 & 0x3f, - header[1] >> 16, header[0] >> 16, evts[evt], - tcodes[tcode], header[1] & 0xffff, header[2], specific); + ohci_notice(ohci, + "A%c spd %x tl %02x, %04x -> %04x, %s, %s, %04x%08x%s\n", + dir, speed, header[0] >> 10 & 0x3f, + header[1] >> 16, header[0] >> 16, evts[evt], + tcodes[tcode], header[1] & 0xffff, header[2], specific); break; default: - dev_notice(ohci->card.device, - "A%c spd %x tl %02x, " - "%04x -> %04x, %s, " - "%s%s\n", - dir, speed, header[0] >> 10 & 0x3f, - header[1] >> 16, header[0] >> 16, evts[evt], - tcodes[tcode], specific); + ohci_notice(ohci, + "A%c spd %x tl %02x, %04x -> %04x, %s, %s%s\n", + dir, speed, header[0] >> 10 & 0x3f, + header[1] >> 16, header[0] >> 16, evts[evt], + tcodes[tcode], specific); } } @@ -563,7 +572,8 @@ static int read_phy_reg(struct fw_ohci *ohci, int addr) if (i >= 3) msleep(1); } - dev_err(ohci->card.device, "failed to read phy reg\n"); + ohci_err(ohci, "failed to read phy reg %d\n", addr); + dump_stack(); return -EBUSY; } @@ -585,7 +595,8 @@ static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val) if (i >= 3) msleep(1); } - dev_err(ohci->card.device, "failed to write phy reg\n"); + ohci_err(ohci, "failed to write phy reg %d, val %u\n", addr, val); + dump_stack(); return -EBUSY; } @@ -690,8 +701,7 @@ static void ar_context_abort(struct ar_context *ctx, const char *error_msg) reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN); flush_writes(ohci); - dev_err(ohci->card.device, "AR error: %s; DMA stopped\n", - error_msg); + ohci_err(ohci, "AR error: %s; DMA stopped\n", error_msg); } /* FIXME: restart? 
*/ } @@ -1157,6 +1167,7 @@ static int context_init(struct context *ctx, struct fw_ohci *ohci, ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer); ctx->last = ctx->buffer_tail->buffer; ctx->prev = ctx->buffer_tail->buffer; + ctx->prev_z = 1; return 0; } @@ -1221,14 +1232,35 @@ static void context_append(struct context *ctx, { dma_addr_t d_bus; struct descriptor_buffer *desc = ctx->buffer_tail; + struct descriptor *d_branch; d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d); desc->used += (z + extra) * sizeof(*d); wmb(); /* finish init of new descriptors before branch_address update */ - ctx->prev->branch_address = cpu_to_le32(d_bus | z); - ctx->prev = find_branch_descriptor(d, z); + + d_branch = find_branch_descriptor(ctx->prev, ctx->prev_z); + d_branch->branch_address = cpu_to_le32(d_bus | z); + + /* + * VT6306 incorrectly checks only the single descriptor at the + * CommandPtr when the wake bit is written, so if it's a + * multi-descriptor block starting with an INPUT_MORE, put a copy of + * the branch address in the first descriptor. + * + * Not doing this for transmit contexts since not sure how it interacts + * with skip addresses. + */ + if (unlikely(ctx->ohci->quirks & QUIRK_IR_WAKE) && + d_branch != ctx->prev && + (ctx->prev->control & cpu_to_le16(DESCRIPTOR_CMD)) == + cpu_to_le16(DESCRIPTOR_INPUT_MORE)) { + ctx->prev->branch_address = cpu_to_le32(d_bus | z); + } + + ctx->prev = d; + ctx->prev_z = z; } static void context_stop(struct context *ctx) @@ -1248,7 +1280,7 @@ static void context_stop(struct context *ctx) if (i) udelay(10); } - dev_err(ohci->card.device, "DMA context still active (0x%08x)\n", reg); + ohci_err(ohci, "DMA context still active (0x%08x)\n", reg); } struct driver_data { @@ -1557,7 +1589,7 @@ static void handle_local_lock(struct fw_ohci *ohci, goto out; } - dev_err(ohci->card.device, "swap not done (CSR lock timeout)\n"); + ohci_err(ohci, "swap not done (CSR lock timeout)\n"); fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0); out: @@ -1632,8 +1664,7 @@ static void detect_dead_context(struct fw_ohci *ohci, ctl = reg_read(ohci, CONTROL_SET(regs)); if (ctl & CONTEXT_DEAD) - dev_err(ohci->card.device, - "DMA context %s has stopped, error code: %s\n", + ohci_err(ohci, "DMA context %s has stopped, error code: %s\n", name, evts[ctl & 0x1f]); } @@ -1815,8 +1846,8 @@ static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count) reg = reg_read(ohci, OHCI1394_NodeID); if (!(reg & OHCI1394_NodeID_idValid)) { - dev_notice(ohci->card.device, - "node ID not valid, new bus reset in progress\n"); + ohci_notice(ohci, + "node ID not valid, new bus reset in progress\n"); return -EBUSY; } self_id |= ((reg & 0x3f) << 24); /* phy ID */ @@ -1863,12 +1894,12 @@ static void bus_reset_work(struct work_struct *work) reg = reg_read(ohci, OHCI1394_NodeID); if (!(reg & OHCI1394_NodeID_idValid)) { - dev_notice(ohci->card.device, - "node ID not valid, new bus reset in progress\n"); + ohci_notice(ohci, + "node ID not valid, new bus reset in progress\n"); return; } if ((reg & OHCI1394_NodeID_nodeNumber) == 63) { - dev_notice(ohci->card.device, "malconfigured bus\n"); + ohci_notice(ohci, "malconfigured bus\n"); return; } ohci->node_id = reg & (OHCI1394_NodeID_busNumber | @@ -1882,7 +1913,7 @@ static void bus_reset_work(struct work_struct *work) reg = reg_read(ohci, OHCI1394_SelfIDCount); if (reg & OHCI1394_SelfIDCount_selfIDError) { - dev_notice(ohci->card.device, "inconsistent self IDs\n"); + ohci_notice(ohci, "self ID receive error\n"); return; } /* 
@@ -1894,7 +1925,7 @@ static void bus_reset_work(struct work_struct *work) self_id_count = (reg >> 3) & 0xff; if (self_id_count > 252) { - dev_notice(ohci->card.device, "inconsistent self IDs\n"); + ohci_notice(ohci, "bad selfIDSize (%08x)\n", reg); return; } @@ -1902,7 +1933,10 @@ static void bus_reset_work(struct work_struct *work) rmb(); for (i = 1, j = 0; j < self_id_count; i += 2, j++) { - if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) { + u32 id = cond_le32_to_cpu(ohci->self_id_cpu[i]); + u32 id2 = cond_le32_to_cpu(ohci->self_id_cpu[i + 1]); + + if (id != ~id2) { /* * If the invalid data looks like a cycle start packet, * it's likely to be the result of the cycle master @@ -1910,33 +1944,30 @@ static void bus_reset_work(struct work_struct *work) * so far are valid and should be processed so that the * bus manager can then correct the gap count. */ - if (cond_le32_to_cpu(ohci->self_id_cpu[i]) - == 0xffff008f) { - dev_notice(ohci->card.device, - "ignoring spurious self IDs\n"); + if (id == 0xffff008f) { + ohci_notice(ohci, "ignoring spurious self IDs\n"); self_id_count = j; break; - } else { - dev_notice(ohci->card.device, - "inconsistent self IDs\n"); - return; } + + ohci_notice(ohci, "bad self ID %d/%d (%08x != ~%08x)\n", + j, self_id_count, id, id2); + return; } - ohci->self_id_buffer[j] = - cond_le32_to_cpu(ohci->self_id_cpu[i]); + ohci->self_id_buffer[j] = id; } if (ohci->quirks & QUIRK_TI_SLLZ059) { self_id_count = find_and_insert_self_id(ohci, self_id_count); if (self_id_count < 0) { - dev_notice(ohci->card.device, - "could not construct local self ID\n"); + ohci_notice(ohci, + "could not construct local self ID\n"); return; } } if (self_id_count == 0) { - dev_notice(ohci->card.device, "inconsistent self IDs\n"); + ohci_notice(ohci, "no self IDs\n"); return; } rmb(); @@ -1957,8 +1988,7 @@ static void bus_reset_work(struct work_struct *work) new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff; if (new_generation != generation) { - dev_notice(ohci->card.device, - "new bus reset, discarding self ids\n"); + ohci_notice(ohci, "new bus reset, discarding self ids\n"); return; } @@ -2096,7 +2126,7 @@ static irqreturn_t irq_handler(int irq, void *data) } if (unlikely(event & OHCI1394_regAccessFail)) - dev_err(ohci->card.device, "register access failure\n"); + ohci_err(ohci, "register access failure\n"); if (unlikely(event & OHCI1394_postedWriteErr)) { reg_read(ohci, OHCI1394_PostedWriteAddressHi); @@ -2104,13 +2134,12 @@ static irqreturn_t irq_handler(int irq, void *data) reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_postedWriteErr); if (printk_ratelimit()) - dev_err(ohci->card.device, "PCI posted write error\n"); + ohci_err(ohci, "PCI posted write error\n"); } if (unlikely(event & OHCI1394_cycleTooLong)) { if (printk_ratelimit()) - dev_notice(ohci->card.device, - "isochronous cycle too long\n"); + ohci_notice(ohci, "isochronous cycle too long\n"); reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_cycleMaster); } @@ -2123,8 +2152,7 @@ static irqreturn_t irq_handler(int irq, void *data) * them at least two cycles later. (FIXME?) 
*/ if (printk_ratelimit()) - dev_notice(ohci->card.device, - "isochronous cycle inconsistent\n"); + ohci_notice(ohci, "isochronous cycle inconsistent\n"); } if (unlikely(event & OHCI1394_unrecoverableError)) @@ -2246,12 +2274,11 @@ static int ohci_enable(struct fw_card *card, const __be32 *config_rom, size_t length) { struct fw_ohci *ohci = fw_ohci(card); - struct pci_dev *dev = to_pci_dev(card->device); u32 lps, version, irqs; int i, ret; if (software_reset(ohci)) { - dev_err(card->device, "failed to reset ohci card\n"); + ohci_err(ohci, "failed to reset ohci card\n"); return -EBUSY; } @@ -2262,20 +2289,31 @@ static int ohci_enable(struct fw_card *card, * will lock up the machine. Wait 50msec to make sure we have * full link enabled. However, with some cards (well, at least * a JMicron PCIe card), we have to try again sometimes. + * + * TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but + * cannot actually use the phy at that time. These need tens of + * millisecods pause between LPS write and first phy access too. + * + * But do not wait for 50msec on Agere/LSI cards. Their phy + * arbitration state machine may time out during such a long wait. */ + reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS | OHCI1394_HCControl_postedWriteEnable); flush_writes(ohci); - for (lps = 0, i = 0; !lps && i < 3; i++) { + if (!(ohci->quirks & QUIRK_PHY_LCTRL_TIMEOUT)) msleep(50); + + for (lps = 0, i = 0; !lps && i < 150; i++) { + msleep(1); lps = reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_LPS; } if (!lps) { - dev_err(card->device, "failed to set Link Power Status\n"); + ohci_err(ohci, "failed to set Link Power Status\n"); return -EIO; } @@ -2284,7 +2322,7 @@ static int ohci_enable(struct fw_card *card, if (ret < 0) return ret; if (ret) - dev_notice(card->device, "local TSB41BA3D phy\n"); + ohci_notice(ohci, "local TSB41BA3D phy\n"); else ohci->quirks &= ~QUIRK_TI_SLLZ059; } @@ -2382,24 +2420,6 @@ static int ohci_enable(struct fw_card *card, reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000); - if (!(ohci->quirks & QUIRK_NO_MSI)) - pci_enable_msi(dev); - if (request_irq(dev->irq, irq_handler, - pci_dev_msi_enabled(dev) ? 
0 : IRQF_SHARED, - ohci_driver_name, ohci)) { - dev_err(card->device, "failed to allocate interrupt %d\n", - dev->irq); - pci_disable_msi(dev); - - if (config_rom) { - dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, - ohci->next_config_rom, - ohci->next_config_rom_bus); - ohci->next_config_rom = NULL; - } - return -EIO; - } - irqs = OHCI1394_reqTxComplete | OHCI1394_respTxComplete | OHCI1394_RQPkt | OHCI1394_RSPkt | OHCI1394_isochTx | OHCI1394_isochRx | @@ -3578,20 +3598,20 @@ static int pci_probe(struct pci_dev *dev, if (!(pci_resource_flags(dev, 0) & IORESOURCE_MEM) || pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE) { - dev_err(&dev->dev, "invalid MMIO resource\n"); + ohci_err(ohci, "invalid MMIO resource\n"); err = -ENXIO; goto fail_disable; } err = pci_request_region(dev, 0, ohci_driver_name); if (err) { - dev_err(&dev->dev, "MMIO resource unavailable\n"); + ohci_err(ohci, "MMIO resource unavailable\n"); goto fail_disable; } ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE); if (ohci->registers == NULL) { - dev_err(&dev->dev, "failed to remap registers\n"); + ohci_err(ohci, "failed to remap registers\n"); err = -ENXIO; goto fail_iomem; } @@ -3675,19 +3695,33 @@ static int pci_probe(struct pci_dev *dev, guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) | reg_read(ohci, OHCI1394_GUIDLo); + if (!(ohci->quirks & QUIRK_NO_MSI)) + pci_enable_msi(dev); + if (request_irq(dev->irq, irq_handler, + pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, + ohci_driver_name, ohci)) { + ohci_err(ohci, "failed to allocate interrupt %d\n", dev->irq); + err = -EIO; + goto fail_msi; + } + err = fw_card_add(&ohci->card, max_receive, link_speed, guid); if (err) - goto fail_contexts; + goto fail_irq; version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; - dev_notice(&dev->dev, - "added OHCI v%x.%x device as card %d, " - "%d IR + %d IT contexts, quirks 0x%x\n", - version >> 16, version & 0xff, ohci->card.index, - ohci->n_ir, ohci->n_it, ohci->quirks); + ohci_notice(ohci, + "added OHCI v%x.%x device as card %d, " + "%d IR + %d IT contexts, quirks 0x%x\n", + version >> 16, version & 0xff, ohci->card.index, + ohci->n_ir, ohci->n_it, ohci->quirks); return 0; + fail_irq: + free_irq(dev->irq, ohci); + fail_msi: + pci_disable_msi(dev); fail_contexts: kfree(ohci->ir_context_list); kfree(ohci->it_context_list); @@ -3711,19 +3745,21 @@ static int pci_probe(struct pci_dev *dev, kfree(ohci); pmac_ohci_off(dev); fail: - if (err == -ENOMEM) - dev_err(&dev->dev, "out of memory\n"); - return err; } static void pci_remove(struct pci_dev *dev) { - struct fw_ohci *ohci; + struct fw_ohci *ohci = pci_get_drvdata(dev); - ohci = pci_get_drvdata(dev); - reg_write(ohci, OHCI1394_IntMaskClear, ~0); - flush_writes(ohci); + /* + * If the removal is happening from the suspend state, LPS won't be + * enabled and host registers (eg., IntMaskClear) won't be accessible. 
+ */ + if (reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_LPS) { + reg_write(ohci, OHCI1394_IntMaskClear, ~0); + flush_writes(ohci); + } cancel_work_sync(&ohci->bus_reset_work); fw_core_remove_card(&ohci->card); @@ -3766,16 +3802,14 @@ static int pci_suspend(struct pci_dev *dev, pm_message_t state) int err; software_reset(ohci); - free_irq(dev->irq, ohci); - pci_disable_msi(dev); err = pci_save_state(dev); if (err) { - dev_err(&dev->dev, "pci_save_state failed\n"); + ohci_err(ohci, "pci_save_state failed\n"); return err; } err = pci_set_power_state(dev, pci_choose_state(dev, state)); if (err) - dev_err(&dev->dev, "pci_set_power_state failed with %d\n", err); + ohci_err(ohci, "pci_set_power_state failed with %d\n", err); pmac_ohci_off(dev); return 0; @@ -3791,7 +3825,7 @@ static int pci_resume(struct pci_dev *dev) pci_restore_state(dev); err = pci_enable_device(dev); if (err) { - dev_err(&dev->dev, "pci_enable_device failed\n"); + ohci_err(ohci, "pci_enable_device failed\n"); return err; } @@ -3837,6 +3871,4 @@ MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers"); MODULE_LICENSE("GPL"); /* Provide a module alias so root-on-sbp2 initrds don't break. */ -#ifndef CONFIG_IEEE1394_OHCI1394_MODULE MODULE_ALIAS("ohci1394"); -#endif diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index 1162d6b..47674b9 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c @@ -1144,8 +1144,8 @@ static int sbp2_probe(struct device *dev) return -ENODEV; if (dma_get_max_seg_size(device->card->device) > SBP2_MAX_SEG_SIZE) - BUG_ON(dma_set_max_seg_size(device->card->device, - SBP2_MAX_SEG_SIZE)); + WARN_ON(dma_set_max_seg_size(device->card->device, + SBP2_MAX_SEG_SIZE)); shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt)); if (shost == NULL) @@ -1475,10 +1475,8 @@ static int sbp2_scsi_queuecommand(struct Scsi_Host *shost, } orb = kzalloc(sizeof(*orb), GFP_ATOMIC); - if (orb == NULL) { - dev_notice(lu_dev(lu), "failed to alloc ORB\n"); + if (orb == NULL) return SCSI_MLQUEUE_HOST_BUSY; - } /* Initialize rcode to something not RCODE_COMPLETE. */ orb->base.rcode = -1; @@ -1636,9 +1634,7 @@ MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table); /* Provide a module alias so root-on-sbp2 initrds don't break. */ -#ifndef CONFIG_IEEE1394_SBP2_MODULE MODULE_ALIAS("sbp2"); -#endif static int __init sbp2_init(void) { diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index c22eed9..87d5670 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -38,7 +38,6 @@ config GPIO_DEVRES menuconfig GPIOLIB bool "GPIO Support" depends on ARCH_WANT_OPTIONAL_GPIOLIB || ARCH_REQUIRE_GPIOLIB - select GENERIC_GPIO help This enables GPIO support through the generic GPIO library. You only need to enable this, if you also want to enable diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c index dda6a75..90a80eb 100644 --- a/drivers/gpio/gpio-lpc32xx.c +++ b/drivers/gpio/gpio-lpc32xx.c @@ -255,7 +255,7 @@ static int __get_gpo_state_p3(struct lpc32xx_gpio_chip *group, } /* - * GENERIC_GPIO primitives. + * GPIO primitives. */ static int lpc32xx_gpio_dir_input_p012(struct gpio_chip *chip, unsigned pin) diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index adfee98..631736e 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -363,7 +363,7 @@ config I2C_BLACKFIN_TWI_CLK_KHZ config I2C_CBUS_GPIO tristate "CBUS I2C driver" - depends on GENERIC_GPIO + depends on GPIOLIB help Support for CBUS access using I2C API. 
Mostly relevant for Nokia Internet Tablets (770, N800 and N810). @@ -436,7 +436,7 @@ config I2C_EG20T config I2C_GPIO tristate "GPIO-based bitbanging I2C" - depends on GENERIC_GPIO + depends on GPIOLIB select I2C_ALGOBIT help This is a very simple bitbanging I2C driver utilizing the diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig index 5faf244..f7f9865 100644 --- a/drivers/i2c/muxes/Kconfig +++ b/drivers/i2c/muxes/Kconfig @@ -7,7 +7,7 @@ menu "Multiplexer I2C Chip support" config I2C_ARB_GPIO_CHALLENGE tristate "GPIO-based I2C arbitration" - depends on GENERIC_GPIO && OF + depends on GPIOLIB && OF help If you say yes to this option, support will be included for an I2C multimaster arbitration scheme using GPIOs and a challenge & @@ -19,7 +19,7 @@ config I2C_ARB_GPIO_CHALLENGE config I2C_MUX_GPIO tristate "GPIO-based I2C multiplexer" - depends on GENERIC_GPIO + depends on GPIOLIB help If you say yes to this option, support will be included for a GPIO based I2C multiplexer. This driver provides access to diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index 6a195d5..62a2c0e 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -175,7 +175,7 @@ config KEYBOARD_EP93XX config KEYBOARD_GPIO tristate "GPIO Buttons" - depends on GENERIC_GPIO + depends on GPIOLIB help This driver implements support for buttons connected to GPIO pins of various CPUs (and some other chips). @@ -190,7 +190,7 @@ config KEYBOARD_GPIO config KEYBOARD_GPIO_POLLED tristate "Polled GPIO buttons" - depends on GENERIC_GPIO + depends on GPIOLIB select INPUT_POLLDEV help This driver implements support for buttons connected @@ -241,7 +241,7 @@ config KEYBOARD_TCA8418 config KEYBOARD_MATRIX tristate "GPIO driven matrix keypad support" - depends on GENERIC_GPIO + depends on GPIOLIB select INPUT_MATRIXKMAP help Enable support for GPIO driven matrix keypad. diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index af80928..bb698e1 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -214,7 +214,7 @@ config INPUT_APANEL config INPUT_GP2A tristate "Sharp GP2AP002A00F I2C Proximity/Opto sensor driver" depends on I2C - depends on GENERIC_GPIO + depends on GPIOLIB help Say Y here if you have a Sharp GP2AP002A00F proximity/als combo-chip hooked to an I2C bus. @@ -224,7 +224,7 @@ config INPUT_GP2A config INPUT_GPIO_TILT_POLLED tristate "Polled GPIO tilt switch" - depends on GENERIC_GPIO + depends on GPIOLIB select INPUT_POLLDEV help This driver implements support for tilt switches connected @@ -472,7 +472,7 @@ config INPUT_PWM_BEEPER config INPUT_GPIO_ROTARY_ENCODER tristate "Rotary encoders connected to GPIO pins" - depends on GPIOLIB && GENERIC_GPIO + depends on GPIOLIB help Say Y here to add support for rotary encoders connected to GPIO lines. 
Check file:Documentation/input/rotary-encoder.txt for more @@ -484,7 +484,7 @@ config INPUT_GPIO_ROTARY_ENCODER config INPUT_RB532_BUTTON tristate "Mikrotik Routerboard 532 button interface" depends on MIKROTIK_RB532 - depends on GPIOLIB && GENERIC_GPIO + depends on GPIOLIB select INPUT_POLLDEV help Say Y here if you want support for the S1 button built into diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig index 802bd6a..effa9c5 100644 --- a/drivers/input/mouse/Kconfig +++ b/drivers/input/mouse/Kconfig @@ -295,7 +295,7 @@ config MOUSE_VSXXXAA config MOUSE_GPIO tristate "GPIO mouse" - depends on GENERIC_GPIO + depends on GPIOLIB select INPUT_POLLDEV help This driver simulates a mouse on GPIO lines of various CPUs (and some diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index d44806d..ef99229 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -173,7 +173,7 @@ config LEDS_PCA9532_GPIO config LEDS_GPIO tristate "LED Support for GPIO connected LEDs" depends on LEDS_CLASS - depends on GENERIC_GPIO + depends on GPIOLIB help This option enables support for the LEDs connected to GPIO outputs. To be useful the particular board must have LEDs @@ -362,7 +362,7 @@ config LEDS_INTEL_SS4200 config LEDS_LT3593 tristate "LED driver for LT3593 controllers" depends on LEDS_CLASS - depends on GENERIC_GPIO + depends on GPIOLIB help This option enables support for LEDs driven by a Linear Technology LT3593 controller. This controller uses a special one-wire pulse @@ -431,7 +431,7 @@ config LEDS_ASIC3 config LEDS_RENESAS_TPU bool "LED support for Renesas TPU" - depends on LEDS_CLASS=y && HAVE_CLK && GENERIC_GPIO + depends on LEDS_CLASS=y && HAVE_CLK && GPIOLIB help This option enables build of the LED TPU platform driver, suitable to drive any TPU channel on newer Renesas SoCs. diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig index 557bec5..5fab4e6e 100644 --- a/drivers/mtd/Kconfig +++ b/drivers/mtd/Kconfig @@ -157,19 +157,6 @@ config MTD_BCM47XX_PARTS comment "User Modules And Translation Layers" -config MTD_CHAR - tristate "Direct char device access to MTD devices" - help - This provides a character device for each MTD device present in - the system, allowing the user to read and write directly to the - memory chips, and also use ioctl() to obtain information about - the device, or to erase parts of it. - -config HAVE_MTD_OTP - bool - help - Enable access to OTP regions using MTD_CHAR. - config MTD_BLKDEVS tristate "Common interface to block layer for MTD 'translation layers'" depends on BLOCK diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile index 18a38e5..4cfb31e 100644 --- a/drivers/mtd/Makefile +++ b/drivers/mtd/Makefile @@ -4,7 +4,7 @@ # Core functionality. obj-$(CONFIG_MTD) += mtd.o -mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o +mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o mtdchar.o obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o @@ -15,7 +15,6 @@ obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o obj-$(CONFIG_MTD_BCM47XX_PARTS) += bcm47xxpart.o # 'Users' - code which presents functionality to userspace. 
-obj-$(CONFIG_MTD_CHAR) += mtdchar.o obj-$(CONFIG_MTD_BLKDEVS) += mtd_blkdevs.o obj-$(CONFIG_MTD_BLOCK) += mtdblock.o obj-$(CONFIG_MTD_BLOCK_RO) += mtdblock_ro.o diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig index c219e3d..e4696b3 100644 --- a/drivers/mtd/chips/Kconfig +++ b/drivers/mtd/chips/Kconfig @@ -146,7 +146,6 @@ config MTD_CFI_I8 config MTD_OTP bool "Protection Registers aka one-time programmable (OTP) bits" depends on MTD_CFI_ADV_OPTIONS - select HAVE_MTD_OTP default n help This enables support for reading, writing and locking so called diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig index 12311f5..2a4d55e 100644 --- a/drivers/mtd/devices/Kconfig +++ b/drivers/mtd/devices/Kconfig @@ -71,7 +71,6 @@ config MTD_DATAFLASH_WRITE_VERIFY config MTD_DATAFLASH_OTP bool "DataFlash OTP support (Security Register)" depends on MTD_DATAFLASH - select HAVE_MTD_OTP help Newer DataFlash chips (revisions C and D) support 128 bytes of one-time-programmable (OTP) data. The first half may be written @@ -205,69 +204,6 @@ config MTD_BLOCK2MTD comment "Disk-On-Chip Device Drivers" -config MTD_DOC2000 - tristate "M-Systems Disk-On-Chip 2000 and Millennium (DEPRECATED)" - depends on MTD_NAND - select MTD_DOCPROBE - select MTD_NAND_IDS - ---help--- - This provides an MTD device driver for the M-Systems DiskOnChip - 2000 and Millennium devices. Originally designed for the DiskOnChip - 2000, it also now includes support for the DiskOnChip Millennium. - If you have problems with this driver and the DiskOnChip Millennium, - you may wish to try the alternative Millennium driver below. To use - the alternative driver, you will need to undefine DOC_SINGLE_DRIVER - in the source code. - - If you use this device, you probably also want to enable the NFTL - 'NAND Flash Translation Layer' option below, which is used to - emulate a block device by using a kind of file system on the flash - chips. - - NOTE: This driver is deprecated and will probably be removed soon. - Please try the new DiskOnChip driver under "NAND Flash Device - Drivers". - -config MTD_DOC2001 - tristate "M-Systems Disk-On-Chip Millennium-only alternative driver (DEPRECATED)" - depends on MTD_NAND - select MTD_DOCPROBE - select MTD_NAND_IDS - ---help--- - This provides an alternative MTD device driver for the M-Systems - DiskOnChip Millennium devices. Use this if you have problems with - the combined DiskOnChip 2000 and Millennium driver above. To get - the DiskOnChip probe code to load and use this driver instead of - the other one, you will need to undefine DOC_SINGLE_DRIVER near - the beginning of . - - If you use this device, you probably also want to enable the NFTL - 'NAND Flash Translation Layer' option below, which is used to - emulate a block device by using a kind of file system on the flash - chips. - - NOTE: This driver is deprecated and will probably be removed soon. - Please try the new DiskOnChip driver under "NAND Flash Device - Drivers". - -config MTD_DOC2001PLUS - tristate "M-Systems Disk-On-Chip Millennium Plus" - depends on MTD_NAND - select MTD_DOCPROBE - select MTD_NAND_IDS - ---help--- - This provides an MTD device driver for the M-Systems DiskOnChip - Millennium Plus devices. - - If you use this device, you probably also want to enable the INFTL - 'Inverse NAND Flash Translation Layer' option below, which is used - to emulate a block device by using a kind of file system on the - flash chips. 
- - NOTE: This driver will soon be replaced by the new DiskOnChip driver - under "NAND Flash Device Drivers" (currently that driver does not - support all Millennium Plus devices). - config MTD_DOCG3 tristate "M-Systems Disk-On-Chip G3" select BCH diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile index 369a194..d83bd73 100644 --- a/drivers/mtd/devices/Makefile +++ b/drivers/mtd/devices/Makefile @@ -2,12 +2,7 @@ # linux/drivers/mtd/devices/Makefile # -obj-$(CONFIG_MTD_DOC2000) += doc2000.o -obj-$(CONFIG_MTD_DOC2001) += doc2001.o -obj-$(CONFIG_MTD_DOC2001PLUS) += doc2001plus.o obj-$(CONFIG_MTD_DOCG3) += docg3.o -obj-$(CONFIG_MTD_DOCPROBE) += docprobe.o -obj-$(CONFIG_MTD_DOCECC) += docecc.o obj-$(CONFIG_MTD_SLRAM) += slram.o obj-$(CONFIG_MTD_PHRAM) += phram.o obj-$(CONFIG_MTD_PMC551) += pmc551.o diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c index 9526628..18e7761 100644 --- a/drivers/mtd/devices/bcm47xxsflash.c +++ b/drivers/mtd/devices/bcm47xxsflash.c @@ -10,7 +10,7 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Serial flash driver for BCMA bus"); -static const char *probes[] = { "bcm47xxpart", NULL }; +static const char * const probes[] = { "bcm47xxpart", NULL }; static int bcm47xxsflash_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) @@ -61,6 +61,17 @@ static int bcm47xxsflash_bcma_probe(struct platform_device *pdev) } sflash->priv = b47s; + b47s->bcma_cc = container_of(sflash, struct bcma_drv_cc, sflash); + + switch (b47s->bcma_cc->capabilities & BCMA_CC_CAP_FLASHT) { + case BCMA_CC_FLASHT_STSER: + b47s->type = BCM47XXSFLASH_TYPE_ST; + break; + case BCMA_CC_FLASHT_ATSER: + b47s->type = BCM47XXSFLASH_TYPE_ATMEL; + break; + } + b47s->window = sflash->window; b47s->blocksize = sflash->blocksize; b47s->numblocks = sflash->numblocks; diff --git a/drivers/mtd/devices/bcm47xxsflash.h b/drivers/mtd/devices/bcm47xxsflash.h index ebf6f71..f22f8c4 100644 --- a/drivers/mtd/devices/bcm47xxsflash.h +++ b/drivers/mtd/devices/bcm47xxsflash.h @@ -3,7 +3,66 @@ #include +/* Used for ST flashes only. */ +#define OPCODE_ST_WREN 0x0006 /* Write Enable */ +#define OPCODE_ST_WRDIS 0x0004 /* Write Disable */ +#define OPCODE_ST_RDSR 0x0105 /* Read Status Register */ +#define OPCODE_ST_WRSR 0x0101 /* Write Status Register */ +#define OPCODE_ST_READ 0x0303 /* Read Data Bytes */ +#define OPCODE_ST_PP 0x0302 /* Page Program */ +#define OPCODE_ST_SE 0x02d8 /* Sector Erase */ +#define OPCODE_ST_BE 0x00c7 /* Bulk Erase */ +#define OPCODE_ST_DP 0x00b9 /* Deep Power-down */ +#define OPCODE_ST_RES 0x03ab /* Read Electronic Signature */ +#define OPCODE_ST_CSA 0x1000 /* Keep chip select asserted */ +#define OPCODE_ST_SSE 0x0220 /* Sub-sector Erase */ + +/* Used for Atmel flashes only. 
*/ +#define OPCODE_AT_READ 0x07e8 +#define OPCODE_AT_PAGE_READ 0x07d2 +#define OPCODE_AT_STATUS 0x01d7 +#define OPCODE_AT_BUF1_WRITE 0x0384 +#define OPCODE_AT_BUF2_WRITE 0x0387 +#define OPCODE_AT_BUF1_ERASE_PROGRAM 0x0283 +#define OPCODE_AT_BUF2_ERASE_PROGRAM 0x0286 +#define OPCODE_AT_BUF1_PROGRAM 0x0288 +#define OPCODE_AT_BUF2_PROGRAM 0x0289 +#define OPCODE_AT_PAGE_ERASE 0x0281 +#define OPCODE_AT_BLOCK_ERASE 0x0250 +#define OPCODE_AT_BUF1_WRITE_ERASE_PROGRAM 0x0382 +#define OPCODE_AT_BUF2_WRITE_ERASE_PROGRAM 0x0385 +#define OPCODE_AT_BUF1_LOAD 0x0253 +#define OPCODE_AT_BUF2_LOAD 0x0255 +#define OPCODE_AT_BUF1_COMPARE 0x0260 +#define OPCODE_AT_BUF2_COMPARE 0x0261 +#define OPCODE_AT_BUF1_REPROGRAM 0x0258 +#define OPCODE_AT_BUF2_REPROGRAM 0x0259 + +/* Status register bits for ST flashes */ +#define SR_ST_WIP 0x01 /* Write In Progress */ +#define SR_ST_WEL 0x02 /* Write Enable Latch */ +#define SR_ST_BP_MASK 0x1c /* Block Protect */ +#define SR_ST_BP_SHIFT 2 +#define SR_ST_SRWD 0x80 /* Status Register Write Disable */ + +/* Status register bits for Atmel flashes */ +#define SR_AT_READY 0x80 +#define SR_AT_MISMATCH 0x40 +#define SR_AT_ID_MASK 0x38 +#define SR_AT_ID_SHIFT 3 + +struct bcma_drv_cc; + +enum bcm47xxsflash_type { + BCM47XXSFLASH_TYPE_ATMEL, + BCM47XXSFLASH_TYPE_ST, +}; + struct bcm47xxsflash { + struct bcma_drv_cc *bcma_cc; + + enum bcm47xxsflash_type type; + u32 window; u32 blocksize; u16 numblocks; diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c deleted file mode 100644 index a4eb8b5..0000000 --- a/drivers/mtd/devices/doc2000.c +++ /dev/null @@ -1,1178 +0,0 @@ - -/* - * Linux driver for Disk-On-Chip 2000 and Millennium - * (c) 1999 Machine Vision Holdings, Inc. - * (c) 1999, 2000 David Woodhouse - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#define DOC_SUPPORT_2000 -#define DOC_SUPPORT_2000TSOP -#define DOC_SUPPORT_MILLENNIUM - -#ifdef DOC_SUPPORT_2000 -#define DoC_is_2000(doc) (doc->ChipID == DOC_ChipID_Doc2k) -#else -#define DoC_is_2000(doc) (0) -#endif - -#if defined(DOC_SUPPORT_2000TSOP) || defined(DOC_SUPPORT_MILLENNIUM) -#define DoC_is_Millennium(doc) (doc->ChipID == DOC_ChipID_DocMil) -#else -#define DoC_is_Millennium(doc) (0) -#endif - -/* #define ECC_DEBUG */ - -/* I have no idea why some DoC chips can not use memcpy_from|to_io(). - * This may be due to the different revisions of the ASIC controller built-in or - * simplily a QA/Bug issue. Who knows ?? 
If you have trouble, please uncomment - * this: - #undef USE_MEMCPY -*/ - -static int doc_read(struct mtd_info *mtd, loff_t from, size_t len, - size_t *retlen, u_char *buf); -static int doc_write(struct mtd_info *mtd, loff_t to, size_t len, - size_t *retlen, const u_char *buf); -static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, - struct mtd_oob_ops *ops); -static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, - struct mtd_oob_ops *ops); -static int doc_write_oob_nolock(struct mtd_info *mtd, loff_t ofs, size_t len, - size_t *retlen, const u_char *buf); -static int doc_erase (struct mtd_info *mtd, struct erase_info *instr); - -static struct mtd_info *doc2klist = NULL; - -/* Perform the required delay cycles by reading from the appropriate register */ -static void DoC_Delay(struct DiskOnChip *doc, unsigned short cycles) -{ - volatile char dummy; - int i; - - for (i = 0; i < cycles; i++) { - if (DoC_is_Millennium(doc)) - dummy = ReadDOC(doc->virtadr, NOP); - else - dummy = ReadDOC(doc->virtadr, DOCStatus); - } - -} - -/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */ -static int _DoC_WaitReady(struct DiskOnChip *doc) -{ - void __iomem *docptr = doc->virtadr; - unsigned long timeo = jiffies + (HZ * 10); - - pr_debug("_DoC_WaitReady called for out-of-line wait\n"); - - /* Out-of-line routine to wait for chip response */ - while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) { - /* issue 2 read from NOP register after reading from CDSNControl register - see Software Requirement 11.4 item 2. */ - DoC_Delay(doc, 2); - - if (time_after(jiffies, timeo)) { - pr_debug("_DoC_WaitReady timed out.\n"); - return -EIO; - } - udelay(1); - cond_resched(); - } - - return 0; -} - -static inline int DoC_WaitReady(struct DiskOnChip *doc) -{ - void __iomem *docptr = doc->virtadr; - - /* This is inline, to optimise the common case, where it's ready instantly */ - int ret = 0; - - /* 4 read form NOP register should be issued in prior to the read from CDSNControl - see Software Requirement 11.4 item 2. */ - DoC_Delay(doc, 4); - - if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) - /* Call the out-of-line routine to wait */ - ret = _DoC_WaitReady(doc); - - /* issue 2 read from NOP register after reading from CDSNControl register - see Software Requirement 11.4 item 2. */ - DoC_Delay(doc, 2); - - return ret; -} - -/* DoC_Command: Send a flash command to the flash chip through the CDSN Slow IO register to - bypass the internal pipeline. Each of 4 delay cycles (read from the NOP register) is - required after writing to CDSN Control register, see Software Requirement 11.4 item 3. 
*/ - -static int DoC_Command(struct DiskOnChip *doc, unsigned char command, - unsigned char xtraflags) -{ - void __iomem *docptr = doc->virtadr; - - if (DoC_is_2000(doc)) - xtraflags |= CDSN_CTRL_FLASH_IO; - - /* Assert the CLE (Command Latch Enable) line to the flash chip */ - WriteDOC(xtraflags | CDSN_CTRL_CLE | CDSN_CTRL_CE, docptr, CDSNControl); - DoC_Delay(doc, 4); /* Software requirement 11.4.3 for Millennium */ - - if (DoC_is_Millennium(doc)) - WriteDOC(command, docptr, CDSNSlowIO); - - /* Send the command */ - WriteDOC_(command, docptr, doc->ioreg); - if (DoC_is_Millennium(doc)) - WriteDOC(command, docptr, WritePipeTerm); - - /* Lower the CLE line */ - WriteDOC(xtraflags | CDSN_CTRL_CE, docptr, CDSNControl); - DoC_Delay(doc, 4); /* Software requirement 11.4.3 for Millennium */ - - /* Wait for the chip to respond - Software requirement 11.4.1 (extended for any command) */ - return DoC_WaitReady(doc); -} - -/* DoC_Address: Set the current address for the flash chip through the CDSN Slow IO register to - bypass the internal pipeline. Each of 4 delay cycles (read from the NOP register) is - required after writing to CDSN Control register, see Software Requirement 11.4 item 3. */ - -static int DoC_Address(struct DiskOnChip *doc, int numbytes, unsigned long ofs, - unsigned char xtraflags1, unsigned char xtraflags2) -{ - int i; - void __iomem *docptr = doc->virtadr; - - if (DoC_is_2000(doc)) - xtraflags1 |= CDSN_CTRL_FLASH_IO; - - /* Assert the ALE (Address Latch Enable) line to the flash chip */ - WriteDOC(xtraflags1 | CDSN_CTRL_ALE | CDSN_CTRL_CE, docptr, CDSNControl); - - DoC_Delay(doc, 4); /* Software requirement 11.4.3 for Millennium */ - - /* Send the address */ - /* Devices with 256-byte page are addressed as: - Column (bits 0-7), Page (bits 8-15, 16-23, 24-31) - * there is no device on the market with page256 - and more than 24 bits. - Devices with 512-byte page are addressed as: - Column (bits 0-7), Page (bits 9-16, 17-24, 25-31) - * 25-31 is sent only if the chip support it. - * bit 8 changes the read command to be sent - (NAND_CMD_READ0 or NAND_CMD_READ1). - */ - - if (numbytes == ADDR_COLUMN || numbytes == ADDR_COLUMN_PAGE) { - if (DoC_is_Millennium(doc)) - WriteDOC(ofs & 0xff, docptr, CDSNSlowIO); - WriteDOC_(ofs & 0xff, docptr, doc->ioreg); - } - - if (doc->page256) { - ofs = ofs >> 8; - } else { - ofs = ofs >> 9; - } - - if (numbytes == ADDR_PAGE || numbytes == ADDR_COLUMN_PAGE) { - for (i = 0; i < doc->pageadrlen; i++, ofs = ofs >> 8) { - if (DoC_is_Millennium(doc)) - WriteDOC(ofs & 0xff, docptr, CDSNSlowIO); - WriteDOC_(ofs & 0xff, docptr, doc->ioreg); - } - } - - if (DoC_is_Millennium(doc)) - WriteDOC(ofs & 0xff, docptr, WritePipeTerm); - - DoC_Delay(doc, 2); /* Needed for some slow flash chips. mf. */ - - /* FIXME: The SlowIO's for millennium could be replaced by - a single WritePipeTerm here. mf. 
*/ - - /* Lower the ALE line */ - WriteDOC(xtraflags1 | xtraflags2 | CDSN_CTRL_CE, docptr, - CDSNControl); - - DoC_Delay(doc, 4); /* Software requirement 11.4.3 for Millennium */ - - /* Wait for the chip to respond - Software requirement 11.4.1 */ - return DoC_WaitReady(doc); -} - -/* Read a buffer from DoC, taking care of Millennium odditys */ -static void DoC_ReadBuf(struct DiskOnChip *doc, u_char * buf, int len) -{ - volatile int dummy; - int modulus = 0xffff; - void __iomem *docptr = doc->virtadr; - int i; - - if (len <= 0) - return; - - if (DoC_is_Millennium(doc)) { - /* Read the data via the internal pipeline through CDSN IO register, - see Pipelined Read Operations 11.3 */ - dummy = ReadDOC(docptr, ReadPipeInit); - - /* Millennium should use the LastDataRead register - Pipeline Reads */ - len--; - - /* This is needed for correctly ECC calculation */ - modulus = 0xff; - } - - for (i = 0; i < len; i++) - buf[i] = ReadDOC_(docptr, doc->ioreg + (i & modulus)); - - if (DoC_is_Millennium(doc)) { - buf[i] = ReadDOC(docptr, LastDataRead); - } -} - -/* Write a buffer to DoC, taking care of Millennium odditys */ -static void DoC_WriteBuf(struct DiskOnChip *doc, const u_char * buf, int len) -{ - void __iomem *docptr = doc->virtadr; - int i; - - if (len <= 0) - return; - - for (i = 0; i < len; i++) - WriteDOC_(buf[i], docptr, doc->ioreg + i); - - if (DoC_is_Millennium(doc)) { - WriteDOC(0x00, docptr, WritePipeTerm); - } -} - - -/* DoC_SelectChip: Select a given flash chip within the current floor */ - -static inline int DoC_SelectChip(struct DiskOnChip *doc, int chip) -{ - void __iomem *docptr = doc->virtadr; - - /* Software requirement 11.4.4 before writing DeviceSelect */ - /* Deassert the CE line to eliminate glitches on the FCE# outputs */ - WriteDOC(CDSN_CTRL_WP, docptr, CDSNControl); - DoC_Delay(doc, 4); /* Software requirement 11.4.3 for Millennium */ - - /* Select the individual flash chip requested */ - WriteDOC(chip, docptr, CDSNDeviceSelect); - DoC_Delay(doc, 4); - - /* Reassert the CE line */ - WriteDOC(CDSN_CTRL_CE | CDSN_CTRL_FLASH_IO | CDSN_CTRL_WP, docptr, - CDSNControl); - DoC_Delay(doc, 4); /* Software requirement 11.4.3 for Millennium */ - - /* Wait for it to be ready */ - return DoC_WaitReady(doc); -} - -/* DoC_SelectFloor: Select a given floor (bank of flash chips) */ - -static inline int DoC_SelectFloor(struct DiskOnChip *doc, int floor) -{ - void __iomem *docptr = doc->virtadr; - - /* Select the floor (bank) of chips required */ - WriteDOC(floor, docptr, FloorSelect); - - /* Wait for the chip to be ready */ - return DoC_WaitReady(doc); -} - -/* DoC_IdentChip: Identify a given NAND chip given {floor,chip} */ - -static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip) -{ - int mfr, id, i, j; - volatile char dummy; - - /* Page in the required floor/chip */ - DoC_SelectFloor(doc, floor); - DoC_SelectChip(doc, chip); - - /* Reset the chip */ - if (DoC_Command(doc, NAND_CMD_RESET, CDSN_CTRL_WP)) { - pr_debug("DoC_Command (reset) for %d,%d returned true\n", - floor, chip); - return 0; - } - - - /* Read the NAND chip ID: 1. Send ReadID command */ - if (DoC_Command(doc, NAND_CMD_READID, CDSN_CTRL_WP)) { - pr_debug("DoC_Command (ReadID) for %d,%d returned true\n", - floor, chip); - return 0; - } - - /* Read the NAND chip ID: 2. 
Send address byte zero */ - DoC_Address(doc, ADDR_COLUMN, 0, CDSN_CTRL_WP, 0); - - /* Read the manufacturer and device id codes from the device */ - - if (DoC_is_Millennium(doc)) { - DoC_Delay(doc, 2); - dummy = ReadDOC(doc->virtadr, ReadPipeInit); - mfr = ReadDOC(doc->virtadr, LastDataRead); - - DoC_Delay(doc, 2); - dummy = ReadDOC(doc->virtadr, ReadPipeInit); - id = ReadDOC(doc->virtadr, LastDataRead); - } else { - /* CDSN Slow IO register see Software Req 11.4 item 5. */ - dummy = ReadDOC(doc->virtadr, CDSNSlowIO); - DoC_Delay(doc, 2); - mfr = ReadDOC_(doc->virtadr, doc->ioreg); - - /* CDSN Slow IO register see Software Req 11.4 item 5. */ - dummy = ReadDOC(doc->virtadr, CDSNSlowIO); - DoC_Delay(doc, 2); - id = ReadDOC_(doc->virtadr, doc->ioreg); - } - - /* No response - return failure */ - if (mfr == 0xff || mfr == 0) - return 0; - - /* Check it's the same as the first chip we identified. - * M-Systems say that any given DiskOnChip device should only - * contain _one_ type of flash part, although that's not a - * hardware restriction. */ - if (doc->mfr) { - if (doc->mfr == mfr && doc->id == id) - return 1; /* This is the same as the first */ - else - printk(KERN_WARNING - "Flash chip at floor %d, chip %d is different:\n", - floor, chip); - } - - /* Print and store the manufacturer and ID codes. */ - for (i = 0; nand_flash_ids[i].name != NULL; i++) { - if (id == nand_flash_ids[i].id) { - /* Try to identify manufacturer */ - for (j = 0; nand_manuf_ids[j].id != 0x0; j++) { - if (nand_manuf_ids[j].id == mfr) - break; - } - printk(KERN_INFO - "Flash chip found: Manufacturer ID: %2.2X, " - "Chip ID: %2.2X (%s:%s)\n", mfr, id, - nand_manuf_ids[j].name, nand_flash_ids[i].name); - if (!doc->mfr) { - doc->mfr = mfr; - doc->id = id; - doc->chipshift = - ffs((nand_flash_ids[i].chipsize << 20)) - 1; - doc->page256 = (nand_flash_ids[i].pagesize == 256) ? 1 : 0; - doc->pageadrlen = doc->chipshift > 25 ? 3 : 2; - doc->erasesize = - nand_flash_ids[i].erasesize; - return 1; - } - return 0; - } - } - - - /* We haven't fully identified the chip. Print as much as we know. */ - printk(KERN_WARNING "Unknown flash chip found: %2.2X %2.2X\n", - id, mfr); - - printk(KERN_WARNING "Please report to dwmw2@infradead.org\n"); - return 0; -} - -/* DoC_ScanChips: Find all NAND chips present in a DiskOnChip, and identify them */ - -static void DoC_ScanChips(struct DiskOnChip *this, int maxchips) -{ - int floor, chip; - int numchips[MAX_FLOORS]; - int ret = 1; - - this->numchips = 0; - this->mfr = 0; - this->id = 0; - - /* For each floor, find the number of valid chips it contains */ - for (floor = 0; floor < MAX_FLOORS; floor++) { - ret = 1; - numchips[floor] = 0; - for (chip = 0; chip < maxchips && ret != 0; chip++) { - - ret = DoC_IdentChip(this, floor, chip); - if (ret) { - numchips[floor]++; - this->numchips++; - } - } - } - - /* If there are none at all that we recognise, bail */ - if (!this->numchips) { - printk(KERN_NOTICE "No flash chips recognised.\n"); - return; - } - - /* Allocate an array to hold the information for each chip */ - this->chips = kmalloc(sizeof(struct Nand) * this->numchips, GFP_KERNEL); - if (!this->chips) { - printk(KERN_NOTICE "No memory for allocating chip info structures\n"); - return; - } - - ret = 0; - - /* Fill out the chip array with {floor, chipno} for each - * detected chip in the device. 
*/ - for (floor = 0; floor < MAX_FLOORS; floor++) { - for (chip = 0; chip < numchips[floor]; chip++) { - this->chips[ret].floor = floor; - this->chips[ret].chip = chip; - this->chips[ret].curadr = 0; - this->chips[ret].curmode = 0x50; - ret++; - } - } - - /* Calculate and print the total size of the device */ - this->totlen = this->numchips * (1 << this->chipshift); - - printk(KERN_INFO "%d flash chips found. Total DiskOnChip size: %ld MiB\n", - this->numchips, this->totlen >> 20); -} - -static int DoC2k_is_alias(struct DiskOnChip *doc1, struct DiskOnChip *doc2) -{ - int tmp1, tmp2, retval; - if (doc1->physadr == doc2->physadr) - return 1; - - /* Use the alias resolution register which was set aside for this - * purpose. If it's value is the same on both chips, they might - * be the same chip, and we write to one and check for a change in - * the other. It's unclear if this register is usuable in the - * DoC 2000 (it's in the Millennium docs), but it seems to work. */ - tmp1 = ReadDOC(doc1->virtadr, AliasResolution); - tmp2 = ReadDOC(doc2->virtadr, AliasResolution); - if (tmp1 != tmp2) - return 0; - - WriteDOC((tmp1 + 1) % 0xff, doc1->virtadr, AliasResolution); - tmp2 = ReadDOC(doc2->virtadr, AliasResolution); - if (tmp2 == (tmp1 + 1) % 0xff) - retval = 1; - else - retval = 0; - - /* Restore register contents. May not be necessary, but do it just to - * be safe. */ - WriteDOC(tmp1, doc1->virtadr, AliasResolution); - - return retval; -} - -/* This routine is found from the docprobe code by symbol_get(), - * which will bump the use count of this module. */ -void DoC2k_init(struct mtd_info *mtd) -{ - struct DiskOnChip *this = mtd->priv; - struct DiskOnChip *old = NULL; - int maxchips; - - /* We must avoid being called twice for the same device. */ - - if (doc2klist) - old = doc2klist->priv; - - while (old) { - if (DoC2k_is_alias(old, this)) { - printk(KERN_NOTICE - "Ignoring DiskOnChip 2000 at 0x%lX - already configured\n", - this->physadr); - iounmap(this->virtadr); - kfree(mtd); - return; - } - if (old->nextdoc) - old = old->nextdoc->priv; - else - old = NULL; - } - - - switch (this->ChipID) { - case DOC_ChipID_Doc2kTSOP: - mtd->name = "DiskOnChip 2000 TSOP"; - this->ioreg = DoC_Mil_CDSN_IO; - /* Pretend it's a Millennium */ - this->ChipID = DOC_ChipID_DocMil; - maxchips = MAX_CHIPS; - break; - case DOC_ChipID_Doc2k: - mtd->name = "DiskOnChip 2000"; - this->ioreg = DoC_2k_CDSN_IO; - maxchips = MAX_CHIPS; - break; - case DOC_ChipID_DocMil: - mtd->name = "DiskOnChip Millennium"; - this->ioreg = DoC_Mil_CDSN_IO; - maxchips = MAX_CHIPS_MIL; - break; - default: - printk("Unknown ChipID 0x%02x\n", this->ChipID); - kfree(mtd); - iounmap(this->virtadr); - return; - } - - printk(KERN_NOTICE "%s found at address 0x%lX\n", mtd->name, - this->physadr); - - mtd->type = MTD_NANDFLASH; - mtd->flags = MTD_CAP_NANDFLASH; - mtd->writebufsize = mtd->writesize = 512; - mtd->oobsize = 16; - mtd->ecc_strength = 2; - mtd->owner = THIS_MODULE; - mtd->_erase = doc_erase; - mtd->_read = doc_read; - mtd->_write = doc_write; - mtd->_read_oob = doc_read_oob; - mtd->_write_oob = doc_write_oob; - this->curfloor = -1; - this->curchip = -1; - mutex_init(&this->lock); - - /* Ident all the chips present. 
*/ - DoC_ScanChips(this, maxchips); - - if (!this->totlen) { - kfree(mtd); - iounmap(this->virtadr); - } else { - this->nextdoc = doc2klist; - doc2klist = mtd; - mtd->size = this->totlen; - mtd->erasesize = this->erasesize; - mtd_device_register(mtd, NULL, 0); - return; - } -} -EXPORT_SYMBOL_GPL(DoC2k_init); - -static int doc_read(struct mtd_info *mtd, loff_t from, size_t len, - size_t * retlen, u_char * buf) -{ - struct DiskOnChip *this = mtd->priv; - void __iomem *docptr = this->virtadr; - struct Nand *mychip; - unsigned char syndrome[6], eccbuf[6]; - volatile char dummy; - int i, len256 = 0, ret=0; - size_t left = len; - - mutex_lock(&this->lock); - while (left) { - len = left; - - /* Don't allow a single read to cross a 512-byte block boundary */ - if (from + len > ((from | 0x1ff) + 1)) - len = ((from | 0x1ff) + 1) - from; - - /* The ECC will not be calculated correctly if less than 512 is read */ - if (len != 0x200) - printk(KERN_WARNING - "ECC needs a full sector read (adr: %lx size %lx)\n", - (long) from, (long) len); - - /* printk("DoC_Read (adr: %lx size %lx)\n", (long) from, (long) len); */ - - - /* Find the chip which is to be used and select it */ - mychip = &this->chips[from >> (this->chipshift)]; - - if (this->curfloor != mychip->floor) { - DoC_SelectFloor(this, mychip->floor); - DoC_SelectChip(this, mychip->chip); - } else if (this->curchip != mychip->chip) { - DoC_SelectChip(this, mychip->chip); - } - - this->curfloor = mychip->floor; - this->curchip = mychip->chip; - - DoC_Command(this, - (!this->page256 - && (from & 0x100)) ? NAND_CMD_READ1 : NAND_CMD_READ0, - CDSN_CTRL_WP); - DoC_Address(this, ADDR_COLUMN_PAGE, from, CDSN_CTRL_WP, - CDSN_CTRL_ECC_IO); - - /* Prime the ECC engine */ - WriteDOC(DOC_ECC_RESET, docptr, ECCConf); - WriteDOC(DOC_ECC_EN, docptr, ECCConf); - - /* treat crossing 256-byte sector for 2M x 8bits devices */ - if (this->page256 && from + len > (from | 0xff) + 1) { - len256 = (from | 0xff) + 1 - from; - DoC_ReadBuf(this, buf, len256); - - DoC_Command(this, NAND_CMD_READ0, CDSN_CTRL_WP); - DoC_Address(this, ADDR_COLUMN_PAGE, from + len256, - CDSN_CTRL_WP, CDSN_CTRL_ECC_IO); - } - - DoC_ReadBuf(this, &buf[len256], len - len256); - - /* Let the caller know we completed it */ - *retlen += len; - - /* Read the ECC data through the DiskOnChip ECC logic */ - /* Note: this will work even with 2M x 8bit devices as */ - /* they have 8 bytes of OOB per 256 page. mf. */ - DoC_ReadBuf(this, eccbuf, 6); - - /* Flush the pipeline */ - if (DoC_is_Millennium(this)) { - dummy = ReadDOC(docptr, ECCConf); - dummy = ReadDOC(docptr, ECCConf); - i = ReadDOC(docptr, ECCConf); - } else { - dummy = ReadDOC(docptr, 2k_ECCStatus); - dummy = ReadDOC(docptr, 2k_ECCStatus); - i = ReadDOC(docptr, 2k_ECCStatus); - } - - /* Check the ECC Status */ - if (i & 0x80) { - int nb_errors; - /* There was an ECC error */ -#ifdef ECC_DEBUG - printk(KERN_ERR "DiskOnChip ECC Error: Read at %lx\n", (long)from); -#endif - /* Read the ECC syndrome through the DiskOnChip ECC - logic. These syndrome will be all ZERO when there - is no error */ - for (i = 0; i < 6; i++) { - syndrome[i] = - ReadDOC(docptr, ECCSyndrome0 + i); - } - nb_errors = doc_decode_ecc(buf, syndrome); - -#ifdef ECC_DEBUG - printk(KERN_ERR "Errors corrected: %x\n", nb_errors); -#endif - if (nb_errors < 0) { - /* We return error, but have actually done the - read. 
Not that this can be told to - user-space, via sys_read(), but at least - MTD-aware stuff can know about it by - checking *retlen */ - ret = -EIO; - } - } - -#ifdef PSYCHO_DEBUG - printk(KERN_DEBUG "ECC DATA at %lxB: %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n", - (long)from, eccbuf[0], eccbuf[1], eccbuf[2], - eccbuf[3], eccbuf[4], eccbuf[5]); -#endif - - /* disable the ECC engine */ - WriteDOC(DOC_ECC_DIS, docptr , ECCConf); - - /* according to 11.4.1, we need to wait for the busy line - * drop if we read to the end of the page. */ - if(0 == ((from + len) & 0x1ff)) - { - DoC_WaitReady(this); - } - - from += len; - left -= len; - buf += len; - } - - mutex_unlock(&this->lock); - - return ret; -} - -static int doc_write(struct mtd_info *mtd, loff_t to, size_t len, - size_t * retlen, const u_char * buf) -{ - struct DiskOnChip *this = mtd->priv; - int di; /* Yes, DI is a hangover from when I was disassembling the binary driver */ - void __iomem *docptr = this->virtadr; - unsigned char eccbuf[6]; - volatile char dummy; - int len256 = 0; - struct Nand *mychip; - size_t left = len; - int status; - - mutex_lock(&this->lock); - while (left) { - len = left; - - /* Don't allow a single write to cross a 512-byte block boundary */ - if (to + len > ((to | 0x1ff) + 1)) - len = ((to | 0x1ff) + 1) - to; - - /* The ECC will not be calculated correctly if less than 512 is written */ -/* DBB- - if (len != 0x200 && eccbuf) - printk(KERN_WARNING - "ECC needs a full sector write (adr: %lx size %lx)\n", - (long) to, (long) len); - -DBB */ - - /* printk("DoC_Write (adr: %lx size %lx)\n", (long) to, (long) len); */ - - /* Find the chip which is to be used and select it */ - mychip = &this->chips[to >> (this->chipshift)]; - - if (this->curfloor != mychip->floor) { - DoC_SelectFloor(this, mychip->floor); - DoC_SelectChip(this, mychip->chip); - } else if (this->curchip != mychip->chip) { - DoC_SelectChip(this, mychip->chip); - } - - this->curfloor = mychip->floor; - this->curchip = mychip->chip; - - /* Set device to main plane of flash */ - DoC_Command(this, NAND_CMD_RESET, CDSN_CTRL_WP); - DoC_Command(this, - (!this->page256 - && (to & 0x100)) ? 
NAND_CMD_READ1 : NAND_CMD_READ0, - CDSN_CTRL_WP); - - DoC_Command(this, NAND_CMD_SEQIN, 0); - DoC_Address(this, ADDR_COLUMN_PAGE, to, 0, CDSN_CTRL_ECC_IO); - - /* Prime the ECC engine */ - WriteDOC(DOC_ECC_RESET, docptr, ECCConf); - WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf); - - /* treat crossing 256-byte sector for 2M x 8bits devices */ - if (this->page256 && to + len > (to | 0xff) + 1) { - len256 = (to | 0xff) + 1 - to; - DoC_WriteBuf(this, buf, len256); - - DoC_Command(this, NAND_CMD_PAGEPROG, 0); - - DoC_Command(this, NAND_CMD_STATUS, CDSN_CTRL_WP); - /* There's an implicit DoC_WaitReady() in DoC_Command */ - - dummy = ReadDOC(docptr, CDSNSlowIO); - DoC_Delay(this, 2); - - if (ReadDOC_(docptr, this->ioreg) & 1) { - printk(KERN_ERR "Error programming flash\n"); - /* Error in programming */ - *retlen = 0; - mutex_unlock(&this->lock); - return -EIO; - } - - DoC_Command(this, NAND_CMD_SEQIN, 0); - DoC_Address(this, ADDR_COLUMN_PAGE, to + len256, 0, - CDSN_CTRL_ECC_IO); - } - - DoC_WriteBuf(this, &buf[len256], len - len256); - - WriteDOC(CDSN_CTRL_ECC_IO | CDSN_CTRL_CE, docptr, CDSNControl); - - if (DoC_is_Millennium(this)) { - WriteDOC(0, docptr, NOP); - WriteDOC(0, docptr, NOP); - WriteDOC(0, docptr, NOP); - } else { - WriteDOC_(0, docptr, this->ioreg); - WriteDOC_(0, docptr, this->ioreg); - WriteDOC_(0, docptr, this->ioreg); - } - - WriteDOC(CDSN_CTRL_ECC_IO | CDSN_CTRL_FLASH_IO | CDSN_CTRL_CE, docptr, - CDSNControl); - - /* Read the ECC data through the DiskOnChip ECC logic */ - for (di = 0; di < 6; di++) { - eccbuf[di] = ReadDOC(docptr, ECCSyndrome0 + di); - } - - /* Reset the ECC engine */ - WriteDOC(DOC_ECC_DIS, docptr, ECCConf); - -#ifdef PSYCHO_DEBUG - printk - ("OOB data at %lx is %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n", - (long) to, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3], - eccbuf[4], eccbuf[5]); -#endif - DoC_Command(this, NAND_CMD_PAGEPROG, 0); - - DoC_Command(this, NAND_CMD_STATUS, CDSN_CTRL_WP); - /* There's an implicit DoC_WaitReady() in DoC_Command */ - - if (DoC_is_Millennium(this)) { - ReadDOC(docptr, ReadPipeInit); - status = ReadDOC(docptr, LastDataRead); - } else { - dummy = ReadDOC(docptr, CDSNSlowIO); - DoC_Delay(this, 2); - status = ReadDOC_(docptr, this->ioreg); - } - - if (status & 1) { - printk(KERN_ERR "Error programming flash\n"); - /* Error in programming */ - *retlen = 0; - mutex_unlock(&this->lock); - return -EIO; - } - - /* Let the caller know we completed it */ - *retlen += len; - - { - unsigned char x[8]; - size_t dummy; - int ret; - - /* Write the ECC data to flash */ - for (di=0; di<6; di++) - x[di] = eccbuf[di]; - - x[6]=0x55; - x[7]=0x55; - - ret = doc_write_oob_nolock(mtd, to, 8, &dummy, x); - if (ret) { - mutex_unlock(&this->lock); - return ret; - } - } - - to += len; - left -= len; - buf += len; - } - - mutex_unlock(&this->lock); - return 0; -} - -static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, - struct mtd_oob_ops *ops) -{ - struct DiskOnChip *this = mtd->priv; - int len256 = 0, ret; - struct Nand *mychip; - uint8_t *buf = ops->oobbuf; - size_t len = ops->len; - - BUG_ON(ops->mode != MTD_OPS_PLACE_OOB); - - ofs += ops->ooboffs; - - mutex_lock(&this->lock); - - mychip = &this->chips[ofs >> this->chipshift]; - - if (this->curfloor != mychip->floor) { - DoC_SelectFloor(this, mychip->floor); - DoC_SelectChip(this, mychip->chip); - } else if (this->curchip != mychip->chip) { - DoC_SelectChip(this, mychip->chip); - } - this->curfloor = mychip->floor; - this->curchip = mychip->chip; - - /* update address for 2M x 8bit devices. 
OOB starts on the second */ - /* page to maintain compatibility with doc_read_ecc. */ - if (this->page256) { - if (!(ofs & 0x8)) - ofs += 0x100; - else - ofs -= 0x8; - } - - DoC_Command(this, NAND_CMD_READOOB, CDSN_CTRL_WP); - DoC_Address(this, ADDR_COLUMN_PAGE, ofs, CDSN_CTRL_WP, 0); - - /* treat crossing 8-byte OOB data for 2M x 8bit devices */ - /* Note: datasheet says it should automaticaly wrap to the */ - /* next OOB block, but it didn't work here. mf. */ - if (this->page256 && ofs + len > (ofs | 0x7) + 1) { - len256 = (ofs | 0x7) + 1 - ofs; - DoC_ReadBuf(this, buf, len256); - - DoC_Command(this, NAND_CMD_READOOB, CDSN_CTRL_WP); - DoC_Address(this, ADDR_COLUMN_PAGE, ofs & (~0x1ff), - CDSN_CTRL_WP, 0); - } - - DoC_ReadBuf(this, &buf[len256], len - len256); - - ops->retlen = len; - /* Reading the full OOB data drops us off of the end of the page, - * causing the flash device to go into busy mode, so we need - * to wait until ready 11.4.1 and Toshiba TC58256FT docs */ - - ret = DoC_WaitReady(this); - - mutex_unlock(&this->lock); - return ret; - -} - -static int doc_write_oob_nolock(struct mtd_info *mtd, loff_t ofs, size_t len, - size_t * retlen, const u_char * buf) -{ - struct DiskOnChip *this = mtd->priv; - int len256 = 0; - void __iomem *docptr = this->virtadr; - struct Nand *mychip = &this->chips[ofs >> this->chipshift]; - volatile int dummy; - int status; - - // printk("doc_write_oob(%lx, %d): %2.2X %2.2X %2.2X %2.2X ... %2.2X %2.2X .. %2.2X %2.2X\n",(long)ofs, len, - // buf[0], buf[1], buf[2], buf[3], buf[8], buf[9], buf[14],buf[15]); - - /* Find the chip which is to be used and select it */ - if (this->curfloor != mychip->floor) { - DoC_SelectFloor(this, mychip->floor); - DoC_SelectChip(this, mychip->chip); - } else if (this->curchip != mychip->chip) { - DoC_SelectChip(this, mychip->chip); - } - this->curfloor = mychip->floor; - this->curchip = mychip->chip; - - /* disable the ECC engine */ - WriteDOC (DOC_ECC_RESET, docptr, ECCConf); - WriteDOC (DOC_ECC_DIS, docptr, ECCConf); - - /* Reset the chip, see Software Requirement 11.4 item 1. */ - DoC_Command(this, NAND_CMD_RESET, CDSN_CTRL_WP); - - /* issue the Read2 command to set the pointer to the Spare Data Area. */ - DoC_Command(this, NAND_CMD_READOOB, CDSN_CTRL_WP); - - /* update address for 2M x 8bit devices. OOB starts on the second */ - /* page to maintain compatibility with doc_read_ecc. */ - if (this->page256) { - if (!(ofs & 0x8)) - ofs += 0x100; - else - ofs -= 0x8; - } - - /* issue the Serial Data In command to initial the Page Program process */ - DoC_Command(this, NAND_CMD_SEQIN, 0); - DoC_Address(this, ADDR_COLUMN_PAGE, ofs, 0, 0); - - /* treat crossing 8-byte OOB data for 2M x 8bit devices */ - /* Note: datasheet says it should automaticaly wrap to the */ - /* next OOB block, but it didn't work here. mf. 
*/ - if (this->page256 && ofs + len > (ofs | 0x7) + 1) { - len256 = (ofs | 0x7) + 1 - ofs; - DoC_WriteBuf(this, buf, len256); - - DoC_Command(this, NAND_CMD_PAGEPROG, 0); - DoC_Command(this, NAND_CMD_STATUS, 0); - /* DoC_WaitReady() is implicit in DoC_Command */ - - if (DoC_is_Millennium(this)) { - ReadDOC(docptr, ReadPipeInit); - status = ReadDOC(docptr, LastDataRead); - } else { - dummy = ReadDOC(docptr, CDSNSlowIO); - DoC_Delay(this, 2); - status = ReadDOC_(docptr, this->ioreg); - } - - if (status & 1) { - printk(KERN_ERR "Error programming oob data\n"); - /* There was an error */ - *retlen = 0; - return -EIO; - } - DoC_Command(this, NAND_CMD_SEQIN, 0); - DoC_Address(this, ADDR_COLUMN_PAGE, ofs & (~0x1ff), 0, 0); - } - - DoC_WriteBuf(this, &buf[len256], len - len256); - - DoC_Command(this, NAND_CMD_PAGEPROG, 0); - DoC_Command(this, NAND_CMD_STATUS, 0); - /* DoC_WaitReady() is implicit in DoC_Command */ - - if (DoC_is_Millennium(this)) { - ReadDOC(docptr, ReadPipeInit); - status = ReadDOC(docptr, LastDataRead); - } else { - dummy = ReadDOC(docptr, CDSNSlowIO); - DoC_Delay(this, 2); - status = ReadDOC_(docptr, this->ioreg); - } - - if (status & 1) { - printk(KERN_ERR "Error programming oob data\n"); - /* There was an error */ - *retlen = 0; - return -EIO; - } - - *retlen = len; - return 0; - -} - -static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, - struct mtd_oob_ops *ops) -{ - struct DiskOnChip *this = mtd->priv; - int ret; - - BUG_ON(ops->mode != MTD_OPS_PLACE_OOB); - - mutex_lock(&this->lock); - ret = doc_write_oob_nolock(mtd, ofs + ops->ooboffs, ops->len, - &ops->retlen, ops->oobbuf); - - mutex_unlock(&this->lock); - return ret; -} - -static int doc_erase(struct mtd_info *mtd, struct erase_info *instr) -{ - struct DiskOnChip *this = mtd->priv; - __u32 ofs = instr->addr; - __u32 len = instr->len; - volatile int dummy; - void __iomem *docptr = this->virtadr; - struct Nand *mychip; - int status; - - mutex_lock(&this->lock); - - if (ofs & (mtd->erasesize-1) || len & (mtd->erasesize-1)) { - mutex_unlock(&this->lock); - return -EINVAL; - } - - instr->state = MTD_ERASING; - - /* FIXME: Do this in the background. 
Use timers or schedule_task() */ - while(len) { - mychip = &this->chips[ofs >> this->chipshift]; - - if (this->curfloor != mychip->floor) { - DoC_SelectFloor(this, mychip->floor); - DoC_SelectChip(this, mychip->chip); - } else if (this->curchip != mychip->chip) { - DoC_SelectChip(this, mychip->chip); - } - this->curfloor = mychip->floor; - this->curchip = mychip->chip; - - DoC_Command(this, NAND_CMD_ERASE1, 0); - DoC_Address(this, ADDR_PAGE, ofs, 0, 0); - DoC_Command(this, NAND_CMD_ERASE2, 0); - - DoC_Command(this, NAND_CMD_STATUS, CDSN_CTRL_WP); - - if (DoC_is_Millennium(this)) { - ReadDOC(docptr, ReadPipeInit); - status = ReadDOC(docptr, LastDataRead); - } else { - dummy = ReadDOC(docptr, CDSNSlowIO); - DoC_Delay(this, 2); - status = ReadDOC_(docptr, this->ioreg); - } - - if (status & 1) { - printk(KERN_ERR "Error erasing at 0x%x\n", ofs); - /* There was an error */ - instr->state = MTD_ERASE_FAILED; - goto callback; - } - ofs += mtd->erasesize; - len -= mtd->erasesize; - } - instr->state = MTD_ERASE_DONE; - - callback: - mtd_erase_callback(instr); - - mutex_unlock(&this->lock); - return 0; -} - - -/**************************************************************************** - * - * Module stuff - * - ****************************************************************************/ - -static void __exit cleanup_doc2000(void) -{ - struct mtd_info *mtd; - struct DiskOnChip *this; - - while ((mtd = doc2klist)) { - this = mtd->priv; - doc2klist = this->nextdoc; - - mtd_device_unregister(mtd); - - iounmap(this->virtadr); - kfree(this->chips); - kfree(mtd); - } -} - -module_exit(cleanup_doc2000); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("David Woodhouse et al."); -MODULE_DESCRIPTION("MTD driver for DiskOnChip 2000 and Millennium"); - diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c deleted file mode 100644 index f692795..0000000 --- a/drivers/mtd/devices/doc2001.c +++ /dev/null @@ -1,824 +0,0 @@ - -/* - * Linux driver for Disk-On-Chip Millennium - * (c) 1999 Machine Vision Holdings, Inc. - * (c) 1999, 2000 David Woodhouse - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -/* #define ECC_DEBUG */ - -/* I have no idea why some DoC chips can not use memcop_form|to_io(). - * This may be due to the different revisions of the ASIC controller built-in or - * simplily a QA/Bug issue. Who knows ?? 
If you have trouble, please uncomment - * this:*/ -#undef USE_MEMCPY - -static int doc_read(struct mtd_info *mtd, loff_t from, size_t len, - size_t *retlen, u_char *buf); -static int doc_write(struct mtd_info *mtd, loff_t to, size_t len, - size_t *retlen, const u_char *buf); -static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, - struct mtd_oob_ops *ops); -static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, - struct mtd_oob_ops *ops); -static int doc_erase (struct mtd_info *mtd, struct erase_info *instr); - -static struct mtd_info *docmillist = NULL; - -/* Perform the required delay cycles by reading from the NOP register */ -static void DoC_Delay(void __iomem * docptr, unsigned short cycles) -{ - volatile char dummy; - int i; - - for (i = 0; i < cycles; i++) - dummy = ReadDOC(docptr, NOP); -} - -/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */ -static int _DoC_WaitReady(void __iomem * docptr) -{ - unsigned short c = 0xffff; - - pr_debug("_DoC_WaitReady called for out-of-line wait\n"); - - /* Out-of-line routine to wait for chip response */ - while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B) && --c) - ; - - if (c == 0) - pr_debug("_DoC_WaitReady timed out.\n"); - - return (c == 0); -} - -static inline int DoC_WaitReady(void __iomem * docptr) -{ - /* This is inline, to optimise the common case, where it's ready instantly */ - int ret = 0; - - /* 4 read form NOP register should be issued in prior to the read from CDSNControl - see Software Requirement 11.4 item 2. */ - DoC_Delay(docptr, 4); - - if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) - /* Call the out-of-line routine to wait */ - ret = _DoC_WaitReady(docptr); - - /* issue 2 read from NOP register after reading from CDSNControl register - see Software Requirement 11.4 item 2. */ - DoC_Delay(docptr, 2); - - return ret; -} - -/* DoC_Command: Send a flash command to the flash chip through the CDSN IO register - with the internal pipeline. Each of 4 delay cycles (read from the NOP register) is - required after writing to CDSN Control register, see Software Requirement 11.4 item 3. */ - -static void DoC_Command(void __iomem * docptr, unsigned char command, - unsigned char xtraflags) -{ - /* Assert the CLE (Command Latch Enable) line to the flash chip */ - WriteDOC(xtraflags | CDSN_CTRL_CLE | CDSN_CTRL_CE, docptr, CDSNControl); - DoC_Delay(docptr, 4); - - /* Send the command */ - WriteDOC(command, docptr, Mil_CDSN_IO); - WriteDOC(0x00, docptr, WritePipeTerm); - - /* Lower the CLE line */ - WriteDOC(xtraflags | CDSN_CTRL_CE, docptr, CDSNControl); - DoC_Delay(docptr, 4); -} - -/* DoC_Address: Set the current address for the flash chip through the CDSN IO register - with the internal pipeline. Each of 4 delay cycles (read from the NOP register) is - required after writing to CDSN Control register, see Software Requirement 11.4 item 3. */ - -static inline void DoC_Address(void __iomem * docptr, int numbytes, unsigned long ofs, - unsigned char xtraflags1, unsigned char xtraflags2) -{ - /* Assert the ALE (Address Latch Enable) line to the flash chip */ - WriteDOC(xtraflags1 | CDSN_CTRL_ALE | CDSN_CTRL_CE, docptr, CDSNControl); - DoC_Delay(docptr, 4); - - /* Send the address */ - switch (numbytes) - { - case 1: - /* Send single byte, bits 0-7. 
*/ - WriteDOC(ofs & 0xff, docptr, Mil_CDSN_IO); - WriteDOC(0x00, docptr, WritePipeTerm); - break; - case 2: - /* Send bits 9-16 followed by 17-23 */ - WriteDOC((ofs >> 9) & 0xff, docptr, Mil_CDSN_IO); - WriteDOC((ofs >> 17) & 0xff, docptr, Mil_CDSN_IO); - WriteDOC(0x00, docptr, WritePipeTerm); - break; - case 3: - /* Send 0-7, 9-16, then 17-23 */ - WriteDOC(ofs & 0xff, docptr, Mil_CDSN_IO); - WriteDOC((ofs >> 9) & 0xff, docptr, Mil_CDSN_IO); - WriteDOC((ofs >> 17) & 0xff, docptr, Mil_CDSN_IO); - WriteDOC(0x00, docptr, WritePipeTerm); - break; - default: - return; - } - - /* Lower the ALE line */ - WriteDOC(xtraflags1 | xtraflags2 | CDSN_CTRL_CE, docptr, CDSNControl); - DoC_Delay(docptr, 4); -} - -/* DoC_SelectChip: Select a given flash chip within the current floor */ -static int DoC_SelectChip(void __iomem * docptr, int chip) -{ - /* Select the individual flash chip requested */ - WriteDOC(chip, docptr, CDSNDeviceSelect); - DoC_Delay(docptr, 4); - - /* Wait for it to be ready */ - return DoC_WaitReady(docptr); -} - -/* DoC_SelectFloor: Select a given floor (bank of flash chips) */ -static int DoC_SelectFloor(void __iomem * docptr, int floor) -{ - /* Select the floor (bank) of chips required */ - WriteDOC(floor, docptr, FloorSelect); - - /* Wait for the chip to be ready */ - return DoC_WaitReady(docptr); -} - -/* DoC_IdentChip: Identify a given NAND chip given {floor,chip} */ -static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip) -{ - int mfr, id, i, j; - volatile char dummy; - - /* Page in the required floor/chip - FIXME: is this supported by Millennium ?? */ - DoC_SelectFloor(doc->virtadr, floor); - DoC_SelectChip(doc->virtadr, chip); - - /* Reset the chip, see Software Requirement 11.4 item 1. */ - DoC_Command(doc->virtadr, NAND_CMD_RESET, CDSN_CTRL_WP); - DoC_WaitReady(doc->virtadr); - - /* Read the NAND chip ID: 1. Send ReadID command */ - DoC_Command(doc->virtadr, NAND_CMD_READID, CDSN_CTRL_WP); - - /* Read the NAND chip ID: 2. 
Send address byte zero */ - DoC_Address(doc->virtadr, 1, 0x00, CDSN_CTRL_WP, 0x00); - - /* Read the manufacturer and device id codes of the flash device through - CDSN IO register see Software Requirement 11.4 item 5.*/ - dummy = ReadDOC(doc->virtadr, ReadPipeInit); - DoC_Delay(doc->virtadr, 2); - mfr = ReadDOC(doc->virtadr, Mil_CDSN_IO); - - DoC_Delay(doc->virtadr, 2); - id = ReadDOC(doc->virtadr, Mil_CDSN_IO); - dummy = ReadDOC(doc->virtadr, LastDataRead); - - /* No response - return failure */ - if (mfr == 0xff || mfr == 0) - return 0; - - /* FIXME: to deal with multi-flash on multi-Millennium case more carefully */ - for (i = 0; nand_flash_ids[i].name != NULL; i++) { - if ( id == nand_flash_ids[i].id) { - /* Try to identify manufacturer */ - for (j = 0; nand_manuf_ids[j].id != 0x0; j++) { - if (nand_manuf_ids[j].id == mfr) - break; - } - printk(KERN_INFO "Flash chip found: Manufacturer ID: %2.2X, " - "Chip ID: %2.2X (%s:%s)\n", - mfr, id, nand_manuf_ids[j].name, nand_flash_ids[i].name); - doc->mfr = mfr; - doc->id = id; - doc->chipshift = ffs((nand_flash_ids[i].chipsize << 20)) - 1; - break; - } - } - - if (nand_flash_ids[i].name == NULL) - return 0; - else - return 1; -} - -/* DoC_ScanChips: Find all NAND chips present in a DiskOnChip, and identify them */ -static void DoC_ScanChips(struct DiskOnChip *this) -{ - int floor, chip; - int numchips[MAX_FLOORS_MIL]; - int ret; - - this->numchips = 0; - this->mfr = 0; - this->id = 0; - - /* For each floor, find the number of valid chips it contains */ - for (floor = 0,ret = 1; floor < MAX_FLOORS_MIL; floor++) { - numchips[floor] = 0; - for (chip = 0; chip < MAX_CHIPS_MIL && ret != 0; chip++) { - ret = DoC_IdentChip(this, floor, chip); - if (ret) { - numchips[floor]++; - this->numchips++; - } - } - } - /* If there are none at all that we recognise, bail */ - if (!this->numchips) { - printk("No flash chips recognised.\n"); - return; - } - - /* Allocate an array to hold the information for each chip */ - this->chips = kmalloc(sizeof(struct Nand) * this->numchips, GFP_KERNEL); - if (!this->chips){ - printk("No memory for allocating chip info structures\n"); - return; - } - - /* Fill out the chip array with {floor, chipno} for each - * detected chip in the device. */ - for (floor = 0, ret = 0; floor < MAX_FLOORS_MIL; floor++) { - for (chip = 0 ; chip < numchips[floor] ; chip++) { - this->chips[ret].floor = floor; - this->chips[ret].chip = chip; - this->chips[ret].curadr = 0; - this->chips[ret].curmode = 0x50; - ret++; - } - } - - /* Calculate and print the total size of the device */ - this->totlen = this->numchips * (1 << this->chipshift); - printk(KERN_INFO "%d flash chips found. Total DiskOnChip size: %ld MiB\n", - this->numchips ,this->totlen >> 20); -} - -static int DoCMil_is_alias(struct DiskOnChip *doc1, struct DiskOnChip *doc2) -{ - int tmp1, tmp2, retval; - - if (doc1->physadr == doc2->physadr) - return 1; - - /* Use the alias resolution register which was set aside for this - * purpose. If it's value is the same on both chips, they might - * be the same chip, and we write to one and check for a change in - * the other. It's unclear if this register is usuable in the - * DoC 2000 (it's in the Millenium docs), but it seems to work. 
*/ - tmp1 = ReadDOC(doc1->virtadr, AliasResolution); - tmp2 = ReadDOC(doc2->virtadr, AliasResolution); - if (tmp1 != tmp2) - return 0; - - WriteDOC((tmp1+1) % 0xff, doc1->virtadr, AliasResolution); - tmp2 = ReadDOC(doc2->virtadr, AliasResolution); - if (tmp2 == (tmp1+1) % 0xff) - retval = 1; - else - retval = 0; - - /* Restore register contents. May not be necessary, but do it just to - * be safe. */ - WriteDOC(tmp1, doc1->virtadr, AliasResolution); - - return retval; -} - -/* This routine is found from the docprobe code by symbol_get(), - * which will bump the use count of this module. */ -void DoCMil_init(struct mtd_info *mtd) -{ - struct DiskOnChip *this = mtd->priv; - struct DiskOnChip *old = NULL; - - /* We must avoid being called twice for the same device. */ - if (docmillist) - old = docmillist->priv; - - while (old) { - if (DoCMil_is_alias(this, old)) { - printk(KERN_NOTICE "Ignoring DiskOnChip Millennium at " - "0x%lX - already configured\n", this->physadr); - iounmap(this->virtadr); - kfree(mtd); - return; - } - if (old->nextdoc) - old = old->nextdoc->priv; - else - old = NULL; - } - - mtd->name = "DiskOnChip Millennium"; - printk(KERN_NOTICE "DiskOnChip Millennium found at address 0x%lX\n", - this->physadr); - - mtd->type = MTD_NANDFLASH; - mtd->flags = MTD_CAP_NANDFLASH; - - /* FIXME: erase size is not always 8KiB */ - mtd->erasesize = 0x2000; - mtd->writebufsize = mtd->writesize = 512; - mtd->oobsize = 16; - mtd->ecc_strength = 2; - mtd->owner = THIS_MODULE; - mtd->_erase = doc_erase; - mtd->_read = doc_read; - mtd->_write = doc_write; - mtd->_read_oob = doc_read_oob; - mtd->_write_oob = doc_write_oob; - this->curfloor = -1; - this->curchip = -1; - - /* Ident all the chips present. */ - DoC_ScanChips(this); - - if (!this->totlen) { - kfree(mtd); - iounmap(this->virtadr); - } else { - this->nextdoc = docmillist; - docmillist = mtd; - mtd->size = this->totlen; - mtd_device_register(mtd, NULL, 0); - return; - } -} -EXPORT_SYMBOL_GPL(DoCMil_init); - -static int doc_read (struct mtd_info *mtd, loff_t from, size_t len, - size_t *retlen, u_char *buf) -{ - int i, ret; - volatile char dummy; - unsigned char syndrome[6], eccbuf[6]; - struct DiskOnChip *this = mtd->priv; - void __iomem *docptr = this->virtadr; - struct Nand *mychip = &this->chips[from >> (this->chipshift)]; - - /* Don't allow a single read to cross a 512-byte block boundary */ - if (from + len > ((from | 0x1ff) + 1)) - len = ((from | 0x1ff) + 1) - from; - - /* Find the chip which is to be used and select it */ - if (this->curfloor != mychip->floor) { - DoC_SelectFloor(docptr, mychip->floor); - DoC_SelectChip(docptr, mychip->chip); - } else if (this->curchip != mychip->chip) { - DoC_SelectChip(docptr, mychip->chip); - } - this->curfloor = mychip->floor; - this->curchip = mychip->chip; - - /* issue the Read0 or Read1 command depend on which half of the page - we are accessing. Polling the Flash Ready bit after issue 3 bytes - address in Sequence Read Mode, see Software Requirement 11.4 item 1.*/ - DoC_Command(docptr, (from >> 8) & 1, CDSN_CTRL_WP); - DoC_Address(docptr, 3, from, CDSN_CTRL_WP, 0x00); - DoC_WaitReady(docptr); - - /* init the ECC engine, see Reed-Solomon EDC/ECC 11.1 .*/ - WriteDOC (DOC_ECC_RESET, docptr, ECCConf); - WriteDOC (DOC_ECC_EN, docptr, ECCConf); - - /* Read the data via the internal pipeline through CDSN IO register, - see Pipelined Read Operations 11.3 */ - dummy = ReadDOC(docptr, ReadPipeInit); -#ifndef USE_MEMCPY - for (i = 0; i < len-1; i++) { - /* N.B. 
you have to increase the source address in this way or the - ECC logic will not work properly */ - buf[i] = ReadDOC(docptr, Mil_CDSN_IO + (i & 0xff)); - } -#else - memcpy_fromio(buf, docptr + DoC_Mil_CDSN_IO, len - 1); -#endif - buf[len - 1] = ReadDOC(docptr, LastDataRead); - - /* Let the caller know we completed it */ - *retlen = len; - ret = 0; - - /* Read the ECC data from Spare Data Area, - see Reed-Solomon EDC/ECC 11.1 */ - dummy = ReadDOC(docptr, ReadPipeInit); -#ifndef USE_MEMCPY - for (i = 0; i < 5; i++) { - /* N.B. you have to increase the source address in this way or the - ECC logic will not work properly */ - eccbuf[i] = ReadDOC(docptr, Mil_CDSN_IO + i); - } -#else - memcpy_fromio(eccbuf, docptr + DoC_Mil_CDSN_IO, 5); -#endif - eccbuf[5] = ReadDOC(docptr, LastDataRead); - - /* Flush the pipeline */ - dummy = ReadDOC(docptr, ECCConf); - dummy = ReadDOC(docptr, ECCConf); - - /* Check the ECC Status */ - if (ReadDOC(docptr, ECCConf) & 0x80) { - int nb_errors; - /* There was an ECC error */ -#ifdef ECC_DEBUG - printk("DiskOnChip ECC Error: Read at %lx\n", (long)from); -#endif - /* Read the ECC syndrome through the DiskOnChip ECC logic. - These syndrome will be all ZERO when there is no error */ - for (i = 0; i < 6; i++) { - syndrome[i] = ReadDOC(docptr, ECCSyndrome0 + i); - } - nb_errors = doc_decode_ecc(buf, syndrome); -#ifdef ECC_DEBUG - printk("ECC Errors corrected: %x\n", nb_errors); -#endif - if (nb_errors < 0) { - /* We return error, but have actually done the read. Not that - this can be told to user-space, via sys_read(), but at least - MTD-aware stuff can know about it by checking *retlen */ - ret = -EIO; - } - } - -#ifdef PSYCHO_DEBUG - printk("ECC DATA at %lx: %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n", - (long)from, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3], - eccbuf[4], eccbuf[5]); -#endif - - /* disable the ECC engine */ - WriteDOC(DOC_ECC_DIS, docptr , ECCConf); - - return ret; -} - -static int doc_write (struct mtd_info *mtd, loff_t to, size_t len, - size_t *retlen, const u_char *buf) -{ - int i,ret = 0; - char eccbuf[6]; - volatile char dummy; - struct DiskOnChip *this = mtd->priv; - void __iomem *docptr = this->virtadr; - struct Nand *mychip = &this->chips[to >> (this->chipshift)]; - -#if 0 - /* Don't allow a single write to cross a 512-byte block boundary */ - if (to + len > ( (to | 0x1ff) + 1)) - len = ((to | 0x1ff) + 1) - to; -#else - /* Don't allow writes which aren't exactly one block */ - if (to & 0x1ff || len != 0x200) - return -EINVAL; -#endif - - /* Find the chip which is to be used and select it */ - if (this->curfloor != mychip->floor) { - DoC_SelectFloor(docptr, mychip->floor); - DoC_SelectChip(docptr, mychip->chip); - } else if (this->curchip != mychip->chip) { - DoC_SelectChip(docptr, mychip->chip); - } - this->curfloor = mychip->floor; - this->curchip = mychip->chip; - - /* Reset the chip, see Software Requirement 11.4 item 1. 
*/ - DoC_Command(docptr, NAND_CMD_RESET, 0x00); - DoC_WaitReady(docptr); - /* Set device to main plane of flash */ - DoC_Command(docptr, NAND_CMD_READ0, 0x00); - - /* issue the Serial Data In command to initial the Page Program process */ - DoC_Command(docptr, NAND_CMD_SEQIN, 0x00); - DoC_Address(docptr, 3, to, 0x00, 0x00); - DoC_WaitReady(docptr); - - /* init the ECC engine, see Reed-Solomon EDC/ECC 11.1 .*/ - WriteDOC (DOC_ECC_RESET, docptr, ECCConf); - WriteDOC (DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf); - - /* Write the data via the internal pipeline through CDSN IO register, - see Pipelined Write Operations 11.2 */ -#ifndef USE_MEMCPY - for (i = 0; i < len; i++) { - /* N.B. you have to increase the source address in this way or the - ECC logic will not work properly */ - WriteDOC(buf[i], docptr, Mil_CDSN_IO + i); - } -#else - memcpy_toio(docptr + DoC_Mil_CDSN_IO, buf, len); -#endif - WriteDOC(0x00, docptr, WritePipeTerm); - - /* Write ECC data to flash, the ECC info is generated by the DiskOnChip ECC logic - see Reed-Solomon EDC/ECC 11.1 */ - WriteDOC(0, docptr, NOP); - WriteDOC(0, docptr, NOP); - WriteDOC(0, docptr, NOP); - - /* Read the ECC data through the DiskOnChip ECC logic */ - for (i = 0; i < 6; i++) { - eccbuf[i] = ReadDOC(docptr, ECCSyndrome0 + i); - } - - /* ignore the ECC engine */ - WriteDOC(DOC_ECC_DIS, docptr , ECCConf); - -#ifndef USE_MEMCPY - /* Write the ECC data to flash */ - for (i = 0; i < 6; i++) { - /* N.B. you have to increase the source address in this way or the - ECC logic will not work properly */ - WriteDOC(eccbuf[i], docptr, Mil_CDSN_IO + i); - } -#else - memcpy_toio(docptr + DoC_Mil_CDSN_IO, eccbuf, 6); -#endif - - /* write the block status BLOCK_USED (0x5555) at the end of ECC data - FIXME: this is only a hack for programming the IPL area for LinuxBIOS - and should be replace with proper codes in user space utilities */ - WriteDOC(0x55, docptr, Mil_CDSN_IO); - WriteDOC(0x55, docptr, Mil_CDSN_IO + 1); - - WriteDOC(0x00, docptr, WritePipeTerm); - -#ifdef PSYCHO_DEBUG - printk("OOB data at %lx is %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n", - (long) to, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3], - eccbuf[4], eccbuf[5]); -#endif - - /* Commit the Page Program command and wait for ready - see Software Requirement 11.4 item 1.*/ - DoC_Command(docptr, NAND_CMD_PAGEPROG, 0x00); - DoC_WaitReady(docptr); - - /* Read the status of the flash device through CDSN IO register - see Software Requirement 11.4 item 5.*/ - DoC_Command(docptr, NAND_CMD_STATUS, CDSN_CTRL_WP); - dummy = ReadDOC(docptr, ReadPipeInit); - DoC_Delay(docptr, 2); - if (ReadDOC(docptr, Mil_CDSN_IO) & 1) { - printk("Error programming flash\n"); - /* Error in programming - FIXME: implement Bad Block Replacement (in nftl.c ??) 
*/ - ret = -EIO; - } - dummy = ReadDOC(docptr, LastDataRead); - - /* Let the caller know we completed it */ - *retlen = len; - - return ret; -} - -static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, - struct mtd_oob_ops *ops) -{ -#ifndef USE_MEMCPY - int i; -#endif - volatile char dummy; - struct DiskOnChip *this = mtd->priv; - void __iomem *docptr = this->virtadr; - struct Nand *mychip = &this->chips[ofs >> this->chipshift]; - uint8_t *buf = ops->oobbuf; - size_t len = ops->len; - - BUG_ON(ops->mode != MTD_OPS_PLACE_OOB); - - ofs += ops->ooboffs; - - /* Find the chip which is to be used and select it */ - if (this->curfloor != mychip->floor) { - DoC_SelectFloor(docptr, mychip->floor); - DoC_SelectChip(docptr, mychip->chip); - } else if (this->curchip != mychip->chip) { - DoC_SelectChip(docptr, mychip->chip); - } - this->curfloor = mychip->floor; - this->curchip = mychip->chip; - - /* disable the ECC engine */ - WriteDOC (DOC_ECC_RESET, docptr, ECCConf); - WriteDOC (DOC_ECC_DIS, docptr, ECCConf); - - /* issue the Read2 command to set the pointer to the Spare Data Area. - Polling the Flash Ready bit after issue 3 bytes address in - Sequence Read Mode, see Software Requirement 11.4 item 1.*/ - DoC_Command(docptr, NAND_CMD_READOOB, CDSN_CTRL_WP); - DoC_Address(docptr, 3, ofs, CDSN_CTRL_WP, 0x00); - DoC_WaitReady(docptr); - - /* Read the data out via the internal pipeline through CDSN IO register, - see Pipelined Read Operations 11.3 */ - dummy = ReadDOC(docptr, ReadPipeInit); -#ifndef USE_MEMCPY - for (i = 0; i < len-1; i++) { - /* N.B. you have to increase the source address in this way or the - ECC logic will not work properly */ - buf[i] = ReadDOC(docptr, Mil_CDSN_IO + i); - } -#else - memcpy_fromio(buf, docptr + DoC_Mil_CDSN_IO, len - 1); -#endif - buf[len - 1] = ReadDOC(docptr, LastDataRead); - - ops->retlen = len; - - return 0; -} - -static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, - struct mtd_oob_ops *ops) -{ -#ifndef USE_MEMCPY - int i; -#endif - volatile char dummy; - int ret = 0; - struct DiskOnChip *this = mtd->priv; - void __iomem *docptr = this->virtadr; - struct Nand *mychip = &this->chips[ofs >> this->chipshift]; - uint8_t *buf = ops->oobbuf; - size_t len = ops->len; - - BUG_ON(ops->mode != MTD_OPS_PLACE_OOB); - - ofs += ops->ooboffs; - - /* Find the chip which is to be used and select it */ - if (this->curfloor != mychip->floor) { - DoC_SelectFloor(docptr, mychip->floor); - DoC_SelectChip(docptr, mychip->chip); - } else if (this->curchip != mychip->chip) { - DoC_SelectChip(docptr, mychip->chip); - } - this->curfloor = mychip->floor; - this->curchip = mychip->chip; - - /* disable the ECC engine */ - WriteDOC (DOC_ECC_RESET, docptr, ECCConf); - WriteDOC (DOC_ECC_DIS, docptr, ECCConf); - - /* Reset the chip, see Software Requirement 11.4 item 1. */ - DoC_Command(docptr, NAND_CMD_RESET, CDSN_CTRL_WP); - DoC_WaitReady(docptr); - /* issue the Read2 command to set the pointer to the Spare Data Area. */ - DoC_Command(docptr, NAND_CMD_READOOB, CDSN_CTRL_WP); - - /* issue the Serial Data In command to initial the Page Program process */ - DoC_Command(docptr, NAND_CMD_SEQIN, 0x00); - DoC_Address(docptr, 3, ofs, 0x00, 0x00); - - /* Write the data via the internal pipeline through CDSN IO register, - see Pipelined Write Operations 11.2 */ -#ifndef USE_MEMCPY - for (i = 0; i < len; i++) { - /* N.B. 
you have to increase the source address in this way or the - ECC logic will not work properly */ - WriteDOC(buf[i], docptr, Mil_CDSN_IO + i); - } -#else - memcpy_toio(docptr + DoC_Mil_CDSN_IO, buf, len); -#endif - WriteDOC(0x00, docptr, WritePipeTerm); - - /* Commit the Page Program command and wait for ready - see Software Requirement 11.4 item 1.*/ - DoC_Command(docptr, NAND_CMD_PAGEPROG, 0x00); - DoC_WaitReady(docptr); - - /* Read the status of the flash device through CDSN IO register - see Software Requirement 11.4 item 5.*/ - DoC_Command(docptr, NAND_CMD_STATUS, 0x00); - dummy = ReadDOC(docptr, ReadPipeInit); - DoC_Delay(docptr, 2); - if (ReadDOC(docptr, Mil_CDSN_IO) & 1) { - printk("Error programming oob data\n"); - /* FIXME: implement Bad Block Replacement (in nftl.c ??) */ - ops->retlen = 0; - ret = -EIO; - } - dummy = ReadDOC(docptr, LastDataRead); - - ops->retlen = len; - - return ret; -} - -int doc_erase (struct mtd_info *mtd, struct erase_info *instr) -{ - volatile char dummy; - struct DiskOnChip *this = mtd->priv; - __u32 ofs = instr->addr; - __u32 len = instr->len; - void __iomem *docptr = this->virtadr; - struct Nand *mychip = &this->chips[ofs >> this->chipshift]; - - if (len != mtd->erasesize) - printk(KERN_WARNING "Erase not right size (%x != %x)n", - len, mtd->erasesize); - - /* Find the chip which is to be used and select it */ - if (this->curfloor != mychip->floor) { - DoC_SelectFloor(docptr, mychip->floor); - DoC_SelectChip(docptr, mychip->chip); - } else if (this->curchip != mychip->chip) { - DoC_SelectChip(docptr, mychip->chip); - } - this->curfloor = mychip->floor; - this->curchip = mychip->chip; - - instr->state = MTD_ERASE_PENDING; - - /* issue the Erase Setup command */ - DoC_Command(docptr, NAND_CMD_ERASE1, 0x00); - DoC_Address(docptr, 2, ofs, 0x00, 0x00); - - /* Commit the Erase Start command and wait for ready - see Software Requirement 11.4 item 1.*/ - DoC_Command(docptr, NAND_CMD_ERASE2, 0x00); - DoC_WaitReady(docptr); - - instr->state = MTD_ERASING; - - /* Read the status of the flash device through CDSN IO register - see Software Requirement 11.4 item 5. - FIXME: it seems that we are not wait long enough, some blocks are not - erased fully */ - DoC_Command(docptr, NAND_CMD_STATUS, CDSN_CTRL_WP); - dummy = ReadDOC(docptr, ReadPipeInit); - DoC_Delay(docptr, 2); - if (ReadDOC(docptr, Mil_CDSN_IO) & 1) { - printk("Error Erasing at 0x%x\n", ofs); - /* There was an error - FIXME: implement Bad Block Replacement (in nftl.c ??) 
*/ - instr->state = MTD_ERASE_FAILED; - } else - instr->state = MTD_ERASE_DONE; - dummy = ReadDOC(docptr, LastDataRead); - - mtd_erase_callback(instr); - - return 0; -} - -/**************************************************************************** - * - * Module stuff - * - ****************************************************************************/ - -static void __exit cleanup_doc2001(void) -{ - struct mtd_info *mtd; - struct DiskOnChip *this; - - while ((mtd=docmillist)) { - this = mtd->priv; - docmillist = this->nextdoc; - - mtd_device_unregister(mtd); - - iounmap(this->virtadr); - kfree(this->chips); - kfree(mtd); - } -} - -module_exit(cleanup_doc2001); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("David Woodhouse et al."); -MODULE_DESCRIPTION("Alternative driver for DiskOnChip Millennium"); diff --git a/drivers/mtd/devices/doc2001plus.c b/drivers/mtd/devices/doc2001plus.c deleted file mode 100644 index 4f2220a..0000000 --- a/drivers/mtd/devices/doc2001plus.c +++ /dev/null @@ -1,1080 +0,0 @@ -/* - * Linux driver for Disk-On-Chip Millennium Plus - * - * (c) 2002-2003 Greg Ungerer - * (c) 2002-2003 SnapGear Inc - * (c) 1999 Machine Vision Holdings, Inc. - * (c) 1999, 2000 David Woodhouse - * - * Released under GPL - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -/* #define ECC_DEBUG */ - -/* I have no idea why some DoC chips can not use memcop_form|to_io(). - * This may be due to the different revisions of the ASIC controller built-in or - * simplily a QA/Bug issue. Who knows ?? If you have trouble, please uncomment - * this:*/ -#undef USE_MEMCPY - -static int doc_read(struct mtd_info *mtd, loff_t from, size_t len, - size_t *retlen, u_char *buf); -static int doc_write(struct mtd_info *mtd, loff_t to, size_t len, - size_t *retlen, const u_char *buf); -static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, - struct mtd_oob_ops *ops); -static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, - struct mtd_oob_ops *ops); -static int doc_erase (struct mtd_info *mtd, struct erase_info *instr); - -static struct mtd_info *docmilpluslist = NULL; - - -/* Perform the required delay cycles by writing to the NOP register */ -static void DoC_Delay(void __iomem * docptr, int cycles) -{ - int i; - - for (i = 0; (i < cycles); i++) - WriteDOC(0, docptr, Mplus_NOP); -} - -#define CDSN_CTRL_FR_B_MASK (CDSN_CTRL_FR_B0 | CDSN_CTRL_FR_B1) - -/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */ -static int _DoC_WaitReady(void __iomem * docptr) -{ - unsigned int c = 0xffff; - - pr_debug("_DoC_WaitReady called for out-of-line wait\n"); - - /* Out-of-line routine to wait for chip response */ - while (((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) && --c) - ; - - if (c == 0) - pr_debug("_DoC_WaitReady timed out.\n"); - - return (c == 0); -} - -static inline int DoC_WaitReady(void __iomem * docptr) -{ - /* This is inline, to optimise the common case, where it's ready instantly */ - int ret = 0; - - /* read form NOP register should be issued prior to the read from CDSNControl - see Software Requirement 11.4 item 2. */ - DoC_Delay(docptr, 4); - - if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) - /* Call the out-of-line routine to wait */ - ret = _DoC_WaitReady(docptr); - - return ret; -} - -/* For some reason the Millennium Plus seems to occasionally put itself - * into reset mode. 
For me this happens randomly, with no pattern that I - * can detect. M-systems suggest always check this on any block level - * operation and setting to normal mode if in reset mode. - */ -static inline void DoC_CheckASIC(void __iomem * docptr) -{ - /* Make sure the DoC is in normal mode */ - if ((ReadDOC(docptr, Mplus_DOCControl) & DOC_MODE_NORMAL) == 0) { - WriteDOC((DOC_MODE_NORMAL | DOC_MODE_MDWREN), docptr, Mplus_DOCControl); - WriteDOC(~(DOC_MODE_NORMAL | DOC_MODE_MDWREN), docptr, Mplus_CtrlConfirm); - } -} - -/* DoC_Command: Send a flash command to the flash chip through the Flash - * command register. Need 2 Write Pipeline Terminates to complete send. - */ -static void DoC_Command(void __iomem * docptr, unsigned char command, - unsigned char xtraflags) -{ - WriteDOC(command, docptr, Mplus_FlashCmd); - WriteDOC(command, docptr, Mplus_WritePipeTerm); - WriteDOC(command, docptr, Mplus_WritePipeTerm); -} - -/* DoC_Address: Set the current address for the flash chip through the Flash - * Address register. Need 2 Write Pipeline Terminates to complete send. - */ -static inline void DoC_Address(struct DiskOnChip *doc, int numbytes, - unsigned long ofs, unsigned char xtraflags1, - unsigned char xtraflags2) -{ - void __iomem * docptr = doc->virtadr; - - /* Allow for possible Mill Plus internal flash interleaving */ - ofs >>= doc->interleave; - - switch (numbytes) { - case 1: - /* Send single byte, bits 0-7. */ - WriteDOC(ofs & 0xff, docptr, Mplus_FlashAddress); - break; - case 2: - /* Send bits 9-16 followed by 17-23 */ - WriteDOC((ofs >> 9) & 0xff, docptr, Mplus_FlashAddress); - WriteDOC((ofs >> 17) & 0xff, docptr, Mplus_FlashAddress); - break; - case 3: - /* Send 0-7, 9-16, then 17-23 */ - WriteDOC(ofs & 0xff, docptr, Mplus_FlashAddress); - WriteDOC((ofs >> 9) & 0xff, docptr, Mplus_FlashAddress); - WriteDOC((ofs >> 17) & 0xff, docptr, Mplus_FlashAddress); - break; - default: - return; - } - - WriteDOC(0x00, docptr, Mplus_WritePipeTerm); - WriteDOC(0x00, docptr, Mplus_WritePipeTerm); -} - -/* DoC_SelectChip: Select a given flash chip within the current floor */ -static int DoC_SelectChip(void __iomem * docptr, int chip) -{ - /* No choice for flash chip on Millennium Plus */ - return 0; -} - -/* DoC_SelectFloor: Select a given floor (bank of flash chips) */ -static int DoC_SelectFloor(void __iomem * docptr, int floor) -{ - WriteDOC((floor & 0x3), docptr, Mplus_DeviceSelect); - return 0; -} - -/* - * Translate the given offset into the appropriate command and offset. - * This does the mapping using the 16bit interleave layout defined by - * M-Systems, and looks like this for a sector pair: - * +-----------+-------+-------+-------+--------------+---------+-----------+ - * | 0 --- 511 |512-517|518-519|520-521| 522 --- 1033 |1034-1039|1040 - 1055| - * +-----------+-------+-------+-------+--------------+---------+-----------+ - * | Data 0 | ECC 0 |Flags0 |Flags1 | Data 1 |ECC 1 | OOB 1 + 2 | - * +-----------+-------+-------+-------+--------------+---------+-----------+ - */ -/* FIXME: This lives in INFTL not here. 
Other users of flash devices - may not want it */ -static unsigned int DoC_GetDataOffset(struct mtd_info *mtd, loff_t *from) -{ - struct DiskOnChip *this = mtd->priv; - - if (this->interleave) { - unsigned int ofs = *from & 0x3ff; - unsigned int cmd; - - if (ofs < 512) { - cmd = NAND_CMD_READ0; - ofs &= 0x1ff; - } else if (ofs < 1014) { - cmd = NAND_CMD_READ1; - ofs = (ofs & 0x1ff) + 10; - } else { - cmd = NAND_CMD_READOOB; - ofs = ofs - 1014; - } - - *from = (*from & ~0x3ff) | ofs; - return cmd; - } else { - /* No interleave */ - if ((*from) & 0x100) - return NAND_CMD_READ1; - return NAND_CMD_READ0; - } -} - -static unsigned int DoC_GetECCOffset(struct mtd_info *mtd, loff_t *from) -{ - unsigned int ofs, cmd; - - if (*from & 0x200) { - cmd = NAND_CMD_READOOB; - ofs = 10 + (*from & 0xf); - } else { - cmd = NAND_CMD_READ1; - ofs = (*from & 0xf); - } - - *from = (*from & ~0x3ff) | ofs; - return cmd; -} - -static unsigned int DoC_GetFlagsOffset(struct mtd_info *mtd, loff_t *from) -{ - unsigned int ofs, cmd; - - cmd = NAND_CMD_READ1; - ofs = (*from & 0x200) ? 8 : 6; - *from = (*from & ~0x3ff) | ofs; - return cmd; -} - -static unsigned int DoC_GetHdrOffset(struct mtd_info *mtd, loff_t *from) -{ - unsigned int ofs, cmd; - - cmd = NAND_CMD_READOOB; - ofs = (*from & 0x200) ? 24 : 16; - *from = (*from & ~0x3ff) | ofs; - return cmd; -} - -static inline void MemReadDOC(void __iomem * docptr, unsigned char *buf, int len) -{ -#ifndef USE_MEMCPY - int i; - for (i = 0; i < len; i++) - buf[i] = ReadDOC(docptr, Mil_CDSN_IO + i); -#else - memcpy_fromio(buf, docptr + DoC_Mil_CDSN_IO, len); -#endif -} - -static inline void MemWriteDOC(void __iomem * docptr, unsigned char *buf, int len) -{ -#ifndef USE_MEMCPY - int i; - for (i = 0; i < len; i++) - WriteDOC(buf[i], docptr, Mil_CDSN_IO + i); -#else - memcpy_toio(docptr + DoC_Mil_CDSN_IO, buf, len); -#endif -} - -/* DoC_IdentChip: Identify a given NAND chip given {floor,chip} */ -static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip) -{ - int mfr, id, i, j; - volatile char dummy; - void __iomem * docptr = doc->virtadr; - - /* Page in the required floor/chip */ - DoC_SelectFloor(docptr, floor); - DoC_SelectChip(docptr, chip); - - /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */ - WriteDOC((DOC_FLASH_CE | DOC_FLASH_WP), docptr, Mplus_FlashSelect); - - /* Reset the chip, see Software Requirement 11.4 item 1. */ - DoC_Command(docptr, NAND_CMD_RESET, 0); - DoC_WaitReady(docptr); - - /* Read the NAND chip ID: 1. Send ReadID command */ - DoC_Command(docptr, NAND_CMD_READID, 0); - - /* Read the NAND chip ID: 2. 
Send address byte zero */ - DoC_Address(doc, 1, 0x00, 0, 0x00); - - WriteDOC(0, docptr, Mplus_FlashControl); - DoC_WaitReady(docptr); - - /* Read the manufacturer and device id codes of the flash device through - CDSN IO register see Software Requirement 11.4 item 5.*/ - dummy = ReadDOC(docptr, Mplus_ReadPipeInit); - dummy = ReadDOC(docptr, Mplus_ReadPipeInit); - - mfr = ReadDOC(docptr, Mil_CDSN_IO); - if (doc->interleave) - dummy = ReadDOC(docptr, Mil_CDSN_IO); /* 2 way interleave */ - - id = ReadDOC(docptr, Mil_CDSN_IO); - if (doc->interleave) - dummy = ReadDOC(docptr, Mil_CDSN_IO); /* 2 way interleave */ - - dummy = ReadDOC(docptr, Mplus_LastDataRead); - dummy = ReadDOC(docptr, Mplus_LastDataRead); - - /* Disable flash internally */ - WriteDOC(0, docptr, Mplus_FlashSelect); - - /* No response - return failure */ - if (mfr == 0xff || mfr == 0) - return 0; - - for (i = 0; nand_flash_ids[i].name != NULL; i++) { - if (id == nand_flash_ids[i].id) { - /* Try to identify manufacturer */ - for (j = 0; nand_manuf_ids[j].id != 0x0; j++) { - if (nand_manuf_ids[j].id == mfr) - break; - } - printk(KERN_INFO "Flash chip found: Manufacturer ID: %2.2X, " - "Chip ID: %2.2X (%s:%s)\n", mfr, id, - nand_manuf_ids[j].name, nand_flash_ids[i].name); - doc->mfr = mfr; - doc->id = id; - doc->chipshift = ffs((nand_flash_ids[i].chipsize << 20)) - 1; - doc->erasesize = nand_flash_ids[i].erasesize << doc->interleave; - break; - } - } - - if (nand_flash_ids[i].name == NULL) - return 0; - return 1; -} - -/* DoC_ScanChips: Find all NAND chips present in a DiskOnChip, and identify them */ -static void DoC_ScanChips(struct DiskOnChip *this) -{ - int floor, chip; - int numchips[MAX_FLOORS_MPLUS]; - int ret; - - this->numchips = 0; - this->mfr = 0; - this->id = 0; - - /* Work out the intended interleave setting */ - this->interleave = 0; - if (this->ChipID == DOC_ChipID_DocMilPlus32) - this->interleave = 1; - - /* Check the ASIC agrees */ - if ( (this->interleave << 2) != - (ReadDOC(this->virtadr, Mplus_Configuration) & 4)) { - u_char conf = ReadDOC(this->virtadr, Mplus_Configuration); - printk(KERN_NOTICE "Setting DiskOnChip Millennium Plus interleave to %s\n", - this->interleave?"on (16-bit)":"off (8-bit)"); - conf ^= 4; - WriteDOC(conf, this->virtadr, Mplus_Configuration); - } - - /* For each floor, find the number of valid chips it contains */ - for (floor = 0,ret = 1; floor < MAX_FLOORS_MPLUS; floor++) { - numchips[floor] = 0; - for (chip = 0; chip < MAX_CHIPS_MPLUS && ret != 0; chip++) { - ret = DoC_IdentChip(this, floor, chip); - if (ret) { - numchips[floor]++; - this->numchips++; - } - } - } - /* If there are none at all that we recognise, bail */ - if (!this->numchips) { - printk("No flash chips recognised.\n"); - return; - } - - /* Allocate an array to hold the information for each chip */ - this->chips = kmalloc(sizeof(struct Nand) * this->numchips, GFP_KERNEL); - if (!this->chips){ - printk("MTD: No memory for allocating chip info structures\n"); - return; - } - - /* Fill out the chip array with {floor, chipno} for each - * detected chip in the device. */ - for (floor = 0, ret = 0; floor < MAX_FLOORS_MPLUS; floor++) { - for (chip = 0 ; chip < numchips[floor] ; chip++) { - this->chips[ret].floor = floor; - this->chips[ret].chip = chip; - this->chips[ret].curadr = 0; - this->chips[ret].curmode = 0x50; - ret++; - } - } - - /* Calculate and print the total size of the device */ - this->totlen = this->numchips * (1 << this->chipshift); - printk(KERN_INFO "%d flash chips found. 
Total DiskOnChip size: %ld MiB\n", - this->numchips ,this->totlen >> 20); -} - -static int DoCMilPlus_is_alias(struct DiskOnChip *doc1, struct DiskOnChip *doc2) -{ - int tmp1, tmp2, retval; - - if (doc1->physadr == doc2->physadr) - return 1; - - /* Use the alias resolution register which was set aside for this - * purpose. If it's value is the same on both chips, they might - * be the same chip, and we write to one and check for a change in - * the other. It's unclear if this register is usuable in the - * DoC 2000 (it's in the Millennium docs), but it seems to work. */ - tmp1 = ReadDOC(doc1->virtadr, Mplus_AliasResolution); - tmp2 = ReadDOC(doc2->virtadr, Mplus_AliasResolution); - if (tmp1 != tmp2) - return 0; - - WriteDOC((tmp1+1) % 0xff, doc1->virtadr, Mplus_AliasResolution); - tmp2 = ReadDOC(doc2->virtadr, Mplus_AliasResolution); - if (tmp2 == (tmp1+1) % 0xff) - retval = 1; - else - retval = 0; - - /* Restore register contents. May not be necessary, but do it just to - * be safe. */ - WriteDOC(tmp1, doc1->virtadr, Mplus_AliasResolution); - - return retval; -} - -/* This routine is found from the docprobe code by symbol_get(), - * which will bump the use count of this module. */ -void DoCMilPlus_init(struct mtd_info *mtd) -{ - struct DiskOnChip *this = mtd->priv; - struct DiskOnChip *old = NULL; - - /* We must avoid being called twice for the same device. */ - if (docmilpluslist) - old = docmilpluslist->priv; - - while (old) { - if (DoCMilPlus_is_alias(this, old)) { - printk(KERN_NOTICE "Ignoring DiskOnChip Millennium " - "Plus at 0x%lX - already configured\n", - this->physadr); - iounmap(this->virtadr); - kfree(mtd); - return; - } - if (old->nextdoc) - old = old->nextdoc->priv; - else - old = NULL; - } - - mtd->name = "DiskOnChip Millennium Plus"; - printk(KERN_NOTICE "DiskOnChip Millennium Plus found at " - "address 0x%lX\n", this->physadr); - - mtd->type = MTD_NANDFLASH; - mtd->flags = MTD_CAP_NANDFLASH; - mtd->writebufsize = mtd->writesize = 512; - mtd->oobsize = 16; - mtd->ecc_strength = 2; - mtd->owner = THIS_MODULE; - mtd->_erase = doc_erase; - mtd->_read = doc_read; - mtd->_write = doc_write; - mtd->_read_oob = doc_read_oob; - mtd->_write_oob = doc_write_oob; - this->curfloor = -1; - this->curchip = -1; - - /* Ident all the chips present. 
*/ - DoC_ScanChips(this); - - if (!this->totlen) { - kfree(mtd); - iounmap(this->virtadr); - } else { - this->nextdoc = docmilpluslist; - docmilpluslist = mtd; - mtd->size = this->totlen; - mtd->erasesize = this->erasesize; - mtd_device_register(mtd, NULL, 0); - return; - } -} -EXPORT_SYMBOL_GPL(DoCMilPlus_init); - -#if 0 -static int doc_dumpblk(struct mtd_info *mtd, loff_t from) -{ - int i; - loff_t fofs; - struct DiskOnChip *this = mtd->priv; - void __iomem * docptr = this->virtadr; - struct Nand *mychip = &this->chips[from >> (this->chipshift)]; - unsigned char *bp, buf[1056]; - char c[32]; - - from &= ~0x3ff; - - /* Don't allow read past end of device */ - if (from >= this->totlen) - return -EINVAL; - - DoC_CheckASIC(docptr); - - /* Find the chip which is to be used and select it */ - if (this->curfloor != mychip->floor) { - DoC_SelectFloor(docptr, mychip->floor); - DoC_SelectChip(docptr, mychip->chip); - } else if (this->curchip != mychip->chip) { - DoC_SelectChip(docptr, mychip->chip); - } - this->curfloor = mychip->floor; - this->curchip = mychip->chip; - - /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */ - WriteDOC((DOC_FLASH_CE | DOC_FLASH_WP), docptr, Mplus_FlashSelect); - - /* Reset the chip, see Software Requirement 11.4 item 1. */ - DoC_Command(docptr, NAND_CMD_RESET, 0); - DoC_WaitReady(docptr); - - fofs = from; - DoC_Command(docptr, DoC_GetDataOffset(mtd, &fofs), 0); - DoC_Address(this, 3, fofs, 0, 0x00); - WriteDOC(0, docptr, Mplus_FlashControl); - DoC_WaitReady(docptr); - - /* disable the ECC engine */ - WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf); - - ReadDOC(docptr, Mplus_ReadPipeInit); - ReadDOC(docptr, Mplus_ReadPipeInit); - - /* Read the data via the internal pipeline through CDSN IO - register, see Pipelined Read Operations 11.3 */ - MemReadDOC(docptr, buf, 1054); - buf[1054] = ReadDOC(docptr, Mplus_LastDataRead); - buf[1055] = ReadDOC(docptr, Mplus_LastDataRead); - - memset(&c[0], 0, sizeof(c)); - printk("DUMP OFFSET=%x:\n", (int)from); - - for (i = 0, bp = &buf[0]; (i < 1056); i++) { - if ((i % 16) == 0) - printk("%08x: ", i); - printk(" %02x", *bp); - c[(i & 0xf)] = ((*bp >= 0x20) && (*bp <= 0x7f)) ? *bp : '.'; - bp++; - if (((i + 1) % 16) == 0) - printk(" %s\n", c); - } - printk("\n"); - - /* Disable flash internally */ - WriteDOC(0, docptr, Mplus_FlashSelect); - - return 0; -} -#endif - -static int doc_read(struct mtd_info *mtd, loff_t from, size_t len, - size_t *retlen, u_char *buf) -{ - int ret, i; - volatile char dummy; - loff_t fofs; - unsigned char syndrome[6], eccbuf[6]; - struct DiskOnChip *this = mtd->priv; - void __iomem * docptr = this->virtadr; - struct Nand *mychip = &this->chips[from >> (this->chipshift)]; - - /* Don't allow a single read to cross a 512-byte block boundary */ - if (from + len > ((from | 0x1ff) + 1)) - len = ((from | 0x1ff) + 1) - from; - - DoC_CheckASIC(docptr); - - /* Find the chip which is to be used and select it */ - if (this->curfloor != mychip->floor) { - DoC_SelectFloor(docptr, mychip->floor); - DoC_SelectChip(docptr, mychip->chip); - } else if (this->curchip != mychip->chip) { - DoC_SelectChip(docptr, mychip->chip); - } - this->curfloor = mychip->floor; - this->curchip = mychip->chip; - - /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */ - WriteDOC((DOC_FLASH_CE | DOC_FLASH_WP), docptr, Mplus_FlashSelect); - - /* Reset the chip, see Software Requirement 11.4 item 1. 
*/ - DoC_Command(docptr, NAND_CMD_RESET, 0); - DoC_WaitReady(docptr); - - fofs = from; - DoC_Command(docptr, DoC_GetDataOffset(mtd, &fofs), 0); - DoC_Address(this, 3, fofs, 0, 0x00); - WriteDOC(0, docptr, Mplus_FlashControl); - DoC_WaitReady(docptr); - - /* init the ECC engine, see Reed-Solomon EDC/ECC 11.1 .*/ - WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf); - WriteDOC(DOC_ECC_EN, docptr, Mplus_ECCConf); - - /* Let the caller know we completed it */ - *retlen = len; - ret = 0; - - ReadDOC(docptr, Mplus_ReadPipeInit); - ReadDOC(docptr, Mplus_ReadPipeInit); - - /* Read the data via the internal pipeline through CDSN IO - register, see Pipelined Read Operations 11.3 */ - MemReadDOC(docptr, buf, len); - - /* Read the ECC data following raw data */ - MemReadDOC(docptr, eccbuf, 4); - eccbuf[4] = ReadDOC(docptr, Mplus_LastDataRead); - eccbuf[5] = ReadDOC(docptr, Mplus_LastDataRead); - - /* Flush the pipeline */ - dummy = ReadDOC(docptr, Mplus_ECCConf); - dummy = ReadDOC(docptr, Mplus_ECCConf); - - /* Check the ECC Status */ - if (ReadDOC(docptr, Mplus_ECCConf) & 0x80) { - int nb_errors; - /* There was an ECC error */ -#ifdef ECC_DEBUG - printk("DiskOnChip ECC Error: Read at %lx\n", (long)from); -#endif - /* Read the ECC syndrome through the DiskOnChip ECC logic. - These syndrome will be all ZERO when there is no error */ - for (i = 0; i < 6; i++) - syndrome[i] = ReadDOC(docptr, Mplus_ECCSyndrome0 + i); - - nb_errors = doc_decode_ecc(buf, syndrome); -#ifdef ECC_DEBUG - printk("ECC Errors corrected: %x\n", nb_errors); -#endif - if (nb_errors < 0) { - /* We return error, but have actually done the - read. Not that this can be told to user-space, via - sys_read(), but at least MTD-aware stuff can know - about it by checking *retlen */ -#ifdef ECC_DEBUG - printk("%s(%d): Millennium Plus ECC error (from=0x%x:\n", - __FILE__, __LINE__, (int)from); - printk(" syndrome= %*phC\n", 6, syndrome); - printk(" eccbuf= %*phC\n", 6, eccbuf); -#endif - ret = -EIO; - } - } - -#ifdef PSYCHO_DEBUG - printk("ECC DATA at %lx: %*ph\n", (long)from, 6, eccbuf); -#endif - /* disable the ECC engine */ - WriteDOC(DOC_ECC_DIS, docptr , Mplus_ECCConf); - - /* Disable flash internally */ - WriteDOC(0, docptr, Mplus_FlashSelect); - - return ret; -} - -static int doc_write(struct mtd_info *mtd, loff_t to, size_t len, - size_t *retlen, const u_char *buf) -{ - int i, before, ret = 0; - loff_t fto; - volatile char dummy; - char eccbuf[6]; - struct DiskOnChip *this = mtd->priv; - void __iomem * docptr = this->virtadr; - struct Nand *mychip = &this->chips[to >> (this->chipshift)]; - - /* Don't allow writes which aren't exactly one block (512 bytes) */ - if ((to & 0x1ff) || (len != 0x200)) - return -EINVAL; - - /* Determine position of OOB flags, before or after data */ - before = (this->interleave && (to & 0x200)); - - DoC_CheckASIC(docptr); - - /* Find the chip which is to be used and select it */ - if (this->curfloor != mychip->floor) { - DoC_SelectFloor(docptr, mychip->floor); - DoC_SelectChip(docptr, mychip->chip); - } else if (this->curchip != mychip->chip) { - DoC_SelectChip(docptr, mychip->chip); - } - this->curfloor = mychip->floor; - this->curchip = mychip->chip; - - /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */ - WriteDOC(DOC_FLASH_CE, docptr, Mplus_FlashSelect); - - /* Reset the chip, see Software Requirement 11.4 item 1. 
*/ - DoC_Command(docptr, NAND_CMD_RESET, 0); - DoC_WaitReady(docptr); - - /* Set device to appropriate plane of flash */ - fto = to; - WriteDOC(DoC_GetDataOffset(mtd, &fto), docptr, Mplus_FlashCmd); - - /* On interleaved devices the flags for 2nd half 512 are before data */ - if (before) - fto -= 2; - - /* issue the Serial Data In command to initial the Page Program process */ - DoC_Command(docptr, NAND_CMD_SEQIN, 0x00); - DoC_Address(this, 3, fto, 0x00, 0x00); - - /* Disable the ECC engine */ - WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf); - - if (before) { - /* Write the block status BLOCK_USED (0x5555) */ - WriteDOC(0x55, docptr, Mil_CDSN_IO); - WriteDOC(0x55, docptr, Mil_CDSN_IO); - } - - /* init the ECC engine, see Reed-Solomon EDC/ECC 11.1 .*/ - WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, Mplus_ECCConf); - - MemWriteDOC(docptr, (unsigned char *) buf, len); - - /* Write ECC data to flash, the ECC info is generated by - the DiskOnChip ECC logic see Reed-Solomon EDC/ECC 11.1 */ - DoC_Delay(docptr, 3); - - /* Read the ECC data through the DiskOnChip ECC logic */ - for (i = 0; i < 6; i++) - eccbuf[i] = ReadDOC(docptr, Mplus_ECCSyndrome0 + i); - - /* disable the ECC engine */ - WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf); - - /* Write the ECC data to flash */ - MemWriteDOC(docptr, eccbuf, 6); - - if (!before) { - /* Write the block status BLOCK_USED (0x5555) */ - WriteDOC(0x55, docptr, Mil_CDSN_IO+6); - WriteDOC(0x55, docptr, Mil_CDSN_IO+7); - } - -#ifdef PSYCHO_DEBUG - printk("OOB data at %lx is %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n", - (long) to, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3], - eccbuf[4], eccbuf[5]); -#endif - - WriteDOC(0x00, docptr, Mplus_WritePipeTerm); - WriteDOC(0x00, docptr, Mplus_WritePipeTerm); - - /* Commit the Page Program command and wait for ready - see Software Requirement 11.4 item 1.*/ - DoC_Command(docptr, NAND_CMD_PAGEPROG, 0x00); - DoC_WaitReady(docptr); - - /* Read the status of the flash device through CDSN IO register - see Software Requirement 11.4 item 5.*/ - DoC_Command(docptr, NAND_CMD_STATUS, 0); - dummy = ReadDOC(docptr, Mplus_ReadPipeInit); - dummy = ReadDOC(docptr, Mplus_ReadPipeInit); - DoC_Delay(docptr, 2); - if ((dummy = ReadDOC(docptr, Mplus_LastDataRead)) & 1) { - printk("MTD: Error 0x%x programming at 0x%x\n", dummy, (int)to); - /* Error in programming - FIXME: implement Bad Block Replacement (in nftl.c ??) 
*/ - ret = -EIO; - } - dummy = ReadDOC(docptr, Mplus_LastDataRead); - - /* Disable flash internally */ - WriteDOC(0, docptr, Mplus_FlashSelect); - - /* Let the caller know we completed it */ - *retlen = len; - - return ret; -} - -static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, - struct mtd_oob_ops *ops) -{ - loff_t fofs, base; - struct DiskOnChip *this = mtd->priv; - void __iomem * docptr = this->virtadr; - struct Nand *mychip = &this->chips[ofs >> this->chipshift]; - size_t i, size, got, want; - uint8_t *buf = ops->oobbuf; - size_t len = ops->len; - - BUG_ON(ops->mode != MTD_OPS_PLACE_OOB); - - ofs += ops->ooboffs; - - DoC_CheckASIC(docptr); - - /* Find the chip which is to be used and select it */ - if (this->curfloor != mychip->floor) { - DoC_SelectFloor(docptr, mychip->floor); - DoC_SelectChip(docptr, mychip->chip); - } else if (this->curchip != mychip->chip) { - DoC_SelectChip(docptr, mychip->chip); - } - this->curfloor = mychip->floor; - this->curchip = mychip->chip; - - /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */ - WriteDOC((DOC_FLASH_CE | DOC_FLASH_WP), docptr, Mplus_FlashSelect); - - /* disable the ECC engine */ - WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf); - DoC_WaitReady(docptr); - - /* Maximum of 16 bytes in the OOB region, so limit read to that */ - if (len > 16) - len = 16; - got = 0; - want = len; - - for (i = 0; ((i < 3) && (want > 0)); i++) { - /* Figure out which region we are accessing... */ - fofs = ofs; - base = ofs & 0xf; - if (!this->interleave) { - DoC_Command(docptr, NAND_CMD_READOOB, 0); - size = 16 - base; - } else if (base < 6) { - DoC_Command(docptr, DoC_GetECCOffset(mtd, &fofs), 0); - size = 6 - base; - } else if (base < 8) { - DoC_Command(docptr, DoC_GetFlagsOffset(mtd, &fofs), 0); - size = 8 - base; - } else { - DoC_Command(docptr, DoC_GetHdrOffset(mtd, &fofs), 0); - size = 16 - base; - } - if (size > want) - size = want; - - /* Issue read command */ - DoC_Address(this, 3, fofs, 0, 0x00); - WriteDOC(0, docptr, Mplus_FlashControl); - DoC_WaitReady(docptr); - - ReadDOC(docptr, Mplus_ReadPipeInit); - ReadDOC(docptr, Mplus_ReadPipeInit); - MemReadDOC(docptr, &buf[got], size - 2); - buf[got + size - 2] = ReadDOC(docptr, Mplus_LastDataRead); - buf[got + size - 1] = ReadDOC(docptr, Mplus_LastDataRead); - - ofs += size; - got += size; - want -= size; - } - - /* Disable flash internally */ - WriteDOC(0, docptr, Mplus_FlashSelect); - - ops->retlen = len; - return 0; -} - -static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, - struct mtd_oob_ops *ops) -{ - volatile char dummy; - loff_t fofs, base; - struct DiskOnChip *this = mtd->priv; - void __iomem * docptr = this->virtadr; - struct Nand *mychip = &this->chips[ofs >> this->chipshift]; - size_t i, size, got, want; - int ret = 0; - uint8_t *buf = ops->oobbuf; - size_t len = ops->len; - - BUG_ON(ops->mode != MTD_OPS_PLACE_OOB); - - ofs += ops->ooboffs; - - DoC_CheckASIC(docptr); - - /* Find the chip which is to be used and select it */ - if (this->curfloor != mychip->floor) { - DoC_SelectFloor(docptr, mychip->floor); - DoC_SelectChip(docptr, mychip->chip); - } else if (this->curchip != mychip->chip) { - DoC_SelectChip(docptr, mychip->chip); - } - this->curfloor = mychip->floor; - this->curchip = mychip->chip; - - /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */ - WriteDOC(DOC_FLASH_CE, docptr, Mplus_FlashSelect); - - - /* Maximum of 16 bytes in the OOB region, so limit write to that */ - if (len > 16) - len = 16; - got = 0; - want = len; - - for (i = 0; 
((i < 3) && (want > 0)); i++) { - /* Reset the chip, see Software Requirement 11.4 item 1. */ - DoC_Command(docptr, NAND_CMD_RESET, 0); - DoC_WaitReady(docptr); - - /* Figure out which region we are accessing... */ - fofs = ofs; - base = ofs & 0x0f; - if (!this->interleave) { - WriteDOC(NAND_CMD_READOOB, docptr, Mplus_FlashCmd); - size = 16 - base; - } else if (base < 6) { - WriteDOC(DoC_GetECCOffset(mtd, &fofs), docptr, Mplus_FlashCmd); - size = 6 - base; - } else if (base < 8) { - WriteDOC(DoC_GetFlagsOffset(mtd, &fofs), docptr, Mplus_FlashCmd); - size = 8 - base; - } else { - WriteDOC(DoC_GetHdrOffset(mtd, &fofs), docptr, Mplus_FlashCmd); - size = 16 - base; - } - if (size > want) - size = want; - - /* Issue the Serial Data In command to initial the Page Program process */ - DoC_Command(docptr, NAND_CMD_SEQIN, 0x00); - DoC_Address(this, 3, fofs, 0, 0x00); - - /* Disable the ECC engine */ - WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf); - - /* Write the data via the internal pipeline through CDSN IO - register, see Pipelined Write Operations 11.2 */ - MemWriteDOC(docptr, (unsigned char *) &buf[got], size); - WriteDOC(0x00, docptr, Mplus_WritePipeTerm); - WriteDOC(0x00, docptr, Mplus_WritePipeTerm); - - /* Commit the Page Program command and wait for ready - see Software Requirement 11.4 item 1.*/ - DoC_Command(docptr, NAND_CMD_PAGEPROG, 0x00); - DoC_WaitReady(docptr); - - /* Read the status of the flash device through CDSN IO register - see Software Requirement 11.4 item 5.*/ - DoC_Command(docptr, NAND_CMD_STATUS, 0x00); - dummy = ReadDOC(docptr, Mplus_ReadPipeInit); - dummy = ReadDOC(docptr, Mplus_ReadPipeInit); - DoC_Delay(docptr, 2); - if ((dummy = ReadDOC(docptr, Mplus_LastDataRead)) & 1) { - printk("MTD: Error 0x%x programming oob at 0x%x\n", - dummy, (int)ofs); - /* FIXME: implement Bad Block Replacement */ - ops->retlen = 0; - ret = -EIO; - } - dummy = ReadDOC(docptr, Mplus_LastDataRead); - - ofs += size; - got += size; - want -= size; - } - - /* Disable flash internally */ - WriteDOC(0, docptr, Mplus_FlashSelect); - - ops->retlen = len; - return ret; -} - -int doc_erase(struct mtd_info *mtd, struct erase_info *instr) -{ - volatile char dummy; - struct DiskOnChip *this = mtd->priv; - __u32 ofs = instr->addr; - __u32 len = instr->len; - void __iomem * docptr = this->virtadr; - struct Nand *mychip = &this->chips[ofs >> this->chipshift]; - - DoC_CheckASIC(docptr); - - if (len != mtd->erasesize) - printk(KERN_WARNING "MTD: Erase not right size (%x != %x)n", - len, mtd->erasesize); - - /* Find the chip which is to be used and select it */ - if (this->curfloor != mychip->floor) { - DoC_SelectFloor(docptr, mychip->floor); - DoC_SelectChip(docptr, mychip->chip); - } else if (this->curchip != mychip->chip) { - DoC_SelectChip(docptr, mychip->chip); - } - this->curfloor = mychip->floor; - this->curchip = mychip->chip; - - instr->state = MTD_ERASE_PENDING; - - /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */ - WriteDOC(DOC_FLASH_CE, docptr, Mplus_FlashSelect); - - DoC_Command(docptr, NAND_CMD_RESET, 0x00); - DoC_WaitReady(docptr); - - DoC_Command(docptr, NAND_CMD_ERASE1, 0); - DoC_Address(this, 2, ofs, 0, 0x00); - DoC_Command(docptr, NAND_CMD_ERASE2, 0); - DoC_WaitReady(docptr); - instr->state = MTD_ERASING; - - /* Read the status of the flash device through CDSN IO register - see Software Requirement 11.4 item 5. 
*/ - DoC_Command(docptr, NAND_CMD_STATUS, 0); - dummy = ReadDOC(docptr, Mplus_ReadPipeInit); - dummy = ReadDOC(docptr, Mplus_ReadPipeInit); - if ((dummy = ReadDOC(docptr, Mplus_LastDataRead)) & 1) { - printk("MTD: Error 0x%x erasing at 0x%x\n", dummy, ofs); - /* FIXME: implement Bad Block Replacement (in nftl.c ??) */ - instr->state = MTD_ERASE_FAILED; - } else { - instr->state = MTD_ERASE_DONE; - } - dummy = ReadDOC(docptr, Mplus_LastDataRead); - - /* Disable flash internally */ - WriteDOC(0, docptr, Mplus_FlashSelect); - - mtd_erase_callback(instr); - - return 0; -} - -/**************************************************************************** - * - * Module stuff - * - ****************************************************************************/ - -static void __exit cleanup_doc2001plus(void) -{ - struct mtd_info *mtd; - struct DiskOnChip *this; - - while ((mtd=docmilpluslist)) { - this = mtd->priv; - docmilpluslist = this->nextdoc; - - mtd_device_unregister(mtd); - - iounmap(this->virtadr); - kfree(this->chips); - kfree(mtd); - } -} - -module_exit(cleanup_doc2001plus); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Greg Ungerer et al."); -MODULE_DESCRIPTION("Driver for DiskOnChip Millennium Plus"); diff --git a/drivers/mtd/devices/docecc.c b/drivers/mtd/devices/docecc.c deleted file mode 100644 index 4a1c39b..0000000 --- a/drivers/mtd/devices/docecc.c +++ /dev/null @@ -1,521 +0,0 @@ -/* - * ECC algorithm for M-systems disk on chip. We use the excellent Reed - * Solmon code of Phil Karn (karn@ka9q.ampr.org) available under the - * GNU GPL License. The rest is simply to convert the disk on chip - * syndrome into a standard syndome. - * - * Author: Fabrice Bellard (fabrice.bellard@netgem.com) - * Copyright (C) 2000 Netgem S.A. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#define DEBUG_ECC 0 -/* need to undef it (from asm/termbits.h) */ -#undef B0 - -#define MM 10 /* Symbol size in bits */ -#define KK (1023-4) /* Number of data symbols per block */ -#define B0 510 /* First root of generator polynomial, alpha form */ -#define PRIM 1 /* power of alpha used to generate roots of generator poly */ -#define NN ((1 << MM) - 1) - -typedef unsigned short dtype; - -/* 1+x^3+x^10 */ -static const int Pp[MM+1] = { 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1 }; - -/* This defines the type used to store an element of the Galois Field - * used by the code. Make sure this is something larger than a char if - * if anything larger than GF(256) is used. - * - * Note: unsigned char will work up to GF(256) but int seems to run - * faster on the Pentium. 
- */ -typedef int gf; - -/* No legal value in index form represents zero, so - * we need a special value for this purpose - */ -#define A0 (NN) - -/* Compute x % NN, where NN is 2**MM - 1, - * without a slow divide - */ -static inline gf -modnn(int x) -{ - while (x >= NN) { - x -= NN; - x = (x >> MM) + (x & NN); - } - return x; -} - -#define CLEAR(a,n) {\ -int ci;\ -for(ci=(n)-1;ci >=0;ci--)\ -(a)[ci] = 0;\ -} - -#define COPY(a,b,n) {\ -int ci;\ -for(ci=(n)-1;ci >=0;ci--)\ -(a)[ci] = (b)[ci];\ -} - -#define COPYDOWN(a,b,n) {\ -int ci;\ -for(ci=(n)-1;ci >=0;ci--)\ -(a)[ci] = (b)[ci];\ -} - -#define Ldec 1 - -/* generate GF(2**m) from the irreducible polynomial p(X) in Pp[0]..Pp[m] - lookup tables: index->polynomial form alpha_to[] contains j=alpha**i; - polynomial form -> index form index_of[j=alpha**i] = i - alpha=2 is the primitive element of GF(2**m) - HARI's COMMENT: (4/13/94) alpha_to[] can be used as follows: - Let @ represent the primitive element commonly called "alpha" that - is the root of the primitive polynomial p(x). Then in GF(2^m), for any - 0 <= i <= 2^m-2, - @^i = a(0) + a(1) @ + a(2) @^2 + ... + a(m-1) @^(m-1) - where the binary vector (a(0),a(1),a(2),...,a(m-1)) is the representation - of the integer "alpha_to[i]" with a(0) being the LSB and a(m-1) the MSB. Thus for - example the polynomial representation of @^5 would be given by the binary - representation of the integer "alpha_to[5]". - Similarly, index_of[] can be used as follows: - As above, let @ represent the primitive element of GF(2^m) that is - the root of the primitive polynomial p(x). In order to find the power - of @ (alpha) that has the polynomial representation - a(0) + a(1) @ + a(2) @^2 + ... + a(m-1) @^(m-1) - we consider the integer "i" whose binary representation with a(0) being LSB - and a(m-1) MSB is (a(0),a(1),...,a(m-1)) and locate the entry - "index_of[i]". Now, @^index_of[i] is that element whose polynomial - representation is (a(0),a(1),a(2),...,a(m-1)). - NOTE: - The element alpha_to[2^m-1] = 0 always signifying that the - representation of "@^infinity" = 0 is (0,0,0,...,0). - Similarly, the element index_of[0] = A0 always signifying - that the power of alpha which has the polynomial representation - (0,0,...,0) is "infinity". - -*/ - -static void -generate_gf(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1]) -{ - register int i, mask; - - mask = 1; - Alpha_to[MM] = 0; - for (i = 0; i < MM; i++) { - Alpha_to[i] = mask; - Index_of[Alpha_to[i]] = i; - /* If Pp[i] == 1 then, term @^i occurs in poly-repr of @^MM */ - if (Pp[i] != 0) - Alpha_to[MM] ^= mask; /* Bit-wise EXOR operation */ - mask <<= 1; /* single left-shift */ - } - Index_of[Alpha_to[MM]] = MM; - /* - * Have obtained poly-repr of @^MM. Poly-repr of @^(i+1) is given by - * poly-repr of @^i shifted left one-bit and accounting for any @^MM - * term that may occur when poly-repr of @^i is shifted. - */ - mask >>= 1; - for (i = MM + 1; i < NN; i++) { - if (Alpha_to[i - 1] >= mask) - Alpha_to[i] = Alpha_to[MM] ^ ((Alpha_to[i - 1] ^ mask) << 1); - else - Alpha_to[i] = Alpha_to[i - 1] << 1; - Index_of[Alpha_to[i]] = i; - } - Index_of[0] = A0; - Alpha_to[NN] = 0; -} - -/* - * Performs ERRORS+ERASURES decoding of RS codes. bb[] is the content - * of the feedback shift register after having processed the data and - * the ECC. - * - * Return number of symbols corrected, or -1 if codeword is illegal - * or uncorrectable. If eras_pos is non-null, the detected error locations - * are written back. NOTE! This array must be at least NN-KK elements long. 
- * The corrected data are written in eras_val[]. They must be xor with the data - * to retrieve the correct data : data[erase_pos[i]] ^= erase_val[i] . - * - * First "no_eras" erasures are declared by the calling program. Then, the - * maximum # of errors correctable is t_after_eras = floor((NN-KK-no_eras)/2). - * If the number of channel errors is not greater than "t_after_eras" the - * transmitted codeword will be recovered. Details of algorithm can be found - * in R. Blahut's "Theory ... of Error-Correcting Codes". - - * Warning: the eras_pos[] array must not contain duplicate entries; decoder failure - * will result. The decoder *could* check for this condition, but it would involve - * extra time on every decoding operation. - * */ -static int -eras_dec_rs(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1], - gf bb[NN - KK + 1], gf eras_val[NN-KK], int eras_pos[NN-KK], - int no_eras) -{ - int deg_lambda, el, deg_omega; - int i, j, r,k; - gf u,q,tmp,num1,num2,den,discr_r; - gf lambda[NN-KK + 1], s[NN-KK + 1]; /* Err+Eras Locator poly - * and syndrome poly */ - gf b[NN-KK + 1], t[NN-KK + 1], omega[NN-KK + 1]; - gf root[NN-KK], reg[NN-KK + 1], loc[NN-KK]; - int syn_error, count; - - syn_error = 0; - for(i=0;i 0) { - /* Init lambda to be the erasure locator polynomial */ - lambda[1] = Alpha_to[modnn(PRIM * eras_pos[0])]; - for (i = 1; i < no_eras; i++) { - u = modnn(PRIM*eras_pos[i]); - for (j = i+1; j > 0; j--) { - tmp = Index_of[lambda[j - 1]]; - if(tmp != A0) - lambda[j] ^= Alpha_to[modnn(u + tmp)]; - } - } -#if DEBUG_ECC >= 1 - /* Test code that verifies the erasure locator polynomial just constructed - Needed only for decoder debugging. */ - - /* find roots of the erasure location polynomial */ - for(i=1;i<=no_eras;i++) - reg[i] = Index_of[lambda[i]]; - count = 0; - for (i = 1,k=NN-Ldec; i <= NN; i++,k = modnn(NN+k-Ldec)) { - q = 1; - for (j = 1; j <= no_eras; j++) - if (reg[j] != A0) { - reg[j] = modnn(reg[j] + j); - q ^= Alpha_to[reg[j]]; - } - if (q != 0) - continue; - /* store root and error location number indices */ - root[count] = i; - loc[count] = k; - count++; - } - if (count != no_eras) { - printf("\n lambda(x) is WRONG\n"); - count = -1; - goto finish; - } -#if DEBUG_ECC >= 2 - printf("\n Erasure positions as determined by roots of Eras Loc Poly:\n"); - for (i = 0; i < count; i++) - printf("%d ", loc[i]); - printf("\n"); -#endif -#endif - } - for(i=0;i 0; j--){ - if (reg[j] != A0) { - reg[j] = modnn(reg[j] + j); - q ^= Alpha_to[reg[j]]; - } - } - if (q != 0) - continue; - /* store root (index-form) and error location number */ - root[count] = i; - loc[count] = k; - /* If we've already found max possible roots, - * abort the search to save time - */ - if(++count == deg_lambda) - break; - } - if (deg_lambda != count) { - /* - * deg(lambda) unequal to number of roots => uncorrectable - * error detected - */ - count = -1; - goto finish; - } - /* - * Compute err+eras evaluator poly omega(x) = s(x)*lambda(x) (modulo - * x**(NN-KK)). in index form. Also find deg(omega). - */ - deg_omega = 0; - for (i = 0; i < NN-KK;i++){ - tmp = 0; - j = (deg_lambda < i) ? deg_lambda : i; - for(;j >= 0; j--){ - if ((s[i + 1 - j] != A0) && (lambda[j] != A0)) - tmp ^= Alpha_to[modnn(s[i + 1 - j] + lambda[j])]; - } - if(tmp != 0) - deg_omega = i; - omega[i] = Index_of[tmp]; - } - omega[NN-KK] = A0; - - /* - * Compute error values in poly-form. 
num1 = omega(inv(X(l))), num2 = - * inv(X(l))**(B0-1) and den = lambda_pr(inv(X(l))) all in poly-form - */ - for (j = count-1; j >=0; j--) { - num1 = 0; - for (i = deg_omega; i >= 0; i--) { - if (omega[i] != A0) - num1 ^= Alpha_to[modnn(omega[i] + i * root[j])]; - } - num2 = Alpha_to[modnn(root[j] * (B0 - 1) + NN)]; - den = 0; - - /* lambda[i+1] for i even is the formal derivative lambda_pr of lambda[i] */ - for (i = min(deg_lambda,NN-KK-1) & ~1; i >= 0; i -=2) { - if(lambda[i+1] != A0) - den ^= Alpha_to[modnn(lambda[i+1] + i * root[j])]; - } - if (den == 0) { -#if DEBUG_ECC >= 1 - printf("\n ERROR: denominator = 0\n"); -#endif - /* Convert to dual- basis */ - count = -1; - goto finish; - } - /* Apply error to data */ - if (num1 != 0) { - eras_val[j] = Alpha_to[modnn(Index_of[num1] + Index_of[num2] + NN - Index_of[den])]; - } else { - eras_val[j] = 0; - } - } - finish: - for(i=0;i> 2) | ((ecc1[2] & 0x0f) << 6); - bb[2] = ((ecc1[2] & 0xf0) >> 4) | ((ecc1[3] & 0x3f) << 4); - bb[3] = ((ecc1[3] & 0xc0) >> 6) | ((ecc1[0] & 0xff) << 2); - - nb_errors = eras_dec_rs(Alpha_to, Index_of, bb, - error_val, error_pos, 0); - if (nb_errors <= 0) - goto the_end; - - /* correct the errors */ - for(i=0;i= NB_DATA && pos < KK) { - nb_errors = -1; - goto the_end; - } - if (pos < NB_DATA) { - /* extract bit position (MSB first) */ - pos = 10 * (NB_DATA - 1 - pos) - 6; - /* now correct the following 10 bits. At most two bytes - can be modified since pos is even */ - index = (pos >> 3) ^ 1; - bitpos = pos & 7; - if ((index >= 0 && index < SECTOR_SIZE) || - index == (SECTOR_SIZE + 1)) { - val = error_val[i] >> (2 + bitpos); - parity ^= val; - if (index < SECTOR_SIZE) - sector[index] ^= val; - } - index = ((pos >> 3) + 1) ^ 1; - bitpos = (bitpos + 10) & 7; - if (bitpos == 0) - bitpos = 8; - if ((index >= 0 && index < SECTOR_SIZE) || - index == (SECTOR_SIZE + 1)) { - val = error_val[i] << (8 - bitpos); - parity ^= val; - if (index < SECTOR_SIZE) - sector[index] ^= val; - } - } - } - - /* use parity to test extra errors */ - if ((parity & 0xff) != 0) - nb_errors = -1; - - the_end: - kfree(Alpha_to); - kfree(Index_of); - return nb_errors; -} - -EXPORT_SYMBOL_GPL(doc_decode_ecc); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Fabrice Bellard "); -MODULE_DESCRIPTION("ECC code for correcting errors detected by DiskOnChip 2000 and Millennium ECC hardware"); diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c index 8510ccb..3e1b0a0 100644 --- a/drivers/mtd/devices/docg3.c +++ b/drivers/mtd/devices/docg3.c @@ -123,7 +123,7 @@ static inline void doc_flash_address(struct docg3 *docg3, u8 addr) doc_writeb(docg3, addr, DOC_FLASHADDRESS); } -static char const *part_probes[] = { "cmdlinepart", "saftlpart", NULL }; +static char const * const part_probes[] = { "cmdlinepart", "saftlpart", NULL }; static int doc_register_readb(struct docg3 *docg3, int reg) { @@ -2144,18 +2144,7 @@ static struct platform_driver g3_driver = { .remove = __exit_p(docg3_release), }; -static int __init docg3_init(void) -{ - return platform_driver_probe(&g3_driver, docg3_probe); -} -module_init(docg3_init); - - -static void __exit docg3_exit(void) -{ - platform_driver_unregister(&g3_driver); -} -module_exit(docg3_exit); +module_platform_driver_probe(g3_driver, docg3_probe); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Robert Jarzmik "); diff --git a/drivers/mtd/devices/docprobe.c b/drivers/mtd/devices/docprobe.c deleted file mode 100644 index 88b3fd3..0000000 --- a/drivers/mtd/devices/docprobe.c +++ /dev/null @@ -1,325 +0,0 @@ - -/* Linux driver for 
Disk-On-Chip devices */ -/* Probe routines common to all DoC devices */ -/* (C) 1999 Machine Vision Holdings, Inc. */ -/* (C) 1999-2003 David Woodhouse */ - - -/* DOC_PASSIVE_PROBE: - In order to ensure that the BIOS checksum is correct at boot time, and - hence that the onboard BIOS extension gets executed, the DiskOnChip - goes into reset mode when it is read sequentially: all registers - return 0xff until the chip is woken up again by writing to the - DOCControl register. - - Unfortunately, this means that the probe for the DiskOnChip is unsafe, - because one of the first things it does is write to where it thinks - the DOCControl register should be - which may well be shared memory - for another device. I've had machines which lock up when this is - attempted. Hence the possibility to do a passive probe, which will fail - to detect a chip in reset mode, but is at least guaranteed not to lock - the machine. - - If you have this problem, uncomment the following line: -#define DOC_PASSIVE_PROBE -*/ - - -/* DOC_SINGLE_DRIVER: - Millennium driver has been merged into DOC2000 driver. - - The old Millennium-only driver has been retained just in case there - are problems with the new code. If the combined driver doesn't work - for you, you can try the old one by undefining DOC_SINGLE_DRIVER - below and also enabling it in your configuration. If this fixes the - problems, please send a report to the MTD mailing list at - . -*/ -#define DOC_SINGLE_DRIVER - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - - -static unsigned long doc_config_location = CONFIG_MTD_DOCPROBE_ADDRESS; -module_param(doc_config_location, ulong, 0); -MODULE_PARM_DESC(doc_config_location, "Physical memory address at which to probe for DiskOnChip"); - -static unsigned long __initdata doc_locations[] = { -#if defined (__alpha__) || defined(__i386__) || defined(__x86_64__) -#ifdef CONFIG_MTD_DOCPROBE_HIGH - 0xfffc8000, 0xfffca000, 0xfffcc000, 0xfffce000, - 0xfffd0000, 0xfffd2000, 0xfffd4000, 0xfffd6000, - 0xfffd8000, 0xfffda000, 0xfffdc000, 0xfffde000, - 0xfffe0000, 0xfffe2000, 0xfffe4000, 0xfffe6000, - 0xfffe8000, 0xfffea000, 0xfffec000, 0xfffee000, -#else /* CONFIG_MTD_DOCPROBE_HIGH */ - 0xc8000, 0xca000, 0xcc000, 0xce000, - 0xd0000, 0xd2000, 0xd4000, 0xd6000, - 0xd8000, 0xda000, 0xdc000, 0xde000, - 0xe0000, 0xe2000, 0xe4000, 0xe6000, - 0xe8000, 0xea000, 0xec000, 0xee000, -#endif /* CONFIG_MTD_DOCPROBE_HIGH */ -#endif - 0xffffffff }; - -/* doccheck: Probe a given memory window to see if there's a DiskOnChip present */ - -static inline int __init doccheck(void __iomem *potential, unsigned long physadr) -{ - void __iomem *window=potential; - unsigned char tmp, tmpb, tmpc, ChipID; -#ifndef DOC_PASSIVE_PROBE - unsigned char tmp2; -#endif - - /* Routine copied from the Linux DOC driver */ - -#ifdef CONFIG_MTD_DOCPROBE_55AA - /* Check for 0x55 0xAA signature at beginning of window, - this is no longer true once we remove the IPL (for Millennium */ - if (ReadDOC(window, Sig1) != 0x55 || ReadDOC(window, Sig2) != 0xaa) - return 0; -#endif /* CONFIG_MTD_DOCPROBE_55AA */ - -#ifndef DOC_PASSIVE_PROBE - /* It's not possible to cleanly detect the DiskOnChip - the - * bootup procedure will put the device into reset mode, and - * it's not possible to talk to it without actually writing - * to the DOCControl register. 
So we store the current contents - * of the DOCControl register's location, in case we later decide - * that it's not a DiskOnChip, and want to put it back how we - * found it. - */ - tmp2 = ReadDOC(window, DOCControl); - - /* Reset the DiskOnChip ASIC */ - WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET, - window, DOCControl); - WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET, - window, DOCControl); - - /* Enable the DiskOnChip ASIC */ - WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL, - window, DOCControl); - WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL, - window, DOCControl); -#endif /* !DOC_PASSIVE_PROBE */ - - /* We need to read the ChipID register four times. For some - newer DiskOnChip 2000 units, the first three reads will - return the DiskOnChip Millennium ident. Don't ask. */ - ChipID = ReadDOC(window, ChipID); - - switch (ChipID) { - case DOC_ChipID_Doc2k: - /* Check the TOGGLE bit in the ECC register */ - tmp = ReadDOC(window, 2k_ECCStatus) & DOC_TOGGLE_BIT; - tmpb = ReadDOC(window, 2k_ECCStatus) & DOC_TOGGLE_BIT; - tmpc = ReadDOC(window, 2k_ECCStatus) & DOC_TOGGLE_BIT; - if (tmp != tmpb && tmp == tmpc) - return ChipID; - break; - - case DOC_ChipID_DocMil: - /* Check for the new 2000 with Millennium ASIC */ - ReadDOC(window, ChipID); - ReadDOC(window, ChipID); - if (ReadDOC(window, ChipID) != DOC_ChipID_DocMil) - ChipID = DOC_ChipID_Doc2kTSOP; - - /* Check the TOGGLE bit in the ECC register */ - tmp = ReadDOC(window, ECCConf) & DOC_TOGGLE_BIT; - tmpb = ReadDOC(window, ECCConf) & DOC_TOGGLE_BIT; - tmpc = ReadDOC(window, ECCConf) & DOC_TOGGLE_BIT; - if (tmp != tmpb && tmp == tmpc) - return ChipID; - break; - - case DOC_ChipID_DocMilPlus16: - case DOC_ChipID_DocMilPlus32: - case 0: - /* Possible Millennium+, need to do more checks */ -#ifndef DOC_PASSIVE_PROBE - /* Possibly release from power down mode */ - for (tmp = 0; (tmp < 4); tmp++) - ReadDOC(window, Mplus_Power); - - /* Reset the DiskOnChip ASIC */ - tmp = DOC_MODE_RESET | DOC_MODE_MDWREN | DOC_MODE_RST_LAT | - DOC_MODE_BDECT; - WriteDOC(tmp, window, Mplus_DOCControl); - WriteDOC(~tmp, window, Mplus_CtrlConfirm); - - mdelay(1); - /* Enable the DiskOnChip ASIC */ - tmp = DOC_MODE_NORMAL | DOC_MODE_MDWREN | DOC_MODE_RST_LAT | - DOC_MODE_BDECT; - WriteDOC(tmp, window, Mplus_DOCControl); - WriteDOC(~tmp, window, Mplus_CtrlConfirm); - mdelay(1); -#endif /* !DOC_PASSIVE_PROBE */ - - ChipID = ReadDOC(window, ChipID); - - switch (ChipID) { - case DOC_ChipID_DocMilPlus16: - case DOC_ChipID_DocMilPlus32: - /* Check the TOGGLE bit in the toggle register */ - tmp = ReadDOC(window, Mplus_Toggle) & DOC_TOGGLE_BIT; - tmpb = ReadDOC(window, Mplus_Toggle) & DOC_TOGGLE_BIT; - tmpc = ReadDOC(window, Mplus_Toggle) & DOC_TOGGLE_BIT; - if (tmp != tmpb && tmp == tmpc) - return ChipID; - default: - break; - } - /* FALL TRHU */ - - default: - -#ifdef CONFIG_MTD_DOCPROBE_55AA - printk(KERN_DEBUG "Possible DiskOnChip with unknown ChipID %2.2X found at 0x%lx\n", - ChipID, physadr); -#endif -#ifndef DOC_PASSIVE_PROBE - /* Put back the contents of the DOCControl register, in case it's not - * actually a DiskOnChip. 
- */ - WriteDOC(tmp2, window, DOCControl); -#endif - return 0; - } - - printk(KERN_WARNING "DiskOnChip failed TOGGLE test, dropping.\n"); - -#ifndef DOC_PASSIVE_PROBE - /* Put back the contents of the DOCControl register: it's not a DiskOnChip */ - WriteDOC(tmp2, window, DOCControl); -#endif - return 0; -} - -static int docfound; - -extern void DoC2k_init(struct mtd_info *); -extern void DoCMil_init(struct mtd_info *); -extern void DoCMilPlus_init(struct mtd_info *); - -static void __init DoC_Probe(unsigned long physadr) -{ - void __iomem *docptr; - struct DiskOnChip *this; - struct mtd_info *mtd; - int ChipID; - char namebuf[15]; - char *name = namebuf; - void (*initroutine)(struct mtd_info *) = NULL; - - docptr = ioremap(physadr, DOC_IOREMAP_LEN); - - if (!docptr) - return; - - if ((ChipID = doccheck(docptr, physadr))) { - if (ChipID == DOC_ChipID_Doc2kTSOP) { - /* Remove this at your own peril. The hardware driver works but nothing prevents you from erasing bad blocks */ - printk(KERN_NOTICE "Refusing to drive DiskOnChip 2000 TSOP until Bad Block Table is correctly supported by INFTL\n"); - iounmap(docptr); - return; - } - docfound = 1; - mtd = kzalloc(sizeof(struct DiskOnChip) + sizeof(struct mtd_info), GFP_KERNEL); - if (!mtd) { - printk(KERN_WARNING "Cannot allocate memory for data structures. Dropping.\n"); - iounmap(docptr); - return; - } - - this = (struct DiskOnChip *)(&mtd[1]); - mtd->priv = this; - this->virtadr = docptr; - this->physadr = physadr; - this->ChipID = ChipID; - sprintf(namebuf, "with ChipID %2.2X", ChipID); - - switch(ChipID) { - case DOC_ChipID_Doc2kTSOP: - name="2000 TSOP"; - initroutine = symbol_request(DoC2k_init); - break; - - case DOC_ChipID_Doc2k: - name="2000"; - initroutine = symbol_request(DoC2k_init); - break; - - case DOC_ChipID_DocMil: - name="Millennium"; -#ifdef DOC_SINGLE_DRIVER - initroutine = symbol_request(DoC2k_init); -#else - initroutine = symbol_request(DoCMil_init); -#endif /* DOC_SINGLE_DRIVER */ - break; - - case DOC_ChipID_DocMilPlus16: - case DOC_ChipID_DocMilPlus32: - name="MillenniumPlus"; - initroutine = symbol_request(DoCMilPlus_init); - break; - } - - if (initroutine) { - (*initroutine)(mtd); - symbol_put_addr(initroutine); - return; - } - printk(KERN_NOTICE "Cannot find driver for DiskOnChip %s at 0x%lX\n", name, physadr); - kfree(mtd); - } - iounmap(docptr); -} - - -/**************************************************************************** - * - * Module stuff - * - ****************************************************************************/ - -static int __init init_doc(void) -{ - int i; - - if (doc_config_location) { - printk(KERN_INFO "Using configured DiskOnChip probe address 0x%lx\n", doc_config_location); - DoC_Probe(doc_config_location); - } else { - for (i=0; (doc_locations[i] != 0xffffffff); i++) { - DoC_Probe(doc_locations[i]); - } - } - /* No banner message any more. Print a message if no DiskOnChip - found, so the user knows we at least tried. 
*/ - if (!docfound) - printk(KERN_INFO "No recognised DiskOnChip devices found\n"); - return -EAGAIN; -} - -module_init(init_doc); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("David Woodhouse "); -MODULE_DESCRIPTION("Probe code for DiskOnChip 2000 and Millennium devices"); - diff --git a/drivers/mtd/devices/elm.c b/drivers/mtd/devices/elm.c index 2ec5da9..dccef9f 100644 --- a/drivers/mtd/devices/elm.c +++ b/drivers/mtd/devices/elm.c @@ -81,14 +81,21 @@ static u32 elm_read_reg(struct elm_info *info, int offset) * @dev: ELM device * @bch_type: Type of BCH ecc */ -void elm_config(struct device *dev, enum bch_ecc bch_type) +int elm_config(struct device *dev, enum bch_ecc bch_type) { u32 reg_val; struct elm_info *info = dev_get_drvdata(dev); + if (!info) { + dev_err(dev, "Unable to configure elm - device not probed?\n"); + return -ENODEV; + } + reg_val = (bch_type & ECC_BCH_LEVEL_MASK) | (ELM_ECC_SIZE << 16); elm_write_reg(info, ELM_LOCATION_CONFIG, reg_val); info->bch_type = bch_type; + + return 0; } EXPORT_SYMBOL(elm_config); diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index 5b6b072..2f3d2a5 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c @@ -681,6 +681,7 @@ struct flash_info { u16 flags; #define SECT_4K 0x01 /* OPCODE_BE_4K works uniformly */ #define M25P_NO_ERASE 0x02 /* No erase command needed */ +#define SST_WRITE 0x04 /* use SST byte programming */ }; #define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \ @@ -728,6 +729,7 @@ static const struct spi_device_id m25p_ids[] = { { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) }, { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) }, { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) }, + { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) }, /* Everspin */ { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2) }, @@ -740,7 +742,6 @@ static const struct spi_device_id m25p_ids[] = { { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) }, { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) }, { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) }, - { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, 0) }, /* Macronix */ { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) }, @@ -753,8 +754,10 @@ static const struct spi_device_id m25p_ids[] = { { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) }, { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) }, { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) }, + { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, 0) }, /* Micron */ + { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, 0) }, { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, 0) }, { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, 0) }, { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) }, @@ -781,14 +784,15 @@ static const struct spi_device_id m25p_ids[] = { { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) }, /* SST -- large erase sizes are "overlays", "sectors" are 4K */ - { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K) }, - { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K) }, - { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K) }, - { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K) }, - { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K) }, - { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K) }, - { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K) }, - { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K) }, + { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) }, + { "sst25vf080b", 
INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) }, + { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) }, + { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) }, + { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) }, + { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K | SST_WRITE) }, + { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K | SST_WRITE) }, + { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K | SST_WRITE) }, + { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) }, /* ST Microelectronics -- newer production may have feature updates */ { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) }, @@ -838,6 +842,7 @@ static const struct spi_device_id m25p_ids[] = { { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) }, { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) }, { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) }, + { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) }, { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K) }, /* Catalyst / On Semiconductor -- non-JEDEC */ @@ -1000,7 +1005,7 @@ static int m25p_probe(struct spi_device *spi) } /* sst flash chips use AAI word program */ - if (JEDEC_MFR(info->jedec_id) == CFI_MFR_SST) + if (info->flags & SST_WRITE) flash->mtd._write = sst_write; else flash->mtd._write = m25p80_write; diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c index 945c9f76..28779b6 100644 --- a/drivers/mtd/devices/mtd_dataflash.c +++ b/drivers/mtd/devices/mtd_dataflash.c @@ -105,8 +105,6 @@ static const struct of_device_id dataflash_dt_ids[] = { { .compatible = "atmel,dataflash", }, { /* sentinel */ } }; -#else -#define dataflash_dt_ids NULL #endif /* ......................................................................... */ @@ -914,7 +912,7 @@ static struct spi_driver dataflash_driver = { .driver = { .name = "mtd_dataflash", .owner = THIS_MODULE, - .of_match_table = dataflash_dt_ids, + .of_match_table = of_match_ptr(dataflash_dt_ids), }, .probe = dataflash_probe, diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig index 3ed17c4..bed9d58 100644 --- a/drivers/mtd/maps/Kconfig +++ b/drivers/mtd/maps/Kconfig @@ -249,22 +249,6 @@ config MTD_LANTIQ help Support for NOR flash attached to the Lantiq SoC's External Bus Unit. -config MTD_DILNETPC - tristate "CFI Flash device mapped on DIL/Net PC" - depends on X86 && MTD_CFI_INTELEXT && BROKEN - help - MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP". - For details, see - and - -config MTD_DILNETPC_BOOTSIZE - hex "Size of DIL/Net PC flash boot partition" - depends on MTD_DILNETPC - default "0x80000" - help - The amount of space taken up by the kernel or Etherboot - on the DIL/Net PC flash chips. - config MTD_L440GX tristate "BIOS flash chip on Intel L440GX boards" depends on X86 && MTD_JEDECPROBE @@ -274,42 +258,6 @@ config MTD_L440GX BE VERY CAREFUL. -config MTD_TQM8XXL - tristate "CFI Flash device mapped on TQM8XXL" - depends on MTD_CFI && TQM8xxL - help - The TQM8xxL PowerPC board has up to two banks of CFI-compliant - chips, currently uses AMD one. This 'mapping' driver supports - that arrangement, allowing the CFI probe and command set driver - code to communicate with the chips on the TQM8xxL board. More at - . - -config MTD_RPXLITE - tristate "CFI Flash device mapped on RPX Lite or CLLF" - depends on MTD_CFI && (RPXCLASSIC || RPXLITE) - help - The RPXLite PowerPC board has CFI-compliant chips mapped in - a strange sparse mapping. 
This 'mapping' driver supports that - arrangement, allowing the CFI probe and command set driver code - to communicate with the chips on the RPXLite board. More at - . - -config MTD_MBX860 - tristate "System flash on MBX860 board" - depends on MTD_CFI && MBX - help - This enables access routines for the flash chips on the Motorola - MBX860 board. If you have one of these boards and would like - to use the flash chips on it, say 'Y'. - -config MTD_DBOX2 - tristate "CFI Flash device mapped on D-Box2" - depends on DBOX2 && MTD_CFI_INTELSTD && MTD_CFI_INTELEXT && MTD_CFI_AMDSTD - help - This enables access routines for the flash chips on the Nokia/Sagem - D-Box 2 board. If you have one of these boards and would like to use - the flash chips on it, say 'Y'. - config MTD_CFI_FLAGADM tristate "CFI Flash device mapping on FlagaDM" depends on 8xx && MTD_CFI @@ -349,15 +297,6 @@ config MTD_IXP4XX IXDP425 and Coyote. If you have an IXP4xx based board and would like to use the flash chips on it, say 'Y'. -config MTD_IXP2000 - tristate "CFI Flash device mapped on Intel IXP2000 based systems" - depends on MTD_CFI && MTD_COMPLEX_MAPPINGS && ARCH_IXP2000 - help - This enables MTD access to flash devices on platforms based - on Intel's IXP2000 family of network processors. If you have an - IXP2000 based board and would like to use the flash chips on it, - say 'Y'. - config MTD_AUTCPU12 bool "NV-RAM mapping AUTCPU12 board" depends on ARCH_AUTCPU12 @@ -372,13 +311,6 @@ config MTD_IMPA7 This enables access to the NOR Flash on the impA7 board of implementa GmbH. If you have such a board, say 'Y' here. -config MTD_H720X - tristate "Hynix evaluation board mappings" - depends on MTD_CFI && ( ARCH_H7201 || ARCH_H7202 ) - help - This enables access to the flash chips on the Hynix evaluation boards. - If you have such a board, say 'Y'. - # This needs CFI or JEDEC, depending on the cards found. config MTD_PCI tristate "PCI MTD driver" @@ -419,7 +351,7 @@ config MTD_BFIN_ASYNC config MTD_GPIO_ADDR tristate "GPIO-assisted Flash Chip Support" - depends on GENERIC_GPIO || GPIOLIB + depends on GPIOLIB depends on MTD_COMPLEX_MAPPINGS help Map driver which allows flashes to be partially physically addressed @@ -433,15 +365,6 @@ config MTD_UCLINUX help Map driver to support image based filesystems for uClinux. -config MTD_DMV182 - tristate "Map driver for Dy-4 SVME/DMV-182 board." - depends on DMV182 - select MTD_MAP_BANK_WIDTH_32 - select MTD_CFI_I8 - select MTD_CFI_AMDSTD - help - Map driver for Dy-4 SVME/DMV-182 board. 
- config MTD_INTEL_VR_NOR tristate "NOR flash on Intel Vermilion Range Expansion Bus CS0" depends on PCI diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile index 4ded287..395a124 100644 --- a/drivers/mtd/maps/Makefile +++ b/drivers/mtd/maps/Makefile @@ -9,7 +9,6 @@ endif # Chip mappings obj-$(CONFIG_MTD_CFI_FLAGADM) += cfi_flagadm.o obj-$(CONFIG_MTD_DC21285) += dc21285.o -obj-$(CONFIG_MTD_DILNETPC) += dilnetpc.o obj-$(CONFIG_MTD_L440GX) += l440gx.o obj-$(CONFIG_MTD_AMD76XROM) += amd76xrom.o obj-$(CONFIG_MTD_ESB2ROM) += esb2rom.o @@ -17,15 +16,12 @@ obj-$(CONFIG_MTD_ICHXROM) += ichxrom.o obj-$(CONFIG_MTD_CK804XROM) += ck804xrom.o obj-$(CONFIG_MTD_TSUNAMI) += tsunami_flash.o obj-$(CONFIG_MTD_PXA2XX) += pxa2xx-flash.o -obj-$(CONFIG_MTD_MBX860) += mbx860.o obj-$(CONFIG_MTD_OCTAGON) += octagon-5066.o obj-$(CONFIG_MTD_PHYSMAP) += physmap.o obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o obj-$(CONFIG_MTD_PISMO) += pismo.o obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o -obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o -obj-$(CONFIG_MTD_TQM8XXL) += tqm8xxl.o obj-$(CONFIG_MTD_SA1100) += sa1100-flash.o obj-$(CONFIG_MTD_SBC_GXX) += sbc_gxx.o obj-$(CONFIG_MTD_SC520CDP) += sc520cdp.o @@ -34,7 +30,6 @@ obj-$(CONFIG_MTD_TS5500) += ts5500_flash.o obj-$(CONFIG_MTD_SUN_UFLASH) += sun_uflash.o obj-$(CONFIG_MTD_VMAX) += vmax301.o obj-$(CONFIG_MTD_SCx200_DOCFLASH)+= scx200_docflash.o -obj-$(CONFIG_MTD_DBOX2) += dbox2-flash.o obj-$(CONFIG_MTD_SOLUTIONENGINE)+= solutionengine.o obj-$(CONFIG_MTD_PCI) += pci.o obj-$(CONFIG_MTD_AUTCPU12) += autcpu12-nvram.o @@ -42,10 +37,7 @@ obj-$(CONFIG_MTD_IMPA7) += impa7.o obj-$(CONFIG_MTD_UCLINUX) += uclinux.o obj-$(CONFIG_MTD_NETtel) += nettel.o obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o -obj-$(CONFIG_MTD_H720X) += h720x-flash.o obj-$(CONFIG_MTD_IXP4XX) += ixp4xx.o -obj-$(CONFIG_MTD_IXP2000) += ixp2000.o -obj-$(CONFIG_MTD_DMV182) += dmv182.o obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o obj-$(CONFIG_MTD_BFIN_ASYNC) += bfin-async-flash.o diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c index f833edf..319b04a 100644 --- a/drivers/mtd/maps/bfin-async-flash.c +++ b/drivers/mtd/maps/bfin-async-flash.c @@ -122,7 +122,8 @@ static void bfin_flash_copy_to(struct map_info *map, unsigned long to, const voi switch_back(state); } -static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL }; +static const char * const part_probe_types[] = { + "cmdlinepart", "RedBoot", NULL }; static int bfin_flash_probe(struct platform_device *pdev) { diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c index 586a1c7..0455166 100644 --- a/drivers/mtd/maps/ck804xrom.c +++ b/drivers/mtd/maps/ck804xrom.c @@ -308,8 +308,7 @@ static int ck804xrom_init_one(struct pci_dev *pdev, out: /* Free any left over map structures */ - if (map) - kfree(map); + kfree(map); /* See if I have any map structures */ if (list_empty(&window->maps)) { diff --git a/drivers/mtd/maps/dbox2-flash.c b/drivers/mtd/maps/dbox2-flash.c deleted file mode 100644 index 85bdece..0000000 --- a/drivers/mtd/maps/dbox2-flash.c +++ /dev/null @@ -1,123 +0,0 @@ -/* - * D-Box 2 flash driver - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* partition_info gives details on the logical partitions that the split the - * single flash device into. If the size if zero we use up to the end of the - * device. 
*/ -static struct mtd_partition partition_info[]= { - { - .name = "BR bootloader", - .size = 128 * 1024, - .offset = 0, - .mask_flags = MTD_WRITEABLE - }, - { - .name = "FLFS (U-Boot)", - .size = 128 * 1024, - .offset = MTDPART_OFS_APPEND, - .mask_flags = 0 - }, - { - .name = "Root (SquashFS)", - .size = 7040 * 1024, - .offset = MTDPART_OFS_APPEND, - .mask_flags = 0 - }, - { - .name = "var (JFFS2)", - .size = 896 * 1024, - .offset = MTDPART_OFS_APPEND, - .mask_flags = 0 - }, - { - .name = "Flash without bootloader", - .size = MTDPART_SIZ_FULL, - .offset = 128 * 1024, - .mask_flags = 0 - }, - { - .name = "Complete Flash", - .size = MTDPART_SIZ_FULL, - .offset = 0, - .mask_flags = MTD_WRITEABLE - } -}; - -#define NUM_PARTITIONS ARRAY_SIZE(partition_info) - -#define WINDOW_ADDR 0x10000000 -#define WINDOW_SIZE 0x800000 - -static struct mtd_info *mymtd; - - -struct map_info dbox2_flash_map = { - .name = "D-Box 2 flash memory", - .size = WINDOW_SIZE, - .bankwidth = 4, - .phys = WINDOW_ADDR, -}; - -static int __init init_dbox2_flash(void) -{ - printk(KERN_NOTICE "D-Box 2 flash driver (size->0x%X mem->0x%X)\n", WINDOW_SIZE, WINDOW_ADDR); - dbox2_flash_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE); - - if (!dbox2_flash_map.virt) { - printk("Failed to ioremap\n"); - return -EIO; - } - simple_map_init(&dbox2_flash_map); - - // Probe for dual Intel 28F320 or dual AMD - mymtd = do_map_probe("cfi_probe", &dbox2_flash_map); - if (!mymtd) { - // Probe for single Intel 28F640 - dbox2_flash_map.bankwidth = 2; - - mymtd = do_map_probe("cfi_probe", &dbox2_flash_map); - } - - if (mymtd) { - mymtd->owner = THIS_MODULE; - - /* Create MTD devices for each partition. */ - mtd_device_register(mymtd, partition_info, NUM_PARTITIONS); - - return 0; - } - - iounmap((void *)dbox2_flash_map.virt); - return -ENXIO; -} - -static void __exit cleanup_dbox2_flash(void) -{ - if (mymtd) { - mtd_device_unregister(mymtd); - map_destroy(mymtd); - } - if (dbox2_flash_map.virt) { - iounmap((void *)dbox2_flash_map.virt); - dbox2_flash_map.virt = 0; - } -} - -module_init(init_dbox2_flash); -module_exit(cleanup_dbox2_flash); - - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Kári Davíðsson , Bastian Blank , Alexander Wild "); -MODULE_DESCRIPTION("MTD map driver for D-Box 2 board"); diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c index 080f060..f8a7dd1 100644 --- a/drivers/mtd/maps/dc21285.c +++ b/drivers/mtd/maps/dc21285.c @@ -143,9 +143,8 @@ static struct map_info dc21285_map = { .copy_from = dc21285_copy_from, }; - /* Partition stuff */ -static const char *probes[] = { "RedBoot", "cmdlinepart", NULL }; +static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL }; static int __init init_dc21285(void) { diff --git a/drivers/mtd/maps/dilnetpc.c b/drivers/mtd/maps/dilnetpc.c deleted file mode 100644 index 3e393f0..0000000 --- a/drivers/mtd/maps/dilnetpc.c +++ /dev/null @@ -1,496 +0,0 @@ -/* dilnetpc.c -- MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP" - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA - * - * The DIL/Net PC is a tiny embedded PC board made by SSV Embedded Systems - * featuring the AMD Elan SC410 processor. There are two variants of this - * board: DNP/1486 and ADNP/1486. The DNP version has 2 megs of flash - * ROM (Intel 28F016S3) and 8 megs of DRAM, the ADNP version has 4 megs - * flash and 16 megs of RAM. - * For details, see http://www.ssv-embedded.de/ssv/pc104/p169.htm - * and http://www.ssv-embedded.de/ssv/pc104/p170.htm - */ - -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include - -/* -** The DIL/NetPC keeps its BIOS in two distinct flash blocks. -** Destroying any of these blocks transforms the DNPC into -** a paperweight (albeit not a very useful one, considering -** it only weighs a few grams). -** -** Therefore, the BIOS blocks must never be erased or written to -** except by people who know exactly what they are doing (e.g. -** to install a BIOS update). These partitions are marked read-only -** by default, but can be made read/write by undefining -** DNPC_BIOS_BLOCKS_WRITEPROTECTED: -*/ -#define DNPC_BIOS_BLOCKS_WRITEPROTECTED - -/* -** The ID string (in ROM) is checked to determine whether we -** are running on a DNP/1486 or ADNP/1486 -*/ -#define BIOSID_BASE 0x000fe100 - -#define ID_DNPC "DNP1486" -#define ID_ADNP "ADNP1486" - -/* -** Address where the flash should appear in CPU space -*/ -#define FLASH_BASE 0x2000000 - -/* -** Chip Setup and Control (CSC) indexed register space -*/ -#define CSC_INDEX 0x22 -#define CSC_DATA 0x23 - -#define CSC_MMSWAR 0x30 /* MMS window C-F attributes register */ -#define CSC_MMSWDSR 0x31 /* MMS window C-F device select register */ - -#define CSC_RBWR 0xa7 /* GPIO Read-Back/Write Register B */ - -#define CSC_CR 0xd0 /* internal I/O device disable/Echo */ - /* Z-bus/configuration register */ - -#define CSC_PCCMDCR 0xf1 /* PC card mode and DMA control register */ - - -/* -** PC Card indexed register space: -*/ - -#define PCC_INDEX 0x3e0 -#define PCC_DATA 0x3e1 - -#define PCC_AWER_B 0x46 /* Socket B Address Window enable register */ -#define PCC_MWSAR_1_Lo 0x58 /* memory window 1 start address low register */ -#define PCC_MWSAR_1_Hi 0x59 /* memory window 1 start address high register */ -#define PCC_MWEAR_1_Lo 0x5A /* memory window 1 stop address low register */ -#define PCC_MWEAR_1_Hi 0x5B /* memory window 1 stop address high register */ -#define PCC_MWAOR_1_Lo 0x5C /* memory window 1 address offset low register */ -#define PCC_MWAOR_1_Hi 0x5D /* memory window 1 address offset high register */ - - -/* -** Access to SC4x0's Chip Setup and Control (CSC) -** and PC Card (PCC) indexed registers: -*/ -static inline void setcsc(int reg, unsigned char data) -{ - outb(reg, CSC_INDEX); - outb(data, CSC_DATA); -} - -static inline unsigned char getcsc(int reg) -{ - outb(reg, CSC_INDEX); - return(inb(CSC_DATA)); -} - -static inline void setpcc(int reg, unsigned char data) -{ - outb(reg, PCC_INDEX); - outb(data, PCC_DATA); -} - -static inline unsigned char getpcc(int reg) -{ - outb(reg, PCC_INDEX); - return(inb(PCC_DATA)); -} - - -/* -************************************************************ -** Enable access to DIL/NetPC's flash by mapping it into -** the SC4x0's MMS Window C. 
-************************************************************ -*/ -static void dnpc_map_flash(unsigned long flash_base, unsigned long flash_size) -{ - unsigned long flash_end = flash_base + flash_size - 1; - - /* - ** enable setup of MMS windows C-F: - */ - /* - enable PC Card indexed register space */ - setcsc(CSC_CR, getcsc(CSC_CR) | 0x2); - /* - set PC Card controller to operate in standard mode */ - setcsc(CSC_PCCMDCR, getcsc(CSC_PCCMDCR) & ~1); - - /* - ** Program base address and end address of window - ** where the flash ROM should appear in CPU address space - */ - setpcc(PCC_MWSAR_1_Lo, (flash_base >> 12) & 0xff); - setpcc(PCC_MWSAR_1_Hi, (flash_base >> 20) & 0x3f); - setpcc(PCC_MWEAR_1_Lo, (flash_end >> 12) & 0xff); - setpcc(PCC_MWEAR_1_Hi, (flash_end >> 20) & 0x3f); - - /* program offset of first flash location to appear in this window (0) */ - setpcc(PCC_MWAOR_1_Lo, ((0 - flash_base) >> 12) & 0xff); - setpcc(PCC_MWAOR_1_Hi, ((0 - flash_base)>> 20) & 0x3f); - - /* set attributes for MMS window C: non-cacheable, write-enabled */ - setcsc(CSC_MMSWAR, getcsc(CSC_MMSWAR) & ~0x11); - - /* select physical device ROMCS0 (i.e. flash) for MMS Window C */ - setcsc(CSC_MMSWDSR, getcsc(CSC_MMSWDSR) & ~0x03); - - /* enable memory window 1 */ - setpcc(PCC_AWER_B, getpcc(PCC_AWER_B) | 0x02); - - /* now disable PC Card indexed register space again */ - setcsc(CSC_CR, getcsc(CSC_CR) & ~0x2); -} - - -/* -************************************************************ -** Disable access to DIL/NetPC's flash by mapping it into -** the SC4x0's MMS Window C. -************************************************************ -*/ -static void dnpc_unmap_flash(void) -{ - /* - enable PC Card indexed register space */ - setcsc(CSC_CR, getcsc(CSC_CR) | 0x2); - - /* disable memory window 1 */ - setpcc(PCC_AWER_B, getpcc(PCC_AWER_B) & ~0x02); - - /* now disable PC Card indexed register space again */ - setcsc(CSC_CR, getcsc(CSC_CR) & ~0x2); -} - - - -/* -************************************************************ -** Enable/Disable VPP to write to flash -************************************************************ -*/ - -static DEFINE_SPINLOCK(dnpc_spin); -static int vpp_counter = 0; -/* -** This is what has to be done for the DNP board .. -*/ -static void dnp_set_vpp(struct map_info *not_used, int on) -{ - spin_lock_irq(&dnpc_spin); - - if (on) - { - if(++vpp_counter == 1) - setcsc(CSC_RBWR, getcsc(CSC_RBWR) & ~0x4); - } - else - { - if(--vpp_counter == 0) - setcsc(CSC_RBWR, getcsc(CSC_RBWR) | 0x4); - else - BUG_ON(vpp_counter < 0); - } - spin_unlock_irq(&dnpc_spin); -} - -/* -** .. and this the ADNP version: -*/ -static void adnp_set_vpp(struct map_info *not_used, int on) -{ - spin_lock_irq(&dnpc_spin); - - if (on) - { - if(++vpp_counter == 1) - setcsc(CSC_RBWR, getcsc(CSC_RBWR) & ~0x8); - } - else - { - if(--vpp_counter == 0) - setcsc(CSC_RBWR, getcsc(CSC_RBWR) | 0x8); - else - BUG_ON(vpp_counter < 0); - } - spin_unlock_irq(&dnpc_spin); -} - - - -#define DNP_WINDOW_SIZE 0x00200000 /* DNP flash size is 2MiB */ -#define ADNP_WINDOW_SIZE 0x00400000 /* ADNP flash size is 4MiB */ -#define WINDOW_ADDR FLASH_BASE - -static struct map_info dnpc_map = { - .name = "ADNP Flash Bank", - .size = ADNP_WINDOW_SIZE, - .bankwidth = 1, - .set_vpp = adnp_set_vpp, - .phys = WINDOW_ADDR -}; - -/* -** The layout of the flash is somewhat "strange": -** -** 1. 960 KiB (15 blocks) : Space for ROM Bootloader and user data -** 2. 64 KiB (1 block) : System BIOS -** 3. 960 KiB (15 blocks) : User Data (DNP model) or -** 3. 
3008 KiB (47 blocks) : User Data (ADNP model) -** 4. 64 KiB (1 block) : System BIOS Entry -*/ - -static struct mtd_partition partition_info[]= -{ - { - .name = "ADNP boot", - .offset = 0, - .size = 0xf0000, - }, - { - .name = "ADNP system BIOS", - .offset = MTDPART_OFS_NXTBLK, - .size = 0x10000, -#ifdef DNPC_BIOS_BLOCKS_WRITEPROTECTED - .mask_flags = MTD_WRITEABLE, -#endif - }, - { - .name = "ADNP file system", - .offset = MTDPART_OFS_NXTBLK, - .size = 0x2f0000, - }, - { - .name = "ADNP system BIOS entry", - .offset = MTDPART_OFS_NXTBLK, - .size = MTDPART_SIZ_FULL, -#ifdef DNPC_BIOS_BLOCKS_WRITEPROTECTED - .mask_flags = MTD_WRITEABLE, -#endif - }, -}; - -#define NUM_PARTITIONS ARRAY_SIZE(partition_info) - -static struct mtd_info *mymtd; -static struct mtd_info *lowlvl_parts[NUM_PARTITIONS]; -static struct mtd_info *merged_mtd; - -/* -** "Highlevel" partition info: -** -** Using the MTD concat layer, we can re-arrange partitions to our -** liking: we construct a virtual MTD device by concatenating the -** partitions, specifying the sequence such that the boot block -** is immediately followed by the filesystem block (i.e. the stupid -** system BIOS block is mapped to a different place). When re-partitioning -** this concatenated MTD device, we can set the boot block size to -** an arbitrary (though erase block aligned) value i.e. not one that -** is dictated by the flash's physical layout. We can thus set the -** boot block to be e.g. 64 KB (which is fully sufficient if we want -** to boot an etherboot image) or to -say- 1.5 MB if we want to boot -** a large kernel image. In all cases, the remainder of the flash -** is available as file system space. -*/ - -static struct mtd_partition higlvl_partition_info[]= -{ - { - .name = "ADNP boot block", - .offset = 0, - .size = CONFIG_MTD_DILNETPC_BOOTSIZE, - }, - { - .name = "ADNP file system space", - .offset = MTDPART_OFS_NXTBLK, - .size = ADNP_WINDOW_SIZE-CONFIG_MTD_DILNETPC_BOOTSIZE-0x20000, - }, - { - .name = "ADNP system BIOS + BIOS Entry", - .offset = MTDPART_OFS_NXTBLK, - .size = MTDPART_SIZ_FULL, -#ifdef DNPC_BIOS_BLOCKS_WRITEPROTECTED - .mask_flags = MTD_WRITEABLE, -#endif - }, -}; - -#define NUM_HIGHLVL_PARTITIONS ARRAY_SIZE(higlvl_partition_info) - - -static int dnp_adnp_probe(void) -{ - char *biosid, rc = -1; - - biosid = (char*)ioremap(BIOSID_BASE, 16); - if(biosid) - { - if(!strcmp(biosid, ID_DNPC)) - rc = 1; /* this is a DNPC */ - else if(!strcmp(biosid, ID_ADNP)) - rc = 0; /* this is a ADNPC */ - } - iounmap((void *)biosid); - return(rc); -} - - -static int __init init_dnpc(void) -{ - int is_dnp; - - /* - ** determine hardware (DNP/ADNP/invalid) - */ - if((is_dnp = dnp_adnp_probe()) < 0) - return -ENXIO; - - /* - ** Things are set up for ADNP by default - ** -> modify all that needs to be different for DNP - */ - if(is_dnp) - { /* - ** Adjust window size, select correct set_vpp function. - ** The partitioning scheme is identical on both DNP - ** and ADNP except for the size of the third partition. - */ - int i; - dnpc_map.size = DNP_WINDOW_SIZE; - dnpc_map.set_vpp = dnp_set_vpp; - partition_info[2].size = 0xf0000; - - /* - ** increment all string pointers so the leading 'A' gets skipped, - ** thus turning all occurrences of "ADNP ..." into "DNP ..." 
- */ - ++dnpc_map.name; - for(i = 0; i < NUM_PARTITIONS; i++) - ++partition_info[i].name; - higlvl_partition_info[1].size = DNP_WINDOW_SIZE - - CONFIG_MTD_DILNETPC_BOOTSIZE - 0x20000; - for(i = 0; i < NUM_HIGHLVL_PARTITIONS; i++) - ++higlvl_partition_info[i].name; - } - - printk(KERN_NOTICE "DIL/Net %s flash: 0x%lx at 0x%llx\n", - is_dnp ? "DNPC" : "ADNP", dnpc_map.size, (unsigned long long)dnpc_map.phys); - - dnpc_map.virt = ioremap_nocache(dnpc_map.phys, dnpc_map.size); - - dnpc_map_flash(dnpc_map.phys, dnpc_map.size); - - if (!dnpc_map.virt) { - printk("Failed to ioremap_nocache\n"); - return -EIO; - } - simple_map_init(&dnpc_map); - - printk("FLASH virtual address: 0x%p\n", dnpc_map.virt); - - mymtd = do_map_probe("jedec_probe", &dnpc_map); - - if (!mymtd) - mymtd = do_map_probe("cfi_probe", &dnpc_map); - - /* - ** If flash probes fail, try to make flashes accessible - ** at least as ROM. Ajust erasesize in this case since - ** the default one (128M) will break our partitioning - */ - if (!mymtd) - if((mymtd = do_map_probe("map_rom", &dnpc_map))) - mymtd->erasesize = 0x10000; - - if (!mymtd) { - iounmap(dnpc_map.virt); - return -ENXIO; - } - - mymtd->owner = THIS_MODULE; - - /* - ** Supply pointers to lowlvl_parts[] array to add_mtd_partitions() - ** -> add_mtd_partitions() will _not_ register MTD devices for - ** the partitions, but will instead store pointers to the MTD - ** objects it creates into our lowlvl_parts[] array. - ** NOTE: we arrange the pointers such that the sequence of the - ** partitions gets re-arranged: partition #2 follows - ** partition #0. - */ - partition_info[0].mtdp = &lowlvl_parts[0]; - partition_info[1].mtdp = &lowlvl_parts[2]; - partition_info[2].mtdp = &lowlvl_parts[1]; - partition_info[3].mtdp = &lowlvl_parts[3]; - - mtd_device_register(mymtd, partition_info, NUM_PARTITIONS); - - /* - ** now create a virtual MTD device by concatenating the for partitions - ** (in the sequence given by the lowlvl_parts[] array. - */ - merged_mtd = mtd_concat_create(lowlvl_parts, NUM_PARTITIONS, "(A)DNP Flash Concatenated"); - if(merged_mtd) - { /* - ** now partition the new device the way we want it. This time, - ** we do not supply mtd pointers in higlvl_partition_info, so - ** add_mtd_partitions() will register the devices. - */ - mtd_device_register(merged_mtd, higlvl_partition_info, - NUM_HIGHLVL_PARTITIONS); - } - - return 0; -} - -static void __exit cleanup_dnpc(void) -{ - if(merged_mtd) { - mtd_device_unregister(merged_mtd); - mtd_concat_destroy(merged_mtd); - } - - if (mymtd) { - mtd_device_unregister(mymtd); - map_destroy(mymtd); - } - if (dnpc_map.virt) { - iounmap(dnpc_map.virt); - dnpc_unmap_flash(); - dnpc_map.virt = NULL; - } -} - -module_init(init_dnpc); -module_exit(cleanup_dnpc); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Sysgo Real-Time Solutions GmbH"); -MODULE_DESCRIPTION("MTD map driver for SSV DIL/NetPC DNP & ADNP"); diff --git a/drivers/mtd/maps/dmv182.c b/drivers/mtd/maps/dmv182.c deleted file mode 100644 index 6538ac6..0000000 --- a/drivers/mtd/maps/dmv182.c +++ /dev/null @@ -1,146 +0,0 @@ - -/* - * drivers/mtd/maps/dmv182.c - * - * Flash map driver for the Dy4 SVME182 board - * - * Copyright 2003-2004, TimeSys Corporation - * - * Based on the SVME181 flash map, by Tom Nelson, Dot4, Inc. for TimeSys Corp. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * This driver currently handles only the 16MiB user flash bank 1 on the - * board. It does not provide access to bank 0 (contains the Dy4 FFW), bank 2 - * (VxWorks boot), or the optional 48MiB expansion flash. - * - * scott.wood@timesys.com: On the newer boards with 128MiB flash, it - * now supports the first 96MiB (the boot flash bank containing FFW - * is excluded). The VxWorks loader is in partition 1. - */ - -#define FLASH_BASE_ADDR 0xf0000000 -#define FLASH_BANK_SIZE (128*1024*1024) - -MODULE_AUTHOR("Scott Wood, TimeSys Corporation "); -MODULE_DESCRIPTION("User-programmable flash device on the Dy4 SVME182 board"); -MODULE_LICENSE("GPL"); - -static struct map_info svme182_map = { - .name = "Dy4 SVME182", - .bankwidth = 32, - .size = 128 * 1024 * 1024 -}; - -#define BOOTIMAGE_PART_SIZE ((6*1024*1024)-RESERVED_PART_SIZE) - -// Allow 6MiB for the kernel -#define NEW_BOOTIMAGE_PART_SIZE (6 * 1024 * 1024) -// Allow 1MiB for the bootloader -#define NEW_BOOTLOADER_PART_SIZE (1024 * 1024) -// Use the remaining 9MiB at the end of flash for the RFS -#define NEW_RFS_PART_SIZE (0x01000000 - NEW_BOOTLOADER_PART_SIZE - \ - NEW_BOOTIMAGE_PART_SIZE) - -static struct mtd_partition svme182_partitions[] = { - // The Lower PABS is only 128KiB, but the partition code doesn't - // like partitions that don't end on the largest erase block - // size of the device, even if all of the erase blocks in the - // partition are small ones. The hardware should prevent - // writes to the actual PABS areas. 
- { - name: "Lower PABS and CPU 0 bootloader or kernel", - size: 6*1024*1024, - offset: 0, - }, - { - name: "Root Filesystem", - size: 10*1024*1024, - offset: MTDPART_OFS_NXTBLK - }, - { - name: "CPU1 Bootloader", - size: 1024*1024, - offset: MTDPART_OFS_NXTBLK, - }, - { - name: "Extra", - size: 110*1024*1024, - offset: MTDPART_OFS_NXTBLK - }, - { - name: "Foundation Firmware and Upper PABS", - size: 1024*1024, - offset: MTDPART_OFS_NXTBLK, - mask_flags: MTD_WRITEABLE // read-only - } -}; - -static struct mtd_info *this_mtd; - -static int __init init_svme182(void) -{ - struct mtd_partition *partitions; - int num_parts = ARRAY_SIZE(svme182_partitions); - - partitions = svme182_partitions; - - svme182_map.virt = ioremap(FLASH_BASE_ADDR, svme182_map.size); - - if (svme182_map.virt == 0) { - printk("Failed to ioremap FLASH memory area.\n"); - return -EIO; - } - - simple_map_init(&svme182_map); - - this_mtd = do_map_probe("cfi_probe", &svme182_map); - if (!this_mtd) - { - iounmap((void *)svme182_map.virt); - return -ENXIO; - } - - printk(KERN_NOTICE "SVME182 flash device: %dMiB at 0x%08x\n", - this_mtd->size >> 20, FLASH_BASE_ADDR); - - this_mtd->owner = THIS_MODULE; - mtd_device_register(this_mtd, partitions, num_parts); - - return 0; -} - -static void __exit cleanup_svme182(void) -{ - if (this_mtd) - { - mtd_device_unregister(this_mtd); - map_destroy(this_mtd); - } - - if (svme182_map.virt) - { - iounmap((void *)svme182_map.virt); - svme182_map.virt = 0; - } - - return; -} - -module_init(init_svme182); -module_exit(cleanup_svme182); diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c index 7b643de..5ede282 100644 --- a/drivers/mtd/maps/gpio-addr-flash.c +++ b/drivers/mtd/maps/gpio-addr-flash.c @@ -157,7 +157,8 @@ static void gf_copy_to(struct map_info *map, unsigned long to, memcpy_toio(map->virt + (to % state->win_size), from, len); } -static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL }; +static const char * const part_probe_types[] = { + "cmdlinepart", "RedBoot", NULL }; /** * gpio_flash_probe() - setup a mapping for a GPIO assisted flash diff --git a/drivers/mtd/maps/h720x-flash.c b/drivers/mtd/maps/h720x-flash.c deleted file mode 100644 index 8ed6cb4..0000000 --- a/drivers/mtd/maps/h720x-flash.c +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Flash memory access on Hynix GMS30C7201/HMS30C7202 based - * evaluation boards - * - * (C) 2002 Jungjun Kim - * 2003 Thomas Gleixner - */ - -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -static struct mtd_info *mymtd; - -static struct map_info h720x_map = { - .name = "H720X", - .bankwidth = 4, - .size = H720X_FLASH_SIZE, - .phys = H720X_FLASH_PHYS, -}; - -static struct mtd_partition h720x_partitions[] = { - { - .name = "ArMon", - .size = 0x00080000, - .offset = 0, - .mask_flags = MTD_WRITEABLE - },{ - .name = "Env", - .size = 0x00040000, - .offset = 0x00080000, - .mask_flags = MTD_WRITEABLE - },{ - .name = "Kernel", - .size = 0x00180000, - .offset = 0x000c0000, - .mask_flags = MTD_WRITEABLE - },{ - .name = "Ramdisk", - .size = 0x00400000, - .offset = 0x00240000, - .mask_flags = MTD_WRITEABLE - },{ - .name = "jffs2", - .size = MTDPART_SIZ_FULL, - .offset = MTDPART_OFS_APPEND - } -}; - -#define NUM_PARTITIONS ARRAY_SIZE(h720x_partitions) - -/* - * Initialize FLASH support - */ -static int __init h720x_mtd_init(void) -{ - h720x_map.virt = ioremap(h720x_map.phys, h720x_map.size); - - if (!h720x_map.virt) { - printk(KERN_ERR "H720x-MTD: 
ioremap failed\n"); - return -EIO; - } - - simple_map_init(&h720x_map); - - // Probe for flash bankwidth 4 - printk (KERN_INFO "H720x-MTD probing 32bit FLASH\n"); - mymtd = do_map_probe("cfi_probe", &h720x_map); - if (!mymtd) { - printk (KERN_INFO "H720x-MTD probing 16bit FLASH\n"); - // Probe for bankwidth 2 - h720x_map.bankwidth = 2; - mymtd = do_map_probe("cfi_probe", &h720x_map); - } - - if (mymtd) { - mymtd->owner = THIS_MODULE; - - mtd_device_parse_register(mymtd, NULL, NULL, - h720x_partitions, NUM_PARTITIONS); - return 0; - } - - iounmap((void *)h720x_map.virt); - return -ENXIO; -} - -/* - * Cleanup - */ -static void __exit h720x_mtd_cleanup(void) -{ - - if (mymtd) { - mtd_device_unregister(mymtd); - map_destroy(mymtd); - } - - if (h720x_map.virt) { - iounmap((void *)h720x_map.virt); - h720x_map.virt = 0; - } -} - - -module_init(h720x_mtd_init); -module_exit(h720x_mtd_cleanup); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Thomas Gleixner "); -MODULE_DESCRIPTION("MTD map driver for Hynix evaluation boards"); diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c index 834a06c..4968674 100644 --- a/drivers/mtd/maps/impa7.c +++ b/drivers/mtd/maps/impa7.c @@ -24,14 +24,12 @@ #define NUM_FLASHBANKS 2 #define BUSWIDTH 4 -/* can be { "cfi_probe", "jedec_probe", "map_rom", NULL } */ -#define PROBETYPES { "jedec_probe", NULL } - #define MSG_PREFIX "impA7:" /* prefix for our printk()'s */ #define MTDID "impa7-%d" /* for mtdparts= partitioning */ static struct mtd_info *impa7_mtd[NUM_FLASHBANKS]; +static const char * const rom_probe_types[] = { "jedec_probe", NULL }; static struct map_info impa7_map[NUM_FLASHBANKS] = { { @@ -60,8 +58,7 @@ static struct mtd_partition partitions[] = static int __init init_impa7(void) { - static const char *rom_probe_types[] = PROBETYPES; - const char **type; + const char * const *type; int i; static struct { u_long addr; u_long size; } pt[NUM_FLASHBANKS] = { { WINDOW_ADDR0, WINDOW_SIZE0 }, diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c index b14053b..f581ac1 100644 --- a/drivers/mtd/maps/intel_vr_nor.c +++ b/drivers/mtd/maps/intel_vr_nor.c @@ -82,9 +82,9 @@ static void vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p) static int vr_nor_mtd_setup(struct vr_nor_mtd *p) { - static const char *probe_types[] = + static const char * const probe_types[] = { "cfi_probe", "jedec_probe", NULL }; - const char **type; + const char * const *type; for (type = probe_types; !p->info && *type; type++) p->info = do_map_probe(*type, &p->map); diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c deleted file mode 100644 index 4a41ced..0000000 --- a/drivers/mtd/maps/ixp2000.c +++ /dev/null @@ -1,253 +0,0 @@ -/* - * drivers/mtd/maps/ixp2000.c - * - * Mapping for the Intel XScale IXP2000 based systems - * - * Copyright (C) 2002 Intel Corp. - * Copyright (C) 2003-2004 MontaVista Software, Inc. - * - * Original Author: Naeem M Afzal - * Maintainer: Deepak Saxena - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include -#include -#include - -#include - -struct ixp2000_flash_info { - struct mtd_info *mtd; - struct map_info map; - struct resource *res; -}; - -static inline unsigned long flash_bank_setup(struct map_info *map, unsigned long ofs) -{ - unsigned long (*set_bank)(unsigned long) = - (unsigned long(*)(unsigned long))map->map_priv_2; - - return (set_bank ? set_bank(ofs) : ofs); -} - -#ifdef __ARMEB__ -/* - * Rev A0 and A1 of IXP2400 silicon have a broken addressing unit which - * causes the lower address bits to be XORed with 0x11 on 8 bit accesses - * and XORed with 0x10 on 16 bit accesses. See the spec update, erratum 44. - */ -static int erratum44_workaround = 0; - -static inline unsigned long address_fix8_write(unsigned long addr) -{ - if (erratum44_workaround) { - return (addr ^ 3); - } - return addr; -} -#else - -#define address_fix8_write(x) (x) -#endif - -static map_word ixp2000_flash_read8(struct map_info *map, unsigned long ofs) -{ - map_word val; - - val.x[0] = *((u8 *)(map->map_priv_1 + flash_bank_setup(map, ofs))); - return val; -} - -/* - * We can't use the standard memcpy due to the broken SlowPort - * address translation on rev A0 and A1 silicon and the fact that - * we have banked flash. - */ -static void ixp2000_flash_copy_from(struct map_info *map, void *to, - unsigned long from, ssize_t len) -{ - from = flash_bank_setup(map, from); - while(len--) - *(__u8 *) to++ = *(__u8 *)(map->map_priv_1 + from++); -} - -static void ixp2000_flash_write8(struct map_info *map, map_word d, unsigned long ofs) -{ - *(__u8 *) (address_fix8_write(map->map_priv_1 + - flash_bank_setup(map, ofs))) = d.x[0]; -} - -static void ixp2000_flash_copy_to(struct map_info *map, unsigned long to, - const void *from, ssize_t len) -{ - to = flash_bank_setup(map, to); - while(len--) { - unsigned long tmp = address_fix8_write(map->map_priv_1 + to++); - *(__u8 *)(tmp) = *(__u8 *)(from++); - } -} - - -static int ixp2000_flash_remove(struct platform_device *dev) -{ - struct flash_platform_data *plat = dev->dev.platform_data; - struct ixp2000_flash_info *info = platform_get_drvdata(dev); - - platform_set_drvdata(dev, NULL); - - if(!info) - return 0; - - if (info->mtd) { - mtd_device_unregister(info->mtd); - map_destroy(info->mtd); - } - if (info->map.map_priv_1) - iounmap((void *) info->map.map_priv_1); - - if (info->res) { - release_resource(info->res); - kfree(info->res); - } - - if (plat->exit) - plat->exit(); - - return 0; -} - - -static int ixp2000_flash_probe(struct platform_device *dev) -{ - static const char *probes[] = { "RedBoot", "cmdlinepart", NULL }; - struct ixp2000_flash_data *ixp_data = dev->dev.platform_data; - struct flash_platform_data *plat; - struct ixp2000_flash_info *info; - unsigned long window_size; - int err = -1; - - if (!ixp_data) - return -ENODEV; - - plat = ixp_data->platform_data; - if (!plat) - return -ENODEV; - - window_size = resource_size(dev->resource); - dev_info(&dev->dev, "Probe of IXP2000 flash(%d banks x %dMiB)\n", - ixp_data->nr_banks, ((u32)window_size >> 20)); - - if (plat->width != 1) { - dev_err(&dev->dev, "IXP2000 MTD map only supports 8-bit mode, asking for %d\n", - plat->width * 8); - return -EIO; - } - - info = kzalloc(sizeof(struct ixp2000_flash_info), GFP_KERNEL); - if(!info) { - err = -ENOMEM; - goto Error; - } - - platform_set_drvdata(dev, info); - - /* - * Tell the MTD layer we're not 1:1 mapped so that it does - * not 
attempt to do a direct access on us. - */ - info->map.phys = NO_XIP; - - info->map.size = ixp_data->nr_banks * window_size; - info->map.bankwidth = 1; - - /* - * map_priv_2 is used to store a ptr to the bank_setup routine - */ - info->map.map_priv_2 = (unsigned long) ixp_data->bank_setup; - - info->map.name = dev_name(&dev->dev); - info->map.read = ixp2000_flash_read8; - info->map.write = ixp2000_flash_write8; - info->map.copy_from = ixp2000_flash_copy_from; - info->map.copy_to = ixp2000_flash_copy_to; - - info->res = request_mem_region(dev->resource->start, - resource_size(dev->resource), - dev_name(&dev->dev)); - if (!info->res) { - dev_err(&dev->dev, "Could not reserve memory region\n"); - err = -ENOMEM; - goto Error; - } - - info->map.map_priv_1 = - (unsigned long)ioremap(dev->resource->start, - resource_size(dev->resource)); - if (!info->map.map_priv_1) { - dev_err(&dev->dev, "Failed to ioremap flash region\n"); - err = -EIO; - goto Error; - } - -#if defined(__ARMEB__) - /* - * Enable erratum 44 workaround for NPUs with broken slowport - */ - - erratum44_workaround = ixp2000_has_broken_slowport(); - dev_info(&dev->dev, "Erratum 44 workaround %s\n", - erratum44_workaround ? "enabled" : "disabled"); -#endif - - info->mtd = do_map_probe(plat->map_name, &info->map); - if (!info->mtd) { - dev_err(&dev->dev, "map_probe failed\n"); - err = -ENXIO; - goto Error; - } - info->mtd->owner = THIS_MODULE; - - err = mtd_device_parse_register(info->mtd, probes, NULL, NULL, 0); - if (err) - goto Error; - - return 0; - -Error: - ixp2000_flash_remove(dev); - return err; -} - -static struct platform_driver ixp2000_flash_driver = { - .probe = ixp2000_flash_probe, - .remove = ixp2000_flash_remove, - .driver = { - .name = "IXP2000-Flash", - .owner = THIS_MODULE, - }, -}; - -module_platform_driver(ixp2000_flash_driver); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Deepak Saxena "); -MODULE_ALIAS("platform:IXP2000-Flash"); diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c index e864fc6..52b3410 100644 --- a/drivers/mtd/maps/ixp4xx.c +++ b/drivers/mtd/maps/ixp4xx.c @@ -148,7 +148,7 @@ struct ixp4xx_flash_info { struct resource *res; }; -static const char *probes[] = { "RedBoot", "cmdlinepart", NULL }; +static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL }; static int ixp4xx_flash_remove(struct platform_device *dev) { diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c index d1da6ed..d7ac65d 100644 --- a/drivers/mtd/maps/lantiq-flash.c +++ b/drivers/mtd/maps/lantiq-flash.c @@ -46,8 +46,7 @@ struct ltq_mtd { }; static const char ltq_map_name[] = "ltq_nor"; -static const char *ltq_probe_types[] = { - "cmdlinepart", "ofpart", NULL }; +static const char * const ltq_probe_types[] = { "cmdlinepart", "ofpart", NULL }; static map_word ltq_read16(struct map_info *map, unsigned long adr) diff --git a/drivers/mtd/maps/mbx860.c b/drivers/mtd/maps/mbx860.c deleted file mode 100644 index 93fa56c..0000000 --- a/drivers/mtd/maps/mbx860.c +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Handle mapping of the flash on MBX860 boards - * - * Author: Anton Todorov - * Copyright: (C) 2001 Emness Technology - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - */ - -#include -#include -#include -#include -#include -#include -#include -#include - - -#define WINDOW_ADDR 0xfe000000 -#define WINDOW_SIZE 0x00200000 - -/* Flash / Partition sizing */ -#define MAX_SIZE_KiB 8192 -#define BOOT_PARTITION_SIZE_KiB 512 -#define KERNEL_PARTITION_SIZE_KiB 5632 -#define APP_PARTITION_SIZE_KiB 2048 - -#define NUM_PARTITIONS 3 - -/* partition_info gives details on the logical partitions that the split the - * single flash device into. If the size if zero we use up to the end of the - * device. */ -static struct mtd_partition partition_info[]={ - { .name = "MBX flash BOOT partition", - .offset = 0, - .size = BOOT_PARTITION_SIZE_KiB*1024 }, - { .name = "MBX flash DATA partition", - .offset = BOOT_PARTITION_SIZE_KiB*1024, - .size = (KERNEL_PARTITION_SIZE_KiB)*1024 }, - { .name = "MBX flash APPLICATION partition", - .offset = (BOOT_PARTITION_SIZE_KiB+KERNEL_PARTITION_SIZE_KiB)*1024 } -}; - - -static struct mtd_info *mymtd; - -struct map_info mbx_map = { - .name = "MBX flash", - .size = WINDOW_SIZE, - .phys = WINDOW_ADDR, - .bankwidth = 4, -}; - -static int __init init_mbx(void) -{ - printk(KERN_NOTICE "Motorola MBX flash device: 0x%x at 0x%x\n", WINDOW_SIZE*4, WINDOW_ADDR); - mbx_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE * 4); - - if (!mbx_map.virt) { - printk("Failed to ioremap\n"); - return -EIO; - } - simple_map_init(&mbx_map); - - mymtd = do_map_probe("jedec_probe", &mbx_map); - if (mymtd) { - mymtd->owner = THIS_MODULE; - mtd_device_register(mymtd, NULL, 0); - mtd_device_register(mymtd, partition_info, NUM_PARTITIONS); - return 0; - } - - iounmap((void *)mbx_map.virt); - return -ENXIO; -} - -static void __exit cleanup_mbx(void) -{ - if (mymtd) { - mtd_device_unregister(mymtd); - map_destroy(mymtd); - } - if (mbx_map.virt) { - iounmap((void *)mbx_map.virt); - mbx_map.virt = 0; - } -} - -module_init(init_mbx); -module_exit(cleanup_mbx); - -MODULE_AUTHOR("Anton Todorov "); -MODULE_DESCRIPTION("MTD map driver for Motorola MBX860 board"); -MODULE_LICENSE("GPL"); diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c index c3aebd5..c2604f8 100644 --- a/drivers/mtd/maps/pci.c +++ b/drivers/mtd/maps/pci.c @@ -283,8 +283,7 @@ static int mtd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) if (err) goto release; - /* tsk - do_map_probe should take const char * */ - mtd = do_map_probe((char *)info->map_name, &map->map); + mtd = do_map_probe(info->map_name, &map->map); err = -ENODEV; if (!mtd) goto release; diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c index 21b0b71..e7a592c 100644 --- a/drivers/mtd/maps/physmap.c +++ b/drivers/mtd/maps/physmap.c @@ -87,21 +87,18 @@ static void physmap_set_vpp(struct map_info *map, int state) spin_unlock_irqrestore(&info->vpp_lock, flags); } -static const char *rom_probe_types[] = { - "cfi_probe", - "jedec_probe", - "qinfo_probe", - "map_rom", - NULL }; -static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", "afs", - NULL }; +static const char * const rom_probe_types[] = { + "cfi_probe", "jedec_probe", "qinfo_probe", "map_rom", NULL }; + +static const char * const part_probe_types[] = { + "cmdlinepart", "RedBoot", "afs", NULL }; static int physmap_flash_probe(struct platform_device *dev) { struct physmap_flash_data *physmap_data; struct physmap_flash_info *info; - const char **probe_type; - const char **part_types; + const char * const *probe_type; + const char * const *part_types; int err = 0; int i; int devices_found = 0; diff --git a/drivers/mtd/maps/physmap_of.c 
b/drivers/mtd/maps/physmap_of.c index 363939d..d111097 100644 --- a/drivers/mtd/maps/physmap_of.c +++ b/drivers/mtd/maps/physmap_of.c @@ -71,6 +71,9 @@ static int of_flash_remove(struct platform_device *dev) return 0; } +static const char * const rom_probe_types[] = { + "cfi_probe", "jedec_probe", "map_rom" }; + /* Helper function to handle probing of the obsolete "direct-mapped" * compatible binding, which has an extra "probe-type" property * describing the type of flash probe necessary. */ @@ -80,8 +83,6 @@ static struct mtd_info *obsolete_probe(struct platform_device *dev, struct device_node *dp = dev->dev.of_node; const char *of_probe; struct mtd_info *mtd; - static const char *rom_probe_types[] - = { "cfi_probe", "jedec_probe", "map_rom"}; int i; dev_warn(&dev->dev, "Device tree uses obsolete \"direct-mapped\" " @@ -111,9 +112,10 @@ static struct mtd_info *obsolete_probe(struct platform_device *dev, specifies the list of partition probers to use. If none is given then the default is use. These take precedence over other device tree information. */ -static const char *part_probe_types_def[] = { "cmdlinepart", "RedBoot", - "ofpart", "ofoldpart", NULL }; -static const char **of_get_probes(struct device_node *dp) +static const char * const part_probe_types_def[] = { + "cmdlinepart", "RedBoot", "ofpart", "ofoldpart", NULL }; + +static const char * const *of_get_probes(struct device_node *dp) { const char *cp; int cplen; @@ -142,7 +144,7 @@ static const char **of_get_probes(struct device_node *dp) return res; } -static void of_free_probes(const char **probes) +static void of_free_probes(const char * const *probes) { if (probes != part_probe_types_def) kfree(probes); @@ -151,7 +153,7 @@ static void of_free_probes(const char **probes) static struct of_device_id of_flash_match[]; static int of_flash_probe(struct platform_device *dev) { - const char **part_probe_types; + const char * const *part_probe_types; const struct of_device_id *match; struct device_node *dp = dev->dev.of_node; struct resource res; diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c index 2de66b0..71fdda2 100644 --- a/drivers/mtd/maps/plat-ram.c +++ b/drivers/mtd/maps/plat-ram.c @@ -199,7 +199,7 @@ static int platram_probe(struct platform_device *pdev) * supplied by the platform_data struct */ if (pdata->map_probes) { - const char **map_probes = pdata->map_probes; + const char * const *map_probes = pdata->map_probes; for ( ; !info->mtd && *map_probes; map_probes++) info->mtd = do_map_probe(*map_probes , &info->map); diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c index 43e3dbb..acb1dbc 100644 --- a/drivers/mtd/maps/pxa2xx-flash.c +++ b/drivers/mtd/maps/pxa2xx-flash.c @@ -45,9 +45,7 @@ struct pxa2xx_flash_info { struct map_info map; }; - -static const char *probes[] = { "RedBoot", "cmdlinepart", NULL }; - +static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL }; static int pxa2xx_flash_probe(struct platform_device *pdev) { diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c index 49c3fe7..ac02fbf 100644 --- a/drivers/mtd/maps/rbtx4939-flash.c +++ b/drivers/mtd/maps/rbtx4939-flash.c @@ -45,14 +45,15 @@ static int rbtx4939_flash_remove(struct platform_device *dev) return 0; } -static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL }; +static const char * const rom_probe_types[] = { + "cfi_probe", "jedec_probe", NULL }; static int rbtx4939_flash_probe(struct platform_device *dev) { struct rbtx4939_flash_data 
*pdata; struct rbtx4939_flash_info *info; struct resource *res; - const char **probe_type; + const char * const *probe_type; int err = 0; unsigned long size; diff --git a/drivers/mtd/maps/rpxlite.c b/drivers/mtd/maps/rpxlite.c deleted file mode 100644 index ed88225..0000000 --- a/drivers/mtd/maps/rpxlite.c +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Handle mapping of the flash on the RPX Lite and CLLF boards - */ - -#include -#include -#include -#include -#include -#include -#include - - -#define WINDOW_ADDR 0xfe000000 -#define WINDOW_SIZE 0x800000 - -static struct mtd_info *mymtd; - -static struct map_info rpxlite_map = { - .name = "RPX", - .size = WINDOW_SIZE, - .bankwidth = 4, - .phys = WINDOW_ADDR, -}; - -static int __init init_rpxlite(void) -{ - printk(KERN_NOTICE "RPX Lite or CLLF flash device: %x at %x\n", WINDOW_SIZE*4, WINDOW_ADDR); - rpxlite_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE * 4); - - if (!rpxlite_map.virt) { - printk("Failed to ioremap\n"); - return -EIO; - } - simple_map_init(&rpxlite_map); - mymtd = do_map_probe("cfi_probe", &rpxlite_map); - if (mymtd) { - mymtd->owner = THIS_MODULE; - mtd_device_register(mymtd, NULL, 0); - return 0; - } - - iounmap((void *)rpxlite_map.virt); - return -ENXIO; -} - -static void __exit cleanup_rpxlite(void) -{ - if (mymtd) { - mtd_device_unregister(mymtd); - map_destroy(mymtd); - } - if (rpxlite_map.virt) { - iounmap((void *)rpxlite_map.virt); - rpxlite_map.virt = 0; - } -} - -module_init(init_rpxlite); -module_exit(cleanup_rpxlite); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Arnold Christensen "); -MODULE_DESCRIPTION("MTD map driver for RPX Lite and CLLF boards"); diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c index f694417..29e3dca 100644 --- a/drivers/mtd/maps/sa1100-flash.c +++ b/drivers/mtd/maps/sa1100-flash.c @@ -244,7 +244,7 @@ static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev, return ERR_PTR(ret); } -static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL }; +static const char * const part_probes[] = { "cmdlinepart", "RedBoot", NULL }; static int sa1100_mtd_probe(struct platform_device *pdev) { diff --git a/drivers/mtd/maps/solutionengine.c b/drivers/mtd/maps/solutionengine.c index 9d900ad..83a7a70 100644 --- a/drivers/mtd/maps/solutionengine.c +++ b/drivers/mtd/maps/solutionengine.c @@ -31,7 +31,7 @@ struct map_info soleng_flash_map = { .bankwidth = 4, }; -static const char *probes[] = { "RedBoot", "cmdlinepart", NULL }; +static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL }; #ifdef CONFIG_MTD_SUPERH_RESERVE static struct mtd_partition superh_se_partitions[] = { diff --git a/drivers/mtd/maps/tqm8xxl.c b/drivers/mtd/maps/tqm8xxl.c deleted file mode 100644 index d785879..0000000 --- a/drivers/mtd/maps/tqm8xxl.c +++ /dev/null @@ -1,249 +0,0 @@ -/* - * Handle mapping of the flash memory access routines - * on TQM8xxL based devices. - * - * based on rpxlite.c - * - * Copyright(C) 2001 Kirk Lee - * - * This code is GPLed - * - */ - -/* - * According to TQM8xxL hardware manual, TQM8xxL series have - * following flash memory organisations: - * | capacity | | chip type | | bank0 | | bank1 | - * 2MiB 512Kx16 2MiB 0 - * 4MiB 1Mx16 4MiB 0 - * 8MiB 1Mx16 4MiB 4MiB - * Thus, we choose CONFIG_MTD_CFI_I2 & CONFIG_MTD_CFI_B4 at - * kernel configuration. 
- */ -#include -#include -#include -#include -#include - -#include -#include -#include - -#include - -#define FLASH_ADDR 0x40000000 -#define FLASH_SIZE 0x00800000 -#define FLASH_BANK_MAX 4 - -// trivial struct to describe partition information -struct mtd_part_def -{ - int nums; - unsigned char *type; - struct mtd_partition* mtd_part; -}; - -//static struct mtd_info *mymtd; -static struct mtd_info* mtd_banks[FLASH_BANK_MAX]; -static struct map_info* map_banks[FLASH_BANK_MAX]; -static struct mtd_part_def part_banks[FLASH_BANK_MAX]; -static unsigned long num_banks; -static void __iomem *start_scan_addr; - -/* - * Here are partition information for all known TQM8xxL series devices. - * See include/linux/mtd/partitions.h for definition of the mtd_partition - * structure. - * - * The *_max_flash_size is the maximum possible mapped flash size which - * is not necessarily the actual flash size. It must correspond to the - * value specified in the mapping definition defined by the - * "struct map_desc *_io_desc" for the corresponding machine. - */ - -/* Currently, TQM8xxL has up to 8MiB flash */ -static unsigned long tqm8xxl_max_flash_size = 0x00800000; - -/* partition definition for first flash bank - * (cf. "drivers/char/flash_config.c") - */ -static struct mtd_partition tqm8xxl_partitions[] = { - { - .name = "ppcboot", - .offset = 0x00000000, - .size = 0x00020000, /* 128KB */ - .mask_flags = MTD_WRITEABLE, /* force read-only */ - }, - { - .name = "kernel", /* default kernel image */ - .offset = 0x00020000, - .size = 0x000e0000, - .mask_flags = MTD_WRITEABLE, /* force read-only */ - }, - { - .name = "user", - .offset = 0x00100000, - .size = 0x00100000, - }, - { - .name = "initrd", - .offset = 0x00200000, - .size = 0x00200000, - } -}; -/* partition definition for second flash bank */ -static struct mtd_partition tqm8xxl_fs_partitions[] = { - { - .name = "cramfs", - .offset = 0x00000000, - .size = 0x00200000, - }, - { - .name = "jffs", - .offset = 0x00200000, - .size = 0x00200000, - //.size = MTDPART_SIZ_FULL, - } -}; - -static int __init init_tqm_mtd(void) -{ - int idx = 0, ret = 0; - unsigned long flash_addr, flash_size, mtd_size = 0; - /* pointer to TQM8xxL board info data */ - bd_t *bd = (bd_t *)__res; - - flash_addr = bd->bi_flashstart; - flash_size = bd->bi_flashsize; - - //request maximum flash size address space - start_scan_addr = ioremap(flash_addr, flash_size); - if (!start_scan_addr) { - printk(KERN_WARNING "%s:Failed to ioremap address:0x%x\n", __func__, flash_addr); - return -EIO; - } - - for (idx = 0 ; idx < FLASH_BANK_MAX ; idx++) { - if(mtd_size >= flash_size) - break; - - printk(KERN_INFO "%s: chip probing count %d\n", __func__, idx); - - map_banks[idx] = kzalloc(sizeof(struct map_info), GFP_KERNEL); - if(map_banks[idx] == NULL) { - ret = -ENOMEM; - /* FIXME: What if some MTD devices were probed already? */ - goto error_mem; - } - - map_banks[idx]->name = kmalloc(16, GFP_KERNEL); - - if (!map_banks[idx]->name) { - ret = -ENOMEM; - /* FIXME: What if some MTD devices were probed already? */ - goto error_mem; - } - sprintf(map_banks[idx]->name, "TQM8xxL%d", idx); - - map_banks[idx]->size = flash_size; - map_banks[idx]->bankwidth = 4; - - simple_map_init(map_banks[idx]); - - map_banks[idx]->virt = start_scan_addr; - map_banks[idx]->phys = flash_addr; - /* FIXME: This looks utterly bogus, but I'm trying to - preserve the behaviour of the original (shown here)... - - map_banks[idx]->map_priv_1 = - start_scan_addr + ((idx > 0) ? - (mtd_banks[idx-1] ? 
mtd_banks[idx-1]->size : 0) : 0); - */ - - if (idx && mtd_banks[idx-1]) { - map_banks[idx]->virt += mtd_banks[idx-1]->size; - map_banks[idx]->phys += mtd_banks[idx-1]->size; - } - - //start to probe flash chips - mtd_banks[idx] = do_map_probe("cfi_probe", map_banks[idx]); - - if (mtd_banks[idx]) { - mtd_banks[idx]->owner = THIS_MODULE; - mtd_size += mtd_banks[idx]->size; - num_banks++; - - printk(KERN_INFO "%s: bank%d, name:%s, size:%dbytes \n", __func__, num_banks, - mtd_banks[idx]->name, mtd_banks[idx]->size); - } - } - - /* no supported flash chips found */ - if (!num_banks) { - printk(KERN_NOTICE "TQM8xxL: No support flash chips found!\n"); - ret = -ENXIO; - goto error_mem; - } - - /* - * Select Static partition definitions - */ - part_banks[0].mtd_part = tqm8xxl_partitions; - part_banks[0].type = "Static image"; - part_banks[0].nums = ARRAY_SIZE(tqm8xxl_partitions); - - part_banks[1].mtd_part = tqm8xxl_fs_partitions; - part_banks[1].type = "Static file system"; - part_banks[1].nums = ARRAY_SIZE(tqm8xxl_fs_partitions); - - for(idx = 0; idx < num_banks ; idx++) { - if (part_banks[idx].nums == 0) - printk(KERN_NOTICE "TQM flash%d: no partition info available, registering whole flash at once\n", idx); - else - printk(KERN_NOTICE "TQM flash%d: Using %s partition definition\n", - idx, part_banks[idx].type); - mtd_device_register(mtd_banks[idx], part_banks[idx].mtd_part, - part_banks[idx].nums); - } - return 0; -error_mem: - for(idx = 0 ; idx < FLASH_BANK_MAX ; idx++) { - if(map_banks[idx] != NULL) { - kfree(map_banks[idx]->name); - map_banks[idx]->name = NULL; - kfree(map_banks[idx]); - map_banks[idx] = NULL; - } - } -error: - iounmap(start_scan_addr); - return ret; -} - -static void __exit cleanup_tqm_mtd(void) -{ - unsigned int idx = 0; - for(idx = 0 ; idx < num_banks ; idx++) { - /* destroy mtd_info previously allocated */ - if (mtd_banks[idx]) { - mtd_device_unregister(mtd_banks[idx]); - map_destroy(mtd_banks[idx]); - } - /* release map_info not used anymore */ - kfree(map_banks[idx]->name); - kfree(map_banks[idx]); - } - - if (start_scan_addr) { - iounmap(start_scan_addr); - start_scan_addr = 0; - } -} - -module_init(init_tqm_mtd); -module_exit(cleanup_tqm_mtd); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Kirk Lee "); -MODULE_DESCRIPTION("MTD map driver for TQM8xxL boards"); diff --git a/drivers/mtd/maps/tsunami_flash.c b/drivers/mtd/maps/tsunami_flash.c index 1de390e..da2cdb5 100644 --- a/drivers/mtd/maps/tsunami_flash.c +++ b/drivers/mtd/maps/tsunami_flash.c @@ -82,11 +82,12 @@ static void __exit cleanup_tsunami_flash(void) tsunami_flash_mtd = 0; } +static const char * const rom_probe_types[] = { + "cfi_probe", "jedec_probe", "map_rom", NULL }; static int __init init_tsunami_flash(void) { - static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", "map_rom", NULL }; - char **type; + const char * const *type; tsunami_tig_writeb(FLASH_ENABLE_BYTE, FLASH_ENABLE_PORT); diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index dc571eb..c719879 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c @@ -38,6 +38,8 @@ #include +#include "mtdcore.h" + static DEFINE_MUTEX(mtd_mutex); /* @@ -365,37 +367,35 @@ static void mtdchar_erase_callback (struct erase_info *instr) wake_up((wait_queue_head_t *)instr->priv); } -#ifdef CONFIG_HAVE_MTD_OTP static int otp_select_filemode(struct mtd_file_info *mfi, int mode) { struct mtd_info *mtd = mfi->mtd; size_t retlen; - int ret = 0; - - /* - * Make a fake call to mtd_read_fact_prot_reg() to check if OTP - * operations are 
supported. - */ - if (mtd_read_fact_prot_reg(mtd, -1, 0, &retlen, NULL) == -EOPNOTSUPP) - return -EOPNOTSUPP; switch (mode) { case MTD_OTP_FACTORY: + if (mtd_read_fact_prot_reg(mtd, -1, 0, &retlen, NULL) == + -EOPNOTSUPP) + return -EOPNOTSUPP; + mfi->mode = MTD_FILE_MODE_OTP_FACTORY; break; case MTD_OTP_USER: + if (mtd_read_user_prot_reg(mtd, -1, 0, &retlen, NULL) == + -EOPNOTSUPP) + return -EOPNOTSUPP; + mfi->mode = MTD_FILE_MODE_OTP_USER; break; - default: - ret = -EINVAL; case MTD_OTP_OFF: + mfi->mode = MTD_FILE_MODE_NORMAL; break; + default: + return -EINVAL; } - return ret; + + return 0; } -#else -# define otp_select_filemode(f,m) -EOPNOTSUPP -#endif static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd, uint64_t start, uint32_t length, void __user *ptr, @@ -888,7 +888,6 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg) break; } -#ifdef CONFIG_HAVE_MTD_OTP case OTPSELECT: { int mode; @@ -944,7 +943,6 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg) ret = mtd_lock_user_prot_reg(mtd, oinfo.start, oinfo.length); break; } -#endif /* This ioctl is being deprecated - it truncates the ECC layout */ case ECCGETLAYOUT: @@ -1185,23 +1183,25 @@ static struct file_system_type mtd_inodefs_type = { }; MODULE_ALIAS_FS("mtd_inodefs"); -static int __init init_mtdchar(void) +int __init init_mtdchar(void) { int ret; ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd", &mtd_fops); if (ret < 0) { - pr_notice("Can't allocate major number %d for " - "Memory Technology Devices.\n", MTD_CHAR_MAJOR); + pr_err("Can't allocate major number %d for MTD\n", + MTD_CHAR_MAJOR); return ret; } ret = register_filesystem(&mtd_inodefs_type); if (ret) { - pr_notice("Can't register mtd_inodefs filesystem: %d\n", ret); + pr_err("Can't register mtd_inodefs filesystem, error %d\n", + ret); goto err_unregister_chdev; } + return ret; err_unregister_chdev: @@ -1209,18 +1209,10 @@ err_unregister_chdev: return ret; } -static void __exit cleanup_mtdchar(void) +void __exit cleanup_mtdchar(void) { unregister_filesystem(&mtd_inodefs_type); __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd"); } -module_init(init_mtdchar); -module_exit(cleanup_mtdchar); - -MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("David Woodhouse "); -MODULE_DESCRIPTION("Direct character-device access to MTD devices"); MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR); diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 322ca65..c400c57 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -42,6 +42,7 @@ #include #include "mtdcore.h" + /* * backing device capabilities for non-mappable devices (such as NAND flash) * - permits private mappings, copies are taken of the data @@ -97,11 +98,7 @@ EXPORT_SYMBOL_GPL(__mtd_next_device); static LIST_HEAD(mtd_notifiers); -#if defined(CONFIG_MTD_CHAR) || defined(CONFIG_MTD_CHAR_MODULE) #define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2) -#else -#define MTD_DEVT(index) 0 -#endif /* REVISIT once MTD uses the driver model better, whoever allocates * the mtd_info will probably want to use the release() hook... @@ -493,7 +490,7 @@ out_error: * * Returns zero in case of success and a negative error code in case of failure. 
*/ -int mtd_device_parse_register(struct mtd_info *mtd, const char **types, +int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types, struct mtd_part_parser_data *parser_data, const struct mtd_partition *parts, int nr_parts) @@ -1117,8 +1114,6 @@ EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to); /*====================================================================*/ /* Support for /proc/mtd */ -static struct proc_dir_entry *proc_mtd; - static int mtd_proc_show(struct seq_file *m, void *v) { struct mtd_info *mtd; @@ -1164,6 +1159,8 @@ static int __init mtd_bdi_init(struct backing_dev_info *bdi, const char *name) return ret; } +static struct proc_dir_entry *proc_mtd; + static int __init init_mtd(void) { int ret; @@ -1184,11 +1181,17 @@ static int __init init_mtd(void) if (ret) goto err_bdi3; -#ifdef CONFIG_PROC_FS proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops); -#endif /* CONFIG_PROC_FS */ + + ret = init_mtdchar(); + if (ret) + goto out_procfs; + return 0; +out_procfs: + if (proc_mtd) + remove_proc_entry("mtd", NULL); err_bdi3: bdi_destroy(&mtd_bdi_ro_mappable); err_bdi2: @@ -1202,10 +1205,9 @@ err_reg: static void __exit cleanup_mtd(void) { -#ifdef CONFIG_PROC_FS + cleanup_mtdchar(); if (proc_mtd) - remove_proc_entry( "mtd", NULL); -#endif /* CONFIG_PROC_FS */ + remove_proc_entry("mtd", NULL); class_unregister(&mtd_class); bdi_destroy(&mtd_bdi_unmappable); bdi_destroy(&mtd_bdi_ro_mappable); diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h index 961a384..7b03533 100644 --- a/drivers/mtd/mtdcore.h +++ b/drivers/mtd/mtdcore.h @@ -1,23 +1,21 @@ -/* linux/drivers/mtd/mtdcore.h - * - * Header file for driver private mtdcore exports - * +/* + * These are exported solely for the purpose of mtd_blkdevs.c and mtdchar.c. + * You should not use them for _anything_ else. */ -/* These are exported solely for the purpose of mtd_blkdevs.c. You - should not use them for _anything_ else */ - extern struct mutex mtd_table_mutex; -extern struct mtd_info *__mtd_next_device(int i); -extern int add_mtd_device(struct mtd_info *mtd); -extern int del_mtd_device(struct mtd_info *mtd); -extern int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, - int); -extern int del_mtd_partitions(struct mtd_info *); -extern int parse_mtd_partitions(struct mtd_info *master, const char **types, - struct mtd_partition **pparts, - struct mtd_part_parser_data *data); +struct mtd_info *__mtd_next_device(int i); +int add_mtd_device(struct mtd_info *mtd); +int del_mtd_device(struct mtd_info *mtd); +int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int); +int del_mtd_partitions(struct mtd_info *); +int parse_mtd_partitions(struct mtd_info *master, const char * const *types, + struct mtd_partition **pparts, + struct mtd_part_parser_data *data); + +int __init init_mtdchar(void); +void __exit cleanup_mtdchar(void); #define mtd_for_each_device(mtd) \ for ((mtd) = __mtd_next_device(0); \ diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index 70fa70a..3014933 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c @@ -694,7 +694,7 @@ EXPORT_SYMBOL_GPL(deregister_mtd_parser); * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you * are changing this array! 
*/ -static const char *default_mtd_part_types[] = { +static const char * const default_mtd_part_types[] = { "cmdlinepart", "ofpart", NULL @@ -720,7 +720,7 @@ static const char *default_mtd_part_types[] = { * o a positive number of found partitions, in which case on exit @pparts will * point to an array containing this number of &struct mtd_info objects. */ -int parse_mtd_partitions(struct mtd_info *master, const char **types, +int parse_mtd_partitions(struct mtd_info *master, const char *const *types, struct mtd_partition **pparts, struct mtd_part_parser_data *data) { diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 81bf5e5..a60f6c1 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -41,14 +41,6 @@ config MTD_SM_COMMON tristate default n -config MTD_NAND_MUSEUM_IDS - bool "Enable chip ids for obsolete ancient NAND devices" - default n - help - Enable this option only when your board has first generation - NAND chips (page size 256 byte, erase size 4-8KiB). The IDs - of these chips were reused by later, larger chips. - config MTD_NAND_DENALI tristate "Support Denali NAND controller" help @@ -81,15 +73,9 @@ config MTD_NAND_DENALI_SCRATCH_REG_ADDR scratch register here to enable this feature. On Intel Moorestown boards, the scratch register is at 0xFF108018. -config MTD_NAND_H1900 - tristate "iPAQ H1900 flash" - depends on ARCH_PXA && BROKEN - help - This enables the driver for the iPAQ h1900 flash. - config MTD_NAND_GPIO tristate "GPIO NAND Flash driver" - depends on GENERIC_GPIO && ARM + depends on GPIOLIB && ARM help This enables a GPIO based NAND flash driver. @@ -201,22 +187,6 @@ config MTD_NAND_BF5XX_BOOTROM_ECC If unsure, say N. -config MTD_NAND_RTC_FROM4 - tristate "Renesas Flash ROM 4-slot interface board (FROM_BOARD4)" - depends on SH_SOLUTION_ENGINE - select REED_SOLOMON - select REED_SOLOMON_DEC8 - select BITREVERSE - help - This enables the driver for the Renesas Technology AG-AND - flash interface board (FROM_BOARD4) - -config MTD_NAND_PPCHAMELEONEVB - tristate "NAND Flash device on PPChameleonEVB board" - depends on PPCHAMELEONEVB && BROKEN - help - This enables the NAND flash driver on the PPChameleon EVB Board. 
- config MTD_NAND_S3C2410 tristate "NAND Flash support for Samsung S3C SoCs" depends on ARCH_S3C24XX || ARCH_S3C64XX diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile index d76d912..bb81891 100644 --- a/drivers/mtd/nand/Makefile +++ b/drivers/mtd/nand/Makefile @@ -15,14 +15,11 @@ obj-$(CONFIG_MTD_NAND_DENALI_PCI) += denali_pci.o obj-$(CONFIG_MTD_NAND_DENALI_DT) += denali_dt.o obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o obj-$(CONFIG_MTD_NAND_BF5XX) += bf5xx_nand.o -obj-$(CONFIG_MTD_NAND_PPCHAMELEONEVB) += ppchameleonevb.o obj-$(CONFIG_MTD_NAND_S3C2410) += s3c2410.o obj-$(CONFIG_MTD_NAND_DAVINCI) += davinci_nand.o obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o obj-$(CONFIG_MTD_NAND_DOCG4) += docg4.o obj-$(CONFIG_MTD_NAND_FSMC) += fsmc_nand.o -obj-$(CONFIG_MTD_NAND_H1900) += h1910.o -obj-$(CONFIG_MTD_NAND_RTC_FROM4) += rtc_from4.o obj-$(CONFIG_MTD_NAND_SHARPSL) += sharpsl.o obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c index ffcbcca..2d23d29 100644 --- a/drivers/mtd/nand/atmel_nand.c +++ b/drivers/mtd/nand/atmel_nand.c @@ -1737,20 +1737,7 @@ static struct platform_driver atmel_nand_driver = { }, }; -static int __init atmel_nand_init(void) -{ - return platform_driver_probe(&atmel_nand_driver, atmel_nand_probe); -} - - -static void __exit atmel_nand_exit(void) -{ - platform_driver_unregister(&atmel_nand_driver); -} - - -module_init(atmel_nand_init); -module_exit(atmel_nand_exit); +module_platform_driver_probe(atmel_nand_driver, atmel_nand_probe); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Rick Bronson"); diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c index 4271e94..776df36 100644 --- a/drivers/mtd/nand/bf5xx_nand.c +++ b/drivers/mtd/nand/bf5xx_nand.c @@ -874,21 +874,7 @@ static struct platform_driver bf5xx_nand_driver = { }, }; -static int __init bf5xx_nand_init(void) -{ - printk(KERN_INFO "%s, Version %s (c) 2007 Analog Devices, Inc.\n", - DRV_DESC, DRV_VERSION); - - return platform_driver_register(&bf5xx_nand_driver); -} - -static void __exit bf5xx_nand_exit(void) -{ - platform_driver_unregister(&bf5xx_nand_driver); -} - -module_init(bf5xx_nand_init); -module_exit(bf5xx_nand_exit); +module_platform_driver(bf5xx_nand_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR(DRV_AUTHOR); diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c index 010d612..c34985a 100644 --- a/drivers/mtd/nand/cafe_nand.c +++ b/drivers/mtd/nand/cafe_nand.c @@ -303,13 +303,7 @@ static void cafe_nand_cmdfunc(struct mtd_info *mtd, unsigned command, case NAND_CMD_SEQIN: case NAND_CMD_RNDIN: case NAND_CMD_STATUS: - case NAND_CMD_DEPLETE1: case NAND_CMD_RNDOUT: - case NAND_CMD_STATUS_ERROR: - case NAND_CMD_STATUS_ERROR0: - case NAND_CMD_STATUS_ERROR1: - case NAND_CMD_STATUS_ERROR2: - case NAND_CMD_STATUS_ERROR3: cafe_writel(cafe, cafe->ctl2, NAND_CTRL2); return; } @@ -536,8 +530,8 @@ static int cafe_nand_write_page_lowlevel(struct mtd_info *mtd, } static int cafe_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, - const uint8_t *buf, int oob_required, int page, - int cached, int raw) + uint32_t offset, int data_len, const uint8_t *buf, + int oob_required, int page, int cached, int raw) { int status; diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c index 94e17af..c3e15a5 100644 --- a/drivers/mtd/nand/davinci_nand.c +++ b/drivers/mtd/nand/davinci_nand.c @@ -34,6 +34,7 @@ #include #include #include +#include 
#include #include @@ -577,7 +578,6 @@ static struct davinci_nand_pdata return pdev->dev.platform_data; } #else -#define davinci_nand_of_match NULL static struct davinci_nand_pdata *nand_davinci_get_pdata(struct platform_device *pdev) { @@ -878,22 +878,12 @@ static struct platform_driver nand_davinci_driver = { .driver = { .name = "davinci_nand", .owner = THIS_MODULE, - .of_match_table = davinci_nand_of_match, + .of_match_table = of_match_ptr(davinci_nand_of_match), }, }; MODULE_ALIAS("platform:davinci_nand"); -static int __init nand_davinci_init(void) -{ - return platform_driver_probe(&nand_davinci_driver, nand_davinci_probe); -} -module_init(nand_davinci_init); - -static void __exit nand_davinci_exit(void) -{ - platform_driver_unregister(&nand_davinci_driver); -} -module_exit(nand_davinci_exit); +module_platform_driver_probe(nand_davinci_driver, nand_davinci_probe); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Texas Instruments"); diff --git a/drivers/mtd/nand/denali_dt.c b/drivers/mtd/nand/denali_dt.c index 546f8cb..9253024 100644 --- a/drivers/mtd/nand/denali_dt.c +++ b/drivers/mtd/nand/denali_dt.c @@ -42,7 +42,7 @@ static void __iomem *request_and_map(struct device *dev, } ptr = devm_ioremap_nocache(dev, res->start, resource_size(res)); - if (!res) + if (!ptr) dev_err(dev, "ioremap_nocache of %s failed!", res->name); return ptr; @@ -90,7 +90,7 @@ static int denali_dt_probe(struct platform_device *ofdev) denali->irq = platform_get_irq(ofdev, 0); if (denali->irq < 0) { dev_err(&ofdev->dev, "no irq defined\n"); - return -ENXIO; + return denali->irq; } denali->flash_reg = request_and_map(&ofdev->dev, denali_reg); @@ -146,21 +146,11 @@ static struct platform_driver denali_dt_driver = { .driver = { .name = "denali-nand-dt", .owner = THIS_MODULE, - .of_match_table = of_match_ptr(denali_nand_dt_ids), + .of_match_table = denali_nand_dt_ids, }, }; -static int __init denali_init_dt(void) -{ - return platform_driver_register(&denali_dt_driver); -} -module_init(denali_init_dt); - -static void __exit denali_exit_dt(void) -{ - platform_driver_unregister(&denali_dt_driver); -} -module_exit(denali_exit_dt); +module_platform_driver(denali_dt_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jamie Iles"); diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c index 18fa448..fa25e7a 100644 --- a/drivers/mtd/nand/docg4.c +++ b/drivers/mtd/nand/docg4.c @@ -1397,18 +1397,7 @@ static struct platform_driver docg4_driver = { .remove = __exit_p(cleanup_docg4), }; -static int __init docg4_init(void) -{ - return platform_driver_probe(&docg4_driver, probe_docg4); -} - -static void __exit docg4_exit(void) -{ - platform_driver_unregister(&docg4_driver); -} - -module_init(docg4_init); -module_exit(docg4_exit); +module_platform_driver_probe(docg4_driver, probe_docg4); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mike Dunn"); diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c index 05ba3f0..911e243 100644 --- a/drivers/mtd/nand/fsmc_nand.c +++ b/drivers/mtd/nand/fsmc_nand.c @@ -1235,18 +1235,7 @@ static struct platform_driver fsmc_nand_driver = { }, }; -static int __init fsmc_nand_init(void) -{ - return platform_driver_probe(&fsmc_nand_driver, - fsmc_nand_probe); -} -module_init(fsmc_nand_init); - -static void __exit fsmc_nand_exit(void) -{ - platform_driver_unregister(&fsmc_nand_driver); -} -module_exit(fsmc_nand_exit); +module_platform_driver_probe(fsmc_nand_driver, fsmc_nand_probe); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Vipin Kumar , Ashish Priyadarshi"); diff --git a/drivers/mtd/nand/gpio.c 
b/drivers/mtd/nand/gpio.c index e789e3f..89065dd 100644 --- a/drivers/mtd/nand/gpio.c +++ b/drivers/mtd/nand/gpio.c @@ -190,7 +190,6 @@ static struct resource *gpio_nand_get_io_sync_of(struct platform_device *pdev) return r; } #else /* CONFIG_OF */ -#define gpio_nand_id_table NULL static inline int gpio_nand_get_config_of(const struct device *dev, struct gpio_nand_platdata *plat) { @@ -259,8 +258,6 @@ static int gpio_nand_remove(struct platform_device *dev) if (gpio_is_valid(gpiomtd->plat.gpio_rdy)) gpio_free(gpiomtd->plat.gpio_rdy); - kfree(gpiomtd); - return 0; } @@ -297,7 +294,7 @@ static int gpio_nand_probe(struct platform_device *dev) if (!res0) return -EINVAL; - gpiomtd = kzalloc(sizeof(*gpiomtd), GFP_KERNEL); + gpiomtd = devm_kzalloc(&dev->dev, sizeof(*gpiomtd), GFP_KERNEL); if (gpiomtd == NULL) { dev_err(&dev->dev, "failed to create NAND MTD\n"); return -ENOMEM; @@ -412,7 +409,6 @@ err_sync: iounmap(gpiomtd->nand_chip.IO_ADDR_R); release_mem_region(res0->start, resource_size(res0)); err_map: - kfree(gpiomtd); return ret; } @@ -421,7 +417,7 @@ static struct platform_driver gpio_nand_driver = { .remove = gpio_nand_remove, .driver = { .name = "gpio-nand", - .of_match_table = gpio_nand_id_table, + .of_match_table = of_match_ptr(gpio_nand_id_table), }, }; diff --git a/drivers/mtd/nand/h1910.c b/drivers/mtd/nand/h1910.c deleted file mode 100644 index 50166e9..0000000 --- a/drivers/mtd/nand/h1910.c +++ /dev/null @@ -1,167 +0,0 @@ -/* - * drivers/mtd/nand/h1910.c - * - * Copyright (C) 2003 Joshua Wise (joshua@joshuawise.com) - * - * Derived from drivers/mtd/nand/edb7312.c - * Copyright (C) 2002 Marius Gröger (mag@sysgo.de) - * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Overview: - * This is a device driver for the NAND flash device found on the - * iPAQ h1910 board which utilizes the Samsung K9F2808 part. This is - * a 128Mibit (16MiB x 8 bits) NAND flash device. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * MTD structure for EDB7312 board - */ -static struct mtd_info *h1910_nand_mtd = NULL; - -/* - * Module stuff - */ - -/* - * Define static partitions for flash device - */ -static struct mtd_partition partition_info[] = { - {name:"h1910 NAND Flash", - offset:0, - size:16 * 1024 * 1024} -}; - -#define NUM_PARTITIONS 1 - -/* - * hardware specific access to control-lines - * - * NAND_NCE: bit 0 - don't care - * NAND_CLE: bit 1 - address bit 2 - * NAND_ALE: bit 2 - address bit 3 - */ -static void h1910_hwcontrol(struct mtd_info *mtd, int cmd, - unsigned int ctrl) -{ - struct nand_chip *chip = mtd->priv; - - if (cmd != NAND_CMD_NONE) - writeb(cmd, chip->IO_ADDR_W | ((ctrl & 0x6) << 1)); -} - -/* - * read device ready pin - */ -#if 0 -static int h1910_device_ready(struct mtd_info *mtd) -{ - return (GPLR(55) & GPIO_bit(55)); -} -#endif - -/* - * Main initialization routine - */ -static int __init h1910_init(void) -{ - struct nand_chip *this; - void __iomem *nandaddr; - - if (!machine_is_h1900()) - return -ENODEV; - - nandaddr = ioremap(0x08000000, 0x1000); - if (!nandaddr) { - printk("Failed to ioremap nand flash.\n"); - return -ENOMEM; - } - - /* Allocate memory for MTD device structure and private data */ - h1910_nand_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL); - if (!h1910_nand_mtd) { - printk("Unable to allocate h1910 NAND MTD device structure.\n"); - iounmap((void *)nandaddr); - return -ENOMEM; - } - - /* Get pointer to private data */ - this = (struct nand_chip *)(&h1910_nand_mtd[1]); - - /* Initialize structures */ - memset(h1910_nand_mtd, 0, sizeof(struct mtd_info)); - memset(this, 0, sizeof(struct nand_chip)); - - /* Link the private data with the MTD structure */ - h1910_nand_mtd->priv = this; - h1910_nand_mtd->owner = THIS_MODULE; - - /* - * Enable VPEN - */ - GPSR(37) = GPIO_bit(37); - - /* insert callbacks */ - this->IO_ADDR_R = nandaddr; - this->IO_ADDR_W = nandaddr; - this->cmd_ctrl = h1910_hwcontrol; - this->dev_ready = NULL; /* unknown whether that was correct or not so we will just do it like this */ - /* 15 us command delay time */ - this->chip_delay = 50; - this->ecc.mode = NAND_ECC_SOFT; - - /* Scan to find existence of the device */ - if (nand_scan(h1910_nand_mtd, 1)) { - printk(KERN_NOTICE "No NAND device - returning -ENXIO\n"); - kfree(h1910_nand_mtd); - iounmap((void *)nandaddr); - return -ENXIO; - } - - /* Register the partitions */ - mtd_device_parse_register(h1910_nand_mtd, NULL, NULL, partition_info, - NUM_PARTITIONS); - - /* Return happy */ - return 0; -} - -module_init(h1910_init); - -/* - * Clean up routine - */ -static void __exit h1910_cleanup(void) -{ - struct nand_chip *this = (struct nand_chip *)&h1910_nand_mtd[1]; - - /* Release resources, unregister device */ - nand_release(h1910_nand_mtd); - - /* Release io resource */ - iounmap((void *)this->IO_ADDR_W); - - /* Free the MTD device structure */ - kfree(h1910_nand_mtd); -} - -module_exit(h1910_cleanup); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Joshua Wise "); -MODULE_DESCRIPTION("NAND flash driver for iPAQ h1910"); diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c index 0ca22ae..a94facb 100644 --- a/drivers/mtd/nand/lpc32xx_mlc.c +++ b/drivers/mtd/nand/lpc32xx_mlc.c @@ -540,8 +540,8 @@ static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd, } static int lpc32xx_write_page(struct mtd_info *mtd, struct nand_chip *chip, - 
const uint8_t *buf, int oob_required, int page, - int cached, int raw) + uint32_t offset, int data_len, const uint8_t *buf, + int oob_required, int page, int cached, int raw) { int res; diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 42c6392..dfcd0a5 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -4,7 +4,6 @@ * Overview: * This is the generic MTD driver for NAND flash devices. It should be * capable of working with almost all NAND chips currently available. - * Basic support for AG-AND chips is provided. * * Additional technical information is available on * http://www.linux-mtd.infradead.org/doc/nand.html @@ -22,8 +21,6 @@ * Enable cached programming for 2k page size chips * Check, if mtd->ecctype should be set to MTD_ECC_HW * if we have HW ECC support. - * The AG-AND chips have nice features for speed improvement, - * which are not supported yet. Read / program 4 pages in one go. * BBT table is not serialized, has to be fixed * * This program is free software; you can redistribute it and/or modify @@ -515,7 +512,7 @@ EXPORT_SYMBOL_GPL(nand_wait_ready); * @page_addr: the page address for this command, -1 if none * * Send command to NAND device. This function is used for small page devices - * (256/512 Bytes per page). + * (512 Bytes per page). */ static void nand_command(struct mtd_info *mtd, unsigned int command, int column, int page_addr) @@ -631,8 +628,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command, } /* Command latch cycle */ - chip->cmd_ctrl(mtd, command & 0xff, - NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE); + chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE); if (column != -1 || page_addr != -1) { int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE; @@ -671,16 +667,6 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command, case NAND_CMD_SEQIN: case NAND_CMD_RNDIN: case NAND_CMD_STATUS: - case NAND_CMD_DEPLETE1: - return; - - case NAND_CMD_STATUS_ERROR: - case NAND_CMD_STATUS_ERROR0: - case NAND_CMD_STATUS_ERROR1: - case NAND_CMD_STATUS_ERROR2: - case NAND_CMD_STATUS_ERROR3: - /* Read error status commands require only a short delay */ - udelay(chip->chip_delay); return; case NAND_CMD_RESET: @@ -836,10 +822,7 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip) */ ndelay(100); - if ((state == FL_ERASING) && (chip->options & NAND_IS_AND)) - chip->cmdfunc(mtd, NAND_CMD_STATUS_MULTI, -1, -1); - else - chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); + chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); if (in_interrupt() || oops_in_progress) panic_nand_wait(mtd, chip, timeo); @@ -1127,7 +1110,7 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, } /** - * nand_read_subpage - [REPLACEABLE] software ECC based sub-page read function + * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function * @mtd: mtd info structure * @chip: nand chip info structure * @data_offs: offset of requested data within the page @@ -1995,6 +1978,67 @@ static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, return 0; } + +/** + * nand_write_subpage_hwecc - [REPLACABLE] hardware ECC based subpage write + * @mtd: mtd info structure + * @chip: nand chip info structure + * @column: column address of subpage within the page + * @data_len: data length + * @oob_required: must write chip->oob_poi to OOB + */ +static int nand_write_subpage_hwecc(struct mtd_info *mtd, + struct nand_chip *chip, uint32_t offset, + uint32_t data_len, 
const uint8_t *data_buf, + int oob_required) +{ + uint8_t *oob_buf = chip->oob_poi; + uint8_t *ecc_calc = chip->buffers->ecccalc; + int ecc_size = chip->ecc.size; + int ecc_bytes = chip->ecc.bytes; + int ecc_steps = chip->ecc.steps; + uint32_t *eccpos = chip->ecc.layout->eccpos; + uint32_t start_step = offset / ecc_size; + uint32_t end_step = (offset + data_len - 1) / ecc_size; + int oob_bytes = mtd->oobsize / ecc_steps; + int step, i; + + for (step = 0; step < ecc_steps; step++) { + /* configure controller for WRITE access */ + chip->ecc.hwctl(mtd, NAND_ECC_WRITE); + + /* write data (untouched subpages already masked by 0xFF) */ + chip->write_buf(mtd, data_buf, ecc_size); + + /* mask ECC of un-touched subpages by padding 0xFF */ + if ((step < start_step) || (step > end_step)) + memset(ecc_calc, 0xff, ecc_bytes); + else + chip->ecc.calculate(mtd, data_buf, ecc_calc); + + /* mask OOB of un-touched subpages by padding 0xFF */ + /* if oob_required, preserve OOB metadata of written subpage */ + if (!oob_required || (step < start_step) || (step > end_step)) + memset(oob_buf, 0xff, oob_bytes); + + data_buf += ecc_size; + ecc_calc += ecc_bytes; + oob_buf += oob_bytes; + } + + /* copy calculated ECC for whole page to chip->buffer->oob */ + /* this include masked-value(0xFF) for unwritten subpages */ + ecc_calc = chip->buffers->ecccalc; + for (i = 0; i < chip->ecc.total; i++) + chip->oob_poi[eccpos[i]] = ecc_calc[i]; + + /* write OOB buffer to NAND device */ + chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); + + return 0; +} + + /** * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write * @mtd: mtd info structure @@ -2047,6 +2091,8 @@ static int nand_write_page_syndrome(struct mtd_info *mtd, * nand_write_page - [REPLACEABLE] write one page * @mtd: MTD device structure * @chip: NAND chip descriptor + * @offset: address offset within the page + * @data_len: length of actual data to be written * @buf: the data to write * @oob_required: must write chip->oob_poi to OOB * @page: page number to write @@ -2054,15 +2100,25 @@ static int nand_write_page_syndrome(struct mtd_info *mtd, * @raw: use _raw version of write_page */ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, - const uint8_t *buf, int oob_required, int page, - int cached, int raw) + uint32_t offset, int data_len, const uint8_t *buf, + int oob_required, int page, int cached, int raw) { - int status; + int status, subpage; + + if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && + chip->ecc.write_subpage) + subpage = offset || (data_len < mtd->writesize); + else + subpage = 0; chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page); if (unlikely(raw)) - status = chip->ecc.write_page_raw(mtd, chip, buf, oob_required); + status = chip->ecc.write_page_raw(mtd, chip, buf, + oob_required); + else if (subpage) + status = chip->ecc.write_subpage(mtd, chip, offset, data_len, + buf, oob_required); else status = chip->ecc.write_page(mtd, chip, buf, oob_required); @@ -2075,7 +2131,7 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, */ cached = 0; - if (!cached || !(chip->options & NAND_CACHEPRG)) { + if (!cached || !NAND_HAS_CACHEPROG(chip)) { chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); status = chip->waitfunc(mtd, chip); @@ -2176,7 +2232,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, uint8_t *oob = ops->oobbuf; uint8_t *buf = ops->datbuf; - int ret, subpage; + int ret; int oob_required = oob ? 
1 : 0; ops->retlen = 0; @@ -2191,10 +2247,6 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, } column = to & (mtd->writesize - 1); - subpage = column || (writelen & (mtd->writesize - 1)); - - if (subpage && oob) - return -EINVAL; chipnr = (int)(to >> chip->chip_shift); chip->select_chip(mtd, chipnr); @@ -2243,9 +2295,9 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, /* We still need to erase leftover OOB data */ memset(chip->oob_poi, 0xff, mtd->oobsize); } - - ret = chip->write_page(mtd, chip, wbuf, oob_required, page, - cached, (ops->mode == MTD_OPS_RAW)); + ret = chip->write_page(mtd, chip, column, bytes, wbuf, + oob_required, page, cached, + (ops->mode == MTD_OPS_RAW)); if (ret) break; @@ -2481,24 +2533,6 @@ static void single_erase_cmd(struct mtd_info *mtd, int page) } /** - * multi_erase_cmd - [GENERIC] AND specific block erase command function - * @mtd: MTD device structure - * @page: the page address of the block which will be erased - * - * AND multi block erase command function. Erase 4 consecutive blocks. - */ -static void multi_erase_cmd(struct mtd_info *mtd, int page) -{ - struct nand_chip *chip = mtd->priv; - /* Send commands to erase a block */ - chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page++); - chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page++); - chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page++); - chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page); - chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1); -} - -/** * nand_erase - [MTD Interface] erase block(s) * @mtd: MTD device structure * @instr: erase instruction @@ -2510,7 +2544,6 @@ static int nand_erase(struct mtd_info *mtd, struct erase_info *instr) return nand_erase_nand(mtd, instr, 0); } -#define BBT_PAGE_MASK 0xffffff3f /** * nand_erase_nand - [INTERN] erase block(s) * @mtd: MTD device structure @@ -2524,8 +2557,6 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, { int page, status, pages_per_block, ret, chipnr; struct nand_chip *chip = mtd->priv; - loff_t rewrite_bbt[NAND_MAX_CHIPS] = {0}; - unsigned int bbt_masked_page = 0xffffffff; loff_t len; pr_debug("%s: start = 0x%012llx, len = %llu\n", @@ -2556,15 +2587,6 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, goto erase_exit; } - /* - * If BBT requires refresh, set the BBT page mask to see if the BBT - * should be rewritten. Otherwise the mask is set to 0xffffffff which - * can not be matched. This is also done when the bbt is actually - * erased to avoid recursive updates. - */ - if (chip->options & BBT_AUTO_REFRESH && !allowbbt) - bbt_masked_page = chip->bbt_td->pages[chipnr] & BBT_PAGE_MASK; - /* Loop through the pages */ len = instr->len; @@ -2610,15 +2632,6 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, goto erase_exit; } - /* - * If BBT requires refresh, set the BBT rewrite flag to the - * page being erased. - */ - if (bbt_masked_page != 0xffffffff && - (page & BBT_PAGE_MASK) == bbt_masked_page) - rewrite_bbt[chipnr] = - ((loff_t)page << chip->page_shift); - /* Increment page address and decrement length */ len -= (1 << chip->phys_erase_shift); page += pages_per_block; @@ -2628,15 +2641,6 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, chipnr++; chip->select_chip(mtd, -1); chip->select_chip(mtd, chipnr); - - /* - * If BBT requires refresh and BBT-PERCHIP, set the BBT - * page mask to see if this BBT should be rewritten. 
- */ - if (bbt_masked_page != 0xffffffff && - (chip->bbt_td->options & NAND_BBT_PERCHIP)) - bbt_masked_page = chip->bbt_td->pages[chipnr] & - BBT_PAGE_MASK; } } instr->state = MTD_ERASE_DONE; @@ -2653,23 +2657,6 @@ erase_exit: if (!ret) mtd_erase_callback(instr); - /* - * If BBT requires refresh and erase was successful, rewrite any - * selected bad block tables. - */ - if (bbt_masked_page == 0xffffffff || ret) - return ret; - - for (chipnr = 0; chipnr < chip->numchips; chipnr++) { - if (!rewrite_bbt[chipnr]) - continue; - /* Update the BBT for chip */ - pr_debug("%s: nand_update_bbt (%d:0x%0llx 0x%0x)\n", - __func__, chipnr, rewrite_bbt[chipnr], - chip->bbt_td->pages[chipnr]); - nand_update_bbt(mtd, rewrite_bbt[chipnr]); - } - /* Return more or less happy */ return ret; } @@ -2905,8 +2892,6 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip, chip->onfi_version = 20; else if (val & (1 << 1)) chip->onfi_version = 10; - else - chip->onfi_version = 0; if (!chip->onfi_version) { pr_info("%s: unsupported ONFI version: %d\n", __func__, val); @@ -3171,6 +3156,30 @@ static void nand_decode_bbm_options(struct mtd_info *mtd, chip->bbt_options |= NAND_BBT_SCAN2NDPAGE; } +static inline bool is_full_id_nand(struct nand_flash_dev *type) +{ + return type->id_len; +} + +static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip, + struct nand_flash_dev *type, u8 *id_data, int *busw) +{ + if (!strncmp(type->id, id_data, type->id_len)) { + mtd->writesize = type->pagesize; + mtd->erasesize = type->erasesize; + mtd->oobsize = type->oobsize; + + chip->cellinfo = id_data[2]; + chip->chipsize = (uint64_t)type->chipsize << 20; + chip->options |= type->options; + + *busw = type->options & NAND_BUSWIDTH_16; + + return true; + } + return false; +} + /* * Get the flash and manufacturer id and lookup if the type is supported. */ @@ -3222,9 +3231,14 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, if (!type) type = nand_flash_ids; - for (; type->name != NULL; type++) - if (*dev_id == type->id) - break; + for (; type->name != NULL; type++) { + if (is_full_id_nand(type)) { + if (find_full_id_nand(mtd, chip, type, id_data, &busw)) + goto ident_done; + } else if (*dev_id == type->dev_id) { + break; + } + } chip->onfi_version = 0; if (!type->name || !type->pagesize) { @@ -3302,12 +3316,7 @@ ident_done: } chip->badblockbits = 8; - - /* Check for AND chips with 4 page planes */ - if (chip->options & NAND_4PAGE_ARRAY) - chip->erase_cmd = multi_erase_cmd; - else - chip->erase_cmd = single_erase_cmd; + chip->erase_cmd = single_erase_cmd; /* Do not replace user supplied command function! 
*/ if (mtd->writesize > 512 && chip->cmdfunc == nand_command) @@ -3474,6 +3483,10 @@ int nand_scan_tail(struct mtd_info *mtd) chip->ecc.read_oob = nand_read_oob_std; if (!chip->ecc.write_oob) chip->ecc.write_oob = nand_write_oob_std; + if (!chip->ecc.read_subpage) + chip->ecc.read_subpage = nand_read_subpage; + if (!chip->ecc.write_subpage) + chip->ecc.write_subpage = nand_write_subpage_hwecc; case NAND_ECC_HW_SYNDROME: if ((!chip->ecc.calculate || !chip->ecc.correct || diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c index 916d6e9..2672643 100644 --- a/drivers/mtd/nand/nand_bbt.c +++ b/drivers/mtd/nand/nand_bbt.c @@ -1240,15 +1240,6 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs) */ static uint8_t scan_ff_pattern[] = { 0xff, 0xff }; -static uint8_t scan_agand_pattern[] = { 0x1C, 0x71, 0xC7, 0x1C, 0x71, 0xC7 }; - -static struct nand_bbt_descr agand_flashbased = { - .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES, - .offs = 0x20, - .len = 6, - .pattern = scan_agand_pattern -}; - /* Generic flash bbt descriptors */ static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' }; static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' }; @@ -1333,22 +1324,6 @@ int nand_default_bbt(struct mtd_info *mtd) { struct nand_chip *this = mtd->priv; - /* - * Default for AG-AND. We must use a flash based bad block table as the - * devices have factory marked _good_ blocks. Erasing those blocks - * leads to loss of the good / bad information, so we _must_ store this - * information in a good / bad table during startup. - */ - if (this->options & NAND_IS_AND) { - /* Use the default pattern descriptors */ - if (!this->bbt_td) { - this->bbt_td = &bbt_main_descr; - this->bbt_md = &bbt_mirror_descr; - } - this->bbt_options |= NAND_BBT_USE_FLASH; - return nand_scan_bbt(mtd, &agand_flashbased); - } - /* Is a flash based bad block table requested? */ if (this->bbt_options & NAND_BBT_USE_FLASH) { /* Use the default pattern descriptors */ diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c index 9c61238..683813a 100644 --- a/drivers/mtd/nand/nand_ids.c +++ b/drivers/mtd/nand/nand_ids.c @@ -10,163 +10,153 @@ */ #include #include -/* -* Chip ID list -* -* Name. 
ID code, pagesize, chipsize in MegaByte, eraseblock size, -* options -* -* Pagesize; 0, 256, 512 -* 0 get this information from the extended chip ID -+ 256 256 Byte page size -* 512 512 Byte page size -*/ -struct nand_flash_dev nand_flash_ids[] = { +#include + +#define LP_OPTIONS NAND_SAMSUNG_LP_OPTIONS +#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16) + #define SP_OPTIONS NAND_NEED_READRDY #define SP_OPTIONS16 (SP_OPTIONS | NAND_BUSWIDTH_16) -#ifdef CONFIG_MTD_NAND_MUSEUM_IDS - {"NAND 1MiB 5V 8-bit", 0x6e, 256, 1, 0x1000, SP_OPTIONS}, - {"NAND 2MiB 5V 8-bit", 0x64, 256, 2, 0x1000, SP_OPTIONS}, - {"NAND 4MiB 5V 8-bit", 0x6b, 512, 4, 0x2000, SP_OPTIONS}, - {"NAND 1MiB 3,3V 8-bit", 0xe8, 256, 1, 0x1000, SP_OPTIONS}, - {"NAND 1MiB 3,3V 8-bit", 0xec, 256, 1, 0x1000, SP_OPTIONS}, - {"NAND 2MiB 3,3V 8-bit", 0xea, 256, 2, 0x1000, SP_OPTIONS}, - {"NAND 4MiB 3,3V 8-bit", 0xd5, 512, 4, 0x2000, SP_OPTIONS}, - {"NAND 4MiB 3,3V 8-bit", 0xe3, 512, 4, 0x2000, SP_OPTIONS}, - {"NAND 4MiB 3,3V 8-bit", 0xe5, 512, 4, 0x2000, SP_OPTIONS}, - {"NAND 8MiB 3,3V 8-bit", 0xd6, 512, 8, 0x2000, SP_OPTIONS}, - - {"NAND 8MiB 1,8V 8-bit", 0x39, 512, 8, 0x2000, SP_OPTIONS}, - {"NAND 8MiB 3,3V 8-bit", 0xe6, 512, 8, 0x2000, SP_OPTIONS}, - {"NAND 8MiB 1,8V 16-bit", 0x49, 512, 8, 0x2000, SP_OPTIONS16}, - {"NAND 8MiB 3,3V 16-bit", 0x59, 512, 8, 0x2000, SP_OPTIONS16}, -#endif - - {"NAND 16MiB 1,8V 8-bit", 0x33, 512, 16, 0x4000, SP_OPTIONS}, - {"NAND 16MiB 3,3V 8-bit", 0x73, 512, 16, 0x4000, SP_OPTIONS}, - {"NAND 16MiB 1,8V 16-bit", 0x43, 512, 16, 0x4000, SP_OPTIONS16}, - {"NAND 16MiB 3,3V 16-bit", 0x53, 512, 16, 0x4000, SP_OPTIONS16}, - - {"NAND 32MiB 1,8V 8-bit", 0x35, 512, 32, 0x4000, SP_OPTIONS}, - {"NAND 32MiB 3,3V 8-bit", 0x75, 512, 32, 0x4000, SP_OPTIONS}, - {"NAND 32MiB 1,8V 16-bit", 0x45, 512, 32, 0x4000, SP_OPTIONS16}, - {"NAND 32MiB 3,3V 16-bit", 0x55, 512, 32, 0x4000, SP_OPTIONS16}, - - {"NAND 64MiB 1,8V 8-bit", 0x36, 512, 64, 0x4000, SP_OPTIONS}, - {"NAND 64MiB 3,3V 8-bit", 0x76, 512, 64, 0x4000, SP_OPTIONS}, - {"NAND 64MiB 1,8V 16-bit", 0x46, 512, 64, 0x4000, SP_OPTIONS16}, - {"NAND 64MiB 3,3V 16-bit", 0x56, 512, 64, 0x4000, SP_OPTIONS16}, - - {"NAND 128MiB 1,8V 8-bit", 0x78, 512, 128, 0x4000, SP_OPTIONS}, - {"NAND 128MiB 1,8V 8-bit", 0x39, 512, 128, 0x4000, SP_OPTIONS}, - {"NAND 128MiB 3,3V 8-bit", 0x79, 512, 128, 0x4000, SP_OPTIONS}, - {"NAND 128MiB 1,8V 16-bit", 0x72, 512, 128, 0x4000, SP_OPTIONS16}, - {"NAND 128MiB 1,8V 16-bit", 0x49, 512, 128, 0x4000, SP_OPTIONS16}, - {"NAND 128MiB 3,3V 16-bit", 0x74, 512, 128, 0x4000, SP_OPTIONS16}, - {"NAND 128MiB 3,3V 16-bit", 0x59, 512, 128, 0x4000, SP_OPTIONS16}, - - {"NAND 256MiB 3,3V 8-bit", 0x71, 512, 256, 0x4000, SP_OPTIONS}, +/* + * The chip ID list: + * name, device ID, page size, chip size in MiB, eraseblock size, options + * + * If page size and eraseblock size are 0, the sizes are taken from the + * extended chip ID. + */ +struct nand_flash_dev nand_flash_ids[] = { + /* + * Some incompatible NAND chips share device ID's and so must be + * listed by full ID. We list them first so that we can easily identify + * the most specific match. 
+ */ + {"TC58NVG2S0F 4G 3.3V 8-bit", + { .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08} }, + SZ_4K, SZ_512, SZ_256K, 0, 8, 224}, + {"TC58NVG3S0F 8G 3.3V 8-bit", + { .id = {0x98, 0xd3, 0x90, 0x26, 0x76, 0x15, 0x02, 0x08} }, + SZ_4K, SZ_1K, SZ_256K, 0, 8, 232}, + {"TC58NVG5D2 32G 3.3V 8-bit", + { .id = {0x98, 0xd7, 0x94, 0x32, 0x76, 0x56, 0x09, 0x00} }, + SZ_8K, SZ_4K, SZ_1M, 0, 8, 640}, + {"TC58NVG6D2 64G 3.3V 8-bit", + { .id = {0x98, 0xde, 0x94, 0x82, 0x76, 0x56, 0x04, 0x20} }, + SZ_8K, SZ_8K, SZ_2M, 0, 8, 640}, + + LEGACY_ID_NAND("NAND 4MiB 5V 8-bit", 0x6B, 4, SZ_8K, SP_OPTIONS), + LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS), + LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE5, 4, SZ_8K, SP_OPTIONS), + LEGACY_ID_NAND("NAND 8MiB 3,3V 8-bit", 0xD6, 8, SZ_8K, SP_OPTIONS), + LEGACY_ID_NAND("NAND 8MiB 3,3V 8-bit", 0xE6, 8, SZ_8K, SP_OPTIONS), + + LEGACY_ID_NAND("NAND 16MiB 1,8V 8-bit", 0x33, 16, SZ_16K, SP_OPTIONS), + LEGACY_ID_NAND("NAND 16MiB 3,3V 8-bit", 0x73, 16, SZ_16K, SP_OPTIONS), + LEGACY_ID_NAND("NAND 16MiB 1,8V 16-bit", 0x43, 16, SZ_16K, SP_OPTIONS16), + LEGACY_ID_NAND("NAND 16MiB 3,3V 16-bit", 0x53, 16, SZ_16K, SP_OPTIONS16), + + LEGACY_ID_NAND("NAND 32MiB 1,8V 8-bit", 0x35, 32, SZ_16K, SP_OPTIONS), + LEGACY_ID_NAND("NAND 32MiB 3,3V 8-bit", 0x75, 32, SZ_16K, SP_OPTIONS), + LEGACY_ID_NAND("NAND 32MiB 1,8V 16-bit", 0x45, 32, SZ_16K, SP_OPTIONS16), + LEGACY_ID_NAND("NAND 32MiB 3,3V 16-bit", 0x55, 32, SZ_16K, SP_OPTIONS16), + + LEGACY_ID_NAND("NAND 64MiB 1,8V 8-bit", 0x36, 64, SZ_16K, SP_OPTIONS), + LEGACY_ID_NAND("NAND 64MiB 3,3V 8-bit", 0x76, 64, SZ_16K, SP_OPTIONS), + LEGACY_ID_NAND("NAND 64MiB 1,8V 16-bit", 0x46, 64, SZ_16K, SP_OPTIONS16), + LEGACY_ID_NAND("NAND 64MiB 3,3V 16-bit", 0x56, 64, SZ_16K, SP_OPTIONS16), + + LEGACY_ID_NAND("NAND 128MiB 1,8V 8-bit", 0x78, 128, SZ_16K, SP_OPTIONS), + LEGACY_ID_NAND("NAND 128MiB 1,8V 8-bit", 0x39, 128, SZ_16K, SP_OPTIONS), + LEGACY_ID_NAND("NAND 128MiB 3,3V 8-bit", 0x79, 128, SZ_16K, SP_OPTIONS), + LEGACY_ID_NAND("NAND 128MiB 1,8V 16-bit", 0x72, 128, SZ_16K, SP_OPTIONS16), + LEGACY_ID_NAND("NAND 128MiB 1,8V 16-bit", 0x49, 128, SZ_16K, SP_OPTIONS16), + LEGACY_ID_NAND("NAND 128MiB 3,3V 16-bit", 0x74, 128, SZ_16K, SP_OPTIONS16), + LEGACY_ID_NAND("NAND 128MiB 3,3V 16-bit", 0x59, 128, SZ_16K, SP_OPTIONS16), + + LEGACY_ID_NAND("NAND 256MiB 3,3V 8-bit", 0x71, 256, SZ_16K, SP_OPTIONS), /* - * These are the new chips with large page size. The pagesize and the - * erasesize is determined from the extended id bytes + * These are the new chips with large page size. Their page size and + * eraseblock size are determined from the extended ID bytes. 
*/ -#define LP_OPTIONS NAND_SAMSUNG_LP_OPTIONS -#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16) /* 512 Megabit */ - {"NAND 64MiB 1,8V 8-bit", 0xA2, 0, 64, 0, LP_OPTIONS}, - {"NAND 64MiB 1,8V 8-bit", 0xA0, 0, 64, 0, LP_OPTIONS}, - {"NAND 64MiB 3,3V 8-bit", 0xF2, 0, 64, 0, LP_OPTIONS}, - {"NAND 64MiB 3,3V 8-bit", 0xD0, 0, 64, 0, LP_OPTIONS}, - {"NAND 64MiB 3,3V 8-bit", 0xF0, 0, 64, 0, LP_OPTIONS}, - {"NAND 64MiB 1,8V 16-bit", 0xB2, 0, 64, 0, LP_OPTIONS16}, - {"NAND 64MiB 1,8V 16-bit", 0xB0, 0, 64, 0, LP_OPTIONS16}, - {"NAND 64MiB 3,3V 16-bit", 0xC2, 0, 64, 0, LP_OPTIONS16}, - {"NAND 64MiB 3,3V 16-bit", 0xC0, 0, 64, 0, LP_OPTIONS16}, + EXTENDED_ID_NAND("NAND 64MiB 1,8V 8-bit", 0xA2, 64, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 64MiB 1,8V 8-bit", 0xA0, 64, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xF2, 64, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xD0, 64, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xF0, 64, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 64MiB 1,8V 16-bit", 0xB2, 64, LP_OPTIONS16), + EXTENDED_ID_NAND("NAND 64MiB 1,8V 16-bit", 0xB0, 64, LP_OPTIONS16), + EXTENDED_ID_NAND("NAND 64MiB 3,3V 16-bit", 0xC2, 64, LP_OPTIONS16), + EXTENDED_ID_NAND("NAND 64MiB 3,3V 16-bit", 0xC0, 64, LP_OPTIONS16), /* 1 Gigabit */ - {"NAND 128MiB 1,8V 8-bit", 0xA1, 0, 128, 0, LP_OPTIONS}, - {"NAND 128MiB 3,3V 8-bit", 0xF1, 0, 128, 0, LP_OPTIONS}, - {"NAND 128MiB 3,3V 8-bit", 0xD1, 0, 128, 0, LP_OPTIONS}, - {"NAND 128MiB 1,8V 16-bit", 0xB1, 0, 128, 0, LP_OPTIONS16}, - {"NAND 128MiB 3,3V 16-bit", 0xC1, 0, 128, 0, LP_OPTIONS16}, - {"NAND 128MiB 1,8V 16-bit", 0xAD, 0, 128, 0, LP_OPTIONS16}, + EXTENDED_ID_NAND("NAND 128MiB 1,8V 8-bit", 0xA1, 128, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit", 0xF1, 128, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit", 0xD1, 128, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 128MiB 1,8V 16-bit", 0xB1, 128, LP_OPTIONS16), + EXTENDED_ID_NAND("NAND 128MiB 3,3V 16-bit", 0xC1, 128, LP_OPTIONS16), + EXTENDED_ID_NAND("NAND 128MiB 1,8V 16-bit", 0xAD, 128, LP_OPTIONS16), /* 2 Gigabit */ - {"NAND 256MiB 1,8V 8-bit", 0xAA, 0, 256, 0, LP_OPTIONS}, - {"NAND 256MiB 3,3V 8-bit", 0xDA, 0, 256, 0, LP_OPTIONS}, - {"NAND 256MiB 1,8V 16-bit", 0xBA, 0, 256, 0, LP_OPTIONS16}, - {"NAND 256MiB 3,3V 16-bit", 0xCA, 0, 256, 0, LP_OPTIONS16}, + EXTENDED_ID_NAND("NAND 256MiB 1,8V 8-bit", 0xAA, 256, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 256MiB 3,3V 8-bit", 0xDA, 256, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 256MiB 1,8V 16-bit", 0xBA, 256, LP_OPTIONS16), + EXTENDED_ID_NAND("NAND 256MiB 3,3V 16-bit", 0xCA, 256, LP_OPTIONS16), /* 4 Gigabit */ - {"NAND 512MiB 1,8V 8-bit", 0xAC, 0, 512, 0, LP_OPTIONS}, - {"NAND 512MiB 3,3V 8-bit", 0xDC, 0, 512, 0, LP_OPTIONS}, - {"NAND 512MiB 1,8V 16-bit", 0xBC, 0, 512, 0, LP_OPTIONS16}, - {"NAND 512MiB 3,3V 16-bit", 0xCC, 0, 512, 0, LP_OPTIONS16}, + EXTENDED_ID_NAND("NAND 512MiB 1,8V 8-bit", 0xAC, 512, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 512MiB 3,3V 8-bit", 0xDC, 512, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 512MiB 1,8V 16-bit", 0xBC, 512, LP_OPTIONS16), + EXTENDED_ID_NAND("NAND 512MiB 3,3V 16-bit", 0xCC, 512, LP_OPTIONS16), /* 8 Gigabit */ - {"NAND 1GiB 1,8V 8-bit", 0xA3, 0, 1024, 0, LP_OPTIONS}, - {"NAND 1GiB 3,3V 8-bit", 0xD3, 0, 1024, 0, LP_OPTIONS}, - {"NAND 1GiB 1,8V 16-bit", 0xB3, 0, 1024, 0, LP_OPTIONS16}, - {"NAND 1GiB 3,3V 16-bit", 0xC3, 0, 1024, 0, LP_OPTIONS16}, + EXTENDED_ID_NAND("NAND 1GiB 1,8V 8-bit", 0xA3, 1024, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 1GiB 3,3V 8-bit", 0xD3, 1024, LP_OPTIONS), + 
EXTENDED_ID_NAND("NAND 1GiB 1,8V 16-bit", 0xB3, 1024, LP_OPTIONS16), + EXTENDED_ID_NAND("NAND 1GiB 3,3V 16-bit", 0xC3, 1024, LP_OPTIONS16), /* 16 Gigabit */ - {"NAND 2GiB 1,8V 8-bit", 0xA5, 0, 2048, 0, LP_OPTIONS}, - {"NAND 2GiB 3,3V 8-bit", 0xD5, 0, 2048, 0, LP_OPTIONS}, - {"NAND 2GiB 1,8V 16-bit", 0xB5, 0, 2048, 0, LP_OPTIONS16}, - {"NAND 2GiB 3,3V 16-bit", 0xC5, 0, 2048, 0, LP_OPTIONS16}, + EXTENDED_ID_NAND("NAND 2GiB 1,8V 8-bit", 0xA5, 2048, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 2GiB 3,3V 8-bit", 0xD5, 2048, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 2GiB 1,8V 16-bit", 0xB5, 2048, LP_OPTIONS16), + EXTENDED_ID_NAND("NAND 2GiB 3,3V 16-bit", 0xC5, 2048, LP_OPTIONS16), /* 32 Gigabit */ - {"NAND 4GiB 1,8V 8-bit", 0xA7, 0, 4096, 0, LP_OPTIONS}, - {"NAND 4GiB 3,3V 8-bit", 0xD7, 0, 4096, 0, LP_OPTIONS}, - {"NAND 4GiB 1,8V 16-bit", 0xB7, 0, 4096, 0, LP_OPTIONS16}, - {"NAND 4GiB 3,3V 16-bit", 0xC7, 0, 4096, 0, LP_OPTIONS16}, + EXTENDED_ID_NAND("NAND 4GiB 1,8V 8-bit", 0xA7, 4096, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 4GiB 3,3V 8-bit", 0xD7, 4096, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 4GiB 1,8V 16-bit", 0xB7, 4096, LP_OPTIONS16), + EXTENDED_ID_NAND("NAND 4GiB 3,3V 16-bit", 0xC7, 4096, LP_OPTIONS16), /* 64 Gigabit */ - {"NAND 8GiB 1,8V 8-bit", 0xAE, 0, 8192, 0, LP_OPTIONS}, - {"NAND 8GiB 3,3V 8-bit", 0xDE, 0, 8192, 0, LP_OPTIONS}, - {"NAND 8GiB 1,8V 16-bit", 0xBE, 0, 8192, 0, LP_OPTIONS16}, - {"NAND 8GiB 3,3V 16-bit", 0xCE, 0, 8192, 0, LP_OPTIONS16}, + EXTENDED_ID_NAND("NAND 8GiB 1,8V 8-bit", 0xAE, 8192, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 8GiB 3,3V 8-bit", 0xDE, 8192, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 8GiB 1,8V 16-bit", 0xBE, 8192, LP_OPTIONS16), + EXTENDED_ID_NAND("NAND 8GiB 3,3V 16-bit", 0xCE, 8192, LP_OPTIONS16), /* 128 Gigabit */ - {"NAND 16GiB 1,8V 8-bit", 0x1A, 0, 16384, 0, LP_OPTIONS}, - {"NAND 16GiB 3,3V 8-bit", 0x3A, 0, 16384, 0, LP_OPTIONS}, - {"NAND 16GiB 1,8V 16-bit", 0x2A, 0, 16384, 0, LP_OPTIONS16}, - {"NAND 16GiB 3,3V 16-bit", 0x4A, 0, 16384, 0, LP_OPTIONS16}, + EXTENDED_ID_NAND("NAND 16GiB 1,8V 8-bit", 0x1A, 16384, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 16GiB 3,3V 8-bit", 0x3A, 16384, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 16GiB 1,8V 16-bit", 0x2A, 16384, LP_OPTIONS16), + EXTENDED_ID_NAND("NAND 16GiB 3,3V 16-bit", 0x4A, 16384, LP_OPTIONS16), /* 256 Gigabit */ - {"NAND 32GiB 1,8V 8-bit", 0x1C, 0, 32768, 0, LP_OPTIONS}, - {"NAND 32GiB 3,3V 8-bit", 0x3C, 0, 32768, 0, LP_OPTIONS}, - {"NAND 32GiB 1,8V 16-bit", 0x2C, 0, 32768, 0, LP_OPTIONS16}, - {"NAND 32GiB 3,3V 16-bit", 0x4C, 0, 32768, 0, LP_OPTIONS16}, + EXTENDED_ID_NAND("NAND 32GiB 1,8V 8-bit", 0x1C, 32768, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 32GiB 3,3V 8-bit", 0x3C, 32768, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 32GiB 1,8V 16-bit", 0x2C, 32768, LP_OPTIONS16), + EXTENDED_ID_NAND("NAND 32GiB 3,3V 16-bit", 0x4C, 32768, LP_OPTIONS16), /* 512 Gigabit */ - {"NAND 64GiB 1,8V 8-bit", 0x1E, 0, 65536, 0, LP_OPTIONS}, - {"NAND 64GiB 3,3V 8-bit", 0x3E, 0, 65536, 0, LP_OPTIONS}, - {"NAND 64GiB 1,8V 16-bit", 0x2E, 0, 65536, 0, LP_OPTIONS16}, - {"NAND 64GiB 3,3V 16-bit", 0x4E, 0, 65536, 0, LP_OPTIONS16}, + EXTENDED_ID_NAND("NAND 64GiB 1,8V 8-bit", 0x1E, 65536, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 64GiB 3,3V 8-bit", 0x3E, 65536, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 64GiB 1,8V 16-bit", 0x2E, 65536, LP_OPTIONS16), + EXTENDED_ID_NAND("NAND 64GiB 3,3V 16-bit", 0x4E, 65536, LP_OPTIONS16), - /* - * Renesas AND 1 Gigabit. Those chips do not support extended id and - * have a strange page/block layout ! 
The chosen minimum erasesize is - * 4 * 2 * 2048 = 16384 Byte, as those chips have an array of 4 page - * planes 1 block = 2 pages, but due to plane arrangement the blocks - * 0-3 consists of page 0 + 4,1 + 5, 2 + 6, 3 + 7 Anyway JFFS2 would - * increase the eraseblock size so we chose a combined one which can be - * erased in one go There are more speed improvements for reads and - * writes possible, but not implemented now - */ - {"AND 128MiB 3,3V 8-bit", 0x01, 2048, 128, 0x4000, - NAND_IS_AND | NAND_4PAGE_ARRAY | BBT_AUTO_REFRESH}, - - {NULL,} + {NULL} }; -/* -* Manufacturer ID list -*/ +/* Manufacturer IDs */ struct nand_manufacturers nand_manuf_ids[] = { {NAND_MFR_TOSHIBA, "Toshiba"}, {NAND_MFR_SAMSUNG, "Samsung"}, diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c index 891c52a..cb38f3d 100644 --- a/drivers/mtd/nand/nandsim.c +++ b/drivers/mtd/nand/nandsim.c @@ -218,7 +218,6 @@ MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should " #define STATE_CMD_READOOB 0x00000005 /* read OOB area */ #define STATE_CMD_ERASE1 0x00000006 /* sector erase first command */ #define STATE_CMD_STATUS 0x00000007 /* read status */ -#define STATE_CMD_STATUS_M 0x00000008 /* read multi-plane status (isn't implemented) */ #define STATE_CMD_SEQIN 0x00000009 /* sequential data input */ #define STATE_CMD_READID 0x0000000A /* read ID */ #define STATE_CMD_ERASE2 0x0000000B /* sector erase second command */ @@ -263,14 +262,13 @@ MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should " #define NS_OPER_STATES 6 /* Maximum number of states in operation */ #define OPT_ANY 0xFFFFFFFF /* any chip supports this operation */ -#define OPT_PAGE256 0x00000001 /* 256-byte page chips */ #define OPT_PAGE512 0x00000002 /* 512-byte page chips */ #define OPT_PAGE2048 0x00000008 /* 2048-byte page chips */ #define OPT_SMARTMEDIA 0x00000010 /* SmartMedia technology chips */ #define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */ #define OPT_PAGE4096 0x00000080 /* 4096-byte page chips */ #define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */ -#define OPT_SMALLPAGE (OPT_PAGE256 | OPT_PAGE512) /* 256 and 512-byte page chips */ +#define OPT_SMALLPAGE (OPT_PAGE512) /* 512-byte page chips */ /* Remove action bits from state */ #define NS_STATE(x) ((x) & ~ACTION_MASK) @@ -406,8 +404,6 @@ static struct nandsim_operations { {OPT_ANY, {STATE_CMD_ERASE1, STATE_ADDR_SEC, STATE_CMD_ERASE2 | ACTION_SECERASE, STATE_READY}}, /* Read status */ {OPT_ANY, {STATE_CMD_STATUS, STATE_DATAOUT_STATUS, STATE_READY}}, - /* Read multi-plane status */ - {OPT_SMARTMEDIA, {STATE_CMD_STATUS_M, STATE_DATAOUT_STATUS_M, STATE_READY}}, /* Read ID */ {OPT_ANY, {STATE_CMD_READID, STATE_ADDR_ZERO, STATE_DATAOUT_ID, STATE_READY}}, /* Large page devices read page */ @@ -699,10 +695,7 @@ static int init_nandsim(struct mtd_info *mtd) ns->geom.secszoob = ns->geom.secsz + ns->geom.oobsz * ns->geom.pgsec; ns->options = 0; - if (ns->geom.pgsz == 256) { - ns->options |= OPT_PAGE256; - } - else if (ns->geom.pgsz == 512) { + if (ns->geom.pgsz == 512) { ns->options |= OPT_PAGE512; if (ns->busw == 8) ns->options |= OPT_PAGE512_8BIT; @@ -769,9 +762,9 @@ static int init_nandsim(struct mtd_info *mtd) } /* Detect how many ID bytes the NAND chip outputs */ - for (i = 0; nand_flash_ids[i].name != NULL; i++) { - if (second_id_byte != nand_flash_ids[i].id) - continue; + for (i = 0; nand_flash_ids[i].name != NULL; i++) { + if (second_id_byte != nand_flash_ids[i].dev_id) + continue; } if 
(ns->busw == 16) @@ -1079,8 +1072,6 @@ static char *get_state_name(uint32_t state) return "STATE_CMD_ERASE1"; case STATE_CMD_STATUS: return "STATE_CMD_STATUS"; - case STATE_CMD_STATUS_M: - return "STATE_CMD_STATUS_M"; case STATE_CMD_SEQIN: return "STATE_CMD_SEQIN"; case STATE_CMD_READID: @@ -1145,7 +1136,6 @@ static int check_command(int cmd) case NAND_CMD_RNDOUTSTART: return 0; - case NAND_CMD_STATUS_MULTI: default: return 1; } @@ -1171,8 +1161,6 @@ static uint32_t get_state_by_command(unsigned command) return STATE_CMD_ERASE1; case NAND_CMD_STATUS: return STATE_CMD_STATUS; - case NAND_CMD_STATUS_MULTI: - return STATE_CMD_STATUS_M; case NAND_CMD_SEQIN: return STATE_CMD_SEQIN; case NAND_CMD_READID: @@ -2306,7 +2294,7 @@ static int __init ns_init_module(void) nand->geom.idbytes = 2; nand->regs.status = NS_STATUS_OK(nand); nand->nxstate = STATE_UNKNOWN; - nand->options |= OPT_PAGE256; /* temporary value */ + nand->options |= OPT_PAGE512; /* temporary value */ nand->ids[0] = first_id_byte; nand->ids[1] = second_id_byte; nand->ids[2] = third_id_byte; diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c index a619119..cd6be2e 100644 --- a/drivers/mtd/nand/nuc900_nand.c +++ b/drivers/mtd/nand/nuc900_nand.c @@ -177,15 +177,6 @@ static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command, case NAND_CMD_SEQIN: case NAND_CMD_RNDIN: case NAND_CMD_STATUS: - case NAND_CMD_DEPLETE1: - return; - - case NAND_CMD_STATUS_ERROR: - case NAND_CMD_STATUS_ERROR0: - case NAND_CMD_STATUS_ERROR1: - case NAND_CMD_STATUS_ERROR2: - case NAND_CMD_STATUS_ERROR3: - udelay(chip->chip_delay); return; case NAND_CMD_RESET: diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index 8e820dd..81b80af 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c @@ -1023,9 +1023,9 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip) int status, state = this->state; if (state == FL_ERASING) - timeo += (HZ * 400) / 1000; + timeo += msecs_to_jiffies(400); else - timeo += (HZ * 20) / 1000; + timeo += msecs_to_jiffies(20); writeb(NAND_CMD_STATUS & 0xFF, info->reg.gpmc_nand_command); while (time_before(jiffies, timeo)) { @@ -1701,8 +1701,9 @@ static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt) elm_node = of_find_node_by_phandle(be32_to_cpup(parp)); pdev = of_find_device_by_node(elm_node); info->elm_dev = &pdev->dev; - elm_config(info->elm_dev, bch_type); - info->is_elm_used = true; + + if (elm_config(info->elm_dev, bch_type) == 0) + info->is_elm_used = true; } if (info->is_elm_used && (mtd->writesize <= 4096)) { diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c index cd72b92..8fbd002 100644 --- a/drivers/mtd/nand/orion_nand.c +++ b/drivers/mtd/nand/orion_nand.c @@ -231,18 +231,7 @@ static struct platform_driver orion_nand_driver = { }, }; -static int __init orion_nand_init(void) -{ - return platform_driver_probe(&orion_nand_driver, orion_nand_probe); -} - -static void __exit orion_nand_exit(void) -{ - platform_driver_unregister(&orion_nand_driver); -} - -module_init(orion_nand_init); -module_exit(orion_nand_exit); +module_platform_driver_probe(orion_nand_driver, orion_nand_probe); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Tzachi Perelstein"); diff --git a/drivers/mtd/nand/ppchameleonevb.c b/drivers/mtd/nand/ppchameleonevb.c deleted file mode 100644 index 0ddd90e..0000000 --- a/drivers/mtd/nand/ppchameleonevb.c +++ /dev/null @@ -1,403 +0,0 @@ -/* - * drivers/mtd/nand/ppchameleonevb.c - * - * Copyright (C) 2003 DAVE Srl 
(info@wawnet.biz) - * - * Derived from drivers/mtd/nand/edb7312.c - * - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Overview: - * This is a device driver for the NAND flash devices found on the - * PPChameleon/PPChameleonEVB system. - * PPChameleon options (autodetected): - * - BA model: no NAND - * - ME model: 32MB (Samsung K9F5608U0B) - * - HI model: 128MB (Samsung K9F1G08UOM) - * PPChameleonEVB options: - * - 32MB (Samsung K9F5608U0B) - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#undef USE_READY_BUSY_PIN -#define USE_READY_BUSY_PIN -/* see datasheets (tR) */ -#define NAND_BIG_DELAY_US 25 -#define NAND_SMALL_DELAY_US 10 - -/* handy sizes */ -#define SZ_4M 0x00400000 -#define NAND_SMALL_SIZE 0x02000000 -#define NAND_MTD_NAME "ppchameleon-nand" -#define NAND_EVB_MTD_NAME "ppchameleonevb-nand" - -/* GPIO pins used to drive NAND chip mounted on processor module */ -#define NAND_nCE_GPIO_PIN (0x80000000 >> 1) -#define NAND_CLE_GPIO_PIN (0x80000000 >> 2) -#define NAND_ALE_GPIO_PIN (0x80000000 >> 3) -#define NAND_RB_GPIO_PIN (0x80000000 >> 4) -/* GPIO pins used to drive NAND chip mounted on EVB */ -#define NAND_EVB_nCE_GPIO_PIN (0x80000000 >> 14) -#define NAND_EVB_CLE_GPIO_PIN (0x80000000 >> 15) -#define NAND_EVB_ALE_GPIO_PIN (0x80000000 >> 16) -#define NAND_EVB_RB_GPIO_PIN (0x80000000 >> 31) - -/* - * MTD structure for PPChameleonEVB board - */ -static struct mtd_info *ppchameleon_mtd = NULL; -static struct mtd_info *ppchameleonevb_mtd = NULL; - -/* - * Module stuff - */ -static unsigned long ppchameleon_fio_pbase = CFG_NAND0_PADDR; -static unsigned long ppchameleonevb_fio_pbase = CFG_NAND1_PADDR; - -#ifdef MODULE -module_param(ppchameleon_fio_pbase, ulong, 0); -module_param(ppchameleonevb_fio_pbase, ulong, 0); -#else -__setup("ppchameleon_fio_pbase=", ppchameleon_fio_pbase); -__setup("ppchameleonevb_fio_pbase=", ppchameleonevb_fio_pbase); -#endif - -/* - * Define static partitions for flash devices - */ -static struct mtd_partition partition_info_hi[] = { - { .name = "PPChameleon HI Nand Flash", - .offset = 0, - .size = 128 * 1024 * 1024 - } -}; - -static struct mtd_partition partition_info_me[] = { - { .name = "PPChameleon ME Nand Flash", - .offset = 0, - .size = 32 * 1024 * 1024 - } -}; - -static struct mtd_partition partition_info_evb[] = { - { .name = "PPChameleonEVB Nand Flash", - .offset = 0, - .size = 32 * 1024 * 1024 - } -}; - -#define NUM_PARTITIONS 1 - -/* - * hardware specific access to control-lines - */ -static void ppchameleon_hwcontrol(struct mtd_info *mtdinfo, int cmd, - unsigned int ctrl) -{ - struct nand_chip *chip = mtd->priv; - - if (ctrl & NAND_CTRL_CHANGE) { -#error Missing headerfiles. No way to fix this. 
-tglx - switch (cmd) { - case NAND_CTL_SETCLE: - MACRO_NAND_CTL_SETCLE((unsigned long)CFG_NAND0_PADDR); - break; - case NAND_CTL_CLRCLE: - MACRO_NAND_CTL_CLRCLE((unsigned long)CFG_NAND0_PADDR); - break; - case NAND_CTL_SETALE: - MACRO_NAND_CTL_SETALE((unsigned long)CFG_NAND0_PADDR); - break; - case NAND_CTL_CLRALE: - MACRO_NAND_CTL_CLRALE((unsigned long)CFG_NAND0_PADDR); - break; - case NAND_CTL_SETNCE: - MACRO_NAND_ENABLE_CE((unsigned long)CFG_NAND0_PADDR); - break; - case NAND_CTL_CLRNCE: - MACRO_NAND_DISABLE_CE((unsigned long)CFG_NAND0_PADDR); - break; - } - } - if (cmd != NAND_CMD_NONE) - writeb(cmd, chip->IO_ADDR_W); -} - -static void ppchameleonevb_hwcontrol(struct mtd_info *mtdinfo, int cmd, - unsigned int ctrl) -{ - struct nand_chip *chip = mtd->priv; - - if (ctrl & NAND_CTRL_CHANGE) { -#error Missing headerfiles. No way to fix this. -tglx - switch (cmd) { - case NAND_CTL_SETCLE: - MACRO_NAND_CTL_SETCLE((unsigned long)CFG_NAND1_PADDR); - break; - case NAND_CTL_CLRCLE: - MACRO_NAND_CTL_CLRCLE((unsigned long)CFG_NAND1_PADDR); - break; - case NAND_CTL_SETALE: - MACRO_NAND_CTL_SETALE((unsigned long)CFG_NAND1_PADDR); - break; - case NAND_CTL_CLRALE: - MACRO_NAND_CTL_CLRALE((unsigned long)CFG_NAND1_PADDR); - break; - case NAND_CTL_SETNCE: - MACRO_NAND_ENABLE_CE((unsigned long)CFG_NAND1_PADDR); - break; - case NAND_CTL_CLRNCE: - MACRO_NAND_DISABLE_CE((unsigned long)CFG_NAND1_PADDR); - break; - } - } - if (cmd != NAND_CMD_NONE) - writeb(cmd, chip->IO_ADDR_W); -} - -#ifdef USE_READY_BUSY_PIN -/* - * read device ready pin - */ -static int ppchameleon_device_ready(struct mtd_info *minfo) -{ - if (in_be32((volatile unsigned *)GPIO0_IR) & NAND_RB_GPIO_PIN) - return 1; - return 0; -} - -static int ppchameleonevb_device_ready(struct mtd_info *minfo) -{ - if (in_be32((volatile unsigned *)GPIO0_IR) & NAND_EVB_RB_GPIO_PIN) - return 1; - return 0; -} -#endif - -/* - * Main initialization routine - */ -static int __init ppchameleonevb_init(void) -{ - struct nand_chip *this; - void __iomem *ppchameleon_fio_base; - void __iomem *ppchameleonevb_fio_base; - - /********************************* - * Processor module NAND (if any) * - *********************************/ - /* Allocate memory for MTD device structure and private data */ - ppchameleon_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL); - if (!ppchameleon_mtd) { - printk("Unable to allocate PPChameleon NAND MTD device structure.\n"); - return -ENOMEM; - } - - /* map physical address */ - ppchameleon_fio_base = ioremap(ppchameleon_fio_pbase, SZ_4M); - if (!ppchameleon_fio_base) { - printk("ioremap PPChameleon NAND flash failed\n"); - kfree(ppchameleon_mtd); - return -EIO; - } - - /* Get pointer to private data */ - this = (struct nand_chip *)(&ppchameleon_mtd[1]); - - /* Initialize structures */ - memset(ppchameleon_mtd, 0, sizeof(struct mtd_info)); - memset(this, 0, sizeof(struct nand_chip)); - - /* Link the private data with the MTD structure */ - ppchameleon_mtd->priv = this; - ppchameleon_mtd->owner = THIS_MODULE; - - /* Initialize GPIOs */ - /* Pin mapping for NAND chip */ - /* - CE GPIO_01 - CLE GPIO_02 - ALE GPIO_03 - R/B GPIO_04 - */ - /* output select */ - out_be32((volatile unsigned *)GPIO0_OSRH, in_be32((volatile unsigned *)GPIO0_OSRH) & 0xC0FFFFFF); - /* three-state select */ - out_be32((volatile unsigned *)GPIO0_TSRH, in_be32((volatile unsigned *)GPIO0_TSRH) & 0xC0FFFFFF); - /* enable output driver */ - out_be32((volatile unsigned *)GPIO0_TCR, - in_be32((volatile unsigned *)GPIO0_TCR) | NAND_nCE_GPIO_PIN | 
NAND_CLE_GPIO_PIN | NAND_ALE_GPIO_PIN); -#ifdef USE_READY_BUSY_PIN - /* three-state select */ - out_be32((volatile unsigned *)GPIO0_TSRH, in_be32((volatile unsigned *)GPIO0_TSRH) & 0xFF3FFFFF); - /* high-impedecence */ - out_be32((volatile unsigned *)GPIO0_TCR, in_be32((volatile unsigned *)GPIO0_TCR) & (~NAND_RB_GPIO_PIN)); - /* input select */ - out_be32((volatile unsigned *)GPIO0_ISR1H, - (in_be32((volatile unsigned *)GPIO0_ISR1H) & 0xFF3FFFFF) | 0x00400000); -#endif - - /* insert callbacks */ - this->IO_ADDR_R = ppchameleon_fio_base; - this->IO_ADDR_W = ppchameleon_fio_base; - this->cmd_ctrl = ppchameleon_hwcontrol; -#ifdef USE_READY_BUSY_PIN - this->dev_ready = ppchameleon_device_ready; -#endif - this->chip_delay = NAND_BIG_DELAY_US; - /* ECC mode */ - this->ecc.mode = NAND_ECC_SOFT; - - /* Scan to find existence of the device (it could not be mounted) */ - if (nand_scan(ppchameleon_mtd, 1)) { - iounmap((void *)ppchameleon_fio_base); - ppchameleon_fio_base = NULL; - kfree(ppchameleon_mtd); - goto nand_evb_init; - } -#ifndef USE_READY_BUSY_PIN - /* Adjust delay if necessary */ - if (ppchameleon_mtd->size == NAND_SMALL_SIZE) - this->chip_delay = NAND_SMALL_DELAY_US; -#endif - - ppchameleon_mtd->name = "ppchameleon-nand"; - - /* Register the partitions */ - mtd_device_parse_register(ppchameleon_mtd, NULL, NULL, - ppchameleon_mtd->size == NAND_SMALL_SIZE ? - partition_info_me : partition_info_hi, - NUM_PARTITIONS); - - nand_evb_init: - /**************************** - * EVB NAND (always present) * - ****************************/ - /* Allocate memory for MTD device structure and private data */ - ppchameleonevb_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL); - if (!ppchameleonevb_mtd) { - printk("Unable to allocate PPChameleonEVB NAND MTD device structure.\n"); - if (ppchameleon_fio_base) - iounmap(ppchameleon_fio_base); - return -ENOMEM; - } - - /* map physical address */ - ppchameleonevb_fio_base = ioremap(ppchameleonevb_fio_pbase, SZ_4M); - if (!ppchameleonevb_fio_base) { - printk("ioremap PPChameleonEVB NAND flash failed\n"); - kfree(ppchameleonevb_mtd); - if (ppchameleon_fio_base) - iounmap(ppchameleon_fio_base); - return -EIO; - } - - /* Get pointer to private data */ - this = (struct nand_chip *)(&ppchameleonevb_mtd[1]); - - /* Initialize structures */ - memset(ppchameleonevb_mtd, 0, sizeof(struct mtd_info)); - memset(this, 0, sizeof(struct nand_chip)); - - /* Link the private data with the MTD structure */ - ppchameleonevb_mtd->priv = this; - - /* Initialize GPIOs */ - /* Pin mapping for NAND chip */ - /* - CE GPIO_14 - CLE GPIO_15 - ALE GPIO_16 - R/B GPIO_31 - */ - /* output select */ - out_be32((volatile unsigned *)GPIO0_OSRH, in_be32((volatile unsigned *)GPIO0_OSRH) & 0xFFFFFFF0); - out_be32((volatile unsigned *)GPIO0_OSRL, in_be32((volatile unsigned *)GPIO0_OSRL) & 0x3FFFFFFF); - /* three-state select */ - out_be32((volatile unsigned *)GPIO0_TSRH, in_be32((volatile unsigned *)GPIO0_TSRH) & 0xFFFFFFF0); - out_be32((volatile unsigned *)GPIO0_TSRL, in_be32((volatile unsigned *)GPIO0_TSRL) & 0x3FFFFFFF); - /* enable output driver */ - out_be32((volatile unsigned *)GPIO0_TCR, in_be32((volatile unsigned *)GPIO0_TCR) | NAND_EVB_nCE_GPIO_PIN | - NAND_EVB_CLE_GPIO_PIN | NAND_EVB_ALE_GPIO_PIN); -#ifdef USE_READY_BUSY_PIN - /* three-state select */ - out_be32((volatile unsigned *)GPIO0_TSRL, in_be32((volatile unsigned *)GPIO0_TSRL) & 0xFFFFFFFC); - /* high-impedecence */ - out_be32((volatile unsigned *)GPIO0_TCR, in_be32((volatile unsigned *)GPIO0_TCR) & 
(~NAND_EVB_RB_GPIO_PIN)); - /* input select */ - out_be32((volatile unsigned *)GPIO0_ISR1L, - (in_be32((volatile unsigned *)GPIO0_ISR1L) & 0xFFFFFFFC) | 0x00000001); -#endif - - /* insert callbacks */ - this->IO_ADDR_R = ppchameleonevb_fio_base; - this->IO_ADDR_W = ppchameleonevb_fio_base; - this->cmd_ctrl = ppchameleonevb_hwcontrol; -#ifdef USE_READY_BUSY_PIN - this->dev_ready = ppchameleonevb_device_ready; -#endif - this->chip_delay = NAND_SMALL_DELAY_US; - - /* ECC mode */ - this->ecc.mode = NAND_ECC_SOFT; - - /* Scan to find existence of the device */ - if (nand_scan(ppchameleonevb_mtd, 1)) { - iounmap((void *)ppchameleonevb_fio_base); - kfree(ppchameleonevb_mtd); - if (ppchameleon_fio_base) - iounmap(ppchameleon_fio_base); - return -ENXIO; - } - - ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME; - - /* Register the partitions */ - mtd_device_parse_register(ppchameleonevb_mtd, NULL, NULL, - ppchameleon_mtd->size == NAND_SMALL_SIZE ? - partition_info_me : partition_info_hi, - NUM_PARTITIONS); - - /* Return happy */ - return 0; -} - -module_init(ppchameleonevb_init); - -/* - * Clean up routine - */ -static void __exit ppchameleonevb_cleanup(void) -{ - struct nand_chip *this; - - /* Release resources, unregister device(s) */ - nand_release(ppchameleon_mtd); - nand_release(ppchameleonevb_mtd); - - /* Release iomaps */ - this = (struct nand_chip *) &ppchameleon_mtd[1]; - iounmap((void *) this->IO_ADDR_R); - this = (struct nand_chip *) &ppchameleonevb_mtd[1]; - iounmap((void *) this->IO_ADDR_R); - - /* Free the MTD device structure */ - kfree (ppchameleon_mtd); - kfree (ppchameleonevb_mtd); -} -module_exit(ppchameleonevb_cleanup); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("DAVE Srl "); -MODULE_DESCRIPTION("MTD map driver for DAVE Srl PPChameleonEVB board"); diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index 37ee75c..dec80ca 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c @@ -989,7 +989,7 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd) } pxa3xx_flash_ids[0].name = f->name; - pxa3xx_flash_ids[0].id = (f->chip_id >> 8) & 0xffff; + pxa3xx_flash_ids[0].dev_id = (f->chip_id >> 8) & 0xffff; pxa3xx_flash_ids[0].pagesize = f->page_size; chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size; pxa3xx_flash_ids[0].chipsize = chipsize >> 20; diff --git a/drivers/mtd/nand/rtc_from4.c b/drivers/mtd/nand/rtc_from4.c deleted file mode 100644 index e55b5cf..0000000 --- a/drivers/mtd/nand/rtc_from4.c +++ /dev/null @@ -1,624 +0,0 @@ -/* - * drivers/mtd/nand/rtc_from4.c - * - * Copyright (C) 2004 Red Hat, Inc. - * - * Derived from drivers/mtd/nand/spia.c - * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Overview: - * This is a device driver for the AG-AND flash device found on the - * Renesas Technology Corp. Flash ROM 4-slot interface board (FROM_BOARD4), - * which utilizes the Renesas HN29V1G91T-30 part. - * This chip is a 1 GBibit (128MiB x 8 bits) AG-AND flash device. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * MTD structure for Renesas board - */ -static struct mtd_info *rtc_from4_mtd = NULL; - -#define RTC_FROM4_MAX_CHIPS 2 - -/* HS77x9 processor register defines */ -#define SH77X9_BCR1 ((volatile unsigned short *)(0xFFFFFF60)) -#define SH77X9_BCR2 ((volatile unsigned short *)(0xFFFFFF62)) -#define SH77X9_WCR1 ((volatile unsigned short *)(0xFFFFFF64)) -#define SH77X9_WCR2 ((volatile unsigned short *)(0xFFFFFF66)) -#define SH77X9_MCR ((volatile unsigned short *)(0xFFFFFF68)) -#define SH77X9_PCR ((volatile unsigned short *)(0xFFFFFF6C)) -#define SH77X9_FRQCR ((volatile unsigned short *)(0xFFFFFF80)) - -/* - * Values specific to the Renesas Technology Corp. FROM_BOARD4 (used with HS77x9 processor) - */ -/* Address where flash is mapped */ -#define RTC_FROM4_FIO_BASE 0x14000000 - -/* CLE and ALE are tied to address lines 5 & 4, respectively */ -#define RTC_FROM4_CLE (1 << 5) -#define RTC_FROM4_ALE (1 << 4) - -/* address lines A24-A22 used for chip selection */ -#define RTC_FROM4_NAND_ADDR_SLOT3 (0x00800000) -#define RTC_FROM4_NAND_ADDR_SLOT4 (0x00C00000) -#define RTC_FROM4_NAND_ADDR_FPGA (0x01000000) -/* mask address lines A24-A22 used for chip selection */ -#define RTC_FROM4_NAND_ADDR_MASK (RTC_FROM4_NAND_ADDR_SLOT3 | RTC_FROM4_NAND_ADDR_SLOT4 | RTC_FROM4_NAND_ADDR_FPGA) - -/* FPGA status register for checking device ready (bit zero) */ -#define RTC_FROM4_FPGA_SR (RTC_FROM4_NAND_ADDR_FPGA | 0x00000002) -#define RTC_FROM4_DEVICE_READY 0x0001 - -/* FPGA Reed-Solomon ECC Control register */ - -#define RTC_FROM4_RS_ECC_CTL (RTC_FROM4_NAND_ADDR_FPGA | 0x00000050) -#define RTC_FROM4_RS_ECC_CTL_CLR (1 << 7) -#define RTC_FROM4_RS_ECC_CTL_GEN (1 << 6) -#define RTC_FROM4_RS_ECC_CTL_FD_E (1 << 5) - -/* FPGA Reed-Solomon ECC code base */ -#define RTC_FROM4_RS_ECC (RTC_FROM4_NAND_ADDR_FPGA | 0x00000060) -#define RTC_FROM4_RS_ECCN (RTC_FROM4_NAND_ADDR_FPGA | 0x00000080) - -/* FPGA Reed-Solomon ECC check register */ -#define RTC_FROM4_RS_ECC_CHK (RTC_FROM4_NAND_ADDR_FPGA | 0x00000070) -#define RTC_FROM4_RS_ECC_CHK_ERROR (1 << 7) - -#define ERR_STAT_ECC_AVAILABLE 0x20 - -/* Undefine for software ECC */ -#define RTC_FROM4_HWECC 1 - -/* Define as 1 for no virtual erase blocks (in JFFS2) */ -#define RTC_FROM4_NO_VIRTBLOCKS 0 - -/* - * Module stuff - */ -static void __iomem *rtc_from4_fio_base = (void *)P2SEGADDR(RTC_FROM4_FIO_BASE); - -static const struct mtd_partition partition_info[] = { - { - .name = "Renesas flash partition 1", - .offset = 0, - .size = MTDPART_SIZ_FULL}, -}; - -#define NUM_PARTITIONS 1 - -/* - * hardware specific flash bbt decriptors - * Note: this is to allow debugging by disabling - * NAND_BBT_CREATE and/or NAND_BBT_WRITE - * - */ -static uint8_t bbt_pattern[] = { 'B', 'b', 't', '0' }; -static uint8_t mirror_pattern[] = { '1', 't', 'b', 'B' }; - -static struct nand_bbt_descr rtc_from4_bbt_main_descr = { - .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE - | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP, - .offs = 40, - .len = 4, - .veroffs = 44, - .maxblocks = 4, - .pattern = bbt_pattern -}; - -static struct nand_bbt_descr rtc_from4_bbt_mirror_descr = { - .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE - | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP, - .offs = 40, - .len = 4, - .veroffs = 44, - .maxblocks = 4, - .pattern = mirror_pattern -}; - -#ifdef RTC_FROM4_HWECC - -/* the Reed Solomon control structure */ -static 
struct rs_control *rs_decoder; - -/* - * hardware specific Out Of Band information - */ -static struct nand_ecclayout rtc_from4_nand_oobinfo = { - .eccbytes = 32, - .eccpos = { - 0, 1, 2, 3, 4, 5, 6, 7, - 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, - 24, 25, 26, 27, 28, 29, 30, 31}, - .oobfree = {{32, 32}} -}; - -#endif - -/* - * rtc_from4_hwcontrol - hardware specific access to control-lines - * @mtd: MTD device structure - * @cmd: hardware control command - * - * Address lines (A5 and A4) are used to control Command and Address Latch - * Enable on this board, so set the read/write address appropriately. - * - * Chip Enable is also controlled by the Chip Select (CS5) and - * Address lines (A24-A22), so no action is required here. - * - */ -static void rtc_from4_hwcontrol(struct mtd_info *mtd, int cmd, - unsigned int ctrl) -{ - struct nand_chip *chip = (mtd->priv); - - if (cmd == NAND_CMD_NONE) - return; - - if (ctrl & NAND_CLE) - writeb(cmd, chip->IO_ADDR_W | RTC_FROM4_CLE); - else - writeb(cmd, chip->IO_ADDR_W | RTC_FROM4_ALE); -} - -/* - * rtc_from4_nand_select_chip - hardware specific chip select - * @mtd: MTD device structure - * @chip: Chip to select (0 == slot 3, 1 == slot 4) - * - * The chip select is based on address lines A24-A22. - * This driver uses flash slots 3 and 4 (A23-A22). - * - */ -static void rtc_from4_nand_select_chip(struct mtd_info *mtd, int chip) -{ - struct nand_chip *this = mtd->priv; - - this->IO_ADDR_R = (void __iomem *)((unsigned long)this->IO_ADDR_R & ~RTC_FROM4_NAND_ADDR_MASK); - this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W & ~RTC_FROM4_NAND_ADDR_MASK); - - switch (chip) { - - case 0: /* select slot 3 chip */ - this->IO_ADDR_R = (void __iomem *)((unsigned long)this->IO_ADDR_R | RTC_FROM4_NAND_ADDR_SLOT3); - this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W | RTC_FROM4_NAND_ADDR_SLOT3); - break; - case 1: /* select slot 4 chip */ - this->IO_ADDR_R = (void __iomem *)((unsigned long)this->IO_ADDR_R | RTC_FROM4_NAND_ADDR_SLOT4); - this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W | RTC_FROM4_NAND_ADDR_SLOT4); - break; - - } -} - -/* - * rtc_from4_nand_device_ready - hardware specific ready/busy check - * @mtd: MTD device structure - * - * This board provides the Ready/Busy state in the status register - * of the FPGA. Bit zero indicates the RDY(1)/BSY(0) signal. - * - */ -static int rtc_from4_nand_device_ready(struct mtd_info *mtd) -{ - unsigned short status; - - status = *((volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_FPGA_SR)); - - return (status & RTC_FROM4_DEVICE_READY); - -} - -/* - * deplete - code to perform device recovery in case there was a power loss - * @mtd: MTD device structure - * @chip: Chip to select (0 == slot 3, 1 == slot 4) - * - * If there was a sudden loss of power during an erase operation, a - * "device recovery" operation must be performed when power is restored - * to ensure correct operation. This routine performs the required steps - * for the requested chip. - * - * See page 86 of the data sheet for details. 
- * - */ -static void deplete(struct mtd_info *mtd, int chip) -{ - struct nand_chip *this = mtd->priv; - - /* wait until device is ready */ - while (!this->dev_ready(mtd)) ; - - this->select_chip(mtd, chip); - - /* Send the commands for device recovery, phase 1 */ - this->cmdfunc(mtd, NAND_CMD_DEPLETE1, 0x0000, 0x0000); - this->cmdfunc(mtd, NAND_CMD_DEPLETE2, -1, -1); - - /* Send the commands for device recovery, phase 2 */ - this->cmdfunc(mtd, NAND_CMD_DEPLETE1, 0x0000, 0x0004); - this->cmdfunc(mtd, NAND_CMD_DEPLETE2, -1, -1); - -} - -#ifdef RTC_FROM4_HWECC -/* - * rtc_from4_enable_hwecc - hardware specific hardware ECC enable function - * @mtd: MTD device structure - * @mode: I/O mode; read or write - * - * enable hardware ECC for data read or write - * - */ -static void rtc_from4_enable_hwecc(struct mtd_info *mtd, int mode) -{ - volatile unsigned short *rs_ecc_ctl = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC_CTL); - unsigned short status; - - switch (mode) { - case NAND_ECC_READ: - status = RTC_FROM4_RS_ECC_CTL_CLR | RTC_FROM4_RS_ECC_CTL_FD_E; - - *rs_ecc_ctl = status; - break; - - case NAND_ECC_READSYN: - status = 0x00; - - *rs_ecc_ctl = status; - break; - - case NAND_ECC_WRITE: - status = RTC_FROM4_RS_ECC_CTL_CLR | RTC_FROM4_RS_ECC_CTL_GEN | RTC_FROM4_RS_ECC_CTL_FD_E; - - *rs_ecc_ctl = status; - break; - - default: - BUG(); - break; - } - -} - -/* - * rtc_from4_calculate_ecc - hardware specific code to read ECC code - * @mtd: MTD device structure - * @dat: buffer containing the data to generate ECC codes - * @ecc_code ECC codes calculated - * - * The ECC code is calculated by the FPGA. All we have to do is read the values - * from the FPGA registers. - * - * Note: We read from the inverted registers, since data is inverted before - * the code is calculated. So all 0xff data (blank page) results in all 0xff rs code - * - */ -static void rtc_from4_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code) -{ - volatile unsigned short *rs_eccn = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECCN); - unsigned short value; - int i; - - for (i = 0; i < 8; i++) { - value = *rs_eccn; - ecc_code[i] = (unsigned char)value; - rs_eccn++; - } - ecc_code[7] |= 0x0f; /* set the last four bits (not used) */ -} - -/* - * rtc_from4_correct_data - hardware specific code to correct data using ECC code - * @mtd: MTD device structure - * @buf: buffer containing the data to generate ECC codes - * @ecc1 ECC codes read - * @ecc2 ECC codes calculated - * - * The FPGA tells us fast, if there's an error or not. If no, we go back happy - * else we read the ecc results from the fpga and call the rs library to decode - * and hopefully correct the error. 
- * - */ -static int rtc_from4_correct_data(struct mtd_info *mtd, const u_char *buf, u_char *ecc1, u_char *ecc2) -{ - int i, j, res; - unsigned short status; - uint16_t par[6], syn[6]; - uint8_t ecc[8]; - volatile unsigned short *rs_ecc; - - status = *((volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC_CHK)); - - if (!(status & RTC_FROM4_RS_ECC_CHK_ERROR)) { - return 0; - } - - /* Read the syndrome pattern from the FPGA and correct the bitorder */ - rs_ecc = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC); - for (i = 0; i < 8; i++) { - ecc[i] = bitrev8(*rs_ecc); - rs_ecc++; - } - - /* convert into 6 10bit syndrome fields */ - par[5] = rs_decoder->index_of[(((uint16_t) ecc[0] >> 0) & 0x0ff) | (((uint16_t) ecc[1] << 8) & 0x300)]; - par[4] = rs_decoder->index_of[(((uint16_t) ecc[1] >> 2) & 0x03f) | (((uint16_t) ecc[2] << 6) & 0x3c0)]; - par[3] = rs_decoder->index_of[(((uint16_t) ecc[2] >> 4) & 0x00f) | (((uint16_t) ecc[3] << 4) & 0x3f0)]; - par[2] = rs_decoder->index_of[(((uint16_t) ecc[3] >> 6) & 0x003) | (((uint16_t) ecc[4] << 2) & 0x3fc)]; - par[1] = rs_decoder->index_of[(((uint16_t) ecc[5] >> 0) & 0x0ff) | (((uint16_t) ecc[6] << 8) & 0x300)]; - par[0] = (((uint16_t) ecc[6] >> 2) & 0x03f) | (((uint16_t) ecc[7] << 6) & 0x3c0); - - /* Convert to computable syndrome */ - for (i = 0; i < 6; i++) { - syn[i] = par[0]; - for (j = 1; j < 6; j++) - if (par[j] != rs_decoder->nn) - syn[i] ^= rs_decoder->alpha_to[rs_modnn(rs_decoder, par[j] + i * j)]; - - /* Convert to index form */ - syn[i] = rs_decoder->index_of[syn[i]]; - } - - /* Let the library code do its magic. */ - res = decode_rs8(rs_decoder, (uint8_t *) buf, par, 512, syn, 0, NULL, 0xff, NULL); - if (res > 0) { - pr_debug("rtc_from4_correct_data: " "ECC corrected %d errors on read\n", res); - } - return res; -} - -/** - * rtc_from4_errstat - perform additional error status checks - * @mtd: MTD device structure - * @this: NAND chip structure - * @state: state or the operation - * @status: status code returned from read status - * @page: startpage inside the chip, must be called with (page & this->pagemask) - * - * Perform additional error status checks on erase and write failures - * to determine if errors are correctable. For this device, correctable - * 1-bit errors on erase and write are considered acceptable. - * - * note: see pages 34..37 of data sheet for details. 
- * - */ -static int rtc_from4_errstat(struct mtd_info *mtd, struct nand_chip *this, - int state, int status, int page) -{ - int er_stat = 0; - int rtn, retlen; - size_t len; - uint8_t *buf; - int i; - - this->cmdfunc(mtd, NAND_CMD_STATUS_CLEAR, -1, -1); - - if (state == FL_ERASING) { - - for (i = 0; i < 4; i++) { - if (!(status & 1 << (i + 1))) - continue; - this->cmdfunc(mtd, (NAND_CMD_STATUS_ERROR + i + 1), - -1, -1); - rtn = this->read_byte(mtd); - this->cmdfunc(mtd, NAND_CMD_STATUS_RESET, -1, -1); - - /* err_ecc_not_avail */ - if (!(rtn & ERR_STAT_ECC_AVAILABLE)) - er_stat |= 1 << (i + 1); - } - - } else if (state == FL_WRITING) { - - unsigned long corrected = mtd->ecc_stats.corrected; - - /* single bank write logic */ - this->cmdfunc(mtd, NAND_CMD_STATUS_ERROR, -1, -1); - rtn = this->read_byte(mtd); - this->cmdfunc(mtd, NAND_CMD_STATUS_RESET, -1, -1); - - if (!(rtn & ERR_STAT_ECC_AVAILABLE)) { - /* err_ecc_not_avail */ - er_stat |= 1 << 1; - goto out; - } - - len = mtd->writesize; - buf = kmalloc(len, GFP_KERNEL); - if (!buf) { - er_stat = 1; - goto out; - } - - /* recovery read */ - rtn = nand_do_read(mtd, page, len, &retlen, buf); - - /* if read failed or > 1-bit error corrected */ - if (rtn || (mtd->ecc_stats.corrected - corrected) > 1) - er_stat |= 1 << 1; - kfree(buf); - } -out: - rtn = status; - if (er_stat == 0) { /* if ECC is available */ - rtn = (status & ~NAND_STATUS_FAIL); /* clear the error bit */ - } - - return rtn; -} -#endif - -/* - * Main initialization routine - */ -static int __init rtc_from4_init(void) -{ - struct nand_chip *this; - unsigned short bcr1, bcr2, wcr2; - int i; - int ret; - - /* Allocate memory for MTD device structure and private data */ - rtc_from4_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL); - if (!rtc_from4_mtd) { - printk("Unable to allocate Renesas NAND MTD device structure.\n"); - return -ENOMEM; - } - - /* Get pointer to private data */ - this = (struct nand_chip *)(&rtc_from4_mtd[1]); - - /* Initialize structures */ - memset(rtc_from4_mtd, 0, sizeof(struct mtd_info)); - memset(this, 0, sizeof(struct nand_chip)); - - /* Link the private data with the MTD structure */ - rtc_from4_mtd->priv = this; - rtc_from4_mtd->owner = THIS_MODULE; - - /* set area 5 as PCMCIA mode to clear the spec of tDH(Data hold time;9ns min) */ - bcr1 = *SH77X9_BCR1 & ~0x0002; - bcr1 |= 0x0002; - *SH77X9_BCR1 = bcr1; - - /* set */ - bcr2 = *SH77X9_BCR2 & ~0x0c00; - bcr2 |= 0x0800; - *SH77X9_BCR2 = bcr2; - - /* set area 5 wait states */ - wcr2 = *SH77X9_WCR2 & ~0x1c00; - wcr2 |= 0x1c00; - *SH77X9_WCR2 = wcr2; - - /* Set address of NAND IO lines */ - this->IO_ADDR_R = rtc_from4_fio_base; - this->IO_ADDR_W = rtc_from4_fio_base; - /* Set address of hardware control function */ - this->cmd_ctrl = rtc_from4_hwcontrol; - /* Set address of chip select function */ - this->select_chip = rtc_from4_nand_select_chip; - /* command delay time (in us) */ - this->chip_delay = 100; - /* return the status of the Ready/Busy line */ - this->dev_ready = rtc_from4_nand_device_ready; - -#ifdef RTC_FROM4_HWECC - printk(KERN_INFO "rtc_from4_init: using hardware ECC detection.\n"); - - this->ecc.mode = NAND_ECC_HW_SYNDROME; - this->ecc.size = 512; - this->ecc.bytes = 8; - this->ecc.strength = 3; - /* return the status of extra status and ECC checks */ - this->errstat = rtc_from4_errstat; - /* set the nand_oobinfo to support FPGA H/W error detection */ - this->ecc.layout = &rtc_from4_nand_oobinfo; - this->ecc.hwctl = rtc_from4_enable_hwecc; - this->ecc.calculate = 
rtc_from4_calculate_ecc; - this->ecc.correct = rtc_from4_correct_data; - - /* We could create the decoder on demand, if memory is a concern. - * This way we have it handy, if an error happens - * - * Symbolsize is 10 (bits) - * Primitve polynomial is x^10+x^3+1 - * first consecutive root is 0 - * primitve element to generate roots = 1 - * generator polinomial degree = 6 - */ - rs_decoder = init_rs(10, 0x409, 0, 1, 6); - if (!rs_decoder) { - printk(KERN_ERR "Could not create a RS decoder\n"); - ret = -ENOMEM; - goto err_1; - } -#else - printk(KERN_INFO "rtc_from4_init: using software ECC detection.\n"); - - this->ecc.mode = NAND_ECC_SOFT; -#endif - - /* set the bad block tables to support debugging */ - this->bbt_td = &rtc_from4_bbt_main_descr; - this->bbt_md = &rtc_from4_bbt_mirror_descr; - - /* Scan to find existence of the device */ - if (nand_scan(rtc_from4_mtd, RTC_FROM4_MAX_CHIPS)) { - ret = -ENXIO; - goto err_2; - } - - /* Perform 'device recovery' for each chip in case there was a power loss. */ - for (i = 0; i < this->numchips; i++) { - deplete(rtc_from4_mtd, i); - } - -#if RTC_FROM4_NO_VIRTBLOCKS - /* use a smaller erase block to minimize wasted space when a block is bad */ - /* note: this uses eight times as much RAM as using the default and makes */ - /* mounts take four times as long. */ - rtc_from4_mtd->flags |= MTD_NO_VIRTBLOCKS; -#endif - - /* Register the partitions */ - ret = mtd_device_register(rtc_from4_mtd, partition_info, - NUM_PARTITIONS); - if (ret) - goto err_3; - - /* Return happy */ - return 0; -err_3: - nand_release(rtc_from4_mtd); -err_2: - free_rs(rs_decoder); -err_1: - kfree(rtc_from4_mtd); - return ret; -} - -module_init(rtc_from4_init); - -/* - * Clean up routine - */ -static void __exit rtc_from4_cleanup(void) -{ - /* Release resource, unregister partitions */ - nand_release(rtc_from4_mtd); - - /* Free the MTD device structure */ - kfree(rtc_from4_mtd); - -#ifdef RTC_FROM4_HWECC - /* Free the reed solomon resources */ - if (rs_decoder) { - free_rs(rs_decoder); - } -#endif -} - -module_exit(rtc_from4_cleanup); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("d.marlin #include #include +#include #include "sm_common.h" static struct nand_ecclayout nand_oob_sm = { @@ -67,44 +68,37 @@ static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs) return error; } - static struct nand_flash_dev nand_smartmedia_flash_ids[] = { - {"SmartMedia 1MiB 5V", 0x6e, 256, 1, 0x1000, 0}, - {"SmartMedia 1MiB 3,3V", 0xe8, 256, 1, 0x1000, 0}, - {"SmartMedia 1MiB 3,3V", 0xec, 256, 1, 0x1000, 0}, - {"SmartMedia 2MiB 3,3V", 0xea, 256, 2, 0x1000, 0}, - {"SmartMedia 2MiB 5V", 0x64, 256, 2, 0x1000, 0}, - {"SmartMedia 2MiB 3,3V ROM", 0x5d, 512, 2, 0x2000, NAND_ROM}, - {"SmartMedia 4MiB 3,3V", 0xe3, 512, 4, 0x2000, 0}, - {"SmartMedia 4MiB 3,3/5V", 0xe5, 512, 4, 0x2000, 0}, - {"SmartMedia 4MiB 5V", 0x6b, 512, 4, 0x2000, 0}, - {"SmartMedia 4MiB 3,3V ROM", 0xd5, 512, 4, 0x2000, NAND_ROM}, - {"SmartMedia 8MiB 3,3V", 0xe6, 512, 8, 0x2000, 0}, - {"SmartMedia 8MiB 3,3V ROM", 0xd6, 512, 8, 0x2000, NAND_ROM}, - {"SmartMedia 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0}, - {"SmartMedia 16MiB 3,3V ROM", 0x57, 512, 16, 0x4000, NAND_ROM}, - {"SmartMedia 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0}, - {"SmartMedia 32MiB 3,3V ROM", 0x58, 512, 32, 0x4000, NAND_ROM}, - {"SmartMedia 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0}, - {"SmartMedia 64MiB 3,3V ROM", 0xd9, 512, 64, 0x4000, NAND_ROM}, - {"SmartMedia 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0}, - {"SmartMedia 128MiB 3,3V ROM", 0xda, 512, 128, 0x4000, NAND_ROM}, - {"SmartMedia 
256MiB 3,3V", 0x71, 512, 256, 0x4000 }, - {"SmartMedia 256MiB 3,3V ROM", 0x5b, 512, 256, 0x4000, NAND_ROM}, - {NULL,} + LEGACY_ID_NAND("SmartMedia 2MiB 3,3V ROM", 0x5d, 2, SZ_8K, NAND_ROM), + LEGACY_ID_NAND("SmartMedia 4MiB 3,3V", 0xe3, 4, SZ_8K, 0), + LEGACY_ID_NAND("SmartMedia 4MiB 3,3/5V", 0xe5, 4, SZ_8K, 0), + LEGACY_ID_NAND("SmartMedia 4MiB 5V", 0x6b, 4, SZ_8K, 0), + LEGACY_ID_NAND("SmartMedia 4MiB 3,3V ROM", 0xd5, 4, SZ_8K, NAND_ROM), + LEGACY_ID_NAND("SmartMedia 8MiB 3,3V", 0xe6, 8, SZ_8K, 0), + LEGACY_ID_NAND("SmartMedia 8MiB 3,3V ROM", 0xd6, 8, SZ_8K, NAND_ROM), + LEGACY_ID_NAND("SmartMedia 16MiB 3,3V", 0x73, 16, SZ_16K, 0), + LEGACY_ID_NAND("SmartMedia 16MiB 3,3V ROM", 0x57, 16, SZ_16K, NAND_ROM), + LEGACY_ID_NAND("SmartMedia 32MiB 3,3V", 0x75, 32, SZ_16K, 0), + LEGACY_ID_NAND("SmartMedia 32MiB 3,3V ROM", 0x58, 32, SZ_16K, NAND_ROM), + LEGACY_ID_NAND("SmartMedia 64MiB 3,3V", 0x76, 64, SZ_16K, 0), + LEGACY_ID_NAND("SmartMedia 64MiB 3,3V ROM", 0xd9, 64, SZ_16K, NAND_ROM), + LEGACY_ID_NAND("SmartMedia 128MiB 3,3V", 0x79, 128, SZ_16K, 0), + LEGACY_ID_NAND("SmartMedia 128MiB 3,3V ROM", 0xda, 128, SZ_16K, NAND_ROM), + LEGACY_ID_NAND("SmartMedia 256MiB 3, 3V", 0x71, 256, SZ_16K, 0), + LEGACY_ID_NAND("SmartMedia 256MiB 3,3V ROM", 0x5b, 256, SZ_16K, NAND_ROM), + {NULL} }; static struct nand_flash_dev nand_xd_flash_ids[] = { - - {"xD 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0}, - {"xD 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0}, - {"xD 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0}, - {"xD 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0}, - {"xD 256MiB 3,3V", 0x71, 512, 256, 0x4000, NAND_BROKEN_XD}, - {"xD 512MiB 3,3V", 0xdc, 512, 512, 0x4000, NAND_BROKEN_XD}, - {"xD 1GiB 3,3V", 0xd3, 512, 1024, 0x4000, NAND_BROKEN_XD}, - {"xD 2GiB 3,3V", 0xd5, 512, 2048, 0x4000, NAND_BROKEN_XD}, - {NULL,} + LEGACY_ID_NAND("xD 16MiB 3,3V", 0x73, 16, SZ_16K, 0), + LEGACY_ID_NAND("xD 32MiB 3,3V", 0x75, 32, SZ_16K, 0), + LEGACY_ID_NAND("xD 64MiB 3,3V", 0x76, 64, SZ_16K, 0), + LEGACY_ID_NAND("xD 128MiB 3,3V", 0x79, 128, SZ_16K, 0), + LEGACY_ID_NAND("xD 256MiB 3,3V", 0x71, 256, SZ_16K, NAND_BROKEN_XD), + LEGACY_ID_NAND("xD 512MiB 3,3V", 0xdc, 512, SZ_16K, NAND_BROKEN_XD), + LEGACY_ID_NAND("xD 1GiB 3,3V", 0xd3, 1024, SZ_16K, NAND_BROKEN_XD), + LEGACY_ID_NAND("xD 2GiB 3,3V", 0xd5, 2048, SZ_16K, NAND_BROKEN_XD), + {NULL} }; int sm_register_device(struct mtd_info *mtd, int smartmedia) diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c index e1e8748..7ed654c 100644 --- a/drivers/mtd/nand/txx9ndfmc.c +++ b/drivers/mtd/nand/txx9ndfmc.c @@ -427,18 +427,7 @@ static struct platform_driver txx9ndfmc_driver = { }, }; -static int __init txx9ndfmc_init(void) -{ - return platform_driver_probe(&txx9ndfmc_driver, txx9ndfmc_probe); -} - -static void __exit txx9ndfmc_exit(void) -{ - platform_driver_unregister(&txx9ndfmc_driver); -} - -module_init(txx9ndfmc_init); -module_exit(txx9ndfmc_exit); +module_platform_driver_probe(txx9ndfmc_driver, txx9ndfmc_probe); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("TXx9 SoC NAND flash controller driver"); diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c index 30bd907..553d6d6 100644 --- a/drivers/mtd/ofpart.c +++ b/drivers/mtd/ofpart.c @@ -55,6 +55,7 @@ static int parse_ofpart_partitions(struct mtd_info *master, while ((pp = of_get_next_child(node, pp))) { const __be32 *reg; int len; + int a_cells, s_cells; reg = of_get_property(pp, "reg", &len); if (!reg) { @@ -62,8 +63,10 @@ static int parse_ofpart_partitions(struct mtd_info *master, continue; } - (*pparts)[i].offset = be32_to_cpu(reg[0]); 
- (*pparts)[i].size = be32_to_cpu(reg[1]); + a_cells = of_n_addr_cells(pp); + s_cells = of_n_size_cells(pp); + (*pparts)[i].offset = of_read_number(reg, a_cells); + (*pparts)[i].size = of_read_number(reg + a_cells, s_cells); partname = of_get_property(pp, "label", &len); if (!partname) diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig index 91467bb..ab26072 100644 --- a/drivers/mtd/onenand/Kconfig +++ b/drivers/mtd/onenand/Kconfig @@ -40,7 +40,6 @@ config MTD_ONENAND_SAMSUNG config MTD_ONENAND_OTP bool "OneNAND OTP Support" - select HAVE_MTD_OTP help One Block of the NAND Flash Array memory is reserved as a One-Time Programmable Block memory area. @@ -68,10 +67,4 @@ config MTD_ONENAND_2X_PROGRAM And more recent chips -config MTD_ONENAND_SIM - tristate "OneNAND simulator support" - help - The simulator may simulate various OneNAND flash chips for the - OneNAND MTD layer. - endif # MTD_ONENAND diff --git a/drivers/mtd/onenand/Makefile b/drivers/mtd/onenand/Makefile index 2b7884c..9d6540e 100644 --- a/drivers/mtd/onenand/Makefile +++ b/drivers/mtd/onenand/Makefile @@ -10,7 +10,4 @@ obj-$(CONFIG_MTD_ONENAND_GENERIC) += generic.o obj-$(CONFIG_MTD_ONENAND_OMAP2) += omap2.o obj-$(CONFIG_MTD_ONENAND_SAMSUNG) += samsung.o -# Simulator -obj-$(CONFIG_MTD_ONENAND_SIM) += onenand_sim.o - onenand-objs = onenand_base.o onenand_bbt.o diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c index eec2aed..d98b198 100644 --- a/drivers/mtd/onenand/omap2.c +++ b/drivers/mtd/onenand/omap2.c @@ -832,19 +832,7 @@ static struct platform_driver omap2_onenand_driver = { }, }; -static int __init omap2_onenand_init(void) -{ - printk(KERN_INFO "OneNAND driver initializing\n"); - return platform_driver_register(&omap2_onenand_driver); -} - -static void __exit omap2_onenand_exit(void) -{ - platform_driver_unregister(&omap2_onenand_driver); -} - -module_init(omap2_onenand_init); -module_exit(omap2_onenand_exit); +module_platform_driver(omap2_onenand_driver); MODULE_ALIAS("platform:" DRIVER_NAME); MODULE_LICENSE("GPL"); diff --git a/drivers/mtd/onenand/onenand_sim.c b/drivers/mtd/onenand/onenand_sim.c deleted file mode 100644 index 85399e3..0000000 --- a/drivers/mtd/onenand/onenand_sim.c +++ /dev/null @@ -1,564 +0,0 @@ -/* - * linux/drivers/mtd/onenand/onenand_sim.c - * - * The OneNAND simulator - * - * Copyright © 2005-2007 Samsung Electronics - * Kyungmin Park - * - * Vishak G , Rohit Hagargundgi - * Flex-OneNAND simulator support - * Copyright (C) Samsung Electronics, 2008 - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#ifndef CONFIG_ONENAND_SIM_MANUFACTURER -#define CONFIG_ONENAND_SIM_MANUFACTURER 0xec -#endif - -#ifndef CONFIG_ONENAND_SIM_DEVICE_ID -#define CONFIG_ONENAND_SIM_DEVICE_ID 0x04 -#endif - -#define CONFIG_FLEXONENAND ((CONFIG_ONENAND_SIM_DEVICE_ID >> 9) & 1) - -#ifndef CONFIG_ONENAND_SIM_VERSION_ID -#define CONFIG_ONENAND_SIM_VERSION_ID 0x1e -#endif - -#ifndef CONFIG_ONENAND_SIM_TECHNOLOGY_ID -#define CONFIG_ONENAND_SIM_TECHNOLOGY_ID CONFIG_FLEXONENAND -#endif - -/* Initial boundary values for Flex-OneNAND Simulator */ -#ifndef CONFIG_FLEXONENAND_SIM_DIE0_BOUNDARY -#define CONFIG_FLEXONENAND_SIM_DIE0_BOUNDARY 0x01 -#endif - -#ifndef CONFIG_FLEXONENAND_SIM_DIE1_BOUNDARY -#define CONFIG_FLEXONENAND_SIM_DIE1_BOUNDARY 0x01 -#endif - -static int manuf_id = CONFIG_ONENAND_SIM_MANUFACTURER; -static int device_id = CONFIG_ONENAND_SIM_DEVICE_ID; -static int version_id = CONFIG_ONENAND_SIM_VERSION_ID; -static int technology_id = CONFIG_ONENAND_SIM_TECHNOLOGY_ID; -static int boundary[] = { - CONFIG_FLEXONENAND_SIM_DIE0_BOUNDARY, - CONFIG_FLEXONENAND_SIM_DIE1_BOUNDARY, -}; - -struct onenand_flash { - void __iomem *base; - void __iomem *data; -}; - -#define ONENAND_CORE(flash) (flash->data) -#define ONENAND_CORE_SPARE(flash, this, offset) \ - ((flash->data) + (this->chipsize) + (offset >> 5)) - -#define ONENAND_MAIN_AREA(this, offset) \ - (this->base + ONENAND_DATARAM + offset) - -#define ONENAND_SPARE_AREA(this, offset) \ - (this->base + ONENAND_SPARERAM + offset) - -#define ONENAND_GET_WP_STATUS(this) \ - (readw(this->base + ONENAND_REG_WP_STATUS)) - -#define ONENAND_SET_WP_STATUS(v, this) \ - (writew(v, this->base + ONENAND_REG_WP_STATUS)) - -/* It has all 0xff chars */ -#define MAX_ONENAND_PAGESIZE (4096 + 128) -static unsigned char *ffchars; - -#if CONFIG_FLEXONENAND -#define PARTITION_NAME "Flex-OneNAND simulator partition" -#else -#define PARTITION_NAME "OneNAND simulator partition" -#endif - -static struct mtd_partition os_partitions[] = { - { - .name = PARTITION_NAME, - .offset = 0, - .size = MTDPART_SIZ_FULL, - }, -}; - -/* - * OneNAND simulator mtd - */ -struct onenand_info { - struct mtd_info mtd; - struct mtd_partition *parts; - struct onenand_chip onenand; - struct onenand_flash flash; -}; - -static struct onenand_info *info; - -#define DPRINTK(format, args...) \ -do { \ - printk(KERN_DEBUG "%s[%d]: " format "\n", __func__, \ - __LINE__, ##args); \ -} while (0) - -/** - * onenand_lock_handle - Handle Lock scheme - * @this: OneNAND device structure - * @cmd: The command to be sent - * - * Send lock command to OneNAND device. - * The lock scheme depends on chip type. 
- */ -static void onenand_lock_handle(struct onenand_chip *this, int cmd) -{ - int block_lock_scheme; - int status; - - status = ONENAND_GET_WP_STATUS(this); - block_lock_scheme = !(this->options & ONENAND_HAS_CONT_LOCK); - - switch (cmd) { - case ONENAND_CMD_UNLOCK: - case ONENAND_CMD_UNLOCK_ALL: - if (block_lock_scheme) - ONENAND_SET_WP_STATUS(ONENAND_WP_US, this); - else - ONENAND_SET_WP_STATUS(status | ONENAND_WP_US, this); - break; - - case ONENAND_CMD_LOCK: - if (block_lock_scheme) - ONENAND_SET_WP_STATUS(ONENAND_WP_LS, this); - else - ONENAND_SET_WP_STATUS(status | ONENAND_WP_LS, this); - break; - - case ONENAND_CMD_LOCK_TIGHT: - if (block_lock_scheme) - ONENAND_SET_WP_STATUS(ONENAND_WP_LTS, this); - else - ONENAND_SET_WP_STATUS(status | ONENAND_WP_LTS, this); - break; - - default: - break; - } -} - -/** - * onenand_bootram_handle - Handle BootRAM area - * @this: OneNAND device structure - * @cmd: The command to be sent - * - * Emulate BootRAM area. It is possible to do basic operation using BootRAM. - */ -static void onenand_bootram_handle(struct onenand_chip *this, int cmd) -{ - switch (cmd) { - case ONENAND_CMD_READID: - writew(manuf_id, this->base); - writew(device_id, this->base + 2); - writew(version_id, this->base + 4); - break; - - default: - /* REVIST: Handle other commands */ - break; - } -} - -/** - * onenand_update_interrupt - Set interrupt register - * @this: OneNAND device structure - * @cmd: The command to be sent - * - * Update interrupt register. The status depends on command. - */ -static void onenand_update_interrupt(struct onenand_chip *this, int cmd) -{ - int interrupt = ONENAND_INT_MASTER; - - switch (cmd) { - case ONENAND_CMD_READ: - case ONENAND_CMD_READOOB: - interrupt |= ONENAND_INT_READ; - break; - - case ONENAND_CMD_PROG: - case ONENAND_CMD_PROGOOB: - interrupt |= ONENAND_INT_WRITE; - break; - - case ONENAND_CMD_ERASE: - interrupt |= ONENAND_INT_ERASE; - break; - - case ONENAND_CMD_RESET: - interrupt |= ONENAND_INT_RESET; - break; - - default: - break; - } - - writew(interrupt, this->base + ONENAND_REG_INTERRUPT); -} - -/** - * onenand_check_overwrite - Check if over-write happened - * @dest: The destination pointer - * @src: The source pointer - * @count: The length to be check - * - * Returns: 0 on same, otherwise 1 - * - * Compare the source with destination - */ -static int onenand_check_overwrite(void *dest, void *src, size_t count) -{ - unsigned int *s = (unsigned int *) src; - unsigned int *d = (unsigned int *) dest; - int i; - - count >>= 2; - for (i = 0; i < count; i++) - if ((*s++ ^ *d++) != 0) - return 1; - - return 0; -} - -/** - * onenand_data_handle - Handle OneNAND Core and DataRAM - * @this: OneNAND device structure - * @cmd: The command to be sent - * @dataram: Which dataram used - * @offset: The offset to OneNAND Core - * - * Copy data from OneNAND Core to DataRAM (read) - * Copy data from DataRAM to OneNAND Core (write) - * Erase the OneNAND Core (erase) - */ -static void onenand_data_handle(struct onenand_chip *this, int cmd, - int dataram, unsigned int offset) -{ - struct mtd_info *mtd = &info->mtd; - struct onenand_flash *flash = this->priv; - int main_offset, spare_offset, die = 0; - void __iomem *src; - void __iomem *dest; - unsigned int i; - static int pi_operation; - int erasesize, rgn; - - if (dataram) { - main_offset = mtd->writesize; - spare_offset = mtd->oobsize; - } else { - main_offset = 0; - spare_offset = 0; - } - - if (pi_operation) { - die = readw(this->base + ONENAND_REG_START_ADDRESS2); - die >>= ONENAND_DDP_SHIFT; - 
} - - switch (cmd) { - case FLEXONENAND_CMD_PI_ACCESS: - pi_operation = 1; - break; - - case ONENAND_CMD_RESET: - pi_operation = 0; - break; - - case ONENAND_CMD_READ: - src = ONENAND_CORE(flash) + offset; - dest = ONENAND_MAIN_AREA(this, main_offset); - if (pi_operation) { - writew(boundary[die], this->base + ONENAND_DATARAM); - break; - } - memcpy(dest, src, mtd->writesize); - /* Fall through */ - - case ONENAND_CMD_READOOB: - src = ONENAND_CORE_SPARE(flash, this, offset); - dest = ONENAND_SPARE_AREA(this, spare_offset); - memcpy(dest, src, mtd->oobsize); - break; - - case ONENAND_CMD_PROG: - src = ONENAND_MAIN_AREA(this, main_offset); - dest = ONENAND_CORE(flash) + offset; - if (pi_operation) { - boundary[die] = readw(this->base + ONENAND_DATARAM); - break; - } - /* To handle partial write */ - for (i = 0; i < (1 << mtd->subpage_sft); i++) { - int off = i * this->subpagesize; - if (!memcmp(src + off, ffchars, this->subpagesize)) - continue; - if (memcmp(dest + off, ffchars, this->subpagesize) && - onenand_check_overwrite(dest + off, src + off, this->subpagesize)) - printk(KERN_ERR "over-write happened at 0x%08x\n", offset); - memcpy(dest + off, src + off, this->subpagesize); - } - /* Fall through */ - - case ONENAND_CMD_PROGOOB: - src = ONENAND_SPARE_AREA(this, spare_offset); - /* Check all data is 0xff chars */ - if (!memcmp(src, ffchars, mtd->oobsize)) - break; - - dest = ONENAND_CORE_SPARE(flash, this, offset); - if (memcmp(dest, ffchars, mtd->oobsize) && - onenand_check_overwrite(dest, src, mtd->oobsize)) - printk(KERN_ERR "OOB: over-write happened at 0x%08x\n", - offset); - memcpy(dest, src, mtd->oobsize); - break; - - case ONENAND_CMD_ERASE: - if (pi_operation) - break; - - if (FLEXONENAND(this)) { - rgn = flexonenand_region(mtd, offset); - erasesize = mtd->eraseregions[rgn].erasesize; - } else - erasesize = mtd->erasesize; - - memset(ONENAND_CORE(flash) + offset, 0xff, erasesize); - memset(ONENAND_CORE_SPARE(flash, this, offset), 0xff, - (erasesize >> 5)); - break; - - default: - break; - } -} - -/** - * onenand_command_handle - Handle command - * @this: OneNAND device structure - * @cmd: The command to be sent - * - * Emulate OneNAND command. - */ -static void onenand_command_handle(struct onenand_chip *this, int cmd) -{ - unsigned long offset = 0; - int block = -1, page = -1, bufferram = -1; - int dataram = 0; - - switch (cmd) { - case ONENAND_CMD_UNLOCK: - case ONENAND_CMD_LOCK: - case ONENAND_CMD_LOCK_TIGHT: - case ONENAND_CMD_UNLOCK_ALL: - onenand_lock_handle(this, cmd); - break; - - case ONENAND_CMD_BUFFERRAM: - /* Do nothing */ - return; - - default: - block = (int) readw(this->base + ONENAND_REG_START_ADDRESS1); - if (block & (1 << ONENAND_DDP_SHIFT)) { - block &= ~(1 << ONENAND_DDP_SHIFT); - /* The half of chip block */ - block += this->chipsize >> (this->erase_shift + 1); - } - if (cmd == ONENAND_CMD_ERASE) - break; - - page = (int) readw(this->base + ONENAND_REG_START_ADDRESS8); - page = (page >> ONENAND_FPA_SHIFT); - bufferram = (int) readw(this->base + ONENAND_REG_START_BUFFER); - bufferram >>= ONENAND_BSA_SHIFT; - bufferram &= ONENAND_BSA_DATARAM1; - dataram = (bufferram == ONENAND_BSA_DATARAM1) ? 
1 : 0; - break; - } - - if (block != -1) - offset = onenand_addr(this, block); - - if (page != -1) - offset += page << this->page_shift; - - onenand_data_handle(this, cmd, dataram, offset); - - onenand_update_interrupt(this, cmd); -} - -/** - * onenand_writew - [OneNAND Interface] Emulate write operation - * @value: value to write - * @addr: address to write - * - * Write OneNAND register with value - */ -static void onenand_writew(unsigned short value, void __iomem * addr) -{ - struct onenand_chip *this = info->mtd.priv; - - /* BootRAM handling */ - if (addr < this->base + ONENAND_DATARAM) { - onenand_bootram_handle(this, value); - return; - } - /* Command handling */ - if (addr == this->base + ONENAND_REG_COMMAND) - onenand_command_handle(this, value); - - writew(value, addr); -} - -/** - * flash_init - Initialize OneNAND simulator - * @flash: OneNAND simulator data strucutres - * - * Initialize OneNAND simulator. - */ -static int __init flash_init(struct onenand_flash *flash) -{ - int density, size; - int buffer_size; - - flash->base = kzalloc(131072, GFP_KERNEL); - if (!flash->base) { - printk(KERN_ERR "Unable to allocate base address.\n"); - return -ENOMEM; - } - - density = device_id >> ONENAND_DEVICE_DENSITY_SHIFT; - density &= ONENAND_DEVICE_DENSITY_MASK; - size = ((16 << 20) << density); - - ONENAND_CORE(flash) = vmalloc(size + (size >> 5)); - if (!ONENAND_CORE(flash)) { - printk(KERN_ERR "Unable to allocate nand core address.\n"); - kfree(flash->base); - return -ENOMEM; - } - - memset(ONENAND_CORE(flash), 0xff, size + (size >> 5)); - - /* Setup registers */ - writew(manuf_id, flash->base + ONENAND_REG_MANUFACTURER_ID); - writew(device_id, flash->base + ONENAND_REG_DEVICE_ID); - writew(version_id, flash->base + ONENAND_REG_VERSION_ID); - writew(technology_id, flash->base + ONENAND_REG_TECHNOLOGY); - - if (density < 2 && (!CONFIG_FLEXONENAND)) - buffer_size = 0x0400; /* 1KiB page */ - else - buffer_size = 0x0800; /* 2KiB page */ - writew(buffer_size, flash->base + ONENAND_REG_DATA_BUFFER_SIZE); - - return 0; -} - -/** - * flash_exit - Clean up OneNAND simulator - * @flash: OneNAND simulator data structures - * - * Clean up OneNAND simulator. 
- */ -static void flash_exit(struct onenand_flash *flash) -{ - vfree(ONENAND_CORE(flash)); - kfree(flash->base); -} - -static int __init onenand_sim_init(void) -{ - /* Allocate all 0xff chars pointer */ - ffchars = kmalloc(MAX_ONENAND_PAGESIZE, GFP_KERNEL); - if (!ffchars) { - printk(KERN_ERR "Unable to allocate ff chars.\n"); - return -ENOMEM; - } - memset(ffchars, 0xff, MAX_ONENAND_PAGESIZE); - - /* Allocate OneNAND simulator mtd pointer */ - info = kzalloc(sizeof(struct onenand_info), GFP_KERNEL); - if (!info) { - printk(KERN_ERR "Unable to allocate core structures.\n"); - kfree(ffchars); - return -ENOMEM; - } - - /* Override write_word function */ - info->onenand.write_word = onenand_writew; - - if (flash_init(&info->flash)) { - printk(KERN_ERR "Unable to allocate flash.\n"); - kfree(ffchars); - kfree(info); - return -ENOMEM; - } - - info->parts = os_partitions; - - info->onenand.base = info->flash.base; - info->onenand.priv = &info->flash; - - info->mtd.name = "OneNAND simulator"; - info->mtd.priv = &info->onenand; - info->mtd.owner = THIS_MODULE; - - if (onenand_scan(&info->mtd, 1)) { - flash_exit(&info->flash); - kfree(ffchars); - kfree(info); - return -ENXIO; - } - - mtd_device_register(&info->mtd, info->parts, - ARRAY_SIZE(os_partitions)); - - return 0; -} - -static void __exit onenand_sim_exit(void) -{ - struct onenand_chip *this = info->mtd.priv; - struct onenand_flash *flash = this->priv; - - onenand_release(&info->mtd); - flash_exit(flash); - kfree(ffchars); - kfree(info); -} - -module_init(onenand_sim_init); -module_exit(onenand_sim_exit); - -MODULE_AUTHOR("Kyungmin Park "); -MODULE_DESCRIPTION("The OneNAND flash simulator"); -MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c index ee70577..dada66b 100644 --- a/drivers/net/ethernet/adi/bfin_mac.c +++ b/drivers/net/ethernet/adi/bfin_mac.c @@ -1700,7 +1700,8 @@ static int bfin_mac_probe(struct platform_device *pdev) } bfin_mac_hwtstamp_init(ndev); - if (bfin_phc_init(ndev, &pdev->dev)) { + rc = bfin_phc_init(ndev, &pdev->dev); + if (rc) { dev_err(&pdev->dev, "Cannot register PHC device!\n"); goto out_err_phc; } diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index e1e5bb9..fd7b547 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -2640,9 +2640,8 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, req = get_mac_list_cmd.va; be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, - OPCODE_COMMON_GET_MAC_LIST, sizeof(*req), - wrb, &get_mac_list_cmd); - + OPCODE_COMMON_GET_MAC_LIST, + get_mac_list_cmd.size, wrb, &get_mac_list_cmd); req->hdr.domain = domain; req->mac_type = MAC_ADDRESS_TYPE_NETWORK; req->perm_override = 1; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 6c52a60..a444110 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1827,7 +1827,7 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo) mdelay(1); } else { be_rx_compl_discard(rxo, rxcp); - be_cq_notify(adapter, rx_cq->id, true, 1); + be_cq_notify(adapter, rx_cq->id, false, 1); if (rxcp->num_rcvd == 0) break; } @@ -2533,11 +2533,6 @@ static void be_rx_qs_destroy(struct be_adapter *adapter) q = &rxo->q; if (q->created) { be_cmd_rxq_destroy(adapter, q); - /* After the rxq is invalidated, wait for a grace time - * of 1ms for all dma to end and the flush compl to 
- * arrive - */ - mdelay(1); be_rx_cq_clean(rxo); } be_queue_free(adapter, q); @@ -2564,6 +2559,7 @@ static int be_close(struct net_device *netdev) * all tx skbs are freed. */ be_tx_compl_clean(adapter); + netif_tx_disable(netdev); be_rx_qs_destroy(adapter); @@ -2672,6 +2668,7 @@ static int be_open(struct net_device *netdev) if (!status) be_link_status_update(adapter, link_status); + netif_tx_start_all_queues(netdev); be_roce_dev_open(adapter); return 0; err: @@ -2783,6 +2780,8 @@ static void be_vf_clear(struct be_adapter *adapter) goto done; } + pci_disable_sriov(adapter->pdev); + for_all_vfs(adapter, vf_cfg, vf) { if (lancer_chip(adapter)) be_cmd_set_mac_list(adapter, NULL, 0, vf + 1); @@ -2792,7 +2791,6 @@ static void be_vf_clear(struct be_adapter *adapter) be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1); } - pci_disable_sriov(adapter->pdev); done: kfree(adapter->vf_cfg); adapter->num_vfs = 0; @@ -2889,13 +2887,8 @@ static int be_vf_setup(struct be_adapter *adapter) dev_info(dev, "Device supports %d VFs and not %d\n", adapter->dev_num_vfs, num_vfs); adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs); - - status = pci_enable_sriov(adapter->pdev, num_vfs); - if (status) { - dev_err(dev, "SRIOV enable failed\n"); - adapter->num_vfs = 0; + if (!adapter->num_vfs) return 0; - } } status = be_vf_setup_init(adapter); @@ -2944,6 +2937,15 @@ static int be_vf_setup(struct be_adapter *adapter) be_cmd_enable_vf(adapter, vf + 1); } + + if (!old_vfs) { + status = pci_enable_sriov(adapter->pdev, adapter->num_vfs); + if (status) { + dev_err(dev, "SRIOV enable failed\n"); + adapter->num_vfs = 0; + goto err; + } + } return 0; err: dev_err(dev, "VF setup failed\n"); @@ -3198,7 +3200,7 @@ static int be_setup(struct be_adapter *adapter) be_cmd_set_flow_control(adapter, adapter->tx_fc, adapter->rx_fc); - if (be_physfn(adapter) && num_vfs) { + if (be_physfn(adapter)) { if (adapter->dev_num_vfs) be_vf_setup(adapter); else diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index ceb4d43..9ce5b71 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -198,6 +198,11 @@ struct bufdesc_ex { #define FLAG_RX_CSUM_ENABLED (BD_ENET_RX_ICE | BD_ENET_RX_PCR) #define FLAG_RX_CSUM_ERROR (BD_ENET_RX_ICE | BD_ENET_RX_PCR) +struct fec_enet_delayed_work { + struct delayed_work delay_work; + bool timeout; +}; + /* The FEC buffer descriptors track the ring buffers. The rx_bd_base and * tx_bd_base always point to the base of the buffer descriptors. The * cur_rx and cur_tx point to the currently available buffer. 
@@ -232,9 +237,6 @@ struct fec_enet_private { /* The ring entries to be free()ed */ struct bufdesc *dirty_tx; - /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */ - spinlock_t hw_lock; - struct platform_device *pdev; int opened; @@ -269,7 +271,7 @@ struct fec_enet_private { int hwts_rx_en; int hwts_tx_en; struct timer_list time_keep; - + struct fec_enet_delayed_work delay_work; }; void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index e25bf83..aff0310 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -445,6 +445,13 @@ fec_restart(struct net_device *ndev, int duplex) u32 rcntl = OPT_FRAME_SIZE | 0x04; u32 ecntl = 0x2; /* ETHEREN */ + if (netif_running(ndev)) { + netif_device_detach(ndev); + napi_disable(&fep->napi); + netif_stop_queue(ndev); + netif_tx_lock(ndev); + } + /* Whack a reset. We should wait for this. */ writel(1, fep->hwp + FEC_ECNTRL); udelay(10); @@ -605,6 +612,13 @@ fec_restart(struct net_device *ndev, int duplex) /* Enable interrupts we wish to service */ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); + + if (netif_running(ndev)) { + netif_device_attach(ndev); + napi_enable(&fep->napi); + netif_wake_queue(ndev); + netif_tx_unlock(ndev); + } } static void @@ -644,8 +658,22 @@ fec_timeout(struct net_device *ndev) ndev->stats.tx_errors++; - fec_restart(ndev, fep->full_duplex); - netif_wake_queue(ndev); + fep->delay_work.timeout = true; + schedule_delayed_work(&(fep->delay_work.delay_work), 0); +} + +static void fec_enet_work(struct work_struct *work) +{ + struct fec_enet_private *fep = + container_of(work, + struct fec_enet_private, + delay_work.delay_work.work); + + if (fep->delay_work.timeout) { + fep->delay_work.timeout = false; + fec_restart(fep->netdev, fep->full_duplex); + netif_wake_queue(fep->netdev); + } } static void @@ -1024,16 +1052,12 @@ static void fec_enet_adjust_link(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); struct phy_device *phy_dev = fep->phy_dev; - unsigned long flags; - int status_change = 0; - spin_lock_irqsave(&fep->hw_lock, flags); - /* Prevent a state halted on mii error */ if (fep->mii_timeout && phy_dev->state == PHY_HALTED) { phy_dev->state = PHY_RESUMING; - goto spin_unlock; + return; } if (phy_dev->link) { @@ -1061,9 +1085,6 @@ static void fec_enet_adjust_link(struct net_device *ndev) } } -spin_unlock: - spin_unlock_irqrestore(&fep->hw_lock, flags); - if (status_change) phy_print_status(phy_dev); } @@ -1732,7 +1753,6 @@ static int fec_enet_init(struct net_device *ndev) return -ENOMEM; memset(cbd_base, 0, PAGE_SIZE); - spin_lock_init(&fep->hw_lock); fep->netdev = ndev; @@ -1952,6 +1972,7 @@ fec_probe(struct platform_device *pdev) if (fep->bufdesc_ex && fep->ptp_clock) netdev_info(ndev, "registered PHC device %d\n", fep->dev_id); + INIT_DELAYED_WORK(&(fep->delay_work.delay_work), fec_enet_work); return 0; failed_register: @@ -1984,6 +2005,7 @@ fec_drv_remove(struct platform_device *pdev) struct fec_enet_private *fep = netdev_priv(ndev); int i; + cancel_delayed_work_sync(&(fep->delay_work.delay_work)); unregister_netdev(ndev); fec_enet_mii_remove(fep); del_timer_sync(&fep->time_keep); diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 07f6baa..9a95abf 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -912,8 +912,10 @@ static int 
efx_ptp_probe_channel(struct efx_channel *channel) ptp->phc_clock = ptp_clock_register(&ptp->phc_clock_info, &efx->pci_dev->dev); - if (!ptp->phc_clock) + if (IS_ERR(ptp->phc_clock)) { + rc = PTR_ERR(ptp->phc_clock); goto fail3; + } INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker); ptp->pps_workwq = create_singlethread_workqueue("sfc_pps"); diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index 66e025a..f3c2d03 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c @@ -930,7 +930,7 @@ static int tile_net_setup_interrupts(struct net_device *dev) if (info->has_iqueue) { gxio_mpipe_request_notif_ring_interrupt( &context, cpu_x(cpu), cpu_y(cpu), - 1, ingress_irq, info->iqueue.ring); + KERNEL_PL, ingress_irq, info->iqueue.ring); } } diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index c655fe6..5734480 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -1990,7 +1990,8 @@ spider_net_open(struct net_device *netdev) goto alloc_rx_failed; /* Allocate rx skbs */ - if (spider_net_alloc_rx_skbs(card)) + result = spider_net_alloc_rx_skbs(card); + if (result) goto alloc_skbs_failed; spider_net_set_multi(netdev); diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c index a06fca6..22b4527 100644 --- a/drivers/net/irda/bfin_sir.c +++ b/drivers/net/irda/bfin_sir.c @@ -609,7 +609,7 @@ static int bfin_sir_open(struct net_device *dev) { struct bfin_sir_self *self = netdev_priv(dev); struct bfin_sir_port *port = self->sir_port; - int err = -ENOMEM; + int err; self->newspeed = 0; self->speed = 9600; @@ -623,8 +623,10 @@ static int bfin_sir_open(struct net_device *dev) bfin_sir_set_speed(port, 9600); self->irlap = irlap_open(dev, &self->qos, DRIVER_NAME); - if (!self->irlap) + if (!self->irlap) { + err = -ENOMEM; goto err_irlap; + } INIT_WORK(&self->work, bfin_sir_send_work); diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 4503452..1e11f2b 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -126,7 +126,7 @@ config MDIO_BITBANG config MDIO_GPIO tristate "Support for GPIO lib-based bitbanged MDIO buses" - depends on MDIO_BITBANG && GENERIC_GPIO + depends on MDIO_BITBANG && GPIOLIB ---help--- Supports GPIO lib-based MDIO busses. 
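The spider_net_open() and bfin_sir_open() hunks above (and the bfin_mac_probe() and efx_ptp_probe_channel() changes before them) all apply the same idiom: capture the callee's return value and propagate it, instead of testing the call inline and later returning an error code that was never set on that path. A minimal, hedged sketch of the pattern follows; the helper names are invented for illustration and do not come from any of these drivers:

    #include <errno.h>

    /* Hypothetical helpers standing in for the drivers' setup calls. */
    static int alloc_rings(void)  { return -ENOMEM; }
    static int request_line(void) { return 0; }
    static void free_rings(void)  { }

    static int example_open(void)
    {
            int err;

            err = alloc_rings();    /* keep the callee's error code */
            if (err)
                    return err;     /* not a hard-coded -ENOMEM */

            err = request_line();
            if (err)
                    goto out_free_rings;

            return 0;

    out_free_rings:
            free_rings();
            return err;
    }

The sfc ptp.c hunk is the same idea for pointer-returning APIs: when the call hands back an ERR_PTR-encoded pointer, the error is recovered with PTR_ERR() and propagated rather than being collapsed into a generic failure.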
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 24fbec2..078795f 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -613,6 +613,13 @@ static const struct usb_device_id products [] = { .driver_info = 0, }, +/* Dell Wireless 5804 (Novatel E371) - handled by qmi_wwan */ +{ + USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, 0x819b, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), + .driver_info = 0, +}, + /* AnyDATA ADU960S - handled by qmi_wwan */ { USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM, diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 834e405..cf887c2 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -501,6 +501,13 @@ static const struct usb_device_id products[] = { USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&qmi_wwan_info, }, + { /* Dell Wireless 5804 (Novatel E371) */ + USB_DEVICE_AND_INTERFACE_INFO(0x413C, 0x819b, + USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, + USB_CDC_PROTO_NONE), + .driver_info = (unsigned long)&qmi_wwan_info, + }, { /* ADU960S */ USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM, diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c index a923d61..a79e9d3 100644 --- a/drivers/net/usb/sierra_net.c +++ b/drivers/net/usb/sierra_net.c @@ -426,6 +426,13 @@ static void sierra_net_dosync(struct usbnet *dev) dev_dbg(&dev->udev->dev, "%s", __func__); + /* The SIERRA_NET_HIP_MSYNC_ID command appears to request that the + * firmware restart itself. After restarting, the modem will respond + * with the SIERRA_NET_HIP_RESTART_ID indication. The driver continues + * sending MSYNC commands every few seconds until it receives the + * RESTART event from the firmware + */ + /* tell modem we are ready */ status = sierra_net_send_sync(dev); if (status < 0) @@ -704,6 +711,9 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf) /* set context index initially to 0 - prepares tx hdr template */ sierra_net_set_ctx_index(priv, 0); + /* prepare sync message template */ + memcpy(priv->sync_msg, sync_tmplate, sizeof(priv->sync_msg)); + /* decrease the rx_urb_size and max_tx_size to 4k on USB 1.1 */ dev->rx_urb_size = SIERRA_NET_RX_URB_SIZE; if (dev->udev->speed != USB_SPEED_HIGH) @@ -739,11 +749,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf) kfree(priv); return -ENODEV; } - /* prepare sync message from template */ - memcpy(priv->sync_msg, sync_tmplate, sizeof(priv->sync_msg)); - - /* initiate the sync sequence */ - sierra_net_dosync(dev); return 0; } @@ -766,8 +771,9 @@ static void sierra_net_unbind(struct usbnet *dev, struct usb_interface *intf) netdev_err(dev->net, "usb_control_msg failed, status %d\n", status); - sierra_net_set_private(dev, NULL); + usbnet_status_stop(dev); + sierra_net_set_private(dev, NULL); kfree(priv); } @@ -908,6 +914,24 @@ static const struct driver_info sierra_net_info_direct_ip = { .tx_fixup = sierra_net_tx_fixup, }; +static int +sierra_net_probe(struct usb_interface *udev, const struct usb_device_id *prod) +{ + int ret; + + ret = usbnet_probe(udev, prod); + if (ret == 0) { + struct usbnet *dev = usb_get_intfdata(udev); + + ret = usbnet_status_start(dev, GFP_KERNEL); + if (ret == 0) { + /* Interrupt URB now set up; initiate sync sequence */ + sierra_net_dosync(dev); + } + } + return ret; +} + #define DIRECT_IP_DEVICE(vend, prod) \ {USB_DEVICE_INTERFACE_NUMBER(vend, prod, 7), \ .driver_info = (unsigned 
long)&sierra_net_info_direct_ip}, \ @@ -930,7 +954,7 @@ MODULE_DEVICE_TABLE(usb, products); static struct usb_driver sierra_net_driver = { .name = "sierra_net", .id_table = products, - .probe = usbnet_probe, + .probe = sierra_net_probe, .disconnect = usbnet_disconnect, .suspend = usbnet_suspend, .resume = usbnet_resume, diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 1e5a9b7..f95cb03 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -252,6 +252,70 @@ static int init_status (struct usbnet *dev, struct usb_interface *intf) return 0; } +/* Submit the interrupt URB if not previously submitted, increasing refcount */ +int usbnet_status_start(struct usbnet *dev, gfp_t mem_flags) +{ + int ret = 0; + + WARN_ON_ONCE(dev->interrupt == NULL); + if (dev->interrupt) { + mutex_lock(&dev->interrupt_mutex); + + if (++dev->interrupt_count == 1) + ret = usb_submit_urb(dev->interrupt, mem_flags); + + dev_dbg(&dev->udev->dev, "incremented interrupt URB count to %d\n", + dev->interrupt_count); + mutex_unlock(&dev->interrupt_mutex); + } + return ret; +} +EXPORT_SYMBOL_GPL(usbnet_status_start); + +/* For resume; submit interrupt URB if previously submitted */ +static int __usbnet_status_start_force(struct usbnet *dev, gfp_t mem_flags) +{ + int ret = 0; + + mutex_lock(&dev->interrupt_mutex); + if (dev->interrupt_count) { + ret = usb_submit_urb(dev->interrupt, mem_flags); + dev_dbg(&dev->udev->dev, + "submitted interrupt URB for resume\n"); + } + mutex_unlock(&dev->interrupt_mutex); + return ret; +} + +/* Kill the interrupt URB if all submitters want it killed */ +void usbnet_status_stop(struct usbnet *dev) +{ + if (dev->interrupt) { + mutex_lock(&dev->interrupt_mutex); + WARN_ON(dev->interrupt_count == 0); + + if (dev->interrupt_count && --dev->interrupt_count == 0) + usb_kill_urb(dev->interrupt); + + dev_dbg(&dev->udev->dev, + "decremented interrupt URB count to %d\n", + dev->interrupt_count); + mutex_unlock(&dev->interrupt_mutex); + } +} +EXPORT_SYMBOL_GPL(usbnet_status_stop); + +/* For suspend; always kill interrupt URB */ +static void __usbnet_status_stop_force(struct usbnet *dev) +{ + if (dev->interrupt) { + mutex_lock(&dev->interrupt_mutex); + usb_kill_urb(dev->interrupt); + dev_dbg(&dev->udev->dev, "killed interrupt URB for suspend\n"); + mutex_unlock(&dev->interrupt_mutex); + } +} + /* Passes this packet up the stack, updating its accounting. * Some link protocols batch packets, so their rx_fixup paths * can return clones as well as just modify the original skb. 
@@ -725,7 +789,7 @@ int usbnet_stop (struct net_device *net) if (!(info->flags & FLAG_AVOID_UNLINK_URBS)) usbnet_terminate_urbs(dev); - usb_kill_urb(dev->interrupt); + usbnet_status_stop(dev); usbnet_purge_paused_rxq(dev); @@ -787,7 +851,7 @@ int usbnet_open (struct net_device *net) /* start any status interrupt transfer */ if (dev->interrupt) { - retval = usb_submit_urb (dev->interrupt, GFP_KERNEL); + retval = usbnet_status_start(dev, GFP_KERNEL); if (retval < 0) { netif_err(dev, ifup, dev->net, "intr submit %d\n", retval); @@ -1458,6 +1522,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) dev->delay.data = (unsigned long) dev; init_timer (&dev->delay); mutex_init (&dev->phy_mutex); + mutex_init(&dev->interrupt_mutex); + dev->interrupt_count = 0; dev->net = net; strcpy (net->name, "usb%d"); @@ -1593,7 +1659,7 @@ int usbnet_suspend (struct usb_interface *intf, pm_message_t message) */ netif_device_detach (dev->net); usbnet_terminate_urbs(dev); - usb_kill_urb(dev->interrupt); + __usbnet_status_stop_force(dev); /* * reattach so runtime management can use and @@ -1613,9 +1679,8 @@ int usbnet_resume (struct usb_interface *intf) int retval; if (!--dev->suspend_count) { - /* resume interrupt URBs */ - if (dev->interrupt && test_bit(EVENT_DEV_OPEN, &dev->flags)) - usb_submit_urb(dev->interrupt, GFP_NOIO); + /* resume interrupt URB if it was previously submitted */ + __usbnet_status_start_force(dev, GFP_NOIO); spin_lock_irq(&dev->txq.lock); while ((res = usb_get_from_anchor(&dev->deferred))) { diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index 23049ae..d5a57a9 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -84,13 +84,10 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) phy = get_phy_device(mdio, addr, is_c45); if (!phy || IS_ERR(phy)) { - phy = phy_device_create(mdio, addr, 0, false, NULL); - if (!phy || IS_ERR(phy)) { - dev_err(&mdio->dev, - "error creating PHY at address %i\n", - addr); - continue; - } + dev_err(&mdio->dev, + "cannot get PHY at address %i\n", + addr); + continue; } /* Associate the OF node with the device structure so it diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 748f8f3..32e66a6 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -174,6 +174,7 @@ int pci_bus_add_device(struct pci_dev *dev) * Can not put in pci_device_add yet because resources * are not assigned yet for some devices. */ + pci_fixup_device(pci_fixup_final, dev); pci_create_sysfs_dev_files(dev); dev->match_driver = true; diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index d40bed7..2c10752 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -563,8 +563,10 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ entry->msi_attrib.pos = dev->msi_cap; - entry->mask_pos = dev->msi_cap + (control & PCI_MSI_FLAGS_64BIT) ? 
- PCI_MSI_MASK_64 : PCI_MSI_MASK_32; + if (control & PCI_MSI_FLAGS_64BIT) + entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64; + else + entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_32; /* All MSIs are unmasked by default, Mask them all */ if (entry->msi_attrib.maskbit) pci_read_config_dword(dev, entry->mask_pos, &entry->masked); diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 631aeb7..70f10fa 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -1341,7 +1341,6 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) list_add_tail(&dev->bus_list, &bus->devices); up_write(&pci_bus_sem); - pci_fixup_device(pci_fixup_final, dev); ret = pcibios_add_device(dev); WARN_ON(ret < 0); diff --git a/drivers/pinctrl/sh-pfc/Kconfig b/drivers/pinctrl/sh-pfc/Kconfig index 0e1f99c..f8a2ae4 100644 --- a/drivers/pinctrl/sh-pfc/Kconfig +++ b/drivers/pinctrl/sh-pfc/Kconfig @@ -6,7 +6,7 @@ if ARCH_SHMOBILE || SUPERH config PINCTRL_SH_PFC # XXX move off the gpio dependency - depends on GENERIC_GPIO + depends on GPIOLIB select GPIO_SH_PFC if ARCH_REQUIRE_GPIOLIB select PINMUX select PINCONF @@ -40,19 +40,19 @@ config PINCTRL_PFC_R8A7779 config PINCTRL_PFC_SH7203 def_bool y depends on CPU_SUBTYPE_SH7203 - depends on GENERIC_GPIO + depends on GPIOLIB select PINCTRL_SH_PFC config PINCTRL_PFC_SH7264 def_bool y depends on CPU_SUBTYPE_SH7264 - depends on GENERIC_GPIO + depends on GPIOLIB select PINCTRL_SH_PFC config PINCTRL_PFC_SH7269 def_bool y depends on CPU_SUBTYPE_SH7269 - depends on GENERIC_GPIO + depends on GPIOLIB select PINCTRL_SH_PFC config PINCTRL_PFC_SH7372 @@ -68,55 +68,55 @@ config PINCTRL_PFC_SH73A0 config PINCTRL_PFC_SH7720 def_bool y depends on CPU_SUBTYPE_SH7720 - depends on GENERIC_GPIO + depends on GPIOLIB select PINCTRL_SH_PFC config PINCTRL_PFC_SH7722 def_bool y depends on CPU_SUBTYPE_SH7722 - depends on GENERIC_GPIO + depends on GPIOLIB select PINCTRL_SH_PFC config PINCTRL_PFC_SH7723 def_bool y depends on CPU_SUBTYPE_SH7723 - depends on GENERIC_GPIO + depends on GPIOLIB select PINCTRL_SH_PFC config PINCTRL_PFC_SH7724 def_bool y depends on CPU_SUBTYPE_SH7724 - depends on GENERIC_GPIO + depends on GPIOLIB select PINCTRL_SH_PFC config PINCTRL_PFC_SH7734 def_bool y depends on CPU_SUBTYPE_SH7734 - depends on GENERIC_GPIO + depends on GPIOLIB select PINCTRL_SH_PFC config PINCTRL_PFC_SH7757 def_bool y depends on CPU_SUBTYPE_SH7757 - depends on GENERIC_GPIO + depends on GPIOLIB select PINCTRL_SH_PFC config PINCTRL_PFC_SH7785 def_bool y depends on CPU_SUBTYPE_SH7785 - depends on GENERIC_GPIO + depends on GPIOLIB select PINCTRL_SH_PFC config PINCTRL_PFC_SH7786 def_bool y depends on CPU_SUBTYPE_SH7786 - depends on GENERIC_GPIO + depends on GPIOLIB select PINCTRL_SH_PFC config PINCTRL_PFC_SHX3 def_bool y depends on CPU_SUBTYPE_SHX3 - depends on GENERIC_GPIO + depends on GPIOLIB select PINCTRL_SH_PFC endif diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index a5d97ea..8bb2644 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig @@ -66,7 +66,7 @@ config REGULATOR_USERSPACE_CONSUMER config REGULATOR_GPIO tristate "GPIO regulator support" - depends on GENERIC_GPIO + depends on GPIOLIB help This driver provides support for regulators that can be controlled via gpios. 
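For reference, the msi.c hunk above fixes an operator-precedence bug: '+' binds tighter than '?:', so the old one-liner evaluated "dev->msi_cap + (control & PCI_MSI_FLAGS_64BIT)" as the condition and then assigned the bare mask offset, dropping the capability base from mask_pos. The standalone snippet below (hypothetical MSI_CAP/FLAGS_64BIT/MASK_* values, not the real PCI constants) reproduces the mis-parse next to the fixed form:

    /*
     * Illustration of the precedence bug: the "buggy" expression is parsed
     * as (MSI_CAP + (control & FLAGS_64BIT)) ? MASK_64 : MASK_32, so the
     * capability offset never reaches the result.
     */
    #include <stdio.h>

    #define MSI_CAP     0x50    /* hypothetical capability offset */
    #define FLAGS_64BIT 0x0080  /* hypothetical 64-bit flag */
    #define MASK_32     12
    #define MASK_64     16

    int main(void)
    {
            unsigned int control = 0;  /* 32-bit MSI: flag not set */
            unsigned int fixed;

            /* Buggy form: condition is the whole sum, offset is lost */
            unsigned int buggy = MSI_CAP + (control & FLAGS_64BIT) ? MASK_64 : MASK_32;

            /* Intended form, as in the fixed code */
            if (control & FLAGS_64BIT)
                    fixed = MSI_CAP + MASK_64;
            else
                    fixed = MSI_CAP + MASK_32;

            printf("buggy = 0x%x, fixed = 0x%x\n", buggy, fixed); /* 0x10 vs 0x5c */
            return 0;
    }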
diff --git a/drivers/rtc/rtc-tile.c b/drivers/rtc/rtc-tile.c index 249b653..fc3dee9 100644 --- a/drivers/rtc/rtc-tile.c +++ b/drivers/rtc/rtc-tile.c @@ -146,6 +146,7 @@ exit_driver_unregister: */ static void __exit tile_rtc_driver_exit(void) { + platform_device_unregister(tile_rtc_platform_device); platform_driver_unregister(&tile_rtc_platform_driver); } diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 141d8c1..92a9345 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -62,7 +62,7 @@ config SPI_ALTERA config SPI_ATH79 tristate "Atheros AR71XX/AR724X/AR913X SPI controller driver" - depends on ATH79 && GENERIC_GPIO + depends on ATH79 && GPIOLIB select SPI_BITBANG help This enables support for the SPI controller present on the @@ -175,7 +175,7 @@ config SPI_FALCON config SPI_GPIO tristate "GPIO-based bitbanging SPI Master" - depends on GENERIC_GPIO + depends on GPIOLIB select SPI_BITBANG help This simple GPIO bitbanging SPI master uses the arch-neutral GPIO @@ -259,7 +259,7 @@ config SPI_FSL_ESPI config SPI_OC_TINY tristate "OpenCores tiny SPI" - depends on GENERIC_GPIO + depends on GPIOLIB select SPI_BITBANG help This is the driver for OpenCores tiny SPI master controller. @@ -457,7 +457,7 @@ config SPI_TOPCLIFF_PCH config SPI_TXX9 tristate "Toshiba TXx9 SPI controller" - depends on GENERIC_GPIO && CPU_TX49XX + depends on GPIOLIB && CPU_TX49XX help SPI driver for Toshiba TXx9 MIPS SoCs diff --git a/drivers/ssb/driver_mipscore.c b/drivers/ssb/driver_mipscore.c index fa385a3..0907706 100644 --- a/drivers/ssb/driver_mipscore.c +++ b/drivers/ssb/driver_mipscore.c @@ -18,7 +18,7 @@ #include "ssb_private.h" -static const char *part_probes[] = { "bcm47xxpart", NULL }; +static const char * const part_probes[] = { "bcm47xxpart", NULL }; static struct physmap_flash_data ssb_pflash_data = { .part_probe_types = part_probes, diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig index 9f61d46..c0c95be 100644 --- a/drivers/staging/android/Kconfig +++ b/drivers/staging/android/Kconfig @@ -54,7 +54,7 @@ config ANDROID_TIMED_OUTPUT config ANDROID_TIMED_GPIO tristate "Android timed gpio driver" - depends on GENERIC_GPIO && ANDROID_TIMED_OUTPUT + depends on GPIOLIB && ANDROID_TIMED_OUTPUT default n config ANDROID_LOW_MEMORY_KILLER diff --git a/drivers/staging/iio/accel/Kconfig b/drivers/staging/iio/accel/Kconfig index e2e786d..ad45dfb 100644 --- a/drivers/staging/iio/accel/Kconfig +++ b/drivers/staging/iio/accel/Kconfig @@ -61,7 +61,7 @@ config LIS3L02DQ depends on SPI select IIO_TRIGGER if IIO_BUFFER depends on !IIO_BUFFER || IIO_KFIFO_BUF - depends on GENERIC_GPIO + depends on GPIOLIB help Say yes here to build SPI support for the ST microelectronics accelerometer. The driver supplies direct access via sysfs files diff --git a/drivers/staging/iio/adc/Kconfig b/drivers/staging/iio/adc/Kconfig index d990829..cabc7a3 100644 --- a/drivers/staging/iio/adc/Kconfig +++ b/drivers/staging/iio/adc/Kconfig @@ -73,7 +73,7 @@ config AD7780 config AD7816 tristate "Analog Devices AD7816/7/8 temperature sensor and ADC driver" depends on SPI - depends on GENERIC_GPIO + depends on GPIOLIB help Say yes here to build support for Analog Devices AD7816/7/8 temperature sensors and ADC. 
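The driver_mipscore.c hunk above adds a second const so that the array of probe-name pointers itself becomes read-only, not just the strings it points to; the array can then be placed in .rodata and its slots cannot be reassigned at run time. A minimal standalone illustration (hypothetical array names) of the difference between the two declarations:

    /*
     * With 'const char *slots[]' only the pointed-to strings are const;
     * with 'const char * const slots[]' the pointer slots are const too,
     * so accidental writes are rejected at compile time.
     */
    #include <stdio.h>

    static const char *writable_slots[] = { "bcm47xxpart", NULL };
    static const char * const readonly_slots[] = { "bcm47xxpart", NULL };

    int main(void)
    {
            writable_slots[0] = "oops";       /* compiles: only the strings are const */
            /* readonly_slots[0] = "oops"; */ /* error: assignment of read-only location */

            printf("%s %s\n", writable_slots[0], readonly_slots[0]);
            return 0;
    }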
diff --git a/drivers/staging/iio/addac/Kconfig b/drivers/staging/iio/addac/Kconfig index 698a897..e6795e0 100644 --- a/drivers/staging/iio/addac/Kconfig +++ b/drivers/staging/iio/addac/Kconfig @@ -5,7 +5,7 @@ menu "Analog digital bi-direction converters" config ADT7316 tristate "Analog Devices ADT7316/7/8 ADT7516/7/9 temperature sensor, ADC and DAC driver" - depends on GENERIC_GPIO + depends on GPIOLIB help Say yes here to build support for Analog Devices ADT7316, ADT7317, ADT7318 and ADT7516, ADT7517, ADT7519 temperature sensors, ADC and DAC. diff --git a/drivers/staging/iio/resolver/Kconfig b/drivers/staging/iio/resolver/Kconfig index 49f69ef..ce360f1 100644 --- a/drivers/staging/iio/resolver/Kconfig +++ b/drivers/staging/iio/resolver/Kconfig @@ -13,7 +13,7 @@ config AD2S90 config AD2S1200 tristate "Analog Devices ad2s1200/ad2s1205 driver" depends on SPI - depends on GENERIC_GPIO + depends on GPIOLIB help Say yes here to build support for Analog Devices spi resolver to digital converters, ad2s1200 and ad2s1205, provides direct access @@ -22,7 +22,7 @@ config AD2S1200 config AD2S1210 tristate "Analog Devices ad2s1210 driver" depends on SPI - depends on GENERIC_GPIO + depends on GPIOLIB help Say yes here to build support for Analog Devices spi resolver to digital converters, ad2s1210, provides direct access via sysfs. diff --git a/drivers/staging/iio/trigger/Kconfig b/drivers/staging/iio/trigger/Kconfig index d44d3ad..1a051da 100644 --- a/drivers/staging/iio/trigger/Kconfig +++ b/drivers/staging/iio/trigger/Kconfig @@ -14,7 +14,7 @@ config IIO_PERIODIC_RTC_TRIGGER config IIO_GPIO_TRIGGER tristate "GPIO trigger" - depends on GENERIC_GPIO + depends on GPIOLIB help Provides support for using GPIO pins as IIO triggers. diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index a764f16..5e3c025 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig @@ -67,15 +67,16 @@ config THERMAL_GOV_USER_SPACE Enable this to let the user space manage the platform thermals. config CPU_THERMAL - tristate "generic cpu cooling support" + bool "generic cpu cooling support" depends on CPU_FREQ select CPU_FREQ_TABLE help This implements the generic cpu cooling mechanism through frequency - reduction, cpu hotplug and any other ways of reducing temperature. An - ACPI version of this already exists(drivers/acpi/processor_thermal.c). + reduction. An ACPI version of this already exists + (drivers/acpi/processor_thermal.c). This will be useful for platforms using the generic thermal interface and not the ACPI interface. + If you want this support, you should say Y here. config THERMAL_EMULATION @@ -86,6 +87,10 @@ config THERMAL_EMULATION user can manually input temperature and test the different trip threshold behaviour for simulation purpose. + WARNING: Be careful while enabling this option on production systems, + because userland can easily disable the thermal policy by simply + flooding this sysfs node with low temperature values. + config SPEAR_THERMAL bool "SPEAr thermal sensor driver" depends on PLAT_SPEAR @@ -117,15 +122,6 @@ config EXYNOS_THERMAL If you say yes here you get support for TMU (Thermal Management Unit) on SAMSUNG EXYNOS series of SoC. -config EXYNOS_THERMAL_EMUL - bool "EXYNOS TMU emulation mode support" - depends on EXYNOS_THERMAL - help - Exynos 4412 and 4414 and 5 series has emulation mode on TMU. - Enable this option will be make sysfs node in exynos thermal platform - device directory to support emulation mode. 
With emulation mode sysfs - node, you can manually input temperature to TMU for simulation purpose. - config DOVE_THERMAL tristate "Temperature sensor on Marvell Dove SoCs" depends on ARCH_DOVE @@ -144,6 +140,14 @@ config DB8500_THERMAL created. Cooling devices can be bound to the trip points to cool this thermal zone if trip points reached. +config ARMADA_THERMAL + tristate "Armada 370/XP thermal management" + depends on ARCH_MVEBU + depends on OF + help + Enable this option if you want to have support for thermal management + controller present in Armada 370 and Armada XP SoC. + config DB8500_CPUFREQ_COOLING tristate "DB8500 cpufreq cooling" depends on ARCH_U8500 diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile index d3a2b38..c054d41 100644 --- a/drivers/thermal/Makefile +++ b/drivers/thermal/Makefile @@ -3,14 +3,15 @@ # obj-$(CONFIG_THERMAL) += thermal_sys.o +thermal_sys-y += thermal_core.o # governors -obj-$(CONFIG_THERMAL_GOV_FAIR_SHARE) += fair_share.o -obj-$(CONFIG_THERMAL_GOV_STEP_WISE) += step_wise.o -obj-$(CONFIG_THERMAL_GOV_USER_SPACE) += user_space.o +thermal_sys-$(CONFIG_THERMAL_GOV_FAIR_SHARE) += fair_share.o +thermal_sys-$(CONFIG_THERMAL_GOV_STEP_WISE) += step_wise.o +thermal_sys-$(CONFIG_THERMAL_GOV_USER_SPACE) += user_space.o # cpufreq cooling -obj-$(CONFIG_CPU_THERMAL) += cpu_cooling.o +thermal_sys-$(CONFIG_CPU_THERMAL) += cpu_cooling.o # platform thermal drivers obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o @@ -19,6 +20,7 @@ obj-$(CONFIG_KIRKWOOD_THERMAL) += kirkwood_thermal.o obj-$(CONFIG_EXYNOS_THERMAL) += exynos_thermal.o obj-$(CONFIG_DOVE_THERMAL) += dove_thermal.o obj-$(CONFIG_DB8500_THERMAL) += db8500_thermal.o +obj-$(CONFIG_ARMADA_THERMAL) += armada_thermal.o obj-$(CONFIG_DB8500_CPUFREQ_COOLING) += db8500_cpufreq_cooling.o obj-$(CONFIG_INTEL_POWERCLAMP) += intel_powerclamp.o diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c new file mode 100644 index 0000000..5b4d75f --- /dev/null +++ b/drivers/thermal/armada_thermal.c @@ -0,0 +1,232 @@ +/* + * Marvell Armada 370/XP thermal sensor driver + * + * Copyright (C) 2013 Marvell + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define THERMAL_VALID_OFFSET 9 +#define THERMAL_VALID_MASK 0x1 +#define THERMAL_TEMP_OFFSET 10 +#define THERMAL_TEMP_MASK 0x1ff + +/* Thermal Manager Control and Status Register */ +#define PMU_TDC0_SW_RST_MASK (0x1 << 1) +#define PMU_TM_DISABLE_OFFS 0 +#define PMU_TM_DISABLE_MASK (0x1 << PMU_TM_DISABLE_OFFS) +#define PMU_TDC0_REF_CAL_CNT_OFFS 11 +#define PMU_TDC0_REF_CAL_CNT_MASK (0x1ff << PMU_TDC0_REF_CAL_CNT_OFFS) +#define PMU_TDC0_OTF_CAL_MASK (0x1 << 30) +#define PMU_TDC0_START_CAL_MASK (0x1 << 25) + +struct armada_thermal_ops; + +/* Marvell EBU Thermal Sensor Dev Structure */ +struct armada_thermal_priv { + void __iomem *sensor; + void __iomem *control; + struct armada_thermal_ops *ops; +}; + +struct armada_thermal_ops { + /* Initialize the sensor */ + void (*init_sensor)(struct armada_thermal_priv *); + + /* Test for a valid sensor value (optional) */ + bool (*is_valid)(struct armada_thermal_priv *); +}; + +static void armadaxp_init_sensor(struct armada_thermal_priv *priv) +{ + unsigned long reg; + + reg = readl_relaxed(priv->control); + reg |= PMU_TDC0_OTF_CAL_MASK; + writel(reg, priv->control); + + /* Reference calibration value */ + reg &= ~PMU_TDC0_REF_CAL_CNT_MASK; + reg |= (0xf1 << PMU_TDC0_REF_CAL_CNT_OFFS); + writel(reg, priv->control); + + /* Reset the sensor */ + reg = readl_relaxed(priv->control); + writel((reg | PMU_TDC0_SW_RST_MASK), priv->control); + + writel(reg, priv->control); + + /* Enable the sensor */ + reg = readl_relaxed(priv->sensor); + reg &= ~PMU_TM_DISABLE_MASK; + writel(reg, priv->sensor); +} + +static void armada370_init_sensor(struct armada_thermal_priv *priv) +{ + unsigned long reg; + + reg = readl_relaxed(priv->control); + reg |= PMU_TDC0_OTF_CAL_MASK; + writel(reg, priv->control); + + /* Reference calibration value */ + reg &= ~PMU_TDC0_REF_CAL_CNT_MASK; + reg |= (0xf1 << PMU_TDC0_REF_CAL_CNT_OFFS); + writel(reg, priv->control); + + reg &= ~PMU_TDC0_START_CAL_MASK; + writel(reg, priv->control); + + mdelay(10); +} + +static bool armada_is_valid(struct armada_thermal_priv *priv) +{ + unsigned long reg = readl_relaxed(priv->sensor); + + return (reg >> THERMAL_VALID_OFFSET) & THERMAL_VALID_MASK; +} + +static int armada_get_temp(struct thermal_zone_device *thermal, + unsigned long *temp) +{ + struct armada_thermal_priv *priv = thermal->devdata; + unsigned long reg; + + /* Valid check */ + if (priv->ops->is_valid && !priv->ops->is_valid(priv)) { + dev_err(&thermal->device, + "Temperature sensor reading not valid\n"); + return -EIO; + } + + reg = readl_relaxed(priv->sensor); + reg = (reg >> THERMAL_TEMP_OFFSET) & THERMAL_TEMP_MASK; + *temp = (3153000000UL - (10000000UL*reg)) / 13825; + return 0; +} + +static struct thermal_zone_device_ops ops = { + .get_temp = armada_get_temp, +}; + +static const struct armada_thermal_ops armadaxp_ops = { + .init_sensor = armadaxp_init_sensor, +}; + +static const struct armada_thermal_ops armada370_ops = { + .is_valid = armada_is_valid, + .init_sensor = armada370_init_sensor, +}; + +static const struct of_device_id armada_thermal_id_table[] = { + { + .compatible = "marvell,armadaxp-thermal", + .data = &armadaxp_ops, + }, + { + .compatible = "marvell,armada370-thermal", + .data = &armada370_ops, + }, + { + /* sentinel */ + }, +}; +MODULE_DEVICE_TABLE(of, armada_thermal_id_table); + +static int armada_thermal_probe(struct platform_device *pdev) +{ + struct thermal_zone_device *thermal; + const struct of_device_id 
*match; + struct armada_thermal_priv *priv; + struct resource *res; + + match = of_match_device(armada_thermal_id_table, &pdev->dev); + if (!match) + return -ENODEV; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "Failed to get platform resource\n"); + return -ENODEV; + } + + priv->sensor = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->sensor)) + return PTR_ERR(priv->sensor); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!res) { + dev_err(&pdev->dev, "Failed to get platform resource\n"); + return -ENODEV; + } + + priv->control = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->control)) + return PTR_ERR(priv->control); + + priv->ops = (struct armada_thermal_ops *)match->data; + priv->ops->init_sensor(priv); + + thermal = thermal_zone_device_register("armada_thermal", 0, 0, + priv, &ops, NULL, 0, 0); + if (IS_ERR(thermal)) { + dev_err(&pdev->dev, + "Failed to register thermal zone device\n"); + return PTR_ERR(thermal); + } + + platform_set_drvdata(pdev, thermal); + + return 0; +} + +static int armada_thermal_exit(struct platform_device *pdev) +{ + struct thermal_zone_device *armada_thermal = + platform_get_drvdata(pdev); + + thermal_zone_device_unregister(armada_thermal); + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static struct platform_driver armada_thermal_driver = { + .probe = armada_thermal_probe, + .remove = armada_thermal_exit, + .driver = { + .name = "armada_thermal", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(armada_thermal_id_table), + }, +}; + +module_platform_driver(armada_thermal_driver); + +MODULE_AUTHOR("Ezequiel Garcia "); +MODULE_DESCRIPTION("Armada 370/XP thermal driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index 8dc44cb..c94bf2e 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c @@ -20,10 +20,8 @@ * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ -#include #include #include -#include #include #include #include @@ -31,21 +29,19 @@ #include /** - * struct cpufreq_cooling_device + * struct cpufreq_cooling_device - data for cooling device with cpufreq * @id: unique integer value corresponding to each cpufreq_cooling_device * registered. - * @cool_dev: thermal_cooling_device pointer to keep track of the the - * egistered cooling device. + * @cool_dev: thermal_cooling_device pointer to keep track of the + * registered cooling device. * @cpufreq_state: integer value representing the current state of cpufreq * cooling devices. * @cpufreq_val: integer value representing the absolute value of the clipped * frequency. * @allowed_cpus: all the cpus involved for this cpufreq_cooling_device. - * @node: list_head to link all cpufreq_cooling_device together. * * This structure is required for keeping information of each - * cpufreq_cooling_device registered as a list whose head is represented by - * cooling_cpufreq_list. In order to prevent corruption of this list a + * cpufreq_cooling_device registered. In order to prevent corruption of this a * mutex lock cooling_cpufreq_lock is used. 
*/ struct cpufreq_cooling_device { @@ -54,9 +50,7 @@ struct cpufreq_cooling_device { unsigned int cpufreq_state; unsigned int cpufreq_val; struct cpumask allowed_cpus; - struct list_head node; }; -static LIST_HEAD(cooling_cpufreq_list); static DEFINE_IDR(cpufreq_idr); static DEFINE_MUTEX(cooling_cpufreq_lock); @@ -70,6 +64,11 @@ static struct cpufreq_cooling_device *notify_device; * get_idr - function to get a unique id. * @idr: struct idr * handle used to create a id. * @id: int * value generated by this function. + * + * This function will populate @id with an unique + * id, using the idr API. + * + * Return: 0 on success, an error code on failure. */ static int get_idr(struct idr *idr, int *id) { @@ -81,6 +80,7 @@ static int get_idr(struct idr *idr, int *id) if (unlikely(ret < 0)) return ret; *id = ret; + return 0; } @@ -99,63 +99,162 @@ static void release_idr(struct idr *idr, int id) /* Below code defines functions to be used for cpufreq as cooling device */ /** - * is_cpufreq_valid - function to check if a cpu has frequency transition policy. + * is_cpufreq_valid - function to check frequency transitioning capability. * @cpu: cpu for which check is needed. + * + * This function will check the current state of the system if + * it is capable of changing the frequency for a given @cpu. + * + * Return: 0 if the system is not currently capable of changing + * the frequency of given cpu. !0 in case the frequency is changeable. */ static int is_cpufreq_valid(int cpu) { struct cpufreq_policy policy; + return !cpufreq_get_policy(&policy, cpu); } +enum cpufreq_cooling_property { + GET_LEVEL, + GET_FREQ, + GET_MAXL, +}; + /** - * get_cpu_frequency - get the absolute value of frequency from level. - * @cpu: cpu for which frequency is fetched. - * @level: level of frequency, equals cooling state of cpu cooling device - * e.g level=0 --> 1st MAX FREQ, level=1 ---> 2nd MAX FREQ, .... etc + * get_property - fetch a property of interest for a give cpu. + * @cpu: cpu for which the property is required + * @input: query parameter + * @output: query return + * @property: type of query (frequency, level, max level) + * + * This is the common function to + * 1. get maximum cpu cooling states + * 2. translate frequency to cooling state + * 3. translate cooling state to frequency + * Note that the code may be not in good shape + * but it is written in this way in order to: + * a) reduce duplicate code as most of the code can be shared. + * b) make sure the logic is consistent when translating between + * cooling states and frequencies. + * + * Return: 0 on success, -EINVAL when invalid parameters are passed. 
*/ -static unsigned int get_cpu_frequency(unsigned int cpu, unsigned long level) +static int get_property(unsigned int cpu, unsigned long input, + unsigned int *output, + enum cpufreq_cooling_property property) { - int ret = 0, i = 0; - unsigned long level_index; - bool descend = false; + int i, j; + unsigned long max_level = 0, level = 0; + unsigned int freq = CPUFREQ_ENTRY_INVALID; + int descend = -1; struct cpufreq_frequency_table *table = cpufreq_frequency_get_table(cpu); + + if (!output) + return -EINVAL; + if (!table) - return ret; + return -EINVAL; - while (table[i].frequency != CPUFREQ_TABLE_END) { + for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { + /* ignore invalid entries */ if (table[i].frequency == CPUFREQ_ENTRY_INVALID) continue; - /*check if table in ascending or descending order*/ - if ((table[i + 1].frequency != CPUFREQ_TABLE_END) && - (table[i + 1].frequency < table[i].frequency) - && !descend) { - descend = true; - } + /* ignore duplicate entry */ + if (freq == table[i].frequency) + continue; + + /* get the frequency order */ + if (freq != CPUFREQ_ENTRY_INVALID && descend != -1) + descend = !!(freq > table[i].frequency); - /*return if level matched and table in descending order*/ - if (descend && i == level) - return table[i].frequency; - i++; + freq = table[i].frequency; + max_level++; } - i--; - if (level > i || descend) - return ret; - level_index = i - level; + /* get max level */ + if (property == GET_MAXL) { + *output = (unsigned int)max_level; + return 0; + } - /*Scan the table in reverse order and match the level*/ - while (i >= 0) { + if (property == GET_FREQ) + level = descend ? input : (max_level - input - 1); + + for (i = 0, j = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { + /* ignore invalid entry */ if (table[i].frequency == CPUFREQ_ENTRY_INVALID) continue; - /*return if level matched*/ - if (i == level_index) - return table[i].frequency; - i--; + + /* ignore duplicate entry */ + if (freq == table[i].frequency) + continue; + + /* now we have a valid frequency entry */ + freq = table[i].frequency; + + if (property == GET_LEVEL && (unsigned int)input == freq) { + /* get level by frequency */ + *output = descend ? j : (max_level - j - 1); + return 0; + } + if (property == GET_FREQ && level == j) { + /* get frequency by level */ + *output = freq; + return 0; + } + j++; } - return ret; + + return -EINVAL; +} + +/** + * cpufreq_cooling_get_level - for a give cpu, return the cooling level. + * @cpu: cpu for which the level is required + * @freq: the frequency of interest + * + * This function will match the cooling level corresponding to the + * requested @freq and return it. + * + * Return: The matched cooling level on success or THERMAL_CSTATE_INVALID + * otherwise. + */ +unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq) +{ + unsigned int val; + + if (get_property(cpu, (unsigned long)freq, &val, GET_LEVEL)) + return THERMAL_CSTATE_INVALID; + + return (unsigned long)val; +} +EXPORT_SYMBOL_GPL(cpufreq_cooling_get_level); + +/** + * get_cpu_frequency - get the absolute value of frequency from level. + * @cpu: cpu for which frequency is fetched. + * @level: cooling level + * + * This function matches cooling level with frequency. Based on a cooling level + * of frequency, equals cooling state of cpu cooling device, it will return + * the corresponding frequency. + * e.g level=0 --> 1st MAX FREQ, level=1 ---> 2nd MAX FREQ, .... etc + * + * Return: 0 on error, the corresponding frequency otherwise. 
+ */ +static unsigned int get_cpu_frequency(unsigned int cpu, unsigned long level) +{ + int ret = 0; + unsigned int freq; + + ret = get_property(cpu, level, &freq, GET_FREQ); + if (ret) + return 0; + + return freq; } /** @@ -163,13 +262,19 @@ static unsigned int get_cpu_frequency(unsigned int cpu, unsigned long level) * @cpufreq_device: cpufreq_cooling_device pointer containing frequency * clipping data. * @cooling_state: value of the cooling state. + * + * Function used to make sure the cpufreq layer is aware of current thermal + * limits. The limits are applied by updating the cpufreq policy. + * + * Return: 0 on success, an error code otherwise (-EINVAL in case wrong + * cooling state). */ static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device, - unsigned long cooling_state) + unsigned long cooling_state) { unsigned int cpuid, clip_freq; - struct cpumask *maskPtr = &cpufreq_device->allowed_cpus; - unsigned int cpu = cpumask_any(maskPtr); + struct cpumask *mask = &cpufreq_device->allowed_cpus; + unsigned int cpu = cpumask_any(mask); /* Check if the old cooling action is same as new cooling action */ @@ -184,7 +289,7 @@ static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device, cpufreq_device->cpufreq_val = clip_freq; notify_device = cpufreq_device; - for_each_cpu(cpuid, maskPtr) { + for_each_cpu(cpuid, mask) { if (is_cpufreq_valid(cpuid)) cpufreq_update_policy(cpuid); } @@ -199,9 +304,15 @@ static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device, * @nb: struct notifier_block * with callback info. * @event: value showing cpufreq event for which this function invoked. * @data: callback-specific data + * + * Callback to highjack the notification on cpufreq policy transition. + * Every time there is a change in policy, we will intercept and + * update the cpufreq policy with thermal constraints. + * + * Return: 0 (success) */ static int cpufreq_thermal_notifier(struct notifier_block *nb, - unsigned long event, void *data) + unsigned long event, void *data) { struct cpufreq_policy *policy = data; unsigned long max_freq = 0; @@ -212,7 +323,7 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb, if (cpumask_test_cpu(policy->cpu, ¬ify_device->allowed_cpus)) max_freq = notify_device->cpufreq_val; - /* Never exceed user_policy.max*/ + /* Never exceed user_policy.max */ if (max_freq > policy->user_policy.max) max_freq = policy->user_policy.max; @@ -222,50 +333,46 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb, return 0; } -/* - * cpufreq cooling device callback functions are defined below - */ +/* cpufreq cooling device callback functions are defined below */ /** * cpufreq_get_max_state - callback function to get the max cooling state. * @cdev: thermal cooling device pointer. * @state: fill this variable with the max cooling state. + * + * Callback for the thermal cooling device to return the cpufreq + * max cooling state. + * + * Return: 0 on success, an error code otherwise. 
*/ static int cpufreq_get_max_state(struct thermal_cooling_device *cdev, unsigned long *state) { struct cpufreq_cooling_device *cpufreq_device = cdev->devdata; - struct cpumask *maskPtr = &cpufreq_device->allowed_cpus; + struct cpumask *mask = &cpufreq_device->allowed_cpus; unsigned int cpu; - struct cpufreq_frequency_table *table; - unsigned long count = 0; - int i = 0; - - cpu = cpumask_any(maskPtr); - table = cpufreq_frequency_get_table(cpu); - if (!table) { - *state = 0; - return 0; - } + unsigned int count = 0; + int ret; - for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { - if (table[i].frequency == CPUFREQ_ENTRY_INVALID) - continue; - count++; - } + cpu = cpumask_any(mask); - if (count > 0) { - *state = --count; - return 0; - } + ret = get_property(cpu, 0, &count, GET_MAXL); - return -EINVAL; + if (count > 0) + *state = count; + + return ret; } /** * cpufreq_get_cur_state - callback function to get the current cooling state. * @cdev: thermal cooling device pointer. * @state: fill this variable with the current cooling state. + * + * Callback for the thermal cooling device to return the cpufreq + * current cooling state. + * + * Return: 0 on success, an error code otherwise. */ static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev, unsigned long *state) @@ -273,6 +380,7 @@ static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev, struct cpufreq_cooling_device *cpufreq_device = cdev->devdata; *state = cpufreq_device->cpufreq_state; + return 0; } @@ -280,6 +388,11 @@ static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev, * cpufreq_set_cur_state - callback function to set the current cooling state. * @cdev: thermal cooling device pointer. * @state: set this variable to the current cooling state. + * + * Callback for the thermal cooling device to change the cpufreq + * current cooling state. + * + * Return: 0 on success, an error code otherwise. */ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state) @@ -304,9 +417,16 @@ static struct notifier_block thermal_cpufreq_notifier_block = { /** * cpufreq_cooling_register - function to create cpufreq cooling device. * @clip_cpus: cpumask of cpus where the frequency constraints will happen. + * + * This interface function registers the cpufreq cooling device with the name + * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq + * cooling devices. + * + * Return: a valid struct thermal_cooling_device pointer on success, + * on failure, it returns a corresponding ERR_PTR(). 
*/ -struct thermal_cooling_device *cpufreq_cooling_register( - const struct cpumask *clip_cpus) +struct thermal_cooling_device * +cpufreq_cooling_register(const struct cpumask *clip_cpus) { struct thermal_cooling_device *cool_dev; struct cpufreq_cooling_device *cpufreq_dev = NULL; @@ -315,9 +435,9 @@ struct thermal_cooling_device *cpufreq_cooling_register( int ret = 0, i; struct cpufreq_policy policy; - /*Verify that all the clip cpus have same freq_min, freq_max limit*/ + /* Verify that all the clip cpus have same freq_min, freq_max limit */ for_each_cpu(i, clip_cpus) { - /*continue if cpufreq policy not found and not return error*/ + /* continue if cpufreq policy not found and not return error */ if (!cpufreq_get_policy(&policy, i)) continue; if (min == 0 && max == 0) { @@ -325,12 +445,12 @@ struct thermal_cooling_device *cpufreq_cooling_register( max = policy.cpuinfo.max_freq; } else { if (min != policy.cpuinfo.min_freq || - max != policy.cpuinfo.max_freq) + max != policy.cpuinfo.max_freq) return ERR_PTR(-EINVAL); } } cpufreq_dev = kzalloc(sizeof(struct cpufreq_cooling_device), - GFP_KERNEL); + GFP_KERNEL); if (!cpufreq_dev) return ERR_PTR(-ENOMEM); @@ -342,10 +462,11 @@ struct thermal_cooling_device *cpufreq_cooling_register( return ERR_PTR(-EINVAL); } - sprintf(dev_name, "thermal-cpufreq-%d", cpufreq_dev->id); + snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d", + cpufreq_dev->id); cool_dev = thermal_cooling_device_register(dev_name, cpufreq_dev, - &cpufreq_cooling_ops); + &cpufreq_cooling_ops); if (!cool_dev) { release_idr(&cpufreq_idr, cpufreq_dev->id); kfree(cpufreq_dev); @@ -358,17 +479,20 @@ struct thermal_cooling_device *cpufreq_cooling_register( /* Register the notifier for first cpufreq cooling device */ if (cpufreq_dev_count == 0) cpufreq_register_notifier(&thermal_cpufreq_notifier_block, - CPUFREQ_POLICY_NOTIFIER); + CPUFREQ_POLICY_NOTIFIER); cpufreq_dev_count++; mutex_unlock(&cooling_cpufreq_lock); + return cool_dev; } -EXPORT_SYMBOL(cpufreq_cooling_register); +EXPORT_SYMBOL_GPL(cpufreq_cooling_register); /** * cpufreq_cooling_unregister - function to remove cpufreq cooling device. * @cdev: thermal cooling device pointer. + * + * This interface function unregisters the "thermal-cpufreq-%x" cooling device. 
*/ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) { @@ -378,14 +502,13 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) cpufreq_dev_count--; /* Unregister the notifier for the last cpufreq cooling device */ - if (cpufreq_dev_count == 0) { + if (cpufreq_dev_count == 0) cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block, - CPUFREQ_POLICY_NOTIFIER); - } + CPUFREQ_POLICY_NOTIFIER); mutex_unlock(&cooling_cpufreq_lock); thermal_cooling_device_unregister(cpufreq_dev->cool_dev); release_idr(&cpufreq_idr, cpufreq_dev->id); kfree(cpufreq_dev); } -EXPORT_SYMBOL(cpufreq_cooling_unregister); +EXPORT_SYMBOL_GPL(cpufreq_cooling_unregister); diff --git a/drivers/thermal/db8500_cpufreq_cooling.c b/drivers/thermal/db8500_cpufreq_cooling.c index 2141985..786d192 100644 --- a/drivers/thermal/db8500_cpufreq_cooling.c +++ b/drivers/thermal/db8500_cpufreq_cooling.c @@ -37,7 +37,7 @@ static int db8500_cpufreq_cooling_probe(struct platform_device *pdev) cpumask_set_cpu(0, &mask_val); cdev = cpufreq_cooling_register(&mask_val); - if (IS_ERR_OR_NULL(cdev)) { + if (IS_ERR(cdev)) { dev_err(&pdev->dev, "Failed to register cooling device\n"); return PTR_ERR(cdev); } diff --git a/drivers/thermal/db8500_thermal.c b/drivers/thermal/db8500_thermal.c index 61ce60a..1e3b3bf 100644 --- a/drivers/thermal/db8500_thermal.c +++ b/drivers/thermal/db8500_thermal.c @@ -419,7 +419,8 @@ static int db8500_thermal_probe(struct platform_device *pdev) low_irq = platform_get_irq_byname(pdev, "IRQ_HOTMON_LOW"); if (low_irq < 0) { dev_err(&pdev->dev, "Get IRQ_HOTMON_LOW failed.\n"); - return low_irq; + ret = low_irq; + goto out_unlock; } ret = devm_request_threaded_irq(&pdev->dev, low_irq, NULL, @@ -427,13 +428,14 @@ static int db8500_thermal_probe(struct platform_device *pdev) "dbx500_temp_low", pzone); if (ret < 0) { dev_err(&pdev->dev, "Failed to allocate temp low irq.\n"); - return ret; + goto out_unlock; } high_irq = platform_get_irq_byname(pdev, "IRQ_HOTMON_HIGH"); if (high_irq < 0) { dev_err(&pdev->dev, "Get IRQ_HOTMON_HIGH failed.\n"); - return high_irq; + ret = high_irq; + goto out_unlock; } ret = devm_request_threaded_irq(&pdev->dev, high_irq, NULL, @@ -441,15 +443,16 @@ static int db8500_thermal_probe(struct platform_device *pdev) "dbx500_temp_high", pzone); if (ret < 0) { dev_err(&pdev->dev, "Failed to allocate temp high irq.\n"); - return ret; + goto out_unlock; } pzone->therm_dev = thermal_zone_device_register("db8500_thermal_zone", ptrips->num_trips, 0, pzone, &thdev_ops, NULL, 0, 0); - if (IS_ERR_OR_NULL(pzone->therm_dev)) { + if (IS_ERR(pzone->therm_dev)) { dev_err(&pdev->dev, "Register thermal zone device failed.\n"); - return PTR_ERR(pzone->therm_dev); + ret = PTR_ERR(pzone->therm_dev); + goto out_unlock; } dev_info(&pdev->dev, "Thermal zone device registered.\n"); @@ -461,9 +464,11 @@ static int db8500_thermal_probe(struct platform_device *pdev) platform_set_drvdata(pdev, pzone); pzone->mode = THERMAL_DEVICE_ENABLED; + +out_unlock: mutex_unlock(&pzone->th_lock); - return 0; + return ret; } static int db8500_thermal_remove(struct platform_device *pdev) diff --git a/drivers/thermal/dove_thermal.c b/drivers/thermal/dove_thermal.c index 3078c40..4b15a5f 100644 --- a/drivers/thermal/dove_thermal.c +++ b/drivers/thermal/dove_thermal.c @@ -107,12 +107,13 @@ static int dove_get_temp(struct thermal_zone_device *thermal, } /* - * Calculate temperature. See Section 8.10.1 of 88AP510, - * Documentation/arm/Marvell/README + * Calculate temperature. 
According to Marvell internal + * documentation the formula for this is: + * Celsius = (322-reg)/1.3625 */ reg = readl_relaxed(priv->sensor); reg = (reg >> DOVE_THERMAL_TEMP_OFFSET) & DOVE_THERMAL_TEMP_MASK; - *temp = ((2281638UL - (7298*reg)) / 10); + *temp = ((3220000000UL - (10000000UL * reg)) / 13625); return 0; } diff --git a/drivers/thermal/exynos_thermal.c b/drivers/thermal/exynos_thermal.c index b777ae6..d20ce9e 100644 --- a/drivers/thermal/exynos_thermal.c +++ b/drivers/thermal/exynos_thermal.c @@ -98,13 +98,13 @@ #define IDLE_INTERVAL 10000 #define MCELSIUS 1000 -#ifdef CONFIG_EXYNOS_THERMAL_EMUL +#ifdef CONFIG_THERMAL_EMULATION #define EXYNOS_EMUL_TIME 0x57F0 #define EXYNOS_EMUL_TIME_SHIFT 16 #define EXYNOS_EMUL_DATA_SHIFT 8 #define EXYNOS_EMUL_DATA_MASK 0xFF #define EXYNOS_EMUL_ENABLE 0x1 -#endif /* CONFIG_EXYNOS_THERMAL_EMUL */ +#endif /* CONFIG_THERMAL_EMULATION */ /* CPU Zone information */ #define PANIC_ZONE 4 @@ -143,6 +143,7 @@ struct thermal_cooling_conf { struct thermal_sensor_conf { char name[SENSOR_NAME_LEN]; int (*read_temperature)(void *data); + int (*write_emul_temp)(void *drv_data, unsigned long temp); struct thermal_trip_point_conf trip_data; struct thermal_cooling_conf cooling_data; void *private_data; @@ -240,26 +241,6 @@ static int exynos_get_crit_temp(struct thermal_zone_device *thermal, return ret; } -static int exynos_get_frequency_level(unsigned int cpu, unsigned int freq) -{ - int i = 0, ret = -EINVAL; - struct cpufreq_frequency_table *table = NULL; -#ifdef CONFIG_CPU_FREQ - table = cpufreq_frequency_get_table(cpu); -#endif - if (!table) - return ret; - - while (table[i].frequency != CPUFREQ_TABLE_END) { - if (table[i].frequency == CPUFREQ_ENTRY_INVALID) - continue; - if (table[i].frequency == freq) - return i; - i++; - } - return ret; -} - /* Bind callback functions for thermal zone */ static int exynos_bind(struct thermal_zone_device *thermal, struct thermal_cooling_device *cdev) @@ -286,8 +267,8 @@ static int exynos_bind(struct thermal_zone_device *thermal, /* Bind the thermal zone to the cpufreq cooling device */ for (i = 0; i < tab_size; i++) { clip_data = (struct freq_clip_table *)&(tab_ptr[i]); - level = exynos_get_frequency_level(0, clip_data->freq_clip_max); - if (level < 0) + level = cpufreq_cooling_get_level(0, clip_data->freq_clip_max); + if (level == THERMAL_CSTATE_INVALID) return 0; switch (GET_ZONE(i)) { case MONITOR_ZONE: @@ -367,6 +348,23 @@ static int exynos_get_temp(struct thermal_zone_device *thermal, return 0; } +/* Get temperature callback functions for thermal zone */ +static int exynos_set_emul_temp(struct thermal_zone_device *thermal, + unsigned long temp) +{ + void *data; + int ret = -EINVAL; + + if (!th_zone->sensor_conf) { + pr_info("Temperature sensor not initialised\n"); + return -EINVAL; + } + data = th_zone->sensor_conf->private_data; + if (th_zone->sensor_conf->write_emul_temp) + ret = th_zone->sensor_conf->write_emul_temp(data, temp); + return ret; +} + /* Get the temperature trend */ static int exynos_get_trend(struct thermal_zone_device *thermal, int trip, enum thermal_trend *trend) @@ -390,6 +388,7 @@ static struct thermal_zone_device_ops const exynos_dev_ops = { .bind = exynos_bind, .unbind = exynos_unbind, .get_temp = exynos_get_temp, + .set_emul_temp = exynos_set_emul_temp, .get_trend = exynos_get_trend, .get_mode = exynos_get_mode, .set_mode = exynos_set_mode, @@ -712,6 +711,47 @@ static int exynos_tmu_read(struct exynos_tmu_data *data) return temp; } +#ifdef CONFIG_THERMAL_EMULATION +static int 
exynos_tmu_set_emulation(void *drv_data, unsigned long temp) +{ + struct exynos_tmu_data *data = drv_data; + unsigned int reg; + int ret = -EINVAL; + + if (data->soc == SOC_ARCH_EXYNOS4210) + goto out; + + if (temp && temp < MCELSIUS) + goto out; + + mutex_lock(&data->lock); + clk_enable(data->clk); + + reg = readl(data->base + EXYNOS_EMUL_CON); + + if (temp) { + temp /= MCELSIUS; + + reg = (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT) | + (temp_to_code(data, temp) + << EXYNOS_EMUL_DATA_SHIFT) | EXYNOS_EMUL_ENABLE; + } else { + reg &= ~EXYNOS_EMUL_ENABLE; + } + + writel(reg, data->base + EXYNOS_EMUL_CON); + + clk_disable(data->clk); + mutex_unlock(&data->lock); + return 0; +out: + return ret; +} +#else +static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp) + { return -EINVAL; } +#endif/*CONFIG_THERMAL_EMULATION*/ + static void exynos_tmu_work(struct work_struct *work) { struct exynos_tmu_data *data = container_of(work, @@ -745,6 +785,7 @@ static irqreturn_t exynos_tmu_irq(int irq, void *id) static struct thermal_sensor_conf exynos_sensor_conf = { .name = "exynos-therm", .read_temperature = (int (*)(void *))exynos_tmu_read, + .write_emul_temp = exynos_tmu_set_emulation, }; #if defined(CONFIG_CPU_EXYNOS4210) @@ -814,6 +855,10 @@ static const struct of_device_id exynos_tmu_match[] = { .data = (void *)EXYNOS4210_TMU_DRV_DATA, }, { + .compatible = "samsung,exynos4412-tmu", + .data = (void *)EXYNOS_TMU_DRV_DATA, + }, + { .compatible = "samsung,exynos5250-tmu", .data = (void *)EXYNOS_TMU_DRV_DATA, }, @@ -851,93 +896,6 @@ static inline struct exynos_tmu_platform_data *exynos_get_driver_data( platform_get_device_id(pdev)->driver_data; } -#ifdef CONFIG_EXYNOS_THERMAL_EMUL -static ssize_t exynos_tmu_emulation_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct platform_device *pdev = container_of(dev, - struct platform_device, dev); - struct exynos_tmu_data *data = platform_get_drvdata(pdev); - unsigned int reg; - u8 temp_code; - int temp = 0; - - if (data->soc == SOC_ARCH_EXYNOS4210) - goto out; - - mutex_lock(&data->lock); - clk_enable(data->clk); - reg = readl(data->base + EXYNOS_EMUL_CON); - clk_disable(data->clk); - mutex_unlock(&data->lock); - - if (reg & EXYNOS_EMUL_ENABLE) { - reg >>= EXYNOS_EMUL_DATA_SHIFT; - temp_code = reg & EXYNOS_EMUL_DATA_MASK; - temp = code_to_temp(data, temp_code); - } -out: - return sprintf(buf, "%d\n", temp * MCELSIUS); -} - -static ssize_t exynos_tmu_emulation_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct platform_device *pdev = container_of(dev, - struct platform_device, dev); - struct exynos_tmu_data *data = platform_get_drvdata(pdev); - unsigned int reg; - int temp; - - if (data->soc == SOC_ARCH_EXYNOS4210) - goto out; - - if (!sscanf(buf, "%d\n", &temp) || temp < 0) - return -EINVAL; - - mutex_lock(&data->lock); - clk_enable(data->clk); - - reg = readl(data->base + EXYNOS_EMUL_CON); - - if (temp) { - /* Both CELSIUS and MCELSIUS type are available for input */ - if (temp > MCELSIUS) - temp /= MCELSIUS; - - reg = (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT) | - (temp_to_code(data, (temp / MCELSIUS)) - << EXYNOS_EMUL_DATA_SHIFT) | EXYNOS_EMUL_ENABLE; - } else { - reg &= ~EXYNOS_EMUL_ENABLE; - } - - writel(reg, data->base + EXYNOS_EMUL_CON); - - clk_disable(data->clk); - mutex_unlock(&data->lock); - -out: - return count; -} - -static DEVICE_ATTR(emulation, 0644, exynos_tmu_emulation_show, - exynos_tmu_emulation_store); -static int create_emulation_sysfs(struct 
device *dev) -{ - return device_create_file(dev, &dev_attr_emulation); -} -static void remove_emulation_sysfs(struct device *dev) -{ - device_remove_file(dev, &dev_attr_emulation); -} -#else -static inline int create_emulation_sysfs(struct device *dev) { return 0; } -static inline void remove_emulation_sysfs(struct device *dev) {} -#endif - static int exynos_tmu_probe(struct platform_device *pdev) { struct exynos_tmu_data *data; @@ -983,12 +941,16 @@ static int exynos_tmu_probe(struct platform_device *pdev) return ret; } - data->clk = clk_get(NULL, "tmu_apbif"); + data->clk = devm_clk_get(&pdev->dev, "tmu_apbif"); if (IS_ERR(data->clk)) { dev_err(&pdev->dev, "Failed to get clock\n"); return PTR_ERR(data->clk); } + ret = clk_prepare(data->clk); + if (ret) + return ret; + if (pdata->type == SOC_ARCH_EXYNOS || pdata->type == SOC_ARCH_EXYNOS4210) data->soc = pdata->type; @@ -1037,14 +999,10 @@ static int exynos_tmu_probe(struct platform_device *pdev) goto err_clk; } - ret = create_emulation_sysfs(&pdev->dev); - if (ret) - dev_err(&pdev->dev, "Failed to create emulation mode sysfs node\n"); - return 0; err_clk: platform_set_drvdata(pdev, NULL); - clk_put(data->clk); + clk_unprepare(data->clk); return ret; } @@ -1052,13 +1010,11 @@ static int exynos_tmu_remove(struct platform_device *pdev) { struct exynos_tmu_data *data = platform_get_drvdata(pdev); - remove_emulation_sysfs(&pdev->dev); - exynos_tmu_control(pdev, false); exynos_unregister_thermal(); - clk_put(data->clk); + clk_unprepare(data->clk); platform_set_drvdata(pdev, NULL); diff --git a/drivers/thermal/fair_share.c b/drivers/thermal/fair_share.c index 792479f..944ba2f 100644 --- a/drivers/thermal/fair_share.c +++ b/drivers/thermal/fair_share.c @@ -22,9 +22,6 @@ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include #include #include "thermal_core.h" @@ -111,23 +108,15 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip) static struct thermal_governor thermal_gov_fair_share = { .name = "fair_share", .throttle = fair_share_throttle, - .owner = THIS_MODULE, }; -static int __init thermal_gov_fair_share_init(void) +int thermal_gov_fair_share_register(void) { return thermal_register_governor(&thermal_gov_fair_share); } -static void __exit thermal_gov_fair_share_exit(void) +void thermal_gov_fair_share_unregister(void) { thermal_unregister_governor(&thermal_gov_fair_share); } -/* This should load after thermal framework */ -fs_initcall(thermal_gov_fair_share_init); -module_exit(thermal_gov_fair_share_exit); - -MODULE_AUTHOR("Durgadoss R"); -MODULE_DESCRIPTION("A simple weight based thermal throttling governor"); -MODULE_LICENSE("GPL"); diff --git a/drivers/thermal/kirkwood_thermal.c b/drivers/thermal/kirkwood_thermal.c index e5500ed..dfeceaf 100644 --- a/drivers/thermal/kirkwood_thermal.c +++ b/drivers/thermal/kirkwood_thermal.c @@ -41,21 +41,21 @@ static int kirkwood_get_temp(struct thermal_zone_device *thermal, reg = readl_relaxed(priv->sensor); /* Valid check */ - if (!(reg >> KIRKWOOD_THERMAL_VALID_OFFSET) & - KIRKWOOD_THERMAL_VALID_MASK) { + if (!((reg >> KIRKWOOD_THERMAL_VALID_OFFSET) & + KIRKWOOD_THERMAL_VALID_MASK)) { dev_err(&thermal->device, "Temperature sensor reading not valid\n"); return -EIO; } /* - * Calculate temperature. See Section 8.10.1 of the 88AP510, - * datasheet, which has the same sensor. - * Documentation/arm/Marvell/README + * Calculate temperature. 
According to Marvell internal + * documentation the formula for this is: + * Celsius = (322-reg)/1.3625 */ reg = (reg >> KIRKWOOD_THERMAL_TEMP_OFFSET) & KIRKWOOD_THERMAL_TEMP_MASK; - *temp = ((2281638UL - (7298*reg)) / 10); + *temp = ((3220000000UL - (10000000UL * reg)) / 13625); return 0; } diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c index 2cc5b61..8d7edd4 100644 --- a/drivers/thermal/rcar_thermal.c +++ b/drivers/thermal/rcar_thermal.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -377,6 +378,9 @@ static int rcar_thermal_probe(struct platform_device *pdev) spin_lock_init(&common->lock); common->dev = dev; + pm_runtime_enable(dev); + pm_runtime_get_sync(dev); + irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (irq) { int ret; @@ -419,12 +423,15 @@ static int rcar_thermal_probe(struct platform_device *pdev) priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) { dev_err(dev, "Could not allocate priv\n"); - return -ENOMEM; + ret = -ENOMEM; + goto error_unregister; } priv->base = devm_ioremap_resource(dev, res); - if (IS_ERR(priv->base)) - return PTR_ERR(priv->base); + if (IS_ERR(priv->base)) { + ret = PTR_ERR(priv->base); + goto error_unregister; + } priv->common = common; priv->id = i; @@ -443,10 +450,10 @@ static int rcar_thermal_probe(struct platform_device *pdev) goto error_unregister; } - list_move_tail(&priv->list, &common->head); - if (rcar_has_irq_support(priv)) rcar_thermal_irq_enable(priv); + + list_move_tail(&priv->list, &common->head); } platform_set_drvdata(pdev, common); @@ -456,8 +463,14 @@ static int rcar_thermal_probe(struct platform_device *pdev) return 0; error_unregister: - rcar_thermal_for_each_priv(priv, common) + rcar_thermal_for_each_priv(priv, common) { thermal_zone_device_unregister(priv->zone); + if (rcar_has_irq_support(priv)) + rcar_thermal_irq_disable(priv); + } + + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); return ret; } @@ -465,13 +478,20 @@ error_unregister: static int rcar_thermal_remove(struct platform_device *pdev) { struct rcar_thermal_common *common = platform_get_drvdata(pdev); + struct device *dev = &pdev->dev; struct rcar_thermal_priv *priv; - rcar_thermal_for_each_priv(priv, common) + rcar_thermal_for_each_priv(priv, common) { thermal_zone_device_unregister(priv->zone); + if (rcar_has_irq_support(priv)) + rcar_thermal_irq_disable(priv); + } platform_set_drvdata(pdev, NULL); + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); + return 0; } diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c index 407cde3..4d4ddae 100644 --- a/drivers/thermal/step_wise.c +++ b/drivers/thermal/step_wise.c @@ -22,9 +22,6 @@ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include #include #include "thermal_core.h" @@ -59,9 +56,12 @@ static unsigned long get_target_state(struct thermal_instance *instance, switch (trend) { case THERMAL_TREND_RAISING: - if (throttle) + if (throttle) { cur_state = cur_state < instance->upper ? 
(cur_state + 1) : instance->upper; + if (cur_state < instance->lower) + cur_state = instance->lower; + } break; case THERMAL_TREND_RAISE_FULL: if (throttle) @@ -71,8 +71,11 @@ static unsigned long get_target_state(struct thermal_instance *instance, if (cur_state == instance->lower) { if (!throttle) cur_state = -1; - } else + } else { cur_state -= 1; + if (cur_state > instance->upper) + cur_state = instance->upper; + } break; case THERMAL_TREND_DROP_FULL: if (cur_state == instance->lower) { @@ -180,23 +183,14 @@ static int step_wise_throttle(struct thermal_zone_device *tz, int trip) static struct thermal_governor thermal_gov_step_wise = { .name = "step_wise", .throttle = step_wise_throttle, - .owner = THIS_MODULE, }; -static int __init thermal_gov_step_wise_init(void) +int thermal_gov_step_wise_register(void) { return thermal_register_governor(&thermal_gov_step_wise); } -static void __exit thermal_gov_step_wise_exit(void) +void thermal_gov_step_wise_unregister(void) { thermal_unregister_governor(&thermal_gov_step_wise); } - -/* This should load after thermal framework */ -fs_initcall(thermal_gov_step_wise_init); -module_exit(thermal_gov_step_wise_exit); - -MODULE_AUTHOR("Durgadoss R"); -MODULE_DESCRIPTION("A step-by-step thermal throttling governor"); -MODULE_LICENSE("GPL"); diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c new file mode 100644 index 0000000..d755440 --- /dev/null +++ b/drivers/thermal/thermal_core.c @@ -0,0 +1,2011 @@ +/* + * thermal.c - Generic Thermal Management Sysfs support. + * + * Copyright (C) 2008 Intel Corp + * Copyright (C) 2008 Zhang Rui + * Copyright (C) 2008 Sujith Thomas + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "thermal_core.h" + +MODULE_AUTHOR("Zhang Rui"); +MODULE_DESCRIPTION("Generic thermal management sysfs support"); +MODULE_LICENSE("GPL v2"); + +static DEFINE_IDR(thermal_tz_idr); +static DEFINE_IDR(thermal_cdev_idr); +static DEFINE_MUTEX(thermal_idr_lock); + +static LIST_HEAD(thermal_tz_list); +static LIST_HEAD(thermal_cdev_list); +static LIST_HEAD(thermal_governor_list); + +static DEFINE_MUTEX(thermal_list_lock); +static DEFINE_MUTEX(thermal_governor_lock); + +static struct thermal_governor *__find_governor(const char *name) +{ + struct thermal_governor *pos; + + list_for_each_entry(pos, &thermal_governor_list, governor_list) + if (!strnicmp(name, pos->name, THERMAL_NAME_LENGTH)) + return pos; + + return NULL; +} + +int thermal_register_governor(struct thermal_governor *governor) +{ + int err; + const char *name; + struct thermal_zone_device *pos; + + if (!governor) + return -EINVAL; + + mutex_lock(&thermal_governor_lock); + + err = -EBUSY; + if (__find_governor(governor->name) == NULL) { + err = 0; + list_add(&governor->governor_list, &thermal_governor_list); + } + + mutex_lock(&thermal_list_lock); + + list_for_each_entry(pos, &thermal_tz_list, node) { + if (pos->governor) + continue; + if (pos->tzp) + name = pos->tzp->governor_name; + else + name = DEFAULT_THERMAL_GOVERNOR; + if (!strnicmp(name, governor->name, THERMAL_NAME_LENGTH)) + pos->governor = governor; + } + + mutex_unlock(&thermal_list_lock); + mutex_unlock(&thermal_governor_lock); + + return err; +} + +void thermal_unregister_governor(struct thermal_governor *governor) +{ + struct thermal_zone_device *pos; + + if (!governor) + return; + + mutex_lock(&thermal_governor_lock); + + if (__find_governor(governor->name) == NULL) + goto exit; + + mutex_lock(&thermal_list_lock); + + list_for_each_entry(pos, &thermal_tz_list, node) { + if (!strnicmp(pos->governor->name, governor->name, + THERMAL_NAME_LENGTH)) + pos->governor = NULL; + } + + mutex_unlock(&thermal_list_lock); + list_del(&governor->governor_list); +exit: + mutex_unlock(&thermal_governor_lock); + return; +} + +static int get_idr(struct idr *idr, struct mutex *lock, int *id) +{ + int ret; + + if (lock) + mutex_lock(lock); + ret = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL); + if (lock) + mutex_unlock(lock); + if (unlikely(ret < 0)) + return ret; + *id = ret; + return 0; +} + +static void release_idr(struct idr *idr, struct mutex *lock, int id) +{ + if (lock) + mutex_lock(lock); + idr_remove(idr, id); + if (lock) + mutex_unlock(lock); +} + +int get_tz_trend(struct thermal_zone_device *tz, int trip) +{ + enum thermal_trend trend; + + if (!tz->ops->get_trend || tz->ops->get_trend(tz, trip, &trend)) { + if (tz->temperature > tz->last_temperature) + trend = THERMAL_TREND_RAISING; + else if (tz->temperature < tz->last_temperature) + trend = THERMAL_TREND_DROPPING; + else + trend = THERMAL_TREND_STABLE; + } + + return trend; +} +EXPORT_SYMBOL(get_tz_trend); + +struct thermal_instance *get_thermal_instance(struct thermal_zone_device *tz, + struct thermal_cooling_device *cdev, int trip) +{ + struct thermal_instance *pos = NULL; + struct thermal_instance *target_instance = NULL; + + mutex_lock(&tz->lock); + mutex_lock(&cdev->lock); + + list_for_each_entry(pos, &tz->thermal_instances, tz_node) { + if (pos->tz == tz && pos->trip == trip && 
pos->cdev == cdev) { + target_instance = pos; + break; + } + } + + mutex_unlock(&cdev->lock); + mutex_unlock(&tz->lock); + + return target_instance; +} +EXPORT_SYMBOL(get_thermal_instance); + +static void print_bind_err_msg(struct thermal_zone_device *tz, + struct thermal_cooling_device *cdev, int ret) +{ + dev_err(&tz->device, "binding zone %s with cdev %s failed:%d\n", + tz->type, cdev->type, ret); +} + +static void __bind(struct thermal_zone_device *tz, int mask, + struct thermal_cooling_device *cdev) +{ + int i, ret; + + for (i = 0; i < tz->trips; i++) { + if (mask & (1 << i)) { + ret = thermal_zone_bind_cooling_device(tz, i, cdev, + THERMAL_NO_LIMIT, THERMAL_NO_LIMIT); + if (ret) + print_bind_err_msg(tz, cdev, ret); + } + } +} + +static void __unbind(struct thermal_zone_device *tz, int mask, + struct thermal_cooling_device *cdev) +{ + int i; + + for (i = 0; i < tz->trips; i++) + if (mask & (1 << i)) + thermal_zone_unbind_cooling_device(tz, i, cdev); +} + +static void bind_cdev(struct thermal_cooling_device *cdev) +{ + int i, ret; + const struct thermal_zone_params *tzp; + struct thermal_zone_device *pos = NULL; + + mutex_lock(&thermal_list_lock); + + list_for_each_entry(pos, &thermal_tz_list, node) { + if (!pos->tzp && !pos->ops->bind) + continue; + + if (!pos->tzp && pos->ops->bind) { + ret = pos->ops->bind(pos, cdev); + if (ret) + print_bind_err_msg(pos, cdev, ret); + } + + tzp = pos->tzp; + if (!tzp || !tzp->tbp) + continue; + + for (i = 0; i < tzp->num_tbps; i++) { + if (tzp->tbp[i].cdev || !tzp->tbp[i].match) + continue; + if (tzp->tbp[i].match(pos, cdev)) + continue; + tzp->tbp[i].cdev = cdev; + __bind(pos, tzp->tbp[i].trip_mask, cdev); + } + } + + mutex_unlock(&thermal_list_lock); +} + +static void bind_tz(struct thermal_zone_device *tz) +{ + int i, ret; + struct thermal_cooling_device *pos = NULL; + const struct thermal_zone_params *tzp = tz->tzp; + + if (!tzp && !tz->ops->bind) + return; + + mutex_lock(&thermal_list_lock); + + /* If there is no platform data, try to use ops->bind */ + if (!tzp && tz->ops->bind) { + list_for_each_entry(pos, &thermal_cdev_list, node) { + ret = tz->ops->bind(tz, pos); + if (ret) + print_bind_err_msg(tz, pos, ret); + } + goto exit; + } + + if (!tzp || !tzp->tbp) + goto exit; + + list_for_each_entry(pos, &thermal_cdev_list, node) { + for (i = 0; i < tzp->num_tbps; i++) { + if (tzp->tbp[i].cdev || !tzp->tbp[i].match) + continue; + if (tzp->tbp[i].match(tz, pos)) + continue; + tzp->tbp[i].cdev = pos; + __bind(tz, tzp->tbp[i].trip_mask, pos); + } + } +exit: + mutex_unlock(&thermal_list_lock); +} + +static void thermal_zone_device_set_polling(struct thermal_zone_device *tz, + int delay) +{ + if (delay > 1000) + mod_delayed_work(system_freezable_wq, &tz->poll_queue, + round_jiffies(msecs_to_jiffies(delay))); + else if (delay) + mod_delayed_work(system_freezable_wq, &tz->poll_queue, + msecs_to_jiffies(delay)); + else + cancel_delayed_work(&tz->poll_queue); +} + +static void monitor_thermal_zone(struct thermal_zone_device *tz) +{ + mutex_lock(&tz->lock); + + if (tz->passive) + thermal_zone_device_set_polling(tz, tz->passive_delay); + else if (tz->polling_delay) + thermal_zone_device_set_polling(tz, tz->polling_delay); + else + thermal_zone_device_set_polling(tz, 0); + + mutex_unlock(&tz->lock); +} + +static void handle_non_critical_trips(struct thermal_zone_device *tz, + int trip, enum thermal_trip_type trip_type) +{ + if (tz->governor) + tz->governor->throttle(tz, trip); +} + +static void handle_critical_trips(struct thermal_zone_device *tz, + int trip, 
enum thermal_trip_type trip_type) +{ + long trip_temp; + + tz->ops->get_trip_temp(tz, trip, &trip_temp); + + /* If we have not crossed the trip_temp, we do not care. */ + if (tz->temperature < trip_temp) + return; + + if (tz->ops->notify) + tz->ops->notify(tz, trip, trip_type); + + if (trip_type == THERMAL_TRIP_CRITICAL) { + dev_emerg(&tz->device, + "critical temperature reached(%d C),shutting down\n", + tz->temperature / 1000); + orderly_poweroff(true); + } +} + +static void handle_thermal_trip(struct thermal_zone_device *tz, int trip) +{ + enum thermal_trip_type type; + + tz->ops->get_trip_type(tz, trip, &type); + + if (type == THERMAL_TRIP_CRITICAL || type == THERMAL_TRIP_HOT) + handle_critical_trips(tz, trip, type); + else + handle_non_critical_trips(tz, trip, type); + /* + * Alright, we handled this trip successfully. + * So, start monitoring again. + */ + monitor_thermal_zone(tz); +} + +/** + * thermal_zone_get_temp() - returns its the temperature of thermal zone + * @tz: a valid pointer to a struct thermal_zone_device + * @temp: a valid pointer to where to store the resulting temperature. + * + * When a valid thermal zone reference is passed, it will fetch its + * temperature and fill @temp. + * + * Return: On success returns 0, an error code otherwise + */ +int thermal_zone_get_temp(struct thermal_zone_device *tz, unsigned long *temp) +{ + int ret = -EINVAL; +#ifdef CONFIG_THERMAL_EMULATION + int count; + unsigned long crit_temp = -1UL; + enum thermal_trip_type type; +#endif + + if (!tz || IS_ERR(tz)) + goto exit; + + mutex_lock(&tz->lock); + + ret = tz->ops->get_temp(tz, temp); +#ifdef CONFIG_THERMAL_EMULATION + if (!tz->emul_temperature) + goto skip_emul; + + for (count = 0; count < tz->trips; count++) { + ret = tz->ops->get_trip_type(tz, count, &type); + if (!ret && type == THERMAL_TRIP_CRITICAL) { + ret = tz->ops->get_trip_temp(tz, count, &crit_temp); + break; + } + } + + if (ret) + goto skip_emul; + + if (*temp < crit_temp) + *temp = tz->emul_temperature; +skip_emul: +#endif + mutex_unlock(&tz->lock); +exit: + return ret; +} +EXPORT_SYMBOL_GPL(thermal_zone_get_temp); + +static void update_temperature(struct thermal_zone_device *tz) +{ + long temp; + int ret; + + ret = thermal_zone_get_temp(tz, &temp); + if (ret) { + dev_warn(&tz->device, "failed to read out thermal zone %d\n", + tz->id); + return; + } + + mutex_lock(&tz->lock); + tz->last_temperature = tz->temperature; + tz->temperature = temp; + mutex_unlock(&tz->lock); +} + +void thermal_zone_device_update(struct thermal_zone_device *tz) +{ + int count; + + update_temperature(tz); + + for (count = 0; count < tz->trips; count++) + handle_thermal_trip(tz, count); +} +EXPORT_SYMBOL_GPL(thermal_zone_device_update); + +static void thermal_zone_device_check(struct work_struct *work) +{ + struct thermal_zone_device *tz = container_of(work, struct + thermal_zone_device, + poll_queue.work); + thermal_zone_device_update(tz); +} + +/* sys I/F for thermal zone */ + +#define to_thermal_zone(_dev) \ + container_of(_dev, struct thermal_zone_device, device) + +static ssize_t +type_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct thermal_zone_device *tz = to_thermal_zone(dev); + + return sprintf(buf, "%s\n", tz->type); +} + +static ssize_t +temp_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct thermal_zone_device *tz = to_thermal_zone(dev); + long temperature; + int ret; + + ret = thermal_zone_get_temp(tz, &temperature); + + if (ret) + return ret; + + return sprintf(buf, "%ld\n", 
temperature); +} + +static ssize_t +mode_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct thermal_zone_device *tz = to_thermal_zone(dev); + enum thermal_device_mode mode; + int result; + + if (!tz->ops->get_mode) + return -EPERM; + + result = tz->ops->get_mode(tz, &mode); + if (result) + return result; + + return sprintf(buf, "%s\n", mode == THERMAL_DEVICE_ENABLED ? "enabled" + : "disabled"); +} + +static ssize_t +mode_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct thermal_zone_device *tz = to_thermal_zone(dev); + int result; + + if (!tz->ops->set_mode) + return -EPERM; + + if (!strncmp(buf, "enabled", sizeof("enabled") - 1)) + result = tz->ops->set_mode(tz, THERMAL_DEVICE_ENABLED); + else if (!strncmp(buf, "disabled", sizeof("disabled") - 1)) + result = tz->ops->set_mode(tz, THERMAL_DEVICE_DISABLED); + else + result = -EINVAL; + + if (result) + return result; + + return count; +} + +static ssize_t +trip_point_type_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct thermal_zone_device *tz = to_thermal_zone(dev); + enum thermal_trip_type type; + int trip, result; + + if (!tz->ops->get_trip_type) + return -EPERM; + + if (!sscanf(attr->attr.name, "trip_point_%d_type", &trip)) + return -EINVAL; + + result = tz->ops->get_trip_type(tz, trip, &type); + if (result) + return result; + + switch (type) { + case THERMAL_TRIP_CRITICAL: + return sprintf(buf, "critical\n"); + case THERMAL_TRIP_HOT: + return sprintf(buf, "hot\n"); + case THERMAL_TRIP_PASSIVE: + return sprintf(buf, "passive\n"); + case THERMAL_TRIP_ACTIVE: + return sprintf(buf, "active\n"); + default: + return sprintf(buf, "unknown\n"); + } +} + +static ssize_t +trip_point_temp_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct thermal_zone_device *tz = to_thermal_zone(dev); + int trip, ret; + unsigned long temperature; + + if (!tz->ops->set_trip_temp) + return -EPERM; + + if (!sscanf(attr->attr.name, "trip_point_%d_temp", &trip)) + return -EINVAL; + + if (kstrtoul(buf, 10, &temperature)) + return -EINVAL; + + ret = tz->ops->set_trip_temp(tz, trip, temperature); + + return ret ? ret : count; +} + +static ssize_t +trip_point_temp_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct thermal_zone_device *tz = to_thermal_zone(dev); + int trip, ret; + long temperature; + + if (!tz->ops->get_trip_temp) + return -EPERM; + + if (!sscanf(attr->attr.name, "trip_point_%d_temp", &trip)) + return -EINVAL; + + ret = tz->ops->get_trip_temp(tz, trip, &temperature); + + if (ret) + return ret; + + return sprintf(buf, "%ld\n", temperature); +} + +static ssize_t +trip_point_hyst_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct thermal_zone_device *tz = to_thermal_zone(dev); + int trip, ret; + unsigned long temperature; + + if (!tz->ops->set_trip_hyst) + return -EPERM; + + if (!sscanf(attr->attr.name, "trip_point_%d_hyst", &trip)) + return -EINVAL; + + if (kstrtoul(buf, 10, &temperature)) + return -EINVAL; + + /* + * We are not doing any check on the 'temperature' value + * here. The driver implementing 'set_trip_hyst' has to + * take care of this. + */ + ret = tz->ops->set_trip_hyst(tz, trip, temperature); + + return ret ? 
ret : count; +} + +static ssize_t +trip_point_hyst_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct thermal_zone_device *tz = to_thermal_zone(dev); + int trip, ret; + unsigned long temperature; + + if (!tz->ops->get_trip_hyst) + return -EPERM; + + if (!sscanf(attr->attr.name, "trip_point_%d_hyst", &trip)) + return -EINVAL; + + ret = tz->ops->get_trip_hyst(tz, trip, &temperature); + + return ret ? ret : sprintf(buf, "%ld\n", temperature); +} + +static ssize_t +passive_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct thermal_zone_device *tz = to_thermal_zone(dev); + struct thermal_cooling_device *cdev = NULL; + int state; + + if (!sscanf(buf, "%d\n", &state)) + return -EINVAL; + + /* sanity check: values below 1000 millicelcius don't make sense + * and can cause the system to go into a thermal heart attack + */ + if (state && state < 1000) + return -EINVAL; + + if (state && !tz->forced_passive) { + mutex_lock(&thermal_list_lock); + list_for_each_entry(cdev, &thermal_cdev_list, node) { + if (!strncmp("Processor", cdev->type, + sizeof("Processor"))) + thermal_zone_bind_cooling_device(tz, + THERMAL_TRIPS_NONE, cdev, + THERMAL_NO_LIMIT, + THERMAL_NO_LIMIT); + } + mutex_unlock(&thermal_list_lock); + if (!tz->passive_delay) + tz->passive_delay = 1000; + } else if (!state && tz->forced_passive) { + mutex_lock(&thermal_list_lock); + list_for_each_entry(cdev, &thermal_cdev_list, node) { + if (!strncmp("Processor", cdev->type, + sizeof("Processor"))) + thermal_zone_unbind_cooling_device(tz, + THERMAL_TRIPS_NONE, + cdev); + } + mutex_unlock(&thermal_list_lock); + tz->passive_delay = 0; + } + + tz->forced_passive = state; + + thermal_zone_device_update(tz); + + return count; +} + +static ssize_t +passive_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct thermal_zone_device *tz = to_thermal_zone(dev); + + return sprintf(buf, "%d\n", tz->forced_passive); +} + +static ssize_t +policy_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int ret = -EINVAL; + struct thermal_zone_device *tz = to_thermal_zone(dev); + struct thermal_governor *gov; + + mutex_lock(&thermal_governor_lock); + + gov = __find_governor(buf); + if (!gov) + goto exit; + + tz->governor = gov; + ret = count; + +exit: + mutex_unlock(&thermal_governor_lock); + return ret; +} + +static ssize_t +policy_show(struct device *dev, struct device_attribute *devattr, char *buf) +{ + struct thermal_zone_device *tz = to_thermal_zone(dev); + + return sprintf(buf, "%s\n", tz->governor->name); +} + +#ifdef CONFIG_THERMAL_EMULATION +static ssize_t +emul_temp_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct thermal_zone_device *tz = to_thermal_zone(dev); + int ret = 0; + unsigned long temperature; + + if (kstrtoul(buf, 10, &temperature)) + return -EINVAL; + + if (!tz->ops->set_emul_temp) { + mutex_lock(&tz->lock); + tz->emul_temperature = temperature; + mutex_unlock(&tz->lock); + } else { + ret = tz->ops->set_emul_temp(tz, temperature); + } + + return ret ? 
ret : count; +} +static DEVICE_ATTR(emul_temp, S_IWUSR, NULL, emul_temp_store); +#endif/*CONFIG_THERMAL_EMULATION*/ + +static DEVICE_ATTR(type, 0444, type_show, NULL); +static DEVICE_ATTR(temp, 0444, temp_show, NULL); +static DEVICE_ATTR(mode, 0644, mode_show, mode_store); +static DEVICE_ATTR(passive, S_IRUGO | S_IWUSR, passive_show, passive_store); +static DEVICE_ATTR(policy, S_IRUGO | S_IWUSR, policy_show, policy_store); + +/* sys I/F for cooling device */ +#define to_cooling_device(_dev) \ + container_of(_dev, struct thermal_cooling_device, device) + +static ssize_t +thermal_cooling_device_type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct thermal_cooling_device *cdev = to_cooling_device(dev); + + return sprintf(buf, "%s\n", cdev->type); +} + +static ssize_t +thermal_cooling_device_max_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct thermal_cooling_device *cdev = to_cooling_device(dev); + unsigned long state; + int ret; + + ret = cdev->ops->get_max_state(cdev, &state); + if (ret) + return ret; + return sprintf(buf, "%ld\n", state); +} + +static ssize_t +thermal_cooling_device_cur_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct thermal_cooling_device *cdev = to_cooling_device(dev); + unsigned long state; + int ret; + + ret = cdev->ops->get_cur_state(cdev, &state); + if (ret) + return ret; + return sprintf(buf, "%ld\n", state); +} + +static ssize_t +thermal_cooling_device_cur_state_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct thermal_cooling_device *cdev = to_cooling_device(dev); + unsigned long state; + int result; + + if (!sscanf(buf, "%ld\n", &state)) + return -EINVAL; + + if ((long)state < 0) + return -EINVAL; + + result = cdev->ops->set_cur_state(cdev, state); + if (result) + return result; + return count; +} + +static struct device_attribute dev_attr_cdev_type = +__ATTR(type, 0444, thermal_cooling_device_type_show, NULL); +static DEVICE_ATTR(max_state, 0444, + thermal_cooling_device_max_state_show, NULL); +static DEVICE_ATTR(cur_state, 0644, + thermal_cooling_device_cur_state_show, + thermal_cooling_device_cur_state_store); + +static ssize_t +thermal_cooling_device_trip_point_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct thermal_instance *instance; + + instance = + container_of(attr, struct thermal_instance, attr); + + if (instance->trip == THERMAL_TRIPS_NONE) + return sprintf(buf, "-1\n"); + else + return sprintf(buf, "%d\n", instance->trip); +} + +/* Device management */ + +#if defined(CONFIG_THERMAL_HWMON) + +/* hwmon sys I/F */ +#include + +/* thermal zone devices with the same type share one hwmon device */ +struct thermal_hwmon_device { + char type[THERMAL_NAME_LENGTH]; + struct device *device; + int count; + struct list_head tz_list; + struct list_head node; +}; + +struct thermal_hwmon_attr { + struct device_attribute attr; + char name[16]; +}; + +/* one temperature input for each thermal zone */ +struct thermal_hwmon_temp { + struct list_head hwmon_node; + struct thermal_zone_device *tz; + struct thermal_hwmon_attr temp_input; /* hwmon sys attr */ + struct thermal_hwmon_attr temp_crit; /* hwmon sys attr */ +}; + +static LIST_HEAD(thermal_hwmon_list); + +static ssize_t +name_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct thermal_hwmon_device *hwmon = dev_get_drvdata(dev); + return sprintf(buf, "%s\n", hwmon->type); +} +static 
DEVICE_ATTR(name, 0444, name_show, NULL); + +static ssize_t +temp_input_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + long temperature; + int ret; + struct thermal_hwmon_attr *hwmon_attr + = container_of(attr, struct thermal_hwmon_attr, attr); + struct thermal_hwmon_temp *temp + = container_of(hwmon_attr, struct thermal_hwmon_temp, + temp_input); + struct thermal_zone_device *tz = temp->tz; + + ret = thermal_zone_get_temp(tz, &temperature); + + if (ret) + return ret; + + return sprintf(buf, "%ld\n", temperature); +} + +static ssize_t +temp_crit_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct thermal_hwmon_attr *hwmon_attr + = container_of(attr, struct thermal_hwmon_attr, attr); + struct thermal_hwmon_temp *temp + = container_of(hwmon_attr, struct thermal_hwmon_temp, + temp_crit); + struct thermal_zone_device *tz = temp->tz; + long temperature; + int ret; + + ret = tz->ops->get_trip_temp(tz, 0, &temperature); + if (ret) + return ret; + + return sprintf(buf, "%ld\n", temperature); +} + + +static struct thermal_hwmon_device * +thermal_hwmon_lookup_by_type(const struct thermal_zone_device *tz) +{ + struct thermal_hwmon_device *hwmon; + + mutex_lock(&thermal_list_lock); + list_for_each_entry(hwmon, &thermal_hwmon_list, node) + if (!strcmp(hwmon->type, tz->type)) { + mutex_unlock(&thermal_list_lock); + return hwmon; + } + mutex_unlock(&thermal_list_lock); + + return NULL; +} + +/* Find the temperature input matching a given thermal zone */ +static struct thermal_hwmon_temp * +thermal_hwmon_lookup_temp(const struct thermal_hwmon_device *hwmon, + const struct thermal_zone_device *tz) +{ + struct thermal_hwmon_temp *temp; + + mutex_lock(&thermal_list_lock); + list_for_each_entry(temp, &hwmon->tz_list, hwmon_node) + if (temp->tz == tz) { + mutex_unlock(&thermal_list_lock); + return temp; + } + mutex_unlock(&thermal_list_lock); + + return NULL; +} + +static int +thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) +{ + struct thermal_hwmon_device *hwmon; + struct thermal_hwmon_temp *temp; + int new_hwmon_device = 1; + int result; + + hwmon = thermal_hwmon_lookup_by_type(tz); + if (hwmon) { + new_hwmon_device = 0; + goto register_sys_interface; + } + + hwmon = kzalloc(sizeof(struct thermal_hwmon_device), GFP_KERNEL); + if (!hwmon) + return -ENOMEM; + + INIT_LIST_HEAD(&hwmon->tz_list); + strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH); + hwmon->device = hwmon_device_register(NULL); + if (IS_ERR(hwmon->device)) { + result = PTR_ERR(hwmon->device); + goto free_mem; + } + dev_set_drvdata(hwmon->device, hwmon); + result = device_create_file(hwmon->device, &dev_attr_name); + if (result) + goto free_mem; + + register_sys_interface: + temp = kzalloc(sizeof(struct thermal_hwmon_temp), GFP_KERNEL); + if (!temp) { + result = -ENOMEM; + goto unregister_name; + } + + temp->tz = tz; + hwmon->count++; + + snprintf(temp->temp_input.name, sizeof(temp->temp_input.name), + "temp%d_input", hwmon->count); + temp->temp_input.attr.attr.name = temp->temp_input.name; + temp->temp_input.attr.attr.mode = 0444; + temp->temp_input.attr.show = temp_input_show; + sysfs_attr_init(&temp->temp_input.attr.attr); + result = device_create_file(hwmon->device, &temp->temp_input.attr); + if (result) + goto free_temp_mem; + + if (tz->ops->get_crit_temp) { + unsigned long temperature; + if (!tz->ops->get_crit_temp(tz, &temperature)) { + snprintf(temp->temp_crit.name, + sizeof(temp->temp_crit.name), + "temp%d_crit", hwmon->count); + temp->temp_crit.attr.attr.name = 
temp->temp_crit.name; + temp->temp_crit.attr.attr.mode = 0444; + temp->temp_crit.attr.show = temp_crit_show; + sysfs_attr_init(&temp->temp_crit.attr.attr); + result = device_create_file(hwmon->device, + &temp->temp_crit.attr); + if (result) + goto unregister_input; + } + } + + mutex_lock(&thermal_list_lock); + if (new_hwmon_device) + list_add_tail(&hwmon->node, &thermal_hwmon_list); + list_add_tail(&temp->hwmon_node, &hwmon->tz_list); + mutex_unlock(&thermal_list_lock); + + return 0; + + unregister_input: + device_remove_file(hwmon->device, &temp->temp_input.attr); + free_temp_mem: + kfree(temp); + unregister_name: + if (new_hwmon_device) { + device_remove_file(hwmon->device, &dev_attr_name); + hwmon_device_unregister(hwmon->device); + } + free_mem: + if (new_hwmon_device) + kfree(hwmon); + + return result; +} + +static void +thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz) +{ + struct thermal_hwmon_device *hwmon; + struct thermal_hwmon_temp *temp; + + hwmon = thermal_hwmon_lookup_by_type(tz); + if (unlikely(!hwmon)) { + /* Should never happen... */ + dev_dbg(&tz->device, "hwmon device lookup failed!\n"); + return; + } + + temp = thermal_hwmon_lookup_temp(hwmon, tz); + if (unlikely(!temp)) { + /* Should never happen... */ + dev_dbg(&tz->device, "temperature input lookup failed!\n"); + return; + } + + device_remove_file(hwmon->device, &temp->temp_input.attr); + if (tz->ops->get_crit_temp) + device_remove_file(hwmon->device, &temp->temp_crit.attr); + + mutex_lock(&thermal_list_lock); + list_del(&temp->hwmon_node); + kfree(temp); + if (!list_empty(&hwmon->tz_list)) { + mutex_unlock(&thermal_list_lock); + return; + } + list_del(&hwmon->node); + mutex_unlock(&thermal_list_lock); + + device_remove_file(hwmon->device, &dev_attr_name); + hwmon_device_unregister(hwmon->device); + kfree(hwmon); +} +#else +static int +thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) +{ + return 0; +} + +static void +thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz) +{ +} +#endif + +/** + * thermal_zone_bind_cooling_device() - bind a cooling device to a thermal zone + * @tz: pointer to struct thermal_zone_device + * @trip: indicates which trip point the cooling devices is + * associated with in this thermal zone. + * @cdev: pointer to struct thermal_cooling_device + * @upper: the Maximum cooling state for this trip point. + * THERMAL_NO_LIMIT means no upper limit, + * and the cooling device can be in max_state. + * @lower: the Minimum cooling state can be used for this trip point. + * THERMAL_NO_LIMIT means no lower limit, + * and the cooling device can be in cooling state 0. + * + * This interface function bind a thermal cooling device to the certain trip + * point of a thermal zone device. + * This function is usually called in the thermal zone device .bind callback. + * + * Return: 0 on success, the proper error value otherwise. 
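
An illustrative use of this helper (not taken from this patch) is a zone
driver's .bind callback tying each newly registered cooling device to trip 0
over the full cooling state range; the callback name below is made up:

	static int example_tz_bind(struct thermal_zone_device *tz,
				   struct thermal_cooling_device *cdev)
	{
		/* trip 0, no explicit clamping of the cooling states */
		return thermal_zone_bind_cooling_device(tz, 0, cdev,
							THERMAL_NO_LIMIT,
							THERMAL_NO_LIMIT);
	}
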
+ */ +int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, + int trip, + struct thermal_cooling_device *cdev, + unsigned long upper, unsigned long lower) +{ + struct thermal_instance *dev; + struct thermal_instance *pos; + struct thermal_zone_device *pos1; + struct thermal_cooling_device *pos2; + unsigned long max_state; + int result; + + if (trip >= tz->trips || (trip < 0 && trip != THERMAL_TRIPS_NONE)) + return -EINVAL; + + list_for_each_entry(pos1, &thermal_tz_list, node) { + if (pos1 == tz) + break; + } + list_for_each_entry(pos2, &thermal_cdev_list, node) { + if (pos2 == cdev) + break; + } + + if (tz != pos1 || cdev != pos2) + return -EINVAL; + + cdev->ops->get_max_state(cdev, &max_state); + + /* lower default 0, upper default max_state */ + lower = lower == THERMAL_NO_LIMIT ? 0 : lower; + upper = upper == THERMAL_NO_LIMIT ? max_state : upper; + + if (lower > upper || upper > max_state) + return -EINVAL; + + dev = + kzalloc(sizeof(struct thermal_instance), GFP_KERNEL); + if (!dev) + return -ENOMEM; + dev->tz = tz; + dev->cdev = cdev; + dev->trip = trip; + dev->upper = upper; + dev->lower = lower; + dev->target = THERMAL_NO_TARGET; + + result = get_idr(&tz->idr, &tz->lock, &dev->id); + if (result) + goto free_mem; + + sprintf(dev->name, "cdev%d", dev->id); + result = + sysfs_create_link(&tz->device.kobj, &cdev->device.kobj, dev->name); + if (result) + goto release_idr; + + sprintf(dev->attr_name, "cdev%d_trip_point", dev->id); + sysfs_attr_init(&dev->attr.attr); + dev->attr.attr.name = dev->attr_name; + dev->attr.attr.mode = 0444; + dev->attr.show = thermal_cooling_device_trip_point_show; + result = device_create_file(&tz->device, &dev->attr); + if (result) + goto remove_symbol_link; + + mutex_lock(&tz->lock); + mutex_lock(&cdev->lock); + list_for_each_entry(pos, &tz->thermal_instances, tz_node) + if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) { + result = -EEXIST; + break; + } + if (!result) { + list_add_tail(&dev->tz_node, &tz->thermal_instances); + list_add_tail(&dev->cdev_node, &cdev->thermal_instances); + } + mutex_unlock(&cdev->lock); + mutex_unlock(&tz->lock); + + if (!result) + return 0; + + device_remove_file(&tz->device, &dev->attr); +remove_symbol_link: + sysfs_remove_link(&tz->device.kobj, dev->name); +release_idr: + release_idr(&tz->idr, &tz->lock, dev->id); +free_mem: + kfree(dev); + return result; +} +EXPORT_SYMBOL_GPL(thermal_zone_bind_cooling_device); + +/** + * thermal_zone_unbind_cooling_device() - unbind a cooling device from a + * thermal zone. + * @tz: pointer to a struct thermal_zone_device. + * @trip: indicates which trip point the cooling devices is + * associated with in this thermal zone. + * @cdev: pointer to a struct thermal_cooling_device. + * + * This interface function unbind a thermal cooling device from the certain + * trip point of a thermal zone device. + * This function is usually called in the thermal zone device .unbind callback. + * + * Return: 0 on success, the proper error value otherwise. 
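
The matching (hypothetical) .unbind callback would simply drop that binding
again:

	static int example_tz_unbind(struct thermal_zone_device *tz,
				     struct thermal_cooling_device *cdev)
	{
		return thermal_zone_unbind_cooling_device(tz, 0, cdev);
	}
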
+ */ +int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz, + int trip, + struct thermal_cooling_device *cdev) +{ + struct thermal_instance *pos, *next; + + mutex_lock(&tz->lock); + mutex_lock(&cdev->lock); + list_for_each_entry_safe(pos, next, &tz->thermal_instances, tz_node) { + if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) { + list_del(&pos->tz_node); + list_del(&pos->cdev_node); + mutex_unlock(&cdev->lock); + mutex_unlock(&tz->lock); + goto unbind; + } + } + mutex_unlock(&cdev->lock); + mutex_unlock(&tz->lock); + + return -ENODEV; + +unbind: + device_remove_file(&tz->device, &pos->attr); + sysfs_remove_link(&tz->device.kobj, pos->name); + release_idr(&tz->idr, &tz->lock, pos->id); + kfree(pos); + return 0; +} +EXPORT_SYMBOL_GPL(thermal_zone_unbind_cooling_device); + +static void thermal_release(struct device *dev) +{ + struct thermal_zone_device *tz; + struct thermal_cooling_device *cdev; + + if (!strncmp(dev_name(dev), "thermal_zone", + sizeof("thermal_zone") - 1)) { + tz = to_thermal_zone(dev); + kfree(tz); + } else { + cdev = to_cooling_device(dev); + kfree(cdev); + } +} + +static struct class thermal_class = { + .name = "thermal", + .dev_release = thermal_release, +}; + +/** + * thermal_cooling_device_register() - register a new thermal cooling device + * @type: the thermal cooling device type. + * @devdata: device private data. + * @ops: standard thermal cooling devices callbacks. + * + * This interface function adds a new thermal cooling device (fan/processor/...) + * to /sys/class/thermal/ folder as cooling_device[0-*]. It tries to bind itself + * to all the thermal zone devices registered at the same time. + * + * Return: a pointer to the created struct thermal_cooling_device or an + * ERR_PTR. Caller must check return value with IS_ERR*() helpers. + */ +struct thermal_cooling_device * +thermal_cooling_device_register(char *type, void *devdata, + const struct thermal_cooling_device_ops *ops) +{ + struct thermal_cooling_device *cdev; + int result; + + if (type && strlen(type) >= THERMAL_NAME_LENGTH) + return ERR_PTR(-EINVAL); + + if (!ops || !ops->get_max_state || !ops->get_cur_state || + !ops->set_cur_state) + return ERR_PTR(-EINVAL); + + cdev = kzalloc(sizeof(struct thermal_cooling_device), GFP_KERNEL); + if (!cdev) + return ERR_PTR(-ENOMEM); + + result = get_idr(&thermal_cdev_idr, &thermal_idr_lock, &cdev->id); + if (result) { + kfree(cdev); + return ERR_PTR(result); + } + + strlcpy(cdev->type, type ? 
: "", sizeof(cdev->type)); + mutex_init(&cdev->lock); + INIT_LIST_HEAD(&cdev->thermal_instances); + cdev->ops = ops; + cdev->updated = true; + cdev->device.class = &thermal_class; + cdev->devdata = devdata; + dev_set_name(&cdev->device, "cooling_device%d", cdev->id); + result = device_register(&cdev->device); + if (result) { + release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id); + kfree(cdev); + return ERR_PTR(result); + } + + /* sys I/F */ + if (type) { + result = device_create_file(&cdev->device, &dev_attr_cdev_type); + if (result) + goto unregister; + } + + result = device_create_file(&cdev->device, &dev_attr_max_state); + if (result) + goto unregister; + + result = device_create_file(&cdev->device, &dev_attr_cur_state); + if (result) + goto unregister; + + /* Add 'this' new cdev to the global cdev list */ + mutex_lock(&thermal_list_lock); + list_add(&cdev->node, &thermal_cdev_list); + mutex_unlock(&thermal_list_lock); + + /* Update binding information for 'this' new cdev */ + bind_cdev(cdev); + + return cdev; + +unregister: + release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id); + device_unregister(&cdev->device); + return ERR_PTR(result); +} +EXPORT_SYMBOL_GPL(thermal_cooling_device_register); + +/** + * thermal_cooling_device_unregister - removes the registered thermal cooling device + * @cdev: the thermal cooling device to remove. + * + * thermal_cooling_device_unregister() must be called when the device is no + * longer needed. + */ +void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev) +{ + int i; + const struct thermal_zone_params *tzp; + struct thermal_zone_device *tz; + struct thermal_cooling_device *pos = NULL; + + if (!cdev) + return; + + mutex_lock(&thermal_list_lock); + list_for_each_entry(pos, &thermal_cdev_list, node) + if (pos == cdev) + break; + if (pos != cdev) { + /* thermal cooling device not found */ + mutex_unlock(&thermal_list_lock); + return; + } + list_del(&cdev->node); + + /* Unbind all thermal zones associated with 'this' cdev */ + list_for_each_entry(tz, &thermal_tz_list, node) { + if (tz->ops->unbind) { + tz->ops->unbind(tz, cdev); + continue; + } + + if (!tz->tzp || !tz->tzp->tbp) + continue; + + tzp = tz->tzp; + for (i = 0; i < tzp->num_tbps; i++) { + if (tzp->tbp[i].cdev == cdev) { + __unbind(tz, tzp->tbp[i].trip_mask, cdev); + tzp->tbp[i].cdev = NULL; + } + } + } + + mutex_unlock(&thermal_list_lock); + + if (cdev->type[0]) + device_remove_file(&cdev->device, &dev_attr_cdev_type); + device_remove_file(&cdev->device, &dev_attr_max_state); + device_remove_file(&cdev->device, &dev_attr_cur_state); + + release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id); + device_unregister(&cdev->device); + return; +} +EXPORT_SYMBOL_GPL(thermal_cooling_device_unregister); + +void thermal_cdev_update(struct thermal_cooling_device *cdev) +{ + struct thermal_instance *instance; + unsigned long target = 0; + + /* cooling device is updated*/ + if (cdev->updated) + return; + + mutex_lock(&cdev->lock); + /* Make sure cdev enters the deepest cooling state */ + list_for_each_entry(instance, &cdev->thermal_instances, cdev_node) { + if (instance->target == THERMAL_NO_TARGET) + continue; + if (instance->target > target) + target = instance->target; + } + mutex_unlock(&cdev->lock); + cdev->ops->set_cur_state(cdev, target); + cdev->updated = true; +} +EXPORT_SYMBOL(thermal_cdev_update); + +/** + * thermal_notify_framework - Sensor drivers use this API to notify framework + * @tz: thermal zone device + * @trip: indicates which trip point has 
been crossed + * + * This function handles the trip events from sensor drivers. It starts + * throttling the cooling devices according to the policy configured. + * For CRITICAL and HOT trip points, this notifies the respective drivers, + * and does actual throttling for other trip points i.e ACTIVE and PASSIVE. + * The throttling policy is based on the configured platform data; if no + * platform data is provided, this uses the step_wise throttling policy. + */ +void thermal_notify_framework(struct thermal_zone_device *tz, int trip) +{ + handle_thermal_trip(tz, trip); +} +EXPORT_SYMBOL_GPL(thermal_notify_framework); + +/** + * create_trip_attrs() - create attributes for trip points + * @tz: the thermal zone device + * @mask: Writeable trip point bitmap. + * + * helper function to instantiate sysfs entries for every trip + * point and its properties of a struct thermal_zone_device. + * + * Return: 0 on success, the proper error value otherwise. + */ +static int create_trip_attrs(struct thermal_zone_device *tz, int mask) +{ + int indx; + int size = sizeof(struct thermal_attr) * tz->trips; + + tz->trip_type_attrs = kzalloc(size, GFP_KERNEL); + if (!tz->trip_type_attrs) + return -ENOMEM; + + tz->trip_temp_attrs = kzalloc(size, GFP_KERNEL); + if (!tz->trip_temp_attrs) { + kfree(tz->trip_type_attrs); + return -ENOMEM; + } + + if (tz->ops->get_trip_hyst) { + tz->trip_hyst_attrs = kzalloc(size, GFP_KERNEL); + if (!tz->trip_hyst_attrs) { + kfree(tz->trip_type_attrs); + kfree(tz->trip_temp_attrs); + return -ENOMEM; + } + } + + + for (indx = 0; indx < tz->trips; indx++) { + /* create trip type attribute */ + snprintf(tz->trip_type_attrs[indx].name, THERMAL_NAME_LENGTH, + "trip_point_%d_type", indx); + + sysfs_attr_init(&tz->trip_type_attrs[indx].attr.attr); + tz->trip_type_attrs[indx].attr.attr.name = + tz->trip_type_attrs[indx].name; + tz->trip_type_attrs[indx].attr.attr.mode = S_IRUGO; + tz->trip_type_attrs[indx].attr.show = trip_point_type_show; + + device_create_file(&tz->device, + &tz->trip_type_attrs[indx].attr); + + /* create trip temp attribute */ + snprintf(tz->trip_temp_attrs[indx].name, THERMAL_NAME_LENGTH, + "trip_point_%d_temp", indx); + + sysfs_attr_init(&tz->trip_temp_attrs[indx].attr.attr); + tz->trip_temp_attrs[indx].attr.attr.name = + tz->trip_temp_attrs[indx].name; + tz->trip_temp_attrs[indx].attr.attr.mode = S_IRUGO; + tz->trip_temp_attrs[indx].attr.show = trip_point_temp_show; + if (mask & (1 << indx)) { + tz->trip_temp_attrs[indx].attr.attr.mode |= S_IWUSR; + tz->trip_temp_attrs[indx].attr.store = + trip_point_temp_store; + } + + device_create_file(&tz->device, + &tz->trip_temp_attrs[indx].attr); + + /* create Optional trip hyst attribute */ + if (!tz->ops->get_trip_hyst) + continue; + snprintf(tz->trip_hyst_attrs[indx].name, THERMAL_NAME_LENGTH, + "trip_point_%d_hyst", indx); + + sysfs_attr_init(&tz->trip_hyst_attrs[indx].attr.attr); + tz->trip_hyst_attrs[indx].attr.attr.name = + tz->trip_hyst_attrs[indx].name; + tz->trip_hyst_attrs[indx].attr.attr.mode = S_IRUGO; + tz->trip_hyst_attrs[indx].attr.show = trip_point_hyst_show; + if (tz->ops->set_trip_hyst) { + tz->trip_hyst_attrs[indx].attr.attr.mode |= S_IWUSR; + tz->trip_hyst_attrs[indx].attr.store = + trip_point_hyst_store; + } + + device_create_file(&tz->device, + &tz->trip_hyst_attrs[indx].attr); + } + return 0; +} + +static void remove_trip_attrs(struct thermal_zone_device *tz) +{ + int indx; + + for (indx = 0; indx < tz->trips; indx++) { + device_remove_file(&tz->device, + &tz->trip_type_attrs[indx].attr); + 
device_remove_file(&tz->device, + &tz->trip_temp_attrs[indx].attr); + if (tz->ops->get_trip_hyst) + device_remove_file(&tz->device, + &tz->trip_hyst_attrs[indx].attr); + } + kfree(tz->trip_type_attrs); + kfree(tz->trip_temp_attrs); + kfree(tz->trip_hyst_attrs); +} + +/** + * thermal_zone_device_register() - register a new thermal zone device + * @type: the thermal zone device type + * @trips: the number of trip points the thermal zone support + * @mask: a bit string indicating the writeablility of trip points + * @devdata: private device data + * @ops: standard thermal zone device callbacks + * @tzp: thermal zone platform parameters + * @passive_delay: number of milliseconds to wait between polls when + * performing passive cooling + * @polling_delay: number of milliseconds to wait between polls when checking + * whether trip points have been crossed (0 for interrupt + * driven systems) + * + * This interface function adds a new thermal zone device (sensor) to + * /sys/class/thermal folder as thermal_zone[0-*]. It tries to bind all the + * thermal cooling devices registered at the same time. + * thermal_zone_device_unregister() must be called when the device is no + * longer needed. The passive cooling depends on the .get_trend() return value. + * + * Return: a pointer to the created struct thermal_zone_device or an + * in case of error, an ERR_PTR. Caller must check return value with + * IS_ERR*() helpers. + */ +struct thermal_zone_device *thermal_zone_device_register(const char *type, + int trips, int mask, void *devdata, + const struct thermal_zone_device_ops *ops, + const struct thermal_zone_params *tzp, + int passive_delay, int polling_delay) +{ + struct thermal_zone_device *tz; + enum thermal_trip_type trip_type; + int result; + int count; + int passive = 0; + + if (type && strlen(type) >= THERMAL_NAME_LENGTH) + return ERR_PTR(-EINVAL); + + if (trips > THERMAL_MAX_TRIPS || trips < 0 || mask >> trips) + return ERR_PTR(-EINVAL); + + if (!ops || !ops->get_temp) + return ERR_PTR(-EINVAL); + + if (trips > 0 && !ops->get_trip_type) + return ERR_PTR(-EINVAL); + + tz = kzalloc(sizeof(struct thermal_zone_device), GFP_KERNEL); + if (!tz) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&tz->thermal_instances); + idr_init(&tz->idr); + mutex_init(&tz->lock); + result = get_idr(&thermal_tz_idr, &thermal_idr_lock, &tz->id); + if (result) { + kfree(tz); + return ERR_PTR(result); + } + + strlcpy(tz->type, type ? 
: "", sizeof(tz->type)); + tz->ops = ops; + tz->tzp = tzp; + tz->device.class = &thermal_class; + tz->devdata = devdata; + tz->trips = trips; + tz->passive_delay = passive_delay; + tz->polling_delay = polling_delay; + + dev_set_name(&tz->device, "thermal_zone%d", tz->id); + result = device_register(&tz->device); + if (result) { + release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id); + kfree(tz); + return ERR_PTR(result); + } + + /* sys I/F */ + if (type) { + result = device_create_file(&tz->device, &dev_attr_type); + if (result) + goto unregister; + } + + result = device_create_file(&tz->device, &dev_attr_temp); + if (result) + goto unregister; + + if (ops->get_mode) { + result = device_create_file(&tz->device, &dev_attr_mode); + if (result) + goto unregister; + } + + result = create_trip_attrs(tz, mask); + if (result) + goto unregister; + + for (count = 0; count < trips; count++) { + tz->ops->get_trip_type(tz, count, &trip_type); + if (trip_type == THERMAL_TRIP_PASSIVE) + passive = 1; + } + + if (!passive) { + result = device_create_file(&tz->device, &dev_attr_passive); + if (result) + goto unregister; + } + +#ifdef CONFIG_THERMAL_EMULATION + result = device_create_file(&tz->device, &dev_attr_emul_temp); + if (result) + goto unregister; +#endif + /* Create policy attribute */ + result = device_create_file(&tz->device, &dev_attr_policy); + if (result) + goto unregister; + + /* Update 'this' zone's governor information */ + mutex_lock(&thermal_governor_lock); + + if (tz->tzp) + tz->governor = __find_governor(tz->tzp->governor_name); + else + tz->governor = __find_governor(DEFAULT_THERMAL_GOVERNOR); + + mutex_unlock(&thermal_governor_lock); + + result = thermal_add_hwmon_sysfs(tz); + if (result) + goto unregister; + + mutex_lock(&thermal_list_lock); + list_add_tail(&tz->node, &thermal_tz_list); + mutex_unlock(&thermal_list_lock); + + /* Bind cooling devices for this zone */ + bind_tz(tz); + + INIT_DELAYED_WORK(&(tz->poll_queue), thermal_zone_device_check); + + thermal_zone_device_update(tz); + + if (!result) + return tz; + +unregister: + release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id); + device_unregister(&tz->device); + return ERR_PTR(result); +} +EXPORT_SYMBOL_GPL(thermal_zone_device_register); + +/** + * thermal_device_unregister - removes the registered thermal zone device + * @tz: the thermal zone device to remove + */ +void thermal_zone_device_unregister(struct thermal_zone_device *tz) +{ + int i; + const struct thermal_zone_params *tzp; + struct thermal_cooling_device *cdev; + struct thermal_zone_device *pos = NULL; + + if (!tz) + return; + + tzp = tz->tzp; + + mutex_lock(&thermal_list_lock); + list_for_each_entry(pos, &thermal_tz_list, node) + if (pos == tz) + break; + if (pos != tz) { + /* thermal zone device not found */ + mutex_unlock(&thermal_list_lock); + return; + } + list_del(&tz->node); + + /* Unbind all cdevs associated with 'this' thermal zone */ + list_for_each_entry(cdev, &thermal_cdev_list, node) { + if (tz->ops->unbind) { + tz->ops->unbind(tz, cdev); + continue; + } + + if (!tzp || !tzp->tbp) + break; + + for (i = 0; i < tzp->num_tbps; i++) { + if (tzp->tbp[i].cdev == cdev) { + __unbind(tz, tzp->tbp[i].trip_mask, cdev); + tzp->tbp[i].cdev = NULL; + } + } + } + + mutex_unlock(&thermal_list_lock); + + thermal_zone_device_set_polling(tz, 0); + + if (tz->type[0]) + device_remove_file(&tz->device, &dev_attr_type); + device_remove_file(&tz->device, &dev_attr_temp); + if (tz->ops->get_mode) + device_remove_file(&tz->device, &dev_attr_mode); + 
device_remove_file(&tz->device, &dev_attr_policy); + remove_trip_attrs(tz); + tz->governor = NULL; + + thermal_remove_hwmon_sysfs(tz); + release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id); + idr_destroy(&tz->idr); + mutex_destroy(&tz->lock); + device_unregister(&tz->device); + return; +} +EXPORT_SYMBOL_GPL(thermal_zone_device_unregister); + +/** + * thermal_zone_get_zone_by_name() - search for a zone and returns its ref + * @name: thermal zone name to fetch the temperature + * + * When only one zone is found with the passed name, returns a reference to it. + * + * Return: On success returns a reference to an unique thermal zone with + * matching name equals to @name, an ERR_PTR otherwise (-EINVAL for invalid + * paramenters, -ENODEV for not found and -EEXIST for multiple matches). + */ +struct thermal_zone_device *thermal_zone_get_zone_by_name(const char *name) +{ + struct thermal_zone_device *pos = NULL, *ref = ERR_PTR(-EINVAL); + unsigned int found = 0; + + if (!name) + goto exit; + + mutex_lock(&thermal_list_lock); + list_for_each_entry(pos, &thermal_tz_list, node) + if (!strnicmp(name, pos->type, THERMAL_NAME_LENGTH)) { + found++; + ref = pos; + } + mutex_unlock(&thermal_list_lock); + + /* nothing has been found, thus an error code for it */ + if (found == 0) + ref = ERR_PTR(-ENODEV); + else if (found > 1) + /* Success only when an unique zone is found */ + ref = ERR_PTR(-EEXIST); + +exit: + return ref; +} +EXPORT_SYMBOL_GPL(thermal_zone_get_zone_by_name); + +#ifdef CONFIG_NET +static struct genl_family thermal_event_genl_family = { + .id = GENL_ID_GENERATE, + .name = THERMAL_GENL_FAMILY_NAME, + .version = THERMAL_GENL_VERSION, + .maxattr = THERMAL_GENL_ATTR_MAX, +}; + +static struct genl_multicast_group thermal_event_mcgrp = { + .name = THERMAL_GENL_MCAST_GROUP_NAME, +}; + +int thermal_generate_netlink_event(struct thermal_zone_device *tz, + enum events event) +{ + struct sk_buff *skb; + struct nlattr *attr; + struct thermal_genl_event *thermal_event; + void *msg_header; + int size; + int result; + static unsigned int thermal_event_seqnum; + + if (!tz) + return -EINVAL; + + /* allocate memory */ + size = nla_total_size(sizeof(struct thermal_genl_event)) + + nla_total_size(0); + + skb = genlmsg_new(size, GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + /* add the genetlink message header */ + msg_header = genlmsg_put(skb, 0, thermal_event_seqnum++, + &thermal_event_genl_family, 0, + THERMAL_GENL_CMD_EVENT); + if (!msg_header) { + nlmsg_free(skb); + return -ENOMEM; + } + + /* fill the data */ + attr = nla_reserve(skb, THERMAL_GENL_ATTR_EVENT, + sizeof(struct thermal_genl_event)); + + if (!attr) { + nlmsg_free(skb); + return -EINVAL; + } + + thermal_event = nla_data(attr); + if (!thermal_event) { + nlmsg_free(skb); + return -EINVAL; + } + + memset(thermal_event, 0, sizeof(struct thermal_genl_event)); + + thermal_event->orig = tz->id; + thermal_event->event = event; + + /* send multicast genetlink message */ + result = genlmsg_end(skb, msg_header); + if (result < 0) { + nlmsg_free(skb); + return result; + } + + result = genlmsg_multicast(skb, 0, thermal_event_mcgrp.id, GFP_ATOMIC); + if (result) + dev_err(&tz->device, "Failed to send netlink event:%d", result); + + return result; +} +EXPORT_SYMBOL_GPL(thermal_generate_netlink_event); + +static int genetlink_init(void) +{ + int result; + + result = genl_register_family(&thermal_event_genl_family); + if (result) + return result; + + result = genl_register_mc_group(&thermal_event_genl_family, + &thermal_event_mcgrp); + if (result) + 
genl_unregister_family(&thermal_event_genl_family); + return result; +} + +static void genetlink_exit(void) +{ + genl_unregister_family(&thermal_event_genl_family); +} +#else /* !CONFIG_NET */ +static inline int genetlink_init(void) { return 0; } +static inline void genetlink_exit(void) {} +#endif /* !CONFIG_NET */ + +static int __init thermal_register_governors(void) +{ + int result; + + result = thermal_gov_step_wise_register(); + if (result) + return result; + + result = thermal_gov_fair_share_register(); + if (result) + return result; + + return thermal_gov_user_space_register(); +} + +static void thermal_unregister_governors(void) +{ + thermal_gov_step_wise_unregister(); + thermal_gov_fair_share_unregister(); + thermal_gov_user_space_unregister(); +} + +static int __init thermal_init(void) +{ + int result; + + result = thermal_register_governors(); + if (result) + goto error; + + result = class_register(&thermal_class); + if (result) + goto unregister_governors; + + result = genetlink_init(); + if (result) + goto unregister_class; + + return 0; + +unregister_governors: + thermal_unregister_governors(); +unregister_class: + class_unregister(&thermal_class); +error: + idr_destroy(&thermal_tz_idr); + idr_destroy(&thermal_cdev_idr); + mutex_destroy(&thermal_idr_lock); + mutex_destroy(&thermal_list_lock); + mutex_destroy(&thermal_governor_lock); + return result; +} + +static void __exit thermal_exit(void) +{ + genetlink_exit(); + class_unregister(&thermal_class); + thermal_unregister_governors(); + idr_destroy(&thermal_tz_idr); + idr_destroy(&thermal_cdev_idr); + mutex_destroy(&thermal_idr_lock); + mutex_destroy(&thermal_list_lock); + mutex_destroy(&thermal_governor_lock); +} + +fs_initcall(thermal_init); +module_exit(thermal_exit); diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index 0d3205a..7cf2f66 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -50,4 +50,31 @@ struct thermal_instance { struct list_head cdev_node; /* node in cdev->thermal_instances */ }; +int thermal_register_governor(struct thermal_governor *); +void thermal_unregister_governor(struct thermal_governor *); + +#ifdef CONFIG_THERMAL_GOV_STEP_WISE +int thermal_gov_step_wise_register(void); +void thermal_gov_step_wise_unregister(void); +#else +static inline int thermal_gov_step_wise_register(void) { return 0; } +static inline void thermal_gov_step_wise_unregister(void) {} +#endif /* CONFIG_THERMAL_GOV_STEP_WISE */ + +#ifdef CONFIG_THERMAL_GOV_FAIR_SHARE +int thermal_gov_fair_share_register(void); +void thermal_gov_fair_share_unregister(void); +#else +static inline int thermal_gov_fair_share_register(void) { return 0; } +static inline void thermal_gov_fair_share_unregister(void) {} +#endif /* CONFIG_THERMAL_GOV_FAIR_SHARE */ + +#ifdef CONFIG_THERMAL_GOV_USER_SPACE +int thermal_gov_user_space_register(void); +void thermal_gov_user_space_unregister(void); +#else +static inline int thermal_gov_user_space_register(void) { return 0; } +static inline void thermal_gov_user_space_unregister(void) {} +#endif /* CONFIG_THERMAL_GOV_USER_SPACE */ + #endif /* __THERMAL_CORE_H__ */ diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c deleted file mode 100644 index 5b7863a..0000000 --- a/drivers/thermal/thermal_sys.c +++ /dev/null @@ -1,1888 +0,0 @@ -/* - * thermal.c - Generic Thermal Management Sysfs support. 
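
Since the new thermal_core.c exports thermal_zone_get_zone_by_name() and
thermal_zone_get_temp(), another driver can look a zone up by its type string
and read its temperature in millicelsius. A sketch, assuming a zone of type
"cpu-thermal" exists (both the zone name and the function are invented for
the example):

	static int example_read_cpu_temp(void)
	{
		struct thermal_zone_device *tz;
		unsigned long temp;	/* millicelsius */
		int ret;

		tz = thermal_zone_get_zone_by_name("cpu-thermal");
		if (IS_ERR(tz))
			return PTR_ERR(tz);

		ret = thermal_zone_get_temp(tz, &temp);
		if (!ret)
			pr_info("%s: %lu mC\n", tz->type, temp);

		return ret;
	}
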
- * - * Copyright (C) 2008 Intel Corp - * Copyright (C) 2008 Zhang Rui - * Copyright (C) 2008 Sujith Thomas - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "thermal_core.h" - -MODULE_AUTHOR("Zhang Rui"); -MODULE_DESCRIPTION("Generic thermal management sysfs support"); -MODULE_LICENSE("GPL"); - -static DEFINE_IDR(thermal_tz_idr); -static DEFINE_IDR(thermal_cdev_idr); -static DEFINE_MUTEX(thermal_idr_lock); - -static LIST_HEAD(thermal_tz_list); -static LIST_HEAD(thermal_cdev_list); -static LIST_HEAD(thermal_governor_list); - -static DEFINE_MUTEX(thermal_list_lock); -static DEFINE_MUTEX(thermal_governor_lock); - -static struct thermal_governor *__find_governor(const char *name) -{ - struct thermal_governor *pos; - - list_for_each_entry(pos, &thermal_governor_list, governor_list) - if (!strnicmp(name, pos->name, THERMAL_NAME_LENGTH)) - return pos; - - return NULL; -} - -int thermal_register_governor(struct thermal_governor *governor) -{ - int err; - const char *name; - struct thermal_zone_device *pos; - - if (!governor) - return -EINVAL; - - mutex_lock(&thermal_governor_lock); - - err = -EBUSY; - if (__find_governor(governor->name) == NULL) { - err = 0; - list_add(&governor->governor_list, &thermal_governor_list); - } - - mutex_lock(&thermal_list_lock); - - list_for_each_entry(pos, &thermal_tz_list, node) { - if (pos->governor) - continue; - if (pos->tzp) - name = pos->tzp->governor_name; - else - name = DEFAULT_THERMAL_GOVERNOR; - if (!strnicmp(name, governor->name, THERMAL_NAME_LENGTH)) - pos->governor = governor; - } - - mutex_unlock(&thermal_list_lock); - mutex_unlock(&thermal_governor_lock); - - return err; -} -EXPORT_SYMBOL_GPL(thermal_register_governor); - -void thermal_unregister_governor(struct thermal_governor *governor) -{ - struct thermal_zone_device *pos; - - if (!governor) - return; - - mutex_lock(&thermal_governor_lock); - - if (__find_governor(governor->name) == NULL) - goto exit; - - mutex_lock(&thermal_list_lock); - - list_for_each_entry(pos, &thermal_tz_list, node) { - if (!strnicmp(pos->governor->name, governor->name, - THERMAL_NAME_LENGTH)) - pos->governor = NULL; - } - - mutex_unlock(&thermal_list_lock); - list_del(&governor->governor_list); -exit: - mutex_unlock(&thermal_governor_lock); - return; -} -EXPORT_SYMBOL_GPL(thermal_unregister_governor); - -static int get_idr(struct idr *idr, struct mutex *lock, int *id) -{ - int ret; - - if (lock) - mutex_lock(lock); - ret = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL); - if (lock) - mutex_unlock(lock); - if (unlikely(ret < 0)) - return ret; - *id = ret; - return 0; -} - -static void 
release_idr(struct idr *idr, struct mutex *lock, int id) -{ - if (lock) - mutex_lock(lock); - idr_remove(idr, id); - if (lock) - mutex_unlock(lock); -} - -int get_tz_trend(struct thermal_zone_device *tz, int trip) -{ - enum thermal_trend trend; - - if (!tz->ops->get_trend || tz->ops->get_trend(tz, trip, &trend)) { - if (tz->temperature > tz->last_temperature) - trend = THERMAL_TREND_RAISING; - else if (tz->temperature < tz->last_temperature) - trend = THERMAL_TREND_DROPPING; - else - trend = THERMAL_TREND_STABLE; - } - - return trend; -} -EXPORT_SYMBOL(get_tz_trend); - -struct thermal_instance *get_thermal_instance(struct thermal_zone_device *tz, - struct thermal_cooling_device *cdev, int trip) -{ - struct thermal_instance *pos = NULL; - struct thermal_instance *target_instance = NULL; - - mutex_lock(&tz->lock); - mutex_lock(&cdev->lock); - - list_for_each_entry(pos, &tz->thermal_instances, tz_node) { - if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) { - target_instance = pos; - break; - } - } - - mutex_unlock(&cdev->lock); - mutex_unlock(&tz->lock); - - return target_instance; -} -EXPORT_SYMBOL(get_thermal_instance); - -static void print_bind_err_msg(struct thermal_zone_device *tz, - struct thermal_cooling_device *cdev, int ret) -{ - dev_err(&tz->device, "binding zone %s with cdev %s failed:%d\n", - tz->type, cdev->type, ret); -} - -static void __bind(struct thermal_zone_device *tz, int mask, - struct thermal_cooling_device *cdev) -{ - int i, ret; - - for (i = 0; i < tz->trips; i++) { - if (mask & (1 << i)) { - ret = thermal_zone_bind_cooling_device(tz, i, cdev, - THERMAL_NO_LIMIT, THERMAL_NO_LIMIT); - if (ret) - print_bind_err_msg(tz, cdev, ret); - } - } -} - -static void __unbind(struct thermal_zone_device *tz, int mask, - struct thermal_cooling_device *cdev) -{ - int i; - - for (i = 0; i < tz->trips; i++) - if (mask & (1 << i)) - thermal_zone_unbind_cooling_device(tz, i, cdev); -} - -static void bind_cdev(struct thermal_cooling_device *cdev) -{ - int i, ret; - const struct thermal_zone_params *tzp; - struct thermal_zone_device *pos = NULL; - - mutex_lock(&thermal_list_lock); - - list_for_each_entry(pos, &thermal_tz_list, node) { - if (!pos->tzp && !pos->ops->bind) - continue; - - if (!pos->tzp && pos->ops->bind) { - ret = pos->ops->bind(pos, cdev); - if (ret) - print_bind_err_msg(pos, cdev, ret); - } - - tzp = pos->tzp; - if (!tzp || !tzp->tbp) - continue; - - for (i = 0; i < tzp->num_tbps; i++) { - if (tzp->tbp[i].cdev || !tzp->tbp[i].match) - continue; - if (tzp->tbp[i].match(pos, cdev)) - continue; - tzp->tbp[i].cdev = cdev; - __bind(pos, tzp->tbp[i].trip_mask, cdev); - } - } - - mutex_unlock(&thermal_list_lock); -} - -static void bind_tz(struct thermal_zone_device *tz) -{ - int i, ret; - struct thermal_cooling_device *pos = NULL; - const struct thermal_zone_params *tzp = tz->tzp; - - if (!tzp && !tz->ops->bind) - return; - - mutex_lock(&thermal_list_lock); - - /* If there is no platform data, try to use ops->bind */ - if (!tzp && tz->ops->bind) { - list_for_each_entry(pos, &thermal_cdev_list, node) { - ret = tz->ops->bind(tz, pos); - if (ret) - print_bind_err_msg(tz, pos, ret); - } - goto exit; - } - - if (!tzp || !tzp->tbp) - goto exit; - - list_for_each_entry(pos, &thermal_cdev_list, node) { - for (i = 0; i < tzp->num_tbps; i++) { - if (tzp->tbp[i].cdev || !tzp->tbp[i].match) - continue; - if (tzp->tbp[i].match(tz, pos)) - continue; - tzp->tbp[i].cdev = pos; - __bind(tz, tzp->tbp[i].trip_mask, pos); - } - } -exit: - mutex_unlock(&thermal_list_lock); -} - -static void 
thermal_zone_device_set_polling(struct thermal_zone_device *tz, - int delay) -{ - if (delay > 1000) - mod_delayed_work(system_freezable_wq, &tz->poll_queue, - round_jiffies(msecs_to_jiffies(delay))); - else if (delay) - mod_delayed_work(system_freezable_wq, &tz->poll_queue, - msecs_to_jiffies(delay)); - else - cancel_delayed_work(&tz->poll_queue); -} - -static void monitor_thermal_zone(struct thermal_zone_device *tz) -{ - mutex_lock(&tz->lock); - - if (tz->passive) - thermal_zone_device_set_polling(tz, tz->passive_delay); - else if (tz->polling_delay) - thermal_zone_device_set_polling(tz, tz->polling_delay); - else - thermal_zone_device_set_polling(tz, 0); - - mutex_unlock(&tz->lock); -} - -static void handle_non_critical_trips(struct thermal_zone_device *tz, - int trip, enum thermal_trip_type trip_type) -{ - if (tz->governor) - tz->governor->throttle(tz, trip); -} - -static void handle_critical_trips(struct thermal_zone_device *tz, - int trip, enum thermal_trip_type trip_type) -{ - long trip_temp; - - tz->ops->get_trip_temp(tz, trip, &trip_temp); - - /* If we have not crossed the trip_temp, we do not care. */ - if (tz->temperature < trip_temp) - return; - - if (tz->ops->notify) - tz->ops->notify(tz, trip, trip_type); - - if (trip_type == THERMAL_TRIP_CRITICAL) { - dev_emerg(&tz->device, - "critical temperature reached(%d C),shutting down\n", - tz->temperature / 1000); - orderly_poweroff(true); - } -} - -static void handle_thermal_trip(struct thermal_zone_device *tz, int trip) -{ - enum thermal_trip_type type; - - tz->ops->get_trip_type(tz, trip, &type); - - if (type == THERMAL_TRIP_CRITICAL || type == THERMAL_TRIP_HOT) - handle_critical_trips(tz, trip, type); - else - handle_non_critical_trips(tz, trip, type); - /* - * Alright, we handled this trip successfully. - * So, start monitoring again. 
- */ - monitor_thermal_zone(tz); -} - -static int thermal_zone_get_temp(struct thermal_zone_device *tz, - unsigned long *temp) -{ - int ret = 0; -#ifdef CONFIG_THERMAL_EMULATION - int count; - unsigned long crit_temp = -1UL; - enum thermal_trip_type type; -#endif - - mutex_lock(&tz->lock); - - ret = tz->ops->get_temp(tz, temp); -#ifdef CONFIG_THERMAL_EMULATION - if (!tz->emul_temperature) - goto skip_emul; - - for (count = 0; count < tz->trips; count++) { - ret = tz->ops->get_trip_type(tz, count, &type); - if (!ret && type == THERMAL_TRIP_CRITICAL) { - ret = tz->ops->get_trip_temp(tz, count, &crit_temp); - break; - } - } - - if (ret) - goto skip_emul; - - if (*temp < crit_temp) - *temp = tz->emul_temperature; -skip_emul: -#endif - mutex_unlock(&tz->lock); - return ret; -} - -static void update_temperature(struct thermal_zone_device *tz) -{ - long temp; - int ret; - - ret = thermal_zone_get_temp(tz, &temp); - if (ret) { - dev_warn(&tz->device, "failed to read out thermal zone %d\n", - tz->id); - return; - } - - mutex_lock(&tz->lock); - tz->last_temperature = tz->temperature; - tz->temperature = temp; - mutex_unlock(&tz->lock); -} - -void thermal_zone_device_update(struct thermal_zone_device *tz) -{ - int count; - - update_temperature(tz); - - for (count = 0; count < tz->trips; count++) - handle_thermal_trip(tz, count); -} -EXPORT_SYMBOL(thermal_zone_device_update); - -static void thermal_zone_device_check(struct work_struct *work) -{ - struct thermal_zone_device *tz = container_of(work, struct - thermal_zone_device, - poll_queue.work); - thermal_zone_device_update(tz); -} - -/* sys I/F for thermal zone */ - -#define to_thermal_zone(_dev) \ - container_of(_dev, struct thermal_zone_device, device) - -static ssize_t -type_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct thermal_zone_device *tz = to_thermal_zone(dev); - - return sprintf(buf, "%s\n", tz->type); -} - -static ssize_t -temp_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct thermal_zone_device *tz = to_thermal_zone(dev); - long temperature; - int ret; - - ret = thermal_zone_get_temp(tz, &temperature); - - if (ret) - return ret; - - return sprintf(buf, "%ld\n", temperature); -} - -static ssize_t -mode_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct thermal_zone_device *tz = to_thermal_zone(dev); - enum thermal_device_mode mode; - int result; - - if (!tz->ops->get_mode) - return -EPERM; - - result = tz->ops->get_mode(tz, &mode); - if (result) - return result; - - return sprintf(buf, "%s\n", mode == THERMAL_DEVICE_ENABLED ? 
"enabled" - : "disabled"); -} - -static ssize_t -mode_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct thermal_zone_device *tz = to_thermal_zone(dev); - int result; - - if (!tz->ops->set_mode) - return -EPERM; - - if (!strncmp(buf, "enabled", sizeof("enabled") - 1)) - result = tz->ops->set_mode(tz, THERMAL_DEVICE_ENABLED); - else if (!strncmp(buf, "disabled", sizeof("disabled") - 1)) - result = tz->ops->set_mode(tz, THERMAL_DEVICE_DISABLED); - else - result = -EINVAL; - - if (result) - return result; - - return count; -} - -static ssize_t -trip_point_type_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct thermal_zone_device *tz = to_thermal_zone(dev); - enum thermal_trip_type type; - int trip, result; - - if (!tz->ops->get_trip_type) - return -EPERM; - - if (!sscanf(attr->attr.name, "trip_point_%d_type", &trip)) - return -EINVAL; - - result = tz->ops->get_trip_type(tz, trip, &type); - if (result) - return result; - - switch (type) { - case THERMAL_TRIP_CRITICAL: - return sprintf(buf, "critical\n"); - case THERMAL_TRIP_HOT: - return sprintf(buf, "hot\n"); - case THERMAL_TRIP_PASSIVE: - return sprintf(buf, "passive\n"); - case THERMAL_TRIP_ACTIVE: - return sprintf(buf, "active\n"); - default: - return sprintf(buf, "unknown\n"); - } -} - -static ssize_t -trip_point_temp_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct thermal_zone_device *tz = to_thermal_zone(dev); - int trip, ret; - unsigned long temperature; - - if (!tz->ops->set_trip_temp) - return -EPERM; - - if (!sscanf(attr->attr.name, "trip_point_%d_temp", &trip)) - return -EINVAL; - - if (kstrtoul(buf, 10, &temperature)) - return -EINVAL; - - ret = tz->ops->set_trip_temp(tz, trip, temperature); - - return ret ? ret : count; -} - -static ssize_t -trip_point_temp_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct thermal_zone_device *tz = to_thermal_zone(dev); - int trip, ret; - long temperature; - - if (!tz->ops->get_trip_temp) - return -EPERM; - - if (!sscanf(attr->attr.name, "trip_point_%d_temp", &trip)) - return -EINVAL; - - ret = tz->ops->get_trip_temp(tz, trip, &temperature); - - if (ret) - return ret; - - return sprintf(buf, "%ld\n", temperature); -} - -static ssize_t -trip_point_hyst_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct thermal_zone_device *tz = to_thermal_zone(dev); - int trip, ret; - unsigned long temperature; - - if (!tz->ops->set_trip_hyst) - return -EPERM; - - if (!sscanf(attr->attr.name, "trip_point_%d_hyst", &trip)) - return -EINVAL; - - if (kstrtoul(buf, 10, &temperature)) - return -EINVAL; - - /* - * We are not doing any check on the 'temperature' value - * here. The driver implementing 'set_trip_hyst' has to - * take care of this. - */ - ret = tz->ops->set_trip_hyst(tz, trip, temperature); - - return ret ? ret : count; -} - -static ssize_t -trip_point_hyst_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct thermal_zone_device *tz = to_thermal_zone(dev); - int trip, ret; - unsigned long temperature; - - if (!tz->ops->get_trip_hyst) - return -EPERM; - - if (!sscanf(attr->attr.name, "trip_point_%d_hyst", &trip)) - return -EINVAL; - - ret = tz->ops->get_trip_hyst(tz, trip, &temperature); - - return ret ? 
ret : sprintf(buf, "%ld\n", temperature); -} - -static ssize_t -passive_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct thermal_zone_device *tz = to_thermal_zone(dev); - struct thermal_cooling_device *cdev = NULL; - int state; - - if (!sscanf(buf, "%d\n", &state)) - return -EINVAL; - - /* sanity check: values below 1000 millicelcius don't make sense - * and can cause the system to go into a thermal heart attack - */ - if (state && state < 1000) - return -EINVAL; - - if (state && !tz->forced_passive) { - mutex_lock(&thermal_list_lock); - list_for_each_entry(cdev, &thermal_cdev_list, node) { - if (!strncmp("Processor", cdev->type, - sizeof("Processor"))) - thermal_zone_bind_cooling_device(tz, - THERMAL_TRIPS_NONE, cdev, - THERMAL_NO_LIMIT, - THERMAL_NO_LIMIT); - } - mutex_unlock(&thermal_list_lock); - if (!tz->passive_delay) - tz->passive_delay = 1000; - } else if (!state && tz->forced_passive) { - mutex_lock(&thermal_list_lock); - list_for_each_entry(cdev, &thermal_cdev_list, node) { - if (!strncmp("Processor", cdev->type, - sizeof("Processor"))) - thermal_zone_unbind_cooling_device(tz, - THERMAL_TRIPS_NONE, - cdev); - } - mutex_unlock(&thermal_list_lock); - tz->passive_delay = 0; - } - - tz->forced_passive = state; - - thermal_zone_device_update(tz); - - return count; -} - -static ssize_t -passive_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct thermal_zone_device *tz = to_thermal_zone(dev); - - return sprintf(buf, "%d\n", tz->forced_passive); -} - -static ssize_t -policy_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - int ret = -EINVAL; - struct thermal_zone_device *tz = to_thermal_zone(dev); - struct thermal_governor *gov; - - mutex_lock(&thermal_governor_lock); - - gov = __find_governor(buf); - if (!gov) - goto exit; - - tz->governor = gov; - ret = count; - -exit: - mutex_unlock(&thermal_governor_lock); - return ret; -} - -static ssize_t -policy_show(struct device *dev, struct device_attribute *devattr, char *buf) -{ - struct thermal_zone_device *tz = to_thermal_zone(dev); - - return sprintf(buf, "%s\n", tz->governor->name); -} - -#ifdef CONFIG_THERMAL_EMULATION -static ssize_t -emul_temp_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct thermal_zone_device *tz = to_thermal_zone(dev); - int ret = 0; - unsigned long temperature; - - if (kstrtoul(buf, 10, &temperature)) - return -EINVAL; - - if (!tz->ops->set_emul_temp) { - mutex_lock(&tz->lock); - tz->emul_temperature = temperature; - mutex_unlock(&tz->lock); - } else { - ret = tz->ops->set_emul_temp(tz, temperature); - } - - return ret ? 
ret : count; -} -static DEVICE_ATTR(emul_temp, S_IWUSR, NULL, emul_temp_store); -#endif/*CONFIG_THERMAL_EMULATION*/ - -static DEVICE_ATTR(type, 0444, type_show, NULL); -static DEVICE_ATTR(temp, 0444, temp_show, NULL); -static DEVICE_ATTR(mode, 0644, mode_show, mode_store); -static DEVICE_ATTR(passive, S_IRUGO | S_IWUSR, passive_show, passive_store); -static DEVICE_ATTR(policy, S_IRUGO | S_IWUSR, policy_show, policy_store); - -/* sys I/F for cooling device */ -#define to_cooling_device(_dev) \ - container_of(_dev, struct thermal_cooling_device, device) - -static ssize_t -thermal_cooling_device_type_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct thermal_cooling_device *cdev = to_cooling_device(dev); - - return sprintf(buf, "%s\n", cdev->type); -} - -static ssize_t -thermal_cooling_device_max_state_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct thermal_cooling_device *cdev = to_cooling_device(dev); - unsigned long state; - int ret; - - ret = cdev->ops->get_max_state(cdev, &state); - if (ret) - return ret; - return sprintf(buf, "%ld\n", state); -} - -static ssize_t -thermal_cooling_device_cur_state_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct thermal_cooling_device *cdev = to_cooling_device(dev); - unsigned long state; - int ret; - - ret = cdev->ops->get_cur_state(cdev, &state); - if (ret) - return ret; - return sprintf(buf, "%ld\n", state); -} - -static ssize_t -thermal_cooling_device_cur_state_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct thermal_cooling_device *cdev = to_cooling_device(dev); - unsigned long state; - int result; - - if (!sscanf(buf, "%ld\n", &state)) - return -EINVAL; - - if ((long)state < 0) - return -EINVAL; - - result = cdev->ops->set_cur_state(cdev, state); - if (result) - return result; - return count; -} - -static struct device_attribute dev_attr_cdev_type = -__ATTR(type, 0444, thermal_cooling_device_type_show, NULL); -static DEVICE_ATTR(max_state, 0444, - thermal_cooling_device_max_state_show, NULL); -static DEVICE_ATTR(cur_state, 0644, - thermal_cooling_device_cur_state_show, - thermal_cooling_device_cur_state_store); - -static ssize_t -thermal_cooling_device_trip_point_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct thermal_instance *instance; - - instance = - container_of(attr, struct thermal_instance, attr); - - if (instance->trip == THERMAL_TRIPS_NONE) - return sprintf(buf, "-1\n"); - else - return sprintf(buf, "%d\n", instance->trip); -} - -/* Device management */ - -#if defined(CONFIG_THERMAL_HWMON) - -/* hwmon sys I/F */ -#include - -/* thermal zone devices with the same type share one hwmon device */ -struct thermal_hwmon_device { - char type[THERMAL_NAME_LENGTH]; - struct device *device; - int count; - struct list_head tz_list; - struct list_head node; -}; - -struct thermal_hwmon_attr { - struct device_attribute attr; - char name[16]; -}; - -/* one temperature input for each thermal zone */ -struct thermal_hwmon_temp { - struct list_head hwmon_node; - struct thermal_zone_device *tz; - struct thermal_hwmon_attr temp_input; /* hwmon sys attr */ - struct thermal_hwmon_attr temp_crit; /* hwmon sys attr */ -}; - -static LIST_HEAD(thermal_hwmon_list); - -static ssize_t -name_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct thermal_hwmon_device *hwmon = dev_get_drvdata(dev); - return sprintf(buf, "%s\n", hwmon->type); -} -static 
DEVICE_ATTR(name, 0444, name_show, NULL); - -static ssize_t -temp_input_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - long temperature; - int ret; - struct thermal_hwmon_attr *hwmon_attr - = container_of(attr, struct thermal_hwmon_attr, attr); - struct thermal_hwmon_temp *temp - = container_of(hwmon_attr, struct thermal_hwmon_temp, - temp_input); - struct thermal_zone_device *tz = temp->tz; - - ret = thermal_zone_get_temp(tz, &temperature); - - if (ret) - return ret; - - return sprintf(buf, "%ld\n", temperature); -} - -static ssize_t -temp_crit_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct thermal_hwmon_attr *hwmon_attr - = container_of(attr, struct thermal_hwmon_attr, attr); - struct thermal_hwmon_temp *temp - = container_of(hwmon_attr, struct thermal_hwmon_temp, - temp_crit); - struct thermal_zone_device *tz = temp->tz; - long temperature; - int ret; - - ret = tz->ops->get_trip_temp(tz, 0, &temperature); - if (ret) - return ret; - - return sprintf(buf, "%ld\n", temperature); -} - - -static struct thermal_hwmon_device * -thermal_hwmon_lookup_by_type(const struct thermal_zone_device *tz) -{ - struct thermal_hwmon_device *hwmon; - - mutex_lock(&thermal_list_lock); - list_for_each_entry(hwmon, &thermal_hwmon_list, node) - if (!strcmp(hwmon->type, tz->type)) { - mutex_unlock(&thermal_list_lock); - return hwmon; - } - mutex_unlock(&thermal_list_lock); - - return NULL; -} - -/* Find the temperature input matching a given thermal zone */ -static struct thermal_hwmon_temp * -thermal_hwmon_lookup_temp(const struct thermal_hwmon_device *hwmon, - const struct thermal_zone_device *tz) -{ - struct thermal_hwmon_temp *temp; - - mutex_lock(&thermal_list_lock); - list_for_each_entry(temp, &hwmon->tz_list, hwmon_node) - if (temp->tz == tz) { - mutex_unlock(&thermal_list_lock); - return temp; - } - mutex_unlock(&thermal_list_lock); - - return NULL; -} - -static int -thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) -{ - struct thermal_hwmon_device *hwmon; - struct thermal_hwmon_temp *temp; - int new_hwmon_device = 1; - int result; - - hwmon = thermal_hwmon_lookup_by_type(tz); - if (hwmon) { - new_hwmon_device = 0; - goto register_sys_interface; - } - - hwmon = kzalloc(sizeof(struct thermal_hwmon_device), GFP_KERNEL); - if (!hwmon) - return -ENOMEM; - - INIT_LIST_HEAD(&hwmon->tz_list); - strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH); - hwmon->device = hwmon_device_register(NULL); - if (IS_ERR(hwmon->device)) { - result = PTR_ERR(hwmon->device); - goto free_mem; - } - dev_set_drvdata(hwmon->device, hwmon); - result = device_create_file(hwmon->device, &dev_attr_name); - if (result) - goto free_mem; - - register_sys_interface: - temp = kzalloc(sizeof(struct thermal_hwmon_temp), GFP_KERNEL); - if (!temp) { - result = -ENOMEM; - goto unregister_name; - } - - temp->tz = tz; - hwmon->count++; - - snprintf(temp->temp_input.name, sizeof(temp->temp_input.name), - "temp%d_input", hwmon->count); - temp->temp_input.attr.attr.name = temp->temp_input.name; - temp->temp_input.attr.attr.mode = 0444; - temp->temp_input.attr.show = temp_input_show; - sysfs_attr_init(&temp->temp_input.attr.attr); - result = device_create_file(hwmon->device, &temp->temp_input.attr); - if (result) - goto free_temp_mem; - - if (tz->ops->get_crit_temp) { - unsigned long temperature; - if (!tz->ops->get_crit_temp(tz, &temperature)) { - snprintf(temp->temp_crit.name, - sizeof(temp->temp_crit.name), - "temp%d_crit", hwmon->count); - temp->temp_crit.attr.attr.name = 
temp->temp_crit.name; - temp->temp_crit.attr.attr.mode = 0444; - temp->temp_crit.attr.show = temp_crit_show; - sysfs_attr_init(&temp->temp_crit.attr.attr); - result = device_create_file(hwmon->device, - &temp->temp_crit.attr); - if (result) - goto unregister_input; - } - } - - mutex_lock(&thermal_list_lock); - if (new_hwmon_device) - list_add_tail(&hwmon->node, &thermal_hwmon_list); - list_add_tail(&temp->hwmon_node, &hwmon->tz_list); - mutex_unlock(&thermal_list_lock); - - return 0; - - unregister_input: - device_remove_file(hwmon->device, &temp->temp_input.attr); - free_temp_mem: - kfree(temp); - unregister_name: - if (new_hwmon_device) { - device_remove_file(hwmon->device, &dev_attr_name); - hwmon_device_unregister(hwmon->device); - } - free_mem: - if (new_hwmon_device) - kfree(hwmon); - - return result; -} - -static void -thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz) -{ - struct thermal_hwmon_device *hwmon; - struct thermal_hwmon_temp *temp; - - hwmon = thermal_hwmon_lookup_by_type(tz); - if (unlikely(!hwmon)) { - /* Should never happen... */ - dev_dbg(&tz->device, "hwmon device lookup failed!\n"); - return; - } - - temp = thermal_hwmon_lookup_temp(hwmon, tz); - if (unlikely(!temp)) { - /* Should never happen... */ - dev_dbg(&tz->device, "temperature input lookup failed!\n"); - return; - } - - device_remove_file(hwmon->device, &temp->temp_input.attr); - if (tz->ops->get_crit_temp) - device_remove_file(hwmon->device, &temp->temp_crit.attr); - - mutex_lock(&thermal_list_lock); - list_del(&temp->hwmon_node); - kfree(temp); - if (!list_empty(&hwmon->tz_list)) { - mutex_unlock(&thermal_list_lock); - return; - } - list_del(&hwmon->node); - mutex_unlock(&thermal_list_lock); - - device_remove_file(hwmon->device, &dev_attr_name); - hwmon_device_unregister(hwmon->device); - kfree(hwmon); -} -#else -static int -thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) -{ - return 0; -} - -static void -thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz) -{ -} -#endif - -/** - * thermal_zone_bind_cooling_device - bind a cooling device to a thermal zone - * @tz: thermal zone device - * @trip: indicates which trip point the cooling devices is - * associated with in this thermal zone. - * @cdev: thermal cooling device - * - * This function is usually called in the thermal zone device .bind callback. - */ -int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, - int trip, - struct thermal_cooling_device *cdev, - unsigned long upper, unsigned long lower) -{ - struct thermal_instance *dev; - struct thermal_instance *pos; - struct thermal_zone_device *pos1; - struct thermal_cooling_device *pos2; - unsigned long max_state; - int result; - - if (trip >= tz->trips || (trip < 0 && trip != THERMAL_TRIPS_NONE)) - return -EINVAL; - - list_for_each_entry(pos1, &thermal_tz_list, node) { - if (pos1 == tz) - break; - } - list_for_each_entry(pos2, &thermal_cdev_list, node) { - if (pos2 == cdev) - break; - } - - if (tz != pos1 || cdev != pos2) - return -EINVAL; - - cdev->ops->get_max_state(cdev, &max_state); - - /* lower default 0, upper default max_state */ - lower = lower == THERMAL_NO_LIMIT ? 0 : lower; - upper = upper == THERMAL_NO_LIMIT ? 
max_state : upper; - - if (lower > upper || upper > max_state) - return -EINVAL; - - dev = - kzalloc(sizeof(struct thermal_instance), GFP_KERNEL); - if (!dev) - return -ENOMEM; - dev->tz = tz; - dev->cdev = cdev; - dev->trip = trip; - dev->upper = upper; - dev->lower = lower; - dev->target = THERMAL_NO_TARGET; - - result = get_idr(&tz->idr, &tz->lock, &dev->id); - if (result) - goto free_mem; - - sprintf(dev->name, "cdev%d", dev->id); - result = - sysfs_create_link(&tz->device.kobj, &cdev->device.kobj, dev->name); - if (result) - goto release_idr; - - sprintf(dev->attr_name, "cdev%d_trip_point", dev->id); - sysfs_attr_init(&dev->attr.attr); - dev->attr.attr.name = dev->attr_name; - dev->attr.attr.mode = 0444; - dev->attr.show = thermal_cooling_device_trip_point_show; - result = device_create_file(&tz->device, &dev->attr); - if (result) - goto remove_symbol_link; - - mutex_lock(&tz->lock); - mutex_lock(&cdev->lock); - list_for_each_entry(pos, &tz->thermal_instances, tz_node) - if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) { - result = -EEXIST; - break; - } - if (!result) { - list_add_tail(&dev->tz_node, &tz->thermal_instances); - list_add_tail(&dev->cdev_node, &cdev->thermal_instances); - } - mutex_unlock(&cdev->lock); - mutex_unlock(&tz->lock); - - if (!result) - return 0; - - device_remove_file(&tz->device, &dev->attr); -remove_symbol_link: - sysfs_remove_link(&tz->device.kobj, dev->name); -release_idr: - release_idr(&tz->idr, &tz->lock, dev->id); -free_mem: - kfree(dev); - return result; -} -EXPORT_SYMBOL(thermal_zone_bind_cooling_device); - -/** - * thermal_zone_unbind_cooling_device - unbind a cooling device from a thermal zone - * @tz: thermal zone device - * @trip: indicates which trip point the cooling devices is - * associated with in this thermal zone. - * @cdev: thermal cooling device - * - * This function is usually called in the thermal zone device .unbind callback. - */ -int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz, - int trip, - struct thermal_cooling_device *cdev) -{ - struct thermal_instance *pos, *next; - - mutex_lock(&tz->lock); - mutex_lock(&cdev->lock); - list_for_each_entry_safe(pos, next, &tz->thermal_instances, tz_node) { - if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) { - list_del(&pos->tz_node); - list_del(&pos->cdev_node); - mutex_unlock(&cdev->lock); - mutex_unlock(&tz->lock); - goto unbind; - } - } - mutex_unlock(&cdev->lock); - mutex_unlock(&tz->lock); - - return -ENODEV; - -unbind: - device_remove_file(&tz->device, &pos->attr); - sysfs_remove_link(&tz->device.kobj, pos->name); - release_idr(&tz->idr, &tz->lock, pos->id); - kfree(pos); - return 0; -} -EXPORT_SYMBOL(thermal_zone_unbind_cooling_device); - -static void thermal_release(struct device *dev) -{ - struct thermal_zone_device *tz; - struct thermal_cooling_device *cdev; - - if (!strncmp(dev_name(dev), "thermal_zone", - sizeof("thermal_zone") - 1)) { - tz = to_thermal_zone(dev); - kfree(tz); - } else { - cdev = to_cooling_device(dev); - kfree(cdev); - } -} - -static struct class thermal_class = { - .name = "thermal", - .dev_release = thermal_release, -}; - -/** - * thermal_cooling_device_register - register a new thermal cooling device - * @type: the thermal cooling device type. - * @devdata: device private data. - * @ops: standard thermal cooling devices callbacks. 
- */ -struct thermal_cooling_device * -thermal_cooling_device_register(char *type, void *devdata, - const struct thermal_cooling_device_ops *ops) -{ - struct thermal_cooling_device *cdev; - int result; - - if (type && strlen(type) >= THERMAL_NAME_LENGTH) - return ERR_PTR(-EINVAL); - - if (!ops || !ops->get_max_state || !ops->get_cur_state || - !ops->set_cur_state) - return ERR_PTR(-EINVAL); - - cdev = kzalloc(sizeof(struct thermal_cooling_device), GFP_KERNEL); - if (!cdev) - return ERR_PTR(-ENOMEM); - - result = get_idr(&thermal_cdev_idr, &thermal_idr_lock, &cdev->id); - if (result) { - kfree(cdev); - return ERR_PTR(result); - } - - strcpy(cdev->type, type ? : ""); - mutex_init(&cdev->lock); - INIT_LIST_HEAD(&cdev->thermal_instances); - cdev->ops = ops; - cdev->updated = true; - cdev->device.class = &thermal_class; - cdev->devdata = devdata; - dev_set_name(&cdev->device, "cooling_device%d", cdev->id); - result = device_register(&cdev->device); - if (result) { - release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id); - kfree(cdev); - return ERR_PTR(result); - } - - /* sys I/F */ - if (type) { - result = device_create_file(&cdev->device, &dev_attr_cdev_type); - if (result) - goto unregister; - } - - result = device_create_file(&cdev->device, &dev_attr_max_state); - if (result) - goto unregister; - - result = device_create_file(&cdev->device, &dev_attr_cur_state); - if (result) - goto unregister; - - /* Add 'this' new cdev to the global cdev list */ - mutex_lock(&thermal_list_lock); - list_add(&cdev->node, &thermal_cdev_list); - mutex_unlock(&thermal_list_lock); - - /* Update binding information for 'this' new cdev */ - bind_cdev(cdev); - - return cdev; - -unregister: - release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id); - device_unregister(&cdev->device); - return ERR_PTR(result); -} -EXPORT_SYMBOL(thermal_cooling_device_register); - -/** - * thermal_cooling_device_unregister - removes the registered thermal cooling device - * @cdev: the thermal cooling device to remove. - * - * thermal_cooling_device_unregister() must be called when the device is no - * longer needed. 
- */ -void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev) -{ - int i; - const struct thermal_zone_params *tzp; - struct thermal_zone_device *tz; - struct thermal_cooling_device *pos = NULL; - - if (!cdev) - return; - - mutex_lock(&thermal_list_lock); - list_for_each_entry(pos, &thermal_cdev_list, node) - if (pos == cdev) - break; - if (pos != cdev) { - /* thermal cooling device not found */ - mutex_unlock(&thermal_list_lock); - return; - } - list_del(&cdev->node); - - /* Unbind all thermal zones associated with 'this' cdev */ - list_for_each_entry(tz, &thermal_tz_list, node) { - if (tz->ops->unbind) { - tz->ops->unbind(tz, cdev); - continue; - } - - if (!tz->tzp || !tz->tzp->tbp) - continue; - - tzp = tz->tzp; - for (i = 0; i < tzp->num_tbps; i++) { - if (tzp->tbp[i].cdev == cdev) { - __unbind(tz, tzp->tbp[i].trip_mask, cdev); - tzp->tbp[i].cdev = NULL; - } - } - } - - mutex_unlock(&thermal_list_lock); - - if (cdev->type[0]) - device_remove_file(&cdev->device, &dev_attr_cdev_type); - device_remove_file(&cdev->device, &dev_attr_max_state); - device_remove_file(&cdev->device, &dev_attr_cur_state); - - release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id); - device_unregister(&cdev->device); - return; -} -EXPORT_SYMBOL(thermal_cooling_device_unregister); - -void thermal_cdev_update(struct thermal_cooling_device *cdev) -{ - struct thermal_instance *instance; - unsigned long target = 0; - - /* cooling device is updated*/ - if (cdev->updated) - return; - - mutex_lock(&cdev->lock); - /* Make sure cdev enters the deepest cooling state */ - list_for_each_entry(instance, &cdev->thermal_instances, cdev_node) { - if (instance->target == THERMAL_NO_TARGET) - continue; - if (instance->target > target) - target = instance->target; - } - mutex_unlock(&cdev->lock); - cdev->ops->set_cur_state(cdev, target); - cdev->updated = true; -} -EXPORT_SYMBOL(thermal_cdev_update); - -/** - * notify_thermal_framework - Sensor drivers use this API to notify framework - * @tz: thermal zone device - * @trip: indicates which trip point has been crossed - * - * This function handles the trip events from sensor drivers. It starts - * throttling the cooling devices according to the policy configured. - * For CRITICAL and HOT trip points, this notifies the respective drivers, - * and does actual throttling for other trip points i.e ACTIVE and PASSIVE. - * The throttling policy is based on the configured platform data; if no - * platform data is provided, this uses the step_wise throttling policy. - */ -void notify_thermal_framework(struct thermal_zone_device *tz, int trip) -{ - handle_thermal_trip(tz, trip); -} -EXPORT_SYMBOL(notify_thermal_framework); - -/** - * create_trip_attrs - create attributes for trip points - * @tz: the thermal zone device - * @mask: Writeable trip point bitmap. 
- */ -static int create_trip_attrs(struct thermal_zone_device *tz, int mask) -{ - int indx; - int size = sizeof(struct thermal_attr) * tz->trips; - - tz->trip_type_attrs = kzalloc(size, GFP_KERNEL); - if (!tz->trip_type_attrs) - return -ENOMEM; - - tz->trip_temp_attrs = kzalloc(size, GFP_KERNEL); - if (!tz->trip_temp_attrs) { - kfree(tz->trip_type_attrs); - return -ENOMEM; - } - - if (tz->ops->get_trip_hyst) { - tz->trip_hyst_attrs = kzalloc(size, GFP_KERNEL); - if (!tz->trip_hyst_attrs) { - kfree(tz->trip_type_attrs); - kfree(tz->trip_temp_attrs); - return -ENOMEM; - } - } - - - for (indx = 0; indx < tz->trips; indx++) { - /* create trip type attribute */ - snprintf(tz->trip_type_attrs[indx].name, THERMAL_NAME_LENGTH, - "trip_point_%d_type", indx); - - sysfs_attr_init(&tz->trip_type_attrs[indx].attr.attr); - tz->trip_type_attrs[indx].attr.attr.name = - tz->trip_type_attrs[indx].name; - tz->trip_type_attrs[indx].attr.attr.mode = S_IRUGO; - tz->trip_type_attrs[indx].attr.show = trip_point_type_show; - - device_create_file(&tz->device, - &tz->trip_type_attrs[indx].attr); - - /* create trip temp attribute */ - snprintf(tz->trip_temp_attrs[indx].name, THERMAL_NAME_LENGTH, - "trip_point_%d_temp", indx); - - sysfs_attr_init(&tz->trip_temp_attrs[indx].attr.attr); - tz->trip_temp_attrs[indx].attr.attr.name = - tz->trip_temp_attrs[indx].name; - tz->trip_temp_attrs[indx].attr.attr.mode = S_IRUGO; - tz->trip_temp_attrs[indx].attr.show = trip_point_temp_show; - if (mask & (1 << indx)) { - tz->trip_temp_attrs[indx].attr.attr.mode |= S_IWUSR; - tz->trip_temp_attrs[indx].attr.store = - trip_point_temp_store; - } - - device_create_file(&tz->device, - &tz->trip_temp_attrs[indx].attr); - - /* create Optional trip hyst attribute */ - if (!tz->ops->get_trip_hyst) - continue; - snprintf(tz->trip_hyst_attrs[indx].name, THERMAL_NAME_LENGTH, - "trip_point_%d_hyst", indx); - - sysfs_attr_init(&tz->trip_hyst_attrs[indx].attr.attr); - tz->trip_hyst_attrs[indx].attr.attr.name = - tz->trip_hyst_attrs[indx].name; - tz->trip_hyst_attrs[indx].attr.attr.mode = S_IRUGO; - tz->trip_hyst_attrs[indx].attr.show = trip_point_hyst_show; - if (tz->ops->set_trip_hyst) { - tz->trip_hyst_attrs[indx].attr.attr.mode |= S_IWUSR; - tz->trip_hyst_attrs[indx].attr.store = - trip_point_hyst_store; - } - - device_create_file(&tz->device, - &tz->trip_hyst_attrs[indx].attr); - } - return 0; -} - -static void remove_trip_attrs(struct thermal_zone_device *tz) -{ - int indx; - - for (indx = 0; indx < tz->trips; indx++) { - device_remove_file(&tz->device, - &tz->trip_type_attrs[indx].attr); - device_remove_file(&tz->device, - &tz->trip_temp_attrs[indx].attr); - if (tz->ops->get_trip_hyst) - device_remove_file(&tz->device, - &tz->trip_hyst_attrs[indx].attr); - } - kfree(tz->trip_type_attrs); - kfree(tz->trip_temp_attrs); - kfree(tz->trip_hyst_attrs); -} - -/** - * thermal_zone_device_register - register a new thermal zone device - * @type: the thermal zone device type - * @trips: the number of trip points the thermal zone support - * @mask: a bit string indicating the writeablility of trip points - * @devdata: private device data - * @ops: standard thermal zone device callbacks - * @tzp: thermal zone platform parameters - * @passive_delay: number of milliseconds to wait between polls when - * performing passive cooling - * @polling_delay: number of milliseconds to wait between polls when checking - * whether trip points have been crossed (0 for interrupt - * driven systems) - * - * thermal_zone_device_unregister() must be called when the device is 
no - * longer needed. The passive cooling depends on the .get_trend() return value. - */ -struct thermal_zone_device *thermal_zone_device_register(const char *type, - int trips, int mask, void *devdata, - const struct thermal_zone_device_ops *ops, - const struct thermal_zone_params *tzp, - int passive_delay, int polling_delay) -{ - struct thermal_zone_device *tz; - enum thermal_trip_type trip_type; - int result; - int count; - int passive = 0; - - if (type && strlen(type) >= THERMAL_NAME_LENGTH) - return ERR_PTR(-EINVAL); - - if (trips > THERMAL_MAX_TRIPS || trips < 0 || mask >> trips) - return ERR_PTR(-EINVAL); - - if (!ops || !ops->get_temp) - return ERR_PTR(-EINVAL); - - if (trips > 0 && !ops->get_trip_type) - return ERR_PTR(-EINVAL); - - tz = kzalloc(sizeof(struct thermal_zone_device), GFP_KERNEL); - if (!tz) - return ERR_PTR(-ENOMEM); - - INIT_LIST_HEAD(&tz->thermal_instances); - idr_init(&tz->idr); - mutex_init(&tz->lock); - result = get_idr(&thermal_tz_idr, &thermal_idr_lock, &tz->id); - if (result) { - kfree(tz); - return ERR_PTR(result); - } - - strcpy(tz->type, type ? : ""); - tz->ops = ops; - tz->tzp = tzp; - tz->device.class = &thermal_class; - tz->devdata = devdata; - tz->trips = trips; - tz->passive_delay = passive_delay; - tz->polling_delay = polling_delay; - - dev_set_name(&tz->device, "thermal_zone%d", tz->id); - result = device_register(&tz->device); - if (result) { - release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id); - kfree(tz); - return ERR_PTR(result); - } - - /* sys I/F */ - if (type) { - result = device_create_file(&tz->device, &dev_attr_type); - if (result) - goto unregister; - } - - result = device_create_file(&tz->device, &dev_attr_temp); - if (result) - goto unregister; - - if (ops->get_mode) { - result = device_create_file(&tz->device, &dev_attr_mode); - if (result) - goto unregister; - } - - result = create_trip_attrs(tz, mask); - if (result) - goto unregister; - - for (count = 0; count < trips; count++) { - tz->ops->get_trip_type(tz, count, &trip_type); - if (trip_type == THERMAL_TRIP_PASSIVE) - passive = 1; - } - - if (!passive) { - result = device_create_file(&tz->device, &dev_attr_passive); - if (result) - goto unregister; - } - -#ifdef CONFIG_THERMAL_EMULATION - result = device_create_file(&tz->device, &dev_attr_emul_temp); - if (result) - goto unregister; -#endif - /* Create policy attribute */ - result = device_create_file(&tz->device, &dev_attr_policy); - if (result) - goto unregister; - - /* Update 'this' zone's governor information */ - mutex_lock(&thermal_governor_lock); - - if (tz->tzp) - tz->governor = __find_governor(tz->tzp->governor_name); - else - tz->governor = __find_governor(DEFAULT_THERMAL_GOVERNOR); - - mutex_unlock(&thermal_governor_lock); - - result = thermal_add_hwmon_sysfs(tz); - if (result) - goto unregister; - - mutex_lock(&thermal_list_lock); - list_add_tail(&tz->node, &thermal_tz_list); - mutex_unlock(&thermal_list_lock); - - /* Bind cooling devices for this zone */ - bind_tz(tz); - - INIT_DELAYED_WORK(&(tz->poll_queue), thermal_zone_device_check); - - thermal_zone_device_update(tz); - - if (!result) - return tz; - -unregister: - release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id); - device_unregister(&tz->device); - return ERR_PTR(result); -} -EXPORT_SYMBOL(thermal_zone_device_register); - -/** - * thermal_device_unregister - removes the registered thermal zone device - * @tz: the thermal zone device to remove - */ -void thermal_zone_device_unregister(struct thermal_zone_device *tz) -{ - int i; - const struct 
thermal_zone_params *tzp; - struct thermal_cooling_device *cdev; - struct thermal_zone_device *pos = NULL; - - if (!tz) - return; - - tzp = tz->tzp; - - mutex_lock(&thermal_list_lock); - list_for_each_entry(pos, &thermal_tz_list, node) - if (pos == tz) - break; - if (pos != tz) { - /* thermal zone device not found */ - mutex_unlock(&thermal_list_lock); - return; - } - list_del(&tz->node); - - /* Unbind all cdevs associated with 'this' thermal zone */ - list_for_each_entry(cdev, &thermal_cdev_list, node) { - if (tz->ops->unbind) { - tz->ops->unbind(tz, cdev); - continue; - } - - if (!tzp || !tzp->tbp) - break; - - for (i = 0; i < tzp->num_tbps; i++) { - if (tzp->tbp[i].cdev == cdev) { - __unbind(tz, tzp->tbp[i].trip_mask, cdev); - tzp->tbp[i].cdev = NULL; - } - } - } - - mutex_unlock(&thermal_list_lock); - - thermal_zone_device_set_polling(tz, 0); - - if (tz->type[0]) - device_remove_file(&tz->device, &dev_attr_type); - device_remove_file(&tz->device, &dev_attr_temp); - if (tz->ops->get_mode) - device_remove_file(&tz->device, &dev_attr_mode); - device_remove_file(&tz->device, &dev_attr_policy); - remove_trip_attrs(tz); - tz->governor = NULL; - - thermal_remove_hwmon_sysfs(tz); - release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id); - idr_destroy(&tz->idr); - mutex_destroy(&tz->lock); - device_unregister(&tz->device); - return; -} -EXPORT_SYMBOL(thermal_zone_device_unregister); - -#ifdef CONFIG_NET -static struct genl_family thermal_event_genl_family = { - .id = GENL_ID_GENERATE, - .name = THERMAL_GENL_FAMILY_NAME, - .version = THERMAL_GENL_VERSION, - .maxattr = THERMAL_GENL_ATTR_MAX, -}; - -static struct genl_multicast_group thermal_event_mcgrp = { - .name = THERMAL_GENL_MCAST_GROUP_NAME, -}; - -int thermal_generate_netlink_event(struct thermal_zone_device *tz, - enum events event) -{ - struct sk_buff *skb; - struct nlattr *attr; - struct thermal_genl_event *thermal_event; - void *msg_header; - int size; - int result; - static unsigned int thermal_event_seqnum; - - if (!tz) - return -EINVAL; - - /* allocate memory */ - size = nla_total_size(sizeof(struct thermal_genl_event)) + - nla_total_size(0); - - skb = genlmsg_new(size, GFP_ATOMIC); - if (!skb) - return -ENOMEM; - - /* add the genetlink message header */ - msg_header = genlmsg_put(skb, 0, thermal_event_seqnum++, - &thermal_event_genl_family, 0, - THERMAL_GENL_CMD_EVENT); - if (!msg_header) { - nlmsg_free(skb); - return -ENOMEM; - } - - /* fill the data */ - attr = nla_reserve(skb, THERMAL_GENL_ATTR_EVENT, - sizeof(struct thermal_genl_event)); - - if (!attr) { - nlmsg_free(skb); - return -EINVAL; - } - - thermal_event = nla_data(attr); - if (!thermal_event) { - nlmsg_free(skb); - return -EINVAL; - } - - memset(thermal_event, 0, sizeof(struct thermal_genl_event)); - - thermal_event->orig = tz->id; - thermal_event->event = event; - - /* send multicast genetlink message */ - result = genlmsg_end(skb, msg_header); - if (result < 0) { - nlmsg_free(skb); - return result; - } - - result = genlmsg_multicast(skb, 0, thermal_event_mcgrp.id, GFP_ATOMIC); - if (result) - dev_err(&tz->device, "Failed to send netlink event:%d", result); - - return result; -} -EXPORT_SYMBOL(thermal_generate_netlink_event); - -static int genetlink_init(void) -{ - int result; - - result = genl_register_family(&thermal_event_genl_family); - if (result) - return result; - - result = genl_register_mc_group(&thermal_event_genl_family, - &thermal_event_mcgrp); - if (result) - genl_unregister_family(&thermal_event_genl_family); - return result; -} - -static void 
genetlink_exit(void) -{ - genl_unregister_family(&thermal_event_genl_family); -} -#else /* !CONFIG_NET */ -static inline int genetlink_init(void) { return 0; } -static inline void genetlink_exit(void) {} -#endif /* !CONFIG_NET */ - -static int __init thermal_init(void) -{ - int result = 0; - - result = class_register(&thermal_class); - if (result) { - idr_destroy(&thermal_tz_idr); - idr_destroy(&thermal_cdev_idr); - mutex_destroy(&thermal_idr_lock); - mutex_destroy(&thermal_list_lock); - return result; - } - result = genetlink_init(); - return result; -} - -static void __exit thermal_exit(void) -{ - class_unregister(&thermal_class); - idr_destroy(&thermal_tz_idr); - idr_destroy(&thermal_cdev_idr); - mutex_destroy(&thermal_idr_lock); - mutex_destroy(&thermal_list_lock); - genetlink_exit(); -} - -fs_initcall(thermal_init); -module_exit(thermal_exit); diff --git a/drivers/thermal/user_space.c b/drivers/thermal/user_space.c index 6bbb380..10adcdd 100644 --- a/drivers/thermal/user_space.c +++ b/drivers/thermal/user_space.c @@ -22,9 +22,6 @@ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include #include #include "thermal_core.h" @@ -46,23 +43,15 @@ static int notify_user_space(struct thermal_zone_device *tz, int trip) static struct thermal_governor thermal_gov_user_space = { .name = "user_space", .throttle = notify_user_space, - .owner = THIS_MODULE, }; -static int __init thermal_gov_user_space_init(void) +int thermal_gov_user_space_register(void) { return thermal_register_governor(&thermal_gov_user_space); } -static void __exit thermal_gov_user_space_exit(void) +void thermal_gov_user_space_unregister(void) { thermal_unregister_governor(&thermal_gov_user_space); } -/* This should load after thermal framework */ -fs_initcall(thermal_gov_user_space_init); -module_exit(thermal_gov_user_space_exit); - -MODULE_AUTHOR("Durgadoss R"); -MODULE_DESCRIPTION("A user space Thermal notifier"); -MODULE_LICENSE("GPL"); diff --git a/drivers/usb/host/ehci-tilegx.c b/drivers/usb/host/ehci-tilegx.c index 1d215cd..b083a35 100644 --- a/drivers/usb/host/ehci-tilegx.c +++ b/drivers/usb/host/ehci-tilegx.c @@ -118,8 +118,10 @@ static int ehci_hcd_tilegx_drv_probe(struct platform_device *pdev) hcd = usb_create_hcd(&ehci_tilegx_hc_driver, &pdev->dev, dev_name(&pdev->dev)); - if (!hcd) - return -ENOMEM; + if (!hcd) { + ret = -ENOMEM; + goto err_hcd; + } /* * We don't use rsrc_start to map in our registers, but seems like @@ -176,6 +178,7 @@ err_have_irq: err_no_irq: tilegx_stop_ehc(); usb_put_hcd(hcd); +err_hcd: gxio_usb_host_destroy(&pdata->usb_ctx); return ret; } diff --git a/drivers/usb/host/ohci-tilegx.c b/drivers/usb/host/ohci-tilegx.c index 1ae7b28..ea73009 100644 --- a/drivers/usb/host/ohci-tilegx.c +++ b/drivers/usb/host/ohci-tilegx.c @@ -112,8 +112,10 @@ static int ohci_hcd_tilegx_drv_probe(struct platform_device *pdev) hcd = usb_create_hcd(&ohci_tilegx_hc_driver, &pdev->dev, dev_name(&pdev->dev)); - if (!hcd) - return -ENOMEM; + if (!hcd) { + ret = -ENOMEM; + goto err_hcd; + } /* * We don't use rsrc_start to map in our registers, but seems like @@ -165,6 +167,7 @@ err_have_irq: err_no_irq: tilegx_stop_ohc(); usb_put_hcd(hcd); +err_hcd: gxio_usb_host_destroy(&pdata->usb_ctx); return ret; } diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig index aab2ab2..371d0e7 100644 --- a/drivers/usb/phy/Kconfig +++ b/drivers/usb/phy/Kconfig @@ -128,7 +128,7 @@ config TWL6030_USB config USB_GPIO_VBUS tristate "GPIO based 
peripheral-only VBUS sensing 'transceiver'" - depends on GENERIC_GPIO + depends on GPIOLIB help Provides simple GPIO VBUS sensing for controllers with an internal transceiver via the usb_phy interface, and diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index c04ccdf..d71d60f 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -2429,7 +2429,7 @@ config FB_MXS select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT select FB_MODE_HELPERS - select OF_VIDEOMODE + select VIDEOMODE_HELPERS help Framebuffer support for the MXS SoC. @@ -2483,7 +2483,7 @@ config FB_SSD1307 tristate "Solomon SSD1307 framebuffer support" depends on FB && I2C depends on OF - depends on GENERIC_GPIO + depends on GPIOLIB select FB_SYS_FOPS select FB_SYS_FILLRECT select FB_SYS_COPYAREA diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig index 2e166c3..d5ab658 100644 --- a/drivers/video/backlight/Kconfig +++ b/drivers/video/backlight/Kconfig @@ -36,14 +36,14 @@ config LCD_CORGI config LCD_L4F00242T03 tristate "Epson L4F00242T03 LCD" - depends on SPI_MASTER && GENERIC_GPIO + depends on SPI_MASTER && GPIOLIB help SPI driver for Epson L4F00242T03. This provides basic support for init and powering the LCD up/down through a sysfs interface. config LCD_LMS283GF05 tristate "Samsung LMS283GF05 LCD" - depends on SPI_MASTER && GENERIC_GPIO + depends on SPI_MASTER && GPIOLIB help SPI driver for Samsung LMS283GF05. This provides basic support for powering the LCD up/down through a sysfs interface. diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c index 1b2c26d..21223d4 100644 --- a/drivers/video/mxsfb.c +++ b/drivers/video/mxsfb.c @@ -42,7 +42,6 @@ #include #include #include -#include