Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/sleep.c | 27
-rw-r--r-- drivers/block/zram/Kconfig | 35
-rw-r--r-- drivers/block/zram/Makefile | 4
-rw-r--r-- drivers/block/zram/zcomp.c | 401
-rw-r--r-- drivers/block/zram/zcomp.h | 48
-rw-r--r-- drivers/block/zram/zcomp_lz4.c | 66
-rw-r--r-- drivers/block/zram/zcomp_lz4.h | 17
-rw-r--r-- drivers/block/zram/zcomp_lzo.c | 66
-rw-r--r-- drivers/block/zram/zcomp_lzo.h | 17
-rw-r--r-- drivers/block/zram/zram_drv.c | 1393
-rw-r--r-- drivers/block/zram/zram_drv.h | 46
-rw-r--r-- drivers/dma/k3dma.c | 2
-rw-r--r-- drivers/gpu/drm/armada/armada_hw.h | 1
-rw-r--r-- drivers/gpu/drm/armada/armada_overlay.c | 30
-rw-r--r-- drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 6
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_gsc.c | 29
-rw-r--r-- drivers/gpu/drm/exynos/regs-gsc.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_gem.c | 4
-rw-r--r-- drivers/hid/wacom_wac.c | 10
-rw-r--r-- drivers/i2c/busses/i2c-imx.c | 8
-rw-r--r-- drivers/isdn/i4l/isdn_common.c | 8
-rw-r--r-- drivers/md/raid10.c | 7
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 6
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 6
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 4
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_clsf.c | 3
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_main.c | 3
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 12
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_main.c | 12
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 2
-rw-r--r-- drivers/net/ethernet/qualcomm/qca_spi.c | 21
-rw-r--r-- drivers/net/ethernet/ti/davinci_emac.c | 4
-rw-r--r-- drivers/net/hamradio/bpqether.c | 8
-rw-r--r-- drivers/net/ieee802154/at86rf230.c | 15
-rw-r--r-- drivers/net/ieee802154/fakelb.c | 2
-rw-r--r-- drivers/net/usb/rtl8150.c | 2
-rw-r--r-- drivers/net/usb/smsc75xx.c | 62
-rw-r--r-- drivers/net/wireless/brcm80211/brcmfmac/sdio.c | 7
-rw-r--r-- drivers/pci/hotplug/pci_hotplug_core.c | 9
-rw-r--r-- drivers/pci/hotplug/pciehp.h | 1
-rw-r--r-- drivers/pci/hotplug/pciehp_core.c | 7
-rw-r--r-- drivers/pci/hotplug/pciehp_hpc.c | 5
-rw-r--r-- drivers/pci/probe.c | 4
-rw-r--r-- drivers/scsi/xen-scsifront.c | 33
-rw-r--r-- drivers/staging/android/ion/ion.c | 3
-rw-r--r-- drivers/tty/serial/8250/8250_dw.c | 2
-rw-r--r-- drivers/usb/dwc2/gadget.c | 7
-rw-r--r-- drivers/usb/dwc2/hcd_intr.c | 3
-rw-r--r-- drivers/usb/gadget/composite.c | 3
-rw-r--r-- drivers/usb/host/xhci.c | 7
-rw-r--r-- drivers/usb/serial/option.c | 4
-rw-r--r-- drivers/usb/serial/sierra.c | 4
54 files changed, 1445 insertions, 1045 deletions
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index e3322adaaae0..4f07029de209 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -124,6 +124,12 @@ void __init acpi_nvs_nosave_s3(void)
nvs_nosave_s3 = true;
}
+static int __init init_nvs_save_s3(const struct dmi_system_id *d)
+{
+ nvs_nosave_s3 = false;
+ return 0;
+}
+
/*
* ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
* user to request that behavior by using the 'acpi_old_suspend_ordering'
@@ -318,6 +324,27 @@ static struct dmi_system_id acpisleep_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
},
},
+ {
+ .callback = init_nvs_save_s3,
+ .ident = "Asus 1025C",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "1025C"),
+ },
+ },
+ /*
+ * https://bugzilla.kernel.org/show_bug.cgi?id=189431
+	 * The Lenovo G50-45 is a post-2012 platform, but it still needs
+	 * NVS memory saving during S3.
+ */
+ {
+ .callback = init_nvs_save_s3,
+ .ident = "Lenovo G50-45",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "80E3"),
+ },
+ },
{},
};
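
For context, a DMI quirk table like acpisleep_dmi_table above is consulted once during early boot: dmi_check_system() walks the table and invokes the ->callback of every entry whose ->matches all match the running platform. A minimal sketch of that flow; the caller name below is illustrative, the real call site in sleep.c is not shown in this hunk:

#include <linux/dmi.h>

static int __init example_sleep_dmi_check(void)
{
	/*
	 * On an Asus 1025C or a Lenovo G50-45, dmi_check_system() ends
	 * up calling init_nvs_save_s3(), re-enabling NVS saving for S3.
	 */
	dmi_check_system(acpisleep_dmi_table);
	return 0;
}
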
diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig
index 386ba3d1a6ee..cb53957d58f9 100644
--- a/drivers/block/zram/Kconfig
+++ b/drivers/block/zram/Kconfig
@@ -1,8 +1,7 @@
config ZRAM
tristate "Compressed RAM block device support"
- depends on BLOCK && SYSFS && ZSMALLOC
- select LZO_COMPRESS
- select LZO_DECOMPRESS
+ depends on BLOCK && SYSFS && ZSMALLOC && CRYPTO
+ select CRYPTO_LZO
default n
help
Creates virtual block devices called /dev/zramX (X = 0, 1, ...).
@@ -13,14 +12,26 @@ config ZRAM
It has several use cases, for example: /tmp storage, use as swap
disks and maybe many more.
- See zram.txt for more information.
+ See Documentation/blockdev/zram.txt for more information.
-config ZRAM_LZ4_COMPRESS
- bool "Enable LZ4 algorithm support"
- depends on ZRAM
- select LZ4_COMPRESS
- select LZ4_DECOMPRESS
- default n
+config ZRAM_WRITEBACK
+ bool "Write back incompressible page to backing device"
+ depends on ZRAM
+ default n
+ help
+	  An incompressible page yields no memory saving when kept in
+	  memory, so write it out to a backing device instead.
+	  For this feature, the admin should set up a backing device via
+	  /sys/block/zramX/backing_dev.
+
+ See Documentation/blockdev/zram.txt for more information.
+
+config ZRAM_MEMORY_TRACKING
+ bool "Track zRam block status"
+ depends on ZRAM && DEBUG_FS
help
-	  This option enables LZ4 compression algorithm support. Compression
-	  algorithm can be changed using `comp_algorithm' device attribute.
\ No newline at end of file
+	  With this feature, the admin can track the state of zRAM's
+	  allocated blocks via
+	  /sys/kernel/debug/zram/zramX/block_state.
+
+ See Documentation/blockdev/zram.txt for more information.
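
As the ZRAM_WRITEBACK help text above notes, writeback is armed from user space through /sys/block/zramX/backing_dev, and this must happen before the device is initialized (backing_dev_store returns -EBUSY afterwards). A hypothetical userspace sketch of that step; the zram0 device and the /dev/sdb1 backing partition are assumptions, not part of this patch:

#include <stdio.h>

int main(void)
{
	/* Must run before the zram device is sized/initialized. */
	FILE *f = fopen("/sys/block/zram0/backing_dev", "w");

	if (!f)
		return 1;
	fputs("/dev/sdb1", f);	/* assumed spare partition */
	return fclose(f) ? 1 : 0;
}
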
diff --git a/drivers/block/zram/Makefile b/drivers/block/zram/Makefile
index be0763ff57a2..9e2b79e9a990 100644
--- a/drivers/block/zram/Makefile
+++ b/drivers/block/zram/Makefile
@@ -1,5 +1,3 @@
-zram-y := zcomp_lzo.o zcomp.o zram_drv.o
-
-zram-$(CONFIG_ZRAM_LZ4_COMPRESS) += zcomp_lz4.o
+zram-y := zcomp.o zram_drv.o
obj-$(CONFIG_ZRAM) += zram.o
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index c53617752b93..c084a7f9763d 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -13,315 +13,233 @@
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
+#include <linux/cpu.h>
+#include <linux/crypto.h>
#include "zcomp.h"
-#include "zcomp_lzo.h"
-#ifdef CONFIG_ZRAM_LZ4_COMPRESS
-#include "zcomp_lz4.h"
-#endif
-
-/*
- * single zcomp_strm backend
- */
-struct zcomp_strm_single {
- struct mutex strm_lock;
- struct zcomp_strm *zstrm;
-};
-
-/*
- * multi zcomp_strm backend
- */
-struct zcomp_strm_multi {
- /* protect strm list */
- spinlock_t strm_lock;
- /* max possible number of zstrm streams */
- int max_strm;
- /* number of available zstrm streams */
- int avail_strm;
- /* list of available strms */
- struct list_head idle_strm;
- wait_queue_head_t strm_wait;
-};
-static struct zcomp_backend *backends[] = {
- &zcomp_lzo,
-#ifdef CONFIG_ZRAM_LZ4_COMPRESS
- &zcomp_lz4,
+static const char * const backends[] = {
+ "lzo",
+#if IS_ENABLED(CONFIG_CRYPTO_LZ4)
+ "lz4",
+#endif
+#if IS_ENABLED(CONFIG_CRYPTO_DEFLATE)
+ "deflate",
+#endif
+#if IS_ENABLED(CONFIG_CRYPTO_LZ4HC)
+ "lz4hc",
+#endif
+#if IS_ENABLED(CONFIG_CRYPTO_842)
+ "842",
+#endif
+#if IS_ENABLED(CONFIG_CRYPTO_ZSTD)
+ "zstd",
#endif
NULL
};
-static struct zcomp_backend *find_backend(const char *compress)
+static void zcomp_strm_free(struct zcomp_strm *zstrm)
{
- int i = 0;
- while (backends[i]) {
- if (sysfs_streq(compress, backends[i]->name))
- break;
- i++;
- }
- return backends[i];
-}
-
-static void zcomp_strm_free(struct zcomp *comp, struct zcomp_strm *zstrm)
-{
- if (zstrm->private)
- comp->backend->destroy(zstrm->private);
+ if (!IS_ERR_OR_NULL(zstrm->tfm))
+ crypto_free_comp(zstrm->tfm);
free_pages((unsigned long)zstrm->buffer, 1);
kfree(zstrm);
}
/*
- * allocate new zcomp_strm structure with ->private initialized by
+ * allocate new zcomp_strm structure with ->tfm initialized by
* backend, return NULL on error
*/
static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp)
{
- struct zcomp_strm *zstrm = kmalloc(sizeof(*zstrm), GFP_NOIO);
+ struct zcomp_strm *zstrm = kmalloc(sizeof(*zstrm), GFP_KERNEL);
if (!zstrm)
return NULL;
- zstrm->private = comp->backend->create();
+ zstrm->tfm = crypto_alloc_comp(comp->name, 0, 0);
/*
* allocate 2 pages. 1 for compressed data, plus 1 extra for the
* case when compressed size is larger than the original one
*/
- zstrm->buffer = (void *)__get_free_pages(GFP_NOIO | __GFP_ZERO, 1);
- if (!zstrm->private || !zstrm->buffer) {
- zcomp_strm_free(comp, zstrm);
+ zstrm->buffer = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
+ if (IS_ERR_OR_NULL(zstrm->tfm) || !zstrm->buffer) {
+ zcomp_strm_free(zstrm);
zstrm = NULL;
}
return zstrm;
}
-/*
- * get idle zcomp_strm or wait until other process release
- * (zcomp_strm_release()) one for us
- */
-static struct zcomp_strm *zcomp_strm_multi_find(struct zcomp *comp)
+bool zcomp_available_algorithm(const char *comp)
{
- struct zcomp_strm_multi *zs = comp->stream;
- struct zcomp_strm *zstrm;
-
- while (1) {
- spin_lock(&zs->strm_lock);
- if (!list_empty(&zs->idle_strm)) {
- zstrm = list_entry(zs->idle_strm.next,
- struct zcomp_strm, list);
- list_del(&zstrm->list);
- spin_unlock(&zs->strm_lock);
- return zstrm;
- }
- /* zstrm streams limit reached, wait for idle stream */
- if (zs->avail_strm >= zs->max_strm) {
- spin_unlock(&zs->strm_lock);
- wait_event(zs->strm_wait, !list_empty(&zs->idle_strm));
- continue;
- }
- /* allocate new zstrm stream */
- zs->avail_strm++;
- spin_unlock(&zs->strm_lock);
-
- zstrm = zcomp_strm_alloc(comp);
- if (!zstrm) {
- spin_lock(&zs->strm_lock);
- zs->avail_strm--;
- spin_unlock(&zs->strm_lock);
- wait_event(zs->strm_wait, !list_empty(&zs->idle_strm));
- continue;
- }
- break;
- }
- return zstrm;
-}
+ int i = 0;
-/* add stream back to idle list and wake up waiter or free the stream */
-static void zcomp_strm_multi_release(struct zcomp *comp, struct zcomp_strm *zstrm)
-{
- struct zcomp_strm_multi *zs = comp->stream;
-
- spin_lock(&zs->strm_lock);
- if (zs->avail_strm <= zs->max_strm) {
- list_add(&zstrm->list, &zs->idle_strm);
- spin_unlock(&zs->strm_lock);
- wake_up(&zs->strm_wait);
- return;
+ while (backends[i]) {
+ if (sysfs_streq(comp, backends[i]))
+ return true;
+ i++;
}
- zs->avail_strm--;
- spin_unlock(&zs->strm_lock);
- zcomp_strm_free(comp, zstrm);
-}
-
-/* change max_strm limit */
-static bool zcomp_strm_multi_set_max_streams(struct zcomp *comp, int num_strm)
-{
- struct zcomp_strm_multi *zs = comp->stream;
- struct zcomp_strm *zstrm;
-
- spin_lock(&zs->strm_lock);
- zs->max_strm = num_strm;
/*
- * if user has lowered the limit and there are idle streams,
- * immediately free as much streams (and memory) as we can.
+	 * The crypto API does not ignore a trailing newline, so make
+	 * sure you don't supply a string containing one.
+	 * This also means that we permit zcomp initialisation with any
+	 * compression algorithm known to the crypto API.
*/
- while (zs->avail_strm > num_strm && !list_empty(&zs->idle_strm)) {
- zstrm = list_entry(zs->idle_strm.next,
- struct zcomp_strm, list);
- list_del(&zstrm->list);
- zcomp_strm_free(comp, zstrm);
- zs->avail_strm--;
- }
- spin_unlock(&zs->strm_lock);
- return true;
+ return crypto_has_comp(comp, 0, 0) == 1;
}
-static void zcomp_strm_multi_destroy(struct zcomp *comp)
+/* show available compressors */
+ssize_t zcomp_available_show(const char *comp, char *buf)
{
- struct zcomp_strm_multi *zs = comp->stream;
- struct zcomp_strm *zstrm;
+ bool known_algorithm = false;
+ ssize_t sz = 0;
+ int i = 0;
- while (!list_empty(&zs->idle_strm)) {
- zstrm = list_entry(zs->idle_strm.next,
- struct zcomp_strm, list);
- list_del(&zstrm->list);
- zcomp_strm_free(comp, zstrm);
+ for (; backends[i]; i++) {
+ if (!strcmp(comp, backends[i])) {
+ known_algorithm = true;
+ sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
+ "[%s] ", backends[i]);
+ } else {
+ sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
+ "%s ", backends[i]);
+ }
}
- kfree(zs);
-}
-static int zcomp_strm_multi_create(struct zcomp *comp, int max_strm)
-{
- struct zcomp_strm *zstrm;
- struct zcomp_strm_multi *zs;
-
- comp->destroy = zcomp_strm_multi_destroy;
- comp->strm_find = zcomp_strm_multi_find;
- comp->strm_release = zcomp_strm_multi_release;
- comp->set_max_streams = zcomp_strm_multi_set_max_streams;
- zs = kmalloc(sizeof(struct zcomp_strm_multi), GFP_KERNEL);
- if (!zs)
- return -ENOMEM;
-
- comp->stream = zs;
- spin_lock_init(&zs->strm_lock);
- INIT_LIST_HEAD(&zs->idle_strm);
- init_waitqueue_head(&zs->strm_wait);
- zs->max_strm = max_strm;
- zs->avail_strm = 1;
+ /*
+ * Out-of-tree module known to crypto api or a missing
+ * entry in `backends'.
+ */
+ if (!known_algorithm && crypto_has_comp(comp, 0, 0) == 1)
+ sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
+ "[%s] ", comp);
- zstrm = zcomp_strm_alloc(comp);
- if (!zstrm) {
- kfree(zs);
- return -ENOMEM;
- }
- list_add(&zstrm->list, &zs->idle_strm);
- return 0;
+ sz += scnprintf(buf + sz, PAGE_SIZE - sz, "\n");
+ return sz;
}
-static struct zcomp_strm *zcomp_strm_single_find(struct zcomp *comp)
+struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
{
- struct zcomp_strm_single *zs = comp->stream;
- mutex_lock(&zs->strm_lock);
- return zs->zstrm;
+ return *get_cpu_ptr(comp->stream);
}
-static void zcomp_strm_single_release(struct zcomp *comp,
- struct zcomp_strm *zstrm)
+void zcomp_stream_put(struct zcomp *comp)
{
- struct zcomp_strm_single *zs = comp->stream;
- mutex_unlock(&zs->strm_lock);
+ put_cpu_ptr(comp->stream);
}
-static bool zcomp_strm_single_set_max_streams(struct zcomp *comp, int num_strm)
+int zcomp_compress(struct zcomp_strm *zstrm,
+ const void *src, unsigned int *dst_len)
{
- /* zcomp_strm_single support only max_comp_streams == 1 */
- return false;
-}
+	/*
+	 * Our dst memory (zstrm->buffer) is always `2 * PAGE_SIZE' sized
+	 * because we can sometimes end up with compressed data that is
+	 * bigger than the original, e.g. when a compression algorithm
+	 * adds padding to its output. Speaking of padding, the `842'
+	 * algorithm pads the compressed length to a multiple of 8 and
+	 * returns -ENOSPC when the dst memory is not big enough, which
+	 * is not something that ZRAM wants to see. We can handle the
+	 * `compressed_size > PAGE_SIZE' case easily in ZRAM, but once we
+	 * receive an error from the compressing backend we can't help it
+	 * anymore. To make `842' happy we need to tell it the exact size
+	 * of the dst buffer; zram_drv will take care of the fact that the
+	 * compressed buffer is too big.
+	 */
+ *dst_len = PAGE_SIZE * 2;
-static void zcomp_strm_single_destroy(struct zcomp *comp)
-{
- struct zcomp_strm_single *zs = comp->stream;
- zcomp_strm_free(comp, zs->zstrm);
- kfree(zs);
+ return crypto_comp_compress(zstrm->tfm,
+ src, PAGE_SIZE,
+ zstrm->buffer, dst_len);
}
-static int zcomp_strm_single_create(struct zcomp *comp)
+int zcomp_decompress(struct zcomp_strm *zstrm,
+ const void *src, unsigned int src_len, void *dst)
{
- struct zcomp_strm_single *zs;
-
- comp->destroy = zcomp_strm_single_destroy;
- comp->strm_find = zcomp_strm_single_find;
- comp->strm_release = zcomp_strm_single_release;
- comp->set_max_streams = zcomp_strm_single_set_max_streams;
- zs = kmalloc(sizeof(struct zcomp_strm_single), GFP_KERNEL);
- if (!zs)
- return -ENOMEM;
+ unsigned int dst_len = PAGE_SIZE;
- comp->stream = zs;
- mutex_init(&zs->strm_lock);
- zs->zstrm = zcomp_strm_alloc(comp);
- if (!zs->zstrm) {
- kfree(zs);
- return -ENOMEM;
- }
- return 0;
+ return crypto_comp_decompress(zstrm->tfm,
+ src, src_len,
+ dst, &dst_len);
}
-/* show available compressors */
-ssize_t zcomp_available_show(const char *comp, char *buf)
+static int __zcomp_cpu_notifier(struct zcomp *comp,
+ unsigned long action, unsigned long cpu)
{
- ssize_t sz = 0;
- int i = 0;
+ struct zcomp_strm *zstrm;
- while (backends[i]) {
- if (!strcmp(comp, backends[i]->name))
- sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
- "[%s] ", backends[i]->name);
- else
- sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
- "%s ", backends[i]->name);
- i++;
+ switch (action) {
+ case CPU_UP_PREPARE:
+ if (WARN_ON(*per_cpu_ptr(comp->stream, cpu)))
+ break;
+ zstrm = zcomp_strm_alloc(comp);
+ if (IS_ERR_OR_NULL(zstrm)) {
+ pr_err("Can't allocate a compression stream\n");
+ return NOTIFY_BAD;
+ }
+ *per_cpu_ptr(comp->stream, cpu) = zstrm;
+ break;
+ case CPU_DEAD:
+ case CPU_UP_CANCELED:
+ zstrm = *per_cpu_ptr(comp->stream, cpu);
+ if (!IS_ERR_OR_NULL(zstrm))
+ zcomp_strm_free(zstrm);
+ *per_cpu_ptr(comp->stream, cpu) = NULL;
+ break;
+ default:
+ break;
}
- sz += scnprintf(buf + sz, PAGE_SIZE - sz, "\n");
- return sz;
+ return NOTIFY_OK;
}
-bool zcomp_available_algorithm(const char *comp)
+static int zcomp_cpu_notifier(struct notifier_block *nb,
+ unsigned long action, void *pcpu)
{
- return find_backend(comp) != NULL;
-}
+ unsigned long cpu = (unsigned long)pcpu;
+ struct zcomp *comp = container_of(nb, typeof(*comp), notifier);
-bool zcomp_set_max_streams(struct zcomp *comp, int num_strm)
-{
- return comp->set_max_streams(comp, num_strm);
+ return __zcomp_cpu_notifier(comp, action, cpu);
}
-struct zcomp_strm *zcomp_strm_find(struct zcomp *comp)
+static int zcomp_init(struct zcomp *comp)
{
- return comp->strm_find(comp);
-}
+ unsigned long cpu;
+ int ret;
-void zcomp_strm_release(struct zcomp *comp, struct zcomp_strm *zstrm)
-{
- comp->strm_release(comp, zstrm);
-}
+ comp->notifier.notifier_call = zcomp_cpu_notifier;
-int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
- const unsigned char *src, size_t *dst_len)
-{
- return comp->backend->compress(src, zstrm->buffer, dst_len,
- zstrm->private);
-}
+ comp->stream = alloc_percpu(struct zcomp_strm *);
+ if (!comp->stream)
+ return -ENOMEM;
-int zcomp_decompress(struct zcomp *comp, const unsigned char *src,
- size_t src_len, unsigned char *dst)
-{
- return comp->backend->decompress(src, src_len, dst);
+ cpu_notifier_register_begin();
+ for_each_online_cpu(cpu) {
+ ret = __zcomp_cpu_notifier(comp, CPU_UP_PREPARE, cpu);
+ if (ret == NOTIFY_BAD)
+ goto cleanup;
+ }
+ __register_cpu_notifier(&comp->notifier);
+ cpu_notifier_register_done();
+ return 0;
+
+cleanup:
+ for_each_online_cpu(cpu)
+ __zcomp_cpu_notifier(comp, CPU_UP_CANCELED, cpu);
+ cpu_notifier_register_done();
+ return -ENOMEM;
}
void zcomp_destroy(struct zcomp *comp)
{
- comp->destroy(comp);
+ unsigned long cpu;
+
+ cpu_notifier_register_begin();
+ for_each_online_cpu(cpu)
+ __zcomp_cpu_notifier(comp, CPU_UP_CANCELED, cpu);
+ __unregister_cpu_notifier(&comp->notifier);
+ cpu_notifier_register_done();
+
+ free_percpu(comp->stream);
kfree(comp);
}
@@ -331,27 +249,22 @@ void zcomp_destroy(struct zcomp *comp)
* backend pointer or ERR_PTR if things went bad. ERR_PTR(-EINVAL)
* if requested algorithm is not supported, ERR_PTR(-ENOMEM) in
* case of allocation error, or any other error potentially
- * returned by functions zcomp_strm_{multi,single}_create.
+ * returned by zcomp_init().
*/
-struct zcomp *zcomp_create(const char *compress, int max_strm)
+struct zcomp *zcomp_create(const char *compress)
{
struct zcomp *comp;
- struct zcomp_backend *backend;
int error;
- backend = find_backend(compress);
- if (!backend)
+ if (!zcomp_available_algorithm(compress))
return ERR_PTR(-EINVAL);
comp = kzalloc(sizeof(struct zcomp), GFP_KERNEL);
if (!comp)
return ERR_PTR(-ENOMEM);
- comp->backend = backend;
- if (max_strm > 1)
- error = zcomp_strm_multi_create(comp, max_strm);
- else
- error = zcomp_strm_single_create(comp);
+ comp->name = compress;
+ error = zcomp_init(comp);
if (error) {
kfree(comp);
return ERR_PTR(error);
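
Taken together, the zcomp.c rework drops the single/multi stream machinery in favour of one pre-allocated stream per CPU, handed out by zcomp_stream_get()/zcomp_stream_put() (which pin the caller to a CPU via get_cpu_ptr()/put_cpu_ptr()). A minimal sketch of how a caller such as zram_drv is expected to drive the new interface, using only the signatures from the hunks above; error handling is trimmed:

static int example_compress_page(struct zcomp *comp, const void *src)
{
	struct zcomp_strm *zstrm;
	unsigned int comp_len;
	int ret;

	zstrm = zcomp_stream_get(comp);	/* disables preemption */
	ret = zcomp_compress(zstrm, src, &comp_len);
	/* on success, zstrm->buffer holds comp_len compressed bytes */
	zcomp_stream_put(comp);		/* re-enables preemption */
	return ret;
}
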
diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
index 46e2b9f8f1f0..478cac2ed465 100644
--- a/drivers/block/zram/zcomp.h
+++ b/drivers/block/zram/zcomp.h
@@ -10,60 +10,34 @@
#ifndef _ZCOMP_H_
#define _ZCOMP_H_
-#include <linux/mutex.h>
-
struct zcomp_strm {
/* compression/decompression buffer */
void *buffer;
- /*
- * The private data of the compression stream, only compression
- * stream backend can touch this (e.g. compression algorithm
- * working memory)
- */
- void *private;
- /* used in multi stream backend, protected by backend strm_lock */
- struct list_head list;
-};
-
-/* static compression backend */
-struct zcomp_backend {
- int (*compress)(const unsigned char *src, unsigned char *dst,
- size_t *dst_len, void *private);
-
- int (*decompress)(const unsigned char *src, size_t src_len,
- unsigned char *dst);
-
- void *(*create)(void);
- void (*destroy)(void *private);
-
- const char *name;
+ struct crypto_comp *tfm;
};
/* dynamic per-device compression frontend */
struct zcomp {
- void *stream;
- struct zcomp_backend *backend;
+ struct zcomp_strm * __percpu *stream;
+ struct notifier_block notifier;
- struct zcomp_strm *(*strm_find)(struct zcomp *comp);
- void (*strm_release)(struct zcomp *comp, struct zcomp_strm *zstrm);
- bool (*set_max_streams)(struct zcomp *comp, int num_strm);
- void (*destroy)(struct zcomp *comp);
+ const char *name;
};
ssize_t zcomp_available_show(const char *comp, char *buf);
bool zcomp_available_algorithm(const char *comp);
-struct zcomp *zcomp_create(const char *comp, int max_strm);
+struct zcomp *zcomp_create(const char *comp);
void zcomp_destroy(struct zcomp *comp);
-struct zcomp_strm *zcomp_strm_find(struct zcomp *comp);
-void zcomp_strm_release(struct zcomp *comp, struct zcomp_strm *zstrm);
+struct zcomp_strm *zcomp_stream_get(struct zcomp *comp);
+void zcomp_stream_put(struct zcomp *comp);
-int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
- const unsigned char *src, size_t *dst_len);
+int zcomp_compress(struct zcomp_strm *zstrm,
+ const void *src, unsigned int *dst_len);
-int zcomp_decompress(struct zcomp *comp, const unsigned char *src,
- size_t src_len, unsigned char *dst);
+int zcomp_decompress(struct zcomp_strm *zstrm,
+ const void *src, unsigned int src_len, void *dst);
bool zcomp_set_max_streams(struct zcomp *comp, int num_strm);
#endif /* _ZCOMP_H_ */
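
Underneath, zcomp_strm->tfm is now a plain crypto "comp" transform, which is what makes the per-algorithm backend files deleted below redundant. A self-contained sketch of the crypto API round trip that zcomp wraps, assuming CONFIG_CRYPTO_LZO is available; the caller provides src (one page), a 2 * PAGE_SIZE tmp buffer (mirroring zstrm->buffer) and a one-page dst:

#include <linux/crypto.h>
#include <linux/err.h>

static int example_comp_roundtrip(const u8 *src, u8 *tmp, u8 *dst)
{
	struct crypto_comp *tfm = crypto_alloc_comp("lzo", 0, 0);
	unsigned int comp_len = 2 * PAGE_SIZE;
	unsigned int out_len = PAGE_SIZE;
	int ret;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_comp_compress(tfm, src, PAGE_SIZE, tmp, &comp_len);
	if (!ret)
		ret = crypto_comp_decompress(tfm, tmp, comp_len, dst, &out_len);

	crypto_free_comp(tfm);
	return ret;
}
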
diff --git a/drivers/block/zram/zcomp_lz4.c b/drivers/block/zram/zcomp_lz4.c
deleted file mode 100644
index dd6083124276..000000000000
--- a/drivers/block/zram/zcomp_lz4.c
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (C) 2014 Sergey Senozhatsky.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/lz4.h>
-#include <linux/vmalloc.h>
-#include <linux/mm.h>
-
-#include "zcomp_lz4.h"
-
-static void *zcomp_lz4_create(void)
-{
- void *ret;
-
- /*
- * This function can be called in swapout/fs write path
- * so we can't use GFP_FS|IO. And it assumes we already
- * have at least one stream in zram initialization so we
- * don't do best effort to allocate more stream in here.
- * A default stream will work well without further multiple
- * streams. That's why we use NORETRY | NOWARN.
- */
- ret = kzalloc(LZ4_MEM_COMPRESS, GFP_NOIO | __GFP_NORETRY |
- __GFP_NOWARN);
- if (!ret)
- ret = __vmalloc(LZ4_MEM_COMPRESS,
- GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN |
- __GFP_ZERO | __GFP_HIGHMEM,
- PAGE_KERNEL);
- return ret;
-}
-
-static void zcomp_lz4_destroy(void *private)
-{
- kvfree(private);
-}
-
-static int zcomp_lz4_compress(const unsigned char *src, unsigned char *dst,
- size_t *dst_len, void *private)
-{
- /* return : Success if return 0 */
- return lz4_compress(src, PAGE_SIZE, dst, dst_len, private);
-}
-
-static int zcomp_lz4_decompress(const unsigned char *src, size_t src_len,
- unsigned char *dst)
-{
- size_t dst_len = PAGE_SIZE;
- /* return : Success if return 0 */
- return lz4_decompress_unknownoutputsize(src, src_len, dst, &dst_len);
-}
-
-struct zcomp_backend zcomp_lz4 = {
- .compress = zcomp_lz4_compress,
- .decompress = zcomp_lz4_decompress,
- .create = zcomp_lz4_create,
- .destroy = zcomp_lz4_destroy,
- .name = "lz4",
-};
diff --git a/drivers/block/zram/zcomp_lz4.h b/drivers/block/zram/zcomp_lz4.h
deleted file mode 100644
index 60613fb29dd8..000000000000
--- a/drivers/block/zram/zcomp_lz4.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Copyright (C) 2014 Sergey Senozhatsky.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _ZCOMP_LZ4_H_
-#define _ZCOMP_LZ4_H_
-
-#include "zcomp.h"
-
-extern struct zcomp_backend zcomp_lz4;
-
-#endif /* _ZCOMP_LZ4_H_ */
diff --git a/drivers/block/zram/zcomp_lzo.c b/drivers/block/zram/zcomp_lzo.c
deleted file mode 100644
index edc549920fa0..000000000000
--- a/drivers/block/zram/zcomp_lzo.c
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (C) 2014 Sergey Senozhatsky.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/lzo.h>
-#include <linux/vmalloc.h>
-#include <linux/mm.h>
-
-#include "zcomp_lzo.h"
-
-static void *lzo_create(void)
-{
- void *ret;
-
- /*
- * This function can be called in swapout/fs write path
- * so we can't use GFP_FS|IO. And it assumes we already
- * have at least one stream in zram initialization so we
- * don't do best effort to allocate more stream in here.
- * A default stream will work well without further multiple
- * streams. That's why we use NORETRY | NOWARN.
- */
- ret = kzalloc(LZO1X_MEM_COMPRESS, GFP_NOIO | __GFP_NORETRY |
- __GFP_NOWARN);
- if (!ret)
- ret = __vmalloc(LZO1X_MEM_COMPRESS,
- GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN |
- __GFP_ZERO | __GFP_HIGHMEM,
- PAGE_KERNEL);
- return ret;
-}
-
-static void lzo_destroy(void *private)
-{
- kvfree(private);
-}
-
-static int lzo_compress(const unsigned char *src, unsigned char *dst,
- size_t *dst_len, void *private)
-{
- int ret = lzo1x_1_compress(src, PAGE_SIZE, dst, dst_len, private);
- return ret == LZO_E_OK ? 0 : ret;
-}
-
-static int lzo_decompress(const unsigned char *src, size_t src_len,
- unsigned char *dst)
-{
- size_t dst_len = PAGE_SIZE;
- int ret = lzo1x_decompress_safe(src, src_len, dst, &dst_len);
- return ret == LZO_E_OK ? 0 : ret;
-}
-
-struct zcomp_backend zcomp_lzo = {
- .compress = lzo_compress,
- .decompress = lzo_decompress,
- .create = lzo_create,
- .destroy = lzo_destroy,
- .name = "lzo",
-};
diff --git a/drivers/block/zram/zcomp_lzo.h b/drivers/block/zram/zcomp_lzo.h
deleted file mode 100644
index 128c5807fa14..000000000000
--- a/drivers/block/zram/zcomp_lzo.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Copyright (C) 2014 Sergey Senozhatsky.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _ZCOMP_LZO_H_
-#define _ZCOMP_LZO_H_
-
-#include "zcomp.h"
-
-extern struct zcomp_backend zcomp_lzo;
-
-#endif /* _ZCOMP_LZO_H_ */
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index bbdf32de1452..74d18b3d729b 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -25,11 +25,13 @@
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
+#include <linux/backing-dev.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/sysfs.h>
+#include <linux/debugfs.h>
#include "zram_drv.h"
@@ -40,83 +42,102 @@ static DEFINE_MUTEX(zram_index_mutex);
static int zram_major;
static const char *default_compressor = "lzo";
-/*
- * We don't need to see memory allocation errors more than once every 1
- * second to know that a problem is occurring.
- */
-#define ALLOC_ERROR_LOG_RATE_MS 1000
-
-
/* Module params (documentation at end) */
static unsigned int num_devices = 1;
-static inline void deprecated_attr_warn(const char *name)
+static void zram_free_page(struct zram *zram, size_t index);
+
+static void zram_slot_lock(struct zram *zram, u32 index)
{
- pr_warn_once("%d (%s) Attribute %s (and others) will be removed. %s\n",
- task_pid_nr(current),
- current->comm,
- name,
- "See zram documentation.");
+ bit_spin_lock(ZRAM_LOCK, &zram->table[index].value);
}
-#define ZRAM_ATTR_RO(name) \
-static ssize_t name##_show(struct device *d, \
- struct device_attribute *attr, char *b) \
-{ \
- struct zram *zram = dev_to_zram(d); \
- \
- deprecated_attr_warn(__stringify(name)); \
- return scnprintf(b, PAGE_SIZE, "%llu\n", \
- (u64)atomic64_read(&zram->stats.name)); \
-} \
-static DEVICE_ATTR_RO(name);
+static void zram_slot_unlock(struct zram *zram, u32 index)
+{
+ bit_spin_unlock(ZRAM_LOCK, &zram->table[index].value);
+}
static inline bool init_done(struct zram *zram)
{
return zram->disksize;
}
+static inline bool zram_allocated(struct zram *zram, u32 index)
+{
+ return (zram->table[index].value >> (ZRAM_FLAG_SHIFT + 1)) ||
+ zram->table[index].handle;
+}
+
static inline struct zram *dev_to_zram(struct device *dev)
{
return (struct zram *)dev_to_disk(dev)->private_data;
}
+static unsigned long zram_get_handle(struct zram *zram, u32 index)
+{
+ return zram->table[index].handle;
+}
+
+static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
+{
+ zram->table[index].handle = handle;
+}
+
/* flag operations require table entry bit_spin_lock() being held */
-static int zram_test_flag(struct zram_meta *meta, u32 index,
+static bool zram_test_flag(struct zram *zram, u32 index,
enum zram_pageflags flag)
{
- return meta->table[index].value & BIT(flag);
+ return zram->table[index].value & BIT(flag);
}
-static void zram_set_flag(struct zram_meta *meta, u32 index,
+static void zram_set_flag(struct zram *zram, u32 index,
enum zram_pageflags flag)
{
- meta->table[index].value |= BIT(flag);
+ zram->table[index].value |= BIT(flag);
}
-static void zram_clear_flag(struct zram_meta *meta, u32 index,
+static void zram_clear_flag(struct zram *zram, u32 index,
enum zram_pageflags flag)
{
- meta->table[index].value &= ~BIT(flag);
+ zram->table[index].value &= ~BIT(flag);
+}
+
+static inline void zram_set_element(struct zram *zram, u32 index,
+ unsigned long element)
+{
+ zram->table[index].element = element;
+}
+
+static unsigned long zram_get_element(struct zram *zram, u32 index)
+{
+ return zram->table[index].element;
}
-static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
+static size_t zram_get_obj_size(struct zram *zram, u32 index)
{
- return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
+ return zram->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}
-static void zram_set_obj_size(struct zram_meta *meta,
+static void zram_set_obj_size(struct zram *zram,
u32 index, size_t size)
{
- unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;
+ unsigned long flags = zram->table[index].value >> ZRAM_FLAG_SHIFT;
- meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
+ zram->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}
+#if PAGE_SIZE != 4096
static inline bool is_partial_io(struct bio_vec *bvec)
{
return bvec->bv_len != PAGE_SIZE;
}
+#else
+static inline bool is_partial_io(struct bio_vec *bvec)
+{
+ return false;
+}
+#endif
/*
* Check if request is within bounds and aligned on zram logical blocks.
@@ -144,8 +165,7 @@ static inline bool valid_io_request(struct zram *zram,
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
- if (*offset + bvec->bv_len >= PAGE_SIZE)
- (*index)++;
+ *index += (*offset + bvec->bv_len) / PAGE_SIZE;
*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}
@@ -164,34 +184,39 @@ static inline void update_used_max(struct zram *zram,
} while (old_max != cur_max);
}
-static bool page_zero_filled(void *ptr)
+static inline void zram_fill_page(char *ptr, unsigned long len,
+ unsigned long value)
+{
+ int i;
+ unsigned long *page = (unsigned long *)ptr;
+
+ WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long)));
+
+ if (likely(value == 0)) {
+ memset(ptr, 0, len);
+ } else {
+ for (i = 0; i < len / sizeof(*page); i++)
+ page[i] = value;
+ }
+}
+
+static bool page_same_filled(void *ptr, unsigned long *element)
{
unsigned int pos;
unsigned long *page;
+ unsigned long val;
page = (unsigned long *)ptr;
+ val = page[0];
- for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
- if (page[pos])
+ for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
+ if (val != page[pos])
return false;
}
- return true;
-}
+ *element = val;
-static void handle_zero_page(struct bio_vec *bvec)
-{
- struct page *page = bvec->bv_page;
- void *user_mem;
-
- user_mem = kmap_atomic(page);
- if (is_partial_io(bvec))
- memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
- else
- clear_page(user_mem);
- kunmap_atomic(user_mem);
-
- flush_dcache_page(page);
+ return true;
}
static ssize_t initstate_show(struct device *dev,
@@ -215,142 +240,535 @@ static ssize_t disksize_show(struct device *dev,
return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}
-static ssize_t orig_data_size_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t mem_limit_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
{
+ u64 limit;
+ char *tmp;
struct zram *zram = dev_to_zram(dev);
- deprecated_attr_warn("orig_data_size");
- return scnprintf(buf, PAGE_SIZE, "%llu\n",
- (u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
+ limit = memparse(buf, &tmp);
+ if (buf == tmp) /* no chars parsed, invalid input */
+ return -EINVAL;
+
+ down_write(&zram->init_lock);
+ zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
+ up_write(&zram->init_lock);
+
+ return len;
}
-static ssize_t mem_used_total_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t mem_used_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
{
- u64 val = 0;
+ int err;
+ unsigned long val;
struct zram *zram = dev_to_zram(dev);
- deprecated_attr_warn("mem_used_total");
+ err = kstrtoul(buf, 10, &val);
+ if (err || val != 0)
+ return -EINVAL;
+
down_read(&zram->init_lock);
if (init_done(zram)) {
- struct zram_meta *meta = zram->meta;
- val = zs_get_total_pages(meta->mem_pool);
+ atomic_long_set(&zram->stats.max_used_pages,
+ zs_get_total_pages(zram->mem_pool));
}
up_read(&zram->init_lock);
- return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
+ return len;
}
-static ssize_t mem_limit_show(struct device *dev,
+#ifdef CONFIG_ZRAM_WRITEBACK
+static bool zram_wb_enabled(struct zram *zram)
+{
+ return zram->backing_dev;
+}
+
+static void reset_bdev(struct zram *zram)
+{
+ struct block_device *bdev;
+
+ if (!zram_wb_enabled(zram))
+ return;
+
+ bdev = zram->bdev;
+ if (zram->old_block_size)
+ set_blocksize(bdev, zram->old_block_size);
+ blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+	/* hope filp_close flushes all of the IO */
+ filp_close(zram->backing_dev, NULL);
+ zram->backing_dev = NULL;
+ zram->old_block_size = 0;
+ zram->bdev = NULL;
+
+ kvfree(zram->bitmap);
+ zram->bitmap = NULL;
+}
+
+static ssize_t backing_dev_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- u64 val;
struct zram *zram = dev_to_zram(dev);
+ struct file *file = zram->backing_dev;
+ char *p;
+ ssize_t ret;
- deprecated_attr_warn("mem_limit");
down_read(&zram->init_lock);
- val = zram->limit_pages;
- up_read(&zram->init_lock);
+ if (!zram_wb_enabled(zram)) {
+ memcpy(buf, "none\n", 5);
+ up_read(&zram->init_lock);
+ return 5;
+ }
- return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
+ p = file_path(file, buf, PAGE_SIZE - 1);
+ if (IS_ERR(p)) {
+ ret = PTR_ERR(p);
+ goto out;
+ }
+
+ ret = strlen(p);
+ memmove(buf, p, ret);
+ buf[ret++] = '\n';
+out:
+ up_read(&zram->init_lock);
+ return ret;
}
-static ssize_t mem_limit_store(struct device *dev,
+static ssize_t backing_dev_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
- u64 limit;
- char *tmp;
+ char *file_name;
+ size_t sz;
+ struct file *backing_dev = NULL;
+ struct inode *inode;
+ struct address_space *mapping;
+ unsigned int bitmap_sz, old_block_size = 0;
+ unsigned long nr_pages, *bitmap = NULL;
+ struct block_device *bdev = NULL;
+ int err;
struct zram *zram = dev_to_zram(dev);
+ gfp_t kmalloc_flags;
- limit = memparse(buf, &tmp);
- if (buf == tmp) /* no chars parsed, invalid input */
- return -EINVAL;
+ file_name = kmalloc(PATH_MAX, GFP_KERNEL);
+ if (!file_name)
+ return -ENOMEM;
down_write(&zram->init_lock);
- zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
+ if (init_done(zram)) {
+ pr_info("Can't setup backing device for initialized device\n");
+ err = -EBUSY;
+ goto out;
+ }
+
+ strlcpy(file_name, buf, PATH_MAX);
+ /* ignore trailing newline */
+ sz = strlen(file_name);
+ if (sz > 0 && file_name[sz - 1] == '\n')
+ file_name[sz - 1] = 0x00;
+
+ backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0);
+ if (IS_ERR(backing_dev)) {
+ err = PTR_ERR(backing_dev);
+ backing_dev = NULL;
+ goto out;
+ }
+
+ mapping = backing_dev->f_mapping;
+ inode = mapping->host;
+
+	/* Only block devices are supported at the moment */
+ if (!S_ISBLK(inode->i_mode)) {
+ err = -ENOTBLK;
+ goto out;
+ }
+
+ bdev = bdgrab(I_BDEV(inode));
+ err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
+ if (err < 0)
+ goto out;
+
+ nr_pages = i_size_read(inode) >> PAGE_SHIFT;
+ bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
+ kmalloc_flags = GFP_KERNEL | __GFP_ZERO;
+ if (bitmap_sz > PAGE_SIZE)
+ kmalloc_flags |= __GFP_NOWARN | __GFP_NORETRY;
+
+ bitmap = kmalloc_node(bitmap_sz, kmalloc_flags, NUMA_NO_NODE);
+ if (!bitmap && bitmap_sz > PAGE_SIZE)
+ bitmap = vzalloc(bitmap_sz);
+
+ if (!bitmap) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ old_block_size = block_size(bdev);
+ err = set_blocksize(bdev, PAGE_SIZE);
+ if (err)
+ goto out;
+
+ reset_bdev(zram);
+ spin_lock_init(&zram->bitmap_lock);
+
+ zram->old_block_size = old_block_size;
+ zram->bdev = bdev;
+ zram->backing_dev = backing_dev;
+ zram->bitmap = bitmap;
+ zram->nr_pages = nr_pages;
up_write(&zram->init_lock);
+ pr_info("setup backing device %s\n", file_name);
+ kfree(file_name);
+
return len;
+out:
+ if (bitmap)
+ kvfree(bitmap);
+
+ if (bdev)
+ blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+
+ if (backing_dev)
+ filp_close(backing_dev, NULL);
+
+ up_write(&zram->init_lock);
+
+ kfree(file_name);
+
+ return err;
}
-static ssize_t mem_used_max_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static unsigned long get_entry_bdev(struct zram *zram)
{
- u64 val = 0;
- struct zram *zram = dev_to_zram(dev);
+ unsigned long entry;
- deprecated_attr_warn("mem_used_max");
- down_read(&zram->init_lock);
- if (init_done(zram))
- val = atomic_long_read(&zram->stats.max_used_pages);
- up_read(&zram->init_lock);
+ spin_lock(&zram->bitmap_lock);
+	/* skip bit 0 so a valid entry is never confused with zram.handle == 0 */
+ entry = find_next_zero_bit(zram->bitmap, zram->nr_pages, 1);
+ if (entry == zram->nr_pages) {
+ spin_unlock(&zram->bitmap_lock);
+ return 0;
+ }
+
+ set_bit(entry, zram->bitmap);
+ spin_unlock(&zram->bitmap_lock);
- return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
+ return entry;
}
-static ssize_t mem_used_max_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+static void put_entry_bdev(struct zram *zram, unsigned long entry)
{
- int err;
- unsigned long val;
- struct zram *zram = dev_to_zram(dev);
+ int was_set;
- err = kstrtoul(buf, 10, &val);
- if (err || val != 0)
- return -EINVAL;
+ spin_lock(&zram->bitmap_lock);
+ was_set = test_and_clear_bit(entry, zram->bitmap);
+ spin_unlock(&zram->bitmap_lock);
+ WARN_ON_ONCE(!was_set);
+}
- down_read(&zram->init_lock);
- if (init_done(zram)) {
- struct zram_meta *meta = zram->meta;
- atomic_long_set(&zram->stats.max_used_pages,
- zs_get_total_pages(meta->mem_pool));
+static void zram_page_end_io(struct bio *bio)
+{
+ struct page *page = bio->bi_io_vec[0].bv_page;
+
+ page_endio(page, bio_data_dir(bio), bio->bi_error);
+ bio_put(bio);
+}
+
+/*
+ * Returns 1 if the submission is successful.
+ */
+static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
+ unsigned long entry, struct bio *parent)
+{
+ struct bio *bio;
+
+ bio = bio_alloc(GFP_ATOMIC, 1);
+ if (!bio)
+ return -ENOMEM;
+
+ bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
+ bio->bi_bdev = zram->bdev;
+ if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) {
+ bio_put(bio);
+ return -EIO;
}
- up_read(&zram->init_lock);
- return len;
+ if (!parent) {
+ bio->bi_rw = 0;
+ bio->bi_end_io = zram_page_end_io;
+ } else {
+ bio->bi_rw = parent->bi_rw;
+ bio_chain(bio, parent);
+ }
+
+ submit_bio(READ, bio);
+ return 1;
}
-static ssize_t max_comp_streams_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+struct zram_work {
+ struct work_struct work;
+ struct zram *zram;
+ unsigned long entry;
+ struct bio *bio;
+};
+
+#if PAGE_SIZE != 4096
+static void zram_sync_read(struct work_struct *work)
{
- int val;
- struct zram *zram = dev_to_zram(dev);
+ struct bio_vec bvec;
+ struct zram_work *zw = container_of(work, struct zram_work, work);
+ struct zram *zram = zw->zram;
+ unsigned long entry = zw->entry;
+ struct bio *bio = zw->bio;
- down_read(&zram->init_lock);
- val = zram->max_comp_streams;
- up_read(&zram->init_lock);
+ read_from_bdev_async(zram, &bvec, entry, bio);
+}
- return scnprintf(buf, PAGE_SIZE, "%d\n", val);
+/*
+ * The block layer wants only one ->make_request_fn active at a time,
+ * so chaining this IO to a parent IO issued from the same context
+ * would deadlock. To avoid that, do the read from a worker thread.
+ */
+static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
+ unsigned long entry, struct bio *bio)
+{
+ struct zram_work work;
+
+ work.zram = zram;
+ work.entry = entry;
+ work.bio = bio;
+
+ INIT_WORK_ONSTACK(&work.work, zram_sync_read);
+ queue_work(system_unbound_wq, &work.work);
+ flush_work(&work.work);
+ destroy_work_on_stack(&work.work);
+
+ return 1;
+}
+#else
+static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
+ unsigned long entry, struct bio *bio)
+{
+ WARN_ON(1);
+ return -EIO;
}
+#endif
-static ssize_t max_comp_streams_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
+ unsigned long entry, struct bio *parent, bool sync)
{
- int num;
- struct zram *zram = dev_to_zram(dev);
- int ret;
+ if (sync)
+ return read_from_bdev_sync(zram, bvec, entry, parent);
+ else
+ return read_from_bdev_async(zram, bvec, entry, parent);
+}
- ret = kstrtoint(buf, 0, &num);
- if (ret < 0)
- return ret;
- if (num < 1)
+static int write_to_bdev(struct zram *zram, struct bio_vec *bvec,
+ u32 index, struct bio *parent,
+ unsigned long *pentry)
+{
+ struct bio *bio;
+ unsigned long entry;
+
+ bio = bio_alloc(GFP_ATOMIC, 1);
+ if (!bio)
+ return -ENOMEM;
+
+ entry = get_entry_bdev(zram);
+ if (!entry) {
+ bio_put(bio);
+ return -ENOSPC;
+ }
+
+ bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
+ bio->bi_bdev = zram->bdev;
+ if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len,
+ bvec->bv_offset)) {
+ bio_put(bio);
+ put_entry_bdev(zram, entry);
+ return -EIO;
+ }
+
+ if (!parent) {
+ bio->bi_rw = REQ_WRITE | REQ_SYNC;
+ bio->bi_end_io = zram_page_end_io;
+ } else {
+ bio->bi_rw = parent->bi_rw;
+ bio_chain(bio, parent);
+ }
+
+ submit_bio(WRITE, bio);
+ *pentry = entry;
+
+ return 0;
+}
+
+static void zram_wb_clear(struct zram *zram, u32 index)
+{
+ unsigned long entry;
+
+ zram_clear_flag(zram, index, ZRAM_WB);
+ entry = zram_get_element(zram, index);
+ zram_set_element(zram, index, 0);
+ put_entry_bdev(zram, entry);
+}
+
+#else
+static bool zram_wb_enabled(struct zram *zram) { return false; }
+static inline void reset_bdev(struct zram *zram) {};
+static int write_to_bdev(struct zram *zram, struct bio_vec *bvec,
+ u32 index, struct bio *parent,
+ unsigned long *pentry)
+
+{
+ return -EIO;
+}
+
+static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
+ unsigned long entry, struct bio *parent, bool sync)
+{
+ return -EIO;
+}
+static void zram_wb_clear(struct zram *zram, u32 index) {}
+#endif
+
+#ifdef CONFIG_ZRAM_MEMORY_TRACKING
+
+static struct dentry *zram_debugfs_root;
+
+static void zram_debugfs_create(void)
+{
+ zram_debugfs_root = debugfs_create_dir("zram", NULL);
+}
+
+static void zram_debugfs_destroy(void)
+{
+ debugfs_remove_recursive(zram_debugfs_root);
+}
+
+static void zram_accessed(struct zram *zram, u32 index)
+{
+ zram->table[index].ac_time = ktime_get_boottime();
+}
+
+static void zram_reset_access(struct zram *zram, u32 index)
+{
+ zram->table[index].ac_time.tv64 = 0;
+}
+
+static ssize_t read_block_state(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *kbuf;
+ ssize_t index, written = 0;
+ struct zram *zram = file->private_data;
+ unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
+ struct timespec64 ts;
+ gfp_t kmalloc_flags;
+
+ kmalloc_flags = GFP_KERNEL;
+ if (count > PAGE_SIZE)
+ kmalloc_flags |= __GFP_NOWARN | __GFP_NORETRY;
+
+ kbuf = kmalloc_node(count, kmalloc_flags, NUMA_NO_NODE);
+ if (!kbuf && count > PAGE_SIZE)
+ kbuf = vmalloc(count);
+
+ if (!kbuf)
+ return -ENOMEM;
+
+ down_read(&zram->init_lock);
+ if (!init_done(zram)) {
+ up_read(&zram->init_lock);
+ kvfree(kbuf);
return -EINVAL;
+ }
- down_write(&zram->init_lock);
- if (init_done(zram)) {
- if (!zcomp_set_max_streams(zram->comp, num)) {
- pr_info("Cannot change max compression streams\n");
- ret = -EINVAL;
- goto out;
+ for (index = *ppos; index < nr_pages; index++) {
+ int copied;
+
+ zram_slot_lock(zram, index);
+ if (!zram_allocated(zram, index))
+ goto next;
+
+ ts = ktime_to_timespec64(zram->table[index].ac_time);
+ copied = snprintf(kbuf + written, count,
+ "%12zd %12lld.%06lu %c%c%c\n",
+ index, (s64)ts.tv_sec,
+ ts.tv_nsec / NSEC_PER_USEC,
+ zram_test_flag(zram, index, ZRAM_SAME) ? 's' : '.',
+ zram_test_flag(zram, index, ZRAM_WB) ? 'w' : '.',
+ zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.');
+
+ if (count < copied) {
+ zram_slot_unlock(zram, index);
+ break;
}
+ written += copied;
+ count -= copied;
+next:
+ zram_slot_unlock(zram, index);
+ *ppos += 1;
}
- zram->max_comp_streams = num;
- ret = len;
-out:
- up_write(&zram->init_lock);
- return ret;
+ up_read(&zram->init_lock);
+ if (copy_to_user(buf, kbuf, written))
+ written = -EFAULT;
+ kvfree(kbuf);
+
+ return written;
+}
+
+static const struct file_operations proc_zram_block_state_op = {
+ .open = simple_open,
+ .read = read_block_state,
+ .llseek = default_llseek,
+};
+
+static void zram_debugfs_register(struct zram *zram)
+{
+ if (!zram_debugfs_root)
+ return;
+
+ zram->debugfs_dir = debugfs_create_dir(zram->disk->disk_name,
+ zram_debugfs_root);
+ debugfs_create_file("block_state", 0400, zram->debugfs_dir,
+ zram, &proc_zram_block_state_op);
+}
+
+static void zram_debugfs_unregister(struct zram *zram)
+{
+ debugfs_remove_recursive(zram->debugfs_dir);
+}
+#else
+static void zram_debugfs_create(void) {};
+static void zram_debugfs_destroy(void) {};
+static void zram_accessed(struct zram *zram, u32 index) {};
+static void zram_reset_access(struct zram *zram, u32 index) {};
+static void zram_debugfs_register(struct zram *zram) {};
+static void zram_debugfs_unregister(struct zram *zram) {};
+#endif
+
+/*
+ * We switched to per-cpu streams and this attr is not needed anymore.
+ * However, we will keep it around for some time, because:
+ * a) we may revert per-cpu streams in the future
+ * b) it's visible to user space and we need to follow our 2-year
+ *    retirement rule; but we already have a number of 'soon to be
+ *    altered' attrs, so max_comp_streams needs to wait for the next
+ *    layoff cycle.
+ */
+static ssize_t max_comp_streams_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
+}
+
+static ssize_t max_comp_streams_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ return len;
}
static ssize_t comp_algorithm_show(struct device *dev,
@@ -370,9 +788,16 @@ static ssize_t comp_algorithm_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct zram *zram = dev_to_zram(dev);
+ char compressor[ARRAY_SIZE(zram->compressor)];
size_t sz;
- if (!zcomp_available_algorithm(buf))
+ strlcpy(compressor, buf, sizeof(compressor));
+ /* ignore trailing newline */
+ sz = strlen(compressor);
+ if (sz > 0 && compressor[sz - 1] == '\n')
+ compressor[sz - 1] = 0x00;
+
+ if (!zcomp_available_algorithm(compressor))
return -EINVAL;
down_write(&zram->init_lock);
@@ -381,13 +806,8 @@ static ssize_t comp_algorithm_store(struct device *dev,
pr_info("Can't change algorithm for initialized device\n");
return -EBUSY;
}
- strlcpy(zram->compressor, buf, sizeof(zram->compressor));
-
- /* ignore trailing newline */
- sz = strlen(zram->compressor);
- if (sz > 0 && zram->compressor[sz - 1] == '\n')
- zram->compressor[sz - 1] = 0x00;
+ strcpy(zram->compressor, compressor);
up_write(&zram->init_lock);
return len;
}
@@ -396,7 +816,6 @@ static ssize_t compact_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct zram *zram = dev_to_zram(dev);
- struct zram_meta *meta;
down_read(&zram->init_lock);
if (!init_done(zram)) {
@@ -404,8 +823,7 @@ static ssize_t compact_store(struct device *dev,
return -EINVAL;
}
- meta = zram->meta;
- zs_compact(meta->mem_pool);
+ zs_compact(zram->mem_pool);
up_read(&zram->init_lock);
return len;
@@ -442,97 +860,78 @@ static ssize_t mm_stat_show(struct device *dev,
down_read(&zram->init_lock);
if (init_done(zram)) {
- mem_used = zs_get_total_pages(zram->meta->mem_pool);
- zs_pool_stats(zram->meta->mem_pool, &pool_stats);
+ mem_used = zs_get_total_pages(zram->mem_pool);
+ zs_pool_stats(zram->mem_pool, &pool_stats);
}
orig_size = atomic64_read(&zram->stats.pages_stored);
max_used = atomic_long_read(&zram->stats.max_used_pages);
ret = scnprintf(buf, PAGE_SIZE,
- "%8llu %8llu %8llu %8lu %8ld %8llu %8lu\n",
+ "%8llu %8llu %8llu %8lu %8ld %8llu %8lu %8llu\n",
orig_size << PAGE_SHIFT,
(u64)atomic64_read(&zram->stats.compr_data_size),
mem_used << PAGE_SHIFT,
zram->limit_pages << PAGE_SHIFT,
max_used << PAGE_SHIFT,
- (u64)atomic64_read(&zram->stats.zero_pages),
- pool_stats.pages_compacted);
+ (u64)atomic64_read(&zram->stats.same_pages),
+ pool_stats.pages_compacted,
+ (u64)atomic64_read(&zram->stats.huge_pages));
up_read(&zram->init_lock);
return ret;
}
-static DEVICE_ATTR_RO(io_stat);
-static DEVICE_ATTR_RO(mm_stat);
-ZRAM_ATTR_RO(num_reads);
-ZRAM_ATTR_RO(num_writes);
-ZRAM_ATTR_RO(failed_reads);
-ZRAM_ATTR_RO(failed_writes);
-ZRAM_ATTR_RO(invalid_io);
-ZRAM_ATTR_RO(notify_free);
-ZRAM_ATTR_RO(zero_pages);
-ZRAM_ATTR_RO(compr_data_size);
-
-static inline bool zram_meta_get(struct zram *zram)
-{
- if (atomic_inc_not_zero(&zram->refcount))
- return true;
- return false;
-}
-
-static inline void zram_meta_put(struct zram *zram)
+static ssize_t debug_stat_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- atomic_dec(&zram->refcount);
+ int version = 1;
+ struct zram *zram = dev_to_zram(dev);
+ ssize_t ret;
+
+ down_read(&zram->init_lock);
+ ret = scnprintf(buf, PAGE_SIZE,
+ "version: %d\n%8llu\n",
+ version,
+ (u64)atomic64_read(&zram->stats.writestall));
+ up_read(&zram->init_lock);
+
+ return ret;
}
-static void zram_meta_free(struct zram_meta *meta, u64 disksize)
+static DEVICE_ATTR_RO(io_stat);
+static DEVICE_ATTR_RO(mm_stat);
+static DEVICE_ATTR_RO(debug_stat);
+
+static void zram_meta_free(struct zram *zram, u64 disksize)
{
size_t num_pages = disksize >> PAGE_SHIFT;
size_t index;
/* Free all pages that are still in this zram device */
- for (index = 0; index < num_pages; index++) {
- unsigned long handle = meta->table[index].handle;
-
- if (!handle)
- continue;
-
- zs_free(meta->mem_pool, handle);
- }
+ for (index = 0; index < num_pages; index++)
+ zram_free_page(zram, index);
- zs_destroy_pool(meta->mem_pool);
- vfree(meta->table);
- kfree(meta);
+ zs_destroy_pool(zram->mem_pool);
+ vfree(zram->table);
}
-static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
+static bool zram_meta_alloc(struct zram *zram, u64 disksize)
{
size_t num_pages;
- struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
-
- if (!meta)
- return NULL;
num_pages = disksize >> PAGE_SHIFT;
- meta->table = vzalloc(num_pages * sizeof(*meta->table));
- if (!meta->table) {
- pr_err("Error allocating zram address table\n");
- goto out_error;
- }
+ zram->table = vzalloc(num_pages * sizeof(*zram->table));
+ if (!zram->table)
+ return false;
- meta->mem_pool = zs_create_pool(pool_name);
- if (!meta->mem_pool) {
- pr_err("Error creating memory pool\n");
- goto out_error;
+ zram->mem_pool = zs_create_pool(zram->disk->disk_name);
+ if (!zram->mem_pool) {
+ vfree(zram->table);
+ return false;
}
- return meta;
-
-out_error:
- vfree(meta->table);
- kfree(meta);
- return NULL;
+ return true;
}
/*
@@ -542,243 +941,313 @@ out_error:
*/
static void zram_free_page(struct zram *zram, size_t index)
{
- struct zram_meta *meta = zram->meta;
- unsigned long handle = meta->table[index].handle;
+ unsigned long handle;
- if (unlikely(!handle)) {
- /*
- * No memory is allocated for zero filled pages.
- * Simply clear zero page flag.
- */
- if (zram_test_flag(meta, index, ZRAM_ZERO)) {
- zram_clear_flag(meta, index, ZRAM_ZERO);
- atomic64_dec(&zram->stats.zero_pages);
- }
+ zram_reset_access(zram, index);
+
+ if (zram_test_flag(zram, index, ZRAM_HUGE)) {
+ zram_clear_flag(zram, index, ZRAM_HUGE);
+ atomic64_dec(&zram->stats.huge_pages);
+ }
+
+ if (zram_wb_enabled(zram) && zram_test_flag(zram, index, ZRAM_WB)) {
+ zram_wb_clear(zram, index);
+ atomic64_dec(&zram->stats.pages_stored);
return;
}
- zs_free(meta->mem_pool, handle);
+ /*
+	 * No memory is allocated for same-element-filled pages.
+	 * Simply clear the same-page flag.
+ */
+ if (zram_test_flag(zram, index, ZRAM_SAME)) {
+ zram_clear_flag(zram, index, ZRAM_SAME);
+ zram_set_element(zram, index, 0);
+ atomic64_dec(&zram->stats.same_pages);
+ atomic64_dec(&zram->stats.pages_stored);
+ return;
+ }
+
+ handle = zram_get_handle(zram, index);
+ if (!handle)
+ return;
+
+ zs_free(zram->mem_pool, handle);
- atomic64_sub(zram_get_obj_size(meta, index),
+ atomic64_sub(zram_get_obj_size(zram, index),
&zram->stats.compr_data_size);
atomic64_dec(&zram->stats.pages_stored);
- meta->table[index].handle = 0;
- zram_set_obj_size(meta, index, 0);
+ zram_set_handle(zram, index, 0);
+ zram_set_obj_size(zram, index, 0);
}
-static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
+static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
+ struct bio *bio, bool partial_io)
{
- int ret = 0;
- unsigned char *cmem;
- struct zram_meta *meta = zram->meta;
+ int ret;
unsigned long handle;
- size_t size;
-
- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
- handle = meta->table[index].handle;
- size = zram_get_obj_size(meta, index);
+ unsigned int size;
+ void *src, *dst;
+
+ if (zram_wb_enabled(zram)) {
+ zram_slot_lock(zram, index);
+ if (zram_test_flag(zram, index, ZRAM_WB)) {
+ struct bio_vec bvec;
+
+ zram_slot_unlock(zram, index);
+
+ bvec.bv_page = page;
+ bvec.bv_len = PAGE_SIZE;
+ bvec.bv_offset = 0;
+ return read_from_bdev(zram, &bvec,
+ zram_get_element(zram, index),
+ bio, partial_io);
+ }
+ zram_slot_unlock(zram, index);
+ }
- if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
- memset(mem, 0, PAGE_SIZE);
+ zram_slot_lock(zram, index);
+ handle = zram_get_handle(zram, index);
+ if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
+ unsigned long value;
+ void *mem;
+
+ value = handle ? zram_get_element(zram, index) : 0;
+ mem = kmap_atomic(page);
+ zram_fill_page(mem, PAGE_SIZE, value);
+ kunmap_atomic(mem);
+ zram_slot_unlock(zram, index);
return 0;
}
- cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
- if (size == PAGE_SIZE)
- memcpy(mem, cmem, PAGE_SIZE);
- else
- ret = zcomp_decompress(zram->comp, cmem, size, mem);
- zs_unmap_object(meta->mem_pool, handle);
- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+ size = zram_get_obj_size(zram, index);
+
+ src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
+ if (size == PAGE_SIZE) {
+ dst = kmap_atomic(page);
+ memcpy(dst, src, PAGE_SIZE);
+ kunmap_atomic(dst);
+ ret = 0;
+ } else {
+ struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);
+
+ dst = kmap_atomic(page);
+ ret = zcomp_decompress(zstrm, src, size, dst);
+ kunmap_atomic(dst);
+ zcomp_stream_put(zram->comp);
+ }
+ zs_unmap_object(zram->mem_pool, handle);
+ zram_slot_unlock(zram, index);
/* Should NEVER happen. Return bio error if it does. */
- if (unlikely(ret)) {
+ if (unlikely(ret))
pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
- return ret;
- }
- return 0;
+ return ret;
}
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
- u32 index, int offset)
+ u32 index, int offset, struct bio *bio)
{
int ret;
struct page *page;
- unsigned char *user_mem, *uncmem = NULL;
- struct zram_meta *meta = zram->meta;
- page = bvec->bv_page;
- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
- if (unlikely(!meta->table[index].handle) ||
- zram_test_flag(meta, index, ZRAM_ZERO)) {
- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
- handle_zero_page(bvec);
- return 0;
+ page = bvec->bv_page;
+ if (is_partial_io(bvec)) {
+ /* Use a temporary buffer to decompress the page */
+ page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
+ if (!page)
+ return -ENOMEM;
}
- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
- if (is_partial_io(bvec))
- /* Use a temporary buffer to decompress the page */
- uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
+ ret = __zram_bvec_read(zram, page, index, bio, is_partial_io(bvec));
+ if (unlikely(ret))
+ goto out;
- user_mem = kmap_atomic(page);
- if (!is_partial_io(bvec))
- uncmem = user_mem;
+ if (is_partial_io(bvec)) {
+ void *dst = kmap_atomic(bvec->bv_page);
+ void *src = kmap_atomic(page);
- if (!uncmem) {
- pr_err("Unable to allocate temp memory\n");
- ret = -ENOMEM;
- goto out_cleanup;
+ memcpy(dst + bvec->bv_offset, src + offset, bvec->bv_len);
+ kunmap_atomic(src);
+ kunmap_atomic(dst);
}
-
- ret = zram_decompress_page(zram, uncmem, index);
- /* Should NEVER happen. Return bio error if it does. */
- if (unlikely(ret))
- goto out_cleanup;
-
+out:
if (is_partial_io(bvec))
- memcpy(user_mem + bvec->bv_offset, uncmem + offset,
- bvec->bv_len);
+ __free_page(page);
- flush_dcache_page(page);
- ret = 0;
-out_cleanup:
- kunmap_atomic(user_mem);
- if (is_partial_io(bvec))
- kfree(uncmem);
return ret;
}
-static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
- int offset)
+static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
+ u32 index, struct bio *bio)
{
int ret = 0;
- size_t clen;
- unsigned long handle;
- struct page *page;
- unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
- struct zram_meta *meta = zram->meta;
- struct zcomp_strm *zstrm = NULL;
unsigned long alloced_pages;
- static unsigned long zram_rs_time;
-
- page = bvec->bv_page;
- if (is_partial_io(bvec)) {
- /*
- * This is a partial IO. We need to read the full page
- * before to write the changes.
- */
- uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
- if (!uncmem) {
- ret = -ENOMEM;
- goto out;
- }
- ret = zram_decompress_page(zram, uncmem, index);
- if (ret)
- goto out;
- }
-
- zstrm = zcomp_strm_find(zram->comp);
- user_mem = kmap_atomic(page);
-
- if (is_partial_io(bvec)) {
- memcpy(uncmem + offset, user_mem + bvec->bv_offset,
- bvec->bv_len);
- kunmap_atomic(user_mem);
- user_mem = NULL;
- } else {
- uncmem = user_mem;
- }
+ unsigned long handle = 0;
+ unsigned int comp_len = 0;
+ void *src, *dst, *mem;
+ struct zcomp_strm *zstrm;
+ struct page *page = bvec->bv_page;
+ unsigned long element = 0;
+ enum zram_pageflags flags = 0;
+ bool allow_wb = true;
- if (page_zero_filled(uncmem)) {
- if (user_mem)
- kunmap_atomic(user_mem);
+ mem = kmap_atomic(page);
+ if (page_same_filled(mem, &element)) {
+ kunmap_atomic(mem);
/* Free memory associated with this sector now. */
- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
- zram_free_page(zram, index);
- zram_set_flag(meta, index, ZRAM_ZERO);
- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
-
- atomic64_inc(&zram->stats.zero_pages);
- ret = 0;
+ flags = ZRAM_SAME;
+ atomic64_inc(&zram->stats.same_pages);
goto out;
}
+ kunmap_atomic(mem);
- ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
- if (!is_partial_io(bvec)) {
- kunmap_atomic(user_mem);
- user_mem = NULL;
- uncmem = NULL;
- }
+compress_again:
+ zstrm = zcomp_stream_get(zram->comp);
+ src = kmap_atomic(page);
+ ret = zcomp_compress(zstrm, src, &comp_len);
+ kunmap_atomic(src);
if (unlikely(ret)) {
+ zcomp_stream_put(zram->comp);
pr_err("Compression failed! err=%d\n", ret);
- goto out;
+ zs_free(zram->mem_pool, handle);
+ return ret;
}
- src = zstrm->buffer;
- if (unlikely(clen > max_zpage_size)) {
- clen = PAGE_SIZE;
- if (is_partial_io(bvec))
- src = uncmem;
+
+ if (unlikely(comp_len > max_zpage_size)) {
+ comp_len = PAGE_SIZE;
+ if (zram_wb_enabled(zram) && allow_wb) {
+ zcomp_stream_put(zram->comp);
+ ret = write_to_bdev(zram, bvec, index, bio, &element);
+ if (!ret) {
+ flags = ZRAM_WB;
+ ret = 1;
+ goto out;
+ }
+ allow_wb = false;
+ goto compress_again;
+ }
}
- handle = zs_malloc(meta->mem_pool, clen, GFP_NOIO | __GFP_HIGHMEM |
+ /*
+ * handle allocation has 2 paths:
+ * a) fast path is executed with preemption disabled (for
+ * per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
+ * since we can't sleep;
+ * b) slow path enables preemption and attempts to allocate
+ * the page with __GFP_DIRECT_RECLAIM bit set. We have to
+ * put the per-cpu compression stream and thus re-do the
+ * compression once the handle is allocated.
+ *
+ * If we have a non-NULL handle here then we are coming
+ * from the slow path and the handle has already been allocated.
+ */
+ if (!handle)
+ handle = zs_malloc(zram->mem_pool, comp_len,
+ __GFP_KSWAPD_RECLAIM |
+ __GFP_NOWARN |
+ __GFP_HIGHMEM |
__GFP_MOVABLE);
if (!handle) {
- if (printk_timed_ratelimit(&zram_rs_time,
- ALLOC_ERROR_LOG_RATE_MS))
- pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
- index, clen);
-
- ret = -ENOMEM;
- goto out;
+ zcomp_stream_put(zram->comp);
+ atomic64_inc(&zram->stats.writestall);
+ handle = zs_malloc(zram->mem_pool, comp_len,
+ GFP_NOIO | __GFP_HIGHMEM |
+ __GFP_MOVABLE);
+ if (handle)
+ goto compress_again;
+ return -ENOMEM;
}
- alloced_pages = zs_get_total_pages(meta->mem_pool);
+ alloced_pages = zs_get_total_pages(zram->mem_pool);
update_used_max(zram, alloced_pages);
if (zram->limit_pages && alloced_pages > zram->limit_pages) {
- zs_free(meta->mem_pool, handle);
- ret = -ENOMEM;
- goto out;
+ zcomp_stream_put(zram->comp);
+ zs_free(zram->mem_pool, handle);
+ return -ENOMEM;
}
- cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
+ dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
- if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
+ src = zstrm->buffer;
+ if (comp_len == PAGE_SIZE)
src = kmap_atomic(page);
- memcpy(cmem, src, PAGE_SIZE);
+ memcpy(dst, src, comp_len);
+ if (comp_len == PAGE_SIZE)
kunmap_atomic(src);
- } else {
- memcpy(cmem, src, clen);
- }
-
- zcomp_strm_release(zram->comp, zstrm);
- zstrm = NULL;
- zs_unmap_object(meta->mem_pool, handle);
+ zcomp_stream_put(zram->comp);
+ zs_unmap_object(zram->mem_pool, handle);
+ atomic64_add(comp_len, &zram->stats.compr_data_size);
+out:
/*
* Free memory associated with this sector
* before overwriting unused sectors.
*/
- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_slot_lock(zram, index);
zram_free_page(zram, index);
- meta->table[index].handle = handle;
- zram_set_obj_size(meta, index, clen);
- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+ if (comp_len == PAGE_SIZE) {
+ zram_set_flag(zram, index, ZRAM_HUGE);
+ atomic64_inc(&zram->stats.huge_pages);
+ }
+
+ if (flags) {
+ zram_set_flag(zram, index, flags);
+ zram_set_element(zram, index, element);
+ } else {
+ zram_set_handle(zram, index, handle);
+ zram_set_obj_size(zram, index, comp_len);
+ }
+ zram_slot_unlock(zram, index);
/* Update stats */
- atomic64_add(clen, &zram->stats.compr_data_size);
atomic64_inc(&zram->stats.pages_stored);
+ return ret;
+}
+
+static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
+ u32 index, int offset, struct bio *bio)
+{
+ int ret;
+ struct page *page = NULL;
+ void *src;
+ struct bio_vec vec;
+
+ vec = *bvec;
+ if (is_partial_io(bvec)) {
+ void *dst;
+ /*
+ * This is a partial IO. We need to read the full page
+ * before to write the changes.
+ */
+ page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
+ if (!page)
+ return -ENOMEM;
+
+ ret = __zram_bvec_read(zram, page, index, bio, true);
+ if (ret)
+ goto out;
+
+ src = kmap_atomic(bvec->bv_page);
+ dst = kmap_atomic(page);
+ memcpy(dst + offset, src + bvec->bv_offset, bvec->bv_len);
+ kunmap_atomic(dst);
+ kunmap_atomic(src);
+
+ vec.bv_page = page;
+ vec.bv_len = PAGE_SIZE;
+ vec.bv_offset = 0;
+ }
+
+ ret = __zram_bvec_write(zram, &vec, index, bio);
out:
- if (zstrm)
- zcomp_strm_release(zram->comp, zstrm);
if (is_partial_io(bvec))
- kfree(uncmem);
+ __free_page(page);
return ret;
}
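
The handle-allocation comment in __zram_bvec_write above describes a two-phase scheme: zs_malloc() is first tried without __GFP_DIRECT_RECLAIM while the per-CPU compression stream is held (preemption disabled, so sleeping is forbidden), and only on failure is the stream dropped, the allocation retried with a sleeping GFP_NOIO call, and control sent back to compress_again, because releasing the stream discards the compressed output. A toy model of that retry shape, using invented helper names rather than the real zcomp/zsmalloc API:

#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

/* Illustrative only: the "stream" models the per-CPU buffer that is
 * lost when released; fast_alloc() models zs_malloc() without
 * __GFP_DIRECT_RECLAIM, slow_alloc() the sleeping retry. */
static bool stream_held;

static void get_stream(void) { stream_held = true; }
static void put_stream(void) { stream_held = false; }

static void *fast_alloc(size_t n) { return rand() % 2 ? malloc(n) : NULL; }
static void *slow_alloc(size_t n) { return malloc(n); }

static void *store_compressed(const void *page, size_t comp_len)
{
    void *handle = NULL;

    (void)page;
again:
    get_stream();            /* real code: preemption off, no sleeping */
    /* ... compress page into the stream buffer here ... */
    if (!handle)
        handle = fast_alloc(comp_len);
    if (!handle) {
        put_stream();        /* stream buffer (and output) discarded */
        handle = slow_alloc(comp_len); /* may sleep now */
        if (!handle)
            return NULL;
        goto again;          /* must recompress into a fresh stream */
    }
    /* ... copy the stream buffer into the handle here ... */
    put_stream();
    return handle;
}

int main(void)
{
    free(store_compressed("page", 64));
    return 0;
}
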
@@ -791,7 +1260,6 @@ static void zram_bio_discard(struct zram *zram, u32 index,
int offset, struct bio *bio)
{
size_t n = bio->bi_iter.bi_size;
- struct zram_meta *meta = zram->meta;
/*
* zram manages data in physical block size units. Because logical block
@@ -812,17 +1280,22 @@ static void zram_bio_discard(struct zram *zram, u32 index,
}
while (n >= PAGE_SIZE) {
- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_slot_lock(zram, index);
zram_free_page(zram, index);
- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_slot_unlock(zram, index);
atomic64_inc(&zram->stats.notify_free);
index++;
n -= PAGE_SIZE;
}
}
+/*
+ * Returns a negative errno on error. Otherwise returns 0 or 1:
+ * 0 if the IO request was completed synchronously,
+ * 1 if the IO request was successfully submitted (asynchronous IO).
+ */
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
- int offset, int rw)
+ int offset, int rw, struct bio *bio)
{
unsigned long start_time = jiffies;
int ret;
@@ -832,15 +1305,20 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
if (rw == READ) {
atomic64_inc(&zram->stats.num_reads);
- ret = zram_bvec_read(zram, bvec, index, offset);
+ ret = zram_bvec_read(zram, bvec, index, offset, bio);
+ flush_dcache_page(bvec->bv_page);
} else {
atomic64_inc(&zram->stats.num_writes);
- ret = zram_bvec_write(zram, bvec, index, offset);
+ ret = zram_bvec_write(zram, bvec, index, offset, bio);
}
generic_end_io_acct(rw, &zram->disk->part0, start_time);
- if (unlikely(ret)) {
+ zram_slot_lock(zram, index);
+ zram_accessed(zram, index);
+ zram_slot_unlock(zram, index);
+
+ if (unlikely(ret < 0)) {
if (rw == READ)
atomic64_inc(&zram->stats.failed_reads);
else
@@ -869,31 +1347,20 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
rw = bio_data_dir(bio);
bio_for_each_segment(bvec, bio, iter) {
- int max_transfer_size = PAGE_SIZE - offset;
-
- if (bvec.bv_len > max_transfer_size) {
- /*
- * zram_bvec_rw() can only make operation on a single
- * zram page. Split the bio vector.
- */
- struct bio_vec bv;
-
- bv.bv_page = bvec.bv_page;
- bv.bv_len = max_transfer_size;
- bv.bv_offset = bvec.bv_offset;
+ struct bio_vec bv = bvec;
+ unsigned int unwritten = bvec.bv_len;
- if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0)
+ do {
+ bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset,
+ unwritten);
+ if (zram_bvec_rw(zram, &bv, index, offset, rw, bio) < 0)
goto out;
- bv.bv_len = bvec.bv_len - max_transfer_size;
- bv.bv_offset += max_transfer_size;
- if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0)
- goto out;
- } else
- if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0)
- goto out;
+ bv.bv_offset += bv.bv_len;
+ unwritten -= bv.bv_len;
- update_position(&index, &offset, &bvec);
+ update_position(&index, &offset, &bv);
+ } while (unwritten);
}
bio_endio(bio);
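
The rewritten segment loop above replaces the old split-in-two special case with a general one: a bio_vec may straddle any number of zram pages, so each iteration clamps the chunk with min_t(unsigned int, PAGE_SIZE - offset, unwritten) and advances. A small self-contained model of that clamping arithmetic:

#include <stdio.h>

#define PAGE_SIZE 4096u

/* Model of the new bvec loop: clamp each chunk to the space left in the
 * current page, then advance the page index and in-page offset. */
static void split(unsigned int offset, unsigned int len)
{
    unsigned int index = 0;

    while (len) {
        unsigned int chunk = PAGE_SIZE - offset;

        if (chunk > len)        /* min_t(unsigned int, ...) in the diff */
            chunk = len;
        printf("page %u: offset %u, %u bytes\n", index, offset, chunk);
        offset = (offset + chunk) % PAGE_SIZE;
        if (offset == 0)        /* crossed a page boundary */
            index++;
        len -= chunk;
    }
}

int main(void)
{
    split(3000, 10000); /* one segment straddling four zram pages */
    return 0;
}
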
@@ -910,22 +1377,15 @@ static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio)
{
struct zram *zram = queue->queuedata;
- if (unlikely(!zram_meta_get(zram)))
- goto error;
-
- blk_queue_split(queue, &bio, queue->bio_split);
-
if (!valid_io_request(zram, bio->bi_iter.bi_sector,
bio->bi_iter.bi_size)) {
atomic64_inc(&zram->stats.invalid_io);
- goto put_zram;
+ goto error;
}
__zram_make_request(zram, bio);
- zram_meta_put(zram);
return BLK_QC_T_NONE;
-put_zram:
- zram_meta_put(zram);
+
error:
bio_io_error(bio);
return BLK_QC_T_NONE;
@@ -935,45 +1395,39 @@ static void zram_slot_free_notify(struct block_device *bdev,
unsigned long index)
{
struct zram *zram;
- struct zram_meta *meta;
zram = bdev->bd_disk->private_data;
- meta = zram->meta;
- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_slot_lock(zram, index);
zram_free_page(zram, index);
- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_slot_unlock(zram, index);
atomic64_inc(&zram->stats.notify_free);
}
static int zram_rw_page(struct block_device *bdev, sector_t sector,
struct page *page, int rw)
{
- int offset, err = -EIO;
+ int offset, ret;
u32 index;
struct zram *zram;
struct bio_vec bv;
zram = bdev->bd_disk->private_data;
- if (unlikely(!zram_meta_get(zram)))
- goto out;
if (!valid_io_request(zram, sector, PAGE_SIZE)) {
atomic64_inc(&zram->stats.invalid_io);
- err = -EINVAL;
- goto put_zram;
+ ret = -EINVAL;
+ goto out;
}
index = sector >> SECTORS_PER_PAGE_SHIFT;
- offset = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT;
+ offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
bv.bv_page = page;
bv.bv_len = PAGE_SIZE;
bv.bv_offset = 0;
- err = zram_bvec_rw(zram, &bv, index, offset, rw);
-put_zram:
- zram_meta_put(zram);
+ ret = zram_bvec_rw(zram, &bv, index, offset, rw, NULL);
out:
/*
* If I/O fails, just return error(ie, non-zero) without
@@ -983,14 +1437,24 @@ out:
* bio->bi_end_io does things to handle the error
* (e.g., SetPageError, set_page_dirty and extra works).
*/
- if (err == 0)
+ if (unlikely(ret < 0))
+ return ret;
+
+ switch (ret) {
+ case 0:
page_endio(page, rw, 0);
- return err;
+ break;
+ case 1:
+ ret = 0;
+ break;
+ default:
+ WARN_ON(1);
+ }
+ return ret;
}
static void zram_reset_device(struct zram *zram)
{
- struct zram_meta *meta;
struct zcomp *comp;
u64 disksize;
@@ -1003,33 +1467,19 @@ static void zram_reset_device(struct zram *zram)
return;
}
- meta = zram->meta;
comp = zram->comp;
disksize = zram->disksize;
- /*
- * Refcount will go down to 0 eventually and r/w handler
- * cannot handle further I/O so it will bail out by
- * check zram_meta_get.
- */
- zram_meta_put(zram);
- /*
- * We want to free zram_meta in process context to avoid
- * deadlock between reclaim path and any other locks.
- */
- wait_event(zram->io_done, atomic_read(&zram->refcount) == 0);
-
- /* Reset stats */
- memset(&zram->stats, 0, sizeof(zram->stats));
zram->disksize = 0;
- zram->max_comp_streams = 1;
set_capacity(zram->disk, 0);
part_stat_set_all(&zram->disk->part0, 0);
up_write(&zram->init_lock);
/* I/O operation under all of CPU are done so let's free */
- zram_meta_free(meta, disksize);
+ zram_meta_free(zram, disksize);
+ memset(&zram->stats, 0, sizeof(zram->stats));
zcomp_destroy(comp);
+ reset_bdev(zram);
}
static ssize_t disksize_store(struct device *dev,
@@ -1037,7 +1487,6 @@ static ssize_t disksize_store(struct device *dev,
{
u64 disksize;
struct zcomp *comp;
- struct zram_meta *meta;
struct zram *zram = dev_to_zram(dev);
int err;
@@ -1045,12 +1494,20 @@ static ssize_t disksize_store(struct device *dev,
if (!disksize)
return -EINVAL;
+ down_write(&zram->init_lock);
+ if (init_done(zram)) {
+ pr_info("Cannot change disksize for initialized device\n");
+ err = -EBUSY;
+ goto out_unlock;
+ }
+
disksize = PAGE_ALIGN(disksize);
- meta = zram_meta_alloc(zram->disk->disk_name, disksize);
- if (!meta)
- return -ENOMEM;
+ if (!zram_meta_alloc(zram, disksize)) {
+ err = -ENOMEM;
+ goto out_unlock;
+ }
- comp = zcomp_create(zram->compressor, zram->max_comp_streams);
+ comp = zcomp_create(zram->compressor);
if (IS_ERR(comp)) {
pr_err("Cannot initialise %s compressing backend\n",
zram->compressor);
@@ -1058,35 +1515,19 @@ static ssize_t disksize_store(struct device *dev,
goto out_free_meta;
}
- down_write(&zram->init_lock);
- if (init_done(zram)) {
- pr_info("Cannot change disksize for initialized device\n");
- err = -EBUSY;
- goto out_destroy_comp;
- }
-
- init_waitqueue_head(&zram->io_done);
- atomic_set(&zram->refcount, 1);
- zram->meta = meta;
zram->comp = comp;
zram->disksize = disksize;
set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
- up_write(&zram->init_lock);
- /*
- * Revalidate disk out of the init_lock to avoid lockdep splat.
- * It's okay because disk's capacity is protected by init_lock
- * so that revalidate_disk always sees up-to-date capacity.
- */
revalidate_disk(zram->disk);
+ up_write(&zram->init_lock);
return len;
-out_destroy_comp:
- up_write(&zram->init_lock);
- zcomp_destroy(comp);
out_free_meta:
- zram_meta_free(meta, disksize);
+ zram_meta_free(zram, disksize);
+out_unlock:
+ up_write(&zram->init_lock);
return err;
}
@@ -1161,38 +1602,33 @@ static DEVICE_ATTR_WO(compact);
static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
static DEVICE_ATTR_WO(reset);
-static DEVICE_ATTR_RO(orig_data_size);
-static DEVICE_ATTR_RO(mem_used_total);
-static DEVICE_ATTR_RW(mem_limit);
-static DEVICE_ATTR_RW(mem_used_max);
+static DEVICE_ATTR_WO(mem_limit);
+static DEVICE_ATTR_WO(mem_used_max);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);
+#ifdef CONFIG_ZRAM_WRITEBACK
+static DEVICE_ATTR_RW(backing_dev);
+#endif
static struct attribute *zram_disk_attrs[] = {
&dev_attr_disksize.attr,
&dev_attr_initstate.attr,
&dev_attr_reset.attr,
- &dev_attr_num_reads.attr,
- &dev_attr_num_writes.attr,
- &dev_attr_failed_reads.attr,
- &dev_attr_failed_writes.attr,
&dev_attr_compact.attr,
- &dev_attr_invalid_io.attr,
- &dev_attr_notify_free.attr,
- &dev_attr_zero_pages.attr,
- &dev_attr_orig_data_size.attr,
- &dev_attr_compr_data_size.attr,
- &dev_attr_mem_used_total.attr,
&dev_attr_mem_limit.attr,
&dev_attr_mem_used_max.attr,
&dev_attr_max_comp_streams.attr,
&dev_attr_comp_algorithm.attr,
+#ifdef CONFIG_ZRAM_WRITEBACK
+ &dev_attr_backing_dev.attr,
+#endif
&dev_attr_io_stat.attr,
&dev_attr_mm_stat.attr,
+ &dev_attr_debug_stat.attr,
NULL,
};
-static struct attribute_group zram_disk_attr_group = {
+static const struct attribute_group zram_disk_attr_group = {
.attrs = zram_disk_attrs,
};
@@ -1250,6 +1686,7 @@ static int zram_add(void)
/* zram devices sort of resembles non-rotational disks */
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
+
/*
* To ensure that we always get PAGE_SIZE aligned
* and n*PAGE_SIZED sized I/O requests.
@@ -1260,8 +1697,6 @@ static int zram_add(void)
blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
- zram->disk->queue->limits.max_sectors = SECTORS_PER_PAGE;
- zram->disk->queue->limits.chunk_sectors = 0;
blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
/*
* zram_bio_discard() will clear all logical blocks if logical block
@@ -1277,6 +1712,8 @@ static int zram_add(void)
zram->disk->queue->limits.discard_zeroes_data = 0;
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);
+ zram->disk->queue->backing_dev_info->capabilities |=
+ BDI_CAP_STABLE_WRITES;
add_disk(zram->disk);
ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
@@ -1287,9 +1724,8 @@ static int zram_add(void)
goto out_free_disk;
}
strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
- zram->meta = NULL;
- zram->max_comp_streams = 1;
+ zram_debugfs_register(zram);
pr_info("Added device: %s\n", zram->disk->disk_name);
return device_id;
@@ -1323,6 +1759,7 @@ static int zram_remove(struct zram *zram)
zram->claim = true;
mutex_unlock(&bdev->bd_mutex);
+ zram_debugfs_unregister(zram);
/*
* Remove sysfs first, so no one will perform a disksize
* store while we destroy the devices. This also helps during
@@ -1340,8 +1777,8 @@ static int zram_remove(struct zram *zram)
pr_info("Removed device: %s\n", zram->disk->disk_name);
- blk_cleanup_queue(zram->disk->queue);
del_gendisk(zram->disk);
+ blk_cleanup_queue(zram->disk->queue);
put_disk(zram->disk);
kfree(zram);
return 0;
@@ -1421,6 +1858,7 @@ static void destroy_devices(void)
{
class_unregister(&zram_control_class);
idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
+ zram_debugfs_destroy();
idr_destroy(&zram_index_idr);
unregister_blkdev(zram_major, "zram");
}
@@ -1435,6 +1873,7 @@ static int __init zram_init(void)
return ret;
}
+ zram_debugfs_create();
zram_major = register_blkdev(0, "zram");
if (zram_major <= 0) {
pr_err("Unable to get major number\n");
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 8e92339686d7..bbda650f0dc1 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -15,8 +15,9 @@
#ifndef _ZRAM_DRV_H_
#define _ZRAM_DRV_H_
-#include <linux/spinlock.h>
+#include <linux/rwsem.h>
#include <linux/zsmalloc.h>
+#include <linux/crypto.h>
#include "zcomp.h"
@@ -59,9 +60,11 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
/* Flags for zram pages (table[page_no].value) */
enum zram_pageflags {
- /* Page consists entirely of zeros */
- ZRAM_ZERO = ZRAM_FLAG_SHIFT,
- ZRAM_ACCESS, /* page is now accessed */
+ /* zram slot is locked */
+ ZRAM_LOCK = ZRAM_FLAG_SHIFT,
+ ZRAM_SAME, /* Page consists of the same element */
+ ZRAM_WB, /* page is stored on backing_device */
+ ZRAM_HUGE, /* Incompressible page */
__NR_ZRAM_PAGEFLAGS,
};
@@ -70,8 +73,14 @@ enum zram_pageflags {
/* Allocated for each disk page */
struct zram_table_entry {
- unsigned long handle;
+ union {
+ unsigned long handle;
+ unsigned long element;
+ };
unsigned long value;
+#ifdef CONFIG_ZRAM_MEMORY_TRACKING
+ ktime_t ac_time;
+#endif
};
struct zram_stats {
@@ -82,18 +91,16 @@ struct zram_stats {
atomic64_t failed_writes; /* can happen when memory is too low */
atomic64_t invalid_io; /* non-page-aligned I/O requests */
atomic64_t notify_free; /* no. of swap slot free notifications */
- atomic64_t zero_pages; /* no. of zero filled pages */
+ atomic64_t same_pages; /* no. of same element filled pages */
+ atomic64_t huge_pages; /* no. of huge pages */
atomic64_t pages_stored; /* no. of pages currently stored */
atomic_long_t max_used_pages; /* no. of maximum pages stored */
+ atomic64_t writestall; /* no. of write slow paths */
};
-struct zram_meta {
+struct zram {
struct zram_table_entry *table;
struct zs_pool *mem_pool;
-};
-
-struct zram {
- struct zram_meta *meta;
struct zcomp *comp;
struct gendisk *disk;
/* Prevent concurrent execution of device init */
@@ -102,21 +109,28 @@ struct zram {
* the number of pages zram can consume for storing compressed data
*/
unsigned long limit_pages;
- int max_comp_streams;
struct zram_stats stats;
- atomic_t refcount; /* refcount for zram_meta */
- /* wait all IO under all of cpu are done */
- wait_queue_head_t io_done;
/*
* This is the limit on amount of *uncompressed* worth of data
* we can store in a disk.
*/
u64 disksize; /* bytes */
- char compressor[10];
+ char compressor[CRYPTO_MAX_ALG_NAME];
/*
* zram is claimed so open request will be failed
*/
bool claim; /* Protected by bdev->bd_mutex */
+#ifdef CONFIG_ZRAM_WRITEBACK
+ struct file *backing_dev;
+ struct block_device *bdev;
+ unsigned int old_block_size;
+ unsigned long *bitmap;
+ unsigned long nr_pages;
+ spinlock_t bitmap_lock;
+#endif
+#ifdef CONFIG_ZRAM_MEMORY_TRACKING
+ struct dentry *debugfs_dir;
+#endif
};
#endif
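
With struct zram_meta folded into struct zram, the first word of each zram_table_entry is a union whose interpretation depends on the flag bits kept in value: a zsmalloc handle for ordinary compressed slots, the repeated fill word for ZRAM_SAME slots, and the backing-device block index for ZRAM_WB slots. A toy model of such a flag-discriminated slot (illustrative names only):

#include <assert.h>
#include <stdio.h>

enum slot_kind { SLOT_COMPRESSED, SLOT_SAME, SLOT_WB };

/* Toy model of zram_table_entry: one word, interpreted per the flags. */
struct slot {
    union {
        unsigned long handle;   /* SLOT_COMPRESSED: allocator handle   */
        unsigned long element;  /* SLOT_SAME: the repeated fill word   */
                                /* SLOT_WB: backing-device block index */
    };
    enum slot_kind kind;        /* stands in for the ZRAM_* flag bits  */
};

static unsigned long slot_handle(const struct slot *s)
{
    assert(s->kind == SLOT_COMPRESSED); /* never read the wrong arm */
    return s->handle;
}

int main(void)
{
    struct slot same = { .element = 0, .kind = SLOT_SAME }; /* zero page */
    struct slot comp = { .handle = 0xbeef, .kind = SLOT_COMPRESSED };

    printf("%lu %lx\n", same.element, slot_handle(&comp));
    return 0;
}
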
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index 1ba2fd73852d..0f0c06ab414b 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -660,7 +660,7 @@ static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
struct k3_dma_dev *d = ofdma->of_dma_data;
unsigned int request = dma_spec->args[0];
- if (request > d->dma_requests)
+ if (request >= d->dma_requests)
return NULL;
return dma_get_slave_channel(&(d->chans[request].vc.chan));
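
The k3dma change is the classic off-by-one in a bounds check: with n channels the valid request indices are 0 through n-1, so rejection needs '>=' rather than '>'. A minimal model:

#include <stdio.h>

#define NR_CHANNELS 4

/* Model of the xlate fix: with NR_CHANNELS channels the valid indices
 * are 0..NR_CHANNELS-1, so out-of-range requests must be rejected with
 * '>=', not '>'. */
static const char *lookup(unsigned int request)
{
    static const char *chans[NR_CHANNELS] = { "ch0", "ch1", "ch2", "ch3" };

    if (request >= NR_CHANNELS) /* '>' would let request == 4 through */
        return NULL;
    return chans[request];
}

int main(void)
{
    printf("%s\n", lookup(3));                          /* last valid */
    printf("%s\n", lookup(4) ? lookup(4) : "rejected"); /* off the end */
    return 0;
}
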
diff --git a/drivers/gpu/drm/armada/armada_hw.h b/drivers/gpu/drm/armada/armada_hw.h
index 27319a8335e2..345dc4d0851e 100644
--- a/drivers/gpu/drm/armada/armada_hw.h
+++ b/drivers/gpu/drm/armada/armada_hw.h
@@ -160,6 +160,7 @@ enum {
CFG_ALPHAM_GRA = 0x1 << 16,
CFG_ALPHAM_CFG = 0x2 << 16,
CFG_ALPHA_MASK = 0xff << 8,
+#define CFG_ALPHA(x) ((x) << 8)
CFG_PIXCMD_MASK = 0xff,
};
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index 5c22b380f8f3..f8a69ec63550 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -27,6 +27,7 @@ struct armada_ovl_plane_properties {
uint16_t contrast;
uint16_t saturation;
uint32_t colorkey_mode;
+ uint32_t colorkey_enable;
};
struct armada_ovl_plane {
@@ -62,11 +63,13 @@ armada_ovl_update_attr(struct armada_ovl_plane_properties *prop,
writel_relaxed(0x00002000, dcrtc->base + LCD_SPU_CBSH_HUE);
spin_lock_irq(&dcrtc->irq_lock);
- armada_updatel(prop->colorkey_mode | CFG_ALPHAM_GRA,
- CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
- dcrtc->base + LCD_SPU_DMA_CTRL1);
-
- armada_updatel(ADV_GRACOLORKEY, 0, dcrtc->base + LCD_SPU_ADV_REG);
+ armada_updatel(prop->colorkey_mode,
+ CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
+ dcrtc->base + LCD_SPU_DMA_CTRL1);
+ if (dcrtc->variant->has_spu_adv_reg)
+ armada_updatel(prop->colorkey_enable,
+ ADV_GRACOLORKEY | ADV_VIDCOLORKEY,
+ dcrtc->base + LCD_SPU_ADV_REG);
spin_unlock_irq(&dcrtc->irq_lock);
}
@@ -339,8 +342,17 @@ static int armada_ovl_plane_set_property(struct drm_plane *plane,
dplane->prop.colorkey_vb |= K2B(val);
update_attr = true;
} else if (property == priv->colorkey_mode_prop) {
- dplane->prop.colorkey_mode &= ~CFG_CKMODE_MASK;
- dplane->prop.colorkey_mode |= CFG_CKMODE(val);
+ if (val == CKMODE_DISABLE) {
+ dplane->prop.colorkey_mode =
+ CFG_CKMODE(CKMODE_DISABLE) |
+ CFG_ALPHAM_CFG | CFG_ALPHA(255);
+ dplane->prop.colorkey_enable = 0;
+ } else {
+ dplane->prop.colorkey_mode =
+ CFG_CKMODE(val) |
+ CFG_ALPHAM_GRA | CFG_ALPHA(0);
+ dplane->prop.colorkey_enable = ADV_GRACOLORKEY;
+ }
update_attr = true;
} else if (property == priv->brightness_prop) {
dplane->prop.brightness = val - 256;
@@ -469,7 +481,9 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
dplane->prop.colorkey_yr = 0xfefefe00;
dplane->prop.colorkey_ug = 0x01010100;
dplane->prop.colorkey_vb = 0x01010100;
- dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB);
+ dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB) |
+ CFG_ALPHAM_GRA | CFG_ALPHA(0);
+ dplane->prop.colorkey_enable = ADV_GRACOLORKEY;
dplane->prop.brightness = 0;
dplane->prop.contrast = 0x4000;
dplane->prop.saturation = 0x4000;
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 34cebcdc2fc4..9cae5f69b07c 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -190,7 +190,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
unsigned long val;
val = readl(ctx->addr + DECON_WINCONx(win));
- val &= ~WINCONx_BPPMODE_MASK;
+ val &= WINCONx_ENWIN_F;
switch (fb->pixel_format) {
case DRM_FORMAT_XRGB1555:
@@ -278,8 +278,8 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
COORDINATE_Y(plane->crtc_y + plane->crtc_h - 1);
writel(val, ctx->addr + DECON_VIDOSDxB(win));
- val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
- VIDOSD_Wx_ALPHA_B_F(0x0);
+ val = VIDOSD_Wx_ALPHA_R_F(0xff) | VIDOSD_Wx_ALPHA_G_F(0xff) |
+ VIDOSD_Wx_ALPHA_B_F(0xff);
writel(val, ctx->addr + DECON_VIDOSDxC(win));
val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 11b87d2a7913..ba69d1c72221 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -526,21 +526,25 @@ static int gsc_src_set_fmt(struct device *dev, u32 fmt)
GSC_IN_CHROMA_ORDER_CRCB);
break;
case DRM_FORMAT_NV21:
+ cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_2P);
+ break;
case DRM_FORMAT_NV61:
- cfg |= (GSC_IN_CHROMA_ORDER_CRCB |
- GSC_IN_YUV420_2P);
+ cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV422_2P);
break;
case DRM_FORMAT_YUV422:
cfg |= GSC_IN_YUV422_3P;
break;
case DRM_FORMAT_YUV420:
+ cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_3P);
+ break;
case DRM_FORMAT_YVU420:
- cfg |= GSC_IN_YUV420_3P;
+ cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_3P);
break;
case DRM_FORMAT_NV12:
+ cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_2P);
+ break;
case DRM_FORMAT_NV16:
- cfg |= (GSC_IN_CHROMA_ORDER_CBCR |
- GSC_IN_YUV420_2P);
+ cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV422_2P);
break;
default:
dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
@@ -800,18 +804,25 @@ static int gsc_dst_set_fmt(struct device *dev, u32 fmt)
GSC_OUT_CHROMA_ORDER_CRCB);
break;
case DRM_FORMAT_NV21:
- case DRM_FORMAT_NV61:
cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P);
break;
+ case DRM_FORMAT_NV61:
+ cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV422_2P);
+ break;
case DRM_FORMAT_YUV422:
+ cfg |= GSC_OUT_YUV422_3P;
+ break;
case DRM_FORMAT_YUV420:
+ cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_3P);
+ break;
case DRM_FORMAT_YVU420:
- cfg |= GSC_OUT_YUV420_3P;
+ cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_3P);
break;
case DRM_FORMAT_NV12:
+ cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_2P);
+ break;
case DRM_FORMAT_NV16:
- cfg |= (GSC_OUT_CHROMA_ORDER_CBCR |
- GSC_OUT_YUV420_2P);
+ cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV422_2P);
break;
default:
dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
diff --git a/drivers/gpu/drm/exynos/regs-gsc.h b/drivers/gpu/drm/exynos/regs-gsc.h
index 9ad592707aaf..ade10966d6af 100644
--- a/drivers/gpu/drm/exynos/regs-gsc.h
+++ b/drivers/gpu/drm/exynos/regs-gsc.h
@@ -138,6 +138,7 @@
#define GSC_OUT_YUV420_3P (3 << 4)
#define GSC_OUT_YUV422_1P (4 << 4)
#define GSC_OUT_YUV422_2P (5 << 4)
+#define GSC_OUT_YUV422_3P (6 << 4)
#define GSC_OUT_YUV444 (7 << 4)
#define GSC_OUT_TILE_TYPE_MASK (1 << 2)
#define GSC_OUT_TILE_C_16x8 (0 << 2)
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 495c279da200..ae560f5977fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -602,7 +602,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
struct nouveau_bo *nvbo;
uint32_t data;
- if (unlikely(r->bo_index > req->nr_buffers)) {
+ if (unlikely(r->bo_index >= req->nr_buffers)) {
NV_PRINTK(err, cli, "reloc bo index invalid\n");
ret = -EINVAL;
break;
@@ -612,7 +612,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
if (b->presumed.valid)
continue;
- if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
+ if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
NV_PRINTK(err, cli, "reloc container bo index invalid\n");
ret = -EINVAL;
break;
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index b62c50d1b1e4..b184956bd430 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2487,8 +2487,14 @@ void wacom_setup_device_quirks(struct wacom *wacom)
if (features->type >= INTUOSHT && features->type <= BAMBOO_PT)
features->device_type |= WACOM_DEVICETYPE_PAD;
- features->x_max = 4096;
- features->y_max = 4096;
+ if (features->type == INTUOSHT2) {
+ features->x_max = features->x_max / 10;
+ features->y_max = features->y_max / 10;
+ }
+ else {
+ features->x_max = 4096;
+ features->y_max = 4096;
+ }
}
else if (features->pktlen == WACOM_PKGLEN_BBTOUCH) {
features->device_type |= WACOM_DEVICETYPE_PAD;
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index a4abf7dc9576..cf1b57a054d0 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -677,9 +677,6 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
struct imx_i2c_dma *dma = i2c_imx->dma;
struct device *dev = &i2c_imx->adapter.dev;
- temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
- temp |= I2CR_DMAEN;
- imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
dma->chan_using = dma->chan_rx;
dma->dma_transfer_dir = DMA_DEV_TO_MEM;
@@ -792,6 +789,7 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
int i, result;
unsigned int temp;
int block_data = msgs->flags & I2C_M_RECV_LEN;
+ int use_dma = i2c_imx->dma && msgs->len >= DMA_THRESHOLD && !block_data;
dev_dbg(&i2c_imx->adapter.dev,
"<%s> write slave address: addr=0x%x\n",
@@ -818,12 +816,14 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
*/
if ((msgs->len - 1) || block_data)
temp &= ~I2CR_TXAK;
+ if (use_dma)
+ temp |= I2CR_DMAEN;
imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); /* dummy read */
dev_dbg(&i2c_imx->adapter.dev, "<%s> read data\n", __func__);
- if (i2c_imx->dma && msgs->len >= DMA_THRESHOLD && !block_data)
+ if (use_dma)
return i2c_imx_dma_read(i2c_imx, msgs, is_lastmsg);
/* read data */
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index e4c43a17b333..8088c34336aa 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -1655,13 +1655,7 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
} else
return -EINVAL;
case IIOCDBGVAR:
- if (arg) {
- if (copy_to_user(argp, &dev, sizeof(ulong)))
- return -EFAULT;
- return 0;
- } else
- return -EINVAL;
- break;
+ return -EINVAL;
default:
if ((cmd & IIOCDRVCTL) == IIOCDRVCTL)
cmd = ((cmd >> _IOC_NRSHIFT) & _IOC_NRMASK) & ISDN_DRVIOCTL_MASK;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 2ebd180b0071..9620e1df9b3f 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -3691,6 +3691,13 @@ static int run(struct mddev *mddev)
disk->rdev->saved_raid_disk < 0)
conf->fullsync = 1;
}
+
+ if (disk->replacement &&
+ !test_bit(In_sync, &disk->replacement->flags) &&
+ disk->replacement->saved_raid_disk < 0) {
+ conf->fullsync = 1;
+ }
+
disk->recovery_disabled = mddev->recovery_disabled - 1;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index b5e64b02200c..1ea068815419 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1634,6 +1634,7 @@ struct bnx2x {
struct link_vars link_vars;
u32 link_cnt;
struct bnx2x_link_report_data last_reported_link;
+ bool force_link_down;
struct mdio_if_info mdio;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 949a82458a29..ebc4518d598a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1277,6 +1277,11 @@ void __bnx2x_link_report(struct bnx2x *bp)
{
struct bnx2x_link_report_data cur_data;
+ if (bp->force_link_down) {
+ bp->link_vars.link_up = 0;
+ return;
+ }
+
/* reread mf_cfg */
if (IS_PF(bp) && !CHIP_IS_E1(bp))
bnx2x_read_mf_cfg(bp);
@@ -2840,6 +2845,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
bp->pending_max = 0;
}
+ bp->force_link_down = false;
if (bp->port.pmf) {
rc = bnx2x_initial_phy_init(bp, load_mode);
if (rc)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 8ddb68a3fdb6..403fa8d98aa3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -10222,6 +10222,12 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
bp->sp_rtnl_state = 0;
smp_mb();
+ /* Immediately indicate link as down */
+ bp->link_vars.link_up = 0;
+ bp->force_link_down = true;
+ netif_carrier_off(bp->dev);
+ BNX2X_ERR("Indicating link is down due to Tx-timeout\n");
+
bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
bnx2x_nic_load(bp, LOAD_NORMAL);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 9904d768a20a..4ffacafddacb 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -4591,7 +4591,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
rc = bnxt_request_irq(bp);
if (rc) {
netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
- goto open_err;
+ goto open_err_irq;
}
}
@@ -4629,6 +4629,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
open_err:
bnxt_disable_napi(bp);
+
+open_err_irq:
bnxt_del_napi(bp);
open_err_free_mem:
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 090e00650601..a3e1498ca67c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -338,7 +338,7 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
enable ? "set" : "unset", pi->port_id, i, -err);
else
- txq->dcb_prio = value;
+ txq->dcb_prio = enable ? value : 0;
}
}
#endif /* CONFIG_CHELSIO_T4_DCB */
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c
index 3c677ed3c29e..4d9014d5b36d 100644
--- a/drivers/net/ethernet/cisco/enic/enic_clsf.c
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c
@@ -78,7 +78,6 @@ void enic_rfs_flw_tbl_init(struct enic *enic)
enic->rfs_h.max = enic->config.num_arfs;
enic->rfs_h.free = enic->rfs_h.max;
enic->rfs_h.toclean = 0;
- enic_rfs_timer_start(enic);
}
void enic_rfs_flw_tbl_free(struct enic *enic)
@@ -87,7 +86,6 @@ void enic_rfs_flw_tbl_free(struct enic *enic)
enic_rfs_timer_stop(enic);
spin_lock_bh(&enic->rfs_h.lock);
- enic->rfs_h.free = 0;
for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
struct hlist_head *hhead;
struct hlist_node *tmp;
@@ -98,6 +96,7 @@ void enic_rfs_flw_tbl_free(struct enic *enic)
enic_delfltr(enic, n->fltr_id);
hlist_del(&n->node);
kfree(n);
+ enic->rfs_h.free++;
}
}
spin_unlock_bh(&enic->rfs_h.lock);
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 029fa5bee520..8390597aecb8 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1760,7 +1760,7 @@ static int enic_open(struct net_device *netdev)
vnic_intr_unmask(&enic->intr[i]);
enic_notify_timer_start(enic);
- enic_rfs_flw_tbl_init(enic);
+ enic_rfs_timer_start(enic);
return 0;
@@ -2694,6 +2694,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
enic->notify_timer.function = enic_notify_timer;
enic->notify_timer.data = (unsigned long)enic;
+ enic_rfs_flw_tbl_init(enic);
enic_set_rx_coal_setting(enic);
INIT_WORK(&enic->reset, enic_reset);
INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 105dd00ddc1a..cd2afe92f1da 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1814,7 +1814,12 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
if (enable_addr != 0)
rar_high |= IXGBE_RAH_AV;
+ /* Record lower 32 bits of MAC address and then make
+ * sure that write is flushed to hardware before writing
+ * the upper 16 bits and setting the valid bit.
+ */
IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
+ IXGBE_WRITE_FLUSH(hw);
IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
return 0;
@@ -1846,8 +1851,13 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
- IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
+ /* Clear the address valid bit and upper 16 bits of the address
+ * before clearing the lower bits. This way we aren't updating
+ * a live filter.
+ */
IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+ IXGBE_WRITE_FLUSH(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
/* clear VMDq pool/queue selection for this RAR */
hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
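
Both ixgbe hunks enforce the same invariant: a receive-address filter must never be observable half-written while its valid bit is set, so the low dword is written and flushed before the high dword that carries the valid bit, and the valid bit is cleared and flushed before the low dword is zeroed. A sketch of that ordering, with plain volatile accesses standing in for IXGBE_WRITE_REG()/IXGBE_WRITE_FLUSH():

#include <stdint.h>

#define ADDR_VALID (1u << 31)

/* Sketch only: the dummy read-back models the posted-write flush. */
static void set_filter(volatile uint32_t *lo, volatile uint32_t *hi,
                       uint32_t addr_lo, uint32_t addr_hi)
{
    *lo = addr_lo;              /* lower 32 bits of the MAC address */
    (void)*lo;                  /* flush before arming the filter... */
    *hi = addr_hi | ADDR_VALID; /* ...so it never matches a torn address */
}

static void clear_filter(volatile uint32_t *lo, volatile uint32_t *hi)
{
    *hi = 0;                    /* drop the valid bit first */
    (void)*hi;                  /* flush: the filter is dead... */
    *lo = 0;                    /* ...before the low bits change */
}

int main(void)
{
    volatile uint32_t ral = 0, rah = 0;

    set_filter(&ral, &rah, 0xddccbbaa, 0x2211);
    clear_filter(&ral, &rah);
    return (int)(ral | rah);
}
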
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 688b6da5a9bb..35e1468d8196 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -461,8 +461,16 @@ static irqreturn_t qed_single_int(int irq, void *dev_instance)
/* Fastpath interrupts */
for (j = 0; j < 64; j++) {
if ((0x2ULL << j) & status) {
- hwfn->simd_proto_handler[j].func(
- hwfn->simd_proto_handler[j].token);
+ struct qed_simd_fp_handler *p_handler =
+ &hwfn->simd_proto_handler[j];
+
+ if (p_handler->func)
+ p_handler->func(p_handler->token);
+ else
+ DP_NOTICE(hwfn,
+ "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
+ j, status);
+
status &= ~(0x2ULL << j);
rc = IRQ_HANDLED;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index ccbb04503b27..b53a18e365c2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -1128,6 +1128,8 @@ static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp,
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
ret = kstrtoul(buf, 16, &data);
+ if (ret)
+ return ret;
switch (data) {
case QLC_83XX_FLASH_SECTOR_ERASE_CMD:
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index c90ae4d4be7d..7886a8a5b55b 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -635,7 +635,7 @@ qcaspi_netdev_open(struct net_device *dev)
return ret;
}
- netif_start_queue(qca->net_dev);
+ /* SPI thread takes care of TX queue */
return 0;
}
@@ -739,6 +739,9 @@ qcaspi_netdev_tx_timeout(struct net_device *dev)
qca->net_dev->stats.tx_errors++;
/* Trigger tx queue flush and QCA7000 reset */
qca->sync = QCASPI_SYNC_UNKNOWN;
+
+ if (qca->spi_thread)
+ wake_up_process(qca->spi_thread);
}
static int
@@ -865,22 +868,22 @@ qca_spi_probe(struct spi_device *spi)
if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) ||
(qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) {
- dev_info(&spi->dev, "Invalid clkspeed: %d\n",
- qcaspi_clkspeed);
+ dev_err(&spi->dev, "Invalid clkspeed: %d\n",
+ qcaspi_clkspeed);
return -EINVAL;
}
if ((qcaspi_burst_len < QCASPI_BURST_LEN_MIN) ||
(qcaspi_burst_len > QCASPI_BURST_LEN_MAX)) {
- dev_info(&spi->dev, "Invalid burst len: %d\n",
- qcaspi_burst_len);
+ dev_err(&spi->dev, "Invalid burst len: %d\n",
+ qcaspi_burst_len);
return -EINVAL;
}
if ((qcaspi_pluggable < QCASPI_PLUGGABLE_MIN) ||
(qcaspi_pluggable > QCASPI_PLUGGABLE_MAX)) {
- dev_info(&spi->dev, "Invalid pluggable: %d\n",
- qcaspi_pluggable);
+ dev_err(&spi->dev, "Invalid pluggable: %d\n",
+ qcaspi_pluggable);
return -EINVAL;
}
@@ -941,8 +944,8 @@ qca_spi_probe(struct spi_device *spi)
}
if (register_netdev(qcaspi_devs)) {
- dev_info(&spi->dev, "Unable to register net device %s\n",
- qcaspi_devs->name);
+ dev_err(&spi->dev, "Unable to register net device %s\n",
+ qcaspi_devs->name);
free_netdev(qcaspi_devs);
return -EFAULT;
}
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 33bd3b902304..6be315303d61 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1517,6 +1517,10 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
static int match_first_device(struct device *dev, void *data)
{
+ if (dev->parent && dev->parent->of_node)
+ return of_device_is_compatible(dev->parent->of_node,
+ "ti,davinci_mdio");
+
return !strncmp(dev_name(dev), "davinci_mdio", 12);
}
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index d95a50ae996d..8748e8c9ce96 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -89,10 +89,6 @@
static const char banner[] __initconst = KERN_INFO \
"AX.25: bpqether driver version 004\n";
-static char bcast_addr[6]={0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
-
-static char bpq_eth_addr[6];
-
static int bpq_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
static int bpq_device_event(struct notifier_block *, unsigned long, void *);
@@ -515,8 +511,8 @@ static int bpq_new_device(struct net_device *edev)
bpq->ethdev = edev;
bpq->axdev = ndev;
- memcpy(bpq->dest_addr, bcast_addr, sizeof(bpq_eth_addr));
- memcpy(bpq->acpt_addr, bcast_addr, sizeof(bpq_eth_addr));
+ eth_broadcast_addr(bpq->dest_addr);
+ eth_broadcast_addr(bpq->acpt_addr);
err = register_netdevice(ndev);
if (err)
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 0fbbba7a0cae..f72c2967ae82 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -932,7 +932,7 @@ at86rf230_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
static int
at86rf230_ed(struct ieee802154_hw *hw, u8 *level)
{
- BUG_ON(!level);
+ WARN_ON(!level);
*level = 0xbe;
return 0;
}
@@ -1108,8 +1108,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
if (changed & IEEE802154_AFILT_SADDR_CHANGED) {
u16 addr = le16_to_cpu(filt->short_addr);
- dev_vdbg(&lp->spi->dev,
- "at86rf230_set_hw_addr_filt called for saddr\n");
+ dev_vdbg(&lp->spi->dev, "%s called for saddr\n", __func__);
__at86rf230_write(lp, RG_SHORT_ADDR_0, addr);
__at86rf230_write(lp, RG_SHORT_ADDR_1, addr >> 8);
}
@@ -1117,8 +1116,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
if (changed & IEEE802154_AFILT_PANID_CHANGED) {
u16 pan = le16_to_cpu(filt->pan_id);
- dev_vdbg(&lp->spi->dev,
- "at86rf230_set_hw_addr_filt called for pan id\n");
+ dev_vdbg(&lp->spi->dev, "%s called for pan id\n", __func__);
__at86rf230_write(lp, RG_PAN_ID_0, pan);
__at86rf230_write(lp, RG_PAN_ID_1, pan >> 8);
}
@@ -1127,15 +1125,13 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
u8 i, addr[8];
memcpy(addr, &filt->ieee_addr, 8);
- dev_vdbg(&lp->spi->dev,
- "at86rf230_set_hw_addr_filt called for IEEE addr\n");
+ dev_vdbg(&lp->spi->dev, "%s called for IEEE addr\n", __func__);
for (i = 0; i < 8; i++)
__at86rf230_write(lp, RG_IEEE_ADDR_0 + i, addr[i]);
}
if (changed & IEEE802154_AFILT_PANC_CHANGED) {
- dev_vdbg(&lp->spi->dev,
- "at86rf230_set_hw_addr_filt called for panc change\n");
+ dev_vdbg(&lp->spi->dev, "%s called for panc change\n", __func__);
if (filt->pan_coord)
at86rf230_write_subreg(lp, SR_AACK_I_AM_COORD, 1);
else
@@ -1239,7 +1235,6 @@ at86rf230_set_cca_mode(struct ieee802154_hw *hw,
return at86rf230_write_subreg(lp, SR_CCA_MODE, val);
}
-
static int
at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm)
{
diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
index 43617ded3773..91de25c53274 100644
--- a/drivers/net/ieee802154/fakelb.c
+++ b/drivers/net/ieee802154/fakelb.c
@@ -49,7 +49,7 @@ struct fakelb_phy {
static int fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level)
{
- BUG_ON(!level);
+ WARN_ON(!level);
*level = 0xbe;
return 0;
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 39672984dde1..58b1e18fdd64 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -681,7 +681,7 @@ static void rtl8150_set_multicast(struct net_device *netdev)
(netdev->flags & IFF_ALLMULTI)) {
rx_creg &= 0xfffe;
rx_creg |= 0x0002;
- dev_info(&netdev->dev, "%s: allmulti set\n", netdev->name);
+ dev_dbg(&netdev->dev, "%s: allmulti set\n", netdev->name);
} else {
/* ~RX_MULTICAST, ~RX_PROMISCUOUS */
rx_creg &= 0x00fc;
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 7337e6c0e126..478937418a33 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -81,6 +81,9 @@ static bool turbo_mode = true;
module_param(turbo_mode, bool, 0644);
MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
+static int smsc75xx_link_ok_nopm(struct usbnet *dev);
+static int smsc75xx_phy_gig_workaround(struct usbnet *dev);
+
static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index,
u32 *data, int in_pm)
{
@@ -840,6 +843,9 @@ static int smsc75xx_phy_initialize(struct usbnet *dev)
return -EIO;
}
+ /* phy workaround for gig link */
+ smsc75xx_phy_gig_workaround(dev);
+
smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP |
ADVERTISE_PAUSE_ASYM);
@@ -978,6 +984,62 @@ static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm)
return -EIO;
}
+static int smsc75xx_phy_gig_workaround(struct usbnet *dev)
+{
+ struct mii_if_info *mii = &dev->mii;
+ int ret = 0, timeout = 0;
+ u32 buf, link_up = 0;
+
+ /* Set the phy in Gig loopback */
+ smsc75xx_mdio_write(dev->net, mii->phy_id, MII_BMCR, 0x4040);
+
+ /* Wait for the link up */
+ do {
+ link_up = smsc75xx_link_ok_nopm(dev);
+ usleep_range(10000, 20000);
+ timeout++;
+ } while ((!link_up) && (timeout < 1000));
+
+ if (timeout >= 1000) {
+ netdev_warn(dev->net, "Timeout waiting for PHY link up\n");
+ return -EIO;
+ }
+
+ /* phy reset */
+ ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", ret);
+ return ret;
+ }
+
+ buf |= PMT_CTL_PHY_RST;
+
+ ret = smsc75xx_write_reg(dev, PMT_CTL, buf);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write PMT_CTL: %d\n", ret);
+ return ret;
+ }
+
+ timeout = 0;
+ do {
+ usleep_range(10000, 20000);
+ ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n",
+ ret);
+ return ret;
+ }
+ timeout++;
+ } while ((buf & PMT_CTL_PHY_RST) && (timeout < 100));
+
+ if (timeout >= 100) {
+ netdev_warn(dev->net, "timeout waiting for PHY Reset\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
static int smsc75xx_reset(struct usbnet *dev)
{
struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
index bcf29bf6f727..b4d9b47584d6 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
@@ -4290,6 +4290,13 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
brcmf_dbg(TRACE, "Enter\n");
if (bus) {
+ /* Stop watchdog task */
+ if (bus->watchdog_tsk) {
+ send_sig(SIGTERM, bus->watchdog_tsk, 1);
+ kthread_stop(bus->watchdog_tsk);
+ bus->watchdog_tsk = NULL;
+ }
+
/* De-register interrupt handler */
brcmf_sdiod_intr_unregister(bus->sdiodev);
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index d1fab97d6b01..6ce2a73fe0e4 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -457,8 +457,17 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
list_add(&slot->slot_list, &pci_hotplug_slot_list);
result = fs_add_slot(pci_slot);
+ if (result)
+ goto err_list_del;
+
kobject_uevent(&pci_slot->kobj, KOBJ_ADD);
dbg("Added slot %s to the list\n", name);
+ goto out;
+
+err_list_del:
+ list_del(&slot->slot_list);
+ pci_slot->hotplug = NULL;
+ pci_destroy_slot(pci_slot);
out:
mutex_unlock(&pci_hp_mutex);
return result;
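
The __pci_hp_register() fix restores the usual kernel unwind discipline: each failing step jumps to a label that undoes exactly the steps that already succeeded, in reverse order, so no error path leaks the list entry or the kobject. A generic model of the idiom:

#include <stdlib.h>

struct ctx { void *a, *b; };

static int setup(struct ctx *c)
{
    int err = -1; /* stands in for -ENOMEM etc. */

    c->a = malloc(16);
    if (!c->a)
        goto err_out;

    c->b = malloc(16);
    if (!c->b)
        goto err_free_a;   /* undo only what already succeeded */

    return 0;

err_free_a:
    free(c->a);
    c->a = NULL;
err_out:
    return err;
}

int main(void)
{
    struct ctx c = { 0 };

    if (setup(&c))
        return 1;
    free(c.b);
    free(c.a);
    return 0;
}
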
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index cbe58480b474..6b0f7e0d7dbd 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -132,6 +132,7 @@ int pciehp_unconfigure_device(struct slot *p_slot);
void pciehp_queue_pushbutton_work(struct work_struct *work);
struct controller *pcie_init(struct pcie_device *dev);
int pcie_init_notification(struct controller *ctrl);
+void pcie_shutdown_notification(struct controller *ctrl);
int pciehp_enable_slot(struct slot *p_slot);
int pciehp_disable_slot(struct slot *p_slot);
void pcie_reenable_notification(struct controller *ctrl);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 8f6ded43760a..47cc3568514e 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -77,6 +77,12 @@ static int reset_slot (struct hotplug_slot *slot, int probe);
*/
static void release_slot(struct hotplug_slot *hotplug_slot)
{
+ struct slot *slot = hotplug_slot->private;
+
+ /* queued work needs hotplug_slot name */
+ cancel_delayed_work(&slot->work);
+ drain_workqueue(slot->wq);
+
kfree(hotplug_slot->ops);
kfree(hotplug_slot->info);
kfree(hotplug_slot);
@@ -276,6 +282,7 @@ static void pciehp_remove(struct pcie_device *dev)
{
struct controller *ctrl = get_service_data(dev);
+ pcie_shutdown_notification(ctrl);
cleanup_slot(ctrl);
pciehp_release_ctrl(ctrl);
}
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 63c6c7fce3eb..cd982778a6b8 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -741,7 +741,7 @@ int pcie_init_notification(struct controller *ctrl)
return 0;
}
-static void pcie_shutdown_notification(struct controller *ctrl)
+void pcie_shutdown_notification(struct controller *ctrl)
{
if (ctrl->notification_enabled) {
pcie_disable_notification(ctrl);
@@ -776,7 +776,7 @@ abort:
static void pcie_cleanup_slot(struct controller *ctrl)
{
struct slot *slot = ctrl->slot;
- cancel_delayed_work(&slot->work);
+
destroy_workqueue(slot->wq);
kfree(slot);
}
@@ -853,7 +853,6 @@ abort:
void pciehp_release_ctrl(struct controller *ctrl)
{
- pcie_shutdown_notification(ctrl);
pcie_cleanup_slot(ctrl);
kfree(ctrl);
}
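
Taken together, the pciehp hunks reorder teardown: notification shutdown now happens in pciehp_remove() before the slot is cleaned up, and release_slot() cancels and drains queued work before the structures that work items dereference are freed. A sketch of the resulting ordering; the helpers are invented stand-ins for pcie_shutdown_notification(), cancel_delayed_work()/drain_workqueue() and the final kfree():

/* Sketch only: ordering is the point, the helpers are hypothetical. */
struct controller;

void stop_events(struct controller *ctrl);  /* no new IRQs queue work   */
void drain_work(struct controller *ctrl);   /* in-flight work completes */
void free_slot(struct controller *ctrl);    /* only now safe to free    */

static inline void remove_controller(struct controller *ctrl)
{
    stop_events(ctrl);  /* 1: before cleanup, not after (the old bug) */
    drain_work(ctrl);   /* 2: work still needs the slot name alive    */
    free_slot(ctrl);    /* 3: nothing can touch the slot anymore      */
}
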
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 566897f24dee..5f040619393f 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1338,6 +1338,10 @@ static void pci_configure_mps(struct pci_dev *dev)
if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
return;
+ /* MPS and MRRS fields are of type 'RsvdP' for VFs, short-circuit out */
+ if (dev->is_virtfn)
+ return;
+
mps = pcie_get_mps(dev);
p_mps = pcie_get_mps(bridge);
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index 9dc8687bf048..e1b32ed0aa20 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -676,10 +676,17 @@ static int scsifront_dev_reset_handler(struct scsi_cmnd *sc)
static int scsifront_sdev_configure(struct scsi_device *sdev)
{
struct vscsifrnt_info *info = shost_priv(sdev->host);
+ int err;
- if (info && current == info->curr)
- xenbus_printf(XBT_NIL, info->dev->nodename,
+ if (info && current == info->curr) {
+ err = xenbus_printf(XBT_NIL, info->dev->nodename,
info->dev_state_path, "%d", XenbusStateConnected);
+ if (err) {
+ xenbus_dev_error(info->dev, err,
+ "%s: writing dev_state_path", __func__);
+ return err;
+ }
+ }
return 0;
}
@@ -687,10 +694,15 @@ static int scsifront_sdev_configure(struct scsi_device *sdev)
static void scsifront_sdev_destroy(struct scsi_device *sdev)
{
struct vscsifrnt_info *info = shost_priv(sdev->host);
+ int err;
- if (info && current == info->curr)
- xenbus_printf(XBT_NIL, info->dev->nodename,
+ if (info && current == info->curr) {
+ err = xenbus_printf(XBT_NIL, info->dev->nodename,
info->dev_state_path, "%d", XenbusStateClosed);
+ if (err)
+ xenbus_dev_error(info->dev, err,
+ "%s: writing dev_state_path", __func__);
+ }
}
static struct scsi_host_template scsifront_sht = {
@@ -1025,9 +1037,12 @@ static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
if (scsi_add_device(info->host, chn, tgt, lun)) {
dev_err(&dev->dev, "scsi_add_device\n");
- xenbus_printf(XBT_NIL, dev->nodename,
+ err = xenbus_printf(XBT_NIL, dev->nodename,
info->dev_state_path,
"%d", XenbusStateClosed);
+ if (err)
+ xenbus_dev_error(dev, err,
+ "%s: writing dev_state_path", __func__);
}
break;
case VSCSIFRONT_OP_DEL_LUN:
@@ -1041,10 +1056,14 @@ static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
}
break;
case VSCSIFRONT_OP_READD_LUN:
- if (device_state == XenbusStateConnected)
- xenbus_printf(XBT_NIL, dev->nodename,
+ if (device_state == XenbusStateConnected) {
+ err = xenbus_printf(XBT_NIL, dev->nodename,
info->dev_state_path,
"%d", XenbusStateConnected);
+ if (err)
+ xenbus_dev_error(dev, err,
+ "%s: writing dev_state_path", __func__);
+ }
break;
default:
break;
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 5ed1ed37fad8..77903dbdd2d9 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -403,7 +403,8 @@ static void ion_handle_get(struct ion_handle *handle)
}
/* Must hold the client lock */
-static struct ion_handle* ion_handle_get_check_overflow(struct ion_handle *handle)
+static struct ion_handle *ion_handle_get_check_overflow(
+ struct ion_handle *handle)
{
if (atomic_read(&handle->ref.refcount) + 1 == 0)
return ERR_PTR(-EOVERFLOW);
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 8435c3f204c1..a30d68c4b689 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -224,7 +224,7 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
unsigned int rate;
int ret;
- if (IS_ERR(d->clk) || !old)
+ if (IS_ERR(d->clk))
goto out;
clk_disable_unprepare(d->clk);
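
Dropping the "!old" test means the clock path now also runs on the very first set_termios() call, where the previous-termios pointer is NULL, instead of only on later changes. A compilable sketch of the before/after behavior; struct ctx, struct cfg and reprogram_clock() are hypothetical:

    #include <stddef.h>
    #include <stdio.h>

    struct cfg { int baud; };
    struct ctx { int clk_hz; int clk_broken; };

    static void reprogram_clock(struct ctx *c, int baud)
    {
        c->clk_hz = baud * 16;  /* pretend a 16x oversampling clock */
    }

    static void set_termios(struct ctx *c, const struct cfg *new,
                            const struct cfg *prev)
    {
        (void)prev;
        /* was: if (c->clk_broken || !prev) return; -- which skipped the
         * clock setup on the first call, when prev is NULL */
        if (c->clk_broken)
            return;
        reprogram_clock(c, new->baud);
    }

    int main(void)
    {
        struct ctx c = { 0, 0 };
        struct cfg initial = { 115200 };

        set_termios(&c, &initial, NULL);  /* first call, prev == NULL */
        printf("clk=%d\n", c.clk_hz);     /* clock is now set up */
        return 0;
    }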
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 98705b83d2dc..842c1ae7a291 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -3657,9 +3657,11 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
}
ret = usb_add_gadget_udc(dev, &hsotg->gadget);
- if (ret)
+ if (ret) {
+ dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep,
+ hsotg->ctrl_req);
return ret;
-
+ }
dwc2_hsotg_dump(hsotg);
return 0;
@@ -3672,6 +3674,7 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
int dwc2_hsotg_remove(struct dwc2_hsotg *hsotg)
{
usb_del_gadget_udc(&hsotg->gadget);
+ dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, hsotg->ctrl_req);
return 0;
}
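
The fix above releases the control request when usb_add_gadget_udc() fails and mirrors the release in the remove path, so neither exit leaks it. A standalone sketch of the error-unwind pattern; alloc_req(), free_req() and register_udc() are hypothetical stand-ins:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct dev { void *ctrl_req; };

    static void *alloc_req(void) { return malloc(64); }
    static void free_req(void *r) { free(r); }
    static int register_udc(struct dev *d) { (void)d; return -ENODEV; /* simulate failure */ }

    static int dev_init(struct dev *d)
    {
        int ret;

        d->ctrl_req = alloc_req();
        if (!d->ctrl_req)
            return -ENOMEM;

        ret = register_udc(d);
        if (ret) {
            free_req(d->ctrl_req);  /* undo the earlier allocation */
            return ret;
        }
        return 0;
    }

    static void dev_remove(struct dev *d)
    {
        free_req(d->ctrl_req);      /* mirrors dev_init's allocation */
    }

    int main(void)
    {
        struct dev d;

        if (dev_init(&d) != 0) {
            puts("init failed; request was freed on the error path");
            return 0;
        }
        dev_remove(&d);             /* normal teardown frees it too */
        return 0;
    }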
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index bda0b21b850f..51866f3f2052 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -931,9 +931,8 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
len = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
DWC2_HC_XFER_COMPLETE, NULL);
- if (!len) {
+ if (!len && !qtd->isoc_split_offset) {
qtd->complete_split = 0;
- qtd->isoc_split_offset = 0;
return 0;
}
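
The change narrows the give-up condition: a zero-length split transaction only abandons the frame when no split data has been accumulated yet (isoc_split_offset == 0). A trailing empty chunk after real data is normal and must not discard what was already received, which is also why the offset is no longer cleared here. A small standalone sketch of that accumulate-and-decide logic, with hypothetical names:

    #include <stdio.h>
    #include <string.h>

    /* returns bytes appended for this split; 0 means an empty transaction */
    static int read_split_chunk(const char *src, int done, char *dst)
    {
        int len = (int)strlen(src) - done;

        if (len > 4)
            len = 4;  /* pretend the hardware moves 4 bytes per split */
        memcpy(dst + done, src + done, (size_t)len);
        return len;
    }

    int main(void)
    {
        const char *frame = "12345678";  /* 8 bytes -> two full chunks */
        char buf[16] = { 0 };
        int offset = 0;

        for (;;) {
            int len = read_split_chunk(frame, offset, buf);

            /* only abandon the frame if nothing at all arrived; an
             * empty chunk after data just ends the transfer */
            if (!len && !offset) {
                puts("no data, frame abandoned");
                return 1;
            }
            if (!len)
                break;
            offset += len;
        }
        printf("got %d bytes: %s\n", offset, buf);
        return 0;
    }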
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index ce6a44e136dc..ebbcbe899a34 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1771,6 +1771,8 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
*/
if (w_value && !f->get_alt)
break;
+
+ spin_lock(&cdev->lock);
value = f->set_alt(f, w_index, w_value);
if (value == USB_GADGET_DELAYED_STATUS) {
DBG(cdev,
@@ -1780,6 +1782,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
DBG(cdev, "delayed_status count %d\n",
cdev->delayed_status);
}
+ spin_unlock(&cdev->lock);
break;
case USB_REQ_GET_INTERFACE:
if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE))
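
The hunk above takes cdev->lock around the set_alt() call and the delayed_status accounting, so a completion running concurrently cannot observe or modify delayed_status mid-update. A portable sketch of the same lock-scope rule using pthreads; the names are hypothetical:

    #include <pthread.h>
    #include <stdio.h>

    #define DELAYED_STATUS 1

    static pthread_mutex_t cdev_lock = PTHREAD_MUTEX_INITIALIZER;
    static int delayed_status;

    static int set_alt(int intf, int alt)
    {
        (void)intf; (void)alt;
        return DELAYED_STATUS;  /* pretend the function defers its status */
    }

    static void handle_set_interface(int intf, int alt)
    {
        int value;

        /* the callback and the counter update must be one atomic step
         * with respect to the path that decrements the count */
        pthread_mutex_lock(&cdev_lock);
        value = set_alt(intf, alt);
        if (value == DELAYED_STATUS)
            delayed_status++;
        pthread_mutex_unlock(&cdev_lock);
    }

    int main(void)
    {
        handle_set_interface(0, 1);
        printf("delayed_status=%d\n", delayed_status);
        return 0;
    }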
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index dfc02a6c6d55..91af817278e3 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1075,8 +1075,13 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
command = readl(&xhci->op_regs->command);
command |= CMD_CRS;
writel(command, &xhci->op_regs->command);
+ /*
+ * Some controllers take 55 ms or more to complete the controller
+ * restore, so set the timeout to 100 ms; the xHCI specification
+ * doesn't mention any timeout value.
+ */
if (xhci_handshake(&xhci->op_regs->status,
- STS_RESTORE, 0, 10 * 1000)) {
+ STS_RESTORE, 0, 100 * 1000)) {
xhci_warn(xhci, "WARN: xHC restore state timeout\n");
spin_unlock_irq(&xhci->lock);
return -ETIMEDOUT;
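
The timeout passed to the handshake rises from 10 ms to 100 ms to cover controllers that need 55 ms or more to finish the state restore. A standalone sketch of a polling handshake of this shape (poll a register for a masked value until a microsecond budget runs out); the kernel helper's exact behavior may differ:

    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>

    static int handshake(volatile unsigned *reg, unsigned mask,
                         unsigned done, unsigned timeout_us)
    {
        while (timeout_us--) {
            if ((*reg & mask) == done)
                return 0;
            usleep(1);  /* poll roughly once per microsecond */
        }
        return -ETIMEDOUT;
    }

    int main(void)
    {
        volatile unsigned status = 0;  /* bit never sets: force a timeout */

        if (handshake(&status, 0x1, 0x1, 1000) == -ETIMEDOUT)
            puts("restore timed out");
        return 0;
    }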
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index d982c455e18e..2b81939fecd7 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -199,6 +199,8 @@ static void option_instat_callback(struct urb *urb);
#define DELL_PRODUCT_5800_V2_MINICARD_VZW 0x8196 /* Novatel E362 */
#define DELL_PRODUCT_5804_MINICARD_ATT 0x819b /* Novatel E371 */
+#define DELL_PRODUCT_5821E 0x81d7
+
#define KYOCERA_VENDOR_ID 0x0c88
#define KYOCERA_PRODUCT_KPC650 0x17da
#define KYOCERA_PRODUCT_KPC680 0x180a
@@ -1033,6 +1035,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_MINICARD_VZW, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_V2_MINICARD_VZW, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) },
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E),
+ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
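
The new entry matches the Dell 5821E by vendor and product ID alone and uses driver_info to mark interfaces 0, 1 and 6 as reserved, i.e. belonging to other functions rather than serial ports, so the option driver must not claim them. A sketch of the decode side of such a reserved-interface bitmask, assuming RSVD() sets one bit per interface number as the entries above suggest:

    #include <stdio.h>

    #define RSVD(ifnum)  (1U << (ifnum))  /* one bit per reserved interface */

    static int probe_interface(unsigned reserved, unsigned ifnum)
    {
        if (reserved & RSVD(ifnum)) {
            printf("interface %u reserved, not binding\n", ifnum);
            return -1;  /* leave it for another driver */
        }
        printf("interface %u bound as a serial port\n", ifnum);
        return 0;
    }

    int main(void)
    {
        unsigned reserved = RSVD(0) | RSVD(1) | RSVD(6);
        unsigned i;

        for (i = 0; i < 8; i++)
            probe_interface(reserved, i);
        return 0;
    }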
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 07d1ecd564f7..8960a46c83bb 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -790,9 +790,9 @@ static void sierra_close(struct usb_serial_port *port)
kfree(urb->transfer_buffer);
usb_free_urb(urb);
usb_autopm_put_interface_async(serial->interface);
- spin_lock(&portdata->lock);
+ spin_lock_irq(&portdata->lock);
portdata->outstanding_urbs--;
- spin_unlock(&portdata->lock);
+ spin_unlock_irq(&portdata->lock);
}
sierra_stop_rx_urbs(port);
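
The outstanding_urbs counter is also touched from URB completion (interrupt) context, so process context must take the lock with interrupts disabled; otherwise an interrupt arriving while sierra_close() holds the plain spin_lock could try to take it again on the same CPU and deadlock. A kernel-style sketch of that rule, with stubs so it compiles standalone; the lock primitives here are no-op placeholders, not the real kernel implementations:

    /* stubbed sketch: a lock also taken in IRQ context must be taken
     * with IRQs masked everywhere else */
    typedef struct { int dummy; } spinlock_t;
    static void spin_lock(spinlock_t *l)       { (void)l; }
    static void spin_unlock(spinlock_t *l)     { (void)l; }
    static void spin_lock_irq(spinlock_t *l)   { (void)l; /* + local IRQ disable */ }
    static void spin_unlock_irq(spinlock_t *l) { (void)l; /* + local IRQ enable */ }

    static spinlock_t lock;
    static int outstanding_urbs;

    /* runs in interrupt context: IRQs are already off on this CPU */
    static void urb_complete(void)
    {
        spin_lock(&lock);
        outstanding_urbs++;
        spin_unlock(&lock);
    }

    /* runs in process context: must mask IRQs, or urb_complete() could
     * interrupt us while we hold the lock and spin on it forever */
    static void close_path(void)
    {
        spin_lock_irq(&lock);
        outstanding_urbs--;
        spin_unlock_irq(&lock);
    }

    int main(void)
    {
        urb_complete();
        close_path();
        return outstanding_urbs;  /* 0 */
    }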