Diffstat (limited to 'drivers/mtd/ubi'):
 drivers/mtd/ubi/build.c |  62 ++++++-
 drivers/mtd/ubi/io.c    |  10 ++
 drivers/mtd/ubi/ubi.h   |  12 ++
 drivers/mtd/ubi/wl.c    | 268 ++++++++++++++++++++++-
 4 files changed, 346 insertions(+), 6 deletions(-)
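
As a usage sketch (not part of the patch): the new sysfs files are meant to be driven from user space roughly as below. The attribute names scrub_all and scrub_max_sqnum come from this patch; the ubi0 device node and everything else are illustrative assumptions.

    /* scrub_trigger.c - hypothetical user-space sketch, assuming a ubi0
     * device and sysfs mounted at /sys. Reads the device's current
     * maximum sequence number, then writes it to 'scrub_all'; per the
     * patch, the write blocks until the scrub pass completes. */
    #include <stdio.h>

    int main(void)
    {
    	unsigned long long max_sqnum = 0;
    	FILE *f;

    	f = fopen("/sys/class/ubi/ubi0/scrub_max_sqnum", "r");
    	if (!f)
    		return 1;
    	if (fscanf(f, "%llu", &max_sqnum) != 1) {
    		fclose(f);
    		return 1;
    	}
    	fclose(f);

    	/* Scrub every PEB whose VID-header sqnum is <= max_sqnum. */
    	f = fopen("/sys/class/ubi/ubi0/scrub_all", "w");
    	if (!f)
    		return 1;
    	fprintf(f, "%llu\n", max_sqnum);
    	fclose(f);
    	return 0;
    }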
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index ae8e55b4f6f9..2af841cb7097 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -125,6 +125,9 @@ struct class ubi_class = {
static ssize_t dev_attribute_show(struct device *dev,
struct device_attribute *attr, char *buf);
+static ssize_t dev_attribute_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count);
/* UBI device attributes (correspond to files in '/<sysfs>/class/ubi/ubiX') */
static struct device_attribute dev_eraseblock_size =
@@ -149,6 +152,13 @@ static struct device_attribute dev_bgt_enabled =
__ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_mtd_num =
__ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_mtd_trigger_scrub =
+ __ATTR(scrub_all, S_IRUGO | S_IWUSR,
+ dev_attribute_show, dev_attribute_store);
+static struct device_attribute dev_mtd_max_scrub_sqnum =
+ __ATTR(scrub_max_sqnum, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_mtd_min_scrub_sqnum =
+ __ATTR(scrub_min_sqnum, S_IRUGO, dev_attribute_show, NULL);
/**
* ubi_volume_notify - send a volume change notification.
@@ -341,6 +351,17 @@ int ubi_major2num(int major)
return ubi_num;
}
+static unsigned long long get_max_sqnum(struct ubi_device *ubi)
+{
+ unsigned long long max_sqnum;
+
+ spin_lock(&ubi->ltree_lock);
+ max_sqnum = ubi->global_sqnum - 1;
+ spin_unlock(&ubi->ltree_lock);
+
+ return max_sqnum;
+}
+
/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
static ssize_t dev_attribute_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -385,6 +406,12 @@ static ssize_t dev_attribute_show(struct device *dev,
ret = sprintf(buf, "%d\n", ubi->thread_enabled);
else if (attr == &dev_mtd_num)
ret = sprintf(buf, "%d\n", ubi->mtd->index);
+ else if (attr == &dev_mtd_trigger_scrub)
+ ret = sprintf(buf, "%d\n", atomic_read(&ubi->scrub_work_count));
+ else if (attr == &dev_mtd_max_scrub_sqnum)
+ ret = sprintf(buf, "%llu\n", get_max_sqnum(ubi));
+ else if (attr == &dev_mtd_min_scrub_sqnum)
+ ret = sprintf(buf, "%llu\n", ubi_wl_scrub_get_min_sqnum(ubi));
else
ret = -EINVAL;
@@ -404,10 +431,45 @@ static struct attribute *ubi_dev_attrs[] = {
&dev_min_io_size.attr,
&dev_bgt_enabled.attr,
&dev_mtd_num.attr,
+ &dev_mtd_trigger_scrub.attr,
+ &dev_mtd_max_scrub_sqnum.attr,
+ &dev_mtd_min_scrub_sqnum.attr,
NULL
};
ATTRIBUTE_GROUPS(ubi_dev);
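+/*
+ * "Store" method for files in '/<sysfs>/class/ubi/ubiX/'. Writing a
+ * sequence number to 'scrub_all' schedules every in-use PEB whose
+ * VID-header sqnum is at or below that value for scrubbing.
+ */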
+static ssize_t dev_attribute_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+	int ret = -EINVAL;
+ struct ubi_device *ubi;
+ unsigned long long scrub_sqnum;
+
+ ubi = container_of(dev, struct ubi_device, dev);
+ ubi = ubi_get_device(ubi->ubi_num);
+ if (!ubi)
+ return -ENODEV;
+
+ if (attr == &dev_mtd_trigger_scrub) {
+ if (kstrtoull(buf, 10, &scrub_sqnum)) {
+ ret = -EINVAL;
+ goto out;
+ }
+	if (!ubi->lookuptbl) {
+		ubi_err(ubi, "lookuptbl is NULL");
+		ret = -ENOENT;
+		goto out;
+	}
+ ret = ubi_wl_scrub_all(ubi, scrub_sqnum);
+ if (ret == 0)
+ ret = count;
+ }
+
+out:
+ ubi_put_device(ubi);
+ return ret;
+}
+
static void dev_release(struct device *dev)
{
struct ubi_device *ubi = container_of(dev, struct ubi_device, dev);
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 10cf3b549959..617541e86f80 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -1101,6 +1101,14 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
dbg_io("write VID header to PEB %d", pnum);
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+ /*
+ * Re-erase the PEB before using it. This should minimize any issues
+ * from decay of charge in this block.
+ */
+ err = ubi_wl_erase_peb(ubi, pnum);
+ if (err)
+ return err;
+
err = self_check_peb_ec_hdr(ubi, pnum);
if (err)
return err;
@@ -1120,6 +1128,8 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
p = (char *)vid_hdr - ubi->vid_hdr_shift;
err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset,
ubi->vid_hdr_alsize);
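+	/*
+	 * Record the new VID-header sqnum in the WL entry so that the
+	 * scrub-all logic can later tell old data from fresh data.
+	 */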
+ if (!err)
+ ubi_wl_update_peb_sqnum(ubi, pnum, vid_hdr);
return err;
}
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 05d9ec66437c..61f039d3289e 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -168,6 +168,8 @@ enum {
* @u.list: link in the protection queue
* @ec: erase counter
* @pnum: physical eraseblock number
+ * @tagged_scrub_all: whether the entry is tagged for a scrub-all pass
+ * @sqnum: the sequence number from the PEB's VID header
*
* This data structure is used in the WL sub-system. Each physical eraseblock
* has a corresponding &struct wl_entry object which may be kept in different
@@ -180,6 +182,8 @@ struct ubi_wl_entry {
} u;
int ec;
int pnum;
+ unsigned int tagged_scrub_all:1;
+ unsigned long long sqnum;
};
/**
@@ -594,6 +598,8 @@ struct ubi_device {
struct task_struct *bgt_thread;
int thread_enabled;
char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2];
+ bool scrub_in_progress;
+ atomic_t scrub_work_count;
/* I/O sub-system's stuff */
long long flash_size;
@@ -867,6 +873,12 @@ int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *used_e,
int ubi_is_erase_work(struct ubi_work *wrk);
void ubi_refill_pools(struct ubi_device *ubi);
int ubi_ensure_anchor_pebs(struct ubi_device *ubi);
+ssize_t ubi_wl_scrub_all(struct ubi_device *ubi,
+ unsigned long long scrub_sqnum);
+void ubi_wl_update_peb_sqnum(struct ubi_device *ubi, int pnum,
+ struct ubi_vid_hdr *vid_hdr);
+unsigned long long ubi_wl_scrub_get_min_sqnum(struct ubi_device *ubi);
+int ubi_wl_erase_peb(struct ubi_device *ubi, int pnum);
/* io.c */
int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 2ae0bc3d02f9..c8c39af32daf 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -102,6 +102,7 @@
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
+#include <linux/delay.h>
#include "ubi.h"
#include "wl.h"
@@ -488,6 +489,24 @@ out_free:
}
/**
+ * ubi_wl_erase_peb - synchronously erase a physical eraseblock.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to erase
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+int ubi_wl_erase_peb(struct ubi_device *ubi, int pnum)
+{
+ struct ubi_wl_entry *e;
+
+ spin_lock(&ubi->wl_lock);
+	e = ubi->lookuptbl[pnum];
+	spin_unlock(&ubi->wl_lock);
+	if (!e)
+		return -ENOENT;
+
+	return sync_erase(ubi, e, 0);
+}
+
+/**
* serve_prot_queue - check if it is time to stop protecting PEBs.
* @ubi: UBI device description object
*
@@ -631,6 +650,7 @@ static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
return __erase_worker(ubi, &wl_wrk);
}
+static int ensure_wear_leveling(struct ubi_device *ubi, int nested);
/**
* wear_leveling_worker - wear-leveling worker function.
* @ubi: UBI device description object
@@ -652,6 +672,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
#endif
struct ubi_wl_entry *e1, *e2;
struct ubi_vid_hdr *vid_hdr;
+ int dst_leb_clean = 0;
kfree(wrk);
if (shutdown)
@@ -757,6 +778,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
if (err && err != UBI_IO_BITFLIPS) {
+ dst_leb_clean = 1;
if (err == UBI_IO_FF) {
/*
* We are trying to move PEB without a VID header. UBI
@@ -812,10 +834,12 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
* protection queue.
*/
protect = 1;
+ dst_leb_clean = 1;
goto out_not_moved;
}
if (err == MOVE_RETRY) {
scrubbing = 1;
+ dst_leb_clean = 1;
goto out_not_moved;
}
if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
@@ -842,6 +866,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
ubi->erroneous_peb_count);
goto out_error;
}
+ dst_leb_clean = 1;
erroneous = 1;
goto out_not_moved;
}
@@ -853,9 +878,19 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
}
/* The PEB has been successfully moved */
- if (scrubbing)
- ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
- e1->pnum, vol_id, lnum, e2->pnum);
+ if (scrubbing) {
+ spin_lock(&ubi->wl_lock);
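+		/*
+		 * If this PEB was tagged by a scrub-all trigger, account for
+		 * its completion so the waiting trigger can make progress.
+		 */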
+ if (e1->tagged_scrub_all) {
+ BUG_ON(atomic_read(&ubi->scrub_work_count) <= 0);
+ atomic_dec(&ubi->scrub_work_count);
+ e1->tagged_scrub_all = 0;
+ e2->tagged_scrub_all = 0;
+ } else {
+ ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
+ e1->pnum, vol_id, lnum, e2->pnum);
+ }
+ spin_unlock(&ubi->wl_lock);
+ }
ubi_free_vid_hdr(ubi, vid_hdr);
spin_lock(&ubi->wl_lock);
@@ -913,15 +948,24 @@ out_not_moved:
wl_tree_add(e1, &ubi->scrub);
else if (keep)
wl_tree_add(e1, &ubi->used);
+ if (dst_leb_clean) {
+ wl_tree_add(e2, &ubi->free);
+ ubi->free_count++;
+ }
+
ubi_assert(!ubi->move_to_put);
ubi->move_from = ubi->move_to = NULL;
ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
ubi_free_vid_hdr(ubi, vid_hdr);
- err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
- if (err)
- goto out_ro;
+ if (dst_leb_clean) {
+ ensure_wear_leveling(ubi, 1);
+ } else {
+ err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
+ if (err)
+ goto out_ro;
+ }
if (erase) {
err = do_sync_erase(ubi, e1, vol_id, lnum, 1);
@@ -1208,6 +1252,7 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
retry:
spin_lock(&ubi->wl_lock);
e = ubi->lookuptbl[pnum];
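+	/*
+	 * The PEB is being put, so its recorded sqnum is stale. UBI_UNKNOWN
+	 * becomes ~0 in the unsigned field, keeping this entry out of any
+	 * minimum-sqnum search.
+	 */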
+ e->sqnum = UBI_UNKNOWN;
if (e == ubi->move_from) {
/*
* User is putting the physical eraseblock which was selected to
@@ -1244,6 +1289,20 @@ retry:
} else if (in_wl_tree(e, &ubi->scrub)) {
self_check_in_wl_tree(ubi, e, &ubi->scrub);
rb_erase(&e->u.rb, &ubi->scrub);
+
+ /*
+		 * Since this PEB has been put, we don't need to worry
+		 * about it anymore.
+ */
+ if (e->tagged_scrub_all) {
+ int wrk_count;
+
+ wrk_count = atomic_read(&ubi->scrub_work_count);
+ BUG_ON(wrk_count <= 0);
+
+ atomic_dec(&ubi->scrub_work_count);
+ e->tagged_scrub_all = 0;
+ }
} else if (in_wl_tree(e, &ubi->erroneous)) {
self_check_in_wl_tree(ubi, e, &ubi->erroneous);
rb_erase(&e->u.rb, &ubi->erroneous);
@@ -1276,6 +1335,197 @@ retry:
}
/**
+ * ubi_wl_scrub_get_min_sqnum - return the minimum sqnum of in-use PEBs.
+ * @ubi: UBI device description object
+ *
+ * This function returns the minimum VID-header sqnum of the PEBs that are
+ * currently in use, i.e. those on the used tree, the protection queue, or
+ * the scrub tree.
+ *
+ * Returns the minimum sqnum if there are any used PEBs, otherwise ~0.
+ */
+unsigned long long ubi_wl_scrub_get_min_sqnum(struct ubi_device *ubi)
+{
+ int i;
+ struct ubi_wl_entry *e, *tmp;
+ struct rb_node *node;
+ unsigned long long min_sqnum = ~((unsigned long long)0);
+
+ spin_lock(&ubi->wl_lock);
+
+ /* Go through the pq list */
+ for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
+ list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
+ if (e->sqnum < min_sqnum)
+ min_sqnum = e->sqnum;
+ }
+ }
+
+ /* Go through used PEB tree */
+ for (node = rb_first(&ubi->used); node; node = rb_next(node)) {
+ e = rb_entry(node, struct ubi_wl_entry, u.rb);
+ self_check_in_wl_tree(ubi, e, &ubi->used);
+ if (e->sqnum < min_sqnum)
+ min_sqnum = e->sqnum;
+ }
+ /* Go through scrub PEB tree */
+ for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) {
+ e = rb_entry(node, struct ubi_wl_entry, u.rb);
+ self_check_in_wl_tree(ubi, e, &ubi->scrub);
+ if (e->sqnum < min_sqnum)
+ min_sqnum = e->sqnum;
+ }
+ spin_unlock(&ubi->wl_lock);
+ return min_sqnum;
+}
+
+/**
+ * ubi_wl_update_peb_sqnum - update the sqnum recorded for a PEB.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number
+ * @vid_hdr: the VID header being written to the PEB
+ */
+void ubi_wl_update_peb_sqnum(struct ubi_device *ubi, int pnum,
+ struct ubi_vid_hdr *vid_hdr)
+{
+ struct ubi_wl_entry *e;
+
+ spin_lock(&ubi->wl_lock);
+	e = ubi->lookuptbl[pnum];
+	if (e) {
+		e->sqnum = be64_to_cpu(vid_hdr->sqnum);
+		e->tagged_scrub_all = 0;
+	}
+ spin_unlock(&ubi->wl_lock);
+}
+
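+/*
+ * Scrubbing relies on the background thread, so treat a disabled thread
+ * the same way as read-only mode.
+ */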
+static int is_ubi_readonly(struct ubi_device *ubi)
+{
+ int is_readonly = 0;
+
+ spin_lock(&ubi->wl_lock);
+ if (ubi->ro_mode || !ubi->thread_enabled ||
+ ubi_dbg_is_bgt_disabled(ubi))
+ is_readonly = 1;
+ spin_unlock(&ubi->wl_lock);
+
+ return is_readonly;
+}
+
+/**
+ * ubi_wl_scrub_all - schedule all in-use PEBs for scrubbing.
+ * @ubi: UBI device description object
+ * @scrub_sqnum: the maximum sqnum of the PEBs to scrub from the used/pq lists
+ *
+ * This function schedules every in-use PEB for scrubbing if the sqnum of
+ * its VID header is less than or equal to @scrub_sqnum. PEBs already on
+ * the scrub tree are tagged unconditionally.
+ *
+ * Returns zero in case of success and a negative error code in case of
+ * failure.
+ */
+ssize_t ubi_wl_scrub_all(struct ubi_device *ubi, unsigned long long scrub_sqnum)
+{
+ struct rb_node *node;
+ struct ubi_wl_entry *e, *tmp;
+ int scrub_count = 0;
+ int total_scrub_count = 0;
+ int err, i;
+
+ if (!ubi->lookuptbl) {
+ ubi_err(ubi, "lookuptbl is null");
+ return -ENOENT;
+ }
+
+ if (is_ubi_readonly(ubi)) {
+ ubi_err(ubi, "Cannot *Initiate* scrub:background thread disabled or readonly!");
+ return -EROFS;
+ }
+
+ /* Wait for all currently running work to be done! */
+ down_write(&ubi->work_sem);
+ spin_lock(&ubi->wl_lock);
+ ubi_msg(ubi, "Scrub triggered sqnum = %llu!", scrub_sqnum);
+
+ if (ubi->scrub_in_progress) {
+ ubi_err(ubi, "Scrub already in progress, ignoring the trigger");
+ spin_unlock(&ubi->wl_lock);
+ up_write(&ubi->work_sem); /* Allow new work to start. */
+ return -EBUSY;
+ }
+ ubi->scrub_in_progress = true;
+
+ /* Go through scrub PEB tree and count pending */
+ for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) {
+ e = rb_entry(node, struct ubi_wl_entry, u.rb);
+ self_check_in_wl_tree(ubi, e, &ubi->scrub);
+ e->tagged_scrub_all = 1;
+ total_scrub_count++;
+ }
+
+	/* Move all used PEBs to the scrub tree */
+ node = rb_first(&ubi->used);
+ while (node != NULL) {
+ e = rb_entry(node, struct ubi_wl_entry, u.rb);
+ self_check_in_wl_tree(ubi, e, &ubi->used);
+ node = rb_next(node);
+
+ if (e->sqnum > scrub_sqnum)
+ continue;
+ rb_erase(&e->u.rb, &ubi->used);
+ wl_tree_add(e, &ubi->scrub);
+ e->tagged_scrub_all = 1;
+ scrub_count++;
+ total_scrub_count++;
+ }
+
+	/* Move all protected PEBs to the scrub tree */
+ for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
+ list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
+
+ if (e->sqnum > scrub_sqnum)
+ continue;
+
+ list_del(&e->u.list);
+ wl_tree_add(e, &ubi->scrub);
+ e->tagged_scrub_all = 1;
+ scrub_count++;
+ total_scrub_count++;
+ }
+ }
+
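+	/*
+	 * Publish the outstanding count; wear_leveling_worker() and
+	 * ubi_wl_put_peb() decrement it as tagged PEBs are retired.
+	 */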
+ atomic_set(&ubi->scrub_work_count, total_scrub_count);
+ spin_unlock(&ubi->wl_lock);
+ up_write(&ubi->work_sem); /* Allow new work to start. */
+
+ /*
+ * Technically scrubbing is the same as wear-leveling, so it is done
+ * by the WL worker.
+ */
+ err = ensure_wear_leveling(ubi, 0);
+ if (err) {
+ ubi_err(ubi, "Failed to start the WL worker err =%d", err);
+ return err;
+ }
+ ubi_msg(ubi, "Scheduled %d PEB's for scrubbing!", scrub_count);
+ ubi_msg(ubi, "Total PEB's for scrub = %d", total_scrub_count);
+
+ /* Wait for scrub to finish */
+ while (atomic_read(&ubi->scrub_work_count) > 0) {
+ /* Poll every second to check if the scrub work is done */
+ msleep(1000);
+
+ if (is_ubi_readonly(ubi)) {
+ ubi_err(ubi, "Cannot *Complete* scrub:background thread disabled or readonly!");
+ return -EROFS;
+ }
+ wake_up_process(ubi->bgt_thread);
+ }
+
+ spin_lock(&ubi->wl_lock);
+ ubi->scrub_in_progress = false;
+ spin_unlock(&ubi->wl_lock);
+ ubi_msg(ubi, "Done scrubbing %d PEB's!", scrub_count);
+ return 0;
+}
+
+/**
* ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
* @ubi: UBI device description object
* @pnum: the physical eraseblock to schedule
@@ -1538,6 +1788,8 @@ static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync
e->pnum = aeb->pnum;
e->ec = aeb->ec;
+ e->tagged_scrub_all = 0;
+ e->sqnum = aeb->sqnum;
ubi->lookuptbl[e->pnum] = e;
if (sync) {
@@ -1617,6 +1869,8 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
e->pnum = aeb->pnum;
e->ec = aeb->ec;
+ e->tagged_scrub_all = 0;
+ e->sqnum = aeb->sqnum;
ubi_assert(e->ec >= 0);
wl_tree_add(e, &ubi->free);
@@ -1639,6 +1893,8 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
e->pnum = aeb->pnum;
e->ec = aeb->ec;
+ e->tagged_scrub_all = 0;
+ e->sqnum = aeb->sqnum;
ubi->lookuptbl[e->pnum] = e;
if (!aeb->scrub) {