[Date Prev][Date Next]   [Thread Prev][Thread Next]   [Thread Index] [Date Index] [Author Index]

[dm-devel] [PATCH] dm-cache mq policy fast warmup



dm-cache mq policy: Fast warm-up

 

The multiqueue policy promotes a block only after it has been hit promote_threshold + 4 (READ_PROMOTE_THRESHOLD) times on read I/O. It would be useful if the policy promoted a block on its first reference in the following use cases.

 

1. A storage array has information about which blocks are heavily used and can pass it to the server for warming up the cache. (A process running on the server can simply read those blocks while the device is idle.)

2. A database application or workload-capture tool knows where the hot data is and can promote it before its actual use.

3. To replace a cache device

 

The patch below allows a block to be promoted on its first reference, but only when requested by the user via a dmsetup message. It could also be changed to allow promotion based on a particular process or process group.

 

Signed-off-by: Somasundaram, Krishnasamy <Somasundaram Krishnasamy netapp com>

Reviewed-by: Yanling, Qi <Yanling Qi netapp com>

---

diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c

index 7f1aaa3..9653a3c 100644

--- a/drivers/md/dm-cache-policy-mq.c

+++ b/drivers/md/dm-cache-policy-mq.c

@@ -390,6 +390,10 @@ struct mq_policy {

     */

    unsigned promote_threshold;

+    /* Allow the blocks to be promoted during first reference itself

+    */

+    unsigned cachefill_enabled;

+

    /*

     * The hash table allows us to quickly find an entry by origin

     * block.  Both pre_cache and cache entries are in here.

@@ -648,6 +652,9 @@ static int demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)

static unsigned adjusted_promote_threshold(struct mq_policy *mq,

                             bool discarded_oblock, int data_dir)

{

+    if (mq->cachefill_enabled)

+          return 1;

+

    if (data_dir == READ)

          return mq->promote_threshold + READ_PROMOTE_THRESHOLD;

@@ -835,7 +842,8 @@ static int map(struct mq_policy *mq, dm_oblock_t oblock,

    if (e && in_cache(mq, e))

          r = cache_entry_found(mq, e, result);

-    else if (iot_pattern(&mq->tracker) == PATTERN_SEQUENTIAL)

+    else if (iot_pattern(&mq->tracker) == PATTERN_SEQUENTIAL &&

+          !mq->cachefill_enabled)

          result->op = POLICY_MISS;

     else if (e)

@@ -1134,21 +1142,22 @@ static int mq_set_config_value(struct dm_cache_policy *p,

                      const char *key, const char *value)

{

    struct mq_policy *mq = to_mq_policy(p);

-    enum io_pattern pattern;

    unsigned long tmp;

+    if (kstrtoul(value, 10, &tmp))

+          return -EINVAL;

+

    if (!strcasecmp(key, "random_threshold"))

-          pattern = PATTERN_RANDOM;

+          mq->tracker.thresholds[PATTERN_RANDOM] = tmp;

    else if (!strcasecmp(key, "sequential_threshold"))

-          pattern = PATTERN_SEQUENTIAL;

+          mq->tracker.thresholds[PATTERN_SEQUENTIAL] = tmp;

+    else if (!strcasecmp(key, "enable_cachefill"))

+          mq->cachefill_enabled = true;

+    else if (!strcasecmp(key, "disable_cachefill"))

+          mq->cachefill_enabled = false;

    else

          return -EINVAL;

-    if (kstrtoul(value, 10, &tmp))

-          return -EINVAL;

-

-    mq->tracker.thresholds[pattern] = tmp;

-

    return 0;

}

@@ -1212,6 +1221,7 @@ static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,

    mq->hit_count = 0;

    mq->generation = 0;

    mq->promote_threshold = 0;

+    mq->cachefill_enabled = 0;

    mutex_init(&mq->lock);

    spin_lock_init(&mq->tick_lock);

 

 


[Date Prev][Date Next]   [Thread Prev][Thread Next]   [Thread Index] [Date Index] [Author Index]