feat:big key metricx #3000

Open
wants to merge 1 commit into base: unstable
2 changes: 2 additions & 0 deletions include/pika_server.h
@@ -310,6 +310,8 @@ class PikaServer : public pstd::noncopyable {
// info debug use
void ServerStatus(std::string* info);

std::unordered_map<std::string, int> GetBigKeyStatistics();

/*
* Async migrate used
*/
5 changes: 5 additions & 0 deletions src/pika_admin.cc
@@ -1091,6 +1091,11 @@ void InfoCmd::InfoStats(std::string& info) {
tmp_stream << "is_compact:" << (g_pika_server->IsCompacting() ? "Yes" : "No") << "\r\n";
tmp_stream << "compact_cron:" << g_pika_conf->compact_cron() << "\r\n";
tmp_stream << "compact_interval:" << g_pika_conf->compact_interval() << "\r\n";
tmp_stream << "# Big Key Statistics\r\n";
auto big_key_stats = g_pika_server->GetBigKeyStatistics();
for (const auto& entry : big_key_stats) {
tmp_stream << "key:" << entry.first << ", access_count:" << entry.second << "\r\n";
}
time_t current_time_s = time(nullptr);
PikaServer::BGSlotsReload bgslotsreload_info = g_pika_server->bgslots_reload();
bool is_reloading = g_pika_server->GetSlotsreloading();
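
For reference, the loop added above emits one line per tracked key into the INFO stats section. A hypothetical rendering of the new block (key names and counts invented purely for illustration) might look like:

# Big Key Statistics
key:user:1001:followers, access_count:17
key:hot:comments, access_count:5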
18 changes: 18 additions & 0 deletions src/pika_server.cc
@@ -803,6 +803,24 @@ void PikaServer::PurgeDir(const std::string& path) {
PurgeDirTaskSchedule(&DoPurgeDir, static_cast<void*>(dir_path));
}

std::unordered_map<std::string, int> PikaServer::GetBigKeyStatistics() {
  std::unordered_map<std::string, int> aggregated_stats;

  for (const auto& db_entry : dbs_) {
    auto db_name = db_entry.first;
    auto db_instance = db_entry.second;

    if (db_instance) {
      auto stats = g_pika_server->GetBigKeyStatistics();
      for (const auto& entry : stats) {
        aggregated_stats[entry.first] += entry.second;
      }
    }
  }

  return aggregated_stats;
}
Comment on lines +806 to +822
⚠️ Potential issue

Fix infinite recursion in GetBigKeyStatistics

The method has a critical infinite recursion bug: on line 814 it calls g_pika_server->GetBigKeyStatistics(), i.e. itself, which will overflow the stack.

Apply this fix to resolve the recursion:

std::unordered_map<std::string, int> PikaServer::GetBigKeyStatistics() {
  std::unordered_map<std::string, int> aggregated_stats;

  for (const auto& db_entry : dbs_) {
    auto db_name = db_entry.first;
    auto db_instance = db_entry.second;

    if (db_instance) {
-     auto stats = g_pika_server->GetBigKeyStatistics();
+     auto stats = db_instance->storage()->GetBigKeyStatistics();
      for (const auto& entry : stats) {
        aggregated_stats[entry.first] += entry.second;
      }
    }
  }

  return aggregated_stats;
}

void PikaServer::PurgeDirTaskSchedule(void (*function)(void*), void* arg) {
purge_thread_.StartThread();
19 changes: 19 additions & 0 deletions src/storage/src/redis.h
@@ -359,6 +359,22 @@ class Redis {
void ScanZsets();
void ScanSets();

void CheckBigKeyAndLog(const std::string& key, uint64_t size) {
  static const uint64_t kBigKeyThreshold = 10000;
  if (size > kBigKeyThreshold) {
    std::lock_guard<std::mutex> lock(big_key_access_mutex_);
    big_key_access_count_[key]++;
    std::cerr << "[BIGKEY DETECTED] Key: " << key
              << ", Size: " << size
              << ", Access Count: " << big_key_access_count_[key] << std::endl;
  }
}
Comment on lines +362 to +371
🛠️ Refactor suggestion

Consider improving the big key monitoring implementation

The current implementation has several areas that could be enhanced:

  1. The threshold should be configurable rather than hardcoded
  2. Logging to stderr may not be suitable for production environments
  3. Consider adding rate limiting to prevent excessive logging
  4. Add a cleanup mechanism for big_key_access_count_ to prevent unbounded memory growth

Consider these improvements:

+  // Configurable threshold with a reasonable default
+  static const uint64_t kDefaultBigKeyThreshold = 10000;
+  uint64_t big_key_threshold_;
+
   void CheckBigKeyAndLog(const std::string& key, uint64_t size) {
-    static const uint64_t kBigKeyThreshold = 10000;
-    if (size > kBigKeyThreshold) {
+    if (size > big_key_threshold_) {
       std::lock_guard<std::mutex> lock(big_key_access_mutex_);
+      
+      // Rate limit logging using a token bucket
+      if (ShouldLog()) {
         big_key_access_count_[key]++;
-        std::cerr << "[BIGKEY DETECTED] Key: " << key
-                << ", Size: " << size
-                << ", Access Count: " << big_key_access_count_[key] << std::endl;
+        LOG(WARNING) << "[BIGKEY DETECTED] Key: " << key
+                    << ", Size: " << size
+                    << ", Access Count: " << big_key_access_count_[key];
+      }
+      
+      // Cleanup old entries periodically
+      MaybeCleanupBigKeyMap();
     }
   }

Committable suggestion skipped: line range outside the PR's diff.
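
Note: the suggested diff above references ShouldLog() and MaybeCleanupBigKeyMap(), which are not defined anywhere in this PR. A minimal sketch of what such helpers might look like, assuming a one-second logging interval and a hard cap on the number of tracked keys (all names and thresholds below are illustrative, not part of the change):

#include <chrono>
#include <cstddef>
#include <string>
#include <unordered_map>

// Illustrative sketch only; in redis.h these would sit next to
// big_key_access_count_ / big_key_access_mutex_ and be called while that
// mutex is already held, as in the suggested CheckBigKeyAndLog body.
struct BigKeyLogState {
  std::unordered_map<std::string, int> big_key_access_count_;
  std::chrono::steady_clock::time_point last_log_time_{};

  // Allow at most one log line per second (a single-token bucket).
  bool ShouldLog() {
    auto now = std::chrono::steady_clock::now();
    if (now - last_log_time_ >= std::chrono::seconds(1)) {
      last_log_time_ = now;
      return true;
    }
    return false;
  }

  // Clear the counters once the map grows past an arbitrary cap so the
  // statistics map cannot grow without bound.
  void MaybeCleanupBigKeyMap() {
    static constexpr std::size_t kMaxTrackedBigKeys = 10000;
    if (big_key_access_count_.size() > kMaxTrackedBigKeys) {
      big_key_access_count_.clear();
    }
  }
};

Clearing the whole map is the bluntest policy; LRU-style eviction or periodic decay would preserve more history at slightly higher cost.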


std::unordered_map<std::string, int> GetBigKeyStatistics() {
  std::lock_guard<std::mutex> lock(big_key_access_mutex_);  // ensure thread safety
  return big_key_access_count_;
}

TypeIterator* CreateIterator(const DataType& type, const std::string& pattern, const Slice* lower_bound, const Slice* upper_bound) {
return CreateIterator(DataTypeTag[static_cast<int>(type)], pattern, lower_bound, upper_bound);
}
@@ -538,6 +554,9 @@ class Redis {
Status UpdateSpecificKeyStatistics(const DataType& dtype, const std::string& key, uint64_t count);
Status UpdateSpecificKeyDuration(const DataType& dtype, const std::string& key, uint64_t duration);
Status AddCompactKeyTaskIfNeeded(const DataType& dtype, const std::string& key, uint64_t count, uint64_t duration);

std::unordered_map<std::string, int> big_key_access_count_;
std::mutex big_key_access_mutex_;
};

} // namespace storage
22 changes: 22 additions & 0 deletions src/storage/src/redis_hashes.cc
@@ -95,6 +95,7 @@ Status Redis::HDel(const Slice& key, const std::vector<std::string>& fields, int
}
if (s.ok()) {
ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_hashes_meta_value.Count());
🛠️ Refactor suggestion

Verify consistent placement of CheckBigKeyAndLog calls.

The CheckBigKeyAndLog function has been added consistently across all hash operations. The placement and usage pattern matches that of the list operations, which is good for maintainability.

However, there's one inconsistency in the HashesExpire method at line 1198 where the CheckBigKeyAndLog call is placed after the stale check, unlike other methods.

Move the CheckBigKeyAndLog call before the stale check for consistency:

  if (s.ok()) {
    ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value);
    CheckBigKeyAndLog(key.ToString(), parsed_hashes_meta_value.Count());
-   if (parsed_hashes_meta_value.IsStale()) {
-     return Status::NotFound("Stale");
-   } else if (parsed_hashes_meta_value.Count() == 0) {
-     return Status::NotFound();
-   }
+   if (parsed_hashes_meta_value.IsStale()) {
+     return Status::NotFound("Stale");
+   } else if (parsed_hashes_meta_value.Count() == 0) {
+     return Status::NotFound();
+   }

Also applies to: 163-163, 204-204, 248-248, 306-306, 400-400, 485-485, 529-529, 569-569, 634-634, 710-710, 786-786, 850-850, 914-914, 992-992, 1063-1063, 1135-1135, 1198-1198, 1239-1239, 1277-1277, 1317-1317, 1357-1357

if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) {
*ret = 0;
return Status::OK();
@@ -159,6 +160,7 @@ Status Redis::HGet(const Slice& key, const Slice& field, std::string* value) {
}
if (s.ok()) {
ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_hashes_meta_value.Count());
if (parsed_hashes_meta_value.IsStale()) {
return Status::NotFound("Stale");
} else if (parsed_hashes_meta_value.Count() == 0) {
@@ -199,6 +201,7 @@ Status Redis::HGetall(const Slice& key, std::vector<FieldValue>* fvs) {
}
if (s.ok()) {
ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_hashes_meta_value.Count());
if (parsed_hashes_meta_value.IsStale()) {
return Status::NotFound("Stale");
} else if (parsed_hashes_meta_value.Count() == 0) {
@@ -242,6 +245,7 @@ Status Redis::HGetallWithTTL(const Slice& key, std::vector<FieldValue>* fvs, int
}
if (s.ok()) {
ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_hashes_meta_value.Count());
if (parsed_hashes_meta_value.Count() == 0) {
return Status::NotFound();
} else if (parsed_hashes_meta_value.IsStale()) {
@@ -299,6 +303,7 @@ Status Redis::HIncrby(const Slice& key, const Slice& field, int64_t value, int64
}
if (s.ok()) {
ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_hashes_meta_value.Count());
if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) {
version = parsed_hashes_meta_value.UpdateVersion();
parsed_hashes_meta_value.SetCount(1);
@@ -392,6 +397,7 @@ Status Redis::HIncrbyfloat(const Slice& key, const Slice& field, const Slice& by
}
if (s.ok()) {
ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_hashes_meta_value.Count());
if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) {
version = parsed_hashes_meta_value.UpdateVersion();
parsed_hashes_meta_value.SetCount(1);
@@ -476,6 +482,7 @@ Status Redis::HKeys(const Slice& key, std::vector<std::string>* fields) {
}
if (s.ok()) {
ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_hashes_meta_value.Count());
if (parsed_hashes_meta_value.IsStale()) {
return Status::NotFound("Stale");
} else if (parsed_hashes_meta_value.Count() == 0) {
@@ -519,6 +526,7 @@ Status Redis::HLen(const Slice& key, int32_t* ret, std::string&& prefetch_meta)
}
if (s.ok()) {
ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_hashes_meta_value.Count());
if (parsed_hashes_meta_value.IsStale()) {
*ret = 0;
return Status::NotFound("Stale");
@@ -558,6 +566,7 @@ Status Redis::HMGet(const Slice& key, const std::vector<std::string>& fields, st
}
if (s.ok()) {
ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_hashes_meta_value.Count());
if ((is_stale = parsed_hashes_meta_value.IsStale()) || parsed_hashes_meta_value.Count() == 0) {
for (size_t idx = 0; idx < fields.size(); ++idx) {
vss->push_back({std::string(), Status::NotFound()});
@@ -622,6 +631,7 @@ Status Redis::HMSet(const Slice& key, const std::vector<FieldValue>& fvs) {
}
if (s.ok()) {
ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_hashes_meta_value.Count());
if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) {
version = parsed_hashes_meta_value.InitialMetaValue();
if (!parsed_hashes_meta_value.check_set_count(static_cast<int32_t>(filtered_fvs.size()))) {
@@ -697,6 +707,7 @@ Status Redis::HSet(const Slice& key, const Slice& field, const Slice& value, int
}
if (s.ok()) {
ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_hashes_meta_value.Count());
if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) {
version = parsed_hashes_meta_value.InitialMetaValue();
parsed_hashes_meta_value.SetCount(1);
@@ -772,6 +783,7 @@ Status Redis::HSetnx(const Slice& key, const Slice& field, const Slice& value, i
}
if (s.ok()) {
ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_hashes_meta_value.Count());
if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) {
version = parsed_hashes_meta_value.InitialMetaValue();
parsed_hashes_meta_value.SetCount(1);
@@ -835,6 +847,7 @@ Status Redis::HVals(const Slice& key, std::vector<std::string>* values) {
}
if (s.ok()) {
ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_hashes_meta_value.Count());
if (parsed_hashes_meta_value.IsStale()) {
return Status::NotFound("Stale");
} else if (parsed_hashes_meta_value.Count() == 0) {
@@ -898,6 +911,7 @@ Status Redis::HScan(const Slice& key, int64_t cursor, const std::string& pattern
}
if (s.ok()) {
ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_hashes_meta_value.Count());
if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) {
*next_cursor = 0;
return Status::NotFound();
@@ -975,6 +989,7 @@ Status Redis::HScanx(const Slice& key, const std::string& start_field, const std
}
if (s.ok()) {
ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_hashes_meta_value.Count());
if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) {
*next_field = "";
return Status::NotFound();
@@ -1045,6 +1060,7 @@ Status Redis::PKHScanRange(const Slice& key, const Slice& field_start, const std
}
if (s.ok()) {
ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_hashes_meta_value.Count());
if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) {
return Status::NotFound();
} else {
@@ -1116,6 +1132,7 @@ Status Redis::PKHRScanRange(const Slice& key, const Slice& field_start, const st
}
if (s.ok()) {
ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_hashes_meta_value.Count());
if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) {
return Status::NotFound();
} else {
@@ -1178,6 +1195,7 @@ Status Redis::HashesExpire(const Slice& key, int64_t ttl_millsec, std::string&&
}
if (s.ok()) {
ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_hashes_meta_value.Count());
if (parsed_hashes_meta_value.IsStale()) {
return Status::NotFound("Stale");
} else if (parsed_hashes_meta_value.Count() == 0) {
@@ -1218,6 +1236,7 @@ Status Redis::HashesDel(const Slice& key, std::string&& prefetch_meta) {
}
if (s.ok()) {
ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_hashes_meta_value.Count());
if (parsed_hashes_meta_value.IsStale()) {
return Status::NotFound("Stale");
} else if (parsed_hashes_meta_value.Count() == 0) {
@@ -1255,6 +1274,7 @@ Status Redis::HashesExpireat(const Slice& key, int64_t timestamp_millsec, std::s
}
if (s.ok()) {
ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_hashes_meta_value.Count());
if (parsed_hashes_meta_value.IsStale()) {
return Status::NotFound("Stale");
} else if (parsed_hashes_meta_value.Count() == 0) {
@@ -1294,6 +1314,7 @@ Status Redis::HashesPersist(const Slice& key, std::string&& prefetch_meta) {
}
if (s.ok()) {
ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_hashes_meta_value.Count());
if (parsed_hashes_meta_value.IsStale()) {
return Status::NotFound("Stale");
} else if (parsed_hashes_meta_value.Count() == 0) {
@@ -1333,6 +1354,7 @@ Status Redis::HashesTTL(const Slice& key, int64_t* ttl_millsec, std::string&& pr
}
if (s.ok()) {
ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_hashes_meta_value.Count());
if (parsed_hashes_meta_value.IsStale()) {
*ttl_millsec = -2;
return Status::NotFound("Stale");
17 changes: 17 additions & 0 deletions src/storage/src/redis_lists.cc
@@ -79,6 +79,7 @@ Status Redis::LIndex(const Slice& key, int64_t index, std::string* element) {
}
if (s.ok()) {
ParsedListsMetaValue parsed_lists_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_lists_meta_value.Count());
💡 Codebase verification

🛠️ Refactor suggestion

Verification confirms missing CheckBigKeyAndLog call in ListsExpire

The review comment is correct. The CheckBigKeyAndLog function is consistently used across all Redis data type operations, including expire methods, but is missing in ListsExpire. The call should be added after parsing the meta value but before checking the stale state, matching the pattern used in other methods.

The suggested fix in the review comment is accurate:

  if (s.ok()) {
    ParsedListsMetaValue parsed_lists_meta_value(&meta_value);
+   CheckBigKeyAndLog(key.ToString(), parsed_lists_meta_value.Count());
    if (parsed_lists_meta_value.IsStale()) {
      return Status::NotFound("Stale");
🔗 Analysis chain

Verify consistent placement of CheckBigKeyAndLog calls.

The CheckBigKeyAndLog function has been added consistently across all list operations. The function is called at the right location - after parsing the meta value but before checking the stale state, which ensures accurate tracking of large keys before any operation is performed.

However, there's one inconsistency in the ListsExpire method at line 1133 where the CheckBigKeyAndLog call is missing.

Let's verify if this is intentional by checking the implementation in other data structures:

Add the missing CheckBigKeyAndLog call in ListsExpire for consistency:

  if (s.ok()) {
    ParsedListsMetaValue parsed_lists_meta_value(&meta_value);
+   CheckBigKeyAndLog(key.ToString(), parsed_lists_meta_value.Count());

    if (parsed_lists_meta_value.IsStale()) {
      return Status::NotFound("Stale");

Also applies to: 127-127, 163-163, 204-204, 244-244, 280-280, 339-339, 396-396, 441-441, 502-502, 572-572, 705-705, 750-750, 824-824, 1133-1133, 1174-1174, 1212-1212, 1252-1252, 1292-1292

🏁 Scripts executed

The following scripts were executed for the analysis:

Script:

#!/bin/bash
# Check if other expire methods also skip the CheckBigKeyAndLog call
rg -A 5 "Expire.*\{" | rg -B 5 "CheckBigKeyAndLog"

Length of output: 50


Script:

#!/bin/bash
# Find expire method implementations
ast-grep --pattern 'Expire($$$) {
  $$$
}'

# Also search with ripgrep to be thorough
rg -B 5 -A 10 "Status.*Expire.*\{" src/

Length of output: 19891


Script:

#!/bin/bash
# Check for CheckBigKeyAndLog usage in expire methods
rg -B 5 -A 10 "CheckBigKeyAndLog" src/storage/src/redis_*

Length of output: 83683


Script:

#!/bin/bash
# Check ListsExpire implementation
rg -B 10 -A 10 "ListsExpire.*\{" src/storage/src/redis_lists.cc

Length of output: 1730

uint64_t version = parsed_lists_meta_value.Version();
if (parsed_lists_meta_value.IsStale()) {
return Status::NotFound("Stale");
@@ -123,6 +124,7 @@ Status Redis::LInsert(const Slice& key, const BeforeOrAfter& before_or_after, co
}
if (s.ok()) {
ParsedListsMetaValue parsed_lists_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_lists_meta_value.Count());
if (parsed_lists_meta_value.IsStale()) {
return Status::NotFound("Stale");
} else if (parsed_lists_meta_value.Count() == 0) {
@@ -239,6 +241,7 @@ Status Redis::LLen(const Slice& key, uint64_t* len, std::string&& prefetch_meta)
}
if (s.ok()) {
ParsedListsMetaValue parsed_lists_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_lists_meta_value.Count());
if (parsed_lists_meta_value.IsStale()) {
return Status::NotFound("Stale");
} else if (parsed_lists_meta_value.Count() == 0) {
@@ -274,6 +277,7 @@ Status Redis::LPop(const Slice& key, int64_t count, std::vector<std::string>* el
}
if (s.ok()) {
ParsedListsMetaValue parsed_lists_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_lists_meta_value.Count());
if (parsed_lists_meta_value.IsStale()) {
return Status::NotFound("Stale");
} else if (parsed_lists_meta_value.Count() == 0) {
@@ -332,6 +336,7 @@ Status Redis::LPush(const Slice& key, const std::vector<std::string>& values, ui
}
if (s.ok()) {
ParsedListsMetaValue parsed_lists_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_lists_meta_value.Count());
if (parsed_lists_meta_value.IsStale() || parsed_lists_meta_value.Count() == 0) {
version = parsed_lists_meta_value.InitialMetaValue();
} else {
@@ -388,6 +393,7 @@ Status Redis::LPushx(const Slice& key, const std::vector<std::string>& values, u
}
if (s.ok()) {
ParsedListsMetaValue parsed_lists_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_lists_meta_value.Count());
if (parsed_lists_meta_value.IsStale()) {
return Status::NotFound("Stale");
} else if (parsed_lists_meta_value.Count() == 0) {
@@ -432,6 +438,7 @@ Status Redis::LRange(const Slice& key, int64_t start, int64_t stop, std::vector<
}
if (s.ok()) {
ParsedListsMetaValue parsed_lists_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_lists_meta_value.Count());
if (parsed_lists_meta_value.IsStale()) {
return Status::NotFound("Stale");
} else if (parsed_lists_meta_value.Count() == 0) {
@@ -492,6 +499,7 @@ Status Redis::LRangeWithTTL(const Slice& key, int64_t start, int64_t stop, std::
}
if (s.ok()) {
ParsedListsMetaValue parsed_lists_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_lists_meta_value.Count());
if (parsed_lists_meta_value.Count() == 0) {
return Status::NotFound();
} else if (parsed_lists_meta_value.IsStale()) {
@@ -561,6 +569,7 @@ Status Redis::LRem(const Slice& key, int64_t count, const Slice& value, uint64_t
}
if (s.ok()) {
ParsedListsMetaValue parsed_lists_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_lists_meta_value.Count());
if (parsed_lists_meta_value.IsStale()) {
return Status::NotFound("Stale");
} else if (parsed_lists_meta_value.Count() == 0) {
@@ -693,6 +702,7 @@ Status Redis::LSet(const Slice& key, int64_t index, const Slice& value) {
}
if (s.ok()) {
ParsedListsMetaValue parsed_lists_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_lists_meta_value.Count());
if (parsed_lists_meta_value.IsStale()) {
return Status::NotFound("Stale");
} else if (parsed_lists_meta_value.Count() == 0) {
@@ -737,6 +747,7 @@ Status Redis::LTrim(const Slice& key, int64_t start, int64_t stop) {
}
if (s.ok()) {
ParsedListsMetaValue parsed_lists_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_lists_meta_value.Count());
uint64_t version = parsed_lists_meta_value.Version();
if (parsed_lists_meta_value.IsStale()) {
return Status::NotFound("Stale");
@@ -810,6 +821,7 @@ Status Redis::RPop(const Slice& key, int64_t count, std::vector<std::string>* el
}
if (s.ok()) {
ParsedListsMetaValue parsed_lists_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_lists_meta_value.Count());
if (parsed_lists_meta_value.IsStale()) {
return Status::NotFound("Stale");
} else if (parsed_lists_meta_value.Count() == 0) {
@@ -1118,6 +1130,7 @@ Status Redis::ListsExpire(const Slice& key, int64_t ttl_millsec, std::string&& p
}
if (s.ok()) {
ParsedListsMetaValue parsed_lists_meta_value(&meta_value);

if (parsed_lists_meta_value.IsStale()) {
return Status::NotFound("Stale");
} else if (parsed_lists_meta_value.Count() == 0) {
@@ -1158,6 +1171,7 @@ Status Redis::ListsDel(const Slice& key, std::string&& prefetch_meta) {
}
if (s.ok()) {
ParsedListsMetaValue parsed_lists_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_lists_meta_value.Count());
if (parsed_lists_meta_value.IsStale()) {
return Status::NotFound("Stale");
} else if (parsed_lists_meta_value.Count() == 0) {
@@ -1195,6 +1209,7 @@ Status Redis::ListsExpireat(const Slice& key, int64_t timestamp_millsec, std::st
}
if (s.ok()) {
ParsedListsMetaValue parsed_lists_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_lists_meta_value.Count());
if (parsed_lists_meta_value.IsStale()) {
return Status::NotFound("Stale");
} else if (parsed_lists_meta_value.Count() == 0) {
@@ -1234,6 +1249,7 @@ Status Redis::ListsPersist(const Slice& key, std::string&& prefetch_meta) {
}
if (s.ok()) {
ParsedListsMetaValue parsed_lists_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_lists_meta_value.Count());
if (parsed_lists_meta_value.IsStale()) {
return Status::NotFound("Stale");
} else if (parsed_lists_meta_value.Count() == 0) {
@@ -1273,6 +1289,7 @@ Status Redis::ListsTTL(const Slice& key, int64_t* ttl_millsec, std::string&& pre
}
if (s.ok()) {
ParsedListsMetaValue parsed_lists_meta_value(&meta_value);
CheckBigKeyAndLog(key.ToString(), parsed_lists_meta_value.Count());
if (parsed_lists_meta_value.IsStale()) {
*ttl_millsec = -2;
return Status::NotFound("Stale");